Compare commits

24 Commits

SHA1        Date                         Author    Message
6cad2d9422  2022-11-12 13:38:11 +01:00   dgtlmoon  Re-test under HIDE_REFERER, use strtobool so you can use 'False'
359dcb63e3  2022-11-10 20:01:07 +01:00   dgtlmoon  Stability fix related to the new watch check count (#1113)
b043d477dc  2022-11-08 12:18:38 +01:00   dgtlmoon  Use deepcopy to stop possible data corruption (#1108)
06bcfb28e5  2022-11-07 20:43:20 +01:00   dgtlmoon  Code- Use dict .get instead of key
ca3b351bae  2022-11-06 09:48:07 +01:00   dgtlmoon  Adding a check counter to watch fetching (#1099)
b7e0f0a5e4  2022-11-05 12:22:52 +01:00   dgtlmoon  Update README.md
61f0ac2937  2022-11-04 23:46:03 +01:00   dgtlmoon  HIDE_REFERER incompatible with password based login, added comment to code #996
fca66eb558  2022-11-03 14:29:38 +01:00   dgtlmoon  Update README.md
359fc48fb4  2022-11-03 12:13:54 +01:00   dgtlmoon  Filters can now accept a list/multiple filters (#1064) #623
d0efeb9770  2022-11-02 23:48:10 +01:00   dgtlmoon  0.39.21.1
3416532cd6  2022-11-02 23:47:44 +01:00   dgtlmoon  Playwright extension added back to Dockerfile to resolve conditional fix Alpine (musl) based systems (#1087)
defc7a340e  2022-11-02 15:12:33 +01:00   dgtlmoon  0.39.21
c197c062e1  2022-11-01 18:26:29 +01:00   dgtlmoon  Disable version check when pytest is running (#1084)
77b59809ca  2022-10-28 18:36:07 +02:00   dgtlmoon  Removing unused code (#1070)
f90b170e68  2022-10-27 23:26:14 +02:00   dgtlmoon  Docker & python - Jq conditional pip requirements.txt include (Don't install in Windows because theres no Windows library/wheel)
c93ca1841c  2022-10-27 23:17:05 +02:00   dgtlmoon  Docker & python - Use pip conditional requirements to not install playwright for ARM (unsupported on ARM) (#1067)
57f604dff1  2022-10-27 16:40:24 +02:00   Sandro    UI - Make fetch error more readable (#1038)
8499468749  2022-10-27 15:17:14 +02:00   dgtlmoon  Update README.md
7f6a13ea6c  2022-10-27 13:29:24 +02:00   dgtlmoon  Re #1052 - Watch 'open' link should use any dynamic/template info (#1063)
9874f0cbc7  2022-10-27 12:43:02 +02:00   dgtlmoon  Remove accidental files
72834a42fd  2022-10-27 12:35:26 +02:00   dgtlmoon  Backups and Snapshots - Data directory now fully portable (all paths are relative), refactored backup zip export creation
724cb17224  2022-10-24 23:20:39 +02:00   dgtlmoon  Re #1052 - Dynamic URLs, use variables in the URL (such as the current date, the date in a month, and other logic, see https://github.com/dgtlmoon/changedetection.io/wiki/Handling-variables-in-the-watched-URL) (#1057)
4eb4b401a1  2022-10-23 23:12:28 +02:00   dgtlmoon  API - system info - allow 5 minutes grace before watch is considered 'overdue'
5d40e16c73  2022-10-23 19:15:11 +02:00   dgtlmoon  API - Adding basic system info/system state API (#1051)
38 changed files with 540 additions and 299 deletions

.github/test/Dockerfile-alpine (new file, 31 lines)

@@ -0,0 +1,31 @@
# Taken from https://github.com/linuxserver/docker-changedetection.io/blob/main/Dockerfile
# Test that we can still build on Alpine (musl modified libc https://musl.libc.org/)
# Some packages wont install via pypi because they dont have a wheel available under this architecture.
FROM ghcr.io/linuxserver/baseimage-alpine:3.16
ENV PYTHONUNBUFFERED=1
COPY requirements.txt /requirements.txt
RUN \
  apk add --update --no-cache --virtual=build-dependencies \
    cargo \
    g++ \
    gcc \
    libc-dev \
    libffi-dev \
    libxslt-dev \
    make \
    openssl-dev \
    py3-wheel \
    python3-dev \
    zlib-dev && \
  apk add --update --no-cache \
    libxslt \
    python3 \
    py3-pip && \
  echo "**** pip3 install test of changedetection.io ****" && \
  pip3 install -U pip wheel setuptools && \
  pip3 install -U --no-cache-dir --find-links https://wheel-index.linuxserver.io/alpine-3.16/ -r /requirements.txt && \
  apk del --purge \
    build-dependencies

GitHub Actions workflow (container builds)

@@ -43,6 +43,16 @@ jobs:
           version: latest
           driver-opts: image=moby/buildkit:master

+      # https://github.com/dgtlmoon/changedetection.io/pull/1067
+      # Check we can still build under alpine/musl
+      - name: Test that the docker containers can build (musl via alpine check)
+        id: docker_build_musl
+        uses: docker/build-push-action@v2
+        with:
+          context: ./
+          file: ./.github/test/Dockerfile-alpine
+          platforms: linux/amd64,linux/arm64
+
       - name: Test that the docker containers can build
         id: docker_build
         uses: docker/build-push-action@v2
@@ -53,3 +63,4 @@ jobs:
           platforms: linux/arm/v7,linux/arm/v6,linux/amd64,linux/arm64,
           cache-from: type=local,src=/tmp/.buildx-cache
           cache-to: type=local,dest=/tmp/.buildx-cache

Dockerfile

@@ -23,14 +23,10 @@ RUN pip install --target=/dependencies -r /requirements.txt

 # Playwright is an alternative to Selenium
 # Excluded this package from requirements.txt to prevent arm/v6 and arm/v7 builds from failing
+# https://github.com/dgtlmoon/changedetection.io/pull/1067 also musl/alpine (not supported)
 RUN pip install --target=/dependencies playwright~=1.26 \
     || echo "WARN: Failed to install Playwright. The application can still run, but the Playwright option will be disabled."

-RUN pip install --target=/dependencies jq~=1.3 \
-    || echo "WARN: Failed to install JQ. The application can still run, but the Jq: filter option will be disabled."

 # Final image stage
 FROM python:3.8-slim

README.md

@@ -1,6 +1,7 @@
 ## Web Site Change Detection, Monitoring and Notification.

-Live your data-life pro-actively, track website content changes and receive notifications via Discord, Email, Slack, Telegram and 70+ more
+_Live your data-life pro-actively, Detect website changes and perform meaningful actions, trigger notifications via Discord, Email, Slack, Telegram, API calls and many more._

 [<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot.png" style="max-width:100%;" alt="Self-hosted web page change monitoring" title="Self-hosted web page change monitoring" />](https://lemonade.changedetection.io/start?src=github)
@@ -8,8 +9,6 @@ Live your data-life pro-actively, track website content changes and receive noti
 ![changedetection.io](https://github.com/dgtlmoon/changedetection.io/actions/workflows/test-only.yml/badge.svg?branch=master)

-Know when important content changes, we support notifications via Discord, Telegram, Home-Assistant, Slack, Email and 70+ more

 [**Don't have time? Let us host it for you! try our $6.99/month subscription - use our proxies and support!**](https://lemonade.changedetection.io/start) , _half the price of other website change monitoring services and comes with unlimited watches & checks!_

 - Chrome browser included.
@@ -167,9 +166,6 @@ One big advantage of `jq` is that you can use logic in your JSON filter, such as
 See the wiki https://github.com/dgtlmoon/changedetection.io/wiki/JSON-Selector-Filter-help for more information and examples

-Note: `jq` library must be added separately (`pip3 install jq`)

 ### Parse JSON embedded in HTML!

 When you enable a `json:` or `jq:` filter, you can even automatically extract and parse embedded JSON inside a HTML page! Amazingly handy for sites that build content based on JSON, such as many e-commerce websites.
@@ -184,9 +180,9 @@ When you enable a `json:` or `jq:` filter, you can even automatically extract an
 `json:$.price` or `jq:.price` would give `23.50`, or you can extract the whole structure

-## Proxy configuration
+## Proxy Configuration

-See the wiki https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration
+See the wiki https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration , we also support using [BrightData proxy services where possible]( https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration#brightdata-proxy-support)

 ## Raspberry Pi support?
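
The dropped README note reflects that `jq` now ships via the conditional pip requirements rather than a separate manual install. For reference, a minimal sketch of what a `jq:.price`-style rule evaluates to, assuming the optional `jq` PyPI package is present (the payload and names here are illustrative):

```python
import json

import jq  # optional dependency; pip3 install jq (no Windows wheel available)

payload = json.loads('{"offers": {"price": 23.50, "priceCurrency": "USD"}}')

# Roughly what a `jq:.offers.price` include filter extracts
print(jq.compile('.offers.price').input(payload).first())  # -> 23.5
```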

changedetectionio/__init__.py

@@ -33,7 +33,7 @@ from flask_wtf import CSRFProtect
 from changedetectionio import html_tools
 from changedetectionio.api import api_v1

-__version__ = '0.39.20.4'
+__version__ = '0.39.21.1'

 datastore = None
@@ -199,8 +199,6 @@ def changedetection_app(config=None, datastore_o=None):
     # Setup cors headers to allow all domains
     # https://flask-cors.readthedocs.io/en/latest/
     # CORS(app)
@@ -601,7 +599,7 @@
             extra_update_obj['previous_md5'] = get_current_checksum_include_ignore_text(uuid=uuid)

         # Reset the previous_md5 so we process a new snapshot including stripping ignore text.
-        if form.css_filter.data.strip() != datastore.data['watching'][uuid]['css_filter']:
+        if form.include_filters.data != datastore.data['watching'][uuid].get('include_filters', []):
             if len(datastore.data['watching'][uuid].history):
                 extra_update_obj['previous_md5'] = get_current_checksum_include_ignore_text(uuid=uuid)
@@ -987,9 +985,6 @@
         # create a ZipFile object
         backupname = "changedetection-backup-{}.zip".format(int(time.time()))

-        # We only care about UUIDS from the current index file
-        uuids = list(datastore.data['watching'].keys())

         backup_filepath = os.path.join(datastore_o.datastore_path, backupname)

         with zipfile.ZipFile(backup_filepath, "w",
@@ -1005,12 +1000,12 @@
             # Add the flask app secret
             zipObj.write(os.path.join(datastore_o.datastore_path, "secret.txt"), arcname="secret.txt")

-            # Add any snapshot data we find, use the full path to access the file, but make the file 'relative' in the Zip.
-            for txt_file_path in Path(datastore_o.datastore_path).rglob('*.txt'):
-                parent_p = txt_file_path.parent
-                if parent_p.name in uuids:
-                    zipObj.write(txt_file_path,
-                                 arcname=str(txt_file_path).replace(datastore_o.datastore_path, ''),
+            # Add any data in the watch data directory.
+            for uuid, w in datastore.data['watching'].items():
+                for f in Path(w.watch_data_dir).glob('*'):
+                    zipObj.write(f,
+                                 # Use the full path to access the file, but make the file 'relative' in the Zip.
+                                 arcname=os.path.join(f.parts[-2], f.parts[-1]),
                                  compress_type=zipfile.ZIP_DEFLATED,
                                  compresslevel=8)
@@ -1312,8 +1307,8 @@
     threading.Thread(target=notification_runner).start()

-    # Check for new release version, but not when running in test/build
-    if not os.getenv("GITHUB_REF", False):
+    # Check for new release version, but not when running in test/build or pytest
+    if not os.getenv("GITHUB_REF", False) and not config.get('disable_checkver') == True:
         threading.Thread(target=check_for_new_version).start()

     return app
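
The refactored backup export walks each watch's own data directory instead of globbing `*.txt` across the whole datastore, which is what makes the zip portable. A standalone sketch of the pattern; `watches` and `watch_data_dir` stand in for the real datastore objects:

```python
import os
import zipfile
from pathlib import Path

def backup_watch_dirs(watches: dict, datastore_path: str, zip_name: str):
    # Every file in each watch's data dir is stored as "<uuid>/<filename>",
    # so all paths inside the zip stay relative to the datastore root.
    with zipfile.ZipFile(os.path.join(datastore_path, zip_name), "w",
                         compression=zipfile.ZIP_DEFLATED, compresslevel=8) as zip_obj:
        for uuid, watch in watches.items():
            for f in Path(watch.watch_data_dir).glob('*'):
                zip_obj.write(f, arcname=os.path.join(f.parts[-2], f.parts[-1]))
```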

changedetectionio/api/api_v1.py

@@ -141,9 +141,13 @@ class SystemInfo(Resource):
             # this is not super accurate (maybe they just edited it) but better than nothing
             t = watch.threshold_seconds()
             if not t:
+                # Use the system wide default
                 t = self.datastore.threshold_seconds

             time_since_check = time.time() - watch.get('last_checked')

-            if time_since_check > t:
+            # Allow 5 minutes of grace time before we decide it's overdue
+            if time_since_check - (5 * 60) > t:
                 overdue_watches.append(uuid)

         return {
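
The overdue calculation now tolerates the scheduler being a little late. A sketch of the check in isolation (names are illustrative):

```python
import time

GRACE_SECONDS = 5 * 60

def is_overdue(last_checked: float, threshold_seconds: float) -> bool:
    time_since_check = time.time() - last_checked
    # Only report the watch as overdue once it is more than 5 minutes late
    return time_since_check - GRACE_SECONDS > threshold_seconds
```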

changedetectionio/changedetection.py

@@ -2,19 +2,20 @@
 # Launch as a eventlet.wsgi server instance.

+from distutils.util import strtobool
+import eventlet
+import eventlet.wsgi
 import getopt
 import os
 import signal
 import sys

-import eventlet
-import eventlet.wsgi
 from . import store, changedetection_app, content_fetcher
 from . import __version__

 # Only global so we can access it in the signal handler
-datastore = None
 app = None
+datastore = None

 def sigterm_handler(_signo, _stack_frame):
     global app
@@ -102,12 +103,13 @@ def main():
                                has_password=datastore.data['settings']['application']['password'] != False
                                )

-    # Monitored websites will not receive a Referer header
-    # when a user clicks on an outgoing link.
+    # Monitored websites will not receive a Referer header when a user clicks on an outgoing link.
+    # @Note: Incompatible with password login (and maybe other features) for now, submit a PR!
     @app.after_request
     def hide_referrer(response):
-        if os.getenv("HIDE_REFERER", False):
+        if strtobool(os.getenv("HIDE_REFERER", False)):
             response.headers["Referrer-Policy"] = "no-referrer"

         return response

     # Proxy sub-directory support
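
The `strtobool` change fixes a classic environment-variable pitfall: `os.getenv()` returns a string, and any non-empty string, including "false", is truthy. A quick demonstration:

```python
from distutils.util import strtobool

# The old check: truthiness of the raw string
print(bool("false"))       # True - HIDE_REFERER=false would still enable the feature

# The new check: parse the intent of the value
print(strtobool("false"))  # 0
print(strtobool("True"))   # 1
```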

changedetectionio/content_fetcher.py

@@ -164,16 +164,16 @@ class Fetcher():
             }

-            // inject the current one set in the css_filter, which may be a CSS rule
+            // inject the current one set in the include_filters, which may be a CSS rule
             // used for displaying the current one in VisualSelector, where its not one we generated.
-            if (css_filter.length) {
+            if (include_filters.length) {
                 q=false;
                 try {
                     // is it xpath?
-                    if (css_filter.startsWith('/') || css_filter.startsWith('xpath:')) {
-                        q=document.evaluate(css_filter.replace('xpath:',''), document, null, XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue;
+                    if (include_filters.startsWith('/') || include_filters.startsWith('xpath:')) {
+                        q=document.evaluate(include_filters.replace('xpath:',''), document, null, XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue;
                     } else {
-                        q=document.querySelector(css_filter);
+                        q=document.querySelector(include_filters);
                     }
                 } catch (e) {
                     // Maybe catch DOMException and alert?
@@ -186,7 +186,7 @@
             if (bbox && bbox['width'] >0 && bbox['height']>0) {
                 size_pos.push({
-                    xpath: css_filter,
+                    xpath: include_filters,
                     width: bbox['width'],
                     height: bbox['height'],
                     left: bbox['left'],
@@ -220,7 +220,7 @@
             request_body,
             request_method,
             ignore_status_codes=False,
-            current_css_filter=None):
+            current_include_filters=None):
         # Should set self.error, self.status_code and self.content
         pass
@@ -310,7 +310,7 @@ class base_html_playwright(Fetcher):
             request_body,
             request_method,
             ignore_status_codes=False,
-            current_css_filter=None):
+            current_include_filters=None):

         from playwright.sync_api import sync_playwright
         import playwright._impl._api_types
@@ -413,10 +413,10 @@
             self.status_code = response.status
             self.headers = response.all_headers()

-            if current_css_filter is not None:
-                page.evaluate("var css_filter={}".format(json.dumps(current_css_filter)))
+            if current_include_filters is not None:
+                page.evaluate("var include_filters={}".format(json.dumps(current_include_filters)))
             else:
-                page.evaluate("var css_filter=''")
+                page.evaluate("var include_filters=''")

             self.xpath_data = page.evaluate("async () => {" + self.xpath_element_js + "}")
@@ -497,7 +497,7 @@ class base_html_webdriver(Fetcher):
             request_body,
             request_method,
             ignore_status_codes=False,
-            current_css_filter=None):
+            current_include_filters=None):

         from selenium import webdriver
         from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
@@ -573,7 +573,7 @@ class html_requests(Fetcher):
             request_body,
             request_method,
             ignore_status_codes=False,
-            current_css_filter=None):
+            current_include_filters=None):

         # Make requests use a more modern looking user-agent
         if not 'User-Agent' in request_headers:
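
The rename threads through to the in-page JavaScript: the fetcher serialises the filter list with `json.dumps()` and injects it as the `include_filters` variable before running the element-scraping script. A sketch of that injection step, with `page` standing in for a Playwright `Page` object:

```python
import json

def inject_include_filters(page, current_include_filters):
    # The xpath_element_js scraper reads `include_filters` from the page scope
    if current_include_filters is not None:
        page.evaluate("var include_filters={}".format(json.dumps(current_include_filters)))
    else:
        page.evaluate("var include_filters=''")
```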

changedetectionio/fetch_site_status.py

@@ -10,6 +10,11 @@ from changedetectionio import content_fetcher, html_tools
 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

+class FilterNotFoundInResponse(ValueError):
+    def __init__(self, msg):
+        ValueError.__init__(self, msg)

 # Some common stuff here that can be moved to a base class
 # (set_proxy_from_list)
 class perform_site_check():
@@ -33,18 +38,20 @@
         return regex

     def run(self, uuid):
+        from copy import deepcopy
         changed_detected = False
         screenshot = False  # as bytes
         stripped_text_from_html = ""

-        watch = self.datastore.data['watching'].get(uuid)
+        # DeepCopy so we can be sure we don't accidently change anything by reference
+        watch = deepcopy(self.datastore.data['watching'].get(uuid))

         if not watch:
             return

         # Protect against file:// access
-        if re.search(r'^file', watch['url'], re.IGNORECASE) and not os.getenv('ALLOW_FILE_URI', False):
+        if re.search(r'^file', watch.get('url', ''), re.IGNORECASE) and not os.getenv('ALLOW_FILE_URI', False):
             raise Exception(
                 "file:// type access is denied for security reasons."
             )
@@ -52,10 +59,10 @@
         # Unset any existing notification error
         update_obj = {'last_notification_error': False, 'last_error': False}

-        extra_headers =self.datastore.data['watching'][uuid].get('headers')
+        extra_headers = watch.get('headers', [])

         # Tweak the base config with the per-watch ones
-        request_headers = self.datastore.data['settings']['headers'].copy()
+        request_headers = deepcopy(self.datastore.data['settings']['headers'])
         request_headers.update(extra_headers)

         # https://github.com/psf/requests/issues/4525
@@ -65,7 +72,9 @@
             request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '')

         timeout = self.datastore.data['settings']['requests'].get('timeout')
-        url = watch.get('url')
+
+        url = watch.link
+
         request_body = self.datastore.data['watching'][uuid].get('body')
         request_method = self.datastore.data['watching'][uuid].get('method')
         ignore_status_codes = self.datastore.data['watching'][uuid].get('ignore_status_codes', False)
@@ -77,7 +86,7 @@
             is_source = True

         # Pluggable content fetcher
-        prefer_backend = watch['fetch_backend']
+        prefer_backend = watch.get('fetch_backend')
         if hasattr(content_fetcher, prefer_backend):
             klass = getattr(content_fetcher, prefer_backend)
         else:
@@ -88,21 +97,21 @@
         proxy_url = None
         if proxy_id:
             proxy_url = self.datastore.proxy_list.get(proxy_id).get('url')
-            print ("UUID {} Using proxy {}".format(uuid, proxy_url))
+            print("UUID {} Using proxy {}".format(uuid, proxy_url))

         fetcher = klass(proxy_override=proxy_url)

         # Configurable per-watch or global extra delay before extracting text (for webDriver types)
         system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None)
         if watch['webdriver_delay'] is not None:
-            fetcher.render_extract_delay = watch['webdriver_delay']
+            fetcher.render_extract_delay = watch.get('webdriver_delay')
         elif system_webdriver_delay is not None:
             fetcher.render_extract_delay = system_webdriver_delay

-        if watch['webdriver_js_execute_code'] is not None and watch['webdriver_js_execute_code'].strip():
-            fetcher.webdriver_js_execute_code = watch['webdriver_js_execute_code']
+        if watch.get('webdriver_js_execute_code') is not None and watch.get('webdriver_js_execute_code').strip():
+            fetcher.webdriver_js_execute_code = watch.get('webdriver_js_execute_code')

-        fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, watch['css_filter'])
+        fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, watch.get('include_filters'))
         fetcher.quit()

         self.screenshot = fetcher.screenshot
@@ -126,28 +135,30 @@
         is_html = False
         is_json = False

-        css_filter_rule = watch['css_filter']
+        include_filters_rule = watch.get('include_filters', [])
+        # include_filters_rule = watch['include_filters']
         subtractive_selectors = watch.get(
             "subtractive_selectors", []
         ) + self.datastore.data["settings"]["application"].get(
             "global_subtractive_selectors", []
         )

-        has_filter_rule = css_filter_rule and len(css_filter_rule.strip())
+        has_filter_rule = include_filters_rule and len("".join(include_filters_rule).strip())
         has_subtractive_selectors = subtractive_selectors and len(subtractive_selectors[0].strip())

         if is_json and not has_filter_rule:
-            css_filter_rule = "json:$"
+            include_filters_rule.append("json:$")
             has_filter_rule = True

         if has_filter_rule:
             json_filter_prefixes = ['json:', 'jq:']
-            if any(prefix in css_filter_rule for prefix in json_filter_prefixes):
-                stripped_text_from_html = html_tools.extract_json_as_string(content=fetcher.content, json_filter=css_filter_rule)
-                is_html = False
+            for filter in include_filters_rule:
+                if any(prefix in filter for prefix in json_filter_prefixes):
+                    stripped_text_from_html += html_tools.extract_json_as_string(content=fetcher.content, json_filter=filter)
+                    is_html = False

         if is_html or is_source:
-            # CSS Filter, extract the HTML that matches and feed that into the existing inscriptis::get_text
             fetcher.content = html_tools.workarounds_for_obfuscations(fetcher.content)
             html_content = fetcher.content
@@ -159,33 +170,36 @@
             else:
                 # Then we assume HTML
                 if has_filter_rule:
-                    # For HTML/XML we offer xpath as an option, just start a regular xPath "/.."
-                    if css_filter_rule[0] == '/' or css_filter_rule.startswith('xpath:'):
-                        html_content = html_tools.xpath_filter(xpath_filter=css_filter_rule.replace('xpath:', ''),
-                                                               html_content=fetcher.content)
-                    else:
-                        # CSS Filter, extract the HTML that matches and feed that into the existing inscriptis::get_text
-                        html_content = html_tools.css_filter(css_filter=css_filter_rule, html_content=fetcher.content)
+                    html_content = ""
+                    for filter_rule in include_filters_rule:
+                        # For HTML/XML we offer xpath as an option, just start a regular xPath "/.."
+                        if filter_rule[0] == '/' or filter_rule.startswith('xpath:'):
+                            html_content += html_tools.xpath_filter(xpath_filter=filter_rule.replace('xpath:', ''),
+                                                                    html_content=fetcher.content,
+                                                                    append_pretty_line_formatting=not is_source)
+                        else:
+                            # CSS Filter, extract the HTML that matches and feed that into the existing inscriptis::get_text
+                            html_content += html_tools.include_filters(include_filters=filter_rule,
+                                                                       html_content=fetcher.content,
+                                                                       append_pretty_line_formatting=not is_source)
+
+                    if not html_content.strip():
+                        raise FilterNotFoundInResponse(include_filters_rule)

                 if has_subtractive_selectors:
                     html_content = html_tools.element_removal(subtractive_selectors, html_content)

-                if not is_source:
+                if is_source:
+                    stripped_text_from_html = html_content
+                else:
                     # extract text
+                    do_anchor = self.datastore.data["settings"]["application"].get("render_anchor_tag_content", False)
                     stripped_text_from_html = \
                         html_tools.html_to_text(
                             html_content,
-                            render_anchor_tag_content=self.datastore.data["settings"][
-                                "application"].get(
-                                "render_anchor_tag_content", False)
+                            render_anchor_tag_content=do_anchor
                         )
-                elif is_source:
-                    stripped_text_from_html = html_content

         # Re #340 - return the content before the 'ignore text' was applied
         text_content_before_ignored_filter = stripped_text_from_html.encode('utf-8')
@@ -218,7 +232,7 @@
             for l in result:
                 if type(l) is tuple:
-                    #@todo - some formatter option default (between groups)
+                    # @todo - some formatter option default (between groups)
                     regex_matched_output += list(l) + [b'\n']
                 else:
                     # @todo - some formatter option default (between each ungrouped result)
@@ -232,7 +246,6 @@
             stripped_text_from_html = b''.join(regex_matched_output)
             text_content_before_ignored_filter = stripped_text_from_html

-        # Re #133 - if we should strip whitespaces from triggering the change detected comparison
         if self.datastore.data['settings']['application'].get('ignore_whitespace', False):
             fetched_md5 = hashlib.md5(stripped_text_from_html.translate(None, b'\r\n\t ')).hexdigest()
@@ -242,29 +255,30 @@
         ############ Blocking rules, after checksum #################
         blocked = False

-        if len(watch['trigger_text']):
+        trigger_text = watch.get('trigger_text', [])
+        if len(trigger_text):
             # Assume blocked
             blocked = True
             # Filter and trigger works the same, so reuse it
             # It should return the line numbers that match
             result = html_tools.strip_ignore_text(content=str(stripped_text_from_html),
-                                                  wordlist=watch['trigger_text'],
+                                                  wordlist=trigger_text,
                                                   mode="line numbers")
             # Unblock if the trigger was found
             if result:
                 blocked = False

-        if len(watch['text_should_not_be_present']):
+        text_should_not_be_present = watch.get('text_should_not_be_present', [])
+        if len(text_should_not_be_present):
             # If anything matched, then we should block a change from happening
             result = html_tools.strip_ignore_text(content=str(stripped_text_from_html),
-                                                  wordlist=watch['text_should_not_be_present'],
+                                                  wordlist=text_should_not_be_present,
                                                   mode="line numbers")
             if result:
                 blocked = True

         # The main thing that all this at the moment comes down to :)
-        if watch['previous_md5'] != fetched_md5:
+        if watch.get('previous_md5') != fetched_md5:
             changed_detected = True

             # Looks like something changed, but did it match all the rules?
@@ -273,7 +287,7 @@
         # Extract title as title
         if is_html:
-            if self.datastore.data['settings']['application']['extract_title_as_title'] or watch['extract_title_as_title']:
+            if self.datastore.data['settings']['application'].get('extract_title_as_title') or watch['extract_title_as_title']:
                 if not watch['title'] or not len(watch['title']):
                     update_obj['title'] = html_tools.extract_element(find='title', html_content=fetcher.content)
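
The `deepcopy()` at the top of `run()` is the heart of the #1108 data-corruption fix: a plain reference (or a shallow copy) still shares nested dicts with the datastore. A small demonstration of the difference:

```python
from copy import deepcopy

datastore_entry = {'url': 'https://example.com', 'headers': {}}

by_reference = datastore_entry
by_reference['headers']['X-Test'] = '1'
print(datastore_entry['headers'])               # {'X-Test': '1'} - datastore mutated

safe_copy = deepcopy(datastore_entry)
safe_copy['headers']['X-Other'] = '2'
print('X-Other' in datastore_entry['headers'])  # False - original untouched
```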

changedetectionio/forms.py

@@ -349,7 +349,7 @@ class watchForm(commonSettingsForm):

     time_between_check = FormField(TimeBetweenCheckForm)

-    css_filter = StringField('CSS/JSON/XPATH Filter', [ValidateCSSJSONXPATHInput()], default='')
+    include_filters = StringListField('CSS/JSONPath/JQ/XPath Filters', [ValidateCSSJSONXPATHInput()], default='')

     subtractive_selectors = StringListField('Remove elements', [ValidateCSSJSONXPATHInput(allow_xpath=False, allow_json=False)])
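
`StringListField` renders as a textarea and splits the submitted value into one rule per line. A hypothetical helper showing the intended behaviour (not the exact WTForms field implementation):

```python
def textarea_to_filter_list(raw: str) -> list:
    # One CSS/JSONPath/jq/XPath rule per line, blank lines dropped
    return [line.strip() for line in (raw or '').splitlines() if line.strip()]

print(textarea_to_filter_list("#example\nxpath://body/div\n\n"))
# -> ['#example', 'xpath://body/div']
```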

changedetectionio/html_tools.py

@@ -7,26 +7,30 @@ from typing import List
 import json
 import re

-class FilterNotFoundInResponse(ValueError):
-    def __init__(self, msg):
-        ValueError.__init__(self, msg)
+# HTML added to be sure each result matching a filter (.example) gets converted to a new line by Inscriptis
+TEXT_FILTER_LIST_LINE_SUFFIX = "<br/>"

 class JSONNotFound(ValueError):
     def __init__(self, msg):
         ValueError.__init__(self, msg)

 # Given a CSS Rule, and a blob of HTML, return the blob of HTML that matches
-def css_filter(css_filter, html_content):
+def include_filters(include_filters, html_content, append_pretty_line_formatting=False):
     soup = BeautifulSoup(html_content, "html.parser")
     html_block = ""
-    r = soup.select(css_filter, separator="")
-    if len(html_content) > 0 and len(r) == 0:
-        raise FilterNotFoundInResponse(css_filter)
-    for item in r:
-        html_block += str(item)
+    r = soup.select(include_filters, separator="")

-    return html_block + "\n"
+    for element in r:
+        # When there's more than 1 match, then add the suffix to separate each line
+        # And where the matched result doesn't include something that will cause Inscriptis to add a newline
+        # (This way each 'match' reliably has a new-line in the diff)
+        # Divs are converted to 4 whitespaces by inscriptis
+        if append_pretty_line_formatting and len(html_block) and not element.name in (['br', 'hr', 'div', 'p']):
+            html_block += TEXT_FILTER_LIST_LINE_SUFFIX
+        html_block += str(element)
+
+    return html_block

 def subtractive_css_selector(css_selector, html_content):
     soup = BeautifulSoup(html_content, "html.parser")
@@ -42,25 +46,29 @@ def element_removal(selectors: List[str], html_content):

 # Return str Utf-8 of matched rules
-def xpath_filter(xpath_filter, html_content):
+def xpath_filter(xpath_filter, html_content, append_pretty_line_formatting=False):
     from lxml import etree, html

     tree = html.fromstring(bytes(html_content, encoding='utf-8'))
     html_block = ""

     r = tree.xpath(xpath_filter.strip(), namespaces={'re': 'http://exslt.org/regular-expressions'})
-    if len(html_content) > 0 and len(r) == 0:
-        raise FilterNotFoundInResponse(xpath_filter)

     #@note: //title/text() wont work where <title>CDATA..

     for element in r:
+        # When there's more than 1 match, then add the suffix to separate each line
+        # And where the matched result doesn't include something that will cause Inscriptis to add a newline
+        # (This way each 'match' reliably has a new-line in the diff)
+        # Divs are converted to 4 whitespaces by inscriptis
+        if append_pretty_line_formatting and len(html_block) and (not hasattr(element, 'tag') or not element.tag in (['br', 'hr', 'div', 'p'])):
+            html_block += TEXT_FILTER_LIST_LINE_SUFFIX
+
         if type(element) == etree._ElementStringResult:
-            html_block += str(element) + "<br/>"
+            html_block += str(element)
         elif type(element) == etree._ElementUnicodeResult:
-            html_block += str(element) + "<br/>"
+            html_block += str(element)
         else:
-            html_block += etree.tostring(element, pretty_print=True).decode('utf-8') + "<br/>"
+            html_block += etree.tostring(element, pretty_print=True).decode('utf-8')

     return html_block
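
The `FilterNotFoundInResponse` check moves out to the caller (which now sees the combined result of all filters), while the `<br/>` suffix only separates consecutive matches instead of being appended to every one. A self-contained sketch of the new joining behaviour, assuming BeautifulSoup:

```python
from bs4 import BeautifulSoup

TEXT_FILTER_LIST_LINE_SUFFIX = "<br/>"

def include_filters_sketch(selector, html_content, append_pretty_line_formatting=True):
    soup = BeautifulSoup(html_content, "html.parser")
    html_block = ""
    for element in soup.select(selector):
        # Separate consecutive matches so Inscriptis emits one line per match,
        # unless the element itself already forces a line break
        if append_pretty_line_formatting and len(html_block) and element.name not in ('br', 'hr', 'div', 'p'):
            html_block += TEXT_FILTER_LIST_LINE_SUFFIX
        html_block += str(element)
    return html_block

print(include_filters_sketch('span.price', '<span class="price">1</span><span class="price">2</span>'))
# -> <span class="price">1</span><br/><span class="price">2</span>
```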

changedetectionio/importer.py

@@ -103,12 +103,12 @@ class import_distill_io_json(Importer):
                 pass
             except IndexError:
                 pass
+            extras['include_filters'] = []
             try:
-                extras['css_filter'] = d_config['selections'][0]['frames'][0]['includes'][0]['expr']
                 if d_config['selections'][0]['frames'][0]['includes'][0]['type'] == 'xpath':
-                    extras['css_filter'] = 'xpath:' + extras['css_filter']
+                    extras['include_filters'].append('xpath:' + d_config['selections'][0]['frames'][0]['includes'][0]['expr'])
+                else:
+                    extras['include_filters'].append(d_config['selections'][0]['frames'][0]['includes'][0]['expr'])
             except KeyError:
                 pass
             except IndexError:
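
The importer now builds a list from the first Distill.io include expression rather than a single string. Sketched with a minimal stand-in config:

```python
d_config = {'selections': [{'frames': [{'includes': [
    {'type': 'xpath', 'expr': '//div[@id="price"]'}
]}]}]}

extras = {'include_filters': []}
include = d_config['selections'][0]['frames'][0]['includes'][0]
if include['type'] == 'xpath':
    extras['include_filters'].append('xpath:' + include['expr'])
else:
    extras['include_filters'].append(include['expr'])

print(extras['include_filters'])  # ['xpath://div[@id="price"]']
```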

changedetectionio/model/Watch.py

@@ -1,6 +1,8 @@
-import os
-import uuid as uuid_builder
 from distutils.util import strtobool
+import logging
+import os
+import time
+import uuid

 minimum_seconds_recheck_time = int(os.getenv('MINIMUM_SECONDS_RECHECK_TIME', 60))
 mtable = {'seconds': 1, 'minutes': 60, 'hours': 3600, 'days': 86400, 'weeks': 86400 * 7}
@@ -14,42 +16,43 @@ class model(dict):
     __newest_history_key = None
     __history_n=0
     __base_config = {
-        'url': None,
-        'tag': None,
-        'last_checked': 0,
-        'paused': False,
-        'last_viewed': 0, # history key value of the last viewed via the [diff] link
-        #'newest_history_key': 0,
-        'title': None,
-        'previous_md5': False,
-        'uuid': str(uuid_builder.uuid4()),
-        'headers': {}, # Extra headers to send
+        #'history': {}, # Dict of timestamp and output stripped filename (removed)
+        #'newest_history_key': 0, (removed, taken from history.txt index)
         'body': None,
-        'method': 'GET',
-        #'history': {}, # Dict of timestamp and output stripped filename
+        'check_unique_lines': False, # On change-detected, compare against all history if its something new
+        'check_count': 0,
+        'consecutive_filter_failures': 0, # Every time the CSS/xPath filter cannot be located, reset when all is fine.
+        'extract_text': [], # Extract text by regex after filters
+        'extract_title_as_title': False,
+        'fetch_backend': None,
+        'filter_failure_notification_send': strtobool(os.getenv('FILTER_FAILURE_NOTIFICATION_SEND_DEFAULT', 'True')),
+        'headers': {}, # Extra headers to send
         'ignore_text': [], # List of text to ignore when calculating the comparison checksum
-        # Custom notification content
-        'notification_urls': [], # List of URLs to add to the notification Queue (Usually AppRise)
-        'notification_title': None,
+        'include_filters': [],
+        'last_checked': 0,
+        'last_error': False,
+        'last_viewed': 0, # history key value of the last viewed via the [diff] link
+        'method': 'GET',
+        # Custom notification content
         'notification_body': None,
         'notification_format': default_notification_format_for_watch,
         'notification_muted': False,
-        'css_filter': '',
-        'last_error': False,
-        'extract_text': [], # Extract text by regex after filters
-        'subtractive_selectors': [],
-        'trigger_text': [], # List of text or regex to wait for until a change is detected
-        'text_should_not_be_present': [], # Text that should not present
-        'fetch_backend': None,
-        'filter_failure_notification_send': strtobool(os.getenv('FILTER_FAILURE_NOTIFICATION_SEND_DEFAULT', 'True')),
-        'consecutive_filter_failures': 0, # Every time the CSS/xPath filter cannot be located, reset when all is fine.
-        'extract_title_as_title': False,
-        'check_unique_lines': False, # On change-detected, compare against all history if its something new
+        'notification_title': None,
+        'notification_urls': [], # List of URLs to add to the notification Queue (Usually AppRise)
+        'paused': False,
+        'previous_md5': False,
         'proxy': None, # Preferred proxy connection
+        'subtractive_selectors': [],
+        'tag': None,
+        'text_should_not_be_present': [], # Text that should not present
         # Re #110, so then if this is set to None, we know to use the default value instead
         # Requires setting to None on submit if it's the same as the default
         # Should be all None by default, so we use the system default in this case.
         'time_between_check': {'weeks': None, 'days': None, 'hours': None, 'minutes': None, 'seconds': None},
+        'title': None,
+        'trigger_text': [], # List of text or regex to wait for until a change is detected
+        'url': None,
+        'uuid': str(uuid.uuid4()),
         'webdriver_delay': None,
         'webdriver_js_execute_code': None, # Run before change-detection
     }
@@ -60,7 +63,7 @@ class model(dict):
         self.update(self.__base_config)
         self.__datastore_path = kw['datastore_path']

-        self['uuid'] = str(uuid_builder.uuid4())
+        self['uuid'] = str(uuid.uuid4())

         del kw['datastore_path']
@@ -82,10 +85,19 @@ class model(dict):
         return False

     def ensure_data_dir_exists(self):
-        target_path = os.path.join(self.__datastore_path, self['uuid'])
-        if not os.path.isdir(target_path):
-            print ("> Creating data dir {}".format(target_path))
-            os.mkdir(target_path)
+        if not os.path.isdir(self.watch_data_dir):
+            print ("> Creating data dir {}".format(self.watch_data_dir))
+            os.mkdir(self.watch_data_dir)

+    @property
+    def link(self):
+        url = self.get('url', '')
+        if '{%' in url or '{{' in url:
+            from jinja2 import Environment
+            # Jinja2 available in URLs along with https://pypi.org/project/jinja2-time/
+            jinja2_env = Environment(extensions=['jinja2_time.TimeExtension'])
+            return str(jinja2_env.from_string(url).render())
+        return url

     @property
     def label(self):
@@ -109,18 +121,39 @@ class model(dict):
     @property
     def history(self):
+        """History index is just a text file as a list
+           {watch-uuid}/history.txt
+           contains a list like
+           {epoch-time},{filename}\n
+           We read in this list as the history information
+        """
         tmp_history = {}
-        import logging
-        import time

         # Read the history file as a dict
-        fname = os.path.join(self.__datastore_path, self.get('uuid'), "history.txt")
+        fname = os.path.join(self.watch_data_dir, "history.txt")
         if os.path.isfile(fname):
             logging.debug("Reading history index " + str(time.time()))
             with open(fname, "r") as f:
                 for i in f.readlines():
                     if ',' in i:
                         k, v = i.strip().split(',', 2)
+
+                        # The index history could contain a relative path, so we need to make the fullpath
+                        # so that python can read it
+                        if not '/' in v and not '\'' in v:
+                            v = os.path.join(self.watch_data_dir, v)
+                        else:
+                            # It's possible that they moved the datadir on older versions
+                            # So the snapshot exists but is in a different path
+                            snapshot_fname = v.split('/')[-1]
+                            proposed_new_path = os.path.join(self.watch_data_dir, snapshot_fname)
+                            if not os.path.exists(v) and os.path.exists(proposed_new_path):
+                                v = proposed_new_path
+
                         tmp_history[k] = v

         if len(tmp_history):
@@ -132,7 +165,7 @@
     @property
     def has_history(self):
-        fname = os.path.join(self.__datastore_path, self.get('uuid'), "history.txt")
+        fname = os.path.join(self.watch_data_dir, "history.txt")
         return os.path.isfile(fname)

     # Returns the newest key, but if theres only 1 record, then it's counted as not being new, so return 0.
@@ -151,25 +184,25 @@
     # Save some text file to the appropriate path and bump the history
     # result_obj from fetch_site_status.run()
     def save_history_text(self, contents, timestamp):
-        import uuid
-        import logging

-        output_path = os.path.join(self.__datastore_path, self['uuid'])
         self.ensure_data_dir_exists()
-        snapshot_fname = os.path.join(output_path, str(uuid.uuid4()))

-        logging.debug("Saving history text {}".format(snapshot_fname))
+        # Small hack so that we sleep just enough to allow 1 second between history snapshots
+        # this is because history.txt indexes/keys snapshots by epoch seconds and we dont want dupe keys
+        if self.__newest_history_key and int(timestamp) == int(self.__newest_history_key):
+            time.sleep(timestamp - self.__newest_history_key)
+
+        snapshot_fname = "{}.txt".format(str(uuid.uuid4()))

         # in /diff/ and /preview/ we are going to assume for now that it's UTF-8 when reading
         # most sites are utf-8 and some are even broken utf-8
-        with open(snapshot_fname, 'wb') as f:
+        with open(os.path.join(self.watch_data_dir, snapshot_fname), 'wb') as f:
             f.write(contents)
             f.close()

         # Append to index
         # @todo check last char was \n
-        index_fname = os.path.join(output_path, "history.txt")
+        index_fname = os.path.join(self.watch_data_dir, "history.txt")
         with open(index_fname, 'a') as f:
             f.write("{},{}\n".format(timestamp, snapshot_fname))
             f.close()
@@ -210,14 +243,14 @@
         return not local_lines.issubset(existing_history)

     def get_screenshot(self):
-        fname = os.path.join(self.__datastore_path, self['uuid'], "last-screenshot.png")
+        fname = os.path.join(self.watch_data_dir, "last-screenshot.png")
         if os.path.isfile(fname):
             return fname
         return False

     def __get_file_ctime(self, filename):
-        fname = os.path.join(self.__datastore_path, self['uuid'], filename)
+        fname = os.path.join(self.watch_data_dir, filename)
         if os.path.isfile(fname):
             return int(os.path.getmtime(fname))
         return False
@@ -242,9 +275,14 @@
     def snapshot_error_screenshot_ctime(self):
         return self.__get_file_ctime('last-error-screenshot.png')

+    @property
+    def watch_data_dir(self):
+        # The base dir of the watch data
+        return os.path.join(self.__datastore_path, self['uuid'])

     def get_error_text(self):
         """Return the text saved from a previous request that resulted in a non-200 error"""
-        fname = os.path.join(self.__datastore_path, self['uuid'], "last-error.txt")
+        fname = os.path.join(self.watch_data_dir, "last-error.txt")
         if os.path.isfile(fname):
             with open(fname, 'r') as f:
                 return f.read()
@@ -252,7 +290,7 @@
     def get_error_snapshot(self):
         """Return path to the screenshot that resulted in a non-200 error"""
-        fname = os.path.join(self.__datastore_path, self['uuid'], "last-error-screenshot.png")
+        fname = os.path.join(self.watch_data_dir, "last-error-screenshot.png")
         if os.path.isfile(fname):
             return fname
         return False
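
The new `link` property is what makes #1052's dynamic URLs work: any URL containing Jinja2 markers is rendered (with jinja2-time's `now` tag available) just before fetching. A sketch of the rendering step, assuming `jinja2` and `jinja2-time` are installed:

```python
from jinja2 import Environment

jinja2_env = Environment(extensions=['jinja2_time.TimeExtension'])

url = "https://example.com/report-{% now 'utc', '%Y-%m-%d' %}.html"
if '{%' in url or '{{' in url:
    # Render the template markers, leaving plain URLs untouched
    url = str(jinja2_env.from_string(url).render())

print(url)  # e.g. https://example.com/report-2022-11-12.html
```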

Test runner shell script

@@ -25,11 +25,9 @@
 pytest tests/test_notification.py

-## JQ + JSON: filter test
-# jq is not available on windows and we should just test it when the package is installed
-# this will re-test with jq support
-pip3 install jq~=1.3
-pytest tests/test_jsonpath_jq_selector.py
+# Re-run with HIDE_REFERER set - could affect login
+export HIDE_REFERER=True
+pytest tests/test_access_control.py

 # Now for the selenium and playwright/browserless fetchers

changedetectionio/static/js/visual-selector.js

@@ -50,7 +50,7 @@ $(document).ready(function() {
         state_clicked=false;
         ctx.clearRect(0, 0, c.width, c.height);
         xctx.clearRect(0, 0, c.width, c.height);
-        $("#css_filter").val('');
+        $("#include_filters").val('');
     });
@@ -68,7 +68,7 @@
         xctx = c.getContext("2d");
         // redline highlight context
         ctx = c.getContext("2d");
-        current_default_xpath =$("#css_filter").val();
+        current_default_xpath =$("#include_filters").val();
         fetch_data();
         $('#selector-canvas').off("mousemove mousedown");
         // screenshot_url defined in the edit.html template
@@ -205,9 +205,9 @@
             var sel = selector_data['size_pos'][current_selected_i];
             if (sel[0] == '/') {
                 // @todo - not sure just checking / is right
-                $("#css_filter").val('xpath:'+sel.xpath);
+                $("#include_filters").val('xpath:'+sel.xpath);
             } else {
-                $("#css_filter").val(sel.xpath);
+                $("#include_filters").val(sel.xpath);
             }
             xctx.fillStyle = 'rgba(205,205,205,0.95)';
             xctx.strokeStyle = 'rgba(225,0,0,0.9)';

changedetectionio/static/styles/styles.css

@@ -156,7 +156,7 @@ body:after, body:before {
 .fetch-error {
   padding-top: 1em;
-  font-size: 60%;
+  font-size: 80%;
   max-width: 400px;
   display: block;
 }
@@ -803,4 +803,4 @@ ul {
   padding: 0.5rem;
   border-radius: 5px;
   color: #ff3300;
 }

View File

@@ -27,6 +27,8 @@ class ChangeDetectionStore:
# For when we edit, we should write to disk # For when we edit, we should write to disk
needs_write_urgent = False needs_write_urgent = False
__version_check = True
def __init__(self, datastore_path="/datastore", include_default_watches=True, version_tag="0.0.0"): def __init__(self, datastore_path="/datastore", include_default_watches=True, version_tag="0.0.0"):
# Should only be active for docker # Should only be active for docker
# logging.basicConfig(filename='/dev/stdout', level=logging.INFO) # logging.basicConfig(filename='/dev/stdout', level=logging.INFO)
@@ -37,7 +39,6 @@ class ChangeDetectionStore:
self.proxy_list = None self.proxy_list = None
self.start_time = time.time() self.start_time = time.time()
self.stop_thread = False self.stop_thread = False
# Base definition for all watchers # Base definition for all watchers
# deepcopy part of #569 - not sure why its needed exactly # deepcopy part of #569 - not sure why its needed exactly
self.generic_definition = deepcopy(Watch.model(datastore_path = datastore_path, default={})) self.generic_definition = deepcopy(Watch.model(datastore_path = datastore_path, default={}))
@@ -81,8 +82,13 @@ class ChangeDetectionStore:
except (FileNotFoundError, json.decoder.JSONDecodeError): except (FileNotFoundError, json.decoder.JSONDecodeError):
if include_default_watches: if include_default_watches:
print("Creating JSON store at", self.datastore_path) print("Creating JSON store at", self.datastore_path)
self.add_watch(url='https://news.ycombinator.com/', tag='Tech news') self.add_watch(url='https://news.ycombinator.com/',
self.add_watch(url='https://changedetection.io/CHANGELOG.txt', tag='changedetection.io') tag='Tech news',
extras={'fetch_backend': 'html_requests'})
self.add_watch(url='https://changedetection.io/CHANGELOG.txt',
tag='changedetection.io',
extras={'fetch_backend': 'html_requests'})
self.__data['version_tag'] = version_tag self.__data['version_tag'] = version_tag
@@ -266,7 +272,7 @@ class ChangeDetectionStore:
extras = {} extras = {}
# should always be str # should always be str
if tag is None or not tag: if tag is None or not tag:
tag='' tag = ''
# Incase these are copied across, assume it's a reference and deepcopy() # Incase these are copied across, assume it's a reference and deepcopy()
apply_extras = deepcopy(extras) apply_extras = deepcopy(extras)
@@ -281,17 +287,31 @@ class ChangeDetectionStore:
res = r.json() res = r.json()
# List of permissible attributes we accept from the wild internet # List of permissible attributes we accept from the wild internet
for k in ['url', 'tag', for k in [
'paused', 'title', 'body',
'previous_md5', 'headers', 'css_filter',
'body', 'method', 'extract_text',
'ignore_text', 'css_filter', 'extract_title_as_title',
'subtractive_selectors', 'trigger_text', 'headers',
'extract_title_as_title', 'extract_text', 'ignore_text',
'text_should_not_be_present', 'include_filters',
'webdriver_js_execute_code']: 'method',
'paused',
'previous_md5',
'subtractive_selectors',
'tag',
'text_should_not_be_present',
'title',
'trigger_text',
'webdriver_js_execute_code',
'url',
]:
if res.get(k): if res.get(k):
apply_extras[k] = res[k] if k != 'css_filter':
apply_extras[k] = res[k]
else:
# We renamed the field and made it a list
apply_extras['include_filters'] = [res['css_filter']]
except Exception as e: except Exception as e:
logging.error("Error fetching metadata for shared watch link", url, str(e)) logging.error("Error fetching metadata for shared watch link", url, str(e))
@@ -314,12 +334,13 @@ class ChangeDetectionStore:
del apply_extras[k] del apply_extras[k]
new_watch.update(apply_extras) new_watch.update(apply_extras)
self.__data['watching'][new_uuid]=new_watch self.__data['watching'][new_uuid] = new_watch
self.__data['watching'][new_uuid].ensure_data_dir_exists() self.__data['watching'][new_uuid].ensure_data_dir_exists()
if write_to_disk_now: if write_to_disk_now:
self.sync_to_json() self.sync_to_json()
return new_uuid return new_uuid
def visualselector_data_is_ready(self, watch_uuid): def visualselector_data_is_ready(self, watch_uuid):
@@ -583,3 +604,14 @@ class ChangeDetectionStore:
         for v in ['User-Agent', 'Accept', 'Accept-Encoding', 'Accept-Language']:
             if self.data['settings']['headers'].get(v):
                 del self.data['settings']['headers'][v]
+
+    # Convert filters to a list of filters css_filter -> include_filters
+    def update_8(self):
+        for uuid, watch in self.data['watching'].items():
+            try:
+                existing_filter = watch.get('css_filter', '')
+                if existing_filter:
+                    watch['include_filters'] = [existing_filter]
+            except:
+                continue
+        return

View File

@@ -40,7 +40,8 @@
         <fieldset>
             <div class="pure-control-group">
                 {{ render_field(form.url, placeholder="https://...", required=true, class="m-d") }}
-                <span class="pure-form-message-inline">Some sites use JavaScript to create the content, for this you should <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Fetching-pages-with-WebDriver">use the Chrome/WebDriver Fetcher</a></span>
+                <span class="pure-form-message-inline">Some sites use JavaScript to create the content, for this you should <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Fetching-pages-with-WebDriver">use the Chrome/WebDriver Fetcher</a></span><br/>
+                <span class="pure-form-message-inline">You can use variables in the URL, perfect for inserting the current date and other logic, <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Handling-variables-in-the-watched-URL">help and examples here</a></span><br/>
             </div>
             <div class="pure-control-group">
                 {{ render_field(form.title, class="m-d") }}
@@ -173,15 +174,17 @@ User-Agent: wonderbra 1.0") }}
             </div>
         </fieldset>
         <div class="pure-control-group">
-            {% set field = render_field(form.css_filter,
-               placeholder=".class-name or #some-id, or other CSS selector rule.",
+            {% set field = render_field(form.include_filters,
+               rows=5,
+               placeholder="#example
+xpath://body/div/span[contains(@class, 'example-class')]",
                class="m-d")
             %}
             {{ field }}
             {% if '/text()' in field %}
             <span class="pure-form-message-inline"><strong>Note!: //text() function does not work where the &lt;element&gt; contains &lt;![CDATA[]]&gt;</strong></span><br/>
             {% endif %}
-            <span class="pure-form-message-inline">
+            <span class="pure-form-message-inline">One rule per line, <i>any</i> rules that matches will be used.<br/>
             <ul>
                 <li>CSS - Limit text to this CSS rule, only text matching this CSS rule is included.</li>
                 <li>JSON - Limit text to this JSON rule, using either <a href="https://pypi.org/project/jsonpath-ng/" target="new">JSONPath</a> or <a href="https://stedolan.github.io/jq/" target="new">jq</a> (if installed).
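The textarea now takes one rule per line, mixing CSS, xpath: and JSON-style rules in a single watch. Turning that raw field value into the stored list is a simple split; a sketch (the exact parsing is an assumption, not taken from this diff):

    raw = """#example
    xpath://body/div/span[contains(@class, 'example-class')]
    json:$.items[0].name"""

    # One rule per line, blank lines dropped
    include_filters = [line.strip() for line in raw.splitlines() if line.strip()]
    print(include_filters)
    # ['#example', "xpath://body/div/span[contains(@class, 'example-class')]", 'json:$.items[0].name']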

View File

@@ -87,7 +87,7 @@
<a class="state-{{'on' if watch.notification_muted}}" href="{{url_for('index', op='mute', uuid=watch.uuid, tag=active_tag)}}"><img src="{{url_for('static_content', group='images', filename='bell-off.svg')}}" alt="Mute notifications" title="Mute notifications"/></a> <a class="state-{{'on' if watch.notification_muted}}" href="{{url_for('index', op='mute', uuid=watch.uuid, tag=active_tag)}}"><img src="{{url_for('static_content', group='images', filename='bell-off.svg')}}" alt="Mute notifications" title="Mute notifications"/></a>
</td> </td>
<td class="title-col inline">{{watch.title if watch.title is not none and watch.title|length > 0 else watch.url}} <td class="title-col inline">{{watch.title if watch.title is not none and watch.title|length > 0 else watch.url}}
<a class="external" target="_blank" rel="noopener" href="{{ watch.url.replace('source:','') }}"></a> <a class="external" target="_blank" rel="noopener" href="{{ watch.link.replace('source:','') }}"></a>
<a href="{{url_for('form_share_put_watch', uuid=watch.uuid)}}"><img style="height: 1em;display:inline-block;" src="{{url_for('static_content', group='images', filename='spread.svg')}}" /></a> <a href="{{url_for('form_share_put_watch', uuid=watch.uuid)}}"><img style="height: 1em;display:inline-block;" src="{{url_for('static_content', group='images', filename='spread.svg')}}" /></a>
{%if watch.fetch_backend == "html_webdriver" %}<img style="height: 1em; display:inline-block;" src="{{url_for('static_content', group='images', filename='Google-Chrome-icon.png')}}" />{% endif %} {%if watch.fetch_backend == "html_webdriver" %}<img style="height: 1em; display:inline-block;" src="{{url_for('static_content', group='images', filename='Google-Chrome-icon.png')}}" />{% endif %}
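The switch from watch.url to watch.link matters because the stored URL can now contain Jinja2 expressions; the overview row should link to the rendered result, not the raw template. A rough sketch of what such a property could look like (hypothetical shape, assuming the jinja2 and jinja2-time packages pulled in by requirements.txt):

    from jinja2 import Environment

    class Watch(dict):
        @property
        def link(self):
            url = self.get('url', '')
            if '{%' in url or '{{' in url:
                # Render {% now %} and friends via the jinja2-time extension
                env = Environment(extensions=['jinja2_time.TimeExtension'])
                return str(env.from_string(url).render())
            return url

    w = Watch(url="https://example.com/?d={% now 'utc', '%Y-%m-%d' %}")
    print(w.link)  # e.g. https://example.com/?d=2022-11-12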

View File

@@ -41,7 +41,7 @@ def app(request):
     cleanup(datastore_path)

-    app_config = {'datastore_path': datastore_path}
+    app_config = {'datastore_path': datastore_path, 'disable_checkver' : True}
     cleanup(app_config['datastore_path'])
     datastore = store.ChangeDetectionStore(datastore_path=app_config['datastore_path'], include_default_watches=False)
     app = changedetection_app(app_config, datastore)

View File

@@ -24,7 +24,7 @@ def test_preferred_proxy(client, live_server):
     res = client.post(
         url_for("edit_page", uuid="first"),
         data={
-            "css_filter": "",
+            "include_filters": "",
             "fetch_backend": "html_requests",
             "headers": "",
             "proxy": "proxy-two",

View File

@@ -23,7 +23,7 @@ def test_basic_auth(client, live_server):
     # Check form validation
     res = client.post(
         url_for("edit_page", uuid="first"),
-        data={"css_filter": "", "url": test_url, "tag": "", "headers": "", 'fetch_backend': "html_requests"},
+        data={"include_filters": "", "url": test_url, "tag": "", "headers": "", 'fetch_backend': "html_requests"},
         follow_redirects=True
     )
     assert b"Updated watch." in res.data

View File

@@ -3,7 +3,7 @@
 import time
 from flask import url_for
 from urllib.request import urlopen
-from .util import set_original_response, set_modified_response, live_server_setup
+from .util import set_original_response, set_modified_response, live_server_setup, wait_for_all_checks

 sleep_time_for_fetch_thread = 3
@@ -36,7 +36,7 @@ def test_check_basic_change_detection_functionality(client, live_server):
client.get(url_for("form_watch_checknow"), follow_redirects=True) client.get(url_for("form_watch_checknow"), follow_redirects=True)
# Give the thread time to pick it up # Give the thread time to pick it up
time.sleep(sleep_time_for_fetch_thread) wait_for_all_checks(client)
# It should report nothing found (no new 'unviewed' class) # It should report nothing found (no new 'unviewed' class)
res = client.get(url_for("index")) res = client.get(url_for("index"))
@@ -69,7 +69,7 @@ def test_check_basic_change_detection_functionality(client, live_server):
res = client.get(url_for("form_watch_checknow"), follow_redirects=True) res = client.get(url_for("form_watch_checknow"), follow_redirects=True)
assert b'1 watches are queued for rechecking.' in res.data assert b'1 watches are queued for rechecking.' in res.data
time.sleep(sleep_time_for_fetch_thread) wait_for_all_checks(client)
# Now something should be ready, indicated by having a 'unviewed' class # Now something should be ready, indicated by having a 'unviewed' class
res = client.get(url_for("index")) res = client.get(url_for("index"))
@@ -98,14 +98,14 @@ def test_check_basic_change_detection_functionality(client, live_server):
     assert b'which has this one new line' in res.data
     assert b'Which is across multiple lines' not in res.data

-    time.sleep(2)
+    wait_for_all_checks(client)

     # Do this a few times.. ensures we dont accidently set the status
     for n in range(2):
         client.get(url_for("form_watch_checknow"), follow_redirects=True)

         # Give the thread time to pick it up
-        time.sleep(sleep_time_for_fetch_thread)
+        wait_for_all_checks(client)

         # It should report nothing found (no new 'unviewed' class)
         res = client.get(url_for("index"))
@@ -125,7 +125,7 @@ def test_check_basic_change_detection_functionality(client, live_server):
     )

     client.get(url_for("form_watch_checknow"), follow_redirects=True)
-    time.sleep(sleep_time_for_fetch_thread)
+    wait_for_all_checks(client)
     res = client.get(url_for("index"))
     assert b'unviewed' in res.data

View File

@@ -1,18 +1,31 @@
 #!/usr/bin/python3
-import time
+from .util import set_original_response, set_modified_response, live_server_setup
 from flask import url_for
 from urllib.request import urlopen
-from . util import set_original_response, set_modified_response, live_server_setup
+from zipfile import ZipFile
+import re
+import time

 def test_backup(client, live_server):
     live_server_setup(live_server)
+    set_original_response()

     # Give the endpoint time to spin up
     time.sleep(1)

+    # Add our URL to the import page
+    res = client.post(
+        url_for("import_page"),
+        data={"urls": url_for('test_endpoint', _external=True)},
+        follow_redirects=True
+    )
+    assert b"1 Imported" in res.data
+    time.sleep(3)
+
     res = client.get(
         url_for("get_backup"),
         follow_redirects=True
@@ -20,6 +33,19 @@ def test_backup(client, live_server):
     # Should get the right zip content type
     assert res.content_type == "application/zip"
     # Should be PK/ZIP stream
     assert res.data.count(b'PK') >= 2
+
+    # ZipFile from buffer seems non-obvious, just save it instead
+    with open("download.zip", 'wb') as f:
+        f.write(res.data)
+
+    zip = ZipFile('download.zip')
+    l = zip.namelist()
+    uuid4hex = re.compile('^[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}.*txt', re.I)
+    newlist = list(filter(uuid4hex.match, l))  # Read Note below
+
+    # Should be two txt files in the archive (history and the snapshot)
+    assert len(newlist) == 2
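For reference, the "non-obvious" in-memory route the comment above sidesteps is io.BytesIO, which avoids leaving download.zip behind on disk; a sketch of that alternative (with a simplified UUID pattern):

    import io
    import re
    from zipfile import ZipFile

    # res.data holds the raw bytes of the backup response
    with ZipFile(io.BytesIO(res.data)) as backup:
        names = backup.namelist()

    uuid4hex = re.compile(r'^[a-f0-9-]{36}.*txt', re.I)
    snapshots = [n for n in names if uuid4hex.match(n)]
    assert len(snapshots) == 2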

View File

@@ -46,22 +46,23 @@ def set_modified_response():
 # Test that the CSS extraction works how we expect, important here is the right placing of new lines \n's
-def test_css_filter_output():
-    from changedetectionio import fetch_site_status
+def test_include_filters_output():
     from inscriptis import get_text

     # Check text with sub-parts renders correctly
     content = """<html> <body><div id="thingthing" > Some really <b>bold</b> text </div> </body> </html>"""
-    html_blob = css_filter(css_filter="#thingthing", html_content=content)
+    html_blob = include_filters(include_filters="#thingthing", html_content=content)
     text = get_text(html_blob)
     assert text == " Some really bold text"

     content = """<html> <body>
     <p>foo bar blah</p>
-    <div class="parts">Block A</div> <div class="parts">Block B</div></body>
+    <DIV class="parts">Block A</DiV> <div class="parts">Block B</DIV></body>
     </html>
     """
-    html_blob = css_filter(css_filter=".parts", html_content=content)
+    # in xPath this would be //*[@class='parts']
+    html_blob = include_filters(include_filters=".parts", html_content=content)
     text = get_text(html_blob)

     # Divs are converted to 4 whitespaces by inscriptis
@@ -69,10 +70,10 @@ def test_css_filter_output():
 # Tests the whole stack works with the CSS Filter
-def test_check_markup_css_filter_restriction(client, live_server):
+def test_check_markup_include_filters_restriction(client, live_server):
     sleep_time_for_fetch_thread = 3
-    css_filter = "#sametext"
+    include_filters = "#sametext"

     set_original_response()
@@ -98,7 +99,7 @@ def test_check_markup_css_filter_restriction(client, live_server):
     # Add our URL to the import page
     res = client.post(
         url_for("edit_page", uuid="first"),
-        data={"css_filter": css_filter, "url": test_url, "tag": "", "headers": "", 'fetch_backend': "html_requests"},
+        data={"include_filters": include_filters, "url": test_url, "tag": "", "headers": "", 'fetch_backend': "html_requests"},
         follow_redirects=True
     )
     assert b"Updated watch." in res.data
@@ -107,7 +108,7 @@ def test_check_markup_css_filter_restriction(client, live_server):
     res = client.get(
         url_for("edit_page", uuid="first"),
     )
-    assert bytes(css_filter.encode('utf-8')) in res.data
+    assert bytes(include_filters.encode('utf-8')) in res.data

     # Trigger a check
     client.get(url_for("form_watch_checknow"), follow_redirects=True)
@@ -126,3 +127,58 @@ def test_check_markup_css_filter_restriction(client, live_server):
     # Because it should be looking at only that 'sametext' id
     res = client.get(url_for("index"))
     assert b'unviewed' in res.data
+
+# Tests the whole stack works with the CSS Filter
+def test_check_multiple_filters(client, live_server):
+    sleep_time_for_fetch_thread = 3
+    include_filters = "#blob-a\r\nxpath://*[contains(@id,'blob-b')]"
+
+    with open("test-datastore/endpoint-content.txt", "w") as f:
+        f.write("""<html><body>
+     <div id="blob-a">Blob A</div>
+     <div id="blob-b">Blob B</div>
+     <div id="blob-c">Blob C</div>
+     </body>
+     </html>
+    """)
+
+    # Give the endpoint time to spin up
+    time.sleep(1)
+
+    # Add our URL to the import page
+    test_url = url_for('test_endpoint', _external=True)
+    res = client.post(
+        url_for("import_page"),
+        data={"urls": test_url},
+        follow_redirects=True
+    )
+    assert b"1 Imported" in res.data
+    time.sleep(1)
+
+    # Goto the edit page, add our ignore text
+    # Add our URL to the import page
+    res = client.post(
+        url_for("edit_page", uuid="first"),
+        data={"include_filters": include_filters,
+              "url": test_url,
+              "tag": "",
+              "headers": "",
+              'fetch_backend': "html_requests"},
+        follow_redirects=True
+    )
+    assert b"Updated watch." in res.data
+
+    # Give the thread time to pick it up
+    time.sleep(sleep_time_for_fetch_thread)
+
+    res = client.get(
+        url_for("preview_page", uuid="first"),
+        follow_redirects=True
+    )
+
+    # Only the two blobs should be here
+    assert b"Blob A" in res.data  # CSS was ok
+    assert b"Blob B" in res.data  # xPath was ok
+    assert b"Blob C" not in res.data  # Should not be included

View File

@@ -88,7 +88,7 @@ def test_check_filter_multiline(client, live_server):
     # Add our URL to the import page
     res = client.post(
         url_for("edit_page", uuid="first"),
-        data={"css_filter": '',
+        data={"include_filters": '',
               'extract_text': '/something.+?6 billion.+?lines/si',
               "url": test_url,
               "tag": "",
@@ -116,7 +116,7 @@ def test_check_filter_multiline(client, live_server):
 def test_check_filter_and_regex_extract(client, live_server):
     sleep_time_for_fetch_thread = 3
-    css_filter = ".changetext"
+    include_filters = ".changetext"

     set_original_response()
@@ -143,7 +143,7 @@ def test_check_filter_and_regex_extract(client, live_server):
     # Add our URL to the import page
     res = client.post(
         url_for("edit_page", uuid="first"),
-        data={"css_filter": css_filter,
+        data={"include_filters": include_filters,
               'extract_text': '\d+ online\r\n\d+ guests\r\n/somecase insensitive \d+/i\r\n/somecase insensitive (345\d)/i',
               "url": test_url,
               "tag": "",

View File

@@ -92,7 +92,7 @@ def test_filter_doesnt_exist_then_exists_should_get_notification(client, live_se
"tag": "my tag", "tag": "my tag",
"title": "my title", "title": "my title",
"headers": "", "headers": "",
"css_filter": '.ticket-available', "include_filters": '.ticket-available',
"fetch_backend": "html_requests"}) "fetch_backend": "html_requests"})
res = client.post( res = client.post(

View File

@@ -76,7 +76,7 @@ def run_filter_test(client, content_filter):
"title": "my title", "title": "my title",
"headers": "", "headers": "",
"filter_failure_notification_send": 'y', "filter_failure_notification_send": 'y',
"css_filter": content_filter, "include_filters": content_filter,
"fetch_backend": "html_requests"}) "fetch_backend": "html_requests"})
res = client.post( res = client.post(
@@ -95,7 +95,7 @@ def run_filter_test(client, content_filter):
     time.sleep(3)

     # We should see something in the frontend
-    assert b'Warning, filter' in res.data
+    assert b'Warning, no filters were found' in res.data

     # Now it should exist and contain our "filter not found" alert
     assert os.path.isfile("test-datastore/notification.txt")
@@ -131,7 +131,7 @@ def run_filter_test(client, content_filter):
 def test_setup(live_server):
     live_server_setup(live_server)

-def test_check_css_filter_failure_notification(client, live_server):
+def test_check_include_filters_failure_notification(client, live_server):
     set_original_response()
     time.sleep(1)
     run_filter_test(client, '#nope-doesnt-exist')

View File

@@ -0,0 +1,33 @@
+#!/usr/bin/python3
+
+import time
+from flask import url_for
+from .util import live_server_setup
+
+# If there was only a change in the whitespacing, then we shouldnt have a change detected
+def test_jinja2_in_url_query(client, live_server):
+    live_server_setup(live_server)
+
+    # Give the endpoint time to spin up
+    time.sleep(1)
+
+    # Add our URL to the import page
+    test_url = url_for('test_return_query', _external=True)
+
+    # because url_for() will URL-encode the var, but we dont here
+    full_url = "{}?{}".format(test_url,
+                              "date={% now 'Europe/Berlin', '%Y' %}.{% now 'Europe/Berlin', '%m' %}.{% now 'Europe/Berlin', '%d' %}", )
+    res = client.post(
+        url_for("form_quick_watch_add"),
+        data={"url": full_url, "tag": "test"},
+        follow_redirects=True
+    )
+    assert b"Watch added" in res.data
+    time.sleep(3)
+
+    # It should report nothing found (no new 'unviewed' class)
+    res = client.get(
+        url_for("preview_page", uuid="first"),
+        follow_redirects=True
+    )
+    assert b'date=2' in res.data
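The closing assertion is deliberately loose: the template renders to something like date=2022.11.12, so checking for date=2 holds for any year this millennium. Rendering the same expression directly (assuming jinja2 plus jinja2-time, as listed in requirements.txt):

    from jinja2 import Environment

    env = Environment(extensions=['jinja2_time.TimeExtension'])
    tmpl = "date={% now 'Europe/Berlin', '%Y' %}.{% now 'Europe/Berlin', '%m' %}.{% now 'Europe/Berlin', '%d' %}"
    print(env.from_string(tmpl).render())  # e.g. date=2022.11.12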

View File

@@ -132,7 +132,7 @@ def set_original_response():
     return None

-def set_response_with_html():
+def set_json_response_with_html():
     test_return_data = """
     {
       "test": [
@@ -176,7 +176,7 @@ def set_modified_response():
 def test_check_json_without_filter(client, live_server):
     # Request a JSON document from a application/json source containing HTML
     # and be sure it doesn't get chewed up by instriptis
-    set_response_with_html()
+    set_json_response_with_html()

     # Give the endpoint time to spin up
     time.sleep(1)
@@ -189,9 +189,6 @@ def test_check_json_without_filter(client, live_server):
         follow_redirects=True
     )

-    # Trigger a check
-    client.get(url_for("form_watch_checknow"), follow_redirects=True)
-
     # Give the thread time to pick it up
     time.sleep(3)
@@ -200,6 +197,7 @@ def test_check_json_without_filter(client, live_server):
         follow_redirects=True
     )

+    # Should still see '"html": "<b>"'
     assert b'&#34;&lt;b&gt;' in res.data
     assert res.data.count(b'{\n') >= 2
@@ -221,9 +219,6 @@ def check_json_filter(json_filter, client, live_server):
     )
     assert b"1 Imported" in res.data

-    # Trigger a check
-    client.get(url_for("form_watch_checknow"), follow_redirects=True)
-
     # Give the thread time to pick it up
     time.sleep(3)
@@ -231,7 +226,7 @@ def check_json_filter(json_filter, client, live_server):
     # Add our URL to the import page
     res = client.post(
         url_for("edit_page", uuid="first"),
-        data={"css_filter": json_filter,
+        data={"include_filters": json_filter,
               "url": test_url,
               "tag": "",
               "headers": "",
@@ -247,9 +242,6 @@ def check_json_filter(json_filter, client, live_server):
     )
     assert bytes(escape(json_filter).encode('utf-8')) in res.data

-    # Trigger a check
-    client.get(url_for("form_watch_checknow"), follow_redirects=True)
-
     # Give the thread time to pick it up
     time.sleep(3)

     # Make a change
@@ -301,7 +293,7 @@ def check_json_filter_bool_val(json_filter, client, live_server):
     # Add our URL to the import page
     res = client.post(
         url_for("edit_page", uuid="first"),
-        data={"css_filter": json_filter,
+        data={"include_filters": json_filter,
               "url": test_url,
               "tag": "",
               "headers": "",
@@ -311,11 +303,6 @@ def check_json_filter_bool_val(json_filter, client, live_server):
     )
     assert b"Updated watch." in res.data

-    time.sleep(3)
-
-    # Trigger a check
-    client.get(url_for("form_watch_checknow"), follow_redirects=True)
-
     # Give the thread time to pick it up
     time.sleep(3)

     # Make a change
@@ -360,9 +347,6 @@ def check_json_ext_filter(json_filter, client, live_server):
     )
     assert b"1 Imported" in res.data

-    # Trigger a check
-    client.get(url_for("form_watch_checknow"), follow_redirects=True)
-
     # Give the thread time to pick it up
     time.sleep(3)
@@ -370,7 +354,7 @@ def check_json_ext_filter(json_filter, client, live_server):
     # Add our URL to the import page
     res = client.post(
         url_for("edit_page", uuid="first"),
-        data={"css_filter": json_filter,
+        data={"include_filters": json_filter,
               "url": test_url,
               "tag": "",
               "headers": "",
@@ -386,9 +370,6 @@ def check_json_ext_filter(json_filter, client, live_server):
     )
     assert bytes(escape(json_filter).encode('utf-8')) in res.data

-    # Trigger a check
-    client.get(url_for("form_watch_checknow"), follow_redirects=True)
-
     # Give the thread time to pick it up
     time.sleep(3)

     # Make a change
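For reference, the json: style filters these helpers exercise map onto jsonpath-ng from requirements.txt; a minimal sketch of evaluating one against a document (the prefix handling is illustrative, not the project's exact code):

    import json
    from jsonpath_ng.ext import parse

    doc = json.loads('{"test": [{"html": "<b>"}, {"html": "something"}]}')
    json_filter = 'json:$.test[1].html'

    # Strip the routing prefix, then evaluate the JSONPath expression
    expr = parse(json_filter[len('json:'):])
    print([m.value for m in expr.find(doc)])  # ['something']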

View File

@@ -14,7 +14,7 @@ def test_share_watch(client, live_server):
     live_server_setup(live_server)

     test_url = url_for('test_endpoint', _external=True)
-    css_filter = ".nice-filter"
+    include_filters = ".nice-filter"

     # Add our URL to the import page
     res = client.post(
@@ -29,7 +29,7 @@ def test_share_watch(client, live_server):
     # Add our URL to the import page
     res = client.post(
         url_for("edit_page", uuid="first"),
-        data={"css_filter": css_filter, "url": test_url, "tag": "", "headers": "", 'fetch_backend': "html_requests"},
+        data={"include_filters": include_filters, "url": test_url, "tag": "", "headers": "", 'fetch_backend': "html_requests"},
         follow_redirects=True
     )
     assert b"Updated watch." in res.data
@@ -37,7 +37,7 @@ def test_share_watch(client, live_server):
     res = client.get(
         url_for("edit_page", uuid="first"),
     )
-    assert bytes(css_filter.encode('utf-8')) in res.data
+    assert bytes(include_filters.encode('utf-8')) in res.data

     # click share the link
     res = client.get(
@@ -73,4 +73,8 @@ def test_share_watch(client, live_server):
     res = client.get(
         url_for("edit_page", uuid="first"),
     )
-    assert bytes(css_filter.encode('utf-8')) in res.data
+    assert bytes(include_filters.encode('utf-8')) in res.data
+
+    # Check it saved the URL
+    res = client.get(url_for("index"))
+    assert bytes(test_url.encode('utf-8')) in res.data

View File

@@ -57,10 +57,9 @@ def test_check_basic_change_detection_functionality_source(client, live_server):
+# `subtractive_selectors` should still work in `source:` type requests
 def test_check_ignore_elements(client, live_server):
     set_original_response()
     time.sleep(2)
     test_url = 'source:'+url_for('test_endpoint', _external=True)

     # Add our URL to the import page
@@ -77,9 +76,9 @@ def test_check_ignore_elements(client, live_server):
     #####################
     # We want <span> and <p> ONLY, but ignore span with .foobar-detection
-    res = client.post(
+    client.post(
         url_for("edit_page", uuid="first"),
-        data={"css_filter": 'span,p', "url": test_url, "tag": "", "subtractive_selectors": ".foobar-detection", 'fetch_backend': "html_requests"},
+        data={"include_filters": 'span,p', "url": test_url, "tag": "", "subtractive_selectors": ".foobar-detection", 'fetch_backend': "html_requests"},
         follow_redirects=True
     )
@@ -89,7 +88,6 @@ def test_check_ignore_elements(client, live_server):
url_for("preview_page", uuid="first"), url_for("preview_page", uuid="first"),
follow_redirects=True follow_redirects=True
) )
assert b'foobar-detection' not in res.data assert b'foobar-detection' not in res.data
assert b'&lt;br' not in res.data assert b'&lt;br' not in res.data
assert b'&lt;p' in res.data assert b'&lt;p' in res.data

View File

@@ -49,7 +49,7 @@ def test_trigger_regex_functionality_with_filter(client, live_server):
url_for("edit_page", uuid="first"), url_for("edit_page", uuid="first"),
data={"trigger_text": "/cool.stuff/", data={"trigger_text": "/cool.stuff/",
"url": test_url, "url": test_url,
"css_filter": '#in-here', "include_filters": '#in-here',
"fetch_backend": "html_requests"}, "fetch_backend": "html_requests"},
follow_redirects=True follow_redirects=True
) )

View File

@@ -22,7 +22,7 @@ def test_check_watch_field_storage(client, live_server):
url_for("edit_page", uuid="first"), url_for("edit_page", uuid="first"),
data={ "notification_urls": "json://127.0.0.1:30000\r\njson://128.0.0.1\r\n", data={ "notification_urls": "json://127.0.0.1:30000\r\njson://128.0.0.1\r\n",
"time_between_check-minutes": 126, "time_between_check-minutes": 126,
"css_filter" : ".fooclass", "include_filters" : ".fooclass",
"title" : "My title", "title" : "My title",
"ignore_text" : "ignore this", "ignore_text" : "ignore this",
"url": test_url, "url": test_url,

View File

@@ -89,7 +89,7 @@ def test_check_xpath_filter_utf8(client, live_server):
     time.sleep(1)
     res = client.post(
         url_for("edit_page", uuid="first"),
-        data={"css_filter": filter, "url": test_url, "tag": "", "headers": "", 'fetch_backend': "html_requests"},
+        data={"include_filters": filter, "url": test_url, "tag": "", "headers": "", 'fetch_backend': "html_requests"},
         follow_redirects=True
     )
     assert b"Updated watch." in res.data
@@ -143,7 +143,7 @@ def test_check_xpath_text_function_utf8(client, live_server):
     time.sleep(1)
     res = client.post(
         url_for("edit_page", uuid="first"),
-        data={"css_filter": filter, "url": test_url, "tag": "", "headers": "", 'fetch_backend': "html_requests"},
+        data={"include_filters": filter, "url": test_url, "tag": "", "headers": "", 'fetch_backend': "html_requests"},
         follow_redirects=True
     )
     assert b"Updated watch." in res.data
@@ -182,9 +182,6 @@ def test_check_markup_xpath_filter_restriction(client, live_server):
     )
     assert b"1 Imported" in res.data

-    # Trigger a check
-    client.get(url_for("form_watch_checknow"), follow_redirects=True)
-
     # Give the thread time to pick it up
     time.sleep(sleep_time_for_fetch_thread)
@@ -192,7 +189,7 @@ def test_check_markup_xpath_filter_restriction(client, live_server):
     # Add our URL to the import page
     res = client.post(
         url_for("edit_page", uuid="first"),
-        data={"css_filter": xpath_filter, "url": test_url, "tag": "", "headers": "", 'fetch_backend': "html_requests"},
+        data={"include_filters": xpath_filter, "url": test_url, "tag": "", "headers": "", 'fetch_backend': "html_requests"},
         follow_redirects=True
     )
     assert b"Updated watch." in res.data
@@ -230,10 +227,11 @@ def test_xpath_validation(client, live_server):
         follow_redirects=True
     )
     assert b"1 Imported" in res.data
+    time.sleep(2)

     res = client.post(
         url_for("edit_page", uuid="first"),
-        data={"css_filter": "/something horrible", "url": test_url, "tag": "", "headers": "", 'fetch_backend': "html_requests"},
+        data={"include_filters": "/something horrible", "url": test_url, "tag": "", "headers": "", 'fetch_backend': "html_requests"},
         follow_redirects=True
     )
     assert b"is not a valid XPath expression" in res.data
@@ -242,7 +240,7 @@ def test_xpath_validation(client, live_server):
 # actually only really used by the distll.io importer, but could be handy too
-def test_check_with_prefix_css_filter(client, live_server):
+def test_check_with_prefix_include_filters(client, live_server):
     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
     assert b'Deleted' in res.data
@@ -263,7 +261,7 @@ def test_check_with_prefix_css_filter(client, live_server):
     res = client.post(
         url_for("edit_page", uuid="first"),
-        data={"css_filter": "xpath://*[contains(@class, 'sametext')]", "url": test_url, "tag": "", "headers": "", 'fetch_backend': "html_requests"},
+        data={"include_filters": "xpath://*[contains(@class, 'sametext')]", "url": test_url, "tag": "", "headers": "", 'fetch_backend': "html_requests"},
         follow_redirects=True
     )
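The "is not a valid XPath expression" assertion above implies the form validator compiles the expression up front, which lxml makes cheap. A sketch of such a check (a hypothetical validator, not the project's exact code):

    from lxml import etree

    def is_valid_xpath(expression):
        try:
            etree.XPath(expression)
            return True
        except etree.XPathSyntaxError:
            return False

    print(is_valid_xpath("//*[contains(@class, 'sametext')]"))  # True
    print(is_valid_xpath("/something horrible"))                # False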

View File

@@ -86,6 +86,7 @@ def extract_UUID_from_client(client):
 def wait_for_all_checks(client):
     # Loop waiting until done..
     attempt=0
+    time.sleep(0.1)
     while attempt < 60:
         time.sleep(1)
         res = client.get(url_for("index"))
@@ -159,5 +160,10 @@ def live_server_setup(live_server):
ret = " ".join([auth.username, auth.password, auth.type]) ret = " ".join([auth.username, auth.password, auth.type])
return ret return ret
# Just return some GET var
@live_server.app.route('/test-return-query', methods=['GET'])
def test_return_query():
return request.query_string
live_server.start() live_server.start()
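Only the top of wait_for_all_checks appears in this hunk; the idea is to poll the index page until no watch is still being fetched, rather than sleeping a fixed interval. A sketch of the whole helper under that reading (the 'Checking now' marker is an assumption about the UI text):

    import time
    from flask import url_for

    def wait_for_all_checks(client):
        # Poll the index until the check queue drains, for up to ~60 seconds
        attempt = 0
        time.sleep(0.1)
        while attempt < 60:
            time.sleep(1)
            res = client.get(url_for("index"))
            if b'Checking now' not in res.data:
                break
            attempt += 1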

View File

@@ -4,7 +4,7 @@ import queue
 import time

 from changedetectionio import content_fetcher
-from changedetectionio.html_tools import FilterNotFoundInResponse
+from changedetectionio.fetch_site_status import FilterNotFoundInResponse

 # A single update worker
 #
@@ -91,8 +91,8 @@ class update_worker(threading.Thread):
             return

         n_object = {'notification_title': 'Changedetection.io - Alert - CSS/xPath filter was not present in the page',
-                    'notification_body': "Your configured CSS/xPath filter of '{}' for {{watch_url}} did not appear on the page after {} attempts, did the page change layout?\n\nLink: {{base_url}}/edit/{{watch_uuid}}\n\nThanks - Your omniscient changedetection.io installation :)\n".format(
-                        watch['css_filter'],
+                    'notification_body': "Your configured CSS/xPath filters of '{}' for {{watch_url}} did not appear on the page after {} attempts, did the page change layout?\n\nLink: {{base_url}}/edit/{{watch_uuid}}\n\nThanks - Your omniscient changedetection.io installation :)\n".format(
+                        ", ".join(watch['include_filters']),
                         threshold),
                     'notification_format': 'text'}
@@ -189,7 +189,7 @@ class update_worker(threading.Thread):
                 if not self.datastore.data['watching'].get(uuid):
                     continue

-                err_text = "Warning, filter '{}' not found".format(str(e))
+                err_text = "Warning, no filters were found, no change detection ran."
                 self.datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text,
                                                                    # So that we get a trigger when the content is added again
                                                                    'previous_md5': ''})
@@ -282,16 +282,19 @@ class update_worker(threading.Thread):
                 self.app.logger.error("Exception reached processing watch UUID: %s - %s", uuid, str(e))
                 self.datastore.update_watch(uuid=uuid, update_obj={'last_error': str(e)})

-            # Always record that we atleast tried
-            self.datastore.update_watch(uuid=uuid, update_obj={'fetch_time': round(time.time() - now, 3),
-                                                               'last_checked': round(time.time())})
-
-            # Always save the screenshot if it's available
-            if update_handler.screenshot:
-                self.datastore.save_screenshot(watch_uuid=uuid, screenshot=update_handler.screenshot)
-            if update_handler.xpath_data:
-                self.datastore.save_xpath_data(watch_uuid=uuid, data=update_handler.xpath_data)
+            if self.datastore.data['watching'].get(uuid):
+                # Always record that we atleast tried
+                count = self.datastore.data['watching'][uuid].get('check_count', 0) + 1
+                self.datastore.update_watch(uuid=uuid, update_obj={'fetch_time': round(time.time() - now, 3),
+                                                                   'last_checked': round(time.time()),
+                                                                   'check_count': count
+                                                                   })
+
+                # Always save the screenshot if it's available
+                if update_handler.screenshot:
+                    self.datastore.save_screenshot(watch_uuid=uuid, screenshot=update_handler.screenshot)
+                if update_handler.xpath_data:
+                    self.datastore.save_xpath_data(watch_uuid=uuid, data=update_handler.xpath_data)

             self.current_uuid = None  # Done
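The new if self.datastore.data['watching'].get(uuid): guard matters because a watch can be deleted while its check is still in flight; writing stats back for a vanished UUID would either raise or resurrect the entry. A toy illustration of the failure mode (invented data, not project code):

    watching = {}  # the watch was deleted while the worker was still busy
    uuid = 'aaaa-bbbb'

    # Unguarded: watching[uuid]['check_count'] += 1  -> KeyError

    # Guarded: silently skip the vanished watch
    if watching.get(uuid):
        watching[uuid]['check_count'] = watching[uuid].get('check_count', 0) + 1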

View File

@@ -1,36 +1,36 @@
-flask ~= 2.0
+flask~=2.0
 flask_wtf
-eventlet >= 0.31.0
+eventlet>=0.31.0
 validators
-timeago ~= 1.0
+timeago~=1.0
-inscriptis ~= 2.2
+inscriptis~=2.2
-feedgen ~= 0.9
+feedgen~=0.9
-flask-login ~= 0.5
+flask-login~=0.5
 flask_restful
 pytz

 # Set these versions together to avoid a RequestsDependencyWarning
 # >= 2.26 also adds Brotli support if brotli is installed
-brotli ~= 1.0
+brotli~=1.0
-requests[socks] ~= 2.28
+requests[socks] ~=2.28
-urllib3 > 1.26
+urllib3>1.26
-chardet > 2.3.0
+chardet>2.3.0
-wtforms ~= 3.0
+wtforms~=3.0
-jsonpath-ng ~= 1.5.3
+jsonpath-ng~=1.5.3

 # jq not available on Windows so must be installed manually

 # Notification library
-apprise ~= 1.1.0
+apprise~=1.1.0

 # apprise mqtt https://github.com/dgtlmoon/changedetection.io/issues/315
 paho-mqtt

 # Pinned version of cryptography otherwise
 # ERROR: Could not build wheels for cryptography which use PEP 517 and cannot be installed directly
-cryptography ~= 3.4
+cryptography~=3.4

 # Used for CSS filtering
 bs4
@@ -39,12 +39,20 @@ bs4
 lxml

 # 3.141 was missing socksVersion, 3.150 was not in pypi, so we try 4.1.0
-selenium ~= 4.1.0
+selenium~=4.1.0

 # https://stackoverflow.com/questions/71652965/importerror-cannot-import-name-safe-str-cmp-from-werkzeug-security/71653849#71653849
 # ImportError: cannot import name 'safe_str_cmp' from 'werkzeug.security'
 # need to revisit flask login versions
-werkzeug ~= 2.0.0
+werkzeug~=2.0.0

+# Templating, so far just in the URLs but in the future can be for the notifications also
+jinja2~=3.1
+jinja2-time
+
+# https://peps.python.org/pep-0508/#environment-markers
+# https://github.com/dgtlmoon/changedetection.io/pull/1009
+jq~=1.3 ;python_version >= "3.8" and sys_platform == "linux"
+
 # playwright is installed at Dockerfile build time because it's not available on all platforms
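pip evaluates the ;python_version >= "3.8" and sys_platform == "linux" suffix as a PEP 508 environment marker at install time, so Windows hosts and older Pythons simply skip the jq wheel. The same check can be reproduced with the packaging library (illustrative, not part of this repo):

    from packaging.markers import Marker

    marker = Marker('python_version >= "3.8" and sys_platform == "linux"')
    print(marker.evaluate())  # True on Linux with Python 3.8+, False elsewhere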