Mirror of https://github.com/dgtlmoon/changedetection.io.git (synced 2025-11-01 07:08:47 +00:00)

Compare commits: 21 commits, 1924-dynam ... 1967-regen
| Author | SHA1 | Date |
|---|---|---|
|  | 6d62995e31 |  |
|  | c8dcc072c8 |  |
|  | 7c97a5a403 |  |
|  | 7dd967be8e |  |
|  | 3607d15185 |  |
|  | 3382b4cb3f |  |
|  | 5f030d3668 |  |
|  | 06975d6d8f |  |
|  | f58e5b7f19 |  |
|  | e50eff8e35 |  |
|  | 07a853ce59 |  |
|  | 80f8d23309 |  |
|  | 9f41d15908 |  |
|  | 89797dfe02 |  |
|  | c905652780 |  |
|  | 99246d3e6d |  |
|  | f9f69bf0dd |  |
|  | 68efb25e9b |  |
|  | 70606ab05d |  |
|  | d3c8386874 |  |
|  | 47103d7f3d |  |
16 .github/workflows/containers.yml (vendored)
@@ -96,8 +96,9 @@ jobs:
tags: |
${{ secrets.DOCKER_HUB_USERNAME }}/changedetection.io:dev,ghcr.io/${{ github.repository }}:dev
platforms: linux/amd64,linux/arm64,linux/arm/v6,linux/arm/v7,linux/arm/v8
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
cache-from: type=gha
cache-to: type=gha,mode=max
# Looks like this was disabled
# provenance: false

@@ -116,18 +117,11 @@ jobs:
${{ secrets.DOCKER_HUB_USERNAME }}/changedetection.io:latest
ghcr.io/dgtlmoon/changedetection.io:latest
platforms: linux/amd64,linux/arm64,linux/arm/v6,linux/arm/v7,linux/arm/v8
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
cache-from: type=gha
cache-to: type=gha,mode=max
# Looks like this was disabled
# provenance: false

- name: Image digest
run: echo step SHA ${{ steps.vars.outputs.sha_short }} tag ${{steps.vars.outputs.tag}} branch ${{steps.vars.outputs.branch}} digest ${{ steps.docker_build.outputs.digest }}

- name: Cache Docker layers
uses: actions/cache@v3
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
@@ -20,17 +20,12 @@ WORKDIR /install

COPY requirements.txt /requirements.txt

# Instructing pip to fetch wheels from piwheels.org on ARMv6 and ARMv7 machines
RUN if [ "$(dpkg --print-architecture)" = "armhf" ] || [ "$(dpkg --print-architecture)" = "armel" ]; then \
printf "[global]\nextra-index-url=https://www.piwheels.org/simple\n" > /etc/pip.conf; \
fi;

RUN pip install --target=/dependencies -r /requirements.txt

# Playwright is an alternative to Selenium
# Excluded this package from requirements.txt to prevent arm/v6 and arm/v7 builds from failing
# https://github.com/dgtlmoon/changedetection.io/pull/1067 also musl/alpine (not supported)
RUN pip install --target=/dependencies playwright~=1.27.1 \
RUN pip install --target=/dependencies playwright~=1.39 \
|| echo "WARN: Failed to install Playwright. The application can still run, but the Playwright option will be disabled."

# Final image stage
@@ -16,3 +16,4 @@ global-exclude venv

global-exclude test-datastore
global-exclude changedetection.io*dist-info
global-exclude changedetectionio/tests/proxy_socks5/test-datastore
@@ -232,6 +232,13 @@ See the wiki https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configura

Raspberry Pi and linux/arm/v6 linux/arm/v7 arm64 devices are supported! See the wiki for [details](https://github.com/dgtlmoon/changedetection.io/wiki/Fetching-pages-with-WebDriver)

## Import support

Easily [import your list of websites to watch for changes in Excel .xlsx file format](https://changedetection.io/tutorial/how-import-your-website-change-detection-lists-excel), or paste in lists of website URLs as plaintext.

Excel import is recommended - that way you can better organise tags/groups of websites and other features.

## API Support

Supports managing the website watch list [via our API](https://changedetection.io/docs/api_v1/index.html)
@@ -38,7 +38,7 @@ from flask_paginate import Pagination, get_page_parameter
from changedetectionio import html_tools
from changedetectionio.api import api_v1

__version__ = '0.45.5'
__version__ = '0.45.7.3'

from changedetectionio.store import BASE_URL_NOT_SET_TEXT
@@ -105,6 +105,10 @@ def get_darkmode_state():
css_dark_mode = request.cookies.get('css_dark_mode', 'false')
return 'true' if css_dark_mode and strtobool(css_dark_mode) else 'false'

@app.template_global()
def get_css_version():
return __version__

# We use the whole watch object from the store/JSON so we can see if there's some related status in terms of a thread
# running or something similar.
@app.template_filter('format_last_checked_time')
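The new get_css_version() template global above is what drives the `?v=` cache-busting seen later in base.html. A minimal, self-contained Flask sketch of the same idea (module layout and names here are illustrative, not the project's):

```python
# Sketch: expose the app version to Jinja templates so stylesheet URLs change
# on every release and browsers re-fetch them after an upgrade.
from flask import Flask, render_template_string

app = Flask(__name__)
__version__ = '0.45.7.3'

@app.template_global()
def get_css_version():
    # Any string that changes per release works as a cache-buster
    return __version__

@app.route("/")
def index():
    return render_template_string(
        '<link rel="stylesheet" href="/static/styles.css?v={{ get_css_version() }}">')
```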
@@ -815,6 +819,16 @@ def changedetection_app(config=None, datastore_o=None):

return output

@app.route("/settings/reset-api-key", methods=['GET'])
@login_optionally_required
def settings_reset_api_key():
import secrets
secret = secrets.token_hex(16)
datastore.data['settings']['application']['api_access_token'] = secret
datastore.needs_write_urgent = True
flash("API Key was regenerated.")
return redirect(url_for('settings_page')+'#api')

@app.route("/import", methods=['GET', "POST"])
@login_optionally_required
def import_page():
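For reference, a small sketch of the key-regeneration idea in the new /settings/reset-api-key route: secrets.token_hex(16) draws 16 random bytes from the OS CSPRNG and returns 32 hex characters (the helper name below is illustrative):

```python
import secrets

def regenerate_api_key(app_settings: dict) -> str:
    # 16 random bytes -> 32 hex characters, suitable as an API access token
    new_key = secrets.token_hex(16)
    app_settings['api_access_token'] = new_key
    return new_key

settings = {'api_access_token': None}
print(len(regenerate_api_key(settings)))  # 32
```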
@@ -1208,8 +1222,7 @@ def changedetection_app(config=None, datastore_o=None):
# These files should be in our subdirectory
try:
# set nocache, set content-type
watch_dir = datastore_o.datastore_path + "/" + filename
response = make_response(send_from_directory(filename="elements.json", directory=watch_dir, path=watch_dir + "/elements.json"))
response = make_response(send_from_directory(os.path.join(datastore_o.datastore_path, filename), "elements.json"))
response.headers['Content-type'] = 'application/json'
response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
response.headers['Pragma'] = 'no-cache'
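The replacement call follows the Flask 2.x send_from_directory(directory, path) convention, where `path` is relative to `directory`; the removed line mixed the legacy `filename=` keyword with an absolute path. A hedged, self-contained sketch of the corrected pattern (the route and datastore path below are assumptions):

```python
import os
from flask import Flask, send_from_directory, make_response

app = Flask(__name__)
DATASTORE_PATH = "/tmp/datastore"   # illustrative location only

@app.route("/elements/<string:watch_uuid>")
def elements_json(watch_uuid):
    # Serve <datastore>/<uuid>/elements.json with no-cache headers
    watch_dir = os.path.join(DATASTORE_PATH, watch_uuid)
    response = make_response(send_from_directory(watch_dir, "elements.json"))
    response.headers['Content-Type'] = 'application/json'
    response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
    return response
```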
@@ -40,8 +40,8 @@ def construct_blueprint(datastore: ChangeDetectionStore):
contents = ''
now = time.time()
try:
update_handler = text_json_diff.perform_site_check(datastore=datastore)
changed_detected, update_obj, contents = update_handler.run(uuid, preferred_proxy=preferred_proxy, skip_when_checksum_same=False)
update_handler = text_json_diff.perform_site_check(datastore=datastore, watch_uuid=uuid)
update_handler.call_browser()
# title, size is len contents not len xfer
except content_fetcher.Non200ErrorCodeReceived as e:
if e.status_code == 404:
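A minimal, runnable sketch of the two-step lifecycle this hunk switches to: construct the processor with (datastore, watch_uuid), fetch with call_browser(), then run the detection. The classes below are stand-ins, not the real text_json_diff module:

```python
class difference_detection_processor:
    def __init__(self, datastore, watch_uuid):
        self.datastore = datastore
        self.watch = datastore['watching'][watch_uuid]
        self.content = None

    def call_browser(self):
        # In the real code this picks a fetcher (requests/playwright) and runs it
        self.content = f"<html>fetched {self.watch['url']}</html>"

    def run_changedetection(self, uuid, skip_when_checksum_same=False):
        changed_detected = self.content != self.watch.get('previous_content')
        return changed_detected, {'last_error': False}, self.content

datastore = {'watching': {'uuid-1': {'url': 'https://example.com'}}}
update_handler = difference_detection_processor(datastore, 'uuid-1')
update_handler.call_browser()
print(update_handler.run_changedetection('uuid-1'))
```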
@@ -419,11 +419,7 @@ class base_html_playwright(Fetcher):
is_binary=False):

# For now, USE_EXPERIMENTAL_PUPPETEER_FETCH is not supported by watches with BrowserSteps (for now!)
has_browser_steps = self.browser_steps and list(filter(
lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'),
self.browser_steps))

if not has_browser_steps and os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH'):
if not self.browser_steps and os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH'):
if strtobool(os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH')):
# Temporary backup solution until we rewrite the playwright code
return self.run_fetch_browserless_puppeteer(
@@ -671,6 +667,7 @@ class html_requests(Fetcher):
fetcher_description = "Basic fast Plaintext/HTTP Client"

def __init__(self, proxy_override=None):
super().__init__()
self.proxy_override = proxy_override

def run(self,
@@ -137,6 +137,7 @@ class import_distill_io_json(Importer):

flash("{} Imported from Distill.io in {:.2f}s, {} Skipped.".format(len(self.new_uuids), time.time() - now, len(self.remaining_data)))


class import_xlsx_wachete(Importer):

def run(self,
@@ -144,6 +145,7 @@ class import_xlsx_wachete(Importer):
flash,
datastore,
):

good = 0
now = time.time()
self.new_uuids = []
@@ -153,62 +155,69 @@ class import_xlsx_wachete(Importer):
try:
wb = load_workbook(data)
except Exception as e:
#@todo correct except
# @todo correct except
flash("Unable to read export XLSX file, something wrong with the file?", 'error')
return

sheet_obj = wb.active
row_id = 2
for row in wb.active.iter_rows(min_row=row_id):
try:
extras = {}
data = {}
for cell in row:
if not cell.value:
continue
column_title = wb.active.cell(row=1, column=cell.column).value.strip().lower()
data[column_title] = cell.value

i = 1
row = 2
while sheet_obj.cell(row=row, column=1).value:
data = {}
while sheet_obj.cell(row=row, column=i).value:
column_title = sheet_obj.cell(row=1, column=i).value.strip().lower()
column_row_value = sheet_obj.cell(row=row, column=i).value
data[column_title] = column_row_value
# Forced switch to webdriver/playwright/etc
dynamic_wachet = str(data.get('dynamic wachet', '')).strip().lower() # Convert bool to str to cover all cases
# libreoffice and others can have it as =FALSE() =TRUE(), or bool(true)
if 'true' in dynamic_wachet or dynamic_wachet == '1':
extras['fetch_backend'] = 'html_webdriver'
elif 'false' in dynamic_wachet or dynamic_wachet == '0':
extras['fetch_backend'] = 'html_requests'

i += 1
if data.get('xpath'):
# @todo split by || ?
extras['include_filters'] = [data.get('xpath')]
if data.get('name'):
extras['title'] = data.get('name').strip()
if data.get('interval (min)'):
minutes = int(data.get('interval (min)'))
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
weeks, days = divmod(days, 7)
extras['time_between_check'] = {'weeks': weeks, 'days': days, 'hours': hours, 'minutes': minutes, 'seconds': 0}

extras = {}
if data.get('xpath'):
#@todo split by || ?
extras['include_filters'] = [data.get('xpath')]
if data.get('name'):
extras['title'] = [data.get('name').strip()]
if data.get('interval (min)'):
minutes = int(data.get('interval (min)'))
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
weeks, days = divmod(days, 7)
extras['time_between_check'] = {'weeks': weeks, 'days': days, 'hours': hours, 'minutes': minutes, 'seconds': 0}

# At minimum a URL is required.
if data.get('url'):
try:
validate_url(data.get('url'))
except ValidationError as e:
print(">> import URL error", data.get('url'), str(e))
# Don't bother processing anything else on this row
continue

new_uuid = datastore.add_watch(url=data['url'].strip(),
extras=extras,
tag=data.get('folder'),
write_to_disk_now=False)
if new_uuid:
# Straight into the queue.
self.new_uuids.append(new_uuid)
good += 1

row += 1
i = 1
# At minimum a URL is required.
if data.get('url'):
try:
validate_url(data.get('url'))
except ValidationError as e:
print(">> import URL error", data.get('url'), str(e))
flash(f"Error processing row number {row_id}, URL value was incorrect, row was skipped.", 'error')
# Don't bother processing anything else on this row
continue

new_uuid = datastore.add_watch(url=data['url'].strip(),
extras=extras,
tag=data.get('folder'),
write_to_disk_now=False)
if new_uuid:
# Straight into the queue.
self.new_uuids.append(new_uuid)
good += 1
except Exception as e:
print(e)
flash(f"Error processing row number {row_id}, check all cell data types are correct, row was skipped.", 'error')
else:
row_id += 1

flash(
"{} imported from Wachete .xlsx in {:.2f}s".format(len(self.new_uuids), time.time() - now))
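A standalone sketch (outside the project's Importer class) of the row-reading pattern used above: headers come from row 1, each data row becomes a dict keyed by the lower-cased header, and "interval (min)" is converted to the weeks/days/hours/minutes structure with divmod:

```python
from openpyxl import Workbook

wb = Workbook()
ws = wb.active
ws.append(["name", "url", "interval (min)"])            # header row
ws.append(["Example watch", "https://example.com", 1824])

for row in ws.iter_rows(min_row=2):
    data = {}
    for cell in row:
        if cell.value is None:
            continue
        header = ws.cell(row=1, column=cell.column).value.strip().lower()
        data[header] = cell.value

    minutes = int(data.get("interval (min)", 0))
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    weeks, days = divmod(days, 7)
    print(data["url"], {"weeks": weeks, "days": days, "hours": hours, "minutes": minutes})
    # https://example.com {'weeks': 0, 'days': 1, 'hours': 6, 'minutes': 24}
```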
class import_xlsx_custom(Importer):

def run(self,
@@ -216,6 +225,7 @@ class import_xlsx_custom(Importer):
flash,
datastore,
):

good = 0
now = time.time()
self.new_uuids = []
@@ -225,56 +235,68 @@ class import_xlsx_custom(Importer):
try:
wb = load_workbook(data)
except Exception as e:
#@todo correct except
# @todo correct except
flash("Unable to read export XLSX file, something wrong with the file?", 'error')
return

# @todo cehck atleast 2 rows, same in other method

sheet_obj = wb.active
from .forms import validate_url
row = 2
while sheet_obj.cell(row=row, column=1).value:
url = None
tags = None
extras = {}
for col_i, cell_map in self.import_profile.items():
cell_val = sheet_obj.cell(row=row, column=col_i).value
if cell_map == 'url':
url = cell_val.strip()
try:
validate_url(url)
except ValidationError as e:
print (">> Import URL error",url, str(e))
# Don't bother processing anything else on this row
url = None
break
row_i = 1

elif cell_map == 'tag':
tags = cell_val.strip()
elif cell_map == 'include_filters':
# @todo validate?
extras['include_filters'] = [cell_val.strip()]
elif cell_map == 'interval_minutes':
hours, minutes = divmod(int(cell_val), 60)
days, hours = divmod(hours, 24)
weeks, days = divmod(days, 7)
extras['time_between_check'] = {'weeks': weeks, 'days': days, 'hours': hours, 'minutes': minutes, 'seconds': 0}
else:
extras[cell_map] = cell_val.strip()
try:
for row in wb.active.iter_rows():
url = None
tags = None
extras = {}

# At minimum a URL is required.
if url:
new_uuid = datastore.add_watch(url=url,
extras=extras,
tag=tags,
write_to_disk_now=False)
if new_uuid:
# Straight into the queue.
self.new_uuids.append(new_uuid)
good += 1
for cell in row:
if not self.import_profile.get(cell.col_idx):
continue
if not cell.value:
continue

row += 1
cell_map = self.import_profile.get(cell.col_idx)

cell_val = str(cell.value).strip() # could be bool

if cell_map == 'url':
url = cell.value.strip()
try:
validate_url(url)
except ValidationError as e:
print(">> Import URL error", url, str(e))
flash(f"Error processing row number {row_i}, URL value was incorrect, row was skipped.", 'error')
# Don't bother processing anything else on this row
url = None
break
elif cell_map == 'tag':
tags = cell.value.strip()
elif cell_map == 'include_filters':
# @todo validate?
extras['include_filters'] = [cell.value.strip()]
elif cell_map == 'interval_minutes':
hours, minutes = divmod(int(cell_val), 60)
days, hours = divmod(hours, 24)
weeks, days = divmod(days, 7)
extras['time_between_check'] = {'weeks': weeks, 'days': days, 'hours': hours, 'minutes': minutes, 'seconds': 0}
else:
extras[cell_map] = cell_val

# At minimum a URL is required.
if url:
new_uuid = datastore.add_watch(url=url,
extras=extras,
tag=tags,
write_to_disk_now=False)
if new_uuid:
# Straight into the queue.
self.new_uuids.append(new_uuid)
good += 1
except Exception as e:
print(e)
flash(f"Error processing row number {row_i}, check all cell data types are correct, row was skipped.", 'error')
else:
row_i += 1

flash(
"{} imported from custom .xlsx in {:.2f}s".format(len(self.new_uuids), time.time() - now))
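For the custom importer, a similar standalone sketch of the import_profile idea: a mapping from a 1-based column index (openpyxl's cell.col_idx) to a watch field, so arbitrary spreadsheet layouts can be imported. The example profile and names are illustrative only:

```python
from openpyxl import Workbook

import_profile = {1: 'url', 2: 'tag', 3: 'interval_minutes'}

wb = Workbook()
ws = wb.active
ws.append(["https://example.com", "news", 90])

for row in ws.iter_rows():
    url, tags, extras = None, None, {}
    for cell in row:
        cell_map = import_profile.get(cell.col_idx)
        if not cell_map or cell.value is None:
            continue
        cell_val = str(cell.value).strip()
        if cell_map == 'url':
            url = cell_val
        elif cell_map == 'tag':
            tags = cell_val
        elif cell_map == 'interval_minutes':
            hours, minutes = divmod(int(cell_val), 60)
            extras['time_between_check'] = {'hours': hours, 'minutes': minutes}
    print(url, tags, extras)
    # https://example.com news {'time_between_check': {'hours': 1, 'minutes': 30}}
```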
@@ -19,6 +19,7 @@ from changedetectionio.notification import (

base_config = {
'body': None,
'browser_steps': [],
'browser_steps_last_error_step': None,
'check_unique_lines': False, # On change-detected, compare against all history if its something new
'check_count': 0,
@@ -145,8 +146,14 @@ class model(dict):
flash(message, 'error')
return ''

if ready_url.startswith('source:'):
ready_url=ready_url.replace('source:', '')
return ready_url

@property
def is_source_type_url(self):
return self.get('url', '').startswith('source:')

@property
def get_fetch_backend(self):
"""
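A minimal sketch of the "source:" URL convention formalised by is_source_type_url above: such a watch means "watch the raw page source" and the prefix is stripped before fetching:

```python
class Watch(dict):
    @property
    def is_source_type_url(self):
        return self.get('url', '').startswith('source:')

    @property
    def link(self):
        # Strip the marker so the fetcher sees a normal URL
        ready_url = self.get('url', '')
        if ready_url.startswith('source:'):
            ready_url = ready_url.replace('source:', '')
        return ready_url

w = Watch(url='source:https://example.com')
print(w.is_source_type_url, w.link)   # True https://example.com
```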
@@ -234,6 +241,14 @@ class model(dict):
fname = os.path.join(self.watch_data_dir, "history.txt")
return os.path.isfile(fname)

@property
def has_browser_steps(self):
has_browser_steps = self.get('browser_steps') and list(filter(
lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'),
self.get('browser_steps')))

return has_browser_steps

# Returns the newest key, but if theres only 1 record, then it's counted as not being new, so return 0.
@property
def newest_history_key(self):
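The has_browser_steps property counts only real steps; placeholder entries ('Choose one', 'Goto site') are ignored. A self-contained sketch of the same predicate:

```python
def has_real_browser_steps(browser_steps):
    # True only if at least one step has a meaningful operation
    return bool(browser_steps) and bool([
        s for s in browser_steps
        if s.get('operation') and s['operation'] not in ('Choose one', 'Goto site')
    ])

print(has_real_browser_steps([{'operation': 'Goto site'}]))                        # False
print(has_real_browser_steps([{'operation': 'Click element', 'selector': '#go'}])) # True
```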
@@ -1,15 +1,108 @@
from abc import abstractmethod
import os
import hashlib

import re
from changedetectionio import content_fetcher
from copy import deepcopy
from distutils.util import strtobool

class difference_detection_processor():

datastore = None
fetcher = None
screenshot = None
xpath_data = None
browser_steps = None

def __init__(self, *args, **kwargs):
def __init__(self, *args, datastore, watch_uuid, **kwargs):
super().__init__(*args, **kwargs)
self.datastore = datastore
self.watch = deepcopy(self.datastore.data['watching'].get(watch_uuid))

def call_browser(self):

# Protect against file:// access
if re.search(r'^file://', self.watch.get('url', '').strip(), re.IGNORECASE):
if not strtobool(os.getenv('ALLOW_FILE_URI', 'false')):
raise Exception(
"file:// type access is denied for security reasons."
)

url = self.watch.link

# Requests, playwright, other browser via wss:// etc, fetch_extra_something
prefer_fetch_backend = self.watch.get('fetch_backend', 'system')

# Proxy ID "key"
preferred_proxy_id = self.datastore.get_preferred_proxy_for_watch(uuid=self.watch.get('uuid'))

# Pluggable content self.fetcher
if not prefer_fetch_backend or prefer_fetch_backend == 'system':
prefer_fetch_backend = self.datastore.data['settings']['application'].get('fetch_backend')

# Grab the right kind of 'fetcher', (playwright, requests, etc)
if hasattr(content_fetcher, prefer_fetch_backend):
fetcher_obj = getattr(content_fetcher, prefer_fetch_backend)
else:
# If the klass doesnt exist, just use a default
fetcher_obj = getattr(content_fetcher, "html_requests")

proxy_url = None
if preferred_proxy_id:
proxy_url = self.datastore.proxy_list.get(preferred_proxy_id).get('url')
print(f"Using proxy Key: {preferred_proxy_id} as Proxy URL {proxy_url}")

# Now call the fetcher (playwright/requests/etc) with arguments that only a fetcher would need.
self.fetcher = fetcher_obj(proxy_override=proxy_url,
#browser_url_extra/configurable browser url=...
)

if self.watch.has_browser_steps:
self.fetcher.browser_steps = self.watch.get('browser_steps', [])
self.fetcher.browser_steps_screenshot_path = os.path.join(self.datastore.datastore_path, self.watch.get('uuid'))

# Tweak the base config with the per-watch ones
request_headers = self.watch.get('headers', [])
request_headers.update(self.datastore.get_all_base_headers())
request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=self.watch.get('uuid')))

# https://github.com/psf/requests/issues/4525
# Requests doesnt yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot
# do this by accident.
if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']:
request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '')

timeout = self.datastore.data['settings']['requests'].get('timeout')

request_body = self.watch.get('body')
request_method = self.watch.get('method')
ignore_status_codes = self.watch.get('ignore_status_codes', False)

# Configurable per-watch or global extra delay before extracting text (for webDriver types)
system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None)
if self.watch.get('webdriver_delay'):
self.fetcher.render_extract_delay = self.watch.get('webdriver_delay')
elif system_webdriver_delay is not None:
self.fetcher.render_extract_delay = system_webdriver_delay

if self.watch.get('webdriver_js_execute_code') is not None and self.watch.get('webdriver_js_execute_code').strip():
self.fetcher.webdriver_js_execute_code = self.watch.get('webdriver_js_execute_code')

# Requests for PDF's, images etc should be passwd the is_binary flag
is_binary = self.watch.is_pdf

# And here we go! call the right browser with browser-specific settings
self.fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, self.watch.get('include_filters'),
is_binary=is_binary)

#@todo .quit here could go on close object, so we can run JS if change-detected
self.fetcher.quit()

# After init, call run_changedetection() which will do the actual change-detection

@abstractmethod
def run(self, uuid, skip_when_checksum_same=True, preferred_proxy=None):
def run_changedetection(self, uuid, skip_when_checksum_same=True):
update_obj = {'last_notification_error': False, 'last_error': False}
some_data = 'xxxxx'
update_obj["previous_md5"] = hashlib.md5(some_data.encode('utf-8')).hexdigest()
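A runnable sketch of the "pluggable fetcher" lookup used in call_browser(): the configured backend name is resolved with getattr() on the content_fetcher namespace, and an unknown name falls back to the plain HTTP client. The tiny stand-in namespace below is illustrative, not the project's module:

```python
from types import SimpleNamespace

class html_requests:
    def __init__(self, proxy_override=None):
        self.proxy_override = proxy_override

class html_webdriver(html_requests):
    pass

content_fetcher = SimpleNamespace(html_requests=html_requests, html_webdriver=html_webdriver)

def pick_fetcher(prefer_fetch_backend, proxy_url=None):
    if hasattr(content_fetcher, prefer_fetch_backend):
        fetcher_obj = getattr(content_fetcher, prefer_fetch_backend)
    else:
        # Unknown backend name: fall back to the basic requests fetcher
        fetcher_obj = content_fetcher.html_requests
    return fetcher_obj(proxy_override=proxy_url)

print(type(pick_fetcher('html_webdriver')).__name__)   # html_webdriver
print(type(pick_fetcher('no_such_backend')).__name__)  # html_requests
```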
@@ -1,10 +1,7 @@

import hashlib
import os
import re
import urllib3
from . import difference_detection_processor
from changedetectionio import content_fetcher
from copy import deepcopy

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
@@ -22,11 +19,7 @@ class perform_site_check(difference_detection_processor):
screenshot = None
xpath_data = None

def __init__(self, *args, datastore, **kwargs):
super().__init__(*args, **kwargs)
self.datastore = datastore

def run(self, uuid, skip_when_checksum_same=True):
def run_changedetection(self, uuid, skip_when_checksum_same=True):

# DeepCopy so we can be sure we don't accidently change anything by reference
watch = deepcopy(self.datastore.data['watching'].get(uuid))
@@ -34,84 +27,24 @@ class perform_site_check(difference_detection_processor):
if not watch:
raise Exception("Watch no longer exists.")

# Protect against file:// access
if re.search(r'^file', watch.get('url', ''), re.IGNORECASE) and not os.getenv('ALLOW_FILE_URI', False):
raise Exception(
"file:// type access is denied for security reasons."
)

# Unset any existing notification error
update_obj = {'last_notification_error': False, 'last_error': False}

request_headers = watch.get('headers', [])
request_headers.update(self.datastore.get_all_base_headers())
request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=uuid))

# https://github.com/psf/requests/issues/4525
# Requests doesnt yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot
# do this by accident.
if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']:
request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '')

timeout = self.datastore.data['settings']['requests'].get('timeout')

url = watch.link

request_body = self.datastore.data['watching'][uuid].get('body')
request_method = self.datastore.data['watching'][uuid].get('method')
ignore_status_codes = self.datastore.data['watching'][uuid].get('ignore_status_codes', False)

# Pluggable content fetcher
prefer_backend = watch.get_fetch_backend
if not prefer_backend or prefer_backend == 'system':
prefer_backend = self.datastore.data['settings']['application']['fetch_backend']

if hasattr(content_fetcher, prefer_backend):
klass = getattr(content_fetcher, prefer_backend)
else:
# If the klass doesnt exist, just use a default
klass = getattr(content_fetcher, "html_requests")

proxy_id = self.datastore.get_preferred_proxy_for_watch(uuid=uuid)
proxy_url = None
if proxy_id:
proxy_url = self.datastore.proxy_list.get(proxy_id).get('url')
print("UUID {} Using proxy {}".format(uuid, proxy_url))

fetcher = klass(proxy_override=proxy_url)

# Configurable per-watch or global extra delay before extracting text (for webDriver types)
system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None)
if watch['webdriver_delay'] is not None:
fetcher.render_extract_delay = watch.get('webdriver_delay')
elif system_webdriver_delay is not None:
fetcher.render_extract_delay = system_webdriver_delay

# Could be removed if requests/plaintext could also return some info?
if prefer_backend != 'html_webdriver':
raise Exception("Re-stock detection requires Chrome or compatible webdriver/playwright fetcher to work")

if watch.get('webdriver_js_execute_code') is not None and watch.get('webdriver_js_execute_code').strip():
fetcher.webdriver_js_execute_code = watch.get('webdriver_js_execute_code')

fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, watch.get('include_filters'))
fetcher.quit()

self.screenshot = fetcher.screenshot
self.xpath_data = fetcher.xpath_data
self.screenshot = self.fetcher.screenshot
self.xpath_data = self.fetcher.xpath_data

# Track the content type
update_obj['content_type'] = fetcher.headers.get('Content-Type', '')
update_obj["last_check_status"] = fetcher.get_last_status_code()
update_obj['content_type'] = self.fetcher.headers.get('Content-Type', '')
update_obj["last_check_status"] = self.fetcher.get_last_status_code()

# Main detection method
fetched_md5 = None
if fetcher.instock_data:
fetched_md5 = hashlib.md5(fetcher.instock_data.encode('utf-8')).hexdigest()
if self.fetcher.instock_data:
fetched_md5 = hashlib.md5(self.fetcher.instock_data.encode('utf-8')).hexdigest()
# 'Possibly in stock' comes from stock-not-in-stock.js when no string found above the fold.
update_obj["in_stock"] = True if fetcher.instock_data == 'Possibly in stock' else False
update_obj["in_stock"] = True if self.fetcher.instock_data == 'Possibly in stock' else False
else:
raise UnableToExtractRestockData(status_code=fetcher.status_code)
raise UnableToExtractRestockData(status_code=self.fetcher.status_code)

# The main thing that all this at the moment comes down to :)
changed_detected = False
@@ -128,4 +61,4 @@ class perform_site_check(difference_detection_processor):
# Always record the new checksum
update_obj["previous_md5"] = fetched_md5

return changed_detected, update_obj, fetcher.instock_data.encode('utf-8')
return changed_detected, update_obj, self.fetcher.instock_data.encode('utf-8')
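A tiny illustration of the restock check above: hash the text produced by stock-not-in-stock.js and compare it with the previously stored digest, while the literal 'Possibly in stock' maps to the in_stock flag (the helper below is illustrative):

```python
import hashlib

def check_stock_signal(instock_data, previous_md5):
    fetched_md5 = hashlib.md5(instock_data.encode('utf-8')).hexdigest()
    in_stock = instock_data == 'Possibly in stock'
    return fetched_md5 != previous_md5, in_stock, fetched_md5

print(check_stock_signal('Possibly in stock', previous_md5=None))
```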
@@ -1,4 +1,4 @@
# HTML to TEXT/JSON DIFFERENCE FETCHER
# HTML to TEXT/JSON DIFFERENCE self.fetcher

import hashlib
import json
@@ -32,15 +32,10 @@ class PDFToHTMLToolNotFound(ValueError):
# Some common stuff here that can be moved to a base class
# (set_proxy_from_list)
class perform_site_check(difference_detection_processor):
screenshot = None
xpath_data = None

def __init__(self, *args, datastore, **kwargs):
super().__init__(*args, **kwargs)
self.datastore = datastore

def run(self, uuid, skip_when_checksum_same=True, preferred_proxy=None):
def run_changedetection(self, uuid, skip_when_checksum_same=True):
changed_detected = False
html_content = ""
screenshot = False # as bytes
stripped_text_from_html = ""
@@ -49,100 +44,25 @@ class perform_site_check(difference_detection_processor):
if not watch:
raise Exception("Watch no longer exists.")

# Protect against file:// access
if re.search(r'^file', watch.get('url', ''), re.IGNORECASE) and not os.getenv('ALLOW_FILE_URI', False):
raise Exception(
"file:// type access is denied for security reasons."
)

# Unset any existing notification error
update_obj = {'last_notification_error': False, 'last_error': False}

# Tweak the base config with the per-watch ones
request_headers = watch.get('headers', [])
request_headers.update(self.datastore.get_all_base_headers())
request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=uuid))

# https://github.com/psf/requests/issues/4525
# Requests doesnt yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot
# do this by accident.
if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']:
request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '')

timeout = self.datastore.data['settings']['requests'].get('timeout')

url = watch.link

request_body = self.datastore.data['watching'][uuid].get('body')
request_method = self.datastore.data['watching'][uuid].get('method')
ignore_status_codes = self.datastore.data['watching'][uuid].get('ignore_status_codes', False)

# source: support
is_source = False
if url.startswith('source:'):
url = url.replace('source:', '')
is_source = True

# Pluggable content fetcher
prefer_backend = watch.get_fetch_backend
if not prefer_backend or prefer_backend == 'system':
prefer_backend = self.datastore.data['settings']['application']['fetch_backend']

if hasattr(content_fetcher, prefer_backend):
klass = getattr(content_fetcher, prefer_backend)
else:
# If the klass doesnt exist, just use a default
klass = getattr(content_fetcher, "html_requests")

if preferred_proxy:
proxy_id = preferred_proxy
else:
proxy_id = self.datastore.get_preferred_proxy_for_watch(uuid=uuid)

proxy_url = None
if proxy_id:
proxy_url = self.datastore.proxy_list.get(proxy_id).get('url')
print("UUID {} Using proxy {}".format(uuid, proxy_url))

fetcher = klass(proxy_override=proxy_url)

# Configurable per-watch or global extra delay before extracting text (for webDriver types)
system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None)
if watch['webdriver_delay'] is not None:
fetcher.render_extract_delay = watch.get('webdriver_delay')
elif system_webdriver_delay is not None:
fetcher.render_extract_delay = system_webdriver_delay

# Possible conflict
if prefer_backend == 'html_webdriver':
fetcher.browser_steps = watch.get('browser_steps', None)
fetcher.browser_steps_screenshot_path = os.path.join(self.datastore.datastore_path, uuid)

if watch.get('webdriver_js_execute_code') is not None and watch.get('webdriver_js_execute_code').strip():
fetcher.webdriver_js_execute_code = watch.get('webdriver_js_execute_code')

# requests for PDF's, images etc should be passwd the is_binary flag
is_binary = watch.is_pdf

fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, watch.get('include_filters'),
is_binary=is_binary)
fetcher.quit()

self.screenshot = fetcher.screenshot
self.xpath_data = fetcher.xpath_data
self.screenshot = self.fetcher.screenshot
self.xpath_data = self.fetcher.xpath_data

# Track the content type
update_obj['content_type'] = fetcher.get_all_headers().get('content-type', '').lower()
update_obj['content_type'] = self.fetcher.get_all_headers().get('content-type', '').lower()

# Watches added automatically in the queue manager will skip if its the same checksum as the previous run
# Saves a lot of CPU
update_obj['previous_md5_before_filters'] = hashlib.md5(fetcher.content.encode('utf-8')).hexdigest()
update_obj['previous_md5_before_filters'] = hashlib.md5(self.fetcher.content.encode('utf-8')).hexdigest()
if skip_when_checksum_same:
if update_obj['previous_md5_before_filters'] == watch.get('previous_md5_before_filters'):
raise content_fetcher.checksumFromPreviousCheckWasTheSame()

# Fetching complete, now filters
# @todo move to class / maybe inside of fetcher abstract base?

# @note: I feel like the following should be in a more obvious chain system
# - Check filter text
@@ -151,24 +71,24 @@ class perform_site_check(difference_detection_processor):
# https://stackoverflow.com/questions/41817578/basic-method-chaining ?
# return content().textfilter().jsonextract().checksumcompare() ?

is_json = 'application/json' in fetcher.get_all_headers().get('content-type', '').lower()
is_json = 'application/json' in self.fetcher.get_all_headers().get('content-type', '').lower()
is_html = not is_json
is_rss = False

ctype_header = fetcher.get_all_headers().get('content-type', '').lower()
ctype_header = self.fetcher.get_all_headers().get('content-type', '').lower()
# Go into RSS preprocess for converting CDATA/comment to usable text
if any(substring in ctype_header for substring in ['application/xml', 'application/rss', 'text/xml']):
if '<rss' in fetcher.content[:100].lower():
fetcher.content = cdata_in_document_to_text(html_content=fetcher.content)
if '<rss' in self.fetcher.content[:100].lower():
self.fetcher.content = cdata_in_document_to_text(html_content=self.fetcher.content)
is_rss = True

# source: support, basically treat it as plaintext
if is_source:
if watch.is_source_type_url:
is_html = False
is_json = False

inline_pdf = fetcher.get_all_headers().get('content-disposition', '') and '%PDF-1' in fetcher.content[:10]
if watch.is_pdf or 'application/pdf' in fetcher.get_all_headers().get('content-type', '').lower() or inline_pdf:
inline_pdf = self.fetcher.get_all_headers().get('content-disposition', '') and '%PDF-1' in self.fetcher.content[:10]
if watch.is_pdf or 'application/pdf' in self.fetcher.get_all_headers().get('content-type', '').lower() or inline_pdf:
from shutil import which
tool = os.getenv("PDF_TO_HTML_TOOL", "pdftohtml")
if not which(tool):
@@ -179,18 +99,18 @@ class perform_site_check(difference_detection_processor):
[tool, '-stdout', '-', '-s', 'out.pdf', '-i'],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
proc.stdin.write(fetcher.raw_content)
proc.stdin.write(self.fetcher.raw_content)
proc.stdin.close()
fetcher.content = proc.stdout.read().decode('utf-8')
self.fetcher.content = proc.stdout.read().decode('utf-8')
proc.wait(timeout=60)

# Add a little metadata so we know if the file changes (like if an image changes, but the text is the same
# @todo may cause problems with non-UTF8?
metadata = "<p>Added by changedetection.io: Document checksum - {} Filesize - {} bytes</p>".format(
hashlib.md5(fetcher.raw_content).hexdigest().upper(),
len(fetcher.content))
hashlib.md5(self.fetcher.raw_content).hexdigest().upper(),
len(self.fetcher.content))

fetcher.content = fetcher.content.replace('</body>', metadata + '</body>')
self.fetcher.content = self.fetcher.content.replace('</body>', metadata + '</body>')

# Better would be if Watch.model could access the global data also
# and then use getattr https://docs.python.org/3/reference/datamodel.html#object.__getitem__
@@ -217,7 +137,7 @@ class perform_site_check(difference_detection_processor):
if is_json:
# Sort the JSON so we dont get false alerts when the content is just re-ordered
try:
fetcher.content = json.dumps(json.loads(fetcher.content), sort_keys=True)
self.fetcher.content = json.dumps(json.loads(self.fetcher.content), sort_keys=True)
except Exception as e:
# Might have just been a snippet, or otherwise bad JSON, continue
pass
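A hedged illustration of why the JSON is re-serialised with sort_keys=True before hashing: two payloads that differ only in key order normalise to the same string, so ordering alone does not register as a change:

```python
import hashlib
import json

a = '{"price": 10, "in_stock": true}'
b = '{"in_stock": true, "price": 10}'

def norm(s):
    return json.dumps(json.loads(s), sort_keys=True)

print(norm(a) == norm(b))                                                    # True
print(hashlib.md5(norm(a).encode()).hexdigest() ==
      hashlib.md5(norm(b).encode()).hexdigest())                             # True
```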
@@ -225,22 +145,22 @@ class perform_site_check(difference_detection_processor):
if has_filter_rule:
for filter in include_filters_rule:
if any(prefix in filter for prefix in json_filter_prefixes):
stripped_text_from_html += html_tools.extract_json_as_string(content=fetcher.content, json_filter=filter)
stripped_text_from_html += html_tools.extract_json_as_string(content=self.fetcher.content, json_filter=filter)
is_html = False

if is_html or is_source:
if is_html or watch.is_source_type_url:

# CSS Filter, extract the HTML that matches and feed that into the existing inscriptis::get_text
fetcher.content = html_tools.workarounds_for_obfuscations(fetcher.content)
html_content = fetcher.content
self.fetcher.content = html_tools.workarounds_for_obfuscations(self.fetcher.content)
html_content = self.fetcher.content

# If not JSON, and if it's not text/plain..
if 'text/plain' in fetcher.get_all_headers().get('content-type', '').lower():
if 'text/plain' in self.fetcher.get_all_headers().get('content-type', '').lower():
# Don't run get_text or xpath/css filters on plaintext
stripped_text_from_html = html_content
else:
# Does it have some ld+json price data? used for easier monitoring
update_obj['has_ldjson_price_data'] = html_tools.has_ldjson_product_info(fetcher.content)
update_obj['has_ldjson_price_data'] = html_tools.has_ldjson_product_info(self.fetcher.content)

# Then we assume HTML
if has_filter_rule:
@@ -250,14 +170,14 @@ class perform_site_check(difference_detection_processor):
# For HTML/XML we offer xpath as an option, just start a regular xPath "/.."
if filter_rule[0] == '/' or filter_rule.startswith('xpath:'):
html_content += html_tools.xpath_filter(xpath_filter=filter_rule.replace('xpath:', ''),
html_content=fetcher.content,
append_pretty_line_formatting=not is_source,
html_content=self.fetcher.content,
append_pretty_line_formatting=not watch.is_source_type_url,
is_rss=is_rss)
else:
# CSS Filter, extract the HTML that matches and feed that into the existing inscriptis::get_text
html_content += html_tools.include_filters(include_filters=filter_rule,
html_content=fetcher.content,
append_pretty_line_formatting=not is_source)
html_content=self.fetcher.content,
append_pretty_line_formatting=not watch.is_source_type_url)

if not html_content.strip():
raise FilterNotFoundInResponse(include_filters_rule)
@@ -265,7 +185,7 @@ class perform_site_check(difference_detection_processor):
if has_subtractive_selectors:
html_content = html_tools.element_removal(subtractive_selectors, html_content)

if is_source:
if watch.is_source_type_url:
stripped_text_from_html = html_content
else:
# extract text
@@ -311,7 +231,7 @@ class perform_site_check(difference_detection_processor):
empty_pages_are_a_change = self.datastore.data['settings']['application'].get('empty_pages_are_a_change', False)
if not is_json and not empty_pages_are_a_change and len(stripped_text_from_html.strip()) == 0:
raise content_fetcher.ReplyWithContentButNoText(url=url,
status_code=fetcher.get_last_status_code(),
status_code=self.fetcher.get_last_status_code(),
screenshot=screenshot,
has_filters=has_filter_rule,
html_content=html_content
@@ -320,7 +240,7 @@ class perform_site_check(difference_detection_processor):
# We rely on the actual text in the html output.. many sites have random script vars etc,
# in the future we'll implement other mechanisms.

update_obj["last_check_status"] = fetcher.get_last_status_code()
update_obj["last_check_status"] = self.fetcher.get_last_status_code()

# If there's text to skip
# @todo we could abstract out the get_text() to handle this cleaner
@@ -408,7 +328,7 @@ class perform_site_check(difference_detection_processor):
if is_html:
if self.datastore.data['settings']['application'].get('extract_title_as_title') or watch['extract_title_as_title']:
if not watch['title'] or not len(watch['title']):
update_obj['title'] = html_tools.extract_element(find='title', html_content=fetcher.content)
update_obj['title'] = html_tools.extract_element(find='title', html_content=self.fetcher.content)

if changed_detected:
if watch.get('check_unique_lines', False):
@@ -3,45 +3,50 @@
* Toggles theme between light and dark mode.
*/
$(document).ready(function () {
const button = document.getElementById("toggle-light-mode");
const button = document.getElementById("toggle-light-mode");

button.onclick = () => {
const htmlElement = document.getElementsByTagName("html");
const isDarkMode = htmlElement[0].dataset.darkmode === "true";
htmlElement[0].dataset.darkmode = !isDarkMode;
setCookieValue(!isDarkMode);
};
button.onclick = () => {
const htmlElement = document.getElementsByTagName("html");
const isDarkMode = htmlElement[0].dataset.darkmode === "true";
htmlElement[0].dataset.darkmode = !isDarkMode;
setCookieValue(!isDarkMode);
};

const setCookieValue = (value) => {
document.cookie = `css_dark_mode=${value};max-age=31536000;path=/`
}
const setCookieValue = (value) => {
document.cookie = `css_dark_mode=${value};max-age=31536000;path=/`
}

// Search input box behaviour
// Search input box behaviour
const toggle_search = document.getElementById("toggle-search");
const search_q = document.getElementById("search-q");
window.addEventListener('keydown', function (e) {
const search_q = document.getElementById("search-q");
if(search_q) {
window.addEventListener('keydown', function (e) {
if (e.altKey == true && e.keyCode == 83) {
search_q.classList.toggle('expanded');
search_q.focus();
}
});

if (e.altKey == true && e.keyCode == 83)
search_q.classList.toggle('expanded');
search_q.focus();
});

search_q.onkeydown = (e) => {
var key = e.keyCode || e.which;
if (key === 13) {
document.searchForm.submit();
search_q.onkeydown = (e) => {
var key = e.keyCode || e.which;
if (key === 13) {
document.searchForm.submit();
}
};
toggle_search.onclick = () => {
// Could be that they want to search something once text is in there
if (search_q.value.length) {
document.searchForm.submit();
} else {
// If not..
search_q.classList.toggle('expanded');
search_q.focus();
}
};
}
};
toggle_search.onclick = () => {
// Could be that they want to search something once text is in there
if (search_q.value.length) {
document.searchForm.submit();
} else {
// If not..
search_q.classList.toggle('expanded');
search_q.focus();
}
};

$('#heart-us').click(function () {
$("#overlay").toggleClass('visible');
heartpath.style.fill = document.getElementById("overlay").classList.contains("visible") ? '#ff0000' : 'var(--color-background)';
});
});
@@ -1,6 +1,6 @@

#toggle-light-mode {
width: 3rem;
/* width: 3rem;*/
/* default */
.icon-dark {
display: none;
38 changedetectionio/static/styles/scss/parts/_love.scss (new file)
@@ -0,0 +1,38 @@
#overlay {
opacity: 0.95;
position: fixed;
width: 350px;
max-width: 100%;
height: 100%;
top: 0;
right: -350px;
background-color: var(--color-table-stripe);
z-index: 2;
transform: translateX(0);
transition: transform .5s ease;

&.visible {
transform: translateX(-100%);
}

.content {
font-size: 0.875rem;
padding: 1rem;
margin-top: 5rem;
max-width: 400px;
color: var(--color-watch-table-row-text);
}
}

#heartpath {
&:hover {
fill: #ff0000 !important;
transition: all ease 0.3s !important;
}
transition: all ease 0.3s !important;
}
25 changedetectionio/static/styles/scss/parts/_menu.scss (new file)
@@ -0,0 +1,25 @@
.pure-menu-link {
padding: 0.5rem 1em;
line-height: 1.2rem;
}

.pure-menu-item {
svg {
height: 1.2rem;
}
* {
vertical-align: middle;
}
.github-link {
height: 1.8rem;
display: block;
svg {
height: 100%;
}
}
.bi-heart {
&:hover {
cursor: pointer;
}
}
}
@@ -9,10 +9,13 @@
@import "parts/_spinners";
@import "parts/_variables";
@import "parts/_darkmode";
@import "parts/_menu";
@import "parts/_love";

body {
color: var(--color-text);
background: var(--color-background-page);
font-family: Helvetica Neue, Helvetica, Lucida Grande, Arial, Ubuntu, Cantarell, Fira Sans, sans-serif;
}

.visually-hidden {

@@ -55,11 +58,6 @@ a.github-link {
}
}

#toggle-search {
width: 2rem;
}

#search-q {
opacity: 0;
-webkit-transition: all .9s ease;
@@ -1082,3 +1080,4 @@ ul {
border-radius: 3px;
white-space: nowrap;
}
@@ -331,7 +331,7 @@ html[data-darkmode="true"] {
color: var(--color-watch-table-error); }

#toggle-light-mode {
width: 3rem;
/* width: 3rem;*/
/* default */ }
#toggle-light-mode .icon-dark {
display: none; }
@@ -342,9 +342,56 @@ html[data-darkmode="true"] #toggle-light-mode .icon-light {
html[data-darkmode="true"] #toggle-light-mode .icon-dark {
display: block; }

.pure-menu-link {
padding: 0.5rem 1em;
line-height: 1.2rem; }

.pure-menu-item svg {
height: 1.2rem; }

.pure-menu-item * {
vertical-align: middle; }

.pure-menu-item .github-link {
height: 1.8rem;
display: block; }
.pure-menu-item .github-link svg {
height: 100%; }

.pure-menu-item .bi-heart:hover {
cursor: pointer; }

#overlay {
opacity: 0.95;
position: fixed;
width: 350px;
max-width: 100%;
height: 100%;
top: 0;
right: -350px;
background-color: var(--color-table-stripe);
z-index: 2;
transform: translateX(0);
transition: transform .5s ease; }
#overlay.visible {
transform: translateX(-100%); }
#overlay .content {
font-size: 0.875rem;
padding: 1rem;
margin-top: 5rem;
max-width: 400px;
color: var(--color-watch-table-row-text); }

#heartpath {
transition: all ease 0.3s !important; }
#heartpath:hover {
fill: #ff0000 !important;
transition: all ease 0.3s !important; }

body {
color: var(--color-text);
background: var(--color-background-page); }
background: var(--color-background-page);
font-family: Helvetica Neue, Helvetica, Lucida Grande, Arial, Ubuntu, Cantarell, Fira Sans, sans-serif; }

.visually-hidden {
clip: rect(0 0 0 0);
@@ -376,9 +423,6 @@ a.github-link {
a.github-link:hover {
color: var(--color-icon-github-hover); }

#toggle-search {
width: 2rem; }

#search-q {
opacity: 0;
-webkit-transition: all .9s ease;
@@ -8,10 +8,10 @@
<title>Change Detection{{extra_title}}</title>
<link rel="alternate" type="application/rss+xml" title="Changedetection.io » Feed{% if active_tag %}- {{active_tag}}{% endif %}" href="{{ url_for('rss', tag=active_tag , token=app_rss_token)}}" >
<link rel="stylesheet" href="{{url_for('static_content', group='styles', filename='pure-min.css')}}" >
<link rel="stylesheet" href="{{url_for('static_content', group='styles', filename='styles.css')}}" >
<link rel="stylesheet" href="{{url_for('static_content', group='styles', filename='styles.css')}}?v={{ get_css_version() }}" >
{% if extra_stylesheets %}
{% for m in extra_stylesheets %}
<link rel="stylesheet" href="{{ m }}?ver=1000" >
<link rel="stylesheet" href="{{ m }}?ver={{ get_css_version() }}" >
{% endfor %}
{% endif %}
@@ -108,6 +108,20 @@
</span>
</button>
</li>
<li class="pure-menu-item" id="heart-us">
<svg
fill="#ff0000"
class="bi bi-heart"
preserveAspectRatio="xMidYMid meet"
viewBox="0 0 16.9 16.1"
id="svg-heart"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
<path id="heartpath" d="M 5.338316,0.50302766 C 0.71136983,0.50647126 -3.9576371,7.2707777 8.5004254,15.503028 23.833425,5.3700277 13.220206,-2.5384409 8.6762066,1.6475589 c -0.060791,0.054322 -0.11943,0.1110064 -0.1757812,0.1699219 -0.057,-0.059 -0.1157813,-0.116875 -0.1757812,-0.171875 C 7.4724566,0.86129334 6.4060729,0.50223298 5.338316,0.50302766 Z"
style="fill:var(--color-background);fill-opacity:1;stroke:#ff0000;stroke-opacity:1" />
</svg>
</li>
<li class="pure-menu-item">
<a class="github-link" href="https://github.com/dgtlmoon/changedetection.io">
{% include "svgs/github.svg" %}
@@ -131,7 +145,44 @@
<div class="sticky-tab" id="right-sticky">{{ right_sticky }}</div>
{% endif %}
<section class="content">
<header>
<div id="overlay">
<div class="content">
<strong>changedetection.io needs your support!</strong><br>
<p>
You can help us by supporting changedetection.io on these platforms;
</p>
<p>
<ul>
<li>
<a href="https://alternativeto.net/software/changedetection-io/about/">Rate us at
AlternativeTo.net</a>
</li>
<li>
<a href="https://github.com/dgtlmoon/changedetection.io">Star us on GitHub</a>
</li>
<li>
<a href="https://twitter.com/change_det_io">Follow us at Twitter/X</a>
</li>
<li>
<a href="https://www.linkedin.com/company/changedetection-io">Check us out on LinkedIn</a>
</li>
<li>
And tell your friends and colleagues :)
</li>
</ul>
</p>
<p>
The more popular changedetection.io is, the more time we can dedicate to adding amazing features!
</p>
<p>
Many thanks :)<br>
</p>
<p>
<i>changedetection.io team</i>
</p>
</div>
</div>
<header>
{% block header %}{% endblock %}
</header>
@@ -178,6 +178,9 @@ nav
<span style="display:none;" id="api-key-copy" >copy</span>
</div>
</div>
<div class="pure-control-group">
<a href="{{url_for('settings_reset_api_key')}}" class="pure-button button-small button-cancel">Regenerate API key</a>
</div>
</div>
<div class="tab-pane-inner" id="proxies">
<div id="recommended-proxy">
@@ -1,3 +1,6 @@
<svg class="octicon octicon-mark-github v-align-middle" height="32" viewbox="0 0 16 16" version="1.1" width="32" aria-hidden="true">
<path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0016 8c0-4.42-3.58-8-8-8z"></path>
<svg class="octicon octicon-mark-github v-align-middle" viewbox="0 0 16 16" version="1.1" aria-hidden="true">
<path
fill-rule="evenodd"
d="M 8,0 C 3.58,0 0,3.58 0,8 c 0,3.54 2.29,6.53 5.47,7.59 0.4,0.07 0.55,-0.17 0.55,-0.38 0,-0.19 -0.01,-0.82 -0.01,-1.49 C 4,14.09 3.48,13.23 3.32,12.78 3.23,12.55 2.84,11.84 2.5,11.65 2.22,11.5 1.82,11.13 2.49,11.12 3.12,11.11 3.57,11.7 3.72,11.94 4.44,13.15 5.59,12.81 6.05,12.6 6.12,12.08 6.33,11.73 6.56,11.53 4.78,11.33 2.92,10.64 2.92,7.58 2.92,6.71 3.23,5.99 3.74,5.43 3.66,5.23 3.38,4.41 3.82,3.31 c 0,0 0.67,-0.21 2.2,0.82 0.64,-0.18 1.32,-0.27 2,-0.27 0.68,0 1.36,0.09 2,0.27 1.53,-1.04 2.2,-0.82 2.2,-0.82 0.44,1.1 0.16,1.92 0.08,2.12 0.51,0.56 0.82,1.27 0.82,2.15 0,3.07 -1.87,3.75 -3.65,3.95 0.29,0.25 0.54,0.73 0.54,1.48 0,1.07 -0.01,1.93 -0.01,2.2 0,0.21 0.15,0.46 0.55,0.38 A 8.013,8.013 0 0 0 16,8 C 16,3.58 12.42,0 8,0 Z"
id="path2" />
</svg>

Binary file not shown (before: 749 B, after: 917 B).
@@ -24,7 +24,7 @@ def test_check_extract_text_from_diff(client, live_server):
)

assert b"1 Imported" in res.data
time.sleep(1)
wait_for_all_checks(client)

# Load in 5 different numbers/changes
last_date=""
@@ -33,8 +33,6 @@ def test_strip_regex_text_func():
"/not"
]

fetcher = fetch_site_status.perform_site_check(datastore=False)
stripped_content = html_tools.strip_ignore_text(test_content, ignore_lines)

assert b"but 1 lines" in stripped_content

@@ -24,7 +24,6 @@ def test_strip_text_func():

ignore_lines = ["sometimes"]

fetcher = fetch_site_status.perform_site_check(datastore=False)
stripped_content = html_tools.strip_ignore_text(test_content, ignore_lines)

assert b"sometimes" not in stripped_content

@@ -127,6 +127,7 @@ def test_import_custom_xlsx(client, live_server):
"""Test can upload a excel spreadsheet and the watches are created correctly"""

#live_server_setup(live_server)

dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'import/spreadsheet.xlsx')
with open(filename, 'rb') as f:

@@ -150,13 +151,14 @@ def test_import_custom_xlsx(client, live_server):
follow_redirects=True,
)

assert b'2 imported from custom .xlsx' in res.data
assert b'4 imported from custom .xlsx' in res.data
# Because this row was actually just a header with no usable URL, we should get an error
assert b'Error processing row number 1' in res.data

res = client.get(
url_for("index")
)

assert b'Somesite results ABC' in res.data
assert b'City news results' in res.data

@@ -167,6 +169,9 @@ def test_import_custom_xlsx(client, live_server):
assert filters[0] == '/html[1]/body[1]/div[4]/div[1]/div[1]/div[1]||//*[@id=\'content\']/div[3]/div[1]/div[1]||//*[@id=\'content\']/div[1]'
assert watch.get('time_between_check') == {'weeks': 0, 'days': 1, 'hours': 6, 'minutes': 24, 'seconds': 0}

res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data

def test_import_watchete_xlsx(client, live_server):
"""Test can upload a excel spreadsheet and the watches are created correctly"""

@@ -186,7 +191,7 @@ def test_import_watchete_xlsx(client, live_server):
follow_redirects=True,
)

assert b'2 imported from Wachete .xlsx' in res.data
assert b'4 imported from Wachete .xlsx' in res.data

res = client.get(
url_for("index")

@@ -201,3 +206,13 @@ def test_import_watchete_xlsx(client, live_server):
filters = watch.get('include_filters')
assert filters[0] == '/html[1]/body[1]/div[4]/div[1]/div[1]/div[1]||//*[@id=\'content\']/div[3]/div[1]/div[1]||//*[@id=\'content\']/div[1]'
assert watch.get('time_between_check') == {'weeks': 0, 'days': 1, 'hours': 6, 'minutes': 24, 'seconds': 0}
assert watch.get('fetch_backend') == 'html_requests' # Has inactive 'dynamic wachet'

if watch.get('title') == 'JS website':
assert watch.get('fetch_backend') == 'html_webdriver' # Has active 'dynamic wachet'

if watch.get('title') == 'system default website':
assert watch.get('fetch_backend') == 'system' # uses default if blank

res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data

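For context on the spreadsheet import exercised above, here is a rough sketch of how rows from a file like import/spreadsheet.xlsx can be walked with openpyxl. The column layout (URL in the first column) and the row handling are assumptions for illustration only, not the project's actual importer code:

    from openpyxl import load_workbook

    # Hypothetical walk over a spreadsheet similar to the test fixture above;
    # assumes the URL sits in the first column and that a header row has no usable URL.
    wb = load_workbook("import/spreadsheet.xlsx", read_only=True)
    created = 0
    for row_number, row in enumerate(wb.active.iter_rows(values_only=True), start=1):
        url = row[0]
        if not url or not str(url).lower().startswith(("http://", "https://")):
            # A header row would surface as "Error processing row number 1", as asserted above
            print(f"Error processing row number {row_number}")
            continue
        created += 1
    print(f"{created} imported from custom .xlsx")
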
@@ -80,8 +80,11 @@ def test_headers_in_request(client, live_server):

# Should be only one with headers set
assert watches_with_headers==1
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data

def test_body_in_request(client, live_server):

# Add our URL to the import page
test_url = url_for('test_body', _external=True)
if os.getenv('PLAYWRIGHT_DRIVER_URL'):

@@ -170,7 +173,8 @@ def test_body_in_request(client, live_server):
follow_redirects=True
)
assert b"Body must be empty when Request Method is set to GET" in res.data

res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data

def test_method_in_request(client, live_server):
# Add our URL to the import page

@@ -1,5 +1,5 @@
from flask import url_for
from . util import set_original_response, set_modified_response, live_server_setup
from .util import set_original_response, set_modified_response, live_server_setup, wait_for_all_checks
import time

@@ -12,6 +12,7 @@ def test_bad_access(client, live_server):
)

assert b"1 Imported" in res.data
wait_for_all_checks(client)

# Attempt to add a body with a GET method
res = client.post(

@@ -59,7 +60,7 @@ def test_bad_access(client, live_server):
data={"url": 'file:///tasty/disk/drive', "tags": ''},
follow_redirects=True
)
time.sleep(1)
wait_for_all_checks(client)
res = client.get(url_for("index"))

assert b'file:// type access is denied for security reasons.' in res.data

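The hunks above replace fixed time.sleep() calls with wait_for_all_checks() and keep asserting that file:// URLs are rejected. A minimal sketch of that kind of scheme check follows; the function name is illustrative and not the project's actual validator:

    from urllib.parse import urlparse

    def is_allowed_watch_url(url: str) -> bool:
        # Illustrative guard: accept plain web schemes, refuse local file access
        scheme = urlparse(url).scheme.lower()
        return scheme in ("http", "https")

    print(is_allowed_watch_url("file:///tasty/disk/drive"))  # False
    print(is_allowed_watch_url("https://example.com"))       # True
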
@@ -54,6 +54,13 @@ def test_visual_selector_content_ready(client, live_server):
with open(os.path.join('test-datastore', uuid, 'elements.json'), 'r') as f:
json.load(f)

# Attempt to fetch it via the web hook that the browser would use
res = client.get(url_for('static_content', group='visual_selector_data', filename=uuid))
json.loads(res.data)
assert res.mimetype == 'application/json'
assert res.status_code == 200

# Some options should be enabled
# @todo - in the future, the visibility should be toggled by JS from the request type setting
res = client.get(

@@ -209,6 +209,7 @@ class update_worker(threading.Thread):
from .processors import text_json_diff, restock_diff

while not self.app.config.exit.is_set():
update_handler = None

try:
queued_item_data = self.q.get(block=False)

@@ -229,17 +230,35 @@ class update_worker(threading.Thread):
now = time.time()

try:
processor = self.datastore.data['watching'][uuid].get('processor','text_json_diff')
# Processor is what we are using for detecting the "Change"
processor = self.datastore.data['watching'][uuid].get('processor', 'text_json_diff')
# if system...

# Abort processing when the content was the same as the last fetch
skip_when_same_checksum = queued_item_data.item.get('skip_when_checksum_same')

# @todo some way to switch by name
# Init a new 'difference_detection_processor'

if processor == 'restock_diff':
update_handler = restock_diff.perform_site_check(datastore=self.datastore)
update_handler = restock_diff.perform_site_check(datastore=self.datastore,
watch_uuid=uuid
)
else:
# Used as a default and also by some tests
update_handler = text_json_diff.perform_site_check(datastore=self.datastore)
update_handler = text_json_diff.perform_site_check(datastore=self.datastore,
watch_uuid=uuid
)

# Clear last errors (move to preflight func?)
self.datastore.data['watching'][uuid]['browser_steps_last_error_step'] = None
changed_detected, update_obj, contents = update_handler.run(uuid, skip_when_checksum_same=queued_item_data.item.get('skip_when_checksum_same'))

update_handler.call_browser()

changed_detected, update_obj, contents = update_handler.run_changedetection(uuid,
skip_when_checksum_same=skip_when_same_checksum,
)

# Re #342
# In Python 3, all strings are sequences of Unicode characters. There is a bytes type that holds raw bytes.

@@ -391,6 +410,9 @@ class update_worker(threading.Thread):
self.datastore.update_watch(uuid=uuid, update_obj={'last_error': str(e)})
# Other serious error
process_changedetection_results = False
# import traceback
# print(traceback.format_exc())

else:
# Crash protection, the watch entry could have been removed by this point (during a slow chrome fetch etc)
if not self.datastore.data['watching'].get(uuid):

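The update_worker changes above split fetching from diffing: each processor is now constructed with the watch's uuid, the network or browser work happens in call_browser(), and run_changedetection() only compares the result against the previous snapshot. A self-contained sketch of that two-phase shape, using illustrative names rather than the project's real processor classes:

    class ExampleProcessor:
        # Illustrative stand-in for a 'difference_detection_processor'
        def __init__(self, datastore, watch_uuid):
            self.datastore = datastore
            self.watch_uuid = watch_uuid
            self.fetched_content = None

        def call_browser(self):
            # Phase 1: perform the fetch (requests, Playwright, etc.) and keep the raw content
            self.fetched_content = "<html>new content</html>"

        def run_changedetection(self, uuid, skip_when_checksum_same=False):
            # Phase 2: compare against the previous snapshot and report whether it changed
            previous = self.datastore.get(uuid)
            return self.fetched_content != previous, {"last_checked": True}, self.fetched_content

    datastore = {"watch-uuid-example": "<html>old content</html>"}
    handler = ExampleProcessor(datastore=datastore, watch_uuid="watch-uuid-example")
    handler.call_browser()
    changed_detected, update_obj, contents = handler.run_changedetection("watch-uuid-example")
    print(changed_detected)  # True
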
@@ -66,25 +66,12 @@ services:
# browser-chrome:
# condition: service_started

# browser-chrome:
# hostname: browser-chrome
# image: selenium/standalone-chrome:4
# environment:
# - VNC_NO_PASSWORD=1
# - SCREEN_WIDTH=1920
# - SCREEN_HEIGHT=1080
# - SCREEN_DEPTH=24
# volumes:
# # Workaround to avoid the browser crashing inside a docker container
# # See https://github.com/SeleniumHQ/docker-selenium#quick-start
# - /dev/shm:/dev/shm
# restart: unless-stopped

# Used for fetching pages via Playwright+Chrome where you need Javascript support.
# Note: Playwright/browserless not supported on ARM type devices (rPi etc)
# RECOMMENDED FOR FETCHING PAGES WITH CHROME
# playwright-chrome:
# hostname: playwright-chrome
# image: browserless/chrome
# image: browserless/chrome:1.60-chrome-stable
# restart: unless-stopped
# environment:
# - SCREEN_WIDTH=1920

@@ -101,6 +88,23 @@ services:
# Ignore HTTPS errors, like for self-signed certs
# - DEFAULT_IGNORE_HTTPS_ERRORS=true
#

# Used for fetching pages via Playwright+Chrome where you need Javascript support.
# Note: works well but is deprecated, doesnt fetch full page screenshots and other issues
# browser-chrome:
# hostname: browser-chrome
# image: selenium/standalone-chrome:4
# environment:
# - VNC_NO_PASSWORD=1
# - SCREEN_WIDTH=1920
# - SCREEN_HEIGHT=1080
# - SCREEN_DEPTH=24
# volumes:
# # Workaround to avoid the browser crashing inside a docker container
# # See https://github.com/SeleniumHQ/docker-selenium#quick-start
# - /dev/shm:/dev/shm
# restart: unless-stopped

volumes:
changedetection-data:

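The reshuffled compose file keeps the browserless playwright-chrome service as the recommended JavaScript-capable fetcher and marks the Selenium container as deprecated. As a rough illustration of how a client reaches such a service through PLAYWRIGHT_DRIVER_URL (the default endpoint below is an assumed example, not a guaranteed setting):

    import os
    from playwright.sync_api import sync_playwright

    # Assumed websocket endpoint of a browserless/chrome container on the same compose network
    driver_url = os.getenv("PLAYWRIGHT_DRIVER_URL", "ws://playwright-chrome:3000")

    with sync_playwright() as p:
        browser = p.chromium.connect_over_cdp(driver_url)
        context = browser.new_context()
        page = context.new_page()
        page.goto("https://example.com")
        print(page.title())
        browser.close()
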
@@ -1,11 +1,12 @@
eventlet>=0.31.0
eventlet>=0.33.3 # related to dnspython fixes
feedgen~=0.9
flask-compress
flask-login~=0.6
# 0.6.3 included compatibility fix for werkzeug 3.x (2.x had deprecation of url handlers)
flask-login>=0.6.3
flask-paginate
flask_expects_json~=1.7
flask_restful
flask_wtf
flask_wtf~=1.2
flask~=2.3
inscriptis~=2.2
pytz

@@ -24,11 +25,7 @@ chardet>2.3.0
wtforms~=3.0
jsonpath-ng~=1.5.3

# dnspython 2.3.0 is not compatible with eventlet
# * https://github.com/eventlet/eventlet/issues/781
# * https://datastax-oss.atlassian.net/browse/PYTHON-1320
dnspython<2.3.0
dnspython~=2.4 # related to eventlet fixes

# jq not available on Windows so must be installed manually

@@ -51,7 +48,7 @@ lxml

selenium~=4.14.0

werkzeug
werkzeug~=3.0

# Templating, so far just in the URLs but in the future can be for the notifications also
jinja2~=3.1

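A quick note on the pin styles above: ~= is pip's "compatible release" operator, so werkzeug~=3.0 tracks the 3.x line while excluding 4.0, whereas flask-login>=0.6.3 and eventlet>=0.33.3 only set a floor. A small check using the packaging library (the version numbers below are examples only):

    from packaging.specifiers import SpecifierSet

    compatible = SpecifierSet("~=3.0")    # "compatible release": >=3.0, ==3.*
    floor_only = SpecifierSet(">=0.6.3")  # a floor with no upper bound

    print("3.1" in compatible)    # True  - stays on the 3.x line
    print("4.0" in compatible)    # False - a new major release is excluded
    print("0.6.3" in floor_only)  # True
    print("1.0" in floor_only)    # True  - nothing caps future majors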