Mirror of https://github.com/dgtlmoon/changedetection.io.git (synced 2025-11-02 15:47:19 +00:00)
Compare commits: 2 commits, pdf-improv ... fixing-eve

| Author | SHA1 | Date |
|---|---|---|
| | 76158a3804 | |
| | 6bdd812106 | |
.github/workflows/containers.yml (vendored): 16 changes
@@ -96,9 +96,8 @@ jobs:
          tags: |
            ${{ secrets.DOCKER_HUB_USERNAME }}/changedetection.io:dev,ghcr.io/${{ github.repository }}:dev
          platforms: linux/amd64,linux/arm64,linux/arm/v6,linux/arm/v7,linux/arm/v8
          cache-from: type=gha
          cache-to: type=gha,mode=max
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache
          # Looks like this was disabled
          # provenance: false

@@ -117,11 +116,18 @@ jobs:
            ${{ secrets.DOCKER_HUB_USERNAME }}/changedetection.io:latest
            ghcr.io/dgtlmoon/changedetection.io:latest
          platforms: linux/amd64,linux/arm64,linux/arm/v6,linux/arm/v7,linux/arm/v8
          cache-from: type=gha
          cache-to: type=gha,mode=max
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache
          # Looks like this was disabled
          # provenance: false

      - name: Image digest
        run: echo step SHA ${{ steps.vars.outputs.sha_short }} tag ${{steps.vars.outputs.tag}} branch ${{steps.vars.outputs.branch}} digest ${{ steps.docker_build.outputs.digest }}

      - name: Cache Docker layers
        uses: actions/cache@v3
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-
.github/workflows/test-only.yml (vendored): 12 changes
@@ -30,10 +30,7 @@ jobs:

          # Selenium+browserless
          docker run --network changedet-network -d --hostname selenium -p 4444:4444 --rm --shm-size="2g" selenium/standalone-chrome:4
          docker run --network changedet-network -d --name browserless --hostname browserless -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" -e "DEFAULT_LAUNCH_ARGS=[\"--window-size=1920,1080\"]" --rm -p 3000:3000 --shm-size="2g" browserless/chrome:1.60-chrome-stable

          # For accessing custom browser tests
          docker run --network changedet-network -d --name browserless-custom-url --hostname browserless-custom-url -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" -e "DEFAULT_LAUNCH_ARGS=[\"--window-size=1920,1080\"]" --rm --shm-size="2g" browserless/chrome:1.60-chrome-stable
          docker run --network changedet-network -d --hostname browserless -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" -e "DEFAULT_LAUNCH_ARGS=[\"--window-size=1920,1080\"]" --rm -p 3000:3000 --shm-size="2g" browserless/chrome:1.60-chrome-stable

      - name: Build changedetection.io container for testing
        run: |

@@ -51,7 +48,6 @@ jobs:
        run: |
          # Unit tests
          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_notification_diff'
          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_watch_model'

          # All tests
          docker run --network changedet-network test-changedetectionio bash -c 'cd changedetectionio && ./run_basic_tests.sh'

@@ -90,12 +86,6 @@ jobs:
          # And again with PLAYWRIGHT_DRIVER_URL=..
          cd ..

      - name: Test custom browser URL
        run: |
          cd changedetectionio
          ./run_custom_browser_url_tests.sh
          cd ..

      - name: Test changedetection.io container starts+runs basically without error
        run: |
          docker run -p 5556:5000 -d test-changedetectionio
@@ -20,6 +20,11 @@ WORKDIR /install

COPY requirements.txt /requirements.txt

# Instruct pip to fetch wheels from piwheels.org on ARMv6 and ARMv7 machines
RUN if [ "$(dpkg --print-architecture)" = "armhf" ] || [ "$(dpkg --print-architecture)" = "armel" ]; then \
    printf "[global]\nextra-index-url=https://www.piwheels.org/simple\n" > /etc/pip.conf; \
    fi;

RUN pip install --target=/dependencies -r /requirements.txt

# Playwright is an alternative to Selenium
@@ -268,7 +268,3 @@ I offer commercial support, this software is depended on by network security, ae
[license-shield]: https://img.shields.io/github/license/dgtlmoon/changedetection.io.svg?style=for-the-badge
[release-link]: https://github.com/dgtlmoon/changedetection.io/releases
[docker-link]: https://hub.docker.com/r/dgtlmoon/changedetection.io

## Third-party licenses

changedetectionio.html_tools.elementpath_tostring: Copyright (c), 2018-2021, SISSA (Scuola Internazionale Superiore di Studi Avanzati), Licensed under [MIT license](https://github.com/sissaschool/elementpath/blob/master/LICENSE)
@@ -38,7 +38,7 @@ from flask_paginate import Pagination, get_page_parameter
from changedetectionio import html_tools
from changedetectionio.api import api_v1

__version__ = '0.45.7.3'
__version__ = '0.45.7'

from changedetectionio.store import BASE_URL_NOT_SET_TEXT

@@ -105,10 +105,6 @@ def get_darkmode_state():
    css_dark_mode = request.cookies.get('css_dark_mode', 'false')
    return 'true' if css_dark_mode and strtobool(css_dark_mode) else 'false'

@app.template_global()
def get_css_version():
    return __version__

# We use the whole watch object from the store/JSON so we can see if there's some related status in terms of a thread
# running or something similar.
@app.template_filter('format_last_checked_time')
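The dark-mode template global above reduces to a permissive cookie parse. A minimal standalone sketch of the same logic (a plain dict stands in for Flask's `request.cookies`):

```python
from distutils.util import strtobool

def get_darkmode_state(cookies):
    # Any value strtobool() treats as truthy ('1', 'true', 'yes', 'on', ...)
    # switches the CSS to dark mode; everything else stays light.
    css_dark_mode = cookies.get('css_dark_mode', 'false')
    return 'true' if css_dark_mode and strtobool(css_dark_mode) else 'false'

print(get_darkmode_state({'css_dark_mode': 'yes'}))  # -> 'true'
print(get_darkmode_state({}))                        # -> 'false'
```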
@@ -614,8 +610,6 @@ def changedetection_app(config=None, datastore_o=None):
        # For the form widget tag uuid lookup
        form.tags.datastore = datastore # in _value

        for p in datastore.extra_browsers:
            form.fetch_backend.choices.append(p)

        form.fetch_backend.choices.append(("system", 'System settings default'))
@@ -716,7 +710,7 @@ def changedetection_app(config=None, datastore_o=None):
        system_uses_webdriver = datastore.data['settings']['application']['fetch_backend'] == 'html_webdriver'

        is_html_webdriver = False
        if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver' or watch.get('fetch_backend', '').startswith('extra_browser_'):
        if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver':
            is_html_webdriver = True

        # Only works reliably with Playwright
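This hunk widens the "is this watch rendered in a real browser?" test so the new `extra_browser_*` backends count as webdriver-backed. The predicate in isolation, as a sketch (helper name hypothetical, `system_uses_webdriver` precomputed as in the handler above):

```python
def uses_real_browser(watch, system_uses_webdriver):
    # True when the effective backend is webdriver/playwright based,
    # including the configurable 'extra_browser_<name>' entries.
    backend = watch.get('fetch_backend') or ''
    return ((backend == 'system' and system_uses_webdriver)
            or backend == 'html_webdriver'
            or backend.startswith('extra_browser_'))

assert uses_real_browser({'fetch_backend': 'extra_browser_mybrowser'}, False)
assert not uses_real_browser({'fetch_backend': 'html_requests'}, True)
```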
@@ -821,16 +815,6 @@ def changedetection_app(config=None, datastore_o=None):

        return output

    @app.route("/settings/reset-api-key", methods=['GET'])
    @login_optionally_required
    def settings_reset_api_key():
        import secrets
        secret = secrets.token_hex(16)
        datastore.data['settings']['application']['api_access_token'] = secret
        datastore.needs_write_urgent = True
        flash("API Key was regenerated.")
        return redirect(url_for('settings_page')+'#api')

    @app.route("/import", methods=['GET', "POST"])
    @login_optionally_required
    def import_page():
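The reset route is `secrets.token_hex` plus a flag asking the store to persist urgently. Outside of Flask the core reduces to this sketch:

```python
import secrets

def regenerate_api_key(settings):
    # 16 random bytes -> a 32-character hex token, the same shape
    # the /settings/reset-api-key route above writes into the datastore.
    secret = secrets.token_hex(16)
    settings['application']['api_access_token'] = secret
    return secret

settings = {'application': {}}
print(regenerate_api_key(settings))  # e.g. '3f2a9c...'
```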
@@ -961,7 +945,7 @@ def changedetection_app(config=None, datastore_o=None):
        # Read as binary and force decode as UTF-8
        # Windows may fail decode in python if we just use 'r' mode (chardet decode exception)
        from_version = request.args.get('from_version')
        from_version_index = -2 # second newest
        from_version_index = -2 # second newest
        if from_version and from_version in dates:
            from_version_index = dates.index(from_version)
        else:
@@ -970,7 +954,7 @@ def changedetection_app(config=None, datastore_o=None):
        try:
            from_version_file_contents = watch.get_history_snapshot(dates[from_version_index])
        except Exception as e:
            from_version_file_contents = f"Unable to read to-version at index {dates[from_version_index]}.\n"
            from_version_file_contents = "Unable to read to-version at index{}.\n".format(dates[from_version_index])

        to_version = request.args.get('to_version')
        to_version_index = -1
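Both lookups share the same pattern: a default index (second newest for `from`, newest for `to`) that is overridden only when the requested timestamp actually exists in the history keys. A sketch of that selection (hypothetical helper):

```python
def pick_version_indexes(dates, from_version=None, to_version=None):
    # Defaults mirror the handler above: diff the second-newest
    # snapshot against the newest one.
    from_version_index = -2
    to_version_index = -1
    if from_version and from_version in dates:
        from_version_index = dates.index(from_version)
    if to_version and to_version in dates:
        to_version_index = dates.index(to_version)
    return from_version_index, to_version_index

dates = ['1700000000', '1700000100', '1700000200']
print(pick_version_indexes(dates))                # (-2, -1)
print(pick_version_indexes(dates, '1700000000'))  # (0, -1)
```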
@@ -989,7 +973,7 @@ def changedetection_app(config=None, datastore_o=None):
        system_uses_webdriver = datastore.data['settings']['application']['fetch_backend'] == 'html_webdriver'

        is_html_webdriver = False
        if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver' or watch.get('fetch_backend', '').startswith('extra_browser_'):
        if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver':
            is_html_webdriver = True

        password_enabled_and_share_is_off = False

@@ -1043,7 +1027,7 @@ def changedetection_app(config=None, datastore_o=None):

        is_html_webdriver = False
        if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver' or watch.get('fetch_backend', '').startswith('extra_browser_'):
        if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver':
            is_html_webdriver = True

        # Never requested successfully, but we detected a fetch error
@@ -40,8 +40,8 @@ def construct_blueprint(datastore: ChangeDetectionStore):
        contents = ''
        now = time.time()
        try:
            update_handler = text_json_diff.perform_site_check(datastore=datastore, watch_uuid=uuid)
            update_handler.call_browser()
            update_handler = text_json_diff.perform_site_check(datastore=datastore)
            changed_detected, update_obj, contents = update_handler.run(uuid, preferred_proxy=preferred_proxy, skip_when_checksum_same=False)
            # title, size is len contents not len xfer
        except content_fetcher.Non200ErrorCodeReceived as e:
            if e.status_code == 404:
@@ -69,12 +69,11 @@ xpath://body/div/span[contains(@class, 'example-class')]",
        {% endif %}
        </ul>
    </li>
    <li>XPath - Limit text to this XPath rule, simply start with a forward-slash. To specify XPath to be used explicitly or the XPath rule starts with an XPath function: Prefix with <code>xpath:</code>
    <li>XPath - Limit text to this XPath rule, simply start with a forward-slash,
        <ul>
            <li>Example: <code>//*[contains(@class, 'sametext')]</code> or <code>xpath:count(//*[contains(@class, 'sametext')])</code>, <a
            <li>Example: <code>//*[contains(@class, 'sametext')]</code> or <code>xpath://*[contains(@class, 'sametext')]</code>, <a
                href="http://xpather.com/" target="new">test your XPath here</a></li>
            <li>Example: Get all titles from an RSS feed <code>//title/text()</code></li>
            <li>To use XPath1.0: Prefix with <code>xpath1:</code></li>
        </ul>
    </li>
</ul>
@@ -96,7 +96,6 @@ class Fetcher():
    content = None
    error = None
    fetcher_description = "No description"
    browser_connection_url = None
    headers = {}
    status_code = None
    webdriver_js_execute_code = None
@@ -252,16 +251,14 @@ class base_html_playwright(Fetcher):

    proxy = None

    def __init__(self, proxy_override=None, browser_connection_url=None):
    def __init__(self, proxy_override=None):
        super().__init__()

        self.browser_type = os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').strip('"')

        # .strip('"') is going to save someone a lot of time when they accidently wrap the env value
        if not browser_connection_url:
            self.browser_connection_url = os.getenv("PLAYWRIGHT_DRIVER_URL", 'ws://playwright-chrome:3000').strip('"')
        else:
            self.browser_connection_url = browser_connection_url
        self.browser_type = os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').strip('"')
        self.command_executor = os.getenv(
            "PLAYWRIGHT_DRIVER_URL",
            'ws://playwright-chrome:3000'
        ).strip('"')

        # If any proxy settings are enabled, then we should setup the proxy object
        proxy_args = {}
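The constructor change lets a caller hand in a per-watch `browser_connection_url` (for the configurable "extra browsers"), with the `PLAYWRIGHT_DRIVER_URL` environment variable only as a fallback. The resolution order as a standalone sketch:

```python
import os

def resolve_playwright_url(browser_connection_url=None):
    # An explicit argument (e.g. an extra browser configured in the UI) wins;
    # otherwise fall back to the environment, with .strip('"') forgiving
    # accidentally quoted values like PLAYWRIGHT_DRIVER_URL='"ws://..."'.
    if browser_connection_url:
        return browser_connection_url
    return os.getenv("PLAYWRIGHT_DRIVER_URL", 'ws://playwright-chrome:3000').strip('"')

print(resolve_playwright_url('wss://some-provider:3000'))  # explicit URL wins
print(resolve_playwright_url())  # env var, or the ws://playwright-chrome default
```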
@@ -422,7 +419,11 @@ class base_html_playwright(Fetcher):
            is_binary=False):

        # For now, USE_EXPERIMENTAL_PUPPETEER_FETCH is not supported by watches with BrowserSteps (for now!)
        if not self.browser_steps and os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH'):
        has_browser_steps = self.browser_steps and list(filter(
            lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'),
            self.browser_steps))

        if not has_browser_steps and os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH'):
            if strtobool(os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH')):
                # Temporary backup solution until we rewrite the playwright code
                return self.run_fetch_browserless_puppeteer(
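The new guard no longer treats an empty or placeholder-only step list as "has browser steps", so such watches can still use the experimental puppeteer path. The filter on its own, as a sketch:

```python
def real_browser_steps(browser_steps):
    # A step only counts when it carries an operation that is not one of
    # the placeholder entries the UI inserts ('Choose one', 'Goto site').
    return [s for s in (browser_steps or [])
            if s.get('operation') and s['operation'] not in ('Choose one', 'Goto site')]

steps = [{'operation': 'Choose one'}, {'operation': 'Click element'}]
print(real_browser_steps(steps))  # [{'operation': 'Click element'}]
print(real_browser_steps([]))     # []
```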
@@ -447,7 +448,7 @@ class base_html_playwright(Fetcher):
            # Seemed to cause a connection Exception even tho I can see it connect
            # self.browser = browser_type.connect(self.command_executor, timeout=timeout*1000)
            # 60,000 connection timeout only
            browser = browser_type.connect_over_cdp(self.browser_connection_url, timeout=60000)
            browser = browser_type.connect_over_cdp(self.command_executor, timeout=60000)

            # SOCKS5 with authentication is not supported (yet)
            # https://github.com/microsoft/playwright/issues/10567
@@ -507,11 +508,7 @@ class base_html_playwright(Fetcher):
            self.status_code = response.status

            if self.status_code != 200 and not ignore_status_codes:

                screenshot=self.page.screenshot(type='jpeg', full_page=True,
                                                quality=int(os.getenv("PLAYWRIGHT_SCREENSHOT_QUALITY", 72)))

                raise Non200ErrorCodeReceived(url=url, status_code=self.status_code, screenshot=screenshot)
                raise Non200ErrorCodeReceived(url=url, status_code=self.status_code)

            if len(self.page.content().strip()) == 0:
                context.close()
@@ -562,6 +559,8 @@ class base_html_webdriver(Fetcher):
    else:
        fetcher_description = "WebDriver Chrome/Javascript"

    command_executor = ''

    # Configs for Proxy setup
    # In the ENV vars, is prefixed with "webdriver_", so it is for example "webdriver_sslProxy"
    selenium_proxy_settings_mappings = ['proxyType', 'ftpProxy', 'httpProxy', 'noProxy',
@@ -569,15 +568,12 @@ class base_html_webdriver(Fetcher):
                                        'socksProxy', 'socksVersion', 'socksUsername', 'socksPassword']
    proxy = None

    def __init__(self, proxy_override=None, browser_connection_url=None):
    def __init__(self, proxy_override=None):
        super().__init__()
        from selenium.webdriver.common.proxy import Proxy as SeleniumProxy

        # .strip('"') is going to save someone a lot of time when they accidently wrap the env value
        if not browser_connection_url:
            self.browser_connection_url = os.getenv("WEBDRIVER_URL", 'http://browser-chrome:4444/wd/hub').strip('"')
        else:
            self.browser_connection_url = browser_connection_url
        self.command_executor = os.getenv("WEBDRIVER_URL", 'http://browser-chrome:4444/wd/hub').strip('"')

        # If any proxy settings are enabled, then we should setup the proxy object
        proxy_args = {}
@@ -619,7 +615,7 @@ class base_html_webdriver(Fetcher):
            options.proxy = self.proxy

        self.driver = webdriver.Remote(
            command_executor=self.browser_connection_url,
            command_executor=self.command_executor,
            options=options)

        try:
@@ -674,10 +670,8 @@ class base_html_webdriver(Fetcher):
class html_requests(Fetcher):
    fetcher_description = "Basic fast Plaintext/HTTP Client"

    def __init__(self, proxy_override=None, browser_connection_url=None):
        super().__init__()
    def __init__(self, proxy_override=None):
        self.proxy_override = proxy_override
        # browser_connection_url is none because its always 'launched locally'

    def run(self,
            url,
@@ -168,9 +168,7 @@ class ValidateContentFetcherIsReady(object):
    def __call__(self, form, field):
        import urllib3.exceptions
        from changedetectionio import content_fetcher
        return

        # AttributeError: module 'changedetectionio.content_fetcher' has no attribute 'extra_browser_unlocked<>ASDF213r123r'
        # Better would be a radiohandler that keeps a reference to each class
        if field.data is not None and field.data != 'system':
            klass = getattr(content_fetcher, field.data)
@@ -328,30 +326,11 @@ class ValidateCSSJSONXPATHInput(object):
            return

        # Does it look like XPath?
        if line.strip()[0] == '/' or line.strip().startswith('xpath:'):
            if not self.allow_xpath:
                raise ValidationError("XPath not permitted in this field!")
            from lxml import etree, html
            import elementpath
            # xpath 2.0-3.1
            from elementpath.xpath3 import XPath3Parser
            tree = html.fromstring("<html></html>")
            line = line.replace('xpath:', '')

            try:
                elementpath.select(tree, line.strip(), parser=XPath3Parser)
            except elementpath.ElementPathError as e:
                message = field.gettext('\'%s\' is not a valid XPath expression. (%s)')
                raise ValidationError(message % (line, str(e)))
            except:
                raise ValidationError("A system-error occurred when validating your XPath expression")

        if line.strip().startswith('xpath1:'):
        if line.strip()[0] == '/':
            if not self.allow_xpath:
                raise ValidationError("XPath not permitted in this field!")
            from lxml import etree, html
            tree = html.fromstring("<html></html>")
            line = re.sub(r'^xpath1:', '', line)

            try:
                tree.xpath(line.strip())
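Both branches validate by evaluating the expression against a trivial tree: elementpath's `XPath3Parser` for `xpath:`/leading-slash rules, lxml's native XPath 1.0 for `xpath1:`. The XPath 3 half as a minimal sketch:

```python
import elementpath
from elementpath.xpath3 import XPath3Parser
from lxml import html

def is_valid_xpath3(expression):
    # Selecting against an empty document is enough to surface syntax errors.
    tree = html.fromstring("<html></html>")
    try:
        elementpath.select(tree, expression, parser=XPath3Parser)
        return True
    except elementpath.ElementPathError:
        return False

print(is_valid_xpath3("count(//div[@class='x'])"))  # True
print(is_valid_xpath3("//div[unclosed"))            # False
```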
@@ -517,12 +496,6 @@ class SingleExtraProxy(Form):
    proxy_url = StringField('Proxy URL', [validators.Optional()], render_kw={"placeholder": "socks5:// or regular proxy http://user:pass@...:3128", "size":50})
    # @todo do the validation here instead

class SingleExtraBrowser(Form):
    browser_name = StringField('Name', [validators.Optional()], render_kw={"placeholder": "Name"})
    browser_connection_url = StringField('Browser connection URL', [validators.Optional()], render_kw={"placeholder": "wss://brightdata... wss://oxylabs etc", "size":50})
    # @todo do the validation here instead


# datastore.data['settings']['requests']..
class globalSettingsRequestForm(Form):
    time_between_check = FormField(TimeBetweenCheckForm)
@@ -531,7 +504,6 @@ class globalSettingsRequestForm(Form):
                                   render_kw={"style": "width: 5em;"},
                                   validators=[validators.NumberRange(min=0, message="Should contain zero or more seconds")])
    extra_proxies = FieldList(FormField(SingleExtraProxy), min_entries=5)
    extra_browsers = FieldList(FormField(SingleExtraBrowser), min_entries=5)

    def validate_extra_proxies(self, extra_validators=None):
        for e in self.data['extra_proxies']:
@@ -69,89 +69,10 @@ def element_removal(selectors: List[str], html_content):
    selector = ",".join(selectors)
    return subtractive_css_selector(selector, html_content)

def elementpath_tostring(obj):
    """
    change elementpath.select results to string type
    # The MIT License (MIT), Copyright (c), 2018-2021, SISSA (Scuola Internazionale Superiore di Studi Avanzati)
    # https://github.com/sissaschool/elementpath/blob/dfcc2fd3d6011b16e02bf30459a7924f547b47d0/elementpath/xpath_tokens.py#L1038
    """

    import elementpath
    from decimal import Decimal
    import math

    if obj is None:
        return ''
    # https://elementpath.readthedocs.io/en/latest/xpath_api.html#elementpath.select
    elif isinstance(obj, elementpath.XPathNode):
        return obj.string_value
    elif isinstance(obj, bool):
        return 'true' if obj else 'false'
    elif isinstance(obj, Decimal):
        value = format(obj, 'f')
        if '.' in value:
            return value.rstrip('0').rstrip('.')
        return value

    elif isinstance(obj, float):
        if math.isnan(obj):
            return 'NaN'
        elif math.isinf(obj):
            return str(obj).upper()

        value = str(obj)
        if '.' in value:
            value = value.rstrip('0').rstrip('.')
        if '+' in value:
            value = value.replace('+', '')
        if 'e' in value:
            return value.upper()
        return value

    return str(obj)
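The function maps XPath result objects onto XPath's own string conventions rather than Python's. Assuming `elementpath_tostring` as defined above, it behaves like this:

```python
from decimal import Decimal

print(elementpath_tostring(True))              # 'true' (not Python's 'True')
print(elementpath_tostring(Decimal('1.500')))  # '1.5'  (trailing zeros stripped)
print(elementpath_tostring(2.0))               # '2'
print(elementpath_tostring(float('inf')))      # 'INF'  (XPath spelling)
print(elementpath_tostring(float('nan')))      # 'NaN'
print(elementpath_tostring(None))              # ''
```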
# Return str Utf-8 of matched rules
def xpath_filter(xpath_filter, html_content, append_pretty_line_formatting=False, is_rss=False):
    from lxml import etree, html
    import elementpath
    # xpath 2.0-3.1
    from elementpath.xpath3 import XPath3Parser

    parser = etree.HTMLParser()
    if is_rss:
        # So that we can keep CDATA for cdata_in_document_to_text() to process
        parser = etree.XMLParser(strip_cdata=False)

    tree = html.fromstring(bytes(html_content, encoding='utf-8'), parser=parser)
    html_block = ""

    r = elementpath.select(tree, xpath_filter.strip(), namespaces={'re': 'http://exslt.org/regular-expressions'}, parser=XPath3Parser)
    #@note: //title/text() wont work where <title>CDATA..

    if type(r) != list:
        r = [r]

    for element in r:
        # When there's more than 1 match, then add the suffix to separate each line
        # And where the matched result doesn't include something that will cause Inscriptis to add a newline
        # (This way each 'match' reliably has a new-line in the diff)
        # Divs are converted to 4 whitespaces by inscriptis
        if append_pretty_line_formatting and len(html_block) and (not hasattr( element, 'tag' ) or not element.tag in (['br', 'hr', 'div', 'p'])):
            html_block += TEXT_FILTER_LIST_LINE_SUFFIX

        if type(element) == str:
            html_block += element
        elif issubclass(type(element), etree._Element) or issubclass(type(element), etree._ElementTree):
            html_block += etree.tostring(element, pretty_print=True).decode('utf-8')
        else:
            html_block += elementpath_tostring(element)

    return html_block
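With `elementpath_tostring` in the final `else`, `xpath_filter` can now return non-node results such as `count()` instead of failing. A usage sketch (assumes the module context above, including `TEXT_FILTER_LIST_LINE_SUFFIX`):

```python
html_doc = """
<html><body>
  <div class="price">19.99</div>
  <div class="price">24.99</div>
</body></html>
"""

# Node matches come back serialized:
print(xpath_filter("//div[@class='price']", html_doc))

# Function results pass through elementpath_tostring():
print(xpath_filter("count(//div[@class='price'])", html_doc))  # '2'
```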
# Return str Utf-8 of matched rules
# 'xpath1:'
def xpath1_filter(xpath_filter, html_content, append_pretty_line_formatting=False, is_rss=False):
    from lxml import etree, html

    parser = None
    if is_rss:
@@ -16,7 +16,6 @@ class model(dict):
        },
        'requests': {
            'extra_proxies': [], # Configurable extra proxies via the UI
            'extra_browsers': [], # Configurable extra browsers via the UI
            'jitter_seconds': 0,
            'proxy': None, # Preferred proxy connection
            'time_between_check': {'weeks': None, 'days': None, 'hours': 3, 'minutes': None, 'seconds': None},
@@ -19,7 +19,6 @@ from changedetectionio.notification import (

base_config = {
    'body': None,
    'browser_steps': [],
    'browser_steps_last_error_step': None,
    'check_unique_lines': False, # On change-detected, compare against all history if its something new
    'check_count': 0,
@@ -146,14 +145,8 @@ class model(dict):
            flash(message, 'error')
            return ''

        if ready_url.startswith('source:'):
            ready_url=ready_url.replace('source:', '')
        return ready_url

    @property
    def is_source_type_url(self):
        return self.get('url', '').startswith('source:')

    @property
    def get_fetch_backend(self):
        """
@@ -241,14 +234,6 @@ class model(dict):
        fname = os.path.join(self.watch_data_dir, "history.txt")
        return os.path.isfile(fname)

    @property
    def has_browser_steps(self):
        has_browser_steps = self.get('browser_steps') and list(filter(
            lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'),
            self.get('browser_steps')))

        return has_browser_steps

    # Returns the newest key, but if theres only 1 record, then it's counted as not being new, so return 0.
    @property
    def newest_history_key(self):
@@ -262,38 +247,6 @@ class model(dict):
            bump = self.history
        return self.__newest_history_key

    # Given an arbitrary timestamp, find the closest next key
    # For example, last_viewed = 1000 so it should return the next 1001 timestamp
    #
    # used for the [diff] button so it can preset a smarter from_version
    @property
    def get_next_snapshot_key_to_last_viewed(self):

        """Unfortunately for now timestamp is stored as string key"""
        keys = list(self.history.keys())
        if not keys:
            return None

        last_viewed = int(self.get('last_viewed'))
        prev_k = keys[0]
        sorted_keys = sorted(keys, key=lambda x: int(x))
        sorted_keys.reverse()

        # When the 'last viewed' timestamp is greater than the newest snapshot, return second last
        if last_viewed > int(sorted_keys[0]):
            return sorted_keys[1]

        for k in sorted_keys:
            if int(k) < last_viewed:
                if prev_k == sorted_keys[0]:
                    # Return the second last one so we dont recommend the same version compares itself
                    return sorted_keys[1]
                return prev_k
            prev_k = k

        return keys[0]
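A worked example of the property above, re-implemented standalone (history keys are string unix timestamps, oldest first):

```python
def next_snapshot_key(history_keys, last_viewed):
    # Find the first snapshot made *after* last_viewed, as in the property above.
    keys = list(history_keys)
    if not keys:
        return None
    sorted_keys = sorted(keys, key=int, reverse=True)
    # Viewed after the newest snapshot: suggest the second newest.
    if last_viewed > int(sorted_keys[0]):
        return sorted_keys[1]
    prev_k = keys[0]
    for k in sorted_keys:
        if int(k) < last_viewed:
            if prev_k == sorted_keys[0]:
                # Don't recommend comparing the newest version with itself.
                return sorted_keys[1]
            return prev_k
        prev_k = k
    return keys[0]

print(next_snapshot_key(['1000', '2000', '3000'], 1500))  # '2000'
```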
    def get_history_snapshot(self, timestamp):
        import brotli
        filepath = self.history[timestamp]
@@ -1,122 +1,15 @@
from abc import abstractmethod
import os
import hashlib
import re
from changedetectionio import content_fetcher
from copy import deepcopy
from distutils.util import strtobool


class difference_detection_processor():

    browser_steps = None
    datastore = None
    fetcher = None
    screenshot = None
    watch = None
    xpath_data = None

    def __init__(self, *args, datastore, watch_uuid, **kwargs):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.datastore = datastore
        self.watch = deepcopy(self.datastore.data['watching'].get(watch_uuid))

    def call_browser(self):

        # Protect against file:// access
        if re.search(r'^file://', self.watch.get('url', '').strip(), re.IGNORECASE):
            if not strtobool(os.getenv('ALLOW_FILE_URI', 'false')):
                raise Exception(
                    "file:// type access is denied for security reasons."
                )

        url = self.watch.link

        # Requests, playwright, other browser via wss:// etc, fetch_extra_something
        prefer_fetch_backend = self.watch.get('fetch_backend', 'system')

        # Proxy ID "key"
        preferred_proxy_id = self.datastore.get_preferred_proxy_for_watch(uuid=self.watch.get('uuid'))

        # Pluggable content self.fetcher
        if not prefer_fetch_backend or prefer_fetch_backend == 'system':
            prefer_fetch_backend = self.datastore.data['settings']['application'].get('fetch_backend')

        # In the case that the preferred fetcher was a browser config with custom connection URL..
        # @todo - on save watch, if its extra_browser_ then it should be obvious it will use playwright (like if its requests now..)
        browser_connection_url = None
        if prefer_fetch_backend.startswith('extra_browser_'):
            (t, key) = prefer_fetch_backend.split('extra_browser_')
            connection = list(
                filter(lambda s: (s['browser_name'] == key), self.datastore.data['settings']['requests'].get('extra_browsers', [])))
            if connection:
                prefer_fetch_backend = 'base_html_playwright'
                browser_connection_url = connection[0].get('browser_connection_url')


        # Grab the right kind of 'fetcher', (playwright, requests, etc)
        if hasattr(content_fetcher, prefer_fetch_backend):
            fetcher_obj = getattr(content_fetcher, prefer_fetch_backend)
        else:
            # If the klass doesnt exist, just use a default
            fetcher_obj = getattr(content_fetcher, "html_requests")


        proxy_url = None
        if preferred_proxy_id:
            proxy_url = self.datastore.proxy_list.get(preferred_proxy_id).get('url')
            print(f"Using proxy Key: {preferred_proxy_id} as Proxy URL {proxy_url}")

        # Now call the fetcher (playwright/requests/etc) with arguments that only a fetcher would need.
        # When browser_connection_url is None, the method should default to working out the best defaults (os env vars etc)
        self.fetcher = fetcher_obj(proxy_override=proxy_url,
                                   browser_connection_url=browser_connection_url
                                   )

        if self.watch.has_browser_steps:
            self.fetcher.browser_steps = self.watch.get('browser_steps', [])
            self.fetcher.browser_steps_screenshot_path = os.path.join(self.datastore.datastore_path, self.watch.get('uuid'))

        # Tweak the base config with the per-watch ones
        request_headers = self.watch.get('headers', [])
        request_headers.update(self.datastore.get_all_base_headers())
        request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=self.watch.get('uuid')))

        # https://github.com/psf/requests/issues/4525
        # Requests doesnt yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot
        # do this by accident.
        if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']:
            request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '')

        timeout = self.datastore.data['settings']['requests'].get('timeout')

        request_body = self.watch.get('body')
        request_method = self.watch.get('method')
        ignore_status_codes = self.watch.get('ignore_status_codes', False)

        # Configurable per-watch or global extra delay before extracting text (for webDriver types)
        system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None)
        if self.watch.get('webdriver_delay'):
            self.fetcher.render_extract_delay = self.watch.get('webdriver_delay')
        elif system_webdriver_delay is not None:
            self.fetcher.render_extract_delay = system_webdriver_delay

        if self.watch.get('webdriver_js_execute_code') is not None and self.watch.get('webdriver_js_execute_code').strip():
            self.fetcher.webdriver_js_execute_code = self.watch.get('webdriver_js_execute_code')

        # Requests for PDF's, images etc should be passed the is_binary flag
        is_binary = self.watch.is_pdf

        # And here we go! call the right browser with browser-specific settings
        self.fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, self.watch.get('include_filters'),
                         is_binary=is_binary)

        #@todo .quit here could go on close object, so we can run JS if change-detected
        self.fetcher.quit()

    # After init, call run_changedetection() which will do the actual change-detection

    @abstractmethod
    def run_changedetection(self, uuid, skip_when_checksum_same=True):
    def run(self, uuid, skip_when_checksum_same=True, preferred_proxy=None):
        update_obj = {'last_notification_error': False, 'last_error': False}
        some_data = 'xxxxx'
        update_obj["previous_md5"] = hashlib.md5(some_data.encode('utf-8')).hexdigest()
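The refactor splits fetching from diffing: the constructor binds the watch, `call_browser()` resolves backend/proxy/browser and fetches, and `run_changedetection()` does the comparison. A hedged usage sketch of how a worker would drive it (import path per this repository's processors package; `datastore` and `watch_uuid` assumed to exist):

```python
from changedetectionio.processors import text_json_diff

# Fetch once, then run the change-detection over the fetched content.
update_handler = text_json_diff.perform_site_check(datastore=datastore,
                                                   watch_uuid=watch_uuid)
update_handler.call_browser()  # pick fetcher, proxy, browser_connection_url, then fetch
changed_detected, update_obj, contents = update_handler.run_changedetection(
    uuid=watch_uuid,
    skip_when_checksum_same=False)
```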
@@ -1,7 +1,10 @@

import hashlib
import os
import re
import urllib3
from . import difference_detection_processor
from changedetectionio import content_fetcher
from copy import deepcopy

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
@@ -19,7 +22,11 @@ class perform_site_check(difference_detection_processor):
    screenshot = None
    xpath_data = None

    def run_changedetection(self, uuid, skip_when_checksum_same=True):
    def __init__(self, *args, datastore, **kwargs):
        super().__init__(*args, **kwargs)
        self.datastore = datastore

    def run(self, uuid, skip_when_checksum_same=True):

        # DeepCopy so we can be sure we don't accidently change anything by reference
        watch = deepcopy(self.datastore.data['watching'].get(uuid))
@@ -27,24 +34,84 @@ class perform_site_check(difference_detection_processor):
        if not watch:
            raise Exception("Watch no longer exists.")

        # Protect against file:// access
        if re.search(r'^file', watch.get('url', ''), re.IGNORECASE) and not os.getenv('ALLOW_FILE_URI', False):
            raise Exception(
                "file:// type access is denied for security reasons."
            )

        # Unset any existing notification error
        update_obj = {'last_notification_error': False, 'last_error': False}

        self.screenshot = self.fetcher.screenshot
        self.xpath_data = self.fetcher.xpath_data
        request_headers = watch.get('headers', [])
        request_headers.update(self.datastore.get_all_base_headers())
        request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=uuid))

        # https://github.com/psf/requests/issues/4525
        # Requests doesnt yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot
        # do this by accident.
        if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']:
            request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '')

        timeout = self.datastore.data['settings']['requests'].get('timeout')

        url = watch.link

        request_body = self.datastore.data['watching'][uuid].get('body')
        request_method = self.datastore.data['watching'][uuid].get('method')
        ignore_status_codes = self.datastore.data['watching'][uuid].get('ignore_status_codes', False)

        # Pluggable content fetcher
        prefer_backend = watch.get_fetch_backend
        if not prefer_backend or prefer_backend == 'system':
            prefer_backend = self.datastore.data['settings']['application']['fetch_backend']

        if hasattr(content_fetcher, prefer_backend):
            klass = getattr(content_fetcher, prefer_backend)
        else:
            # If the klass doesnt exist, just use a default
            klass = getattr(content_fetcher, "html_requests")

        proxy_id = self.datastore.get_preferred_proxy_for_watch(uuid=uuid)
        proxy_url = None
        if proxy_id:
            proxy_url = self.datastore.proxy_list.get(proxy_id).get('url')
            print("UUID {} Using proxy {}".format(uuid, proxy_url))

        fetcher = klass(proxy_override=proxy_url)

        # Configurable per-watch or global extra delay before extracting text (for webDriver types)
        system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None)
        if watch['webdriver_delay'] is not None:
            fetcher.render_extract_delay = watch.get('webdriver_delay')
        elif system_webdriver_delay is not None:
            fetcher.render_extract_delay = system_webdriver_delay

        # Could be removed if requests/plaintext could also return some info?
        if prefer_backend != 'html_webdriver':
            raise Exception("Re-stock detection requires Chrome or compatible webdriver/playwright fetcher to work")

        if watch.get('webdriver_js_execute_code') is not None and watch.get('webdriver_js_execute_code').strip():
            fetcher.webdriver_js_execute_code = watch.get('webdriver_js_execute_code')

        fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, watch.get('include_filters'))
        fetcher.quit()

        self.screenshot = fetcher.screenshot
        self.xpath_data = fetcher.xpath_data

        # Track the content type
        update_obj['content_type'] = self.fetcher.headers.get('Content-Type', '')
        update_obj["last_check_status"] = self.fetcher.get_last_status_code()
        update_obj['content_type'] = fetcher.headers.get('Content-Type', '')
        update_obj["last_check_status"] = fetcher.get_last_status_code()

        # Main detection method
        fetched_md5 = None
        if self.fetcher.instock_data:
            fetched_md5 = hashlib.md5(self.fetcher.instock_data.encode('utf-8')).hexdigest()
        if fetcher.instock_data:
            fetched_md5 = hashlib.md5(fetcher.instock_data.encode('utf-8')).hexdigest()
            # 'Possibly in stock' comes from stock-not-in-stock.js when no string found above the fold.
            update_obj["in_stock"] = True if self.fetcher.instock_data == 'Possibly in stock' else False
            update_obj["in_stock"] = True if fetcher.instock_data == 'Possibly in stock' else False
        else:
            raise UnableToExtractRestockData(status_code=self.fetcher.status_code)
            raise UnableToExtractRestockData(status_code=fetcher.status_code)

        # The main thing that all this at the moment comes down to :)
        changed_detected = False
@@ -61,4 +128,4 @@ class perform_site_check(difference_detection_processor):
        # Always record the new checksum
        update_obj["previous_md5"] = fetched_md5

        return changed_detected, update_obj, self.fetcher.instock_data.encode('utf-8')
        return changed_detected, update_obj, fetcher.instock_data.encode('utf-8')
@@ -1,4 +1,4 @@
# HTML to TEXT/JSON DIFFERENCE self.fetcher
# HTML to TEXT/JSON DIFFERENCE FETCHER

import hashlib
import json
@@ -32,10 +32,15 @@ class PDFToHTMLToolNotFound(ValueError):
# Some common stuff here that can be moved to a base class
# (set_proxy_from_list)
class perform_site_check(difference_detection_processor):
    screenshot = None
    xpath_data = None

    def run_changedetection(self, uuid, skip_when_checksum_same=True):
    def __init__(self, *args, datastore, **kwargs):
        super().__init__(*args, **kwargs)
        self.datastore = datastore

    def run(self, uuid, skip_when_checksum_same=True, preferred_proxy=None):
        changed_detected = False
        html_content = ""
        screenshot = False # as bytes
        stripped_text_from_html = ""
@@ -44,25 +49,100 @@ class perform_site_check(difference_detection_processor):
        if not watch:
            raise Exception("Watch no longer exists.")

        # Protect against file:// access
        if re.search(r'^file', watch.get('url', ''), re.IGNORECASE) and not os.getenv('ALLOW_FILE_URI', False):
            raise Exception(
                "file:// type access is denied for security reasons."
            )

        # Unset any existing notification error
        update_obj = {'last_notification_error': False, 'last_error': False}

        # Tweak the base config with the per-watch ones
        request_headers = watch.get('headers', [])
        request_headers.update(self.datastore.get_all_base_headers())
        request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=uuid))

        # https://github.com/psf/requests/issues/4525
        # Requests doesnt yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot
        # do this by accident.
        if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']:
            request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '')

        timeout = self.datastore.data['settings']['requests'].get('timeout')

        url = watch.link

        self.screenshot = self.fetcher.screenshot
        self.xpath_data = self.fetcher.xpath_data
        request_body = self.datastore.data['watching'][uuid].get('body')
        request_method = self.datastore.data['watching'][uuid].get('method')
        ignore_status_codes = self.datastore.data['watching'][uuid].get('ignore_status_codes', False)

        # source: support
        is_source = False
        if url.startswith('source:'):
            url = url.replace('source:', '')
            is_source = True

        # Pluggable content fetcher
        prefer_backend = watch.get_fetch_backend
        if not prefer_backend or prefer_backend == 'system':
            prefer_backend = self.datastore.data['settings']['application']['fetch_backend']

        if hasattr(content_fetcher, prefer_backend):
            klass = getattr(content_fetcher, prefer_backend)
        else:
            # If the klass doesnt exist, just use a default
            klass = getattr(content_fetcher, "html_requests")

        if preferred_proxy:
            proxy_id = preferred_proxy
        else:
            proxy_id = self.datastore.get_preferred_proxy_for_watch(uuid=uuid)

        proxy_url = None
        if proxy_id:
            proxy_url = self.datastore.proxy_list.get(proxy_id).get('url')
            print("UUID {} Using proxy {}".format(uuid, proxy_url))

        fetcher = klass(proxy_override=proxy_url)

        # Configurable per-watch or global extra delay before extracting text (for webDriver types)
        system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None)
        if watch['webdriver_delay'] is not None:
            fetcher.render_extract_delay = watch.get('webdriver_delay')
        elif system_webdriver_delay is not None:
            fetcher.render_extract_delay = system_webdriver_delay

        # Possible conflict
        if prefer_backend == 'html_webdriver':
            fetcher.browser_steps = watch.get('browser_steps', None)
            fetcher.browser_steps_screenshot_path = os.path.join(self.datastore.datastore_path, uuid)

        if watch.get('webdriver_js_execute_code') is not None and watch.get('webdriver_js_execute_code').strip():
            fetcher.webdriver_js_execute_code = watch.get('webdriver_js_execute_code')

        # requests for PDF's, images etc should be passed the is_binary flag
        is_binary = watch.is_pdf

        fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, watch.get('include_filters'),
                    is_binary=is_binary)
        fetcher.quit()

        self.screenshot = fetcher.screenshot
        self.xpath_data = fetcher.xpath_data

        # Track the content type
        update_obj['content_type'] = self.fetcher.get_all_headers().get('content-type', '').lower()
        update_obj['content_type'] = fetcher.get_all_headers().get('content-type', '').lower()

        # Watches added automatically in the queue manager will skip if its the same checksum as the previous run
        # Saves a lot of CPU
        update_obj['previous_md5_before_filters'] = hashlib.md5(self.fetcher.content.encode('utf-8')).hexdigest()
        update_obj['previous_md5_before_filters'] = hashlib.md5(fetcher.content.encode('utf-8')).hexdigest()
        if skip_when_checksum_same:
            if update_obj['previous_md5_before_filters'] == watch.get('previous_md5_before_filters'):
                raise content_fetcher.checksumFromPreviousCheckWasTheSame()

        # Fetching complete, now filters
        # @todo move to class / maybe inside of fetcher abstract base?

        # @note: I feel like the following should be in a more obvious chain system
        # - Check filter text
@@ -71,24 +151,24 @@ class perform_site_check(difference_detection_processor):
        # https://stackoverflow.com/questions/41817578/basic-method-chaining ?
        # return content().textfilter().jsonextract().checksumcompare() ?

        is_json = 'application/json' in self.fetcher.get_all_headers().get('content-type', '').lower()
        is_json = 'application/json' in fetcher.get_all_headers().get('content-type', '').lower()
        is_html = not is_json
        is_rss = False

        ctype_header = self.fetcher.get_all_headers().get('content-type', '').lower()
        ctype_header = fetcher.get_all_headers().get('content-type', '').lower()
        # Go into RSS preprocess for converting CDATA/comment to usable text
        if any(substring in ctype_header for substring in ['application/xml', 'application/rss', 'text/xml']):
            if '<rss' in self.fetcher.content[:100].lower():
                self.fetcher.content = cdata_in_document_to_text(html_content=self.fetcher.content)
            if '<rss' in fetcher.content[:100].lower():
                fetcher.content = cdata_in_document_to_text(html_content=fetcher.content)
                is_rss = True

        # source: support, basically treat it as plaintext
        if watch.is_source_type_url:
        if is_source:
            is_html = False
            is_json = False

        inline_pdf = self.fetcher.get_all_headers().get('content-disposition', '') and '%PDF-1' in self.fetcher.content[:10]
        if watch.is_pdf or 'application/pdf' in self.fetcher.get_all_headers().get('content-type', '').lower() or inline_pdf:
        inline_pdf = fetcher.get_all_headers().get('content-disposition', '') and '%PDF-1' in fetcher.content[:10]
        if watch.is_pdf or 'application/pdf' in fetcher.get_all_headers().get('content-type', '').lower() or inline_pdf:
            from shutil import which
            tool = os.getenv("PDF_TO_HTML_TOOL", "pdftohtml")
            if not which(tool):
@@ -99,18 +179,18 @@ class perform_site_check(difference_detection_processor):
                [tool, '-stdout', '-', '-s', 'out.pdf', '-i'],
                stdout=subprocess.PIPE,
                stdin=subprocess.PIPE)
            proc.stdin.write(self.fetcher.raw_content)
            proc.stdin.write(fetcher.raw_content)
            proc.stdin.close()
            self.fetcher.content = proc.stdout.read().decode('utf-8')
            fetcher.content = proc.stdout.read().decode('utf-8')
            proc.wait(timeout=60)

            # Add a little metadata so we know if the file changes (like if an image changes, but the text is the same
            # @todo may cause problems with non-UTF8?
            metadata = "<p>Added by changedetection.io: Document checksum - {} Filesize - {} bytes</p>".format(
                hashlib.md5(self.fetcher.raw_content).hexdigest().upper(),
                len(self.fetcher.content))
                hashlib.md5(fetcher.raw_content).hexdigest().upper(),
                len(fetcher.content))

            self.fetcher.content = self.fetcher.content.replace('</body>', metadata + '</body>')
            fetcher.content = fetcher.content.replace('</body>', metadata + '</body>')

        # Better would be if Watch.model could access the global data also
        # and then use getattr https://docs.python.org/3/reference/datamodel.html#object.__getitem__
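The PDF branch shells out to `pdftohtml`, streaming the raw bytes through stdin/stdout so nothing touches disk. A self-contained sketch of the same pipeline (argument list copied from the hunk above; `-stdout` writes HTML to standard output, `-i` ignores images):

```python
import subprocess
from shutil import which

def pdf_bytes_to_html(raw_pdf_bytes, tool='pdftohtml'):
    if not which(tool):
        raise RuntimeError(f"{tool} is not installed (e.g. apt-get install poppler-utils)")
    proc = subprocess.Popen([tool, '-stdout', '-', '-s', 'out.pdf', '-i'],
                            stdout=subprocess.PIPE,
                            stdin=subprocess.PIPE)
    proc.stdin.write(raw_pdf_bytes)  # feed the fetched PDF on stdin
    proc.stdin.close()
    html = proc.stdout.read().decode('utf-8')
    proc.wait(timeout=60)
    return html
```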
@@ -137,7 +217,7 @@ class perform_site_check(difference_detection_processor):
        if is_json:
            # Sort the JSON so we dont get false alerts when the content is just re-ordered
            try:
                self.fetcher.content = json.dumps(json.loads(self.fetcher.content), sort_keys=True)
                fetcher.content = json.dumps(json.loads(fetcher.content), sort_keys=True)
            except Exception as e:
                # Might have just been a snippet, or otherwise bad JSON, continue
                pass
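Normalizing with `sort_keys=True` makes key order irrelevant, so a server that merely re-orders its JSON doesn't trigger a false change:

```python
import json

a = '{"price": 10, "name": "widget"}'
b = '{"name": "widget", "price": 10}'

norm = lambda s: json.dumps(json.loads(s), sort_keys=True)
print(norm(a) == norm(b))  # True -> no change detected for re-ordered keys
```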
@@ -145,22 +225,22 @@ class perform_site_check(difference_detection_processor):
        if has_filter_rule:
            for filter in include_filters_rule:
                if any(prefix in filter for prefix in json_filter_prefixes):
                    stripped_text_from_html += html_tools.extract_json_as_string(content=self.fetcher.content, json_filter=filter)
                    stripped_text_from_html += html_tools.extract_json_as_string(content=fetcher.content, json_filter=filter)
                    is_html = False

        if is_html or watch.is_source_type_url:
        if is_html or is_source:

            # CSS Filter, extract the HTML that matches and feed that into the existing inscriptis::get_text
            self.fetcher.content = html_tools.workarounds_for_obfuscations(self.fetcher.content)
            html_content = self.fetcher.content
            fetcher.content = html_tools.workarounds_for_obfuscations(fetcher.content)
            html_content = fetcher.content

            # If not JSON, and if it's not text/plain..
            if 'text/plain' in self.fetcher.get_all_headers().get('content-type', '').lower():
            if 'text/plain' in fetcher.get_all_headers().get('content-type', '').lower():
                # Don't run get_text or xpath/css filters on plaintext
                stripped_text_from_html = html_content
            else:
                # Does it have some ld+json price data? used for easier monitoring
                update_obj['has_ldjson_price_data'] = html_tools.has_ldjson_product_info(self.fetcher.content)
                update_obj['has_ldjson_price_data'] = html_tools.has_ldjson_product_info(fetcher.content)

                # Then we assume HTML
                if has_filter_rule:
@@ -170,19 +250,14 @@ class perform_site_check(difference_detection_processor):
                    # For HTML/XML we offer xpath as an option, just start a regular xPath "/.."
                    if filter_rule[0] == '/' or filter_rule.startswith('xpath:'):
                        html_content += html_tools.xpath_filter(xpath_filter=filter_rule.replace('xpath:', ''),
                                                                html_content=self.fetcher.content,
                                                                append_pretty_line_formatting=not watch.is_source_type_url,
                                                                is_rss=is_rss)
                    elif filter_rule.startswith('xpath1:'):
                        html_content += html_tools.xpath1_filter(xpath_filter=filter_rule.replace('xpath1:', ''),
                                                                 html_content=self.fetcher.content,
                                                                 append_pretty_line_formatting=not watch.is_source_type_url,
                                                                 html_content=fetcher.content,
                                                                 append_pretty_line_formatting=not is_source,
                                                                 is_rss=is_rss)
                    else:
                        # CSS Filter, extract the HTML that matches and feed that into the existing inscriptis::get_text
                        html_content += html_tools.include_filters(include_filters=filter_rule,
                                                                   html_content=self.fetcher.content,
                                                                   append_pretty_line_formatting=not watch.is_source_type_url)
                                                                   html_content=fetcher.content,
                                                                   append_pretty_line_formatting=not is_source)

                if not html_content.strip():
                    raise FilterNotFoundInResponse(include_filters_rule)
@@ -190,7 +265,7 @@ class perform_site_check(difference_detection_processor):
            if has_subtractive_selectors:
                html_content = html_tools.element_removal(subtractive_selectors, html_content)

            if watch.is_source_type_url:
            if is_source:
                stripped_text_from_html = html_content
            else:
                # extract text
@@ -236,7 +311,7 @@ class perform_site_check(difference_detection_processor):
        empty_pages_are_a_change = self.datastore.data['settings']['application'].get('empty_pages_are_a_change', False)
        if not is_json and not empty_pages_are_a_change and len(stripped_text_from_html.strip()) == 0:
            raise content_fetcher.ReplyWithContentButNoText(url=url,
                                                            status_code=self.fetcher.get_last_status_code(),
                                                            status_code=fetcher.get_last_status_code(),
                                                            screenshot=screenshot,
                                                            has_filters=has_filter_rule,
                                                            html_content=html_content
@@ -245,7 +320,7 @@ class perform_site_check(difference_detection_processor):
        # We rely on the actual text in the html output.. many sites have random script vars etc,
        # in the future we'll implement other mechanisms.

        update_obj["last_check_status"] = self.fetcher.get_last_status_code()
        update_obj["last_check_status"] = fetcher.get_last_status_code()

        # If there's text to skip
        # @todo we could abstract out the get_text() to handle this cleaner
@@ -333,7 +408,7 @@ class perform_site_check(difference_detection_processor):
        if is_html:
            if self.datastore.data['settings']['application'].get('extract_title_as_title') or watch['extract_title_as_title']:
                if not watch['title'] or not len(watch['title']):
                    update_obj['title'] = html_tools.extract_element(find='title', html_content=self.fetcher.content)
                    update_obj['title'] = html_tools.extract_element(find='title', html_content=fetcher.content)

        if changed_detected:
            if watch.get('check_unique_lines', False):
@@ -170,12 +170,9 @@ if (include_filters.length) {

        try {
            // is it xpath?
            if (f.startsWith('/') || f.startsWith('xpath')) {
                var qry_f = f.replace(/xpath(:|\d:)/, '')
                console.log("[xpath] Scanning for included filter " + qry_f)
                q = document.evaluate(qry_f, document, null, XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue;
            if (f.startsWith('/') || f.startsWith('xpath:')) {
                q = document.evaluate(f.replace('xpath:', ''), document, null, XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue;
            } else {
                console.log("[css] Scanning for included filter " + f)
                q = document.querySelector(f);
            }
        } catch (e) {
@@ -185,18 +182,8 @@ if (include_filters.length) {
        }

        if (q) {
            // Try to resolve //something/text() back to its /something so we can atleast get the bounding box
            try {
                if (typeof q.nodeName == 'string' && q.nodeName === '#text') {
                    q = q.parentElement
                }
            } catch (e) {
                console.log(e)
                console.log("xpath_element_scraper: #text resolver")
            }

            // #1231 - IN the case XPath attribute filter is applied, we will have to traverse up and find the element.
            if (typeof q.getBoundingClientRect == 'function') {
            if (q.hasOwnProperty('getBoundingClientRect')) {
                bbox = q.getBoundingClientRect();
                console.log("xpath_element_scraper: Got filter element, scroll from top was " + scroll_y)
            } else {
@@ -205,8 +192,7 @@ if (include_filters.length) {
                    bbox = q.ownerElement.getBoundingClientRect();
                    console.log("xpath_element_scraper: Got filter by ownerElement element, scroll from top was " + scroll_y)
                } catch (e) {
                    console.log(e)
                    console.log("xpath_element_scraper: error looking up q.ownerElement")
                    console.log("xpath_element_scraper: error looking up ownerElement")
                }
            }
        }
@@ -1,44 +0,0 @@
#!/bin/bash

# run some tests and look if the 'custom-browser-search-string=1' connect string appeared in the correct containers

# enable debug
set -x

# An extra browser is configured, but we never chose to use it, so it should NOT show in the logs
docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/custom_browser_url/test_custom_browser_url.py::test_request_not_via_custom_browser_url'
docker logs browserless-custom-url &>log.txt
grep 'custom-browser-search-string=1' log.txt
if [ $? -ne 1 ]
then
echo "Saw a request in 'browserless-custom-url' container with 'custom-browser-search-string=1' when I should not"
exit 1
fi

docker logs browserless &>log.txt
grep 'custom-browser-search-string=1' log.txt
if [ $? -ne 1 ]
then
echo "Saw a request in 'browserless' container with 'custom-browser-search-string=1' when I should not"
exit 1
fi

# Special connect string should appear in the custom-url container, but not in the 'default' one
docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/custom_browser_url/test_custom_browser_url.py::test_request_via_custom_browser_url'
docker logs browserless-custom-url &>log.txt
grep 'custom-browser-search-string=1' log.txt
if [ $? -ne 0 ]
then
echo "Did not see request in 'browserless-custom-url' container with 'custom-browser-search-string=1' when I should"
exit 1
fi

docker logs browserless &>log.txt
grep 'custom-browser-search-string=1' log.txt
if [ $? -ne 1 ]
then
echo "Saw a request in 'browserless' container with 'custom-browser-search-string=1' when I should not"
exit 1
fi
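The log-grep pattern this deleted script relied on condenses to one small helper. The sketch below is illustrative only (the assert_marker_in_container_logs helper is hypothetical, not part of the repository); it assumes the docker CLI is on PATH and the container names from the workflow above.

import subprocess

def assert_marker_in_container_logs(container, marker, expected):
    # Mirror `docker logs <container> &>log.txt; grep <marker> log.txt`:
    # docker writes logs to both stdout and stderr, so check both streams.
    proc = subprocess.run(["docker", "logs", container], capture_output=True, text=True)
    found = marker in (proc.stdout + proc.stderr)
    assert found == expected, f"{marker!r} in {container} logs: {found}, expected {expected}"

# Usage mirroring the script's four checks:
# assert_marker_in_container_logs("browserless-custom-url", "custom-browser-search-string=1", True)
# assert_marker_in_container_logs("browserless", "custom-browser-search-string=1", False)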
@@ -149,7 +149,7 @@ $(document).ready(function () {
// @todo In the future paint all that match
for (const c of current_default_xpath) {
for (var i = selector_data['size_pos'].length; i !== 0; i--) {
if (selector_data['size_pos'][i - 1].xpath.trim() === c.trim()) {
if (selector_data['size_pos'][i - 1].xpath === c) {
console.log("highlighting " + c);
current_selected_i = i - 1;
highlight_current_selected_i();
@@ -1,24 +0,0 @@
ul#requests-extra_browsers {
list-style: none;
/* tidy up the table to look more "inline" */
li {
> label {
display: none;
}
}

/* each proxy entry is a `table` */
table {
tr {
display: inline;
}
}
}

#extra-browsers-setting {
border: 1px solid var(--color-grey-800);
border-radius: 4px;
margin: 1em;
padding: 1em;
}
@@ -60,10 +60,3 @@ body.proxy-check-active {

padding-bottom: 1em;
}

#extra-proxies-setting {
border: 1px solid var(--color-grey-800);
border-radius: 4px;
margin: 1em;
padding: 1em;
}

@@ -5,7 +5,6 @@
@import "parts/_arrows";
@import "parts/_browser-steps";
@import "parts/_extra_proxies";
@import "parts/_extra_browsers";
@import "parts/_pagination";
@import "parts/_spinners";
@import "parts/_variables";
@@ -16,7 +15,6 @@
body {
color: var(--color-text);
background: var(--color-background-page);
font-family: Helvetica Neue, Helvetica, Lucida Grande, Arial, Ubuntu, Cantarell, Fira Sans, sans-serif;
}

.visually-hidden {
@@ -128,27 +128,6 @@ body.proxy-check-active #request .proxy-timing {
border-radius: 4px;
padding: 1em; }

#extra-proxies-setting {
border: 1px solid var(--color-grey-800);
border-radius: 4px;
margin: 1em;
padding: 1em; }

ul#requests-extra_browsers {
list-style: none;
/* tidy up the table to look more "inline" */
/* each proxy entry is a `table` */ }
ul#requests-extra_browsers li > label {
display: none; }
ul#requests-extra_browsers table tr {
display: inline; }

#extra-browsers-setting {
border: 1px solid var(--color-grey-800);
border-radius: 4px;
margin: 1em;
padding: 1em; }

.pagination-page-info {
color: #fff;
font-size: 0.85rem;
@@ -411,8 +390,7 @@ html[data-darkmode="true"] #toggle-light-mode .icon-dark {

body {
color: var(--color-text);
background: var(--color-background-page);
font-family: Helvetica Neue, Helvetica, Lucida Grande, Arial, Ubuntu, Cantarell, Fira Sans, sans-serif; }
background: var(--color-background-page); }

.visually-hidden {
clip: rect(0 0 0 0);
@@ -633,18 +633,6 @@ class ChangeDetectionStore:

return {}

@property
def extra_browsers(self):
res = []
p = list(filter(
lambda s: (s.get('browser_name') and s.get('browser_connection_url')),
self.__data['settings']['requests'].get('extra_browsers', [])))
if p:
for i in p:
res.append(("extra_browser_"+i['browser_name'], i['browser_name']))

return res

def tag_exists_by_name(self, tag_name):
return any(v.get('title', '').lower() == tag_name.lower() for k, v in self.__data['settings']['application']['tags'].items())
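The extra_browsers property feeds the fetch-backend picker: each complete entry becomes an ("extra_browser_<name>", <name>) tuple, which is why templates can later test for an "extra_browser_" prefix in watch.get_fetch_backend. A minimal illustration of the mapping, with made-up entry data:

# Illustrative only: made-up settings entries in the same shape the property filters on
entries = [
    {'browser_name': 'custom browser URL', 'browser_connection_url': 'ws://example:3000'},
    {'browser_name': 'incomplete entry'},  # dropped: no browser_connection_url
]
res = [("extra_browser_" + e['browser_name'], e['browser_name'])
       for e in entries if e.get('browser_name') and e.get('browser_connection_url')]
# res == [('extra_browser_custom browser URL', 'custom browser URL')]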
@@ -847,14 +835,4 @@ class ChangeDetectionStore:
if not watch.get('date_created'):
self.data['watching'][uuid]['date_created'] = i
i+=1
return

# #1774 - protect xpath1 against migration
def update_14(self):
for awatch in self.__data["watching"]:
if self.__data["watching"][awatch]['include_filters']:
for num, selector in enumerate(self.__data["watching"][awatch]['include_filters']):
if selector.startswith('/'):
self.__data["watching"][awatch]['include_filters'][num] = 'xpath1:' + selector
if selector.startswith('xpath:'):
self.__data["watching"][awatch]['include_filters'][num] = selector.replace('xpath:', 'xpath1:', 1)
return
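update_14 keeps pre-existing watches on the XPath 1.0 engine: bare /... selectors gain an explicit xpath1: prefix, and xpath: becomes xpath1: (the count argument of 1 to replace() guards against touching a later occurrence inside the expression). A quick sketch of the before/after on illustrative selectors:

# Sketch of update_14's rewrite rules (example data, not store contents)
for selector in ['//div[@id="x"]', 'xpath://a', 'css:.price']:
    if selector.startswith('/'):
        selector = 'xpath1:' + selector
    elif selector.startswith('xpath:'):
        selector = selector.replace('xpath:', 'xpath1:', 1)
    print(selector)
# -> xpath1://div[@id="x"], xpath1://a, css:.price (non-XPath filters untouched)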
@@ -8,10 +8,10 @@
<title>Change Detection{{extra_title}}</title>
<link rel="alternate" type="application/rss+xml" title="Changedetection.io » Feed{% if active_tag %}- {{active_tag}}{% endif %}" href="{{ url_for('rss', tag=active_tag , token=app_rss_token)}}" >
<link rel="stylesheet" href="{{url_for('static_content', group='styles', filename='pure-min.css')}}" >
<link rel="stylesheet" href="{{url_for('static_content', group='styles', filename='styles.css')}}?v={{ get_css_version() }}" >
<link rel="stylesheet" href="{{url_for('static_content', group='styles', filename='styles.css')}}" >
{% if extra_stylesheets %}
{% for m in extra_stylesheets %}
<link rel="stylesheet" href="{{ m }}?ver={{ get_css_version() }}" >
<link rel="stylesheet" href="{{ m }}?ver=1000" >
{% endfor %}
{% endif %}
@@ -290,12 +290,11 @@ xpath://body/div/span[contains(@class, 'example-class')]",
{% endif %}
</ul>
</li>
<li>XPath - Limit text to this XPath rule, simply start with a forward-slash. To specify XPath to be used explicitly or the XPath rule starts with an XPath function: Prefix with <code>xpath:</code>
<li>XPath - Limit text to this XPath rule, simply start with a forward-slash,
<ul>
<li>Example: <code>//*[contains(@class, 'sametext')]</code> or <code>xpath:count(//*[contains(@class, 'sametext')])</code>, <a
<li>Example: <code>//*[contains(@class, 'sametext')]</code> or <code>xpath://*[contains(@class, 'sametext')]</code>, <a
href="http://xpather.com/" target="new">test your XPath here</a></li>
<li>Example: Get all titles from an RSS feed <code>//title/text()</code></li>
<li>To use XPath1.0: Prefix with <code>xpath1:</code></li>
</ul>
</li>
</ul>
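The prefix forms in the help text above (bare /, xpath:, xpath1:) select which filter engine handles the rule, the routing that the xpath test files later in this diff exercise through html_tools.xpath_filter. A minimal sketch with made-up HTML (import path matches the test suite's package layout):

from changedetectionio import html_tools

html = "<html><body><div class='sametext'>Some text</div><div>x</div></body></html>"
# With the 'xpath:' prefix stripped off, function-style expressions such as count()
# are evaluated by the XPath 2.0-3.1 engine, so they are valid include_filters.
out = html_tools.xpath_filter("count(//div)", html, append_pretty_line_formatting=True)
# out is expected to contain "2"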
@@ -178,9 +178,6 @@ nav
<span style="display:none;" id="api-key-copy" >copy</span>
</div>
</div>
<div class="pure-control-group">
<a href="{{url_for('settings_reset_api_key')}}" class="pure-button button-small button-cancel">Regenerate API key</a>
</div>
</div>
<div class="tab-pane-inner" id="proxies">
<div id="recommended-proxy">
@@ -230,15 +227,11 @@ nav
</p>
<p><strong>Tip</strong>: "Residential" and "Mobile" proxy types can be more successful than "Data Center" for blocked websites.

<div class="pure-control-group" id="extra-proxies-setting">
<div class="pure-control-group">
{{ render_field(form.requests.form.extra_proxies) }}
<span class="pure-form-message-inline">"Name" will be used for selecting the proxy in the Watch Edit settings</span><br>
<span class="pure-form-message-inline">SOCKS5 proxies with authentication are only supported with the 'plain requests' fetcher, for other fetchers you should whitelist the IP access instead</span>
</div>
<div class="pure-control-group" id="extra-browsers-setting">
<span class="pure-form-message-inline"><i>Extra Browsers</i> allow changedetection.io to communicate with a different web-browser.</span><br>
{{ render_field(form.requests.form.extra_browsers) }}
</div>
</div>
<div id="actions">
<div class="pure-control-group">
@@ -82,15 +82,12 @@
</tr>
{% endif %}
{% for watch in (watches|sort(attribute=sort_attribute, reverse=sort_order == 'asc'))|pagination_slice(skip=pagination.skip) %}

{% set is_unviewed = watch.newest_history_key| int > watch.last_viewed and watch.history_n>=2 %}

<tr id="{{ watch.uuid }}"
class="{{ loop.cycle('pure-table-odd', 'pure-table-even') }} processor-{{ watch['processor'] }}
{% if watch.last_error is defined and watch.last_error != False %}error{% endif %}
{% if watch.last_notification_error is defined and watch.last_notification_error != False %}error{% endif %}
{% if watch.paused is defined and watch.paused != False %}paused{% endif %}
{% if is_unviewed %}unviewed{% endif %}
{% if watch.newest_history_key| int > watch.last_viewed and watch.history_n>=2 %}unviewed{% endif %}
{% if watch.uuid in queued_uuids %}queued{% endif %}">
<td class="inline checkbox-uuid" ><input name="uuids" type="checkbox" value="{{ watch.uuid}} " > <span>{{ loop.index+pagination.skip }}</span></td>
<td class="inline watch-controls">
@@ -107,9 +104,8 @@

{% if watch.get_fetch_backend == "html_webdriver"
or ( watch.get_fetch_backend == "system" and system_default_fetcher == 'html_webdriver' )
or "extra_browser_" in watch.get_fetch_backend
%}
<img class="status-icon" src="{{url_for('static_content', group='images', filename='Google-Chrome-icon.png')}}" title="Using a Chrome browser" >
<img class="status-icon" src="{{url_for('static_content', group='images', filename='Google-Chrome-icon.png')}}" title="Using a chrome browser" >
{% endif %}

{%if watch.is_pdf %}<img class="status-icon" src="{{url_for('static_content', group='images', filename='pdf-icon.svg')}}" title="Converting PDF to text" >{% endif %}
@@ -170,13 +166,7 @@
class="recheck pure-button pure-button-primary">{% if watch.uuid in queued_uuids %}Queued{% else %}Recheck{% endif %}</a>
<a href="{{ url_for('edit_page', uuid=watch.uuid)}}" class="pure-button pure-button-primary">Edit</a>
{% if watch.history_n >= 2 %}

{% if is_unviewed %}
<a href="{{ url_for('diff_history_page', uuid=watch.uuid, from_version=watch.get_next_snapshot_key_to_last_viewed) }}" target="{{watch.uuid}}" class="pure-button pure-button-primary diff-link">Diff</a>
{% else %}
<a href="{{ url_for('diff_history_page', uuid=watch.uuid)}}" target="{{watch.uuid}}" class="pure-button pure-button-primary diff-link">Diff</a>
{% endif %}

<a href="{{ url_for('diff_history_page', uuid=watch.uuid) }}" target="{{watch.uuid}}" class="pure-button pure-button-primary diff-link">Diff</a>
{% else %}
{% if watch.history_n == 1 or (watch.history_n ==0 and watch.error_text_ctime )%}
<a href="{{ url_for('preview_page', uuid=watch.uuid)}}" target="{{watch.uuid}}" class="pure-button pure-button-primary">Preview</a>
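The is_unviewed flag introduced in this hunk is the old inline condition hoisted into one {% set %}, so the row's class and the Diff link can no longer drift apart. The same test expressed in Python, with made-up values:

# Equivalent of the template's is_unviewed flag (values are illustrative)
newest_history_key, last_viewed, history_n = 117, 110, 6
is_unviewed = int(newest_history_key) > last_viewed and history_n >= 2  # True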
@@ -1 +0,0 @@
# placeholder
@@ -1,89 +0,0 @@
# !/usr/bin/python3
import os

from flask import url_for
from ..util import live_server_setup, wait_for_all_checks

def do_test(client, live_server, make_test_use_extra_browser=False):

# Grep for this string in the logs?
test_url = f"https://changedetection.io/ci-test.html"
custom_browser_name = 'custom browser URL'

# needs to be set and something like 'ws://127.0.0.1:3000?stealth=1&--disable-web-security=true'
assert os.getenv('PLAYWRIGHT_DRIVER_URL'), "Needs PLAYWRIGHT_DRIVER_URL set for this test"

#####################
res = client.post(
url_for("settings_page"),
data={"application-empty_pages_are_a_change": "",
"requests-time_between_check-minutes": 180,
'application-fetch_backend': "html_webdriver",
# browserless-custom-url is setup in .github/workflows/test-only.yml
# the test script run_custom_browser_url_test.sh will look for 'custom-browser-search-string' in the container logs
'requests-extra_browsers-0-browser_connection_url': 'ws://browserless-custom-url:3000?stealth=1&--disable-web-security=true&custom-browser-search-string=1',
'requests-extra_browsers-0-browser_name': custom_browser_name
},
follow_redirects=True
)

assert b"Settings updated." in res.data

# Add our URL to the import page
res = client.post(
url_for("import_page"),
data={"urls": test_url},
follow_redirects=True
)

assert b"1 Imported" in res.data
wait_for_all_checks(client)

if make_test_use_extra_browser:

# So the name should appear in the edit page under "Request" > "Fetch Method"
res = client.get(
url_for("edit_page", uuid="first"),
follow_redirects=True
)
assert b'custom browser URL' in res.data

res = client.post(
url_for("edit_page", uuid="first"),
data={
"url": test_url,
"tags": "",
"headers": "",
'fetch_backend': f"extra_browser_{custom_browser_name}",
'webdriver_js_execute_code': ''
},
follow_redirects=True
)

assert b"Updated watch." in res.data
wait_for_all_checks(client)

# Force recheck
res = client.get(url_for("form_watch_checknow"), follow_redirects=True)
assert b'1 watches queued for rechecking.' in res.data

wait_for_all_checks(client)

res = client.get(
url_for("preview_page", uuid="first"),
follow_redirects=True
)
assert b'cool it works' in res.data


# Requires playwright to be installed
def test_request_via_custom_browser_url(client, live_server):
live_server_setup(live_server)
# We do this so we can grep the logs of the custom container and see if the request actually went through that container
do_test(client, live_server, make_test_use_extra_browser=True)


def test_request_not_via_custom_browser_url(client, live_server):
live_server_setup(live_server)
# We do this so we can grep the logs of the custom container and see if the request actually went through that container
do_test(client, live_server, make_test_use_extra_browser=False)
Binary file not shown.
@@ -24,7 +24,7 @@ def test_check_extract_text_from_diff(client, live_server):
)

assert b"1 Imported" in res.data
wait_for_all_checks(client)
time.sleep(1)

# Load in 5 different numbers/changes
last_date=""
@@ -227,6 +227,9 @@ def test_regex_error_handling(client, live_server):
follow_redirects=True
)

with open('/tmp/fuck.html', 'wb') as f:
f.write(res.data)

assert b'is not a valid regular expression.' in res.data

res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
@@ -33,6 +33,8 @@ def test_strip_regex_text_func():
"/not"
]

fetcher = fetch_site_status.perform_site_check(datastore=False)
stripped_content = html_tools.strip_ignore_text(test_content, ignore_lines)

assert b"but 1 lines" in stripped_content

@@ -24,6 +24,7 @@ def test_strip_text_func():

ignore_lines = ["sometimes"]

fetcher = fetch_site_status.perform_site_check(datastore=False)
stripped_content = html_tools.strip_ignore_text(test_content, ignore_lines)

assert b"sometimes" not in stripped_content
@@ -2,8 +2,9 @@

import time
from flask import url_for
from .util import set_original_response, set_modified_response, live_server_setup, wait_for_all_checks
from .util import set_original_response, set_modified_response, live_server_setup

sleep_time_for_fetch_thread = 3

# `subtractive_selectors` should still work in `source:` type requests
def test_fetch_pdf(client, live_server):
@@ -21,9 +22,7 @@ def test_fetch_pdf(client, live_server):

assert b"1 Imported" in res.data

wait_for_all_checks(client)

time.sleep(sleep_time_for_fetch_thread)
res = client.get(
url_for("preview_page", uuid="first"),
follow_redirects=True
@@ -34,42 +33,8 @@ def test_fetch_pdf(client, live_server):

# So we know if the file changes in other ways
import hashlib
original_md5 = hashlib.md5(open("test-datastore/endpoint-test.pdf", 'rb').read()).hexdigest().upper()
md5 = hashlib.md5(open("test-datastore/endpoint-test.pdf", 'rb').read()).hexdigest().upper()
# We should have one
assert len(original_md5) >0
assert len(md5) >0
# And it's going to be in the document
assert b'Document checksum - '+bytes(str(original_md5).encode('utf-8')) in res.data

shutil.copy("tests/test2.pdf", "test-datastore/endpoint-test.pdf")
changed_md5 = hashlib.md5(open("test-datastore/endpoint-test.pdf", 'rb').read()).hexdigest().upper()
res = client.get(url_for("form_watch_checknow"), follow_redirects=True)
assert b'1 watches queued for rechecking.' in res.data

wait_for_all_checks(client)

# Now something should be ready, indicated by having a 'unviewed' class
res = client.get(url_for("index"))
assert b'unviewed' in res.data

# The original checksum should not be here anymore (cdio adds it to the bottom of the text)
res = client.get(
url_for("preview_page", uuid="first"),
follow_redirects=True
)

assert original_md5.encode('utf-8') not in res.data
assert changed_md5.encode('utf-8') in res.data

res = client.get(
url_for("diff_history_page", uuid="first"),
follow_redirects=True
)

assert original_md5.encode('utf-8') in res.data
assert changed_md5.encode('utf-8') in res.data

assert b'here is a change' in res.data

assert b'Document checksum - '+bytes(str(md5).encode('utf-8')) in res.data
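The test pins PDF change detection on the file's MD5, which the application appends to the extracted text as a "Document checksum - ..." line, so a byte-level change is caught even when the extracted text happens to stay identical. A minimal sketch of that fingerprinting idea (the path is illustrative):

import hashlib

def pdf_fingerprint(path):
    # Hash the raw bytes, matching the uppercase hexdigest the test compares against
    with open(path, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest().upper()

# before = pdf_fingerprint("test-datastore/endpoint-test.pdf")
# ...swap the file, recheck, then expect the new checksum in the preview text.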
@@ -80,11 +80,8 @@ def test_headers_in_request(client, live_server):

# Should be only one with headers set
assert watches_with_headers==1
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data

def test_body_in_request(client, live_server):

# Add our URL to the import page
test_url = url_for('test_body', _external=True)
if os.getenv('PLAYWRIGHT_DRIVER_URL'):
@@ -173,8 +170,7 @@ def test_body_in_request(client, live_server):
follow_redirects=True
)
assert b"Body must be empty when Request Method is set to GET" in res.data
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data


def test_method_in_request(client, live_server):
# Add our URL to the import page
@@ -1,5 +1,5 @@
from flask import url_for
from .util import set_original_response, set_modified_response, live_server_setup, wait_for_all_checks
from . util import set_original_response, set_modified_response, live_server_setup
import time


@@ -12,7 +12,6 @@ def test_bad_access(client, live_server):
)

assert b"1 Imported" in res.data
wait_for_all_checks(client)

# Attempt to add a body with a GET method
res = client.post(
@@ -60,7 +59,7 @@ def test_bad_access(client, live_server):
data={"url": 'file:///tasty/disk/drive', "tags": ''},
follow_redirects=True
)
wait_for_all_checks(client)
time.sleep(1)
res = client.get(url_for("index"))

assert b'file:// type access is denied for security reasons.' in res.data
@@ -6,11 +6,9 @@ from .util import live_server_setup, wait_for_all_checks

from ..html_tools import *


def test_setup(live_server):
live_server_setup(live_server)


def set_original_response():
test_return_data = """<html>
<body>
@@ -28,7 +26,6 @@ def set_original_response():
f.write(test_return_data)
return None


def set_modified_response():
test_return_data = """<html>
<body>
@@ -47,12 +44,11 @@ def set_modified_response():

return None


# Handle utf-8 charset replies https://github.com/dgtlmoon/changedetection.io/pull/613
def test_check_xpath_filter_utf8(client, live_server):
filter = '//item/*[self::description]'
filter='//item/*[self::description]'

d = '''<?xml version="1.0" encoding="UTF-8"?>
d='''<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd" xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0">
<channel>
<title>rpilocator.com</title>
@@ -106,9 +102,9 @@ def test_check_xpath_filter_utf8(client, live_server):

# Handle utf-8 charset replies https://github.com/dgtlmoon/changedetection.io/pull/613
def test_check_xpath_text_function_utf8(client, live_server):
filter = '//item/title/text()'
filter='//item/title/text()'

d = '''<?xml version="1.0" encoding="UTF-8"?>
d='''<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd" xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0">
<channel>
<title>rpilocator.com</title>
@@ -167,12 +163,15 @@ def test_check_xpath_text_function_utf8(client, live_server):
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data


def test_check_markup_xpath_filter_restriction(client, live_server):

xpath_filter = "//*[contains(@class, 'sametext')]"

set_original_response()

# Give the endpoint time to spin up
time.sleep(1)

# Add our URL to the import page
test_url = url_for('test_endpoint', _external=True)
res = client.post(
@@ -215,6 +214,7 @@ def test_check_markup_xpath_filter_restriction(client, live_server):


def test_xpath_validation(client, live_server):

# Add our URL to the import page
test_url = url_for('test_endpoint', _external=True)
res = client.post(
@@ -235,48 +235,6 @@ def test_xpath_validation(client, live_server):
assert b'Deleted' in res.data


def test_xpath23_prefix_validation(client, live_server):
# Add our URL to the import page
test_url = url_for('test_endpoint', _external=True)
res = client.post(
url_for("import_page"),
data={"urls": test_url},
follow_redirects=True
)
assert b"1 Imported" in res.data
wait_for_all_checks(client)

res = client.post(
url_for("edit_page", uuid="first"),
data={"include_filters": "xpath:/something horrible", "url": test_url, "tags": "", "headers": "", 'fetch_backend': "html_requests"},
follow_redirects=True
)
assert b"is not a valid XPath expression" in res.data
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data


def test_xpath1_validation(client, live_server):
# Add our URL to the import page
test_url = url_for('test_endpoint', _external=True)
res = client.post(
url_for("import_page"),
data={"urls": test_url},
follow_redirects=True
)
assert b"1 Imported" in res.data
wait_for_all_checks(client)

res = client.post(
url_for("edit_page", uuid="first"),
data={"include_filters": "xpath1:/something horrible", "url": test_url, "tags": "", "headers": "", 'fetch_backend': "html_requests"},
follow_redirects=True
)
assert b"is not a valid XPath expression" in res.data
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data


# actually only really used by the distill.io importer, but could be handy too
def test_check_with_prefix_include_filters(client, live_server):
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
@@ -296,8 +254,7 @@ def test_check_with_prefix_include_filters(client, live_server):

res = client.post(
url_for("edit_page", uuid="first"),
data={"include_filters": "xpath://*[contains(@class, 'sametext')]", "url": test_url, "tags": "", "headers": "",
'fetch_backend': "html_requests"},
data={"include_filters": "xpath://*[contains(@class, 'sametext')]", "url": test_url, "tags": "", "headers": "", 'fetch_backend': "html_requests"},
follow_redirects=True
)

@@ -309,15 +266,13 @@ def test_check_with_prefix_include_filters(client, live_server):
follow_redirects=True
)

assert b"Some text thats the same" in res.data # in selector
assert b"Some text that will change" not in res.data # not in selector
assert b"Some text thats the same" in res.data #in selector
assert b"Some text that will change" not in res.data #not in selector

client.get(url_for("form_delete", uuid="all"), follow_redirects=True)

def test_various_rules(client, live_server):
# Just check these don't error
# live_server_setup(live_server)
#live_server_setup(live_server)
with open("test-datastore/endpoint-content.txt", "w") as f:
f.write("""<html>
<body>
@@ -330,11 +285,10 @@ def test_various_rules(client, live_server):
<a href=''>some linky </a>
<a href=''>another some linky </a>
<!-- related to https://github.com/dgtlmoon/changedetection.io/pull/1774 -->
<input type="email" id="email" />
<input type="email" id="email" />
</body>
</html>
""")

test_url = url_for('test_endpoint', _external=True)
res = client.post(
url_for("import_page"),
@@ -344,6 +298,7 @@ def test_various_rules(client, live_server):
assert b"1 Imported" in res.data
wait_for_all_checks(client)

for r in ['//div', '//a', 'xpath://div', 'xpath://a']:
res = client.post(
url_for("edit_page", uuid="first"),
@@ -358,153 +313,3 @@ def test_various_rules(client, live_server):
assert b"Updated watch." in res.data
res = client.get(url_for("index"))
assert b'fetch-error' not in res.data, f"Should not see errors after '{r} filter"

res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data


def test_xpath_20(client, live_server):
test_url = url_for('test_endpoint', _external=True)
res = client.post(
url_for("import_page"),
data={"urls": test_url},
follow_redirects=True
)
assert b"1 Imported" in res.data
wait_for_all_checks(client)

set_original_response()

test_url = url_for('test_endpoint', _external=True)
res = client.post(
url_for("edit_page", uuid="first"),
data={"include_filters": "//*[contains(@class, 'sametext')]|//*[contains(@class, 'changetext')]",
"url": test_url,
"tags": "",
"headers": "",
'fetch_backend': "html_requests"},
follow_redirects=True
)

assert b"Updated watch." in res.data
wait_for_all_checks(client)

res = client.get(
url_for("preview_page", uuid="first"),
follow_redirects=True
)

assert b"Some text thats the same" in res.data # in selector
assert b"Some text that will change" in res.data # in selector

client.get(url_for("form_delete", uuid="all"), follow_redirects=True)


def test_xpath_20_function_count(client, live_server):
set_original_response()

# Add our URL to the import page
test_url = url_for('test_endpoint', _external=True)
res = client.post(
url_for("import_page"),
data={"urls": test_url},
follow_redirects=True
)
assert b"1 Imported" in res.data
wait_for_all_checks(client)

res = client.post(
url_for("edit_page", uuid="first"),
data={"include_filters": "xpath:count(//div) * 123456789987654321",
"url": test_url,
"tags": "",
"headers": "",
'fetch_backend': "html_requests"},
follow_redirects=True
)

assert b"Updated watch." in res.data
wait_for_all_checks(client)

res = client.get(
url_for("preview_page", uuid="first"),
follow_redirects=True
)

assert b"246913579975308642" in res.data # in selector

client.get(url_for("form_delete", uuid="all"), follow_redirects=True)


def test_xpath_20_function_count2(client, live_server):
set_original_response()

# Add our URL to the import page
test_url = url_for('test_endpoint', _external=True)
res = client.post(
url_for("import_page"),
data={"urls": test_url},
follow_redirects=True
)
assert b"1 Imported" in res.data
wait_for_all_checks(client)

res = client.post(
url_for("edit_page", uuid="first"),
data={"include_filters": "/html/body/count(div) * 123456789987654321",
"url": test_url,
"tags": "",
"headers": "",
'fetch_backend': "html_requests"},
follow_redirects=True
)

assert b"Updated watch." in res.data
wait_for_all_checks(client)

res = client.get(
url_for("preview_page", uuid="first"),
follow_redirects=True
)

assert b"246913579975308642" in res.data # in selector

client.get(url_for("form_delete", uuid="all"), follow_redirects=True)


def test_xpath_20_function_string_join_matches(client, live_server):
set_original_response()

# Add our URL to the import page
test_url = url_for('test_endpoint', _external=True)
res = client.post(
url_for("import_page"),
data={"urls": test_url},
follow_redirects=True
)
assert b"1 Imported" in res.data
wait_for_all_checks(client)

res = client.post(
url_for("edit_page", uuid="first"),
data={
"include_filters": "xpath:string-join(//*[contains(@class, 'sametext')]|//*[matches(@class, 'changetext')], 'specialconjunction')",
"url": test_url,
"tags": "",
"headers": "",
'fetch_backend': "html_requests"},
follow_redirects=True
)

assert b"Updated watch." in res.data
wait_for_all_checks(client)

res = client.get(
url_for("preview_page", uuid="first"),
follow_redirects=True
)

assert b"Some text thats the samespecialconjunctionSome text that will change" in res.data # in selector

client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
@@ -1,203 +0,0 @@
import sys
import os
import pytest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import html_tools

# test generation guide.
# 1. Do not include encoding in the xml declaration if the test object is a str type.
# 2. Always paraphrase test.

hotels = """
<hotel>
<branch location="California">
<staff>
<given_name>Christopher</given_name>
<surname>Anderson</surname>
<age>25</age>
</staff>
<staff>
<given_name>Christopher</given_name>
<surname>Carter</surname>
<age>30</age>
</staff>
</branch>
<branch location="Las Vegas">
<staff>
<given_name>Lisa</given_name>
<surname>Walker</surname>
<age>60</age>
</staff>
<staff>
<given_name>Jessica</given_name>
<surname>Walker</surname>
<age>32</age>
</staff>
<staff>
<given_name>Jennifer</given_name>
<surname>Roberts</surname>
<age>50</age>
</staff>
</branch>
</hotel>"""

@pytest.mark.parametrize("html_content", [hotels])
@pytest.mark.parametrize("xpath, answer", [('(//staff/given_name, //staff/age)', '25'),
("xs:date('2023-10-10')", '2023-10-10'),
("if (/hotel/branch[@location = 'California']/staff[1]/age = 25) then 'is 25' else 'is not 25'", 'is 25'),
("if (//hotel/branch[@location = 'California']/staff[1]/age = 25) then 'is 25' else 'is not 25'", 'is 25'),
("if (count(/hotel/branch/staff) = 5) then true() else false()", 'true'),
("if (count(//hotel/branch/staff) = 5) then true() else false()", 'true'),
("for $i in /hotel/branch/staff return if ($i/age >= 40) then upper-case($i/surname) else lower-case($i/surname)", 'anderson'),
("given_name = 'Christopher' and age = 40", 'false'),
("//given_name = 'Christopher' and //age = 40", 'false'),
#("(staff/given_name, staff/age)", 'Lisa'),
("(//staff/given_name, //staff/age)", 'Lisa'),
#("hotel/branch[@location = 'California']/staff/age union hotel/branch[@location = 'Las Vegas']/staff/age", ''),
("(//hotel/branch[@location = 'California']/staff/age union //hotel/branch[@location = 'Las Vegas']/staff/age)", '60'),
("(200 to 210)", "205"),
("(//hotel/branch[@location = 'California']/staff/age union //hotel/branch[@location = 'Las Vegas']/staff/age)", "50"),
("(1, 9, 9, 5)", "5"),
("(3, (), (14, 15), 92, 653)", "653"),
("for $i in /hotel/branch/staff return $i/given_name", "Christopher"),
("for $i in //hotel/branch/staff return $i/given_name", "Christopher"),
("distinct-values(for $i in /hotel/branch/staff return $i/given_name)", "Jessica"),
("distinct-values(for $i in //hotel/branch/staff return $i/given_name)", "Jessica"),
("for $i in (7 to 15) return $i*10", "130"),
("some $i in /hotel/branch/staff satisfies $i/age < 20", "false"),
("some $i in //hotel/branch/staff satisfies $i/age < 20", "false"),
("every $i in /hotel/branch/staff satisfies $i/age > 20", "true"),
("every $i in //hotel/branch/staff satisfies $i/age > 20 ", "true"),
("let $x := branch[@location = 'California'], $y := branch[@location = 'Las Vegas'] return (avg($x/staff/age), avg($y/staff/age))", "27.5"),
("let $x := //branch[@location = 'California'], $y := //branch[@location = 'Las Vegas'] return (avg($x/staff/age), avg($y/staff/age))", "27.5"),
("let $nu := 1, $de := 1000 return 'probability = ' || $nu div $de * 100 || '%'", "0.1%"),
("let $nu := 2, $probability := function ($argument) { 'probability = ' || $nu div $argument * 100 || '%'}, $de := 5 return $probability($de)", "40%"),
("'XPATH2.0-3.1 dissemination' instance of xs:string ", "true"),
("'new stackoverflow question incoming' instance of xs:integer ", "false"),
("'50000' cast as xs:integer", "50000"),
("//branch[@location = 'California']/staff[1]/surname eq 'Anderson'", "true"),
("fn:false()", "false")])
def test_hotels(html_content, xpath, answer):
html_content = html_tools.xpath_filter(xpath, html_content, append_pretty_line_formatting=True)
assert type(html_content) == str
assert answer in html_content
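All of these parametrized cases funnel through a single call, so one standalone invocation shows the API under test. A minimal sketch, runnable from the directory above changedetectionio/ as the sys.path insert above implies:

# Illustrative usage of the function under test: an XPath 2.0 'if' expression,
# evaluated over the sample document by the elementpath engine.
out = html_tools.xpath_filter("if (count(//staff) = 5) then 'five staff' else 'not five'",
                              hotels, append_pretty_line_formatting=True)
assert 'five staff' in out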
branches_to_visit = """<?xml version="1.0" ?>
<branches_to_visit>
<manager name="Godot" room_no="501">
<branch>Area 51</branch>
<branch>A place with no name</branch>
<branch>Stalsk12</branch>
</manager>
<manager name="Freya" room_no="305">
<branch>Stalsk12</branch>
<branch>Barcelona</branch>
<branch>Paris</branch>
</manager>
</branches_to_visit>"""
@pytest.mark.parametrize("html_content", [branches_to_visit])
@pytest.mark.parametrize("xpath, answer", [
("manager[@name = 'Godot']/branch union manager[@name = 'Freya']/branch", "Area 51"),
("//manager[@name = 'Godot']/branch union //manager[@name = 'Freya']/branch", "Stalsk12"),
("manager[@name = 'Godot']/branch | manager[@name = 'Freya']/branch", "Stalsk12"),
("//manager[@name = 'Godot']/branch | //manager[@name = 'Freya']/branch", "Stalsk12"),
("manager/branch intersect manager[@name = 'Godot']/branch", "A place with no name"),
("//manager/branch intersect //manager[@name = 'Godot']/branch", "A place with no name"),
("manager[@name = 'Godot']/branch intersect manager[@name = 'Freya']/branch", ""),
("manager/branch except manager[@name = 'Godot']/branch", "Barcelona"),
("manager[@name = 'Godot']/branch[1] eq 'Area 51'", "true"),
("//manager[@name = 'Godot']/branch[1] eq 'Area 51'", "true"),
("manager[@name = 'Godot']/branch[1] eq 'Seoul'", "false"),
("//manager[@name = 'Godot']/branch[1] eq 'Seoul'", "false"),
("manager[@name = 'Godot']/branch[2] eq manager[@name = 'Freya']/branch[2]", "false"),
("//manager[@name = 'Godot']/branch[2] eq //manager[@name = 'Freya']/branch[2]", "false"),
("manager[1]/@room_no lt manager[2]/@room_no", "false"),
("//manager[1]/@room_no lt //manager[2]/@room_no", "false"),
("manager[1]/@room_no gt manager[2]/@room_no", "true"),
("//manager[1]/@room_no gt //manager[2]/@room_no", "true"),
("manager[@name = 'Godot']/branch[1] = 'Area 51'", "true"),
("//manager[@name = 'Godot']/branch[1] = 'Area 51'", "true"),
("manager[@name = 'Godot']/branch[1] = 'Seoul'", "false"),
("//manager[@name = 'Godot']/branch[1] = 'Seoul'", "false"),
("manager[@name = 'Godot']/branch = 'Area 51'", "true"),
("//manager[@name = 'Godot']/branch = 'Area 51'", "true"),
("manager[@name = 'Godot']/branch = 'Barcelona'", "false"),
("//manager[@name = 'Godot']/branch = 'Barcelona'", "false"),
("manager[1]/@room_no > manager[2]/@room_no", "true"),
("//manager[1]/@room_no > //manager[2]/@room_no", "true"),
("manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is manager[1]/branch[1]", "false"),
("//manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is //manager[1]/branch[1]", "false"),
("manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is manager[1]/branch[3]", "true"),
("//manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is //manager[1]/branch[3]", "true"),
("manager[@name = 'Godot']/branch[ . = 'Stalsk12'] << manager[1]/branch[1]", "false"),
("//manager[@name = 'Godot']/branch[ . = 'Stalsk12'] << //manager[1]/branch[1]", "false"),
("manager[@name = 'Godot']/branch[ . = 'Stalsk12'] >> manager[1]/branch[1]", "true"),
("//manager[@name = 'Godot']/branch[ . = 'Stalsk12'] >> //manager[1]/branch[1]", "true"),
("manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is manager[@name = 'Freya']/branch[ . = 'Stalsk12']", "false"),
("//manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is //manager[@name = 'Freya']/branch[ . = 'Stalsk12']", "false"),
("manager[1]/@name || manager[2]/@name", "GodotFreya"),
("//manager[1]/@name || //manager[2]/@name", "GodotFreya"),
])
def test_branches_to_visit(html_content, xpath, answer):
html_content = html_tools.xpath_filter(xpath, html_content, append_pretty_line_formatting=True)
assert type(html_content) == str
assert answer in html_content

trips = """
<trips>
<trip reservation_number="10">
<depart>2023-10-06</depart>
<arrive>2023-10-10</arrive>
<traveler name="Christopher Anderson">
<duration>4</duration>
<price>2000.00</price>
</traveler>
</trip>
<trip reservation_number="12">
<depart>2023-10-06</depart>
<arrive>2023-10-12</arrive>
<traveler name="Frank Carter">
<duration>6</duration>
<price>3500.34</price>
</traveler>
</trip>
</trips>"""
@pytest.mark.parametrize("html_content", [trips])
@pytest.mark.parametrize("xpath, answer", [
("1 + 9 * 9 + 5 div 5", "83"),
("(1 + 9 * 9 + 5) div 6", "14.5"),
("23 idiv 3", "7"),
("23 div 3", "7.66666666"),
("for $i in ./trip return $i/traveler/duration * $i/traveler/price", "21002.04"),
("for $i in ./trip return $i/traveler/duration ", "4"),
("for $i in .//trip return $i/traveler/duration * $i/traveler/price", "21002.04"),
("sum(for $i in ./trip return $i/traveler/duration * $i/traveler/price)", "29002.04"),
("sum(for $i in .//trip return $i/traveler/duration * $i/traveler/price)", "29002.04"),
#("trip[1]/depart - trip[1]/arrive", "fail_to_get_answer"),
#("//trip[1]/depart - //trip[1]/arrive", "fail_to_get_answer"),
#("trip[1]/depart + trip[1]/arrive", "fail_to_get_answer"),
#("xs:date(trip[1]/depart) + xs:date(trip[1]/arrive)", "fail_to_get_answer"),
("(//trip[1]/arrive cast as xs:date) - (//trip[1]/depart cast as xs:date)", "P4D"),
("(//trip[1]/depart cast as xs:date) - (//trip[1]/arrive cast as xs:date)", "-P4D"),
("(//trip[1]/depart cast as xs:date) + xs:dayTimeDuration('P3D')", "2023-10-09"),
("(//trip[1]/depart cast as xs:date) - xs:dayTimeDuration('P3D')", "2023-10-03"),
("(456, 623) instance of xs:integer", "false"),
("(456, 623) instance of xs:integer*", "true"),
("/trips/trip instance of element()", "false"),
("/trips/trip instance of element()*", "true"),
("/trips/trip[1]/arrive instance of xs:date", "false"),
("date(/trips/trip[1]/arrive) instance of xs:date", "true"),
("'8' cast as xs:integer", "8"),
("'11.1E3' cast as xs:double", "11100"),
("6.5 cast as xs:integer", "6"),
#("/trips/trip[1]/arrive cast as xs:dateTime", "fail_to_get_answer"),
("/trips/trip[1]/arrive cast as xs:date", "2023-10-10"),
("('2023-10-12') cast as xs:date", "2023-10-12"),
("for $i in //trip return concat($i/depart, ' ', $i/arrive)", "2023-10-06 2023-10-10"),
])
def test_trips(html_content, xpath, answer):
html_content = html_tools.xpath_filter(xpath, html_content, append_pretty_line_formatting=True)
assert type(html_content) == str
assert answer in html_content
@@ -1,54 +0,0 @@
#!/usr/bin/python3

# run from dir above changedetectionio/ dir
# python3 -m unittest changedetectionio.tests.unit.test_notification_diff

import unittest
import os

from changedetectionio.model import Watch

# mostly
class TestDiffBuilder(unittest.TestCase):

def test_watch_get_suggested_from_diff_timestamp(self):
import uuid as uuid_builder
watch = Watch.model(datastore_path='/tmp', default={})
watch.ensure_data_dir_exists()

watch['last_viewed'] = 110

watch.save_history_text(contents=b"hello world", timestamp=100, snapshot_id=str(uuid_builder.uuid4()))
watch.save_history_text(contents=b"hello world", timestamp=105, snapshot_id=str(uuid_builder.uuid4()))
watch.save_history_text(contents=b"hello world", timestamp=109, snapshot_id=str(uuid_builder.uuid4()))
watch.save_history_text(contents=b"hello world", timestamp=112, snapshot_id=str(uuid_builder.uuid4()))
watch.save_history_text(contents=b"hello world", timestamp=115, snapshot_id=str(uuid_builder.uuid4()))
watch.save_history_text(contents=b"hello world", timestamp=117, snapshot_id=str(uuid_builder.uuid4()))

p = watch.get_next_snapshot_key_to_last_viewed
assert p == "112", "Correct last-viewed timestamp was detected"

# When there is only one step of difference from the end of the list, it should return the second-last change
watch['last_viewed'] = 116
p = watch.get_next_snapshot_key_to_last_viewed
assert p == "115", "Correct 'second last' last-viewed timestamp was detected when using the last timestamp"

watch['last_viewed'] = 99
p = watch.get_next_snapshot_key_to_last_viewed
assert p == "100"

watch['last_viewed'] = 200
p = watch.get_next_snapshot_key_to_last_viewed
assert p == "115", "When the 'last viewed' timestamp is greater than the newest snapshot, return second last"

watch['last_viewed'] = 109
p = watch.get_next_snapshot_key_to_last_viewed
assert p == "109", "Correct when it's the same time"

# new empty one
watch = Watch.model(datastore_path='/tmp', default={})
p = watch.get_next_snapshot_key_to_last_viewed
assert p == None, "None when no history available"

if __name__ == '__main__':
unittest.main()
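Read together, the asserts pin down the property's contract: return the first snapshot key at or after last_viewed, fall back to the second-newest key when last_viewed sits at or past the end of the history, and return None with no history at all. A plausible pure-function sketch of that rule (not the model's actual implementation):

def next_snapshot_key_to_last_viewed(keys, last_viewed):
    # keys: ascending snapshot timestamps, e.g. [100, 105, 109, 112, 115, 117]
    if not keys:
        return None
    after = [k for k in keys if k >= last_viewed]  # at-or-after the last viewed time
    if len(after) <= 1:
        # viewed right up to (or past) the newest snapshot: offer the second-newest
        return str(keys[-2]) if len(keys) >= 2 else str(keys[0])
    return str(after[0])

Checked against the asserts above: 110 gives "112", 116 gives "115", 99 gives "100", 200 gives "115", and 109 gives "109".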
@@ -209,7 +209,6 @@ class update_worker(threading.Thread):
from .processors import text_json_diff, restock_diff

while not self.app.config.exit.is_set():
update_handler = None

try:
queued_item_data = self.q.get(block=False)
@@ -230,35 +229,17 @@ class update_worker(threading.Thread):
now = time.time()

try:
# Processor is what we are using for detecting the "Change"
processor = self.datastore.data['watching'][uuid].get('processor', 'text_json_diff')
# if system...

# Abort processing when the content was the same as the last fetch
skip_when_same_checksum = queued_item_data.item.get('skip_when_checksum_same')

processor = self.datastore.data['watching'][uuid].get('processor','text_json_diff')

# @todo some way to switch by name
# Init a new 'difference_detection_processor'

if processor == 'restock_diff':
update_handler = restock_diff.perform_site_check(datastore=self.datastore,
watch_uuid=uuid
)
update_handler = restock_diff.perform_site_check(datastore=self.datastore)
else:
# Used as a default and also by some tests
update_handler = text_json_diff.perform_site_check(datastore=self.datastore,
watch_uuid=uuid
)
update_handler = text_json_diff.perform_site_check(datastore=self.datastore)

# Clear last errors (move to preflight func?)
self.datastore.data['watching'][uuid]['browser_steps_last_error_step'] = None

update_handler.call_browser()

changed_detected, update_obj, contents = update_handler.run_changedetection(uuid,
skip_when_checksum_same=skip_when_same_checksum,
)
changed_detected, update_obj, contents = update_handler.run(uuid, skip_when_checksum_same=queued_item_data.item.get('skip_when_checksum_same'))

# Re #342
# In Python 3, all strings are sequences of Unicode characters. There is a bytes type that holds raw bytes.
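The if/else on processor is the switch that the "@todo some way to switch by name" comment refers to. A name-keyed lookup is one way that could go; the sketch below is illustrative only, reusing the names from the hunk above rather than the project's code:

# Hypothetical name-based dispatch for processors, equivalent to the if/else above
PROCESSORS = {
    'restock_diff': restock_diff.perform_site_check,
    'text_json_diff': text_json_diff.perform_site_check,  # also the default
}
make_handler = PROCESSORS.get(processor, text_json_diff.perform_site_check)
update_handler = make_handler(datastore=self.datastore, watch_uuid=uuid)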
@@ -410,9 +391,6 @@ class update_worker(threading.Thread):
self.datastore.update_watch(uuid=uuid, update_obj={'last_error': str(e)})
# Other serious error
process_changedetection_results = False
# import traceback
# print(traceback.format_exc())

else:
# Crash protection, the watch entry could have been removed by this point (during a slow chrome fetch etc)
if not self.datastore.data['watching'].get(uuid):
@@ -1,8 +1,7 @@
eventlet>=0.33.3 # related to dnspython fixes
feedgen~=0.9
flask-compress
# 0.6.3 included compatibility fix for werkzeug 3.x (2.x had deprecation of url handlers)
flask-login>=0.6.3
flask-login~=0.6
flask-paginate
flask_expects_json~=1.7
flask_restful
@@ -46,12 +45,9 @@ beautifulsoup4
# XPath filtering, lxml is required by bs4 anyway, but put it here to be safe.
lxml

# XPath 2.0-3.1 support
elementpath

selenium~=4.14.0

werkzeug~=3.0
werkzeug

# Templating, so far just in the URLs but in the future can be for the notifications also
jinja2~=3.1
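The two pins travel together: once werkzeug is allowed onto 3.x, flask-login must be at least 0.6.3, since older releases used url-handling helpers that werkzeug 3 removed. A quick runtime check of the installed pair (assuming both packages are present):

from importlib.metadata import version

# Expect flask-login >= 0.6.3 alongside werkzeug 3.x per the pins above
print(version("flask-login"), version("werkzeug"))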