Compare commits

...

7 Commits

Author SHA1 Message Date
dgtlmoon
3647fb822b Attempt to handle #text 2023-11-13 18:09:53 +01:00
dgtlmoon
9ec00f4752 Visual Selector - handle small bug where whitespace after the filter caused it to not be visualised 2023-11-13 18:04:31 +01:00
dgtlmoon
465ff6ee44 Misc fixes 2023-11-13 17:35:29 +01:00
dgtlmoon
5e2049c538 Fix build issue 2023-11-13 17:02:27 +01:00
Constantin Hong
26931e0167 feature: Support XPath2.0 to 3.1 (#1774) 2023-11-13 16:42:21 +01:00
dgtlmoon
5229094e44 New functionanlity - Selectable browser / ability to add extra browser connections (good for using "scraping browsers"/ etc) (#1943) 2023-11-13 16:39:11 +01:00
dgtlmoon
5a306aa78c API/UI - Button to regenerate API key (#1975 / #1967) 2023-11-13 16:26:50 +01:00
27 changed files with 843 additions and 51 deletions

View File

@@ -30,7 +30,10 @@ jobs:
# Selenium+browserless # Selenium+browserless
docker run --network changedet-network -d --hostname selenium -p 4444:4444 --rm --shm-size="2g" selenium/standalone-chrome:4 docker run --network changedet-network -d --hostname selenium -p 4444:4444 --rm --shm-size="2g" selenium/standalone-chrome:4
docker run --network changedet-network -d --hostname browserless -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" -e "DEFAULT_LAUNCH_ARGS=[\"--window-size=1920,1080\"]" --rm -p 3000:3000 --shm-size="2g" browserless/chrome:1.60-chrome-stable docker run --network changedet-network -d --name browserless --hostname browserless -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" -e "DEFAULT_LAUNCH_ARGS=[\"--window-size=1920,1080\"]" --rm -p 3000:3000 --shm-size="2g" browserless/chrome:1.60-chrome-stable
# For accessing custom browser tests
docker run --network changedet-network -d --name browserless-custom-url --hostname browserless-custom-url -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" -e "DEFAULT_LAUNCH_ARGS=[\"--window-size=1920,1080\"]" --rm --shm-size="2g" browserless/chrome:1.60-chrome-stable
- name: Build changedetection.io container for testing - name: Build changedetection.io container for testing
run: | run: |
@@ -86,6 +89,12 @@ jobs:
# And again with PLAYWRIGHT_DRIVER_URL=.. # And again with PLAYWRIGHT_DRIVER_URL=..
cd .. cd ..
- name: Test custom browser URL
run: |
cd changedetectionio
./run_custom_browser_url_tests.sh
cd ..
- name: Test changedetection.io container starts+runs basically without error - name: Test changedetection.io container starts+runs basically without error
run: | run: |
docker run -p 5556:5000 -d test-changedetectionio docker run -p 5556:5000 -d test-changedetectionio

View File

@@ -268,3 +268,7 @@ I offer commercial support, this software is depended on by network security, ae
[license-shield]: https://img.shields.io/github/license/dgtlmoon/changedetection.io.svg?style=for-the-badge [license-shield]: https://img.shields.io/github/license/dgtlmoon/changedetection.io.svg?style=for-the-badge
[release-link]: https://github.com/dgtlmoon/changedetection.io/releases [release-link]: https://github.com/dgtlmoon/changedetection.io/releases
[docker-link]: https://hub.docker.com/r/dgtlmoon/changedetection.io [docker-link]: https://hub.docker.com/r/dgtlmoon/changedetection.io
## Third-party licenses
changedetectionio.html_tools.elementpath_tostring: Copyright (c), 2018-2021, SISSA (Scuola Internazionale Superiore di Studi Avanzati), Licensed under [MIT license](https://github.com/sissaschool/elementpath/blob/master/LICENSE)

View File

@@ -614,6 +614,8 @@ def changedetection_app(config=None, datastore_o=None):
# For the form widget tag uuid lookup # For the form widget tag uuid lookup
form.tags.datastore = datastore # in _value form.tags.datastore = datastore # in _value
for p in datastore.extra_browsers:
form.fetch_backend.choices.append(p)
form.fetch_backend.choices.append(("system", 'System settings default')) form.fetch_backend.choices.append(("system", 'System settings default'))
@@ -714,7 +716,7 @@ def changedetection_app(config=None, datastore_o=None):
system_uses_webdriver = datastore.data['settings']['application']['fetch_backend'] == 'html_webdriver' system_uses_webdriver = datastore.data['settings']['application']['fetch_backend'] == 'html_webdriver'
is_html_webdriver = False is_html_webdriver = False
if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver': if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver' or watch.get('fetch_backend', '').startswith('extra_browser_'):
is_html_webdriver = True is_html_webdriver = True
# Only works reliably with Playwright # Only works reliably with Playwright
@@ -819,6 +821,16 @@ def changedetection_app(config=None, datastore_o=None):
return output return output
@app.route("/settings/reset-api-key", methods=['GET'])
@login_optionally_required
def settings_reset_api_key():
import secrets
secret = secrets.token_hex(16)
datastore.data['settings']['application']['api_access_token'] = secret
datastore.needs_write_urgent = True
flash("API Key was regenerated.")
return redirect(url_for('settings_page')+'#api')
@app.route("/import", methods=['GET', "POST"]) @app.route("/import", methods=['GET', "POST"])
@login_optionally_required @login_optionally_required
def import_page(): def import_page():
@@ -977,7 +989,7 @@ def changedetection_app(config=None, datastore_o=None):
system_uses_webdriver = datastore.data['settings']['application']['fetch_backend'] == 'html_webdriver' system_uses_webdriver = datastore.data['settings']['application']['fetch_backend'] == 'html_webdriver'
is_html_webdriver = False is_html_webdriver = False
if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver': if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver' or watch.get('fetch_backend', '').startswith('extra_browser_'):
is_html_webdriver = True is_html_webdriver = True
password_enabled_and_share_is_off = False password_enabled_and_share_is_off = False
@@ -1031,7 +1043,7 @@ def changedetection_app(config=None, datastore_o=None):
is_html_webdriver = False is_html_webdriver = False
if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver': if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver' or watch.get('fetch_backend', '').startswith('extra_browser_'):
is_html_webdriver = True is_html_webdriver = True
# Never requested successfully, but we detected a fetch error # Never requested successfully, but we detected a fetch error

View File

@@ -69,11 +69,12 @@ xpath://body/div/span[contains(@class, 'example-class')]",
{% endif %} {% endif %}
</ul> </ul>
</li> </li>
<li>XPath - Limit text to this XPath rule, simply start with a forward-slash, <li>XPath - Limit text to this XPath rule, simply start with a forward-slash. To specify XPath to be used explicitly or the XPath rule starts with an XPath function: Prefix with <code>xpath:</code>
<ul> <ul>
<li>Example: <code>//*[contains(@class, 'sametext')]</code> or <code>xpath://*[contains(@class, 'sametext')]</code>, <a <li>Example: <code>//*[contains(@class, 'sametext')]</code> or <code>xpath:count(//*[contains(@class, 'sametext')])</code>, <a
href="http://xpather.com/" target="new">test your XPath here</a></li> href="http://xpather.com/" target="new">test your XPath here</a></li>
<li>Example: Get all titles from an RSS feed <code>//title/text()</code></li> <li>Example: Get all titles from an RSS feed <code>//title/text()</code></li>
<li>To use XPath1.0: Prefix with <code>xpath1:</code></li>
</ul> </ul>
</li> </li>
</ul> </ul>

View File

@@ -96,6 +96,7 @@ class Fetcher():
content = None content = None
error = None error = None
fetcher_description = "No description" fetcher_description = "No description"
browser_connection_url = None
headers = {} headers = {}
status_code = None status_code = None
webdriver_js_execute_code = None webdriver_js_execute_code = None
@@ -251,14 +252,16 @@ class base_html_playwright(Fetcher):
proxy = None proxy = None
def __init__(self, proxy_override=None): def __init__(self, proxy_override=None, browser_connection_url=None):
super().__init__() super().__init__()
# .strip('"') is going to save someone a lot of time when they accidently wrap the env value
self.browser_type = os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').strip('"') self.browser_type = os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').strip('"')
self.command_executor = os.getenv(
"PLAYWRIGHT_DRIVER_URL", # .strip('"') is going to save someone a lot of time when they accidently wrap the env value
'ws://playwright-chrome:3000' if not browser_connection_url:
).strip('"') self.browser_connection_url = os.getenv("PLAYWRIGHT_DRIVER_URL", 'ws://playwright-chrome:3000').strip('"')
else:
self.browser_connection_url = browser_connection_url
# If any proxy settings are enabled, then we should setup the proxy object # If any proxy settings are enabled, then we should setup the proxy object
proxy_args = {} proxy_args = {}
@@ -444,7 +447,7 @@ class base_html_playwright(Fetcher):
# Seemed to cause a connection Exception even tho I can see it connect # Seemed to cause a connection Exception even tho I can see it connect
# self.browser = browser_type.connect(self.command_executor, timeout=timeout*1000) # self.browser = browser_type.connect(self.command_executor, timeout=timeout*1000)
# 60,000 connection timeout only # 60,000 connection timeout only
browser = browser_type.connect_over_cdp(self.command_executor, timeout=60000) browser = browser_type.connect_over_cdp(self.browser_connection_url, timeout=60000)
# SOCKS5 with authentication is not supported (yet) # SOCKS5 with authentication is not supported (yet)
# https://github.com/microsoft/playwright/issues/10567 # https://github.com/microsoft/playwright/issues/10567
@@ -504,7 +507,11 @@ class base_html_playwright(Fetcher):
self.status_code = response.status self.status_code = response.status
if self.status_code != 200 and not ignore_status_codes: if self.status_code != 200 and not ignore_status_codes:
raise Non200ErrorCodeReceived(url=url, status_code=self.status_code)
screenshot=self.page.screenshot(type='jpeg', full_page=True,
quality=int(os.getenv("PLAYWRIGHT_SCREENSHOT_QUALITY", 72)))
raise Non200ErrorCodeReceived(url=url, status_code=self.status_code, screenshot=screenshot)
if len(self.page.content().strip()) == 0: if len(self.page.content().strip()) == 0:
context.close() context.close()
@@ -555,8 +562,6 @@ class base_html_webdriver(Fetcher):
else: else:
fetcher_description = "WebDriver Chrome/Javascript" fetcher_description = "WebDriver Chrome/Javascript"
command_executor = ''
# Configs for Proxy setup # Configs for Proxy setup
# In the ENV vars, is prefixed with "webdriver_", so it is for example "webdriver_sslProxy" # In the ENV vars, is prefixed with "webdriver_", so it is for example "webdriver_sslProxy"
selenium_proxy_settings_mappings = ['proxyType', 'ftpProxy', 'httpProxy', 'noProxy', selenium_proxy_settings_mappings = ['proxyType', 'ftpProxy', 'httpProxy', 'noProxy',
@@ -564,12 +569,15 @@ class base_html_webdriver(Fetcher):
'socksProxy', 'socksVersion', 'socksUsername', 'socksPassword'] 'socksProxy', 'socksVersion', 'socksUsername', 'socksPassword']
proxy = None proxy = None
def __init__(self, proxy_override=None): def __init__(self, proxy_override=None, browser_connection_url=None):
super().__init__() super().__init__()
from selenium.webdriver.common.proxy import Proxy as SeleniumProxy from selenium.webdriver.common.proxy import Proxy as SeleniumProxy
# .strip('"') is going to save someone a lot of time when they accidently wrap the env value # .strip('"') is going to save someone a lot of time when they accidently wrap the env value
self.command_executor = os.getenv("WEBDRIVER_URL", 'http://browser-chrome:4444/wd/hub').strip('"') if not browser_connection_url:
self.browser_connection_url = os.getenv("WEBDRIVER_URL", 'http://browser-chrome:4444/wd/hub').strip('"')
else:
self.browser_connection_url = browser_connection_url
# If any proxy settings are enabled, then we should setup the proxy object # If any proxy settings are enabled, then we should setup the proxy object
proxy_args = {} proxy_args = {}
@@ -611,7 +619,7 @@ class base_html_webdriver(Fetcher):
options.proxy = self.proxy options.proxy = self.proxy
self.driver = webdriver.Remote( self.driver = webdriver.Remote(
command_executor=self.command_executor, command_executor=self.browser_connection_url,
options=options) options=options)
try: try:
@@ -666,9 +674,10 @@ class base_html_webdriver(Fetcher):
class html_requests(Fetcher): class html_requests(Fetcher):
fetcher_description = "Basic fast Plaintext/HTTP Client" fetcher_description = "Basic fast Plaintext/HTTP Client"
def __init__(self, proxy_override=None): def __init__(self, proxy_override=None, browser_connection_url=None):
super().__init__() super().__init__()
self.proxy_override = proxy_override self.proxy_override = proxy_override
# browser_connection_url is none because its always 'launched locally'
def run(self, def run(self,
url, url,

View File

@@ -168,7 +168,9 @@ class ValidateContentFetcherIsReady(object):
def __call__(self, form, field): def __call__(self, form, field):
import urllib3.exceptions import urllib3.exceptions
from changedetectionio import content_fetcher from changedetectionio import content_fetcher
return
# AttributeError: module 'changedetectionio.content_fetcher' has no attribute 'extra_browser_unlocked<>ASDF213r123r'
# Better would be a radiohandler that keeps a reference to each class # Better would be a radiohandler that keeps a reference to each class
if field.data is not None and field.data != 'system': if field.data is not None and field.data != 'system':
klass = getattr(content_fetcher, field.data) klass = getattr(content_fetcher, field.data)
@@ -326,11 +328,30 @@ class ValidateCSSJSONXPATHInput(object):
return return
# Does it look like XPath? # Does it look like XPath?
if line.strip()[0] == '/': if line.strip()[0] == '/' or line.strip().startswith('xpath:'):
if not self.allow_xpath:
raise ValidationError("XPath not permitted in this field!")
from lxml import etree, html
import elementpath
# xpath 2.0-3.1
from elementpath.xpath3 import XPath3Parser
tree = html.fromstring("<html></html>")
line = line.replace('xpath:', '')
try:
elementpath.select(tree, line.strip(), parser=XPath3Parser)
except elementpath.ElementPathError as e:
message = field.gettext('\'%s\' is not a valid XPath expression. (%s)')
raise ValidationError(message % (line, str(e)))
except:
raise ValidationError("A system-error occurred when validating your XPath expression")
if line.strip().startswith('xpath1:'):
if not self.allow_xpath: if not self.allow_xpath:
raise ValidationError("XPath not permitted in this field!") raise ValidationError("XPath not permitted in this field!")
from lxml import etree, html from lxml import etree, html
tree = html.fromstring("<html></html>") tree = html.fromstring("<html></html>")
line = re.sub(r'^xpath1:', '', line)
try: try:
tree.xpath(line.strip()) tree.xpath(line.strip())
@@ -496,6 +517,12 @@ class SingleExtraProxy(Form):
proxy_url = StringField('Proxy URL', [validators.Optional()], render_kw={"placeholder": "socks5:// or regular proxy http://user:pass@...:3128", "size":50}) proxy_url = StringField('Proxy URL', [validators.Optional()], render_kw={"placeholder": "socks5:// or regular proxy http://user:pass@...:3128", "size":50})
# @todo do the validation here instead # @todo do the validation here instead
class SingleExtraBrowser(Form):
browser_name = StringField('Name', [validators.Optional()], render_kw={"placeholder": "Name"})
browser_connection_url = StringField('Browser connection URL', [validators.Optional()], render_kw={"placeholder": "wss://brightdata... wss://oxylabs etc", "size":50})
# @todo do the validation here instead
# datastore.data['settings']['requests'].. # datastore.data['settings']['requests']..
class globalSettingsRequestForm(Form): class globalSettingsRequestForm(Form):
time_between_check = FormField(TimeBetweenCheckForm) time_between_check = FormField(TimeBetweenCheckForm)
@@ -504,6 +531,7 @@ class globalSettingsRequestForm(Form):
render_kw={"style": "width: 5em;"}, render_kw={"style": "width: 5em;"},
validators=[validators.NumberRange(min=0, message="Should contain zero or more seconds")]) validators=[validators.NumberRange(min=0, message="Should contain zero or more seconds")])
extra_proxies = FieldList(FormField(SingleExtraProxy), min_entries=5) extra_proxies = FieldList(FormField(SingleExtraProxy), min_entries=5)
extra_browsers = FieldList(FormField(SingleExtraBrowser), min_entries=5)
def validate_extra_proxies(self, extra_validators=None): def validate_extra_proxies(self, extra_validators=None):
for e in self.data['extra_proxies']: for e in self.data['extra_proxies']:

View File

@@ -69,10 +69,89 @@ def element_removal(selectors: List[str], html_content):
selector = ",".join(selectors) selector = ",".join(selectors)
return subtractive_css_selector(selector, html_content) return subtractive_css_selector(selector, html_content)
def elementpath_tostring(obj):
"""
change elementpath.select results to string type
# The MIT License (MIT), Copyright (c), 2018-2021, SISSA (Scuola Internazionale Superiore di Studi Avanzati)
# https://github.com/sissaschool/elementpath/blob/dfcc2fd3d6011b16e02bf30459a7924f547b47d0/elementpath/xpath_tokens.py#L1038
"""
import elementpath
from decimal import Decimal
import math
if obj is None:
return ''
# https://elementpath.readthedocs.io/en/latest/xpath_api.html#elementpath.select
elif isinstance(obj, elementpath.XPathNode):
return obj.string_value
elif isinstance(obj, bool):
return 'true' if obj else 'false'
elif isinstance(obj, Decimal):
value = format(obj, 'f')
if '.' in value:
return value.rstrip('0').rstrip('.')
return value
elif isinstance(obj, float):
if math.isnan(obj):
return 'NaN'
elif math.isinf(obj):
return str(obj).upper()
value = str(obj)
if '.' in value:
value = value.rstrip('0').rstrip('.')
if '+' in value:
value = value.replace('+', '')
if 'e' in value:
return value.upper()
return value
return str(obj)
# Return str Utf-8 of matched rules # Return str Utf-8 of matched rules
def xpath_filter(xpath_filter, html_content, append_pretty_line_formatting=False, is_rss=False): def xpath_filter(xpath_filter, html_content, append_pretty_line_formatting=False, is_rss=False):
from lxml import etree, html from lxml import etree, html
import elementpath
# xpath 2.0-3.1
from elementpath.xpath3 import XPath3Parser
parser = etree.HTMLParser()
if is_rss:
# So that we can keep CDATA for cdata_in_document_to_text() to process
parser = etree.XMLParser(strip_cdata=False)
tree = html.fromstring(bytes(html_content, encoding='utf-8'), parser=parser)
html_block = ""
r = elementpath.select(tree, xpath_filter.strip(), namespaces={'re': 'http://exslt.org/regular-expressions'}, parser=XPath3Parser)
#@note: //title/text() wont work where <title>CDATA..
if type(r) != list:
r = [r]
for element in r:
# When there's more than 1 match, then add the suffix to separate each line
# And where the matched result doesn't include something that will cause Inscriptis to add a newline
# (This way each 'match' reliably has a new-line in the diff)
# Divs are converted to 4 whitespaces by inscriptis
if append_pretty_line_formatting and len(html_block) and (not hasattr( element, 'tag' ) or not element.tag in (['br', 'hr', 'div', 'p'])):
html_block += TEXT_FILTER_LIST_LINE_SUFFIX
if type(element) == str:
html_block += element
elif issubclass(type(element), etree._Element) or issubclass(type(element), etree._ElementTree):
html_block += etree.tostring(element, pretty_print=True).decode('utf-8')
else:
html_block += elementpath_tostring(element)
return html_block
# Return str Utf-8 of matched rules
# 'xpath1:'
def xpath1_filter(xpath_filter, html_content, append_pretty_line_formatting=False, is_rss=False):
from lxml import etree, html
parser = None parser = None
if is_rss: if is_rss:

View File

@@ -16,6 +16,7 @@ class model(dict):
}, },
'requests': { 'requests': {
'extra_proxies': [], # Configurable extra proxies via the UI 'extra_proxies': [], # Configurable extra proxies via the UI
'extra_browsers': [], # Configurable extra proxies via the UI
'jitter_seconds': 0, 'jitter_seconds': 0,
'proxy': None, # Preferred proxy connection 'proxy': None, # Preferred proxy connection
'time_between_check': {'weeks': None, 'days': None, 'hours': 3, 'minutes': None, 'seconds': None}, 'time_between_check': {'weeks': None, 'days': None, 'hours': 3, 'minutes': None, 'seconds': None},

View File

@@ -8,11 +8,12 @@ from distutils.util import strtobool
class difference_detection_processor(): class difference_detection_processor():
browser_steps = None
datastore = None datastore = None
fetcher = None fetcher = None
screenshot = None screenshot = None
watch = None
xpath_data = None xpath_data = None
browser_steps = None
def __init__(self, *args, datastore, watch_uuid, **kwargs): def __init__(self, *args, datastore, watch_uuid, **kwargs):
super().__init__(*args, **kwargs) super().__init__(*args, **kwargs)
@@ -40,6 +41,18 @@ class difference_detection_processor():
if not prefer_fetch_backend or prefer_fetch_backend == 'system': if not prefer_fetch_backend or prefer_fetch_backend == 'system':
prefer_fetch_backend = self.datastore.data['settings']['application'].get('fetch_backend') prefer_fetch_backend = self.datastore.data['settings']['application'].get('fetch_backend')
# In the case that the preferred fetcher was a browser config with custom connection URL..
# @todo - on save watch, if its extra_browser_ then it should be obvious it will use playwright (like if its requests now..)
browser_connection_url = None
if prefer_fetch_backend.startswith('extra_browser_'):
(t, key) = prefer_fetch_backend.split('extra_browser_')
connection = list(
filter(lambda s: (s['browser_name'] == key), self.datastore.data['settings']['requests'].get('extra_browsers', [])))
if connection:
prefer_fetch_backend = 'base_html_playwright'
browser_connection_url = connection[0].get('browser_connection_url')
# Grab the right kind of 'fetcher', (playwright, requests, etc) # Grab the right kind of 'fetcher', (playwright, requests, etc)
if hasattr(content_fetcher, prefer_fetch_backend): if hasattr(content_fetcher, prefer_fetch_backend):
fetcher_obj = getattr(content_fetcher, prefer_fetch_backend) fetcher_obj = getattr(content_fetcher, prefer_fetch_backend)
@@ -54,8 +67,9 @@ class difference_detection_processor():
print(f"Using proxy Key: {preferred_proxy_id} as Proxy URL {proxy_url}") print(f"Using proxy Key: {preferred_proxy_id} as Proxy URL {proxy_url}")
# Now call the fetcher (playwright/requests/etc) with arguments that only a fetcher would need. # Now call the fetcher (playwright/requests/etc) with arguments that only a fetcher would need.
# When browser_connection_url is None, it method should default to working out whats the best defaults (os env vars etc)
self.fetcher = fetcher_obj(proxy_override=proxy_url, self.fetcher = fetcher_obj(proxy_override=proxy_url,
#browser_url_extra/configurable browser url=... browser_connection_url=browser_connection_url
) )
if self.watch.has_browser_steps: if self.watch.has_browser_steps:

View File

@@ -173,6 +173,11 @@ class perform_site_check(difference_detection_processor):
html_content=self.fetcher.content, html_content=self.fetcher.content,
append_pretty_line_formatting=not watch.is_source_type_url, append_pretty_line_formatting=not watch.is_source_type_url,
is_rss=is_rss) is_rss=is_rss)
elif filter_rule.startswith('xpath1:'):
html_content += html_tools.xpath1_filter(xpath_filter=filter_rule.replace('xpath1:', ''),
html_content=self.fetcher.content,
append_pretty_line_formatting=not watch.is_source_type_url,
is_rss=is_rss)
else: else:
# CSS Filter, extract the HTML that matches and feed that into the existing inscriptis::get_text # CSS Filter, extract the HTML that matches and feed that into the existing inscriptis::get_text
html_content += html_tools.include_filters(include_filters=filter_rule, html_content += html_tools.include_filters(include_filters=filter_rule,

View File

@@ -170,9 +170,12 @@ if (include_filters.length) {
try { try {
// is it xpath? // is it xpath?
if (f.startsWith('/') || f.startsWith('xpath:')) { if (f.startsWith('/') || f.startsWith('xpath')) {
q = document.evaluate(f.replace('xpath:', ''), document, null, XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue; var qry_f = f.replace(/xpath(:|\d:)/, '')
console.log("[xpath] Scanning for included filter " + qry_f)
q = document.evaluate(qry_f, document, null, XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue;
} else { } else {
console.log("[css] Scanning for included filter " + f)
q = document.querySelector(f); q = document.querySelector(f);
} }
} catch (e) { } catch (e) {
@@ -182,8 +185,18 @@ if (include_filters.length) {
} }
if (q) { if (q) {
// Try to resolve //something/text() back to its /something so we can atleast get the bounding box
try {
if (typeof q.nodeName == 'string' && q.nodeName === '#text') {
q = q.parentElement
}
} catch (e) {
console.log(e)
console.log("xpath_element_scraper: #text resolver")
}
// #1231 - IN the case XPath attribute filter is applied, we will have to traverse up and find the element. // #1231 - IN the case XPath attribute filter is applied, we will have to traverse up and find the element.
if (q.hasOwnProperty('getBoundingClientRect')) { if (typeof q.getBoundingClientRect == 'function') {
bbox = q.getBoundingClientRect(); bbox = q.getBoundingClientRect();
console.log("xpath_element_scraper: Got filter element, scroll from top was " + scroll_y) console.log("xpath_element_scraper: Got filter element, scroll from top was " + scroll_y)
} else { } else {
@@ -192,7 +205,8 @@ if (include_filters.length) {
bbox = q.ownerElement.getBoundingClientRect(); bbox = q.ownerElement.getBoundingClientRect();
console.log("xpath_element_scraper: Got filter by ownerElement element, scroll from top was " + scroll_y) console.log("xpath_element_scraper: Got filter by ownerElement element, scroll from top was " + scroll_y)
} catch (e) { } catch (e) {
console.log("xpath_element_scraper: error looking up ownerElement") console.log(e)
console.log("xpath_element_scraper: error looking up q.ownerElement")
} }
} }
} }

View File

@@ -0,0 +1,44 @@
#!/bin/bash
# run some tests and look if the 'custom-browser-search-string=1' connect string appeared in the correct containers
# enable debug
set -x
# A extra browser is configured, but we never chose to use it, so it should NOT show in the logs
docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/custom_browser_url/test_custom_browser_url.py::test_request_not_via_custom_browser_url'
docker logs browserless-custom-url &>log.txt
grep 'custom-browser-search-string=1' log.txt
if [ $? -ne 1 ]
then
echo "Saw a request in 'browserless-custom-url' container with 'custom-browser-search-string=1' when I should not"
exit 1
fi
docker logs browserless &>log.txt
grep 'custom-browser-search-string=1' log.txt
if [ $? -ne 1 ]
then
echo "Saw a request in 'browser' container with 'custom-browser-search-string=1' when I should not"
exit 1
fi
# Special connect string should appear in the custom-url container, but not in the 'default' one
docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/custom_browser_url/test_custom_browser_url.py::test_request_via_custom_browser_url'
docker logs browserless-custom-url &>log.txt
grep 'custom-browser-search-string=1' log.txt
if [ $? -ne 0 ]
then
echo "Did not see request in 'browserless-custom-url' container with 'custom-browser-search-string=1' when I should"
exit 1
fi
docker logs browserless &>log.txt
grep 'custom-browser-search-string=1' log.txt
if [ $? -ne 1 ]
then
echo "Saw a request in 'browser' container with 'custom-browser-search-string=1' when I should not"
exit 1
fi

View File

@@ -149,7 +149,7 @@ $(document).ready(function () {
// @todo In the future paint all that match // @todo In the future paint all that match
for (const c of current_default_xpath) { for (const c of current_default_xpath) {
for (var i = selector_data['size_pos'].length; i !== 0; i--) { for (var i = selector_data['size_pos'].length; i !== 0; i--) {
if (selector_data['size_pos'][i - 1].xpath === c) { if (selector_data['size_pos'][i - 1].xpath.trim() === c.trim()) {
console.log("highlighting " + c); console.log("highlighting " + c);
current_selected_i = i - 1; current_selected_i = i - 1;
highlight_current_selected_i(); highlight_current_selected_i();

View File

@@ -0,0 +1,24 @@
ul#requests-extra_browsers {
list-style: none;
/* tidy up the table to look more "inline" */
li {
> label {
display: none;
}
}
/* each proxy entry is a `table` */
table {
tr {
display: inline;
}
}
}
#extra-browsers-setting {
border: 1px solid var(--color-grey-800);
border-radius: 4px;
margin: 1em;
padding: 1em;
}

View File

@@ -60,3 +60,10 @@ body.proxy-check-active {
padding-bottom: 1em; padding-bottom: 1em;
} }
#extra-proxies-setting {
border: 1px solid var(--color-grey-800);
border-radius: 4px;
margin: 1em;
padding: 1em;
}

View File

@@ -5,6 +5,7 @@
@import "parts/_arrows"; @import "parts/_arrows";
@import "parts/_browser-steps"; @import "parts/_browser-steps";
@import "parts/_extra_proxies"; @import "parts/_extra_proxies";
@import "parts/_extra_browsers";
@import "parts/_pagination"; @import "parts/_pagination";
@import "parts/_spinners"; @import "parts/_spinners";
@import "parts/_variables"; @import "parts/_variables";

View File

@@ -128,6 +128,27 @@ body.proxy-check-active #request .proxy-timing {
border-radius: 4px; border-radius: 4px;
padding: 1em; } padding: 1em; }
#extra-proxies-setting {
border: 1px solid var(--color-grey-800);
border-radius: 4px;
margin: 1em;
padding: 1em; }
ul#requests-extra_browsers {
list-style: none;
/* tidy up the table to look more "inline" */
/* each proxy entry is a `table` */ }
ul#requests-extra_browsers li > label {
display: none; }
ul#requests-extra_browsers table tr {
display: inline; }
#extra-browsers-setting {
border: 1px solid var(--color-grey-800);
border-radius: 4px;
margin: 1em;
padding: 1em; }
.pagination-page-info { .pagination-page-info {
color: #fff; color: #fff;
font-size: 0.85rem; font-size: 0.85rem;

View File

@@ -633,6 +633,18 @@ class ChangeDetectionStore:
return {} return {}
@property
def extra_browsers(self):
res = []
p = list(filter(
lambda s: (s.get('browser_name') and s.get('browser_connection_url')),
self.__data['settings']['requests'].get('extra_browsers', [])))
if p:
for i in p:
res.append(("extra_browser_"+i['browser_name'], i['browser_name']))
return res
def tag_exists_by_name(self, tag_name): def tag_exists_by_name(self, tag_name):
return any(v.get('title', '').lower() == tag_name.lower() for k, v in self.__data['settings']['application']['tags'].items()) return any(v.get('title', '').lower() == tag_name.lower() for k, v in self.__data['settings']['application']['tags'].items())
@@ -835,4 +847,14 @@ class ChangeDetectionStore:
if not watch.get('date_created'): if not watch.get('date_created'):
self.data['watching'][uuid]['date_created'] = i self.data['watching'][uuid]['date_created'] = i
i+=1 i+=1
return return
# #1774 - protect xpath1 against migration
def update_14(self):
for awatch in self.__data["watching"]:
if self.__data["watching"][awatch]['include_filters']:
for num, selector in enumerate(self.__data["watching"][awatch]['include_filters']):
if selector.startswith('/'):
self.__data["watching"][awatch]['include_filters'][num] = 'xpath1:' + selector
if selector.startswith('xpath:'):
self.__data["watching"][awatch]['include_filters'][num] = selector.replace('xpath:', 'xpath1:', 1)

View File

@@ -290,11 +290,12 @@ xpath://body/div/span[contains(@class, 'example-class')]",
{% endif %} {% endif %}
</ul> </ul>
</li> </li>
<li>XPath - Limit text to this XPath rule, simply start with a forward-slash, <li>XPath - Limit text to this XPath rule, simply start with a forward-slash. To specify XPath to be used explicitly or the XPath rule starts with an XPath function: Prefix with <code>xpath:</code>
<ul> <ul>
<li>Example: <code>//*[contains(@class, 'sametext')]</code> or <code>xpath://*[contains(@class, 'sametext')]</code>, <a <li>Example: <code>//*[contains(@class, 'sametext')]</code> or <code>xpath:count(//*[contains(@class, 'sametext')])</code>, <a
href="http://xpather.com/" target="new">test your XPath here</a></li> href="http://xpather.com/" target="new">test your XPath here</a></li>
<li>Example: Get all titles from an RSS feed <code>//title/text()</code></li> <li>Example: Get all titles from an RSS feed <code>//title/text()</code></li>
<li>To use XPath1.0: Prefix with <code>xpath1:</code></li>
</ul> </ul>
</li> </li>
</ul> </ul>

View File

@@ -178,6 +178,9 @@ nav
<span style="display:none;" id="api-key-copy" >copy</span> <span style="display:none;" id="api-key-copy" >copy</span>
</div> </div>
</div> </div>
<div class="pure-control-group">
<a href="{{url_for('settings_reset_api_key')}}" class="pure-button button-small button-cancel">Regenerate API key</a>
</div>
</div> </div>
<div class="tab-pane-inner" id="proxies"> <div class="tab-pane-inner" id="proxies">
<div id="recommended-proxy"> <div id="recommended-proxy">
@@ -227,11 +230,15 @@ nav
</p> </p>
<p><strong>Tip</strong>: "Residential" and "Mobile" proxy type can be more successful than "Data Center" for blocked websites. <p><strong>Tip</strong>: "Residential" and "Mobile" proxy type can be more successful than "Data Center" for blocked websites.
<div class="pure-control-group"> <div class="pure-control-group" id="extra-proxies-setting">
{{ render_field(form.requests.form.extra_proxies) }} {{ render_field(form.requests.form.extra_proxies) }}
<span class="pure-form-message-inline">"Name" will be used for selecting the proxy in the Watch Edit settings</span><br> <span class="pure-form-message-inline">"Name" will be used for selecting the proxy in the Watch Edit settings</span><br>
<span class="pure-form-message-inline">SOCKS5 proxies with authentication are only supported with 'plain requests' fetcher, for other fetchers you should whitelist the IP access instead</span> <span class="pure-form-message-inline">SOCKS5 proxies with authentication are only supported with 'plain requests' fetcher, for other fetchers you should whitelist the IP access instead</span>
</div> </div>
<div class="pure-control-group" id="extra-browsers-setting">
<span class="pure-form-message-inline"><i>Extra Browsers</i> allow changedetection.io to communicate with a different web-browser.</span><br>
{{ render_field(form.requests.form.extra_browsers) }}
</div>
</div> </div>
<div id="actions"> <div id="actions">
<div class="pure-control-group"> <div class="pure-control-group">

View File

@@ -104,8 +104,9 @@
{% if watch.get_fetch_backend == "html_webdriver" {% if watch.get_fetch_backend == "html_webdriver"
or ( watch.get_fetch_backend == "system" and system_default_fetcher == 'html_webdriver' ) or ( watch.get_fetch_backend == "system" and system_default_fetcher == 'html_webdriver' )
or "extra_browser_" in watch.get_fetch_backend
%} %}
<img class="status-icon" src="{{url_for('static_content', group='images', filename='Google-Chrome-icon.png')}}" title="Using a chrome browser" > <img class="status-icon" src="{{url_for('static_content', group='images', filename='Google-Chrome-icon.png')}}" title="Using a Chrome browser" >
{% endif %} {% endif %}
{%if watch.is_pdf %}<img class="status-icon" src="{{url_for('static_content', group='images', filename='pdf-icon.svg')}}" title="Converting PDF to text" >{% endif %} {%if watch.is_pdf %}<img class="status-icon" src="{{url_for('static_content', group='images', filename='pdf-icon.svg')}}" title="Converting PDF to text" >{% endif %}

View File

@@ -0,0 +1 @@
# placeholder

View File

@@ -0,0 +1,89 @@
#!/usr/bin/python3
import os
from flask import url_for
from ..util import live_server_setup, wait_for_all_checks
def do_test(client, live_server, make_test_use_extra_browser=False):
    """Configure an extra browser connection and verify a watch can use it.

    Adds an "extra browser" entry pointing at the browserless-custom-url
    container (started in .github/workflows/test-only.yml), imports a test
    URL, and optionally switches the watch's fetch method over to that
    extra browser before forcing a recheck.

    Args:
        client: Flask test client fixture.
        live_server: Live Flask server fixture.
        make_test_use_extra_browser: When True, point the watch at the
            extra browser so its traffic goes through the custom container.
    """
    # Grep for this string in the logs?
    test_url = "https://changedetection.io/ci-test.html"
    custom_browser_name = 'custom browser URL'

    # needs to be set and something like 'ws://127.0.0.1:3000?stealth=1&--disable-web-security=true'
    assert os.getenv('PLAYWRIGHT_DRIVER_URL'), "Needs PLAYWRIGHT_DRIVER_URL set for this test"

    #####################
    res = client.post(
        url_for("settings_page"),
        data={"application-empty_pages_are_a_change": "",
              "requests-time_between_check-minutes": 180,
              'application-fetch_backend': "html_webdriver",
              # browserless-custom-url is setup in .github/workflows/test-only.yml
              # the test script run_custom_browser_url_test.sh will look for 'custom-browser-search-string' in the container logs
              'requests-extra_browsers-0-browser_connection_url': 'ws://browserless-custom-url:3000?stealth=1&--disable-web-security=true&custom-browser-search-string=1',
              'requests-extra_browsers-0-browser_name': custom_browser_name
              },
        follow_redirects=True
    )

    assert b"Settings updated." in res.data

    # Add our URL to the import page
    res = client.post(
        url_for("import_page"),
        data={"urls": test_url},
        follow_redirects=True
    )

    assert b"1 Imported" in res.data
    wait_for_all_checks(client)

    if make_test_use_extra_browser:
        # So the name should appear in the edit page under "Request" > "Fetch Method"
        res = client.get(
            url_for("edit_page", uuid="first"),
            follow_redirects=True
        )
        assert b'custom browser URL' in res.data

        res = client.post(
            url_for("edit_page", uuid="first"),
            data={
                "url": test_url,
                "tags": "",
                "headers": "",
                'fetch_backend': f"extra_browser_{custom_browser_name}",
                'webdriver_js_execute_code': ''
            },
            follow_redirects=True
        )
        assert b"Updated watch." in res.data
        wait_for_all_checks(client)

    # Force recheck
    res = client.get(url_for("form_watch_checknow"), follow_redirects=True)
    assert b'1 watches queued for rechecking.' in res.data

    wait_for_all_checks(client)

    res = client.get(
        url_for("preview_page", uuid="first"),
        follow_redirects=True
    )
    # The CI test page is expected to contain this marker text.
    assert b'cool it works' in res.data
# Requires playwright to be installed
def test_request_via_custom_browser_url(client, live_server):
    """The watch is switched to the extra browser, so the request should go via the custom container."""
    live_server_setup(live_server)
    # We do this so we can grep the logs of the custom container and see if the request actually went through that container
    do_test(client, live_server, make_test_use_extra_browser=True)
def test_request_not_via_custom_browser_url(client, live_server):
    """Control case: the watch keeps the default fetcher, so the custom container should NOT serve it."""
    live_server_setup(live_server)
    # Grep the custom container's logs afterwards to confirm this request did NOT go through that container
    do_test(client, live_server, make_test_use_extra_browser=False)

View File

@@ -227,9 +227,6 @@ def test_regex_error_handling(client, live_server):
follow_redirects=True follow_redirects=True
) )
with open('/tmp/fuck.html', 'wb') as f:
f.write(res.data)
assert b'is not a valid regular expression.' in res.data assert b'is not a valid regular expression.' in res.data
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)

View File

@@ -6,9 +6,11 @@ from .util import live_server_setup, wait_for_all_checks
from ..html_tools import * from ..html_tools import *
def test_setup(live_server): def test_setup(live_server):
live_server_setup(live_server) live_server_setup(live_server)
def set_original_response(): def set_original_response():
test_return_data = """<html> test_return_data = """<html>
<body> <body>
@@ -26,6 +28,7 @@ def set_original_response():
f.write(test_return_data) f.write(test_return_data)
return None return None
def set_modified_response(): def set_modified_response():
test_return_data = """<html> test_return_data = """<html>
<body> <body>
@@ -44,11 +47,12 @@ def set_modified_response():
return None return None
# Handle utf-8 charset replies https://github.com/dgtlmoon/changedetection.io/pull/613 # Handle utf-8 charset replies https://github.com/dgtlmoon/changedetection.io/pull/613
def test_check_xpath_filter_utf8(client, live_server): def test_check_xpath_filter_utf8(client, live_server):
filter='//item/*[self::description]' filter = '//item/*[self::description]'
d='''<?xml version="1.0" encoding="UTF-8"?> d = '''<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd" xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0"> <rss xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd" xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0">
<channel> <channel>
<title>rpilocator.com</title> <title>rpilocator.com</title>
@@ -102,9 +106,9 @@ def test_check_xpath_filter_utf8(client, live_server):
# Handle utf-8 charset replies https://github.com/dgtlmoon/changedetection.io/pull/613 # Handle utf-8 charset replies https://github.com/dgtlmoon/changedetection.io/pull/613
def test_check_xpath_text_function_utf8(client, live_server): def test_check_xpath_text_function_utf8(client, live_server):
filter='//item/title/text()' filter = '//item/title/text()'
d='''<?xml version="1.0" encoding="UTF-8"?> d = '''<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd" xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0"> <rss xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd" xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0">
<channel> <channel>
<title>rpilocator.com</title> <title>rpilocator.com</title>
@@ -163,15 +167,12 @@ def test_check_xpath_text_function_utf8(client, live_server):
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data assert b'Deleted' in res.data
def test_check_markup_xpath_filter_restriction(client, live_server):
def test_check_markup_xpath_filter_restriction(client, live_server):
xpath_filter = "//*[contains(@class, 'sametext')]" xpath_filter = "//*[contains(@class, 'sametext')]"
set_original_response() set_original_response()
# Give the endpoint time to spin up
time.sleep(1)
# Add our URL to the import page # Add our URL to the import page
test_url = url_for('test_endpoint', _external=True) test_url = url_for('test_endpoint', _external=True)
res = client.post( res = client.post(
@@ -214,7 +215,6 @@ def test_check_markup_xpath_filter_restriction(client, live_server):
def test_xpath_validation(client, live_server): def test_xpath_validation(client, live_server):
# Add our URL to the import page # Add our URL to the import page
test_url = url_for('test_endpoint', _external=True) test_url = url_for('test_endpoint', _external=True)
res = client.post( res = client.post(
@@ -235,6 +235,48 @@ def test_xpath_validation(client, live_server):
assert b'Deleted' in res.data assert b'Deleted' in res.data
def test_xpath23_prefix_validation(client, live_server):
    """A broken expression using the explicit 'xpath:' prefix is rejected by the edit form."""
    # Import a URL so there is a watch to edit
    test_url = url_for('test_endpoint', _external=True)
    res = client.post(url_for("import_page"), data={"urls": test_url}, follow_redirects=True)
    assert b"1 Imported" in res.data
    wait_for_all_checks(client)

    # Submit an invalid filter and expect the validation error back
    bad_filter_form = {
        "include_filters": "xpath:/something horrible",
        "url": test_url,
        "tags": "",
        "headers": "",
        'fetch_backend': "html_requests",
    }
    res = client.post(url_for("edit_page", uuid="first"), data=bad_filter_form, follow_redirects=True)
    assert b"is not a valid XPath expression" in res.data

    # Clean up
    res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
    assert b'Deleted' in res.data
def test_xpath1_validation(client, live_server):
    """A broken expression using the explicit 'xpath1:' prefix is rejected by the edit form."""
    # Import a URL so there is a watch to edit
    test_url = url_for('test_endpoint', _external=True)
    res = client.post(url_for("import_page"), data={"urls": test_url}, follow_redirects=True)
    assert b"1 Imported" in res.data
    wait_for_all_checks(client)

    # Submit an invalid XPath 1.0 filter and expect the validation error back
    bad_filter_form = {
        "include_filters": "xpath1:/something horrible",
        "url": test_url,
        "tags": "",
        "headers": "",
        'fetch_backend': "html_requests",
    }
    res = client.post(url_for("edit_page", uuid="first"), data=bad_filter_form, follow_redirects=True)
    assert b"is not a valid XPath expression" in res.data

    # Clean up
    res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
    assert b'Deleted' in res.data
# actually only really used by the distll.io importer, but could be handy too # actually only really used by the distll.io importer, but could be handy too
def test_check_with_prefix_include_filters(client, live_server): def test_check_with_prefix_include_filters(client, live_server):
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
@@ -254,7 +296,8 @@ def test_check_with_prefix_include_filters(client, live_server):
res = client.post( res = client.post(
url_for("edit_page", uuid="first"), url_for("edit_page", uuid="first"),
data={"include_filters": "xpath://*[contains(@class, 'sametext')]", "url": test_url, "tags": "", "headers": "", 'fetch_backend': "html_requests"}, data={"include_filters": "xpath://*[contains(@class, 'sametext')]", "url": test_url, "tags": "", "headers": "",
'fetch_backend': "html_requests"},
follow_redirects=True follow_redirects=True
) )
@@ -266,13 +309,15 @@ def test_check_with_prefix_include_filters(client, live_server):
follow_redirects=True follow_redirects=True
) )
assert b"Some text thats the same" in res.data #in selector assert b"Some text thats the same" in res.data # in selector
assert b"Some text that will change" not in res.data #not in selector assert b"Some text that will change" not in res.data # not in selector
client.get(url_for("form_delete", uuid="all"), follow_redirects=True) client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
def test_various_rules(client, live_server): def test_various_rules(client, live_server):
# Just check these don't error # Just check these don't error
#live_server_setup(live_server) # live_server_setup(live_server)
with open("test-datastore/endpoint-content.txt", "w") as f: with open("test-datastore/endpoint-content.txt", "w") as f:
f.write("""<html> f.write("""<html>
<body> <body>
@@ -285,10 +330,11 @@ def test_various_rules(client, live_server):
<a href=''>some linky </a> <a href=''>some linky </a>
<a href=''>another some linky </a> <a href=''>another some linky </a>
<!-- related to https://github.com/dgtlmoon/changedetection.io/pull/1774 --> <!-- related to https://github.com/dgtlmoon/changedetection.io/pull/1774 -->
<input type="email" id="email" /> <input type="email" id="email" />
</body> </body>
</html> </html>
""") """)
test_url = url_for('test_endpoint', _external=True) test_url = url_for('test_endpoint', _external=True)
res = client.post( res = client.post(
url_for("import_page"), url_for("import_page"),
@@ -298,7 +344,6 @@ def test_various_rules(client, live_server):
assert b"1 Imported" in res.data assert b"1 Imported" in res.data
wait_for_all_checks(client) wait_for_all_checks(client)
for r in ['//div', '//a', 'xpath://div', 'xpath://a']: for r in ['//div', '//a', 'xpath://div', 'xpath://a']:
res = client.post( res = client.post(
url_for("edit_page", uuid="first"), url_for("edit_page", uuid="first"),
@@ -313,3 +358,153 @@ def test_various_rules(client, live_server):
assert b"Updated watch." in res.data assert b"Updated watch." in res.data
res = client.get(url_for("index")) res = client.get(url_for("index"))
assert b'fetch-error' not in res.data, f"Should not see errors after '{r} filter" assert b'fetch-error' not in res.data, f"Should not see errors after '{r} filter"
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data
def test_xpath_20(client, live_server):
    """Two selectors joined with '|' (union) should both appear in the preview.

    Uses bare (prefix-less) XPath, which is evaluated by the XPath 2.0+
    engine after #1774.
    """
    test_url = url_for('test_endpoint', _external=True)
    res = client.post(
        url_for("import_page"),
        data={"urls": test_url},
        follow_redirects=True
    )
    assert b"1 Imported" in res.data
    wait_for_all_checks(client)

    set_original_response()

    # Note: the duplicate `test_url = url_for(...)` reassignment from the
    # original was removed - url_for yields the same value for the same args.
    res = client.post(
        url_for("edit_page", uuid="first"),
        data={"include_filters": "//*[contains(@class, 'sametext')]|//*[contains(@class, 'changetext')]",
              "url": test_url,
              "tags": "",
              "headers": "",
              'fetch_backend': "html_requests"},
        follow_redirects=True
    )
    assert b"Updated watch." in res.data
    wait_for_all_checks(client)

    res = client.get(
        url_for("preview_page", uuid="first"),
        follow_redirects=True
    )

    # Both branches of the union must be selected
    assert b"Some text thats the same" in res.data  # in selector
    assert b"Some text that will change" in res.data  # in selector

    client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
def test_xpath_20_function_count(client, live_server):
    """An 'xpath:'-prefixed expression calling count() returns a numeric result rendered as text."""
    set_original_response()

    # Add our URL to the import page
    test_url = url_for('test_endpoint', _external=True)
    res = client.post(
        url_for("import_page"),
        data={"urls": test_url},
        follow_redirects=True
    )
    assert b"1 Imported" in res.data
    wait_for_all_checks(client)

    # The big multiplier also exercises large-integer handling in the result
    res = client.post(
        url_for("edit_page", uuid="first"),
        data={"include_filters": "xpath:count(//div) * 123456789987654321",
              "url": test_url,
              "tags": "",
              "headers": "",
              'fetch_backend': "html_requests"},
        follow_redirects=True
    )
    assert b"Updated watch." in res.data
    wait_for_all_checks(client)

    res = client.get(
        url_for("preview_page", uuid="first"),
        follow_redirects=True
    )

    # 246913579975308642 == 2 * 123456789987654321, i.e. the fixture
    # presumably contains two <div> elements - verify against set_original_response()
    assert b"246913579975308642" in res.data  # in selector

    client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
def test_xpath_20_function_count2(client, live_server):
    """A bare (prefix-less) path expression ending in a function call is evaluated by the XPath 2.0+ engine."""
    set_original_response()

    # Add our URL to the import page
    test_url = url_for('test_endpoint', _external=True)
    res = client.post(
        url_for("import_page"),
        data={"urls": test_url},
        follow_redirects=True
    )
    assert b"1 Imported" in res.data
    wait_for_all_checks(client)

    # Same count as test_xpath_20_function_count but written as a path step
    res = client.post(
        url_for("edit_page", uuid="first"),
        data={"include_filters": "/html/body/count(div) * 123456789987654321",
              "url": test_url,
              "tags": "",
              "headers": "",
              'fetch_backend': "html_requests"},
        follow_redirects=True
    )
    assert b"Updated watch." in res.data
    wait_for_all_checks(client)

    res = client.get(
        url_for("preview_page", uuid="first"),
        follow_redirects=True
    )

    # 246913579975308642 == 2 * 123456789987654321 - see the sibling count() test
    assert b"246913579975308642" in res.data  # in selector

    client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
def test_xpath_20_function_string_join_matches(client, live_server):
    """XPath 2.0+ functions string-join() and matches() work inside an 'xpath:' filter."""
    set_original_response()

    # Add our URL to the import page
    test_url = url_for('test_endpoint', _external=True)
    res = client.post(
        url_for("import_page"),
        data={"urls": test_url},
        follow_redirects=True
    )
    assert b"1 Imported" in res.data
    wait_for_all_checks(client)

    # Join two selected nodes with a distinctive separator so the output is unambiguous
    res = client.post(
        url_for("edit_page", uuid="first"),
        data={
            "include_filters": "xpath:string-join(//*[contains(@class, 'sametext')]|//*[matches(@class, 'changetext')], 'specialconjunction')",
            "url": test_url,
            "tags": "",
            "headers": "",
            'fetch_backend': "html_requests"},
        follow_redirects=True
    )
    assert b"Updated watch." in res.data
    wait_for_all_checks(client)

    res = client.get(
        url_for("preview_page", uuid="first"),
        follow_redirects=True
    )

    # Both nodes' text, glued with the separator
    assert b"Some text thats the samespecialconjunctionSome text that will change" in res.data  # in selector

    client.get(url_for("form_delete", uuid="all"), follow_redirects=True)

View File

@@ -0,0 +1,203 @@
import sys
import os
import pytest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import html_tools
# test generation guide.
# 1. Do not include encoding in the xml declaration if the test object is a str type.
# 2. Always paraphrase test.
hotels = """
<hotel>
<branch location="California">
<staff>
<given_name>Christopher</given_name>
<surname>Anderson</surname>
<age>25</age>
</staff>
<staff>
<given_name>Christopher</given_name>
<surname>Carter</surname>
<age>30</age>
</staff>
</branch>
<branch location="Las Vegas">
<staff>
<given_name>Lisa</given_name>
<surname>Walker</surname>
<age>60</age>
</staff>
<staff>
<given_name>Jessica</given_name>
<surname>Walker</surname>
<age>32</age>
</staff>
<staff>
<given_name>Jennifer</given_name>
<surname>Roberts</surname>
<age>50</age>
</staff>
</branch>
</hotel>"""
@pytest.mark.parametrize("html_content", [hotels])
@pytest.mark.parametrize("xpath, answer", [('(//staff/given_name, //staff/age)', '25'),
("xs:date('2023-10-10')", '2023-10-10'),
("if (/hotel/branch[@location = 'California']/staff[1]/age = 25) then 'is 25' else 'is not 25'", 'is 25'),
("if (//hotel/branch[@location = 'California']/staff[1]/age = 25) then 'is 25' else 'is not 25'", 'is 25'),
("if (count(/hotel/branch/staff) = 5) then true() else false()", 'true'),
("if (count(//hotel/branch/staff) = 5) then true() else false()", 'true'),
("for $i in /hotel/branch/staff return if ($i/age >= 40) then upper-case($i/surname) else lower-case($i/surname)", 'anderson'),
("given_name = 'Christopher' and age = 40", 'false'),
("//given_name = 'Christopher' and //age = 40", 'false'),
#("(staff/given_name, staff/age)", 'Lisa'),
("(//staff/given_name, //staff/age)", 'Lisa'),
#("hotel/branch[@location = 'California']/staff/age union hotel/branch[@location = 'Las Vegas']/staff/age", ''),
("(//hotel/branch[@location = 'California']/staff/age union //hotel/branch[@location = 'Las Vegas']/staff/age)", '60'),
("(200 to 210)", "205"),
("(//hotel/branch[@location = 'California']/staff/age union //hotel/branch[@location = 'Las Vegas']/staff/age)", "50"),
("(1, 9, 9, 5)", "5"),
("(3, (), (14, 15), 92, 653)", "653"),
("for $i in /hotel/branch/staff return $i/given_name", "Christopher"),
("for $i in //hotel/branch/staff return $i/given_name", "Christopher"),
("distinct-values(for $i in /hotel/branch/staff return $i/given_name)", "Jessica"),
("distinct-values(for $i in //hotel/branch/staff return $i/given_name)", "Jessica"),
("for $i in (7 to 15) return $i*10", "130"),
("some $i in /hotel/branch/staff satisfies $i/age < 20", "false"),
("some $i in //hotel/branch/staff satisfies $i/age < 20", "false"),
("every $i in /hotel/branch/staff satisfies $i/age > 20", "true"),
("every $i in //hotel/branch/staff satisfies $i/age > 20 ", "true"),
("let $x := branch[@location = 'California'], $y := branch[@location = 'Las Vegas'] return (avg($x/staff/age), avg($y/staff/age))", "27.5"),
("let $x := //branch[@location = 'California'], $y := //branch[@location = 'Las Vegas'] return (avg($x/staff/age), avg($y/staff/age))", "27.5"),
("let $nu := 1, $de := 1000 return 'probability = ' || $nu div $de * 100 || '%'", "0.1%"),
("let $nu := 2, $probability := function ($argument) { 'probability = ' || $nu div $argument * 100 || '%'}, $de := 5 return $probability($de)", "40%"),
("'XPATH2.0-3.1 dissemination' instance of xs:string ", "true"),
("'new stackoverflow question incoming' instance of xs:integer ", "false"),
("'50000' cast as xs:integer", "50000"),
("//branch[@location = 'California']/staff[1]/surname eq 'Anderson'", "true"),
("fn:false()", "false")])
def test_hotels(html_content, xpath, answer):
    """Each XPath 2.0-3.1 expression over the hotels document must select the expected text."""
    html_content = html_tools.xpath_filter(xpath, html_content, append_pretty_line_formatting=True)
    # isinstance is the idiomatic type check (was: type(...) == str)
    assert isinstance(html_content, str)
    assert answer in html_content
branches_to_visit = """<?xml version="1.0" ?>
<branches_to_visit>
<manager name="Godot" room_no="501">
<branch>Area 51</branch>
<branch>A place with no name</branch>
<branch>Stalsk12</branch>
</manager>
<manager name="Freya" room_no="305">
<branch>Stalsk12</branch>
<branch>Barcelona</branch>
<branch>Paris</branch>
</manager>
</branches_to_visit>"""
@pytest.mark.parametrize("html_content", [branches_to_visit])
@pytest.mark.parametrize("xpath, answer", [
("manager[@name = 'Godot']/branch union manager[@name = 'Freya']/branch", "Area 51"),
("//manager[@name = 'Godot']/branch union //manager[@name = 'Freya']/branch", "Stalsk12"),
("manager[@name = 'Godot']/branch | manager[@name = 'Freya']/branch", "Stalsk12"),
("//manager[@name = 'Godot']/branch | //manager[@name = 'Freya']/branch", "Stalsk12"),
("manager/branch intersect manager[@name = 'Godot']/branch", "A place with no name"),
("//manager/branch intersect //manager[@name = 'Godot']/branch", "A place with no name"),
("manager[@name = 'Godot']/branch intersect manager[@name = 'Freya']/branch", ""),
("manager/branch except manager[@name = 'Godot']/branch", "Barcelona"),
("manager[@name = 'Godot']/branch[1] eq 'Area 51'", "true"),
("//manager[@name = 'Godot']/branch[1] eq 'Area 51'", "true"),
("manager[@name = 'Godot']/branch[1] eq 'Seoul'", "false"),
("//manager[@name = 'Godot']/branch[1] eq 'Seoul'", "false"),
("manager[@name = 'Godot']/branch[2] eq manager[@name = 'Freya']/branch[2]", "false"),
("//manager[@name = 'Godot']/branch[2] eq //manager[@name = 'Freya']/branch[2]", "false"),
("manager[1]/@room_no lt manager[2]/@room_no", "false"),
("//manager[1]/@room_no lt //manager[2]/@room_no", "false"),
("manager[1]/@room_no gt manager[2]/@room_no", "true"),
("//manager[1]/@room_no gt //manager[2]/@room_no", "true"),
("manager[@name = 'Godot']/branch[1] = 'Area 51'", "true"),
("//manager[@name = 'Godot']/branch[1] = 'Area 51'", "true"),
("manager[@name = 'Godot']/branch[1] = 'Seoul'", "false"),
("//manager[@name = 'Godot']/branch[1] = 'Seoul'", "false"),
("manager[@name = 'Godot']/branch = 'Area 51'", "true"),
("//manager[@name = 'Godot']/branch = 'Area 51'", "true"),
("manager[@name = 'Godot']/branch = 'Barcelona'", "false"),
("//manager[@name = 'Godot']/branch = 'Barcelona'", "false"),
("manager[1]/@room_no > manager[2]/@room_no", "true"),
("//manager[1]/@room_no > //manager[2]/@room_no", "true"),
("manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is manager[1]/branch[1]", "false"),
("//manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is //manager[1]/branch[1]", "false"),
("manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is manager[1]/branch[3]", "true"),
("//manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is //manager[1]/branch[3]", "true"),
("manager[@name = 'Godot']/branch[ . = 'Stalsk12'] << manager[1]/branch[1]", "false"),
("//manager[@name = 'Godot']/branch[ . = 'Stalsk12'] << //manager[1]/branch[1]", "false"),
("manager[@name = 'Godot']/branch[ . = 'Stalsk12'] >> manager[1]/branch[1]", "true"),
("//manager[@name = 'Godot']/branch[ . = 'Stalsk12'] >> //manager[1]/branch[1]", "true"),
("manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is manager[@name = 'Freya']/branch[ . = 'Stalsk12']", "false"),
("//manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is //manager[@name = 'Freya']/branch[ . = 'Stalsk12']", "false"),
("manager[1]/@name || manager[2]/@name", "GodotFreya"),
("//manager[1]/@name || //manager[2]/@name", "GodotFreya"),
])
def test_branches_to_visit(html_content, xpath, answer):
    """Each set/comparison/node-order XPath expression over the branches document must select the expected text."""
    html_content = html_tools.xpath_filter(xpath, html_content, append_pretty_line_formatting=True)
    # isinstance is the idiomatic type check (was: type(...) == str)
    assert isinstance(html_content, str)
    assert answer in html_content
trips = """
<trips>
<trip reservation_number="10">
<depart>2023-10-06</depart>
<arrive>2023-10-10</arrive>
<traveler name="Christopher Anderson">
<duration>4</duration>
<price>2000.00</price>
</traveler>
</trip>
<trip reservation_number="12">
<depart>2023-10-06</depart>
<arrive>2023-10-12</arrive>
<traveler name="Frank Carter">
<duration>6</duration>
<price>3500.34</price>
</traveler>
</trip>
</trips>"""
@pytest.mark.parametrize("html_content", [trips])
@pytest.mark.parametrize("xpath, answer", [
("1 + 9 * 9 + 5 div 5", "83"),
("(1 + 9 * 9 + 5) div 6", "14.5"),
("23 idiv 3", "7"),
("23 div 3", "7.66666666"),
("for $i in ./trip return $i/traveler/duration * $i/traveler/price", "21002.04"),
("for $i in ./trip return $i/traveler/duration ", "4"),
("for $i in .//trip return $i/traveler/duration * $i/traveler/price", "21002.04"),
("sum(for $i in ./trip return $i/traveler/duration * $i/traveler/price)", "29002.04"),
("sum(for $i in .//trip return $i/traveler/duration * $i/traveler/price)", "29002.04"),
#("trip[1]/depart - trip[1]/arrive", "fail_to_get_answer"),
#("//trip[1]/depart - //trip[1]/arrive", "fail_to_get_answer"),
#("trip[1]/depart + trip[1]/arrive", "fail_to_get_answer"),
#("xs:date(trip[1]/depart) + xs:date(trip[1]/arrive)", "fail_to_get_answer"),
("(//trip[1]/arrive cast as xs:date) - (//trip[1]/depart cast as xs:date)", "P4D"),
("(//trip[1]/depart cast as xs:date) - (//trip[1]/arrive cast as xs:date)", "-P4D"),
("(//trip[1]/depart cast as xs:date) + xs:dayTimeDuration('P3D')", "2023-10-09"),
("(//trip[1]/depart cast as xs:date) - xs:dayTimeDuration('P3D')", "2023-10-03"),
("(456, 623) instance of xs:integer", "false"),
("(456, 623) instance of xs:integer*", "true"),
("/trips/trip instance of element()", "false"),
("/trips/trip instance of element()*", "true"),
("/trips/trip[1]/arrive instance of xs:date", "false"),
("date(/trips/trip[1]/arrive) instance of xs:date", "true"),
("'8' cast as xs:integer", "8"),
("'11.1E3' cast as xs:double", "11100"),
("6.5 cast as xs:integer", "6"),
#("/trips/trip[1]/arrive cast as xs:dateTime", "fail_to_get_answer"),
("/trips/trip[1]/arrive cast as xs:date", "2023-10-10"),
("('2023-10-12') cast as xs:date", "2023-10-12"),
("for $i in //trip return concat($i/depart, ' ', $i/arrive)", "2023-10-06 2023-10-10"),
])
def test_trips(html_content, xpath, answer):
    """Each arithmetic/date/cast XPath expression over the trips document must select the expected text."""
    html_content = html_tools.xpath_filter(xpath, html_content, append_pretty_line_formatting=True)
    # isinstance is the idiomatic type check (was: type(...) == str)
    assert isinstance(html_content, str)
    assert answer in html_content

View File

@@ -46,6 +46,9 @@ beautifulsoup4
# XPath filtering, lxml is required by bs4 anyway, but put it here to be safe. # XPath filtering, lxml is required by bs4 anyway, but put it here to be safe.
lxml lxml
# XPath 2.0-3.1 support
elementpath
selenium~=4.14.0 selenium~=4.14.0
werkzeug~=3.0 werkzeug~=3.0