Mirror of https://github.com/dgtlmoon/changedetection.io.git (synced 2025-10-31 06:37:41 +00:00)

Compare commits: 8 commits, `browserles` ... `plugin-2nd`
| Author | SHA1 | Date |
|---|---|---|
|  | 70842193b0 |  |
|  | 0285d00f13 |  |
|  | f7f98945a2 |  |
|  | 5e2049c538 |  |
|  | 26931e0167 |  |
|  | 5229094e44 |  |
|  | 5a306aa78c |  |
|  | c8dcc072c8 |  |

.github/workflows/test-only.yml (12 changes, vendored)
							| @@ -30,7 +30,10 @@ jobs: | ||||
|  | ||||
|           # Selenium+browserless | ||||
|           docker run --network changedet-network -d --hostname selenium  -p 4444:4444 --rm --shm-size="2g"  selenium/standalone-chrome:4 | ||||
|           docker run --network changedet-network -d --hostname browserless -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" -e "DEFAULT_LAUNCH_ARGS=[\"--window-size=1920,1080\"]" --rm  -p 3000:3000  --shm-size="2g"  browserless/chrome:1.60-chrome-stable | ||||
|           docker run --network changedet-network -d --name browserless --hostname browserless -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" -e "DEFAULT_LAUNCH_ARGS=[\"--window-size=1920,1080\"]" --rm  -p 3000:3000  --shm-size="2g"  browserless/chrome:1.60-chrome-stable | ||||
|            | ||||
|           # For accessing custom browser tests | ||||
|           docker run --network changedet-network -d --name browserless-custom-url --hostname browserless-custom-url -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" -e "DEFAULT_LAUNCH_ARGS=[\"--window-size=1920,1080\"]" --rm --shm-size="2g"  browserless/chrome:1.60-chrome-stable | ||||
|  | ||||
|       - name: Build changedetection.io container for testing | ||||
|         run: |          | ||||
| @@ -48,6 +51,7 @@ jobs: | ||||
|         run: | | ||||
|           # Unit tests | ||||
|           docker run test-changedetectionio  bash -c 'python3 -m unittest changedetectionio.tests.unit.test_notification_diff' | ||||
|           docker run test-changedetectionio  bash -c 'python3 -m unittest changedetectionio.tests.unit.test_watch_model' | ||||
|            | ||||
|           # All tests | ||||
|           docker run --network changedet-network  test-changedetectionio  bash -c 'cd changedetectionio && ./run_basic_tests.sh' | ||||
| @@ -86,6 +90,12 @@ jobs: | ||||
|           # And again with PLAYWRIGHT_DRIVER_URL=.. | ||||
|           cd .. | ||||
|  | ||||
|       - name: Test custom browser URL | ||||
|         run: | | ||||
|           cd changedetectionio | ||||
|           ./run_custom_browser_url_tests.sh | ||||
|           cd .. | ||||
|  | ||||
|       - name: Test changedetection.io container starts+runs basically without error | ||||
|         run: | | ||||
|           docker run -p 5556:5000 -d test-changedetectionio | ||||
|   | ||||
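The workflow above brings up a second browserless container, `browserless-custom-url`, on the same `changedet-network` so the new custom-browser tests have a dedicated endpoint to point watches at. A minimal connectivity sketch, assuming Playwright is installed and the container is reachable under its network hostname (URL mirrors the `--hostname`/port values above):

```python
# Hedged sketch: confirm the custom browserless container accepts CDP
# connections; hostname/port are taken from the workflow above.
from playwright.sync_api import sync_playwright

with sync_playwright() as p:
    browser = p.chromium.connect_over_cdp("ws://browserless-custom-url:3000", timeout=60000)
    page = browser.new_page()
    page.goto("https://example.com")
    print(page.title())  # sanity check that the remote browser renders pages
    browser.close()
```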
| @@ -268,3 +268,7 @@ I offer commercial support, this software is depended on by network security, ae | ||||
| [license-shield]: https://img.shields.io/github/license/dgtlmoon/changedetection.io.svg?style=for-the-badge | ||||
| [release-link]: https://github.com/dgtlmoon/changedetection.io/releases | ||||
| [docker-link]: https://hub.docker.com/r/dgtlmoon/changedetection.io | ||||
|  | ||||
| ## Third-party licenses | ||||
|  | ||||
| changedetectionio.html_tools.elementpath_tostring: Copyright (c), 2018-2021, SISSA (Scuola Internazionale Superiore di Studi Avanzati), Licensed under [MIT license](https://github.com/sissaschool/elementpath/blob/master/LICENSE) | ||||
|   | ||||
| @@ -16,6 +16,7 @@ import logging | ||||
| import os | ||||
| import pytz | ||||
| import queue | ||||
| import sys | ||||
| import threading | ||||
| import time | ||||
| import timeago | ||||
| @@ -80,6 +81,9 @@ csrf = CSRFProtect() | ||||
| csrf.init_app(app) | ||||
| notification_debug_log=[] | ||||
|  | ||||
| from pathlib import Path | ||||
| sys.path.append(os.path.join(Path.home(), 'changedetectionio-plugins')) | ||||
|  | ||||
| watch_api = Api(app, decorators=[csrf.exempt]) | ||||
|  | ||||
| def init_app_secret(datastore_path): | ||||
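The `sys.path.append` above makes anything dropped into `~/changedetectionio-plugins` importable by the application. Nothing in this diff imports a specific plugin yet, so the following is purely a hypothetical sketch of what that enables (the module name is an assumption):

```python
# Hypothetical: a file at ~/changedetectionio-plugins/my_plugin.py
# is now importable by name because its directory is on sys.path.
import importlib

try:
    my_plugin = importlib.import_module('my_plugin')  # assumed module name
except ImportError:
    my_plugin = None  # no plugin installed; the app carries on without it
```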
| @@ -614,6 +618,8 @@ def changedetection_app(config=None, datastore_o=None): | ||||
|         # For the form widget tag uuid lookup | ||||
|         form.tags.datastore = datastore # in _value | ||||
|  | ||||
|         for p in datastore.extra_browsers: | ||||
|             form.fetch_backend.choices.append(p) | ||||
|  | ||||
|         form.fetch_backend.choices.append(("system", 'System settings default')) | ||||
|  | ||||
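The loop above appends whatever `datastore.extra_browsers` yields to the fetch-backend radio choices. WTForms expects `(value, label)` pairs here, so each entry presumably looks like the sketch below; the `extra_browser_` value prefix is an assumption that matches the `startswith('extra_browser_')` checks added later in this diff:

```python
# Assumed shape of one datastore.extra_browsers entry (hypothetical name):
choice = ('extra_browser_my_custom_browser', 'my_custom_browser')
form.fetch_backend.choices.append(choice)  # shown alongside the built-in backends
```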
| @@ -714,7 +720,7 @@ def changedetection_app(config=None, datastore_o=None): | ||||
|             system_uses_webdriver = datastore.data['settings']['application']['fetch_backend'] == 'html_webdriver' | ||||
|  | ||||
|             is_html_webdriver = False | ||||
|             if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver': | ||||
|             if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver' or watch.get('fetch_backend', '').startswith('extra_browser_'): | ||||
|                 is_html_webdriver = True | ||||
|  | ||||
|             # Only works reliably with Playwright | ||||
| @@ -819,6 +825,16 @@ def changedetection_app(config=None, datastore_o=None): | ||||
|  | ||||
|         return output | ||||
|  | ||||
|     @app.route("/settings/reset-api-key", methods=['GET']) | ||||
|     @login_optionally_required | ||||
|     def settings_reset_api_key(): | ||||
|         import secrets | ||||
|         secret = secrets.token_hex(16) | ||||
|         datastore.data['settings']['application']['api_access_token'] = secret | ||||
|         datastore.needs_write_urgent = True | ||||
|         flash("API Key was regenerated.") | ||||
|         return redirect(url_for('settings_page')+'#api') | ||||
|  | ||||
|     @app.route("/import", methods=['GET', "POST"]) | ||||
|     @login_optionally_required | ||||
|     def import_page(): | ||||
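Regeneration takes effect immediately via `needs_write_urgent`. A quick way to verify it, assuming the instance runs locally on port 5000 and the API expects the token in the `x-api-key` header (as the existing API resources do):

```python
# Sketch: the old token should now be rejected, the regenerated one accepted.
import requests

r = requests.get('http://localhost:5000/api/v1/watch',
                 headers={'x-api-key': 'the-token-shown-in-settings'})
print(r.status_code)  # expect 200 with the regenerated key; the old key should fail
```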
| @@ -949,7 +965,7 @@ def changedetection_app(config=None, datastore_o=None): | ||||
|         # Read as binary and force decode as UTF-8 | ||||
|         # Windows may fail decode in python if we just use 'r' mode (chardet decode exception) | ||||
|         from_version = request.args.get('from_version') | ||||
|         from_version_index = -2 # second newest | ||||
|         from_version_index = -2  # second newest | ||||
|         if from_version and from_version in dates: | ||||
|             from_version_index = dates.index(from_version) | ||||
|         else: | ||||
| @@ -958,7 +974,7 @@ def changedetection_app(config=None, datastore_o=None): | ||||
|         try: | ||||
|             from_version_file_contents = watch.get_history_snapshot(dates[from_version_index]) | ||||
|         except Exception as e: | ||||
|             from_version_file_contents = "Unable to read to-version at index{}.\n".format(dates[from_version_index]) | ||||
|             from_version_file_contents = f"Unable to read to-version at index {dates[from_version_index]}.\n" | ||||
|  | ||||
|         to_version = request.args.get('to_version') | ||||
|         to_version_index = -1 | ||||
| @@ -977,7 +993,7 @@ def changedetection_app(config=None, datastore_o=None): | ||||
|         system_uses_webdriver = datastore.data['settings']['application']['fetch_backend'] == 'html_webdriver' | ||||
|  | ||||
|         is_html_webdriver = False | ||||
|         if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver': | ||||
|         if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver' or watch.get('fetch_backend', '').startswith('extra_browser_'): | ||||
|             is_html_webdriver = True | ||||
|  | ||||
|         password_enabled_and_share_is_off = False | ||||
| @@ -1031,7 +1047,7 @@ def changedetection_app(config=None, datastore_o=None): | ||||
|  | ||||
|  | ||||
|         is_html_webdriver = False | ||||
|         if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver': | ||||
|         if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver' or watch.get('fetch_backend', '').startswith('extra_browser_'): | ||||
|             is_html_webdriver = True | ||||
|  | ||||
|         # Never requested successfully, but we detected a fetch error | ||||
|   | ||||
| @@ -76,7 +76,7 @@ class Watch(Resource): | ||||
|         # Properties are not returned as a JSON, so add the required props manually | ||||
|         watch['history_n'] = watch.history_n | ||||
|         watch['last_changed'] = watch.last_changed | ||||
|  | ||||
|         watch['viewed'] = watch.viewed | ||||
|         return watch | ||||
|  | ||||
|     @auth.check_token | ||||
|   | ||||
| @@ -97,7 +97,7 @@ def construct_blueprint(datastore: ChangeDetectionStore): | ||||
|             proxy=proxy) | ||||
|  | ||||
|         # For test | ||||
|         #browsersteps_start_session['browserstepper'].action_goto_url(value="http://example.com?time="+str(time.time())) | ||||
|         #browsersteps_start_session['browserstepper'].action_goto_url(value="http://exbaseample.com?time="+str(time.time())) | ||||
|  | ||||
|         return browsersteps_start_session | ||||
|  | ||||
|   | ||||
| @@ -40,8 +40,8 @@ def construct_blueprint(datastore: ChangeDetectionStore): | ||||
|         contents = '' | ||||
|         now = time.time() | ||||
|         try: | ||||
|             update_handler = text_json_diff.perform_site_check(datastore=datastore) | ||||
|             changed_detected, update_obj, contents = update_handler.run(uuid, preferred_proxy=preferred_proxy, skip_when_checksum_same=False) | ||||
|             update_handler = text_json_diff.perform_site_check(datastore=datastore, watch_uuid=uuid) | ||||
|             update_handler.fetch_content() | ||||
|         # title, size is len contents not len xfer | ||||
|         except content_fetcher.Non200ErrorCodeReceived as e: | ||||
|             if e.status_code == 404: | ||||
|   | ||||
| @@ -69,11 +69,12 @@ xpath://body/div/span[contains(@class, 'example-class')]", | ||||
|                                 {% endif %} | ||||
|                             </ul> | ||||
|                         </li> | ||||
|                         <li>XPath - Limit text to this XPath rule, simply start with a forward-slash, | ||||
|                         <li>XPath - Limit text to this XPath rule, simply start with a forward-slash. To force XPath to be used explicitly, or when the XPath rule starts with an XPath function, prefix with <code>xpath:</code> | ||||
|                             <ul> | ||||
|                                 <li>Example:  <code>//*[contains(@class, 'sametext')]</code> or <code>xpath://*[contains(@class, 'sametext')]</code>, <a | ||||
|                                 <li>Example:  <code>//*[contains(@class, 'sametext')]</code> or <code>xpath:count(//*[contains(@class, 'sametext')])</code>, <a | ||||
|                                 href="http://xpather.com/" target="new">test your XPath here</a></li> | ||||
|                                 <li>Example: Get all titles from an RSS feed <code>//title/text()</code></li> | ||||
|                                 <li>To use XPath1.0: Prefix with <code>xpath1:</code></li> | ||||
|                             </ul> | ||||
|                             </li> | ||||
|                     </ul> | ||||
|   | ||||
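The `xpath:` prefix matters for expressions like `count(...)` because they return a number rather than a node-set, which only the XPath 2/3 engine handles; plain `/`-prefixed rules and the `xpath1:` prefix go through lxml's native XPath 1.0. A small illustration with `elementpath`, the library this changeset leans on:

```python
# Sketch: a function-style expression evaluated with the XPath 3 engine.
import elementpath
from elementpath.xpath3 import XPath3Parser
from lxml import html

tree = html.fromstring("<html><body><p class='sametext'>a</p><p class='sametext'>b</p></body></html>")
result = elementpath.select(tree, "count(//*[contains(@class, 'sametext')])", parser=XPath3Parser)
print(result)  # -> 2
```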
| @@ -96,6 +96,7 @@ class Fetcher(): | ||||
|     content = None | ||||
|     error = None | ||||
|     fetcher_description = "No description" | ||||
|     browser_connection_url = None | ||||
|     headers = {} | ||||
|     status_code = None | ||||
|     webdriver_js_execute_code = None | ||||
| @@ -251,14 +252,16 @@ class base_html_playwright(Fetcher): | ||||
|  | ||||
|     proxy = None | ||||
|  | ||||
|     def __init__(self, proxy_override=None): | ||||
|     def __init__(self, proxy_override=None, browser_connection_url=None): | ||||
|         super().__init__() | ||||
|         # .strip('"') is going to save someone a lot of time when they accidently wrap the env value | ||||
|  | ||||
|         self.browser_type = os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').strip('"') | ||||
|         self.command_executor = os.getenv( | ||||
|             "PLAYWRIGHT_DRIVER_URL", | ||||
|             'ws://playwright-chrome:3000' | ||||
|         ).strip('"') | ||||
|  | ||||
|         # .strip('"') is going to save someone a lot of time when they accidently wrap the env value | ||||
|         if not browser_connection_url: | ||||
|             self.browser_connection_url = os.getenv("PLAYWRIGHT_DRIVER_URL", 'ws://playwright-chrome:3000').strip('"') | ||||
|         else: | ||||
|             self.browser_connection_url = browser_connection_url | ||||
|  | ||||
|         # If any proxy settings are enabled, then we should setup the proxy object | ||||
|         proxy_args = {} | ||||
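With this change a fetcher can be pointed at a per-watch endpoint instead of the process-wide env var. A usage sketch (the wss URL is a placeholder for a commercial browser endpoint):

```python
# Sketch: an explicit browser_connection_url wins; otherwise the fetcher
# falls back to PLAYWRIGHT_DRIVER_URL or its built-in default.
default_fetcher = base_html_playwright()  # ws://playwright-chrome:3000 unless env overrides
custom_fetcher = base_html_playwright(browser_connection_url='wss://vendor.example/browser')  # placeholder URL
```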
| @@ -419,11 +422,7 @@ class base_html_playwright(Fetcher): | ||||
|             is_binary=False): | ||||
|  | ||||
|         # For now, USE_EXPERIMENTAL_PUPPETEER_FETCH is not supported by watches with BrowserSteps (for now!) | ||||
|         has_browser_steps = self.browser_steps and list(filter( | ||||
|                 lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'), | ||||
|                 self.browser_steps)) | ||||
|  | ||||
|         if not has_browser_steps and os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH'): | ||||
|         if not self.browser_steps and os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH'): | ||||
|             if strtobool(os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH')): | ||||
|                 # Temporary backup solution until we rewrite the playwright code | ||||
|                 return self.run_fetch_browserless_puppeteer( | ||||
| @@ -448,7 +447,7 @@ class base_html_playwright(Fetcher): | ||||
|             # Seemed to cause a connection Exception even tho I can see it connect | ||||
|             # self.browser = browser_type.connect(self.command_executor, timeout=timeout*1000) | ||||
|             # 60,000 connection timeout only | ||||
|             browser = browser_type.connect_over_cdp(self.command_executor, timeout=60000) | ||||
|             browser = browser_type.connect_over_cdp(self.browser_connection_url, timeout=60000) | ||||
|  | ||||
|             # SOCKS5 with authentication is not supported (yet) | ||||
|             # https://github.com/microsoft/playwright/issues/10567 | ||||
| @@ -508,7 +507,11 @@ class base_html_playwright(Fetcher): | ||||
|             self.status_code = response.status | ||||
|  | ||||
|             if self.status_code != 200 and not ignore_status_codes: | ||||
|                 raise Non200ErrorCodeReceived(url=url, status_code=self.status_code) | ||||
|  | ||||
|                 screenshot=self.page.screenshot(type='jpeg', full_page=True, | ||||
|                                      quality=int(os.getenv("PLAYWRIGHT_SCREENSHOT_QUALITY", 72))) | ||||
|  | ||||
|                 raise Non200ErrorCodeReceived(url=url, status_code=self.status_code, screenshot=screenshot) | ||||
|  | ||||
|             if len(self.page.content().strip()) == 0: | ||||
|                 context.close() | ||||
| @@ -559,8 +562,6 @@ class base_html_webdriver(Fetcher): | ||||
|     else: | ||||
|         fetcher_description = "WebDriver Chrome/Javascript" | ||||
|  | ||||
|     command_executor = '' | ||||
|  | ||||
|     # Configs for Proxy setup | ||||
|     # In the ENV vars, is prefixed with "webdriver_", so it is for example "webdriver_sslProxy" | ||||
|     selenium_proxy_settings_mappings = ['proxyType', 'ftpProxy', 'httpProxy', 'noProxy', | ||||
| @@ -568,12 +569,15 @@ class base_html_webdriver(Fetcher): | ||||
|                                         'socksProxy', 'socksVersion', 'socksUsername', 'socksPassword'] | ||||
|     proxy = None | ||||
|  | ||||
|     def __init__(self, proxy_override=None): | ||||
|     def __init__(self, proxy_override=None, browser_connection_url=None): | ||||
|         super().__init__() | ||||
|         from selenium.webdriver.common.proxy import Proxy as SeleniumProxy | ||||
|  | ||||
|         # .strip('"') is going to save someone a lot of time when they accidently wrap the env value | ||||
|         self.command_executor = os.getenv("WEBDRIVER_URL", 'http://browser-chrome:4444/wd/hub').strip('"') | ||||
|         if not browser_connection_url: | ||||
|             self.browser_connection_url = os.getenv("WEBDRIVER_URL", 'http://browser-chrome:4444/wd/hub').strip('"') | ||||
|         else: | ||||
|             self.browser_connection_url = browser_connection_url | ||||
|  | ||||
|         # If any proxy settings are enabled, then we should setup the proxy object | ||||
|         proxy_args = {} | ||||
| @@ -615,7 +619,7 @@ class base_html_webdriver(Fetcher): | ||||
|             options.proxy = self.proxy | ||||
|  | ||||
|         self.driver = webdriver.Remote( | ||||
|             command_executor=self.command_executor, | ||||
|             command_executor=self.browser_connection_url, | ||||
|             options=options) | ||||
|  | ||||
|         try: | ||||
| @@ -670,8 +674,10 @@ class base_html_webdriver(Fetcher): | ||||
| class html_requests(Fetcher): | ||||
|     fetcher_description = "Basic fast Plaintext/HTTP Client" | ||||
|  | ||||
|     def __init__(self, proxy_override=None): | ||||
|     def __init__(self, proxy_override=None, browser_connection_url=None): | ||||
|         super().__init__() | ||||
|         self.proxy_override = proxy_override | ||||
|         # browser_connection_url is None because it's always 'launched locally' | ||||
|  | ||||
|     def run(self, | ||||
|             url, | ||||
|   | ||||
| @@ -168,7 +168,9 @@ class ValidateContentFetcherIsReady(object): | ||||
|     def __call__(self, form, field): | ||||
|         import urllib3.exceptions | ||||
|         from changedetectionio import content_fetcher | ||||
|         return | ||||
|  | ||||
| # AttributeError: module 'changedetectionio.content_fetcher' has no attribute 'extra_browser_unlocked<>ASDF213r123r' | ||||
|         # Better would be a radiohandler that keeps a reference to each class | ||||
|         if field.data is not None and field.data != 'system': | ||||
|             klass = getattr(content_fetcher, field.data) | ||||
| @@ -326,11 +328,30 @@ class ValidateCSSJSONXPATHInput(object): | ||||
|                 return | ||||
|  | ||||
|             # Does it look like XPath? | ||||
|             if line.strip()[0] == '/': | ||||
|             if line.strip()[0] == '/' or line.strip().startswith('xpath:'): | ||||
|                 if not self.allow_xpath: | ||||
|                     raise ValidationError("XPath not permitted in this field!") | ||||
|                 from lxml import etree, html | ||||
|                 import elementpath | ||||
|                 # xpath 2.0-3.1 | ||||
|                 from elementpath.xpath3 import XPath3Parser | ||||
|                 tree = html.fromstring("<html></html>") | ||||
|                 line = line.replace('xpath:', '') | ||||
|  | ||||
|                 try: | ||||
|                     elementpath.select(tree, line.strip(), parser=XPath3Parser) | ||||
|                 except elementpath.ElementPathError as e: | ||||
|                     message = field.gettext('\'%s\' is not a valid XPath expression. (%s)') | ||||
|                     raise ValidationError(message % (line, str(e))) | ||||
|                 except: | ||||
|                     raise ValidationError("A system-error occurred when validating your XPath expression") | ||||
|  | ||||
|             if line.strip().startswith('xpath1:'): | ||||
|                 if not self.allow_xpath: | ||||
|                     raise ValidationError("XPath not permitted in this field!") | ||||
|                 from lxml import etree, html | ||||
|                 tree = html.fromstring("<html></html>") | ||||
|                 line = re.sub(r'^xpath1:', '', line) | ||||
|  | ||||
|                 try: | ||||
|                     tree.xpath(line.strip()) | ||||
| @@ -496,6 +517,12 @@ class SingleExtraProxy(Form): | ||||
|     proxy_url = StringField('Proxy URL', [validators.Optional()], render_kw={"placeholder": "socks5:// or regular proxy http://user:pass@...:3128", "size":50}) | ||||
|     # @todo do the validation here instead | ||||
|  | ||||
| class SingleExtraBrowser(Form): | ||||
|     browser_name = StringField('Name', [validators.Optional()], render_kw={"placeholder": "Name"}) | ||||
|     browser_connection_url = StringField('Browser connection URL', [validators.Optional()], render_kw={"placeholder": "wss://brightdata... wss://oxylabs etc", "size":50}) | ||||
|     # @todo do the validation here instead | ||||
|  | ||||
|  | ||||
| # datastore.data['settings']['requests'].. | ||||
| class globalSettingsRequestForm(Form): | ||||
|     time_between_check = FormField(TimeBetweenCheckForm) | ||||
| @@ -504,6 +531,7 @@ class globalSettingsRequestForm(Form): | ||||
|                                   render_kw={"style": "width: 5em;"}, | ||||
|                                   validators=[validators.NumberRange(min=0, message="Should contain zero or more seconds")]) | ||||
|     extra_proxies = FieldList(FormField(SingleExtraProxy), min_entries=5) | ||||
|     extra_browsers = FieldList(FormField(SingleExtraBrowser), min_entries=5) | ||||
|  | ||||
|     def validate_extra_proxies(self, extra_validators=None): | ||||
|         for e in self.data['extra_proxies']: | ||||
|   | ||||
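Each saved row lands in `settings['requests']['extra_browsers']`, and the `browser_name` is what the `extra_browser_` backend key is split on in the processor changes further down. A hypothetical stored entry:

```python
# Hypothetical entry and the fetch-backend key derived from it:
entry = {'browser_name': 'mybrowser',
         'browser_connection_url': 'wss://mybrowser.example/endpoint'}  # placeholder URL
fetch_backend_key = 'extra_browser_' + entry['browser_name']  # -> 'extra_browser_mybrowser'
```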
| @@ -69,10 +69,89 @@ def element_removal(selectors: List[str], html_content): | ||||
|     selector = ",".join(selectors) | ||||
|     return subtractive_css_selector(selector, html_content) | ||||
|  | ||||
| def elementpath_tostring(obj): | ||||
|     """ | ||||
|     change elementpath.select results to string type | ||||
|     # The MIT License (MIT), Copyright (c), 2018-2021, SISSA (Scuola Internazionale Superiore di Studi Avanzati) | ||||
|     # https://github.com/sissaschool/elementpath/blob/dfcc2fd3d6011b16e02bf30459a7924f547b47d0/elementpath/xpath_tokens.py#L1038 | ||||
|     """ | ||||
|  | ||||
|     import elementpath | ||||
|     from decimal import Decimal | ||||
|     import math | ||||
|  | ||||
|     if obj is None: | ||||
|         return '' | ||||
|     # https://elementpath.readthedocs.io/en/latest/xpath_api.html#elementpath.select | ||||
|     elif isinstance(obj, elementpath.XPathNode): | ||||
|         return obj.string_value | ||||
|     elif isinstance(obj, bool): | ||||
|         return 'true' if obj else 'false' | ||||
|     elif isinstance(obj, Decimal): | ||||
|         value = format(obj, 'f') | ||||
|         if '.' in value: | ||||
|             return value.rstrip('0').rstrip('.') | ||||
|         return value | ||||
|  | ||||
|     elif isinstance(obj, float): | ||||
|         if math.isnan(obj): | ||||
|             return 'NaN' | ||||
|         elif math.isinf(obj): | ||||
|             return str(obj).upper() | ||||
|  | ||||
|         value = str(obj) | ||||
|         if '.' in value: | ||||
|             value = value.rstrip('0').rstrip('.') | ||||
|         if '+' in value: | ||||
|             value = value.replace('+', '') | ||||
|         if 'e' in value: | ||||
|             return value.upper() | ||||
|         return value | ||||
|  | ||||
|     return str(obj) | ||||
|  | ||||
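In short, `elementpath_tostring` normalises the mixed scalar types that `elementpath.select()` can return into the strings the diff pipeline expects. A few spot checks of the behaviour implemented above:

```python
# Spot checks of the normalisation rules above:
from decimal import Decimal

print(elementpath_tostring(True))             # 'true'
print(elementpath_tostring(Decimal('2.50')))  # '2.5'  (trailing zeros stripped)
print(elementpath_tostring(float('inf')))     # 'INF'
print(elementpath_tostring(None))             # ''
```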
| # Return str Utf-8 of matched rules | ||||
| def xpath_filter(xpath_filter, html_content, append_pretty_line_formatting=False, is_rss=False): | ||||
|     from lxml import etree, html | ||||
|     import elementpath | ||||
|     # xpath 2.0-3.1 | ||||
|     from elementpath.xpath3 import XPath3Parser | ||||
|  | ||||
|     parser = etree.HTMLParser() | ||||
|     if is_rss: | ||||
|         # So that we can keep CDATA for cdata_in_document_to_text() to process | ||||
|         parser = etree.XMLParser(strip_cdata=False) | ||||
|  | ||||
|     tree = html.fromstring(bytes(html_content, encoding='utf-8'), parser=parser) | ||||
|     html_block = "" | ||||
|  | ||||
|     r = elementpath.select(tree, xpath_filter.strip(), namespaces={'re': 'http://exslt.org/regular-expressions'}, parser=XPath3Parser) | ||||
|     # @note: //title/text() won't work where <title> contains CDATA.. | ||||
|  | ||||
|     if type(r) != list: | ||||
|         r = [r] | ||||
|  | ||||
|     for element in r: | ||||
|         # When there's more than 1 match, then add the suffix to separate each line | ||||
|         # And where the matched result doesn't include something that will cause Inscriptis to add a newline | ||||
|         # (This way each 'match' reliably has a new-line in the diff) | ||||
|         # Divs are converted to 4 whitespaces by inscriptis | ||||
|         if append_pretty_line_formatting and len(html_block) and (not hasattr( element, 'tag' ) or not element.tag in (['br', 'hr', 'div', 'p'])): | ||||
|             html_block += TEXT_FILTER_LIST_LINE_SUFFIX | ||||
|  | ||||
|         if type(element) == str: | ||||
|             html_block += element | ||||
|         elif issubclass(type(element), etree._Element) or issubclass(type(element), etree._ElementTree): | ||||
|             html_block += etree.tostring(element, pretty_print=True).decode('utf-8') | ||||
|         else: | ||||
|             html_block += elementpath_tostring(element) | ||||
|  | ||||
|     return html_block | ||||
|  | ||||
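Because the XPath 3 engine can return scalars as well as nodes, the rewritten `xpath_filter` routes plain strings, lxml elements and everything else through `elementpath_tostring`. A usage sketch:

```python
# Usage sketch of the XPath 3.1 filter above:
doc = "<html><body><p>one</p><p>two</p></body></html>"
print(xpath_filter('//p', doc))         # serialised <p> elements
print(xpath_filter('count(//p)', doc))  # '2', a scalar via elementpath_tostring
```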
| # Return str Utf-8 of matched rules | ||||
| # 'xpath1:' | ||||
| def xpath1_filter(xpath_filter, html_content, append_pretty_line_formatting=False, is_rss=False): | ||||
|     from lxml import etree, html | ||||
|  | ||||
|     parser = None | ||||
|     if is_rss: | ||||
|   | ||||
| @@ -16,6 +16,7 @@ class model(dict): | ||||
|                 }, | ||||
|                 'requests': { | ||||
|                     'extra_proxies': [], # Configurable extra proxies via the UI | ||||
|                     'extra_browsers': [],  # Configurable extra browsers via the UI | ||||
|                     'jitter_seconds': 0, | ||||
|                     'proxy': None, # Preferred proxy connection | ||||
|                     'time_between_check': {'weeks': None, 'days': None, 'hours': 3, 'minutes': None, 'seconds': None}, | ||||
|   | ||||
| @@ -19,6 +19,7 @@ from changedetectionio.notification import ( | ||||
|  | ||||
| base_config = { | ||||
|     'body': None, | ||||
|     'browser_steps': [], | ||||
|     'browser_steps_last_error_step': None, | ||||
|     'check_unique_lines': False,  # On change-detected, compare against all history if it's something new | ||||
|     'check_count': 0, | ||||
| @@ -145,8 +146,14 @@ class model(dict): | ||||
|                 flash(message, 'error') | ||||
|                 return '' | ||||
|  | ||||
|         if ready_url.startswith('source:'): | ||||
|             ready_url=ready_url.replace('source:', '') | ||||
|         return ready_url | ||||
|  | ||||
|     @property | ||||
|     def is_source_type_url(self): | ||||
|         return self.get('url', '').startswith('source:') | ||||
|  | ||||
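Taken together, a `source:`-prefixed URL keeps its marker in the stored watch but has it stripped from the effective request URL (presumably in the `link` property this hunk edits), while `is_source_type_url` lets the processors keep treating the response as raw page source:

```python
# Behaviour sketch implied by the two changes above:
watch['url'] = 'source:https://example.com'
print(watch.is_source_type_url)  # True
# watch.link resolves to 'https://example.com' (prefix stripped via ready_url)
```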
|     @property | ||||
|     def get_fetch_backend(self): | ||||
|         """ | ||||
| @@ -234,6 +241,14 @@ class model(dict): | ||||
|         fname = os.path.join(self.watch_data_dir, "history.txt") | ||||
|         return os.path.isfile(fname) | ||||
|  | ||||
|     @property | ||||
|     def has_browser_steps(self): | ||||
|         has_browser_steps = self.get('browser_steps') and list(filter( | ||||
|                 lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'), | ||||
|                 self.get('browser_steps'))) | ||||
|  | ||||
|         return has_browser_steps | ||||
|  | ||||
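Placeholder rows (`Choose one`) and the implicit `Goto site` step don't count as real steps, so `has_browser_steps` is only truthy when a watch has something meaningful to replay:

```python
# Sketch of the filter above:
watch['browser_steps'] = [{'operation': 'Goto site'}, {'operation': 'Choose one'}]
print(bool(watch.has_browser_steps))  # False: nothing meaningful to replay
watch['browser_steps'].append({'operation': 'Click element', 'selector': 'button'})
print(bool(watch.has_browser_steps))  # True
```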
|     # Returns the newest key, but if there's only 1 record, then it's counted as not being new, so return 0. | ||||
|     @property | ||||
|     def newest_history_key(self): | ||||
| @@ -247,6 +262,38 @@ class model(dict): | ||||
|         bump = self.history | ||||
|         return self.__newest_history_key | ||||
|  | ||||
|     # Given an arbitrary timestamp, find the closest next key | ||||
|     # For example, if last_viewed = 1000, return the next snapshot after it (e.g. 1001) | ||||
|     # | ||||
|     # used for the [diff] button so it can preset a smarter from_version | ||||
|     @property | ||||
|     def get_next_snapshot_key_to_last_viewed(self): | ||||
|  | ||||
|         """Unfortunately for now timestamp is stored as string key""" | ||||
|         keys = list(self.history.keys()) | ||||
|         if not keys: | ||||
|             return None | ||||
|  | ||||
|         last_viewed = int(self.get('last_viewed')) | ||||
|         prev_k = keys[0] | ||||
|         sorted_keys = sorted(keys, key=lambda x: int(x)) | ||||
|         sorted_keys.reverse() | ||||
|  | ||||
|         # When the 'last viewed' timestamp is greater than the newest snapshot, return second last | ||||
|         if last_viewed > int(sorted_keys[0]): | ||||
|             return sorted_keys[1] | ||||
|  | ||||
|         for k in sorted_keys: | ||||
|             if int(k) < last_viewed: | ||||
|                 if prev_k == sorted_keys[0]: | ||||
|                     # Return the second-last one so we don't recommend comparing a version against itself | ||||
|                     return sorted_keys[1] | ||||
|  | ||||
|                 return prev_k | ||||
|             prev_k = k | ||||
|  | ||||
|         return keys[0] | ||||
|  | ||||
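A worked example of the selection rule, assuming epoch-second string keys:

```python
# History keys, oldest..newest: '1000', '1010', '1020', '1030'
# last_viewed = 1005 -> '1010'  (first snapshot newer than last_viewed)
# last_viewed = 1025 -> '1020'  (next would be the newest, which is already
#                                the to-version, so the second-newest is used)
# last_viewed = 1035 -> '1020'  (last_viewed is newer than everything)
```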
|     def get_history_snapshot(self, timestamp): | ||||
|         import brotli | ||||
|         filepath = self.history[timestamp] | ||||
|   | ||||
| @@ -1,15 +1,21 @@ | ||||
| from abc import abstractmethod | ||||
| import os | ||||
| import hashlib | ||||
| import re | ||||
| from changedetectionio import content_fetcher | ||||
| from copy import deepcopy | ||||
|  | ||||
| class difference_detection_processor_interface(): | ||||
|     browser_steps = None | ||||
|     datastore = None | ||||
|     fetcher = None | ||||
|     screenshot = None | ||||
|     watch = None | ||||
|     xpath_data = None | ||||
|  | ||||
| class difference_detection_processor(): | ||||
|  | ||||
|  | ||||
|     def __init__(self, *args, **kwargs): | ||||
|         super().__init__(*args, **kwargs) | ||||
|  | ||||
|     @abstractmethod | ||||
|     def run(self, uuid, skip_when_checksum_same=True, preferred_proxy=None): | ||||
|     def run_changedetection(self, uuid, skip_when_checksum_same=True): | ||||
|         update_obj = {'last_notification_error': False, 'last_error': False} | ||||
|         some_data = 'xxxxx' | ||||
|         update_obj["previous_md5"] = hashlib.md5(some_data.encode('utf-8')).hexdigest() | ||||
| @@ -17,6 +23,95 @@ class difference_detection_processor(): | ||||
|         return changed_detected, update_obj, ''.encode('utf-8') | ||||
|  | ||||
|  | ||||
| class text_content_difference_detection_processor(difference_detection_processor_interface): | ||||
|  | ||||
|     def __init__(self, *args, datastore, watch_uuid, prefer_fetch_backend, **kwargs): | ||||
|         self.datastore = datastore | ||||
|         self.watch = deepcopy(self.datastore.data['watching'].get(watch_uuid)) | ||||
|         self.prefer_fetch_backend = prefer_fetch_backend | ||||
|         super().__init__(*args, **kwargs) | ||||
|  | ||||
|         ######################################## | ||||
|         # Attach the correct fetcher and proxy # | ||||
|         ######################################## | ||||
|         # Grab the right kind of 'fetcher', (playwright, requests, etc) | ||||
|         if hasattr(content_fetcher, self.prefer_fetch_backend): | ||||
|             fetcher_obj = getattr(content_fetcher, self.prefer_fetch_backend) | ||||
|         else: | ||||
|             # If the klass doesn't exist, just use a default | ||||
|             fetcher_obj = getattr(content_fetcher, "html_requests") | ||||
|  | ||||
|         # Proxy ID "key" | ||||
|         preferred_proxy_id = self.datastore.get_preferred_proxy_for_watch(uuid=self.watch.get('uuid')) | ||||
|         proxy_url = None | ||||
|         if preferred_proxy_id: | ||||
|             proxy_url = self.datastore.proxy_list.get(preferred_proxy_id).get('url') | ||||
|             print(f"Using proxy Key: {preferred_proxy_id} as Proxy URL {proxy_url}") | ||||
|  | ||||
|         # Now call the fetcher (playwright/requests/etc) with arguments that only a fetcher would need. | ||||
|         # When browser_connection_url is None, the fetcher should work out the best default itself (OS env vars etc) | ||||
|         self.fetcher = fetcher_obj(proxy_override=proxy_url, | ||||
|                                    browser_connection_url=None # Default, let each fetcher work it out | ||||
|                                    ) | ||||
|  | ||||
|     def fetch_content(self): | ||||
|  | ||||
|         url = self.watch.link | ||||
|  | ||||
|         # In the case that the preferred fetcher was a browser config with custom connection URL.. | ||||
|         # @todo - on save watch, if it's extra_browser_ then it should be obvious it will use playwright (like if it's requests now..) | ||||
|         if self.prefer_fetch_backend.startswith('extra_browser_'): | ||||
|             (t, key) = self.prefer_fetch_backend.split('extra_browser_') | ||||
|             connection = list( | ||||
|                 filter(lambda s: (s['browser_name'] == key), self.datastore.data['settings']['requests'].get('extra_browsers', []))) | ||||
|             if connection: | ||||
|                 prefer_fetch_backend = 'base_html_playwright' | ||||
|                 browser_connection_url = connection[0].get('browser_connection_url') | ||||
|  | ||||
|         if self.watch.has_browser_steps: | ||||
|             self.fetcher.browser_steps = self.watch.get('browser_steps', []) | ||||
|             self.fetcher.browser_steps_screenshot_path = os.path.join(self.datastore.datastore_path, self.watch.get('uuid')) | ||||
|  | ||||
|         # Tweak the base config with the per-watch ones | ||||
|         request_headers = self.watch.get('headers', []) | ||||
|         request_headers.update(self.datastore.get_all_base_headers()) | ||||
|         request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=self.watch.get('uuid'))) | ||||
|  | ||||
|         # https://github.com/psf/requests/issues/4525 | ||||
|         # Requests doesnt yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot | ||||
|         # do this by accident. | ||||
|         if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']: | ||||
|             request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '') | ||||
|  | ||||
|         timeout = self.datastore.data['settings']['requests'].get('timeout') | ||||
|  | ||||
|         request_body = self.watch.get('body') | ||||
|         request_method = self.watch.get('method') | ||||
|         ignore_status_codes = self.watch.get('ignore_status_codes', False) | ||||
|  | ||||
|         # Configurable per-watch or global extra delay before extracting text (for webDriver types) | ||||
|         system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None) | ||||
|         if self.watch.get('webdriver_delay'): | ||||
|             self.fetcher.render_extract_delay = self.watch.get('webdriver_delay') | ||||
|         elif system_webdriver_delay is not None: | ||||
|             self.fetcher.render_extract_delay = system_webdriver_delay | ||||
|  | ||||
|         if self.watch.get('webdriver_js_execute_code') is not None and self.watch.get('webdriver_js_execute_code').strip(): | ||||
|             self.fetcher.webdriver_js_execute_code = self.watch.get('webdriver_js_execute_code') | ||||
|  | ||||
|         # Requests for PDFs, images etc should be passed the is_binary flag | ||||
|         is_binary = self.watch.is_pdf | ||||
|  | ||||
|         # And here we go! call the right browser with browser-specific settings | ||||
|         self.fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, self.watch.get('include_filters'), | ||||
|                     is_binary=is_binary) | ||||
|  | ||||
|         #@todo .quit here could go on close object, so we can run JS if change-detected | ||||
|         self.fetcher.quit() | ||||
|  | ||||
|         # After init, call run_changedetection() which will do the actual change-detection | ||||
|  | ||||
|  | ||||
| def available_processors(): | ||||
|     from . import restock_diff, text_json_diff | ||||
|     x=[('text_json_diff', text_json_diff.name), ('restock_diff', restock_diff.name)] | ||||
|   | ||||
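Together with the blueprint change earlier (`perform_site_check(datastore=..., watch_uuid=...)` followed by `fetch_content()`), this splits the old single `run()` into a fetch phase and a diff phase. A hedged sketch of the new lifecycle, using only names that appear in this diff (argument wiring such as `prefer_fetch_backend` may still differ in the final commit):

```python
# Sketch of the two-phase lifecycle introduced by this changeset:
handler = text_json_diff.perform_site_check(datastore=datastore, watch_uuid=uuid)
handler.fetch_content()  # attach fetcher/proxy, run browser steps, then fetcher.quit()
changed, update_obj, contents = handler.run_changedetection(uuid)
```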
| @@ -1,11 +1,9 @@ | ||||
|  | ||||
| import hashlib | ||||
| import os | ||||
| import re | ||||
| import urllib3 | ||||
| from . import difference_detection_processor | ||||
| from changedetectionio import content_fetcher | ||||
| #from . import browser_content_difference_detection_processor | ||||
| from copy import deepcopy | ||||
| from . import text_content_difference_detection_processor | ||||
|  | ||||
| urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) | ||||
|  | ||||
| @@ -18,15 +16,11 @@ class UnableToExtractRestockData(Exception): | ||||
|         self.status_code = status_code | ||||
|         return | ||||
|  | ||||
| class perform_site_check(difference_detection_processor): | ||||
| class perform_site_check(text_content_difference_detection_processor): | ||||
|     screenshot = None | ||||
|     xpath_data = None | ||||
|  | ||||
|     def __init__(self, *args, datastore, **kwargs): | ||||
|         super().__init__(*args, **kwargs) | ||||
|         self.datastore = datastore | ||||
|  | ||||
|     def run(self, uuid, skip_when_checksum_same=True): | ||||
|     def run_changedetection(self, uuid, skip_when_checksum_same=True): | ||||
|  | ||||
|         # DeepCopy so we can be sure we don't accidently change anything by reference | ||||
|         watch = deepcopy(self.datastore.data['watching'].get(uuid)) | ||||
| @@ -34,84 +28,24 @@ class perform_site_check(difference_detection_processor): | ||||
|         if not watch: | ||||
|             raise Exception("Watch no longer exists.") | ||||
|  | ||||
|         # Protect against file:// access | ||||
|         if re.search(r'^file', watch.get('url', ''), re.IGNORECASE) and not os.getenv('ALLOW_FILE_URI', False): | ||||
|             raise Exception( | ||||
|                 "file:// type access is denied for security reasons." | ||||
|             ) | ||||
|  | ||||
|         # Unset any existing notification error | ||||
|         update_obj = {'last_notification_error': False, 'last_error': False} | ||||
|  | ||||
|         request_headers = watch.get('headers', []) | ||||
|         request_headers.update(self.datastore.get_all_base_headers()) | ||||
|         request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=uuid)) | ||||
|  | ||||
|         # https://github.com/psf/requests/issues/4525 | ||||
|         # Requests doesn't yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot | ||||
|         # do this by accident. | ||||
|         if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']: | ||||
|             request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '') | ||||
|  | ||||
|         timeout = self.datastore.data['settings']['requests'].get('timeout') | ||||
|  | ||||
|         url = watch.link | ||||
|  | ||||
|         request_body = self.datastore.data['watching'][uuid].get('body') | ||||
|         request_method = self.datastore.data['watching'][uuid].get('method') | ||||
|         ignore_status_codes = self.datastore.data['watching'][uuid].get('ignore_status_codes', False) | ||||
|  | ||||
|         # Pluggable content fetcher | ||||
|         prefer_backend = watch.get_fetch_backend | ||||
|         if not prefer_backend or prefer_backend == 'system': | ||||
|             prefer_backend = self.datastore.data['settings']['application']['fetch_backend'] | ||||
|  | ||||
|         if hasattr(content_fetcher, prefer_backend): | ||||
|             klass = getattr(content_fetcher, prefer_backend) | ||||
|         else: | ||||
|             # If the klass doesn't exist, just use a default | ||||
|             klass = getattr(content_fetcher, "html_requests") | ||||
|  | ||||
|         proxy_id = self.datastore.get_preferred_proxy_for_watch(uuid=uuid) | ||||
|         proxy_url = None | ||||
|         if proxy_id: | ||||
|             proxy_url = self.datastore.proxy_list.get(proxy_id).get('url') | ||||
|             print("UUID {} Using proxy {}".format(uuid, proxy_url)) | ||||
|  | ||||
|         fetcher = klass(proxy_override=proxy_url) | ||||
|  | ||||
|         # Configurable per-watch or global extra delay before extracting text (for webDriver types) | ||||
|         system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None) | ||||
|         if watch['webdriver_delay'] is not None: | ||||
|             fetcher.render_extract_delay = watch.get('webdriver_delay') | ||||
|         elif system_webdriver_delay is not None: | ||||
|             fetcher.render_extract_delay = system_webdriver_delay | ||||
|  | ||||
|         # Could be removed if requests/plaintext could also return some info? | ||||
|         if prefer_backend != 'html_webdriver': | ||||
|             raise Exception("Re-stock detection requires Chrome or compatible webdriver/playwright fetcher to work") | ||||
|  | ||||
|         if watch.get('webdriver_js_execute_code') is not None and watch.get('webdriver_js_execute_code').strip(): | ||||
|             fetcher.webdriver_js_execute_code = watch.get('webdriver_js_execute_code') | ||||
|  | ||||
|         fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, watch.get('include_filters')) | ||||
|         fetcher.quit() | ||||
|  | ||||
|         self.screenshot = fetcher.screenshot | ||||
|         self.xpath_data = fetcher.xpath_data | ||||
|         self.screenshot = self.fetcher.screenshot | ||||
|         self.xpath_data = self.fetcher.xpath_data | ||||
|  | ||||
|         # Track the content type | ||||
|         update_obj['content_type'] = fetcher.headers.get('Content-Type', '') | ||||
|         update_obj["last_check_status"] = fetcher.get_last_status_code() | ||||
|         update_obj['content_type'] = self.fetcher.headers.get('Content-Type', '') | ||||
|         update_obj["last_check_status"] = self.fetcher.get_last_status_code() | ||||
|  | ||||
|         # Main detection method | ||||
|         fetched_md5 = None | ||||
|         if fetcher.instock_data: | ||||
|             fetched_md5 = hashlib.md5(fetcher.instock_data.encode('utf-8')).hexdigest() | ||||
|         if self.fetcher.instock_data: | ||||
|             fetched_md5 = hashlib.md5(self.fetcher.instock_data.encode('utf-8')).hexdigest() | ||||
|             # 'Possibly in stock' comes from stock-not-in-stock.js when no string found above the fold. | ||||
|             update_obj["in_stock"] = True if fetcher.instock_data == 'Possibly in stock' else False | ||||
|             update_obj["in_stock"] = True if self.fetcher.instock_data == 'Possibly in stock' else False | ||||
|         else: | ||||
|             raise UnableToExtractRestockData(status_code=fetcher.status_code) | ||||
|             raise UnableToExtractRestockData(status_code=self.fetcher.status_code) | ||||
|  | ||||
|         # The main thing that all this at the moment comes down to :) | ||||
|         changed_detected = False | ||||
| @@ -128,4 +62,4 @@ class perform_site_check(difference_detection_processor): | ||||
|         # Always record the new checksum | ||||
|         update_obj["previous_md5"] = fetched_md5 | ||||
|  | ||||
|         return changed_detected, update_obj, fetcher.instock_data.encode('utf-8') | ||||
|         return changed_detected, update_obj, self.fetcher.instock_data.encode('utf-8') | ||||
|   | ||||
| @@ -1,4 +1,4 @@ | ||||
| # HTML to TEXT/JSON DIFFERENCE FETCHER | ||||
| # HTML to TEXT/JSON DIFFERENCE self.fetcher | ||||
|  | ||||
| import hashlib | ||||
| import json | ||||
| @@ -10,8 +10,8 @@ import urllib3 | ||||
| from changedetectionio import content_fetcher, html_tools | ||||
| from changedetectionio.blueprint.price_data_follower import PRICE_DATA_TRACK_ACCEPT, PRICE_DATA_TRACK_REJECT | ||||
| from copy import deepcopy | ||||
| from . import difference_detection_processor | ||||
| from ..html_tools import PERL_STYLE_REGEX, cdata_in_document_to_text | ||||
| from . import text_content_difference_detection_processor | ||||
|  | ||||
| urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) | ||||
|  | ||||
| @@ -31,16 +31,11 @@ class PDFToHTMLToolNotFound(ValueError): | ||||
|  | ||||
| # Some common stuff here that can be moved to a base class | ||||
| # (set_proxy_from_list) | ||||
| class perform_site_check(difference_detection_processor): | ||||
|     screenshot = None | ||||
|     xpath_data = None | ||||
| class perform_site_check(text_content_difference_detection_processor): | ||||
|  | ||||
|     def __init__(self, *args, datastore, **kwargs): | ||||
|         super().__init__(*args, **kwargs) | ||||
|         self.datastore = datastore | ||||
|  | ||||
|     def run(self, uuid, skip_when_checksum_same=True, preferred_proxy=None): | ||||
|     def run_changedetection(self, uuid, skip_when_checksum_same=True): | ||||
|         changed_detected = False | ||||
|         html_content = "" | ||||
|         screenshot = False  # as bytes | ||||
|         stripped_text_from_html = "" | ||||
|  | ||||
| @@ -49,100 +44,25 @@ class perform_site_check(difference_detection_processor): | ||||
|         if not watch: | ||||
|             raise Exception("Watch no longer exists.") | ||||
|  | ||||
|         # Protect against file:// access | ||||
|         if re.search(r'^file', watch.get('url', ''), re.IGNORECASE) and not os.getenv('ALLOW_FILE_URI', False): | ||||
|             raise Exception( | ||||
|                 "file:// type access is denied for security reasons." | ||||
|             ) | ||||
|  | ||||
|         # Unset any existing notification error | ||||
|         update_obj = {'last_notification_error': False, 'last_error': False} | ||||
|  | ||||
|         # Tweak the base config with the per-watch ones | ||||
|         request_headers = watch.get('headers', []) | ||||
|         request_headers.update(self.datastore.get_all_base_headers()) | ||||
|         request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=uuid)) | ||||
|  | ||||
|         # https://github.com/psf/requests/issues/4525 | ||||
|         # Requests doesn't yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot | ||||
|         # do this by accident. | ||||
|         if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']: | ||||
|             request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '') | ||||
|  | ||||
|         timeout = self.datastore.data['settings']['requests'].get('timeout') | ||||
|  | ||||
|         url = watch.link | ||||
|  | ||||
|         request_body = self.datastore.data['watching'][uuid].get('body') | ||||
|         request_method = self.datastore.data['watching'][uuid].get('method') | ||||
|         ignore_status_codes = self.datastore.data['watching'][uuid].get('ignore_status_codes', False) | ||||
|  | ||||
|         # source: support | ||||
|         is_source = False | ||||
|         if url.startswith('source:'): | ||||
|             url = url.replace('source:', '') | ||||
|             is_source = True | ||||
|  | ||||
|         # Pluggable content fetcher | ||||
|         prefer_backend = watch.get_fetch_backend | ||||
|         if not prefer_backend or prefer_backend == 'system': | ||||
|             prefer_backend = self.datastore.data['settings']['application']['fetch_backend'] | ||||
|  | ||||
|         if hasattr(content_fetcher, prefer_backend): | ||||
|             klass = getattr(content_fetcher, prefer_backend) | ||||
|         else: | ||||
|             # If the klass doesn't exist, just use a default | ||||
|             klass = getattr(content_fetcher, "html_requests") | ||||
|  | ||||
|         if preferred_proxy: | ||||
|             proxy_id = preferred_proxy | ||||
|         else: | ||||
|             proxy_id = self.datastore.get_preferred_proxy_for_watch(uuid=uuid) | ||||
|  | ||||
|         proxy_url = None | ||||
|         if proxy_id: | ||||
|             proxy_url = self.datastore.proxy_list.get(proxy_id).get('url') | ||||
|             print("UUID {} Using proxy {}".format(uuid, proxy_url)) | ||||
|  | ||||
|         fetcher = klass(proxy_override=proxy_url) | ||||
|  | ||||
|         # Configurable per-watch or global extra delay before extracting text (for webDriver types) | ||||
|         system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None) | ||||
|         if watch['webdriver_delay'] is not None: | ||||
|             fetcher.render_extract_delay = watch.get('webdriver_delay') | ||||
|         elif system_webdriver_delay is not None: | ||||
|             fetcher.render_extract_delay = system_webdriver_delay | ||||
|  | ||||
|         # Possible conflict | ||||
|         if prefer_backend == 'html_webdriver': | ||||
|             fetcher.browser_steps = watch.get('browser_steps', None) | ||||
|             fetcher.browser_steps_screenshot_path = os.path.join(self.datastore.datastore_path, uuid) | ||||
|  | ||||
|         if watch.get('webdriver_js_execute_code') is not None and watch.get('webdriver_js_execute_code').strip(): | ||||
|             fetcher.webdriver_js_execute_code = watch.get('webdriver_js_execute_code') | ||||
|  | ||||
|         # requests for PDFs, images etc should be passed the is_binary flag | ||||
|         is_binary = watch.is_pdf | ||||
|  | ||||
|         fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, watch.get('include_filters'), | ||||
|                     is_binary=is_binary) | ||||
|         fetcher.quit() | ||||
|  | ||||
|         self.screenshot = fetcher.screenshot | ||||
|         self.xpath_data = fetcher.xpath_data | ||||
|         self.screenshot = self.fetcher.screenshot | ||||
|         self.xpath_data = self.fetcher.xpath_data | ||||
|  | ||||
|         # Track the content type | ||||
|         update_obj['content_type'] = fetcher.get_all_headers().get('content-type', '').lower() | ||||
|         update_obj['content_type'] = self.fetcher.get_all_headers().get('content-type', '').lower() | ||||
|  | ||||
|         # Watches added automatically in the queue manager will skip if it's the same checksum as the previous run | ||||
|         # Saves a lot of CPU | ||||
|         update_obj['previous_md5_before_filters'] = hashlib.md5(fetcher.content.encode('utf-8')).hexdigest() | ||||
|         update_obj['previous_md5_before_filters'] = hashlib.md5(self.fetcher.content.encode('utf-8')).hexdigest() | ||||
|         if skip_when_checksum_same: | ||||
|             if update_obj['previous_md5_before_filters'] == watch.get('previous_md5_before_filters'): | ||||
|                 raise content_fetcher.checksumFromPreviousCheckWasTheSame() | ||||
|  | ||||
|         # Fetching complete, now filters | ||||
|         # @todo move to class / maybe inside of fetcher abstract base? | ||||
|  | ||||
|         # @note: I feel like the following should be in a more obvious chain system | ||||
|         #  - Check filter text | ||||
| @@ -151,24 +71,24 @@ class perform_site_check(difference_detection_processor): | ||||
|         # https://stackoverflow.com/questions/41817578/basic-method-chaining ? | ||||
|         # return content().textfilter().jsonextract().checksumcompare() ? | ||||
|  | ||||
|         is_json = 'application/json' in fetcher.get_all_headers().get('content-type', '').lower() | ||||
|         is_json = 'application/json' in self.fetcher.get_all_headers().get('content-type', '').lower() | ||||
|         is_html = not is_json | ||||
|         is_rss = False | ||||
|  | ||||
|         ctype_header = fetcher.get_all_headers().get('content-type', '').lower() | ||||
|         ctype_header = self.fetcher.get_all_headers().get('content-type', '').lower() | ||||
|         # Go into RSS preprocess for converting CDATA/comment to usable text | ||||
|         if any(substring in ctype_header for substring in ['application/xml', 'application/rss', 'text/xml']): | ||||
|             if '<rss' in fetcher.content[:100].lower(): | ||||
|                 fetcher.content = cdata_in_document_to_text(html_content=fetcher.content) | ||||
|             if '<rss' in self.fetcher.content[:100].lower(): | ||||
|                 self.fetcher.content = cdata_in_document_to_text(html_content=self.fetcher.content) | ||||
|                 is_rss = True | ||||
|  | ||||
|         # source: support, basically treat it as plaintext | ||||
|         if is_source: | ||||
|         if watch.is_source_type_url: | ||||
|             is_html = False | ||||
|             is_json = False | ||||
|  | ||||
|         inline_pdf = fetcher.get_all_headers().get('content-disposition', '') and '%PDF-1' in fetcher.content[:10] | ||||
|         if watch.is_pdf or 'application/pdf' in fetcher.get_all_headers().get('content-type', '').lower() or inline_pdf: | ||||
|         inline_pdf = self.fetcher.get_all_headers().get('content-disposition', '') and '%PDF-1' in self.fetcher.content[:10] | ||||
|         if watch.is_pdf or 'application/pdf' in self.fetcher.get_all_headers().get('content-type', '').lower() or inline_pdf: | ||||
|             from shutil import which | ||||
|             tool = os.getenv("PDF_TO_HTML_TOOL", "pdftohtml") | ||||
|             if not which(tool): | ||||
| @@ -179,18 +99,18 @@ class perform_site_check(difference_detection_processor): | ||||
|                 [tool, '-stdout', '-', '-s', 'out.pdf', '-i'], | ||||
|                 stdout=subprocess.PIPE, | ||||
|                 stdin=subprocess.PIPE) | ||||
|             proc.stdin.write(fetcher.raw_content) | ||||
|             proc.stdin.write(self.fetcher.raw_content) | ||||
|             proc.stdin.close() | ||||
|             fetcher.content = proc.stdout.read().decode('utf-8') | ||||
|             self.fetcher.content = proc.stdout.read().decode('utf-8') | ||||
|             proc.wait(timeout=60) | ||||
|  | ||||
|             # Add a little metadata so we know if the file changes (like if an image changes, but the text is the same) | ||||
|             # @todo may cause problems with non-UTF8? | ||||
|             metadata = "<p>Added by changedetection.io: Document checksum - {} Filesize - {} bytes</p>".format( | ||||
|                 hashlib.md5(fetcher.raw_content).hexdigest().upper(), | ||||
|                 len(fetcher.content)) | ||||
|                 hashlib.md5(self.fetcher.raw_content).hexdigest().upper(), | ||||
|                 len(self.fetcher.content)) | ||||
|  | ||||
|             fetcher.content = fetcher.content.replace('</body>', metadata + '</body>') | ||||
|             self.fetcher.content = self.fetcher.content.replace('</body>', metadata + '</body>') | ||||
|  | ||||
|         # Better would be if Watch.model could access the global data also | ||||
|         # and then use getattr https://docs.python.org/3/reference/datamodel.html#object.__getitem__ | ||||
| @@ -217,7 +137,7 @@ class perform_site_check(difference_detection_processor): | ||||
|         if is_json: | ||||
|             # Sort the JSON so we don't get false alerts when the content is just re-ordered | ||||
|             try: | ||||
|                 fetcher.content = json.dumps(json.loads(fetcher.content), sort_keys=True) | ||||
|                 self.fetcher.content = json.dumps(json.loads(self.fetcher.content), sort_keys=True) | ||||
|             except Exception as e: | ||||
|                 # Might have just been a snippet, or otherwise bad JSON, continue | ||||
|                 pass | ||||
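Why `sort_keys=True` avoids false alerts: two JSON bodies that differ only in key order serialize to the same string once sorted, e.g.:

    import json

    a = '{"b": 2, "a": 1}'
    b = '{"a": 1, "b": 2}'
    assert a != b  # raw bodies differ byte-for-byte
    # ...but the normalised forms compare equal, so no change is detected
    assert json.dumps(json.loads(a), sort_keys=True) == json.dumps(json.loads(b), sort_keys=True)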
| @@ -225,22 +145,22 @@ class perform_site_check(difference_detection_processor): | ||||
|         if has_filter_rule: | ||||
|             for filter in include_filters_rule: | ||||
|                 if any(prefix in filter for prefix in json_filter_prefixes): | ||||
|                     stripped_text_from_html += html_tools.extract_json_as_string(content=fetcher.content, json_filter=filter) | ||||
|                     stripped_text_from_html += html_tools.extract_json_as_string(content=self.fetcher.content, json_filter=filter) | ||||
|                     is_html = False | ||||
|  | ||||
|         if is_html or is_source: | ||||
|         if is_html or watch.is_source_type_url: | ||||
|  | ||||
|             # CSS Filter, extract the HTML that matches and feed that into the existing inscriptis::get_text | ||||
|             fetcher.content = html_tools.workarounds_for_obfuscations(fetcher.content) | ||||
|             html_content = fetcher.content | ||||
|             self.fetcher.content = html_tools.workarounds_for_obfuscations(self.fetcher.content) | ||||
|             html_content = self.fetcher.content | ||||
|  | ||||
|             # If it's not JSON and not text/plain.. | ||||
|             if 'text/plain' in fetcher.get_all_headers().get('content-type', '').lower(): | ||||
|             if 'text/plain' in self.fetcher.get_all_headers().get('content-type', '').lower(): | ||||
|                 # Don't run get_text or xpath/css filters on plaintext | ||||
|                 stripped_text_from_html = html_content | ||||
|             else: | ||||
|                 # Does it have some ld+json price data? used for easier monitoring | ||||
|                 update_obj['has_ldjson_price_data'] = html_tools.has_ldjson_product_info(fetcher.content) | ||||
|                 update_obj['has_ldjson_price_data'] = html_tools.has_ldjson_product_info(self.fetcher.content) | ||||
|  | ||||
|                 # Then we assume HTML | ||||
|                 if has_filter_rule: | ||||
| @@ -250,14 +170,19 @@ class perform_site_check(difference_detection_processor): | ||||
|                         # For HTML/XML we offer xpath as an option, just start a regular XPath "/.." | ||||
|                         if filter_rule[0] == '/' or filter_rule.startswith('xpath:'): | ||||
|                             html_content += html_tools.xpath_filter(xpath_filter=filter_rule.replace('xpath:', ''), | ||||
|                                                                     html_content=fetcher.content, | ||||
|                                                                     append_pretty_line_formatting=not is_source, | ||||
|                                                                     html_content=self.fetcher.content, | ||||
|                                                                     append_pretty_line_formatting=not watch.is_source_type_url, | ||||
|                                                                     is_rss=is_rss) | ||||
|                         elif filter_rule.startswith('xpath1:'): | ||||
|                             html_content += html_tools.xpath1_filter(xpath_filter=filter_rule.replace('xpath1:', ''), | ||||
|                                                                     html_content=self.fetcher.content, | ||||
|                                                                     append_pretty_line_formatting=not watch.is_source_type_url, | ||||
|                                                                     is_rss=is_rss) | ||||
|                         else: | ||||
|                             # CSS Filter, extract the HTML that matches and feed that into the existing inscriptis::get_text | ||||
|                             html_content += html_tools.include_filters(include_filters=filter_rule, | ||||
|                                                                        html_content=fetcher.content, | ||||
|                                                                        append_pretty_line_formatting=not is_source) | ||||
|                                                                        html_content=self.fetcher.content, | ||||
|                                                                        append_pretty_line_formatting=not watch.is_source_type_url) | ||||
|  | ||||
|                     if not html_content.strip(): | ||||
|                         raise FilterNotFoundInResponse(include_filters_rule) | ||||
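The branch order above amounts to a prefix check: bare `/...` and `xpath:` rules go to the newer XPath engine, `xpath1:` to the legacy one, and anything else is treated as CSS. A minimal sketch of the routing, with the real `html_tools` helpers elided:

    def filter_kind(filter_rule):
        # Mirrors the branch order above; note 'xpath1:' does not match
        # the 'xpath:' prefix, so the elif is reached as intended.
        if filter_rule.startswith('/') or filter_rule.startswith('xpath:'):
            return 'xpath'
        elif filter_rule.startswith('xpath1:'):
            return 'xpath1'
        return 'css'

    assert filter_kind('//div') == 'xpath'
    assert filter_kind('xpath:count(//div)') == 'xpath'
    assert filter_kind('xpath1://div') == 'xpath1'
    assert filter_kind('.price') == 'css'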
| @@ -265,7 +190,7 @@ class perform_site_check(difference_detection_processor): | ||||
|                 if has_subtractive_selectors: | ||||
|                     html_content = html_tools.element_removal(subtractive_selectors, html_content) | ||||
|  | ||||
|                 if is_source: | ||||
|                 if watch.is_source_type_url: | ||||
|                     stripped_text_from_html = html_content | ||||
|                 else: | ||||
|                     # extract text | ||||
| @@ -311,7 +236,7 @@ class perform_site_check(difference_detection_processor): | ||||
|         empty_pages_are_a_change = self.datastore.data['settings']['application'].get('empty_pages_are_a_change', False) | ||||
|         if not is_json and not empty_pages_are_a_change and len(stripped_text_from_html.strip()) == 0: | ||||
|             raise content_fetcher.ReplyWithContentButNoText(url=url, | ||||
|                                                             status_code=fetcher.get_last_status_code(), | ||||
|                                                             status_code=self.fetcher.get_last_status_code(), | ||||
|                                                             screenshot=screenshot, | ||||
|                                                             has_filters=has_filter_rule, | ||||
|                                                             html_content=html_content | ||||
| @@ -320,7 +245,7 @@ class perform_site_check(difference_detection_processor): | ||||
|         # We rely on the actual text in the html output.. many sites have random script vars etc, | ||||
|         # in the future we'll implement other mechanisms. | ||||
|  | ||||
|         update_obj["last_check_status"] = fetcher.get_last_status_code() | ||||
|         update_obj["last_check_status"] = self.fetcher.get_last_status_code() | ||||
|  | ||||
|         # If there's text to skip | ||||
|         # @todo we could abstract out the get_text() to handle this cleaner | ||||
| @@ -408,7 +333,7 @@ class perform_site_check(difference_detection_processor): | ||||
|         if is_html: | ||||
|             if self.datastore.data['settings']['application'].get('extract_title_as_title') or watch['extract_title_as_title']: | ||||
|                 if not watch['title'] or not len(watch['title']): | ||||
|                     update_obj['title'] = html_tools.extract_element(find='title', html_content=fetcher.content) | ||||
|                     update_obj['title'] = html_tools.extract_element(find='title', html_content=self.fetcher.content) | ||||
|  | ||||
|         if changed_detected: | ||||
|             if watch.get('check_unique_lines', False): | ||||
|   | ||||
| @@ -170,9 +170,12 @@ if (include_filters.length) { | ||||
|  | ||||
|         try { | ||||
|             // is it xpath? | ||||
|             if (f.startsWith('/') || f.startsWith('xpath:')) { | ||||
|                 q = document.evaluate(f.replace('xpath:', ''), document, null, XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue; | ||||
|             if (f.startsWith('/') || f.startsWith('xpath')) { | ||||
|                 var qry_f = f.replace(/xpath(:|\d:)/, '') | ||||
|                 console.log("[xpath] Scanning for included filter " + qry_f) | ||||
|                 q = document.evaluate(qry_f, document, null, XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue; | ||||
|             } else { | ||||
|                 console.log("[css] Scanning for included filter " + f) | ||||
|                 q = document.querySelector(f); | ||||
|             } | ||||
|         } catch (e) { | ||||
| @@ -182,8 +185,18 @@ if (include_filters.length) { | ||||
|         } | ||||
|  | ||||
|         if (q) { | ||||
|             // Try to resolve //something/text() back to its /something so we can at least get the bounding box | ||||
|             try { | ||||
|                 if (typeof q.nodeName == 'string' && q.nodeName === '#text') { | ||||
|                     q = q.parentElement | ||||
|                 } | ||||
|             } catch (e) { | ||||
|                 console.log(e) | ||||
|                 console.log("xpath_element_scraper: #text resolver") | ||||
|             } | ||||
|  | ||||
|             // #1231 - In the case an XPath attribute filter is applied, we have to traverse up and find the element. | ||||
|             if (q.hasOwnProperty('getBoundingClientRect')) { | ||||
|             if (typeof q.getBoundingClientRect == 'function') { | ||||
|                 bbox = q.getBoundingClientRect(); | ||||
|                 console.log("xpath_element_scraper: Got filter element, scroll from top was " + scroll_y) | ||||
|             } else { | ||||
| @@ -192,7 +205,8 @@ if (include_filters.length) { | ||||
|                     bbox = q.ownerElement.getBoundingClientRect(); | ||||
|                     console.log("xpath_element_scraper: Got filter by ownerElement element, scroll from top was " + scroll_y) | ||||
|                 } catch (e) { | ||||
|                     console.log("xpath_element_scraper: error looking up ownerElement") | ||||
|                     console.log(e) | ||||
|                     console.log("xpath_element_scraper: error looking up q.ownerElement") | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
|   | ||||
changedetectionio/run_custom_browser_url_tests.sh (new executable file, 44 lines)
							| @@ -0,0 +1,44 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # Run some tests and check whether the 'custom-browser-search-string=1' connect string appeared in the correct container logs | ||||
|  | ||||
| # enable debug | ||||
| set -x | ||||
|  | ||||
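| # Note: grep exits 0 when the string is found and 1 when it is not found, | ||||
| # so '[ $? -ne 1 ]' below fails the test when the string unexpectedly appears (or when grep itself errors) | ||||
|  | ||||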
| # An extra browser is configured, but we never chose to use it, so it should NOT show in the logs | ||||
| docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio  bash -c 'cd changedetectionio;pytest tests/custom_browser_url/test_custom_browser_url.py::test_request_not_via_custom_browser_url' | ||||
| docker logs browserless-custom-url &>log.txt | ||||
| grep 'custom-browser-search-string=1' log.txt | ||||
| if [ $? -ne 1 ] | ||||
| then | ||||
|   echo "Saw a request in 'browserless-custom-url' container with 'custom-browser-search-string=1' when I should not" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| docker logs browserless &>log.txt | ||||
| grep 'custom-browser-search-string=1' log.txt | ||||
| if [ $? -ne 1 ] | ||||
| then | ||||
|   echo "Saw a request in 'browser' container with 'custom-browser-search-string=1' when I should not" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| # Special connect string should appear in the custom-url container, but not in the 'default' one | ||||
| docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio  bash -c 'cd changedetectionio;pytest tests/custom_browser_url/test_custom_browser_url.py::test_request_via_custom_browser_url' | ||||
| docker logs browserless-custom-url &>log.txt | ||||
| grep 'custom-browser-search-string=1' log.txt | ||||
| if [ $? -ne 0 ] | ||||
| then | ||||
|   echo "Did not see request in 'browserless-custom-url' container with 'custom-browser-search-string=1' when I should" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| docker logs browserless &>log.txt | ||||
| grep 'custom-browser-search-string=1' log.txt | ||||
| if [ $? -ne 1 ] | ||||
| then | ||||
|   echo "Saw a request in 'browser' container with 'custom-browser-search-string=1' when I should not" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
|  | ||||
| @@ -149,7 +149,7 @@ $(document).ready(function () { | ||||
|             // @todo In the future paint all that match | ||||
|             for (const c of current_default_xpath) { | ||||
|                 for (var i = selector_data['size_pos'].length; i !== 0; i--) { | ||||
|                     if (selector_data['size_pos'][i - 1].xpath === c) { | ||||
|                     if (selector_data['size_pos'][i - 1].xpath.trim() === c.trim()) { | ||||
|                         console.log("highlighting " + c); | ||||
|                         current_selected_i = i - 1; | ||||
|                         highlight_current_selected_i(); | ||||
|   | ||||
| @@ -0,0 +1,24 @@ | ||||
| ul#requests-extra_browsers { | ||||
|   list-style: none; | ||||
|   /* tidy up the table to look more "inline" */ | ||||
|   li { | ||||
|     > label { | ||||
|       display: none; | ||||
|     } | ||||
|  | ||||
|   } | ||||
|  | ||||
|   /* each extra browser entry is a `table` */ | ||||
|   table { | ||||
|     tr { | ||||
|       display: inline; | ||||
|     } | ||||
|   } | ||||
| } | ||||
|  | ||||
| #extra-browsers-setting { | ||||
|   border: 1px solid var(--color-grey-800); | ||||
|   border-radius: 4px; | ||||
|   margin: 1em; | ||||
|   padding: 1em; | ||||
| } | ||||
| @@ -60,3 +60,10 @@ body.proxy-check-active { | ||||
|  | ||||
|   padding-bottom: 1em; | ||||
| } | ||||
|  | ||||
| #extra-proxies-setting { | ||||
|   border: 1px solid var(--color-grey-800); | ||||
|   border-radius: 4px; | ||||
|   margin: 1em; | ||||
|   padding: 1em; | ||||
| } | ||||
|   | ||||
| @@ -5,6 +5,7 @@ | ||||
| @import "parts/_arrows"; | ||||
| @import "parts/_browser-steps"; | ||||
| @import "parts/_extra_proxies"; | ||||
| @import "parts/_extra_browsers"; | ||||
| @import "parts/_pagination"; | ||||
| @import "parts/_spinners"; | ||||
| @import "parts/_variables"; | ||||
|   | ||||
| @@ -128,6 +128,27 @@ body.proxy-check-active #request .proxy-timing { | ||||
|     border-radius: 4px; | ||||
|     padding: 1em; } | ||||
|  | ||||
| #extra-proxies-setting { | ||||
|   border: 1px solid var(--color-grey-800); | ||||
|   border-radius: 4px; | ||||
|   margin: 1em; | ||||
|   padding: 1em; } | ||||
|  | ||||
| ul#requests-extra_browsers { | ||||
|   list-style: none; | ||||
|   /* tidy up the table to look more "inline" */ | ||||
|   /* each extra browser entry is a `table` */ } | ||||
|   ul#requests-extra_browsers li > label { | ||||
|     display: none; } | ||||
|   ul#requests-extra_browsers table tr { | ||||
|     display: inline; } | ||||
|  | ||||
| #extra-browsers-setting { | ||||
|   border: 1px solid var(--color-grey-800); | ||||
|   border-radius: 4px; | ||||
|   margin: 1em; | ||||
|   padding: 1em; } | ||||
|  | ||||
| .pagination-page-info { | ||||
|   color: #fff; | ||||
|   font-size: 0.85rem; | ||||
|   | ||||
| @@ -633,6 +633,18 @@ class ChangeDetectionStore: | ||||
|  | ||||
|         return {} | ||||
|  | ||||
|     @property | ||||
|     def extra_browsers(self): | ||||
|         res = [] | ||||
|         p = list(filter( | ||||
|             lambda s: (s.get('browser_name') and s.get('browser_connection_url')), | ||||
|             self.__data['settings']['requests'].get('extra_browsers', []))) | ||||
|         if p: | ||||
|             for i in p: | ||||
|                 res.append(("extra_browser_"+i['browser_name'], i['browser_name'])) | ||||
|  | ||||
|         return res | ||||
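Each valid entry becomes a `(value, label)` pair with the value namespaced as `extra_browser_<name>`, which is the same string the watch's `fetch_backend` is later compared against; a quick sketch with a hypothetical settings payload:

    settings = [{'browser_name': 'stealth box',
                 'browser_connection_url': 'ws://browserless-custom-url:3000'},
                {'browser_name': '', 'browser_connection_url': 'ws://ignored:3000'}]
    choices = [('extra_browser_' + b['browser_name'], b['browser_name'])
               for b in settings
               if b.get('browser_name') and b.get('browser_connection_url')]
    assert choices == [('extra_browser_stealth box', 'stealth box')]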
|  | ||||
|     def tag_exists_by_name(self, tag_name): | ||||
|         return any(v.get('title', '').lower() == tag_name.lower() for k, v in self.__data['settings']['application']['tags'].items()) | ||||
|  | ||||
| @@ -835,4 +847,14 @@ class ChangeDetectionStore: | ||||
|             if not watch.get('date_created'): | ||||
|                 self.data['watching'][uuid]['date_created'] = i | ||||
|             i+=1 | ||||
|         return | ||||
|  | ||||
|     # #1774 - protect xpath1 against migration | ||||
|     def update_14(self): | ||||
|         for awatch in self.__data["watching"]: | ||||
|             if self.__data["watching"][awatch]['include_filters']: | ||||
|                 for num, selector in enumerate(self.__data["watching"][awatch]['include_filters']): | ||||
|                     if selector.startswith('/'): | ||||
|                         self.__data["watching"][awatch]['include_filters'][num] = 'xpath1:' + selector | ||||
|                     if selector.startswith('xpath:'): | ||||
|                         self.__data["watching"][awatch]['include_filters'][num] = selector.replace('xpath:', 'xpath1:', 1) | ||||
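The net effect of `update_14` is to pin every pre-existing XPath rule to the legacy 1.0 engine, so its behaviour does not silently change once `xpath:` starts meaning the newer XPath engine; applying the same rewrite rules to a sample filter list:

    before = ['/html/body//div', 'xpath://a[@href]', '.price']
    after = []
    for selector in before:
        if selector.startswith('/'):
            selector = 'xpath1:' + selector
        if selector.startswith('xpath:'):
            selector = selector.replace('xpath:', 'xpath1:', 1)
        after.append(selector)
    assert after == ['xpath1:/html/body//div', 'xpath1://a[@href]', '.price']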
|   | ||||
| @@ -290,11 +290,12 @@ xpath://body/div/span[contains(@class, 'example-class')]", | ||||
|                                 {% endif %} | ||||
|                             </ul> | ||||
|                         </li> | ||||
|                         <li>XPath - Limit text to this XPath rule, simply start with a forward-slash, | ||||
|                         <li>XPath - Limit text to this XPath rule, simply start with a forward-slash. To use XPath explicitly, or when the rule starts with an XPath function, prefix with <code>xpath:</code> | ||||
|                             <ul> | ||||
|                                 <li>Example:  <code>//*[contains(@class, 'sametext')]</code> or <code>xpath://*[contains(@class, 'sametext')]</code>, <a | ||||
|                                 <li>Example:  <code>//*[contains(@class, 'sametext')]</code> or <code>xpath:count(//*[contains(@class, 'sametext')])</code>, <a | ||||
|                                 href="http://xpather.com/" target="new">test your XPath here</a></li> | ||||
|                                 <li>Example: Get all titles from an RSS feed <code>//title/text()</code></li> | ||||
|                                 <li>To use XPath1.0: Prefix with <code>xpath1:</code></li> | ||||
|                             </ul> | ||||
|                             </li> | ||||
|                     </ul> | ||||
|   | ||||
| @@ -178,6 +178,9 @@ nav | ||||
|                         <span style="display:none;" id="api-key-copy" >copy</span> | ||||
|                     </div> | ||||
|                 </div> | ||||
|                 <div class="pure-control-group"> | ||||
|                     <a href="{{url_for('settings_reset_api_key')}}" class="pure-button button-small button-cancel">Regenerate API key</a> | ||||
|                 </div> | ||||
|             </div> | ||||
|             <div class="tab-pane-inner" id="proxies"> | ||||
|                 <div id="recommended-proxy"> | ||||
| @@ -227,11 +230,15 @@ nav | ||||
|                 </p> | ||||
|                <p><strong>Tip</strong>: "Residential" and "Mobile" proxy types can be more successful than "Data Center" for blocked websites. | ||||
|  | ||||
|                 <div class="pure-control-group"> | ||||
|                 <div class="pure-control-group" id="extra-proxies-setting"> | ||||
|                 {{ render_field(form.requests.form.extra_proxies) }} | ||||
|                 <span class="pure-form-message-inline">"Name" will be used for selecting the proxy in the Watch Edit settings</span><br> | ||||
|                 <span class="pure-form-message-inline">SOCKS5 proxies with authentication are only supported with 'plain requests' fetcher, for other fetchers you should whitelist the IP access instead</span> | ||||
|                 </div> | ||||
|                 <div class="pure-control-group" id="extra-browsers-setting"> | ||||
|                     <span class="pure-form-message-inline"><i>Extra Browsers</i> allow changedetection.io to communicate with a different web-browser.</span><br> | ||||
|                   {{ render_field(form.requests.form.extra_browsers) }} | ||||
|                 </div> | ||||
|             </div> | ||||
|             <div id="actions"> | ||||
|                 <div class="pure-control-group"> | ||||
|   | ||||
| @@ -82,12 +82,15 @@ | ||||
|             </tr> | ||||
|             {% endif %} | ||||
|             {% for watch in (watches|sort(attribute=sort_attribute, reverse=sort_order == 'asc'))|pagination_slice(skip=pagination.skip) %} | ||||
|  | ||||
|                 {% set is_unviewed =  watch.newest_history_key| int > watch.last_viewed and watch.history_n>=2 %} | ||||
|  | ||||
|             <tr id="{{ watch.uuid }}" | ||||
|                 class="{{ loop.cycle('pure-table-odd', 'pure-table-even') }} processor-{{ watch['processor'] }} | ||||
|                 {% if watch.last_error is defined and watch.last_error != False %}error{% endif %} | ||||
|                 {% if watch.last_notification_error is defined and watch.last_notification_error != False %}error{% endif %} | ||||
|                 {% if watch.paused is defined and watch.paused != False %}paused{% endif %} | ||||
|                 {% if watch.newest_history_key| int > watch.last_viewed and watch.history_n>=2 %}unviewed{% endif %} | ||||
|                 {% if is_unviewed %}unviewed{% endif %} | ||||
|                 {% if watch.uuid in queued_uuids %}queued{% endif %}"> | ||||
|                 <td class="inline checkbox-uuid" ><input name="uuids"  type="checkbox" value="{{ watch.uuid}} " > <span>{{ loop.index+pagination.skip }}</span></td> | ||||
|                 <td class="inline watch-controls"> | ||||
| @@ -104,8 +107,9 @@ | ||||
|  | ||||
|                     {% if watch.get_fetch_backend == "html_webdriver" | ||||
|                          or (  watch.get_fetch_backend == "system" and system_default_fetcher == 'html_webdriver'  ) | ||||
|                          or "extra_browser_" in watch.get_fetch_backend | ||||
|                     %} | ||||
|                     <img class="status-icon" src="{{url_for('static_content', group='images', filename='Google-Chrome-icon.png')}}" title="Using a chrome browser" > | ||||
|                     <img class="status-icon" src="{{url_for('static_content', group='images', filename='Google-Chrome-icon.png')}}" title="Using a Chrome browser" > | ||||
|                     {% endif %} | ||||
|  | ||||
|                     {%if watch.is_pdf  %}<img class="status-icon" src="{{url_for('static_content', group='images', filename='pdf-icon.svg')}}" title="Converting PDF to text" >{% endif %} | ||||
| @@ -166,7 +170,13 @@ | ||||
|                        class="recheck pure-button pure-button-primary">{% if watch.uuid in queued_uuids %}Queued{% else %}Recheck{% endif %}</a> | ||||
|                     <a href="{{ url_for('edit_page', uuid=watch.uuid)}}" class="pure-button pure-button-primary">Edit</a> | ||||
|                     {% if watch.history_n >= 2 %} | ||||
|                     <a href="{{ url_for('diff_history_page', uuid=watch.uuid) }}" target="{{watch.uuid}}" class="pure-button pure-button-primary diff-link">Diff</a> | ||||
|  | ||||
|                         {%  if is_unviewed %} | ||||
|                            <a href="{{ url_for('diff_history_page', uuid=watch.uuid, from_version=watch.get_next_snapshot_key_to_last_viewed) }}" target="{{watch.uuid}}" class="pure-button pure-button-primary diff-link">Diff</a> | ||||
|                         {% else %} | ||||
|                            <a href="{{ url_for('diff_history_page', uuid=watch.uuid)}}" target="{{watch.uuid}}" class="pure-button pure-button-primary diff-link">Diff</a> | ||||
|                         {% endif %} | ||||
|  | ||||
|                     {% else %} | ||||
|                         {% if watch.history_n == 1 or (watch.history_n ==0 and watch.error_text_ctime )%} | ||||
|                             <a href="{{ url_for('preview_page', uuid=watch.uuid)}}" target="{{watch.uuid}}" class="pure-button pure-button-primary">Preview</a> | ||||
|   | ||||
changedetectionio/tests/custom_browser_url/__init__.py (new file, 1 line)
							| @@ -0,0 +1 @@ | ||||
| # placeholder | ||||
| @@ -0,0 +1,89 @@ | ||||
| #!/usr/bin/python3 | ||||
| import os | ||||
|  | ||||
| from flask import url_for | ||||
| from ..util import live_server_setup, wait_for_all_checks | ||||
|  | ||||
| def do_test(client, live_server, make_test_use_extra_browser=False): | ||||
|  | ||||
|     # Grep for this string in the logs? | ||||
|     test_url = f"https://changedetection.io/ci-test.html" | ||||
|     custom_browser_name = 'custom browser URL' | ||||
|  | ||||
|     # needs to be set to something like 'ws://127.0.0.1:3000?stealth=1&--disable-web-security=true' | ||||
|     assert os.getenv('PLAYWRIGHT_DRIVER_URL'), "Needs PLAYWRIGHT_DRIVER_URL set for this test" | ||||
|  | ||||
|     ##################### | ||||
|     res = client.post( | ||||
|         url_for("settings_page"), | ||||
|         data={"application-empty_pages_are_a_change": "", | ||||
|               "requests-time_between_check-minutes": 180, | ||||
|               'application-fetch_backend': "html_webdriver", | ||||
|               # browserless-custom-url is setup in  .github/workflows/test-only.yml | ||||
|               # the test script run_custom_browser_url_tests.sh will look for 'custom-browser-search-string' in the container logs | ||||
|               'requests-extra_browsers-0-browser_connection_url': 'ws://browserless-custom-url:3000?stealth=1&--disable-web-security=true&custom-browser-search-string=1', | ||||
|               'requests-extra_browsers-0-browser_name': custom_browser_name | ||||
|               }, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"Settings updated." in res.data | ||||
|  | ||||
|     # Add our URL to the import page | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
|         data={"urls": test_url}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"1 Imported" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     if make_test_use_extra_browser: | ||||
|  | ||||
|         # So the name should appear in the edit page under "Request" > "Fetch Method" | ||||
|         res = client.get( | ||||
|             url_for("edit_page", uuid="first"), | ||||
|             follow_redirects=True | ||||
|         ) | ||||
|         assert b'custom browser URL' in res.data | ||||
|  | ||||
|         res = client.post( | ||||
|             url_for("edit_page", uuid="first"), | ||||
|             data={ | ||||
|                   "url": test_url, | ||||
|                   "tags": "", | ||||
|                   "headers": "", | ||||
|                   'fetch_backend': f"extra_browser_{custom_browser_name}", | ||||
|                   'webdriver_js_execute_code': '' | ||||
|             }, | ||||
|             follow_redirects=True | ||||
|         ) | ||||
|  | ||||
|         assert b"Updated watch." in res.data | ||||
|         wait_for_all_checks(client) | ||||
|  | ||||
|     # Force recheck | ||||
|     res = client.get(url_for("form_watch_checknow"), follow_redirects=True) | ||||
|     assert b'1 watches queued for rechecking.' in res.data | ||||
|  | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("preview_page", uuid="first"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b'cool it works' in res.data | ||||
|  | ||||
|  | ||||
| # Requires playwright to be installed | ||||
| def test_request_via_custom_browser_url(client, live_server): | ||||
|     live_server_setup(live_server) | ||||
|     # We do this so we can grep the logs of the custom container and see if the request actually went through that container | ||||
|     do_test(client, live_server, make_test_use_extra_browser=True) | ||||
|  | ||||
|  | ||||
| def test_request_not_via_custom_browser_url(client, live_server): | ||||
|     live_server_setup(live_server) | ||||
|     # We do this so we can grep the logs of the custom container and see if the request actually went through that container | ||||
|     do_test(client, live_server, make_test_use_extra_browser=False) | ||||
| @@ -24,7 +24,7 @@ def test_check_extract_text_from_diff(client, live_server): | ||||
|     ) | ||||
|  | ||||
|     assert b"1 Imported" in res.data | ||||
|     time.sleep(1) | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     # Load in 5 different numbers/changes | ||||
|     last_date="" | ||||
|   | ||||
| @@ -227,9 +227,6 @@ def test_regex_error_handling(client, live_server): | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     with open('/tmp/fuck.html', 'wb') as f: | ||||
|         f.write(res.data) | ||||
|  | ||||
|     assert b'is not a valid regular expression.' in res.data | ||||
|  | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|   | ||||
| @@ -33,8 +33,6 @@ def test_strip_regex_text_func(): | ||||
|         "/not" | ||||
|     ] | ||||
|  | ||||
|  | ||||
|     fetcher = fetch_site_status.perform_site_check(datastore=False) | ||||
|     stripped_content = html_tools.strip_ignore_text(test_content, ignore_lines) | ||||
|  | ||||
|     assert b"but 1 lines" in stripped_content | ||||
|   | ||||
| @@ -24,7 +24,6 @@ def test_strip_text_func(): | ||||
|  | ||||
|     ignore_lines = ["sometimes"] | ||||
|  | ||||
|     fetcher = fetch_site_status.perform_site_check(datastore=False) | ||||
|     stripped_content = html_tools.strip_ignore_text(test_content, ignore_lines) | ||||
|  | ||||
|     assert b"sometimes" not in stripped_content | ||||
|   | ||||
| @@ -80,8 +80,11 @@ def test_headers_in_request(client, live_server): | ||||
|  | ||||
|     # Should be only one with headers set | ||||
|     assert watches_with_headers==1 | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|     assert b'Deleted' in res.data | ||||
|  | ||||
| def test_body_in_request(client, live_server): | ||||
|  | ||||
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_body', _external=True) | ||||
|     if os.getenv('PLAYWRIGHT_DRIVER_URL'): | ||||
| @@ -170,7 +173,8 @@ def test_body_in_request(client, live_server): | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"Body must be empty when Request Method is set to GET" in res.data | ||||
|  | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|     assert b'Deleted' in res.data | ||||
|  | ||||
| def test_method_in_request(client, live_server): | ||||
|     # Add our URL to the import page | ||||
|   | ||||
| @@ -1,5 +1,5 @@ | ||||
| from flask import url_for | ||||
| from . util import set_original_response, set_modified_response, live_server_setup | ||||
| from .util import set_original_response, set_modified_response, live_server_setup, wait_for_all_checks | ||||
| import time | ||||
|  | ||||
|  | ||||
| @@ -12,6 +12,7 @@ def test_bad_access(client, live_server): | ||||
|     ) | ||||
|  | ||||
|     assert b"1 Imported" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     # Attempt to add a body with a GET method | ||||
|     res = client.post( | ||||
| @@ -59,7 +60,7 @@ def test_bad_access(client, live_server): | ||||
|         data={"url": 'file:///tasty/disk/drive', "tags": ''}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     time.sleep(1) | ||||
|     wait_for_all_checks(client) | ||||
|     res = client.get(url_for("index")) | ||||
|  | ||||
|     assert b'file:// type access is denied for security reasons.' in res.data | ||||
| @@ -6,9 +6,11 @@ from .util import live_server_setup, wait_for_all_checks | ||||
|  | ||||
| from ..html_tools import * | ||||
|  | ||||
|  | ||||
| def test_setup(live_server): | ||||
|     live_server_setup(live_server) | ||||
|  | ||||
|  | ||||
| def set_original_response(): | ||||
|     test_return_data = """<html> | ||||
|        <body> | ||||
| @@ -26,6 +28,7 @@ def set_original_response(): | ||||
|         f.write(test_return_data) | ||||
|     return None | ||||
|  | ||||
|  | ||||
| def set_modified_response(): | ||||
|     test_return_data = """<html> | ||||
|        <body> | ||||
| @@ -44,11 +47,12 @@ def set_modified_response(): | ||||
|  | ||||
|     return None | ||||
|  | ||||
|  | ||||
| # Handle utf-8 charset replies https://github.com/dgtlmoon/changedetection.io/pull/613 | ||||
| def test_check_xpath_filter_utf8(client, live_server): | ||||
|     filter='//item/*[self::description]' | ||||
|     filter = '//item/*[self::description]' | ||||
|  | ||||
|     d='''<?xml version="1.0" encoding="UTF-8"?> | ||||
|     d = '''<?xml version="1.0" encoding="UTF-8"?> | ||||
| <rss xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd" xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0"> | ||||
| 	<channel> | ||||
| 		<title>rpilocator.com</title> | ||||
| @@ -102,9 +106,9 @@ def test_check_xpath_filter_utf8(client, live_server): | ||||
|  | ||||
| # Handle utf-8 charset replies https://github.com/dgtlmoon/changedetection.io/pull/613 | ||||
| def test_check_xpath_text_function_utf8(client, live_server): | ||||
|     filter='//item/title/text()' | ||||
|     filter = '//item/title/text()' | ||||
|  | ||||
|     d='''<?xml version="1.0" encoding="UTF-8"?> | ||||
|     d = '''<?xml version="1.0" encoding="UTF-8"?> | ||||
| <rss xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd" xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0"> | ||||
| 	<channel> | ||||
| 		<title>rpilocator.com</title> | ||||
| @@ -163,15 +167,12 @@ def test_check_xpath_text_function_utf8(client, live_server): | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|     assert b'Deleted' in res.data | ||||
|  | ||||
| def test_check_markup_xpath_filter_restriction(client, live_server): | ||||
|     xpath_filter = "//*[contains(@class, 'sametext')]" | ||||
|  | ||||
|     set_original_response() | ||||
|  | ||||
|     # Give the endpoint time to spin up | ||||
|     time.sleep(1) | ||||
|  | ||||
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
| @@ -214,7 +215,6 @@ def test_check_markup_xpath_filter_restriction(client, live_server): | ||||
|  | ||||
|  | ||||
| def test_xpath_validation(client, live_server): | ||||
|  | ||||
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
| @@ -235,6 +235,48 @@ def test_xpath_validation(client, live_server): | ||||
|     assert b'Deleted' in res.data | ||||
|  | ||||
|  | ||||
| def test_xpath23_prefix_validation(client, live_server): | ||||
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
|         data={"urls": test_url}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first"), | ||||
|         data={"include_filters": "xpath:/something horrible", "url": test_url, "tags": "", "headers": "", 'fetch_backend': "html_requests"}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"is not a valid XPath expression" in res.data | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|     assert b'Deleted' in res.data | ||||
|  | ||||
|  | ||||
| def test_xpath1_validation(client, live_server): | ||||
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
|         data={"urls": test_url}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first"), | ||||
|         data={"include_filters": "xpath1:/something horrible", "url": test_url, "tags": "", "headers": "", 'fetch_backend': "html_requests"}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"is not a valid XPath expression" in res.data | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|     assert b'Deleted' in res.data | ||||
|  | ||||
|  | ||||
| # actually only really used by the distll.io importer, but could be handy too | ||||
| def test_check_with_prefix_include_filters(client, live_server): | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
| @@ -254,7 +296,8 @@ def test_check_with_prefix_include_filters(client, live_server): | ||||
|  | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first"), | ||||
|         data={"include_filters":  "xpath://*[contains(@class, 'sametext')]", "url": test_url, "tags": "", "headers": "", 'fetch_backend': "html_requests"}, | ||||
|         data={"include_filters": "xpath://*[contains(@class, 'sametext')]", "url": test_url, "tags": "", "headers": "", | ||||
|               'fetch_backend': "html_requests"}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
| @@ -266,13 +309,15 @@ def test_check_with_prefix_include_filters(client, live_server): | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"Some text thats the same" in res.data #in selector | ||||
|     assert b"Some text that will change" not in res.data #not in selector | ||||
|     assert b"Some text thats the same" in res.data  # in selector | ||||
|     assert b"Some text that will change" not in res.data  # not in selector | ||||
|  | ||||
|     client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|  | ||||
|  | ||||
| def test_various_rules(client, live_server): | ||||
|     # Just check these don't error | ||||
|     #live_server_setup(live_server) | ||||
|     # live_server_setup(live_server) | ||||
|     with open("test-datastore/endpoint-content.txt", "w") as f: | ||||
|         f.write("""<html> | ||||
|        <body> | ||||
| @@ -285,10 +330,11 @@ def test_various_rules(client, live_server): | ||||
|      <a href=''>some linky </a> | ||||
|      <a href=''>another some linky </a> | ||||
|      <!-- related to https://github.com/dgtlmoon/changedetection.io/pull/1774 --> | ||||
|      <input   type="email"   id="email" /> | ||||
|      </body> | ||||
|      </html> | ||||
|     """) | ||||
|  | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
| @@ -298,7 +344,6 @@ def test_various_rules(client, live_server): | ||||
|     assert b"1 Imported" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|  | ||||
|     for r in ['//div', '//a', 'xpath://div', 'xpath://a']: | ||||
|         res = client.post( | ||||
|             url_for("edit_page", uuid="first"), | ||||
| @@ -313,3 +358,153 @@ def test_various_rules(client, live_server): | ||||
|         assert b"Updated watch." in res.data | ||||
|         res = client.get(url_for("index")) | ||||
|         assert b'fetch-error' not in res.data, f"Should not see errors after '{r}' filter" | ||||
|  | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|     assert b'Deleted' in res.data | ||||
|  | ||||
|  | ||||
| def test_xpath_20(client, live_server): | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
|         data={"urls": test_url}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     set_original_response() | ||||
|  | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first"), | ||||
|         data={"include_filters": "//*[contains(@class, 'sametext')]|//*[contains(@class, 'changetext')]", | ||||
|               "url": test_url, | ||||
|               "tags": "", | ||||
|               "headers": "", | ||||
|               'fetch_backend': "html_requests"}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"Updated watch." in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("preview_page", uuid="first"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"Some text thats the same" in res.data  # in selector | ||||
|     assert b"Some text that will change" in res.data  # in selector | ||||
|  | ||||
|     client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|  | ||||
|  | ||||
| def test_xpath_20_function_count(client, live_server): | ||||
|     set_original_response() | ||||
|  | ||||
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
|         data={"urls": test_url}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first"), | ||||
|         data={"include_filters": "xpath:count(//div) * 123456789987654321", | ||||
|               "url": test_url, | ||||
|               "tags": "", | ||||
|               "headers": "", | ||||
|               'fetch_backend': "html_requests"}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"Updated watch." in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("preview_page", uuid="first"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"246913579975308642" in res.data  # in selector | ||||
|  | ||||
|     client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|  | ||||
|  | ||||
| def test_xpath_20_function_count2(client, live_server): | ||||
|     set_original_response() | ||||
|  | ||||
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
|         data={"urls": test_url}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first"), | ||||
|         data={"include_filters": "/html/body/count(div) * 123456789987654321", | ||||
|               "url": test_url, | ||||
|               "tags": "", | ||||
|               "headers": "", | ||||
|               'fetch_backend': "html_requests"}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"Updated watch." in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("preview_page", uuid="first"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"246913579975308642" in res.data  # in selector | ||||
|  | ||||
|     client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|  | ||||
|  | ||||
| def test_xpath_20_function_string_join_matches(client, live_server): | ||||
|     set_original_response() | ||||
|  | ||||
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
|         data={"urls": test_url}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first"), | ||||
|         data={ | ||||
|             "include_filters": "xpath:string-join(//*[contains(@class, 'sametext')]|//*[matches(@class, 'changetext')], 'specialconjunction')", | ||||
|             "url": test_url, | ||||
|             "tags": "", | ||||
|             "headers": "", | ||||
|             'fetch_backend': "html_requests"}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"Updated watch." in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("preview_page", uuid="first"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"Some text thats the samespecialconjunctionSome text that will change" in res.data  # in selector | ||||
|  | ||||
|     client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|  | ||||
|   | ||||
changedetectionio/tests/test_xpath_selector_unit.py (new file, 203 lines)
							| @@ -0,0 +1,203 @@ | ||||
| import sys | ||||
| import os | ||||
| import pytest | ||||
| sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) | ||||
| import html_tools | ||||
|  | ||||
| # Test generation guide: | ||||
| # 1. Do not include encoding in the XML declaration if the test object is a str type. | ||||
| # 2. Always paraphrase the test data. | ||||
|  | ||||
| hotels = """ | ||||
| <hotel> | ||||
|   <branch location="California"> | ||||
|     <staff> | ||||
|       <given_name>Christopher</given_name> | ||||
|       <surname>Anderson</surname> | ||||
|       <age>25</age> | ||||
|     </staff> | ||||
|     <staff> | ||||
|       <given_name>Christopher</given_name> | ||||
|       <surname>Carter</surname> | ||||
|       <age>30</age> | ||||
|     </staff> | ||||
|   </branch> | ||||
|   <branch location="Las Vegas"> | ||||
|     <staff> | ||||
|       <given_name>Lisa</given_name> | ||||
|       <surname>Walker</surname> | ||||
|       <age>60</age> | ||||
|     </staff> | ||||
|     <staff> | ||||
|       <given_name>Jessica</given_name> | ||||
|       <surname>Walker</surname> | ||||
|       <age>32</age> | ||||
|     </staff> | ||||
|     <staff> | ||||
|       <given_name>Jennifer</given_name> | ||||
|       <surname>Roberts</surname> | ||||
|       <age>50</age> | ||||
|     </staff> | ||||
|   </branch> | ||||
| </hotel>""" | ||||
|  | ||||
| @pytest.mark.parametrize("html_content", [hotels]) | ||||
| @pytest.mark.parametrize("xpath, answer", [('(//staff/given_name, //staff/age)', '25'), | ||||
|                           ("xs:date('2023-10-10')", '2023-10-10'), | ||||
|                           ("if (/hotel/branch[@location = 'California']/staff[1]/age = 25) then 'is 25' else 'is not 25'", 'is 25'), | ||||
|                           ("if (//hotel/branch[@location = 'California']/staff[1]/age = 25) then 'is 25' else 'is not 25'", 'is 25'), | ||||
|                           ("if (count(/hotel/branch/staff) = 5) then true() else false()", 'true'), | ||||
|                           ("if (count(//hotel/branch/staff) = 5) then true() else false()", 'true'), | ||||
|                           ("for $i in /hotel/branch/staff return if ($i/age >= 40) then upper-case($i/surname) else lower-case($i/surname)", 'anderson'), | ||||
|                           ("given_name  =  'Christopher' and age  =  40", 'false'), | ||||
|                           ("//given_name  =  'Christopher' and //age  =  40", 'false'), | ||||
|                           #("(staff/given_name, staff/age)", 'Lisa'), | ||||
|                           ("(//staff/given_name, //staff/age)", 'Lisa'), | ||||
|                           #("hotel/branch[@location = 'California']/staff/age union hotel/branch[@location = 'Las Vegas']/staff/age", ''), | ||||
|                           ("(//hotel/branch[@location = 'California']/staff/age union //hotel/branch[@location = 'Las Vegas']/staff/age)", '60'), | ||||
|                           ("(200 to 210)", "205"), | ||||
|                           ("(//hotel/branch[@location = 'California']/staff/age union //hotel/branch[@location = 'Las Vegas']/staff/age)", "50"), | ||||
|                           ("(1, 9, 9, 5)", "5"), | ||||
|                           ("(3, (), (14, 15), 92, 653)", "653"), | ||||
|                           ("for $i in /hotel/branch/staff return $i/given_name", "Christopher"), | ||||
|                           ("for $i in //hotel/branch/staff return $i/given_name", "Christopher"), | ||||
|                           ("distinct-values(for $i in /hotel/branch/staff return $i/given_name)", "Jessica"), | ||||
|                           ("distinct-values(for $i in //hotel/branch/staff return $i/given_name)", "Jessica"), | ||||
|                           ("for $i in (7 to  15) return $i*10", "130"), | ||||
|                           ("some $i in /hotel/branch/staff satisfies $i/age < 20", "false"), | ||||
|                           ("some $i in //hotel/branch/staff satisfies $i/age < 20", "false"), | ||||
|                           ("every $i in /hotel/branch/staff satisfies $i/age > 20", "true"), | ||||
|                           ("every $i in //hotel/branch/staff satisfies $i/age > 20 ", "true"), | ||||
|                           ("let $x := branch[@location = 'California'], $y := branch[@location = 'Las Vegas'] return (avg($x/staff/age), avg($y/staff/age))", "27.5"), | ||||
|                           ("let $x := //branch[@location = 'California'], $y := //branch[@location = 'Las Vegas'] return (avg($x/staff/age), avg($y/staff/age))", "27.5"), | ||||
|                           ("let $nu := 1, $de := 1000 return  'probability = ' || $nu div $de * 100 || '%'", "0.1%"), | ||||
|                           ("let $nu := 2, $probability := function ($argument) { 'probability = ' ||  $nu div $argument  * 100 || '%'}, $de := 5 return $probability($de)", "40%"), | ||||
|                           ("'XPATH2.0-3.1 dissemination' instance of xs:string ", "true"), | ||||
|                           ("'new stackoverflow question incoming' instance of xs:integer ", "false"), | ||||
|                           ("'50000' cast as xs:integer", "50000"), | ||||
|                           ("//branch[@location = 'California']/staff[1]/surname eq 'Anderson'", "true"), | ||||
|                           ("fn:false()", "false")]) | ||||
| def test_hotels(html_content, xpath, answer): | ||||
|     html_content = html_tools.xpath_filter(xpath, html_content, append_pretty_line_formatting=True) | ||||
|     assert type(html_content) == str | ||||
|     assert answer in html_content | ||||
|  | ||||
|  | ||||
|  | ||||
| branches_to_visit = """<?xml version="1.0" ?> | ||||
|   <branches_to_visit> | ||||
|      <manager name="Godot" room_no="501"> | ||||
|          <branch>Area 51</branch> | ||||
|          <branch>A place with no name</branch> | ||||
|          <branch>Stalsk12</branch> | ||||
|      </manager> | ||||
|       <manager name="Freya" room_no="305"> | ||||
|          <branch>Stalsk12</branch> | ||||
|          <branch>Barcelona</branch> | ||||
|          <branch>Paris</branch> | ||||
|      </manager> | ||||
|  </branches_to_visit>""" | ||||
| @pytest.mark.parametrize("html_content", [branches_to_visit]) | ||||
| @pytest.mark.parametrize("xpath, answer", [ | ||||
|     ("manager[@name = 'Godot']/branch union manager[@name = 'Freya']/branch", "Area 51"), | ||||
|     ("//manager[@name = 'Godot']/branch union //manager[@name = 'Freya']/branch", "Stalsk12"), | ||||
|     ("manager[@name = 'Godot']/branch | manager[@name = 'Freya']/branch", "Stalsk12"), | ||||
|     ("//manager[@name = 'Godot']/branch | //manager[@name = 'Freya']/branch", "Stalsk12"), | ||||
|     ("manager/branch intersect manager[@name = 'Godot']/branch", "A place with no name"), | ||||
|     ("//manager/branch intersect //manager[@name = 'Godot']/branch", "A place with no name"), | ||||
|     ("manager[@name = 'Godot']/branch intersect manager[@name = 'Freya']/branch", ""), | ||||
|     ("manager/branch except manager[@name = 'Godot']/branch", "Barcelona"), | ||||
|     ("manager[@name = 'Godot']/branch[1]  eq 'Area 51'", "true"), | ||||
|     ("//manager[@name = 'Godot']/branch[1]  eq 'Area 51'", "true"), | ||||
|     ("manager[@name = 'Godot']/branch[1]  eq 'Seoul'", "false"), | ||||
|     ("//manager[@name = 'Godot']/branch[1]  eq 'Seoul'", "false"), | ||||
|     ("manager[@name = 'Godot']/branch[2] eq manager[@name = 'Freya']/branch[2]", "false"), | ||||
|     ("//manager[@name = 'Godot']/branch[2] eq //manager[@name = 'Freya']/branch[2]", "false"), | ||||
|     ("manager[1]/@room_no lt manager[2]/@room_no", "false"), | ||||
|     ("//manager[1]/@room_no lt //manager[2]/@room_no", "false"), | ||||
|     ("manager[1]/@room_no gt manager[2]/@room_no", "true"), | ||||
|     ("//manager[1]/@room_no gt //manager[2]/@room_no", "true"), | ||||
|     ("manager[@name = 'Godot']/branch[1]  = 'Area 51'", "true"), | ||||
|     ("//manager[@name = 'Godot']/branch[1]  = 'Area 51'", "true"), | ||||
|     ("manager[@name = 'Godot']/branch[1]  = 'Seoul'", "false"), | ||||
|     ("//manager[@name = 'Godot']/branch[1]  = 'Seoul'", "false"), | ||||
|     ("manager[@name = 'Godot']/branch  = 'Area 51'", "true"), | ||||
|     ("//manager[@name = 'Godot']/branch  = 'Area 51'", "true"), | ||||
|     ("manager[@name = 'Godot']/branch  = 'Barcelona'", "false"), | ||||
|     ("//manager[@name = 'Godot']/branch  = 'Barcelona'", "false"), | ||||
|     ("manager[1]/@room_no > manager[2]/@room_no", "true"), | ||||
|     ("//manager[1]/@room_no > //manager[2]/@room_no", "true"), | ||||
|     ("manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is manager[1]/branch[1]", "false"), | ||||
|     ("//manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is //manager[1]/branch[1]", "false"), | ||||
|     ("manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is manager[1]/branch[3]", "true"), | ||||
|     ("//manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is //manager[1]/branch[3]", "true"), | ||||
|     ("manager[@name = 'Godot']/branch[ . = 'Stalsk12'] <<  manager[1]/branch[1]", "false"), | ||||
|     ("//manager[@name = 'Godot']/branch[ . = 'Stalsk12'] <<  //manager[1]/branch[1]", "false"), | ||||
|     ("manager[@name = 'Godot']/branch[ . = 'Stalsk12']  >>  manager[1]/branch[1]", "true"), | ||||
|     ("//manager[@name = 'Godot']/branch[ . = 'Stalsk12'] >>  //manager[1]/branch[1]", "true"), | ||||
|     ("manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is manager[@name = 'Freya']/branch[ . = 'Stalsk12']", "false"), | ||||
|     ("//manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is //manager[@name = 'Freya']/branch[ . = 'Stalsk12']", "false"), | ||||
|     ("manager[1]/@name || manager[2]/@name", "GodotFreya"), | ||||
|     ("//manager[1]/@name || //manager[2]/@name", "GodotFreya"), | ||||
|                           ]) | ||||
| def test_branches_to_visit(html_content, xpath, answer): | ||||
|     html_content = html_tools.xpath_filter(xpath, html_content, append_pretty_line_formatting=True) | ||||
|     assert type(html_content) == str | ||||
|     assert answer in html_content | ||||
|  | ||||
| trips = """ | ||||
| <trips> | ||||
|    <trip reservation_number="10"> | ||||
|        <depart>2023-10-06</depart> | ||||
|        <arrive>2023-10-10</arrive> | ||||
|        <traveler name="Christopher Anderson"> | ||||
|            <duration>4</duration> | ||||
|            <price>2000.00</price> | ||||
|        </traveler> | ||||
|    </trip> | ||||
|    <trip reservation_number="12"> | ||||
|        <depart>2023-10-06</depart> | ||||
|        <arrive>2023-10-12</arrive> | ||||
|        <traveler name="Frank Carter"> | ||||
|            <duration>6</duration> | ||||
|            <price>3500.34</price> | ||||
|        </traveler> | ||||
|    </trip> | ||||
| </trips>""" | ||||
| @pytest.mark.parametrize("html_content", [trips]) | ||||
| @pytest.mark.parametrize("xpath, answer", [ | ||||
|     ("1 + 9 * 9 + 5 div 5", "83"), | ||||
|     ("(1 + 9 * 9 + 5) div 6", "14.5"), | ||||
|     ("23 idiv 3", "7"), | ||||
|     ("23 div 3", "7.66666666"), | ||||
|     ("for $i in ./trip return $i/traveler/duration * $i/traveler/price", "21002.04"), | ||||
|     ("for $i in ./trip return $i/traveler/duration ", "4"), | ||||
|     ("for $i in .//trip return $i/traveler/duration * $i/traveler/price", "21002.04"), | ||||
|     ("sum(for $i in ./trip return $i/traveler/duration * $i/traveler/price)", "29002.04"), | ||||
|     ("sum(for $i in .//trip return $i/traveler/duration * $i/traveler/price)", "29002.04"), | ||||
|     #("trip[1]/depart - trip[1]/arrive", "fail_to_get_answer"), | ||||
|     #("//trip[1]/depart - //trip[1]/arrive", "fail_to_get_answer"), | ||||
|     #("trip[1]/depart + trip[1]/arrive", "fail_to_get_answer"), | ||||
|     #("xs:date(trip[1]/depart) + xs:date(trip[1]/arrive)", "fail_to_get_answer"), | ||||
|     ("(//trip[1]/arrive cast as xs:date) - (//trip[1]/depart cast as xs:date)", "P4D"), | ||||
|     ("(//trip[1]/depart cast as xs:date) - (//trip[1]/arrive cast as xs:date)", "-P4D"), | ||||
|     ("(//trip[1]/depart cast as xs:date) + xs:dayTimeDuration('P3D')", "2023-10-09"), | ||||
|     ("(//trip[1]/depart cast as xs:date) - xs:dayTimeDuration('P3D')", "2023-10-03"), | ||||
|     ("(456, 623) instance of xs:integer", "false"), | ||||
|     ("(456, 623) instance of xs:integer*", "true"), | ||||
|     ("/trips/trip instance of element()", "false"), | ||||
|     ("/trips/trip instance of element()*", "true"), | ||||
|     ("/trips/trip[1]/arrive instance of xs:date", "false"), | ||||
|     ("date(/trips/trip[1]/arrive) instance of xs:date", "true"), | ||||
|     ("'8' cast as xs:integer", "8"), | ||||
|     ("'11.1E3' cast as xs:double", "11100"), | ||||
|     ("6.5 cast as xs:integer", "6"), | ||||
|     #("/trips/trip[1]/arrive cast as xs:dateTime", "fail_to_get_answer"), | ||||
|     ("/trips/trip[1]/arrive cast as xs:date", "2023-10-10"), | ||||
|     ("('2023-10-12') cast as xs:date", "2023-10-12"), | ||||
|     ("for $i in //trip return concat($i/depart, '  ', $i/arrive)", "2023-10-06  2023-10-10"), | ||||
|                           ]) | ||||
| def test_trips(html_content, xpath, answer): | ||||
|     html_content = html_tools.xpath_filter(xpath, html_content, append_pretty_line_formatting=True) | ||||
|     assert isinstance(html_content, str) | ||||
|     assert answer in html_content | ||||
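
These parametrized cases push XPath 2.0-3.1 syntax ('for' expressions, 'idiv', 'cast as', date arithmetic) through html_tools.xpath_filter, which presumably hands anything beyond XPath 1.0 to the new elementpath dependency (added to requirements.txt below). A minimal standalone sketch of the same kind of evaluation, assuming only lxml and elementpath; the trimmed fixture mirrors the `trips` document above:

```python
# Sketch only: evaluates one of the test expressions directly with elementpath,
# bypassing html_tools.xpath_filter.
from lxml import etree
import elementpath

root = etree.fromstring(
    b"<trips>"
    b"<trip><traveler><duration>4</duration><price>2000.00</price></traveler></trip>"
    b"<trip><traveler><duration>6</duration><price>3500.34</price></traveler></trip>"
    b"</trips>"
)

# An XPath 2.0 'for' expression; lxml's native XPath 1.0 engine cannot parse this.
expr = "for $i in ./trip return $i/traveler/duration * $i/traveler/price"
print(elementpath.select(root, expr))  # expected: [8000.0, 21002.04]
```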
							
								
								
									
54  changedetectionio/tests/unit/test_watch_model.py  Normal file
							| @@ -0,0 +1,54 @@ | ||||
| #!/usr/bin/python3 | ||||
|  | ||||
| # run from dir above changedetectionio/ dir | ||||
| # python3 -m unittest changedetectionio.tests.unit.test_watch_model | ||||
|  | ||||
| import unittest | ||||
| import os | ||||
|  | ||||
| from changedetectionio.model import Watch | ||||
|  | ||||
| # Tests for the Watch model's snapshot-history helpers | ||||
| class TestWatchModel(unittest.TestCase): | ||||
|  | ||||
|     def test_watch_get_suggested_from_diff_timestamp(self): | ||||
|         import uuid as uuid_builder | ||||
|         watch = Watch.model(datastore_path='/tmp', default={}) | ||||
|         watch.ensure_data_dir_exists() | ||||
|  | ||||
|         watch['last_viewed'] = 110 | ||||
|  | ||||
|         watch.save_history_text(contents=b"hello world", timestamp=100, snapshot_id=str(uuid_builder.uuid4())) | ||||
|         watch.save_history_text(contents=b"hello world", timestamp=105, snapshot_id=str(uuid_builder.uuid4())) | ||||
|         watch.save_history_text(contents=b"hello world", timestamp=109, snapshot_id=str(uuid_builder.uuid4())) | ||||
|         watch.save_history_text(contents=b"hello world", timestamp=112, snapshot_id=str(uuid_builder.uuid4())) | ||||
|         watch.save_history_text(contents=b"hello world", timestamp=115, snapshot_id=str(uuid_builder.uuid4())) | ||||
|         watch.save_history_text(contents=b"hello world", timestamp=117, snapshot_id=str(uuid_builder.uuid4())) | ||||
|  | ||||
|         p = watch.get_next_snapshot_key_to_last_viewed | ||||
|         assert p == "112", "Correct last-viewed timestamp was detected" | ||||
|  | ||||
|         # When last_viewed is within one step of the end of the list, it should return the second-last change | ||||
|         watch['last_viewed'] = 116 | ||||
|         p = watch.get_next_snapshot_key_to_last_viewed | ||||
|         assert p == "115", "Correct 'second last' last-viewed timestamp was detected when using the last timestamp" | ||||
|  | ||||
|         watch['last_viewed'] = 99 | ||||
|         p = watch.get_next_snapshot_key_to_last_viewed | ||||
|         assert p == "100" | ||||
|  | ||||
|         watch['last_viewed'] = 200 | ||||
|         p = watch.get_next_snapshot_key_to_last_viewed | ||||
|         assert p == "115", "When the 'last viewed' timestamp is greater than the newest snapshot, return second last " | ||||
|  | ||||
|         watch['last_viewed'] = 109 | ||||
|         p = watch.get_next_snapshot_key_to_last_viewed | ||||
|         assert p == "109", "Correct when its the same time" | ||||
|  | ||||
|         # new empty one | ||||
|         watch = Watch.model(datastore_path='/tmp', default={}) | ||||
|         p = watch.get_next_snapshot_key_to_last_viewed | ||||
|         assert p is None, "None when no history available" | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|     unittest.main() | ||||
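
The assertions above pin down a selection rule for get_next_snapshot_key_to_last_viewed: return the first snapshot at or after last_viewed, but fall back to the second-newest when last_viewed lands on or past the newest snapshot, so there is always something meaningful to diff against. A minimal sketch of that rule (not the project's actual implementation), assuming history keys are UNIX-timestamp strings:

```python
# Hypothetical re-derivation of the behaviour the unit test asserts.
import bisect

def next_snapshot_key_to_last_viewed(history_keys, last_viewed):
    if not history_keys:
        return None  # no history available
    timestamps = sorted(int(k) for k in history_keys)
    i = bisect.bisect_left(timestamps, last_viewed)
    # On/after the newest snapshot: diff against the second-newest instead.
    if i >= len(timestamps) - 1:
        return str(timestamps[-2]) if len(timestamps) > 1 else str(timestamps[-1])
    return str(timestamps[i])

keys = ["100", "105", "109", "112", "115", "117"]  # the fixture's timestamps
assert next_snapshot_key_to_last_viewed(keys, 110) == "112"
assert next_snapshot_key_to_last_viewed(keys, 116) == "115"
assert next_snapshot_key_to_last_viewed(keys, 200) == "115"
```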
| @@ -1,9 +1,13 @@ | ||||
| import importlib | ||||
| import os | ||||
| import re | ||||
| import threading | ||||
| import queue | ||||
| import time | ||||
| from distutils.util import strtobool | ||||
|  | ||||
| from changedetectionio import content_fetcher, html_tools | ||||
|  | ||||
| from .processors.text_json_diff import FilterNotFoundInResponse | ||||
| from .processors.restock_diff import UnableToExtractRestockData | ||||
|  | ||||
| @@ -15,6 +19,7 @@ from .processors.restock_diff import UnableToExtractRestockData | ||||
| import logging | ||||
| import sys | ||||
|  | ||||
|  | ||||
| class update_worker(threading.Thread): | ||||
|     current_uuid = None | ||||
|  | ||||
| @@ -24,6 +29,7 @@ class update_worker(threading.Thread): | ||||
|         self.app = app | ||||
|         self.notification_q = notification_q | ||||
|         self.datastore = datastore | ||||
|  | ||||
|         super().__init__(*args, **kwargs) | ||||
|  | ||||
|     def queue_notification_for_watch(self, n_object, watch): | ||||
| @@ -209,6 +215,7 @@ class update_worker(threading.Thread): | ||||
|         from .processors import text_json_diff, restock_diff | ||||
|  | ||||
|         while not self.app.config.exit.is_set(): | ||||
|             change_processor = None | ||||
|  | ||||
|             try: | ||||
|                 queued_item_data = self.q.get(block=False) | ||||
| @@ -229,17 +236,46 @@ class update_worker(threading.Thread): | ||||
|                     now = time.time() | ||||
|  | ||||
|                     try: | ||||
|                         processor = self.datastore.data['watching'][uuid].get('processor','text_json_diff') | ||||
|                         # Protect against file:// access | ||||
|                         if re.search(r'^file://', self.datastore.data['watching'][uuid].get('url', '').strip(), re.IGNORECASE): | ||||
|                             if not strtobool(os.getenv('ALLOW_FILE_URI', 'false')): | ||||
|                                 raise Exception( | ||||
|                                     "file:// type access is denied for security reasons." | ||||
|                                 ) | ||||
|  | ||||
|                         # @todo some way to switch by name | ||||
|                         if processor == 'restock_diff': | ||||
|                             update_handler = restock_diff.perform_site_check(datastore=self.datastore) | ||||
|                         prefer_fetch_backend = self.datastore.data['watching'][uuid].get('fetch_backend', 'system') | ||||
|                         if not prefer_fetch_backend or prefer_fetch_backend == 'system': | ||||
|                             prefer_fetch_backend = self.datastore.data['settings']['application'].get('fetch_backend') | ||||
|  | ||||
|                         processor = self.datastore.data['watching'][uuid].get('processor', 'text_json_diff') | ||||
|  | ||||
|                         # processor = 'cdio_whois_diff'  # WIP debug override; would force every watch through the whois plugin | ||||
|  | ||||
|                         if processor in ['text_json_diff', 'restock_diff']: | ||||
|                             base_processor_module = f"changedetectionio.processors.{processor}" | ||||
|                         else: | ||||
|                             # Used as a default and also by some tests | ||||
|                             update_handler = text_json_diff.perform_site_check(datastore=self.datastore) | ||||
|                             # Each plugin is exactly one processor | ||||
|                             base_processor_module = f"{processor}.processor" | ||||
|  | ||||
|                         # The processor dictates which fetcher it uses; each | ||||
|                         # processor should inherit the right fetcher base class. | ||||
|                         module = importlib.import_module(base_processor_module) | ||||
|                         change_processor = getattr(module, 'perform_site_check') | ||||
|                         change_processor = change_processor(datastore=self.datastore, | ||||
|                                                             watch_uuid=uuid, | ||||
|                                                             prefer_fetch_backend=prefer_fetch_backend | ||||
|                                                             ) | ||||
|  | ||||
|                         # Clear last errors (move to preflight func?) | ||||
|                         self.datastore.data['watching'][uuid]['browser_steps_last_error_step'] = None | ||||
|                         changed_detected, update_obj, contents = update_handler.run(uuid, skip_when_checksum_same=queued_item_data.item.get('skip_when_checksum_same')) | ||||
|  | ||||
|                         skip_when_same_checksum = queued_item_data.item.get('skip_when_checksum_same') | ||||
|                         # Each processor extends the base class of whichever fetcher it needs to run | ||||
|                         change_processor.fetch_content() | ||||
|                         changed_detected, update_obj, contents = change_processor.run_changedetection(uuid, | ||||
|                                                                                                       skip_when_checksum_same=skip_when_same_checksum | ||||
|                                                                                                       ) | ||||
|  | ||||
|                         # Re #342 | ||||
|                         # In Python 3, all strings are sequences of Unicode characters. There is a bytes type that holds raw bytes. | ||||
| @@ -391,6 +427,9 @@ class update_worker(threading.Thread): | ||||
|                         self.datastore.update_watch(uuid=uuid, update_obj={'last_error': str(e)}) | ||||
|                         # Other serious error | ||||
|                         process_changedetection_results = False | ||||
| #                        import traceback | ||||
| #                        print(traceback.format_exc()) | ||||
|  | ||||
|                     else: | ||||
|                         # Crash protection, the watch entry could have been removed by this point (during a slow chrome fetch etc) | ||||
|                         if not self.datastore.data['watching'].get(uuid): | ||||
| @@ -443,10 +482,10 @@ class update_worker(threading.Thread): | ||||
|                                                                            }) | ||||
|  | ||||
|                         # Always save the screenshot if it's available | ||||
|                         if update_handler.screenshot: | ||||
|                             self.datastore.save_screenshot(watch_uuid=uuid, screenshot=update_handler.screenshot) | ||||
|                         if update_handler.xpath_data: | ||||
|                             self.datastore.save_xpath_data(watch_uuid=uuid, data=update_handler.xpath_data) | ||||
|                         if change_processor.screenshot: | ||||
|                             self.datastore.save_screenshot(watch_uuid=uuid, screenshot=change_processor.screenshot) | ||||
|                         if change_processor.xpath_data: | ||||
|                             self.datastore.save_xpath_data(watch_uuid=uuid, data=change_processor.xpath_data) | ||||
|  | ||||
|  | ||||
|                 self.current_uuid = None  # Done | ||||
|   | ||||
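
The importlib loader above implies a contract for third-party processors: a package named after the processor value, containing a `processor` module that exposes a `perform_site_check` class taking `datastore`, `watch_uuid` and `prefer_fetch_backend`, offering `fetch_content()` and `run_changedetection()`, and carrying `screenshot`/`xpath_data` attributes. A hedged skeleton of the minimum such a plugin would need (the package name `my_plugin` and the placeholder bodies are hypothetical, not a documented API):

```python
# my_plugin/processor.py — plugin skeleton inferred from the update_worker
# loader above; not an official changedetection.io plugin API.

class perform_site_check:
    def __init__(self, datastore, watch_uuid, prefer_fetch_backend):
        # These kwargs mirror exactly what update_worker passes in.
        self.datastore = datastore
        self.watch_uuid = watch_uuid
        self.prefer_fetch_backend = prefer_fetch_backend
        # update_worker reads these after every run, so they must always exist.
        self.screenshot = None
        self.xpath_data = None
        self._contents = b""

    def fetch_content(self):
        # A real processor would extend the fetcher base class that matches
        # prefer_fetch_backend ('html_requests', 'html_webdriver', ...).
        self._contents = b"fetched page text"  # placeholder

    def run_changedetection(self, uuid, skip_when_checksum_same=False):
        # Must return the triple that update_worker unpacks:
        # (changed_detected, update_obj, contents)
        update_obj = {}  # watch fields to merge back into the datastore
        return False, update_obj, self._contents
```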
| @@ -46,6 +46,9 @@ beautifulsoup4 | ||||
| # XPath filtering, lxml is required by bs4 anyway, but put it here to be safe. | ||||
| lxml | ||||
|  | ||||
| # XPath 2.0-3.1 support | ||||
| elementpath | ||||
|  | ||||
| selenium~=4.14.0 | ||||
|  | ||||
| werkzeug~=3.0 | ||||
|   | ||||
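
The comment above is the whole story: lxml bundles libxml2's XPath 1.0 engine only, while elementpath layers XPath 2.0-3.1 evaluation on top of the same element trees. A quick contrast, as a sketch:

```python
from lxml import etree
import elementpath

root = etree.fromstring(b"<r/>")

try:
    root.xpath("23 idiv 3")  # 'idiv' is XPath 2.0; lxml's XPath 1.0 rejects it
except etree.XPathEvalError as e:
    print("lxml:", e)

print("elementpath:", elementpath.select(root, "23 idiv 3"))  # should print 7
```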