mirror of https://github.com/dgtlmoon/changedetection.io.git
synced 2025-10-30 22:27:52 +00:00

Compare commits: puppeteer-...0.42.3 (32 commits)
| SHA1 |
|---|
| 9434eac72d |
| edb5e20de6 |
| e62eeb1c4a |
| a4e6fd1ec3 |
| d8b9f0fd78 |
| f9387522ee |
| ba8d2e0c2d |
| 247db22a33 |
| aeabd5b3fc |
| e9e1ce893f |
| b5a415c7b6 |
| 9e954532d6 |
| 955835df72 |
| 1aeafef910 |
| 1367197df7 |
| 143971123d |
| 04d2d3fb00 |
| 236f0c098d |
| 582c6b465b |
| a021ba87fa |
| e9057cb851 |
| 72ec438caa |
| 367dec48e1 |
| dd87912c88 |
| 0126cb0aac |
| 463b2d0449 |
| e4f6d54ae2 |
| 5f338d7824 |
| 0b563a93ec |
| d939882dde |
| 690cf4acc9 |
| 3cb3c7ba2e |

.github/workflows/test-only.yml (9 changes, vendored)
```
@@ -55,12 +55,17 @@ jobs:
          # Playwright/Browserless fetch
          docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio  bash -c 'cd changedetectionio;pytest tests/fetchers/test_content.py && pytest tests/test_errorhandling.py && pytest tests/visualselector/test_fetch_data.py'

          # Settings headers playwright tests - Call back in from Browserless, check headers
          docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000?dumpio=true" --network changedet-network test-changedetectionio  bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0  --live-server-port=5004 tests/test_request.py'
          docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "WEBDRIVER_URL=http://selenium:4444/wd/hub" --network changedet-network test-changedetectionio  bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0  --live-server-port=5004 tests/test_request.py'
          docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "USE_EXPERIMENTAL_PUPPETEER_FETCH=yes" -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000?dumpio=true" --network changedet-network test-changedetectionio  bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0  --live-server-port=5004 tests/test_request.py'

          # restock detection via playwright - added name=changedet here so that playwright/browserless can connect to it
          docker run --rm --name "changedet" -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio  bash -c 'cd changedetectionio;pytest --live-server-port=5004 --live-server-host=0.0.0.0 tests/restock/test_restock.py'

      - name: Test with puppeteer fetcher
      - name: Test with puppeteer fetcher and disk cache
        run: |
          docker run --rm -e "USE_EXPERIMENTAL_PUPPETEER_FETCH=yes" -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio  bash -c 'cd changedetectionio;pytest tests/fetchers/test_content.py && pytest tests/test_errorhandling.py && pytest tests/visualselector/test_fetch_data.py'
          docker run --rm -e "PUPPETEER_DISK_CACHE=/tmp/data/" -e "USE_EXPERIMENTAL_PUPPETEER_FETCH=yes" -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio  bash -c 'cd changedetectionio;pytest tests/fetchers/test_content.py && pytest tests/test_errorhandling.py && pytest tests/visualselector/test_fetch_data.py'
          # Browserless would have had -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" added above

      - name: Test proxy interaction
```
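The renamed workflow step drives the new disk cache purely through the `PUPPETEER_DISK_CACHE` environment variable. A minimal Python sketch of how a fetcher can pick that up, mirroring the `os.getenv` pattern that appears in the content-fetcher diff further down (the `build_context` helper name is illustrative, not from the codebase):

```python
import os

def build_context(url: str) -> dict:
    # Hypothetical helper: assemble the browserless /function context.
    # PUPPETEER_DISK_CACHE is read straight from the environment; when unset,
    # disk caching stays disabled (False), as in the diff below.
    return {
        "url": url,
        "disk_cache_dir": os.getenv("PUPPETEER_DISK_CACHE", False),  # e.g. "/tmp/data/"
    }

if __name__ == "__main__":
    print(build_context("https://example.com"))
```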
```
@@ -2,10 +2,10 @@

Live your data-life pro-actively, track website content changes and receive notifications via Discord, Email, Slack, Telegram and 70+ more

[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot.png" style="max-width:100%;" alt="Self-hosted web page change monitoring"  title="Self-hosted web page change monitoring"  />](https://lemonade.changedetection.io/start?src=pip)
[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot.png" style="max-width:100%;" alt="Self-hosted web page change monitoring"  title="Self-hosted web page change monitoring"  />](https://changedetection.io)


[**Don't have time? Let us host it for you! try our extremely affordable subscription use our proxies and support!**](https://lemonade.changedetection.io/start)
[**Don't have time? Let us host it for you! try our extremely affordable subscription use our proxies and support!**](https://changedetection.io)


#### Example use cases
```

README.md (15 changes)
```
@@ -5,13 +5,13 @@
_Live your data-life pro-actively._


[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot.png" style="max-width:100%;" alt="Self-hosted web page change monitoring"  title="Self-hosted web page change monitoring"  />](https://lemonade.changedetection.io/start?src=github)
[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot.png" style="max-width:100%;" alt="Self-hosted web page change monitoring"  title="Self-hosted web page change monitoring"  />](https://changedetection.io?src=github)

[![Release Version][release-shield]][release-link] [![Docker Pulls][docker-pulls]][docker-link] [![License][license-shield]](LICENSE.md)



[**Don't have time? Let us host it for you! try our $8.99/month subscription - use our proxies and support!**](https://lemonade.changedetection.io/start) , _half the price of other website change monitoring services and comes with unlimited watches & checks!_
[**Don't have time? Let us host it for you! try our $8.99/month subscription - use our proxies and support!**](https://changedetection.io) , _half the price of other website change monitoring services!_

- Chrome browser included.
- Super fast, no registration needed setup.
@@ -22,11 +22,11 @@ _Live your data-life pro-actively._

Available when connected to a <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Playwright-content-fetcher">playwright content fetcher</a> (included as part of our subscription service)

[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/visualselector-anim.gif" style="max-width:100%;" alt="Self-hosted web page change monitoring context difference "  title="Self-hosted web page change monitoring context difference " />](https://lemonade.changedetection.io/start?src=github)
[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/visualselector-anim.gif" style="max-width:100%;" alt="Self-hosted web page change monitoring context difference "  title="Self-hosted web page change monitoring context difference " />](https://changedetection.io?src=github)

### Easily see what changed, examine by word, line, or individual character.

[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot-diff.png" style="max-width:100%;" alt="Self-hosted web page change monitoring context difference "  title="Self-hosted web page change monitoring context difference " />](https://lemonade.changedetection.io/start?src=github)
[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot-diff.png" style="max-width:100%;" alt="Self-hosted web page change monitoring context difference "  title="Self-hosted web page change monitoring context difference " />](https://changedetection.io?src=github)


### Perform interactive browser steps
@@ -35,7 +35,7 @@ Fill in text boxes, click buttons and more, setup your changedetection scenario.

Using the **Browser Steps** configuration, add basic steps before performing change detection, such as logging into websites, adding a product to a cart, accept cookie logins, entering dates and refining searches.

[<img src="docs/browsersteps-anim.gif" style="max-width:100%;" alt="Self-hosted web page change monitoring context difference "  title="Website change detection with interactive browser steps, login, cookies etc" />](https://lemonade.changedetection.io/start?src=github)
[<img src="docs/browsersteps-anim.gif" style="max-width:100%;" alt="Self-hosted web page change monitoring context difference "  title="Website change detection with interactive browser steps, login, cookies etc" />](https://changedetection.io?src=github)

After **Browser Steps** have been run, then visit the **Visual Selector** tab to refine the content you're interested in.
Requires Playwright to be enabled.
@@ -66,6 +66,7 @@ Requires Playwright to be enabled.
- Proactively search for jobs, get notified when companies update their careers page, search job portals for keywords.
- Get alerts when new job positions are open on Bamboo HR and other job platforms
- Website defacement monitoring
- Pokémon Card Restock Tracker / Pokémon TCG Tracker

_Need an actual Chrome runner with Javascript support? We support fetching via WebDriver and Playwright!</a>_

@@ -144,7 +145,7 @@ See the wiki for more information https://github.com/dgtlmoon/changedetection.io
## Filters

XPath, JSONPath, jq, and CSS support comes baked in! You can be as specific as you need, use XPath exported from various XPath element query creation tools.
(We support LXML `re:test`, `re:math` and `re:replace`.)
(We support LXML `re:test`, `re:match` and `re:replace`.)

## Notifications

@@ -237,7 +238,7 @@ Supports managing the website watch list [via our API](https://changedetection.i
Do you use changedetection.io to make money? does it save you time or money? Does it make your life easier? less stressful? Remember, we write this software when we should be doing actual paid work, we have to buy food and pay rent just like you.


Firstly, consider taking out a [change detection monthly subscription - unlimited checks and watches](https://lemonade.changedetection.io/start) , even if you don't use it, you still get the warm fuzzy feeling of helping out the project. (And who knows, you might just use it!)
Firstly, consider taking out a [change detection monthly subscription - unlimited checks and watches](https://changedetection.io?src=github) , even if you don't use it, you still get the warm fuzzy feeling of helping out the project. (And who knows, you might just use it!)

Or directly donate an amount PayPal [](https://www.paypal.com/donate/?hosted_button_id=7CP6HR9ZCNDYJ)
```
```
@@ -38,7 +38,7 @@ from flask_paginate import Pagination, get_page_parameter
from changedetectionio import html_tools
from changedetectionio.api import api_v1

__version__ = '0.41.1'
__version__ = '0.42.3'

datastore = None

@@ -124,6 +124,15 @@ def _jinja2_filter_datetimestamp(timestamp, format="%Y-%m-%d %H:%M:%S"):

    return timeago.format(timestamp, time.time())


@app.template_filter('pagination_slice')
def _jinja2_filter_pagination_slice(arr, skip):
    per_page = datastore.data['settings']['application'].get('pager_size', 50)
    if per_page:
        return arr[skip:skip + per_page]

    return arr

@app.template_filter('format_seconds_ago')
def _jinja2_filter_seconds_precise(timestamp):
    if timestamp == False:
@@ -403,27 +412,40 @@ def changedetection_app(config=None, datastore_o=None):

        # Sort by last_changed and add the uuid which is usually the key..
        sorted_watches = []
        search_q = request.args.get('q').strip().lower() if request.args.get('q') else False
        for uuid, watch in datastore.data['watching'].items():

            if limit_tag != None:
            if limit_tag:
                # Support for comma separated list of tags.
                if watch['tag'] is None:
                if not watch.get('tag'):
                    continue
                for tag_in_watch in watch['tag'].split(','):
                for tag_in_watch in watch.get('tag', '').split(','):
                    tag_in_watch = tag_in_watch.strip()
                    if tag_in_watch == limit_tag:
                        watch['uuid'] = uuid
                        sorted_watches.append(watch)
                        if search_q:
                            if (watch.get('title') and search_q in watch.get('title').lower()) or search_q in watch.get('url', '').lower():
                                sorted_watches.append(watch)
                        else:
                            sorted_watches.append(watch)

            else:
                watch['uuid'] = uuid
                sorted_watches.append(watch)
                #watch['uuid'] = uuid
                if search_q:
                    if (watch.get('title') and search_q in watch.get('title').lower()) or search_q in watch.get('url', '').lower():
                        sorted_watches.append(watch)
                else:
                    sorted_watches.append(watch)

        existing_tags = datastore.get_all_tags()
        form = forms.quickWatchForm(request.form)
        page = request.args.get(get_page_parameter(), type=int, default=1)
        total_count = len(sorted_watches) if sorted_watches else len(datastore.data['watching'])
        pagination = Pagination(page=page, total=total_count, per_page=int(os.getenv('pagination_per_page', 50)), css_framework = "semantic")
        total_count = len(sorted_watches)

        pagination = Pagination(page=page,
                                total=total_count,
                                per_page=datastore.data['settings']['application'].get('pager_size', 50), css_framework="semantic")


        output = render_template(
            "watch-overview.html",
@@ -437,6 +459,7 @@ def changedetection_app(config=None, datastore_o=None):
                                 hosted_sticky=os.getenv("SALTED_PASS", False) == False,
                                 pagination=pagination,
                                 queued_uuids=[q_uuid.item['uuid'] for q_uuid in update_q.queue],
                                 search_q=request.args.get('q','').strip(),
                                 sort_attribute=request.args.get('sort') if request.args.get('sort') else request.cookies.get('sort'),
                                 sort_order=request.args.get('order') if request.args.get('order') else request.cookies.get('order'),
                                 system_default_fetcher=datastore.data['settings']['application'].get('fetch_backend'),
@@ -690,6 +713,7 @@ def changedetection_app(config=None, datastore_o=None):
                                     form=form,
                                     has_default_notification_urls=True if len(datastore.data['settings']['application']['notification_urls']) else False,
                                     has_empty_checktime=using_default_check_time,
                                     has_extra_headers_file=watch.has_extra_headers_file or datastore.has_extra_headers_file,
                                     is_html_webdriver=is_html_webdriver,
                                     jq_support=jq_support,
                                     playwright_enabled=os.getenv('PLAYWRIGHT_DRIVER_URL', False),
@@ -1434,6 +1458,7 @@ def check_for_new_version():
        # Check daily
        app.config.exit.wait(86400)


def notification_runner():
    global notification_debug_log
    from datetime import datetime
```
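The `pagination_slice` template filter added above is what drives the new pager. A standalone sketch of the same slicing logic, with a plain dict standing in for `datastore.data['settings']['application']`:

```python
settings = {"pager_size": 2}  # stand-in for datastore.data['settings']['application']

def pagination_slice(arr, skip):
    # Return one page of results; a pager_size of 0 disables paging entirely.
    per_page = settings.get("pager_size", 50)
    if per_page:
        return arr[skip:skip + per_page]
    return arr

watches = ["a", "b", "c", "d", "e"]
print(pagination_slice(watches, 0))  # ['a', 'b']
print(pagination_slice(watches, 2))  # ['c', 'd']
```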
```
@@ -27,58 +27,106 @@ import os
import logging
from changedetectionio.store import ChangeDetectionStore
from changedetectionio import login_optionally_required
browsersteps_live_ui_o = {}
browsersteps_playwright_browser_interface = None
browsersteps_playwright_browser_interface_browser = None
browsersteps_playwright_browser_interface_context = None
browsersteps_playwright_browser_interface_end_time = None
browsersteps_playwright_browser_interface_start_time = None

def cleanup_playwright_session():
browsersteps_sessions = {}
io_interface_context = None

    global browsersteps_live_ui_o
    global browsersteps_playwright_browser_interface
    global browsersteps_playwright_browser_interface_browser
    global browsersteps_playwright_browser_interface_context
    global browsersteps_playwright_browser_interface_end_time
    global browsersteps_playwright_browser_interface_start_time

    browsersteps_live_ui_o = {}
    browsersteps_playwright_browser_interface = None
    browsersteps_playwright_browser_interface_browser = None
    browsersteps_playwright_browser_interface_end_time = None
    browsersteps_playwright_browser_interface_start_time = None

    print("Cleaning up old playwright session because time was up, calling .goodbye()")
    try:
        browsersteps_playwright_browser_interface_context.goodbye()
    except Exception as e:
        print ("Got exception in shutdown, probably OK")
        print (str(e))

    browsersteps_playwright_browser_interface_context = None

    print ("Cleaning up old playwright session because time was up - done")

def construct_blueprint(datastore: ChangeDetectionStore):

    browser_steps_blueprint = Blueprint('browser_steps', __name__, template_folder="templates")

    def start_browsersteps_session(watch_uuid):
        from . import nonContext
        from . import browser_steps
        import time
        global browsersteps_sessions
        global io_interface_context


        # We keep the playwright session open for many minutes
        seconds_keepalive = int(os.getenv('BROWSERSTEPS_MINUTES_KEEPALIVE', 10)) * 60

        browsersteps_start_session = {'start_time': time.time()}

        # You can only have one of these running
        # This should be very fine to leave running for the life of the application
        # @idea - Make it global so the pool of watch fetchers can use it also
        if not io_interface_context:
            io_interface_context = nonContext.c_sync_playwright()
            # Start the Playwright context, which is actually a nodejs sub-process and communicates over STDIN/STDOUT pipes
            io_interface_context = io_interface_context.start()


        # keep it alive for 10 seconds more than we advertise, sometimes it helps to keep it shutting down cleanly
        keepalive = "&timeout={}".format(((seconds_keepalive + 3) * 1000))
        try:
            browsersteps_start_session['browser'] = io_interface_context.chromium.connect_over_cdp(
                os.getenv('PLAYWRIGHT_DRIVER_URL', '') + keepalive)
        except Exception as e:
            if 'ECONNREFUSED' in str(e):
                return make_response('Unable to start the Playwright Browser session, is it running?', 401)
            else:
                return make_response(str(e), 401)

        proxy_id = datastore.get_preferred_proxy_for_watch(uuid=watch_uuid)
        proxy = None
        if proxy_id:
            proxy_url = datastore.proxy_list.get(proxy_id).get('url')
            if proxy_url:

                # Playwright needs separate username and password values
                from urllib.parse import urlparse
                parsed = urlparse(proxy_url)
                proxy = {'server': proxy_url}

                if parsed.username:
                    proxy['username'] = parsed.username

                if parsed.password:
                    proxy['password'] = parsed.password

                print("Browser Steps: UUID {} selected proxy {}".format(watch_uuid, proxy_url))

        # Tell Playwright to connect to Chrome and setup a new session via our stepper interface
        browsersteps_start_session['browserstepper'] = browser_steps.browsersteps_live_ui(
            playwright_browser=browsersteps_start_session['browser'],
            proxy=proxy)

        # For test
        #browsersteps_start_session['browserstepper'].action_goto_url(value="http://example.com?time="+str(time.time()))

        return browsersteps_start_session


    @login_optionally_required
    @browser_steps_blueprint.route("/browsersteps_update", methods=['GET', 'POST'])
    @browser_steps_blueprint.route("/browsersteps_start_session", methods=['GET'])
    def browsersteps_start_session():
        # A new session was requested, return sessionID

        import uuid
        global browsersteps_sessions

        browsersteps_session_id = str(uuid.uuid4())
        watch_uuid = request.args.get('uuid')

        if not watch_uuid:
            return make_response('No Watch UUID specified', 500)

        print("Starting connection with playwright")
        logging.debug("browser_steps.py connecting")
        browsersteps_sessions[browsersteps_session_id] = start_browsersteps_session(watch_uuid)
        print("Starting connection with playwright - done")
        return {'browsersteps_session_id': browsersteps_session_id}

    # A request for an action was received
    @login_optionally_required
    @browser_steps_blueprint.route("/browsersteps_update", methods=['POST'])
    def browsersteps_ui_update():
        import base64
        import playwright._impl._api_types
        import time

        global browsersteps_sessions
        from changedetectionio.blueprint.browser_steps import browser_steps

        global browsersteps_live_ui_o, browsersteps_playwright_browser_interface_end_time
        global browsersteps_playwright_browser_interface_browser
        global browsersteps_playwright_browser_interface
        global browsersteps_playwright_browser_interface_start_time

        step_n = None
        remaining =0
        uuid = request.args.get('uuid')

@@ -87,13 +135,9 @@ def construct_blueprint(datastore: ChangeDetectionStore):
        if not browsersteps_session_id:
            return make_response('No browsersteps_session_id specified', 500)

        # Because we don't "really" run in a context manager ( we make the playwright interface global/long-living )
        # We need to manage the shutdown when the time is up
        if browsersteps_playwright_browser_interface_end_time:
            remaining = browsersteps_playwright_browser_interface_end_time-time.time()
            if browsersteps_playwright_browser_interface_end_time and remaining <= 0:
                cleanup_playwright_session()
                return make_response('Browser session expired, please reload the Browser Steps interface', 401)
        if not browsersteps_sessions.get(browsersteps_session_id):
            return make_response('No session exists under that ID', 500)


        # Actions - step/apply/etc, do the thing and return state
        if request.method == 'POST':
@@ -112,12 +156,7 @@ def construct_blueprint(datastore: ChangeDetectionStore):
            # @todo try.. accept.. nice errors not popups..
            try:

                this_session = browsersteps_live_ui_o.get(browsersteps_session_id)
                if not this_session:
                    print("Browser exited")
                    return make_response('Browser session ran out of time :( Please reload this page.', 401)

                this_session.call_action(action_name=step_operation,
                browsersteps_sessions[browsersteps_session_id]['browserstepper'].call_action(action_name=step_operation,
                                         selector=step_selector,
                                         optional_value=step_optional_value)

@@ -129,108 +168,43 @@ def construct_blueprint(datastore: ChangeDetectionStore):
            # Get visual selector ready/update its data (also use the current filter info from the page?)
            # When the last 'apply' button was pressed
            # @todo this adds overhead because the xpath selection is happening twice
            u = this_session.page.url
            u = browsersteps_sessions[browsersteps_session_id]['browserstepper'].page.url
            if is_last_step and u:
                (screenshot, xpath_data) = this_session.request_visualselector_data()
                (screenshot, xpath_data) = browsersteps_sessions[browsersteps_session_id]['browserstepper'].request_visualselector_data()
                datastore.save_screenshot(watch_uuid=uuid, screenshot=screenshot)
                datastore.save_xpath_data(watch_uuid=uuid, data=xpath_data)

        # Setup interface
        if request.method == 'GET':
#        if not this_session.page:
#            cleanup_playwright_session()
#            return make_response('Browser session ran out of time :( Please reload this page.', 401)

            if not browsersteps_playwright_browser_interface:
                print("Starting connection with playwright")
                logging.debug("browser_steps.py connecting")
        # Screenshots and other info only needed on requesting a step (POST)
        try:
            state = browsersteps_sessions[browsersteps_session_id]['browserstepper'].get_current_state()
        except playwright._impl._api_types.Error as e:
            return make_response("Browser session ran out of time :( Please reload this page."+str(e), 401)

                global browsersteps_playwright_browser_interface_context
                from . import nonContext
                browsersteps_playwright_browser_interface_context = nonContext.c_sync_playwright()
                browsersteps_playwright_browser_interface = browsersteps_playwright_browser_interface_context.start()
                # At 20 minutes, some other variable is closing it
                # @todo find out what it is and set it
                seconds_keepalive = int(os.getenv('BROWSERSTEPS_MINUTES_KEEPALIVE', 10)) * 60
        # Use send_file() which is way faster than read/write loop on bytes
        import json
        from tempfile import mkstemp
        from flask import send_file
        tmp_fd, tmp_file = mkstemp(text=True, suffix=".json", prefix="changedetectionio-")

                # keep it alive for 10 seconds more than we advertise, sometimes it helps to keep it shutting down cleanly
                keepalive = "&timeout={}".format(((seconds_keepalive+3) * 1000))
                try:
                    browsersteps_playwright_browser_interface_browser = browsersteps_playwright_browser_interface.chromium.connect_over_cdp(
                        os.getenv('PLAYWRIGHT_DRIVER_URL', '') + keepalive)
                except Exception as e:
                    if 'ECONNREFUSED' in str(e):
                        return make_response('Unable to start the Playwright session properly, is it running?', 401)
        output = json.dumps({'screenshot': "data:image/jpeg;base64,{}".format(
            base64.b64encode(state[0]).decode('ascii')),
            'xpath_data': state[1],
            'session_age_start': browsersteps_sessions[browsersteps_session_id]['browserstepper'].age_start,
            'browser_time_remaining': round(remaining)
        })

                browsersteps_playwright_browser_interface_end_time = time.time() + (seconds_keepalive-3)
                print("Starting connection with playwright - done")
        with os.fdopen(tmp_fd, 'w') as f:
            f.write(output)

            if not browsersteps_live_ui_o.get(browsersteps_session_id):
                # Boot up a new session
                proxy_id = datastore.get_preferred_proxy_for_watch(uuid=uuid)
                proxy = None
                if proxy_id:
                    proxy_url = datastore.proxy_list.get(proxy_id).get('url')
                    if proxy_url:

                        # Playwright needs separate username and password values
                        from urllib.parse import urlparse
                        parsed = urlparse(proxy_url)
                        proxy = {'server': proxy_url}

                        if parsed.username:
                            proxy['username'] = parsed.username

                        if parsed.password:
                            proxy['password'] = parsed.password

                        print("Browser Steps: UUID {} Using proxy {}".format(uuid, proxy_url))

                # Begin the new "Playwright Context" that re-uses the playwright interface
                # Each session is a "Playwright Context" as a list, that uses the playwright interface
                browsersteps_live_ui_o[browsersteps_session_id] = browser_steps.browsersteps_live_ui(
                    playwright_browser=browsersteps_playwright_browser_interface_browser,
                    proxy=proxy)
                this_session = browsersteps_live_ui_o[browsersteps_session_id]

        if not this_session.page:
            cleanup_playwright_session()
            return make_response('Browser session ran out of time :( Please reload this page.', 401)

        response = None

        if request.method == 'POST':
            # Screenshots and other info only needed on requesting a step (POST)
            try:
                state = this_session.get_current_state()
            except playwright._impl._api_types.Error as e:
                return make_response("Browser session ran out of time :( Please reload this page."+str(e), 401)

            # Use send_file() which is way faster than read/write loop on bytes
            import json
            from tempfile import mkstemp
            from flask import send_file
            tmp_fd, tmp_file = mkstemp(text=True, suffix=".json", prefix="changedetectionio-")

            output = json.dumps({'screenshot': "data:image/jpeg;base64,{}".format(
                base64.b64encode(state[0]).decode('ascii')),
                'xpath_data': state[1],
                'session_age_start': this_session.age_start,
                'browser_time_remaining': round(remaining)
            })

            with os.fdopen(tmp_fd, 'w') as f:
                f.write(output)

            response = make_response(send_file(path_or_file=tmp_file,
                                               mimetype='application/json; charset=UTF-8',
                                               etag=True))
            # No longer needed
            os.unlink(tmp_file)

        elif request.method == 'GET':
            # Just enough to get the session rolling, it will call for goto-site via POST next
            response = make_response({
                'session_age_start': this_session.age_start,
                'browser_time_remaining': round(remaining)
            })
        response = make_response(send_file(path_or_file=tmp_file,
                                           mimetype='application/json; charset=UTF-8',
                                           etag=True))
        # No longer needed
        os.unlink(tmp_file)

        return response
```
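This refactor swaps the long list of `browsersteps_playwright_browser_interface_*` globals for a single `browsersteps_sessions` dict keyed by a per-request UUID. A minimal sketch of that registry pattern (the dummy `object()` stands in for the real `browsersteps_live_ui` stepper):

```python
import time
import uuid

browsersteps_sessions = {}

def start_session(watch_uuid: str) -> str:
    # Each UI visit gets its own entry instead of sharing one global browser handle.
    session_id = str(uuid.uuid4())
    browsersteps_sessions[session_id] = {
        "start_time": time.time(),
        "watch_uuid": watch_uuid,
        "browserstepper": object(),  # stand-in for browsersteps_live_ui(...)
    }
    return session_id

def get_session(session_id: str):
    # Callers check for None and answer "No session exists under that ID".
    return browsersteps_sessions.get(session_id)

sid = start_session("some-watch-uuid")
assert get_session(sid) is not None
assert get_session("unknown") is None
```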
```
@@ -71,10 +71,10 @@ class steppable_browser_interface():
            optional_value = str(jinja2_env.from_string(optional_value).render())

        action_handler(selector, optional_value)
        self.page.wait_for_timeout(3 * 1000)
        self.page.wait_for_timeout(1.5 * 1000)
        print("Call action done in", time.time() - now)

    def action_goto_url(self, selector, value):
    def action_goto_url(self, selector=None, value=None):
        # self.page.set_viewport_size({"width": 1280, "height": 5000})
        now = time.time()
        response = self.page.goto(value, timeout=0, wait_until='commit')
@@ -105,7 +105,8 @@ class steppable_browser_interface():
        print("Clicking element")
        if not len(selector.strip()):
            return
        self.page.click(selector, timeout=10 * 1000, delay=randint(200, 500))

        self.page.click(selector=selector, timeout=30 * 1000, delay=randint(200, 500))

    def action_click_element_if_exists(self, selector, value):
        import playwright._impl._api_types as _api_types
@@ -132,18 +133,18 @@ class steppable_browser_interface():
        self.page.wait_for_timeout(1000)

    def action_wait_for_seconds(self, selector, value):
        self.page.wait_for_timeout(int(value) * 1000)
        self.page.wait_for_timeout(float(value.strip()) * 1000)

    def action_wait_for_text(self, selector, value):
        import json
        v = json.dumps(value)
        self.page.wait_for_function(f'document.querySelector("body").innerText.includes({v});', timeout=30000)
        self.page.wait_for_function(f'document.querySelector("body").innerText.includes({v});', timeout=90000)

    def action_wait_for_text_in_element(self, selector, value):
        import json
        s = json.dumps(selector)
        v = json.dumps(value)
        self.page.wait_for_function(f'document.querySelector({s}).innerText.includes({v});', timeout=30000)
        self.page.wait_for_function(f'document.querySelector({s}).innerText.includes({v});', timeout=90000)

    # @todo - in the future make some popout interface to capture what needs to be set
    # https://playwright.dev/python/docs/api/class-keyboard
```
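`action_wait_for_seconds` now accepts fractional values and tolerates stray whitespace (`float(value.strip())` where `int(value)` would raise on "1.5"). A tiny sketch of the difference, with `FakePage` as a stand-in for Playwright's `Page`:

```python
class FakePage:
    # Stand-in for playwright's Page; wait_for_timeout takes milliseconds.
    def wait_for_timeout(self, ms):
        print(f"waiting {ms} ms")

def action_wait_for_seconds(page, value: str):
    # " 1.5 " -> 1500.0 ms; the old int(value) raised ValueError on "1.5"
    page.wait_for_timeout(float(value.strip()) * 1000)

action_wait_for_seconds(FakePage(), " 1.5 ")
```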
```
@@ -147,6 +147,13 @@ class Fetcher():
    def is_ready(self):
        return True

    def get_all_headers(self):
        """
        Get all headers but ensure all keys are lowercase
        :return:
        """
        return {k.lower(): v for k, v in self.headers.items()}

    def iterate_browser_steps(self):
        from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface
        from playwright._impl._api_types import TimeoutError
@@ -182,7 +189,8 @@ class Fetcher():
                                                      optional_value=optional_value)
                    self.screenshot_step(step_n)
                    self.save_step_html(step_n)
                except TimeoutError:
                except TimeoutError as e:
                    print(str(e))
                    # Stop processing here
                    raise BrowserStepsStepTimout(step_n=step_n)

@@ -287,168 +295,18 @@ class base_html_playwright(Fetcher):
            current_include_filters=None,
            is_binary=False):

        from pkg_resources import resource_string

        extra_wait_ms = (int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay) * 1000
        xpath_element_js = self.xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors)

        code = f"""module.exports = async ({{ page, context }}) => {{

          var {{ url, execute_js, user_agent, extra_wait_ms, req_headers, include_filters, xpath_element_js, screenshot_quality, proxy_username, proxy_password, disk_cache_dir}} = context;

          await page.setBypassCSP(true)
          await page.setExtraHTTPHeaders(req_headers);
          await page.setUserAgent(user_agent);
          // https://ourcodeworld.com/articles/read/1106/how-to-solve-puppeteer-timeouterror-navigation-timeout-of-30000-ms-exceeded

          await page.setDefaultNavigationTimeout(0);

          if(proxy_username) {{
            await page.authenticate({{
                username: proxy_username,
                password: proxy_password
            }});
          }}

        await page.setViewport({{
          width: 1024,
          height: 768,
          deviceScaleFactor: 1,
        }});

        // Very primitive disk cache - USE WITH EXTREME CAUTION
        // Run browserless container with -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]"
        if ( disk_cache_dir ) {{

            await page.setRequestInterception(true);

            console.log(">>>>>>>>>>>>>>> LOCAL DISK CACHE ENABLED <<<<<<<<<<<<<<<<<<<<<");
            const fs = require('fs');
            const crypto = require('crypto');
            function file_is_expired(file_path) {{
                if (!fs.existsSync(dir_path+key)) {{
                  return true;
                }}
                var stats = fs.statSync(file_path);
                const now_date = new Date();
                const expire_seconds = 300;
                if ( (now_date/1000) - (stats.mtime.getTime() / 1000) > expire_seconds) {{
                  console.log("CACHE EXPIRED: "+file_path);
                  return true;
                }}
                return false;

            }}

            page.on('request', async (request) => {{

                // if (blockedExtensions.some((str) => req.url().endsWith(str))) return req.abort();
                const url = request.url();
                const key = crypto.createHash('md5').update(url).digest("hex");
                const dir_path = disk_cache_dir + key.slice(0, 1) + '/' + key.slice(1, 2) + '/' + key.slice(2, 3) + '/';

                // https://stackoverflow.com/questions/4482686/check-synchronously-if-file-directory-exists-in-node-js

                if (fs.existsSync(dir_path+key)) {{
                    file_is_expired(dir_path+key);
                    console.log("Cache exists "+dir_path+key+ " - "+url);
                    const cached_data = fs.readFileSync(dir_path+key);
                    request.respond({{
                        status: 200,
                        //contentType: 'text/html', //@todo
                        body: cached_data
                    }});
                    return;
                }}
                request.continue();
            }});

            page.on('response', async (response) => {{
                const url = response.url();
                // @todo - check response size()
                console.log("Cache - Got "+response.request().method()+" - "+url+" - "+response.request().resourceType());

                if(response.request().method()  != 'GET' || response.request().resourceType() == 'xhr' || response.request().resourceType() == 'document' || response.status() != 200 ) {{
                    console.log("Skipping- "+url);
                    return;
                }}

                const key = crypto.createHash('md5').update(url).digest("hex");
                const dir_path = disk_cache_dir + key.slice(0, 1) + '/' + key.slice(1, 2) + '/' + key.slice(2, 3) + '/';
                const data = await response.text();
                if (!fs.existsSync(dir_path)) {{
                    fs.mkdirSync(dir_path, {{ recursive: true }})
                }}

                var expired = false;
                if (fs.existsSync(dir_path+key)) {{
                  if (file_is_expired(dir_path+key)) {{
                    fs.writeFileSync(dir_path+key, data);
                  }}
                }} else {{
                    fs.writeFileSync(dir_path+key, data);
                }}
            }});
          }}


          const r = await page.goto(url, {{
                waitUntil: 'load'
          }});

          await page.waitForTimeout(1000);
          await page.waitForTimeout(extra_wait_ms);

          if(execute_js) {{
            await page.evaluate(execute_js);
            await page.waitForTimeout(200);
          }}

        var xpath_data;
        var instock_data;
        try {{
             xpath_data = await page.evaluate((include_filters) => {{ {xpath_element_js} }}, include_filters);
             instock_data = await page.evaluate(() => {{ {self.instock_data_js} }});
        }} catch (e) {{
            console.log(e);
        }}

      // Protocol error (Page.captureScreenshot): Cannot take screenshot with 0 width can come from a proxy auth failure
      // Wrap it here (for now)

      var b64s = false;
      try {{
             b64s = await page.screenshot({{ encoding: "base64", fullPage: true, quality: screenshot_quality, type: 'jpeg' }});
        }} catch (e) {{
            console.log(e);
        }}

        // May fail on very large pages with 'WARNING: tile memory limits exceeded, some content may not draw'
        if (!b64s) {{
            // @todo after text extract, we can place some overlay text with red background to say 'croppped'
            console.error('ERROR: content-fetcher page was maybe too large for a screenshot, reverting to viewport only screenshot');
            try {{
                 b64s = await page.screenshot({{ encoding: "base64", quality: screenshot_quality, type: 'jpeg' }});
            }} catch (e) {{
                console.log(e);
            }}
         }}


         var html = await page.content();
          return {{
            data: {{
                'content': html,
                'headers': r.headers(),
                'instock_data': instock_data,
                'screenshot': b64s,
                'status_code': r.status(),
                'xpath_data': xpath_data
            }},
            type: 'application/json',
          }};
        }};"""
        self.xpath_element_js = self.xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors)
        code = resource_string(__name__, "res/puppeteer_fetch.js").decode('utf-8')
        # In the future inject this is a proper JS package
        code = code.replace('%xpath_scrape_code%', self.xpath_element_js)
        code = code.replace('%instock_scrape_code%', self.instock_data_js)

        from requests.exceptions import ConnectTimeout, ReadTimeout
        wait_browserless_seconds = 120
        wait_browserless_seconds = 240

        browserless_function_url = os.getenv('BROWSERLESS_FUNCTION_URL')
        from urllib.parse import urlparse
@@ -475,7 +333,9 @@ class base_html_playwright(Fetcher):
                json={
                    "code": code,
                    "context": {
                        'disk_cache_dir': False, # or path to disk cache
                        # Very primitive disk cache - USE WITH EXTREME CAUTION
                        # Run browserless container  with -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]"
                        'disk_cache_dir': os.getenv("PUPPETEER_DISK_CACHE", False), # or path to disk cache ending in /, ie /tmp/cache/
                        'execute_js': self.webdriver_js_execute_code,
                        'extra_wait_ms': extra_wait_ms,
                        'include_filters': current_include_filters,
@@ -484,14 +344,26 @@ class base_html_playwright(Fetcher):
                        'url': url,
                        'user_agent': request_headers.get('User-Agent', 'Mozilla/5.0'),
                        'proxy_username': self.proxy.get('username','') if self.proxy else False,
                        'proxy_password': self.proxy.get('password','') if self.proxy else False,
                        'proxy_password': self.proxy.get('password', '') if self.proxy else False,
                        'no_cache_list': [
                            'twitter',
                            '.pdf'
                        ],
                        # Could use https://github.com/easylist/easylist here, or install a plugin
                        'block_url_list': [
                            'adnxs.com',
                            'analytics.twitter.com',
                            'doubleclick.net',
                            'google-analytics.com',
                            'googletagmanager',
                            'trustpilot.com'
                        ]
                    }
                },
                # @todo /function needs adding ws:// to http:// rebuild this
                url=browserless_function_url+f"{amp}--disable-features=AudioServiceOutOfProcess&dumpio=true&--disable-remote-fonts",
                timeout=wait_browserless_seconds)

        except ReadTimeout:
            raise PageUnloadable(url=url, status_code=None, message=f"No response from browserless in {wait_browserless_seconds}s")
        except ConnectTimeout:
@@ -519,6 +391,7 @@ class base_html_playwright(Fetcher):
                self.headers = x.get('headers')
                self.instock_data = x.get('instock_data')
                self.screenshot = base64.b64decode(x.get('screenshot'))
                self.status_code = x.get('status_code')
                self.xpath_data = x.get('xpath_data')

            else:
@@ -535,17 +408,23 @@ class base_html_playwright(Fetcher):
            current_include_filters=None,
            is_binary=False):

        if os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH'):
            # Temporary backup solution until we rewrite the playwright code
            return self.run_fetch_browserless_puppeteer(
                url,
                timeout,
                request_headers,
                request_body,
                request_method,
                ignore_status_codes,
                current_include_filters,
                is_binary)
        # For now, USE_EXPERIMENTAL_PUPPETEER_FETCH is not supported by watches with BrowserSteps (for now!)
        has_browser_steps = self.browser_steps and list(filter(
                lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'),
                self.browser_steps))

        if not has_browser_steps:
            if os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH'):
                # Temporary backup solution until we rewrite the playwright code
                return self.run_fetch_browserless_puppeteer(
                    url,
                    timeout,
                    request_headers,
                    request_body,
                    request_method,
                    ignore_status_codes,
                    current_include_filters,
                    is_binary)

        from playwright.sync_api import sync_playwright
        import playwright._impl._api_types
```
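The new `Fetcher.get_all_headers()` normalises header keys to lowercase so later comparisons don't depend on whatever casing the server used. The same one-liner in isolation:

```python
def get_all_headers(headers: dict) -> dict:
    # HTTP header names are case-insensitive; normalise once at the boundary.
    return {k.lower(): v for k, v in headers.items()}

print(get_all_headers({"Content-Type": "text/html", "X-Cache": "HIT"}))
# {'content-type': 'text/html', 'x-cache': 'HIT'}
```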
```
@@ -481,6 +481,10 @@ class globalSettingsApplicationForm(commonSettingsForm):
    global_subtractive_selectors = StringListField('Remove elements', [ValidateCSSJSONXPATHInput(allow_xpath=False, allow_json=False)])
    ignore_whitespace = BooleanField('Ignore whitespace')
    password = SaltyPasswordField()
    pager_size = IntegerField('Pager size',
                              render_kw={"style": "width: 5em;"},
                              validators=[validators.NumberRange(min=0,
                                                                 message="Should be atleast zero (disabled)")])
    removepassword_button = SubmitField('Remove password', render_kw={"class": "pure-button pure-button-primary"})
    render_anchor_tag_content = BooleanField('Render anchor tag content', default=False)
    shared_diff_access = BooleanField('Allow access to view diff page when password is enabled', default=False, validators=[validators.Optional()])
```
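The new `pager_size` setting is validated with WTForms' `NumberRange`, where 0 means paging is disabled. A cut-down standalone form using the same validator (the surrounding form class is simplified here):

```python
from wtforms import Form, IntegerField, validators

class SettingsForm(Form):
    # 0 is allowed and means "paging disabled"; negatives are rejected.
    pager_size = IntegerField('Pager size',
                              validators=[validators.NumberRange(min=0,
                                                                 message="Should be at least zero (disabled)")])

form = SettingsForm(data={'pager_size': -1})
print(form.validate())          # False
print(form.pager_size.errors)   # ['Should be at least zero (disabled)']
```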
```
@@ -137,12 +137,13 @@ def _get_stripped_text_from_json_match(match):
def extract_json_as_string(content, json_filter, ensure_is_ldjson_info_type=None):
    stripped_text_from_html = False

    # Try to parse/filter out the JSON, if we get some parser error, then maybe it's embedded <script type=ldjson>
    # Try to parse/filter out the JSON, if we get some parser error, then maybe it's embedded within HTML tags
    try:
        stripped_text_from_html = _parse_json(json.loads(content), json_filter)
    except json.JSONDecodeError:

        # Foreach <script json></script> blob.. just return the first that matches json_filter
        # As a last resort, try to parse the whole <body>
        s = []
        soup = BeautifulSoup(content, 'html.parser')

@@ -150,32 +151,34 @@ def extract_json_as_string(content, json_filter, ensure_is_ldjson_info_type=None
            bs_result = soup.findAll('script', {"type": "application/ld+json"})
        else:
            bs_result = soup.findAll('script')
        bs_result += soup.findAll('body')


        if not bs_result:
            raise JSONNotFound("No parsable JSON found in this document")

        bs_jsons = []
        for result in bs_result:
            # Skip empty tags, and things that dont even look like JSON
            if not result.string or not '{' in result.string:
            if not result.text or '{' not in result.text:
                continue

            try:
                json_data = json.loads(result.string)
                json_data = json.loads(result.text)
                bs_jsons.append(json_data)
            except json.JSONDecodeError:
                # Just skip it
                # Skip objects which cannot be parsed
                continue
            else:
                stripped_text_from_html = _parse_json(json_data, json_filter)
                if ensure_is_ldjson_info_type:
                    # Could sometimes be list, string or something else random
                    if isinstance(json_data, dict):
                        # If it has LD JSON 'key' @type, and @type is 'product', and something was found for the search
                        # (Some sites have multiple of the same ld+json @type='product', but some have the review part, some have the 'price' part)
                        if json_data.get('@type', False) and json_data.get('@type','').lower() == ensure_is_ldjson_info_type.lower() and stripped_text_from_html:
                            break
                elif stripped_text_from_html:
                    break

        if not bs_jsons:
            raise JSONNotFound("No parsable JSON found in this document")

        for json_data in bs_jsons:
            stripped_text_from_html = _parse_json(json_data, json_filter)
            if ensure_is_ldjson_info_type:
                # Could sometimes be list, string or something else random
                if isinstance(json_data, dict):
                    # If it has LD JSON 'key' @type, and @type is 'product', and something was found for the search
                    # (Some sites have multiple of the same ld+json @type='product', but some have the review part, some have the 'price' part)
                    if json_data.get('@type', False) and json_data.get('@type','').lower() == ensure_is_ldjson_info_type.lower() and stripped_text_from_html:
                        break
            elif stripped_text_from_html:
                break

    if not stripped_text_from_html:
        # Re 265 - Just return an empty string when filter not found
```
| @@ -52,7 +52,8 @@ class import_url_list(Importer): | ||||
|  | ||||
|             # Flask wtforms validators won't work with basic auth, use the validators package | ||||
|             # Up to 5000 per batch so we don't flood the server | ||||
|             if len(url) and validators.url(url.replace('source:', '')) and good < 5000: | ||||
|             # @todo validators.url failed on local hostnames (such as referring to ourself when using browserless) | ||||
|             if len(url) and 'http' in url.lower() and good < 5000: | ||||
|                 extras = None | ||||
|                 if processor: | ||||
|                     extras = {'processor': processor} | ||||
|   | ||||
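
The hunk above swaps `validators.url()` for a looser substring check because the package rejects bare, dot-less hostnames of the kind Docker networks use. A minimal sketch of the failure mode, assuming the `validators` package (exact behaviour varies by version):

    import validators

    # A bare container hostname with no TLD, e.g. when the watch refers back to
    # ourselves via browserless on the same Docker network
    result = validators.url('http://changedet:5004/')
    if not result:
        # validators returns a falsy ValidationFailure object rather than raising,
        # so perfectly reachable in-network URLs would be dropped from the import batch
        print("rejected:", result)
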
| @@ -23,25 +23,26 @@ class model(dict): | ||||
|                     'workers': int(getenv("DEFAULT_SETTINGS_REQUESTS_WORKERS", "10")),  # Number of threads, lower is better for slow connections | ||||
|                 }, | ||||
|                 'application': { | ||||
|                     # Custom notification content | ||||
|                     'api_access_token_enabled': True, | ||||
|                     'password': False, | ||||
|                     'base_url' : None, | ||||
|                     'extract_title_as_title': False, | ||||
|                     'empty_pages_are_a_change': False, | ||||
|                     'extract_title_as_title': False, | ||||
|                     'fetch_backend': getenv("DEFAULT_FETCH_BACKEND", "html_requests"), | ||||
|                     'filter_failure_notification_threshold_attempts': _FILTER_FAILURE_THRESHOLD_ATTEMPTS_DEFAULT, | ||||
|                     'global_ignore_text': [], # List of text to ignore when calculating the comparison checksum | ||||
|                     'global_subtractive_selectors': [], | ||||
|                     'ignore_whitespace': True, | ||||
|                     'render_anchor_tag_content': False, | ||||
|                     'notification_urls': [], # Apprise URL list | ||||
|                     # Custom notification content | ||||
|                     'notification_title': default_notification_title, | ||||
|                     'notification_body': default_notification_body, | ||||
|                     'notification_format': default_notification_format, | ||||
|                     'notification_title': default_notification_title, | ||||
|                     'notification_urls': [], # Apprise URL list | ||||
|                     'pager_size': 50, | ||||
|                     'password': False, | ||||
|                     'render_anchor_tag_content': False, | ||||
|                     'schema_version' : 0, | ||||
|                     'shared_diff_access': False, | ||||
|                     'webdriver_delay': None  # Extra delay in seconds before extracting text | ||||
|                     'webdriver_delay': None,  # Extra delay in seconds before extracting text | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
| @@ -49,3 +50,15 @@ class model(dict): | ||||
|     def __init__(self, *arg, **kw): | ||||
|         super(model, self).__init__(*arg, **kw) | ||||
|         self.update(self.base_config) | ||||
|  | ||||
|  | ||||
| def parse_headers_from_text_file(filepath): | ||||
|     headers = {} | ||||
|     with open(filepath, 'r') as f: | ||||
|         for l in f.readlines(): | ||||
|             l = l.strip() | ||||
|             if not l.startswith('#') and ':' in l: | ||||
|                 (k, v) = l.split(':', 1)  # split on the first ':' only, header values may themselves contain one | ||||
|                 headers[k.strip()] = v.strip() | ||||
|  | ||||
|     return headers | ||||
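
A usage sketch for `parse_headers_from_text_file()` above. The format it expects is one `Name: value` pair per line, with lines starting `#` ignored (the path below is illustrative):

    # Given /datastore/headers.txt containing:
    #   # sent with every request
    #   Cookie: foobar
    #   User-Agent: wonderbra 1.0
    headers = parse_headers_from_text_file('/datastore/headers.txt')
    # -> {'Cookie': 'foobar', 'User-Agent': 'wonderbra 1.0'}
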
| @@ -473,6 +473,40 @@ class model(dict): | ||||
|         # None is set | ||||
|         return False | ||||
|  | ||||
|     @property | ||||
|     def has_extra_headers_file(self): | ||||
|         if os.path.isfile(os.path.join(self.watch_data_dir, 'headers.txt')): | ||||
|             return True | ||||
|  | ||||
|         for f in self.all_tags: | ||||
|             fname = "headers-"+re.sub(r'[\W_]', '', f).lower().strip() + ".txt" | ||||
|             filepath = os.path.join(self.__datastore_path, fname) | ||||
|             if os.path.isfile(filepath): | ||||
|                 return True | ||||
|  | ||||
|         return False | ||||
|  | ||||
|     def get_all_headers(self): | ||||
|         from .App import parse_headers_from_text_file | ||||
|         headers = self.get('headers', {}).copy() | ||||
|         # Headers can also be read from disk, from a 'headers.txt' file in the watch data dir | ||||
|         filepath = os.path.join(self.watch_data_dir, 'headers.txt') | ||||
|         try: | ||||
|             if os.path.isfile(filepath): | ||||
|                 headers.update(parse_headers_from_text_file(filepath)) | ||||
|         except Exception as e: | ||||
|             print(f"ERROR reading headers.txt at {filepath}", str(e)) | ||||
|  | ||||
|         # Or each by tag, as tagname.txt in the main datadir | ||||
|         for f in self.all_tags: | ||||
|             fname = "headers-"+re.sub(r'[\W_]', '', f).lower().strip() + ".txt" | ||||
|             filepath = os.path.join(self.__datastore_path, fname) | ||||
|             try: | ||||
|                 if os.path.isfile(filepath): | ||||
|                     headers.update(parse_headers_from_text_file(filepath)) | ||||
|             except Exception as e: | ||||
|                 print(f"ERROR reading headers file at {filepath}", str(e)) | ||||
|         return headers | ||||
|  | ||||
|     def get_last_fetched_before_filters(self): | ||||
|         import brotli | ||||
|   | ||||
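
The per-tag lookup above derives one headers file per tag by stripping all non-alphanumeric characters and lowercasing; a quick sketch of that naming rule (tag values are examples only):

    import re

    def tag_headers_filename(tag):
        # Mirrors the expression used in has_extra_headers_file / get_all_headers
        return "headers-" + re.sub(r'[\W_]', '', tag).lower().strip() + ".txt"

    tag_headers_filename("My Tag!")        # -> 'headers-mytag.txt'
    tag_headers_filename("restock_checks") # -> 'headers-restockchecks.txt'
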
| @@ -12,6 +12,12 @@ urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) | ||||
| name = 'Re-stock detection for single product pages' | ||||
| description = 'Detects if the product goes back to in-stock' | ||||
|  | ||||
| class UnableToExtractRestockData(Exception): | ||||
|     def __init__(self, status_code): | ||||
|         # Set this so we can use it in other parts of the app | ||||
|         self.status_code = status_code | ||||
|         return | ||||
|  | ||||
| class perform_site_check(difference_detection_processor): | ||||
|     screenshot = None | ||||
|     xpath_data = None | ||||
| @@ -105,7 +111,8 @@ class perform_site_check(difference_detection_processor): | ||||
|             fetched_md5 = hashlib.md5(fetcher.instock_data.encode('utf-8')).hexdigest() | ||||
|             # 'Possibly in stock' comes from stock-not-in-stock.js when no string found above the fold. | ||||
|             update_obj["in_stock"] = True if fetcher.instock_data == 'Possibly in stock' else False | ||||
|  | ||||
|         else: | ||||
|             raise UnableToExtractRestockData(status_code=fetcher.status_code) | ||||
|  | ||||
|         # The main thing that all this at the moment comes down to :) | ||||
|         changed_detected = False | ||||
|   | ||||
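
With the new `else:` branch above, a fetch that yields no `instock_data` now raises rather than passing silently. A hedged sketch of caller-side handling (the handler and datastore calls here are illustrative, not the project's exact worker loop):

    try:
        changed_detected, update_obj, contents = update_handler.run(uuid)
    except UnableToExtractRestockData as e:
        # status_code was attached to the exception for exactly this purpose
        datastore.update_watch(uuid=uuid, update_obj={
            'last_error': f"Unable to extract restock data (HTTP {e.status_code})"
        })
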
| @@ -70,10 +70,9 @@ class perform_site_check(difference_detection_processor): | ||||
|         # Unset any existing notification error | ||||
|         update_obj = {'last_notification_error': False, 'last_error': False} | ||||
|  | ||||
|         extra_headers = watch.get('headers', []) | ||||
|  | ||||
|         # Tweak the base config with the per-watch ones | ||||
|         request_headers = deepcopy(self.datastore.data['settings']['headers']) | ||||
|         extra_headers = watch.get_all_headers() | ||||
|         request_headers = self.datastore.get_all_headers() | ||||
|         request_headers.update(extra_headers) | ||||
|  | ||||
|         # https://github.com/psf/requests/issues/4525 | ||||
| @@ -140,7 +139,7 @@ class perform_site_check(difference_detection_processor): | ||||
|         self.xpath_data = fetcher.xpath_data | ||||
|  | ||||
|         # Track the content type | ||||
|         update_obj['content_type'] = fetcher.headers.get('Content-Type', '') | ||||
|         update_obj['content_type'] = fetcher.get_all_headers().get('content-type', '').lower() | ||||
|  | ||||
|         # Watches added automatically in the queue manager will skip if it's the same checksum as the previous run | ||||
|         # Saves a lot of CPU | ||||
| @@ -160,7 +159,7 @@ class perform_site_check(difference_detection_processor): | ||||
|         # https://stackoverflow.com/questions/41817578/basic-method-chaining ? | ||||
|         # return content().textfilter().jsonextract().checksumcompare() ? | ||||
|  | ||||
|         is_json = 'application/json' in fetcher.headers.get('Content-Type', '') | ||||
|         is_json = 'application/json' in fetcher.get_all_headers().get('content-type', '').lower() | ||||
|         is_html = not is_json | ||||
|  | ||||
|         # source: support, basically treat it as plaintext | ||||
| @@ -168,7 +167,7 @@ class perform_site_check(difference_detection_processor): | ||||
|             is_html = False | ||||
|             is_json = False | ||||
|  | ||||
|         if watch.is_pdf or 'application/pdf' in fetcher.headers.get('Content-Type', '').lower(): | ||||
|         if watch.is_pdf or 'application/pdf' in fetcher.get_all_headers().get('content-type', '').lower(): | ||||
|             from shutil import which | ||||
|             tool = os.getenv("PDF_TO_HTML_TOOL", "pdftohtml") | ||||
|             if not which(tool): | ||||
| @@ -236,7 +235,7 @@ class perform_site_check(difference_detection_processor): | ||||
|             html_content = fetcher.content | ||||
|  | ||||
|             # If not JSON, and if it's not text/plain.. | ||||
|             if 'text/plain' in fetcher.headers.get('Content-Type', '').lower(): | ||||
|             if 'text/plain' in fetcher.get_all_headers().get('content-type', '').lower(): | ||||
|                 # Don't run get_text or xpath/css filters on plaintext | ||||
|                 stripped_text_from_html = html_content | ||||
|             else: | ||||
|   | ||||
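
Every `fetcher.headers.get('Content-Type', ...)` lookup above becomes `fetcher.get_all_headers().get('content-type', ...)`, so matching no longer depends on how the server cased the header name. A plausible sketch of such a normalising accessor (assumed implementation, not quoted from the fetcher):

    class Fetcher:
        headers = {}

        def get_all_headers(self):
            # Response header names are case-insensitive per RFC 9110, so
            # normalise the keys once and let callers match on lowercase
            return {k.lower(): v for k, v in self.headers.items()}
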
183 changedetectionio/res/puppeteer_fetch.js (new file)
							| @@ -0,0 +1,183 @@ | ||||
| module.exports = async ({page, context}) => { | ||||
|  | ||||
|     var { | ||||
|         url, | ||||
|         execute_js, | ||||
|         user_agent, | ||||
|         extra_wait_ms, | ||||
|         req_headers, | ||||
|         include_filters, | ||||
|         xpath_element_js, | ||||
|         screenshot_quality, | ||||
|         proxy_username, | ||||
|         proxy_password, | ||||
|         disk_cache_dir, | ||||
|         no_cache_list, | ||||
|         block_url_list, | ||||
|     } = context; | ||||
|  | ||||
|     await page.setBypassCSP(true) | ||||
|     await page.setExtraHTTPHeaders(req_headers); | ||||
|     await page.setUserAgent(user_agent); | ||||
|     // https://ourcodeworld.com/articles/read/1106/how-to-solve-puppeteer-timeouterror-navigation-timeout-of-30000-ms-exceeded | ||||
|  | ||||
|     await page.setDefaultNavigationTimeout(0); | ||||
|  | ||||
|     if (proxy_username) { | ||||
|         await page.authenticate({ | ||||
|             username: proxy_username, | ||||
|             password: proxy_password | ||||
|         }); | ||||
|     } | ||||
|  | ||||
|     await page.setViewport({ | ||||
|         width: 1024, | ||||
|         height: 768, | ||||
|         deviceScaleFactor: 1, | ||||
|     }); | ||||
|  | ||||
|     await page.setRequestInterception(true); | ||||
|     if (disk_cache_dir) { | ||||
|         console.log(">>>>>>>>>>>>>>> LOCAL DISK CACHE ENABLED <<<<<<<<<<<<<<<<<<<<<"); | ||||
|     } | ||||
|     const fs = require('fs'); | ||||
|     const crypto = require('crypto'); | ||||
|  | ||||
|     function file_is_expired(file_path) { | ||||
|         if (!fs.existsSync(file_path)) { | ||||
|             return true; | ||||
|         } | ||||
|         var stats = fs.statSync(file_path); | ||||
|         const now_date = new Date(); | ||||
|         const expire_seconds = 300; | ||||
|         if ((now_date / 1000) - (stats.mtime.getTime() / 1000) > expire_seconds) { | ||||
|             console.log("CACHE EXPIRED: " + file_path); | ||||
|             return true; | ||||
|         } | ||||
|         return false; | ||||
|  | ||||
|     } | ||||
|  | ||||
|     page.on('request', async (request) => { | ||||
|         // General blocking of requests that waste traffic | ||||
|         if (block_url_list.some(substring => request.url().toLowerCase().includes(substring))) return request.abort(); | ||||
|  | ||||
|         if (disk_cache_dir) { | ||||
|             const url = request.url(); | ||||
|             const key = crypto.createHash('md5').update(url).digest("hex"); | ||||
|             const dir_path = disk_cache_dir + key.slice(0, 1) + '/' + key.slice(1, 2) + '/' + key.slice(2, 3) + '/'; | ||||
|  | ||||
|             // https://stackoverflow.com/questions/4482686/check-synchronously-if-file-directory-exists-in-node-js | ||||
|  | ||||
|             if (fs.existsSync(dir_path + key)) { | ||||
|                 console.log("* CACHE HIT , using - " + dir_path + key + " - " + url); | ||||
|                 const cached_data = fs.readFileSync(dir_path + key); | ||||
|                 // @todo headers can come from dir_path+key+".meta" json file | ||||
|                 request.respond({ | ||||
|                     status: 200, | ||||
|                     //contentType: 'text/html', //@todo | ||||
|                     body: cached_data | ||||
|                 }); | ||||
|                 return; | ||||
|             } | ||||
|         } | ||||
|         request.continue(); | ||||
|     }); | ||||
|  | ||||
|  | ||||
|     if (disk_cache_dir) { | ||||
|         page.on('response', async (response) => { | ||||
|             const url = response.url(); | ||||
|             // Basic filtering for sane responses | ||||
|             if (response.request().method() != 'GET' || response.request().resourceType() == 'xhr' || response.request().resourceType() == 'document' || response.status() != 200) { | ||||
|                 console.log("Skipping (not useful) - Status:" + response.status() + " Method:" + response.request().method() + " ResourceType:" + response.request().resourceType() + " " + url); | ||||
|                 return; | ||||
|             } | ||||
|             if (no_cache_list.some(substring => url.toLowerCase().includes(substring))) { | ||||
|                 console.log("Skipping (no_cache_list) - " + url); | ||||
|                 return; | ||||
|             } | ||||
|             if (url.toLowerCase().includes('data:')) { | ||||
|                 console.log("Skipping (embedded-data) - " + url); | ||||
|                 return; | ||||
|             } | ||||
|             response.buffer().then(buffer => { | ||||
|                 if (buffer.length > 100) { | ||||
|                     console.log("Cache - Saving " + response.request().method() + " - " + url + " - " + response.request().resourceType()); | ||||
|  | ||||
|                     const key = crypto.createHash('md5').update(url).digest("hex"); | ||||
|                     const dir_path = disk_cache_dir + key.slice(0, 1) + '/' + key.slice(1, 2) + '/' + key.slice(2, 3) + '/'; | ||||
|  | ||||
|                     if (!fs.existsSync(dir_path)) { | ||||
|                         fs.mkdirSync(dir_path, {recursive: true}) | ||||
|                     } | ||||
|  | ||||
|                     if (fs.existsSync(dir_path + key)) { | ||||
|                         if (file_is_expired(dir_path + key)) { | ||||
|                             fs.writeFileSync(dir_path + key, buffer); | ||||
|                         } | ||||
|                     } else { | ||||
|                         fs.writeFileSync(dir_path + key, buffer); | ||||
|                     } | ||||
|                 } | ||||
|             }); | ||||
|         }); | ||||
|     } | ||||
|  | ||||
|     const r = await page.goto(url, { | ||||
|         waitUntil: 'load' | ||||
|     }); | ||||
|  | ||||
|     await page.waitForTimeout(1000); | ||||
|     await page.waitForTimeout(extra_wait_ms); | ||||
|  | ||||
|     if (execute_js) { | ||||
|         await page.evaluate(execute_js); | ||||
|         await page.waitForTimeout(200); | ||||
|     } | ||||
|  | ||||
|     var xpath_data; | ||||
|     var instock_data; | ||||
|     try { | ||||
|         // Not sure of the best way here; in the future this should be a new package published to npm, then run in browserless | ||||
|         // (Once the old playwright is removed) | ||||
|         xpath_data = await page.evaluate((include_filters) => {%xpath_scrape_code%}, include_filters); | ||||
|         instock_data = await page.evaluate(() => {%instock_scrape_code%}); | ||||
|     } catch (e) { | ||||
|         console.log(e); | ||||
|     } | ||||
|  | ||||
|     // Protocol error (Page.captureScreenshot): Cannot take screenshot with 0 width can come from a proxy auth failure | ||||
|     // Wrap it here (for now) | ||||
|  | ||||
|     var b64s = false; | ||||
|     try { | ||||
|         b64s = await page.screenshot({encoding: "base64", fullPage: true, quality: screenshot_quality, type: 'jpeg'}); | ||||
|     } catch (e) { | ||||
|         console.log(e); | ||||
|     } | ||||
|  | ||||
|     // May fail on very large pages with 'WARNING: tile memory limits exceeded, some content may not draw' | ||||
|     if (!b64s) { | ||||
|         // @todo after text extract, we can place some overlay text with red background to say 'cropped' | ||||
|         console.error('ERROR: content-fetcher page was maybe too large for a screenshot, reverting to viewport only screenshot'); | ||||
|         try { | ||||
|             b64s = await page.screenshot({encoding: "base64", quality: screenshot_quality, type: 'jpeg'}); | ||||
|         } catch (e) { | ||||
|             console.log(e); | ||||
|         } | ||||
|     } | ||||
|  | ||||
|     var html = await page.content(); | ||||
|     return { | ||||
|         data: { | ||||
|             'content': html, | ||||
|             'headers': r.headers(), | ||||
|             'instock_data': instock_data, | ||||
|             'screenshot': b64s, | ||||
|             'status_code': r.status(), | ||||
|             'xpath_data': xpath_data | ||||
|         }, | ||||
|         type: 'application/json', | ||||
|     }; | ||||
| }; | ||||
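
The script above shards its disk cache by the MD5 hex digest of the request URL, one directory level per leading hex character, and treats entries older than 300 seconds as expired. The same path scheme expressed in Python for reference (a sketch only; the cache itself lives in Node):

    import hashlib

    def cache_path(disk_cache_dir, url):
        # Same scheme as puppeteer_fetch.js: md5 hex key, sharded as k[0]/k[1]/k[2]/key
        key = hashlib.md5(url.encode('utf-8')).hexdigest()
        return f"{disk_cache_dir}{key[0]}/{key[1]}/{key[2]}/{key}"

    # e.g. cache_path('/tmp/cache/', 'https://example.com/app.js')
    # -> '/tmp/cache/<h0>/<h1>/<h2>/<md5-hex>' where <hN> are the leading hex chars
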
| @@ -10,6 +10,7 @@ function isItemInStock() { | ||||
|     'brak na stanie', | ||||
|     'brak w magazynie', | ||||
|     'coming soon', | ||||
|     'currently have any tickets for this', | ||||
|     'currently unavailable', | ||||
|     'en rupture de stock', | ||||
|     'item is no longer available', | ||||
| @@ -20,7 +21,9 @@ function isItemInStock() { | ||||
|     'nicht zur verfügung', | ||||
|     'no disponible temporalmente', | ||||
|     'no longer in stock', | ||||
|     'no tickets available', | ||||
|     'not available', | ||||
|     'not currently available', | ||||
|     'not in stock', | ||||
|     'notify me when available', | ||||
|     'não estamos a aceitar encomendas', | ||||
| @@ -30,6 +33,8 @@ function isItemInStock() { | ||||
|     'sold out', | ||||
|     'temporarily out of stock', | ||||
|     'temporarily unavailable', | ||||
|     'tickets unavailable', | ||||
|     'unavailable tickets', | ||||
|     'we do not currently have an estimate of when this product will be back in stock.', | ||||
|     'zur zeit nicht an lager', | ||||
|   ]; | ||||
|   | ||||
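
The new entries above extend the negative-stock phrase list; the matching itself is plain case-insensitive substring containment. A rough Python analogue of the check (the real stock-not-in-stock.js also restricts itself to text above the fold; this shows only the string test):

    OUT_OF_STOCK_PHRASES = [
        'currently have any tickets for this',
        'no tickets available',
        'tickets unavailable',
        'unavailable tickets',
    ]

    def looks_out_of_stock(element_text):
        t = ' '.join(element_text.lower().split())  # normalise case and whitespace
        return any(phrase in t for phrase in OUT_OF_STOCK_PHRASES)

    looks_out_of_stock("Sorry, we don't currently have any tickets for this event")  # True
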
| @@ -114,11 +114,11 @@ $(document).ready(function () { | ||||
|             e.preventDefault() | ||||
|         }); | ||||
|  | ||||
|         // When the mouse moves we know which element it should be above | ||||
|         // mousedown will link that to the UI (select the right action, highlight etc) | ||||
|         $('#browsersteps-selector-canvas').bind('mousedown', function (e) { | ||||
|             // https://developer.mozilla.org/en-US/docs/Web/API/MouseEvent | ||||
|             e.preventDefault() | ||||
|             console.log(e); | ||||
|             console.log("current xpath in index is " + current_selected_i); | ||||
|             last_click_xy = {'x': parseInt((1 / x_scale) * e.offsetX), 'y': parseInt((1 / y_scale) * e.offsetY)} | ||||
|             process_selected(current_selected_i); | ||||
|             current_selected_i = false; | ||||
| @@ -132,6 +132,7 @@ $(document).ready(function () { | ||||
|             } | ||||
|         }); | ||||
|  | ||||
|         // Debounce and find the current most 'interesting' element we are hovering above | ||||
|         $('#browsersteps-selector-canvas').bind('mousemove', function (e) { | ||||
|             if (!xpath_data) { | ||||
|                 return; | ||||
| @@ -151,41 +152,40 @@ $(document).ready(function () { | ||||
|             current_selected_i = false; | ||||
|             // Reverse order - the most specific one should be deeper/"later" | ||||
|             // Basically, find the deepest (most specific) one | ||||
|             //$('#browsersteps-selector-canvas').css('cursor', 'pointer'); | ||||
|             for (var i = xpath_data['size_pos'].length; i !== 0; i--) { | ||||
|                 // draw all of them? let them choose somehow? | ||||
|                 var sel = xpath_data['size_pos'][i - 1]; | ||||
|             var possible_elements = []; | ||||
|             xpath_data['size_pos'].forEach(function (item, index) { | ||||
|                 // If we are in a bounding-box | ||||
|                 if (e.offsetY > sel.top * y_scale && e.offsetY < sel.top * y_scale + sel.height * y_scale | ||||
|                 if (e.offsetY > item.top * y_scale && e.offsetY < item.top * y_scale + item.height * y_scale | ||||
|                     && | ||||
|                     e.offsetX > sel.left * y_scale && e.offsetX < sel.left * y_scale + sel.width * y_scale | ||||
|                     e.offsetX > item.left * y_scale && e.offsetX < item.left * y_scale + item.width * y_scale | ||||
|  | ||||
|                 ) { | ||||
|                     // Only highlight these interesting types | ||||
|                     if (1) { | ||||
|                         ctx.strokeRect(sel.left * x_scale, sel.top * y_scale, sel.width * x_scale, sel.height * y_scale); | ||||
|                         ctx.fillRect(sel.left * x_scale, sel.top * y_scale, sel.width * x_scale, sel.height * y_scale); | ||||
|                         current_selected_i = i - 1; | ||||
|                         break; | ||||
|  | ||||
|                         // find the smallest one at this x,y | ||||
|                         // does it mean sort the xpath list by size (w*h) i think so! | ||||
|                     } else { | ||||
|  | ||||
|                         if (include_text_elements[0].checked === true) { | ||||
|                             // blue one with background instead? | ||||
|                             ctx.fillStyle = 'rgba(0,0,255, 0.1)'; | ||||
|                             ctx.strokeStyle = 'rgba(0,0,200, 0.7)'; | ||||
|                             $('#browsersteps-selector-canvas').css('cursor', 'grab'); | ||||
|                             ctx.strokeRect(sel.left * x_scale, sel.top * y_scale, sel.width * x_scale, sel.height * y_scale); | ||||
|                             ctx.fillRect(sel.left * x_scale, sel.top * y_scale, sel.width * x_scale, sel.height * y_scale); | ||||
|                             current_selected_i = i - 1; | ||||
|                             break; | ||||
|                         } | ||||
|                     // There could be many elements here, record them all and then we'll find out which is the most 'useful' | ||||
|                     // (input, textarea, button, A etc) | ||||
|                     if (item.width < xpath_data['browser_width']) { | ||||
|                         possible_elements.push(item); | ||||
|                     } | ||||
|                 } | ||||
|             }); | ||||
|  | ||||
|             // Find the best one | ||||
|             if (possible_elements.length) { | ||||
|                 possible_elements.forEach(function (item, index) { | ||||
|                   if (["a", "input", "textarea", "button"].includes(item['tagName'])) { | ||||
|                       current_selected_i = item; | ||||
|                   } | ||||
|                 }); | ||||
|  | ||||
|                 if (!current_selected_i) { | ||||
|                     current_selected_i = possible_elements[0]; | ||||
|                 } | ||||
|  | ||||
|                 // current_selected_i now holds the element object itself, so there is no need to re-index xpath_data | ||||
|                 ctx.strokeRect(current_selected_i.left * x_scale, current_selected_i.top * y_scale, current_selected_i.width * x_scale, current_selected_i.height * y_scale); | ||||
|                 ctx.fillRect(current_selected_i.left * x_scale, current_selected_i.top * y_scale, current_selected_i.width * x_scale, current_selected_i.height * y_scale); | ||||
|             } | ||||
|  | ||||
|  | ||||
|         }.debounce(10)); | ||||
|     }); | ||||
|  | ||||
| @@ -195,16 +195,16 @@ $(document).ready(function () { | ||||
|  | ||||
|  | ||||
|     // callback for clicking on an xpath on the canvas | ||||
|     function process_selected(xpath_data_index) { | ||||
|     function process_selected(selected_in_xpath_list) { | ||||
|         found_something = false; | ||||
|         var first_available = $("ul#browser_steps li.empty").first(); | ||||
|  | ||||
|  | ||||
|         if (xpath_data_index !== false) { | ||||
|         if (selected_in_xpath_list !== false) { | ||||
|             // Nothing focused, so fill in a new one | ||||
|             // if input type=button or <button> | ||||
|             // from the top, find the next not used one and use it | ||||
|             var x = xpath_data['size_pos'][xpath_data_index]; | ||||
|             var x = selected_in_xpath_list; | ||||
|             console.log(x); | ||||
|             if (x && first_available.length) { | ||||
|                 // @todo will it let you click elements that have a layer on top? probably not. | ||||
| @@ -214,26 +214,18 @@ $(document).ready(function () { | ||||
|                     $('input[placeholder="Value"]', first_available).addClass('ok').click().focus(); | ||||
|                     found_something = true; | ||||
|                 } else { | ||||
|                     if (x['isClickable'] || x['tagName'].startsWith('h') || x['tagName'] === 'a' || x['tagName'] === 'button' || x['tagtype'] === 'submit' || x['tagtype'] === 'checkbox' || x['tagtype'] === 'radio' || x['tagtype'] === 'li') { | ||||
|                     // There's no good way (that I know of) to tell whether an element really has a click handler attached, | ||||
|                     // see https://stackoverflow.com/questions/446892/how-to-find-event-listeners-on-a-dom-node-in-javascript-or-in-debugging | ||||
|                     // https://codepen.io/azaslavsky/pen/DEJVWv | ||||
|  | ||||
|                     // So we don't know if it's really a clickable element or not :-( | ||||
|                     // Assume it is - then we don't fill the pages with unreliable "Click X,Y" selections | ||||
|                     // If you switch to "Click X,Y" after an element here is set up, it will give the last co-ords anyway | ||||
|                     //if (x['isClickable'] || x['tagName'].startsWith('h') || x['tagName'] === 'a' || x['tagName'] === 'button' || x['tagtype'] === 'submit' || x['tagtype'] === 'checkbox' || x['tagtype'] === 'radio' || x['tagtype'] === 'li') { | ||||
|                         $('select', first_available).val('Click element').change(); | ||||
|                         $('input[type=text]', first_available).first().val(x['xpath']); | ||||
|                         found_something = true; | ||||
|                     } | ||||
|                 } | ||||
|  | ||||
|                 first_available.xpath_data_index = xpath_data_index; | ||||
|  | ||||
|                 if (!found_something) { | ||||
|                     if (include_text_elements[0].checked === true) { | ||||
|                         // Suggest that we use as filter? | ||||
|                         // @todo filters should always be in the last steps, nothing non-filter after it | ||||
|                         found_something = true; | ||||
|                         ctx.strokeStyle = 'rgba(0,0,255, 0.9)'; | ||||
|                         ctx.fillStyle = 'rgba(0,0,255, 0.1)'; | ||||
|                         $('select', first_available).val('Extract text and use as filter').change(); | ||||
|                         $('input[type=text]', first_available).first().val(x['xpath']); | ||||
|                         include_text_elements[0].checked = false; | ||||
|                     } | ||||
|                     //} | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
| @@ -248,7 +240,7 @@ $(document).ready(function () { | ||||
|  | ||||
|     function start() { | ||||
|         console.log("Starting browser-steps UI"); | ||||
|         browsersteps_session_id = Date.now(); | ||||
|         browsersteps_session_id = false; | ||||
|         // @todo This setting of the first one should be done at the datalayer but wtforms doesn't want to play nice | ||||
|         $('#browser_steps >li:first-child').removeClass('empty'); | ||||
|         set_first_gotosite_disabled(); | ||||
| @@ -256,7 +248,7 @@ $(document).ready(function () { | ||||
|         $('.clear,.remove', $('#browser_steps >li:first-child')).hide(); | ||||
|         $.ajax({ | ||||
|             type: "GET", | ||||
|             url: browser_steps_sync_url + "&browsersteps_session_id=" + browsersteps_session_id, | ||||
|             url: browser_steps_start_url, | ||||
|             statusCode: { | ||||
|                 400: function () { | ||||
|                     // More than likely the CSRF token was lost when the server restarted | ||||
| @@ -264,12 +256,12 @@ $(document).ready(function () { | ||||
|                 } | ||||
|             } | ||||
|         }).done(function (data) { | ||||
|             xpath_data = data.xpath_data; | ||||
|             $("#loading-status-text").fadeIn(); | ||||
|             browsersteps_session_id = data.browsersteps_session_id; | ||||
|             // This should trigger 'Goto site' | ||||
|             console.log("Got startup response, requesting Goto-Site (first) step fake click"); | ||||
|             $('#browser_steps >li:first-child .apply').click(); | ||||
|             browserless_seconds_remaining = data.browser_time_remaining; | ||||
|             browserless_seconds_remaining = 500; | ||||
|             set_first_gotosite_disabled(); | ||||
|         }).fail(function (data) { | ||||
|             console.log(data); | ||||
| @@ -430,7 +422,6 @@ $(document).ready(function () { | ||||
|             apply_buttons_disabled = false; | ||||
|             $("#browsersteps-img").css('opacity', 1); | ||||
|             $('ul#browser_steps li .control .apply').css('opacity', 1); | ||||
|             browserless_seconds_remaining = data.browser_time_remaining; | ||||
|             $("#loading-status-text").hide(); | ||||
|             set_first_gotosite_disabled(); | ||||
|         }).fail(function (data) { | ||||
|   | ||||
| @@ -3,7 +3,7 @@ | ||||
|  * Toggles theme between light and dark mode. | ||||
|  */ | ||||
| $(document).ready(function () { | ||||
|   const button = document.getElementsByClassName("toggle-theme")[0]; | ||||
|   const button = document.getElementById("toggle-light-mode"); | ||||
|  | ||||
|   button.onclick = () => { | ||||
|     const htmlElement = document.getElementsByTagName("html"); | ||||
| @@ -21,4 +21,33 @@ $(document).ready(function () { | ||||
|   const setCookieValue = (value) => { | ||||
|     document.cookie = `css_dark_mode=${value};max-age=31536000;path=/` | ||||
|   } | ||||
|  | ||||
|   // Search input box behaviour | ||||
|   const toggle_search = document.getElementById("toggle-search"); | ||||
|   const search_q = document.getElementById("search-q"); | ||||
|   window.addEventListener('keydown', function (e) { | ||||
|     // Braces are required here - without them, search_q.focus() would run on every keypress | ||||
|     if (e.altKey == true && e.keyCode == 83) { | ||||
|       search_q.classList.toggle('expanded'); | ||||
|       search_q.focus(); | ||||
|     } | ||||
|   }); | ||||
|  | ||||
|  | ||||
|   search_q.onkeydown = (e) => { | ||||
|     var key = e.keyCode || e.which; | ||||
|     if (key === 13) { | ||||
|       document.searchForm.submit(); | ||||
|     } | ||||
|   }; | ||||
|   toggle_search.onclick = () => { | ||||
|     // Could be that they want to search something once text is in there | ||||
|     if (search_q.value.length) { | ||||
|       document.searchForm.submit(); | ||||
|     } else { | ||||
|       // If not.. | ||||
|       search_q.classList.toggle('expanded'); | ||||
|       search_q.focus(); | ||||
|     } | ||||
|   }; | ||||
|  | ||||
| }); | ||||
|   | ||||
| @@ -61,7 +61,12 @@ $(document).ready(function () { | ||||
|     function bootstrap_visualselector() { | ||||
|         if (1) { | ||||
|             // bootstrap it, this will trigger everything else | ||||
|             $("img#selector-background").bind('load', function () { | ||||
|             $("img#selector-background").on("error", function () { | ||||
|                 $('.fetching-update-notice').html("<strong>Oops!</strong> The VisualSelector tool needs at least one fetched page, please unpause the watch and/or wait for the watch to complete fetching and then reload this page."); | ||||
|                 $('.fetching-update-notice').css('color','#bb0000'); | ||||
|                 $('#selector-current-xpath').hide(); | ||||
|                 $('#clear-selector').hide(); | ||||
|             }).bind('load', function () { | ||||
|                 console.log("Loaded background..."); | ||||
|                 c = document.getElementById("selector-canvas"); | ||||
|                 // greyed out fill context | ||||
| @@ -79,10 +84,11 @@ $(document).ready(function () { | ||||
|             }).attr("src", screenshot_url); | ||||
|         } | ||||
|         // Tell visualSelector that the image should update | ||||
|         var s = $("img#selector-background").attr('src')+"?"+ new Date().getTime(); | ||||
|         $("img#selector-background").attr('src',s) | ||||
|         var s = $("img#selector-background").attr('src') + "?" + new Date().getTime(); | ||||
|         $("img#selector-background").attr('src', s) | ||||
|     } | ||||
|  | ||||
|     // This is fired once the img src is loaded in bootstrap_visualselector() | ||||
|     function fetch_data() { | ||||
|         // Image is ready | ||||
|         $('.fetching-update-notice').html("Fetching element data.."); | ||||
| @@ -99,7 +105,8 @@ $(document).ready(function () { | ||||
|             reflow_selector(); | ||||
|             $('.fetching-update-notice').fadeOut(); | ||||
|         }); | ||||
|     }; | ||||
|  | ||||
|     } | ||||
|  | ||||
|  | ||||
|     function set_scale() { | ||||
|   | ||||
| @@ -54,8 +54,47 @@ a.github-link { | ||||
|   } | ||||
| } | ||||
|  | ||||
| button.toggle-theme { | ||||
|   width: 4rem; | ||||
| #toggle-light-mode { | ||||
|   width: 3rem; | ||||
|   .icon-dark { | ||||
|     display: none; | ||||
|   } | ||||
|  | ||||
|   &.dark { | ||||
|     .icon-light { | ||||
|       display: none; | ||||
|     } | ||||
|  | ||||
|     .icon-dark { | ||||
|       display: block; | ||||
|     } | ||||
|   } | ||||
| } | ||||
|  | ||||
| #toggle-search { | ||||
|   width: 2rem; | ||||
| } | ||||
|  | ||||
| #search-q { | ||||
|   opacity: 0; | ||||
|   -webkit-transition: all .9s ease; | ||||
|   -moz-transition: all .9s ease; | ||||
|   transition: all .9s ease; | ||||
|   width: 0; | ||||
|   display: none; | ||||
|   &.expanded { | ||||
|     width: auto; | ||||
|     display: inline-block; | ||||
|  | ||||
|     opacity: 1; | ||||
|   } | ||||
| } | ||||
| #search-result-info { | ||||
|   color: #fff; | ||||
| } | ||||
|  | ||||
| button.toggle-button { | ||||
|   vertical-align: middle; | ||||
|   background: transparent; | ||||
|   border: none; | ||||
|   cursor: pointer; | ||||
| @@ -74,19 +113,7 @@ button.toggle-theme { | ||||
|     display: block; | ||||
|   } | ||||
|  | ||||
|   .icon-dark { | ||||
|     display: none; | ||||
|   } | ||||
|  | ||||
|   &.dark { | ||||
|     .icon-light { | ||||
|       display: none; | ||||
|     } | ||||
|  | ||||
|     .icon-dark { | ||||
|       display: block; | ||||
|     } | ||||
|   } | ||||
| } | ||||
|  | ||||
| .pure-menu-horizontal { | ||||
|   | ||||
| @@ -331,23 +331,44 @@ a.github-link { | ||||
|   a.github-link:hover { | ||||
|     color: var(--color-icon-github-hover); } | ||||
|  | ||||
| button.toggle-theme { | ||||
|   width: 4rem; | ||||
| #toggle-light-mode { | ||||
|   width: 3rem; } | ||||
|   #toggle-light-mode .icon-dark { | ||||
|     display: none; } | ||||
|   #toggle-light-mode.dark .icon-light { | ||||
|     display: none; } | ||||
|   #toggle-light-mode.dark .icon-dark { | ||||
|     display: block; } | ||||
|  | ||||
| #toggle-search { | ||||
|   width: 2rem; } | ||||
|  | ||||
| #search-q { | ||||
|   opacity: 0; | ||||
|   -webkit-transition: all .9s ease; | ||||
|   -moz-transition: all .9s ease; | ||||
|   transition: all .9s ease; | ||||
|   width: 0; | ||||
|   display: none; } | ||||
|   #search-q.expanded { | ||||
|     width: auto; | ||||
|     display: inline-block; | ||||
|     opacity: 1; } | ||||
|  | ||||
| #search-result-info { | ||||
|   color: #fff; } | ||||
|  | ||||
| button.toggle-button { | ||||
|   vertical-align: middle; | ||||
|   background: transparent; | ||||
|   border: none; | ||||
|   cursor: pointer; | ||||
|   color: var(--color-icon-github); } | ||||
|   button.toggle-theme:hover { | ||||
|   button.toggle-button:hover { | ||||
|     color: var(--color-icon-github-hover); } | ||||
|   button.toggle-theme svg { | ||||
|   button.toggle-button svg { | ||||
|     fill: currentColor; } | ||||
|   button.toggle-theme .icon-light { | ||||
|     display: block; } | ||||
|   button.toggle-theme .icon-dark { | ||||
|     display: none; } | ||||
|   button.toggle-theme.dark .icon-light { | ||||
|     display: none; } | ||||
|   button.toggle-theme.dark .icon-dark { | ||||
|   button.toggle-button .icon-light { | ||||
|     display: block; } | ||||
|  | ||||
| .pure-menu-horizontal { | ||||
|   | ||||
| @@ -3,7 +3,7 @@ from flask import ( | ||||
| ) | ||||
|  | ||||
| from . model import App, Watch | ||||
| from copy import deepcopy | ||||
| from copy import deepcopy, copy | ||||
| from os import path, unlink | ||||
| from threading import Lock | ||||
| import json | ||||
| @@ -204,15 +204,16 @@ class ChangeDetectionStore: | ||||
|                 # GitHub #30 also delete history records | ||||
|                 for uuid in self.data['watching']: | ||||
|                     path = pathlib.Path(os.path.join(self.datastore_path, uuid)) | ||||
|                     shutil.rmtree(path) | ||||
|                     self.needs_write_urgent = True | ||||
|                     if os.path.exists(path): | ||||
|                         shutil.rmtree(path) | ||||
|  | ||||
|             else: | ||||
|                 path = pathlib.Path(os.path.join(self.datastore_path, uuid)) | ||||
|                 shutil.rmtree(path) | ||||
|                 if os.path.exists(path): | ||||
|                     shutil.rmtree(path) | ||||
|                 del self.data['watching'][uuid] | ||||
|  | ||||
|             self.needs_write_urgent = True | ||||
|         self.needs_write_urgent = True | ||||
|  | ||||
|     # Clone a watch by UUID | ||||
|     def clone(self, uuid): | ||||
| @@ -366,19 +367,21 @@ class ChangeDetectionStore: | ||||
|     def save_error_text(self, watch_uuid, contents): | ||||
|         if not self.data['watching'].get(watch_uuid): | ||||
|             return | ||||
|         target_path = os.path.join(self.datastore_path, watch_uuid, "last-error.txt") | ||||
|  | ||||
|         self.data['watching'][watch_uuid].ensure_data_dir_exists() | ||||
|         target_path = os.path.join(self.datastore_path, watch_uuid, "last-error.txt") | ||||
|         with open(target_path, 'w') as f: | ||||
|             f.write(contents) | ||||
|  | ||||
|     def save_xpath_data(self, watch_uuid, data, as_error=False): | ||||
|  | ||||
|         if not self.data['watching'].get(watch_uuid): | ||||
|             return | ||||
|         if as_error: | ||||
|             target_path = os.path.join(self.datastore_path, watch_uuid, "elements-error.json") | ||||
|         else: | ||||
|             target_path = os.path.join(self.datastore_path, watch_uuid, "elements.json") | ||||
|  | ||||
|         self.data['watching'][watch_uuid].ensure_data_dir_exists() | ||||
|         with open(target_path, 'w') as f: | ||||
|             f.write(json.dumps(data)) | ||||
|             f.close() | ||||
| @@ -472,8 +475,6 @@ class ChangeDetectionStore: | ||||
|         return proxy_list if len(proxy_list) else None | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
|     def get_preferred_proxy_for_watch(self, uuid): | ||||
|         """ | ||||
|         Returns the preferred proxy by ID key | ||||
| @@ -505,6 +506,25 @@ class ChangeDetectionStore: | ||||
|  | ||||
|         return None | ||||
|  | ||||
|     @property | ||||
|     def has_extra_headers_file(self): | ||||
|         filepath = os.path.join(self.datastore_path, 'headers.txt') | ||||
|         return os.path.isfile(filepath) | ||||
|  | ||||
|     def get_all_headers(self): | ||||
|         from .model.App import parse_headers_from_text_file | ||||
|         headers = copy(self.data['settings'].get('headers', {})) | ||||
|  | ||||
|         filepath = os.path.join(self.datastore_path, 'headers.txt') | ||||
|         try: | ||||
|             if os.path.isfile(filepath): | ||||
|                 headers.update(parse_headers_from_text_file(filepath)) | ||||
|         except Exception as e: | ||||
|             print(f"ERROR reading headers.txt at {filepath}", str(e)) | ||||
|  | ||||
|         return headers | ||||
|  | ||||
|  | ||||
|     # Run all updates | ||||
|     # IMPORTANT - Each update could be run even when they have a new install and the schema is correct | ||||
|     #             So therefor - each `update_n` should be very careful about checking if it needs to actually run | ||||
|   | ||||
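
Taken together with the watch-level `get_all_headers()` earlier, the datastore hunk above gives request headers a fixed merge precedence. A short sketch of the effective order, per the perform_site_check hunk:

    # Lowest to highest precedence; later .update() calls win
    request_headers = datastore.get_all_headers()    # global settings headers + datastore headers.txt
    request_headers.update(watch.get_all_headers())  # per-watch headers + watch headers.txt + per-tag files
    # so e.g. a per-watch 'User-Agent' silently overrides the global one
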
| @@ -115,7 +115,7 @@ | ||||
|                                     URLs generated by changedetection.io (such as <code>{{ '{{diff_url}}' }}</code>) require the <code>BASE_URL</code> environment variable set.<br> | ||||
|                                     Your <code>BASE_URL</code> var is currently "{{settings_application['current_base_url']}}" | ||||
| 									<br> | ||||
| 									Warning: Contents of <code>{{ '{{diff}}' }}</code>, <code>{{ '{{diff_removed}}' }}</code>, and <code>{{ '{{diff_added}}' }}</code> depend on how the difference algorithm perceives the change. For example, an addition or removal could be perceived as a change in some cases. <a target="_new" href="https://github.com/dgtlmoon/changedetection.io/wiki/Using-the-%7B%7Bdiff%7D%7D,-%7B%7Bdiff_added%7D%7D,-and-%7B%7Bdiff_removal%7D%7D-notification-tokens">More Here</a> <br> | ||||
| 									Warning: Contents of <code>{{ '{{diff}}' }}</code>, <code>{{ '{{diff_removed}}' }}</code>, and <code>{{ '{{diff_added}}' }}</code> depend on how the difference algorithm perceives the change. For example, an addition or removal could be perceived as a change in some cases. <a target="_new" href="https://github.com/dgtlmoon/changedetection.io/wiki/Using-the-%7B%7Bdiff%7D%7D,-%7B%7Bdiff_added%7D%7D,-and-%7B%7Bdiff_removed%7D%7D-notification-tokens">More Here</a> <br> | ||||
|                                 </div> | ||||
|                             </div> | ||||
|                         </div> | ||||
|   | ||||
| @@ -82,11 +82,21 @@ | ||||
|               <a href="{{url_for('logout')}}" class="pure-menu-link">LOG OUT</a> | ||||
|             </li> | ||||
|           {% endif %} | ||||
|           <li class="pure-menu-item pure-form" id="search-menu-item"> | ||||
|             <!-- We use GET here so it offers people a chance to set bookmarks etc --> | ||||
|             <form name="searchForm" action="" method="GET"> | ||||
|               <input id="search-q" class="" name="q" placeholder="URL or Title {% if active_tag %}in '{{ active_tag }}'{% endif %}" required="" type="text" value=""> | ||||
|               <input name="tag" type="hidden" value="{% if active_tag %}{{active_tag}}{% endif %}"> | ||||
|               <button class="toggle-button" id="toggle-search" type="button" title="Search, or use the Alt+S key"> | ||||
|                 {% include "svgs/search-icon.svg" %} | ||||
|               </button> | ||||
|             </form> | ||||
|           </li> | ||||
|           <li class="pure-menu-item"> | ||||
|             {% if dark_mode %} | ||||
|             {% set darkClass = 'dark' %} | ||||
|             {% endif %} | ||||
|             <button class="toggle-theme {{darkClass}}" type="button" title="Toggle Light/Dark Mode"> | ||||
|             <button class="toggle-button {{darkClass}}" id="toggle-light-mode" type="button" title="Toggle Light/Dark Mode"> | ||||
|               <span class="visually-hidden">Toggle light/dark mode</span> | ||||
|               <span class="icon-light"> | ||||
|                 {% include "svgs/light-mode-toggle-icon.svg" %} | ||||
| @@ -106,7 +116,7 @@ | ||||
|     </div> | ||||
|     {% if hosted_sticky %} | ||||
|       <div class="sticky-tab" id="hosted-sticky"> | ||||
|         <a href="https://lemonade.changedetection.io/start?ref={{guid}}">Let us host your instance!</a> | ||||
|         <a href="https://changedetection.io/?ref={{guid}}">Let us host your instance!</a> | ||||
|       </div> | ||||
|     {% endif %} | ||||
|     {% if left_sticky %} | ||||
|   | ||||
| @@ -14,7 +14,9 @@ | ||||
| {% endif %} | ||||
|  | ||||
|     const browser_steps_config=JSON.parse('{{ browser_steps_config|tojson }}'); | ||||
|     const browser_steps_start_url="{{url_for('browser_steps.browsersteps_start_session', uuid=uuid)}}"; | ||||
|     const browser_steps_sync_url="{{url_for('browser_steps.browsersteps_ui_update', uuid=uuid)}}"; | ||||
|  | ||||
| </script> | ||||
|  | ||||
| <script src="{{url_for('static_content', group='js', filename='watch-settings.js')}}" defer></script> | ||||
| @@ -150,6 +152,17 @@ | ||||
| {{ render_field(form.headers, rows=5, placeholder="Example | ||||
| Cookie: foobar | ||||
| User-Agent: wonderbra 1.0") }} | ||||
|  | ||||
|                         <div class="pure-form-message-inline"> | ||||
|                             {% if has_extra_headers_file %} | ||||
|                                 <strong>Alert! Extra headers file found and will be added to this watch!</strong> | ||||
|                             {% else %} | ||||
|                                 Headers can also be read from a file in your data directory <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Adding-headers-from-an-external-file">Read more here</a> | ||||
|                             {% endif %} | ||||
|                             <br> | ||||
|                             (Not supported by Selenium browser) | ||||
|                         </div> | ||||
|  | ||||
|                     </div> | ||||
|                     <div class="pure-control-group" id="request-body"> | ||||
|                                         {{ render_field(form.body, rows=5, placeholder="Example | ||||
| @@ -186,7 +199,8 @@ User-Agent: wonderbra 1.0") }} | ||||
|                                     <span class="loader" > | ||||
|                                         <span id="browsersteps-click-start"> | ||||
|                                             <h2 >Click here to Start</h2> | ||||
|                                             Please allow 10-15 seconds for the browser to connect. | ||||
|                                             <svg style="height: 3.5rem;" version="1.1" viewBox="0 0 32 32"  xml:space="preserve" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink"><g id="Layer_1"/><g id="play_x5F_alt"><path d="M16,0C7.164,0,0,7.164,0,16s7.164,16,16,16s16-7.164,16-16S24.836,0,16,0z M10,24V8l16.008,8L10,24z" style="fill: var(--color-grey-400);"/></g></svg><br> | ||||
|                                             Please allow 10-15 seconds for the browser to connect.<br> | ||||
|                                         </span> | ||||
|                                         <div class="spinner"  style="display: none;"></div> | ||||
|                                     </span> | ||||
|   | ||||
| @@ -70,6 +70,10 @@ | ||||
|                             <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Configurable-BASE_URL-setting">read more here</a>. | ||||
|                         </span> | ||||
|                     </div> | ||||
|                     <div class="pure-control-group"> | ||||
|                         {{ render_field(form.application.form.pager_size) }} | ||||
|                         <span class="pure-form-message-inline">Number of items per page in the watch overview list, 0 to disable.</span> | ||||
|                     </div> | ||||
|  | ||||
|                     <div class="pure-control-group"> | ||||
|                         {{ render_checkbox_field(form.application.form.extract_title_as_title) }} | ||||
|   | ||||
1 changedetectionio/templates/svgs/search-icon.svg (new file)
							| @@ -0,0 +1 @@ | ||||
| <?xml version="1.0" encoding="utf-8"?><svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 122.879 119.799" enable-background="new 0 0 122.879 119.799" xml:space="preserve"><g><path d="M49.988,0h0.016v0.007C63.803,0.011,76.298,5.608,85.34,14.652c9.027,9.031,14.619,21.515,14.628,35.303h0.007v0.033v0.04 h-0.007c-0.005,5.557-0.917,10.905-2.594,15.892c-0.281,0.837-0.575,1.641-0.877,2.409v0.007c-1.446,3.66-3.315,7.12-5.547,10.307 l29.082,26.139l0.018,0.016l0.157,0.146l0.011,0.011c1.642,1.563,2.536,3.656,2.649,5.78c0.11,2.1-0.543,4.248-1.979,5.971 l-0.011,0.016l-0.175,0.203l-0.035,0.035l-0.146,0.16l-0.016,0.021c-1.565,1.642-3.654,2.534-5.78,2.646 c-2.097,0.111-4.247-0.54-5.971-1.978l-0.015-0.011l-0.204-0.175l-0.029-0.024L78.761,90.865c-0.88,0.62-1.778,1.209-2.687,1.765 c-1.233,0.755-2.51,1.466-3.813,2.115c-6.699,3.342-14.269,5.222-22.272,5.222v0.007h-0.016v-0.007 c-13.799-0.004-26.296-5.601-35.338-14.645C5.605,76.291,0.016,63.805,0.007,50.021H0v-0.033v-0.016h0.007 c0.004-13.799,5.601-26.296,14.645-35.338C23.683,5.608,36.167,0.016,49.955,0.007V0H49.988L49.988,0z M50.004,11.21v0.007h-0.016 h-0.033V11.21c-10.686,0.007-20.372,4.35-27.384,11.359C15.56,29.578,11.213,39.274,11.21,49.973h0.007v0.016v0.033H11.21 c0.007,10.686,4.347,20.367,11.359,27.381c7.009,7.012,16.705,11.359,27.403,11.361v-0.007h0.016h0.033v0.007 c10.686-0.007,20.368-4.348,27.382-11.359c7.011-7.009,11.358-16.702,11.36-27.4h-0.006v-0.016v-0.033h0.006 c-0.006-10.686-4.35-20.372-11.358-27.384C70.396,15.56,60.703,11.213,50.004,11.21L50.004,11.21z"/></g></svg> | ||||
| @@ -44,6 +44,7 @@ | ||||
|     {% if watches|length >= pagination.per_page %} | ||||
|         {{ pagination.info }} | ||||
|     {% endif %} | ||||
|     {% if search_q %}<div id="search-result-info">Searching "<strong><i>{{search_q}}</i></strong>"</div>{% endif %} | ||||
|     <div> | ||||
|         <a href="{{url_for('index')}}" class="pure-button button-tag {{'active' if not active_tag }}">All</a> | ||||
|         {% for tag in tags %} | ||||
| @@ -73,8 +74,12 @@ | ||||
|             </tr> | ||||
|             </thead> | ||||
|             <tbody> | ||||
|  | ||||
|             {% for watch in (watches|sort(attribute=sort_attribute, reverse=sort_order == 'asc'))[pagination.skip:pagination.skip+pagination.per_page] %} | ||||
|             {% if not watches|length %} | ||||
|             <tr> | ||||
|                 <td colspan="6">No website watches configured, please add a URL in the box above, or <a href="{{ url_for('import_page')}}" >import a list</a>.</td> | ||||
|             </tr> | ||||
|             {% endif %} | ||||
|             {% for watch in (watches|sort(attribute=sort_attribute, reverse=sort_order == 'asc'))|pagination_slice(skip=pagination.skip) %} | ||||
|             <tr id="{{ watch.uuid }}" | ||||
|                 class="{{ loop.cycle('pure-table-odd', 'pure-table-even') }} processor-{{ watch['processor'] }} | ||||
|                 {% if watch.last_error is defined and watch.last_error != False %}error{% endif %} | ||||
|   | ||||
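
The template change above replaces the hardcoded `[pagination.skip:pagination.skip+pagination.per_page]` slice with a `pagination_slice` filter. A plausible sketch of how such a custom Jinja filter could be registered (assumed implementation; presumably the real one reads `pager_size` from the datastore so 0 disables paging):

    @app.template_filter('pagination_slice')
    def _jinja2_filter_pagination_slice(arr, skip):
        per_page = datastore.data['settings']['application'].get('pager_size', 50)
        if per_page:
            return arr[skip:skip + per_page]
        return arr  # pager_size == 0 means 'show everything'
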
| @@ -14,13 +14,16 @@ global app | ||||
|  | ||||
| def cleanup(datastore_path): | ||||
|     # Unlink test output files | ||||
|     files = ['output.txt', | ||||
|              'url-watches.json', | ||||
|              'secret.txt', | ||||
|              'notification.txt', | ||||
|              'count.txt', | ||||
|              'endpoint-content.txt' | ||||
|                  ] | ||||
|     files = [ | ||||
|         'count.txt', | ||||
|         'endpoint-content.txt',  # the comma here matters - adjacent strings would otherwise silently concatenate | ||||
|         'headers.txt', | ||||
|         'headers-testtag.txt', | ||||
|         'notification.txt', | ||||
|         'secret.txt', | ||||
|         'url-watches.json', | ||||
|         'output.txt', | ||||
|     ] | ||||
|     for file in files: | ||||
|         try: | ||||
|             os.unlink("{}/{}".format(datastore_path, file)) | ||||
|   | ||||
| @@ -3,7 +3,7 @@ | ||||
|  | ||||
| import time | ||||
| from flask import url_for, escape | ||||
| from . util import live_server_setup | ||||
| from . util import live_server_setup, wait_for_all_checks | ||||
| import pytest | ||||
| jq_support = True | ||||
|  | ||||
| @@ -64,6 +64,24 @@ and it can also be repeated | ||||
|         with pytest.raises(html_tools.JSONNotFound) as e_info: | ||||
|             html_tools.extract_json_as_string('COMPLETE GIBBERISH, NO JSON!', "jq:.id") | ||||
|  | ||||
|  | ||||
| def test_unittest_inline_extract_body(): | ||||
|     content = """ | ||||
|     <html> | ||||
|         <head></head> | ||||
|         <body> | ||||
|             <pre style="word-wrap: break-word; white-space: pre-wrap;"> | ||||
|                 {"testKey": 42} | ||||
|             </pre> | ||||
|         </body> | ||||
|     </html> | ||||
|     """ | ||||
|     from .. import html_tools | ||||
|  | ||||
|     # Check that the JSON wrapped inside the page <body>/<pre> is found and matches our filter | 
|     text = html_tools.extract_json_as_string(content, "json:$.testKey") | ||||
|     assert text == '42' | ||||
|  | ||||
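The unit test above exercises extraction of JSON that a headless browser has wrapped in HTML (here, inside a <pre> tag). The general approach is to locate the JSON region in the markup, parse it, then apply the filter; a naive sketch of the idea (not the actual html_tools implementation):

import json
import re

def extract_json_from_html(html, key):
    # Naive sketch: grab the first {...} region, parse it, read one key.
    match = re.search(r'\{.*\}', html, re.DOTALL)
    if not match:
        raise ValueError('No parsable JSON found in this document')
    return json.loads(match.group(0))[key]

# Mirrors the unit test above: the JSON sits inside a <pre> in the body.
assert extract_json_from_html('<body><pre>{"testKey": 42}</pre></body>', 'testKey') == 42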
| def set_original_ext_response(): | ||||
|     data = """ | ||||
|         [ | ||||
| @@ -436,6 +454,37 @@ def test_ignore_json_order(client, live_server): | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|     assert b'Deleted' in res.data | ||||
|  | ||||
| def test_correct_header_detect(client, live_server): | ||||
|     # Like in https://github.com/dgtlmoon/changedetection.io/pull/1593 | ||||
|     # Specify extra html that JSON is sometimes wrapped in - when using Browserless/Puppeteer etc | ||||
|     with open("test-datastore/endpoint-content.txt", "w") as f: | ||||
|         f.write('<html><body>{"hello" : 123, "world": 123}') | ||||
|  | ||||
|     # Add our URL to the import page | ||||
|     # Check that weird header casing is cleaned up and still detected | 
|     test_url = url_for('test_endpoint', content_type="aPPlication/JSon", uppercase_headers=True, _external=True) | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
|         data={"urls": test_url}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|     res = client.get(url_for("index")) | ||||
|  | ||||
|     # Fixed in #1593 | ||||
|     assert b'No parsable JSON found in this document' not in res.data | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("preview_page", uuid="first"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b'"world":' in res.data | ||||
|     assert res.data.count(b'{') >= 2 | ||||
|  | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|     assert b'Deleted' in res.data | ||||
|  | ||||
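test_correct_header_detect passes because HTTP header names and media types are case-insensitive, so a detector only has to normalise both sides before comparing. A minimal sketch of that normalisation (not the exact code used by the fetchers):

def is_json_response(headers):
    # Header names and media types are case-insensitive (RFC 9110),
    # so lower-case both before matching.
    ctype = next((v for k, v in headers.items() if k.lower() == 'content-type'), '')
    return 'application/json' in ctype.lower()

assert is_json_response({'CONTENT-TYPE': 'aPPlication/JSon'})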
| def test_check_jsonpath_ext_filter(client, live_server): | ||||
|     check_json_ext_filter('json:$[?(@.status==Sold)]', client, live_server) | ||||
|  | ||||
|   | ||||
| @@ -1,7 +1,8 @@ | ||||
| import json | ||||
| import os | ||||
| import time | ||||
| from flask import url_for | ||||
| from . util import set_original_response, set_modified_response, live_server_setup | ||||
| from . util import set_original_response, set_modified_response, live_server_setup, wait_for_all_checks, extract_UUID_from_client | ||||
|  | ||||
| def test_setup(live_server): | ||||
|     live_server_setup(live_server) | ||||
| @@ -9,8 +10,12 @@ def test_setup(live_server): | ||||
| # Hard to just add more live server URLs when one test is already running (I think) | ||||
| # So we add our test here (was in a different file) | ||||
| def test_headers_in_request(client, live_server): | ||||
|     #live_server_setup(live_server)  # Uncomment if running this test standalone (test_setup already runs it) | 
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_headers', _external=True) | ||||
|     if os.getenv('PLAYWRIGHT_DRIVER_URL'): | ||||
|         # Because it's no longer calling back to localhost but from Browserless; hostname is set in test-only.yml | 
|         test_url = test_url.replace('localhost', 'changedet') | ||||
|  | ||||
|     # Import the test URL | 
|     res = client.post( | ||||
| @@ -29,7 +34,7 @@ def test_headers_in_request(client, live_server): | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|  | ||||
|     time.sleep(3) | ||||
|     wait_for_all_checks(client) | ||||
|     cookie_header = '_ga=GA1.2.1022228332; cookie-preferences=analytics:accepted;' | ||||
|  | ||||
|  | ||||
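wait_for_all_checks replaces the fixed time.sleep() calls: instead of hoping a timeout is long enough, it polls until no watch is still being checked. A rough sketch of such a helper (the real one lives in tests/util.py and may differ):

import time

def wait_for_all_checks(client, timeout=60):
    # Poll the index until no watch reports that it is still being checked.
    # Assumptions: the index route is '/' and running checks render 'Checking now'.
    start = time.time()
    while time.time() - start < timeout:
        if b'Checking now' not in client.get('/').data:
            return
        time.sleep(0.5)
    raise TimeoutError('Watch checks did not finish in time')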
| @@ -39,7 +44,7 @@ def test_headers_in_request(client, live_server): | ||||
|         data={ | ||||
|               "url": test_url, | ||||
|               "tag": "", | ||||
|               "fetch_backend": "html_requests", | ||||
|               "fetch_backend": 'html_webdriver' if os.getenv('PLAYWRIGHT_DRIVER_URL') else 'html_requests', | ||||
|               "headers": "xxx:ooo\ncool:yeah\r\ncookie:"+cookie_header}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
| @@ -47,7 +52,7 @@ def test_headers_in_request(client, live_server): | ||||
|  | ||||
|  | ||||
|     # Give the thread time to pick up the first version | ||||
|     time.sleep(5) | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     # The service should echo back the request headers | ||||
|     res = client.get( | ||||
| @@ -63,7 +68,7 @@ def test_headers_in_request(client, live_server): | ||||
|     from html import escape | ||||
|     assert escape(cookie_header).encode('utf-8') in res.data | ||||
|  | ||||
|     time.sleep(5) | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     # Re #137 -  Examine the JSON index file, it should have only one set of headers entered | ||||
|     watches_with_headers = 0 | ||||
| @@ -79,6 +84,9 @@ def test_headers_in_request(client, live_server): | ||||
| def test_body_in_request(client, live_server): | ||||
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_body', _external=True) | ||||
|     if os.getenv('PLAYWRIGHT_DRIVER_URL'): | ||||
|         # Because it's no longer calling back to localhost but from Browserless; hostname is set in test-only.yml | 
|         test_url = test_url.replace('localhost', 'changedet') | 
|  | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
| @@ -167,6 +175,9 @@ def test_body_in_request(client, live_server): | ||||
| def test_method_in_request(client, live_server): | ||||
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_method', _external=True) | ||||
|     if os.getenv('PLAYWRIGHT_DRIVER_URL'): | ||||
|         # Because it's no longer calling back to localhost but from Browserless; hostname is set in test-only.yml | 
|         test_url = test_url.replace('localhost', 'changedet') | 
|  | ||||
|     # Import the test URL | 
|     res = client.post( | ||||
| @@ -234,3 +245,76 @@ def test_method_in_request(client, live_server): | ||||
|     # Should be only one with method set to PATCH | ||||
|     assert watches_with_method == 1 | ||||
|  | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|     assert b'Deleted' in res.data | ||||
|  | ||||
| def test_headers_textfile_in_request(client, live_server): | ||||
|     #live_server_setup(live_server)  # Uncomment if running this test standalone (test_setup already runs it) | 
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_headers', _external=True) | ||||
|     if os.getenv('PLAYWRIGHT_DRIVER_URL'): | ||||
|         # Because it's no longer calling back to localhost but from Browserless; hostname is set in test-only.yml | 
|         test_url = test_url.replace('localhost', 'changedet') | 
|  | ||||
|     print("Test URL is", test_url) | 
|     # Import the test URL | 
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
|         data={"urls": test_url}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|  | ||||
|     wait_for_all_checks(client) | 
|  | ||||
|  | ||||
|     # Add some headers to a request | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first"), | ||||
|         data={ | ||||
|               "url": test_url, | ||||
|               "tag": "testtag", | ||||
|               "fetch_backend": 'html_webdriver' if os.getenv('PLAYWRIGHT_DRIVER_URL') else 'html_requests', | ||||
|               "headers": "xxx:ooo\ncool:yeah\r\n"}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"Updated watch." in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     with open('test-datastore/headers-testtag.txt', 'w') as f: | ||||
|         f.write("tag-header: test") | ||||
|  | ||||
|     with open('test-datastore/headers.txt', 'w') as f: | ||||
|         f.write("global-header: nice\r\nnext-global-header: nice") | ||||
|  | ||||
|     with open('test-datastore/'+extract_UUID_from_client(client)+'/headers.txt', 'w') as f: | ||||
|         f.write("watch-header: nice") | ||||
|  | ||||
|     client.get(url_for("form_watch_checknow"), follow_redirects=True) | ||||
|  | ||||
|     # Give the thread time to pick it up | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.get(url_for("edit_page", uuid="first")) | ||||
|     assert b"Extra headers file found and will be added to this watch" in res.data | ||||
|  | ||||
|     # Not needed anymore | ||||
|     os.unlink('test-datastore/headers.txt') | ||||
|     os.unlink('test-datastore/headers-testtag.txt') | ||||
|     os.unlink('test-datastore/'+extract_UUID_from_client(client)+'/headers.txt') | ||||
|     # The service should echo back the request headers | 
|     res = client.get( | ||||
|         url_for("preview_page", uuid="first"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"Global-Header:nice" in res.data | ||||
|     assert b"Next-Global-Header:nice" in res.data | ||||
|     assert b"Xxx:ooo" in res.data | ||||
|     assert b"Watch-Header:nice" in res.data | ||||
|     assert b"Tag-Header:test" in res.data | ||||
|  | ||||
|  | ||||
|     # TODO: unlink headers.txt on test start/stop | 
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|     assert b'Deleted' in res.data | ||||
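The test above layers three sources of extra headers - a global test-datastore/headers.txt, a per-tag headers-testtag.txt, and a per-watch <uuid>/headers.txt - and expects all of them to appear in the request. Conceptually that is a series of dict updates applied from most general to most specific; a sketch of that precedence (file names from the test; the merge order is an assumption):

import os

def parse_headers_file(path):
    headers = {}
    if os.path.isfile(path):
        with open(path) as f:
            for line in f.read().splitlines():
                if ':' in line:
                    name, value = line.split(':', 1)
                    headers[name.strip()] = value.strip()
    return headers

def extra_headers(datastore_path, tag, uuid):
    # Later updates win, so per-tag overrides global and per-watch overrides both.
    headers = parse_headers_file(os.path.join(datastore_path, 'headers.txt'))
    headers.update(parse_headers_file(os.path.join(datastore_path, 'headers-{}.txt'.format(tag))))
    headers.update(parse_headers_file(os.path.join(datastore_path, uuid, 'headers.txt')))
    return headers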
| @@ -119,16 +119,26 @@ def live_server_setup(live_server): | ||||
|         status_code = request.args.get('status_code') | ||||
|         content = request.args.get('content') or None | ||||
|  | ||||
|         # Used to just try to break the header detection | ||||
|         uppercase_headers = request.args.get('uppercase_headers') | ||||
|  | ||||
|         try: | ||||
|             if content is not None: | ||||
|                 resp = make_response(content, status_code) | ||||
|                 resp.headers['Content-Type'] = ctype if ctype else 'text/html' | ||||
|                 if uppercase_headers: | 
|                     # Guard against a missing content_type arg before calling .upper() | 
|                     ctype = ctype.upper() if ctype else 'text/html' | 
|                     resp.headers['CONTENT-TYPE'] = ctype | 
|                 else: | 
|                     resp.headers['Content-Type'] = ctype if ctype else 'text/html' | 
|                 return resp | ||||
|  | ||||
|             # Tried using a global var here but didn't seem to work, so reading from a file instead. | ||||
|             with open("test-datastore/endpoint-content.txt", "r") as f: | ||||
|                 resp = make_response(f.read(), status_code) | ||||
|                 resp.headers['Content-Type'] = ctype if ctype else 'text/html' | ||||
|                 if uppercase_headers: | ||||
|                     resp.headers['CONTENT-TYPE'] = ctype if ctype else 'text/html' | ||||
|                 else: | ||||
|                     resp.headers['Content-Type'] = ctype if ctype else 'text/html' | ||||
|                 return resp | ||||
|         except FileNotFoundError: | ||||
|             return make_response('', status_code) | ||||
|   | ||||
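The new uppercase_headers flag can also be poked by hand against a running live server; a quick check (the host, port, and endpoint path are assumptions - inside the suite, url_for('test_endpoint', ...) resolves the real URL, as the tests above do):

import requests

r = requests.get(
    'http://localhost:5004/test-endpoint',  # assumed host, port and path
    params={'content_type': 'aPPlication/JSon', 'uppercase_headers': '1', 'content': '{}'},
)
# requests reads response headers case-insensitively, so this works whether
# the server replied with CONTENT-TYPE or Content-Type.
print(r.headers.get('content-type'))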
| @@ -5,7 +5,7 @@ import time | ||||
|  | ||||
| from changedetectionio import content_fetcher | ||||
| from .processors.text_json_diff import FilterNotFoundInResponse | ||||
|  | ||||
| from .processors.restock_diff import UnableToExtractRestockData | ||||
|  | ||||
| # A single update worker | ||||
| # | ||||
| @@ -238,7 +238,7 @@ class update_worker(threading.Thread): | ||||
|                         if not self.datastore.data['watching'].get(uuid): | ||||
|                             continue | ||||
|  | ||||
|                         err_text = "Warning, no filters were found, no change detection ran." | ||||
|                         err_text = "Warning, no filters were found, no change detection ran - Did the page change layout? Update your Visual Filter if necessary." | 
|                         self.datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text}) | ||||
|  | ||||
|                         # Only when enabled, send the notification | ||||
| @@ -262,6 +262,7 @@ class update_worker(threading.Thread): | ||||
|                         # Yes, fine, so nothing to do - don't continue to process. | 
|                         process_changedetection_results = False | ||||
|                         changed_detected = False | ||||
|                         self.datastore.update_watch(uuid=uuid, update_obj={'last_error': False}) | ||||
|  | ||||
|                     except content_fetcher.BrowserStepsStepTimout as e: | ||||
|  | ||||
| @@ -318,6 +319,11 @@ class update_worker(threading.Thread): | ||||
|                                                                            'last_check_status': e.status_code, | ||||
|                                                                            'has_ldjson_price_data': None}) | ||||
|                         process_changedetection_results = False | ||||
|                     except UnableToExtractRestockData as e: | ||||
|                         # Usually when fetcher.instock_data returns empty | ||||
|                         self.app.logger.error("Exception reached processing watch UUID: %s - %s", uuid, str(e)) | ||||
|                         self.datastore.update_watch(uuid=uuid, update_obj={'last_error': f"Unable to extract restock data for this page. (Got status code {e.status_code} from the server)"}) | 
|                         process_changedetection_results = False | ||||
|                     except Exception as e: | ||||
|                         self.app.logger.error("Exception reached processing watch UUID: %s - %s", uuid, str(e)) | ||||
|                         self.datastore.update_watch(uuid=uuid, update_obj={'last_error': str(e)}) | ||||
|   | ||||
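The new UnableToExtractRestockData handler reads e.status_code, so the exception must carry the HTTP status of the failed fetch. Its definition in processors/restock_diff.py is not part of this diff; a minimal sketch consistent with how the handler uses it:

class UnableToExtractRestockData(Exception):
    # Raised when fetcher.instock_data comes back empty (per the comment above).
    def __init__(self, status_code):
        self.status_code = status_code
        super().__init__("Unable to extract restock data (HTTP status {})".format(status_code))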