From 73f3beda00a4a39756d9e57a725aee727f006fcb Mon Sep 17 00:00:00 2001 From: dgtlmoon Date: Tue, 3 Jun 2025 10:17:19 +0200 Subject: [PATCH] Realtime UI - Socketio tweaks and refactor (#3220) --- .dockerignore | 32 + .../test-stack-reusable-workflow.yml | 8 +- changedetectionio/__init__.py | 143 ++-- changedetectionio/api/Watch.py | 7 +- changedetectionio/async_update_worker.py | 449 +++++++++++++ .../blueprint/browser_steps/__init__.py | 76 ++- .../blueprint/browser_steps/browser_steps.py | 146 +++-- .../blueprint/browser_steps/nonContext.py | 17 - .../blueprint/imports/__init__.py | 7 +- .../blueprint/price_data_follower/__init__.py | 3 +- .../blueprint/settings/__init__.py | 25 + .../settings/templates/settings.html | 10 +- changedetectionio/blueprint/ui/__init__.py | 19 +- changedetectionio/blueprint/ui/ajax.py | 35 - changedetectionio/blueprint/ui/edit.py | 3 +- changedetectionio/blueprint/ui/views.py | 3 +- .../blueprint/watchlist/__init__.py | 1 - .../watchlist/templates/watch-overview.html | 205 +++--- changedetectionio/content_fetchers/base.py | 14 +- .../content_fetchers/playwright.py | 112 ++-- .../content_fetchers/puppeteer.py | 10 +- .../content_fetchers/requests.py | 36 +- .../content_fetchers/webdriver_selenium.py | 125 ++-- changedetectionio/custom_queue.py | 448 +++++++++++++ changedetectionio/flask_app.py | 171 ++++- changedetectionio/forms.py | 7 + changedetectionio/model/App.py | 1 + changedetectionio/notification_service.py | 246 +++++++ changedetectionio/processors/__init__.py | 23 +- changedetectionio/realtime/README.md | 124 ++++ changedetectionio/realtime/events.py | 58 ++ changedetectionio/realtime/socket_server.py | 267 +++++--- changedetectionio/static/js/realtime.js | 40 +- changedetectionio/static/js/socket.io.min.js | 8 +- .../styles/scss/parts/_watch_table.scss | 22 +- .../static/styles/scss/styles.scss | 3 + changedetectionio/static/styles/styles.css | 20 +- changedetectionio/store.py | 8 +- changedetectionio/templates/base.html | 6 
+- changedetectionio/tests/conftest.py | 43 ++ .../test_custom_browser_url.py | 4 +- .../tests/fetchers/test_content.py | 2 +- .../fetchers/test_custom_js_before_content.py | 2 +- .../tests/proxy_list/test_multiple_proxy.py | 2 +- .../tests/proxy_list/test_noproxy.py | 2 +- .../tests/proxy_list/test_proxy.py | 2 +- .../tests/proxy_list/test_proxy_noconnect.py | 2 +- .../proxy_list/test_select_custom_proxy.py | 2 +- .../tests/proxy_socks5/test_socks5_proxy.py | 2 +- .../proxy_socks5/test_socks5_proxy_sources.py | 2 +- .../tests/restock/test_restock.py | 2 +- .../tests/smtp/test_notification_smtp.py | 7 +- .../tests/test_access_control.py | 2 +- .../tests/test_add_replace_remove_filter.py | 19 +- changedetectionio/tests/test_api.py | 15 +- .../tests/test_api_notifications.py | 2 +- changedetectionio/tests/test_api_search.py | 2 +- changedetectionio/tests/test_api_tags.py | 2 +- changedetectionio/tests/test_auth.py | 2 +- .../test_automatic_follow_ldjson_price.py | 8 +- changedetectionio/tests/test_backend.py | 3 +- changedetectionio/tests/test_backup.py | 2 +- .../tests/test_basic_socketio.py | 2 +- .../tests/test_block_while_text_present.py | 2 +- changedetectionio/tests/test_clone.py | 2 +- changedetectionio/tests/test_conditions.py | 14 +- changedetectionio/tests/test_css_selector.py | 7 +- .../tests/test_element_removal.py | 7 +- changedetectionio/tests/test_encoding.py | 3 +- changedetectionio/tests/test_errorhandling.py | 32 +- changedetectionio/tests/test_extract_csv.py | 2 +- changedetectionio/tests/test_extract_regex.py | 8 +- .../tests/test_filter_exist_changes.py | 2 +- .../tests/test_filter_failure_notification.py | 7 +- changedetectionio/tests/test_group.py | 18 +- .../tests/test_history_consistency.py | 2 +- changedetectionio/tests/test_ignore.py | 2 +- .../tests/test_ignore_regex_text.py | 3 +- changedetectionio/tests/test_ignore_text.py | 7 +- .../tests/test_ignorehyperlinks.py | 3 +- .../tests/test_ignorestatuscode.py | 3 +- 
.../tests/test_ignorewhitespace.py | 3 +- changedetectionio/tests/test_import.py | 8 +- changedetectionio/tests/test_jinja2.py | 8 +- .../tests/test_jsonpath_jq_selector.py | 3 +- changedetectionio/tests/test_live_preview.py | 2 +- .../tests/test_nonrenderable_pages.py | 4 +- changedetectionio/tests/test_notification.py | 16 +- .../tests/test_notification_errors.py | 12 +- changedetectionio/tests/test_obfuscations.py | 2 +- changedetectionio/tests/test_pdf.py | 2 +- .../tests/test_preview_endpoints.py | 2 +- changedetectionio/tests/test_request.py | 11 +- .../tests/test_restock_itemprop.py | 22 +- changedetectionio/tests/test_rss.py | 12 +- changedetectionio/tests/test_scheduler.py | 8 +- changedetectionio/tests/test_search.py | 7 +- changedetectionio/tests/test_security.py | 10 +- changedetectionio/tests/test_share_watch.py | 2 +- changedetectionio/tests/test_source.py | 3 +- changedetectionio/tests/test_trigger.py | 2 +- changedetectionio/tests/test_trigger_regex.py | 2 +- .../tests/test_trigger_regex_with_filter.py | 2 +- changedetectionio/tests/test_ui.py | 2 +- changedetectionio/tests/test_unique_lines.py | 10 +- .../tests/test_watch_fields_storage.py | 2 +- .../tests/test_xpath_selector.py | 7 +- changedetectionio/tests/util.py | 72 +-- .../tests/visualselector/test_fetch_data.py | 16 +- changedetectionio/update_worker.py | 608 ------------------ changedetectionio/worker_handler.py | 395 ++++++++++++ requirements.txt | 22 +- 112 files changed, 2948 insertions(+), 1524 deletions(-) create mode 100644 changedetectionio/async_update_worker.py delete mode 100644 changedetectionio/blueprint/browser_steps/nonContext.py delete mode 100644 changedetectionio/blueprint/ui/ajax.py create mode 100644 changedetectionio/notification_service.py create mode 100644 changedetectionio/realtime/README.md create mode 100644 changedetectionio/realtime/events.py delete mode 100644 changedetectionio/update_worker.py create mode 100644 changedetectionio/worker_handler.py diff --git 
a/.dockerignore b/.dockerignore index 2f88d7d3..14fba462 100644 --- a/.dockerignore +++ b/.dockerignore @@ -29,3 +29,35 @@ venv/ # Visual Studio .vscode/ + +# Test and development files +test-datastore/ +tests/ +docs/ +*.md +!README.md + +# Temporary and log files +*.log +*.tmp +tmp/ +temp/ + +# Training data and large files +train-data/ +works-data/ + +# Container files +Dockerfile* +docker-compose*.yml +.dockerignore + +# Development certificates and keys +*.pem +*.key +*.crt +profile_output.prof + +# Large binary files that shouldn't be in container +*.pdf +chrome.json \ No newline at end of file diff --git a/.github/workflows/test-stack-reusable-workflow.yml b/.github/workflows/test-stack-reusable-workflow.yml index 8f3b9301..af3d0fe2 100644 --- a/.github/workflows/test-stack-reusable-workflow.yml +++ b/.github/workflows/test-stack-reusable-workflow.yml @@ -86,10 +86,10 @@ jobs: run: | # Playwright via Sockpuppetbrowser fetch # tests/visualselector/test_fetch_data.py will do browser steps - docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_content.py' - docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_errorhandling.py' - docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/visualselector/test_fetch_data.py' - docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network 
changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_custom_js_before_content.py' + docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest -vv --capture=tee-sys --showlocals --tb=long --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_content.py' + docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest -vv --capture=tee-sys --showlocals --tb=long --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_errorhandling.py' + docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest -vv --capture=tee-sys --showlocals --tb=long --live-server-host=0.0.0.0 --live-server-port=5004 tests/visualselector/test_fetch_data.py' + docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest -vv --capture=tee-sys --showlocals --tb=long --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_custom_js_before_content.py' - name: Playwright and SocketPuppetBrowser - Headers and requests diff --git a/changedetectionio/__init__.py b/changedetectionio/__init__.py index 0cc72250..c34ab04b 100644 --- a/changedetectionio/__init__.py +++ b/changedetectionio/__init__.py @@ -10,10 +10,11 @@ import os import getopt import platform import signal -import socket -import sys -from werkzeug.serving import run_simple +import sys + +# Eventlet completely removed - using threading 
mode for SocketIO +# This provides better Python 3.12+ compatibility and eliminates eventlet/asyncio conflicts from changedetectionio import store from changedetectionio.flask_app import changedetection_app from loguru import logger @@ -28,22 +29,34 @@ def get_version(): # Parent wrapper or OS sends us a SIGTERM/SIGINT, do everything required for a clean shutdown def sigshutdown_handler(_signo, _stack_frame): name = signal.Signals(_signo).name - logger.critical(f'Shutdown: Got Signal - {name} ({_signo}), Saving DB to disk and calling shutdown') - datastore.sync_to_json() - logger.success('Sync JSON to disk complete.') + logger.critical(f'Shutdown: Got Signal - {name} ({_signo}), Fast shutdown initiated') - # Shutdown socketio server if available + # Set exit flag immediately to stop all loops + app.config.exit.set() + datastore.stop_thread = True + + # Shutdown workers immediately + try: + from changedetectionio import worker_handler + worker_handler.shutdown_workers() + except Exception as e: + logger.error(f"Error shutting down workers: {str(e)}") + + # Shutdown socketio server fast from changedetectionio.flask_app import socketio_server if socketio_server and hasattr(socketio_server, 'shutdown'): try: - logger.info("Shutting down Socket.IO server...") socketio_server.shutdown() except Exception as e: logger.error(f"Error shutting down Socket.IO server: {str(e)}") - # Set flags for clean shutdown - datastore.stop_thread = True - app.config.exit.set() + # Save data quickly + try: + datastore.sync_to_json() + logger.success('Fast sync to disk complete.') + except Exception as e: + logger.error(f"Error syncing to disk: {str(e)}") + sys.exit() def main(): @@ -52,9 +65,9 @@ def main(): datastore_path = None do_cleanup = False - host = '' + host = "0.0.0.0" ipv6_enabled = False - port = os.environ.get('PORT') or 5000 + port = int(os.environ.get('PORT', 5000)) ssl_mode = False # On Windows, create and use a default path. 
@@ -150,6 +163,11 @@ def main(): app = changedetection_app(app_config, datastore) + # Get the SocketIO instance from the Flask app (created in flask_app.py) + from changedetectionio.flask_app import socketio_server + global socketio + socketio = socketio_server + signal.signal(signal.SIGTERM, sigshutdown_handler) signal.signal(signal.SIGINT, sigshutdown_handler) @@ -174,10 +192,11 @@ def main(): @app.context_processor - def inject_version(): + def inject_template_globals(): return dict(right_sticky="v{}".format(datastore.data['version_tag']), new_version_available=app.config['NEW_VERSION_AVAILABLE'], - has_password=datastore.data['settings']['application']['password'] != False + has_password=datastore.data['settings']['application']['password'] != False, + socket_io_enabled=datastore.data['settings']['application']['ui'].get('socket_io_enabled', True) ) # Monitored websites will not receive a Referer header when a user clicks on an outgoing link. @@ -201,87 +220,21 @@ def main(): from werkzeug.middleware.proxy_fix import ProxyFix app.wsgi_app = ProxyFix(app.wsgi_app, x_prefix=1, x_host=1) - s_type = socket.AF_INET6 if ipv6_enabled else socket.AF_INET - # Get socketio_server from flask_app - from changedetectionio.flask_app import socketio_server + # SocketIO instance is already initialized in flask_app.py - if socketio_server and datastore.data['settings']['application']['ui'].get('open_diff_in_new_tab'): - logger.info("Starting server with Socket.IO support (using threading)...") - - # Use Flask-SocketIO's run method with error handling for Werkzeug warning - # This is the cleanest approach that works with all Flask-SocketIO versions - # Use '0.0.0.0' as the default host if none is specified - # This will listen on all available interfaces - listen_host = '0.0.0.0' if host == '' else host - logger.info(f"Using host: {listen_host} and port: {port}") - - try: - # First try with the allow_unsafe_werkzeug parameter (newer versions) - if ssl_mode: - 
socketio_server.run( - app, - host=listen_host, - port=int(port), - certfile='cert.pem', - keyfile='privkey.pem', - debug=False, - use_reloader=False, - allow_unsafe_werkzeug=True # Only in newer versions - ) - else: - socketio_server.run( - app, - host=listen_host, - port=int(port), - debug=False, - use_reloader=False, - allow_unsafe_werkzeug=True # Only in newer versions - ) - except TypeError: - # If allow_unsafe_werkzeug is not a valid parameter, try without it - logger.info("Falling back to basic run method without allow_unsafe_werkzeug") - # Override the werkzeug safety check by setting an environment variable - os.environ['WERKZEUG_RUN_MAIN'] = 'true' - if ssl_mode: - socketio_server.run( - app, - host=listen_host, - port=int(port), - certfile='cert.pem', - keyfile='privkey.pem', - debug=False, - use_reloader=False - ) - else: - socketio_server.run( - app, - host=listen_host, - port=int(port), - debug=False, - use_reloader=False - ) - else: - logger.warning("Socket.IO server not initialized, falling back to standard WSGI server") - # Fallback to standard WSGI server if socketio_server is not available - listen_host = '0.0.0.0' if host == '' else host + # Launch using SocketIO run method for proper integration (if enabled) + if socketio_server: if ssl_mode: - # Use Werkzeug's run_simple with SSL support - run_simple( - hostname=listen_host, - port=int(port), - application=app, - use_reloader=False, - use_debugger=False, - ssl_context=('cert.pem', 'privkey.pem') - ) + socketio.run(app, host=host, port=int(port), debug=False, + certfile='cert.pem', keyfile='privkey.pem', allow_unsafe_werkzeug=True) else: - # Use Werkzeug's run_simple for standard HTTP - run_simple( - hostname=listen_host, - port=int(port), - application=app, - use_reloader=False, - use_debugger=False - ) - + socketio.run(app, host=host, port=int(port), debug=False, allow_unsafe_werkzeug=True) + else: + # Run Flask app without Socket.IO if disabled + logger.info("Starting Flask app without 
Socket.IO server") + if ssl_mode: + app.run(host=host, port=int(port), debug=False, + ssl_context=('cert.pem', 'privkey.pem')) + else: + app.run(host=host, port=int(port), debug=False) diff --git a/changedetectionio/api/Watch.py b/changedetectionio/api/Watch.py index 1a815670..c6011934 100644 --- a/changedetectionio/api/Watch.py +++ b/changedetectionio/api/Watch.py @@ -3,6 +3,7 @@ from changedetectionio.strtobool import strtobool from flask_expects_json import expects_json from changedetectionio import queuedWatchMetaData +from changedetectionio import worker_handler from flask_restful import abort, Resource from flask import request, make_response import validators @@ -47,7 +48,7 @@ class Watch(Resource): abort(404, message='No watch exists with the UUID of {}'.format(uuid)) if request.args.get('recheck'): - self.update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid})) + worker_handler.queue_item_async_safe(self.update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid})) return "OK", 200 if request.args.get('paused', '') == 'paused': self.datastore.data['watching'].get(uuid).pause() @@ -236,7 +237,7 @@ class CreateWatch(Resource): new_uuid = self.datastore.add_watch(url=url, extras=extras, tag=tags) if new_uuid: - self.update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': new_uuid})) + worker_handler.queue_item_async_safe(self.update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': new_uuid})) return {'uuid': new_uuid}, 201 else: return "Invalid or unsupported URL", 400 @@ -291,7 +292,7 @@ class CreateWatch(Resource): if request.args.get('recheck_all'): for uuid in self.datastore.data['watching'].keys(): - self.update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid})) + worker_handler.queue_item_async_safe(self.update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid})) return {'status': "OK"}, 200 return list, 200 \ No newline at end of 
file diff --git a/changedetectionio/async_update_worker.py b/changedetectionio/async_update_worker.py new file mode 100644 index 00000000..ae0501b0 --- /dev/null +++ b/changedetectionio/async_update_worker.py @@ -0,0 +1,449 @@ +from .processors.exceptions import ProcessorException +import changedetectionio.content_fetchers.exceptions as content_fetchers_exceptions +from changedetectionio.processors.text_json_diff.processor import FilterNotFoundInResponse +from changedetectionio import html_tools +from changedetectionio.flask_app import watch_check_update + +import asyncio +import importlib +import os +import time + +from loguru import logger + +# Async version of update_worker +# Processes jobs from AsyncSignalPriorityQueue instead of threaded queue + +async def async_update_worker(worker_id, q, notification_q, app, datastore): + """ + Async worker function that processes watch check jobs from the queue. + + Args: + worker_id: Unique identifier for this worker + q: AsyncSignalPriorityQueue containing jobs to process + notification_q: Standard queue for notifications + app: Flask application instance + datastore: Application datastore + """ + # Set a descriptive name for this task + task = asyncio.current_task() + if task: + task.set_name(f"async-worker-{worker_id}") + + logger.info(f"Starting async worker {worker_id}") + + while not app.config.exit.is_set(): + update_handler = None + watch = None + + try: + # Use asyncio wait_for to make queue.get() cancellable + queued_item_data = await asyncio.wait_for(q.get(), timeout=1.0) + except asyncio.TimeoutError: + # No jobs available, continue loop + continue + except Exception as e: + logger.error(f"Worker {worker_id} error getting queue item: {e}") + await asyncio.sleep(0.1) + continue + + uuid = queued_item_data.item.get('uuid') + fetch_start_time = round(time.time()) + + # Mark this UUID as being processed + from changedetectionio import worker_handler + worker_handler.set_uuid_processing(uuid, processing=True) + + 
try: + if uuid in list(datastore.data['watching'].keys()) and datastore.data['watching'][uuid].get('url'): + changed_detected = False + contents = b'' + process_changedetection_results = True + update_obj = {} + + # Clear last errors + datastore.data['watching'][uuid]['browser_steps_last_error_step'] = None + datastore.data['watching'][uuid]['last_checked'] = fetch_start_time + + watch = datastore.data['watching'].get(uuid) + + logger.info(f"Worker {worker_id} processing watch UUID {uuid} Priority {queued_item_data.priority} URL {watch['url']}") + + try: + watch_check_update.send(watch_uuid=uuid) + + # Processor is what we are using for detecting the "Change" + processor = watch.get('processor', 'text_json_diff') + + # Init a new 'difference_detection_processor' + processor_module_name = f"changedetectionio.processors.{processor}.processor" + try: + processor_module = importlib.import_module(processor_module_name) + except ModuleNotFoundError as e: + print(f"Processor module '{processor}' not found.") + raise e + + update_handler = processor_module.perform_site_check(datastore=datastore, + watch_uuid=uuid) + + # All fetchers are now async, so call directly + await update_handler.call_browser() + + # Run change detection (this is synchronous) + changed_detected, update_obj, contents = update_handler.run_changedetection(watch=watch) + + except PermissionError as e: + logger.critical(f"File permission error updating file, watch: {uuid}") + logger.critical(str(e)) + process_changedetection_results = False + + except ProcessorException as e: + if e.screenshot: + watch.save_screenshot(screenshot=e.screenshot) + if e.xpath_data: + watch.save_xpath_data(data=e.xpath_data) + datastore.update_watch(uuid=uuid, update_obj={'last_error': e.message}) + process_changedetection_results = False + + except content_fetchers_exceptions.ReplyWithContentButNoText as e: + extra_help = "" + if e.has_filters: + has_img = html_tools.include_filters(include_filters='img', + 
html_content=e.html_content) + if has_img: + extra_help = ", it's possible that the filters you have give an empty result or contain only an image." + else: + extra_help = ", it's possible that the filters were found, but contained no usable text." + + datastore.update_watch(uuid=uuid, update_obj={ + 'last_error': f"Got HTML content but no text found (With {e.status_code} reply code){extra_help}" + }) + + if e.screenshot: + watch.save_screenshot(screenshot=e.screenshot, as_error=True) + + if e.xpath_data: + watch.save_xpath_data(data=e.xpath_data) + + process_changedetection_results = False + + except content_fetchers_exceptions.Non200ErrorCodeReceived as e: + if e.status_code == 403: + err_text = "Error - 403 (Access denied) received" + elif e.status_code == 404: + err_text = "Error - 404 (Page not found) received" + elif e.status_code == 407: + err_text = "Error - 407 (Proxy authentication required) received, did you need a username and password for the proxy?" + elif e.status_code == 500: + err_text = "Error - 500 (Internal server error) received from the web site" + else: + extra = ' (Access denied or blocked)' if str(e.status_code).startswith('4') else '' + err_text = f"Error - Request returned a HTTP error code {e.status_code}{extra}" + + if e.screenshot: + watch.save_screenshot(screenshot=e.screenshot, as_error=True) + if e.xpath_data: + watch.save_xpath_data(data=e.xpath_data, as_error=True) + if e.page_text: + watch.save_error_text(contents=e.page_text) + + datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text}) + process_changedetection_results = False + + except FilterNotFoundInResponse as e: + if not datastore.data['watching'].get(uuid): + continue + + err_text = "Warning, no filters were found, no change detection ran - Did the page change layout? update your Visual Filter if necessary." 
+ datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text}) + + # Filter wasnt found, but we should still update the visual selector so that they can have a chance to set it up again + if e.screenshot: + watch.save_screenshot(screenshot=e.screenshot) + + if e.xpath_data: + watch.save_xpath_data(data=e.xpath_data) + + # Only when enabled, send the notification + if watch.get('filter_failure_notification_send', False): + c = watch.get('consecutive_filter_failures', 0) + c += 1 + # Send notification if we reached the threshold? + threshold = datastore.data['settings']['application'].get('filter_failure_notification_threshold_attempts', 0) + logger.debug(f"Filter for {uuid} not found, consecutive_filter_failures: {c} of threshold {threshold}") + if c >= threshold: + if not watch.get('notification_muted'): + logger.debug(f"Sending filter failed notification for {uuid}") + await send_filter_failure_notification(uuid, notification_q, datastore) + c = 0 + logger.debug(f"Reset filter failure count back to zero") + + datastore.update_watch(uuid=uuid, update_obj={'consecutive_filter_failures': c}) + else: + logger.trace(f"{uuid} - filter_failure_notification_send not enabled, skipping") + + process_changedetection_results = False + + except content_fetchers_exceptions.checksumFromPreviousCheckWasTheSame as e: + # Yes fine, so nothing todo, don't continue to process. 
+ process_changedetection_results = False + changed_detected = False + + except content_fetchers_exceptions.BrowserConnectError as e: + datastore.update_watch(uuid=uuid, + update_obj={'last_error': e.msg}) + process_changedetection_results = False + + except content_fetchers_exceptions.BrowserFetchTimedOut as e: + datastore.update_watch(uuid=uuid, + update_obj={'last_error': e.msg}) + process_changedetection_results = False + + except content_fetchers_exceptions.BrowserStepsStepException as e: + if not datastore.data['watching'].get(uuid): + continue + + error_step = e.step_n + 1 + from playwright._impl._errors import TimeoutError, Error + + # Generally enough info for TimeoutError (couldnt locate the element after default seconds) + err_text = f"Browser step at position {error_step} could not run, check the watch, add a delay if necessary, view Browser Steps to see screenshot at that step." + + if e.original_e.name == "TimeoutError": + # Just the first line is enough, the rest is the stack trace + err_text += " Could not find the target." + else: + # Other Error, more info is good. + err_text += " " + str(e.original_e).splitlines()[0] + + logger.debug(f"BrowserSteps exception at step {error_step} {str(e.original_e)}") + + datastore.update_watch(uuid=uuid, + update_obj={'last_error': err_text, + 'browser_steps_last_error_step': error_step}) + + if watch.get('filter_failure_notification_send', False): + c = watch.get('consecutive_filter_failures', 0) + c += 1 + # Send notification if we reached the threshold? 
+ threshold = datastore.data['settings']['application'].get('filter_failure_notification_threshold_attempts', 0) + logger.error(f"Step for {uuid} not found, consecutive_filter_failures: {c}") + if threshold > 0 and c >= threshold: + if not watch.get('notification_muted'): + await send_step_failure_notification(watch_uuid=uuid, step_n=e.step_n, notification_q=notification_q, datastore=datastore) + c = 0 + + datastore.update_watch(uuid=uuid, update_obj={'consecutive_filter_failures': c}) + + process_changedetection_results = False + + except content_fetchers_exceptions.EmptyReply as e: + # Some kind of custom to-str handler in the exception handler that does this? + err_text = "EmptyReply - try increasing 'Wait seconds before extracting text', Status Code {}".format(e.status_code) + datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text, + 'last_check_status': e.status_code}) + process_changedetection_results = False + + except content_fetchers_exceptions.ScreenshotUnavailable as e: + err_text = "Screenshot unavailable, page did not render fully in the expected time or page was too long - try increasing 'Wait seconds before extracting text'" + datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text, + 'last_check_status': e.status_code}) + process_changedetection_results = False + + except content_fetchers_exceptions.JSActionExceptions as e: + err_text = "Error running JS Actions - Page request - "+e.message + if e.screenshot: + watch.save_screenshot(screenshot=e.screenshot, as_error=True) + datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text, + 'last_check_status': e.status_code}) + process_changedetection_results = False + + except content_fetchers_exceptions.PageUnloadable as e: + err_text = "Page request from server didnt respond correctly" + if e.message: + err_text = "{} - {}".format(err_text, e.message) + + if e.screenshot: + watch.save_screenshot(screenshot=e.screenshot, as_error=True) + + 
datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text, + 'last_check_status': e.status_code, + 'has_ldjson_price_data': None}) + process_changedetection_results = False + + except content_fetchers_exceptions.BrowserStepsInUnsupportedFetcher as e: + err_text = "This watch has Browser Steps configured and so it cannot run with the 'Basic fast Plaintext/HTTP Client', either remove the Browser Steps or select a Chrome fetcher." + datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text}) + process_changedetection_results = False + logger.error(f"Exception (BrowserStepsInUnsupportedFetcher) reached processing watch UUID: {uuid}") + + except Exception as e: + logger.error(f"Worker {worker_id} exception processing watch UUID: {uuid}") + logger.error(str(e)) + datastore.update_watch(uuid=uuid, update_obj={'last_error': "Exception: " + str(e)}) + process_changedetection_results = False + + else: + if not datastore.data['watching'].get(uuid): + continue + + update_obj['content-type'] = update_handler.fetcher.get_all_headers().get('content-type', '').lower() + + if not watch.get('ignore_status_codes'): + update_obj['consecutive_filter_failures'] = 0 + + update_obj['last_error'] = False + cleanup_error_artifacts(uuid, datastore) + + if not datastore.data['watching'].get(uuid): + continue + + if process_changedetection_results: + # Extract title if needed + if datastore.data['settings']['application'].get('extract_title_as_title') or watch['extract_title_as_title']: + if not watch['title'] or not len(watch['title']): + try: + update_obj['title'] = html_tools.extract_element(find='title', html_content=update_handler.fetcher.content) + logger.info(f"UUID: {uuid} Extract updated title to '{update_obj['title']}") + except Exception as e: + logger.warning(f"UUID: {uuid} Extract <title> as watch title was enabled, but couldn't find a <title>.") + + try: + datastore.update_watch(uuid=uuid, update_obj=update_obj) + + if changed_detected or not 
watch.history_n: + if update_handler.screenshot: + watch.save_screenshot(screenshot=update_handler.screenshot) + + if update_handler.xpath_data: + watch.save_xpath_data(data=update_handler.xpath_data) + + # Ensure unique timestamp for history + if watch.newest_history_key and int(fetch_start_time) == int(watch.newest_history_key): + logger.warning(f"Timestamp {fetch_start_time} already exists, waiting 1 seconds") + fetch_start_time += 1 + await asyncio.sleep(1) + + watch.save_history_text(contents=contents, + timestamp=int(fetch_start_time), + snapshot_id=update_obj.get('previous_md5', 'none')) + + empty_pages_are_a_change = datastore.data['settings']['application'].get('empty_pages_are_a_change', False) + if update_handler.fetcher.content or (not update_handler.fetcher.content and empty_pages_are_a_change): + watch.save_last_fetched_html(contents=update_handler.fetcher.content, timestamp=int(fetch_start_time)) + + # Send notifications on second+ check + if watch.history_n >= 2: + logger.info(f"Change detected in UUID {uuid} - {watch['url']}") + if not watch.get('notification_muted'): + await send_content_changed_notification(uuid, notification_q, datastore) + + except Exception as e: + logger.critical(f"Worker {worker_id} exception in process_changedetection_results") + logger.critical(str(e)) + datastore.update_watch(uuid=uuid, update_obj={'last_error': str(e)}) + + # Always record attempt count + count = watch.get('check_count', 0) + 1 + + # Record server header + try: + server_header = update_handler.fetcher.headers.get('server', '').strip().lower()[:255] + datastore.update_watch(uuid=uuid, update_obj={'remote_server_reply': server_header}) + except Exception as e: + pass + + datastore.update_watch(uuid=uuid, update_obj={'fetch_time': round(time.time() - fetch_start_time, 3), + 'check_count': count}) + + except Exception as e: + logger.error(f"Worker {worker_id} unexpected error processing {uuid}: {e}") + logger.error(f"Worker {worker_id} traceback:", 
exc_info=True) + + # Also update the watch with error information + if datastore and uuid in datastore.data['watching']: + datastore.update_watch(uuid=uuid, update_obj={'last_error': f"Worker error: {str(e)}"}) + + finally: + # Always cleanup - this runs whether there was an exception or not + if uuid: + try: + # Mark UUID as no longer being processed + worker_handler.set_uuid_processing(uuid, processing=False) + + # Send completion signal + if watch: + #logger.info(f"Worker {worker_id} sending completion signal for UUID {watch['uuid']}") + watch_check_update.send(watch_uuid=watch['uuid']) + + update_handler = None + logger.debug(f"Worker {worker_id} completed watch {uuid} in {time.time()-fetch_start_time:.2f}s") + except Exception as cleanup_error: + logger.error(f"Worker {worker_id} error during cleanup: {cleanup_error}") + + # Brief pause before continuing to avoid tight error loops (only on error) + if 'e' in locals(): + await asyncio.sleep(1.0) + else: + # Small yield for normal completion + await asyncio.sleep(0.01) + + # Check if we should exit + if app.config.exit.is_set(): + break + + # Check if we're in pytest environment - if so, be more gentle with logging + import sys + in_pytest = "pytest" in sys.modules or "PYTEST_CURRENT_TEST" in os.environ + + if not in_pytest: + logger.info(f"Worker {worker_id} shutting down") + + +def cleanup_error_artifacts(uuid, datastore): + """Helper function to clean up error artifacts""" + cleanup_files = ["last-error-screenshot.png", "last-error.txt"] + for f in cleanup_files: + full_path = os.path.join(datastore.datastore_path, uuid, f) + if os.path.isfile(full_path): + os.unlink(full_path) + + + +async def send_content_changed_notification(watch_uuid, notification_q, datastore): + """Helper function to queue notifications using the new notification service""" + try: + from changedetectionio.notification_service import create_notification_service + + # Create notification service instance + notification_service = 
create_notification_service(datastore, notification_q) + + notification_service.send_content_changed_notification(watch_uuid) + except Exception as e: + logger.error(f"Error sending notification for {watch_uuid}: {e}") + + +async def send_filter_failure_notification(watch_uuid, notification_q, datastore): + """Helper function to send filter failure notifications using the new notification service""" + try: + from changedetectionio.notification_service import create_notification_service + + # Create notification service instance + notification_service = create_notification_service(datastore, notification_q) + + notification_service.send_filter_failure_notification(watch_uuid) + except Exception as e: + logger.error(f"Error sending filter failure notification for {watch_uuid}: {e}") + + +async def send_step_failure_notification(watch_uuid, step_n, notification_q, datastore): + """Helper function to send step failure notifications using the new notification service""" + try: + from changedetectionio.notification_service import create_notification_service + + # Create notification service instance + notification_service = create_notification_service(datastore, notification_q) + + notification_service.send_step_failure_notification(watch_uuid, step_n) + except Exception as e: + logger.error(f"Error sending step failure notification for {watch_uuid}: {e}") \ No newline at end of file diff --git a/changedetectionio/blueprint/browser_steps/__init__.py b/changedetectionio/blueprint/browser_steps/__init__.py index f7907c7c..f0de3057 100644 --- a/changedetectionio/blueprint/browser_steps/__init__.py +++ b/changedetectionio/blueprint/browser_steps/__init__.py @@ -25,35 +25,53 @@ io_interface_context = None import json import hashlib from flask import Response +import asyncio +import threading + +def run_async_in_browser_loop(coro): + """Run async coroutine using the existing async worker event loop""" + from changedetectionio import worker_handler + + # Use the existing async 
worker event loop instead of creating a new one + if worker_handler.USE_ASYNC_WORKERS and worker_handler.async_loop and not worker_handler.async_loop.is_closed(): + logger.debug("Browser steps using existing async worker event loop") + future = asyncio.run_coroutine_threadsafe(coro, worker_handler.async_loop) + return future.result() + else: + # Fallback: create a new event loop (for sync workers or if async loop not available) + logger.debug("Browser steps creating temporary event loop") + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + try: + return loop.run_until_complete(coro) + finally: + loop.close() def construct_blueprint(datastore: ChangeDetectionStore): browser_steps_blueprint = Blueprint('browser_steps', __name__, template_folder="templates") - def start_browsersteps_session(watch_uuid): - from . import nonContext + async def start_browsersteps_session(watch_uuid): from . import browser_steps import time - global io_interface_context + from playwright.async_api import async_playwright # We keep the playwright session open for many minutes keepalive_seconds = int(os.getenv('BROWSERSTEPS_MINUTES_KEEPALIVE', 10)) * 60 browsersteps_start_session = {'start_time': time.time()} - # You can only have one of these running - # This should be very fine to leave running for the life of the application - # @idea - Make it global so the pool of watch fetchers can use it also - if not io_interface_context: - io_interface_context = nonContext.c_sync_playwright() - # Start the Playwright context, which is actually a nodejs sub-process and communicates over STDIN/STDOUT pipes - io_interface_context = io_interface_context.start() + # Create a new async playwright instance for browser steps + playwright_instance = async_playwright() + playwright_context = await playwright_instance.start() keepalive_ms = ((keepalive_seconds + 3) * 1000) base_url = os.getenv('PLAYWRIGHT_DRIVER_URL', '').strip('"') a = "?" if not '?' 
in base_url else '&' base_url += a + f"timeout={keepalive_ms}" - browsersteps_start_session['browser'] = io_interface_context.chromium.connect_over_cdp(base_url) + browser = await playwright_context.chromium.connect_over_cdp(base_url, timeout=keepalive_ms) + browsersteps_start_session['browser'] = browser + browsersteps_start_session['playwright_context'] = playwright_context proxy_id = datastore.get_preferred_proxy_for_watch(uuid=watch_uuid) proxy = None @@ -75,15 +93,20 @@ def construct_blueprint(datastore: ChangeDetectionStore): logger.debug(f"Browser Steps: UUID {watch_uuid} selected proxy {proxy_url}") # Tell Playwright to connect to Chrome and setup a new session via our stepper interface - browsersteps_start_session['browserstepper'] = browser_steps.browsersteps_live_ui( - playwright_browser=browsersteps_start_session['browser'], + browserstepper = browser_steps.browsersteps_live_ui( + playwright_browser=browser, proxy=proxy, start_url=datastore.data['watching'][watch_uuid].link, headers=datastore.data['watching'][watch_uuid].get('headers') ) + + # Initialize the async connection + await browserstepper.connect(proxy=proxy) + + browsersteps_start_session['browserstepper'] = browserstepper # For test - #browsersteps_start_session['browserstepper'].action_goto_url(value="http://example.com?time="+str(time.time())) + #await browsersteps_start_session['browserstepper'].action_goto_url(value="http://example.com?time="+str(time.time())) return browsersteps_start_session @@ -92,7 +115,7 @@ def construct_blueprint(datastore: ChangeDetectionStore): @browser_steps_blueprint.route("/browsersteps_start_session", methods=['GET']) def browsersteps_start_session(): # A new session was requested, return sessionID - + import asyncio import uuid browsersteps_session_id = str(uuid.uuid4()) watch_uuid = request.args.get('uuid') @@ -104,7 +127,10 @@ def construct_blueprint(datastore: ChangeDetectionStore): logger.debug("browser_steps.py connecting") try: - 
browsersteps_sessions[browsersteps_session_id] = start_browsersteps_session(watch_uuid) + # Run the async function in the dedicated browser steps event loop + browsersteps_sessions[browsersteps_session_id] = run_async_in_browser_loop( + start_browsersteps_session(watch_uuid) + ) except Exception as e: if 'ECONNREFUSED' in str(e): return make_response('Unable to start the Playwright Browser session, is sockpuppetbrowser running? Network configuration is OK?', 401) @@ -169,9 +195,14 @@ def construct_blueprint(datastore: ChangeDetectionStore): is_last_step = strtobool(request.form.get('is_last_step')) try: - browsersteps_sessions[browsersteps_session_id]['browserstepper'].call_action(action_name=step_operation, - selector=step_selector, - optional_value=step_optional_value) + # Run the async call_action method in the dedicated browser steps event loop + run_async_in_browser_loop( + browsersteps_sessions[browsersteps_session_id]['browserstepper'].call_action( + action_name=step_operation, + selector=step_selector, + optional_value=step_optional_value + ) + ) except Exception as e: logger.error(f"Exception when calling step operation {step_operation} {str(e)}") @@ -185,7 +216,11 @@ def construct_blueprint(datastore: ChangeDetectionStore): # Screenshots and other info only needed on requesting a step (POST) try: - (screenshot, xpath_data) = browsersteps_sessions[browsersteps_session_id]['browserstepper'].get_current_state() + # Run the async get_current_state method in the dedicated browser steps event loop + (screenshot, xpath_data) = run_async_in_browser_loop( + browsersteps_sessions[browsersteps_session_id]['browserstepper'].get_current_state() + ) + if is_last_step: watch = datastore.data['watching'].get(uuid) u = browsersteps_sessions[browsersteps_session_id]['browserstepper'].page.url @@ -199,7 +234,6 @@ def construct_blueprint(datastore: ChangeDetectionStore): return make_response("Error fetching screenshot and element data - " + str(e), 401) # SEND THIS BACK TO 
THE BROWSER - output = { "screenshot": f"data:image/jpeg;base64,{base64.b64encode(screenshot).decode('ascii')}", "xpath_data": xpath_data, diff --git a/changedetectionio/blueprint/browser_steps/browser_steps.py b/changedetectionio/blueprint/browser_steps/browser_steps.py index d380d565..e0a3cb2c 100644 --- a/changedetectionio/blueprint/browser_steps/browser_steps.py +++ b/changedetectionio/blueprint/browser_steps/browser_steps.py @@ -63,7 +63,7 @@ class steppable_browser_interface(): self.start_url = start_url # Convert and perform "Click Button" for example - def call_action(self, action_name, selector=None, optional_value=None): + async def call_action(self, action_name, selector=None, optional_value=None): if self.page is None: logger.warning("Cannot call action on None page object") return @@ -93,73 +93,74 @@ class steppable_browser_interface(): optional_value = jinja_render(template_str=optional_value) - action_handler(selector, optional_value) + await action_handler(selector, optional_value) # Safely wait for timeout - self.page.wait_for_timeout(1.5 * 1000) + await self.page.wait_for_timeout(1.5 * 1000) logger.debug(f"Call action done in {time.time()-now:.2f}s") - def action_goto_url(self, selector=None, value=None): + async def action_goto_url(self, selector=None, value=None): if not value: logger.warning("No URL provided for goto_url action") return None now = time.time() - response = self.page.goto(value, timeout=0, wait_until='load') + response = await self.page.goto(value, timeout=0, wait_until='load') logger.debug(f"Time to goto URL {time.time()-now:.2f}s") return response # Incase they request to go back to the start - def action_goto_site(self, selector=None, value=None): - return self.action_goto_url(value=self.start_url) + async def action_goto_site(self, selector=None, value=None): + return await self.action_goto_url(value=self.start_url) - def action_click_element_containing_text(self, selector=None, value=''): + async def 
action_click_element_containing_text(self, selector=None, value=''): logger.debug("Clicking element containing text") if not value or not len(value.strip()): return elem = self.page.get_by_text(value) - if elem.count(): - elem.first.click(delay=randint(200, 500), timeout=self.action_timeout) + if await elem.count(): + await elem.first.click(delay=randint(200, 500), timeout=self.action_timeout) - def action_click_element_containing_text_if_exists(self, selector=None, value=''): + async def action_click_element_containing_text_if_exists(self, selector=None, value=''): logger.debug("Clicking element containing text if exists") if not value or not len(value.strip()): return elem = self.page.get_by_text(value) - logger.debug(f"Clicking element containing text - {elem.count()} elements found") - if elem.count(): - elem.first.click(delay=randint(200, 500), timeout=self.action_timeout) + count = await elem.count() + logger.debug(f"Clicking element containing text - {count} elements found") + if count: + await elem.first.click(delay=randint(200, 500), timeout=self.action_timeout) - def action_enter_text_in_field(self, selector, value): + async def action_enter_text_in_field(self, selector, value): if not selector or not len(selector.strip()): return - self.page.fill(selector, value, timeout=self.action_timeout) + await self.page.fill(selector, value, timeout=self.action_timeout) - def action_execute_js(self, selector, value): + async def action_execute_js(self, selector, value): if not value: return None - return self.page.evaluate(value) + return await self.page.evaluate(value) - def action_click_element(self, selector, value): + async def action_click_element(self, selector, value): logger.debug("Clicking element") if not selector or not len(selector.strip()): return - self.page.click(selector=selector, timeout=self.action_timeout + 20 * 1000, delay=randint(200, 500)) + await self.page.click(selector=selector, timeout=self.action_timeout + 20 * 1000, delay=randint(200, 
500)) - def action_click_element_if_exists(self, selector, value): + async def action_click_element_if_exists(self, selector, value): import playwright._impl._errors as _api_types logger.debug("Clicking element if exists") if not selector or not len(selector.strip()): return try: - self.page.click(selector, timeout=self.action_timeout, delay=randint(200, 500)) + await self.page.click(selector, timeout=self.action_timeout, delay=randint(200, 500)) except _api_types.TimeoutError: return except _api_types.Error: @@ -167,7 +168,7 @@ class steppable_browser_interface(): return - def action_click_x_y(self, selector, value): + async def action_click_x_y(self, selector, value): if not value or not re.match(r'^\s?\d+\s?,\s?\d+\s?$', value): logger.warning("'Click X,Y' step should be in the format of '100 , 90'") return @@ -177,42 +178,42 @@ class steppable_browser_interface(): x = int(float(x.strip())) y = int(float(y.strip())) - self.page.mouse.click(x=x, y=y, delay=randint(200, 500)) + await self.page.mouse.click(x=x, y=y, delay=randint(200, 500)) except Exception as e: logger.error(f"Error parsing x,y coordinates: {str(e)}") - def action__select_by_option_text(self, selector, value): + async def action__select_by_option_text(self, selector, value): if not selector or not len(selector.strip()): return - self.page.select_option(selector, label=value, timeout=self.action_timeout) + await self.page.select_option(selector, label=value, timeout=self.action_timeout) - def action_scroll_down(self, selector, value): + async def action_scroll_down(self, selector, value): # Some sites this doesnt work on for some reason - self.page.mouse.wheel(0, 600) - self.page.wait_for_timeout(1000) + await self.page.mouse.wheel(0, 600) + await self.page.wait_for_timeout(1000) - def action_wait_for_seconds(self, selector, value): + async def action_wait_for_seconds(self, selector, value): try: seconds = float(value.strip()) if value else 1.0 - self.page.wait_for_timeout(seconds * 1000) + await 
self.page.wait_for_timeout(seconds * 1000) except (ValueError, TypeError) as e: logger.error(f"Invalid value for wait_for_seconds: {str(e)}") - def action_wait_for_text(self, selector, value): + async def action_wait_for_text(self, selector, value): if not value: return import json v = json.dumps(value) - self.page.wait_for_function( + await self.page.wait_for_function( f'document.querySelector("body").innerText.includes({v});', timeout=30000 ) - def action_wait_for_text_in_element(self, selector, value): + async def action_wait_for_text_in_element(self, selector, value): if not selector or not value: return @@ -220,49 +221,49 @@ class steppable_browser_interface(): s = json.dumps(selector) v = json.dumps(value) - self.page.wait_for_function( + await self.page.wait_for_function( f'document.querySelector({s}).innerText.includes({v});', timeout=30000 ) # @todo - in the future make some popout interface to capture what needs to be set # https://playwright.dev/python/docs/api/class-keyboard - def action_press_enter(self, selector, value): - self.page.keyboard.press("Enter", delay=randint(200, 500)) + async def action_press_enter(self, selector, value): + await self.page.keyboard.press("Enter", delay=randint(200, 500)) - def action_press_page_up(self, selector, value): - self.page.keyboard.press("PageUp", delay=randint(200, 500)) + async def action_press_page_up(self, selector, value): + await self.page.keyboard.press("PageUp", delay=randint(200, 500)) - def action_press_page_down(self, selector, value): - self.page.keyboard.press("PageDown", delay=randint(200, 500)) + async def action_press_page_down(self, selector, value): + await self.page.keyboard.press("PageDown", delay=randint(200, 500)) - def action_check_checkbox(self, selector, value): + async def action_check_checkbox(self, selector, value): if not selector: return - self.page.locator(selector).check(timeout=self.action_timeout) + await self.page.locator(selector).check(timeout=self.action_timeout) - def 
action_uncheck_checkbox(self, selector, value): + async def action_uncheck_checkbox(self, selector, value): if not selector: return - self.page.locator(selector).uncheck(timeout=self.action_timeout) + await self.page.locator(selector).uncheck(timeout=self.action_timeout) - def action_remove_elements(self, selector, value): + async def action_remove_elements(self, selector, value): """Removes all elements matching the given selector from the DOM.""" if not selector: return - self.page.locator(selector).evaluate_all("els => els.forEach(el => el.remove())") + await self.page.locator(selector).evaluate_all("els => els.forEach(el => el.remove())") - def action_make_all_child_elements_visible(self, selector, value): + async def action_make_all_child_elements_visible(self, selector, value): """Recursively makes all child elements inside the given selector fully visible.""" if not selector: return - self.page.locator(selector).locator("*").evaluate_all(""" + await self.page.locator(selector).locator("*").evaluate_all(""" els => els.forEach(el => { el.style.display = 'block'; // Forces it to be displayed el.style.visibility = 'visible'; // Ensures it's not hidden @@ -307,21 +308,22 @@ class browsersteps_live_ui(steppable_browser_interface): self.playwright_browser = playwright_browser self.start_url = start_url self._is_cleaned_up = False - if self.context is None: - self.connect(proxy=proxy) + self.proxy = proxy + # Note: connect() is now async and must be called separately def __del__(self): # Ensure cleanup happens if object is garbage collected - self.cleanup() + # Note: cleanup is now async, so we can only mark as cleaned up here + self._is_cleaned_up = True # Connect and setup a new context - def connect(self, proxy=None): + async def connect(self, proxy=None): # Should only get called once - test that keep_open = 1000 * 60 * 5 now = time.time() # @todo handle multiple contexts, bind a unique id from the browser on each req? 
- self.context = self.playwright_browser.new_context( + self.context = await self.playwright_browser.new_context( accept_downloads=False, # Should never be needed bypass_csp=True, # This is needed to enable JavaScript execution on GitHub and others extra_http_headers=self.headers, @@ -332,7 +334,7 @@ class browsersteps_live_ui(steppable_browser_interface): user_agent=manage_user_agent(headers=self.headers), ) - self.page = self.context.new_page() + self.page = await self.context.new_page() # self.page.set_default_navigation_timeout(keep_open) self.page.set_default_timeout(keep_open) @@ -342,13 +344,15 @@ class browsersteps_live_ui(steppable_browser_interface): self.page.on("console", lambda msg: print(f"Browser steps console - {msg.type}: {msg.text} {msg.args}")) logger.debug(f"Time to browser setup {time.time()-now:.2f}s") - self.page.wait_for_timeout(1 * 1000) + await self.page.wait_for_timeout(1 * 1000) def mark_as_closed(self): logger.debug("Page closed, cleaning up..") - self.cleanup() + # Note: This is called from a sync context (event handler) + # so we'll just mark as cleaned up and let __del__ handle the rest + self._is_cleaned_up = True - def cleanup(self): + async def cleanup(self): """Properly clean up all resources to prevent memory leaks""" if self._is_cleaned_up: return @@ -359,7 +363,7 @@ class browsersteps_live_ui(steppable_browser_interface): if hasattr(self, 'page') and self.page is not None: try: # Force garbage collection before closing - self.page.request_gc() + await self.page.request_gc() except Exception as e: logger.debug(f"Error during page garbage collection: {str(e)}") @@ -370,7 +374,7 @@ class browsersteps_live_ui(steppable_browser_interface): logger.debug(f"Error removing event listeners: {str(e)}") try: - self.page.close() + await self.page.close() except Exception as e: logger.debug(f"Error closing page: {str(e)}") @@ -379,7 +383,7 @@ class browsersteps_live_ui(steppable_browser_interface): # Clean up context if hasattr(self, 
'context') and self.context is not None: try: - self.context.close() + await self.context.close() except Exception as e: logger.debug(f"Error closing context: {str(e)}") @@ -401,12 +405,12 @@ class browsersteps_live_ui(steppable_browser_interface): return False - def get_current_state(self): + async def get_current_state(self): """Return the screenshot and interactive elements mapping, generally always called after action_()""" import importlib.resources import json # because we for now only run browser steps in playwright mode (not puppeteer mode) - from changedetectionio.content_fetchers.playwright import capture_full_page + from changedetectionio.content_fetchers.playwright import capture_full_page_async # Safety check - don't proceed if resources are cleaned up if self._is_cleaned_up or self.page is None: @@ -416,29 +420,29 @@ class browsersteps_live_ui(steppable_browser_interface): xpath_element_js = importlib.resources.files("changedetectionio.content_fetchers.res").joinpath('xpath_element_scraper.js').read_text() now = time.time() - self.page.wait_for_timeout(1 * 1000) + await self.page.wait_for_timeout(1 * 1000) screenshot = None xpath_data = None try: # Get screenshot first - screenshot = capture_full_page(page=self.page) + screenshot = await capture_full_page_async(page=self.page) logger.debug(f"Time to get screenshot from browser {time.time() - now:.2f}s") # Then get interactive elements now = time.time() - self.page.evaluate("var include_filters=''") - self.page.request_gc() + await self.page.evaluate("var include_filters=''") + await self.page.request_gc() scan_elements = 'a,button,input,select,textarea,i,th,td,p,li,h1,h2,h3,h4,div,span' MAX_TOTAL_HEIGHT = int(os.getenv("SCREENSHOT_MAX_HEIGHT", SCREENSHOT_MAX_HEIGHT_DEFAULT)) - xpath_data = json.loads(self.page.evaluate(xpath_element_js, { + xpath_data = json.loads(await self.page.evaluate(xpath_element_js, { "visualselector_xpath_selectors": scan_elements, "max_height": MAX_TOTAL_HEIGHT })) - 
self.page.request_gc() + await self.page.request_gc() # Sort elements by size xpath_data['size_pos'] = sorted(xpath_data['size_pos'], key=lambda k: k['width'] * k['height'], reverse=True) @@ -448,13 +452,13 @@ class browsersteps_live_ui(steppable_browser_interface): logger.error(f"Error getting current state: {str(e)}") # Attempt recovery - force garbage collection try: - self.page.request_gc() + await self.page.request_gc() except: pass # Request garbage collection one final time try: - self.page.request_gc() + await self.page.request_gc() except: pass diff --git a/changedetectionio/blueprint/browser_steps/nonContext.py b/changedetectionio/blueprint/browser_steps/nonContext.py deleted file mode 100644 index 93abe269..00000000 --- a/changedetectionio/blueprint/browser_steps/nonContext.py +++ /dev/null @@ -1,17 +0,0 @@ -from playwright.sync_api import PlaywrightContextManager - -# So playwright wants to run as a context manager, but we do something horrible and hacky -# we are holding the session open for as long as possible, then shutting it down, and opening a new one -# So it means we don't get to use PlaywrightContextManager' __enter__ __exit__ -# To work around this, make goodbye() act the same as the __exit__() -# -# But actually I think this is because the context is opened correctly with __enter__() but we timeout the connection -# then theres some lock condition where we cant destroy it without it hanging - -class c_PlaywrightContextManager(PlaywrightContextManager): - - def goodbye(self) -> None: - self.__exit__() - -def c_sync_playwright() -> PlaywrightContextManager: - return c_PlaywrightContextManager() diff --git a/changedetectionio/blueprint/imports/__init__.py b/changedetectionio/blueprint/imports/__init__.py index 2e5fddf5..e6fbf760 100644 --- a/changedetectionio/blueprint/imports/__init__.py +++ b/changedetectionio/blueprint/imports/__init__.py @@ -1,6 +1,7 @@ from flask import Blueprint, request, redirect, url_for, flash, render_template from 
changedetectionio.store import ChangeDetectionStore from changedetectionio.auth_decorator import login_optionally_required +from changedetectionio import worker_handler from changedetectionio.blueprint.imports.importer import ( import_url_list, import_distill_io_json, @@ -24,7 +25,7 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe importer_handler = import_url_list() importer_handler.run(data=request.values.get('urls'), flash=flash, datastore=datastore, processor=request.values.get('processor', 'text_json_diff')) for uuid in importer_handler.new_uuids: - update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid})) + worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid})) if len(importer_handler.remaining_data) == 0: return redirect(url_for('watchlist.index')) @@ -37,7 +38,7 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe d_importer = import_distill_io_json() d_importer.run(data=request.values.get('distill-io'), flash=flash, datastore=datastore) for uuid in d_importer.new_uuids: - update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid})) + worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid})) # XLSX importer if request.files and request.files.get('xlsx_file'): @@ -60,7 +61,7 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe w_importer.run(data=file, flash=flash, datastore=datastore) for uuid in w_importer.new_uuids: - update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid})) + worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid})) # Could be some remaining, or we could be on GET form = forms.importForm(formdata=request.form if request.method == 'POST' else None) diff --git 
a/changedetectionio/blueprint/price_data_follower/__init__.py b/changedetectionio/blueprint/price_data_follower/__init__.py index 99841d71..c2c6e768 100644 --- a/changedetectionio/blueprint/price_data_follower/__init__.py +++ b/changedetectionio/blueprint/price_data_follower/__init__.py @@ -4,6 +4,7 @@ from flask import Blueprint, flash, redirect, url_for from flask_login import login_required from changedetectionio.store import ChangeDetectionStore from changedetectionio import queuedWatchMetaData +from changedetectionio import worker_handler from queue import PriorityQueue PRICE_DATA_TRACK_ACCEPT = 'accepted' @@ -19,7 +20,7 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q: PriorityQueue datastore.data['watching'][uuid]['track_ldjson_price_data'] = PRICE_DATA_TRACK_ACCEPT datastore.data['watching'][uuid]['processor'] = 'restock_diff' datastore.data['watching'][uuid].clear_watch() - update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid})) + worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid})) return redirect(url_for("watchlist.index")) @login_required diff --git a/changedetectionio/blueprint/settings/__init__.py b/changedetectionio/blueprint/settings/__init__.py index 015cc274..548a5b70 100644 --- a/changedetectionio/blueprint/settings/__init__.py +++ b/changedetectionio/blueprint/settings/__init__.py @@ -67,7 +67,32 @@ def construct_blueprint(datastore: ChangeDetectionStore): del (app_update['password']) datastore.data['settings']['application'].update(app_update) + + # Handle dynamic worker count adjustment + old_worker_count = datastore.data['settings']['requests'].get('workers', 1) + new_worker_count = form.data['requests'].get('workers', 1) + datastore.data['settings']['requests'].update(form.data['requests']) + + # Adjust worker count if it changed + if new_worker_count != old_worker_count: + from changedetectionio import worker_handler + from 
changedetectionio.flask_app import update_q, notification_q, app, datastore as ds + + result = worker_handler.adjust_async_worker_count( + new_count=new_worker_count, + update_q=update_q, + notification_q=notification_q, + app=app, + datastore=ds + ) + + if result['status'] == 'success': + flash(f"Worker count adjusted: {result['message']}", 'notice') + elif result['status'] == 'not_supported': + flash("Dynamic worker adjustment not supported for sync workers", 'warning') + elif result['status'] == 'error': + flash(f"Error adjusting workers: {result['message']}", 'error') if not os.getenv("SALTED_PASS", False) and len(form.application.form.password.encrypted_password): datastore.data['settings']['application']['password'] = form.application.form.password.encrypted_password diff --git a/changedetectionio/blueprint/settings/templates/settings.html b/changedetectionio/blueprint/settings/templates/settings.html index 5f302331..88ebd6de 100644 --- a/changedetectionio/blueprint/settings/templates/settings.html +++ b/changedetectionio/blueprint/settings/templates/settings.html @@ -135,6 +135,12 @@ {{ render_field(form.application.form.webdriver_delay) }} </div> </fieldset> + <div class="pure-control-group"> + {{ render_field(form.requests.form.workers) }} + {% set worker_info = get_worker_status_info() %} + <span class="pure-form-message-inline">Number of concurrent workers to process watches. More workers = faster processing but higher memory usage.<br> + Currently running: <strong>{{ worker_info.count }}</strong> operational {{ worker_info.type }} workers{% if worker_info.active_workers > 0 %} ({{ worker_info.active_workers }} actively processing){% endif %}.</span> + </div> <div class="pure-control-group inline-radio"> {{ render_field(form.requests.form.default_ua) }} <span class="pure-form-message-inline"> @@ -247,9 +253,9 @@ nav <span class="pure-form-message-inline">Enable this setting to open the diff page in a new tab. 
If disabled, the diff page will open in the current tab.</span> </div> <div class="pure-control-group"> - <span class="pure-form-message-inline">Enable realtime updates in the UI</span> + {{ render_checkbox_field(form.application.form.ui.form.socket_io_enabled, class="socket_io_enabled") }} + <span class="pure-form-message-inline">Realtime UI Updates Enabled - (Restart required if this is changed)</span> </div> - </div> <div class="tab-pane-inner" id="proxies"> <div id="recommended-proxy"> diff --git a/changedetectionio/blueprint/ui/__init__.py b/changedetectionio/blueprint/ui/__init__.py index c9061bf7..9ed40554 100644 --- a/changedetectionio/blueprint/ui/__init__.py +++ b/changedetectionio/blueprint/ui/__init__.py @@ -1,15 +1,13 @@ import time from flask import Blueprint, request, redirect, url_for, flash, render_template, session from loguru import logger -from functools import wraps -from changedetectionio.blueprint.ui.ajax import constuct_ui_ajax_blueprint from changedetectionio.store import ChangeDetectionStore from changedetectionio.blueprint.ui.edit import construct_blueprint as construct_edit_blueprint from changedetectionio.blueprint.ui.notification import construct_blueprint as construct_notification_blueprint from changedetectionio.blueprint.ui.views import construct_blueprint as construct_views_blueprint -def construct_blueprint(datastore: ChangeDetectionStore, update_q, running_update_threads, queuedWatchMetaData, watch_check_update): +def construct_blueprint(datastore: ChangeDetectionStore, update_q, worker_handler, queuedWatchMetaData, watch_check_update): ui_blueprint = Blueprint('ui', __name__, template_folder="templates") # Register the edit blueprint @@ -24,9 +22,6 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, running_updat views_blueprint = construct_views_blueprint(datastore, update_q, queuedWatchMetaData, watch_check_update) ui_blueprint.register_blueprint(views_blueprint) - ui_ajax_blueprint = 
constuct_ui_ajax_blueprint(datastore, update_q, running_update_threads, queuedWatchMetaData, watch_check_update) - ui_blueprint.register_blueprint(ui_ajax_blueprint) - # Import the login decorator from changedetectionio.auth_decorator import login_optionally_required @@ -100,7 +95,7 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, running_updat new_uuid = datastore.clone(uuid) if not datastore.data['watching'].get(uuid).get('paused'): - update_q.put(queuedWatchMetaData.PrioritizedItem(priority=5, item={'uuid': new_uuid})) + worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=5, item={'uuid': new_uuid})) flash('Cloned, you are editing the new watch.') @@ -116,13 +111,11 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, running_updat i = 0 - running_uuids = [] - for t in running_update_threads: - running_uuids.append(t.current_uuid) + running_uuids = worker_handler.get_running_uuids() if uuid: if uuid not in running_uuids: - update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid})) + worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid})) i += 1 else: @@ -139,7 +132,7 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, running_updat if tag != None and tag not in watch['tags']: continue - update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': watch_uuid})) + worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': watch_uuid})) i += 1 if i == 1: @@ -197,7 +190,7 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, running_updat for uuid in uuids: if datastore.data['watching'].get(uuid): # Recheck and require a full reprocessing - update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid})) + worker_handler.queue_item_async_safe(update_q, 
queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid})) flash("{} watches queued for rechecking".format(len(uuids))) elif (op == 'clear-errors'): diff --git a/changedetectionio/blueprint/ui/ajax.py b/changedetectionio/blueprint/ui/ajax.py deleted file mode 100644 index bbe3464d..00000000 --- a/changedetectionio/blueprint/ui/ajax.py +++ /dev/null @@ -1,35 +0,0 @@ -import time - -from blinker import signal -from flask import Blueprint, request, redirect, url_for, flash, render_template, session - - -from changedetectionio.store import ChangeDetectionStore - -def constuct_ui_ajax_blueprint(datastore: ChangeDetectionStore, update_q, running_update_threads, queuedWatchMetaData, watch_check_update): - ui_ajax_blueprint = Blueprint('ajax', __name__, template_folder="templates", url_prefix='/ajax') - - # Import the login decorator - from changedetectionio.auth_decorator import login_optionally_required - - @ui_ajax_blueprint.route("/toggle", methods=['POST']) - @login_optionally_required - def ajax_toggler(): - op = request.values.get('op') - uuid = request.values.get('uuid') - if op and datastore.data['watching'].get(uuid): - if op == 'pause': - datastore.data['watching'][uuid].toggle_pause() - elif op == 'mute': - datastore.data['watching'][uuid].toggle_mute() - elif op == 'recheck': - update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid})) - - watch_check_update = signal('watch_check_update') - if watch_check_update: - watch_check_update.send(watch_uuid=uuid) - - return 'OK' - - - return ui_ajax_blueprint diff --git a/changedetectionio/blueprint/ui/edit.py b/changedetectionio/blueprint/ui/edit.py index b491d854..bdee4725 100644 --- a/changedetectionio/blueprint/ui/edit.py +++ b/changedetectionio/blueprint/ui/edit.py @@ -9,6 +9,7 @@ from jinja2 import Environment, FileSystemLoader from changedetectionio.store import ChangeDetectionStore from changedetectionio.auth_decorator import login_optionally_required from 
changedetectionio.time_handler import is_within_schedule +from changedetectionio import worker_handler def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMetaData): edit_blueprint = Blueprint('ui_edit', __name__, template_folder="../ui/templates") @@ -201,7 +202,7 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe ############################# if not datastore.data['watching'][uuid].get('paused') and is_in_schedule: # Queue the watch for immediate recheck, with a higher priority - update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid})) + worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid})) # Diff page [edit] link should go back to diff page if request.args.get("next") and request.args.get("next") == 'diff': diff --git a/changedetectionio/blueprint/ui/views.py b/changedetectionio/blueprint/ui/views.py index efcdc03a..7954a197 100644 --- a/changedetectionio/blueprint/ui/views.py +++ b/changedetectionio/blueprint/ui/views.py @@ -7,6 +7,7 @@ from copy import deepcopy from changedetectionio.store import ChangeDetectionStore from changedetectionio.auth_decorator import login_optionally_required from changedetectionio import html_tools +from changedetectionio import worker_handler def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMetaData, watch_check_update): views_blueprint = Blueprint('ui_views', __name__, template_folder="../ui/templates") @@ -212,7 +213,7 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe return redirect(url_for('ui.ui_edit.edit_page', uuid=new_uuid, unpause_on_save=1, tag=request.args.get('tag'))) else: # Straight into the queue. 
- update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': new_uuid})) + worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': new_uuid})) flash("Watch added.") return redirect(url_for('watchlist.index', tag=request.args.get('tag',''))) diff --git a/changedetectionio/blueprint/watchlist/__init__.py b/changedetectionio/blueprint/watchlist/__init__.py index bd3b6c98..8cd5423a 100644 --- a/changedetectionio/blueprint/watchlist/__init__.py +++ b/changedetectionio/blueprint/watchlist/__init__.py @@ -78,7 +78,6 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe active_tag=active_tag, active_tag_uuid=active_tag_uuid, app_rss_token=datastore.data['settings']['application'].get('rss_access_token'), - ajax_toggle_url=url_for('ui.ajax.ajax_toggler'), datastore=datastore, errored_count=errored_count, form=form, diff --git a/changedetectionio/blueprint/watchlist/templates/watch-overview.html b/changedetectionio/blueprint/watchlist/templates/watch-overview.html index 49fd2bd3..728a204c 100644 --- a/changedetectionio/blueprint/watchlist/templates/watch-overview.html +++ b/changedetectionio/blueprint/watchlist/templates/watch-overview.html @@ -1,11 +1,15 @@ -{% extends 'base.html' %} -{% block content %} -{% from '_helpers.html' import render_simple_field, render_field, render_nolabel_field, sort_by_title %} +{%- extends 'base.html' -%} +{%- block content -%} +{%- from '_helpers.html' import render_simple_field, render_field, render_nolabel_field, sort_by_title -%} <script src="{{url_for('static_content', group='js', filename='jquery-3.6.0.min.js')}}"></script> <script src="{{url_for('static_content', group='js', filename='watch-overview.js')}}" defer></script> <script>let nowtimeserver={{ now_time_server }};</script> -<script>let ajax_toggle_url="{{ ajax_toggle_url }}";</script> - +<script> +// Initialize Feather icons after the page loads 
+document.addEventListener('DOMContentLoaded', function() { + feather.replace(); +}); +</script> <style> .checking-now .last-checked { background-image: linear-gradient(to bottom, transparent 0%, rgba(0,0,0,0.05) 40%, rgba(0,0,0,0.1) 100%); @@ -39,139 +43,143 @@ <input type="hidden" name="csrf_token" value="{{ csrf_token() }}" > <input type="hidden" id="op_extradata" name="op_extradata" value="" > <div id="checkbox-operations"> - <button class="pure-button button-secondary button-xsmall" name="op" value="pause">Pause</button> - <button class="pure-button button-secondary button-xsmall" name="op" value="unpause">UnPause</button> - <button class="pure-button button-secondary button-xsmall" name="op" value="mute">Mute</button> - <button class="pure-button button-secondary button-xsmall" name="op" value="unmute">UnMute</button> - <button class="pure-button button-secondary button-xsmall" name="op" value="recheck">Recheck</button> - <button class="pure-button button-secondary button-xsmall" name="op" value="assign-tag" id="checkbox-assign-tag">Tag</button> - <button class="pure-button button-secondary button-xsmall" name="op" value="mark-viewed">Mark viewed</button> - <button class="pure-button button-secondary button-xsmall" name="op" value="notification-default">Use default notification</button> - <button class="pure-button button-secondary button-xsmall" name="op" value="clear-errors">Clear errors</button> - <button class="pure-button button-secondary button-xsmall" style="background: #dd4242;" name="op" value="clear-history">Clear/reset history</button> - <button class="pure-button button-secondary button-xsmall" style="background: #dd4242;" name="op" value="delete">Delete</button> + <button class="pure-button button-secondary button-xsmall" name="op" value="pause"><i data-feather="pause" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Pause</button> + <button class="pure-button button-secondary button-xsmall" name="op" value="unpause"><i 
data-feather="play" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>UnPause</button> + <button class="pure-button button-secondary button-xsmall" name="op" value="mute"><i data-feather="volume-x" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Mute</button> + <button class="pure-button button-secondary button-xsmall" name="op" value="unmute"><i data-feather="volume-2" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>UnMute</button> + <button class="pure-button button-secondary button-xsmall" name="op" value="recheck"><i data-feather="refresh-cw" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Recheck</button> + <button class="pure-button button-secondary button-xsmall" name="op" value="assign-tag" id="checkbox-assign-tag"><i data-feather="tag" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Tag</button> + <button class="pure-button button-secondary button-xsmall" name="op" value="mark-viewed"><i data-feather="eye" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Mark viewed</button> + <button class="pure-button button-secondary button-xsmall" name="op" value="notification-default"><i data-feather="bell" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Use default notification</button> + <button class="pure-button button-secondary button-xsmall" name="op" value="clear-errors"><i data-feather="x-circle" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Clear errors</button> + <button class="pure-button button-secondary button-xsmall" style="background: #dd4242;" name="op" value="clear-history"><i data-feather="trash-2" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Clear/reset history</button> + <button class="pure-button button-secondary button-xsmall" style="background: #dd4242;" name="op" value="delete"><i data-feather="trash" style="width: 14px; 
height: 14px; stroke: white; margin-right: 4px;"></i>Delete</button> </div> - {% if watches|length >= pagination.per_page %} + {%- if watches|length >= pagination.per_page -%} {{ pagination.info }} - {% endif %} - {% if search_q %}<div id="search-result-info">Searching "<strong><i>{{search_q}}</i></strong>"</div>{% endif %} + {%- endif -%} + {%- if search_q -%}<div id="search-result-info">Searching "<strong><i>{{search_q}}</i></strong>"</div>{%- endif -%} <div> <a href="{{url_for('watchlist.index')}}" class="pure-button button-tag {{'active' if not active_tag_uuid }}">All</a> <!-- tag list --> - {% for uuid, tag in tags %} - {% if tag != "" %} + {%- for uuid, tag in tags -%} + {%- if tag != "" -%} <a href="{{url_for('watchlist.index', tag=uuid) }}" class="pure-button button-tag {{'active' if active_tag_uuid == uuid }}">{{ tag.title }}</a> - {% endif %} - {% endfor %} + {%- endif -%} + {%- endfor -%} </div> - {% set sort_order = sort_order or 'asc' %} - {% set sort_attribute = sort_attribute or 'last_changed' %} - {% set pagination_page = request.args.get('page', 0) %} - {% set cols_required = 6 %} - {% set any_has_restock_price_processor = datastore.any_watches_have_processor_by_name("restock_diff") %} - {% if any_has_restock_price_processor %} - {% set cols_required = cols_required + 1 %} - {% endif %} + {%- set sort_order = sort_order or 'asc' -%} + {%- set sort_attribute = sort_attribute or 'last_changed' -%} + {%- set pagination_page = request.args.get('page', 0) -%} + {%- set cols_required = 6 -%} + {%- set any_has_restock_price_processor = datastore.any_watches_have_processor_by_name("restock_diff") -%} + {%- if any_has_restock_price_processor -%} + {%- set cols_required = cols_required + 1 -%} + {%- endif -%} <div id="watch-table-wrapper"> <table class="pure-table pure-table-striped watch-table"> <thead> <tr> - {% set link_order = "desc" if sort_order == 'asc' else "asc" %} - {% set arrow_span = "" %} + {%- set link_order = "desc" if sort_order == 'asc' else 
"asc" -%} + {%- set arrow_span = "" -%} <th><input style="vertical-align: middle" type="checkbox" id="check-all" > <a class="{{ 'active '+link_order if sort_attribute == 'date_created' else 'inactive' }}" href="{{url_for('watchlist.index', sort='date_created', order=link_order, tag=active_tag_uuid)}}"># <span class='arrow {{link_order}}'></span></a></th> <th class="empty-cell"></th> <th><a class="{{ 'active '+link_order if sort_attribute == 'label' else 'inactive' }}" href="{{url_for('watchlist.index', sort='label', order=link_order, tag=active_tag_uuid)}}">Website <span class='arrow {{link_order}}'></span></a></th> - {% if any_has_restock_price_processor %} + {%- if any_has_restock_price_processor -%} <th>Restock & Price</th> - {% endif %} + {%- endif -%} <th><a class="{{ 'active '+link_order if sort_attribute == 'last_checked' else 'inactive' }}" href="{{url_for('watchlist.index', sort='last_checked', order=link_order, tag=active_tag_uuid)}}"><span class="hide-on-mobile">Last</span> Checked <span class='arrow {{link_order}}'></span></a></th> <th><a class="{{ 'active '+link_order if sort_attribute == 'last_changed' else 'inactive' }}" href="{{url_for('watchlist.index', sort='last_changed', order=link_order, tag=active_tag_uuid)}}"><span class="hide-on-mobile">Last</span> Changed <span class='arrow {{link_order}}'></span></a></th> <th class="empty-cell"></th> </tr> </thead> <tbody> - {% if not watches|length %} + {%- if not watches|length -%} <tr> <td colspan="{{ cols_required }}" style="text-wrap: wrap;">No website watches configured, please add a URL in the box above, or <a href="{{ url_for('imports.import_page')}}" >import a list</a>.</td> </tr> - {% endif %} - {% for watch in (watches|sort(attribute=sort_attribute, reverse=sort_order == 'asc'))|pagination_slice(skip=pagination.skip) %} - - {% set checking_now = is_checking_now(watch) %} - <tr id="{{ watch.uuid }}" data-watch-uuid="{{ watch.uuid }}" - class="{{ loop.cycle('pure-table-odd', 'pure-table-even') }} 
processor-{{ watch['processor'] }} - {# realtime.js also sets these vars on the row for update #} - {% if watch.compile_error_texts()|length >2 %}has-error{% endif %} - {% if watch.paused is defined and watch.paused != False %}paused{% endif %} - {% if watch.has_unviewed %}unviewed{% endif %} - {% if watch.has_restock_info %} has-restock-info {% if watch['restock']['in_stock'] %}in-stock{% else %}not-in-stock{% endif %} {% else %}no-restock-info{% endif %} - {% if watch.uuid in queued_uuids %}queued{% endif %} - {% if checking_now %}checking-now{% endif %} - {% if watch.notification_muted %}notification_muted{% endif %} - "> + {%- endif -%} + {%- for watch in (watches|sort(attribute=sort_attribute, reverse=sort_order == 'asc'))|pagination_slice(skip=pagination.skip) -%} + {%- set checking_now = is_checking_now(watch) -%} + {%- set history_n = watch.history_n -%} + {# Mirror in changedetectionio/static/js/realtime.js for the frontend #} + {%- set row_classes = [ + loop.cycle('pure-table-odd', 'pure-table-even'), + 'processor-' ~ watch['processor'], + 'has-error' if watch.compile_error_texts()|length > 2 else '', + 'paused' if watch.paused is defined and watch.paused != False else '', + 'unviewed' if watch.has_unviewed else '', + 'has-restock-info' if watch.has_restock_info else 'no-restock-info', + 'in-stock' if watch.has_restock_info and watch['restock']['in_stock'] else '', + 'not-in-stock' if watch.has_restock_info and not watch['restock']['in_stock'] else '', + 'queued' if watch.uuid in queued_uuids else '', + 'checking-now' if checking_now else '', + 'notification_muted' if watch.notification_muted else '', + 'single-history' if history_n == 1 else '', + 'multiple-history' if history_n >= 2 else '' + ] -%} + <tr id="{{ watch.uuid }}" data-watch-uuid="{{ watch.uuid }}" class="{{ row_classes | reject('equalto', '') | join(' ') }}"> <td class="inline checkbox-uuid" ><input name="uuids" type="checkbox" value="{{ watch.uuid}} " > <span>{{ loop.index+pagination.skip 
}}</span></td> <td class="inline watch-controls"> <a class="ajax-op state-off pause-toggle" data-op="pause" href="{{url_for('watchlist.index', op='pause', uuid=watch.uuid, tag=active_tag_uuid)}}"><img src="{{url_for('static_content', group='images', filename='pause.svg')}}" alt="Pause checks" title="Pause checks" class="icon icon-pause" ></a> <a class="ajax-op state-on pause-toggle" data-op="pause" style="display: none" href="{{url_for('watchlist.index', op='pause', uuid=watch.uuid, tag=active_tag_uuid)}}"><img src="{{url_for('static_content', group='images', filename='play.svg')}}" alt="UnPause checks" title="UnPause checks" class="icon icon-unpause" ></a> - <a class="ajax-op state-off mute-toggle" data-op="mute" href="{{url_for('watchlist.index', op='mute', uuid=watch.uuid, tag=active_tag_uuid)}}"><img src="{{url_for('static_content', group='images', filename='bell-off.svg')}}" alt="Mute notification" title="Mute notification" class="icon icon-mute" ></a> <a class="ajax-op state-on mute-toggle" data-op="mute" style="display: none" href="{{url_for('watchlist.index', op='mute', uuid=watch.uuid, tag=active_tag_uuid)}}"><img src="{{url_for('static_content', group='images', filename='bell-off.svg')}}" alt="UnMute notification" title="UnMute notification" class="icon icon-mute" ></a> </td> <td class="title-col inline">{{watch.title if watch.title is not none and watch.title|length > 0 else watch.url}} - <a class="external" target="_blank" rel="noopener" href="{{ watch.link.replace('source:','') }}"></a> + <a class="external" target="_blank" rel="noopener" href="{{ watch.link.replace('source:','') }}"><i data-feather="external-link"></i></a> <a class="link-spread" href="{{url_for('ui.form_share_put_watch', uuid=watch.uuid)}}"><img src="{{url_for('static_content', group='images', filename='spread.svg')}}" class="status-icon icon icon-spread" title="Create a link to share watch config with others" ></a> - {% if watch.get_fetch_backend == "html_webdriver" + {%- if 
watch.get_fetch_backend == "html_webdriver" or ( watch.get_fetch_backend == "system" and system_default_fetcher == 'html_webdriver' ) or "extra_browser_" in watch.get_fetch_backend - %} + -%} <img class="status-icon" src="{{url_for('static_content', group='images', filename='google-chrome-icon.png')}}" alt="Using a Chrome browser" title="Using a Chrome browser" > - {% endif %} + {%- endif -%} - {% if watch.is_pdf %}<img class="status-icon" src="{{url_for('static_content', group='images', filename='pdf-icon.svg')}}" alt="Converting PDF to text" >{% endif %} - {% if watch.has_browser_steps %}<img class="status-icon status-browsersteps" src="{{url_for('static_content', group='images', filename='steps.svg')}}" alt="Browser Steps is enabled" >{% endif %} + {%- if watch.is_pdf -%}<img class="status-icon" src="{{url_for('static_content', group='images', filename='pdf-icon.svg')}}" alt="Converting PDF to text" >{%- endif -%} + {%- if watch.has_browser_steps -%}<img class="status-icon status-browsersteps" src="{{url_for('static_content', group='images', filename='steps.svg')}}" alt="Browser Steps is enabled" >{%- endif -%} <div class="error-text" style="display:none;">{{ watch.compile_error_texts(has_proxies=datastore.proxy_list)|safe }}</div> - {% if watch['processor'] == 'text_json_diff' %} - {% if watch['has_ldjson_price_data'] and not watch['track_ldjson_price_data'] %} + {%- if watch['processor'] == 'text_json_diff' -%} + {%- if watch['has_ldjson_price_data'] and not watch['track_ldjson_price_data'] -%} <div class="ldjson-price-track-offer">Switch to Restock & Price watch mode? 
<a href="{{url_for('price_data_follower.accept', uuid=watch.uuid)}}" class="pure-button button-xsmall">Yes</a> <a href="{{url_for('price_data_follower.reject', uuid=watch.uuid)}}" class="">No</a></div> - {% endif %} - {% endif %} - {% if watch['processor'] == 'restock_diff' %} + {%- endif -%} + {%- endif -%} + {%- if watch['processor'] == 'restock_diff' -%} <span class="tracking-ldjson-price-data" title="Automatically following embedded price information"><img src="{{url_for('static_content', group='images', filename='price-tag-icon.svg')}}" class="status-icon price-follow-tag-icon" > Price</span> - {% endif %} - {% for watch_tag_uuid, watch_tag in datastore.get_all_tags_for_watch(watch['uuid']).items() %} + {%- endif -%} + {%- for watch_tag_uuid, watch_tag in datastore.get_all_tags_for_watch(watch['uuid']).items() -%} <span class="watch-tag-list">{{ watch_tag.title }}</span> - {% endfor %} + {%- endfor -%} </td> - <!-- @todo make it so any watch handler obj can expose this ---> -{% if any_has_restock_price_processor %} +{%- if any_has_restock_price_processor -%} <td class="restock-and-price"> - {% if watch['processor'] == 'restock_diff' %} - {% if watch.has_restock_info %} + {%- if watch['processor'] == 'restock_diff' -%} + {%- if watch.has_restock_info -%} <span class="restock-label {{'in-stock' if watch['restock']['in_stock'] else 'not-in-stock' }}" title="Detecting restock and price"> <!-- maybe some object watch['processor'][restock_diff] or.. 
--> - {% if watch['restock']['in_stock'] %} In stock {% else %} Not in stock {% endif %} + {%- if watch['restock']['in_stock']-%} In stock {%- else-%} Not in stock {%- endif -%} </span> - {% endif %} + {%- endif -%} - {% if watch.get('restock') and watch['restock']['price'] != None %} - {% if watch['restock']['price'] != None %} + {%- if watch.get('restock') and watch['restock']['price'] != None -%} + {%- if watch['restock']['price'] != None -%} <span class="restock-label price" title="Price"> {{ watch['restock']['price']|format_number_locale }} {{ watch['restock']['currency'] }} </span> - {% endif %} - {% elif not watch.has_restock_info %} + {%- endif -%} + {%- elif not watch.has_restock_info -%} <span class="restock-label error">No information</span> - {% endif %} - {% endif %} + {%- endif -%} + {%- endif -%} </td> -{% endif %} +{%- endif -%} {#last_checked becomes fetch-start-time#} <td class="last-checked" data-timestamp="{{ watch.last_checked }}" data-fetchduration={{ watch.fetch_time }} data-eta_complete="{{ watch.last_checked+watch.fetch_time }}" > <div class="spinner-wrapper" style="display:none;" > @@ -179,51 +187,34 @@ </div> <span class="innertext">{{watch|format_last_checked_time|safe}}</span> </td> - - - <td class="last-changed" data-timestamp="{{ watch.last_changed }}">{% if watch.history_n >=2 and watch.last_changed >0 %} + <td class="last-changed" data-timestamp="{{ watch.last_changed }}">{%- if watch.history_n >=2 and watch.last_changed >0 -%} {{watch.last_changed|format_timestamp_timeago}} - {% else %} + {%- else -%} Not yet - {% endif %} + {%- endif -%} </td> <td> + {%- set target_attr = ' target="' ~ watch.uuid ~ '"' if datastore.data['settings']['application']['ui'].get('open_diff_in_new_tab') else '' -%} <a href="" class="already-in-queue-button recheck pure-button pure-button-primary" style="display: none;" disabled="disabled">Queued</a> - <a href="{{ url_for('ui.form_watch_checknow', uuid=watch.uuid, tag=request.args.get('tag')) }}" 
data-op='recheck' class="ajax-op recheck pure-button pure-button-primary">Recheck</a> <a href="{{ url_for('ui.ui_edit.edit_page', uuid=watch.uuid, tag=active_tag_uuid)}}#general" class="pure-button pure-button-primary">Edit</a> - - {% if watch.history_n >= 2 %} - - {% set open_diff_in_new_tab = datastore.data['settings']['application']['ui'].get('open_diff_in_new_tab') %} - {% set target_attr = ' target="' ~ watch.uuid ~ '"' if open_diff_in_new_tab else '' %} - - {% if watch.has_unviewed %} - <a href="{{ url_for('ui.ui_views.diff_history_page', uuid=watch.uuid, from_version=watch.get_from_version_based_on_last_viewed) }}" {{target_attr}} class="pure-button pure-button-primary diff-link">History</a> - {% else %} - <a href="{{ url_for('ui.ui_views.diff_history_page', uuid=watch.uuid)}}" {{target_attr}} class="pure-button pure-button-primary diff-link">History</a> - {% endif %} - - {% else %} - {% if watch.history_n == 1 or (watch.history_n ==0 and watch.error_text_ctime )%} - <a href="{{ url_for('ui.ui_views.preview_page', uuid=watch.uuid)}}" {{target_attr}} class="pure-button pure-button-primary">Preview</a> - {% endif %} - {% endif %} + <a href="{{ url_for('ui.ui_views.diff_history_page', uuid=watch.uuid)}}" {{target_attr}} class="pure-button pure-button-primary history-link" style="display: none;">History</a> + <a href="{{ url_for('ui.ui_views.preview_page', uuid=watch.uuid)}}" {{target_attr}} class="pure-button pure-button-primary preview-link" style="display: none;">Preview</a> </td> </tr> - {% endfor %} + {%- endfor -%} </tbody> </table> <ul id="post-list-buttons"> - <li id="post-list-with-errors" class="{% if errored_count %}has-error{% endif %}" style="display: none;" > + <li id="post-list-with-errors" class="{%- if errored_count -%}has-error{%- endif -%}" style="display: none;" > <a href="{{url_for('watchlist.index', with_errors=1, tag=request.args.get('tag')) }}" class="pure-button button-tag button-error">With errors ({{ errored_count }})</a> </li> - <li 
id="post-list-mark-views" class="{% if has_unviewed %}has-unviewed{% endif %}" style="display: none;" > + <li id="post-list-mark-views" class="{%- if has_unviewed -%}has-unviewed{%- endif -%}" style="display: none;" > <a href="{{url_for('ui.mark_all_viewed',with_errors=request.args.get('with_errors',0)) }}" class="pure-button button-tag " id="mark-all-viewed">Mark all viewed</a> </li> <li> <a href="{{ url_for('ui.form_watch_checknow', tag=active_tag_uuid, with_errors=request.args.get('with_errors',0)) }}" class="pure-button button-tag" id="recheck-all">Recheck - all {% if active_tag_uuid %} in "{{active_tag.title}}"{%endif%}</a> + all {%- if active_tag_uuid-%} in "{{active_tag.title}}"{%endif%}</a> </li> <li> <a href="{{ url_for('rss.feed', tag=active_tag_uuid, token=app_rss_token)}}"><img alt="RSS Feed" id="feed-icon" src="{{url_for('static_content', group='images', filename='generic_feed-icon.svg')}}" height="15"></a> @@ -233,4 +224,4 @@ </div> </form> </div> -{% endblock %} \ No newline at end of file +{%- endblock -%} \ No newline at end of file diff --git a/changedetectionio/content_fetchers/base.py b/changedetectionio/content_fetchers/base.py index bfa7e83c..1abce26d 100644 --- a/changedetectionio/content_fetchers/base.py +++ b/changedetectionio/content_fetchers/base.py @@ -68,7 +68,7 @@ class Fetcher(): return self.error @abstractmethod - def run(self, + async def run(self, url, timeout, request_headers, @@ -122,7 +122,7 @@ class Fetcher(): return None - def iterate_browser_steps(self, start_url=None): + async def iterate_browser_steps(self, start_url=None): from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface from playwright._impl._errors import TimeoutError, Error from changedetectionio.safe_jinja import render as jinja_render @@ -136,8 +136,8 @@ class Fetcher(): for step in valid_steps: step_n += 1 logger.debug(f">> Iterating check - browser Step n {step_n} - {step['operation']}...") - 
self.screenshot_step("before-" + str(step_n)) - self.save_step_html("before-" + str(step_n)) + await self.screenshot_step("before-" + str(step_n)) + await self.save_step_html("before-" + str(step_n)) try: optional_value = step['optional_value'] @@ -148,11 +148,11 @@ class Fetcher(): if '{%' in step['selector'] or '{{' in step['selector']: selector = jinja_render(template_str=step['selector']) - getattr(interface, "call_action")(action_name=step['operation'], + await getattr(interface, "call_action")(action_name=step['operation'], selector=selector, optional_value=optional_value) - self.screenshot_step(step_n) - self.save_step_html(step_n) + await self.screenshot_step(step_n) + await self.save_step_html(step_n) except (Error, TimeoutError) as e: logger.debug(str(e)) # Stop processing here diff --git a/changedetectionio/content_fetchers/playwright.py b/changedetectionio/content_fetchers/playwright.py index bb8ade18..c5b5bd31 100644 --- a/changedetectionio/content_fetchers/playwright.py +++ b/changedetectionio/content_fetchers/playwright.py @@ -9,15 +9,15 @@ from changedetectionio.content_fetchers import SCREENSHOT_MAX_HEIGHT_DEFAULT, vi from changedetectionio.content_fetchers.base import Fetcher, manage_user_agent from changedetectionio.content_fetchers.exceptions import PageUnloadable, Non200ErrorCodeReceived, EmptyReply, ScreenshotUnavailable -def capture_full_page(page): +async def capture_full_page_async(page): import os import time from multiprocessing import Process, Pipe start = time.time() - page_height = page.evaluate("document.documentElement.scrollHeight") - page_width = page.evaluate("document.documentElement.scrollWidth") + page_height = await page.evaluate("document.documentElement.scrollHeight") + page_width = await page.evaluate("document.documentElement.scrollWidth") original_viewport = page.viewport_size logger.debug(f"Playwright viewport size {page.viewport_size} page height {page_height} page width {page_width}") @@ -32,23 +32,23 @@ def 
capture_full_page(page): step_size = page_height # Incase page is bigger than default viewport but smaller than proposed step size logger.debug(f"Setting bigger viewport to step through large page width W{page.viewport_size['width']}xH{step_size} because page_height > viewport_size") # Set viewport to a larger size to capture more content at once - page.set_viewport_size({'width': page.viewport_size['width'], 'height': step_size}) + await page.set_viewport_size({'width': page.viewport_size['width'], 'height': step_size}) # Capture screenshots in chunks up to the max total height while y < min(page_height, SCREENSHOT_MAX_TOTAL_HEIGHT): - page.request_gc() - page.evaluate(f"window.scrollTo(0, {y})") - page.request_gc() - screenshot_chunks.append(page.screenshot( + await page.request_gc() + await page.evaluate(f"window.scrollTo(0, {y})") + await page.request_gc() + screenshot_chunks.append(await page.screenshot( type="jpeg", full_page=False, quality=int(os.getenv("SCREENSHOT_QUALITY", 72)) )) y += step_size - page.request_gc() + await page.request_gc() # Restore original viewport size - page.set_viewport_size({'width': original_viewport['width'], 'height': original_viewport['height']}) + await page.set_viewport_size({'width': original_viewport['width'], 'height': original_viewport['height']}) # If we have multiple chunks, stitch them together if len(screenshot_chunks) > 1: @@ -73,7 +73,6 @@ def capture_full_page(page): return screenshot_chunks[0] - class fetcher(Fetcher): fetcher_description = "Playwright {}/Javascript".format( os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').capitalize() @@ -124,9 +123,9 @@ class fetcher(Fetcher): self.proxy['username'] = parsed.username self.proxy['password'] = parsed.password - def screenshot_step(self, step_n=''): + async def screenshot_step(self, step_n=''): super().screenshot_step(step_n=step_n) - screenshot = capture_full_page(page=self.page) + screenshot = await capture_full_page_async(page=self.page) if 
self.browser_steps_screenshot_path is not None: @@ -135,15 +134,15 @@ class fetcher(Fetcher): with open(destination, 'wb') as f: f.write(screenshot) - def save_step_html(self, step_n): + async def save_step_html(self, step_n): super().save_step_html(step_n=step_n) - content = self.page.content() + content = await self.page.content() destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.html'.format(step_n)) logger.debug(f"Saving step HTML to {destination}") with open(destination, 'w') as f: f.write(content) - def run(self, + async def run(self, url, timeout, request_headers, @@ -154,26 +153,26 @@ class fetcher(Fetcher): is_binary=False, empty_pages_are_a_change=False): - from playwright.sync_api import sync_playwright + from playwright.async_api import async_playwright import playwright._impl._errors import time self.delete_browser_steps_screenshots() response = None - with sync_playwright() as p: + async with async_playwright() as p: browser_type = getattr(p, self.browser_type) # Seemed to cause a connection Exception even tho I can see it connect # self.browser = browser_type.connect(self.command_executor, timeout=timeout*1000) # 60,000 connection timeout only - browser = browser_type.connect_over_cdp(self.browser_connection_url, timeout=60000) + browser = await browser_type.connect_over_cdp(self.browser_connection_url, timeout=60000) # SOCKS5 with authentication is not supported (yet) # https://github.com/microsoft/playwright/issues/10567 # Set user agent to prevent Cloudflare from blocking the browser # Use the default one configured in the App.py model that's passed from fetch_site_status.py - context = browser.new_context( + context = await browser.new_context( accept_downloads=False, # Should never be needed bypass_csp=True, # This is needed to enable JavaScript execution on GitHub and others extra_http_headers=request_headers, @@ -183,7 +182,7 @@ class fetcher(Fetcher): user_agent=manage_user_agent(headers=request_headers), ) - self.page = 
context.new_page() + self.page = await context.new_page() # Listen for all console events and handle errors self.page.on("console", lambda msg: logger.debug(f"Playwright console: Watch URL: {url} {msg.type}: {msg.text} {msg.args}")) @@ -193,32 +192,37 @@ class fetcher(Fetcher): browsersteps_interface = steppable_browser_interface(start_url=url) browsersteps_interface.page = self.page - response = browsersteps_interface.action_goto_url(value=url) + response = await browsersteps_interface.action_goto_url(value=url) if response is None: - context.close() - browser.close() + await context.close() + await browser.close() logger.debug("Content Fetcher > Response object from the browser communication was none") raise EmptyReply(url=url, status_code=None) - self.headers = response.all_headers() + # In async_playwright, all_headers() returns a coroutine + try: + self.headers = await response.all_headers() + except TypeError: + # Fallback for sync version + self.headers = response.all_headers() try: if self.webdriver_js_execute_code is not None and len(self.webdriver_js_execute_code): - browsersteps_interface.action_execute_js(value=self.webdriver_js_execute_code, selector=None) + await browsersteps_interface.action_execute_js(value=self.webdriver_js_execute_code, selector=None) except playwright._impl._errors.TimeoutError as e: - context.close() - browser.close() + await context.close() + await browser.close() # This can be ok, we will try to grab what we could retrieve pass except Exception as e: logger.debug(f"Content Fetcher > Other exception when executing custom JS code {str(e)}") - context.close() - browser.close() + await context.close() + await browser.close() raise PageUnloadable(url=url, status_code=None, message=str(e)) extra_wait = int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay - self.page.wait_for_timeout(extra_wait * 1000) + await self.page.wait_for_timeout(extra_wait * 1000) try: self.status_code = response.status @@ 
-226,48 +230,48 @@ class fetcher(Fetcher): # https://github.com/dgtlmoon/changedetection.io/discussions/2122#discussioncomment-8241962 logger.critical(f"Response from the browser/Playwright did not have a status_code! Response follows.") logger.critical(response) - context.close() - browser.close() + await context.close() + await browser.close() raise PageUnloadable(url=url, status_code=None, message=str(e)) if self.status_code != 200 and not ignore_status_codes: - screenshot = capture_full_page(self.page) + screenshot = await capture_full_page_async(self.page) raise Non200ErrorCodeReceived(url=url, status_code=self.status_code, screenshot=screenshot) - if not empty_pages_are_a_change and len(self.page.content().strip()) == 0: + if not empty_pages_are_a_change and len((await self.page.content()).strip()) == 0: logger.debug("Content Fetcher > Content was empty, empty_pages_are_a_change = False") - context.close() - browser.close() + await context.close() + await browser.close() raise EmptyReply(url=url, status_code=response.status) # Run Browser Steps here if self.browser_steps_get_valid_steps(): - self.iterate_browser_steps(start_url=url) + await self.iterate_browser_steps(start_url=url) - self.page.wait_for_timeout(extra_wait * 1000) + await self.page.wait_for_timeout(extra_wait * 1000) now = time.time() # So we can find an element on the page where its selector was entered manually (maybe not xPath etc) if current_include_filters is not None: - self.page.evaluate("var include_filters={}".format(json.dumps(current_include_filters))) + await self.page.evaluate("var include_filters={}".format(json.dumps(current_include_filters))) else: - self.page.evaluate("var include_filters=''") - self.page.request_gc() + await self.page.evaluate("var include_filters=''") + await self.page.request_gc() # request_gc before and after evaluate to free up memory # @todo browsersteps etc MAX_TOTAL_HEIGHT = int(os.getenv("SCREENSHOT_MAX_HEIGHT", SCREENSHOT_MAX_HEIGHT_DEFAULT)) - 
self.xpath_data = self.page.evaluate(XPATH_ELEMENT_JS, { + self.xpath_data = await self.page.evaluate(XPATH_ELEMENT_JS, { "visualselector_xpath_selectors": visualselector_xpath_selectors, "max_height": MAX_TOTAL_HEIGHT }) - self.page.request_gc() + await self.page.request_gc() - self.instock_data = self.page.evaluate(INSTOCK_DATA_JS) - self.page.request_gc() + self.instock_data = await self.page.evaluate(INSTOCK_DATA_JS) + await self.page.request_gc() - self.content = self.page.content() - self.page.request_gc() + self.content = await self.page.content() + await self.page.request_gc() logger.debug(f"Scrape xPath element data in browser done in {time.time() - now:.2f}s") # Bug 3 in Playwright screenshot handling @@ -279,7 +283,7 @@ class fetcher(Fetcher): # acceptable screenshot quality here try: # The actual screenshot - this always base64 and needs decoding! horrible! huge CPU usage - self.screenshot = capture_full_page(page=self.page) + self.screenshot = await capture_full_page_async(page=self.page) except Exception as e: # It's likely the screenshot was too long/big and something crashed @@ -287,30 +291,30 @@ class fetcher(Fetcher): finally: # Request garbage collection one more time before closing try: - self.page.request_gc() + await self.page.request_gc() except: pass # Clean up resources properly try: - self.page.request_gc() + await self.page.request_gc() except: pass try: - self.page.close() + await self.page.close() except: pass self.page = None try: - context.close() + await context.close() except: pass context = None try: - browser.close() + await browser.close() except: pass browser = None diff --git a/changedetectionio/content_fetchers/puppeteer.py b/changedetectionio/content_fetchers/puppeteer.py index 22b68569..d62d308d 100644 --- a/changedetectionio/content_fetchers/puppeteer.py +++ b/changedetectionio/content_fetchers/puppeteer.py @@ -310,15 +310,15 @@ class fetcher(Fetcher): async def main(self, **kwargs): await self.fetch_page(**kwargs) - def 
run(self, url, timeout, request_headers, request_body, request_method, ignore_status_codes=False, + async def run(self, url, timeout, request_headers, request_body, request_method, ignore_status_codes=False, current_include_filters=None, is_binary=False, empty_pages_are_a_change=False): #@todo make update_worker async which could run any of these content_fetchers within memory and time constraints - max_time = os.getenv('PUPPETEER_MAX_PROCESSING_TIMEOUT_SECONDS', 180) + max_time = int(os.getenv('PUPPETEER_MAX_PROCESSING_TIMEOUT_SECONDS', 180)) - # This will work in 3.10 but not >= 3.11 because 3.11 wants tasks only + # Now we run this properly in async context since we're called from async worker try: - asyncio.run(asyncio.wait_for(self.main( + await asyncio.wait_for(self.main( url=url, timeout=timeout, request_headers=request_headers, @@ -328,7 +328,7 @@ class fetcher(Fetcher): current_include_filters=current_include_filters, is_binary=is_binary, empty_pages_are_a_change=empty_pages_are_a_change - ), timeout=max_time)) + ), timeout=max_time) except asyncio.TimeoutError: raise(BrowserFetchTimedOut(msg=f"Browser connected but was unable to process the page in {max_time} seconds.")) diff --git a/changedetectionio/content_fetchers/requests.py b/changedetectionio/content_fetchers/requests.py index 70b6c319..aba5ed0d 100644 --- a/changedetectionio/content_fetchers/requests.py +++ b/changedetectionio/content_fetchers/requests.py @@ -1,6 +1,7 @@ from loguru import logger import hashlib import os +import asyncio from changedetectionio import strtobool from changedetectionio.content_fetchers.exceptions import BrowserStepsInUnsupportedFetcher, EmptyReply, Non200ErrorCodeReceived from changedetectionio.content_fetchers.base import Fetcher @@ -15,7 +16,7 @@ class fetcher(Fetcher): self.proxy_override = proxy_override # browser_connection_url is none because its always 'launched locally' - def run(self, + def _run_sync(self, url, timeout, request_headers, @@ -25,6 +26,7 @@ 
class fetcher(Fetcher): current_include_filters=None, is_binary=False, empty_pages_are_a_change=False): + """Synchronous version of run - the original requests implementation""" import chardet import requests @@ -36,7 +38,6 @@ class fetcher(Fetcher): proxies = {} # Allows override the proxy on a per-request basis - # https://requests.readthedocs.io/en/latest/user/advanced/#socks # Should also work with `socks5://user:pass@host:port` type syntax. @@ -100,9 +101,38 @@ class fetcher(Fetcher): else: self.content = r.text - self.raw_content = r.content + async def run(self, + url, + timeout, + request_headers, + request_body, + request_method, + ignore_status_codes=False, + current_include_filters=None, + is_binary=False, + empty_pages_are_a_change=False): + """Async wrapper that runs the synchronous requests code in a thread pool""" + + loop = asyncio.get_event_loop() + + # Run the synchronous _run_sync in a thread pool to avoid blocking the event loop + await loop.run_in_executor( + None, # Use default ThreadPoolExecutor + lambda: self._run_sync( + url=url, + timeout=timeout, + request_headers=request_headers, + request_body=request_body, + request_method=request_method, + ignore_status_codes=ignore_status_codes, + current_include_filters=current_include_filters, + is_binary=is_binary, + empty_pages_are_a_change=empty_pages_are_a_change + ) + ) + def quit(self, watch=None): # In case they switched to `requests` fetcher from something else diff --git a/changedetectionio/content_fetchers/webdriver_selenium.py b/changedetectionio/content_fetchers/webdriver_selenium.py index 180d6332..48897d7a 100644 --- a/changedetectionio/content_fetchers/webdriver_selenium.py +++ b/changedetectionio/content_fetchers/webdriver_selenium.py @@ -47,7 +47,7 @@ class fetcher(Fetcher): self.proxy_url = k.strip() - def run(self, + async def run(self, url, timeout, request_headers, @@ -58,77 +58,86 @@ class fetcher(Fetcher): is_binary=False, empty_pages_are_a_change=False): - from 
selenium.webdriver.chrome.options import Options as ChromeOptions - # request_body, request_method unused for now, until some magic in the future happens. + import asyncio + + # Wrap the entire selenium operation in a thread executor + def _run_sync(): + from selenium.webdriver.chrome.options import Options as ChromeOptions + # request_body, request_method unused for now, until some magic in the future happens. - options = ChromeOptions() + options = ChromeOptions() - # Load Chrome options from env - CHROME_OPTIONS = [ - line.strip() - for line in os.getenv("CHROME_OPTIONS", "").strip().splitlines() - if line.strip() - ] + # Load Chrome options from env + CHROME_OPTIONS = [ + line.strip() + for line in os.getenv("CHROME_OPTIONS", "").strip().splitlines() + if line.strip() + ] - for opt in CHROME_OPTIONS: - options.add_argument(opt) + for opt in CHROME_OPTIONS: + options.add_argument(opt) - # 1. proxy_config /Proxy(proxy_config) selenium object is REALLY unreliable - # 2. selenium-wire cant be used because the websocket version conflicts with pypeteer-ng - # 3. selenium only allows ONE runner at a time by default! - # 4. driver must use quit() or it will continue to block/hold the selenium process!! + # 1. proxy_config /Proxy(proxy_config) selenium object is REALLY unreliable + # 2. selenium-wire cant be used because the websocket version conflicts with pypeteer-ng + # 3. selenium only allows ONE runner at a time by default! + # 4. driver must use quit() or it will continue to block/hold the selenium process!! 
- if self.proxy_url: - options.add_argument(f'--proxy-server={self.proxy_url}') + if self.proxy_url: + options.add_argument(f'--proxy-server={self.proxy_url}') - from selenium.webdriver.remote.remote_connection import RemoteConnection - from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver - driver = None - try: - # Create the RemoteConnection and set timeout (e.g., 30 seconds) - remote_connection = RemoteConnection( - self.browser_connection_url, - ) - remote_connection.set_timeout(30) # seconds + from selenium.webdriver.remote.remote_connection import RemoteConnection + from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver + driver = None + try: + # Create the RemoteConnection and set timeout (e.g., 30 seconds) + remote_connection = RemoteConnection( + self.browser_connection_url, + ) + remote_connection.set_timeout(30) # seconds - # Now create the driver with the RemoteConnection - driver = RemoteWebDriver( - command_executor=remote_connection, - options=options - ) + # Now create the driver with the RemoteConnection + driver = RemoteWebDriver( + command_executor=remote_connection, + options=options + ) - driver.set_page_load_timeout(int(os.getenv("WEBDRIVER_PAGELOAD_TIMEOUT", 45))) - except Exception as e: - if driver: - driver.quit() - raise e + driver.set_page_load_timeout(int(os.getenv("WEBDRIVER_PAGELOAD_TIMEOUT", 45))) + except Exception as e: + if driver: + driver.quit() + raise e - try: - driver.get(url) + try: + driver.get(url) - if not "--window-size" in os.getenv("CHROME_OPTIONS", ""): - driver.set_window_size(1280, 1024) + if not "--window-size" in os.getenv("CHROME_OPTIONS", ""): + driver.set_window_size(1280, 1024) - driver.implicitly_wait(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5))) - - if self.webdriver_js_execute_code is not None: - driver.execute_script(self.webdriver_js_execute_code) - # Selenium doesn't automatically wait for actions as good as Playwright, so wait again 
driver.implicitly_wait(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5))) - # @todo - how to check this? is it possible? - self.status_code = 200 - # @todo somehow we should try to get this working for WebDriver - # raise EmptyReply(url=url, status_code=r.status_code) + if self.webdriver_js_execute_code is not None: + driver.execute_script(self.webdriver_js_execute_code) + # Selenium doesn't automatically wait for actions as good as Playwright, so wait again + driver.implicitly_wait(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5))) + + # @todo - how to check this? is it possible? + self.status_code = 200 + # @todo somehow we should try to get this working for WebDriver + # raise EmptyReply(url=url, status_code=r.status_code) + + # @todo - dom wait loaded? + import time + time.sleep(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay) + self.content = driver.page_source + self.headers = {} + self.screenshot = driver.get_screenshot_as_png() + except Exception as e: + driver.quit() + raise e - # @todo - dom wait loaded? 
- time.sleep(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay) - self.content = driver.page_source - self.headers = {} - self.screenshot = driver.get_screenshot_as_png() - except Exception as e: driver.quit() - raise e - driver.quit() + # Run the selenium operations in a thread pool to avoid blocking the event loop + loop = asyncio.get_event_loop() + await loop.run_in_executor(None, _run_sync) diff --git a/changedetectionio/custom_queue.py b/changedetectionio/custom_queue.py index f5566fa5..feb1fbcd 100644 --- a/changedetectionio/custom_queue.py +++ b/changedetectionio/custom_queue.py @@ -1,4 +1,5 @@ import queue +import asyncio from blinker import signal from loguru import logger @@ -50,3 +51,450 @@ class SignalPriorityQueue(queue.PriorityQueue): except Exception as e: logger.critical(f"Exception: {e}") return item + + def get_uuid_position(self, target_uuid): + """ + Find the position of a watch UUID in the priority queue. + Optimized for large queues - O(n) complexity instead of O(n log n). 
+ + Args: + target_uuid: The UUID to search for + + Returns: + dict: Contains position info or None if not found + - position: 0-based position in queue (0 = next to be processed) + - total_items: total number of items in queue + - priority: the priority value of the found item + """ + with self.mutex: + queue_list = list(self.queue) + total_items = len(queue_list) + + if total_items == 0: + return { + 'position': None, + 'total_items': 0, + 'priority': None, + 'found': False + } + + # Find the target item and its priority first - O(n) + target_item = None + target_priority = None + + for item in queue_list: + if (hasattr(item, 'item') and + isinstance(item.item, dict) and + item.item.get('uuid') == target_uuid): + target_item = item + target_priority = item.priority + break + + if target_item is None: + return { + 'position': None, + 'total_items': total_items, + 'priority': None, + 'found': False + } + + # Count how many items have higher priority (lower numbers) - O(n) + position = 0 + for item in queue_list: + # Items with lower priority numbers are processed first + if item.priority < target_priority: + position += 1 + elif item.priority == target_priority and item != target_item: + # For same priority, count items that come before this one + # (Note: this is approximate since heap order isn't guaranteed for equal priorities) + position += 1 + + return { + 'position': position, + 'total_items': total_items, + 'priority': target_priority, + 'found': True + } + + def get_all_queued_uuids(self, limit=None, offset=0): + """ + Get UUIDs currently in the queue with their positions. + For large queues, use limit/offset for pagination. 
+ + Args: + limit: Maximum number of items to return (None = all) + offset: Number of items to skip (for pagination) + + Returns: + dict: Contains items and metadata + - items: List of dicts with uuid, position, and priority + - total_items: Total number of items in queue + - returned_items: Number of items returned + - has_more: Whether there are more items after this page + """ + with self.mutex: + queue_list = list(self.queue) + total_items = len(queue_list) + + if total_items == 0: + return { + 'items': [], + 'total_items': 0, + 'returned_items': 0, + 'has_more': False + } + + # For very large queues, warn about performance + if total_items > 1000 and limit is None: + logger.warning(f"Getting all {total_items} queued items without limit - this may be slow") + + # Sort only if we need exact positions (expensive for large queues) + if limit is not None and limit <= 100: + # For small requests, we can afford to sort + queue_items = sorted(queue_list) + end_idx = min(offset + limit, len(queue_items)) if limit else len(queue_items) + items_to_process = queue_items[offset:end_idx] + + result = [] + for position, item in enumerate(items_to_process, start=offset): + if (hasattr(item, 'item') and + isinstance(item.item, dict) and + 'uuid' in item.item): + + result.append({ + 'uuid': item.item['uuid'], + 'position': position, + 'priority': item.priority + }) + + return { + 'items': result, + 'total_items': total_items, + 'returned_items': len(result), + 'has_more': (offset + len(result)) < total_items + } + else: + # For large requests, return items with approximate positions + # This is much faster O(n) instead of O(n log n) + result = [] + processed = 0 + skipped = 0 + + for item in queue_list: + if (hasattr(item, 'item') and + isinstance(item.item, dict) and + 'uuid' in item.item): + + if skipped < offset: + skipped += 1 + continue + + if limit and processed >= limit: + break + + # Approximate position based on priority comparison + approx_position = sum(1 for other 
in queue_list if other.priority < item.priority) + + result.append({ + 'uuid': item.item['uuid'], + 'position': approx_position, # Approximate + 'priority': item.priority + }) + processed += 1 + + return { + 'items': result, + 'total_items': total_items, + 'returned_items': len(result), + 'has_more': (offset + len(result)) < total_items, + 'note': 'Positions are approximate for performance with large queues' + } + + def get_queue_summary(self): + """ + Get a quick summary of queue state without expensive operations. + O(n) complexity - fast even for large queues. + + Returns: + dict: Queue summary statistics + """ + with self.mutex: + queue_list = list(self.queue) + total_items = len(queue_list) + + if total_items == 0: + return { + 'total_items': 0, + 'priority_breakdown': {}, + 'immediate_items': 0, + 'clone_items': 0, + 'scheduled_items': 0 + } + + # Count items by priority type - O(n) + immediate_items = 0 # priority 1 + clone_items = 0 # priority 5 + scheduled_items = 0 # priority > 100 (timestamps) + priority_counts = {} + + for item in queue_list: + priority = item.priority + priority_counts[priority] = priority_counts.get(priority, 0) + 1 + + if priority == 1: + immediate_items += 1 + elif priority == 5: + clone_items += 1 + elif priority > 100: + scheduled_items += 1 + + return { + 'total_items': total_items, + 'priority_breakdown': priority_counts, + 'immediate_items': immediate_items, + 'clone_items': clone_items, + 'scheduled_items': scheduled_items, + 'min_priority': min(priority_counts.keys()) if priority_counts else None, + 'max_priority': max(priority_counts.keys()) if priority_counts else None + } + + +class AsyncSignalPriorityQueue(asyncio.PriorityQueue): + """ + Async version of SignalPriorityQueue that sends signals when items are added/removed. + + This class extends asyncio.PriorityQueue and maintains the same signal behavior + as the synchronous version for real-time UI updates. 
+ """ + + def __init__(self, maxsize=0): + super().__init__(maxsize) + try: + self.queue_length_signal = signal('queue_length') + except Exception as e: + logger.critical(f"Exception: {e}") + + async def put(self, item): + # Call the parent's put method first + await super().put(item) + + # After putting the item in the queue, check if it has a UUID and emit signal + if hasattr(item, 'item') and isinstance(item.item, dict) and 'uuid' in item.item: + uuid = item.item['uuid'] + # Get the signal and send it if it exists + watch_check_update = signal('watch_check_update') + if watch_check_update: + # Send the watch_uuid parameter + watch_check_update.send(watch_uuid=uuid) + + # Send queue_length signal with current queue size + try: + if self.queue_length_signal: + self.queue_length_signal.send(length=self.qsize()) + except Exception as e: + logger.critical(f"Exception: {e}") + + async def get(self): + # Call the parent's get method first + item = await super().get() + + # Send queue_length signal with current queue size + try: + if self.queue_length_signal: + self.queue_length_signal.send(length=self.qsize()) + except Exception as e: + logger.critical(f"Exception: {e}") + return item + + @property + def queue(self): + """ + Provide compatibility with sync PriorityQueue.queue access + Returns the internal queue for template access + """ + return self._queue if hasattr(self, '_queue') else [] + + def get_uuid_position(self, target_uuid): + """ + Find the position of a watch UUID in the async priority queue. + Optimized for large queues - O(n) complexity instead of O(n log n). 
+ + Args: + target_uuid: The UUID to search for + + Returns: + dict: Contains position info or None if not found + - position: 0-based position in queue (0 = next to be processed) + - total_items: total number of items in queue + - priority: the priority value of the found item + """ + queue_list = list(self._queue) + total_items = len(queue_list) + + if total_items == 0: + return { + 'position': None, + 'total_items': 0, + 'priority': None, + 'found': False + } + + # Find the target item and its priority first - O(n) + target_item = None + target_priority = None + + for item in queue_list: + if (hasattr(item, 'item') and + isinstance(item.item, dict) and + item.item.get('uuid') == target_uuid): + target_item = item + target_priority = item.priority + break + + if target_item is None: + return { + 'position': None, + 'total_items': total_items, + 'priority': None, + 'found': False + } + + # Count how many items have higher priority (lower numbers) - O(n) + position = 0 + for item in queue_list: + if item.priority < target_priority: + position += 1 + elif item.priority == target_priority and item != target_item: + position += 1 + + return { + 'position': position, + 'total_items': total_items, + 'priority': target_priority, + 'found': True + } + + def get_all_queued_uuids(self, limit=None, offset=0): + """ + Get UUIDs currently in the async queue with their positions. + For large queues, use limit/offset for pagination. 
+ + Args: + limit: Maximum number of items to return (None = all) + offset: Number of items to skip (for pagination) + + Returns: + dict: Contains items and metadata (same structure as sync version) + """ + queue_list = list(self._queue) + total_items = len(queue_list) + + if total_items == 0: + return { + 'items': [], + 'total_items': 0, + 'returned_items': 0, + 'has_more': False + } + + # Same logic as sync version but without mutex + if limit is not None and limit <= 100: + queue_items = sorted(queue_list) + end_idx = min(offset + limit, len(queue_items)) if limit else len(queue_items) + items_to_process = queue_items[offset:end_idx] + + result = [] + for position, item in enumerate(items_to_process, start=offset): + if (hasattr(item, 'item') and + isinstance(item.item, dict) and + 'uuid' in item.item): + + result.append({ + 'uuid': item.item['uuid'], + 'position': position, + 'priority': item.priority + }) + + return { + 'items': result, + 'total_items': total_items, + 'returned_items': len(result), + 'has_more': (offset + len(result)) < total_items + } + else: + # Fast approximate positions for large queues + result = [] + processed = 0 + skipped = 0 + + for item in queue_list: + if (hasattr(item, 'item') and + isinstance(item.item, dict) and + 'uuid' in item.item): + + if skipped < offset: + skipped += 1 + continue + + if limit and processed >= limit: + break + + approx_position = sum(1 for other in queue_list if other.priority < item.priority) + + result.append({ + 'uuid': item.item['uuid'], + 'position': approx_position, + 'priority': item.priority + }) + processed += 1 + + return { + 'items': result, + 'total_items': total_items, + 'returned_items': len(result), + 'has_more': (offset + len(result)) < total_items, + 'note': 'Positions are approximate for performance with large queues' + } + + def get_queue_summary(self): + """ + Get a quick summary of async queue state. + O(n) complexity - fast even for large queues. 
+ """ + queue_list = list(self._queue) + total_items = len(queue_list) + + if total_items == 0: + return { + 'total_items': 0, + 'priority_breakdown': {}, + 'immediate_items': 0, + 'clone_items': 0, + 'scheduled_items': 0 + } + + immediate_items = 0 + clone_items = 0 + scheduled_items = 0 + priority_counts = {} + + for item in queue_list: + priority = item.priority + priority_counts[priority] = priority_counts.get(priority, 0) + 1 + + if priority == 1: + immediate_items += 1 + elif priority == 5: + clone_items += 1 + elif priority > 100: + scheduled_items += 1 + + return { + 'total_items': total_items, + 'priority_breakdown': priority_counts, + 'immediate_items': immediate_items, + 'clone_items': clone_items, + 'scheduled_items': scheduled_items, + 'min_priority': min(priority_counts.keys()) if priority_counts else None, + 'max_priority': max(priority_counts.keys()) if priority_counts else None + } diff --git a/changedetectionio/flask_app.py b/changedetectionio/flask_app.py index fe3c0b97..164c0c8d 100644 --- a/changedetectionio/flask_app.py +++ b/changedetectionio/flask_app.py @@ -4,6 +4,7 @@ import flask_login import locale import os import queue +import sys import threading import time import timeago @@ -11,7 +12,8 @@ from blinker import signal from changedetectionio.strtobool import strtobool from threading import Event -from changedetectionio.custom_queue import SignalPriorityQueue +from changedetectionio.custom_queue import SignalPriorityQueue, AsyncSignalPriorityQueue +from changedetectionio import worker_handler from flask import ( Flask, @@ -45,12 +47,11 @@ from .time_handler import is_within_schedule datastore = None # Local -running_update_threads = [] ticker_thread = None - extra_stylesheets = [] -update_q = SignalPriorityQueue() +# Use async queue by default, keep sync for backward compatibility +update_q = AsyncSignalPriorityQueue() if worker_handler.USE_ASYNC_WORKERS else SignalPriorityQueue() notification_q = queue.Queue() MAX_QUEUE_SIZE = 2000 @@ 
-145,10 +146,32 @@ def _jinja2_filter_format_number_locale(value: float) -> str: @app.template_global('is_checking_now') def _watch_is_checking_now(watch_obj, format="%Y-%m-%d %H:%M:%S"): - # Worker thread tells us which UUID it is currently processing. - for t in running_update_threads: - if t.current_uuid == watch_obj['uuid']: - return True + return worker_handler.is_watch_running(watch_obj['uuid']) + +@app.template_global('get_watch_queue_position') +def _get_watch_queue_position(watch_obj): + """Get the position of a watch in the queue""" + uuid = watch_obj['uuid'] + return update_q.get_uuid_position(uuid) + +@app.template_global('get_current_worker_count') +def _get_current_worker_count(): + """Get the current number of operational workers""" + return worker_handler.get_worker_count() + +@app.template_global('get_worker_status_info') +def _get_worker_status_info(): + """Get detailed worker status information for display""" + status = worker_handler.get_worker_status() + running_uuids = worker_handler.get_running_uuids() + + return { + 'count': status['worker_count'], + 'type': status['worker_type'], + 'active_workers': len(running_uuids), + 'processing_watches': running_uuids, + 'loop_running': status.get('async_loop_running', None) + } # We use the whole watch object from the store/JSON so we can see if there's some related status in terms of a thread @@ -470,16 +493,21 @@ def changedetection_app(config=None, datastore_o=None): # watchlist UI buttons etc import changedetectionio.blueprint.ui as ui - app.register_blueprint(ui.construct_blueprint(datastore, update_q, running_update_threads, queuedWatchMetaData, watch_check_update)) + app.register_blueprint(ui.construct_blueprint(datastore, update_q, worker_handler, queuedWatchMetaData, watch_check_update)) import changedetectionio.blueprint.watchlist as watchlist app.register_blueprint(watchlist.construct_blueprint(datastore=datastore, update_q=update_q, queuedWatchMetaData=queuedWatchMetaData), url_prefix='') 
- # Initialize Socket.IO server - from changedetectionio.realtime.socket_server import init_socketio - global socketio_server - socketio_server = init_socketio(app, datastore) - logger.info("Socket.IO server initialized") + # Initialize Socket.IO server conditionally based on settings + socket_io_enabled = datastore.data['settings']['application']['ui'].get('socket_io_enabled', True) + if socket_io_enabled: + from changedetectionio.realtime.socket_server import init_socketio + global socketio_server + socketio_server = init_socketio(app, datastore) + logger.info("Socket.IO server initialized") + else: + logger.info("Socket.IO server disabled via settings") + socketio_server = None # Memory cleanup endpoint @app.route('/gc-cleanup', methods=['GET']) @@ -491,12 +519,91 @@ def changedetection_app(config=None, datastore_o=None): result = memory_cleanup(app) return jsonify({"status": "success", "message": "Memory cleanup completed", "result": result}) + # Worker health check endpoint + @app.route('/worker-health', methods=['GET']) + @login_optionally_required + def worker_health(): + from flask import jsonify + + expected_workers = int(os.getenv("FETCH_WORKERS", datastore.data['settings']['requests']['workers'])) + + # Get basic status + status = worker_handler.get_worker_status() + + # Perform health check + health_result = worker_handler.check_worker_health( + expected_count=expected_workers, + update_q=update_q, + notification_q=notification_q, + app=app, + datastore=datastore + ) + + return jsonify({ + "status": "success", + "worker_status": status, + "health_check": health_result, + "expected_workers": expected_workers + }) + + # Queue status endpoint + @app.route('/queue-status', methods=['GET']) + @login_optionally_required + def queue_status(): + from flask import jsonify, request + + # Get specific UUID position if requested + target_uuid = request.args.get('uuid') + + if target_uuid: + position_info = update_q.get_uuid_position(target_uuid) + return jsonify({ 
+ "status": "success", + "uuid": target_uuid, + "queue_position": position_info + }) + else: + # Get pagination parameters + limit = request.args.get('limit', type=int) + offset = request.args.get('offset', type=int, default=0) + summary_only = request.args.get('summary', type=bool, default=False) + + if summary_only: + # Fast summary for large queues + summary = update_q.get_queue_summary() + return jsonify({ + "status": "success", + "queue_summary": summary + }) + else: + # Get queued items with pagination support + if limit is None: + # Default limit for large queues to prevent performance issues + queue_size = update_q.qsize() + if queue_size > 100: + limit = 50 + logger.warning(f"Large queue ({queue_size} items) detected, limiting to {limit} items. Use ?limit=N for more.") + + all_queued = update_q.get_all_queued_uuids(limit=limit, offset=offset) + return jsonify({ + "status": "success", + "queue_size": update_q.qsize(), + "queued_data": all_queued + }) + + # Start the async workers during app initialization + # Can be overridden by ENV or use the default settings + n_workers = int(os.getenv("FETCH_WORKERS", datastore.data['settings']['requests']['workers'])) + logger.info(f"Starting {n_workers} workers during app initialization") + worker_handler.start_workers(n_workers, update_q, notification_q, app, datastore) + # @todo handle ctrl break ticker_thread = threading.Thread(target=ticker_thread_check_time_launch_checks).start() threading.Thread(target=notification_runner).start() + in_pytest = "pytest" in sys.modules or "PYTEST_CURRENT_TEST" in os.environ # Check for new release version, but not when running in test/build or pytest - if not os.getenv("GITHUB_REF", False) and not strtobool(os.getenv('DISABLE_VERSION_CHECK', 'no')): + if not os.getenv("GITHUB_REF", False) and not strtobool(os.getenv('DISABLE_VERSION_CHECK', 'no')) and not in_pytest: threading.Thread(target=check_for_new_version).start() # Return the Flask app - the Socket.IO will be attached to 
it but initialized separately @@ -588,27 +695,35 @@ def notification_runner(): # Threaded runner, look for new watches to feed into the Queue. def ticker_thread_check_time_launch_checks(): import random - from changedetectionio import update_worker proxy_last_called_time = {} + last_health_check = 0 recheck_time_minimum_seconds = int(os.getenv('MINIMUM_SECONDS_RECHECK_TIME', 3)) logger.debug(f"System env MINIMUM_SECONDS_RECHECK_TIME {recheck_time_minimum_seconds}") - # Spin up Workers that do the fetching - # Can be overriden by ENV or use the default settings - n_workers = int(os.getenv("FETCH_WORKERS", datastore.data['settings']['requests']['workers'])) - for _ in range(n_workers): - new_worker = update_worker.update_worker(update_q, notification_q, app, datastore) - running_update_threads.append(new_worker) - new_worker.start() + # Workers are now started during app initialization, not here while not app.config.exit.is_set(): + # Periodic worker health check (every 60 seconds) + now = time.time() + if now - last_health_check > 60: + expected_workers = int(os.getenv("FETCH_WORKERS", datastore.data['settings']['requests']['workers'])) + health_result = worker_handler.check_worker_health( + expected_count=expected_workers, + update_q=update_q, + notification_q=notification_q, + app=app, + datastore=datastore + ) + + if health_result['status'] != 'healthy': + logger.warning(f"Worker health check: {health_result['message']}") + + last_health_check = now + # Get a list of watches by UUID that are currently fetching data - running_uuids = [] - for t in running_update_threads: - if t.current_uuid: - running_uuids.append(t.current_uuid) + running_uuids = worker_handler.get_running_uuids() # Re #232 - Deepcopy the data incase it changes while we're iterating through it all watch_uuid_list = [] @@ -711,7 +826,7 @@ def ticker_thread_check_time_launch_checks(): f"{now - watch['last_checked']:0.2f}s since last checked") # Into the queue with you - 
update_q.put(queuedWatchMetaData.PrioritizedItem(priority=priority, item={'uuid': uuid})) + worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=priority, item={'uuid': uuid})) # Reset for next time watch.jitter_seconds = 0 diff --git a/changedetectionio/forms.py b/changedetectionio/forms.py index cf751222..5d9dafa9 100644 --- a/changedetectionio/forms.py +++ b/changedetectionio/forms.py @@ -719,6 +719,12 @@ class globalSettingsRequestForm(Form): jitter_seconds = IntegerField('Random jitter seconds ± check', render_kw={"style": "width: 5em;"}, validators=[validators.NumberRange(min=0, message="Should contain zero or more seconds")]) + + workers = IntegerField('Number of fetch workers', + render_kw={"style": "width: 5em;"}, + validators=[validators.NumberRange(min=1, max=50, + message="Should be between 1 and 50")]) + extra_proxies = FieldList(FormField(SingleExtraProxy), min_entries=5) extra_browsers = FieldList(FormField(SingleExtraBrowser), min_entries=5) @@ -733,6 +739,7 @@ class globalSettingsRequestForm(Form): class globalSettingsApplicationUIForm(Form): open_diff_in_new_tab = BooleanField('Open diff page in a new tab', default=True, validators=[validators.Optional()]) + socket_io_enabled = BooleanField('Realtime UI Updates Enabled', default=True, validators=[validators.Optional()]) # datastore.data['settings']['application'].. 
class globalSettingsApplicationForm(commonSettingsForm): diff --git a/changedetectionio/model/App.py b/changedetectionio/model/App.py index 34809017..8de5b0aa 100644 --- a/changedetectionio/model/App.py +++ b/changedetectionio/model/App.py @@ -62,6 +62,7 @@ class model(dict): 'timezone': None, # Default IANA timezone name 'ui': { 'open_diff_in_new_tab': True, + 'socket_io_enabled': True }, } } diff --git a/changedetectionio/notification_service.py b/changedetectionio/notification_service.py new file mode 100644 index 00000000..5f3136b8 --- /dev/null +++ b/changedetectionio/notification_service.py @@ -0,0 +1,246 @@ +#!/usr/bin/env python3 + +""" +Notification Service Module +Extracted from update_worker.py to provide standalone notification functionality +for both sync and async workers +""" + +import time +from loguru import logger + + +class NotificationService: + """ + Standalone notification service that handles all notification functionality + previously embedded in the update_worker class + """ + + def __init__(self, datastore, notification_q): + self.datastore = datastore + self.notification_q = notification_q + + def queue_notification_for_watch(self, n_object, watch): + """ + Queue a notification for a watch with full diff rendering and template variables + """ + from changedetectionio import diff + from changedetectionio.notification import default_notification_format_for_watch + + dates = [] + trigger_text = '' + + now = time.time() + + if watch: + watch_history = watch.history + dates = list(watch_history.keys()) + trigger_text = watch.get('trigger_text', []) + + # Add text that was triggered + if len(dates): + snapshot_contents = watch.get_history_snapshot(dates[-1]) + else: + snapshot_contents = "No snapshot/history available, the watch should fetch atleast once." 
+ + # If we ended up here with "System default" + if n_object.get('notification_format') == default_notification_format_for_watch: + n_object['notification_format'] = self.datastore.data['settings']['application'].get('notification_format') + + html_colour_enable = False + # HTML needs linebreak, but MarkDown and Text can use a linefeed + if n_object.get('notification_format') == 'HTML': + line_feed_sep = "<br>" + # Snapshot will be plaintext on the disk, convert to some kind of HTML + snapshot_contents = snapshot_contents.replace('\n', line_feed_sep) + elif n_object.get('notification_format') == 'HTML Color': + line_feed_sep = "<br>" + # Snapshot will be plaintext on the disk, convert to some kind of HTML + snapshot_contents = snapshot_contents.replace('\n', line_feed_sep) + html_colour_enable = True + else: + line_feed_sep = "\n" + + triggered_text = '' + if len(trigger_text): + from . import html_tools + triggered_text = html_tools.get_triggered_text(content=snapshot_contents, trigger_text=trigger_text) + if triggered_text: + triggered_text = line_feed_sep.join(triggered_text) + + # Could be called as a 'test notification' with only 1 snapshot available + prev_snapshot = "Example text: example test\nExample text: change detection is cool\nExample text: some more examples\n" + current_snapshot = "Example text: example test\nExample text: change detection is fantastic\nExample text: even more examples\nExample text: a lot more examples" + + if len(dates) > 1: + prev_snapshot = watch.get_history_snapshot(dates[-2]) + current_snapshot = watch.get_history_snapshot(dates[-1]) + + n_object.update({ + 'current_snapshot': snapshot_contents, + 'diff': diff.render_diff(prev_snapshot, current_snapshot, line_feed_sep=line_feed_sep, html_colour=html_colour_enable), + 'diff_added': diff.render_diff(prev_snapshot, current_snapshot, include_removed=False, line_feed_sep=line_feed_sep), + 'diff_full': diff.render_diff(prev_snapshot, current_snapshot, include_equal=True, 
line_feed_sep=line_feed_sep, html_colour=html_colour_enable), + 'diff_patch': diff.render_diff(prev_snapshot, current_snapshot, line_feed_sep=line_feed_sep, patch_format=True), + 'diff_removed': diff.render_diff(prev_snapshot, current_snapshot, include_added=False, line_feed_sep=line_feed_sep), + 'notification_timestamp': now, + 'screenshot': watch.get_screenshot() if watch and watch.get('notification_screenshot') else None, + 'triggered_text': triggered_text, + 'uuid': watch.get('uuid') if watch else None, + 'watch_url': watch.get('url') if watch else None, + }) + + if watch: + n_object.update(watch.extra_notification_token_values()) + + logger.trace(f"Main rendered notification placeholders (diff_added etc) calculated in {time.time()-now:.3f}s") + logger.debug("Queued notification for sending") + self.notification_q.put(n_object) + + def _check_cascading_vars(self, var_name, watch): + """ + Check notification variables in cascading priority: + Individual watch settings > Tag settings > Global settings + """ + from changedetectionio.notification import ( + default_notification_format_for_watch, + default_notification_body, + default_notification_title + ) + + # Would be better if this was some kind of Object where Watch can reference the parent datastore etc + v = watch.get(var_name) + if v and not watch.get('notification_muted'): + if var_name == 'notification_format' and v == default_notification_format_for_watch: + return self.datastore.data['settings']['application'].get('notification_format') + + return v + + tags = self.datastore.get_all_tags_for_watch(uuid=watch.get('uuid')) + if tags: + for tag_uuid, tag in tags.items(): + v = tag.get(var_name) + if v and not tag.get('notification_muted'): + return v + + if self.datastore.data['settings']['application'].get(var_name): + return self.datastore.data['settings']['application'].get(var_name) + + # Otherwise could be defaults + if var_name == 'notification_format': + return default_notification_format_for_watch 
+ if var_name == 'notification_body': + return default_notification_body + if var_name == 'notification_title': + return default_notification_title + + return None + + def send_content_changed_notification(self, watch_uuid): + """ + Send notification when content changes are detected + """ + n_object = {} + watch = self.datastore.data['watching'].get(watch_uuid) + if not watch: + return + + watch_history = watch.history + dates = list(watch_history.keys()) + # Theoretically it's possible that this could be just 1 long, + # - In the case that the timestamp key was not unique + if len(dates) == 1: + raise ValueError( + "History index had 2 or more, but only 1 date loaded, timestamps were not unique? maybe two of the same timestamps got written, needs more delay?" + ) + + # Should be a better parent getter in the model object + + # Prefer - Individual watch settings > Tag settings > Global settings (in that order) + n_object['notification_urls'] = self._check_cascading_vars('notification_urls', watch) + n_object['notification_title'] = self._check_cascading_vars('notification_title', watch) + n_object['notification_body'] = self._check_cascading_vars('notification_body', watch) + n_object['notification_format'] = self._check_cascading_vars('notification_format', watch) + + # (Individual watch) Only prepare to notify if the rules above matched + queued = False + if n_object and n_object.get('notification_urls'): + queued = True + + count = watch.get('notification_alert_count', 0) + 1 + self.datastore.update_watch(uuid=watch_uuid, update_obj={'notification_alert_count': count}) + + self.queue_notification_for_watch(n_object=n_object, watch=watch) + + return queued + + def send_filter_failure_notification(self, watch_uuid): + """ + Send notification when CSS/XPath filters fail consecutively + """ + threshold = self.datastore.data['settings']['application'].get('filter_failure_notification_threshold_attempts') + watch = self.datastore.data['watching'].get(watch_uuid) + if 
not watch: + return + + n_object = {'notification_title': 'Changedetection.io - Alert - CSS/xPath filter was not present in the page', + 'notification_body': "Your configured CSS/xPath filters of '{}' for {{{{watch_url}}}} did not appear on the page after {} attempts, did the page change layout?\n\nLink: {{{{base_url}}}}/edit/{{{{watch_uuid}}}}\n\nThanks - Your omniscient changedetection.io installation :)\n".format( + ", ".join(watch['include_filters']), + threshold), + 'notification_format': 'text'} + + if len(watch['notification_urls']): + n_object['notification_urls'] = watch['notification_urls'] + + elif len(self.datastore.data['settings']['application']['notification_urls']): + n_object['notification_urls'] = self.datastore.data['settings']['application']['notification_urls'] + + # Only prepare to notify if the rules above matched + if 'notification_urls' in n_object: + n_object.update({ + 'watch_url': watch['url'], + 'uuid': watch_uuid, + 'screenshot': None + }) + self.notification_q.put(n_object) + logger.debug(f"Sent filter not found notification for {watch_uuid}") + else: + logger.debug(f"NOT sending filter not found notification for {watch_uuid} - no notification URLs") + + def send_step_failure_notification(self, watch_uuid, step_n): + """ + Send notification when browser steps fail consecutively + """ + watch = self.datastore.data['watching'].get(watch_uuid, False) + if not watch: + return + threshold = self.datastore.data['settings']['application'].get('filter_failure_notification_threshold_attempts') + n_object = {'notification_title': "Changedetection.io - Alert - Browser step at position {} could not be run".format(step_n+1), + 'notification_body': "Your configured browser step at position {} for {{{{watch_url}}}} " + "did not appear on the page after {} attempts, did the page change layout? 
" + "Does it need a delay added?\n\nLink: {{{{base_url}}}}/edit/{{{{watch_uuid}}}}\n\n" + "Thanks - Your omniscient changedetection.io installation :)\n".format(step_n+1, threshold), + 'notification_format': 'text'} + + if len(watch['notification_urls']): + n_object['notification_urls'] = watch['notification_urls'] + + elif len(self.datastore.data['settings']['application']['notification_urls']): + n_object['notification_urls'] = self.datastore.data['settings']['application']['notification_urls'] + + # Only prepare to notify if the rules above matched + if 'notification_urls' in n_object: + n_object.update({ + 'watch_url': watch['url'], + 'uuid': watch_uuid + }) + self.notification_q.put(n_object) + logger.error(f"Sent step not found notification for {watch_uuid}") + + +# Convenience functions for creating notification service instances +def create_notification_service(datastore, notification_q): + """ + Factory function to create a NotificationService instance + """ + return NotificationService(datastore, notification_q) \ No newline at end of file diff --git a/changedetectionio/processors/__init__.py b/changedetectionio/processors/__init__.py index e7c97a16..2ae6df4d 100644 --- a/changedetectionio/processors/__init__.py +++ b/changedetectionio/processors/__init__.py @@ -27,7 +27,7 @@ class difference_detection_processor(): # Generic fetcher that should be extended (requests, playwright etc) self.fetcher = Fetcher() - def call_browser(self, preferred_proxy_id=None): + async def call_browser(self, preferred_proxy_id=None): from requests.structures import CaseInsensitiveDict @@ -147,16 +147,17 @@ class difference_detection_processor(): # And here we go! 
call the right browser with browser-specific settings empty_pages_are_a_change = self.datastore.data['settings']['application'].get('empty_pages_are_a_change', False) - self.fetcher.run(url=url, - timeout=timeout, - request_headers=request_headers, - request_body=request_body, - request_method=request_method, - ignore_status_codes=ignore_status_codes, - current_include_filters=self.watch.get('include_filters'), - is_binary=is_binary, - empty_pages_are_a_change=empty_pages_are_a_change - ) + # All fetchers are now async + await self.fetcher.run(url=url, + timeout=timeout, + request_headers=request_headers, + request_body=request_body, + request_method=request_method, + ignore_status_codes=ignore_status_codes, + current_include_filters=self.watch.get('include_filters'), + is_binary=is_binary, + empty_pages_are_a_change=empty_pages_are_a_change + ) #@todo .quit here could go on close object, so we can run JS if change-detected self.fetcher.quit(watch=self.watch) diff --git a/changedetectionio/realtime/README.md b/changedetectionio/realtime/README.md new file mode 100644 index 00000000..a391ad2a --- /dev/null +++ b/changedetectionio/realtime/README.md @@ -0,0 +1,124 @@ +# Real-time Socket.IO Implementation + +This directory contains the Socket.IO implementation for changedetection.io's real-time updates. 
+ +## Architecture Overview + +The real-time system provides live updates to the web interface for: +- Watch status changes (checking, completed, errors) +- Queue length updates +- General statistics updates + +## Current Implementation + +### Socket.IO Configuration +- **Async Mode**: `threading` (default) or `gevent` (optional via SOCKETIO_MODE env var) +- **Server**: Flask-SocketIO with threading support +- **Background Tasks**: Python threading with daemon threads + +### Async Worker Integration +- **Workers**: Async workers using asyncio for watch processing +- **Queue**: AsyncSignalPriorityQueue for job distribution +- **Signals**: Blinker signals for real-time updates between workers and Socket.IO + +### Environment Variables +- `SOCKETIO_MODE=threading` (default, recommended) +- `SOCKETIO_MODE=gevent` (optional, has cross-platform limitations) + +## Architecture Decision: Why Threading Mode? + +### Previous Issues with Eventlet +**Eventlet was completely removed** due to fundamental compatibility issues: + +1. **Monkey Patching Conflicts**: `eventlet.monkey_patch()` globally replaced Python's threading/socket modules, causing conflicts with: + - Playwright's synchronous browser automation + - Async worker event loops + - Various Python libraries expecting real threading + +2. **Python 3.12+ Compatibility**: Eventlet had issues with newer Python versions and asyncio integration + +3. 
**CVE-2023-29483**: Security vulnerability in eventlet's dnspython dependency
+
+### Current Solution Benefits
+✅ **Threading Mode Advantages**:
+- Full compatibility with async workers and Playwright
+- No monkey patching - uses standard Python threading
+- Better Python 3.12+ support
+- Cross-platform compatibility (Windows, macOS, Linux)
+- No external async library dependencies
+- Fast shutdown capabilities
+
+✅ **Optional Gevent Support**:
+- Available via `SOCKETIO_MODE=gevent` for high-concurrency scenarios
+- Cross-platform limitations documented in requirements.txt
+- Not recommended as default due to Windows socket limits and macOS ARM build issues
+
+## Socket.IO Mode Configuration
+
+### Threading Mode (Default)
+```python
+# Enabled automatically
+async_mode = 'threading'
+socketio = SocketIO(app, async_mode='threading')
+```
+
+### Gevent Mode (Optional)
+```bash
+# Set environment variable
+export SOCKETIO_MODE=gevent
+```
+
+## Background Tasks
+
+### Queue Polling
+- **Threading Mode**: `threading.Thread` with `threading.Event` for shutdown
+- **Signal Handling**: Blinker signals for watch state changes
+- **Real-time Updates**: Direct Socket.IO `emit()` calls to connected clients
+
+### Worker Integration
+- **Async Workers**: Run in separate asyncio event loop thread
+- **Communication**: AsyncSignalPriorityQueue bridges async workers and Socket.IO
+- **Updates**: Real-time updates sent when workers complete tasks
+
+## Files in This Directory
+
+- `socket_server.py`: Main Socket.IO initialization and event handling
+- `events.py`: Watch operation event handlers
+- `__init__.py`: Module initialization
+
+## Production Deployment
+
+### Recommended WSGI Servers
+For production with Socket.IO threading mode:
+- **Gunicorn**: `gunicorn --worker-class gevent changedetection:app` (if using gevent mode)
+- **uWSGI**: With threading support
+- **Docker**: Built-in Flask server works well for containerized deployments
+
+### Performance Considerations 
+- Threading mode: Better memory usage, standard Python threading +- Gevent mode: Higher concurrency but platform limitations +- Async workers: Separate from Socket.IO, provides scalability + +## Environment Variables + +| Variable | Default | Description | +|----------|---------|-------------| +| `SOCKETIO_MODE` | `threading` | Socket.IO async mode (`threading` or `gevent`) | +| `FETCH_WORKERS` | `10` | Number of async workers for watch processing | +| `CHANGEDETECTION_HOST` | `0.0.0.0` | Server bind address | +| `CHANGEDETECTION_PORT` | `5000` | Server port | + +## Debugging Tips + +1. **Socket.IO Issues**: Check browser dev tools for WebSocket connection errors +2. **Threading Issues**: Monitor with `ps -T` to check thread count +3. **Worker Issues**: Use `/worker-health` endpoint to check async worker status +4. **Queue Issues**: Use `/queue-status` endpoint to monitor job queue +5. **Performance**: Use `/gc-cleanup` endpoint to trigger memory cleanup + +## Migration Notes + +If upgrading from eventlet-based versions: +- Remove any `EVENTLET_*` environment variables +- No code changes needed - Socket.IO mode is automatically configured +- Optional: Set `SOCKETIO_MODE=gevent` if high concurrency is required and platform supports it \ No newline at end of file diff --git a/changedetectionio/realtime/events.py b/changedetectionio/realtime/events.py new file mode 100644 index 00000000..a68ea99c --- /dev/null +++ b/changedetectionio/realtime/events.py @@ -0,0 +1,58 @@ +from flask_socketio import emit +from loguru import logger +from blinker import signal + + +def register_watch_operation_handlers(socketio, datastore): + """Register Socket.IO event handlers for watch operations""" + + @socketio.on('watch_operation') + def handle_watch_operation(data): + """Handle watch operations like pause, mute, recheck via Socket.IO""" + try: + op = data.get('op') + uuid = data.get('uuid') + + logger.debug(f"Socket.IO: Received watch operation '{op}' for UUID {uuid}") + + if not 
op or not uuid: + emit('operation_result', {'success': False, 'error': 'Missing operation or UUID'}) + return + + # Check if watch exists + if not datastore.data['watching'].get(uuid): + emit('operation_result', {'success': False, 'error': 'Watch not found'}) + return + + watch = datastore.data['watching'][uuid] + + # Perform the operation + if op == 'pause': + watch.toggle_pause() + logger.info(f"Socket.IO: Toggled pause for watch {uuid}") + elif op == 'mute': + watch.toggle_mute() + logger.info(f"Socket.IO: Toggled mute for watch {uuid}") + elif op == 'recheck': + # Import here to avoid circular imports + from changedetectionio.flask_app import update_q + from changedetectionio import queuedWatchMetaData + from changedetectionio import worker_handler + + worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid})) + logger.info(f"Socket.IO: Queued recheck for watch {uuid}") + else: + emit('operation_result', {'success': False, 'error': f'Unknown operation: {op}'}) + return + + # Send signal to update UI + watch_check_update = signal('watch_check_update') + if watch_check_update: + watch_check_update.send(watch_uuid=uuid) + + # Send success response to client + emit('operation_result', {'success': True, 'operation': op, 'uuid': uuid}) + + except Exception as e: + logger.error(f"Socket.IO error in handle_watch_operation: {str(e)}") + emit('operation_result', {'success': False, 'error': str(e)}) diff --git a/changedetectionio/realtime/socket_server.py b/changedetectionio/realtime/socket_server.py index 9f7c39cf..c9241486 100644 --- a/changedetectionio/realtime/socket_server.py +++ b/changedetectionio/realtime/socket_server.py @@ -8,8 +8,10 @@ from blinker import signal from changedetectionio import strtobool + class SignalHandler: """A standalone class to receive signals""" + def __init__(self, socketio_instance, datastore): self.socketio_instance = socketio_instance self.datastore = datastore @@ -17,19 +19,22 
@@ class SignalHandler: # Connect to the watch_check_update signal from changedetectionio.flask_app import watch_check_update as wcc wcc.connect(self.handle_signal, weak=False) - logger.info("SignalHandler: Connected to signal from direct import") - + # logger.info("SignalHandler: Connected to signal from direct import") + # Connect to the queue_length signal queue_length_signal = signal('queue_length') queue_length_signal.connect(self.handle_queue_length, weak=False) - logger.info("SignalHandler: Connected to queue_length signal") + # logger.info("SignalHandler: Connected to queue_length signal") + # Create and start the queue update thread using standard threading + import threading + self.polling_emitter_thread = threading.Thread( + target=self.polling_emit_running_or_queued_watches_threaded, + daemon=True + ) + self.polling_emitter_thread.start() + logger.info("Started polling thread using threading (eventlet-free)") - # Create and start the queue update thread using gevent - import gevent - logger.info("Using gevent for polling thread") - self.polling_emitter_thread = gevent.spawn(self.polling_emit_running_or_queued_watches) - # Store the thread reference in socketio for clean shutdown self.socketio_instance.polling_emitter_thread = self.polling_emitter_thread @@ -44,7 +49,7 @@ class SignalHandler: watch = self.datastore.data['watching'].get(watch_uuid) if watch: if app_context: - #note + # note with app_context.app_context(): with app_context.test_request_context(): # Forward to handle_watch_update with the watch parameter @@ -61,59 +66,85 @@ class SignalHandler: try: queue_length = kwargs.get('length', 0) logger.debug(f"SignalHandler: Queue length update received: {queue_length}") - + # Emit the queue size to all connected clients self.socketio_instance.emit("queue_size", { "q_length": queue_length, "event_timestamp": time.time() }) - + except Exception as e: logger.error(f"Socket.IO error in handle_queue_length: {str(e)}") - def 
polling_emit_running_or_queued_watches(self): - """Greenlet that periodically updates the browser/frontend with current state of who is being checked or queued - This is because sometimes the browser page could reload (like on clicking on a link) but the data is old - """ - logger.info("Queue update greenlet started") - - # Import the watch_check_update signal, update_q, and running_update_threads here to avoid circular imports - from changedetectionio.flask_app import app, running_update_threads + def polling_emit_running_or_queued_watches_threaded(self): + """Threading version of polling for Windows compatibility""" + import time + import threading + logger.info("Queue update thread started (threading mode)") + + # Import here to avoid circular imports + from changedetectionio.flask_app import app + from changedetectionio import worker_handler watch_check_update = signal('watch_check_update') - # Use gevent sleep for non-blocking operation - from gevent import sleep as gevent_sleep - - # Get the stop event from the socketio instance - stop_event = self.socketio_instance.stop_event if hasattr(self.socketio_instance, 'stop_event') else None - - # Run until explicitly stopped - while stop_event is None or not stop_event.is_set(): + # Track previous state to avoid unnecessary emissions + previous_running_uuids = set() + + # Run until app shutdown - check exit flag more frequently for fast shutdown + exit_event = getattr(app.config, 'exit', threading.Event()) + + while not exit_event.is_set(): try: - # For each item in the queue, send a signal, so we update the UI - for t in running_update_threads: - if hasattr(t, 'current_uuid') and t.current_uuid: - logger.trace(f"Sending update for {t.current_uuid}") - # Send with app_context to ensure proper URL generation - with app.app_context(): - watch_check_update.send(app_context=app, watch_uuid=t.current_uuid) - # Yield control back to gevent after each send to prevent blocking - gevent_sleep(0.1) # Small sleep to yield 
control - - # Check if we need to stop in the middle of processing - if stop_event is not None and stop_event.is_set(): + # Get current running UUIDs from async workers + running_uuids = set(worker_handler.get_running_uuids()) + + # Only send updates for UUIDs that changed state + newly_running = running_uuids - previous_running_uuids + no_longer_running = previous_running_uuids - running_uuids + + # Send updates for newly running UUIDs (but exit fast if shutdown requested) + for uuid in newly_running: + if exit_event.is_set(): break - - # Sleep between polling/update cycles - gevent_sleep(2) - + logger.trace(f"Threading polling: UUID {uuid} started processing") + with app.app_context(): + watch_check_update.send(app_context=app, watch_uuid=uuid) + time.sleep(0.01) # Small yield + + # Send updates for UUIDs that finished processing (but exit fast if shutdown requested) + if not exit_event.is_set(): + for uuid in no_longer_running: + if exit_event.is_set(): + break + logger.trace(f"Threading polling: UUID {uuid} finished processing") + with app.app_context(): + watch_check_update.send(app_context=app, watch_uuid=uuid) + time.sleep(0.01) # Small yield + + # Update tracking for next iteration + previous_running_uuids = running_uuids + + # Sleep between polling cycles, but check exit flag every 0.5 seconds for fast shutdown + for _ in range(20): # 20 * 0.5 = 10 seconds total + if exit_event.is_set(): + break + time.sleep(0.5) + except Exception as e: - logger.error(f"Error in queue update greenlet: {str(e)}") - # Sleep a bit to avoid flooding logs in case of persistent error - gevent_sleep(0.5) - - logger.info("Queue update greenlet stopped") + logger.error(f"Error in threading polling: {str(e)}") + # Even during error recovery, check for exit quickly + for _ in range(1): # 1 * 0.5 = 0.5 seconds + if exit_event.is_set(): + break + time.sleep(0.5) + + # Check if we're in pytest environment - if so, be more gentle with logging + import sys + in_pytest = "pytest" in 
sys.modules or "PYTEST_CURRENT_TEST" in os.environ + + if not in_pytest: + logger.info("Queue update thread stopped (threading mode)") def handle_watch_update(socketio, **kwargs): @@ -123,14 +154,12 @@ def handle_watch_update(socketio, **kwargs): datastore = kwargs.get('datastore') # Emit the watch update to all connected clients - from changedetectionio.flask_app import running_update_threads, update_q + from changedetectionio.flask_app import update_q from changedetectionio.flask_app import _jinja2_filter_datetime + from changedetectionio import worker_handler # Get list of watches that are currently running - running_uuids = [] - for t in running_update_threads: - if hasattr(t, 'current_uuid') and t.current_uuid: - running_uuids.append(t.current_uuid) + running_uuids = worker_handler.get_running_uuids() # Get list of watches in the queue queue_list = [] @@ -143,26 +172,30 @@ def handle_watch_update(socketio, **kwargs): error_texts = watch.compile_error_texts() # Create a simplified watch data object to send to clients + watch_uuid = watch.get('uuid') + watch_data = { - 'checking_now': True if watch.get('uuid') in running_uuids else False, + 'checking_now': True if watch_uuid in running_uuids else False, 'fetch_time': watch.get('fetch_time'), 'has_error': True if error_texts else False, 'last_changed': watch.get('last_changed'), 'last_checked': watch.get('last_checked'), 'error_text': error_texts, + 'history_n': watch.history_n, 'last_checked_text': _jinja2_filter_datetime(watch), - 'last_changed_text': timeago.format(int(watch['last_changed']), time.time()) if watch.history_n >= 2 and int(watch.get('last_changed', 0)) > 0 else 'Not yet', - 'queued': True if watch.get('uuid') in queue_list else False, + 'last_changed_text': timeago.format(int(watch['last_changed']), time.time()) if watch.history_n >= 2 and int( + watch.get('last_changed', 0)) > 0 else 'Not yet', + 'queued': True if watch_uuid in queue_list else False, 'paused': True if watch.get('paused') else 
False, 'notification_muted': True if watch.get('notification_muted') else False, 'unviewed': watch.has_unviewed, - 'uuid': watch.get('uuid'), + 'uuid': watch_uuid, 'event_timestamp': time.time() } - errored_count =0 - for uuid, watch in datastore.data['watching'].items(): - if watch.get('last_error'): + errored_count = 0 + for watch_uuid_iter, watch_iter in datastore.data['watching'].items(): + if watch_iter.get('last_error'): errored_count += 1 general_stats = { @@ -171,13 +204,13 @@ def handle_watch_update(socketio, **kwargs): } # Debug what's being emitted - #logger.debug(f"Emitting 'watch_update' event for {watch.get('uuid')}, data: {watch_data}") - + # logger.debug(f"Emitting 'watch_update' event for {watch.get('uuid')}, data: {watch_data}") + # Emit to all clients (no 'broadcast' parameter needed - it's the default behavior) socketio.emit("watch_update", {'watch': watch_data, 'general_stats': general_stats}) - - # Log after successful emit - #logger.info(f"Socket.IO: Emitted update for watch {watch.get('uuid')}, Checking now: {watch_data['checking_now']}") + + # Log after successful emit - use watch_data['uuid'] to avoid variable shadowing issues + logger.trace(f"Socket.IO: Emitted update for watch {watch_data['uuid']}, Checking now: {watch_data['checking_now']}") except Exception as e: logger.error(f"Socket.IO error in handle_watch_update: {str(e)}") @@ -185,35 +218,65 @@ def handle_watch_update(socketio, **kwargs): def init_socketio(app, datastore): """Initialize SocketIO with the main Flask app""" - # Use the threading async_mode instead of eventlet - # This avoids the need for monkey patching eventlet, - # Which leads to problems with async playwright etc - async_mode = 'gevent' - logger.info(f"Using {async_mode} mode for Socket.IO") + import platform + import sys + + # Platform-specific async_mode selection for better stability + system = platform.system().lower() + python_version = sys.version_info + + # Check for SocketIO mode configuration via 
environment variable + # Default is 'threading' for best cross-platform compatibility + socketio_mode = os.getenv('SOCKETIO_MODE', 'threading').lower() + + if socketio_mode == 'gevent': + # Use gevent mode (higher concurrency but platform limitations) + try: + import gevent + async_mode = 'gevent' + logger.info(f"SOCKETIO_MODE=gevent: Using {async_mode} mode for Socket.IO") + except ImportError: + async_mode = 'threading' + logger.warning(f"SOCKETIO_MODE=gevent but gevent not available, falling back to {async_mode} mode") + elif socketio_mode == 'threading': + # Use threading mode (default - best compatibility) + async_mode = 'threading' + logger.info(f"SOCKETIO_MODE=threading: Using {async_mode} mode for Socket.IO") + else: + # Invalid mode specified, use default + async_mode = 'threading' + logger.warning(f"Invalid SOCKETIO_MODE='{socketio_mode}', using default {async_mode} mode for Socket.IO") + + # Log platform info for debugging + logger.info(f"Platform: {system}, Python: {python_version.major}.{python_version.minor}, Socket.IO mode: {async_mode}") # Restrict SocketIO CORS to same origin by default, can be overridden with env var cors_origins = os.environ.get('SOCKETIO_CORS_ORIGINS', None) - + socketio = SocketIO(app, - async_mode=async_mode, - cors_allowed_origins=cors_origins, # None means same-origin only - logger=strtobool(os.getenv('SOCKETIO_LOGGING', 'False')), - engineio_logger=strtobool(os.getenv('SOCKETIO_LOGGING', 'False'))) + async_mode=async_mode, + cors_allowed_origins=cors_origins, # None means same-origin only + logger=strtobool(os.getenv('SOCKETIO_LOGGING', 'False')), + engineio_logger=strtobool(os.getenv('SOCKETIO_LOGGING', 'False'))) # Set up event handlers + logger.info("Socket.IO: Registering connect event handler") + @socketio.on('connect') def handle_connect(): """Handle client connection""" - from changedetectionio.auth_decorator import login_optionally_required + # logger.info("Socket.IO: CONNECT HANDLER CALLED - Starting connection 
process") from flask import request from flask_login import current_user from changedetectionio.flask_app import update_q # Access datastore from socketio datastore = socketio.datastore + # logger.info(f"Socket.IO: Current user authenticated: {current_user.is_authenticated if hasattr(current_user, 'is_authenticated') else 'No current_user'}") # Check if authentication is required and user is not authenticated has_password_enabled = datastore.data['settings']['application'].get('password') or os.getenv("SALTED_PASS", False) + # logger.info(f"Socket.IO: Password enabled: {has_password_enabled}") if has_password_enabled and not current_user.is_authenticated: logger.warning("Socket.IO: Rejecting unauthenticated connection") return False # Reject the connection @@ -231,6 +294,7 @@ def init_socketio(app, datastore): logger.info("Socket.IO: Client connected") + # logger.info("Socket.IO: Registering disconnect event handler") @socketio.on('disconnect') def handle_disconnect(): """Handle client disconnection""" @@ -239,45 +303,40 @@ def init_socketio(app, datastore): # Create a dedicated signal handler that will receive signals and emit them to clients signal_handler = SignalHandler(socketio, datastore) + # Register watch operation event handlers + from .events import register_watch_operation_handlers + register_watch_operation_handlers(socketio, datastore) + # Store the datastore reference on the socketio object for later use socketio.datastore = datastore - - # Create a stop event for our queue update thread using gevent Event - import gevent.event - stop_event = gevent.event.Event() - socketio.stop_event = stop_event - + # No stop event needed for threading mode - threads check app.config.exit directly + # Add a shutdown method to the socketio object def shutdown(): - """Shutdown the SocketIO server gracefully""" + """Shutdown the SocketIO server fast and aggressively""" try: - logger.info("Socket.IO: Shutting down server...") - - # Signal the queue update thread to stop 
- if hasattr(socketio, 'stop_event'): - socketio.stop_event.set() - logger.info("Socket.IO: Signaled queue update thread to stop") - - # Wait for the greenlet to exit (with timeout) + logger.info("Socket.IO: Fast shutdown initiated...") + + # For threading mode, give the thread a very short time to exit gracefully if hasattr(socketio, 'polling_emitter_thread'): - try: - # For gevent greenlets - socketio.polling_emitter_thread.join(timeout=5) - logger.info("Socket.IO: Queue update greenlet joined successfully") - except Exception as e: - logger.error(f"Error joining greenlet: {str(e)}") - logger.info("Socket.IO: Queue update greenlet did not exit in time") - - # Close any remaining client connections - #if hasattr(socketio, 'server'): - # socketio.server.disconnect() - logger.info("Socket.IO: Server shutdown complete") + if socketio.polling_emitter_thread.is_alive(): + logger.info("Socket.IO: Waiting 1 second for polling thread to stop...") + socketio.polling_emitter_thread.join(timeout=1.0) # Only 1 second timeout + if socketio.polling_emitter_thread.is_alive(): + logger.info("Socket.IO: Polling thread still running after timeout - continuing with shutdown") + else: + logger.info("Socket.IO: Polling thread stopped quickly") + else: + logger.info("Socket.IO: Polling thread already stopped") + + logger.info("Socket.IO: Fast shutdown complete") except Exception as e: logger.error(f"Socket.IO error during shutdown: {str(e)}") - + # Attach the shutdown method to the socketio object socketio.shutdown = shutdown logger.info("Socket.IO initialized and attached to main Flask app") + logger.info(f"Socket.IO: Registered event handlers: {socketio.handlers if hasattr(socketio, 'handlers') else 'No handlers found'}") return socketio diff --git a/changedetectionio/static/js/realtime.js b/changedetectionio/static/js/realtime.js index 1ff1d19e..5f6984c9 100644 --- a/changedetectionio/static/js/realtime.js +++ b/changedetectionio/static/js/realtime.js @@ -2,20 +2,20 @@ 
$(document).ready(function () { - function bindAjaxHandlerButtonsEvents() { - $('.ajax-op').on('click.ajaxHandlerNamespace', function (e) { + function bindSocketHandlerButtonsEvents(socket) { + $('.ajax-op').on('click.socketHandlerNamespace', function (e) { e.preventDefault(); - $.ajax({ - type: "POST", - url: ajax_toggle_url, - data: {'op': $(this).data('op'), 'uuid': $(this).closest('tr').data('watch-uuid')}, - statusCode: { - 400: function () { - // More than likely the CSRF token was lost when the server restarted - alert("There was a problem processing the request, please reload the page."); - } - } + const op = $(this).data('op'); + const uuid = $(this).closest('tr').data('watch-uuid'); + + console.log(`Socket.IO: Sending watch operation '${op}' for UUID ${uuid}`); + + // Emit the operation via Socket.IO + socket.emit('watch_operation', { + 'op': op, + 'uuid': uuid }); + return false; }); } @@ -38,7 +38,7 @@ $(document).ready(function () { socket.on('connect', function () { console.log('Socket.IO connected with path:', socketio_url); console.log('Socket transport:', socket.io.engine.transport.name); - bindAjaxHandlerButtonsEvents(); + bindSocketHandlerButtonsEvents(socket); }); socket.on('connect_error', function(error) { @@ -55,7 +55,7 @@ $(document).ready(function () { socket.on('disconnect', function (reason) { console.log('Socket.IO disconnected, reason:', reason); - $('.ajax-op').off('.ajaxHandlerNamespace') + $('.ajax-op').off('.socketHandlerNamespace') }); socket.on('queue_size', function (data) { @@ -63,6 +63,16 @@ $(document).ready(function () { // Update queue size display if implemented in the UI }) + // Listen for operation results + socket.on('operation_result', function (data) { + if (data.success) { + console.log(`Socket.IO: Operation '${data.operation}' completed successfully for UUID ${data.uuid}`); + } else { + console.error(`Socket.IO: Operation failed: ${data.error}`); + alert("There was a problem processing the request: " + data.error); + 
} + }); + // Listen for periodically emitted watch data console.log('Adding watch_update event listener'); @@ -87,6 +97,8 @@ $(document).ready(function () { $($watchRow).toggleClass('has-error', watch.has_error); $($watchRow).toggleClass('notification_muted', watch.notification_muted); $($watchRow).toggleClass('paused', watch.paused); + $($watchRow).toggleClass('single-history', watch.history_n === 1); + $($watchRow).toggleClass('multiple-history', watch.history_n >= 2); $('td.title-col .error-text', $watchRow).html(watch.error_text) diff --git a/changedetectionio/static/js/socket.io.min.js b/changedetectionio/static/js/socket.io.min.js index bb469c4e..7738dec1 100644 --- a/changedetectionio/static/js/socket.io.min.js +++ b/changedetectionio/static/js/socket.io.min.js @@ -1,7 +1,7 @@ /*! - * Socket.IO v4.6.0 - * (c) 2014-2023 Guillermo Rauch + * Socket.IO v3.1.3 + * (c) 2014-2021 Guillermo Rauch * Released under the MIT License. */ -!function(t,e){"object"==typeof exports&&"undefined"!=typeof module?module.exports=e():"function"==typeof define&&define.amd?define(e):(t="undefined"!=typeof globalThis?globalThis:t||self).io=e()}(this,(function(){"use strict";function t(e){return t="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t},t(e)}function e(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}function n(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}function r(t,e,r){return e&&n(t.prototype,e),r&&n(t,r),Object.defineProperty(t,"prototype",{writable:!1}),t}function i(){return i=Object.assign?Object.assign.bind():function(t){for(var e=1;e<arguments.length;e++){var n=arguments[e];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(t[r]=n[r])}return 
t},i.apply(this,arguments)}function o(t,e){if("function"!=typeof e&&null!==e)throw new TypeError("Super expression must either be null or a function");t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,writable:!0,configurable:!0}}),Object.defineProperty(t,"prototype",{writable:!1}),e&&a(t,e)}function s(t){return s=Object.setPrototypeOf?Object.getPrototypeOf.bind():function(t){return t.__proto__||Object.getPrototypeOf(t)},s(t)}function a(t,e){return a=Object.setPrototypeOf?Object.setPrototypeOf.bind():function(t,e){return t.__proto__=e,t},a(t,e)}function c(){if("undefined"==typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],(function(){}))),!0}catch(t){return!1}}function u(t,e,n){return u=c()?Reflect.construct.bind():function(t,e,n){var r=[null];r.push.apply(r,e);var i=new(Function.bind.apply(t,r));return n&&a(i,n.prototype),i},u.apply(null,arguments)}function h(t){var e="function"==typeof Map?new Map:void 0;return h=function(t){if(null===t||(n=t,-1===Function.toString.call(n).indexOf("[native code]")))return t;var n;if("function"!=typeof t)throw new TypeError("Super expression must either be null or a function");if(void 0!==e){if(e.has(t))return e.get(t);e.set(t,r)}function r(){return u(t,arguments,s(this).constructor)}return r.prototype=Object.create(t.prototype,{constructor:{value:r,enumerable:!1,writable:!0,configurable:!0}}),a(r,t)},h(t)}function f(t){if(void 0===t)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return t}function l(t,e){if(e&&("object"==typeof e||"function"==typeof e))return e;if(void 0!==e)throw new TypeError("Derived constructors may only return object or undefined");return f(t)}function p(t){var e=c();return function(){var n,r=s(t);if(e){var i=s(this).constructor;n=Reflect.construct(r,arguments,i)}else n=r.apply(this,arguments);return l(this,n)}}function 
d(t,e){for(;!Object.prototype.hasOwnProperty.call(t,e)&&null!==(t=s(t)););return t}function y(){return y="undefined"!=typeof Reflect&&Reflect.get?Reflect.get.bind():function(t,e,n){var r=d(t,e);if(r){var i=Object.getOwnPropertyDescriptor(r,e);return i.get?i.get.call(arguments.length<3?t:n):i.value}},y.apply(this,arguments)}function v(t,e){(null==e||e>t.length)&&(e=t.length);for(var n=0,r=new Array(e);n<e;n++)r[n]=t[n];return r}function g(t,e){var n="undefined"!=typeof Symbol&&t[Symbol.iterator]||t["@@iterator"];if(!n){if(Array.isArray(t)||(n=function(t,e){if(t){if("string"==typeof t)return v(t,e);var n=Object.prototype.toString.call(t).slice(8,-1);return"Object"===n&&t.constructor&&(n=t.constructor.name),"Map"===n||"Set"===n?Array.from(t):"Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)?v(t,e):void 0}}(t))||e&&t&&"number"==typeof t.length){n&&(t=n);var r=0,i=function(){};return{s:i,n:function(){return r>=t.length?{done:!0}:{done:!1,value:t[r++]}},e:function(t){throw t},f:i}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var o,s=!0,a=!1;return{s:function(){n=n.call(t)},n:function(){var t=n.next();return s=t.done,t},e:function(t){a=!0,o=t},f:function(){try{s||null==n.return||n.return()}finally{if(a)throw o}}}}var m=Object.create(null);m.open="0",m.close="1",m.ping="2",m.pong="3",m.message="4",m.upgrade="5",m.noop="6";var k=Object.create(null);Object.keys(m).forEach((function(t){k[m[t]]=t}));for(var b={type:"error",data:"parser error"},w="function"==typeof Blob||"undefined"!=typeof Blob&&"[object BlobConstructor]"===Object.prototype.toString.call(Blob),_="function"==typeof ArrayBuffer,E=function(t,e,n){var r,i=t.type,o=t.data;return w&&o instanceof Blob?e?n(o):O(o,n):_&&(o instanceof ArrayBuffer||(r=o,"function"==typeof ArrayBuffer.isView?ArrayBuffer.isView(r):r&&r.buffer instanceof ArrayBuffer))?e?n(o):O(new 
Blob([o]),n):n(m[i]+(o||""))},O=function(t,e){var n=new FileReader;return n.onload=function(){var t=n.result.split(",")[1];e("b"+t)},n.readAsDataURL(t)},A="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",R="undefined"==typeof Uint8Array?[]:new Uint8Array(256),T=0;T<A.length;T++)R[A.charCodeAt(T)]=T;var C="function"==typeof ArrayBuffer,B=function(t,e){if("string"!=typeof t)return{type:"message",data:N(t,e)};var n=t.charAt(0);return"b"===n?{type:"message",data:S(t.substring(1),e)}:k[n]?t.length>1?{type:k[n],data:t.substring(1)}:{type:k[n]}:b},S=function(t,e){if(C){var n=function(t){var e,n,r,i,o,s=.75*t.length,a=t.length,c=0;"="===t[t.length-1]&&(s--,"="===t[t.length-2]&&s--);var u=new ArrayBuffer(s),h=new Uint8Array(u);for(e=0;e<a;e+=4)n=R[t.charCodeAt(e)],r=R[t.charCodeAt(e+1)],i=R[t.charCodeAt(e+2)],o=R[t.charCodeAt(e+3)],h[c++]=n<<2|r>>4,h[c++]=(15&r)<<4|i>>2,h[c++]=(3&i)<<6|63&o;return u}(t);return N(n,e)}return{base64:!0,data:t}},N=function(t,e){return"blob"===e&&t instanceof ArrayBuffer?new Blob([t]):t},x=String.fromCharCode(30);function L(t){if(t)return function(t){for(var e in L.prototype)t[e]=L.prototype[e];return t}(t)}L.prototype.on=L.prototype.addEventListener=function(t,e){return this._callbacks=this._callbacks||{},(this._callbacks["$"+t]=this._callbacks["$"+t]||[]).push(e),this},L.prototype.once=function(t,e){function n(){this.off(t,n),e.apply(this,arguments)}return n.fn=e,this.on(t,n),this},L.prototype.off=L.prototype.removeListener=L.prototype.removeAllListeners=L.prototype.removeEventListener=function(t,e){if(this._callbacks=this._callbacks||{},0==arguments.length)return this._callbacks={},this;var n,r=this._callbacks["$"+t];if(!r)return this;if(1==arguments.length)return delete this._callbacks["$"+t],this;for(var i=0;i<r.length;i++)if((n=r[i])===e||n.fn===e){r.splice(i,1);break}return 0===r.length&&delete this._callbacks["$"+t],this},L.prototype.emit=function(t){this._callbacks=this._callbacks||{};for(var e=new 
Array(arguments.length-1),n=this._callbacks["$"+t],r=1;r<arguments.length;r++)e[r-1]=arguments[r];if(n){r=0;for(var i=(n=n.slice(0)).length;r<i;++r)n[r].apply(this,e)}return this},L.prototype.emitReserved=L.prototype.emit,L.prototype.listeners=function(t){return this._callbacks=this._callbacks||{},this._callbacks["$"+t]||[]},L.prototype.hasListeners=function(t){return!!this.listeners(t).length};var P="undefined"!=typeof self?self:"undefined"!=typeof window?window:Function("return this")();function j(t){for(var e=arguments.length,n=new Array(e>1?e-1:0),r=1;r<e;r++)n[r-1]=arguments[r];return n.reduce((function(e,n){return t.hasOwnProperty(n)&&(e[n]=t[n]),e}),{})}var q=P.setTimeout,I=P.clearTimeout;function D(t,e){e.useNativeTimers?(t.setTimeoutFn=q.bind(P),t.clearTimeoutFn=I.bind(P)):(t.setTimeoutFn=P.setTimeout.bind(P),t.clearTimeoutFn=P.clearTimeout.bind(P))}var F,M=function(t){o(i,t);var n=p(i);function i(t,r,o){var s;return e(this,i),(s=n.call(this,t)).description=r,s.context=o,s.type="TransportError",s}return r(i)}(h(Error)),U=function(t){o(i,t);var n=p(i);function i(t){var r;return e(this,i),(r=n.call(this)).writable=!1,D(f(r),t),r.opts=t,r.query=t.query,r.socket=t.socket,r}return r(i,[{key:"onError",value:function(t,e,n){return y(s(i.prototype),"emitReserved",this).call(this,"error",new M(t,e,n)),this}},{key:"open",value:function(){return this.readyState="opening",this.doOpen(),this}},{key:"close",value:function(){return"opening"!==this.readyState&&"open"!==this.readyState||(this.doClose(),this.onClose()),this}},{key:"send",value:function(t){"open"===this.readyState&&this.write(t)}},{key:"onOpen",value:function(){this.readyState="open",this.writable=!0,y(s(i.prototype),"emitReserved",this).call(this,"open")}},{key:"onData",value:function(t){var 
e=B(t,this.socket.binaryType);this.onPacket(e)}},{key:"onPacket",value:function(t){y(s(i.prototype),"emitReserved",this).call(this,"packet",t)}},{key:"onClose",value:function(t){this.readyState="closed",y(s(i.prototype),"emitReserved",this).call(this,"close",t)}},{key:"pause",value:function(t){}}]),i}(L),V="0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-_".split(""),H={},K=0,Y=0;function z(t){var e="";do{e=V[t%64]+e,t=Math.floor(t/64)}while(t>0);return e}function W(){var t=z(+new Date);return t!==F?(K=0,F=t):t+"."+z(K++)}for(;Y<64;Y++)H[V[Y]]=Y;function $(t){var e="";for(var n in t)t.hasOwnProperty(n)&&(e.length&&(e+="&"),e+=encodeURIComponent(n)+"="+encodeURIComponent(t[n]));return e}function J(t){for(var e={},n=t.split("&"),r=0,i=n.length;r<i;r++){var o=n[r].split("=");e[decodeURIComponent(o[0])]=decodeURIComponent(o[1])}return e}var Q=!1;try{Q="undefined"!=typeof XMLHttpRequest&&"withCredentials"in new XMLHttpRequest}catch(t){}var X=Q;function G(t){var e=t.xdomain;try{if("undefined"!=typeof XMLHttpRequest&&(!e||X))return new XMLHttpRequest}catch(t){}if(!e)try{return new(P[["Active"].concat("Object").join("X")])("Microsoft.XMLHTTP")}catch(t){}}function Z(){}var tt=null!=new G({xdomain:!1}).responseType,et=function(t){o(s,t);var n=p(s);function s(t){var r;if(e(this,s),(r=n.call(this,t)).polling=!1,"undefined"!=typeof location){var i="https:"===location.protocol,o=location.port;o||(o=i?"443":"80"),r.xd="undefined"!=typeof location&&t.hostname!==location.hostname||o!==t.port,r.xs=t.secure!==i}var a=t&&t.forceBase64;return r.supportsBinary=tt&&!a,r}return r(s,[{key:"name",get:function(){return"polling"}},{key:"doOpen",value:function(){this.poll()}},{key:"pause",value:function(t){var e=this;this.readyState="pausing";var n=function(){e.readyState="paused",t()};if(this.polling||!this.writable){var r=0;this.polling&&(r++,this.once("pollComplete",(function(){--r||n()}))),this.writable||(r++,this.once("drain",(function(){--r||n()})))}else 
n()}},{key:"poll",value:function(){this.polling=!0,this.doPoll(),this.emitReserved("poll")}},{key:"onData",value:function(t){var e=this;(function(t,e){for(var n=t.split(x),r=[],i=0;i<n.length;i++){var o=B(n[i],e);if(r.push(o),"error"===o.type)break}return r})(t,this.socket.binaryType).forEach((function(t){if("opening"===e.readyState&&"open"===t.type&&e.onOpen(),"close"===t.type)return e.onClose({description:"transport closed by the server"}),!1;e.onPacket(t)})),"closed"!==this.readyState&&(this.polling=!1,this.emitReserved("pollComplete"),"open"===this.readyState&&this.poll())}},{key:"doClose",value:function(){var t=this,e=function(){t.write([{type:"close"}])};"open"===this.readyState?e():this.once("open",e)}},{key:"write",value:function(t){var e=this;this.writable=!1,function(t,e){var n=t.length,r=new Array(n),i=0;t.forEach((function(t,o){E(t,!1,(function(t){r[o]=t,++i===n&&e(r.join(x))}))}))}(t,(function(t){e.doWrite(t,(function(){e.writable=!0,e.emitReserved("drain")}))}))}},{key:"uri",value:function(){var t=this.query||{},e=this.opts.secure?"https":"http",n="";!1!==this.opts.timestampRequests&&(t[this.opts.timestampParam]=W()),this.supportsBinary||t.sid||(t.b64=1),this.opts.port&&("https"===e&&443!==Number(this.opts.port)||"http"===e&&80!==Number(this.opts.port))&&(n=":"+this.opts.port);var r=$(t);return e+"://"+(-1!==this.opts.hostname.indexOf(":")?"["+this.opts.hostname+"]":this.opts.hostname)+n+this.opts.path+(r.length?"?"+r:"")}},{key:"request",value:function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return i(t,{xd:this.xd,xs:this.xs},this.opts),new nt(this.uri(),t)}},{key:"doWrite",value:function(t,e){var n=this,r=this.request({method:"POST",data:t});r.on("success",e),r.on("error",(function(t,e){n.onError("xhr post error",t,e)}))}},{key:"doPoll",value:function(){var t=this,e=this.request();e.on("data",this.onData.bind(this)),e.on("error",(function(e,n){t.onError("xhr poll 
error",e,n)})),this.pollXhr=e}}]),s}(U),nt=function(t){o(i,t);var n=p(i);function i(t,r){var o;return e(this,i),D(f(o=n.call(this)),r),o.opts=r,o.method=r.method||"GET",o.uri=t,o.async=!1!==r.async,o.data=void 0!==r.data?r.data:null,o.create(),o}return r(i,[{key:"create",value:function(){var t=this,e=j(this.opts,"agent","pfx","key","passphrase","cert","ca","ciphers","rejectUnauthorized","autoUnref");e.xdomain=!!this.opts.xd,e.xscheme=!!this.opts.xs;var n=this.xhr=new G(e);try{n.open(this.method,this.uri,this.async);try{if(this.opts.extraHeaders)for(var r in n.setDisableHeaderCheck&&n.setDisableHeaderCheck(!0),this.opts.extraHeaders)this.opts.extraHeaders.hasOwnProperty(r)&&n.setRequestHeader(r,this.opts.extraHeaders[r])}catch(t){}if("POST"===this.method)try{n.setRequestHeader("Content-type","text/plain;charset=UTF-8")}catch(t){}try{n.setRequestHeader("Accept","*/*")}catch(t){}"withCredentials"in n&&(n.withCredentials=this.opts.withCredentials),this.opts.requestTimeout&&(n.timeout=this.opts.requestTimeout),n.onreadystatechange=function(){4===n.readyState&&(200===n.status||1223===n.status?t.onLoad():t.setTimeoutFn((function(){t.onError("number"==typeof n.status?n.status:0)}),0))},n.send(this.data)}catch(e){return void this.setTimeoutFn((function(){t.onError(e)}),0)}"undefined"!=typeof document&&(this.index=i.requestsCount++,i.requests[this.index]=this)}},{key:"onError",value:function(t){this.emitReserved("error",t,this.xhr),this.cleanup(!0)}},{key:"cleanup",value:function(t){if(void 0!==this.xhr&&null!==this.xhr){if(this.xhr.onreadystatechange=Z,t)try{this.xhr.abort()}catch(t){}"undefined"!=typeof document&&delete i.requests[this.index],this.xhr=null}}},{key:"onLoad",value:function(){var t=this.xhr.responseText;null!==t&&(this.emitReserved("data",t),this.emitReserved("success"),this.cleanup())}},{key:"abort",value:function(){this.cleanup()}}]),i}(L);if(nt.requestsCount=0,nt.requests={},"undefined"!=typeof document)if("function"==typeof 
attachEvent)attachEvent("onunload",rt);else if("function"==typeof addEventListener){addEventListener("onpagehide"in P?"pagehide":"unload",rt,!1)}function rt(){for(var t in nt.requests)nt.requests.hasOwnProperty(t)&&nt.requests[t].abort()}var it="function"==typeof Promise&&"function"==typeof Promise.resolve?function(t){return Promise.resolve().then(t)}:function(t,e){return e(t,0)},ot=P.WebSocket||P.MozWebSocket,st="undefined"!=typeof navigator&&"string"==typeof navigator.product&&"reactnative"===navigator.product.toLowerCase(),at=function(t){o(i,t);var n=p(i);function i(t){var r;return e(this,i),(r=n.call(this,t)).supportsBinary=!t.forceBase64,r}return r(i,[{key:"name",get:function(){return"websocket"}},{key:"doOpen",value:function(){if(this.check()){var t=this.uri(),e=this.opts.protocols,n=st?{}:j(this.opts,"agent","perMessageDeflate","pfx","key","passphrase","cert","ca","ciphers","rejectUnauthorized","localAddress","protocolVersion","origin","maxPayload","family","checkServerIdentity");this.opts.extraHeaders&&(n.headers=this.opts.extraHeaders);try{this.ws=st?new ot(t,e,n):e?new ot(t,e):new ot(t)}catch(t){return this.emitReserved("error",t)}this.ws.binaryType=this.socket.binaryType||"arraybuffer",this.addEventListeners()}}},{key:"addEventListeners",value:function(){var t=this;this.ws.onopen=function(){t.opts.autoUnref&&t.ws._socket.unref(),t.onOpen()},this.ws.onclose=function(e){return t.onClose({description:"websocket connection closed",context:e})},this.ws.onmessage=function(e){return t.onData(e.data)},this.ws.onerror=function(e){return t.onError("websocket error",e)}}},{key:"write",value:function(t){var e=this;this.writable=!1;for(var n=function(n){var r=t[n],i=n===t.length-1;E(r,e.supportsBinary,(function(t){try{e.ws.send(t)}catch(t){}i&&it((function(){e.writable=!0,e.emitReserved("drain")}),e.setTimeoutFn)}))},r=0;r<t.length;r++)n(r)}},{key:"doClose",value:function(){void 0!==this.ws&&(this.ws.close(),this.ws=null)}},{key:"uri",value:function(){var 
t=this.query||{},e=this.opts.secure?"wss":"ws",n="";this.opts.port&&("wss"===e&&443!==Number(this.opts.port)||"ws"===e&&80!==Number(this.opts.port))&&(n=":"+this.opts.port),this.opts.timestampRequests&&(t[this.opts.timestampParam]=W()),this.supportsBinary||(t.b64=1);var r=$(t);return e+"://"+(-1!==this.opts.hostname.indexOf(":")?"["+this.opts.hostname+"]":this.opts.hostname)+n+this.opts.path+(r.length?"?"+r:"")}},{key:"check",value:function(){return!!ot}}]),i}(U),ct={websocket:at,polling:et},ut=/^(?:(?![^:@\/?#]+:[^:@\/]*@)(http|https|ws|wss):\/\/)?((?:(([^:@\/?#]*)(?::([^:@\/?#]*))?)?@)?((?:[a-f0-9]{0,4}:){2,7}[a-f0-9]{0,4}|[^:\/?#]*)(?::(\d*))?)(((\/(?:[^?#](?![^?#\/]*\.[^?#\/.]+(?:[?#]|$)))*\/?)?([^?#\/]*))(?:\?([^#]*))?(?:#(.*))?)/,ht=["source","protocol","authority","userInfo","user","password","host","port","relative","path","directory","file","query","anchor"];function ft(t){var e=t,n=t.indexOf("["),r=t.indexOf("]");-1!=n&&-1!=r&&(t=t.substring(0,n)+t.substring(n,r).replace(/:/g,";")+t.substring(r,t.length));for(var i,o,s=ut.exec(t||""),a={},c=14;c--;)a[ht[c]]=s[c]||"";return-1!=n&&-1!=r&&(a.source=e,a.host=a.host.substring(1,a.host.length-1).replace(/;/g,":"),a.authority=a.authority.replace("[","").replace("]","").replace(/;/g,":"),a.ipv6uri=!0),a.pathNames=function(t,e){var n=/\/{2,9}/g,r=e.replace(n,"/").split("/");"/"!=e.slice(0,1)&&0!==e.length||r.splice(0,1);"/"==e.slice(-1)&&r.splice(r.length-1,1);return r}(0,a.path),a.queryKey=(i=a.query,o={},i.replace(/(?:^|&)([^&=]*)=?([^&]*)/g,(function(t,e,n){e&&(o[e]=n)})),o),a}var lt=function(n){o(a,n);var s=p(a);function a(n){var r,o=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return e(this,a),(r=s.call(this)).writeBuffer=[],n&&"object"===t(n)&&(o=n,n=null),n?(n=ft(n),o.hostname=n.host,o.secure="https"===n.protocol||"wss"===n.protocol,o.port=n.port,n.query&&(o.query=n.query)):o.host&&(o.hostname=ft(o.host).host),D(f(r),o),r.secure=null!=o.secure?o.secure:"undefined"!=typeof 
location&&"https:"===location.protocol,o.hostname&&!o.port&&(o.port=r.secure?"443":"80"),r.hostname=o.hostname||("undefined"!=typeof location?location.hostname:"localhost"),r.port=o.port||("undefined"!=typeof location&&location.port?location.port:r.secure?"443":"80"),r.transports=o.transports||["polling","websocket"],r.writeBuffer=[],r.prevBufferLen=0,r.opts=i({path:"/engine.io",agent:!1,withCredentials:!1,upgrade:!0,timestampParam:"t",rememberUpgrade:!1,addTrailingSlash:!0,rejectUnauthorized:!0,perMessageDeflate:{threshold:1024},transportOptions:{},closeOnBeforeunload:!0},o),r.opts.path=r.opts.path.replace(/\/$/,"")+(r.opts.addTrailingSlash?"/":""),"string"==typeof r.opts.query&&(r.opts.query=J(r.opts.query)),r.id=null,r.upgrades=null,r.pingInterval=null,r.pingTimeout=null,r.pingTimeoutTimer=null,"function"==typeof addEventListener&&(r.opts.closeOnBeforeunload&&(r.beforeunloadEventListener=function(){r.transport&&(r.transport.removeAllListeners(),r.transport.close())},addEventListener("beforeunload",r.beforeunloadEventListener,!1)),"localhost"!==r.hostname&&(r.offlineEventListener=function(){r.onClose("transport close",{description:"network connection lost"})},addEventListener("offline",r.offlineEventListener,!1))),r.open(),r}return r(a,[{key:"createTransport",value:function(t){var e=i({},this.opts.query);e.EIO=4,e.transport=t,this.id&&(e.sid=this.id);var n=i({},this.opts.transportOptions[t],this.opts,{query:e,socket:this,hostname:this.hostname,secure:this.secure,port:this.port});return new ct[t](n)}},{key:"open",value:function(){var t,e=this;if(this.opts.rememberUpgrade&&a.priorWebsocketSuccess&&-1!==this.transports.indexOf("websocket"))t="websocket";else{if(0===this.transports.length)return void this.setTimeoutFn((function(){e.emitReserved("error","No transports available")}),0);t=this.transports[0]}this.readyState="opening";try{t=this.createTransport(t)}catch(t){return this.transports.shift(),void 
this.open()}t.open(),this.setTransport(t)}},{key:"setTransport",value:function(t){var e=this;this.transport&&this.transport.removeAllListeners(),this.transport=t,t.on("drain",this.onDrain.bind(this)).on("packet",this.onPacket.bind(this)).on("error",this.onError.bind(this)).on("close",(function(t){return e.onClose("transport close",t)}))}},{key:"probe",value:function(t){var e=this,n=this.createTransport(t),r=!1;a.priorWebsocketSuccess=!1;var i=function(){r||(n.send([{type:"ping",data:"probe"}]),n.once("packet",(function(t){if(!r)if("pong"===t.type&&"probe"===t.data){if(e.upgrading=!0,e.emitReserved("upgrading",n),!n)return;a.priorWebsocketSuccess="websocket"===n.name,e.transport.pause((function(){r||"closed"!==e.readyState&&(f(),e.setTransport(n),n.send([{type:"upgrade"}]),e.emitReserved("upgrade",n),n=null,e.upgrading=!1,e.flush())}))}else{var i=new Error("probe error");i.transport=n.name,e.emitReserved("upgradeError",i)}})))};function o(){r||(r=!0,f(),n.close(),n=null)}var s=function(t){var r=new Error("probe error: "+t);r.transport=n.name,o(),e.emitReserved("upgradeError",r)};function c(){s("transport closed")}function u(){s("socket closed")}function h(t){n&&t.name!==n.name&&o()}var f=function(){n.removeListener("open",i),n.removeListener("error",s),n.removeListener("close",c),e.off("close",u),e.off("upgrading",h)};n.once("open",i),n.once("error",s),n.once("close",c),this.once("close",u),this.once("upgrading",h),n.open()}},{key:"onOpen",value:function(){if(this.readyState="open",a.priorWebsocketSuccess="websocket"===this.transport.name,this.emitReserved("open"),this.flush(),"open"===this.readyState&&this.opts.upgrade)for(var 
t=0,e=this.upgrades.length;t<e;t++)this.probe(this.upgrades[t])}},{key:"onPacket",value:function(t){if("opening"===this.readyState||"open"===this.readyState||"closing"===this.readyState)switch(this.emitReserved("packet",t),this.emitReserved("heartbeat"),t.type){case"open":this.onHandshake(JSON.parse(t.data));break;case"ping":this.resetPingTimeout(),this.sendPacket("pong"),this.emitReserved("ping"),this.emitReserved("pong");break;case"error":var e=new Error("server error");e.code=t.data,this.onError(e);break;case"message":this.emitReserved("data",t.data),this.emitReserved("message",t.data)}}},{key:"onHandshake",value:function(t){this.emitReserved("handshake",t),this.id=t.sid,this.transport.query.sid=t.sid,this.upgrades=this.filterUpgrades(t.upgrades),this.pingInterval=t.pingInterval,this.pingTimeout=t.pingTimeout,this.maxPayload=t.maxPayload,this.onOpen(),"closed"!==this.readyState&&this.resetPingTimeout()}},{key:"resetPingTimeout",value:function(){var t=this;this.clearTimeoutFn(this.pingTimeoutTimer),this.pingTimeoutTimer=this.setTimeoutFn((function(){t.onClose("ping timeout")}),this.pingInterval+this.pingTimeout),this.opts.autoUnref&&this.pingTimeoutTimer.unref()}},{key:"onDrain",value:function(){this.writeBuffer.splice(0,this.prevBufferLen),this.prevBufferLen=0,0===this.writeBuffer.length?this.emitReserved("drain"):this.flush()}},{key:"flush",value:function(){if("closed"!==this.readyState&&this.transport.writable&&!this.upgrading&&this.writeBuffer.length){var t=this.getWritablePackets();this.transport.send(t),this.prevBufferLen=t.length,this.emitReserved("flush")}}},{key:"getWritablePackets",value:function(){if(!(this.maxPayload&&"polling"===this.transport.name&&this.writeBuffer.length>1))return this.writeBuffer;for(var t,e=1,n=0;n<this.writeBuffer.length;n++){var r=this.writeBuffer[n].data;if(r&&(e+="string"==typeof(t=r)?function(t){for(var e=0,n=0,r=0,i=t.length;r<i;r++)(e=t.charCodeAt(r))<128?n+=1:e<2048?n+=2:e<55296||e>=57344?n+=3:(r++,n+=4);return 
n}(t):Math.ceil(1.33*(t.byteLength||t.size))),n>0&&e>this.maxPayload)return this.writeBuffer.slice(0,n);e+=2}return this.writeBuffer}},{key:"write",value:function(t,e,n){return this.sendPacket("message",t,e,n),this}},{key:"send",value:function(t,e,n){return this.sendPacket("message",t,e,n),this}},{key:"sendPacket",value:function(t,e,n,r){if("function"==typeof e&&(r=e,e=void 0),"function"==typeof n&&(r=n,n=null),"closing"!==this.readyState&&"closed"!==this.readyState){(n=n||{}).compress=!1!==n.compress;var i={type:t,data:e,options:n};this.emitReserved("packetCreate",i),this.writeBuffer.push(i),r&&this.once("flush",r),this.flush()}}},{key:"close",value:function(){var t=this,e=function(){t.onClose("forced close"),t.transport.close()},n=function n(){t.off("upgrade",n),t.off("upgradeError",n),e()},r=function(){t.once("upgrade",n),t.once("upgradeError",n)};return"opening"!==this.readyState&&"open"!==this.readyState||(this.readyState="closing",this.writeBuffer.length?this.once("drain",(function(){t.upgrading?r():e()})):this.upgrading?r():e()),this}},{key:"onError",value:function(t){a.priorWebsocketSuccess=!1,this.emitReserved("error",t),this.onClose("transport error",t)}},{key:"onClose",value:function(t,e){"opening"!==this.readyState&&"open"!==this.readyState&&"closing"!==this.readyState||(this.clearTimeoutFn(this.pingTimeoutTimer),this.transport.removeAllListeners("close"),this.transport.close(),this.transport.removeAllListeners(),"function"==typeof removeEventListener&&(removeEventListener("beforeunload",this.beforeunloadEventListener,!1),removeEventListener("offline",this.offlineEventListener,!1)),this.readyState="closed",this.id=null,this.emitReserved("close",t,e),this.writeBuffer=[],this.prevBufferLen=0)}},{key:"filterUpgrades",value:function(t){for(var e=[],n=0,r=t.length;n<r;n++)~this.transports.indexOf(t[n])&&e.push(t[n]);return e}}]),a}(L);lt.protocol=4,lt.protocol;var pt="function"==typeof ArrayBuffer,dt=Object.prototype.toString,yt="function"==typeof 
Blob||"undefined"!=typeof Blob&&"[object BlobConstructor]"===dt.call(Blob),vt="function"==typeof File||"undefined"!=typeof File&&"[object FileConstructor]"===dt.call(File);function gt(t){return pt&&(t instanceof ArrayBuffer||function(t){return"function"==typeof ArrayBuffer.isView?ArrayBuffer.isView(t):t.buffer instanceof ArrayBuffer}(t))||yt&&t instanceof Blob||vt&&t instanceof File}function mt(e,n){if(!e||"object"!==t(e))return!1;if(Array.isArray(e)){for(var r=0,i=e.length;r<i;r++)if(mt(e[r]))return!0;return!1}if(gt(e))return!0;if(e.toJSON&&"function"==typeof e.toJSON&&1===arguments.length)return mt(e.toJSON(),!0);for(var o in e)if(Object.prototype.hasOwnProperty.call(e,o)&&mt(e[o]))return!0;return!1}function kt(t){var e=[],n=t.data,r=t;return r.data=bt(n,e),r.attachments=e.length,{packet:r,buffers:e}}function bt(e,n){if(!e)return e;if(gt(e)){var r={_placeholder:!0,num:n.length};return n.push(e),r}if(Array.isArray(e)){for(var i=new Array(e.length),o=0;o<e.length;o++)i[o]=bt(e[o],n);return i}if("object"===t(e)&&!(e instanceof Date)){var s={};for(var a in e)Object.prototype.hasOwnProperty.call(e,a)&&(s[a]=bt(e[a],n));return s}return e}function wt(t,e){return t.data=_t(t.data,e),delete t.attachments,t}function _t(e,n){if(!e)return e;if(e&&!0===e._placeholder){if("number"==typeof e.num&&e.num>=0&&e.num<n.length)return n[e.num];throw new Error("illegal attachments")}if(Array.isArray(e))for(var r=0;r<e.length;r++)e[r]=_t(e[r],n);else if("object"===t(e))for(var i in e)Object.prototype.hasOwnProperty.call(e,i)&&(e[i]=_t(e[i],n));return e}var Et;!function(t){t[t.CONNECT=0]="CONNECT",t[t.DISCONNECT=1]="DISCONNECT",t[t.EVENT=2]="EVENT",t[t.ACK=3]="ACK",t[t.CONNECT_ERROR=4]="CONNECT_ERROR",t[t.BINARY_EVENT=5]="BINARY_EVENT",t[t.BINARY_ACK=6]="BINARY_ACK"}(Et||(Et={}));var Ot=function(){function t(n){e(this,t),this.replacer=n}return r(t,[{key:"encode",value:function(t){return 
t.type!==Et.EVENT&&t.type!==Et.ACK||!mt(t)?[this.encodeAsString(t)]:this.encodeAsBinary({type:t.type===Et.EVENT?Et.BINARY_EVENT:Et.BINARY_ACK,nsp:t.nsp,data:t.data,id:t.id})}},{key:"encodeAsString",value:function(t){var e=""+t.type;return t.type!==Et.BINARY_EVENT&&t.type!==Et.BINARY_ACK||(e+=t.attachments+"-"),t.nsp&&"/"!==t.nsp&&(e+=t.nsp+","),null!=t.id&&(e+=t.id),null!=t.data&&(e+=JSON.stringify(t.data,this.replacer)),e}},{key:"encodeAsBinary",value:function(t){var e=kt(t),n=this.encodeAsString(e.packet),r=e.buffers;return r.unshift(n),r}}]),t}(),At=function(n){o(a,n);var i=p(a);function a(t){var n;return e(this,a),(n=i.call(this)).reviver=t,n}return r(a,[{key:"add",value:function(t){var e;if("string"==typeof t){if(this.reconstructor)throw new Error("got plaintext data when reconstructing a packet");var n=(e=this.decodeString(t)).type===Et.BINARY_EVENT;n||e.type===Et.BINARY_ACK?(e.type=n?Et.EVENT:Et.ACK,this.reconstructor=new Rt(e),0===e.attachments&&y(s(a.prototype),"emitReserved",this).call(this,"decoded",e)):y(s(a.prototype),"emitReserved",this).call(this,"decoded",e)}else{if(!gt(t)&&!t.base64)throw new Error("Unknown type: "+t);if(!this.reconstructor)throw new Error("got binary data when not reconstructing a packet");(e=this.reconstructor.takeBinaryData(t))&&(this.reconstructor=null,y(s(a.prototype),"emitReserved",this).call(this,"decoded",e))}}},{key:"decodeString",value:function(t){var e=0,n={type:Number(t.charAt(0))};if(void 0===Et[n.type])throw new Error("unknown packet type "+n.type);if(n.type===Et.BINARY_EVENT||n.type===Et.BINARY_ACK){for(var r=e+1;"-"!==t.charAt(++e)&&e!=t.length;);var i=t.substring(r,e);if(i!=Number(i)||"-"!==t.charAt(e))throw new Error("Illegal attachments");n.attachments=Number(i)}if("/"===t.charAt(e+1)){for(var o=e+1;++e;){if(","===t.charAt(e))break;if(e===t.length)break}n.nsp=t.substring(o,e)}else n.nsp="/";var s=t.charAt(e+1);if(""!==s&&Number(s)==s){for(var c=e+1;++e;){var 
u=t.charAt(e);if(null==u||Number(u)!=u){--e;break}if(e===t.length)break}n.id=Number(t.substring(c,e+1))}if(t.charAt(++e)){var h=this.tryParse(t.substr(e));if(!a.isPayloadValid(n.type,h))throw new Error("invalid payload");n.data=h}return n}},{key:"tryParse",value:function(t){try{return JSON.parse(t,this.reviver)}catch(t){return!1}}},{key:"destroy",value:function(){this.reconstructor&&(this.reconstructor.finishedReconstruction(),this.reconstructor=null)}}],[{key:"isPayloadValid",value:function(e,n){switch(e){case Et.CONNECT:return"object"===t(n);case Et.DISCONNECT:return void 0===n;case Et.CONNECT_ERROR:return"string"==typeof n||"object"===t(n);case Et.EVENT:case Et.BINARY_EVENT:return Array.isArray(n)&&n.length>0;case Et.ACK:case Et.BINARY_ACK:return Array.isArray(n)}}}]),a}(L),Rt=function(){function t(n){e(this,t),this.packet=n,this.buffers=[],this.reconPack=n}return r(t,[{key:"takeBinaryData",value:function(t){if(this.buffers.push(t),this.buffers.length===this.reconPack.attachments){var e=wt(this.reconPack,this.buffers);return this.finishedReconstruction(),e}return null}},{key:"finishedReconstruction",value:function(){this.reconPack=null,this.buffers=[]}}]),t}(),Tt=Object.freeze({__proto__:null,protocol:5,get PacketType(){return Et},Encoder:Ot,Decoder:At});function Ct(t,e,n){return t.on(e,n),function(){t.off(e,n)}}var Bt=Object.freeze({connect:1,connect_error:1,disconnect:1,disconnecting:1,newListener:1,removeListener:1}),St=function(t){o(a,t);var n=p(a);function a(t,r,o){var s;return e(this,a),(s=n.call(this)).connected=!1,s.recovered=!1,s.receiveBuffer=[],s.sendBuffer=[],s._queue=[],s.ids=0,s.acks={},s.flags={},s.io=t,s.nsp=r,o&&o.auth&&(s.auth=o.auth),s._opts=i({},o),s.io._autoConnect&&s.open(),s}return r(a,[{key:"disconnected",get:function(){return!this.connected}},{key:"subEvents",value:function(){if(!this.subs){var 
t=this.io;this.subs=[Ct(t,"open",this.onopen.bind(this)),Ct(t,"packet",this.onpacket.bind(this)),Ct(t,"error",this.onerror.bind(this)),Ct(t,"close",this.onclose.bind(this))]}}},{key:"active",get:function(){return!!this.subs}},{key:"connect",value:function(){return this.connected||(this.subEvents(),this.io._reconnecting||this.io.open(),"open"===this.io._readyState&&this.onopen()),this}},{key:"open",value:function(){return this.connect()}},{key:"send",value:function(){for(var t=arguments.length,e=new Array(t),n=0;n<t;n++)e[n]=arguments[n];return e.unshift("message"),this.emit.apply(this,e),this}},{key:"emit",value:function(t){if(Bt.hasOwnProperty(t))throw new Error('"'+t.toString()+'" is a reserved event name');for(var e=arguments.length,n=new Array(e>1?e-1:0),r=1;r<e;r++)n[r-1]=arguments[r];if(n.unshift(t),this._opts.retries&&!this.flags.fromQueue&&!this.flags.volatile)return this._addToQueue(n),this;var i={type:Et.EVENT,data:n,options:{}};if(i.options.compress=!1!==this.flags.compress,"function"==typeof n[n.length-1]){var o=this.ids++,s=n.pop();this._registerAckCallback(o,s),i.id=o}var a=this.io.engine&&this.io.engine.transport&&this.io.engine.transport.writable,c=this.flags.volatile&&(!a||!this.connected);return c||(this.connected?(this.notifyOutgoingListeners(i),this.packet(i)):this.sendBuffer.push(i)),this.flags={},this}},{key:"_registerAckCallback",value:function(t,e){var n,r=this,i=null!==(n=this.flags.timeout)&&void 0!==n?n:this._opts.ackTimeout;if(void 0!==i){var o=this.io.setTimeoutFn((function(){delete r.acks[t];for(var n=0;n<r.sendBuffer.length;n++)r.sendBuffer[n].id===t&&r.sendBuffer.splice(n,1);e.call(r,new Error("operation has timed out"))}),i);this.acks[t]=function(){r.io.clearTimeoutFn(o);for(var t=arguments.length,n=new Array(t),i=0;i<t;i++)n[i]=arguments[i];e.apply(r,[null].concat(n))}}else this.acks[t]=e}},{key:"emitWithAck",value:function(t){for(var e=this,n=arguments.length,r=new Array(n>1?n-1:0),i=1;i<n;i++)r[i-1]=arguments[i];var o=void 
0!==this.flags.timeout||void 0!==this._opts.ackTimeout;return new Promise((function(n,i){r.push((function(t,e){return o?t?i(t):n(e):n(t)})),e.emit.apply(e,[t].concat(r))}))}},{key:"_addToQueue",value:function(t){var e,n=this;"function"==typeof t[t.length-1]&&(e=t.pop());var r={id:this.ids++,tryCount:0,pending:!1,args:t,flags:i({fromQueue:!0},this.flags)};t.push((function(t){if(r===n._queue[0]){var i=null!==t;if(i)r.tryCount>n._opts.retries&&(n._queue.shift(),e&&e(t));else if(n._queue.shift(),e){for(var o=arguments.length,s=new Array(o>1?o-1:0),a=1;a<o;a++)s[a-1]=arguments[a];e.apply(void 0,[null].concat(s))}return r.pending=!1,n._drainQueue()}})),this._queue.push(r),this._drainQueue()}},{key:"_drainQueue",value:function(){if(0!==this._queue.length){var t=this._queue[0];if(!t.pending){t.pending=!0,t.tryCount++;var e=this.ids;this.ids=t.id,this.flags=t.flags,this.emit.apply(this,t.args),this.ids=e}}}},{key:"packet",value:function(t){t.nsp=this.nsp,this.io._packet(t)}},{key:"onopen",value:function(){var t=this;"function"==typeof this.auth?this.auth((function(e){t._sendConnectPacket(e)})):this._sendConnectPacket(this.auth)}},{key:"_sendConnectPacket",value:function(t){this.packet({type:Et.CONNECT,data:this._pid?i({pid:this._pid,offset:this._lastOffset},t):t})}},{key:"onerror",value:function(t){this.connected||this.emitReserved("connect_error",t)}},{key:"onclose",value:function(t,e){this.connected=!1,delete this.id,this.emitReserved("disconnect",t,e)}},{key:"onpacket",value:function(t){if(t.nsp===this.nsp)switch(t.type){case Et.CONNECT:t.data&&t.data.sid?this.onconnect(t.data.sid,t.data.pid):this.emitReserved("connect_error",new Error("It seems you are trying to reach a Socket.IO server in v2.x with a v3.x client, but they are not compatible (more information here: https://socket.io/docs/v3/migrating-from-2-x-to-3-0/)"));break;case Et.EVENT:case Et.BINARY_EVENT:this.onevent(t);break;case Et.ACK:case Et.BINARY_ACK:this.onack(t);break;case 
Et.DISCONNECT:this.ondisconnect();break;case Et.CONNECT_ERROR:this.destroy();var e=new Error(t.data.message);e.data=t.data.data,this.emitReserved("connect_error",e)}}},{key:"onevent",value:function(t){var e=t.data||[];null!=t.id&&e.push(this.ack(t.id)),this.connected?this.emitEvent(e):this.receiveBuffer.push(Object.freeze(e))}},{key:"emitEvent",value:function(t){if(this._anyListeners&&this._anyListeners.length){var e,n=g(this._anyListeners.slice());try{for(n.s();!(e=n.n()).done;){e.value.apply(this,t)}}catch(t){n.e(t)}finally{n.f()}}y(s(a.prototype),"emit",this).apply(this,t),this._pid&&t.length&&"string"==typeof t[t.length-1]&&(this._lastOffset=t[t.length-1])}},{key:"ack",value:function(t){var e=this,n=!1;return function(){if(!n){n=!0;for(var r=arguments.length,i=new Array(r),o=0;o<r;o++)i[o]=arguments[o];e.packet({type:Et.ACK,id:t,data:i})}}}},{key:"onack",value:function(t){var e=this.acks[t.id];"function"==typeof e&&(e.apply(this,t.data),delete this.acks[t.id])}},{key:"onconnect",value:function(t,e){this.id=t,this.recovered=e&&this._pid===e,this._pid=e,this.connected=!0,this.emitBuffered(),this.emitReserved("connect")}},{key:"emitBuffered",value:function(){var t=this;this.receiveBuffer.forEach((function(e){return t.emitEvent(e)})),this.receiveBuffer=[],this.sendBuffer.forEach((function(e){t.notifyOutgoingListeners(e),t.packet(e)})),this.sendBuffer=[]}},{key:"ondisconnect",value:function(){this.destroy(),this.onclose("io server disconnect")}},{key:"destroy",value:function(){this.subs&&(this.subs.forEach((function(t){return t()})),this.subs=void 0),this.io._destroy(this)}},{key:"disconnect",value:function(){return this.connected&&this.packet({type:Et.DISCONNECT}),this.destroy(),this.connected&&this.onclose("io client disconnect"),this}},{key:"close",value:function(){return this.disconnect()}},{key:"compress",value:function(t){return this.flags.compress=t,this}},{key:"volatile",get:function(){return 
this.flags.volatile=!0,this}},{key:"timeout",value:function(t){return this.flags.timeout=t,this}},{key:"onAny",value:function(t){return this._anyListeners=this._anyListeners||[],this._anyListeners.push(t),this}},{key:"prependAny",value:function(t){return this._anyListeners=this._anyListeners||[],this._anyListeners.unshift(t),this}},{key:"offAny",value:function(t){if(!this._anyListeners)return this;if(t){for(var e=this._anyListeners,n=0;n<e.length;n++)if(t===e[n])return e.splice(n,1),this}else this._anyListeners=[];return this}},{key:"listenersAny",value:function(){return this._anyListeners||[]}},{key:"onAnyOutgoing",value:function(t){return this._anyOutgoingListeners=this._anyOutgoingListeners||[],this._anyOutgoingListeners.push(t),this}},{key:"prependAnyOutgoing",value:function(t){return this._anyOutgoingListeners=this._anyOutgoingListeners||[],this._anyOutgoingListeners.unshift(t),this}},{key:"offAnyOutgoing",value:function(t){if(!this._anyOutgoingListeners)return this;if(t){for(var e=this._anyOutgoingListeners,n=0;n<e.length;n++)if(t===e[n])return e.splice(n,1),this}else this._anyOutgoingListeners=[];return this}},{key:"listenersAnyOutgoing",value:function(){return this._anyOutgoingListeners||[]}},{key:"notifyOutgoingListeners",value:function(t){if(this._anyOutgoingListeners&&this._anyOutgoingListeners.length){var e,n=g(this._anyOutgoingListeners.slice());try{for(n.s();!(e=n.n()).done;){e.value.apply(this,t.data)}}catch(t){n.e(t)}finally{n.f()}}}}]),a}(L);function Nt(t){t=t||{},this.ms=t.min||100,this.max=t.max||1e4,this.factor=t.factor||2,this.jitter=t.jitter>0&&t.jitter<=1?t.jitter:0,this.attempts=0}Nt.prototype.duration=function(){var t=this.ms*Math.pow(this.factor,this.attempts++);if(this.jitter){var e=Math.random(),n=Math.floor(e*this.jitter*t);t=0==(1&Math.floor(10*e))?t-n:t+n}return 
0|Math.min(t,this.max)},Nt.prototype.reset=function(){this.attempts=0},Nt.prototype.setMin=function(t){this.ms=t},Nt.prototype.setMax=function(t){this.max=t},Nt.prototype.setJitter=function(t){this.jitter=t};var xt=function(n){o(s,n);var i=p(s);function s(n,r){var o,a;e(this,s),(o=i.call(this)).nsps={},o.subs=[],n&&"object"===t(n)&&(r=n,n=void 0),(r=r||{}).path=r.path||"/socket.io",o.opts=r,D(f(o),r),o.reconnection(!1!==r.reconnection),o.reconnectionAttempts(r.reconnectionAttempts||1/0),o.reconnectionDelay(r.reconnectionDelay||1e3),o.reconnectionDelayMax(r.reconnectionDelayMax||5e3),o.randomizationFactor(null!==(a=r.randomizationFactor)&&void 0!==a?a:.5),o.backoff=new Nt({min:o.reconnectionDelay(),max:o.reconnectionDelayMax(),jitter:o.randomizationFactor()}),o.timeout(null==r.timeout?2e4:r.timeout),o._readyState="closed",o.uri=n;var c=r.parser||Tt;return o.encoder=new c.Encoder,o.decoder=new c.Decoder,o._autoConnect=!1!==r.autoConnect,o._autoConnect&&o.open(),o}return r(s,[{key:"reconnection",value:function(t){return arguments.length?(this._reconnection=!!t,this):this._reconnection}},{key:"reconnectionAttempts",value:function(t){return void 0===t?this._reconnectionAttempts:(this._reconnectionAttempts=t,this)}},{key:"reconnectionDelay",value:function(t){var e;return void 0===t?this._reconnectionDelay:(this._reconnectionDelay=t,null===(e=this.backoff)||void 0===e||e.setMin(t),this)}},{key:"randomizationFactor",value:function(t){var e;return void 0===t?this._randomizationFactor:(this._randomizationFactor=t,null===(e=this.backoff)||void 0===e||e.setJitter(t),this)}},{key:"reconnectionDelayMax",value:function(t){var e;return void 0===t?this._reconnectionDelayMax:(this._reconnectionDelayMax=t,null===(e=this.backoff)||void 0===e||e.setMax(t),this)}},{key:"timeout",value:function(t){return 
arguments.length?(this._timeout=t,this):this._timeout}},{key:"maybeReconnectOnOpen",value:function(){!this._reconnecting&&this._reconnection&&0===this.backoff.attempts&&this.reconnect()}},{key:"open",value:function(t){var e=this;if(~this._readyState.indexOf("open"))return this;this.engine=new lt(this.uri,this.opts);var n=this.engine,r=this;this._readyState="opening",this.skipReconnect=!1;var i=Ct(n,"open",(function(){r.onopen(),t&&t()})),o=Ct(n,"error",(function(n){r.cleanup(),r._readyState="closed",e.emitReserved("error",n),t?t(n):r.maybeReconnectOnOpen()}));if(!1!==this._timeout){var s=this._timeout;0===s&&i();var a=this.setTimeoutFn((function(){i(),n.close(),n.emit("error",new Error("timeout"))}),s);this.opts.autoUnref&&a.unref(),this.subs.push((function(){clearTimeout(a)}))}return this.subs.push(i),this.subs.push(o),this}},{key:"connect",value:function(t){return this.open(t)}},{key:"onopen",value:function(){this.cleanup(),this._readyState="open",this.emitReserved("open");var t=this.engine;this.subs.push(Ct(t,"ping",this.onping.bind(this)),Ct(t,"data",this.ondata.bind(this)),Ct(t,"error",this.onerror.bind(this)),Ct(t,"close",this.onclose.bind(this)),Ct(this.decoder,"decoded",this.ondecoded.bind(this)))}},{key:"onping",value:function(){this.emitReserved("ping")}},{key:"ondata",value:function(t){try{this.decoder.add(t)}catch(t){this.onclose("parse error",t)}}},{key:"ondecoded",value:function(t){var e=this;it((function(){e.emitReserved("packet",t)}),this.setTimeoutFn)}},{key:"onerror",value:function(t){this.emitReserved("error",t)}},{key:"socket",value:function(t,e){var n=this.nsps[t];return n||(n=new St(this,t,e),this.nsps[t]=n),this._autoConnect&&n.connect(),n}},{key:"_destroy",value:function(t){for(var e=0,n=Object.keys(this.nsps);e<n.length;e++){var r=n[e];if(this.nsps[r].active)return}this._close()}},{key:"_packet",value:function(t){for(var 
e=this.encoder.encode(t),n=0;n<e.length;n++)this.engine.write(e[n],t.options)}},{key:"cleanup",value:function(){this.subs.forEach((function(t){return t()})),this.subs.length=0,this.decoder.destroy()}},{key:"_close",value:function(){this.skipReconnect=!0,this._reconnecting=!1,this.onclose("forced close"),this.engine&&this.engine.close()}},{key:"disconnect",value:function(){return this._close()}},{key:"onclose",value:function(t,e){this.cleanup(),this.backoff.reset(),this._readyState="closed",this.emitReserved("close",t,e),this._reconnection&&!this.skipReconnect&&this.reconnect()}},{key:"reconnect",value:function(){var t=this;if(this._reconnecting||this.skipReconnect)return this;var e=this;if(this.backoff.attempts>=this._reconnectionAttempts)this.backoff.reset(),this.emitReserved("reconnect_failed"),this._reconnecting=!1;else{var n=this.backoff.duration();this._reconnecting=!0;var r=this.setTimeoutFn((function(){e.skipReconnect||(t.emitReserved("reconnect_attempt",e.backoff.attempts),e.skipReconnect||e.open((function(n){n?(e._reconnecting=!1,e.reconnect(),t.emitReserved("reconnect_error",n)):e.onreconnect()})))}),n);this.opts.autoUnref&&r.unref(),this.subs.push((function(){clearTimeout(r)}))}}},{key:"onreconnect",value:function(){var t=this.backoff.attempts;this._reconnecting=!1,this.backoff.reset(),this.emitReserved("reconnect",t)}}]),s}(L),Lt={};function Pt(e,n){"object"===t(e)&&(n=e,e=void 0);var r,i=function(t){var e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"",n=arguments.length>2?arguments[2]:void 0,r=t;n=n||"undefined"!=typeof location&&location,null==t&&(t=n.protocol+"//"+n.host),"string"==typeof t&&("/"===t.charAt(0)&&(t="/"===t.charAt(1)?n.protocol+t:n.host+t),/^(https?|wss?):\/\//.test(t)||(t=void 0!==n?n.protocol+"//"+t:"https://"+t),r=ft(t)),r.port||(/^(http|ws)$/.test(r.protocol)?r.port="80":/^(http|ws)s$/.test(r.protocol)&&(r.port="443")),r.path=r.path||"/";var i=-1!==r.host.indexOf(":")?"["+r.host+"]":r.host;return 
r.id=r.protocol+"://"+i+":"+r.port+e,r.href=r.protocol+"://"+i+(n&&n.port===r.port?"":":"+r.port),r}(e,(n=n||{}).path||"/socket.io"),o=i.source,s=i.id,a=i.path,c=Lt[s]&&a in Lt[s].nsps;return n.forceNew||n["force new connection"]||!1===n.multiplex||c?r=new xt(o,n):(Lt[s]||(Lt[s]=new xt(o,n)),r=Lt[s]),i.query&&!n.query&&(n.query=i.queryKey),r.socket(i.path,n)}return i(Pt,{Manager:xt,Socket:St,io:Pt,connect:Pt}),Pt})); -//# sourceMappingURL=socket.io.min.js.map +!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.io=e():t.io=e()}(self,(function(){return function(t){var e={};function n(r){if(e[r])return e[r].exports;var o=e[r]={i:r,l:!1,exports:{}};return t[r].call(o.exports,o,o.exports,n),o.l=!0,o.exports}return n.m=t,n.c=e,n.d=function(t,e,r){n.o(t,e)||Object.defineProperty(t,e,{enumerable:!0,get:r})},n.r=function(t){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})},n.t=function(t,e){if(1&e&&(t=n(t)),8&e)return t;if(4&e&&"object"==typeof t&&t&&t.__esModule)return t;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:t}),2&e&&"string"!=typeof t)for(var o in t)n.d(r,o,function(e){return t[e]}.bind(null,o));return r},n.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return n.d(e,"a",e),e},n.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},n.p="",n(n.s=17)}([function(t,e,n){function r(t){if(t)return function(t){for(var e in r.prototype)t[e]=r.prototype[e];return t}(t)}t.exports=r,r.prototype.on=r.prototype.addEventListener=function(t,e){return this._callbacks=this._callbacks||{},(this._callbacks["$"+t]=this._callbacks["$"+t]||[]).push(e),this},r.prototype.once=function(t,e){function n(){this.off(t,n),e.apply(this,arguments)}return 
n.fn=e,this.on(t,n),this},r.prototype.off=r.prototype.removeListener=r.prototype.removeAllListeners=r.prototype.removeEventListener=function(t,e){if(this._callbacks=this._callbacks||{},0==arguments.length)return this._callbacks={},this;var n,r=this._callbacks["$"+t];if(!r)return this;if(1==arguments.length)return delete this._callbacks["$"+t],this;for(var o=0;o<r.length;o++)if((n=r[o])===e||n.fn===e){r.splice(o,1);break}return 0===r.length&&delete this._callbacks["$"+t],this},r.prototype.emit=function(t){this._callbacks=this._callbacks||{};for(var e=new Array(arguments.length-1),n=this._callbacks["$"+t],r=1;r<arguments.length;r++)e[r-1]=arguments[r];if(n){r=0;for(var o=(n=n.slice(0)).length;r<o;++r)n[r].apply(this,e)}return this},r.prototype.listeners=function(t){return this._callbacks=this._callbacks||{},this._callbacks["$"+t]||[]},r.prototype.hasListeners=function(t){return!!this.listeners(t).length}},function(t,e,n){var r=n(23),o=n(24),i=String.fromCharCode(30);t.exports={protocol:4,encodePacket:r,encodePayload:function(t,e){var n=t.length,o=new Array(n),s=0;t.forEach((function(t,c){r(t,!1,(function(t){o[c]=t,++s===n&&e(o.join(i))}))}))},decodePacket:o,decodePayload:function(t,e){for(var n=t.split(i),r=[],s=0;s<n.length;s++){var c=o(n[s],e);if(r.push(c),"error"===c.type)break}return r}}},function(t,e){t.exports="undefined"!=typeof self?self:"undefined"!=typeof window?window:Function("return this")()},function(t,e,n){function r(t){return(r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}function o(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}function i(t,e){return(i=Object.setPrototypeOf||function(t,e){return t.__proto__=e,t})(t,e)}function s(t){var e=function(){if("undefined"==typeof 
Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],(function(){}))),!0}catch(t){return!1}}();return function(){var n,r=a(t);if(e){var o=a(this).constructor;n=Reflect.construct(r,arguments,o)}else n=r.apply(this,arguments);return c(this,n)}}function c(t,e){return!e||"object"!==r(e)&&"function"!=typeof e?function(t){if(void 0===t)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return t}(t):e}function a(t){return(a=Object.setPrototypeOf?Object.getPrototypeOf:function(t){return t.__proto__||Object.getPrototypeOf(t)})(t)}var u=n(1),f=function(t){!function(t,e){if("function"!=typeof e&&null!==e)throw new TypeError("Super expression must either be null or a function");t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,writable:!0,configurable:!0}}),e&&i(t,e)}(a,t);var e,n,r,c=s(a);function a(t){var e;return function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}(this,a),(e=c.call(this)).opts=t,e.query=t.query,e.readyState="",e.socket=t.socket,e}return e=a,(n=[{key:"onError",value:function(t,e){var n=new Error(t);return n.type="TransportError",n.description=e,this.emit("error",n),this}},{key:"open",value:function(){return"closed"!==this.readyState&&""!==this.readyState||(this.readyState="opening",this.doOpen()),this}},{key:"close",value:function(){return"opening"!==this.readyState&&"open"!==this.readyState||(this.doClose(),this.onClose()),this}},{key:"send",value:function(t){if("open"!==this.readyState)throw new Error("Transport not open");this.write(t)}},{key:"onOpen",value:function(){this.readyState="open",this.writable=!0,this.emit("open")}},{key:"onData",value:function(t){var 
e=u.decodePacket(t,this.socket.binaryType);this.onPacket(e)}},{key:"onPacket",value:function(t){this.emit("packet",t)}},{key:"onClose",value:function(){this.readyState="closed",this.emit("close")}}])&&o(e.prototype,n),r&&o(e,r),a}(n(0));t.exports=f},function(t,e){e.encode=function(t){var e="";for(var n in t)t.hasOwnProperty(n)&&(e.length&&(e+="&"),e+=encodeURIComponent(n)+"="+encodeURIComponent(t[n]));return e},e.decode=function(t){for(var e={},n=t.split("&"),r=0,o=n.length;r<o;r++){var i=n[r].split("=");e[decodeURIComponent(i[0])]=decodeURIComponent(i[1])}return e}},function(t,e,n){"use strict";function r(t){return(r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}function o(t,e,n){return(o="undefined"!=typeof Reflect&&Reflect.get?Reflect.get:function(t,e,n){var r=function(t,e){for(;!Object.prototype.hasOwnProperty.call(t,e)&&null!==(t=a(t)););return t}(t,e);if(r){var o=Object.getOwnPropertyDescriptor(r,e);return o.get?o.get.call(n):o.value}})(t,e,n||t)}function i(t,e){return(i=Object.setPrototypeOf||function(t,e){return t.__proto__=e,t})(t,e)}function s(t){var e=function(){if("undefined"==typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],(function(){}))),!0}catch(t){return!1}}();return function(){var n,r=a(t);if(e){var o=a(this).constructor;n=Reflect.construct(r,arguments,o)}else n=r.apply(this,arguments);return c(this,n)}}function c(t,e){return!e||"object"!==r(e)&&"function"!=typeof e?function(t){if(void 0===t)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return t}(t):e}function a(t){return(a=Object.setPrototypeOf?Object.getPrototypeOf:function(t){return t.__proto__||Object.getPrototypeOf(t)})(t)}function u(t,e){if(!(t instanceof e))throw 
new TypeError("Cannot call a class as a function")}function f(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}function p(t,e,n){return e&&f(t.prototype,e),n&&f(t,n),t}Object.defineProperty(e,"__esModule",{value:!0}),e.Decoder=e.Encoder=e.PacketType=e.protocol=void 0;var l,h=n(0),y=n(29),d=n(15);e.protocol=5,function(t){t[t.CONNECT=0]="CONNECT",t[t.DISCONNECT=1]="DISCONNECT",t[t.EVENT=2]="EVENT",t[t.ACK=3]="ACK",t[t.CONNECT_ERROR=4]="CONNECT_ERROR",t[t.BINARY_EVENT=5]="BINARY_EVENT",t[t.BINARY_ACK=6]="BINARY_ACK"}(l=e.PacketType||(e.PacketType={}));var v=function(){function t(){u(this,t)}return p(t,[{key:"encode",value:function(t){return t.type!==l.EVENT&&t.type!==l.ACK||!d.hasBinary(t)?[this.encodeAsString(t)]:(t.type=t.type===l.EVENT?l.BINARY_EVENT:l.BINARY_ACK,this.encodeAsBinary(t))}},{key:"encodeAsString",value:function(t){var e=""+t.type;return t.type!==l.BINARY_EVENT&&t.type!==l.BINARY_ACK||(e+=t.attachments+"-"),t.nsp&&"/"!==t.nsp&&(e+=t.nsp+","),null!=t.id&&(e+=t.id),null!=t.data&&(e+=JSON.stringify(t.data)),e}},{key:"encodeAsBinary",value:function(t){var e=y.deconstructPacket(t),n=this.encodeAsString(e.packet),r=e.buffers;return r.unshift(n),r}}]),t}();e.Encoder=v;var b=function(t){!function(t,e){if("function"!=typeof e&&null!==e)throw new TypeError("Super expression must either be null or a function");t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,writable:!0,configurable:!0}}),e&&i(t,e)}(n,t);var e=s(n);function n(){return u(this,n),e.call(this)}return p(n,[{key:"add",value:function(t){var e;if("string"==typeof t)(e=this.decodeString(t)).type===l.BINARY_EVENT||e.type===l.BINARY_ACK?(this.reconstructor=new m(e),0===e.attachments&&o(a(n.prototype),"emit",this).call(this,"decoded",e)):o(a(n.prototype),"emit",this).call(this,"decoded",e);else{if(!d.isBinary(t)&&!t.base64)throw new Error("Unknown type: "+t);if(!this.reconstructor)throw 
new Error("got binary data when not reconstructing a packet");(e=this.reconstructor.takeBinaryData(t))&&(this.reconstructor=null,o(a(n.prototype),"emit",this).call(this,"decoded",e))}}},{key:"decodeString",value:function(t){var e=0,r={type:Number(t.charAt(0))};if(void 0===l[r.type])throw new Error("unknown packet type "+r.type);if(r.type===l.BINARY_EVENT||r.type===l.BINARY_ACK){for(var o=e+1;"-"!==t.charAt(++e)&&e!=t.length;);var i=t.substring(o,e);if(i!=Number(i)||"-"!==t.charAt(e))throw new Error("Illegal attachments");r.attachments=Number(i)}if("/"===t.charAt(e+1)){for(var s=e+1;++e;){if(","===t.charAt(e))break;if(e===t.length)break}r.nsp=t.substring(s,e)}else r.nsp="/";var c=t.charAt(e+1);if(""!==c&&Number(c)==c){for(var a=e+1;++e;){var u=t.charAt(e);if(null==u||Number(u)!=u){--e;break}if(e===t.length)break}r.id=Number(t.substring(a,e+1))}if(t.charAt(++e)){var f=function(t){try{return JSON.parse(t)}catch(t){return!1}}(t.substr(e));if(!n.isPayloadValid(r.type,f))throw new Error("invalid payload");r.data=f}return r}},{key:"destroy",value:function(){this.reconstructor&&this.reconstructor.finishedReconstruction()}}],[{key:"isPayloadValid",value:function(t,e){switch(t){case l.CONNECT:return"object"===r(e);case l.DISCONNECT:return void 0===e;case l.CONNECT_ERROR:return"string"==typeof e||"object"===r(e);case l.EVENT:case l.BINARY_EVENT:return Array.isArray(e)&&e.length>0;case l.ACK:case l.BINARY_ACK:return Array.isArray(e)}}}]),n}(h);e.Decoder=b;var m=function(){function t(e){u(this,t),this.packet=e,this.buffers=[],this.reconPack=e}return p(t,[{key:"takeBinaryData",value:function(t){if(this.buffers.push(t),this.buffers.length===this.reconPack.attachments){var e=y.reconstructPacket(this.reconPack,this.buffers);return this.finishedReconstruction(),e}return null}},{key:"finishedReconstruction",value:function(){this.reconPack=null,this.buffers=[]}}]),t}()},function(t,e){var 
n=/^(?:(?![^:@]+:[^:@\/]*@)(http|https|ws|wss):\/\/)?((?:(([^:@]*)(?::([^:@]*))?)?@)?((?:[a-f0-9]{0,4}:){2,7}[a-f0-9]{0,4}|[^:\/?#]*)(?::(\d*))?)(((\/(?:[^?#](?![^?#\/]*\.[^?#\/.]+(?:[?#]|$)))*\/?)?([^?#\/]*))(?:\?([^#]*))?(?:#(.*))?)/,r=["source","protocol","authority","userInfo","user","password","host","port","relative","path","directory","file","query","anchor"];t.exports=function(t){var e=t,o=t.indexOf("["),i=t.indexOf("]");-1!=o&&-1!=i&&(t=t.substring(0,o)+t.substring(o,i).replace(/:/g,";")+t.substring(i,t.length));for(var s,c,a=n.exec(t||""),u={},f=14;f--;)u[r[f]]=a[f]||"";return-1!=o&&-1!=i&&(u.source=e,u.host=u.host.substring(1,u.host.length-1).replace(/;/g,":"),u.authority=u.authority.replace("[","").replace("]","").replace(/;/g,":"),u.ipv6uri=!0),u.pathNames=function(t,e){var n=e.replace(/\/{2,9}/g,"/").split("/");"/"!=e.substr(0,1)&&0!==e.length||n.splice(0,1);"/"==e.substr(e.length-1,1)&&n.splice(n.length-1,1);return n}(0,u.path),u.queryKey=(s=u.query,c={},s.replace(/(?:^|&)([^&=]*)=?([^&]*)/g,(function(t,e,n){e&&(c[e]=n)})),c),u}},function(t,e,n){"use strict";function r(t){return(r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}function o(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}function i(t,e,n){return(i="undefined"!=typeof Reflect&&Reflect.get?Reflect.get:function(t,e,n){var r=function(t,e){for(;!Object.prototype.hasOwnProperty.call(t,e)&&null!==(t=u(t)););return t}(t,e);if(r){var o=Object.getOwnPropertyDescriptor(r,e);return o.get?o.get.call(n):o.value}})(t,e,n||t)}function s(t,e){return(s=Object.setPrototypeOf||function(t,e){return t.__proto__=e,t})(t,e)}function c(t){var e=function(){if("undefined"==typeof 
Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],(function(){}))),!0}catch(t){return!1}}();return function(){var n,r=u(t);if(e){var o=u(this).constructor;n=Reflect.construct(r,arguments,o)}else n=r.apply(this,arguments);return a(this,n)}}function a(t,e){return!e||"object"!==r(e)&&"function"!=typeof e?function(t){if(void 0===t)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return t}(t):e}function u(t){return(u=Object.setPrototypeOf?Object.getPrototypeOf:function(t){return t.__proto__||Object.getPrototypeOf(t)})(t)}Object.defineProperty(e,"__esModule",{value:!0}),e.Manager=void 0;var f=n(19),p=n(14),l=n(0),h=n(5),y=n(16),d=n(30),v=function(t){!function(t,e){if("function"!=typeof e&&null!==e)throw new TypeError("Super expression must either be null or a function");t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,writable:!0,configurable:!0}}),e&&s(t,e)}(v,t);var e,n,a,l=c(v);function v(t,e){var n;!function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}(this,v),(n=l.call(this)).nsps={},n.subs=[],t&&"object"===r(t)&&(e=t,t=void 0),(e=e||{}).path=e.path||"/socket.io",n.opts=e,n.reconnection(!1!==e.reconnection),n.reconnectionAttempts(e.reconnectionAttempts||1/0),n.reconnectionDelay(e.reconnectionDelay||1e3),n.reconnectionDelayMax(e.reconnectionDelayMax||5e3),n.randomizationFactor(e.randomizationFactor||.5),n.backoff=new d({min:n.reconnectionDelay(),max:n.reconnectionDelayMax(),jitter:n.randomizationFactor()}),n.timeout(null==e.timeout?2e4:e.timeout),n._readyState="closed",n.uri=t;var o=e.parser||h;return n.encoder=new o.Encoder,n.decoder=new o.Decoder,n._autoConnect=!1!==e.autoConnect,n._autoConnect&&n.open(),n}return e=v,(n=[{key:"reconnection",value:function(t){return 
arguments.length?(this._reconnection=!!t,this):this._reconnection}},{key:"reconnectionAttempts",value:function(t){return void 0===t?this._reconnectionAttempts:(this._reconnectionAttempts=t,this)}},{key:"reconnectionDelay",value:function(t){var e;return void 0===t?this._reconnectionDelay:(this._reconnectionDelay=t,null===(e=this.backoff)||void 0===e||e.setMin(t),this)}},{key:"randomizationFactor",value:function(t){var e;return void 0===t?this._randomizationFactor:(this._randomizationFactor=t,null===(e=this.backoff)||void 0===e||e.setJitter(t),this)}},{key:"reconnectionDelayMax",value:function(t){var e;return void 0===t?this._reconnectionDelayMax:(this._reconnectionDelayMax=t,null===(e=this.backoff)||void 0===e||e.setMax(t),this)}},{key:"timeout",value:function(t){return arguments.length?(this._timeout=t,this):this._timeout}},{key:"maybeReconnectOnOpen",value:function(){!this._reconnecting&&this._reconnection&&0===this.backoff.attempts&&this.reconnect()}},{key:"open",value:function(t){var e=this;if(~this._readyState.indexOf("open"))return this;this.engine=f(this.uri,this.opts);var n=this.engine,r=this;this._readyState="opening",this.skipReconnect=!1;var o=y.on(n,"open",(function(){r.onopen(),t&&t()})),s=y.on(n,"error",(function(n){r.cleanup(),r._readyState="closed",i(u(v.prototype),"emit",e).call(e,"error",n),t?t(n):r.maybeReconnectOnOpen()}));if(!1!==this._timeout){var c=this._timeout;0===c&&o();var a=setTimeout((function(){o(),n.close(),n.emit("error",new Error("timeout"))}),c);this.subs.push((function(){clearTimeout(a)}))}return this.subs.push(o),this.subs.push(s),this}},{key:"connect",value:function(t){return this.open(t)}},{key:"onopen",value:function(){this.cleanup(),this._readyState="open",i(u(v.prototype),"emit",this).call(this,"open");var 
t=this.engine;this.subs.push(y.on(t,"ping",this.onping.bind(this)),y.on(t,"data",this.ondata.bind(this)),y.on(t,"error",this.onerror.bind(this)),y.on(t,"close",this.onclose.bind(this)),y.on(this.decoder,"decoded",this.ondecoded.bind(this)))}},{key:"onping",value:function(){i(u(v.prototype),"emit",this).call(this,"ping")}},{key:"ondata",value:function(t){this.decoder.add(t)}},{key:"ondecoded",value:function(t){i(u(v.prototype),"emit",this).call(this,"packet",t)}},{key:"onerror",value:function(t){i(u(v.prototype),"emit",this).call(this,"error",t)}},{key:"socket",value:function(t,e){var n=this.nsps[t];return n||(n=new p.Socket(this,t,e),this.nsps[t]=n),n}},{key:"_destroy",value:function(t){for(var e=0,n=Object.keys(this.nsps);e<n.length;e++){var r=n[e];if(this.nsps[r].active)return}this._close()}},{key:"_packet",value:function(t){for(var e=this.encoder.encode(t),n=0;n<e.length;n++)this.engine.write(e[n],t.options)}},{key:"cleanup",value:function(){this.subs.forEach((function(t){return t()})),this.subs.length=0,this.decoder.destroy()}},{key:"_close",value:function(){this.skipReconnect=!0,this._reconnecting=!1,"opening"===this._readyState&&this.cleanup(),this.backoff.reset(),this._readyState="closed",this.engine&&this.engine.close()}},{key:"disconnect",value:function(){return this._close()}},{key:"onclose",value:function(t){this.cleanup(),this.backoff.reset(),this._readyState="closed",i(u(v.prototype),"emit",this).call(this,"close",t),this._reconnection&&!this.skipReconnect&&this.reconnect()}},{key:"reconnect",value:function(){var t=this;if(this._reconnecting||this.skipReconnect)return this;var e=this;if(this.backoff.attempts>=this._reconnectionAttempts)this.backoff.reset(),i(u(v.prototype),"emit",this).call(this,"reconnect_failed"),this._reconnecting=!1;else{var n=this.backoff.duration();this._reconnecting=!0;var 
r=setTimeout((function(){e.skipReconnect||(i(u(v.prototype),"emit",t).call(t,"reconnect_attempt",e.backoff.attempts),e.skipReconnect||e.open((function(n){n?(e._reconnecting=!1,e.reconnect(),i(u(v.prototype),"emit",t).call(t,"reconnect_error",n)):e.onreconnect()})))}),n);this.subs.push((function(){clearTimeout(r)}))}}},{key:"onreconnect",value:function(){var t=this.backoff.attempts;this._reconnecting=!1,this.backoff.reset(),i(u(v.prototype),"emit",this).call(this,"reconnect",t)}}])&&o(e.prototype,n),a&&o(e,a),v}(l);e.Manager=v},function(t,e,n){var r=n(9),o=n(22),i=n(26),s=n(27);e.polling=function(t){var e=!1,n=!1,s=!1!==t.jsonp;if("undefined"!=typeof location){var c="https:"===location.protocol,a=location.port;a||(a=c?443:80),e=t.hostname!==location.hostname||a!==t.port,n=t.secure!==c}if(t.xdomain=e,t.xscheme=n,"open"in new r(t)&&!t.forceJSONP)return new o(t);if(!s)throw new Error("JSONP disabled");return new i(t)},e.websocket=s},function(t,e,n){var r=n(21),o=n(2);t.exports=function(t){var e=t.xdomain,n=t.xscheme,i=t.enablesXDR;try{if("undefined"!=typeof XMLHttpRequest&&(!e||r))return new XMLHttpRequest}catch(t){}try{if("undefined"!=typeof XDomainRequest&&!n&&i)return new XDomainRequest}catch(t){}if(!e)try{return new(o[["Active"].concat("Object").join("X")])("Microsoft.XMLHTTP")}catch(t){}}},function(t,e,n){function r(t){return(r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}function o(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}function i(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}function s(t,e){return(s=Object.setPrototypeOf||function(t,e){return t.__proto__=e,t})(t,e)}function c(t){var e=function(){if("undefined"==typeof 
Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],(function(){}))),!0}catch(t){return!1}}();return function(){var n,r=u(t);if(e){var o=u(this).constructor;n=Reflect.construct(r,arguments,o)}else n=r.apply(this,arguments);return a(this,n)}}function a(t,e){return!e||"object"!==r(e)&&"function"!=typeof e?function(t){if(void 0===t)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return t}(t):e}function u(t){return(u=Object.setPrototypeOf?Object.getPrototypeOf:function(t){return t.__proto__||Object.getPrototypeOf(t)})(t)}var f=n(3),p=n(4),l=n(1),h=n(12),y=function(t){!function(t,e){if("function"!=typeof e&&null!==e)throw new TypeError("Super expression must either be null or a function");t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,writable:!0,configurable:!0}}),e&&s(t,e)}(u,t);var e,n,r,a=c(u);function u(){return o(this,u),a.apply(this,arguments)}return e=u,(n=[{key:"doOpen",value:function(){this.poll()}},{key:"pause",value:function(t){var e=this;function n(){e.readyState="paused",t()}if(this.readyState="pausing",this.polling||!this.writable){var r=0;this.polling&&(r++,this.once("pollComplete",(function(){--r||n()}))),this.writable||(r++,this.once("drain",(function(){--r||n()})))}else n()}},{key:"poll",value:function(){this.polling=!0,this.doPoll(),this.emit("poll")}},{key:"onData",value:function(t){var e=this;l.decodePayload(t,this.socket.binaryType).forEach((function(t,n,r){if("opening"===e.readyState&&"open"===t.type&&e.onOpen(),"close"===t.type)return e.onClose(),!1;e.onPacket(t)})),"closed"!==this.readyState&&(this.polling=!1,this.emit("pollComplete"),"open"===this.readyState&&this.poll())}},{key:"doClose",value:function(){var t=this;function e(){t.write([{type:"close"}])}"open"===this.readyState?e():this.once("open",e)}},{key:"write",value:function(t){var 
e=this;this.writable=!1,l.encodePayload(t,(function(t){e.doWrite(t,(function(){e.writable=!0,e.emit("drain")}))}))}},{key:"uri",value:function(){var t=this.query||{},e=this.opts.secure?"https":"http",n="";return!1!==this.opts.timestampRequests&&(t[this.opts.timestampParam]=h()),this.supportsBinary||t.sid||(t.b64=1),t=p.encode(t),this.opts.port&&("https"===e&&443!==Number(this.opts.port)||"http"===e&&80!==Number(this.opts.port))&&(n=":"+this.opts.port),t.length&&(t="?"+t),e+"://"+(-1!==this.opts.hostname.indexOf(":")?"["+this.opts.hostname+"]":this.opts.hostname)+n+this.opts.path+t}},{key:"name",get:function(){return"polling"}}])&&i(e.prototype,n),r&&i(e,r),u}(f);t.exports=y},function(t,e){var n=Object.create(null);n.open="0",n.close="1",n.ping="2",n.pong="3",n.message="4",n.upgrade="5",n.noop="6";var r=Object.create(null);Object.keys(n).forEach((function(t){r[n[t]]=t}));t.exports={PACKET_TYPES:n,PACKET_TYPES_REVERSE:r,ERROR_PACKET:{type:"error",data:"parser error"}}},function(t,e,n){"use strict";var r,o="0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-_".split(""),i={},s=0,c=0;function a(t){var e="";do{e=o[t%64]+e,t=Math.floor(t/64)}while(t>0);return e}function u(){var t=a(+new Date);return t!==r?(s=0,r=t):t+"."+a(s++)}for(;c<64;c++)i[o[c]]=c;u.encode=a,u.decode=function(t){var e=0;for(c=0;c<t.length;c++)e=64*e+i[t.charAt(c)];return e},t.exports=u},function(t,e){t.exports.pick=function(t){for(var e=arguments.length,n=new Array(e>1?e-1:0),r=1;r<e;r++)n[r-1]=arguments[r];return n.reduce((function(e,n){return t.hasOwnProperty(n)&&(e[n]=t[n]),e}),{})}},function(t,e,n){"use strict";function r(t){return(r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}function o(t,e){var n;if("undefined"==typeof Symbol||null==t[Symbol.iterator]){if(Array.isArray(t)||(n=function(t,e){if(!t)return;if("string"==typeof 
t)return i(t,e);var n=Object.prototype.toString.call(t).slice(8,-1);"Object"===n&&t.constructor&&(n=t.constructor.name);if("Map"===n||"Set"===n)return Array.from(t);if("Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n))return i(t,e)}(t))||e&&t&&"number"==typeof t.length){n&&(t=n);var r=0,o=function(){};return{s:o,n:function(){return r>=t.length?{done:!0}:{done:!1,value:t[r++]}},e:function(t){throw t},f:o}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var s,c=!0,a=!1;return{s:function(){n=t[Symbol.iterator]()},n:function(){var t=n.next();return c=t.done,t},e:function(t){a=!0,s=t},f:function(){try{c||null==n.return||n.return()}finally{if(a)throw s}}}}function i(t,e){(null==e||e>t.length)&&(e=t.length);for(var n=0,r=new Array(e);n<e;n++)r[n]=t[n];return r}function s(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}function c(t,e,n){return(c="undefined"!=typeof Reflect&&Reflect.get?Reflect.get:function(t,e,n){var r=function(t,e){for(;!Object.prototype.hasOwnProperty.call(t,e)&&null!==(t=p(t)););return t}(t,e);if(r){var o=Object.getOwnPropertyDescriptor(r,e);return o.get?o.get.call(n):o.value}})(t,e,n||t)}function a(t,e){return(a=Object.setPrototypeOf||function(t,e){return t.__proto__=e,t})(t,e)}function u(t){var e=function(){if("undefined"==typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],(function(){}))),!0}catch(t){return!1}}();return function(){var n,r=p(t);if(e){var o=p(this).constructor;n=Reflect.construct(r,arguments,o)}else n=r.apply(this,arguments);return f(this,n)}}function f(t,e){return!e||"object"!==r(e)&&"function"!=typeof e?function(t){if(void 0===t)throw new ReferenceError("this hasn't been 
initialised - super() hasn't been called");return t}(t):e}function p(t){return(p=Object.setPrototypeOf?Object.getPrototypeOf:function(t){return t.__proto__||Object.getPrototypeOf(t)})(t)}Object.defineProperty(e,"__esModule",{value:!0}),e.Socket=void 0;var l=n(5),h=n(0),y=n(16),d=Object.freeze({connect:1,connect_error:1,disconnect:1,disconnecting:1,newListener:1,removeListener:1}),v=function(t){!function(t,e){if("function"!=typeof e&&null!==e)throw new TypeError("Super expression must either be null or a function");t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,writable:!0,configurable:!0}}),e&&a(t,e)}(f,t);var e,n,r,i=u(f);function f(t,e,n){var r;return function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}(this,f),(r=i.call(this)).receiveBuffer=[],r.sendBuffer=[],r.ids=0,r.acks={},r.flags={},r.io=t,r.nsp=e,r.ids=0,r.acks={},r.receiveBuffer=[],r.sendBuffer=[],r.connected=!1,r.disconnected=!0,r.flags={},n&&n.auth&&(r.auth=n.auth),r.io._autoConnect&&r.open(),r}return e=f,(n=[{key:"subEvents",value:function(){if(!this.subs){var t=this.io;this.subs=[y.on(t,"open",this.onopen.bind(this)),y.on(t,"packet",this.onpacket.bind(this)),y.on(t,"error",this.onerror.bind(this)),y.on(t,"close",this.onclose.bind(this))]}}},{key:"connect",value:function(){return this.connected||(this.subEvents(),this.io._reconnecting||this.io.open(),"open"===this.io._readyState&&this.onopen()),this}},{key:"open",value:function(){return this.connect()}},{key:"send",value:function(){for(var t=arguments.length,e=new Array(t),n=0;n<t;n++)e[n]=arguments[n];return e.unshift("message"),this.emit.apply(this,e),this}},{key:"emit",value:function(t){if(d.hasOwnProperty(t))throw new Error('"'+t+'" is a reserved event name');for(var e=arguments.length,n=new Array(e>1?e-1:0),r=1;r<e;r++)n[r-1]=arguments[r];n.unshift(t);var o={type:l.PacketType.EVENT,data:n,options:{}};o.options.compress=!1!==this.flags.compress,"function"==typeof 
n[n.length-1]&&(this.acks[this.ids]=n.pop(),o.id=this.ids++);var i=this.io.engine&&this.io.engine.transport&&this.io.engine.transport.writable,s=this.flags.volatile&&(!i||!this.connected);return s||(this.connected?this.packet(o):this.sendBuffer.push(o)),this.flags={},this}},{key:"packet",value:function(t){t.nsp=this.nsp,this.io._packet(t)}},{key:"onopen",value:function(){var t=this;"function"==typeof this.auth?this.auth((function(e){t.packet({type:l.PacketType.CONNECT,data:e})})):this.packet({type:l.PacketType.CONNECT,data:this.auth})}},{key:"onerror",value:function(t){this.connected||c(p(f.prototype),"emit",this).call(this,"connect_error",t)}},{key:"onclose",value:function(t){this.connected=!1,this.disconnected=!0,delete this.id,c(p(f.prototype),"emit",this).call(this,"disconnect",t)}},{key:"onpacket",value:function(t){if(t.nsp===this.nsp)switch(t.type){case l.PacketType.CONNECT:if(t.data&&t.data.sid){var e=t.data.sid;this.onconnect(e)}else c(p(f.prototype),"emit",this).call(this,"connect_error",new Error("It seems you are trying to reach a Socket.IO server in v2.x with a v3.x client, but they are not compatible (more information here: https://socket.io/docs/v3/migrating-from-2-x-to-3-0/)"));break;case l.PacketType.EVENT:case l.PacketType.BINARY_EVENT:this.onevent(t);break;case l.PacketType.ACK:case l.PacketType.BINARY_ACK:this.onack(t);break;case l.PacketType.DISCONNECT:this.ondisconnect();break;case l.PacketType.CONNECT_ERROR:var n=new Error(t.data.message);n.data=t.data.data,c(p(f.prototype),"emit",this).call(this,"connect_error",n)}}},{key:"onevent",value:function(t){var e=t.data||[];null!=t.id&&e.push(this.ack(t.id)),this.connected?this.emitEvent(e):this.receiveBuffer.push(Object.freeze(e))}},{key:"emitEvent",value:function(t){if(this._anyListeners&&this._anyListeners.length){var 
e,n=o(this._anyListeners.slice());try{for(n.s();!(e=n.n()).done;)e.value.apply(this,t)}catch(t){n.e(t)}finally{n.f()}}c(p(f.prototype),"emit",this).apply(this,t)}},{key:"ack",value:function(t){var e=this,n=!1;return function(){if(!n){n=!0;for(var r=arguments.length,o=new Array(r),i=0;i<r;i++)o[i]=arguments[i];e.packet({type:l.PacketType.ACK,id:t,data:o})}}}},{key:"onack",value:function(t){var e=this.acks[t.id];"function"==typeof e&&(e.apply(this,t.data),delete this.acks[t.id])}},{key:"onconnect",value:function(t){this.id=t,this.connected=!0,this.disconnected=!1,c(p(f.prototype),"emit",this).call(this,"connect"),this.emitBuffered()}},{key:"emitBuffered",value:function(){var t=this;this.receiveBuffer.forEach((function(e){return t.emitEvent(e)})),this.receiveBuffer=[],this.sendBuffer.forEach((function(e){return t.packet(e)})),this.sendBuffer=[]}},{key:"ondisconnect",value:function(){this.destroy(),this.onclose("io server disconnect")}},{key:"destroy",value:function(){this.subs&&(this.subs.forEach((function(t){return t()})),this.subs=void 0),this.io._destroy(this)}},{key:"disconnect",value:function(){return this.connected&&this.packet({type:l.PacketType.DISCONNECT}),this.destroy(),this.connected&&this.onclose("io client disconnect"),this}},{key:"close",value:function(){return this.disconnect()}},{key:"compress",value:function(t){return this.flags.compress=t,this}},{key:"onAny",value:function(t){return this._anyListeners=this._anyListeners||[],this._anyListeners.push(t),this}},{key:"prependAny",value:function(t){return this._anyListeners=this._anyListeners||[],this._anyListeners.unshift(t),this}},{key:"offAny",value:function(t){if(!this._anyListeners)return this;if(t){for(var e=this._anyListeners,n=0;n<e.length;n++)if(t===e[n])return e.splice(n,1),this}else this._anyListeners=[];return this}},{key:"listenersAny",value:function(){return this._anyListeners||[]}},{key:"active",get:function(){return!!this.subs}},{key:"volatile",get:function(){return 
this.flags.volatile=!0,this}}])&&s(e.prototype,n),r&&s(e,r),f}(h);e.Socket=v},function(t,e,n){"use strict";function r(t){return(r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}Object.defineProperty(e,"__esModule",{value:!0}),e.hasBinary=e.isBinary=void 0;var o="function"==typeof ArrayBuffer,i=Object.prototype.toString,s="function"==typeof Blob||"undefined"!=typeof Blob&&"[object BlobConstructor]"===i.call(Blob),c="function"==typeof File||"undefined"!=typeof File&&"[object FileConstructor]"===i.call(File);function a(t){return o&&(t instanceof ArrayBuffer||function(t){return"function"==typeof ArrayBuffer.isView?ArrayBuffer.isView(t):t.buffer instanceof ArrayBuffer}(t))||s&&t instanceof Blob||c&&t instanceof File}e.isBinary=a,e.hasBinary=function t(e,n){if(!e||"object"!==r(e))return!1;if(Array.isArray(e)){for(var o=0,i=e.length;o<i;o++)if(t(e[o]))return!0;return!1}if(a(e))return!0;if(e.toJSON&&"function"==typeof e.toJSON&&1===arguments.length)return t(e.toJSON(),!0);for(var s in e)if(Object.prototype.hasOwnProperty.call(e,s)&&t(e[s]))return!0;return!1}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.on=void 0,e.on=function(t,e,n){return t.on(e,n),function(){t.off(e,n)}}},function(t,e,n){"use strict";function r(t){return(r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}Object.defineProperty(e,"__esModule",{value:!0}),e.Socket=e.io=e.Manager=e.protocol=void 0;var o=n(18),i=n(7),s=n(14);Object.defineProperty(e,"Socket",{enumerable:!0,get:function(){return s.Socket}}),t.exports=e=a;var c=e.managers={};function a(t,e){"object"===r(t)&&(e=t,t=void 0),e=e||{};var n,s=o.url(t,e.path),a=s.source,u=s.id,f=s.path,p=c[u]&&f in 
c[u].nsps;return e.forceNew||e["force new connection"]||!1===e.multiplex||p?n=new i.Manager(a,e):(c[u]||(c[u]=new i.Manager(a,e)),n=c[u]),s.query&&!e.query&&(e.query=s.queryKey),n.socket(s.path,e)}e.io=a;var u=n(5);Object.defineProperty(e,"protocol",{enumerable:!0,get:function(){return u.protocol}}),e.connect=a;var f=n(7);Object.defineProperty(e,"Manager",{enumerable:!0,get:function(){return f.Manager}})},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.url=void 0;var r=n(6);e.url=function(t){var e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"",n=arguments.length>2?arguments[2]:void 0,o=t;n=n||"undefined"!=typeof location&&location,null==t&&(t=n.protocol+"//"+n.host),"string"==typeof t&&("/"===t.charAt(0)&&(t="/"===t.charAt(1)?n.protocol+t:n.host+t),/^(https?|wss?):\/\//.test(t)||(t=void 0!==n?n.protocol+"//"+t:"https://"+t),o=r(t)),o.port||(/^(http|ws)$/.test(o.protocol)?o.port="80":/^(http|ws)s$/.test(o.protocol)&&(o.port="443")),o.path=o.path||"/";var i=-1!==o.host.indexOf(":"),s=i?"["+o.host+"]":o.host;return o.id=o.protocol+"://"+s+":"+o.port+e,o.href=o.protocol+"://"+s+(n&&n.port===o.port?"":":"+o.port),o}},function(t,e,n){var r=n(20);t.exports=function(t,e){return new r(t,e)},t.exports.Socket=r,t.exports.protocol=r.protocol,t.exports.Transport=n(3),t.exports.transports=n(8),t.exports.parser=n(1)},function(t,e,n){function r(){return(r=Object.assign||function(t){for(var e=1;e<arguments.length;e++){var n=arguments[e];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(t[r]=n[r])}return t}).apply(this,arguments)}function o(t){return(o="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}function i(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}function s(t,e){for(var n=0;n<e.length;n++){var 
r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}function c(t,e){return(c=Object.setPrototypeOf||function(t,e){return t.__proto__=e,t})(t,e)}function a(t){var e=function(){if("undefined"==typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],(function(){}))),!0}catch(t){return!1}}();return function(){var n,r=f(t);if(e){var o=f(this).constructor;n=Reflect.construct(r,arguments,o)}else n=r.apply(this,arguments);return u(this,n)}}function u(t,e){return!e||"object"!==o(e)&&"function"!=typeof e?function(t){if(void 0===t)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return t}(t):e}function f(t){return(f=Object.setPrototypeOf?Object.getPrototypeOf:function(t){return t.__proto__||Object.getPrototypeOf(t)})(t)}var p=n(8),l=n(0),h=n(1),y=n(6),d=n(4),v=function(t){!function(t,e){if("function"!=typeof e&&null!==e)throw new TypeError("Super expression must either be null or a function");t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,writable:!0,configurable:!0}}),e&&c(t,e)}(l,t);var e,n,u,f=a(l);function l(t){var e,n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return i(this,l),e=f.call(this),t&&"object"===o(t)&&(n=t,t=null),t?(t=y(t),n.hostname=t.host,n.secure="https"===t.protocol||"wss"===t.protocol,n.port=t.port,t.query&&(n.query=t.query)):n.host&&(n.hostname=y(n.host).host),e.secure=null!=n.secure?n.secure:"undefined"!=typeof location&&"https:"===location.protocol,n.hostname&&!n.port&&(n.port=e.secure?"443":"80"),e.hostname=n.hostname||("undefined"!=typeof location?location.hostname:"localhost"),e.port=n.port||("undefined"!=typeof 
location&&location.port?location.port:e.secure?443:80),e.transports=n.transports||["polling","websocket"],e.readyState="",e.writeBuffer=[],e.prevBufferLen=0,e.opts=r({path:"/engine.io",agent:!1,withCredentials:!1,upgrade:!0,jsonp:!0,timestampParam:"t",rememberUpgrade:!1,rejectUnauthorized:!0,perMessageDeflate:{threshold:1024},transportOptions:{}},n),e.opts.path=e.opts.path.replace(/\/$/,"")+"/","string"==typeof e.opts.query&&(e.opts.query=d.decode(e.opts.query)),e.id=null,e.upgrades=null,e.pingInterval=null,e.pingTimeout=null,e.pingTimeoutTimer=null,"function"==typeof addEventListener&&addEventListener("beforeunload",(function(){e.transport&&(e.transport.removeAllListeners(),e.transport.close())}),!1),e.open(),e}return e=l,(n=[{key:"createTransport",value:function(t){var e=function(t){var e={};for(var n in t)t.hasOwnProperty(n)&&(e[n]=t[n]);return e}(this.opts.query);e.EIO=h.protocol,e.transport=t,this.id&&(e.sid=this.id);var n=r({},this.opts.transportOptions[t],this.opts,{query:e,socket:this,hostname:this.hostname,secure:this.secure,port:this.port});return new p[t](n)}},{key:"open",value:function(){var t;if(this.opts.rememberUpgrade&&l.priorWebsocketSuccess&&-1!==this.transports.indexOf("websocket"))t="websocket";else{if(0===this.transports.length){var e=this;return void setTimeout((function(){e.emit("error","No transports available")}),0)}t=this.transports[0]}this.readyState="opening";try{t=this.createTransport(t)}catch(t){return this.transports.shift(),void this.open()}t.open(),this.setTransport(t)}},{key:"setTransport",value:function(t){var e=this;this.transport&&this.transport.removeAllListeners(),this.transport=t,t.on("drain",(function(){e.onDrain()})).on("packet",(function(t){e.onPacket(t)})).on("error",(function(t){e.onError(t)})).on("close",(function(){e.onClose("transport close")}))}},{key:"probe",value:function(t){var e=this.createTransport(t,{probe:1}),n=!1,r=this;function o(){if(r.onlyBinaryUpgrades){var 
t=!this.supportsBinary&&r.transport.supportsBinary;n=n||t}n||(e.send([{type:"ping",data:"probe"}]),e.once("packet",(function(t){if(!n)if("pong"===t.type&&"probe"===t.data){if(r.upgrading=!0,r.emit("upgrading",e),!e)return;l.priorWebsocketSuccess="websocket"===e.name,r.transport.pause((function(){n||"closed"!==r.readyState&&(f(),r.setTransport(e),e.send([{type:"upgrade"}]),r.emit("upgrade",e),e=null,r.upgrading=!1,r.flush())}))}else{var o=new Error("probe error");o.transport=e.name,r.emit("upgradeError",o)}})))}function i(){n||(n=!0,f(),e.close(),e=null)}function s(t){var n=new Error("probe error: "+t);n.transport=e.name,i(),r.emit("upgradeError",n)}function c(){s("transport closed")}function a(){s("socket closed")}function u(t){e&&t.name!==e.name&&i()}function f(){e.removeListener("open",o),e.removeListener("error",s),e.removeListener("close",c),r.removeListener("close",a),r.removeListener("upgrading",u)}l.priorWebsocketSuccess=!1,e.once("open",o),e.once("error",s),e.once("close",c),this.once("close",a),this.once("upgrading",u),e.open()}},{key:"onOpen",value:function(){if(this.readyState="open",l.priorWebsocketSuccess="websocket"===this.transport.name,this.emit("open"),this.flush(),"open"===this.readyState&&this.opts.upgrade&&this.transport.pause)for(var t=0,e=this.upgrades.length;t<e;t++)this.probe(this.upgrades[t])}},{key:"onPacket",value:function(t){if("opening"===this.readyState||"open"===this.readyState||"closing"===this.readyState)switch(this.emit("packet",t),this.emit("heartbeat"),t.type){case"open":this.onHandshake(JSON.parse(t.data));break;case"ping":this.resetPingTimeout(),this.sendPacket("pong"),this.emit("pong");break;case"error":var e=new Error("server 
error");e.code=t.data,this.onError(e);break;case"message":this.emit("data",t.data),this.emit("message",t.data)}}},{key:"onHandshake",value:function(t){this.emit("handshake",t),this.id=t.sid,this.transport.query.sid=t.sid,this.upgrades=this.filterUpgrades(t.upgrades),this.pingInterval=t.pingInterval,this.pingTimeout=t.pingTimeout,this.onOpen(),"closed"!==this.readyState&&this.resetPingTimeout()}},{key:"resetPingTimeout",value:function(){var t=this;clearTimeout(this.pingTimeoutTimer),this.pingTimeoutTimer=setTimeout((function(){t.onClose("ping timeout")}),this.pingInterval+this.pingTimeout)}},{key:"onDrain",value:function(){this.writeBuffer.splice(0,this.prevBufferLen),this.prevBufferLen=0,0===this.writeBuffer.length?this.emit("drain"):this.flush()}},{key:"flush",value:function(){"closed"!==this.readyState&&this.transport.writable&&!this.upgrading&&this.writeBuffer.length&&(this.transport.send(this.writeBuffer),this.prevBufferLen=this.writeBuffer.length,this.emit("flush"))}},{key:"write",value:function(t,e,n){return this.sendPacket("message",t,e,n),this}},{key:"send",value:function(t,e,n){return this.sendPacket("message",t,e,n),this}},{key:"sendPacket",value:function(t,e,n,r){if("function"==typeof e&&(r=e,e=void 0),"function"==typeof n&&(r=n,n=null),"closing"!==this.readyState&&"closed"!==this.readyState){(n=n||{}).compress=!1!==n.compress;var o={type:t,data:e,options:n};this.emit("packetCreate",o),this.writeBuffer.push(o),r&&this.once("flush",r),this.flush()}}},{key:"close",value:function(){var t=this;function e(){t.onClose("forced close"),t.transport.close()}function n(){t.removeListener("upgrade",n),t.removeListener("upgradeError",n),e()}function 
r(){t.once("upgrade",n),t.once("upgradeError",n)}return"opening"!==this.readyState&&"open"!==this.readyState||(this.readyState="closing",this.writeBuffer.length?this.once("drain",(function(){this.upgrading?r():e()})):this.upgrading?r():e()),this}},{key:"onError",value:function(t){l.priorWebsocketSuccess=!1,this.emit("error",t),this.onClose("transport error",t)}},{key:"onClose",value:function(t,e){"opening"!==this.readyState&&"open"!==this.readyState&&"closing"!==this.readyState||(clearTimeout(this.pingIntervalTimer),clearTimeout(this.pingTimeoutTimer),this.transport.removeAllListeners("close"),this.transport.close(),this.transport.removeAllListeners(),this.readyState="closed",this.id=null,this.emit("close",t,e),this.writeBuffer=[],this.prevBufferLen=0)}},{key:"filterUpgrades",value:function(t){for(var e=[],n=0,r=t.length;n<r;n++)~this.transports.indexOf(t[n])&&e.push(t[n]);return e}}])&&s(e.prototype,n),u&&s(e,u),l}(l);v.priorWebsocketSuccess=!1,v.protocol=h.protocol,t.exports=v},function(t,e){try{t.exports="undefined"!=typeof XMLHttpRequest&&"withCredentials"in new XMLHttpRequest}catch(e){t.exports=!1}},function(t,e,n){function r(t){return(r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}function o(){return(o=Object.assign||function(t){for(var e=1;e<arguments.length;e++){var n=arguments[e];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(t[r]=n[r])}return t}).apply(this,arguments)}function i(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}function s(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}function c(t,e,n){return e&&s(t.prototype,e),n&&s(t,n),t}function a(t,e){if("function"!=typeof e&&null!==e)throw new TypeError("Super expression must either be null 
or a function");t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,writable:!0,configurable:!0}}),e&&u(t,e)}function u(t,e){return(u=Object.setPrototypeOf||function(t,e){return t.__proto__=e,t})(t,e)}function f(t){var e=function(){if("undefined"==typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],(function(){}))),!0}catch(t){return!1}}();return function(){var n,r=l(t);if(e){var o=l(this).constructor;n=Reflect.construct(r,arguments,o)}else n=r.apply(this,arguments);return p(this,n)}}function p(t,e){return!e||"object"!==r(e)&&"function"!=typeof e?function(t){if(void 0===t)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return t}(t):e}function l(t){return(l=Object.setPrototypeOf?Object.getPrototypeOf:function(t){return t.__proto__||Object.getPrototypeOf(t)})(t)}var h=n(9),y=n(10),d=n(0),v=n(13).pick,b=n(2);function m(){}var g=null!=new h({xdomain:!1}).responseType,k=function(t){a(n,t);var e=f(n);function n(t){var r;if(i(this,n),r=e.call(this,t),"undefined"!=typeof location){var o="https:"===location.protocol,s=location.port;s||(s=o?443:80),r.xd="undefined"!=typeof location&&t.hostname!==location.hostname||s!==t.port,r.xs=t.secure!==o}var c=t&&t.forceBase64;return r.supportsBinary=g&&!c,r}return c(n,[{key:"request",value:function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return o(t,{xd:this.xd,xs:this.xs},this.opts),new w(this.uri(),t)}},{key:"doWrite",value:function(t,e){var n=this.request({method:"POST",data:t}),r=this;n.on("success",e),n.on("error",(function(t){r.onError("xhr post error",t)}))}},{key:"doPoll",value:function(){var t=this.request(),e=this;t.on("data",(function(t){e.onData(t)})),t.on("error",(function(t){e.onError("xhr poll error",t)})),this.pollXhr=t}}]),n}(y),w=function(t){a(n,t);var e=f(n);function n(t,r){var o;return 
i(this,n),(o=e.call(this)).opts=r,o.method=r.method||"GET",o.uri=t,o.async=!1!==r.async,o.data=void 0!==r.data?r.data:null,o.create(),o}return c(n,[{key:"create",value:function(){var t=v(this.opts,"agent","enablesXDR","pfx","key","passphrase","cert","ca","ciphers","rejectUnauthorized");t.xdomain=!!this.opts.xd,t.xscheme=!!this.opts.xs;var e=this.xhr=new h(t),r=this;try{e.open(this.method,this.uri,this.async);try{if(this.opts.extraHeaders)for(var o in e.setDisableHeaderCheck&&e.setDisableHeaderCheck(!0),this.opts.extraHeaders)this.opts.extraHeaders.hasOwnProperty(o)&&e.setRequestHeader(o,this.opts.extraHeaders[o])}catch(t){}if("POST"===this.method)try{e.setRequestHeader("Content-type","text/plain;charset=UTF-8")}catch(t){}try{e.setRequestHeader("Accept","*/*")}catch(t){}"withCredentials"in e&&(e.withCredentials=this.opts.withCredentials),this.opts.requestTimeout&&(e.timeout=this.opts.requestTimeout),this.hasXDR()?(e.onload=function(){r.onLoad()},e.onerror=function(){r.onError(e.responseText)}):e.onreadystatechange=function(){4===e.readyState&&(200===e.status||1223===e.status?r.onLoad():setTimeout((function(){r.onError("number"==typeof e.status?e.status:0)}),0))},e.send(this.data)}catch(t){return void setTimeout((function(){r.onError(t)}),0)}"undefined"!=typeof document&&(this.index=n.requestsCount++,n.requests[this.index]=this)}},{key:"onSuccess",value:function(){this.emit("success"),this.cleanup()}},{key:"onData",value:function(t){this.emit("data",t),this.onSuccess()}},{key:"onError",value:function(t){this.emit("error",t),this.cleanup(!0)}},{key:"cleanup",value:function(t){if(void 0!==this.xhr&&null!==this.xhr){if(this.hasXDR()?this.xhr.onload=this.xhr.onerror=m:this.xhr.onreadystatechange=m,t)try{this.xhr.abort()}catch(t){}"undefined"!=typeof document&&delete n.requests[this.index],this.xhr=null}}},{key:"onLoad",value:function(){var t=this.xhr.responseText;null!==t&&this.onData(t)}},{key:"hasXDR",value:function(){return"undefined"!=typeof 
XDomainRequest&&!this.xs&&this.enablesXDR}},{key:"abort",value:function(){this.cleanup()}}]),n}(d);if(w.requestsCount=0,w.requests={},"undefined"!=typeof document)if("function"==typeof attachEvent)attachEvent("onunload",_);else if("function"==typeof addEventListener){addEventListener("onpagehide"in b?"pagehide":"unload",_,!1)}function _(){for(var t in w.requests)w.requests.hasOwnProperty(t)&&w.requests[t].abort()}t.exports=k,t.exports.Request=w},function(t,e,n){var r=n(11).PACKET_TYPES,o="function"==typeof Blob||"undefined"!=typeof Blob&&"[object BlobConstructor]"===Object.prototype.toString.call(Blob),i="function"==typeof ArrayBuffer,s=function(t,e){var n=new FileReader;return n.onload=function(){var t=n.result.split(",")[1];e("b"+t)},n.readAsDataURL(t)};t.exports=function(t,e,n){var c,a=t.type,u=t.data;return o&&u instanceof Blob?e?n(u):s(u,n):i&&(u instanceof ArrayBuffer||(c=u,"function"==typeof ArrayBuffer.isView?ArrayBuffer.isView(c):c&&c.buffer instanceof ArrayBuffer))?e?n(u instanceof ArrayBuffer?u:u.buffer):s(new Blob([u]),n):n(r[a]+(u||""))}},function(t,e,n){var r,o=n(11),i=o.PACKET_TYPES_REVERSE,s=o.ERROR_PACKET;"function"==typeof ArrayBuffer&&(r=n(25));var c=function(t,e){if(r){var n=r.decode(t);return a(n,e)}return{base64:!0,data:t}},a=function(t,e){switch(e){case"blob":return t instanceof ArrayBuffer?new Blob([t]):t;case"arraybuffer":default:return t}};t.exports=function(t,e){if("string"!=typeof t)return{type:"message",data:a(t,e)};var n=t.charAt(0);return"b"===n?{type:"message",data:c(t.substring(1),e)}:i[n]?t.length>1?{type:i[n],data:t.substring(1)}:{type:i[n]}:s}},function(t,e){!function(t){"use strict";e.encode=function(e){var n,r=new Uint8Array(e),o=r.length,i="";for(n=0;n<o;n+=3)i+=t[r[n]>>2],i+=t[(3&r[n])<<4|r[n+1]>>4],i+=t[(15&r[n+1])<<2|r[n+2]>>6],i+=t[63&r[n+2]];return o%3==2?i=i.substring(0,i.length-1)+"=":o%3==1&&(i=i.substring(0,i.length-2)+"=="),i},e.decode=function(e){var 
n,r,o,i,s,c=.75*e.length,a=e.length,u=0;"="===e[e.length-1]&&(c--,"="===e[e.length-2]&&c--);var f=new ArrayBuffer(c),p=new Uint8Array(f);for(n=0;n<a;n+=4)r=t.indexOf(e[n]),o=t.indexOf(e[n+1]),i=t.indexOf(e[n+2]),s=t.indexOf(e[n+3]),p[u++]=r<<2|o>>4,p[u++]=(15&o)<<4|i>>2,p[u++]=(3&i)<<6|63&s;return f}}("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/")},function(t,e,n){function r(t){return(r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}function o(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}function i(t,e,n){return(i="undefined"!=typeof Reflect&&Reflect.get?Reflect.get:function(t,e,n){var r=function(t,e){for(;!Object.prototype.hasOwnProperty.call(t,e)&&null!==(t=f(t)););return t}(t,e);if(r){var o=Object.getOwnPropertyDescriptor(r,e);return o.get?o.get.call(n):o.value}})(t,e,n||t)}function s(t,e){return(s=Object.setPrototypeOf||function(t,e){return t.__proto__=e,t})(t,e)}function c(t){var e=function(){if("undefined"==typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],(function(){}))),!0}catch(t){return!1}}();return function(){var n,r=f(t);if(e){var o=f(this).constructor;n=Reflect.construct(r,arguments,o)}else n=r.apply(this,arguments);return a(this,n)}}function a(t,e){return!e||"object"!==r(e)&&"function"!=typeof e?u(t):e}function u(t){if(void 0===t)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return t}function f(t){return(f=Object.setPrototypeOf?Object.getPrototypeOf:function(t){return t.__proto__||Object.getPrototypeOf(t)})(t)}var p,l=n(10),h=n(2),y=/\n/g,d=/\\n/g,v=function(t){!function(t,e){if("function"!=typeof 
e&&null!==e)throw new TypeError("Super expression must either be null or a function");t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,writable:!0,configurable:!0}}),e&&s(t,e)}(l,t);var e,n,r,a=c(l);function l(t){var e;!function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}(this,l),(e=a.call(this,t)).query=e.query||{},p||(p=h.___eio=h.___eio||[]),e.index=p.length;var n=u(e);return p.push((function(t){n.onData(t)})),e.query.j=e.index,e}return e=l,(n=[{key:"doClose",value:function(){this.script&&(this.script.onerror=function(){},this.script.parentNode.removeChild(this.script),this.script=null),this.form&&(this.form.parentNode.removeChild(this.form),this.form=null,this.iframe=null),i(f(l.prototype),"doClose",this).call(this)}},{key:"doPoll",value:function(){var t=this,e=document.createElement("script");this.script&&(this.script.parentNode.removeChild(this.script),this.script=null),e.async=!0,e.src=this.uri(),e.onerror=function(e){t.onError("jsonp poll error",e)};var n=document.getElementsByTagName("script")[0];n?n.parentNode.insertBefore(e,n):(document.head||document.body).appendChild(e),this.script=e,"undefined"!=typeof navigator&&/gecko/i.test(navigator.userAgent)&&setTimeout((function(){var t=document.createElement("iframe");document.body.appendChild(t),document.body.removeChild(t)}),100)}},{key:"doWrite",value:function(t,e){var n,r=this;if(!this.form){var o=document.createElement("form"),i=document.createElement("textarea"),s=this.iframeId="eio_iframe_"+this.index;o.className="socketio",o.style.position="absolute",o.style.top="-1000px",o.style.left="-1000px",o.target=s,o.method="POST",o.setAttribute("accept-charset","utf-8"),i.name="d",o.appendChild(i),document.body.appendChild(o),this.form=o,this.area=i}function c(){a(),e()}function a(){if(r.iframe)try{r.form.removeChild(r.iframe)}catch(t){r.onError("jsonp polling iframe removal error",t)}try{var t='<iframe src="javascript:0" 
name="'+r.iframeId+'">';n=document.createElement(t)}catch(t){(n=document.createElement("iframe")).name=r.iframeId,n.src="javascript:0"}n.id=r.iframeId,r.form.appendChild(n),r.iframe=n}this.form.action=this.uri(),a(),t=t.replace(d,"\\\n"),this.area.value=t.replace(y,"\\n");try{this.form.submit()}catch(t){}this.iframe.attachEvent?this.iframe.onreadystatechange=function(){"complete"===r.iframe.readyState&&c()}:this.iframe.onload=c}},{key:"supportsBinary",get:function(){return!1}}])&&o(e.prototype,n),r&&o(e,r),l}(l);t.exports=v},function(t,e,n){function r(t){return(r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}function o(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}function i(t,e){return(i=Object.setPrototypeOf||function(t,e){return t.__proto__=e,t})(t,e)}function s(t){var e=function(){if("undefined"==typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],(function(){}))),!0}catch(t){return!1}}();return function(){var n,r=a(t);if(e){var o=a(this).constructor;n=Reflect.construct(r,arguments,o)}else n=r.apply(this,arguments);return c(this,n)}}function c(t,e){return!e||"object"!==r(e)&&"function"!=typeof e?function(t){if(void 0===t)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return t}(t):e}function a(t){return(a=Object.setPrototypeOf?Object.getPrototypeOf:function(t){return t.__proto__||Object.getPrototypeOf(t)})(t)}var u=n(3),f=n(1),p=n(4),l=n(12),h=n(13).pick,y=n(28),d=y.WebSocket,v=y.usingBrowserWebSocket,b=y.defaultBinaryType,m="undefined"!=typeof navigator&&"string"==typeof 
navigator.product&&"reactnative"===navigator.product.toLowerCase(),g=function(t){!function(t,e){if("function"!=typeof e&&null!==e)throw new TypeError("Super expression must either be null or a function");t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,writable:!0,configurable:!0}}),e&&i(t,e)}(a,t);var e,n,r,c=s(a);function a(t){var e;return function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}(this,a),(e=c.call(this,t)).supportsBinary=!t.forceBase64,e}return e=a,(n=[{key:"doOpen",value:function(){if(this.check()){var t=this.uri(),e=this.opts.protocols,n=m?{}:h(this.opts,"agent","perMessageDeflate","pfx","key","passphrase","cert","ca","ciphers","rejectUnauthorized","localAddress","protocolVersion","origin","maxPayload","family","checkServerIdentity");this.opts.extraHeaders&&(n.headers=this.opts.extraHeaders);try{this.ws=v&&!m?e?new d(t,e):new d(t):new d(t,e,n)}catch(t){return this.emit("error",t)}this.ws.binaryType=this.socket.binaryType||b,this.addEventListeners()}}},{key:"addEventListeners",value:function(){var t=this;this.ws.onopen=function(){t.onOpen()},this.ws.onclose=function(){t.onClose()},this.ws.onmessage=function(e){t.onData(e.data)},this.ws.onerror=function(e){t.onError("websocket error",e)}}},{key:"write",value:function(t){var e=this;this.writable=!1;for(var n=t.length,r=0,o=n;r<o;r++)!function(t){f.encodePacket(t,e.supportsBinary,(function(r){var o={};v||(t.options&&(o.compress=t.options.compress),e.opts.perMessageDeflate&&("string"==typeof r?Buffer.byteLength(r):r.length)<e.opts.perMessageDeflate.threshold&&(o.compress=!1));try{v?e.ws.send(r):e.ws.send(r,o)}catch(t){}--n||(e.emit("flush"),setTimeout((function(){e.writable=!0,e.emit("drain")}),0))}))}(t[r])}},{key:"onClose",value:function(){u.prototype.onClose.call(this)}},{key:"doClose",value:function(){void 0!==this.ws&&(this.ws.close(),this.ws=null)}},{key:"uri",value:function(){var t=this.query||{},e=this.opts.secure?"wss":"ws",n="";return 
this.opts.port&&("wss"===e&&443!==Number(this.opts.port)||"ws"===e&&80!==Number(this.opts.port))&&(n=":"+this.opts.port),this.opts.timestampRequests&&(t[this.opts.timestampParam]=l()),this.supportsBinary||(t.b64=1),(t=p.encode(t)).length&&(t="?"+t),e+"://"+(-1!==this.opts.hostname.indexOf(":")?"["+this.opts.hostname+"]":this.opts.hostname)+n+this.opts.path+t}},{key:"check",value:function(){return!(!d||"__initialize"in d&&this.name===a.prototype.name)}},{key:"name",get:function(){return"websocket"}}])&&o(e.prototype,n),r&&o(e,r),a}(u);t.exports=g},function(t,e,n){var r=n(2);t.exports={WebSocket:r.WebSocket||r.MozWebSocket,usingBrowserWebSocket:!0,defaultBinaryType:"arraybuffer"}},function(t,e,n){"use strict";function r(t){return(r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}Object.defineProperty(e,"__esModule",{value:!0}),e.reconstructPacket=e.deconstructPacket=void 0;var o=n(15);e.deconstructPacket=function(t){var e=[],n=t.data,i=t;return i.data=function t(e,n){if(!e)return e;if(o.isBinary(e)){var i={_placeholder:!0,num:n.length};return n.push(e),i}if(Array.isArray(e)){for(var s=new Array(e.length),c=0;c<e.length;c++)s[c]=t(e[c],n);return s}if("object"===r(e)&&!(e instanceof Date)){var a={};for(var u in e)e.hasOwnProperty(u)&&(a[u]=t(e[u],n));return a}return e}(n,e),i.attachments=e.length,{packet:i,buffers:e}},e.reconstructPacket=function(t,e){return t.data=function t(e,n){if(!e)return e;if(e&&e._placeholder)return n[e.num];if(Array.isArray(e))for(var o=0;o<e.length;o++)e[o]=t(e[o],n);else if("object"===r(e))for(var i in e)e.hasOwnProperty(i)&&(e[i]=t(e[i],n));return e}(t.data,e),t.attachments=void 0,t}},function(t,e){function 
n(t){t=t||{},this.ms=t.min||100,this.max=t.max||1e4,this.factor=t.factor||2,this.jitter=t.jitter>0&&t.jitter<=1?t.jitter:0,this.attempts=0}t.exports=n,n.prototype.duration=function(){var t=this.ms*Math.pow(this.factor,this.attempts++);if(this.jitter){var e=Math.random(),n=Math.floor(e*this.jitter*t);t=0==(1&Math.floor(10*e))?t-n:t+n}return 0|Math.min(t,this.max)},n.prototype.reset=function(){this.attempts=0},n.prototype.setMin=function(t){this.ms=t},n.prototype.setMax=function(t){this.max=t},n.prototype.setJitter=function(t){this.jitter=t}}])})); +//# sourceMappingURL=socket.io.min.js.map \ No newline at end of file diff --git a/changedetectionio/static/styles/scss/parts/_watch_table.scss b/changedetectionio/static/styles/scss/parts/_watch_table.scss index bce774c0..b6f0ff6b 100644 --- a/changedetectionio/static/styles/scss/parts/_watch_table.scss +++ b/changedetectionio/static/styles/scss/parts/_watch_table.scss @@ -39,10 +39,13 @@ } } - .title-col a[target="_blank"]::after, - .current-diff-url::after { - content: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAQElEQVR42qXKwQkAIAxDUUdxtO6/RBQkQZvSi8I/pL4BoGw/XPkh4XigPmsUgh0626AjRsgxHTkUThsG2T/sIlzdTsp52kSS1wAAAABJRU5ErkJggg==); + .title-col a[target="_blank"] i[data-feather], + .current-diff-url i[data-feather] { + width: 12px; + height: 12px; + stroke: #666; margin: 0 3px 0 5px; + vertical-align: middle; } @@ -114,5 +117,18 @@ display: block !important; } } + + tr.single-history { + a.preview-link { + display: inline-block !important; + } + } + tr.multiple-history { + a.history-link { + display: inline-block !important; + } + } } + + diff --git a/changedetectionio/static/styles/scss/styles.scss b/changedetectionio/static/styles/scss/styles.scss index 07c4fc92..514d825c 100644 --- a/changedetectionio/static/styles/scss/styles.scss +++ b/changedetectionio/static/styles/scss/styles.scss @@ -1083,6 +1083,9 @@ ul { /* some space if they wrap the page */ margin-bottom: 3px; margin-top: 3px; + 
/* vertically center icon and text */ + display: inline-flex; + align-items: center; } } diff --git a/changedetectionio/static/styles/styles.css b/changedetectionio/static/styles/styles.css index ccdb0db8..7699ea28 100644 --- a/changedetectionio/static/styles/styles.css +++ b/changedetectionio/static/styles/styles.css @@ -545,10 +545,13 @@ body.preview-text-enabled { font-weight: bolder; } .watch-table th a.inactive .arrow { display: none; } - .watch-table .title-col a[target="_blank"]::after, - .watch-table .current-diff-url::after { - content: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAoAAAAKCAYAAACNMs+9AAAAQElEQVR42qXKwQkAIAxDUUdxtO6/RBQkQZvSi8I/pL4BoGw/XPkh4XigPmsUgh0626AjRsgxHTkUThsG2T/sIlzdTsp52kSS1wAAAABJRU5ErkJggg==); - margin: 0 3px 0 5px; } + .watch-table .title-col a[target="_blank"] i[data-feather], + .watch-table .current-diff-url i[data-feather] { + width: 12px; + height: 12px; + stroke: #666; + margin: 0 3px 0 5px; + vertical-align: middle; } .watch-table tr.checking-now td:first-child { position: relative; } .watch-table tr.checking-now td:first-child::before { @@ -579,6 +582,10 @@ body.preview-text-enabled { color: var(--color-watch-table-error); } .watch-table tr.has-error .error-text { display: block !important; } + .watch-table tr.single-history a.preview-link { + display: inline-block !important; } + .watch-table tr.multiple-history a.history-link { + display: inline-block !important; } ul#conditions_match_logic { list-style: none; } @@ -1457,7 +1464,10 @@ ul { #checkbox-operations button { /* some space if they wrap the page */ margin-bottom: 3px; - margin-top: 3px; } + margin-top: 3px; + /* vertically center icon and text */ + display: inline-flex; + align-items: center; } .checkbox-uuid > * { vertical-align: middle; } diff --git a/changedetectionio/store.py b/changedetectionio/store.py index f200bef3..ed5b4dca 100644 --- a/changedetectionio/store.py +++ b/changedetectionio/store.py @@ -238,6 +238,7 @@ class ChangeDetectionStore: 
with self.lock: if uuid == 'all': self.__data['watching'] = {} + time.sleep(1) # Mainly used for testing to allow all items to flush before running next test # GitHub #30 also delete history records for uuid in self.data['watching']: @@ -407,7 +408,12 @@ class ChangeDetectionStore: # This is a fairly basic strategy to deal with the case that the file is corrupted, # system was out of memory, out of RAM etc with open(self.json_store_path+".tmp", 'w') as json_file: - json.dump(data, json_file, indent=4) + # Use compact JSON in production for better performance + debug_mode = os.environ.get('CHANGEDETECTION_DEBUG', 'false').lower() == 'true' + if debug_mode: + json.dump(data, json_file, indent=4) + else: + json.dump(data, json_file, separators=(',', ':')) os.replace(self.json_store_path+".tmp", self.json_store_path) except Exception as e: logger.error(f"Error writing JSON!! (Main JSON file save was skipped) : {str(e)}") diff --git a/changedetectionio/templates/base.html b/changedetectionio/templates/base.html index ee6410c6..e564df6a 100644 --- a/changedetectionio/templates/base.html +++ b/changedetectionio/templates/base.html @@ -31,11 +31,13 @@ const socketio_url="{{ get_socketio_path() }}/socket.io"; const is_authenticated = {% if current_user.is_authenticated or not has_password %}true{% else %}false{% endif %}; </script> + <script src="https://unpkg.com/feather-icons"></script> <script src="{{url_for('static_content', group='js', filename='jquery-3.6.0.min.js')}}"></script> <script src="{{url_for('static_content', group='js', filename='csrf.js')}}" defer></script> - <script src="{{url_for('static_content', group='js', filename='socket.io.min.js')}}" integrity="sha384-c79GN5VsunZvi+Q/WObgk2in0CbZsHnjEqvFxC5DxHn9lTfNce2WW6h2pH6u/kF+" crossorigin="anonymous"></script> + {% if socket_io_enabled %} + <script src="{{url_for('static_content', group='js', filename='socket.io.min.js')}}"></script> <script src="{{url_for('static_content', group='js', 
filename='realtime.js')}}" defer></script> - <script src="{{url_for('static_content', group='js', filename='timeago-init.js')}}" defer></script> + {% endif %} </head> <body class=""> diff --git a/changedetectionio/tests/conftest.py b/changedetectionio/tests/conftest.py index c1195bcb..3e7069ca 100644 --- a/changedetectionio/tests/conftest.py +++ b/changedetectionio/tests/conftest.py @@ -10,6 +10,8 @@ import os import sys from loguru import logger +from changedetectionio.tests.util import live_server_setup, new_live_server_setup + # https://github.com/pallets/flask/blob/1.1.2/examples/tutorial/tests/test_auth.py # Much better boilerplate than the docs # https://www.python-boilerplate.com/py3+flask+pytest/ @@ -70,6 +72,22 @@ def cleanup(datastore_path): if os.path.isfile(f): os.unlink(f) +@pytest.fixture(scope='function', autouse=True) +def prepare_test_function(live_server): + + routes = [rule.rule for rule in live_server.app.url_map.iter_rules()] + if '/test-random-content-endpoint' not in routes: + logger.debug("Setting up test URL routes") + new_live_server_setup(live_server) + + + yield + # Then cleanup/shutdown + live_server.app.config['DATASTORE'].data['watching']={} + time.sleep(0.3) + live_server.app.config['DATASTORE'].data['watching']={} + + @pytest.fixture(scope='session') def app(request): """Create application for the tests.""" @@ -106,8 +124,33 @@ def app(request): app.config['STOP_THREADS'] = True def teardown(): + # Stop all threads and services datastore.stop_thread = True app.config.exit.set() + + # Shutdown workers gracefully before loguru cleanup + try: + from changedetectionio import worker_handler + worker_handler.shutdown_workers() + except Exception: + pass + + # Stop socket server threads + try: + from changedetectionio.flask_app import socketio_server + if socketio_server and hasattr(socketio_server, 'shutdown'): + socketio_server.shutdown() + except Exception: + pass + + # Give threads a moment to finish their shutdown + import time + 
time.sleep(0.1) + + # Remove all loguru handlers to prevent "closed file" errors + logger.remove() + + # Cleanup files cleanup(app_config['datastore_path']) diff --git a/changedetectionio/tests/custom_browser_url/test_custom_browser_url.py b/changedetectionio/tests/custom_browser_url/test_custom_browser_url.py index efc6e127..6ec4205e 100644 --- a/changedetectionio/tests/custom_browser_url/test_custom_browser_url.py +++ b/changedetectionio/tests/custom_browser_url/test_custom_browser_url.py @@ -78,12 +78,12 @@ def do_test(client, live_server, make_test_use_extra_browser=False): # Requires playwright to be installed def test_request_via_custom_browser_url(client, live_server, measure_memory_usage): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function # We do this so we can grep the logs of the custom container and see if the request actually went through that container do_test(client, live_server, make_test_use_extra_browser=True) def test_request_not_via_custom_browser_url(client, live_server, measure_memory_usage): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function # We do this so we can grep the logs of the custom container and see if the request actually went through that container do_test(client, live_server, make_test_use_extra_browser=False) diff --git a/changedetectionio/tests/fetchers/test_content.py b/changedetectionio/tests/fetchers/test_content.py index dc02f50c..e09781c5 100644 --- a/changedetectionio/tests/fetchers/test_content.py +++ b/changedetectionio/tests/fetchers/test_content.py @@ -7,7 +7,7 @@ import logging # Requires playwright to be installed def test_fetch_webdriver_content(client, live_server, measure_memory_usage): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function ##################### res = client.post( diff --git a/changedetectionio/tests/fetchers/test_custom_js_before_content.py 
b/changedetectionio/tests/fetchers/test_custom_js_before_content.py index e145a79e..cb4d6286 100644 --- a/changedetectionio/tests/fetchers/test_custom_js_before_content.py +++ b/changedetectionio/tests/fetchers/test_custom_js_before_content.py @@ -5,7 +5,7 @@ from ..util import live_server_setup, wait_for_all_checks, extract_UUID_from_cli def test_execute_custom_js(client, live_server, measure_memory_usage): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function assert os.getenv('PLAYWRIGHT_DRIVER_URL'), "Needs PLAYWRIGHT_DRIVER_URL set for this test" test_url = url_for('test_interactive_html_endpoint', _external=True) diff --git a/changedetectionio/tests/proxy_list/test_multiple_proxy.py b/changedetectionio/tests/proxy_list/test_multiple_proxy.py index f1818e3a..cc0da45f 100644 --- a/changedetectionio/tests/proxy_list/test_multiple_proxy.py +++ b/changedetectionio/tests/proxy_list/test_multiple_proxy.py @@ -6,7 +6,7 @@ from ..util import live_server_setup, wait_for_all_checks def test_preferred_proxy(client, live_server, measure_memory_usage): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function url = "http://chosen.changedetection.io" diff --git a/changedetectionio/tests/proxy_list/test_noproxy.py b/changedetectionio/tests/proxy_list/test_noproxy.py index ffae929b..fdd9aa35 100644 --- a/changedetectionio/tests/proxy_list/test_noproxy.py +++ b/changedetectionio/tests/proxy_list/test_noproxy.py @@ -6,7 +6,7 @@ from ..util import live_server_setup, wait_for_all_checks, extract_UUID_from_cli def test_noproxy_option(client, live_server, measure_memory_usage): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function # Run by run_proxy_tests.sh # Call this URL then scan the containers that it never went through them url = "http://noproxy.changedetection.io" diff --git a/changedetectionio/tests/proxy_list/test_proxy.py 
b/changedetectionio/tests/proxy_list/test_proxy.py index 726d0c82..bda17d1b 100644 --- a/changedetectionio/tests/proxy_list/test_proxy.py +++ b/changedetectionio/tests/proxy_list/test_proxy.py @@ -6,7 +6,7 @@ from ..util import live_server_setup, wait_for_all_checks, extract_UUID_from_cli # just make a request, we will grep in the docker logs to see it actually got called def test_check_basic_change_detection_functionality(client, live_server, measure_memory_usage): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function res = client.post( url_for("imports.import_page"), # Because a URL wont show in squid/proxy logs due it being SSLed diff --git a/changedetectionio/tests/proxy_list/test_proxy_noconnect.py b/changedetectionio/tests/proxy_list/test_proxy_noconnect.py index 72f3e512..31edaadb 100644 --- a/changedetectionio/tests/proxy_list/test_proxy_noconnect.py +++ b/changedetectionio/tests/proxy_list/test_proxy_noconnect.py @@ -13,7 +13,7 @@ from ... 
import strtobool # WEBDRIVER_URL=http://127.0.0.1:4444/wd/hub pytest tests/proxy_list/test_proxy_noconnect.py def test_proxy_noconnect_custom(client, live_server, measure_memory_usage): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function # Goto settings, add our custom one res = client.post( diff --git a/changedetectionio/tests/proxy_list/test_select_custom_proxy.py b/changedetectionio/tests/proxy_list/test_select_custom_proxy.py index e35c3718..ab831358 100644 --- a/changedetectionio/tests/proxy_list/test_select_custom_proxy.py +++ b/changedetectionio/tests/proxy_list/test_select_custom_proxy.py @@ -7,7 +7,7 @@ import os # just make a request, we will grep in the docker logs to see it actually got called def test_select_custom(client, live_server, measure_memory_usage): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function # Goto settings, add our custom one res = client.post( diff --git a/changedetectionio/tests/proxy_socks5/test_socks5_proxy.py b/changedetectionio/tests/proxy_socks5/test_socks5_proxy.py index 3d0271a8..f2595bd0 100644 --- a/changedetectionio/tests/proxy_socks5/test_socks5_proxy.py +++ b/changedetectionio/tests/proxy_socks5/test_socks5_proxy.py @@ -20,7 +20,7 @@ def set_response(): time.sleep(1) def test_socks5(client, live_server, measure_memory_usage): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function set_response() # Setup a proxy diff --git a/changedetectionio/tests/proxy_socks5/test_socks5_proxy_sources.py b/changedetectionio/tests/proxy_socks5/test_socks5_proxy_sources.py index 04024843..3805c88d 100644 --- a/changedetectionio/tests/proxy_socks5/test_socks5_proxy_sources.py +++ b/changedetectionio/tests/proxy_socks5/test_socks5_proxy_sources.py @@ -21,7 +21,7 @@ def set_response(): # should be proxies.json mounted from run_proxy_tests.sh already # -v 
`pwd`/tests/proxy_socks5/proxies.json-example:/app/changedetectionio/test-datastore/proxies.json def test_socks5_from_proxiesjson_file(client, live_server, measure_memory_usage): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function set_response() # Because the socks server should connect back to us test_url = url_for('test_endpoint', _external=True) + f"?socks-test-tag={os.getenv('SOCKSTEST', '')}" diff --git a/changedetectionio/tests/restock/test_restock.py b/changedetectionio/tests/restock/test_restock.py index 1d1accec..ecee00fa 100644 --- a/changedetectionio/tests/restock/test_restock.py +++ b/changedetectionio/tests/restock/test_restock.py @@ -54,7 +54,7 @@ def test_restock_detection(client, live_server, measure_memory_usage): set_original_response() #assert os.getenv('PLAYWRIGHT_DRIVER_URL'), "Needs PLAYWRIGHT_DRIVER_URL set for this test" - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function ##################### notification_url = url_for('test_notification_endpoint', _external=True).replace('http://localhost', 'http://changedet').replace('http', 'json') diff --git a/changedetectionio/tests/smtp/test_notification_smtp.py b/changedetectionio/tests/smtp/test_notification_smtp.py index ea17a2b5..a400901c 100644 --- a/changedetectionio/tests/smtp/test_notification_smtp.py +++ b/changedetectionio/tests/smtp/test_notification_smtp.py @@ -20,8 +20,7 @@ from changedetectionio.notification import ( valid_notification_formats, ) -def test_setup(live_server): - live_server_setup(live_server) + def get_last_message_from_smtp_server(): import socket @@ -40,7 +39,7 @@ def get_last_message_from_smtp_server(): # Requires running the test SMTP server def test_check_notification_email_formats_default_HTML(client, live_server, measure_memory_usage): - # live_server_setup(live_server) + ## live_server_setup(live_server) # Setup on conftest per function set_original_response() 
notification_url = f'mailto://changedetection@{smtp_test_server}:11025/?to=fff@home.com' @@ -91,7 +90,7 @@ def test_check_notification_email_formats_default_HTML(client, live_server, meas def test_check_notification_email_formats_default_Text_override_HTML(client, live_server, measure_memory_usage): - # live_server_setup(live_server) + ## live_server_setup(live_server) # Setup on conftest per function # HTML problems? see this # https://github.com/caronc/apprise/issues/633 diff --git a/changedetectionio/tests/test_access_control.py b/changedetectionio/tests/test_access_control.py index b35de268..a72e64a9 100644 --- a/changedetectionio/tests/test_access_control.py +++ b/changedetectionio/tests/test_access_control.py @@ -4,7 +4,7 @@ import time def test_check_access_control(app, client, live_server): # Still doesnt work, but this is closer. - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function with app.test_client(use_cookies=True) as c: # Check we don't have any password protection enabled yet. 
diff --git a/changedetectionio/tests/test_add_replace_remove_filter.py b/changedetectionio/tests/test_add_replace_remove_filter.py index 62074e42..3ca5284e 100644 --- a/changedetectionio/tests/test_add_replace_remove_filter.py +++ b/changedetectionio/tests/test_add_replace_remove_filter.py @@ -4,7 +4,7 @@ import os.path from flask import url_for from .util import live_server_setup, wait_for_all_checks, wait_for_notification_endpoint_output - +import time def set_original(excluding=None, add_line=None): test_return_data = """<html> @@ -35,11 +35,11 @@ def set_original(excluding=None, add_line=None): with open("test-datastore/endpoint-content.txt", "w") as f: f.write(test_return_data) -def test_setup(client, live_server, measure_memory_usage): - live_server_setup(live_server) +# def test_setup(client, live_server, measure_memory_usage): + # live_server_setup(live_server) # Setup on conftest per function def test_check_removed_line_contains_trigger(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + # Give the endpoint time to spin up set_original() # Add our URL to the import page @@ -72,6 +72,7 @@ def test_check_removed_line_contains_trigger(client, live_server, measure_memory res = client.get(url_for("ui.form_watch_checknow"), follow_redirects=True) assert b'Queued 1 watch for rechecking.' 
in res.data wait_for_all_checks(client) + time.sleep(0.5) res = client.get(url_for("watchlist.index")) assert b'unviewed' not in res.data @@ -84,12 +85,17 @@ def test_check_removed_line_contains_trigger(client, live_server, measure_memory res = client.get(url_for("watchlist.index")) assert b'unviewed' in res.data + time.sleep(1) # Now add it back, and we should not get a trigger client.get(url_for("ui.mark_all_viewed"), follow_redirects=True) + time.sleep(0.2) + + time.sleep(1) set_original(excluding=None) client.get(url_for("ui.form_watch_checknow"), follow_redirects=True) wait_for_all_checks(client) + time.sleep(1) res = client.get(url_for("watchlist.index")) assert b'unviewed' not in res.data @@ -105,7 +111,10 @@ def test_check_removed_line_contains_trigger(client, live_server, measure_memory def test_check_add_line_contains_trigger(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + + res = client.get(url_for("ui.form_delete", uuid="all"), follow_redirects=True) + assert b'Deleted' in res.data + time.sleep(1) # Give the endpoint time to spin up test_notification_url = url_for('test_notification_endpoint', _external=True).replace('http://', 'post://') + "?xxx={{ watch_url }}" diff --git a/changedetectionio/tests/test_api.py b/changedetectionio/tests/test_api.py index 7703f42c..2cd87e5b 100644 --- a/changedetectionio/tests/test_api.py +++ b/changedetectionio/tests/test_api.py @@ -52,12 +52,12 @@ def is_valid_uuid(val): return False -def test_setup(client, live_server, measure_memory_usage): - live_server_setup(live_server) +# def test_setup(client, live_server, measure_memory_usage): + # live_server_setup(live_server) # Setup on conftest per function def test_api_simple(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + api_key = live_server.app.config['DATASTORE'].data['settings']['application'].get('api_access_token') @@ -108,7 +108,7 @@ def test_api_simple(client, live_server, measure_memory_usage): 
headers={'x-api-key': api_key} ) assert len(res.json) == 0 - + time.sleep(1) wait_for_all_checks(client) set_modified_response() @@ -119,6 +119,7 @@ def test_api_simple(client, live_server, measure_memory_usage): ) wait_for_all_checks(client) + time.sleep(1) # Did the recheck fire? res = client.get( url_for("createwatch"), @@ -291,7 +292,7 @@ def test_access_denied(client, live_server, measure_memory_usage): def test_api_watch_PUT_update(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + api_key = live_server.app.config['DATASTORE'].data['settings']['application'].get('api_access_token') # Create a watch @@ -371,7 +372,7 @@ def test_api_watch_PUT_update(client, live_server, measure_memory_usage): def test_api_import(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + api_key = live_server.app.config['DATASTORE'].data['settings']['application'].get('api_access_token') res = client.post( @@ -393,7 +394,7 @@ def test_api_import(client, live_server, measure_memory_usage): def test_api_conflict_UI_password(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + api_key = live_server.app.config['DATASTORE'].data['settings']['application'].get('api_access_token') # Enable password check and diff page access bypass diff --git a/changedetectionio/tests/test_api_notifications.py b/changedetectionio/tests/test_api_notifications.py index 9a030e66..d8bad0aa 100644 --- a/changedetectionio/tests/test_api_notifications.py +++ b/changedetectionio/tests/test_api_notifications.py @@ -5,7 +5,7 @@ from .util import live_server_setup import json def test_api_notifications_crud(client, live_server): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function api_key = live_server.app.config['DATASTORE'].data['settings']['application'].get('api_access_token') # Confirm notifications are initially empty diff --git a/changedetectionio/tests/test_api_search.py 
b/changedetectionio/tests/test_api_search.py index 3369905e..7f7dd6a3 100644 --- a/changedetectionio/tests/test_api_search.py +++ b/changedetectionio/tests/test_api_search.py @@ -7,7 +7,7 @@ from .util import live_server_setup, wait_for_all_checks def test_api_search(client, live_server): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function api_key = live_server.app.config['DATASTORE'].data['settings']['application'].get('api_access_token') watch_data = {} diff --git a/changedetectionio/tests/test_api_tags.py b/changedetectionio/tests/test_api_tags.py index 55131d6d..831d052e 100644 --- a/changedetectionio/tests/test_api_tags.py +++ b/changedetectionio/tests/test_api_tags.py @@ -5,7 +5,7 @@ from .util import live_server_setup, wait_for_all_checks import json def test_api_tags_listing(client, live_server, measure_memory_usage): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function api_key = live_server.app.config['DATASTORE'].data['settings']['application'].get('api_access_token') tag_title = 'Test Tag' diff --git a/changedetectionio/tests/test_auth.py b/changedetectionio/tests/test_auth.py index b3065fc0..a9859961 100644 --- a/changedetectionio/tests/test_auth.py +++ b/changedetectionio/tests/test_auth.py @@ -6,7 +6,7 @@ from .util import live_server_setup, wait_for_all_checks # test pages with http://username@password:foobar.com/ work def test_basic_auth(client, live_server, measure_memory_usage): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function # This page will echo back any auth info diff --git a/changedetectionio/tests/test_automatic_follow_ldjson_price.py b/changedetectionio/tests/test_automatic_follow_ldjson_price.py index f1908053..c730286c 100644 --- a/changedetectionio/tests/test_automatic_follow_ldjson_price.py +++ b/changedetectionio/tests/test_automatic_follow_ldjson_price.py @@ -76,12 +76,12 @@ def 
set_response_without_ldjson(): f.write(test_return_data) return None -def test_setup(client, live_server, measure_memory_usage): - live_server_setup(live_server) +# def test_setup(client, live_server, measure_memory_usage): + # live_server_setup(live_server) # Setup on conftest per function # actually only really used by the distll.io importer, but could be handy too def test_check_ldjson_price_autodetect(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + set_response_with_ldjson() # Add our URL to the import page @@ -164,7 +164,7 @@ def _test_runner_check_bad_format_ignored(live_server, client, has_ldjson_price_ def test_bad_ldjson_is_correctly_ignored(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + test_return_data = """ <html> <head> diff --git a/changedetectionio/tests/test_backend.py b/changedetectionio/tests/test_backend.py index de0c169e..2cbbc530 100644 --- a/changedetectionio/tests/test_backend.py +++ b/changedetectionio/tests/test_backend.py @@ -18,7 +18,7 @@ def test_inscriptus(): def test_check_basic_change_detection_functionality(client, live_server, measure_memory_usage): set_original_response() - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function # Add our URL to the import page res = client.post( @@ -143,6 +143,7 @@ def test_check_basic_change_detection_functionality(client, live_server, measure # hit the mark all viewed link res = client.get(url_for("ui.mark_all_viewed"), follow_redirects=True) + time.sleep(0.2) assert b'class="has-unviewed' not in res.data assert b'unviewed' not in res.data diff --git a/changedetectionio/tests/test_backup.py b/changedetectionio/tests/test_backup.py index 16366e91..ca1a0c6f 100644 --- a/changedetectionio/tests/test_backup.py +++ b/changedetectionio/tests/test_backup.py @@ -9,7 +9,7 @@ import time def test_backup(client, live_server, measure_memory_usage): - live_server_setup(live_server) + # 
live_server_setup(live_server) # Setup on conftest per function set_original_response() diff --git a/changedetectionio/tests/test_basic_socketio.py b/changedetectionio/tests/test_basic_socketio.py index 82a31e22..1c48e758 100644 --- a/changedetectionio/tests/test_basic_socketio.py +++ b/changedetectionio/tests/test_basic_socketio.py @@ -110,7 +110,7 @@ def run_socketio_watch_update_test(client, live_server, password_mode=""): def test_everything(live_server, client): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function run_socketio_watch_update_test(password_mode="", live_server=live_server, client=client) diff --git a/changedetectionio/tests/test_block_while_text_present.py b/changedetectionio/tests/test_block_while_text_present.py index 473f5645..6ab36855 100644 --- a/changedetectionio/tests/test_block_while_text_present.py +++ b/changedetectionio/tests/test_block_while_text_present.py @@ -62,7 +62,7 @@ def set_modified_response_minus_block_text(): def test_check_block_changedetection_text_NOT_present(client, live_server, measure_memory_usage): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function # Use a mix of case in ZzZ to prove it works case-insensitive. 
ignore_text = "out of stoCk\r\nfoobar" set_original_ignore_response() diff --git a/changedetectionio/tests/test_clone.py b/changedetectionio/tests/test_clone.py index fd43384a..aeb3b4f2 100644 --- a/changedetectionio/tests/test_clone.py +++ b/changedetectionio/tests/test_clone.py @@ -7,7 +7,7 @@ from .util import live_server_setup, wait_for_all_checks def test_clone_functionality(client, live_server, measure_memory_usage): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function with open("test-datastore/endpoint-content.txt", "w") as f: f.write("<html><body>Some content</body></html>") diff --git a/changedetectionio/tests/test_conditions.py b/changedetectionio/tests/test_conditions.py index 14dde024..9c6fae45 100644 --- a/changedetectionio/tests/test_conditions.py +++ b/changedetectionio/tests/test_conditions.py @@ -45,15 +45,15 @@ def set_number_out_of_range_response(number="150"): f.write(test_return_data) -def test_setup(client, live_server): +# def test_setup(client, live_server): """Test that both text and number conditions work together with AND logic.""" - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function def test_conditions_with_text_and_number(client, live_server): """Test that both text and number conditions work together with AND logic.""" set_original_response("50") - #live_server_setup(live_server) + test_url = url_for('test_endpoint', _external=True) @@ -110,6 +110,8 @@ def test_conditions_with_text_and_number(client, live_server): wait_for_all_checks(client) client.get(url_for("ui.mark_all_viewed"), follow_redirects=True) + time.sleep(0.2) + wait_for_all_checks(client) # Case 1 @@ -126,6 +128,8 @@ def test_conditions_with_text_and_number(client, live_server): # Case 2: Change with one condition violated # Number out of range (150) but contains '5' client.get(url_for("ui.mark_all_viewed"), follow_redirects=True) + time.sleep(0.2) + 
set_number_out_of_range_response("150.5") @@ -206,7 +210,7 @@ def test_condition_validate_rule_row(client, live_server): # If there was only a change in the whitespacing, then we shouldnt have a change detected def test_wordcount_conditions_plugin(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + test_return_data = """<html> <body> @@ -249,7 +253,7 @@ def test_wordcount_conditions_plugin(client, live_server, measure_memory_usage): # If there was only a change in the whitespacing, then we shouldnt have a change detected def test_lev_conditions_plugin(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + with open("test-datastore/endpoint-content.txt", "w") as f: f.write("""<html> diff --git a/changedetectionio/tests/test_css_selector.py b/changedetectionio/tests/test_css_selector.py index 545d97f0..bad181c2 100644 --- a/changedetectionio/tests/test_css_selector.py +++ b/changedetectionio/tests/test_css_selector.py @@ -6,8 +6,7 @@ from .util import live_server_setup, wait_for_all_checks from ..html_tools import * -def test_setup(live_server): - live_server_setup(live_server) + def set_original_response(): test_return_data = """<html> @@ -125,7 +124,7 @@ def test_check_markup_include_filters_restriction(client, live_server, measure_m # Tests the whole stack works with the CSS Filter def test_check_multiple_filters(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + include_filters = "#blob-a\r\nxpath://*[contains(@id,'blob-b')]" with open("test-datastore/endpoint-content.txt", "w") as f: @@ -177,7 +176,7 @@ def test_check_multiple_filters(client, live_server, measure_memory_usage): # Mainly used when the filter contains just an IMG, this can happen when someone selects an image in the visual-selector # Tests fetcher can throw a "ReplyWithContentButNoText" exception after applying filter and extracting text def test_filter_is_empty_help_suggestion(client, live_server, 
measure_memory_usage): - #live_server_setup(live_server) + include_filters = "#blob-a" diff --git a/changedetectionio/tests/test_element_removal.py b/changedetectionio/tests/test_element_removal.py index b4a31079..36643b71 100644 --- a/changedetectionio/tests/test_element_removal.py +++ b/changedetectionio/tests/test_element_removal.py @@ -8,8 +8,7 @@ from ..html_tools import * from .util import live_server_setup, wait_for_all_checks -def test_setup(live_server): - live_server_setup(live_server) + def set_response_with_multiple_index(): data= """<!DOCTYPE html> @@ -148,7 +147,7 @@ across multiple lines def test_element_removal_full(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + set_original_response() @@ -209,7 +208,7 @@ def test_element_removal_full(client, live_server, measure_memory_usage): # Re #2752 def test_element_removal_nth_offset_no_shift(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + set_response_with_multiple_index() subtractive_selectors_data = [""" diff --git a/changedetectionio/tests/test_encoding.py b/changedetectionio/tests/test_encoding.py index d70dc56a..722d1e6b 100644 --- a/changedetectionio/tests/test_encoding.py +++ b/changedetectionio/tests/test_encoding.py @@ -7,8 +7,7 @@ from .util import live_server_setup, wait_for_all_checks, extract_UUID_from_clie import pytest -def test_setup(live_server): - live_server_setup(live_server) + def set_html_response(): diff --git a/changedetectionio/tests/test_errorhandling.py b/changedetectionio/tests/test_errorhandling.py index 0717f611..27b9a318 100644 --- a/changedetectionio/tests/test_errorhandling.py +++ b/changedetectionio/tests/test_errorhandling.py @@ -5,10 +5,7 @@ import time from flask import url_for from .util import live_server_setup, wait_for_all_checks -from ..html_tools import * -def test_setup(live_server): - live_server_setup(live_server) def _runner_test_http_errors(client, live_server, http_code, expected_text): @@ -79,7 
+76,14 @@ def test_DNS_errors(client, live_server, measure_memory_usage): wait_for_all_checks(client) res = client.get(url_for("watchlist.index")) - found_name_resolution_error = b"Temporary failure in name resolution" in res.data or b"Name or service not known" in res.data + found_name_resolution_error = ( + b"No address found" in res.data or + b"Name or service not known" in res.data or + b"nodename nor servname provided" in res.data or + b"Temporary failure in name resolution" in res.data or + b"Failed to establish a new connection" in res.data or + b"Connection error occurred" in res.data + ) assert found_name_resolution_error # Should always record that we tried assert bytes("just now".encode('utf-8')) in res.data @@ -88,7 +92,7 @@ def test_DNS_errors(client, live_server, measure_memory_usage): # Re 1513 def test_low_level_errors_clear_correctly(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + # Give the endpoint time to spin up time.sleep(1) @@ -108,7 +112,14 @@ def test_low_level_errors_clear_correctly(client, live_server, measure_memory_us # We should see the DNS error res = client.get(url_for("watchlist.index")) - found_name_resolution_error = b"Temporary failure in name resolution" in res.data or b"Name or service not known" in res.data + found_name_resolution_error = ( + b"No address found" in res.data or + b"Name or service not known" in res.data or + b"nodename nor servname provided" in res.data or + b"Temporary failure in name resolution" in res.data or + b"Failed to establish a new connection" in res.data or + b"Connection error occurred" in res.data + ) assert found_name_resolution_error # Update with what should work @@ -123,7 +134,14 @@ def test_low_level_errors_clear_correctly(client, live_server, measure_memory_us # Now the error should be gone wait_for_all_checks(client) res = client.get(url_for("watchlist.index")) - found_name_resolution_error = b"Temporary failure in name resolution" in res.data or b"Name or 
service not known" in res.data + found_name_resolution_error = ( + b"No address found" in res.data or + b"Name or service not known" in res.data or + b"nodename nor servname provided" in res.data or + b"Temporary failure in name resolution" in res.data or + b"Failed to establish a new connection" in res.data or + b"Connection error occurred" in res.data + ) assert not found_name_resolution_error res = client.get(url_for("ui.form_delete", uuid="all"), follow_redirects=True) diff --git a/changedetectionio/tests/test_extract_csv.py b/changedetectionio/tests/test_extract_csv.py index e7073638..e70c41b2 100644 --- a/changedetectionio/tests/test_extract_csv.py +++ b/changedetectionio/tests/test_extract_csv.py @@ -14,7 +14,7 @@ def test_check_extract_text_from_diff(client, live_server, measure_memory_usage) with open("test-datastore/endpoint-content.txt", "w") as f: f.write("Now it's {} seconds since epoch, time flies!".format(str(time.time()))) - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function # Add our URL to the import page res = client.post( diff --git a/changedetectionio/tests/test_extract_regex.py b/changedetectionio/tests/test_extract_regex.py index 68155ff1..3b270d3f 100644 --- a/changedetectionio/tests/test_extract_regex.py +++ b/changedetectionio/tests/test_extract_regex.py @@ -67,11 +67,11 @@ def set_multiline_response(): return None -def test_setup(client, live_server, measure_memory_usage): - live_server_setup(live_server) +# def test_setup(client, live_server, measure_memory_usage): + # live_server_setup(live_server) # Setup on conftest per function def test_check_filter_multiline(client, live_server, measure_memory_usage): - # live_server_setup(live_server) + ## live_server_setup(live_server) # Setup on conftest per function set_multiline_response() # Add our URL to the import page @@ -206,7 +206,7 @@ def test_check_filter_and_regex_extract(client, live_server, measure_memory_usag def 
test_regex_error_handling(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + # Add our URL to the import page test_url = url_for('test_endpoint', _external=True) diff --git a/changedetectionio/tests/test_filter_exist_changes.py b/changedetectionio/tests/test_filter_exist_changes.py index 9b2f9350..c7841bad 100644 --- a/changedetectionio/tests/test_filter_exist_changes.py +++ b/changedetectionio/tests/test_filter_exist_changes.py @@ -46,7 +46,7 @@ def test_filter_doesnt_exist_then_exists_should_get_notification(client, live_se # And the page has that filter available # Then I should get a notification - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function # Give the endpoint time to spin up time.sleep(1) diff --git a/changedetectionio/tests/test_filter_failure_notification.py b/changedetectionio/tests/test_filter_failure_notification.py index fcb13d88..7023fb55 100644 --- a/changedetectionio/tests/test_filter_failure_notification.py +++ b/changedetectionio/tests/test_filter_failure_notification.py @@ -163,15 +163,14 @@ def run_filter_test(client, live_server, content_filter): os.unlink("test-datastore/notification.txt") -def test_setup(live_server): - live_server_setup(live_server) + def test_check_include_filters_failure_notification(client, live_server, measure_memory_usage): -# live_server_setup(live_server) +# # live_server_setup(live_server) # Setup on conftest per function run_filter_test(client, live_server,'#nope-doesnt-exist') def test_check_xpath_filter_failure_notification(client, live_server, measure_memory_usage): -# live_server_setup(live_server) +# # live_server_setup(live_server) # Setup on conftest per function run_filter_test(client, live_server, '//*[@id="nope-doesnt-exist"]') # Test that notification is never sent diff --git a/changedetectionio/tests/test_group.py b/changedetectionio/tests/test_group.py index ae294d40..e166a8da 100644 --- 
a/changedetectionio/tests/test_group.py +++ b/changedetectionio/tests/test_group.py @@ -6,8 +6,8 @@ from .util import live_server_setup, wait_for_all_checks, extract_rss_token_from import os -def test_setup(client, live_server, measure_memory_usage): - live_server_setup(live_server) +# def test_setup(client, live_server, measure_memory_usage): + # live_server_setup(live_server) # Setup on conftest per function def set_original_response(): test_return_data = """<html> @@ -40,7 +40,7 @@ def set_modified_response(): return None def test_setup_group_tag(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + set_original_response() # Add a tag with some config, import a tag and it should roughly work @@ -131,7 +131,7 @@ def test_setup_group_tag(client, live_server, measure_memory_usage): assert b'Deleted' in res.data def test_tag_import_singular(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + test_url = url_for('test_endpoint', _external=True) res = client.post( @@ -151,7 +151,7 @@ def test_tag_import_singular(client, live_server, measure_memory_usage): assert b'Deleted' in res.data def test_tag_add_in_ui(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + # res = client.post( url_for("tags.form_tag_add"), @@ -168,7 +168,7 @@ def test_tag_add_in_ui(client, live_server, measure_memory_usage): assert b'Deleted' in res.data def test_group_tag_notification(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + set_original_response() test_url = url_for('test_endpoint', _external=True) @@ -236,7 +236,7 @@ def test_group_tag_notification(client, live_server, measure_memory_usage): assert b'Deleted' in res.data def test_limit_tag_ui(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + test_url = url_for('test_endpoint', _external=True) urls=[] @@ -275,7 +275,7 @@ def test_limit_tag_ui(client, live_server, measure_memory_usage): assert b'All 
tags deleted' in res.data def test_clone_tag_on_import(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + test_url = url_for('test_endpoint', _external=True) res = client.post( url_for("imports.import_page"), @@ -301,7 +301,7 @@ def test_clone_tag_on_import(client, live_server, measure_memory_usage): assert b'Deleted' in res.data def test_clone_tag_on_quickwatchform_add(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + test_url = url_for('test_endpoint', _external=True) diff --git a/changedetectionio/tests/test_history_consistency.py b/changedetectionio/tests/test_history_consistency.py index 1558c275..fbcfba4d 100644 --- a/changedetectionio/tests/test_history_consistency.py +++ b/changedetectionio/tests/test_history_consistency.py @@ -9,7 +9,7 @@ from .util import live_server_setup, wait_for_all_checks from urllib.parse import urlparse, parse_qs def test_consistent_history(client, live_server, measure_memory_usage): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function workers = int(os.getenv("FETCH_WORKERS", 10)) r = range(1, 10+workers) diff --git a/changedetectionio/tests/test_ignore.py b/changedetectionio/tests/test_ignore.py index 1a88c0b7..985e58b8 100644 --- a/changedetectionio/tests/test_ignore.py +++ b/changedetectionio/tests/test_ignore.py @@ -24,7 +24,7 @@ def set_original_ignore_response(): def test_ignore(client, live_server, measure_memory_usage): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function set_original_ignore_response() test_url = url_for('test_endpoint', _external=True) res = client.post( diff --git a/changedetectionio/tests/test_ignore_regex_text.py b/changedetectionio/tests/test_ignore_regex_text.py index dca89817..847a3e74 100644 --- a/changedetectionio/tests/test_ignore_regex_text.py +++ b/changedetectionio/tests/test_ignore_regex_text.py @@ -3,8 +3,7 @@ from . 
util import live_server_setup from changedetectionio import html_tools -def test_setup(live_server): - live_server_setup(live_server) + # Unit test of the stripper # Always we are dealing in utf-8 diff --git a/changedetectionio/tests/test_ignore_text.py b/changedetectionio/tests/test_ignore_text.py index 7864e08e..19fe2303 100644 --- a/changedetectionio/tests/test_ignore_text.py +++ b/changedetectionio/tests/test_ignore_text.py @@ -5,8 +5,7 @@ from flask import url_for from .util import live_server_setup, wait_for_all_checks from changedetectionio import html_tools -def test_setup(live_server): - live_server_setup(live_server) + # Unit test of the stripper # Always we are dealing in utf-8 @@ -256,9 +255,9 @@ def _run_test_global_ignore(client, as_source=False, extra_ignore=""): assert b'Deleted' in res.data def test_check_global_ignore_text_functionality(client, live_server): - #live_server_setup(live_server) + _run_test_global_ignore(client, as_source=False) def test_check_global_ignore_text_functionality_as_source(client, live_server): - #live_server_setup(live_server) + _run_test_global_ignore(client, as_source=True, extra_ignore='/\?v=\d/') diff --git a/changedetectionio/tests/test_ignorehyperlinks.py b/changedetectionio/tests/test_ignorehyperlinks.py index 34b43a1f..5df8f9ae 100644 --- a/changedetectionio/tests/test_ignorehyperlinks.py +++ b/changedetectionio/tests/test_ignorehyperlinks.py @@ -6,8 +6,7 @@ from flask import url_for from .util import live_server_setup, wait_for_all_checks -def test_setup(live_server): - live_server_setup(live_server) + def set_original_ignore_response(): test_return_data = """<html> diff --git a/changedetectionio/tests/test_ignorestatuscode.py b/changedetectionio/tests/test_ignorestatuscode.py index cac971be..a28e8996 100644 --- a/changedetectionio/tests/test_ignorestatuscode.py +++ b/changedetectionio/tests/test_ignorestatuscode.py @@ -5,8 +5,7 @@ from flask import url_for from .util import live_server_setup, 
wait_for_all_checks -def test_setup(live_server): - live_server_setup(live_server) + def set_original_response(): diff --git a/changedetectionio/tests/test_ignorewhitespace.py b/changedetectionio/tests/test_ignorewhitespace.py index fe97d6ca..93fa94b2 100644 --- a/changedetectionio/tests/test_ignorewhitespace.py +++ b/changedetectionio/tests/test_ignorewhitespace.py @@ -4,8 +4,7 @@ import time from flask import url_for from . util import live_server_setup -def test_setup(live_server): - live_server_setup(live_server) + # Should be the same as set_original_ignore_response() but with a little more whitespacing diff --git a/changedetectionio/tests/test_import.py b/changedetectionio/tests/test_import.py index 26cc6888..899ff1ba 100644 --- a/changedetectionio/tests/test_import.py +++ b/changedetectionio/tests/test_import.py @@ -8,8 +8,8 @@ from flask import url_for from .util import live_server_setup, wait_for_all_checks -def test_setup(client, live_server, measure_memory_usage): - live_server_setup(live_server) +# def test_setup(client, live_server, measure_memory_usage): + # live_server_setup(live_server) # Setup on conftest per function def test_import(client, live_server, measure_memory_usage): # Give the endpoint time to spin up @@ -126,7 +126,7 @@ def test_import_distillio(client, live_server, measure_memory_usage): def test_import_custom_xlsx(client, live_server, measure_memory_usage): """Test can upload a excel spreadsheet and the watches are created correctly""" - #live_server_setup(live_server) + dirname = os.path.dirname(__file__) filename = os.path.join(dirname, 'import/spreadsheet.xlsx') @@ -175,7 +175,7 @@ def test_import_custom_xlsx(client, live_server, measure_memory_usage): def test_import_watchete_xlsx(client, live_server, measure_memory_usage): """Test can upload a excel spreadsheet and the watches are created correctly""" - #live_server_setup(live_server) + dirname = os.path.dirname(__file__) filename = os.path.join(dirname, 
'import/spreadsheet.xlsx') with open(filename, 'rb') as f: diff --git a/changedetectionio/tests/test_jinja2.py b/changedetectionio/tests/test_jinja2.py index ca06b467..71152943 100644 --- a/changedetectionio/tests/test_jinja2.py +++ b/changedetectionio/tests/test_jinja2.py @@ -5,12 +5,12 @@ from flask import url_for from .util import live_server_setup, wait_for_all_checks -def test_setup(client, live_server, measure_memory_usage): - live_server_setup(live_server) +# def test_setup(client, live_server, measure_memory_usage): + # # live_server_setup(live_server) # Setup on conftest per function # If there was only a change in the whitespacing, then we shouldnt have a change detected def test_jinja2_in_url_query(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + # Add our URL to the import page test_url = url_for('test_return_query', _external=True) @@ -35,7 +35,7 @@ def test_jinja2_in_url_query(client, live_server, measure_memory_usage): # https://techtonics.medium.com/secure-templating-with-jinja2-understanding-ssti-and-jinja2-sandbox-environment-b956edd60456 def test_jinja2_security_url_query(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + # Add our URL to the import page test_url = url_for('test_return_query', _external=True) diff --git a/changedetectionio/tests/test_jsonpath_jq_selector.py b/changedetectionio/tests/test_jsonpath_jq_selector.py index 7f22ea03..380e6dea 100644 --- a/changedetectionio/tests/test_jsonpath_jq_selector.py +++ b/changedetectionio/tests/test_jsonpath_jq_selector.py @@ -12,8 +12,7 @@ try: except ModuleNotFoundError: jq_support = False -def test_setup(live_server): - live_server_setup(live_server) + def test_unittest_inline_html_extract(): # So lets pretend that the JSON we want is inside some HTML diff --git a/changedetectionio/tests/test_live_preview.py b/changedetectionio/tests/test_live_preview.py index f8997692..088e695f 100644 --- 
a/changedetectionio/tests/test_live_preview.py +++ b/changedetectionio/tests/test_live_preview.py @@ -19,7 +19,7 @@ something to trigger<br> f.write(data) def test_content_filter_live_preview(client, live_server, measure_memory_usage): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function set_response() test_url = url_for('test_endpoint', _external=True) diff --git a/changedetectionio/tests/test_nonrenderable_pages.py b/changedetectionio/tests/test_nonrenderable_pages.py index df2bef00..2b3bd305 100644 --- a/changedetectionio/tests/test_nonrenderable_pages.py +++ b/changedetectionio/tests/test_nonrenderable_pages.py @@ -27,7 +27,7 @@ def set_zero_byte_response(): def test_check_basic_change_detection_functionality(client, live_server, measure_memory_usage): set_original_response() - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function # Add our URL to the import page res = client.post( @@ -96,6 +96,8 @@ def test_check_basic_change_detection_functionality(client, live_server, measure res = client.get(url_for("watchlist.index")) assert b'unviewed' in res.data client.get(url_for("ui.mark_all_viewed"), follow_redirects=True) + time.sleep(0.2) + # A totally zero byte (#2528) response should also not trigger an error set_zero_byte_response() diff --git a/changedetectionio/tests/test_notification.py b/changedetectionio/tests/test_notification.py index 1d4a1984..8640d2cc 100644 --- a/changedetectionio/tests/test_notification.py +++ b/changedetectionio/tests/test_notification.py @@ -5,8 +5,7 @@ import re from flask import url_for from loguru import logger -from .util import set_original_response, set_modified_response, set_more_modified_response, live_server_setup, wait_for_all_checks, \ - set_longer_modified_response, get_index +from .util import set_original_response, set_modified_response, set_more_modified_response, live_server_setup, wait_for_all_checks from . 
util import extract_UUID_from_client import logging import base64 @@ -18,13 +17,12 @@ from changedetectionio.notification import ( valid_notification_formats, ) -def test_setup(live_server): - live_server_setup(live_server) + # Hard to just add more live server URLs when one test is already running (I think) # So we add our test here (was in a different file) def test_check_notification(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + set_original_response() # Re 360 - new install should have defaults set @@ -286,7 +284,7 @@ def test_notification_validation(client, live_server, measure_memory_usage): def test_notification_custom_endpoint_and_jinja2(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + # test_endpoint - that sends the contents of a file # test_notification_endpoint - that takes a POST and writes it to file (test-datastore/notification.txt) @@ -331,7 +329,7 @@ def test_notification_custom_endpoint_and_jinja2(client, live_server, measure_me # Check no errors were recorded, because we asked for 204 which is slightly uncommon but is still OK - res = get_index(client) + res = client.get(url_for("watchlist.index")) assert b'notification-error' not in res.data with open("test-datastore/notification.txt", 'r') as f: @@ -372,7 +370,7 @@ def test_notification_custom_endpoint_and_jinja2(client, live_server, measure_me #2510 def test_global_send_test_notification(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + set_original_response() if os.path.isfile("test-datastore/notification.txt"): os.unlink("test-datastore/notification.txt") \ @@ -517,7 +515,7 @@ def _test_color_notifications(client, notification_body_token): def test_html_color_notifications(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + _test_color_notifications(client, '{{diff}}') _test_color_notifications(client, '{{diff_full}}') \ No newline at end of file diff --git 
a/changedetectionio/tests/test_notification_errors.py b/changedetectionio/tests/test_notification_errors.py index 28f503b3..9b4ac770 100644 --- a/changedetectionio/tests/test_notification_errors.py +++ b/changedetectionio/tests/test_notification_errors.py @@ -6,7 +6,7 @@ import logging def test_check_notification_error_handling(client, live_server, measure_memory_usage): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function set_original_response() # Set a URL and fetch it, then set a notification URL which is going to give errors @@ -60,7 +60,15 @@ def test_check_notification_error_handling(client, live_server, measure_memory_u # The error should show in the notification logs res = client.get( url_for("settings.notification_logs")) - found_name_resolution_error = b"Temporary failure in name resolution" in res.data or b"Name or service not known" in res.data + # Check for various DNS/connection error patterns that may appear in different environments + found_name_resolution_error = ( + b"No address found" in res.data or + b"Name or service not known" in res.data or + b"nodename nor servname provided" in res.data or + b"Temporary failure in name resolution" in res.data or + b"Failed to establish a new connection" in res.data or + b"Connection error occurred" in res.data + ) assert found_name_resolution_error # And the working one, which is after the 'broken' one should still have fired diff --git a/changedetectionio/tests/test_obfuscations.py b/changedetectionio/tests/test_obfuscations.py index 055e2dc8..9004f0fd 100644 --- a/changedetectionio/tests/test_obfuscations.py +++ b/changedetectionio/tests/test_obfuscations.py @@ -20,7 +20,7 @@ def set_original_ignore_response(): def test_obfuscations(client, live_server, measure_memory_usage): set_original_ignore_response() - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function time.sleep(1) # Add our URL to the import page 
test_url = url_for('test_endpoint', _external=True) diff --git a/changedetectionio/tests/test_pdf.py b/changedetectionio/tests/test_pdf.py index 7dc32b5b..5a8080b2 100644 --- a/changedetectionio/tests/test_pdf.py +++ b/changedetectionio/tests/test_pdf.py @@ -10,7 +10,7 @@ def test_fetch_pdf(client, live_server, measure_memory_usage): import shutil shutil.copy("tests/test.pdf", "test-datastore/endpoint-test.pdf") - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function test_url = url_for('test_pdf_endpoint', _external=True) # Add our URL to the import page res = client.post( diff --git a/changedetectionio/tests/test_preview_endpoints.py b/changedetectionio/tests/test_preview_endpoints.py index 3cb23b7c..ada52ed4 100644 --- a/changedetectionio/tests/test_preview_endpoints.py +++ b/changedetectionio/tests/test_preview_endpoints.py @@ -10,7 +10,7 @@ def test_fetch_pdf(client, live_server, measure_memory_usage): import shutil shutil.copy("tests/test.pdf", "test-datastore/endpoint-test.pdf") - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function test_url = url_for('test_pdf_endpoint', _external=True) # Add our URL to the import page res = client.post( diff --git a/changedetectionio/tests/test_request.py b/changedetectionio/tests/test_request.py index 142984c2..85b00633 100644 --- a/changedetectionio/tests/test_request.py +++ b/changedetectionio/tests/test_request.py @@ -4,8 +4,7 @@ import time from flask import url_for from . 
util import set_original_response, set_modified_response, live_server_setup, wait_for_all_checks, extract_UUID_from_client -def test_setup(live_server): - live_server_setup(live_server) + # Hard to just add more live server URLs when one test is already running (I think) # So we add our test here (was in a different file) @@ -154,7 +153,7 @@ def test_body_in_request(client, live_server, measure_memory_usage): follow_redirects=True ) assert b"1 Imported" in res.data - + wait_for_all_checks(client) watches_with_body = 0 with open('test-datastore/url-watches.json') as f: app_struct = json.load(f) @@ -258,7 +257,7 @@ def test_method_in_request(client, live_server, measure_memory_usage): # Re #2408 - user-agent override test, also should handle case-insensitive header deduplication def test_ua_global_override(client, live_server, measure_memory_usage): - # live_server_setup(live_server) + ## live_server_setup(live_server) # Setup on conftest per function test_url = url_for('test_headers', _external=True) res = client.post( @@ -313,7 +312,7 @@ def test_ua_global_override(client, live_server, measure_memory_usage): assert b'Deleted' in res.data def test_headers_textfile_in_request(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + # Add our URL to the import page webdriver_ua = "Hello fancy webdriver UA 1.0" @@ -426,7 +425,7 @@ def test_headers_textfile_in_request(client, live_server, measure_memory_usage): assert b'Deleted' in res.data def test_headers_validation(client, live_server): - #live_server_setup(live_server) + test_url = url_for('test_headers', _external=True) res = client.post( diff --git a/changedetectionio/tests/test_restock_itemprop.py b/changedetectionio/tests/test_restock_itemprop.py index 73454cd8..0627f597 100644 --- a/changedetectionio/tests/test_restock_itemprop.py +++ b/changedetectionio/tests/test_restock_itemprop.py @@ -44,13 +44,13 @@ def set_original_response(props_markup='', price="121.95"): -def test_setup(client, 
live_server): +# def test_setup(client, live_server): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function def test_restock_itemprop_basic(client, live_server): - #live_server_setup(live_server) + test_url = url_for('test_endpoint', _external=True) @@ -89,7 +89,7 @@ def test_restock_itemprop_basic(client, live_server): assert b'Deleted' in res.data def test_itemprop_price_change(client, live_server): - #live_server_setup(live_server) + # Out of the box 'Follow price changes' should be ON test_url = url_for('test_endpoint', _external=True) @@ -114,6 +114,8 @@ def test_itemprop_price_change(client, live_server): assert b'180.45' in res.data assert b'unviewed' in res.data client.get(url_for("ui.mark_all_viewed"), follow_redirects=True) + time.sleep(0.2) + # turning off price change trigger, but it should show the new price, with no change notification set_original_response(props_markup=instock_props[0], price='120.45') @@ -214,7 +216,7 @@ def _run_test_minmax_limit(client, extra_watch_edit_form): def test_restock_itemprop_minmax(client, live_server): - #live_server_setup(live_server) + extras = { "restock_settings-follow_price_changes": "y", "restock_settings-price_change_min": 900.0, @@ -223,7 +225,7 @@ def test_restock_itemprop_minmax(client, live_server): _run_test_minmax_limit(client, extra_watch_edit_form=extras) def test_restock_itemprop_with_tag(client, live_server): - #live_server_setup(live_server) + res = client.post( url_for("tags.form_tag_add"), @@ -252,7 +254,7 @@ def test_restock_itemprop_with_tag(client, live_server): def test_itemprop_percent_threshold(client, live_server): - #live_server_setup(live_server) + res = client.get(url_for("ui.form_delete", uuid="all"), follow_redirects=True) assert b'Deleted' in res.data @@ -319,7 +321,7 @@ def test_itemprop_percent_threshold(client, live_server): def test_change_with_notification_values(client, live_server): - #live_server_setup(live_server) + if 
os.path.isfile("test-datastore/notification.txt"): os.unlink("test-datastore/notification.txt") @@ -387,7 +389,7 @@ def test_change_with_notification_values(client, live_server): def test_data_sanity(client, live_server): - #live_server_setup(live_server) + res = client.get(url_for("ui.form_delete", uuid="all"), follow_redirects=True) assert b'Deleted' in res.data @@ -437,7 +439,7 @@ def test_data_sanity(client, live_server): # All examples should give a prive of 666.66 def test_special_prop_examples(client, live_server): import glob - #live_server_setup(live_server) + test_url = url_for('test_endpoint', _external=True) check_path = os.path.join(os.path.dirname(__file__), "itemprop_test_examples", "*.txt") diff --git a/changedetectionio/tests/test_rss.py b/changedetectionio/tests/test_rss.py index 5701f690..847c9fad 100644 --- a/changedetectionio/tests/test_rss.py +++ b/changedetectionio/tests/test_rss.py @@ -65,11 +65,11 @@ def set_html_content(content): with open("test-datastore/endpoint-content.txt", "wb") as f: f.write(test_return_data.encode('utf-8')) -def test_setup(client, live_server, measure_memory_usage): - live_server_setup(live_server) +# def test_setup(client, live_server, measure_memory_usage): + # live_server_setup(live_server) # Setup on conftest per function def test_rss_and_token(client, live_server, measure_memory_usage): - # live_server_setup(live_server) + # # live_server_setup(live_server) # Setup on conftest per function set_original_response() rss_token = extract_rss_token_from_UI(client) @@ -107,7 +107,7 @@ def test_rss_and_token(client, live_server, measure_memory_usage): client.get(url_for("ui.form_delete", uuid="all"), follow_redirects=True) def test_basic_cdata_rss_markup(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + set_original_cdata_xml() @@ -135,7 +135,7 @@ def test_basic_cdata_rss_markup(client, live_server, measure_memory_usage): res = client.get(url_for("ui.form_delete", uuid="all"), 
follow_redirects=True) def test_rss_xpath_filtering(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + set_original_cdata_xml() @@ -191,7 +191,7 @@ def test_rss_bad_chars_breaking(client, live_server): Otherwise feedgen should support regular unicode """ - #live_server_setup(live_server) + with open("test-datastore/endpoint-content.txt", "w") as f: ten_kb_string = "A" * 10_000 diff --git a/changedetectionio/tests/test_scheduler.py b/changedetectionio/tests/test_scheduler.py index caacc3ad..51610d60 100644 --- a/changedetectionio/tests/test_scheduler.py +++ b/changedetectionio/tests/test_scheduler.py @@ -6,11 +6,11 @@ from zoneinfo import ZoneInfo from flask import url_for from .util import live_server_setup, wait_for_all_checks, extract_UUID_from_client -def test_setup(client, live_server): - live_server_setup(live_server) +# def test_setup(client, live_server): + # live_server_setup(live_server) # Setup on conftest per function def test_check_basic_scheduler_functionality(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'] test_url = url_for('test_random_content_endpoint', _external=True) @@ -92,7 +92,7 @@ def test_check_basic_scheduler_functionality(client, live_server, measure_memory def test_check_basic_global_scheduler_functionality(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'] test_url = url_for('test_random_content_endpoint', _external=True) diff --git a/changedetectionio/tests/test_search.py b/changedetectionio/tests/test_search.py index 1668eaab..eb6f0ee9 100644 --- a/changedetectionio/tests/test_search.py +++ b/changedetectionio/tests/test_search.py @@ -2,11 +2,10 @@ from flask import url_for from .util import set_original_response, set_modified_response, live_server_setup import time -def 
test_setup(live_server): - live_server_setup(live_server) + def test_basic_search(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + urls = ['https://localhost:12300?first-result=1', 'https://localhost:5000?second-result=1' @@ -39,7 +38,7 @@ def test_basic_search(client, live_server, measure_memory_usage): def test_search_in_tag_limit(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + urls = ['https://localhost:12300?first-result=1 tag-one', 'https://localhost:5000?second-result=1 tag-two' diff --git a/changedetectionio/tests/test_security.py b/changedetectionio/tests/test_security.py index 00902bfc..495e12a8 100644 --- a/changedetectionio/tests/test_security.py +++ b/changedetectionio/tests/test_security.py @@ -5,11 +5,11 @@ from .util import live_server_setup, wait_for_all_checks from .. import strtobool -def test_setup(client, live_server, measure_memory_usage): - live_server_setup(live_server) +# def test_setup(client, live_server, measure_memory_usage): + # live_server_setup(live_server) # Setup on conftest per function def test_bad_access(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + res = client.post( url_for("imports.import_page"), data={"urls": 'https://localhost'}, @@ -89,7 +89,7 @@ def _runner_test_various_file_slash(client, file_uri): assert b'Deleted' in res.data def test_file_slash_access(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + # file: is NOT permitted by default, so it will be caught by ALLOW_FILE_URI check @@ -99,7 +99,7 @@ def test_file_slash_access(client, live_server, measure_memory_usage): _runner_test_various_file_slash(client, file_uri=f"file:{test_file_path}") # CVE-2024-56509 def test_xss(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + from changedetectionio.notification import ( default_notification_format ) diff --git a/changedetectionio/tests/test_share_watch.py 
b/changedetectionio/tests/test_share_watch.py index 00456317..09e3f35d 100644 --- a/changedetectionio/tests/test_share_watch.py +++ b/changedetectionio/tests/test_share_watch.py @@ -11,7 +11,7 @@ sleep_time_for_fetch_thread = 3 def test_share_watch(client, live_server, measure_memory_usage): set_original_response() - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function test_url = url_for('test_endpoint', _external=True) include_filters = ".nice-filter" diff --git a/changedetectionio/tests/test_source.py b/changedetectionio/tests/test_source.py index 809b1909..992314c1 100644 --- a/changedetectionio/tests/test_source.py +++ b/changedetectionio/tests/test_source.py @@ -7,8 +7,7 @@ from .util import set_original_response, set_modified_response, live_server_setu sleep_time_for_fetch_thread = 3 -def test_setup(live_server): - live_server_setup(live_server) + def test_check_basic_change_detection_functionality_source(client, live_server, measure_memory_usage): set_original_response() diff --git a/changedetectionio/tests/test_trigger.py b/changedetectionio/tests/test_trigger.py index dc18c1a5..0df5ec3b 100644 --- a/changedetectionio/tests/test_trigger.py +++ b/changedetectionio/tests/test_trigger.py @@ -57,7 +57,7 @@ def set_modified_with_trigger_text_response(): def test_trigger_functionality(client, live_server, measure_memory_usage): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function trigger_text = "Add to cart" set_original_ignore_response() diff --git a/changedetectionio/tests/test_trigger_regex.py b/changedetectionio/tests/test_trigger_regex.py index 62c6acb7..25253f21 100644 --- a/changedetectionio/tests/test_trigger_regex.py +++ b/changedetectionio/tests/test_trigger_regex.py @@ -24,7 +24,7 @@ def set_original_ignore_response(): def test_trigger_regex_functionality(client, live_server, measure_memory_usage): - live_server_setup(live_server) + # 
live_server_setup(live_server) # Setup on conftest per function set_original_ignore_response() diff --git a/changedetectionio/tests/test_trigger_regex_with_filter.py b/changedetectionio/tests/test_trigger_regex_with_filter.py index bf69da9d..a78b8fc4 100644 --- a/changedetectionio/tests/test_trigger_regex_with_filter.py +++ b/changedetectionio/tests/test_trigger_regex_with_filter.py @@ -24,7 +24,7 @@ def set_original_ignore_response(): def test_trigger_regex_functionality_with_filter(client, live_server, measure_memory_usage): - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function sleep_time_for_fetch_thread = 3 set_original_ignore_response() diff --git a/changedetectionio/tests/test_ui.py b/changedetectionio/tests/test_ui.py index 743b70b5..aec1ff60 100644 --- a/changedetectionio/tests/test_ui.py +++ b/changedetectionio/tests/test_ui.py @@ -6,7 +6,7 @@ from .util import set_original_response, set_modified_response, live_server_setu def test_checkbox_open_diff_in_new_tab(client, live_server): set_original_response() - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function # Add our URL to the import page res = client.post( diff --git a/changedetectionio/tests/test_unique_lines.py b/changedetectionio/tests/test_unique_lines.py index f3f70dc3..b4829e62 100644 --- a/changedetectionio/tests/test_unique_lines.py +++ b/changedetectionio/tests/test_unique_lines.py @@ -68,11 +68,11 @@ def set_modified_with_trigger_text_response(): with open("test-datastore/endpoint-content.txt", "w") as f: f.write(test_return_data) -def test_setup(client, live_server, measure_memory_usage): - live_server_setup(live_server) +# def test_setup(client, live_server, measure_memory_usage): + # live_server_setup(live_server) # Setup on conftest per function def test_unique_lines_functionality(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + set_original_ignore_response() @@ 
-121,7 +121,7 @@ def test_unique_lines_functionality(client, live_server, measure_memory_usage): assert b'Deleted' in res.data def test_sort_lines_functionality(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + set_modified_swapped_lines_with_extra_text_for_sorting() @@ -171,7 +171,7 @@ def test_sort_lines_functionality(client, live_server, measure_memory_usage): def test_extra_filters(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + set_original_ignore_response() diff --git a/changedetectionio/tests/test_watch_fields_storage.py b/changedetectionio/tests/test_watch_fields_storage.py index 8765a51f..506722fe 100644 --- a/changedetectionio/tests/test_watch_fields_storage.py +++ b/changedetectionio/tests/test_watch_fields_storage.py @@ -6,7 +6,7 @@ from . util import set_original_response, set_modified_response, live_server_set def test_check_watch_field_storage(client, live_server, measure_memory_usage): set_original_response() - live_server_setup(live_server) + # live_server_setup(live_server) # Setup on conftest per function test_url = "http://somerandomsitewewatch.com" diff --git a/changedetectionio/tests/test_xpath_selector.py b/changedetectionio/tests/test_xpath_selector.py index b3d0350e..fbdf201c 100644 --- a/changedetectionio/tests/test_xpath_selector.py +++ b/changedetectionio/tests/test_xpath_selector.py @@ -7,8 +7,7 @@ from .util import live_server_setup, wait_for_all_checks from ..html_tools import * -def test_setup(live_server): - live_server_setup(live_server) + def set_original_response(): @@ -256,7 +255,7 @@ def test_xpath23_prefix_validation(client, live_server, measure_memory_usage): assert b'Deleted' in res.data def test_xpath1_lxml(client, live_server, measure_memory_usage): - #live_server_setup(live_server) + d = '''<?xml version="1.0" encoding="UTF-8"?> <rss xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" 
xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd" xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0"> @@ -380,7 +379,7 @@ def test_check_with_prefix_include_filters(client, live_server, measure_memory_u def test_various_rules(client, live_server, measure_memory_usage): # Just check these don't error - # live_server_setup(live_server) + ## live_server_setup(live_server) # Setup on conftest per function with open("test-datastore/endpoint-content.txt", "w") as f: f.write("""<html> <body> diff --git a/changedetectionio/tests/util.py b/changedetectionio/tests/util.py index effbadfa..6261b7b2 100644 --- a/changedetectionio/tests/util.py +++ b/changedetectionio/tests/util.py @@ -1,4 +1,5 @@ #!/usr/bin/env python3 +from operator import truediv from flask import make_response, request from flask import url_for @@ -129,52 +130,46 @@ def extract_UUID_from_client(client): def wait_for_all_checks(client=None): """ - Waits until the queue is empty and remains empty for at least `required_empty_duration` seconds, - and also ensures no running threads have `current_uuid` set. - Retries for up to `max_attempts` times, sleeping `wait_between_attempts` seconds between checks. + Waits until the queue is empty and workers are idle. + Much faster than the original with adaptive timing. 
""" - from changedetectionio.flask_app import update_q as global_update_q, running_update_threads - - # Configuration - attempt = 0 - i=0 - max_attempts = 60 - required_empty_duration = 0.2 + from changedetectionio.flask_app import update_q as global_update_q + from changedetectionio import worker_handler logger = logging.getLogger() - time.sleep(1.2) - empty_since = None + attempt = 0 + max_attempts = 150 # Still reasonable upper bound while attempt < max_attempts: + # Start with fast checks, slow down if needed + if attempt < 10: + time.sleep(0.1) # Very fast initial checks + elif attempt < 30: + time.sleep(0.3) # Medium speed + else: + time.sleep(0.8) # Slower for persistent issues + q_length = global_update_q.qsize() + running_uuids = worker_handler.get_running_uuids() + any_workers_busy = len(running_uuids) > 0 - # Check if any threads are still processing - time.sleep(1.2) - any_threads_busy = any(t.current_uuid for t in running_update_threads) - - - if q_length == 0 and not any_threads_busy: + if q_length == 0 and not any_workers_busy: if empty_since is None: empty_since = time.time() - logger.info(f"Queue empty and no active threads at attempt {attempt}, starting empty timer...") - elif time.time() - empty_since >= required_empty_duration: - logger.info(f"Queue has been empty and threads idle for {required_empty_duration} seconds. 
Done waiting.") + elif time.time() - empty_since >= 0.15: # Shorter wait break - else: - logger.info(f"Still waiting: queue empty and no active threads, but not yet {required_empty_duration} seconds...") else: - if q_length != 0: - logger.info(f"Queue not empty (size={q_length}), resetting timer.") - if any_threads_busy: - busy_threads = [t.name for t in running_update_threads if t.current_uuid] - logger.info(f"Threads still busy: {busy_threads}, resetting timer.") empty_since = None + attempt += 1 + time.sleep(0.3) - time.sleep(1) +# Replaced by new_live_server_setup and calling per function scope in conftest.py +def live_server_setup(live_server): + return True -def live_server_setup(live_server): +def new_live_server_setup(live_server): @live_server.app.route('/test-random-content-endpoint') def test_random_content_endpoint(): @@ -328,20 +323,3 @@ def live_server_setup(live_server): live_server.start() - - -def get_index(client): - import inspect - # Get the caller's frame (parent function) - frame = inspect.currentframe() - caller_frame = frame.f_back # Go back to the caller's frame - caller_name = caller_frame.f_code.co_name - caller_line = caller_frame.f_lineno - - print(f"Called by: {caller_name}, Line: {caller_line}") - - res = client.get(url_for("watchlist.index")) - with open(f"test-datastore/index-{caller_name}-{caller_line}.html", 'wb') as f: - f.write(res.data) - - return res diff --git a/changedetectionio/tests/visualselector/test_fetch_data.py b/changedetectionio/tests/visualselector/test_fetch_data.py index e1c76b79..c476f7db 100644 --- a/changedetectionio/tests/visualselector/test_fetch_data.py +++ b/changedetectionio/tests/visualselector/test_fetch_data.py @@ -2,16 +2,14 @@ import os from flask import url_for -from ..util import live_server_setup, wait_for_all_checks, get_index +from ..util import live_server_setup, wait_for_all_checks -def test_setup(client, live_server): - live_server_setup(live_server) +# def test_setup(client, live_server): + 
# live_server_setup(live_server) # Setup on conftest per function # Add a site in paused mode, add an invalid filter, we should still have visual selector data ready def test_visual_selector_content_ready(client, live_server, measure_memory_usage): - live_server.stop() - live_server.start() import os import json @@ -89,9 +87,6 @@ def test_visual_selector_content_ready(client, live_server, measure_memory_usage def test_basic_browserstep(client, live_server, measure_memory_usage): - live_server.stop() - live_server.start() - assert os.getenv('PLAYWRIGHT_DRIVER_URL'), "Needs PLAYWRIGHT_DRIVER_URL set for this test" test_url = url_for('test_interactive_html_endpoint', _external=True) @@ -144,12 +139,9 @@ def test_basic_browserstep(client, live_server, measure_memory_usage): assert b"testheader: yes" in res.data assert b"user-agent: mycustomagent" in res.data - live_server.stop() def test_non_200_errors_report_browsersteps(client, live_server): - live_server.stop() - live_server.start() four_o_four_url = url_for('test_endpoint', status_code=404, _external=True) four_o_four_url = four_o_four_url.replace('localhost.localdomain', 'cdio') @@ -183,7 +175,7 @@ def test_non_200_errors_report_browsersteps(client, live_server): wait_for_all_checks(client) - res = get_index(client) + res = client.get(url_for("watchlist.index")) assert b'Error - 404' in res.data diff --git a/changedetectionio/update_worker.py b/changedetectionio/update_worker.py deleted file mode 100644 index 53d2d798..00000000 --- a/changedetectionio/update_worker.py +++ /dev/null @@ -1,608 +0,0 @@ -from .processors.exceptions import ProcessorException -import changedetectionio.content_fetchers.exceptions as content_fetchers_exceptions -from changedetectionio.processors.text_json_diff.processor import FilterNotFoundInResponse -from changedetectionio import html_tools -from changedetectionio.flask_app import watch_check_update - -import importlib -import os -import queue -import threading -import time - -# A 
single update worker -# -# Requests for checking on a single site(watch) from a queue of watches -# (another process inserts watches into the queue that are time-ready for checking) - -from loguru import logger - -class update_worker(threading.Thread): - current_uuid = None - - def __init__(self, q, notification_q, app, datastore, *args, **kwargs): - self.q = q - self.app = app - self.notification_q = notification_q - self.datastore = datastore - super().__init__(*args, **kwargs) - - def queue_notification_for_watch(self, notification_q, n_object, watch): - from changedetectionio import diff - from changedetectionio.notification import default_notification_format_for_watch - - dates = [] - trigger_text = '' - - now = time.time() - - if watch: - watch_history = watch.history - dates = list(watch_history.keys()) - trigger_text = watch.get('trigger_text', []) - - # Add text that was triggered - if len(dates): - snapshot_contents = watch.get_history_snapshot(dates[-1]) - else: - snapshot_contents = "No snapshot/history available, the watch should fetch atleast once." - - # If we ended up here with "System default" - if n_object.get('notification_format') == default_notification_format_for_watch: - n_object['notification_format'] = self.datastore.data['settings']['application'].get('notification_format') - - html_colour_enable = False - # HTML needs linebreak, but MarkDown and Text can use a linefeed - if n_object.get('notification_format') == 'HTML': - line_feed_sep = "<br>" - # Snapshot will be plaintext on the disk, convert to some kind of HTML - snapshot_contents = snapshot_contents.replace('\n', line_feed_sep) - elif n_object.get('notification_format') == 'HTML Color': - line_feed_sep = "<br>" - # Snapshot will be plaintext on the disk, convert to some kind of HTML - snapshot_contents = snapshot_contents.replace('\n', line_feed_sep) - html_colour_enable = True - else: - line_feed_sep = "\n" - - triggered_text = '' - if len(trigger_text): - from . 
import html_tools - triggered_text = html_tools.get_triggered_text(content=snapshot_contents, trigger_text=trigger_text) - if triggered_text: - triggered_text = line_feed_sep.join(triggered_text) - - # Could be called as a 'test notification' with only 1 snapshot available - prev_snapshot = "Example text: example test\nExample text: change detection is cool\nExample text: some more examples\n" - current_snapshot = "Example text: example test\nExample text: change detection is fantastic\nExample text: even more examples\nExample text: a lot more examples" - - if len(dates) > 1: - prev_snapshot = watch.get_history_snapshot(dates[-2]) - current_snapshot = watch.get_history_snapshot(dates[-1]) - - n_object.update({ - 'current_snapshot': snapshot_contents, - 'diff': diff.render_diff(prev_snapshot, current_snapshot, line_feed_sep=line_feed_sep, html_colour=html_colour_enable), - 'diff_added': diff.render_diff(prev_snapshot, current_snapshot, include_removed=False, line_feed_sep=line_feed_sep), - 'diff_full': diff.render_diff(prev_snapshot, current_snapshot, include_equal=True, line_feed_sep=line_feed_sep, html_colour=html_colour_enable), - 'diff_patch': diff.render_diff(prev_snapshot, current_snapshot, line_feed_sep=line_feed_sep, patch_format=True), - 'diff_removed': diff.render_diff(prev_snapshot, current_snapshot, include_added=False, line_feed_sep=line_feed_sep), - 'notification_timestamp': now, - 'screenshot': watch.get_screenshot() if watch and watch.get('notification_screenshot') else None, - 'triggered_text': triggered_text, - 'uuid': watch.get('uuid') if watch else None, - 'watch_url': watch.get('url') if watch else None, - }) - - if watch: - n_object.update(watch.extra_notification_token_values()) - - logger.trace(f"Main rendered notification placeholders (diff_added etc) calculated in {time.time()-now:.3f}s") - logger.debug("Queued notification for sending") - notification_q.put(n_object) - - # Prefer - Individual watch settings > Tag settings > Global 
settings (in that order) - def _check_cascading_vars(self, var_name, watch): - - from changedetectionio.notification import ( - default_notification_format_for_watch, - default_notification_body, - default_notification_title - ) - - # Would be better if this was some kind of Object where Watch can reference the parent datastore etc - v = watch.get(var_name) - if v and not watch.get('notification_muted'): - if var_name == 'notification_format' and v == default_notification_format_for_watch: - return self.datastore.data['settings']['application'].get('notification_format') - - return v - - tags = self.datastore.get_all_tags_for_watch(uuid=watch.get('uuid')) - if tags: - for tag_uuid, tag in tags.items(): - v = tag.get(var_name) - if v and not tag.get('notification_muted'): - return v - - if self.datastore.data['settings']['application'].get(var_name): - return self.datastore.data['settings']['application'].get(var_name) - - # Otherwise could be defaults - if var_name == 'notification_format': - return default_notification_format_for_watch - if var_name == 'notification_body': - return default_notification_body - if var_name == 'notification_title': - return default_notification_title - - return None - - def send_content_changed_notification(self, watch_uuid): - - n_object = {} - watch = self.datastore.data['watching'].get(watch_uuid) - if not watch: - return - - watch_history = watch.history - dates = list(watch_history.keys()) - # Theoretically it's possible that this could be just 1 long, - # - In the case that the timestamp key was not unique - if len(dates) == 1: - raise ValueError( - "History index had 2 or more, but only 1 date loaded, timestamps were not unique? maybe two of the same timestamps got written, needs more delay?" 
- ) - - # Should be a better parent getter in the model object - - # Prefer - Individual watch settings > Tag settings > Global settings (in that order) - n_object['notification_urls'] = self._check_cascading_vars('notification_urls', watch) - n_object['notification_title'] = self._check_cascading_vars('notification_title', watch) - n_object['notification_body'] = self._check_cascading_vars('notification_body', watch) - n_object['notification_format'] = self._check_cascading_vars('notification_format', watch) - - # (Individual watch) Only prepare to notify if the rules above matched - queued = False - if n_object and n_object.get('notification_urls'): - queued = True - - count = watch.get('notification_alert_count', 0) + 1 - self.datastore.update_watch(uuid=watch_uuid, update_obj={'notification_alert_count': count}) - - self.queue_notification_for_watch(notification_q=self.notification_q, n_object=n_object, watch=watch) - - return queued - - - def send_filter_failure_notification(self, watch_uuid): - - threshold = self.datastore.data['settings']['application'].get('filter_failure_notification_threshold_attempts') - watch = self.datastore.data['watching'].get(watch_uuid) - if not watch: - return - - n_object = {'notification_title': 'Changedetection.io - Alert - CSS/xPath filter was not present in the page', - 'notification_body': "Your configured CSS/xPath filters of '{}' for {{{{watch_url}}}} did not appear on the page after {} attempts, did the page change layout?\n\nLink: {{{{base_url}}}}/edit/{{{{watch_uuid}}}}\n\nThanks - Your omniscient changedetection.io installation :)\n".format( - ", ".join(watch['include_filters']), - threshold), - 'notification_format': 'text'} - - if len(watch['notification_urls']): - n_object['notification_urls'] = watch['notification_urls'] - - elif len(self.datastore.data['settings']['application']['notification_urls']): - n_object['notification_urls'] = self.datastore.data['settings']['application']['notification_urls'] - - # Only 
prepare to notify if the rules above matched - if 'notification_urls' in n_object: - n_object.update({ - 'watch_url': watch['url'], - 'uuid': watch_uuid, - 'screenshot': None - }) - self.notification_q.put(n_object) - logger.debug(f"Sent filter not found notification for {watch_uuid}") - else: - logger.debug(f"NOT sending filter not found notification for {watch_uuid} - no notification URLs") - - def send_step_failure_notification(self, watch_uuid, step_n): - watch = self.datastore.data['watching'].get(watch_uuid, False) - if not watch: - return - threshold = self.datastore.data['settings']['application'].get('filter_failure_notification_threshold_attempts') - n_object = {'notification_title': "Changedetection.io - Alert - Browser step at position {} could not be run".format(step_n+1), - 'notification_body': "Your configured browser step at position {} for {{{{watch_url}}}} " - "did not appear on the page after {} attempts, did the page change layout? " - "Does it need a delay added?\n\nLink: {{{{base_url}}}}/edit/{{{{watch_uuid}}}}\n\n" - "Thanks - Your omniscient changedetection.io installation :)\n".format(step_n+1, threshold), - 'notification_format': 'text'} - - if len(watch['notification_urls']): - n_object['notification_urls'] = watch['notification_urls'] - - elif len(self.datastore.data['settings']['application']['notification_urls']): - n_object['notification_urls'] = self.datastore.data['settings']['application']['notification_urls'] - - # Only prepare to notify if the rules above matched - if 'notification_urls' in n_object: - n_object.update({ - 'watch_url': watch['url'], - 'uuid': watch_uuid - }) - self.notification_q.put(n_object) - logger.error(f"Sent step not found notification for {watch_uuid}") - - - def cleanup_error_artifacts(self, uuid): - # All went fine, remove error artifacts - cleanup_files = ["last-error-screenshot.png", "last-error.txt"] - for f in cleanup_files: - full_path = os.path.join(self.datastore.datastore_path, uuid, f) - if 
os.path.isfile(full_path): - os.unlink(full_path) - - def run(self): - - while not self.app.config.exit.is_set(): - update_handler = None - watch = None - - try: - queued_item_data = self.q.get(block=False) - except queue.Empty: - pass - else: - uuid = queued_item_data.item.get('uuid') - fetch_start_time = round(time.time()) # Also used for a unique history key for now - self.current_uuid = uuid - if uuid in list(self.datastore.data['watching'].keys()) and self.datastore.data['watching'][uuid].get('url'): - changed_detected = False - contents = b'' - process_changedetection_results = True - update_obj = {} - - - # Clear last errors (move to preflight func?) - self.datastore.data['watching'][uuid]['browser_steps_last_error_step'] = None - self.datastore.data['watching'][uuid]['last_checked'] = fetch_start_time - - watch = self.datastore.data['watching'].get(uuid) - - logger.info(f"Processing watch UUID {uuid} Priority {queued_item_data.priority} URL {watch['url']}") - - try: - watch_check_update.send(watch_uuid=uuid) - - # Processor is what we are using for detecting the "Change" - processor = watch.get('processor', 'text_json_diff') - - # Init a new 'difference_detection_processor', first look in processors - processor_module_name = f"changedetectionio.processors.{processor}.processor" - try: - processor_module = importlib.import_module(processor_module_name) - except ModuleNotFoundError as e: - print(f"Processor module '{processor}' not found.") - raise e - - update_handler = processor_module.perform_site_check(datastore=self.datastore, - watch_uuid=uuid - ) - - update_handler.call_browser() - - changed_detected, update_obj, contents = update_handler.run_changedetection(watch=watch) - - # Re #342 - # In Python 3, all strings are sequences of Unicode characters. There is a bytes type that holds raw bytes. 
- # We then convert/.decode('utf-8') for the notification etc -# if not isinstance(contents, (bytes, bytearray)): -# raise Exception("Error - returned data from the fetch handler SHOULD be bytes") - except PermissionError as e: - logger.critical(f"File permission error updating file, watch: {uuid}") - logger.critical(str(e)) - process_changedetection_results = False - - # A generic other-exception thrown by processors - except ProcessorException as e: - if e.screenshot: - watch.save_screenshot(screenshot=e.screenshot) - if e.xpath_data: - watch.save_xpath_data(data=e.xpath_data) - self.datastore.update_watch(uuid=uuid, update_obj={'last_error': e.message}) - process_changedetection_results = False - - except content_fetchers_exceptions.ReplyWithContentButNoText as e: - # Totally fine, it's by choice - just continue on, nothing more to care about - # Page had elements/content but no renderable text - # Backend (not filters) gave zero output - extra_help = "" - if e.has_filters: - # Maybe it contains an image? offer a more helpful link - has_img = html_tools.include_filters(include_filters='img', - html_content=e.html_content) - if has_img: - extra_help = ", it's possible that the filters you have give an empty result or contain only an image." - else: - extra_help = ", it's possible that the filters were found, but contained no usable text." 
- - self.datastore.update_watch(uuid=uuid, update_obj={ - 'last_error': f"Got HTML content but no text found (With {e.status_code} reply code){extra_help}" - }) - - if e.screenshot: - watch.save_screenshot(screenshot=e.screenshot, as_error=True) - - if e.xpath_data: - watch.save_xpath_data(data=e.xpath_data) - - process_changedetection_results = False - - except content_fetchers_exceptions.Non200ErrorCodeReceived as e: - if e.status_code == 403: - err_text = "Error - 403 (Access denied) received" - elif e.status_code == 404: - err_text = "Error - 404 (Page not found) received" - elif e.status_code == 407: - err_text = "Error - 407 (Proxy authentication required) received, did you need a username and password for the proxy?" - elif e.status_code == 500: - err_text = "Error - 500 (Internal server error) received from the web site" - else: - extra = ' (Access denied or blocked)' if str(e.status_code).startswith('4') else '' - err_text = f"Error - Request returned a HTTP error code {e.status_code}{extra}" - - if e.screenshot: - watch.save_screenshot(screenshot=e.screenshot, as_error=True) - if e.xpath_data: - watch.save_xpath_data(data=e.xpath_data, as_error=True) - if e.page_text: - watch.save_error_text(contents=e.page_text) - - self.datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text}) - process_changedetection_results = False - - except FilterNotFoundInResponse as e: - if not self.datastore.data['watching'].get(uuid): - continue - - err_text = "Warning, no filters were found, no change detection ran - Did the page change layout? update your Visual Filter if necessary." 
- self.datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text}) - - # Filter wasnt found, but we should still update the visual selector so that they can have a chance to set it up again - if e.screenshot: - watch.save_screenshot(screenshot=e.screenshot) - - if e.xpath_data: - watch.save_xpath_data(data=e.xpath_data) - - # Only when enabled, send the notification - if watch.get('filter_failure_notification_send', False): - c = watch.get('consecutive_filter_failures', 0) - c += 1 - # Send notification if we reached the threshold? - threshold = self.datastore.data['settings']['application'].get('filter_failure_notification_threshold_attempts', 0) - logger.debug(f"Filter for {uuid} not found, consecutive_filter_failures: {c} of threshold {threshold}") - if c >= threshold: - if not watch.get('notification_muted'): - logger.debug(f"Sending filter failed notification for {uuid}") - self.send_filter_failure_notification(uuid) - c = 0 - logger.debug(f"Reset filter failure count back to zero") - - self.datastore.update_watch(uuid=uuid, update_obj={'consecutive_filter_failures': c}) - else: - logger.trace(f"{uuid} - filter_failure_notification_send not enabled, skipping") - - - process_changedetection_results = False - - except content_fetchers_exceptions.checksumFromPreviousCheckWasTheSame as e: - # Yes fine, so nothing todo, don't continue to process. 
- process_changedetection_results = False - changed_detected = False - except content_fetchers_exceptions.BrowserConnectError as e: - self.datastore.update_watch(uuid=uuid, - update_obj={'last_error': e.msg - } - ) - process_changedetection_results = False - except content_fetchers_exceptions.BrowserFetchTimedOut as e: - self.datastore.update_watch(uuid=uuid, - update_obj={'last_error': e.msg - } - ) - process_changedetection_results = False - except content_fetchers_exceptions.BrowserStepsStepException as e: - - if not self.datastore.data['watching'].get(uuid): - continue - - error_step = e.step_n + 1 - from playwright._impl._errors import TimeoutError, Error - - # Generally enough info for TimeoutError (couldnt locate the element after default seconds) - err_text = f"Browser step at position {error_step} could not run, check the watch, add a delay if necessary, view Browser Steps to see screenshot at that step." - - if e.original_e.name == "TimeoutError": - # Just the first line is enough, the rest is the stack trace - err_text += " Could not find the target." - else: - # Other Error, more info is good. - err_text += " " + str(e.original_e).splitlines()[0] - - logger.debug(f"BrowserSteps exception at step {error_step} {str(e.original_e)}") - - self.datastore.update_watch(uuid=uuid, - update_obj={'last_error': err_text, - 'browser_steps_last_error_step': error_step - } - ) - - if watch.get('filter_failure_notification_send', False): - c = watch.get('consecutive_filter_failures', 0) - c += 1 - # Send notification if we reached the threshold? 
- threshold = self.datastore.data['settings']['application'].get('filter_failure_notification_threshold_attempts', - 0) - logger.error(f"Step for {uuid} not found, consecutive_filter_failures: {c}") - if threshold > 0 and c >= threshold: - if not watch.get('notification_muted'): - self.send_step_failure_notification(watch_uuid=uuid, step_n=e.step_n) - c = 0 - - self.datastore.update_watch(uuid=uuid, update_obj={'consecutive_filter_failures': c}) - - process_changedetection_results = False - - except content_fetchers_exceptions.EmptyReply as e: - # Some kind of custom to-str handler in the exception handler that does this? - err_text = "EmptyReply - try increasing 'Wait seconds before extracting text', Status Code {}".format(e.status_code) - self.datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text, - 'last_check_status': e.status_code}) - process_changedetection_results = False - except content_fetchers_exceptions.ScreenshotUnavailable as e: - err_text = "Screenshot unavailable, page did not render fully in the expected time or page was too long - try increasing 'Wait seconds before extracting text'" - self.datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text, - 'last_check_status': e.status_code}) - process_changedetection_results = False - except content_fetchers_exceptions.JSActionExceptions as e: - err_text = "Error running JS Actions - Page request - "+e.message - if e.screenshot: - watch.save_screenshot(screenshot=e.screenshot, as_error=True) - self.datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text, - 'last_check_status': e.status_code}) - process_changedetection_results = False - except content_fetchers_exceptions.PageUnloadable as e: - err_text = "Page request from server didnt respond correctly" - if e.message: - err_text = "{} - {}".format(err_text, e.message) - - if e.screenshot: - watch.save_screenshot(screenshot=e.screenshot, as_error=True) - - self.datastore.update_watch(uuid=uuid, 
update_obj={'last_error': err_text, - 'last_check_status': e.status_code, - 'has_ldjson_price_data': None}) - process_changedetection_results = False - except content_fetchers_exceptions.BrowserStepsInUnsupportedFetcher as e: - err_text = "This watch has Browser Steps configured and so it cannot run with the 'Basic fast Plaintext/HTTP Client', either remove the Browser Steps or select a Chrome fetcher." - self.datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text}) - process_changedetection_results = False - logger.error(f"Exception (BrowserStepsInUnsupportedFetcher) reached processing watch UUID: {uuid}") - - except Exception as e: - logger.error(f"Exception reached processing watch UUID: {uuid}") - logger.error(str(e)) - self.datastore.update_watch(uuid=uuid, update_obj={'last_error': "Exception: " + str(e)}) - # Other serious error - process_changedetection_results = False - - else: - # Crash protection, the watch entry could have been removed by this point (during a slow chrome fetch etc) - if not self.datastore.data['watching'].get(uuid): - continue - - update_obj['content-type'] = update_handler.fetcher.get_all_headers().get('content-type', '').lower() - - # Mark that we never had any failures - if not watch.get('ignore_status_codes'): - update_obj['consecutive_filter_failures'] = 0 - - # Everything ran OK, clean off any previous error - update_obj['last_error'] = False - - self.cleanup_error_artifacts(uuid) - - if not self.datastore.data['watching'].get(uuid): - continue - - # Different exceptions mean that we may or may not want to bump the snapshot, trigger notifications etc - if process_changedetection_results: - - # Extract <title> as title if possible/requested. 
- if self.datastore.data['settings']['application'].get('extract_title_as_title') or watch['extract_title_as_title']: - if not watch['title'] or not len(watch['title']): - try: - update_obj['title'] = html_tools.extract_element(find='title', html_content=update_handler.fetcher.content) - logger.info(f"UUID: {uuid} Extract <title> updated title to '{update_obj['title']}") - except Exception as e: - logger.warning(f"UUID: {uuid} Extract <title> as watch title was enabled, but couldn't find a <title>.") - - try: - self.datastore.update_watch(uuid=uuid, update_obj=update_obj) - - - # Also save the snapshot on the first time checked, "last checked" will always be updated, so we just check history length. - if changed_detected or not watch.history_n: - - if update_handler.screenshot: - watch.save_screenshot(screenshot=update_handler.screenshot) - - if update_handler.xpath_data: - watch.save_xpath_data(data=update_handler.xpath_data) - - # Small hack so that we sleep just enough to allow 1 second between history snapshots - # this is because history.txt indexes/keys snapshots by epoch seconds and we dont want dupe keys - # @also - the keys are one per second at the most (for now) - if watch.newest_history_key and int(fetch_start_time) == int(watch.newest_history_key): - logger.warning( - f"Timestamp {fetch_start_time} already exists, waiting 1 seconds so we have a unique key in history.txt") - fetch_start_time += 1 - time.sleep(1) - - watch.save_history_text(contents=contents, - timestamp=int(fetch_start_time), - snapshot_id=update_obj.get('previous_md5', 'none')) - - - empty_pages_are_a_change = self.datastore.data['settings']['application'].get('empty_pages_are_a_change', False) - if update_handler.fetcher.content or (not update_handler.fetcher.content and empty_pages_are_a_change): - # attribute .last_changed is then based on this data - watch.save_last_fetched_html(contents=update_handler.fetcher.content, timestamp=int(fetch_start_time)) - - # Notifications should 
only trigger on the second time (first time, we gather the initial snapshot) - if watch.history_n >= 2: - logger.info(f"Change detected in UUID {uuid} - {watch['url']}") - if not watch.get('notification_muted'): - # @todo only run this if notifications exist - self.send_content_changed_notification(watch_uuid=uuid) - - except Exception as e: - # Catch everything possible here, so that if a worker crashes, we don't lose it until restart! - logger.critical("!!!! Exception in update_worker while processing process_changedetection_results !!!") - logger.critical(str(e)) - self.datastore.update_watch(uuid=uuid, update_obj={'last_error': str(e)}) - - - # Always record that we atleast tried - count = watch.get('check_count', 0) + 1 - - # Record the 'server' header reply, can be used for actions in the future like cloudflare/akamai workarounds - try: - server_header = update_handler.fetcher.headers.get('server', '').strip().lower()[:255] - self.datastore.update_watch(uuid=uuid, - update_obj={'remote_server_reply': server_header} - ) - except Exception as e: - pass - - self.datastore.update_watch(uuid=uuid, update_obj={'fetch_time': round(time.time() - fetch_start_time, 3), - 'check_count': count - }) - - self.current_uuid = None # Done - self.q.task_done() - - # Send signal for watch check completion with the watch data - if watch: - logger.info(f"Sending watch_check_update signal for UUID {watch['uuid']}") - watch_check_update.send(watch_uuid=watch['uuid']) - - update_handler = None - logger.debug(f"Watch {uuid} done in {time.time()-fetch_start_time:.2f}s") - - - # Give the CPU time to interrupt - time.sleep(0.1) - - self.app.config.exit.wait(1) diff --git a/changedetectionio/worker_handler.py b/changedetectionio/worker_handler.py new file mode 100644 index 00000000..953d2354 --- /dev/null +++ b/changedetectionio/worker_handler.py @@ -0,0 +1,395 @@ +""" +Worker management module for changedetection.io + +Handles asynchronous workers for dynamic worker scaling. 
+Sync worker support has been removed in favor of async-only architecture. +""" + +import asyncio +import os +import threading +import time +from loguru import logger + +# Global worker state +running_async_tasks = [] +async_loop = None +async_loop_thread = None + +# Track currently processing UUIDs for async workers +currently_processing_uuids = set() + +# Configuration - async workers only +USE_ASYNC_WORKERS = True + + +def start_async_event_loop(): + """Start a dedicated event loop for async workers in a separate thread""" + global async_loop + logger.info("Starting async event loop for workers") + + try: + # Create a new event loop for this thread + async_loop = asyncio.new_event_loop() + # Set it as the event loop for this thread + asyncio.set_event_loop(async_loop) + + logger.debug(f"Event loop created and set: {async_loop}") + + # Run the event loop forever + async_loop.run_forever() + except Exception as e: + logger.error(f"Async event loop error: {e}") + finally: + # Clean up + if async_loop and not async_loop.is_closed(): + async_loop.close() + async_loop = None + logger.info("Async event loop stopped") + + +def start_async_workers(n_workers, update_q, notification_q, app, datastore): + """Start the async worker management system""" + global async_loop_thread, async_loop, running_async_tasks, currently_processing_uuids + + # Clear any stale UUID tracking state + currently_processing_uuids.clear() + + # Start the event loop in a separate thread + async_loop_thread = threading.Thread(target=start_async_event_loop, daemon=True) + async_loop_thread.start() + + # Wait for the loop to be available (with timeout for safety) + max_wait_time = 5.0 + wait_start = time.time() + while async_loop is None and (time.time() - wait_start) < max_wait_time: + time.sleep(0.1) + + if async_loop is None: + logger.error("Failed to start async event loop within timeout") + return + + # Additional brief wait to ensure loop is running + time.sleep(0.2) + + # Start async workers + 
logger.info(f"Starting {n_workers} async workers") + for i in range(n_workers): + try: + # Use a factory function to create named worker coroutines + def create_named_worker(worker_id): + async def named_worker(): + task = asyncio.current_task() + if task: + task.set_name(f"async-worker-{worker_id}") + return await start_single_async_worker(worker_id, update_q, notification_q, app, datastore) + return named_worker() + + task_future = asyncio.run_coroutine_threadsafe(create_named_worker(i), async_loop) + running_async_tasks.append(task_future) + except RuntimeError as e: + logger.error(f"Failed to start async worker {i}: {e}") + continue + + +async def start_single_async_worker(worker_id, update_q, notification_q, app, datastore): + """Start a single async worker with auto-restart capability""" + from changedetectionio.async_update_worker import async_update_worker + + # Check if we're in pytest environment - if so, be more gentle with logging + import os + in_pytest = "pytest" in os.sys.modules or "PYTEST_CURRENT_TEST" in os.environ + + while not app.config.exit.is_set(): + try: + if not in_pytest: + logger.info(f"Starting async worker {worker_id}") + await async_update_worker(worker_id, update_q, notification_q, app, datastore) + # If we reach here, worker exited cleanly + if not in_pytest: + logger.info(f"Async worker {worker_id} exited cleanly") + break + except asyncio.CancelledError: + # Task was cancelled (normal shutdown) + if not in_pytest: + logger.info(f"Async worker {worker_id} cancelled") + break + except Exception as e: + logger.error(f"Async worker {worker_id} crashed: {e}") + if not in_pytest: + logger.info(f"Restarting async worker {worker_id} in 5 seconds...") + await asyncio.sleep(5) + + if not in_pytest: + logger.info(f"Async worker {worker_id} shutdown complete") + + +def start_workers(n_workers, update_q, notification_q, app, datastore): + """Start async workers - sync workers are deprecated""" + start_async_workers(n_workers, update_q, 
notification_q, app, datastore) + + +def add_worker(update_q, notification_q, app, datastore): + """Add a new async worker (for dynamic scaling)""" + global running_async_tasks + + if not async_loop: + logger.error("Async loop not running, cannot add worker") + return False + + worker_id = len(running_async_tasks) + logger.info(f"Adding async worker {worker_id}") + + task_future = asyncio.run_coroutine_threadsafe( + start_single_async_worker(worker_id, update_q, notification_q, app, datastore), async_loop + ) + running_async_tasks.append(task_future) + return True + + +def remove_worker(): + """Remove an async worker (for dynamic scaling)""" + global running_async_tasks + + if not running_async_tasks: + return False + + # Cancel the last worker + task_future = running_async_tasks.pop() + task_future.cancel() + logger.info(f"Removed async worker, {len(running_async_tasks)} workers remaining") + return True + + +def get_worker_count(): + """Get current number of async workers""" + return len(running_async_tasks) + + +def get_running_uuids(): + """Get list of UUIDs currently being processed by async workers""" + return list(currently_processing_uuids) + + +def set_uuid_processing(uuid, processing=True): + """Mark a UUID as being processed or completed""" + global currently_processing_uuids + if processing: + currently_processing_uuids.add(uuid) + logger.debug(f"Started processing UUID: {uuid}") + else: + currently_processing_uuids.discard(uuid) + logger.debug(f"Finished processing UUID: {uuid}") + + +def is_watch_running(watch_uuid): + """Check if a specific watch is currently being processed""" + return watch_uuid in get_running_uuids() + + +def queue_item_async_safe(update_q, item): + """Queue an item for async queue processing""" + if async_loop and not async_loop.is_closed(): + try: + # For async queue, schedule the put operation + asyncio.run_coroutine_threadsafe(update_q.put(item), async_loop) + except RuntimeError as e: + logger.error(f"Failed to queue item: 
{e}") + else: + logger.error("Async loop not available or closed for queueing item") + + +def shutdown_workers(): + """Shutdown all async workers fast and aggressively""" + global async_loop, async_loop_thread, running_async_tasks + + # Check if we're in pytest environment - if so, be more gentle with logging + import os + in_pytest = "pytest" in os.sys.modules or "PYTEST_CURRENT_TEST" in os.environ + + if not in_pytest: + logger.info("Fast shutdown of async workers initiated...") + + # Cancel all async tasks immediately + for task_future in running_async_tasks: + if not task_future.done(): + task_future.cancel() + + # Stop the async event loop immediately + if async_loop and not async_loop.is_closed(): + try: + async_loop.call_soon_threadsafe(async_loop.stop) + except RuntimeError: + # Loop might already be stopped + pass + + running_async_tasks.clear() + async_loop = None + + # Give async thread minimal time to finish, then continue + if async_loop_thread and async_loop_thread.is_alive(): + async_loop_thread.join(timeout=1.0) # Only 1 second timeout + if async_loop_thread.is_alive() and not in_pytest: + logger.info("Async thread still running after timeout - continuing with shutdown") + async_loop_thread = None + + if not in_pytest: + logger.info("Async workers fast shutdown complete") + + + + +def adjust_async_worker_count(new_count, update_q=None, notification_q=None, app=None, datastore=None): + """ + Dynamically adjust the number of async workers. 
+ + Args: + new_count: Target number of workers + update_q, notification_q, app, datastore: Required for adding new workers + + Returns: + dict: Status of the adjustment operation + """ + global running_async_tasks + + current_count = get_worker_count() + + if new_count == current_count: + return { + 'status': 'no_change', + 'message': f'Worker count already at {current_count}', + 'current_count': current_count + } + + if new_count > current_count: + # Add workers + workers_to_add = new_count - current_count + logger.info(f"Adding {workers_to_add} async workers (from {current_count} to {new_count})") + + if not all([update_q, notification_q, app, datastore]): + return { + 'status': 'error', + 'message': 'Missing required parameters to add workers', + 'current_count': current_count + } + + for i in range(workers_to_add): + worker_id = len(running_async_tasks) + task_future = asyncio.run_coroutine_threadsafe( + start_single_async_worker(worker_id, update_q, notification_q, app, datastore), + async_loop + ) + running_async_tasks.append(task_future) + + return { + 'status': 'success', + 'message': f'Added {workers_to_add} workers', + 'previous_count': current_count, + 'current_count': new_count + } + + else: + # Remove workers + workers_to_remove = current_count - new_count + logger.info(f"Removing {workers_to_remove} async workers (from {current_count} to {new_count})") + + removed_count = 0 + for _ in range(workers_to_remove): + if running_async_tasks: + task_future = running_async_tasks.pop() + task_future.cancel() + # Wait for the task to actually stop + try: + task_future.result(timeout=5) # 5 second timeout + except Exception: + pass # Task was cancelled, which is expected + removed_count += 1 + + return { + 'status': 'success', + 'message': f'Removed {removed_count} workers', + 'previous_count': current_count, + 'current_count': current_count - removed_count + } + + +def get_worker_status(): + """Get status information about async workers""" + return { + 
'worker_type': 'async', + 'worker_count': get_worker_count(), + 'running_uuids': get_running_uuids(), + 'async_loop_running': async_loop is not None, + } + + +def check_worker_health(expected_count, update_q=None, notification_q=None, app=None, datastore=None): + """ + Check if the expected number of async workers are running and restart any missing ones. + + Args: + expected_count: Expected number of workers + update_q, notification_q, app, datastore: Required for restarting workers + + Returns: + dict: Health check results + """ + global running_async_tasks + + current_count = get_worker_count() + + if current_count == expected_count: + return { + 'status': 'healthy', + 'expected_count': expected_count, + 'actual_count': current_count, + 'message': f'All {expected_count} async workers running' + } + + # Check for crashed async workers + dead_workers = [] + alive_count = 0 + + for i, task_future in enumerate(running_async_tasks[:]): + if task_future.done(): + try: + result = task_future.result() + dead_workers.append(i) + logger.warning(f"Async worker {i} completed unexpectedly") + except Exception as e: + dead_workers.append(i) + logger.error(f"Async worker {i} crashed: {e}") + else: + alive_count += 1 + + # Remove dead workers from tracking + for i in reversed(dead_workers): + if i < len(running_async_tasks): + running_async_tasks.pop(i) + + missing_workers = expected_count - alive_count + restarted_count = 0 + + if missing_workers > 0 and all([update_q, notification_q, app, datastore]): + logger.info(f"Restarting {missing_workers} crashed async workers") + + for i in range(missing_workers): + worker_id = alive_count + i + try: + task_future = asyncio.run_coroutine_threadsafe( + start_single_async_worker(worker_id, update_q, notification_q, app, datastore), + async_loop + ) + running_async_tasks.append(task_future) + restarted_count += 1 + except Exception as e: + logger.error(f"Failed to restart worker {worker_id}: {e}") + + return { + 'status': 'repaired' if 
restarted_count > 0 else 'degraded', + 'expected_count': expected_count, + 'actual_count': alive_count, + 'dead_workers': len(dead_workers), + 'restarted_workers': restarted_count, + 'message': f'Found {len(dead_workers)} dead workers, restarted {restarted_count}' + } \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 79c113be..2e90ccc3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -eventlet>=0.38.0 +# eventlet>=0.38.0 # Removed - replaced with threading mode for better Python 3.12+ compatibility feedgen~=0.9 flask-compress # 0.6.3 included compatibility fix for werkzeug 3.x (2.x had deprecation of url handlers) @@ -9,9 +9,9 @@ flask_restful flask_cors # For the Chrome extension to operate flask_wtf~=1.2 flask~=2.3 -flask-socketio>=5.5.1 -python-socketio>=5.13.0 -python-engineio>=4.12.0 +flask-socketio~=5.5.1 +python-socketio~=5.13.0 +python-engineio~=4.12.0 inscriptis~=2.2 pytz timeago~=1.0 @@ -24,13 +24,16 @@ brotli~=1.0 requests[socks] requests-file -urllib3==1.26.19 +# urllib3==1.26.19 # Unpinned - let requests decide compatible version +# If specific version needed for security, use urllib3>=1.26.19,<3.0 chardet>2.3.0 wtforms~=3.0 jsonpath-ng~=1.5.3 -dnspython==2.6.1 # related to eventlet fixes +# dnspython - Used by paho-mqtt for MQTT broker resolution +# Version pin removed since eventlet (which required the specific 2.6.1 pin) has been eliminated +# paho-mqtt will install compatible dnspython version automatically # jq not available on Windows so must be installed manually @@ -53,7 +56,8 @@ beautifulsoup4>=4.0.0 # https://bugs.launchpad.net/lxml/+bug/2059910/comments/16 lxml >=4.8.0,<6,!=5.2.0,!=5.2.1 -# XPath 2.0-3.1 support - 4.2.0 broke something? 
+# XPath 2.0-3.1 support - 4.2.0 had issues, 4.1.5 stable +# Consider updating to latest stable version periodically elementpath==4.1.5 selenium~=4.31.0 @@ -98,7 +102,9 @@ levenshtein # Needed for > 3.10, https://github.com/microsoft/playwright-python/issues/2096 greenlet >= 3.0.3 -# Used for realtime socketio mode (so its a different driver to eventlet/threading not to interfere with playwright) +# Optional: Used for high-concurrency SocketIO mode (via SOCKETIO_MODE=gevent) +# Note: gevent has cross-platform limitations (Windows 1024 socket limit, macOS ARM build issues) +# Default SOCKETIO_MODE=threading is recommended for better compatibility gevent # Pinned or it causes problems with flask_expects_json which seems unmaintained