Compare commits


14 Commits

Author  SHA1  Message  Date
dgtlmoon  5a7b7726aa  Adding test for explicit JS code run  2024-02-26 17:41:20 +01:00
dgtlmoon  970b2fb54b  Improve tests  2024-02-26 17:35:07 +01:00
dgtlmoon  c95561edfb  more work on fixing headers  2024-02-26 13:47:35 +01:00
dgtlmoon  c9a9ed2da8  add test for headers  2024-02-23 15:09:51 +01:00
dgtlmoon  e03403dc3c  oops  2024-02-23 14:54:52 +01:00
dgtlmoon  903fc14960  tidy-up  2024-02-23 14:51:21 +01:00
dgtlmoon  78b99aa2cd  Re #2197 fixing headers and user-agent  2024-02-23 14:20:16 +01:00
dgtlmoon  3d390b6ea4  BrowserSteps UI - Avoid selecting very large elements that are likely to be the page wrapper  2024-02-21 11:00:35 +01:00
dgtlmoon  301a40ca34  Fetching - Puppeteer - Adding more debug/diagnostic information  2024-02-21 10:55:18 +01:00
dgtlmoon  1c099cdba6  Update stock-not-in-stock.js  2024-02-21 10:28:59 +01:00
dgtlmoon  af747e6e3f  UI - Sorted alphabetical tag list and list of tags in groups setting (#2205)  2024-02-21 10:03:09 +01:00
dgtlmoon  aefad0bdf6  Code - Remove whitespaces in visual selector elements config  2024-02-21 09:37:35 +01:00
dgtlmoon  904ef84f82  Build fix - Pinning package versions and Custom browser endpoints should not have a proxy set (#2204)  2024-02-20 22:11:17 +01:00
dgtlmoon  d2569ba715  Update stock-not-in-stock.js  2024-02-20 20:00:31 +01:00
19 changed files with 264 additions and 141 deletions

View File

@@ -72,7 +72,11 @@ jobs:
         run: |
           # Playwright via Sockpuppetbrowser fetch
           # tests/visualselector/test_fetch_data.py will do browser steps
-          docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/fetchers/test_content.py && pytest tests/test_errorhandling.py && pytest tests/visualselector/test_fetch_data.py'
+          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_content.py'
+          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_errorhandling.py'
+          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/visualselector/test_fetch_data.py'
+          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_custom_js_before_content.py'

       - name: Playwright and SocketPuppetBrowser - Headers and requests
         run: |

@@ -87,8 +91,11 @@ jobs:
       # STRAIGHT TO CDP
       - name: Pyppeteer and SocketPuppetBrowser - Specific tests in built container
         run: |
           # Playwright via Sockpuppetbrowser fetch
-          docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" -e "FAST_PUPPETEER_CHROME_FETCHER=True" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/fetchers/test_content.py && pytest tests/test_errorhandling.py && pytest tests/visualselector/test_fetch_data.py'
+          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_content.py'
+          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_errorhandling.py'
+          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/visualselector/test_fetch_data.py'
+          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_custom_js_before_content.py'

       - name: Pyppeteer and SocketPuppetBrowser - Headers and requests checks
         run: |

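A note on how the new flags fit together, since it explains the URL rewriting that appears in the test files further down: the test container now runs with hostname cdio and serves the live Flask test server on 0.0.0.0:5004, so the separate sockpuppetbrowser container can reach it at http://cdio:5004 instead of localhost. A minimal sketch of the rewrite this implies; the hostnames and port mirror the workflow commands above, while the starting URL is an assumed example of what url_for(..., _external=True) would return in-process.

# Sketch only: the in-process URL is an assumed example, the cdio hostname and port come from the CI flags above.
test_url = "http://localhost:5004/test-interactive-html-endpoint"
# The browser runs in another container, so "localhost" must become the test container's hostname.
test_url = test_url.replace('localhost.localdomain', 'cdio')
test_url = test_url.replace('localhost', 'cdio')
assert test_url == "http://cdio:5004/test-interactive-html-endpoint"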
View File

@@ -6,6 +6,8 @@ import re
 from random import randint
 from loguru import logger
 
+from changedetectionio.content_fetchers.base import manage_user_agent
+
 # Two flags, tell the JS which of the "Selector" or "Value" field should be enabled in the front end
 # 0- off, 1- on
 browser_step_ui_config = {'Choose one': '0 0',

@@ -178,6 +180,7 @@ class browsersteps_live_ui(steppable_browser_interface):
     stale = False
     # bump and kill this if idle after X sec
     age_start = 0
+    headers = {}
 
     # use a special driver, maybe locally etc
     command_executor = os.getenv(

@@ -192,7 +195,8 @@ class browsersteps_live_ui(steppable_browser_interface):
     browser_type = os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').strip('"')
 
-    def __init__(self, playwright_browser, proxy=None):
+    def __init__(self, playwright_browser, proxy=None, headers=None):
+        self.headers = headers or {}
         self.age_start = time.time()
         self.playwright_browser = playwright_browser
         if self.context is None:

@@ -206,16 +210,17 @@ class browsersteps_live_ui(steppable_browser_interface):
         # @todo handle multiple contexts, bind a unique id from the browser on each req?
         self.context = self.playwright_browser.new_context(
-            # @todo
-            # user_agent=request_headers['User-Agent'] if request_headers.get('User-Agent') else 'Mozilla/5.0',
-            # proxy=self.proxy,
-            # This is needed to enable JavaScript execution on GitHub and others
-            bypass_csp=True,
-            # Should never be needed
-            accept_downloads=False,
-            proxy=proxy
+            accept_downloads=False,  # Should never be needed
+            bypass_csp=True,  # This is needed to enable JavaScript execution on GitHub and others
+            extra_http_headers=self.headers,
+            ignore_https_errors=True,
+            proxy=proxy,
+            service_workers=os.getenv('PLAYWRIGHT_SERVICE_WORKERS', 'allow'),
+            # Should be `allow` or `block` - sites like YouTube can transmit large amounts of data via Service Workers
+            user_agent=manage_user_agent(headers=self.headers),
         )
         self.page = self.context.new_page()
         # self.page.set_default_navigation_timeout(keep_open)

View File

@@ -1,5 +1,4 @@
 from playwright.sync_api import PlaywrightContextManager
-import asyncio
 
 # So playwright wants to run as a context manager, but we do something horrible and hacky
 # we are holding the session open for as long as possible, then shutting it down, and opening a new one

View File

@@ -11,9 +11,10 @@ def construct_blueprint(datastore: ChangeDetectionStore):
     def tags_overview_page():
         from .form import SingleTag
         add_form = SingleTag(request.form)
+        sorted_tags = sorted(datastore.data['settings']['application'].get('tags').items(), key=lambda x: x[1]['title'])
+
         output = render_template("groups-overview.html",
                                  form=add_form,
-                                 available_tags=datastore.data['settings']['application'].get('tags', {}),
+                                 available_tags=sorted_tags,
                                  )
 
         return output

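Worth spelling out why the template change in the next file goes hand in hand with this one: sorted() over dict.items() returns a list of (uuid, tag) tuples rather than a dict, so the Jinja loop can no longer call .items() on it. A minimal sketch of that behaviour, using invented tag data with the 'title' key the sort relies on:

# Minimal sketch; the tag entries are invented, only the 'title' key matters for the sort.
tags = {
    'uuid-b': {'title': 'Zebra'},
    'uuid-a': {'title': 'Apple'},
}

# sorted() yields a list of (uuid, tag) tuples ordered by title ...
sorted_tags = sorted(tags.items(), key=lambda x: x[1]['title'])

# ... which is why the template now iterates "for uuid, tag in available_tags"
# instead of "available_tags.items()".
for uuid, tag in sorted_tags:
    print(uuid, tag['title'])  # uuid-a Apple, then uuid-b Zebra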
View File

@@ -40,7 +40,7 @@
                 <td colspan="3">No website organisational tags/groups configured</td>
             </tr>
             {% endif %}
-            {% for uuid, tag in available_tags.items() %}
+            {% for uuid, tag in available_tags %}
             <tr id="{{ uuid }}" class="{{ loop.cycle('pure-table-odd', 'pure-table-even') }}">
                 <td class="watch-controls">
                     <a class="link-mute state-{{'on' if tag.notification_muted else 'off'}}" href="{{url_for('tags.mute', uuid=tag.uuid)}}"><img src="{{url_for('static_content', group='images', filename='bell-off.svg')}}" alt="Mute notifications" title="Mute notifications" class="icon icon-mute" ></a>

View File

@@ -1,10 +1,10 @@
 import sys
 from distutils.util import strtobool
+from loguru import logger
 
 from changedetectionio.content_fetchers.exceptions import BrowserStepsStepException
 import os
 
-visualselector_xpath_selectors = 'div,span,form,table,tbody,tr,td,a,p,ul,li,h1,h2,h3,h4, header, footer, section, article, aside, details, main, nav, section, summary'
+visualselector_xpath_selectors = 'div,span,form,table,tbody,tr,td,a,p,ul,li,h1,h2,h3,h4,header,footer,section,article,aside,details,main,nav,section,summary'
 
 # available_fetchers() will scan this implementation looking for anything starting with html_
 # this information is used in the form selections

@@ -29,10 +29,15 @@ def available_fetchers():
     # rather than site-specific.
     use_playwright_as_chrome_fetcher = os.getenv('PLAYWRIGHT_DRIVER_URL', False)
     if use_playwright_as_chrome_fetcher:
-        if not strtobool(os.getenv('FAST_PUPPETEER_CHROME_FETCHER', 'False')):
+        # @note - For now, browser steps always uses playwright
+        if not strtobool(os.getenv('FAST_PUPPETEER_CHROME_FETCHER', 'False')) or False:
+            logger.debug('Using Playwright library as fetcher')
             from .playwright import fetcher as html_webdriver
         else:
+            logger.debug('Using direct Python Puppeteer library as fetcher')
             from .puppeteer import fetcher as html_webdriver
     else:
+        logger.debug("Falling back to selenium as fetcher")
         from .webdriver_selenium import fetcher as html_webdriver

View File

@@ -5,6 +5,40 @@ from loguru import logger
 from changedetectionio.content_fetchers import BrowserStepsStepException
 
+def manage_user_agent(headers, current_ua=''):
+    """
+    Basic setting of user-agent
+
+    NOTE!!!!!! The service that does the actual Chrome fetching should handle any anti-robot techniques
+    THERE ARE MANY WAYS THAT IT CAN BE DETECTED AS A ROBOT!!
+    This does not take care of
+    - Scraping of 'navigator' (platform, productSub, vendor, oscpu etc etc) browser object (navigator.appVersion) etc
+    - TCP/IP fingerprint JA3 etc
+    - Graphic rendering fingerprinting
+    - Your IP being obviously in a pool of bad actors
+    - Too many requests
+    - Scraping of SCH-UA browser replies (thanks google!!)
+    - Scraping of ServiceWorker, new window calls etc
+
+    See https://filipvitas.medium.com/how-to-set-user-agent-header-with-puppeteer-js-and-not-fail-28c7a02165da
+    Puppeteer requests https://github.com/dgtlmoon/pyppeteerstealth
+
+    :param page:
+    :param headers:
+    :return:
+    """
+    # Ask it what the user agent is, if its obviously ChromeHeadless, switch it to the default
+    ua_in_custom_headers = next((v for k, v in headers.items() if k.lower() == "user-agent"), None)
+    if ua_in_custom_headers:
+        return ua_in_custom_headers
+
+    if not ua_in_custom_headers and current_ua:
+        current_ua = current_ua.replace('HeadlessChrome', 'Chrome')
+        return current_ua
+
+    return None
+
 class Fetcher():
     browser_connection_is_custom = None
     browser_connection_url = None

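A quick usage sketch of the helper added above, based only on its code: a User-Agent supplied in the watch's custom headers wins outright, otherwise the user agent reported by the browser is reused with the tell-tale HeadlessChrome token swapped for Chrome, and with neither input it returns None so the fetcher keeps the browser default. The header and UA strings below are invented examples.

# Minimal usage sketch; the import path matches the diff, the values are invented.
from changedetectionio.content_fetchers.base import manage_user_agent

# A User-Agent in the watch's custom headers wins, regardless of key casing
assert manage_user_agent(headers={'User-Agent': 'MyCustomAgent'}) == 'MyCustomAgent'

# Otherwise the UA reported by the (headless) browser is reused with the headless marker masked
assert manage_user_agent(headers={}, current_ua='Mozilla/5.0 HeadlessChrome/121.0.0.0') == 'Mozilla/5.0 Chrome/121.0.0.0'

# With neither input, None is returned and the fetcher keeps the browser default
assert manage_user_agent(headers={}) is None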
View File

@@ -3,7 +3,8 @@ import os
 from urllib.parse import urlparse
 from loguru import logger
 
-from changedetectionio.content_fetchers.base import Fetcher
+from changedetectionio.content_fetchers.base import Fetcher, manage_user_agent
 from changedetectionio.content_fetchers.exceptions import PageUnloadable, Non200ErrorCodeReceived, EmptyReply, ScreenshotUnavailable
 
 class fetcher(Fetcher):

@@ -102,19 +103,16 @@ class fetcher(Fetcher):
         # Set user agent to prevent Cloudflare from blocking the browser
         # Use the default one configured in the App.py model that's passed from fetch_site_status.py
         context = browser.new_context(
-            user_agent={k.lower(): v for k, v in request_headers.items()}.get('user-agent', None),
-            proxy=self.proxy,
-            # This is needed to enable JavaScript execution on GitHub and others
-            bypass_csp=True,
-            # Should be `allow` or `block` - sites like YouTube can transmit large amounts of data via Service Workers
-            service_workers=os.getenv('PLAYWRIGHT_SERVICE_WORKERS', 'allow'),
-            # Should never be needed
-            accept_downloads=False
+            accept_downloads=False,  # Should never be needed
+            bypass_csp=True,  # This is needed to enable JavaScript execution on GitHub and others
+            extra_http_headers=request_headers,
+            ignore_https_errors=True,
+            proxy=self.proxy,
+            service_workers=os.getenv('PLAYWRIGHT_SERVICE_WORKERS', 'allow'),  # Should be `allow` or `block` - sites like YouTube can transmit large amounts of data via Service Workers
+            user_agent=manage_user_agent(headers=request_headers),
         )
 
         self.page = context.new_page()
-        if len(request_headers):
-            context.set_extra_http_headers(request_headers)
 
         # Listen for all console events and handle errors
         self.page.on("console", lambda msg: print(f"Playwright console: Watch URL: {url} {msg.type}: {msg.text} {msg.args}"))

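The design change above is that the watch's request headers now flow into the Playwright context at creation time (extra_http_headers) rather than being applied afterwards with set_extra_http_headers(), and the user agent is derived from those same headers via manage_user_agent(). A trimmed sketch of that pattern; the launch() call and the header values are illustrative only (the real fetcher connects to a remote browser), while the keyword arguments mirror the diff.

# Trimmed sketch of the header flow, assuming a local Chromium launch for illustration.
from playwright.sync_api import sync_playwright
from changedetectionio.content_fetchers.base import manage_user_agent

request_headers = {'testheader': 'yes', 'User-Agent': 'MyCustomAgent'}  # invented example values

with sync_playwright() as p:
    browser = p.chromium.launch()
    context = browser.new_context(
        accept_downloads=False,                                 # Should never be needed
        bypass_csp=True,                                        # Allow JS execution on CSP-strict sites
        extra_http_headers=request_headers,                     # replaces the old set_extra_http_headers() call
        ignore_https_errors=True,
        service_workers='allow',
        user_agent=manage_user_agent(headers=request_headers),  # custom UA wins, else the browser default is kept
    )
    page = context.new_page()
    page.goto('https://example.com')
    browser.close()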
View File

@@ -5,7 +5,8 @@ import websockets.exceptions
 from urllib.parse import urlparse
 from loguru import logger
 
-from changedetectionio.content_fetchers.base import Fetcher
+from changedetectionio.content_fetchers.base import Fetcher, manage_user_agent
 from changedetectionio.content_fetchers.exceptions import PageUnloadable, Non200ErrorCodeReceived, EmptyReply, BrowserFetchTimedOut, BrowserConnectError

@@ -100,10 +101,11 @@ class fetcher(Fetcher):
         else:
             self.page = await browser.newPage()
 
+        await self.page.setUserAgent(manage_user_agent(headers=request_headers, current_ua=await self.page.evaluate('navigator.userAgent')))
+
         await self.page.setBypassCSP(True)
         if request_headers:
             await self.page.setExtraHTTPHeaders(request_headers)
-            # @todo check user-agent worked
 
         # SOCKS5 with authentication is not supported (yet)
         # https://github.com/microsoft/playwright/issues/10567

@@ -212,8 +214,12 @@ class fetcher(Fetcher):
             logger.error('ERROR: Failed to get viewport-only reduced screenshot :(')
             pass
         finally:
+            # It's good to log here in the case that the browser crashes on shutting down but we still get the data we need
+            logger.success(f"Fetching '{url}' complete, closing page")
             await self.page.close()
+            logger.success(f"Fetching '{url}' complete, closing browser")
             await browser.close()
+            logger.success(f"Fetching '{url}' complete, exiting puppeteer fetch.")
 
     async def main(self, **kwargs):
         await self.fetch_page(**kwargs)

View File

@@ -10,6 +10,7 @@ function isItemInStock() {
     const outOfStockTexts = [
         ' أخبرني عندما يتوفر',
         '0 in stock',
+        'actuellement indisponible',
         'agotado',
         'article épuisé',
         'artikel zurzeit vergriffen',

@@ -17,6 +18,7 @@ function isItemInStock() {
         'ausverkauft', // sold out
         'available for back order',
         'back-order or out of stock',
+        'back in stock soon',
         'backordered',
         'benachrichtigt mich', // notify me
         'brak na stanie',

@@ -24,6 +26,7 @@ function isItemInStock() {
         'coming soon',
         'currently have any tickets for this',
         'currently unavailable',
+        'dieser artikel ist bald wieder verfügbar',
         'dostępne wkrótce',
         'en rupture de stock',
         'ist derzeit nicht auf lager',

@@ -57,11 +60,13 @@ function isItemInStock() {
         'tickets unavailable',
         'tijdelijk uitverkocht',
         'unavailable tickets',
+        'vorbestellung ist bald möglich',
         'we do not currently have an estimate of when this product will be back in stock.',
         'we don\'t know when or if this item will be back in stock.',
         'zur zeit nicht an lager',
         '品切れ',
         '已售完',
+        '已售',
         '품절'
     ];

View File

@@ -404,17 +404,21 @@ def changedetection_app(config=None, datastore_o=None):
         global datastore
         from changedetectionio import forms
 
-        limit_tag = request.args.get('tag', '').lower().strip()
+        active_tag_req = request.args.get('tag', '').lower().strip()
+        active_tag_uuid = active_tag = None
 
         # Be sure limit_tag is a uuid
-        for uuid, tag in datastore.data['settings']['application'].get('tags', {}).items():
-            if limit_tag == tag.get('title', '').lower().strip():
-                limit_tag = uuid
+        if active_tag_req:
+            for uuid, tag in datastore.data['settings']['application'].get('tags', {}).items():
+                if active_tag_req == tag.get('title', '').lower().strip() or active_tag_req == uuid:
+                    active_tag = tag
+                    active_tag_uuid = uuid
+                    break
 
         # Redirect for the old rss path which used the /?rss=true
         if request.args.get('rss'):
-            return redirect(url_for('rss', tag=limit_tag))
+            return redirect(url_for('rss', tag=active_tag_uuid))
 
         op = request.args.get('op')
         if op:
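The hunk above changes the index view so that the ?tag= query argument, which may be either a tag title or a tag UUID, is resolved once into the pair (active_tag, active_tag_uuid); the later hunks then filter watches and build RSS/recheck links from the UUID only. A standalone sketch of that lookup, with the tags dict shaped as in the diff and invented values:

# Standalone sketch of the lookup; UUIDs and titles below are invented examples.
def resolve_active_tag(active_tag_req, tags):
    active_tag = active_tag_uuid = None
    if active_tag_req:
        for uuid, tag in tags.items():
            # Accept either the lowercased tag title or the raw uuid in ?tag=
            if active_tag_req == tag.get('title', '').lower().strip() or active_tag_req == uuid:
                active_tag, active_tag_uuid = tag, uuid
                break
    return active_tag, active_tag_uuid

tags = {'3f2a-aaaa': {'title': 'Shopping'}, '9c1b-bbbb': {'title': 'News'}}
assert resolve_active_tag('shopping', tags) == ({'title': 'Shopping'}, '3f2a-aaaa')
assert resolve_active_tag('9c1b-bbbb', tags)[1] == '9c1b-bbbb'
assert resolve_active_tag('', tags) == (None, None)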
@@ -425,7 +429,7 @@ def changedetection_app(config=None, datastore_o=None):
datastore.data['watching'][uuid].toggle_mute() datastore.data['watching'][uuid].toggle_mute()
datastore.needs_write = True datastore.needs_write = True
return redirect(url_for('index', tag = limit_tag)) return redirect(url_for('index', tag = active_tag_uuid))
# Sort by last_changed and add the uuid which is usually the key.. # Sort by last_changed and add the uuid which is usually the key..
sorted_watches = [] sorted_watches = []
@@ -436,7 +440,7 @@ def changedetection_app(config=None, datastore_o=None):
if with_errors and not watch.get('last_error'): if with_errors and not watch.get('last_error'):
continue continue
if limit_tag and not limit_tag in watch['tags']: if active_tag_uuid and not active_tag_uuid in watch['tags']:
continue continue
if watch.get('last_error'): if watch.get('last_error'):
errored_count += 1 errored_count += 1
@@ -455,11 +459,12 @@ def changedetection_app(config=None, datastore_o=None):
total=total_count, total=total_count,
per_page=datastore.data['settings']['application'].get('pager_size', 50), css_framework="semantic") per_page=datastore.data['settings']['application'].get('pager_size', 50), css_framework="semantic")
sorted_tags = sorted(datastore.data['settings']['application'].get('tags').items(), key=lambda x: x[1]['title'])
output = render_template( output = render_template(
"watch-overview.html", "watch-overview.html",
# Don't link to hosting when we're on the hosting environment # Don't link to hosting when we're on the hosting environment
active_tag=limit_tag, active_tag=active_tag,
active_tag_uuid=active_tag_uuid,
app_rss_token=datastore.data['settings']['application']['rss_access_token'], app_rss_token=datastore.data['settings']['application']['rss_access_token'],
datastore=datastore, datastore=datastore,
errored_count=errored_count, errored_count=errored_count,
@@ -474,7 +479,7 @@ def changedetection_app(config=None, datastore_o=None):
sort_attribute=request.args.get('sort') if request.args.get('sort') else request.cookies.get('sort'), sort_attribute=request.args.get('sort') if request.args.get('sort') else request.cookies.get('sort'),
sort_order=request.args.get('order') if request.args.get('order') else request.cookies.get('order'), sort_order=request.args.get('order') if request.args.get('order') else request.cookies.get('order'),
system_default_fetcher=datastore.data['settings']['application'].get('fetch_backend'), system_default_fetcher=datastore.data['settings']['application'].get('fetch_backend'),
tags=datastore.data['settings']['application'].get('tags'), tags=sorted_tags,
watches=sorted_watches watches=sorted_watches
) )
@@ -1547,7 +1552,6 @@ def changedetection_app(config=None, datastore_o=None):
     # @todo handle ctrl break
     ticker_thread = threading.Thread(target=ticker_thread_check_time_launch_checks).start()
     threading.Thread(target=notification_runner).start()
-    threading.Thread(target=thread_maintain_worker_thread_pool).start()
 
     # Check for new release version, but not when running in test/build or pytest
     if not os.getenv("GITHUB_REF", False) and not config.get('disable_checkver') == True:

@@ -1630,73 +1634,23 @@ def notification_runner():
             # Trim the log length
             notification_debug_log = notification_debug_log[-100:]
 
-def thread_maintain_worker_thread_pool():
-    from changedetectionio import update_worker
-    n_workers = int(os.getenv("FETCH_WORKERS", datastore.data['settings']['requests']['workers']))
-
-    while not app.config.exit.is_set():
-        needed_threads = n_workers if not running_update_threads else 0
-        how_many_running_now = 0
-        dead_threads = []
-
-        for i, t in enumerate(running_update_threads):
-            if t.is_alive():
-                how_many_running_now += 1
-            else:
-                dead_threads.append(i)
-
-        for i in dead_threads:
-            del running_update_threads[i]
-
-        for _ in range(needed_threads - how_many_running_now):
-            logger.info("Adding new worker thread")
-            new_worker = update_worker.update_worker(update_q, notification_q, app, datastore)
-            running_update_threads.append(new_worker)
-            new_worker.start()
-
-        app.config.exit.wait(2)
-
-def thread_maintain_worker_thread_pool():
-    from changedetectionio import update_worker
-    logger.info("Starting thread pool worker maintainer thread")
-    n_workers = int(os.getenv("FETCH_WORKERS", datastore.data['settings']['requests']['workers']))
-
-    while not app.config.exit.is_set():
-        needed_threads = n_workers if not running_update_threads else 0
-        how_many_running_now = 0
-        dead_threads = []
-
-        for i, t in enumerate(running_update_threads):
-            if t.is_alive():
-                how_many_running_now += 1
-            else:
-                dead_threads.append(i)
-
-        for i in dead_threads:
-            del running_update_threads[i]
-
-        for _ in range(needed_threads - how_many_running_now):
-            logger.info("Adding new worker thread")
-            new_worker = update_worker.update_worker(update_q, notification_q, app, datastore)
-            running_update_threads.append(new_worker)
-            new_worker.start()
-
-        app.config.exit.wait(2)
-
 # Thread runner to check every minute, look for new watches to feed into the Queue.
 def ticker_thread_check_time_launch_checks():
     import random
+    from changedetectionio import update_worker
 
     proxy_last_called_time = {}
 
     recheck_time_minimum_seconds = int(os.getenv('MINIMUM_SECONDS_RECHECK_TIME', 20))
     logger.debug(f"System env MINIMUM_SECONDS_RECHECK_TIME {recheck_time_minimum_seconds}")
 
+    # Spin up Workers that do the fetching
+    # Can be overriden by ENV or use the default settings
+    n_workers = int(os.getenv("FETCH_WORKERS", datastore.data['settings']['requests']['workers']))
+    for _ in range(n_workers):
+        new_worker = update_worker.update_worker(update_q, notification_q, app, datastore)
+        running_update_threads.append(new_worker)
+        new_worker.start()
+
     while not app.config.exit.is_set():

@@ -1779,7 +1733,7 @@ def ticker_thread_check_time_launch_checks():
                     priority = int(time.time())
                     logger.debug(
                         f"> Queued watch UUID {uuid} "
-                        f"last checked at {watch['last_checked']} ({seconds_since_last_recheck} seconds ago!) recheck min was :{recheck_time_minimum_seconds} "
+                        f"last checked at {watch['last_checked']} "
                         f"queued at {now:0.2f} priority {priority} "
                         f"jitter {watch.jitter_seconds:0.2f}s, "
                         f"{now - watch['last_checked']:0.2f}s since last checked")

View File

@@ -75,12 +75,12 @@ class difference_detection_processor():
         proxy_url = None
         if preferred_proxy_id:
-            # Custom browser endpoints should not have a proxy added
-            if not preferred_proxy_id.startswith('ui-'):
+            # Custom browser endpoints should NOT have a proxy added
+            if not prefer_fetch_backend.startswith('extra_browser_'):
                 proxy_url = self.datastore.proxy_list.get(preferred_proxy_id).get('url')
                 logger.debug(f"Selected proxy key '{preferred_proxy_id}' as proxy URL '{proxy_url}' for {url}")
             else:
-                logger.debug(f"Skipping adding proxy data when custom Browser endpoint is specified.")
+                logger.debug(f"Skipping adding proxy data when custom Browser endpoint is specified. ")
 
         # Now call the fetcher (playwright/requests/etc) with arguments that only a fetcher would need.
         # When browser_connection_url is None, it method should default to working out whats the best defaults (os env vars etc)

View File

@@ -160,6 +160,12 @@ $(document).ready(function () {
                 e.offsetX > item.left * y_scale && e.offsetX < item.left * y_scale + item.width * y_scale
             ) {
 
+                // Ignore really large ones, because we are scraping 'div' also from xpath_element_scraper but
+                // that div or whatever could be some wrapper and would generally make you select the whole page
+                if (item.width > 800 && item.height > 400) {
+                    return
+                }
+
                 // There could be many elements here, record them all and then we'll find out which is the most 'useful'
                 // (input, textarea, button, A etc)
                 if (item.width < xpath_data['browser_width']) {

View File

@@ -1,6 +1,6 @@
 {% extends 'base.html' %}
 {% block content %}
-{% from '_helpers.jinja' import render_simple_field, render_field, render_nolabel_field %}
+{% from '_helpers.jinja' import render_simple_field, render_field, render_nolabel_field, sort_by_title %}
 <script src="{{url_for('static_content', group='js', filename='jquery-3.6.0.min.js')}}"></script>
 <script src="{{url_for('static_content', group='js', filename='watch-overview.js')}}" defer></script>

@@ -13,7 +13,7 @@
             <div id="watch-add-wrapper-zone">
                 {{ render_nolabel_field(form.url, placeholder="https://...", required=true) }}
-                {{ render_nolabel_field(form.tags, value=tags[active_tag].title if active_tag else '', placeholder="watch label / tag") }}
+                {{ render_nolabel_field(form.tags, value=active_tag.title if active_tag else '', placeholder="watch label / tag") }}
                 {{ render_nolabel_field(form.watch_submit_button, title="Watch this URL!" ) }}
                 {{ render_nolabel_field(form.edit_and_watch_submit_button, title="Edit first then Watch") }}
             </div>

@@ -46,11 +46,13 @@
     {% if search_q %}<div id="search-result-info">Searching "<strong><i>{{search_q}}</i></strong>"</div>{% endif %}
     <div>
         <a href="{{url_for('index')}}" class="pure-button button-tag {{'active' if not active_tag }}">All</a>
-        {% for uuid, tag in tags.items() %}
-            {% if tag != "" %}
-                <a href="{{url_for('index', tag=uuid) }}" class="pure-button button-tag {{'active' if active_tag == uuid }}">{{ tag.title }}</a>
-            {% endif %}
-        {% endfor %}
+        <!-- tag list -->
+        {% for uuid, tag in tags %}
+            {% if tag != "" %}
+                <a href="{{url_for('index', tag=uuid) }}" class="pure-button button-tag {{'active' if active_tag_uuid == uuid }}">{{ tag.title }}</a>
+            {% endif %}
+        {% endfor %}
     </div>
 
     {% set sort_order = sort_order or 'asc' %}

@@ -197,8 +199,8 @@
             </li>
             {% endif %}
             <li>
-                <a href="{{ url_for('form_watch_checknow', tag=active_tag, with_errors=request.args.get('with_errors',0)) }}" class="pure-button button-tag ">Recheck
-                all {% if active_tag%} in "{{tags[active_tag].title}}"{%endif%}</a>
+                <a href="{{ url_for('form_watch_checknow', tag=active_tag_uuid, with_errors=request.args.get('with_errors',0)) }}" class="pure-button button-tag ">Recheck
+                all {% if active_tag_uuid %} in "{{active_tag.title}}"{%endif%}</a>
             </li>
             <li>
                 <a href="{{ url_for('rss', tag=active_tag , token=app_rss_token)}}"><img alt="RSS Feed" id="feed-icon" src="{{url_for('static_content', group='images', filename='Generic_Feed-icon.svg')}}" height="15"></a>

View File

@@ -0,0 +1,56 @@
+import os
+from flask import url_for
+from ..util import live_server_setup, wait_for_all_checks, extract_UUID_from_client
+
+def test_execute_custom_js(client, live_server):
+    live_server_setup(live_server)
+    assert os.getenv('PLAYWRIGHT_DRIVER_URL'), "Needs PLAYWRIGHT_DRIVER_URL set for this test"
+
+    test_url = url_for('test_interactive_html_endpoint', _external=True)
+    test_url = test_url.replace('localhost.localdomain', 'cdio')
+    test_url = test_url.replace('localhost', 'cdio')
+
+    res = client.post(
+        url_for("form_quick_watch_add"),
+        data={"url": test_url, "tags": '', 'edit_and_watch_submit_button': 'Edit > Watch'},
+        follow_redirects=True
+    )
+    assert b"Watch added in Paused state, saving will unpause" in res.data
+
+    res = client.post(
+        url_for("edit_page", uuid="first", unpause_on_save=1),
+        data={
+            "url": test_url,
+            "tags": "",
+            'fetch_backend': "html_webdriver",
+            'webdriver_js_execute_code': 'document.querySelector("button[name=test-button]").click();',
+            'headers': "testheader: yes\buser-agent: MyCustomAgent",
+        },
+        follow_redirects=True
+    )
+    assert b"unpaused" in res.data
+    wait_for_all_checks(client)
+    uuid = extract_UUID_from_client(client)
+
+    assert live_server.app.config['DATASTORE'].data['watching'][uuid].history_n >= 1, "Watch history had atleast 1 (everything fetched OK)"
+    assert b"This text should be removed" not in res.data
+
+    # Check HTML conversion detected and workd
+    res = client.get(
+        url_for("preview_page", uuid=uuid),
+        follow_redirects=True
+    )
+    assert b"This text should be removed" not in res.data
+    assert b"I smell JavaScript because the button was pressed" in res.data
+
+    assert b"testheader: yes" in res.data
+    assert b"user-agent: mycustomagent" in res.data
+
+    client.get(
+        url_for("form_delete", uuid="all"),
+        follow_redirects=True
+    )

View File

@@ -29,7 +29,8 @@ def test_fetch_pdf(client, live_server):
         follow_redirects=True
     )
 
-    assert b'PDF-1.5' not in res.data
+    # PDF header should not be there (it was converted to text)
+    assert b'PDF' not in res.data[:10]
     assert b'hello world' in res.data
 
     # So we know if the file changes in other ways

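The old assertion only caught the literal 'PDF-1.5' version string; checking the first ten bytes instead catches any '%PDF-1.x' file header regardless of version, without tripping over the letters "PDF" appearing later in the converted text. A tiny sketch of the idea with invented sample data:

# Sketch of why slicing the first bytes is the more robust check; the sample data is invented.
converted_text = b"hello world from the converted PDF-1.7 document"
raw_pdf = b"%PDF-1.7\n%..."

assert b'PDF' not in converted_text[:10]  # passes: conversion stripped the file header
assert b'PDF' in raw_pdf[:10]             # a raw, unconverted PDF of any version would fail the test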
View File

@@ -242,5 +242,28 @@ def live_server_setup(live_server):
         resp.headers['Content-Type'] = 'application/pdf'
         return resp
 
+    @live_server.app.route('/test-interactive-html-endpoint')
+    def test_interactive_html_endpoint():
+        header_text=""
+        for k,v in request.headers.items():
+            header_text += f"{k}: {v}<br>"
+
+        resp = make_response(f"""
+        <html>
+         <body>
+           Primitive JS check for <pre>changedetectionio/tests/visualselector/test_fetch_data.py</pre>
+           <p id="remove">This text should be removed</p>
+           <form onsubmit="event.preventDefault();">
+           <!-- obfuscated text so that we dont accidentally get a false positive due to conversion of the source :) --->
+           <button name="test-button" onclick="getElementById('remove').remove();getElementById('some-content').innerHTML = atob('SSBzbWVsbCBKYXZhU2NyaXB0IGJlY2F1c2UgdGhlIGJ1dHRvbiB3YXMgcHJlc3NlZCE=')">Click here</button>
+           <div id=some-content></div>
+           <pre>
+           {header_text.lower()}
+           </pre>
+         </body>
+        </html>""", 200)
+        resp.headers['Content-Type'] = 'text/html'
+        return resp
+
     live_server.start()

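As the comment in the endpoint above notes, the button's payload is base64-encoded on purpose so the expected sentence never appears verbatim in the page source; it only shows up after the JavaScript actually runs, which is what the browser-step and custom-JS tests assert on. A quick check of what that atob() call decodes to:

# Quick check of the obfuscated payload used by the endpoint above.
import base64

decoded = base64.b64decode('SSBzbWVsbCBKYXZhU2NyaXB0IGJlY2F1c2UgdGhlIGJ1dHRvbiB3YXMgcHJlc3NlZCE=').decode()
assert decoded == 'I smell JavaScript because the button was pressed!'
# ...which is exactly the substring the tests below look for in the preview output.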
View File

@@ -7,15 +7,19 @@ from ..util import live_server_setup, wait_for_all_checks, extract_UUID_from_client
 def test_setup(client, live_server):
     live_server_setup(live_server)
 
 # Add a site in paused mode, add an invalid filter, we should still have visual selector data ready
 def test_visual_selector_content_ready(client, live_server):
     import os
     import json
 
     assert os.getenv('PLAYWRIGHT_DRIVER_URL'), "Needs PLAYWRIGHT_DRIVER_URL set for this test"
+
     # Add our URL to the import page, because the docker container (playwright/selenium) wont be able to connect to our usual test url
-    test_url = "https://changedetection.io/ci-test/test-runjs.html"
+    test_url = url_for('test_interactive_html_endpoint', _external=True)
+    test_url = test_url.replace('localhost.localdomain', 'cdio')
+    test_url = test_url.replace('localhost', 'cdio')
 
     res = client.post(
         url_for("form_quick_watch_add"),

@@ -23,28 +27,31 @@ def test_visual_selector_content_ready(client, live_server):
         follow_redirects=True
     )
     assert b"Watch added in Paused state, saving will unpause" in res.data
+    uuid = extract_UUID_from_client(client)
 
     res = client.post(
-        url_for("edit_page", uuid="first", unpause_on_save=1),
+        url_for("edit_page", uuid=uuid, unpause_on_save=1),
         data={
             "url": test_url,
             "tags": "",
-            "headers": "",
-            'fetch_backend': "html_webdriver",
-            'webdriver_js_execute_code': 'document.querySelector("button[name=test-button]").click();'
+            # For now, cookies doesnt work in headers because it must be a full cookiejar object
+            'headers': "testheader: yes\buser-agent: MyCustomAgent",
+            'fetch_backend': "html_webdriver",
         },
         follow_redirects=True
    )
     assert b"unpaused" in res.data
     wait_for_all_checks(client)
-    uuid = extract_UUID_from_client(client)
-    # Check the JS execute code before extract worked
+
+    assert live_server.app.config['DATASTORE'].data['watching'][uuid].history_n >= 1, "Watch history had atleast 1 (everything fetched OK)"
+
     res = client.get(
-        url_for("preview_page", uuid="first"),
+        url_for("preview_page", uuid=uuid),
         follow_redirects=True
     )
-    assert b'I smell JavaScript' in res.data
+    assert b"testheader: yes" in res.data
+    assert b"user-agent: mycustomagent" in res.data
 
     assert os.path.isfile(os.path.join('test-datastore', uuid, 'last-screenshot.png')), "last-screenshot.png should exist"
     assert os.path.isfile(os.path.join('test-datastore', uuid, 'elements.json')), "xpath elements.json data should exist"

@@ -74,30 +81,33 @@ def test_visual_selector_content_ready(client, live_server):
 def test_basic_browserstep(client, live_server):
-    assert os.getenv('PLAYWRIGHT_DRIVER_URL'), "Needs PLAYWRIGHT_DRIVER_URL set for this test"
     #live_server_setup(live_server)
+    assert os.getenv('PLAYWRIGHT_DRIVER_URL'), "Needs PLAYWRIGHT_DRIVER_URL set for this test"
 
-    # Add our URL to the import page, because the docker container (playwright/selenium) wont be able to connect to our usual test url
-    test_url = "https://changedetection.io/ci-test/test-runjs.html"
+    test_url = url_for('test_interactive_html_endpoint', _external=True)
+    test_url = test_url.replace('localhost.localdomain', 'cdio')
+    test_url = test_url.replace('localhost', 'cdio')
 
     res = client.post(
         url_for("form_quick_watch_add"),
         data={"url": test_url, "tags": '', 'edit_and_watch_submit_button': 'Edit > Watch'},
         follow_redirects=True
     )
     assert b"Watch added in Paused state, saving will unpause" in res.data
 
     res = client.post(
         url_for("edit_page", uuid="first", unpause_on_save=1),
         data={
             "url": test_url,
             "tags": "",
-            "headers": "",
-            'fetch_backend': "html_webdriver",
-            'browser_steps-0-operation': 'Goto site',
-            'browser_steps-1-operation': 'Click element',
-            'browser_steps-1-selector': 'button[name=test-button]',
-            'browser_steps-1-optional_value': ''
+            'fetch_backend': "html_webdriver",
+            'browser_steps-0-operation': 'Goto site',
+            'browser_steps-1-operation': 'Click element',
+            'browser_steps-1-selector': 'button[name=test-button]',
+            'browser_steps-1-optional_value': '',
+            # For now, cookies doesnt work in headers because it must be a full cookiejar object
+            'headers': "testheader: yes\buser-agent: MyCustomAgent",
         },
         follow_redirects=True
     )

@@ -105,6 +115,9 @@ def test_basic_browserstep(client, live_server):
     wait_for_all_checks(client)
     uuid = extract_UUID_from_client(client)
+    assert live_server.app.config['DATASTORE'].data['watching'][uuid].history_n >= 1, "Watch history had atleast 1 (everything fetched OK)"
+    assert b"This text should be removed" not in res.data
 
     # Check HTML conversion detected and workd
     res = client.get(

@@ -114,13 +127,19 @@ def test_basic_browserstep(client, live_server):
     assert b"This text should be removed" not in res.data
     assert b"I smell JavaScript because the button was pressed" in res.data
+    assert b"testheader: yes" in res.data
+    assert b"user-agent: mycustomagent" in res.data
+
+    four_o_four_url = url_for('test_endpoint', status_code=404, _external=True)
+    four_o_four_url = four_o_four_url.replace('localhost.localdomain', 'cdio')
+    four_o_four_url = four_o_four_url.replace('localhost', 'cdio')
 
     # now test for 404 errors
     res = client.post(
         url_for("edit_page", uuid=uuid, unpause_on_save=1),
         data={
-            "url": "https://changedetection.io/404",
+            "url": four_o_four_url,
             "tags": "",
-            "headers": "",
             'fetch_backend': "html_webdriver",
             'browser_steps-0-operation': 'Goto site',
             'browser_steps-1-operation': 'Click element',

View File

@@ -1,7 +1,7 @@
 # Used by Pyppeteer
 pyee
-eventlet>=0.33.3 # related to dnspython fixes
+eventlet==0.33.3 # related to dnspython fixes
 feedgen~=0.9
 flask-compress
 # 0.6.3 included compatibility fix for werkzeug 3.x (2.x had deprecation of url handlers)

@@ -22,13 +22,15 @@ validators~=0.21
 brotli~=1.0
 requests[socks]
-urllib3>1.26
+urllib3==1.26.18
 chardet>2.3.0
 wtforms~=3.0
 jsonpath-ng~=1.5.3
-dnspython~=2.4 # related to eventlet fixes
+# Pinned: module 'eventlet.green.select' has no attribute 'epoll'
+# https://github.com/eventlet/eventlet/issues/805#issuecomment-1640463482
+dnspython==2.3.0 # related to eventlet fixes
 
 # jq not available on Windows so must be installed manually