mirror of https://github.com/dgtlmoon/changedetection.io.git
synced 2025-11-08 10:36:32 +00:00

Compare commits: ui-font-ve...selectable (24 commits)

Commits in this compare (SHA1):
6fb6d01e2a
bfe69de549
c437e5d740
7cc2afbb8f
2877a639dc
2f16aee0dd
cdf611f173
77ec1da0ff
c8dcc072c8
7c97a5a403
7dd967be8e
3607d15185
3382b4cb3f
7477ce11d6
858b66efb4
0bcbcb80f1
b6bdc2738b
ebc7a7e568
d7bc2bd3f6
2bd32b261a
572a169a47
68d1e2736c
97e591fa24
5d9a5d9fa8

.github/workflows/containers.yml (vendored, 16 changes)

@@ -96,8 +96,9 @@ jobs:
           tags: |
             ${{ secrets.DOCKER_HUB_USERNAME }}/changedetection.io:dev,ghcr.io/${{ github.repository }}:dev
           platforms: linux/amd64,linux/arm64,linux/arm/v6,linux/arm/v7,linux/arm/v8
-          cache-from: type=local,src=/tmp/.buildx-cache
-          cache-to: type=local,dest=/tmp/.buildx-cache
+          cache-from: type=gha
+          cache-to: type=gha,mode=max

           # Looks like this was disabled
           # provenance: false

@@ -116,18 +117,11 @@ jobs:
             ${{ secrets.DOCKER_HUB_USERNAME }}/changedetection.io:latest
             ghcr.io/dgtlmoon/changedetection.io:latest
           platforms: linux/amd64,linux/arm64,linux/arm/v6,linux/arm/v7,linux/arm/v8
-          cache-from: type=local,src=/tmp/.buildx-cache
-          cache-to: type=local,dest=/tmp/.buildx-cache
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
           # Looks like this was disabled
           # provenance: false

       - name: Image digest
         run: echo step SHA ${{ steps.vars.outputs.sha_short }} tag ${{steps.vars.outputs.tag}} branch ${{steps.vars.outputs.branch}} digest ${{ steps.docker_build.outputs.digest }}

-      - name: Cache Docker layers
-        uses: actions/cache@v3
-        with:
-          path: /tmp/.buildx-cache
-          key: ${{ runner.os }}-buildx-${{ github.sha }}
-          restore-keys: |
-            ${{ runner.os }}-buildx-

.github/workflows/test-only.yml (vendored, 11 changes)

@@ -30,7 +30,10 @@ jobs:

         # Selenium+browserless
         docker run --network changedet-network -d --hostname selenium -p 4444:4444 --rm --shm-size="2g" selenium/standalone-chrome:4
-        docker run --network changedet-network -d --hostname browserless -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" -e "DEFAULT_LAUNCH_ARGS=[\"--window-size=1920,1080\"]" --rm -p 3000:3000 --shm-size="2g" browserless/chrome:1.60-chrome-stable
+        docker run --network changedet-network -d --name browserless --hostname browserless -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" -e "DEFAULT_LAUNCH_ARGS=[\"--window-size=1920,1080\"]" --rm -p 3000:3000 --shm-size="2g" browserless/chrome:1.60-chrome-stable
+
+        # For accessing custom browser tests
+        docker run --network changedet-network -d --name browserless-custom-url --hostname browserless-custom-url -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" -e "DEFAULT_LAUNCH_ARGS=[\"--window-size=1920,1080\"]" --rm --shm-size="2g" browserless/chrome:1.60-chrome-stable

     - name: Build changedetection.io container for testing
       run: |

@@ -86,6 +89,12 @@ jobs:
         # And again with PLAYWRIGHT_DRIVER_URL=..
         cd ..

+    - name: Test custom browser URL
+      run: |
+        cd changedetectionio
+        ./run_custom_browser_url_tests.sh
+        cd ..
+
     - name: Test changedetection.io container starts+runs basically without error
       run: |
         docker run -p 5556:5000 -d test-changedetectionio

changedetectionio/__init__.py

@@ -38,7 +38,7 @@ from flask_paginate import Pagination, get_page_parameter
 from changedetectionio import html_tools
 from changedetectionio.api import api_v1

-__version__ = '0.45.7.1'
+__version__ = '0.45.7.3'

 from changedetectionio.store import BASE_URL_NOT_SET_TEXT

@@ -614,6 +614,8 @@ def changedetection_app(config=None, datastore_o=None):
         # For the form widget tag uuid lookup
         form.tags.datastore = datastore # in _value

+        for p in datastore.extra_browsers:
+            form.fetch_backend.choices.append(p)

         form.fetch_backend.choices.append(("system", 'System settings default'))

@@ -714,7 +716,7 @@ def changedetection_app(config=None, datastore_o=None):
         system_uses_webdriver = datastore.data['settings']['application']['fetch_backend'] == 'html_webdriver'

         is_html_webdriver = False
-        if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver':
+        if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver' or watch.get('fetch_backend', '').startswith('extra_browser_'):
            is_html_webdriver = True

         # Only works reliably with Playwright

@@ -977,7 +979,7 @@ def changedetection_app(config=None, datastore_o=None):
         system_uses_webdriver = datastore.data['settings']['application']['fetch_backend'] == 'html_webdriver'

         is_html_webdriver = False
-        if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver':
+        if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver' or watch.get('fetch_backend', '').startswith('extra_browser_'):
            is_html_webdriver = True

         password_enabled_and_share_is_off = False

@@ -1031,7 +1033,7 @@ def changedetection_app(config=None, datastore_o=None):

         is_html_webdriver = False
-        if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver':
+        if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver' or watch.get('fetch_backend', '').startswith('extra_browser_'):
            is_html_webdriver = True

         # Never requested successfully, but we detected a fetch error

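The same condition is updated in three places above; in plain terms, a watch now counts as browser-rendered if it inherits a webdriver system default, selects html_webdriver directly, or points at any custom 'extra_browser_*' backend. A minimal sketch of that predicate (the helper name and values are illustrative, not part of the codebase):

    # Illustrative helper mirroring the repeated condition above (hypothetical name).
    def uses_real_browser(watch_fetch_backend, system_uses_webdriver):
        return ((watch_fetch_backend == 'system' and system_uses_webdriver)
                or watch_fetch_backend == 'html_webdriver'
                or watch_fetch_backend.startswith('extra_browser_'))

    print(uses_real_browser('extra_browser_mobile_proxy', False))  # True
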
@@ -40,8 +40,8 @@ def construct_blueprint(datastore: ChangeDetectionStore):
         contents = ''
         now = time.time()
         try:
-            update_handler = text_json_diff.perform_site_check(datastore=datastore)
-            changed_detected, update_obj, contents = update_handler.run(uuid, preferred_proxy=preferred_proxy, skip_when_checksum_same=False)
+            update_handler = text_json_diff.perform_site_check(datastore=datastore, watch_uuid=uuid)
+            update_handler.call_browser()
         # title, size is len contents not len xfer
         except content_fetcher.Non200ErrorCodeReceived as e:
             if e.status_code == 404:

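For callers, the change above splits one call into an explicit fetch step followed by the renamed detection step. A minimal sketch of the new call pattern, assuming the package layout shown in this compare (the wrapper function is illustrative):

    from changedetectionio.processors import text_json_diff

    def check_watch_once(datastore, uuid):
        # The processor is now constructed against one specific watch.
        update_handler = text_json_diff.perform_site_check(datastore=datastore, watch_uuid=uuid)
        # Fetching (requests, playwright, or an extra browser) is its own step now.
        update_handler.call_browser()
        # The detection pass was renamed run() -> run_changedetection();
        # it still returns (changed_detected, update_obj, contents).
        return update_handler.run_changedetection(uuid)
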
changedetectionio/content_fetcher.py

@@ -96,6 +96,7 @@ class Fetcher():
     content = None
     error = None
     fetcher_description = "No description"
+    browser_connection_url = None
     headers = {}
     status_code = None
     webdriver_js_execute_code = None

@@ -251,14 +252,16 @@ class base_html_playwright(Fetcher):

     proxy = None

-    def __init__(self, proxy_override=None):
+    def __init__(self, proxy_override=None, browser_connection_url=None):
         super().__init__()
-        # .strip('"') is going to save someone a lot of time when they accidently wrap the env value
         self.browser_type = os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').strip('"')
-        self.command_executor = os.getenv(
-            "PLAYWRIGHT_DRIVER_URL",
-            'ws://playwright-chrome:3000'
-        ).strip('"')
+
+        # .strip('"') is going to save someone a lot of time when they accidently wrap the env value
+        if not browser_connection_url:
+            self.browser_connection_url = os.getenv("PLAYWRIGHT_DRIVER_URL", 'ws://playwright-chrome:3000').strip('"')
+        else:
+            self.browser_connection_url = browser_connection_url

         # If any proxy settings are enabled, then we should setup the proxy object
         proxy_args = {}

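The resolution order introduced here: an explicit per-browser connection URL wins, otherwise the PLAYWRIGHT_DRIVER_URL environment variable (with its default) applies. A standalone sketch of that logic (the helper name is illustrative):

    import os

    def resolve_playwright_url(browser_connection_url=None):
        # An explicit custom browser URL takes priority over the environment.
        if browser_connection_url:
            return browser_connection_url
        # .strip('"') forgives accidentally quoted env values, as noted above.
        return os.getenv("PLAYWRIGHT_DRIVER_URL", 'ws://playwright-chrome:3000').strip('"')
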
@@ -419,11 +422,7 @@ class base_html_playwright(Fetcher):
             is_binary=False):

         # For now, USE_EXPERIMENTAL_PUPPETEER_FETCH is not supported by watches with BrowserSteps (for now!)
-        has_browser_steps = self.browser_steps and list(filter(
-            lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'),
-            self.browser_steps))
-
-        if not has_browser_steps and os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH'):
+        if not self.browser_steps and os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH'):
             if strtobool(os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH')):
                 # Temporary backup solution until we rewrite the playwright code
                 return self.run_fetch_browserless_puppeteer(

@@ -448,7 +447,7 @@ class base_html_playwright(Fetcher):
             # Seemed to cause a connection Exception even tho I can see it connect
             # self.browser = browser_type.connect(self.command_executor, timeout=timeout*1000)
             # 60,000 connection timeout only
-            browser = browser_type.connect_over_cdp(self.command_executor, timeout=60000)
+            browser = browser_type.connect_over_cdp(self.browser_connection_url, timeout=60000)

             # SOCKS5 with authentication is not supported (yet)
             # https://github.com/microsoft/playwright/issues/10567

@@ -508,7 +507,11 @@ class base_html_playwright(Fetcher):
             self.status_code = response.status

             if self.status_code != 200 and not ignore_status_codes:
-                raise Non200ErrorCodeReceived(url=url, status_code=self.status_code)
+                screenshot=self.page.screenshot(type='jpeg', full_page=True,
+                                                quality=int(os.getenv("PLAYWRIGHT_SCREENSHOT_QUALITY", 72)))
+
+                raise Non200ErrorCodeReceived(url=url, status_code=self.status_code, screenshot=screenshot)

             if len(self.page.content().strip()) == 0:
                 context.close()

@@ -559,8 +562,6 @@ class base_html_webdriver(Fetcher):
     else:
         fetcher_description = "WebDriver Chrome/Javascript"

-    command_executor = ''
-
     # Configs for Proxy setup
     # In the ENV vars, is prefixed with "webdriver_", so it is for example "webdriver_sslProxy"
     selenium_proxy_settings_mappings = ['proxyType', 'ftpProxy', 'httpProxy', 'noProxy',

@@ -568,12 +569,15 @@ class base_html_webdriver(Fetcher):
                                         'socksProxy', 'socksVersion', 'socksUsername', 'socksPassword']
     proxy = None

-    def __init__(self, proxy_override=None):
+    def __init__(self, proxy_override=None, browser_connection_url=None):
         super().__init__()
         from selenium.webdriver.common.proxy import Proxy as SeleniumProxy

         # .strip('"') is going to save someone a lot of time when they accidently wrap the env value
-        self.command_executor = os.getenv("WEBDRIVER_URL", 'http://browser-chrome:4444/wd/hub').strip('"')
+        if not browser_connection_url:
+            self.browser_connection_url = os.getenv("WEBDRIVER_URL", 'http://browser-chrome:4444/wd/hub').strip('"')
+        else:
+            self.browser_connection_url = browser_connection_url

         # If any proxy settings are enabled, then we should setup the proxy object
         proxy_args = {}

@@ -615,7 +619,7 @@ class base_html_webdriver(Fetcher):
             options.proxy = self.proxy

         self.driver = webdriver.Remote(
-            command_executor=self.command_executor,
+            command_executor=self.browser_connection_url,
             options=options)

         try:

@@ -670,8 +674,10 @@ class base_html_webdriver(Fetcher):
 class html_requests(Fetcher):
     fetcher_description = "Basic fast Plaintext/HTTP Client"

-    def __init__(self, proxy_override=None):
+    def __init__(self, proxy_override=None, browser_connection_url=None):
+        super().__init__()
         self.proxy_override = proxy_override
+        # browser_connection_url is none because its always 'launched locally'

     def run(self,
             url,

changedetectionio/forms.py

@@ -168,7 +168,9 @@ class ValidateContentFetcherIsReady(object):
     def __call__(self, form, field):
         import urllib3.exceptions
         from changedetectionio import content_fetcher
+        return

+        # AttributeError: module 'changedetectionio.content_fetcher' has no attribute 'extra_browser_unlocked<>ASDF213r123r'
         # Better would be a radiohandler that keeps a reference to each class
         if field.data is not None and field.data != 'system':
             klass = getattr(content_fetcher, field.data)

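The early 'return' effectively switches this validator off: 'fetch_backend' values like 'extra_browser_<name>' are user-defined settings rather than classes in content_fetcher, so the old getattr() lookup would raise the AttributeError quoted in the new comment. A small sketch of why (the backend name is hypothetical):

    from changedetectionio import content_fetcher

    field_data = 'extra_browser_mybrowser'  # hypothetical user-configured backend
    # Dynamic extra_browser_* choices are not attributes of the module, so the
    # old getattr(content_fetcher, field_data) call would raise AttributeError.
    print(hasattr(content_fetcher, field_data))  # False
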
@@ -496,6 +498,12 @@ class SingleExtraProxy(Form):
     proxy_url = StringField('Proxy URL', [validators.Optional()], render_kw={"placeholder": "socks5:// or regular proxy http://user:pass@...:3128", "size":50})
     # @todo do the validation here instead

+class SingleExtraBrowser(Form):
+    browser_name = StringField('Name', [validators.Optional()], render_kw={"placeholder": "Name"})
+    browser_connection_url = StringField('Browser connection URL', [validators.Optional()], render_kw={"placeholder": "wss://brightdata... wss://oxylabs etc", "size":50})
+    # @todo do the validation here instead
+
+
 # datastore.data['settings']['requests']..
 class globalSettingsRequestForm(Form):
     time_between_check = FormField(TimeBetweenCheckForm)

@@ -504,6 +512,7 @@ class globalSettingsRequestForm(Form):
                                render_kw={"style": "width: 5em;"},
                                validators=[validators.NumberRange(min=0, message="Should contain zero or more seconds")])
     extra_proxies = FieldList(FormField(SingleExtraProxy), min_entries=5)
+    extra_browsers = FieldList(FormField(SingleExtraBrowser), min_entries=5)

     def validate_extra_proxies(self, extra_validators=None):
         for e in self.data['extra_proxies']:

changedetectionio/model/App.py

@@ -16,6 +16,7 @@ class model(dict):
         },
         'requests': {
             'extra_proxies': [], # Configurable extra proxies via the UI
+            'extra_browsers': [],  # Configurable extra proxies via the UI
             'jitter_seconds': 0,
             'proxy': None, # Preferred proxy connection
             'time_between_check': {'weeks': None, 'days': None, 'hours': 3, 'minutes': None, 'seconds': None},

changedetectionio/model/Watch.py

@@ -19,6 +19,7 @@ from changedetectionio.notification import (

 base_config = {
     'body': None,
+    'browser_steps': [],
     'browser_steps_last_error_step': None,
     'check_unique_lines': False, # On change-detected, compare against all history if its something new
     'check_count': 0,

@@ -145,8 +146,14 @@ class model(dict):
             flash(message, 'error')
             return ''

+        if ready_url.startswith('source:'):
+            ready_url=ready_url.replace('source:', '')
         return ready_url

+    @property
+    def is_source_type_url(self):
+        return self.get('url', '').startswith('source:')
+
     @property
     def get_fetch_backend(self):
         """

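In short: a URL of the form 'source:https://...' marks a watch as monitoring raw page source, and the 'source:' prefix is stripped before the actual request. A tiny illustration with made-up values:

    url = 'source:https://example.com/page'  # hypothetical watch URL
    is_source_type_url = url.startswith('source:')  # True
    ready_url = url.replace('source:', '')  # 'https://example.com/page'
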
@@ -234,6 +241,14 @@ class model(dict):
         fname = os.path.join(self.watch_data_dir, "history.txt")
         return os.path.isfile(fname)

+    @property
+    def has_browser_steps(self):
+        has_browser_steps = self.get('browser_steps') and list(filter(
+            lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'),
+            self.get('browser_steps')))
+
+        return has_browser_steps
+
     # Returns the newest key, but if theres only 1 record, then it's counted as not being new, so return 0.
     @property
     def newest_history_key(self):

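The new property counts only meaningful steps: the placeholder 'Choose one' entry and the implicit 'Goto site' first step are ignored. A runnable sketch with made-up step data:

    # Hypothetical browser_steps rows as stored on a watch.
    browser_steps = [
        {'operation': 'Goto site'},
        {'operation': 'Click element'},
        {'operation': 'Choose one'},
    ]
    real_steps = list(filter(
        lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'),
        browser_steps))
    print(len(real_steps))  # 1 -> only 'Click element' counts
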
changedetectionio/processors/__init__.py

@@ -1,15 +1,122 @@
 from abc import abstractmethod
+import os
 import hashlib
+import re
+from changedetectionio import content_fetcher
+from copy import deepcopy
+from distutils.util import strtobool

 class difference_detection_processor():

+    browser_steps = None
+    datastore = None
+    fetcher = None
+    screenshot = None
+    watch = None
+    xpath_data = None

-    def __init__(self, *args, **kwargs):
+    def __init__(self, *args, datastore, watch_uuid, **kwargs):
         super().__init__(*args, **kwargs)
+        self.datastore = datastore
+        self.watch = deepcopy(self.datastore.data['watching'].get(watch_uuid))
+
+    def call_browser(self):
+
+        # Protect against file:// access
+        if re.search(r'^file://', self.watch.get('url', '').strip(), re.IGNORECASE):
+            if not strtobool(os.getenv('ALLOW_FILE_URI', 'false')):
+                raise Exception(
+                    "file:// type access is denied for security reasons."
+                )
+
+        url = self.watch.link
+
+        # Requests, playwright, other browser via wss:// etc, fetch_extra_something
+        prefer_fetch_backend = self.watch.get('fetch_backend', 'system')
+
+        # Proxy ID "key"
+        preferred_proxy_id = self.datastore.get_preferred_proxy_for_watch(uuid=self.watch.get('uuid'))
+
+        # Pluggable content self.fetcher
+        if not prefer_fetch_backend or prefer_fetch_backend == 'system':
+            prefer_fetch_backend = self.datastore.data['settings']['application'].get('fetch_backend')
+
+        # In the case that the preferred fetcher was a browser config with custom connection URL..
+        # @todo - on save watch, if its extra_browser_ then it should be obvious it will use playwright (like if its requests now..)
+        browser_connection_url = None
+        if prefer_fetch_backend.startswith('extra_browser_'):
+            (t, key) = prefer_fetch_backend.split('extra_browser_')
+            connection = list(
+                filter(lambda s: (s['browser_name'] == key), self.datastore.data['settings']['requests'].get('extra_browsers', [])))
+            if connection:
+                prefer_fetch_backend = 'base_html_playwright'
+                browser_connection_url = connection[0].get('browser_connection_url')
+
+        # Grab the right kind of 'fetcher', (playwright, requests, etc)
+        if hasattr(content_fetcher, prefer_fetch_backend):
+            fetcher_obj = getattr(content_fetcher, prefer_fetch_backend)
+        else:
+            # If the klass doesnt exist, just use a default
+            fetcher_obj = getattr(content_fetcher, "html_requests")
+
+        proxy_url = None
+        if preferred_proxy_id:
+            proxy_url = self.datastore.proxy_list.get(preferred_proxy_id).get('url')
+            print(f"Using proxy Key: {preferred_proxy_id} as Proxy URL {proxy_url}")
+
+        # Now call the fetcher (playwright/requests/etc) with arguments that only a fetcher would need.
+        # When browser_connection_url is None, it method should default to working out whats the best defaults (os env vars etc)
+        self.fetcher = fetcher_obj(proxy_override=proxy_url,
+                                   browser_connection_url=browser_connection_url
+                                   )
+
+        if self.watch.has_browser_steps:
+            self.fetcher.browser_steps = self.watch.get('browser_steps', [])
+            self.fetcher.browser_steps_screenshot_path = os.path.join(self.datastore.datastore_path, self.watch.get('uuid'))
+
+        # Tweak the base config with the per-watch ones
+        request_headers = self.watch.get('headers', [])
+        request_headers.update(self.datastore.get_all_base_headers())
+        request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=self.watch.get('uuid')))
+
+        # https://github.com/psf/requests/issues/4525
+        # Requests doesnt yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot
+        # do this by accident.
+        if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']:
+            request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '')
+
+        timeout = self.datastore.data['settings']['requests'].get('timeout')
+
+        request_body = self.watch.get('body')
+        request_method = self.watch.get('method')
+        ignore_status_codes = self.watch.get('ignore_status_codes', False)
+
+        # Configurable per-watch or global extra delay before extracting text (for webDriver types)
+        system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None)
+        if self.watch.get('webdriver_delay'):
+            self.fetcher.render_extract_delay = self.watch.get('webdriver_delay')
+        elif system_webdriver_delay is not None:
+            self.fetcher.render_extract_delay = system_webdriver_delay
+
+        if self.watch.get('webdriver_js_execute_code') is not None and self.watch.get('webdriver_js_execute_code').strip():
+            self.fetcher.webdriver_js_execute_code = self.watch.get('webdriver_js_execute_code')
+
+        # Requests for PDF's, images etc should be passwd the is_binary flag
+        is_binary = self.watch.is_pdf
+
+        # And here we go! call the right browser with browser-specific settings
+        self.fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, self.watch.get('include_filters'),
+                         is_binary=is_binary)
+
+        #@todo .quit here could go on close object, so we can run JS if change-detected
+        self.fetcher.quit()
+
+        # After init, call run_changedetection() which will do the actual change-detection

     @abstractmethod
-    def run(self, uuid, skip_when_checksum_same=True, preferred_proxy=None):
+    def run_changedetection(self, uuid, skip_when_checksum_same=True):
         update_obj = {'last_notification_error': False, 'last_error': False}
         some_data = 'xxxxx'
         update_obj["previous_md5"] = hashlib.md5(some_data.encode('utf-8')).hexdigest()

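The key move in call_browser() is mapping an 'extra_browser_<name>' backend to its saved connection URL and forcing the Playwright fetcher for it. A standalone sketch with hypothetical settings data:

    # Hypothetical extra browser saved via the new settings UI.
    extra_browsers = [{'browser_name': 'mobile_proxy',
                       'browser_connection_url': 'wss://some-browser-vendor:3000'}]
    prefer_fetch_backend = 'extra_browser_mobile_proxy'
    browser_connection_url = None

    if prefer_fetch_backend.startswith('extra_browser_'):
        # split() yields ('', 'mobile_proxy'); the second element is the lookup key.
        (t, key) = prefer_fetch_backend.split('extra_browser_')
        connection = list(filter(lambda s: s['browser_name'] == key, extra_browsers))
        if connection:
            prefer_fetch_backend = 'base_html_playwright'  # custom URLs always go via Playwright
            browser_connection_url = connection[0].get('browser_connection_url')

    print(prefer_fetch_backend, browser_connection_url)
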
changedetectionio/processors/restock_diff.py

@@ -1,10 +1,7 @@

 import hashlib
-import os
-import re
 import urllib3
 from . import difference_detection_processor
-from changedetectionio import content_fetcher
 from copy import deepcopy

 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

@@ -22,11 +19,7 @@ class perform_site_check(difference_detection_processor):
     screenshot = None
     xpath_data = None

-    def __init__(self, *args, datastore, **kwargs):
-        super().__init__(*args, **kwargs)
-        self.datastore = datastore
-
-    def run(self, uuid, skip_when_checksum_same=True):
+    def run_changedetection(self, uuid, skip_when_checksum_same=True):

         # DeepCopy so we can be sure we don't accidently change anything by reference
         watch = deepcopy(self.datastore.data['watching'].get(uuid))

@@ -34,84 +27,24 @@ class perform_site_check(difference_detection_processor):
         if not watch:
             raise Exception("Watch no longer exists.")

-        # Protect against file:// access
-        if re.search(r'^file', watch.get('url', ''), re.IGNORECASE) and not os.getenv('ALLOW_FILE_URI', False):
-            raise Exception(
-                "file:// type access is denied for security reasons."
-            )
-
         # Unset any existing notification error
         update_obj = {'last_notification_error': False, 'last_error': False}

-        request_headers = watch.get('headers', [])
-        request_headers.update(self.datastore.get_all_base_headers())
-        request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=uuid))
-
-        # https://github.com/psf/requests/issues/4525
-        # Requests doesnt yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot
-        # do this by accident.
-        if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']:
-            request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '')
-
-        timeout = self.datastore.data['settings']['requests'].get('timeout')
-
-        url = watch.link
-
-        request_body = self.datastore.data['watching'][uuid].get('body')
-        request_method = self.datastore.data['watching'][uuid].get('method')
-        ignore_status_codes = self.datastore.data['watching'][uuid].get('ignore_status_codes', False)
-
-        # Pluggable content fetcher
-        prefer_backend = watch.get_fetch_backend
-        if not prefer_backend or prefer_backend == 'system':
-            prefer_backend = self.datastore.data['settings']['application']['fetch_backend']
-
-        if hasattr(content_fetcher, prefer_backend):
-            klass = getattr(content_fetcher, prefer_backend)
-        else:
-            # If the klass doesnt exist, just use a default
-            klass = getattr(content_fetcher, "html_requests")
-
-        proxy_id = self.datastore.get_preferred_proxy_for_watch(uuid=uuid)
-        proxy_url = None
-        if proxy_id:
-            proxy_url = self.datastore.proxy_list.get(proxy_id).get('url')
-            print("UUID {} Using proxy {}".format(uuid, proxy_url))
-
-        fetcher = klass(proxy_override=proxy_url)
-
-        # Configurable per-watch or global extra delay before extracting text (for webDriver types)
-        system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None)
-        if watch['webdriver_delay'] is not None:
-            fetcher.render_extract_delay = watch.get('webdriver_delay')
-        elif system_webdriver_delay is not None:
-            fetcher.render_extract_delay = system_webdriver_delay
-
-        # Could be removed if requests/plaintext could also return some info?
-        if prefer_backend != 'html_webdriver':
-            raise Exception("Re-stock detection requires Chrome or compatible webdriver/playwright fetcher to work")
-
-        if watch.get('webdriver_js_execute_code') is not None and watch.get('webdriver_js_execute_code').strip():
-            fetcher.webdriver_js_execute_code = watch.get('webdriver_js_execute_code')
-
-        fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, watch.get('include_filters'))
-        fetcher.quit()
-
-        self.screenshot = fetcher.screenshot
-        self.xpath_data = fetcher.xpath_data
+        self.screenshot = self.fetcher.screenshot
+        self.xpath_data = self.fetcher.xpath_data

         # Track the content type
-        update_obj['content_type'] = fetcher.headers.get('Content-Type', '')
-        update_obj["last_check_status"] = fetcher.get_last_status_code()
+        update_obj['content_type'] = self.fetcher.headers.get('Content-Type', '')
+        update_obj["last_check_status"] = self.fetcher.get_last_status_code()

         # Main detection method
         fetched_md5 = None
-        if fetcher.instock_data:
-            fetched_md5 = hashlib.md5(fetcher.instock_data.encode('utf-8')).hexdigest()
+        if self.fetcher.instock_data:
+            fetched_md5 = hashlib.md5(self.fetcher.instock_data.encode('utf-8')).hexdigest()
             # 'Possibly in stock' comes from stock-not-in-stock.js when no string found above the fold.
-            update_obj["in_stock"] = True if fetcher.instock_data == 'Possibly in stock' else False
+            update_obj["in_stock"] = True if self.fetcher.instock_data == 'Possibly in stock' else False
         else:
-            raise UnableToExtractRestockData(status_code=fetcher.status_code)
+            raise UnableToExtractRestockData(status_code=self.fetcher.status_code)

         # The main thing that all this at the moment comes down to :)
         changed_detected = False

@@ -128,4 +61,4 @@ class perform_site_check(difference_detection_processor):
         # Always record the new checksum
         update_obj["previous_md5"] = fetched_md5

-        return changed_detected, update_obj, fetcher.instock_data.encode('utf-8')
+        return changed_detected, update_obj, self.fetcher.instock_data.encode('utf-8')

changedetectionio/processors/text_json_diff.py

@@ -1,4 +1,4 @@
-# HTML to TEXT/JSON DIFFERENCE FETCHER
+# HTML to TEXT/JSON DIFFERENCE self.fetcher

 import hashlib
 import json

@@ -32,15 +32,10 @@ class PDFToHTMLToolNotFound(ValueError):
 # Some common stuff here that can be moved to a base class
 # (set_proxy_from_list)
 class perform_site_check(difference_detection_processor):
-    screenshot = None
-    xpath_data = None

-    def __init__(self, *args, datastore, **kwargs):
-        super().__init__(*args, **kwargs)
-        self.datastore = datastore
-
-    def run(self, uuid, skip_when_checksum_same=True, preferred_proxy=None):
+    def run_changedetection(self, uuid, skip_when_checksum_same=True):
         changed_detected = False
+        html_content = ""
         screenshot = False  # as bytes
         stripped_text_from_html = ""

@@ -49,100 +44,25 @@ class perform_site_check(difference_detection_processor):
         if not watch:
             raise Exception("Watch no longer exists.")

-        # Protect against file:// access
-        if re.search(r'^file', watch.get('url', ''), re.IGNORECASE) and not os.getenv('ALLOW_FILE_URI', False):
-            raise Exception(
-                "file:// type access is denied for security reasons."
-            )
-
         # Unset any existing notification error
         update_obj = {'last_notification_error': False, 'last_error': False}

-        # Tweak the base config with the per-watch ones
-        request_headers = watch.get('headers', [])
-        request_headers.update(self.datastore.get_all_base_headers())
-        request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=uuid))
-
-        # https://github.com/psf/requests/issues/4525
-        # Requests doesnt yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot
-        # do this by accident.
-        if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']:
-            request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '')
-
-        timeout = self.datastore.data['settings']['requests'].get('timeout')
-
         url = watch.link

-        request_body = self.datastore.data['watching'][uuid].get('body')
-        request_method = self.datastore.data['watching'][uuid].get('method')
-        ignore_status_codes = self.datastore.data['watching'][uuid].get('ignore_status_codes', False)
-
-        # source: support
-        is_source = False
-        if url.startswith('source:'):
-            url = url.replace('source:', '')
-            is_source = True
-
-        # Pluggable content fetcher
-        prefer_backend = watch.get_fetch_backend
-        if not prefer_backend or prefer_backend == 'system':
-            prefer_backend = self.datastore.data['settings']['application']['fetch_backend']
-
-        if hasattr(content_fetcher, prefer_backend):
-            klass = getattr(content_fetcher, prefer_backend)
-        else:
-            # If the klass doesnt exist, just use a default
-            klass = getattr(content_fetcher, "html_requests")
-
-        if preferred_proxy:
-            proxy_id = preferred_proxy
-        else:
-            proxy_id = self.datastore.get_preferred_proxy_for_watch(uuid=uuid)
-
-        proxy_url = None
-        if proxy_id:
-            proxy_url = self.datastore.proxy_list.get(proxy_id).get('url')
-            print("UUID {} Using proxy {}".format(uuid, proxy_url))
-
-        fetcher = klass(proxy_override=proxy_url)
-
-        # Configurable per-watch or global extra delay before extracting text (for webDriver types)
-        system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None)
-        if watch['webdriver_delay'] is not None:
-            fetcher.render_extract_delay = watch.get('webdriver_delay')
-        elif system_webdriver_delay is not None:
-            fetcher.render_extract_delay = system_webdriver_delay
-
-        # Possible conflict
-        if prefer_backend == 'html_webdriver':
-            fetcher.browser_steps = watch.get('browser_steps', None)
-            fetcher.browser_steps_screenshot_path = os.path.join(self.datastore.datastore_path, uuid)
-
-        if watch.get('webdriver_js_execute_code') is not None and watch.get('webdriver_js_execute_code').strip():
-            fetcher.webdriver_js_execute_code = watch.get('webdriver_js_execute_code')
-
-        # requests for PDF's, images etc should be passwd the is_binary flag
-        is_binary = watch.is_pdf
-
-        fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, watch.get('include_filters'),
-                    is_binary=is_binary)
-        fetcher.quit()
-
-        self.screenshot = fetcher.screenshot
-        self.xpath_data = fetcher.xpath_data
+        self.screenshot = self.fetcher.screenshot
+        self.xpath_data = self.fetcher.xpath_data

         # Track the content type
-        update_obj['content_type'] = fetcher.get_all_headers().get('content-type', '').lower()
+        update_obj['content_type'] = self.fetcher.get_all_headers().get('content-type', '').lower()

         # Watches added automatically in the queue manager will skip if its the same checksum as the previous run
         # Saves a lot of CPU
-        update_obj['previous_md5_before_filters'] = hashlib.md5(fetcher.content.encode('utf-8')).hexdigest()
+        update_obj['previous_md5_before_filters'] = hashlib.md5(self.fetcher.content.encode('utf-8')).hexdigest()
         if skip_when_checksum_same:
             if update_obj['previous_md5_before_filters'] == watch.get('previous_md5_before_filters'):
                 raise content_fetcher.checksumFromPreviousCheckWasTheSame()

         # Fetching complete, now filters
-        # @todo move to class / maybe inside of fetcher abstract base?

         # @note: I feel like the following should be in a more obvious chain system
         # - Check filter text

@@ -151,24 +71,24 @@ class perform_site_check(difference_detection_processor):
         # https://stackoverflow.com/questions/41817578/basic-method-chaining ?
         # return content().textfilter().jsonextract().checksumcompare() ?

-        is_json = 'application/json' in fetcher.get_all_headers().get('content-type', '').lower()
+        is_json = 'application/json' in self.fetcher.get_all_headers().get('content-type', '').lower()
         is_html = not is_json
         is_rss = False

-        ctype_header = fetcher.get_all_headers().get('content-type', '').lower()
+        ctype_header = self.fetcher.get_all_headers().get('content-type', '').lower()
         # Go into RSS preprocess for converting CDATA/comment to usable text
         if any(substring in ctype_header for substring in ['application/xml', 'application/rss', 'text/xml']):
-            if '<rss' in fetcher.content[:100].lower():
-                fetcher.content = cdata_in_document_to_text(html_content=fetcher.content)
+            if '<rss' in self.fetcher.content[:100].lower():
+                self.fetcher.content = cdata_in_document_to_text(html_content=self.fetcher.content)
                 is_rss = True

         # source: support, basically treat it as plaintext
-        if is_source:
+        if watch.is_source_type_url:
             is_html = False
             is_json = False

-        inline_pdf = fetcher.get_all_headers().get('content-disposition', '') and '%PDF-1' in fetcher.content[:10]
-        if watch.is_pdf or 'application/pdf' in fetcher.get_all_headers().get('content-type', '').lower() or inline_pdf:
+        inline_pdf = self.fetcher.get_all_headers().get('content-disposition', '') and '%PDF-1' in self.fetcher.content[:10]
+        if watch.is_pdf or 'application/pdf' in self.fetcher.get_all_headers().get('content-type', '').lower() or inline_pdf:
             from shutil import which
             tool = os.getenv("PDF_TO_HTML_TOOL", "pdftohtml")
             if not which(tool):

@@ -179,18 +99,18 @@ class perform_site_check(difference_detection_processor):
                 [tool, '-stdout', '-', '-s', 'out.pdf', '-i'],
                 stdout=subprocess.PIPE,
                 stdin=subprocess.PIPE)
-            proc.stdin.write(fetcher.raw_content)
+            proc.stdin.write(self.fetcher.raw_content)
             proc.stdin.close()
-            fetcher.content = proc.stdout.read().decode('utf-8')
+            self.fetcher.content = proc.stdout.read().decode('utf-8')
             proc.wait(timeout=60)

             # Add a little metadata so we know if the file changes (like if an image changes, but the text is the same
             # @todo may cause problems with non-UTF8?
             metadata = "<p>Added by changedetection.io: Document checksum - {} Filesize - {} bytes</p>".format(
-                hashlib.md5(fetcher.raw_content).hexdigest().upper(),
-                len(fetcher.content))
+                hashlib.md5(self.fetcher.raw_content).hexdigest().upper(),
+                len(self.fetcher.content))

-            fetcher.content = fetcher.content.replace('</body>', metadata + '</body>')
+            self.fetcher.content = self.fetcher.content.replace('</body>', metadata + '</body>')

         # Better would be if Watch.model could access the global data also
         # and then use getattr https://docs.python.org/3/reference/datamodel.html#object.__getitem__

@@ -217,7 +137,7 @@ class perform_site_check(difference_detection_processor):
         if is_json:
             # Sort the JSON so we dont get false alerts when the content is just re-ordered
             try:
-                fetcher.content = json.dumps(json.loads(fetcher.content), sort_keys=True)
+                self.fetcher.content = json.dumps(json.loads(self.fetcher.content), sort_keys=True)
             except Exception as e:
                 # Might have just been a snippet, or otherwise bad JSON, continue
                 pass

@@ -225,22 +145,22 @@ class perform_site_check(difference_detection_processor):
         if has_filter_rule:
             for filter in include_filters_rule:
                 if any(prefix in filter for prefix in json_filter_prefixes):
-                    stripped_text_from_html += html_tools.extract_json_as_string(content=fetcher.content, json_filter=filter)
+                    stripped_text_from_html += html_tools.extract_json_as_string(content=self.fetcher.content, json_filter=filter)
                     is_html = False

-        if is_html or is_source:
+        if is_html or watch.is_source_type_url:

             # CSS Filter, extract the HTML that matches and feed that into the existing inscriptis::get_text
-            fetcher.content = html_tools.workarounds_for_obfuscations(fetcher.content)
-            html_content = fetcher.content
+            self.fetcher.content = html_tools.workarounds_for_obfuscations(self.fetcher.content)
+            html_content = self.fetcher.content

             # If not JSON, and if it's not text/plain..
-            if 'text/plain' in fetcher.get_all_headers().get('content-type', '').lower():
+            if 'text/plain' in self.fetcher.get_all_headers().get('content-type', '').lower():
                 # Don't run get_text or xpath/css filters on plaintext
                 stripped_text_from_html = html_content
             else:
                 # Does it have some ld+json price data? used for easier monitoring
-                update_obj['has_ldjson_price_data'] = html_tools.has_ldjson_product_info(fetcher.content)
+                update_obj['has_ldjson_price_data'] = html_tools.has_ldjson_product_info(self.fetcher.content)

                 # Then we assume HTML
                 if has_filter_rule:

@@ -250,14 +170,14 @@ class perform_site_check(difference_detection_processor):
                     # For HTML/XML we offer xpath as an option, just start a regular xPath "/.."
                     if filter_rule[0] == '/' or filter_rule.startswith('xpath:'):
                         html_content += html_tools.xpath_filter(xpath_filter=filter_rule.replace('xpath:', ''),
-                                                                html_content=fetcher.content,
-                                                                append_pretty_line_formatting=not is_source,
+                                                                html_content=self.fetcher.content,
+                                                                append_pretty_line_formatting=not watch.is_source_type_url,
                                                                 is_rss=is_rss)
                     else:
                         # CSS Filter, extract the HTML that matches and feed that into the existing inscriptis::get_text
                         html_content += html_tools.include_filters(include_filters=filter_rule,
-                                                                   html_content=fetcher.content,
-                                                                   append_pretty_line_formatting=not is_source)
+                                                                   html_content=self.fetcher.content,
+                                                                   append_pretty_line_formatting=not watch.is_source_type_url)

                 if not html_content.strip():
                     raise FilterNotFoundInResponse(include_filters_rule)

@@ -265,7 +185,7 @@ class perform_site_check(difference_detection_processor):
             if has_subtractive_selectors:
                 html_content = html_tools.element_removal(subtractive_selectors, html_content)

-            if is_source:
+            if watch.is_source_type_url:
                 stripped_text_from_html = html_content
             else:
                 # extract text

@@ -311,7 +231,7 @@ class perform_site_check(difference_detection_processor):
         empty_pages_are_a_change = self.datastore.data['settings']['application'].get('empty_pages_are_a_change', False)
         if not is_json and not empty_pages_are_a_change and len(stripped_text_from_html.strip()) == 0:
             raise content_fetcher.ReplyWithContentButNoText(url=url,
-                                                            status_code=fetcher.get_last_status_code(),
+                                                            status_code=self.fetcher.get_last_status_code(),
                                                             screenshot=screenshot,
                                                             has_filters=has_filter_rule,
                                                             html_content=html_content

@@ -320,7 +240,7 @@ class perform_site_check(difference_detection_processor):
         # We rely on the actual text in the html output.. many sites have random script vars etc,
         # in the future we'll implement other mechanisms.

-        update_obj["last_check_status"] = fetcher.get_last_status_code()
+        update_obj["last_check_status"] = self.fetcher.get_last_status_code()

         # If there's text to skip
         # @todo we could abstract out the get_text() to handle this cleaner

@@ -408,7 +328,7 @@ class perform_site_check(difference_detection_processor):
         if is_html:
             if self.datastore.data['settings']['application'].get('extract_title_as_title') or watch['extract_title_as_title']:
                 if not watch['title'] or not len(watch['title']):
-                    update_obj['title'] = html_tools.extract_element(find='title', html_content=fetcher.content)
+                    update_obj['title'] = html_tools.extract_element(find='title', html_content=self.fetcher.content)

         if changed_detected:
             if watch.get('check_unique_lines', False):

changedetectionio/run_custom_browser_url_tests.sh (new executable file, 44 lines)

@@ -0,0 +1,44 @@
+#!/bin/bash
+
+# run some tests and look if the 'custom-browser-search-string=1' connect string appeared in the correct containers
+
+# enable debug
+set -x
+
+# An extra browser is configured, but we never chose to use it, so it should NOT show in the logs
+docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/custom_browser_url/test_custom_browser_url.py::test_request_not_via_custom_browser_url'
+docker logs browserless-custom-url &>log.txt
+grep 'custom-browser-search-string=1' log.txt
+if [ $? -ne 1 ]
+then
+  echo "Saw a request in 'browserless-custom-url' container with 'custom-browser-search-string=1' when I should not"
+  exit 1
+fi
+
+docker logs browserless &>log.txt
+grep 'custom-browser-search-string=1' log.txt
+if [ $? -ne 1 ]
+then
+  echo "Saw a request in 'browserless' container with 'custom-browser-search-string=1' when I should not"
+  exit 1
+fi
+
+# Special connect string should appear in the custom-url container, but not in the 'default' one
+docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/custom_browser_url/test_custom_browser_url.py::test_request_via_custom_browser_url'
+docker logs browserless-custom-url &>log.txt
+grep 'custom-browser-search-string=1' log.txt
+if [ $? -ne 0 ]
+then
+  echo "Did not see request in 'browserless-custom-url' container with 'custom-browser-search-string=1' when I should"
+  exit 1
+fi
+
+docker logs browserless &>log.txt
+grep 'custom-browser-search-string=1' log.txt
+if [ $? -ne 1 ]
+then
+  echo "Saw a request in 'browserless' container with 'custom-browser-search-string=1' when I should not"
+  exit 1
+fi
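The script leans on grep's exit status: 0 means the marker string was found, 1 means it was not, so `[ $? -ne 1 ]` fails the run whenever the string shows up in a container that should never have seen the request. A rough Python equivalent of one of these checks (hypothetical helper, not part of the repo):

    import subprocess

    def container_log_contains(container: str, needle: str) -> bool:
        # 'docker logs' writes to both stdout and stderr, so capture both,
        # mirroring the script's '&>log.txt' redirection
        result = subprocess.run(["docker", "logs", container],
                                capture_output=True, text=True)
        return needle in (result.stdout + result.stderr)

    # the default browserless container must never see the special connect string
    assert not container_log_contains("browserless", "custom-browser-search-string=1")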
@@ -0,0 +1,24 @@
+ul#requests-extra_browsers {
+  list-style: none;
+  /* tidy up the table to look more "inline" */
+  li {
+    > label {
+      display: none;
+    }
+
+  }
+
+  /* each proxy entry is a `table` */
+  table {
+    tr {
+      display: inline;
+    }
+  }
+}
+
+#extra-browsers-setting {
+  border: 1px solid var(--color-grey-800);
+  border-radius: 4px;
+  margin: 1em;
+  padding: 1em;
+}
@@ -60,3 +60,10 @@ body.proxy-check-active {

  padding-bottom: 1em;
}
+
+#extra-proxies-setting {
+  border: 1px solid var(--color-grey-800);
+  border-radius: 4px;
+  margin: 1em;
+  padding: 1em;
+}
@@ -5,6 +5,7 @@
@import "parts/_arrows";
@import "parts/_browser-steps";
@import "parts/_extra_proxies";
+@import "parts/_extra_browsers";
@import "parts/_pagination";
@import "parts/_spinners";
@import "parts/_variables";
@@ -128,6 +128,27 @@ body.proxy-check-active #request .proxy-timing {
  border-radius: 4px;
  padding: 1em; }
+
+#extra-proxies-setting {
+  border: 1px solid var(--color-grey-800);
+  border-radius: 4px;
+  margin: 1em;
+  padding: 1em; }
+
+ul#requests-extra_browsers {
+  list-style: none;
+  /* tidy up the table to look more "inline" */
+  /* each proxy entry is a `table` */ }
+  ul#requests-extra_browsers li > label {
+    display: none; }
+  ul#requests-extra_browsers table tr {
+    display: inline; }
+
+#extra-browsers-setting {
+  border: 1px solid var(--color-grey-800);
+  border-radius: 4px;
+  margin: 1em;
+  padding: 1em; }

.pagination-page-info {
  color: #fff;
  font-size: 0.85rem;
@@ -633,6 +633,18 @@ class ChangeDetectionStore:

        return {}

+   @property
+   def extra_browsers(self):
+       res = []
+       p = list(filter(
+           lambda s: (s.get('browser_name') and s.get('browser_connection_url')),
+           self.__data['settings']['requests'].get('extra_browsers', [])))
+       if p:
+           for i in p:
+               res.append(("extra_browser_" + i['browser_name'], i['browser_name']))
+
+       return res
+
    def tag_exists_by_name(self, tag_name):
        return any(v.get('title', '').lower() == tag_name.lower() for k, v in self.__data['settings']['application']['tags'].items())

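The new `extra_browsers` property returns `(value, label)` tuples whose value carries an `extra_browser_` prefix, which is what lets a per-watch `fetch_backend` point at one of the user-defined browsers. A small illustration of the expected input and output shape (hand-built data, not the real datastore):

    # hand-built example of what the property consumes and produces
    settings = {'requests': {'extra_browsers': [
        {'browser_name': 'custom browser URL',
         'browser_connection_url': 'ws://browserless-custom-url:3000'},
        {'browser_name': 'incomplete entry'},  # filtered out: no connection URL
    ]}}

    valid = [b for b in settings['requests']['extra_browsers']
             if b.get('browser_name') and b.get('browser_connection_url')]
    choices = [("extra_browser_" + b['browser_name'], b['browser_name']) for b in valid]
    # choices == [('extra_browser_custom browser URL', 'custom browser URL')]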
@@ -227,11 +227,15 @@ nav
            </p>
            <p><strong>Tip</strong>: "Residential" and "Mobile" proxy types can be more successful than "Data Center" for blocked websites.

-           <div class="pure-control-group">
+           <div class="pure-control-group" id="extra-proxies-setting">
                {{ render_field(form.requests.form.extra_proxies) }}
                <span class="pure-form-message-inline">"Name" will be used for selecting the proxy in the Watch Edit settings</span><br>
                <span class="pure-form-message-inline">SOCKS5 proxies with authentication are only supported with 'plain requests' fetcher, for other fetchers you should whitelist the IP access instead</span>
            </div>
+           <div class="pure-control-group" id="extra-browsers-setting">
+               <span class="pure-form-message-inline"><i>Extra Browsers</i> allow changedetection.io to communicate with a different web-browser.</span><br>
+               {{ render_field(form.requests.form.extra_browsers) }}
+           </div>
        </div>
        <div id="actions">
            <div class="pure-control-group">
@@ -104,8 +104,9 @@

            {% if watch.get_fetch_backend == "html_webdriver"
                 or ( watch.get_fetch_backend == "system" and system_default_fetcher == 'html_webdriver' )
+                or "extra_browser_" in watch.get_fetch_backend
            %}
-           <img class="status-icon" src="{{url_for('static_content', group='images', filename='Google-Chrome-icon.png')}}" title="Using a chrome browser" >
+           <img class="status-icon" src="{{url_for('static_content', group='images', filename='Google-Chrome-icon.png')}}" title="Using a Chrome browser" >
            {% endif %}

            {%if watch.is_pdf %}<img class="status-icon" src="{{url_for('static_content', group='images', filename='pdf-icon.svg')}}" title="Converting PDF to text" >{% endif %}
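The template only needs the naming convention, not the connection details: any fetch backend whose value contains the `extra_browser_` prefix is treated as a Chrome-class browser for the status icon. The same convention can be unpacked in Python (a sketch using the tuple format shown above):

    # sketch: unpacking the naming convention used by the template condition
    PREFIX = "extra_browser_"
    fetch_backend = "extra_browser_custom browser URL"

    if PREFIX in fetch_backend:  # the template uses the same 'in' test
        browser_name = fetch_backend[len(PREFIX):]
        print(browser_name)      # -> 'custom browser URL'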
1
changedetectionio/tests/custom_browser_url/__init__.py
Normal file
@@ -0,0 +1 @@
+# placeholder
89
changedetectionio/tests/custom_browser_url/test_custom_browser_url.py
Normal file
@@ -0,0 +1,89 @@
+#!/usr/bin/python3
+import os
+
+from flask import url_for
+from ..util import live_server_setup, wait_for_all_checks
+
+def do_test(client, live_server, make_test_use_extra_browser=False):
+
+    # Grep for this string in the logs?
+    test_url = f"https://changedetection.io/ci-test.html"
+    custom_browser_name = 'custom browser URL'
+
+    # needs to be set and something like 'ws://127.0.0.1:3000?stealth=1&--disable-web-security=true'
+    assert os.getenv('PLAYWRIGHT_DRIVER_URL'), "Needs PLAYWRIGHT_DRIVER_URL set for this test"
+
+    #####################
+    res = client.post(
+        url_for("settings_page"),
+        data={"application-empty_pages_are_a_change": "",
+              "requests-time_between_check-minutes": 180,
+              'application-fetch_backend': "html_webdriver",
+              # browserless-custom-url is setup in .github/workflows/test-only.yml
+              # the test script run_custom_browser_url_tests.sh will look for 'custom-browser-search-string' in the container logs
+              'requests-extra_browsers-0-browser_connection_url': 'ws://browserless-custom-url:3000?stealth=1&--disable-web-security=true&custom-browser-search-string=1',
+              'requests-extra_browsers-0-browser_name': custom_browser_name
+              },
+        follow_redirects=True
+    )
+
+    assert b"Settings updated." in res.data
+
+    # Add our URL to the import page
+    res = client.post(
+        url_for("import_page"),
+        data={"urls": test_url},
+        follow_redirects=True
+    )
+
+    assert b"1 Imported" in res.data
+    wait_for_all_checks(client)
+
+    if make_test_use_extra_browser:
+
+        # So the name should appear in the edit page under "Request" > "Fetch Method"
+        res = client.get(
+            url_for("edit_page", uuid="first"),
+            follow_redirects=True
+        )
+        assert b'custom browser URL' in res.data
+
+        res = client.post(
+            url_for("edit_page", uuid="first"),
+            data={
+                "url": test_url,
+                "tags": "",
+                "headers": "",
+                'fetch_backend': f"extra_browser_{custom_browser_name}",
+                'webdriver_js_execute_code': ''
+            },
+            follow_redirects=True
+        )
+
+        assert b"Updated watch." in res.data
+        wait_for_all_checks(client)
+
+    # Force recheck
+    res = client.get(url_for("form_watch_checknow"), follow_redirects=True)
+    assert b'1 watches queued for rechecking.' in res.data
+
+    wait_for_all_checks(client)
+
+    res = client.get(
+        url_for("preview_page", uuid="first"),
+        follow_redirects=True
+    )
+    assert b'cool it works' in res.data
+
+
+# Requires playwright to be installed
+def test_request_via_custom_browser_url(client, live_server):
+    live_server_setup(live_server)
+    # We do this so we can grep the logs of the custom container and see if the request actually went through that container
+    do_test(client, live_server, make_test_use_extra_browser=True)
+
+
+def test_request_not_via_custom_browser_url(client, live_server):
+    live_server_setup(live_server)
+    # We do this so we can grep the logs of the custom container and see if the request actually went through that container
+    do_test(client, live_server, make_test_use_extra_browser=False)
@@ -24,7 +24,7 @@ def test_check_extract_text_from_diff(client, live_server):
    )

    assert b"1 Imported" in res.data
-   time.sleep(1)
+   wait_for_all_checks(client)

    # Load in 5 different numbers/changes
    last_date=""
@@ -33,8 +33,6 @@ def test_strip_regex_text_func():
        "/not"
    ]

-
-   fetcher = fetch_site_status.perform_site_check(datastore=False)
    stripped_content = html_tools.strip_ignore_text(test_content, ignore_lines)

    assert b"but 1 lines" in stripped_content
@@ -24,7 +24,6 @@ def test_strip_text_func():

    ignore_lines = ["sometimes"]

-   fetcher = fetch_site_status.perform_site_check(datastore=False)
    stripped_content = html_tools.strip_ignore_text(test_content, ignore_lines)

    assert b"sometimes" not in stripped_content
@@ -80,8 +80,11 @@ def test_headers_in_request(client, live_server):

    # Should be only one with headers set
    assert watches_with_headers==1
+   res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
+   assert b'Deleted' in res.data
+
def test_body_in_request(client, live_server):
    # Add our URL to the import page
    test_url = url_for('test_body', _external=True)
    if os.getenv('PLAYWRIGHT_DRIVER_URL'):

@@ -170,7 +173,8 @@ def test_body_in_request(client, live_server):
        follow_redirects=True
    )
    assert b"Body must be empty when Request Method is set to GET" in res.data
+   res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
+   assert b'Deleted' in res.data
+
def test_method_in_request(client, live_server):
    # Add our URL to the import page
@@ -1,5 +1,5 @@
from flask import url_for
-from . util import set_original_response, set_modified_response, live_server_setup
+from .util import set_original_response, set_modified_response, live_server_setup, wait_for_all_checks
import time

@@ -12,6 +12,7 @@ def test_bad_access(client, live_server):
    )

    assert b"1 Imported" in res.data
+   wait_for_all_checks(client)

    # Attempt to add a body with a GET method
    res = client.post(
@@ -59,7 +60,7 @@ def test_bad_access(client, live_server):
        data={"url": 'file:///tasty/disk/drive', "tags": ''},
        follow_redirects=True
    )
-   time.sleep(1)
+   wait_for_all_checks(client)
    res = client.get(url_for("index"))

    assert b'file:// type access is denied for security reasons.' in res.data
@@ -209,6 +209,7 @@ class update_worker(threading.Thread):
        from .processors import text_json_diff, restock_diff

        while not self.app.config.exit.is_set():
+           update_handler = None

            try:
                queued_item_data = self.q.get(block=False)
@@ -229,17 +230,35 @@ class update_worker(threading.Thread):
                    now = time.time()

                    try:
-                       processor = self.datastore.data['watching'][uuid].get('processor','text_json_diff')
+                       # Processor is what we are using for detecting the "Change"
+                       processor = self.datastore.data['watching'][uuid].get('processor', 'text_json_diff')
+                       # if system...
+
+                       # Abort processing when the content was the same as the last fetch
+                       skip_when_same_checksum = queued_item_data.item.get('skip_when_checksum_same')
+
                        # @todo some way to switch by name
+                       # Init a new 'difference_detection_processor'
+
                        if processor == 'restock_diff':
-                           update_handler = restock_diff.perform_site_check(datastore=self.datastore)
+                           update_handler = restock_diff.perform_site_check(datastore=self.datastore,
+                                                                            watch_uuid=uuid
+                                                                            )
                        else:
                            # Used as a default and also by some tests
-                           update_handler = text_json_diff.perform_site_check(datastore=self.datastore)
+                           update_handler = text_json_diff.perform_site_check(datastore=self.datastore,
+                                                                              watch_uuid=uuid
+                                                                              )

+                       # Clear last errors (move to preflight func?)
                        self.datastore.data['watching'][uuid]['browser_steps_last_error_step'] = None
-                       changed_detected, update_obj, contents = update_handler.run(uuid, skip_when_checksum_same=queued_item_data.item.get('skip_when_checksum_same'))
+
+                       update_handler.call_browser()
+
+                       changed_detected, update_obj, contents = update_handler.run_changedetection(uuid,
+                                                                                                   skip_when_checksum_same=skip_when_same_checksum,
+                                                                                                   )

                        # Re #342
                        # In Python 3, all strings are sequences of Unicode characters. There is a bytes type that holds raw bytes.
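This worker hunk splits what used to be a single `update_handler.run(...)` call into an explicit two-step lifecycle: construct the processor with its `watch_uuid`, have it fetch via `call_browser()`, then diff via `run_changedetection()`. A condensed sketch of that flow (error handling and the restock branch elided, names taken from the hunk above):

    # condensed sketch, not the exact worker code: construct, fetch, then diff
    def process_queued_item(datastore, uuid, queued_item_data):
        from changedetectionio.processors import text_json_diff

        skip_when_same_checksum = queued_item_data.item.get('skip_when_checksum_same')

        update_handler = text_json_diff.perform_site_check(datastore=datastore,
                                                           watch_uuid=uuid)
        update_handler.call_browser()  # step 1: fetch, possibly via an "extra browser"
        # step 2: run the actual change detection against what was fetched
        return update_handler.run_changedetection(uuid,
                                                  skip_when_checksum_same=skip_when_same_checksum)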
@@ -391,6 +410,9 @@ class update_worker(threading.Thread):
                        self.datastore.update_watch(uuid=uuid, update_obj={'last_error': str(e)})
                        # Other serious error
                        process_changedetection_results = False
+                       # import traceback
+                       # print(traceback.format_exc())

                    else:
                        # Crash protection, the watch entry could have been removed by this point (during a slow chrome fetch etc)
                        if not self.datastore.data['watching'].get(uuid):