Compare commits


4 Commits

Author    SHA1        Message                                                              Date
dgtlmoon  5aaec3f8b8  Ability to block access to simplehosts                               2023-08-13 16:50:44 +02:00
dgtlmoon  1ae1b58c93  use floating env                                                     2023-08-13 15:52:51 +02:00
dgtlmoon  60c1c96e57  validators 0.21 needs 'simple host' setting for single word hosts   2023-08-13 15:51:28 +02:00
dgtlmoon  23ef67efec  Add debug to build                                                   2023-08-13 15:01:34 +02:00
205 changed files with 5493 additions and 11918 deletions

.github/dependabot.yml

@@ -1,14 +0,0 @@
version: 2
updates:
- package-ecosystem: github-actions
directory: /
schedule:
interval: "weekly"
"caronc/apprise":
versioning-strategy: "increase"
schedule:
interval: "daily"
groups:
all:
patterns:
- "*"

.github/test/Dockerfile-alpine

@@ -2,7 +2,7 @@
# Test that we can still build on Alpine (musl modified libc https://musl.libc.org/)
# Some packages wont install via pypi because they dont have a wheel available under this architecture.
FROM ghcr.io/linuxserver/baseimage-alpine:3.18
FROM ghcr.io/linuxserver/baseimage-alpine:3.16
ENV PYTHONUNBUFFERED=1
COPY requirements.txt /requirements.txt
@@ -12,10 +12,8 @@ RUN \
cargo \
g++ \
gcc \
jpeg-dev \
libc-dev \
libffi-dev \
libjpeg \
libxslt-dev \
make \
openssl-dev \
@@ -28,6 +26,6 @@ RUN \
py3-pip && \
echo "**** pip3 install test of changedetection.io ****" && \
pip3 install -U pip wheel setuptools && \
pip3 install -U --no-cache-dir --find-links https://wheel-index.linuxserver.io/alpine-3.18/ -r /requirements.txt && \
pip3 install -U --no-cache-dir --find-links https://wheel-index.linuxserver.io/alpine-3.16/ -r /requirements.txt && \
apk del --purge \
build-dependencies

(CodeQL analysis workflow)

@@ -30,11 +30,11 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v2
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
@@ -45,7 +45,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v3
uses: github/codeql-action/autobuild@v1
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
@@ -59,4 +59,4 @@ jobs:
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
uses: github/codeql-action/analyze@v1
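
These hunks only move the checkout and CodeQL actions between major versions; the step bodies are unchanged. Assembled from the visible context lines and the v3/v4 side of the compare, the job skeleton is (the language matrix itself sits outside the captured hunks):

```yaml
# Assembled from the context lines above; only the action versions differ
# between the two sides of this compare.
steps:
  - name: Checkout repository
    uses: actions/checkout@v4
  - name: Initialize CodeQL
    uses: github/codeql-action/init@v3
    with:
      languages: ${{ matrix.language }}
  - name: Autobuild
    uses: github/codeql-action/autobuild@v3
  - name: Perform CodeQL Analysis
    uses: github/codeql-action/analyze@v3
```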

(Docker container build and push workflow)

@@ -39,11 +39,11 @@ jobs:
# Or if we are in a tagged release scenario.
if: ${{ github.event.workflow_run.conclusion == 'success' }} || ${{ github.event.release.tag_name }} != ''
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.11
uses: actions/setup-python@v5
- uses: actions/checkout@v2
- name: Set up Python 3.9
uses: actions/setup-python@v2
with:
python-version: 3.11
python-version: 3.9
- name: Install dependencies
run: |
@@ -58,27 +58,27 @@ jobs:
echo ${{ github.ref }} > changedetectionio/tag.txt
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@v1
with:
image: tonistiigi/binfmt:latest
platforms: all
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Docker Hub Container Registry
uses: docker/login-action@v3
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v1
with:
install: true
version: latest
@@ -88,17 +88,16 @@ jobs:
- name: Build and push :dev
id: docker_build
if: ${{ github.ref }} == "refs/heads/master"
uses: docker/build-push-action@v6
uses: docker/build-push-action@v2
with:
context: ./
file: ./Dockerfile
push: true
tags: |
${{ secrets.DOCKER_HUB_USERNAME }}/changedetection.io:dev,ghcr.io/${{ github.repository }}:dev
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v8,linux/arm64/v8
cache-from: type=gha
cache-to: type=gha,mode=max
platforms: linux/amd64,linux/arm64,linux/arm/v6,linux/arm/v7
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
# Looks like this was disabled
# provenance: false
@@ -106,7 +105,7 @@ jobs:
- name: Build and push :tag
id: docker_build_tag_release
if: github.event_name == 'release' && startsWith(github.event.release.tag_name, '0.')
uses: docker/build-push-action@v6
uses: docker/build-push-action@v2
with:
context: ./
file: ./Dockerfile
@@ -116,12 +115,19 @@ jobs:
ghcr.io/dgtlmoon/changedetection.io:${{ github.event.release.tag_name }}
${{ secrets.DOCKER_HUB_USERNAME }}/changedetection.io:latest
ghcr.io/dgtlmoon/changedetection.io:latest
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v8,linux/arm64/v8
cache-from: type=gha
cache-to: type=gha,mode=max
platforms: linux/amd64,linux/arm64,linux/arm/v6,linux/arm/v7
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
# Looks like this was disabled
# provenance: false
- name: Image digest
run: echo step SHA ${{ steps.vars.outputs.sha_short }} tag ${{steps.vars.outputs.tag}} branch ${{steps.vars.outputs.branch}} digest ${{ steps.docker_build.outputs.digest }}
- name: Cache Docker layers
uses: actions/cache@v2
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
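
Besides action version bumps, the main functional change in this file is the Buildx layer cache: one side uses the GitHub Actions cache backend (`type=gha`), the other persists a local cache directory through `actions/cache`. A condensed sketch of the `type=gha` approach:

```yaml
# Condensed from the type=gha side of the diff: the gha cache backend talks
# to the GitHub Actions cache service directly, so no separate actions/cache
# step or /tmp/.buildx-cache directory is needed.
- name: Build and push :dev
  uses: docker/build-push-action@v6
  with:
    context: ./
    file: ./Dockerfile
    push: true
    platforms: linux/amd64,linux/arm64
    cache-from: type=gha
    cache-to: type=gha,mode=max
```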

(PyPI publish workflow)

@@ -1,77 +0,0 @@
name: Publish Python 🐍 distribution 📦 to PyPI and TestPyPI
on: push
jobs:
build:
name: Build distribution 📦
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.11"
- name: Install pypa/build
run: >-
python3 -m
pip install
build
--user
- name: Build a binary wheel and a source tarball
run: python3 -m build
- name: Store the distribution packages
uses: actions/upload-artifact@v4
with:
name: python-package-distributions
path: dist/
test-pypi-package:
name: Test the built 📦 package works basically.
runs-on: ubuntu-latest
needs:
- build
steps:
- name: Download all the dists
uses: actions/download-artifact@v4
with:
name: python-package-distributions
path: dist/
- name: Set up Python 3.11
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Test that the basic pip built package runs without error
run: |
set -ex
sudo pip3 install --upgrade pip
pip3 install dist/changedetection.io*.whl
changedetection.io -d /tmp -p 10000 &
sleep 3
curl --retry-connrefused --retry 6 http://127.0.0.1:10000/static/styles/pure-min.css >/dev/null
curl --retry-connrefused --retry 6 http://127.0.0.1:10000/ >/dev/null
killall changedetection.io
publish-to-pypi:
name: >-
Publish Python 🐍 distribution 📦 to PyPI
if: startsWith(github.ref, 'refs/tags/') # only publish to PyPI on tag pushes
needs:
- test-pypi-package
runs-on: ubuntu-latest
environment:
name: release
url: https://pypi.org/p/changedetection.io
permissions:
id-token: write # IMPORTANT: mandatory for trusted publishing
steps:
- name: Download all the dists
uses: actions/download-artifact@v4
with:
name: python-package-distributions
path: dist/
- name: Publish distribution 📦 to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
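
The `publish-to-pypi` job above uses PyPI trusted publishing: the `id-token: write` permission lets the runner mint an OIDC token that PyPI exchanges for upload rights, so no API-token secret is involved. The workflow's name mentions TestPyPI, but no TestPyPI upload step appears in the captured diff; for reference only, the same action can target TestPyPI by overriding the repository URL (hedged, from the action's documented inputs):

```yaml
# Hedged aside -- not part of the diff above. pypa/gh-action-pypi-publish
# accepts a repository-url input for publishing to TestPyPI instead of PyPI:
- name: Publish distribution 📦 to TestPyPI
  uses: pypa/gh-action-pypi-publish@release/v1
  with:
    repository-url: https://test.pypi.org/legacy/
```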

(container build test workflow)

@@ -11,14 +11,12 @@ on:
- requirements.txt
- Dockerfile
- .github/workflows/*
- .github/test/Dockerfile*
pull_request:
paths:
- requirements.txt
- Dockerfile
- .github/workflows/*
- .github/test/Dockerfile*
# Changes to requirements.txt packages and Dockerfile may or may not always be compatible with arm etc, so worth testing
# @todo: some kind of path filter for requirements.txt and Dockerfile
@@ -26,22 +24,22 @@ jobs:
test-container-build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.11
uses: actions/setup-python@v5
- uses: actions/checkout@v2
- name: Set up Python 3.9
uses: actions/setup-python@v2
with:
python-version: 3.11
python-version: 3.9
# Just test that the build works, some libraries won't compile on ARM/rPi etc
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@v1
with:
image: tonistiigi/binfmt:latest
platforms: all
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v1
with:
install: true
version: latest
@@ -51,7 +49,7 @@ jobs:
# Check we can still build under alpine/musl
- name: Test that the docker containers can build (musl via alpine check)
id: docker_build_musl
uses: docker/build-push-action@v6
uses: docker/build-push-action@v2
with:
context: ./
file: ./.github/test/Dockerfile-alpine
@@ -59,12 +57,12 @@ jobs:
- name: Test that the docker containers can build
id: docker_build
uses: docker/build-push-action@v6
uses: docker/build-push-action@v2
# https://github.com/docker/build-push-action#customizing
with:
context: ./
file: ./Dockerfile
platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v8,linux/arm64/v8
platforms: linux/arm/v6,linux/arm/v7,linux/amd64,linux/arm64,
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
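
Condensed from the context above, the build-test recipe is: QEMU registers binfmt handlers so Buildx can cross-build the listed platforms on an amd64 runner, and `push` is simply omitted so the job only verifies that the images build:

```yaml
# Condensed from the steps above; no push/tags, so this only proves the
# multi-architecture build succeeds.
- name: Set up QEMU
  uses: docker/setup-qemu-action@v3
  with:
    image: tonistiigi/binfmt:latest
    platforms: all
- name: Set up Docker Buildx
  uses: docker/setup-buildx-action@v3
  with:
    install: true
- name: Test that the docker containers can build
  uses: docker/build-push-action@v6
  with:
    context: ./
    file: ./Dockerfile
    platforms: linux/amd64,linux/arm64,linux/arm/v7
```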

.github/workflows/test-only.yml

@@ -4,10 +4,17 @@ name: ChangeDetection.io App Test
on: [push, pull_request]
jobs:
lint-code:
test-application:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v2
# Mainly just for link/flake8
- name: Set up Python 3.10
uses: actions/setup-python@v2
with:
python-version: '3.10'
- name: Lint with flake8
run: |
pip3 install flake8
@@ -16,24 +23,79 @@ jobs:
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
test-application-3-10:
needs: lint-code
uses: ./.github/workflows/test-stack-reusable-workflow.yml
with:
python-version: '3.10'
- name: Spin up ancillary testable services
run: |
docker network create changedet-network
# Selenium+browserless
docker run --network changedet-network -d --hostname selenium -p 4444:4444 --rm --shm-size="2g" selenium/standalone-chrome-debug:3.141.59
docker run --network changedet-network -d --hostname browserless -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" -e "DEFAULT_LAUNCH_ARGS=[\"--window-size=1920,1080\"]" --rm -p 3000:3000 --shm-size="2g" browserless/chrome:1.53-chrome-stable
test-application-3-11:
needs: lint-code
uses: ./.github/workflows/test-stack-reusable-workflow.yml
with:
python-version: '3.11'
skip-pypuppeteer: true
- name: Build changedetection.io container for testing
run: |
# Build a changedetection.io container and start testing inside
docker build . -t test-changedetectionio
# Debug info
docker run test-changedetectionio bash -c 'pip list'
test-application-3-12:
needs: lint-code
uses: ./.github/workflows/test-stack-reusable-workflow.yml
with:
python-version: '3.12'
skip-pypuppeteer: true
- name: Spin up ancillary SMTP+Echo message test server
run: |
# Debug SMTP server/echo message back server
docker run --network changedet-network -d -p 11025:11025 -p 11080:11080 --hostname mailserver test-changedetectionio bash -c 'python changedetectionio/tests/smtp/smtp-test-server.py'
- name: Test built container with pytest
run: |
# Unit tests
docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_notification_diff'
# All tests
docker run --network changedet-network test-changedetectionio bash -c 'cd changedetectionio && ./run_basic_tests.sh'
- name: Test built container selenium+browserless/playwright
run: |
# Selenium fetch
docker run --rm -e "WEBDRIVER_URL=http://selenium:4444/wd/hub" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/fetchers/test_content.py && pytest tests/test_errorhandling.py'
# Playwright/Browserless fetch
docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/fetchers/test_content.py && pytest tests/test_errorhandling.py && pytest tests/visualselector/test_fetch_data.py'
# Settings headers playwright tests - Call back in from Browserless, check headers
docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000?dumpio=true" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'
docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "WEBDRIVER_URL=http://selenium:4444/wd/hub" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'
docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "USE_EXPERIMENTAL_PUPPETEER_FETCH=yes" -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000?dumpio=true" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'
# restock detection via playwright - added name=changedet here so that playwright/browserless can connect to it
docker run --rm --name "changedet" -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-port=5004 --live-server-host=0.0.0.0 tests/restock/test_restock.py'
- name: Test SMTP notification mime types
run: |
# SMTP content types - needs the 'Debug SMTP server/echo message back server' container from above
docker run --rm --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/smtp/test_notification_smtp.py'
- name: Test with puppeteer fetcher and disk cache
run: |
docker run --rm -e "PUPPETEER_DISK_CACHE=/tmp/data/" -e "USE_EXPERIMENTAL_PUPPETEER_FETCH=yes" -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/fetchers/test_content.py && pytest tests/test_errorhandling.py && pytest tests/visualselector/test_fetch_data.py'
# Browserless would have had -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" added above
- name: Test proxy interaction
run: |
cd changedetectionio
./run_proxy_tests.sh
cd ..
- name: Test changedetection.io container starts+runs basically without error
run: |
docker run -p 5556:5000 -d test-changedetectionio
sleep 3
# Should return 0 (no error) when grep finds it
curl -s http://localhost:5556 |grep -q checkbox-uuid
# and IPv6
curl -s -g -6 "http://[::1]:5556"|grep -q checkbox-uuid
#export WEBDRIVER_URL=http://localhost:4444/wd/hub
#pytest tests/fetchers/test_content.py
#pytest tests/test_errorhandling.py
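
On one side of this diff the monolithic test job is reduced to a lint job plus thin per-Python jobs that delegate to the reusable workflow shown further below (`test-stack-reusable-workflow.yml`). The caller/callee contract, condensed from the diff:

```yaml
# Caller side, condensed from this diff: inputs passed via `with:` must match
# the callee's on.workflow_call.inputs declarations.
jobs:
  test-application-3-11:
    needs: lint-code
    uses: ./.github/workflows/test-stack-reusable-workflow.yml
    with:
      python-version: '3.11'
      skip-pypuppeteer: true   # pypuppeteer tests are skipped on 3.11/3.12
```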

.github/workflows/test-pip-build.yml (new file, 36 lines)

@@ -0,0 +1,36 @@
name: ChangeDetection.io PIP package test
# Triggers the workflow on push or pull request events
# This line doesnt work, even tho it is the documented one
on: [push, pull_request]
# Changes to requirements.txt packages and Dockerfile may or may not always be compatible with arm etc, so worth testing
# @todo: some kind of path filter for requirements.txt and Dockerfile
jobs:
test-pip-build-basics:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.9
uses: actions/setup-python@v2
with:
python-version: 3.9
- name: Test that the basic pip built package runs without error
run: |
set -e
mkdir dist
pip3 install wheel
python3 setup.py bdist_wheel
pip3 install -r requirements.txt
rm ./changedetection.py
rm -rf changedetectio
pip3 install dist/changedetection.io*.whl
changedetection.io -d /tmp -p 10000 &
sleep 3
curl http://127.0.0.1:10000/static/styles/pure-min.css >/dev/null
killall -9 changedetection.io
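
This older workflow builds the wheel via the legacy `setup.py bdist_wheel` path; the newer PyPI workflow earlier in this compare uses the PEP 517 build frontend instead. The equivalent step would look roughly like this (a sketch, not part of the diff):

```yaml
# Sketch only: the PEP 517 equivalent of the setup.py bdist_wheel step above,
# matching the approach used in the PyPI publish workflow.
- name: Build a wheel and source tarball
  run: |
    python3 -m pip install --user build
    python3 -m build
```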

.github/workflows/test-stack-reusable-workflow.yml

@@ -1,239 +0,0 @@
name: ChangeDetection.io App Test
on:
workflow_call:
inputs:
python-version:
description: 'Python version to use'
required: true
type: string
default: '3.10'
skip-pypuppeteer:
description: 'Skip PyPuppeteer (not supported in 3.11/3.12)'
required: false
type: boolean
default: false
jobs:
test-application:
runs-on: ubuntu-latest
env:
PYTHON_VERSION: ${{ inputs.python-version }}
steps:
- uses: actions/checkout@v4
# Mainly just for link/flake8
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Build changedetection.io container for testing under Python ${{ env.PYTHON_VERSION }}
run: |
echo "---- Building for Python ${{ env.PYTHON_VERSION }} -----"
# Build a changedetection.io container and start testing inside
docker build --build-arg PYTHON_VERSION=${{ env.PYTHON_VERSION }} --build-arg LOGGER_LEVEL=TRACE -t test-changedetectionio .
# Debug info
docker run test-changedetectionio bash -c 'pip list'
- name: We should be Python ${{ env.PYTHON_VERSION }} ...
run: |
docker run test-changedetectionio bash -c 'python3 --version'
- name: Spin up ancillary testable services
run: |
docker network create changedet-network
# Selenium
docker run --network changedet-network -d --hostname selenium -p 4444:4444 --rm --shm-size="2g" selenium/standalone-chrome:4
# SocketPuppetBrowser + Extra for custom browser test
docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser --hostname sockpuppetbrowser --rm -p 3000:3000 dgtlmoon/sockpuppetbrowser:latest
docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser-custom-url --hostname sockpuppetbrowser-custom-url -p 3001:3000 --rm dgtlmoon/sockpuppetbrowser:latest
- name: Spin up ancillary SMTP+Echo message test server
run: |
# Debug SMTP server/echo message back server
docker run --network changedet-network -d -p 11025:11025 -p 11080:11080 --hostname mailserver test-changedetectionio bash -c 'pip3 install aiosmtpd && python changedetectionio/tests/smtp/smtp-test-server.py'
docker ps
- name: Show docker container state and other debug info
run: |
set -x
echo "Running processes in docker..."
docker ps
- name: Test built container with Pytest (generally as requests/plaintext fetching)
run: |
# Unit tests
echo "run test with unittest"
docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_notification_diff'
docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_watch_model'
docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_jinja2_security'
# All tests
echo "run test with pytest"
# The default pytest logger_level is TRACE
# To change logger_level for pytest(test/conftest.py),
# append the docker option. e.g. '-e LOGGER_LEVEL=DEBUG'
docker run --name test-cdio-basic-tests --network changedet-network test-changedetectionio bash -c 'cd changedetectionio && ./run_basic_tests.sh'
# PLAYWRIGHT/NODE-> CDP
- name: Playwright and SocketPuppetBrowser - Specific tests in built container
run: |
# Playwright via Sockpuppetbrowser fetch
# tests/visualselector/test_fetch_data.py will do browser steps
docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_content.py'
docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_errorhandling.py'
docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/visualselector/test_fetch_data.py'
docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_custom_js_before_content.py'
- name: Playwright and SocketPuppetBrowser - Headers and requests
run: |
# Settings headers playwright tests - Call back in from Sockpuppetbrowser, check headers
docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000?dumpio=true" --network changedet-network test-changedetectionio bash -c 'find .; cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py; pwd;find .'
- name: Playwright and SocketPuppetBrowser - Restock detection
run: |
# restock detection via playwright - added name=changedet here so that playwright and sockpuppetbrowser can connect to it
docker run --rm --name "changedet" -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-port=5004 --live-server-host=0.0.0.0 tests/restock/test_restock.py'
# STRAIGHT TO CDP
- name: Pyppeteer and SocketPuppetBrowser - Specific tests in built container
if: ${{ inputs.skip-pypuppeteer == false }}
run: |
# Playwright via Sockpuppetbrowser fetch
docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_content.py'
docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_errorhandling.py'
docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/visualselector/test_fetch_data.py'
docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_custom_js_before_content.py'
- name: Pyppeteer and SocketPuppetBrowser - Headers and requests checks
if: ${{ inputs.skip-pypuppeteer == false }}
run: |
# Settings headers playwright tests - Call back in from Sockpuppetbrowser, check headers
docker run --name "changedet" --hostname changedet --rm -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000?dumpio=true" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'
- name: Pyppeteer and SocketPuppetBrowser - Restock detection
if: ${{ inputs.skip-pypuppeteer == false }}
run: |
# restock detection via playwright - added name=changedet here so that playwright and sockpuppetbrowser can connect to it
docker run --rm --name "changedet" -e "FLASK_SERVER_NAME=changedet" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-port=5004 --live-server-host=0.0.0.0 tests/restock/test_restock.py'
# SELENIUM
- name: Specific tests in built container for Selenium
run: |
# Selenium fetch
docker run --rm -e "WEBDRIVER_URL=http://selenium:4444/wd/hub" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/fetchers/test_content.py && pytest tests/test_errorhandling.py'
- name: Specific tests in built container for headers and requests checks with Selenium
run: |
docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "WEBDRIVER_URL=http://selenium:4444/wd/hub" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'
# OTHER STUFF
- name: Test SMTP notification mime types
run: |
# SMTP content types - needs the 'Debug SMTP server/echo message back server' container from above
# "mailserver" hostname defined above
docker run --rm --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/smtp/test_notification_smtp.py'
# @todo Add a test via playwright/puppeteer
# squid with auth is tested in run_proxy_tests.sh -> tests/proxy_list/test_select_custom_proxy.py
- name: Test proxy squid style interaction
run: |
cd changedetectionio
./run_proxy_tests.sh
cd ..
- name: Test proxy SOCKS5 style interaction
run: |
cd changedetectionio
./run_socks_proxy_tests.sh
cd ..
- name: Test custom browser URL
run: |
cd changedetectionio
./run_custom_browser_url_tests.sh
cd ..
- name: Test changedetection.io container starts+runs basically without error
run: |
docker run --name test-changedetectionio -p 5556:5000 -d test-changedetectionio
sleep 3
# Should return 0 (no error) when grep finds it
curl --retry-connrefused --retry 6 -s http://localhost:5556 |grep -q checkbox-uuid
# and IPv6
curl --retry-connrefused --retry 6 -s -g -6 "http://[::1]:5556"|grep -q checkbox-uuid
# Check whether TRACE log is enabled.
# Also, check whether TRACE is came from STDERR
docker logs test-changedetectionio 2>&1 1>/dev/null | grep 'TRACE log is enabled' || exit 1
# Check whether DEBUG is came from STDOUT
docker logs test-changedetectionio 2>/dev/null | grep 'DEBUG' || exit 1
docker kill test-changedetectionio
- name: Test changedetection.io SIGTERM and SIGINT signal shutdown
run: |
echo SIGINT Shutdown request test
docker run --name sig-test -d test-changedetectionio
sleep 3
echo ">>> Sending SIGINT to sig-test container"
docker kill --signal=SIGINT sig-test
sleep 3
# invert the check (it should be not 0/not running)
docker ps
# check signal catch(STDERR) log. Because of
# changedetectionio/__init__.py: logger.add(sys.stderr, level=logger_level)
docker logs sig-test 2>&1 | grep 'Shutdown: Got Signal - SIGINT' || exit 1
test -z "`docker ps|grep sig-test`"
if [ $? -ne 0 ]
then
echo "Looks like container was running when it shouldnt be"
docker ps
exit 1
fi
# @todo - scan the container log to see the right "graceful shutdown" text exists
docker rm sig-test
echo SIGTERM Shutdown request test
docker run --name sig-test -d test-changedetectionio
sleep 3
echo ">>> Sending SIGTERM to sig-test container"
docker kill --signal=SIGTERM sig-test
sleep 3
# invert the check (it should be not 0/not running)
docker ps
# check signal catch(STDERR) log. Because of
# changedetectionio/__init__.py: logger.add(sys.stderr, level=logger_level)
docker logs sig-test 2>&1 | grep 'Shutdown: Got Signal - SIGTERM' || exit 1
test -z "`docker ps|grep sig-test`"
if [ $? -ne 0 ]
then
echo "Looks like container was running when it shouldnt be"
docker ps
exit 1
fi
# @todo - scan the container log to see the right "graceful shutdown" text exists
docker rm sig-test
- name: Dump container log
if: always()
run: |
mkdir output-logs
docker logs test-cdio-basic-tests > output-logs/test-cdio-basic-tests-stdout-${{ env.PYTHON_VERSION }}.txt
docker logs test-cdio-basic-tests 2> output-logs/test-cdio-basic-tests-stderr-${{ env.PYTHON_VERSION }}.txt
- name: Store everything including test-datastore
if: always()
uses: actions/upload-artifact@v4
with:
name: test-cdio-basic-tests-output-py${{ env.PYTHON_VERSION }}
path: .

COMMERCIAL_LICENCE.md

@@ -1,54 +0,0 @@
# Generally
In any commercial activity involving 'Hosting' (as defined herein), whether in part or in full, this license must be executed and adhered to.
# Commercial License Agreement
This Commercial License Agreement ("Agreement") is entered into by and between Mr Morresi (the original creator of this software) here-in ("Licensor") and (your company or personal name) _____________ ("Licensee"). This Agreement sets forth the terms and conditions under which Licensor provides its software ("Software") and services to Licensee for the purpose of reselling the software either in part or full, as part of any commercial activity where the activity involves a third party.
### Definition of Hosting
For the purposes of this Agreement, "hosting" means making the functionality of the Program or modified version available to third parties as a service. This includes, without limitation:
- Enabling third parties to interact with the functionality of the Program or modified version remotely through a computer network.
- Offering a service the value of which entirely or primarily derives from the value of the Program or modified version.
- Offering a service that accomplishes for users the primary purpose of the Program or modified version.
## 1. Grant of License
Subject to the terms and conditions of this Agreement, Licensor grants Licensee a non-exclusive, non-transferable license to install, use, and resell the Software. Licensee may:
- Resell the Software as part of a service offering or as a standalone product.
- Host the Software on a server and provide it as a hosted service (e.g., Software as a Service - SaaS).
- Integrate the Software into a larger product or service that is then sold or provided for commercial purposes, where the software is used either in part or full.
## 2. License Fees
Licensee agrees to pay Licensor the license fees specified in the ordering document. License fees are due and payable as specified in the ordering document. The fees may include initial licensing costs and recurring fees based on the number of end users, instances of the Software resold, or revenue generated from the resale activities.
## 3. Resale Conditions
Licensee must comply with the following conditions when reselling the Software, whether the software is resold in part or full:
- Provide end users with access to the source code under the same open-source license conditions as provided by Licensor.
- Clearly state in all marketing and sales materials that the Software is provided under a commercial license from Licensor, and provide a link back to https://changedetection.io.
- Ensure end users are aware of and agree to the terms of the commercial license prior to resale.
- Do not sublicense or transfer the Software to third parties except as part of an authorized resale activity.
## 4. Hosting and Provision of Services
Licensee may host the Software (either in part or full) on its servers and provide it as a hosted service to end users. The following conditions apply:
- Licensee must ensure that all hosted versions of the Software comply with the terms of this Agreement.
- Licensee must provide Licensor with regular reports detailing the number of end users and instances of the hosted service.
- Any modifications to the Software made by Licensee for hosting purposes must be made available to end users under the same open-source license conditions, unless agreed otherwise.
## 5. Services
Licensor will provide support and maintenance services as described in the support policy referenced in the ordering document should such an agreement be signed by all parties. Additional fees may apply for support services provided to end users resold by Licensee.
## 6. Reporting and Audits
Licensee agrees to provide Licensor with regular reports detailing the number of instances, end users, and revenue generated from the resale of the Software. Licensor reserves the right to audit Licensees records to ensure compliance with this Agreement.
## 7. Term and Termination
This Agreement shall commence on the effective date and continue for the period set forth in the ordering document unless terminated earlier in accordance with this Agreement. Either party may terminate this Agreement if the other party breaches any material term and fails to cure such breach within thirty (30) days after receipt of written notice.
## 8. Limitation of Liability and Disclaimer of Warranty
Executing this commercial license does not waive the Limitation of Liability or Disclaimer of Warranty as stated in the open-source LICENSE provided with the Software. The Software is provided "as is," without warranty of any kind, express or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose, and noninfringement. In no event shall the authors or copyright holders be liable for any claim, damages, or other liability, whether in an action of contract, tort, or otherwise, arising from, out of, or in connection with the Software or the use or other dealings in the Software.
## 9. Governing Law
This Agreement shall be governed by and construed in accordance with the laws of the Czech Republic.
## Contact Information
For commercial licensing inquiries, please contact contact@changedetection.io and dgtlmoon@gmail.com.

CONTRIBUTING.md

@@ -2,7 +2,7 @@ Contributing is always welcome!
I am no professional flask developer, if you know a better way that something can be done, please let me know!
Otherwise, it's always best to PR into the `master` branch.
Otherwise, it's always best to PR into the `dev` branch.
Please be sure that all new functionality has a matching test!

Dockerfile

@@ -1,11 +1,5 @@
# pip dependencies install stage
# @NOTE! I would love to move to 3.11 but it breaks the async handler in changedetectionio/content_fetchers/puppeteer.py
# If you know how to fix it, please do! and test it for both 3.10 and 3.11
ARG PYTHON_VERSION=3.11
FROM python:${PYTHON_VERSION}-slim-bookworm AS builder
FROM python:3.10-slim-bullseye as builder
# See `cryptography` pin comment in requirements.txt
ARG CRYPTOGRAPHY_DONT_BUILD_RUST=1
@@ -26,22 +20,20 @@ WORKDIR /install
COPY requirements.txt /requirements.txt
# --extra-index-url https://www.piwheels.org/simple is for cryptography module to be prebuilt (or rustc etc needs to be installed)
RUN pip install --extra-index-url https://www.piwheels.org/simple --target=/dependencies -r /requirements.txt
RUN pip install --target=/dependencies -r /requirements.txt
# Playwright is an alternative to Selenium
# Excluded this package from requirements.txt to prevent arm/v6 and arm/v7 builds from failing
# https://github.com/dgtlmoon/changedetection.io/pull/1067 also musl/alpine (not supported)
RUN pip install --target=/dependencies playwright~=1.41.2 \
RUN pip install --target=/dependencies playwright~=1.27.1 \
|| echo "WARN: Failed to install Playwright. The application can still run, but the Playwright option will be disabled."
# Final image stage
FROM python:${PYTHON_VERSION}-slim-bookworm
FROM python:3.10-slim-bullseye
RUN apt-get update && apt-get install -y --no-install-recommends \
libssl1.1 \
libxslt1.1 \
# For presenting price amounts correctly in the restock/price detection overview
locales \
# For pdftohtml
poppler-utils \
zlib1g \
@@ -62,17 +54,12 @@ ENV PYTHONPATH=/usr/local
EXPOSE 5000
# The actual flask app module
# The actual flask app
COPY changedetectionio /app/changedetectionio
# Starting wrapper
# The eventlet server wrapper
COPY changedetection.py /app/changedetection.py
# Github Action test purpose(test-only.yml).
# On production, it is effectively LOGGER_LEVEL=''.
ARG LOGGER_LEVEL=''
ENV LOGGER_LEVEL "$LOGGER_LEVEL"
WORKDIR /app
CMD ["python", "./changedetection.py", "-d", "/datastore"]
CMD [ "python", "./changedetection.py" , "-d", "/datastore"]

MANIFEST.in

@@ -1,8 +1,8 @@
recursive-include changedetectionio/api *
recursive-include changedetectionio/blueprint *
recursive-include changedetectionio/content_fetchers *
recursive-include changedetectionio/model *
recursive-include changedetectionio/processors *
recursive-include changedetectionio/res *
recursive-include changedetectionio/static *
recursive-include changedetectionio/templates *
recursive-include changedetectionio/tests *
@@ -10,12 +10,9 @@ prune changedetectionio/static/package-lock.json
prune changedetectionio/static/styles/node_modules
prune changedetectionio/static/styles/package-lock.json
include changedetection.py
include requirements.txt
include README-pip.md
global-exclude *.pyc
global-exclude node_modules
global-exclude venv
global-exclude test-datastore
global-exclude changedetection.io*dist-info
global-exclude changedetectionio/tests/proxy_socks5/test-datastore

Procfile (new file, 1 line)

@@ -0,0 +1 @@
web: python3 ./changedetection.py -C -d ./datastore -p $PORT

README-pip.md

@@ -2,44 +2,19 @@
Live your data-life pro-actively, track website content changes and receive notifications via Discord, Email, Slack, Telegram and 70+ more
[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot.png" style="max-width:100%;" alt="Self-hosted web page change monitoring, list of websites with changes" title="Self-hosted web page change monitoring, list of websites with changes" />](https://changedetection.io)
[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot.png" style="max-width:100%;" alt="Self-hosted web page change monitoring" title="Self-hosted web page change monitoring" />](https://changedetection.io)
[**Don't have time? Let us host it for you! try our extremely affordable subscription use our proxies and support!**](https://changedetection.io)
### Target specific parts of the webpage using the Visual Selector tool.
Available when connected to a <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Playwright-content-fetcher">playwright content fetcher</a> (included as part of our subscription service)
[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/visualselector-anim.gif" style="max-width:100%;" alt="Select parts and elements of a web page to monitor for changes" title="Select parts and elements of a web page to monitor for changes" />](https://changedetection.io?src=pip)
### Easily see what changed, examine by word, line, or individual character.
[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot-diff.png" style="max-width:100%;" alt="Self-hosted web page change monitoring context difference " title="Self-hosted web page change monitoring context difference " />](https://changedetection.io?src=pip)
### Perform interactive browser steps
Fill in text boxes, click buttons and more, setup your changedetection scenario.
Using the **Browser Steps** configuration, add basic steps before performing change detection, such as logging into websites, adding a product to a cart, accept cookie logins, entering dates and refining searches.
[<img src="docs/browsersteps-anim.gif" style="max-width:100%;" alt="Website change detection with interactive browser steps, detect changes behind login and password, search queries and more" title="Website change detection with interactive browser steps, detect changes behind login and password, search queries and more" />](https://changedetection.io?src=pip)
After **Browser Steps** have been run, then visit the **Visual Selector** tab to refine the content you're interested in.
Requires Playwright to be enabled.
### Example use cases
#### Example use cases
- Products and services have a change in pricing
- _Out of stock notification_ and _Back In stock notification_
- Monitor and track PDF file changes, know when a PDF file has text changes.
- Governmental department updates (changes are often only on their websites)
- New software releases, security advisories when you're not on their mailing list.
- Festivals with changes
- Discogs restock alerts and monitoring
- Realestate listing changes
- Know when your favourite whiskey is on sale, or other special deals are announced before anyone else
- COVID related news from government websites
@@ -52,34 +27,18 @@ Requires Playwright to be enabled.
- Create RSS feeds based on changes in web content
- Monitor HTML source code for unexpected changes, strengthen your PCI compliance
- You have a very sensitive list of URLs to watch and you do _not_ want to use the paid alternatives. (Remember, _you_ are the product)
- Get notified when certain keywords appear in Twitter search results
- Proactively search for jobs, get notified when companies update their careers page, search job portals for keywords.
- Get alerts when new job positions are open on Bamboo HR and other job platforms
- Website defacement monitoring
- Pokémon Card Restock Tracker / Pokémon TCG Tracker
- RegTech - stay ahead of regulatory changes, regulatory compliance
_Need an actual Chrome runner with Javascript support? We support fetching via WebDriver and Playwright!</a>_
#### Key Features
- Lots of trigger filters, such as "Trigger on text", "Remove text by selector", "Ignore text", "Extract text", also using regular-expressions!
- Target elements with xPath(1.0) and CSS Selectors, Easily monitor complex JSON with JSONPath or jq
- Target elements with xPath and CSS Selectors, Easily monitor complex JSON with JSONPath or jq
- Switch between fast non-JS and Chrome JS based "fetchers"
- Track changes in PDF files (Monitor text changed in the PDF, Also monitor PDF filesize and checksums)
- Easily specify how often a site should be checked
- Execute JS before extracting text (Good for logging in, see examples in the UI!)
- Override Request Headers, Specify `POST` or `GET` and other methods
- Use the "Visual Selector" to help target specific elements
- Configurable [proxy per watch](https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration)
- Send a screenshot with the notification when a change is detected in the web page
We [recommend and use Bright Data](https://brightdata.grsm.io/n0r16zf7eivq) global proxy services, Bright Data will match any first deposit up to $100 using our signup link.
[Oxylabs](https://oxylabs.go2cloud.org/SH2d) is also an excellent proxy provider and well worth using, they offer Residental, ISP, Rotating and many other proxy types to suit your project.
Please :star: star :star: this project and help it grow! https://github.com/dgtlmoon/changedetection.io/
```bash
@@ -95,5 +54,5 @@ $ changedetection.io -d /path/to/empty/data/dir -p 5000
Then visit http://127.0.0.1:5000 , You should now be able to access the UI.
See https://changedetection.io for more information.
See https://github.com/dgtlmoon/changedetection.io for more information.

README.md

@@ -5,25 +5,24 @@
_Live your data-life pro-actively._
[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot.png" style="max-width:100%;" alt="Self-hosted web site page change monitoring" title="Self-hosted web site page change monitoring" />](https://changedetection.io?src=github)
[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot.png" style="max-width:100%;" alt="Self-hosted web page change monitoring" title="Self-hosted web page change monitoring" />](https://changedetection.io?src=github)
[![Release Version][release-shield]][release-link] [![Docker Pulls][docker-pulls]][docker-link] [![License][license-shield]](LICENSE.md)
![changedetection.io](https://github.com/dgtlmoon/changedetection.io/actions/workflows/test-only.yml/badge.svg?branch=master)
[**Get started with website page change monitoring straight away. Don't have time? Try our $8.99/month subscription, use our proxies and support!**](https://changedetection.io) , _half the price of other website change monitoring services!_
[**Don't have time? Let us host it for you! try our $8.99/month subscription - use our proxies and support!**](https://changedetection.io) , _half the price of other website change monitoring services!_
- Chrome browser included.
- Nothing to install, access via browser login after signup.
- Super fast, no registration needed setup.
- Get started watching and receiving website change notifications straight away.
- See our [tutorials and how-to page for more inspiration](https://changedetection.io/tutorials)
### Target specific parts of the webpage using the Visual Selector tool.
Available when connected to a <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Playwright-content-fetcher">playwright content fetcher</a> (included as part of our subscription service)
[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/visualselector-anim.gif" style="max-width:100%;" alt="Select parts and elements of a web page to monitor for changes" title="Select parts and elements of a web page to monitor for changes" />](https://changedetection.io?src=github)
[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/visualselector-anim.gif" style="max-width:100%;" alt="Self-hosted web page change monitoring context difference " title="Self-hosted web page change monitoring context difference " />](https://changedetection.io?src=github)
### Easily see what changed, examine by word, line, or individual character.
@@ -36,25 +35,11 @@ Fill in text boxes, click buttons and more, setup your changedetection scenario.
Using the **Browser Steps** configuration, add basic steps before performing change detection, such as logging into websites, adding a product to a cart, accept cookie logins, entering dates and refining searches.
[<img src="docs/browsersteps-anim.gif" style="max-width:100%;" alt="Website change detection with interactive browser steps, detect changes behind login and password, search queries and more" title="Website change detection with interactive browser steps, detect changes behind login and password, search queries and more" />](https://changedetection.io?src=github)
[<img src="docs/browsersteps-anim.gif" style="max-width:100%;" alt="Self-hosted web page change monitoring context difference " title="Website change detection with interactive browser steps, login, cookies etc" />](https://changedetection.io?src=github)
After **Browser Steps** have been run, then visit the **Visual Selector** tab to refine the content you're interested in.
Requires Playwright to be enabled.
### Awesome restock and price change notifications
Enable the _"Re-stock & Price detection for single product pages"_ option to activate the best way to monitor product pricing, this will extract any meta-data in the HTML page and give you many options to follow the pricing of the product.
Easily organise and monitor prices for products from the dashboard, get alerts and notifications when the price of a product changes or comes back in stock again!
[<img src="docs/restock-overview.png" style="max-width:100%;" alt="Easily keep an eye on product price changes directly from the UI" title="Easily keep an eye on product price changes directly from the UI" />](https://changedetection.io?src=github)
Set price change notification parameters, upper and lower price, price change percentage and more.
Always know when a product for sale drops in price.
[<img src="docs/restock-settings.png" style="max-width:100%;" alt="Set upper lower and percentage price change notification values" title="Set upper lower and percentage price change notification values" />](https://changedetection.io?src=github)
### Example use cases
@@ -82,14 +67,13 @@ Always know when a product for sale drops in price.
- Get alerts when new job positions are open on Bamboo HR and other job platforms
- Website defacement monitoring
- Pokémon Card Restock Tracker / Pokémon TCG Tracker
- RegTech - stay ahead of regulatory changes, regulatory compliance
_Need an actual Chrome runner with Javascript support? We support fetching via WebDriver and Playwright!</a>_
#### Key Features
- Lots of trigger filters, such as "Trigger on text", "Remove text by selector", "Ignore text", "Extract text", also using regular-expressions!
- Target elements with xPath(1.0) and CSS Selectors, Easily monitor complex JSON with JSONPath or jq
- Target elements with xPath and CSS Selectors, Easily monitor complex JSON with JSONPath or jq
- Switch between fast non-JS and Chrome JS based "fetchers"
- Track changes in PDF files (Monitor text changed in the PDF, Also monitor PDF filesize and checksums)
- Easily specify how often a site should be checked
@@ -101,18 +85,8 @@ _Need an actual Chrome runner with Javascript support? We support fetching via W
We [recommend and use Bright Data](https://brightdata.grsm.io/n0r16zf7eivq) global proxy services, Bright Data will match any first deposit up to $100 using our signup link.
[Oxylabs](https://oxylabs.go2cloud.org/SH2d) is also an excellent proxy provider and well worth using, they offer Residental, ISP, Rotating and many other proxy types to suit your project.
Please :star: star :star: this project and help it grow! https://github.com/dgtlmoon/changedetection.io/
### We have a Chrome extension!
Easily add the current web page to your changedetection.io tool, simply install the extension and click "Sync" to connect it to your existing changedetection.io install.
[<img src="./docs/chrome-extension-screenshot.png" style="max-width:80%;" alt="Chrome Extension to easily add the current web-page to detect a change." title="Chrome Extension to easily add the current web-page to detect a change." />](https://chromewebstore.google.com/detail/changedetectionio-website/kefcfmgmlhmankjmnbijimhofdjekbop)
[Goto the Chrome Webstore to download the extension.](https://chromewebstore.google.com/detail/changedetectionio-website/kefcfmgmlhmankjmnbijimhofdjekbop)
## Installation
### Docker
@@ -120,7 +94,7 @@ Easily add the current web page to your changedetection.io tool, simply install
With Docker composer, just clone this repository and..
```bash
$ docker compose up -d
$ docker-compose up -d
```
Docker standalone
@@ -159,10 +133,10 @@ docker rm $(docker ps -a -f name=changedetection.io -q)
docker run -d --restart always -p "127.0.0.1:5000:5000" -v datastore-volume:/datastore --name changedetection.io dgtlmoon/changedetection.io
```
### docker compose
### docker-compose
```bash
docker compose pull && docker compose up -d
docker-compose pull && docker-compose up -d
```
See the wiki for more information https://github.com/dgtlmoon/changedetection.io/wiki
@@ -170,7 +144,7 @@ See the wiki for more information https://github.com/dgtlmoon/changedetection.io
## Filters
XPath(1.0), JSONPath, jq, and CSS support comes baked in! You can be as specific as you need, use XPath exported from various XPath element query creation tools.
XPath, JSONPath, jq, and CSS support comes baked in! You can be as specific as you need, use XPath exported from various XPath element query creation tools.
(We support LXML `re:test`, `re:match` and `re:replace`.)
## Notifications
@@ -209,7 +183,7 @@ This will re-parse the JSON and apply formatting to the text, making it super ea
### JSONPath or jq?
For more complex parsing, filtering, and modifying of JSON data, jq is recommended due to the built-in operators and functions. Refer to the [documentation](https://stedolan.github.io/jq/manual/) for more specific information on jq.
For more complex parsing, filtering, and modifying of JSON data, jq is recommended due to the built-in operators and functions. Refer to the [documentation](https://stedolan.github.io/jq/manual/) for more specifc information on jq.
One big advantage of `jq` is that you can use logic in your JSON filter, such as filters to only show items that have a value greater than/less than etc.
@@ -249,19 +223,12 @@ The application also supports notifying you that it can follow this information
## Proxy Configuration
See the wiki https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration , we also support using [Bright Data proxy services where possible](https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration#brightdata-proxy-support) and [Oxylabs](https://oxylabs.go2cloud.org/SH2d) proxy services.
See the wiki https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration , we also support using [BrightData proxy services where possible]( https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration#brightdata-proxy-support)
## Raspberry Pi support?
Raspberry Pi and linux/arm/v6 linux/arm/v7 arm64 devices are supported! See the wiki for [details](https://github.com/dgtlmoon/changedetection.io/wiki/Fetching-pages-with-WebDriver)
## Import support
Easily [import your list of websites to watch for changes in Excel .xslx file format](https://changedetection.io/tutorial/how-import-your-website-change-detection-lists-excel), or paste in lists of website URLs as plaintext.
Excel import is recommended - that way you can better organise tags/groups of websites and other features.
## API Support
Supports managing the website watch list [via our API](https://changedetection.io/docs/api_v1/index.html)
@@ -271,7 +238,13 @@ Supports managing the website watch list [via our API](https://changedetection.i
Do you use changedetection.io to make money? does it save you time or money? Does it make your life easier? less stressful? Remember, we write this software when we should be doing actual paid work, we have to buy food and pay rent just like you.
Consider taking out an officially supported [website change detection subscription](https://changedetection.io?src=github) , even if you don't use it, you still get the warm fuzzy feeling of helping out the project. (And who knows, you might just use it!)
Firstly, consider taking out a [change detection monthly subscription - unlimited checks and watches](https://changedetection.io?src=github) , even if you don't use it, you still get the warm fuzzy feeling of helping out the project. (And who knows, you might just use it!)
Or directly donate an amount PayPal [![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.com/donate/?hosted_button_id=7CP6HR9ZCNDYJ)
Or BTC `1PLFN327GyUarpJd7nVe7Reqg9qHx5frNn`
<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/btc-support.png" style="max-width:50%;" alt="Support us!" />
## Commercial Support
@@ -285,17 +258,3 @@ I offer commercial support, this software is depended on by network security, ae
[license-shield]: https://img.shields.io/github/license/dgtlmoon/changedetection.io.svg?style=for-the-badge
[release-link]: https://github.com/dgtlmoon/changedetection.io/releases
[docker-link]: https://hub.docker.com/r/dgtlmoon/changedetection.io
## Commercial Licencing
If you are reselling this software either in part or full as part of any commercial arrangement, you must abide by our COMMERCIAL_LICENCE.md found in our code repository, please contact dgtlmoon@gmail.com and contact@changedetection.io .
## Third-party licenses
changedetectionio.html_tools.elementpath_tostring: Copyright (c), 2018-2021, SISSA (Scuola Internazionale Superiore di Studi Avanzati), Licensed under [MIT license](https://github.com/sissaschool/elementpath/blob/master/LICENSE)
## Contributors
Recognition of fantastic contributors to the project
- Constantin Hong https://github.com/Constantin1489
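
The README also moves between the standalone `docker-compose` v1 CLI and the `docker compose` plugin. A minimal compose service equivalent to the README's standalone `docker run` example (a hedged sketch; the repository ships its own docker-compose.yml, which should be preferred):

```yaml
# Hedged sketch of a minimal compose service matching the README's docker run
# example (port 5000, named /datastore volume, restart always); the repo's own
# docker-compose.yml is the canonical version.
services:
  changedetection:
    image: dgtlmoon/changedetection.io
    ports:
      - "127.0.0.1:5000:5000"
    volumes:
      - datastore-volume:/datastore
    restart: always
volumes:
  datastore-volume:
```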

app.json (new file, 21 lines)

@@ -0,0 +1,21 @@
{
"name": "ChangeDetection.io",
"description": "The best and simplest self-hosted open source website change detection monitoring and notification service.",
"keywords": [
"changedetection",
"website monitoring"
],
"repository": "https://github.com/dgtlmoon/changedetection.io",
"success_url": "/",
"scripts": {
},
"env": {
},
"formation": {
"web": {
"quantity": 1,
"size": "free"
}
},
"image": "heroku/python"
}

changedetection.py

@@ -1,6 +1,44 @@
#!/usr/bin/env python3
#!/usr/bin/python3
# Only exists for direct CLI usage
# Entry-point for running from the CLI when not installed via Pip, Pip will handle the console_scripts entry_points's from setup.py
# It's recommended to use `pip3 install changedetection.io` and start with `changedetection.py` instead, it will be linkd to your global path.
# or Docker.
# Read more https://github.com/dgtlmoon/changedetection.io/wiki
import changedetectionio
changedetectionio.main()
from changedetectionio import changedetection
import multiprocessing
import sys
import os
def sigchld_handler(_signo, _stack_frame):
import sys
print('Shutdown: Got SIGCHLD')
# https://stackoverflow.com/questions/40453496/python-multiprocessing-capturing-signals-to-restart-child-processes-or-shut-do
pid, status = os.waitpid(-1, os.WNOHANG | os.WUNTRACED | os.WCONTINUED)
print('Sub-process: pid %d status %d' % (pid, status))
if status != 0:
sys.exit(1)
raise SystemExit
if __name__ == '__main__':
#signal.signal(signal.SIGCHLD, sigchld_handler)
# The only way I could find to get Flask to shutdown, is to wrap it and then rely on the subsystem issuing SIGTERM/SIGKILL
parse_process = multiprocessing.Process(target=changedetection.main)
parse_process.daemon = True
parse_process.start()
import time
try:
while True:
time.sleep(1)
if not parse_process.is_alive():
# Process died/crashed for some reason, exit with error set
sys.exit(1)
except KeyboardInterrupt:
#parse_process.terminate() not needed, because this process will issue it to the sub-process anyway
print ("Exited - CTRL+C")

File diff suppressed because it is too large

View File

@@ -1,5 +1,5 @@
import os
from changedetectionio.strtobool import strtobool
from distutils.util import strtobool
from flask_expects_json import expects_json
from changedetectionio import queuedWatchMetaData
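The import swap above replaces distutils.util.strtobool (distutils is deprecated and removed as of Python 3.12) with a bundled changedetectionio.strtobool helper. A minimal sketch of such a helper, assuming it simply mirrors the distutils semantics; the shipped module may differ:

def strtobool(val):
    # Mirrors distutils.util.strtobool: 1 for truthy strings, 0 for falsy ones,
    # ValueError for anything else.
    val = val.lower()
    if val in ('y', 'yes', 't', 'true', 'on', '1'):
        return 1
    if val in ('n', 'no', 'f', 'false', 'off', '0'):
        return 0
    raise ValueError(f"invalid truth value {val!r}")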
@@ -12,10 +12,9 @@ import copy
# See docs/README.md for rebuilding the docs/apidoc information
from . import api_schema
from ..model import watch_base
# Build a JSON Schema at least partially based on our Watch model
watch_base_config = watch_base()
from changedetectionio.model.Watch import base_config as watch_base_config
schema = api_schema.build_watch_json_schema(watch_base_config)
schema_create_watch = copy.deepcopy(schema)
@@ -31,7 +30,7 @@ class Watch(Resource):
self.update_q = kwargs['update_q']
# Get information about a single watch, excluding the history list (can be large)
# curl http://localhost:5000/api/v1/watch/<string:uuid>
# curl http://localhost:4000/api/v1/watch/<string:uuid>
# @todo - version2 - ?muted and ?paused should be able to be called together, return the watch struct not "OK"
# ?recheck=true
@auth.check_token
@@ -40,9 +39,9 @@ class Watch(Resource):
@api {get} /api/v1/watch/:uuid Single watch - get data, recheck, pause, mute.
@apiDescription Retrieve watch information and set muted/paused status
@apiExample {curl} Example usage:
curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -H"x-api-key:813031b16330fe25e3780cf0325daa45"
curl "http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091?muted=unmuted" -H"x-api-key:813031b16330fe25e3780cf0325daa45"
curl "http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091?paused=unpaused" -H"x-api-key:813031b16330fe25e3780cf0325daa45"
curl http://localhost:4000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -H"x-api-key:813031b16330fe25e3780cf0325daa45"
curl "http://localhost:4000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091?muted=unmuted" -H"x-api-key:813031b16330fe25e3780cf0325daa45"
curl "http://localhost:4000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091?paused=unpaused" -H"x-api-key:813031b16330fe25e3780cf0325daa45"
@apiName Watch
@apiGroup Watch
@apiParam {uuid} uuid Watch unique ID.
@@ -77,7 +76,7 @@ class Watch(Resource):
# Properties are not returned as a JSON, so add the required props manually
watch['history_n'] = watch.history_n
watch['last_changed'] = watch.last_changed
watch['viewed'] = watch.viewed
return watch
@auth.check_token
@@ -85,7 +84,7 @@ class Watch(Resource):
"""
@api {delete} /api/v1/watch/:uuid Delete a watch and related history
@apiExample {curl} Example usage:
curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -X DELETE -H"x-api-key:813031b16330fe25e3780cf0325daa45"
curl http://localhost:4000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -X DELETE -H"x-api-key:813031b16330fe25e3780cf0325daa45"
@apiParam {uuid} uuid Watch unique ID.
@apiName Delete
@apiGroup Watch
@@ -104,7 +103,7 @@ class Watch(Resource):
@api {put} /api/v1/watch/:uuid Update watch information
@apiExample {curl} Example usage:
Update (PUT)
curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -X PUT -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json" -d '{"url": "https://my-nice.com" , "tag": "new list"}'
curl http://localhost:4000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -X PUT -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json" -d '{"url": "https://my-nice.com" , "tag": "new list"}'
@apiDescription Updates an existing watch using JSON, accepts the same structure as returned in <a href="#api-Watch-Watch">get single watch information</a>
@apiParam {uuid} uuid Watch unique ID.
@@ -133,14 +132,13 @@ class WatchHistory(Resource):
self.datastore = kwargs['datastore']
# Get a list of available history for a watch by UUID
# curl http://localhost:5000/api/v1/watch/<string:uuid>/history
@auth.check_token
# curl http://localhost:4000/api/v1/watch/<string:uuid>/history
def get(self, uuid):
"""
@api {get} /api/v1/watch/<string:uuid>/history Get a list of all historical snapshots available for a watch
@apiDescription Requires `uuid`, returns list
@apiExample {curl} Example usage:
curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091/history -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json"
curl http://localhost:4000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091/history -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json"
{
"1676649279": "/tmp/data/6a4b7d5c-fee4-4616-9f43-4ac97046b595/cb7e9be8258368262246910e6a2a4c30.txt",
"1677092785": "/tmp/data/6a4b7d5c-fee4-4616-9f43-4ac97046b595/e20db368d6fc633e34f559ff67bb4044.txt",
@@ -168,36 +166,26 @@ class WatchSingleHistory(Resource):
@api {get} /api/v1/watch/<string:uuid>/history/<int:timestamp> Get single snapshot from watch
@apiDescription Requires watch `uuid` and `timestamp`. `timestamp` of "`latest`" for latest available snapshot, or <a href="#api-Watch_History-Get_list_of_available_stored_snapshots_for_watch">use the list returned here</a>
@apiExample {curl} Example usage:
curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091/history/1677092977 -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json"
curl http://localhost:4000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091/history/1677092977 -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json"
@apiName Get single snapshot content
@apiGroup Watch History
@apiParam {String} [html] Optional. Set to =1 to return the last HTML (only the last 2 snapshots are stored; use `latest` as the timestamp)
@apiSuccess (200) {String} OK
@apiSuccess (404) {String} ERR Not found
"""
watch = self.datastore.data['watching'].get(uuid)
if not watch:
abort(404, message=f"No watch exists with the UUID of {uuid}")
abort(404, message='No watch exists with the UUID of {}'.format(uuid))
if not len(watch.history):
abort(404, message=f"Watch found but no history exists for the UUID {uuid}")
abort(404, message='Watch found but no history exists for the UUID {}'.format(uuid))
if timestamp == 'latest':
timestamp = list(watch.history.keys())[-1]
if request.args.get('html'):
content = watch.get_fetched_html(timestamp)
if content:
response = make_response(content, 200)
response.mimetype = "text/html"
else:
response = make_response("No content found", 404)
response.mimetype = "text/plain"
else:
content = watch.get_history_snapshot(timestamp)
response = make_response(content, 200)
response.mimetype = "text/plain"
content = watch.get_history_snapshot(timestamp)
response = make_response(content, 200)
response.mimetype = "text/plain"
return response
@@ -214,7 +202,7 @@ class CreateWatch(Resource):
@api {post} /api/v1/watch Create a single watch
@apiDescription Requires at least `url` set, can accept the same structure as <a href="#api-Watch-Watch">get single watch information</a> to create.
@apiExample {curl} Example usage:
curl http://localhost:5000/api/v1/watch -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json" -d '{"url": "https://my-nice.com" , "tag": "nice list"}'
curl http://localhost:4000/api/v1/watch -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json" -d '{"url": "https://my-nice.com" , "tag": "nice list"}'
@apiName Create
@apiGroup Watch
@apiSuccess (200) {String} OK Was created
@@ -257,7 +245,7 @@ class CreateWatch(Resource):
@api {get} /api/v1/watch List watches
@apiDescription Return concise list of available watches and some very basic info
@apiExample {curl} Example usage:
curl http://localhost:5000/api/v1/watch -H"x-api-key:813031b16330fe25e3780cf0325daa45"
curl http://localhost:4000/api/v1/watch -H"x-api-key:813031b16330fe25e3780cf0325daa45"
{
"6a4b7d5c-fee4-4616-9f43-4ac97046b595": {
"last_changed": 1677103794,
@@ -292,14 +280,11 @@ class CreateWatch(Resource):
if tag_limit and not any(v.get('title').lower() == tag_limit for k, v in tags.items()):
continue
list[uuid] = {
'last_changed': watch.last_changed,
'last_checked': watch['last_checked'],
'last_error': watch['last_error'],
'title': watch['title'],
'url': watch['url'],
'viewed': watch.viewed
}
list[uuid] = {'url': watch['url'],
'title': watch['title'],
'last_checked': watch['last_checked'],
'last_changed': watch.last_changed,
'last_error': watch['last_error']}
if request.args.get('recheck_all'):
for uuid in self.datastore.data['watching'].keys():
@@ -308,61 +293,6 @@ class CreateWatch(Resource):
return list, 200
class Import(Resource):
def __init__(self, **kwargs):
# datastore is a black box dependency
self.datastore = kwargs['datastore']
@auth.check_token
def post(self):
"""
@api {post} /api/v1/import Import a list of watched URLs
@apiDescription Accepts a line-feed separated list of URLs to import (one URL per line), additionally with ?tag_uuids=(tag id), ?tag=(name), ?proxy={key}, ?dedupe=true (default true).
@apiExample {curl} Example usage:
curl http://localhost:5000/api/v1/import --data-binary @list-of-sites.txt -H"x-api-key:8a111a21bc2f8f1dd9b9353bbd46049a"
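A hypothetical combined example (illustrative values) of the optional query parameters:
curl "http://localhost:5000/api/v1/import?tag=nice%20list&dedupe=true" --data-binary @list-of-sites.txt -H"x-api-key:8a111a21bc2f8f1dd9b9353bbd46049a"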
@apiName Import
@apiGroup Watch
@apiSuccess (200) {List} OK List of watch UUIDs added
@apiSuccess (500) {String} ERR Some other error
"""
extras = {}
if request.args.get('proxy'):
plist = self.datastore.proxy_list
if not request.args.get('proxy') in plist:
return "Invalid proxy choice, currently supported proxies are '{}'".format(', '.join(plist)), 400
else:
extras['proxy'] = request.args.get('proxy')
dedupe = strtobool(request.args.get('dedupe', 'true'))
tags = request.args.get('tag')
tag_uuids = request.args.get('tag_uuids')
if tag_uuids:
tag_uuids = tag_uuids.split(',')
urls = request.get_data().decode('utf8').splitlines()
added = []
allow_simplehost = not strtobool(os.getenv('BLOCK_SIMPLEHOSTS', 'False'))
for url in urls:
url = url.strip()
if not len(url):
continue
# If hosts that only contain alphanumerics are allowed ("localhost" for example)
if not validators.url(url, simple_host=allow_simplehost):
return f"Invalid or unsupported URL - {url}", 400
if dedupe and self.datastore.url_exists(url):
continue
new_uuid = self.datastore.add_watch(url=url, extras=extras, tag=tags, tag_uuids=tag_uuids)
added.append(new_uuid)
return added
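The BLOCK_SIMPLEHOSTS environment variable above gates hosts that contain no dots ("localhost" for example); a minimal sketch of the effect, assuming validators >= 0.21, the version that introduced the simple_host argument:

import os
import validators
from changedetectionio.strtobool import strtobool

# Default: simple hosts allowed, so a bare hostname validates
allow_simplehost = not strtobool(os.getenv('BLOCK_SIMPLEHOSTS', 'False'))
print(bool(validators.url('http://localhost:5000', simple_host=allow_simplehost)))  # True
# With BLOCK_SIMPLEHOSTS=True the same URL fails validation, and the import
# endpoint above answers 400 "Invalid or unsupported URL - http://localhost:5000"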
class SystemInfo(Resource):
def __init__(self, **kwargs):
# datastore is a black box dependency
@@ -375,7 +305,7 @@ class SystemInfo(Resource):
@api {get} /api/v1/systeminfo Return system info
@apiDescription Return some info about the current system state
@apiExample {curl} Example usage:
curl http://localhost:5000/api/v1/systeminfo -H"x-api-key:813031b16330fe25e3780cf0325daa45"
curl http://localhost:4000/api/v1/systeminfo -H"x-api-key:813031b16330fe25e3780cf0325daa45"
HTTP/1.0 200
{
'queue_size': 10 ,

View File

@@ -1,7 +0,0 @@
- This needs an abstraction to directly handle the puppeteer connection methods
- Then remove the playwright stuff
- Remove hack redirect at line 65 changedetectionio/processors/__init__.py
The screenshots are base64 encoded/decoded, which is very CPU intensive for large screenshots (in Playwright) but not
in the direct puppeteer connection (they are binary end to end)

View File

@@ -4,21 +4,29 @@
# Why?
# `browsersteps_playwright_browser_interface.chromium.connect_over_cdp()` will only run once without async()
# - this flask app is not async()
# - A single timeout/keepalive which applies to the session made at .connect_over_cdp()
# - browserless has a single timeout/keepalive which applies to the session made at .connect_over_cdp()
#
# So for now we must unfortunately just keep a single timer running since .connect_over_cdp() was called
# and know when that reaches the timeout/keepalive :( when that time is up, restart the connection and tell the user
# that their time is up, insert another coin. (reload)
#
# Bigger picture
# - It's horrible that we have this click+wait deal, some nice socket.io solution using something similar
# to what the browserless debug UI already gives us would be smarter..
#
# OR
# - Some API call that should be hacked into browserless or playwright so that we can call "/api/bump-keepalive/{session_id}/60"
# So we can tell it that we need more time (run this on each action)
#
# OR
# - use multiprocessing to bump this over to its own process and add some transport layer (queue/pipes)
from changedetectionio.strtobool import strtobool
from distutils.util import strtobool
from flask import Blueprint, request, make_response
import os
import logging
from changedetectionio.store import ChangeDetectionStore
from changedetectionio.flask_app import login_optionally_required
from loguru import logger
from changedetectionio import login_optionally_required
browsersteps_sessions = {}
io_interface_context = None
@@ -36,7 +44,7 @@ def construct_blueprint(datastore: ChangeDetectionStore):
# We keep the playwright session open for many minutes
keepalive_seconds = int(os.getenv('BROWSERSTEPS_MINUTES_KEEPALIVE', 10)) * 60
seconds_keepalive = int(os.getenv('BROWSERSTEPS_MINUTES_KEEPALIVE', 10)) * 60
browsersteps_start_session = {'start_time': time.time()}
@@ -48,18 +56,16 @@ def construct_blueprint(datastore: ChangeDetectionStore):
# Start the Playwright context, which is actually a nodejs sub-process and communicates over STDIN/STDOUT pipes
io_interface_context = io_interface_context.start()
keepalive_ms = ((keepalive_seconds + 3) * 1000)
base_url = os.getenv('PLAYWRIGHT_DRIVER_URL', '').strip('"')
a = "?" if not '?' in base_url else '&'
base_url += a + f"timeout={keepalive_ms}"
# keep it alive for a few seconds more than we advertise; sometimes it helps it shut down cleanly
keepalive = "&timeout={}".format(((seconds_keepalive + 3) * 1000))
try:
browsersteps_start_session['browser'] = io_interface_context.chromium.connect_over_cdp(base_url)
browsersteps_start_session['browser'] = io_interface_context.chromium.connect_over_cdp(
os.getenv('PLAYWRIGHT_DRIVER_URL', '') + keepalive)
except Exception as e:
if 'ECONNREFUSED' in str(e):
return make_response('Unable to start the Playwright Browser session, is it running?', 401)
else:
# Other errors, bad URL syntax, bad reply etc
return make_response(str(e), 401)
proxy_id = datastore.get_preferred_proxy_for_watch(uuid=watch_uuid)
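A worked example of the keepalive URL construction above, with the default BROWSERSTEPS_MINUTES_KEEPALIVE of 10 (driver URL value assumed):

keepalive_seconds = 10 * 60                    # 600
keepalive_ms = (keepalive_seconds + 3) * 1000  # 603000 - 3s of grace on top
# PLAYWRIGHT_DRIVER_URL='ws://playwright-chrome:3000' becomes
#   ws://playwright-chrome:3000?timeout=603000
# and a driver URL that already carries a query string gets '&timeout=...' appended instead.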
@@ -79,14 +85,12 @@ def construct_blueprint(datastore: ChangeDetectionStore):
if parsed.password:
proxy['password'] = parsed.password
logger.debug(f"Browser Steps: UUID {watch_uuid} selected proxy {proxy_url}")
print("Browser Steps: UUID {} selected proxy {}".format(watch_uuid, proxy_url))
# Tell Playwright to connect to Chrome and setup a new session via our stepper interface
browsersteps_start_session['browserstepper'] = browser_steps.browsersteps_live_ui(
playwright_browser=browsersteps_start_session['browser'],
proxy=proxy,
start_url=datastore.data['watching'][watch_uuid].get('url')
)
proxy=proxy)
# For test
#browsersteps_start_session['browserstepper'].action_goto_url(value="http://example.com?time="+str(time.time()))
@@ -108,43 +112,18 @@ def construct_blueprint(datastore: ChangeDetectionStore):
if not watch_uuid:
return make_response('No Watch UUID specified', 500)
logger.debug("Starting connection with playwright")
logger.debug("browser_steps.py connecting")
print("Starting connection with playwright")
logging.debug("browser_steps.py connecting")
browsersteps_sessions[browsersteps_session_id] = start_browsersteps_session(watch_uuid)
logger.debug("Starting connection with playwright - done")
print("Starting connection with playwright - done")
return {'browsersteps_session_id': browsersteps_session_id}
@login_optionally_required
@browser_steps_blueprint.route("/browsersteps_image", methods=['GET'])
def browser_steps_fetch_screenshot_image():
from flask import (
make_response,
request,
send_from_directory,
)
uuid = request.args.get('uuid')
step_n = int(request.args.get('step_n'))
watch = datastore.data['watching'].get(uuid)
filename = f"step_before-{step_n}.jpeg" if request.args.get('type', '') == 'before' else f"step_{step_n}.jpeg"
if step_n and watch and os.path.isfile(os.path.join(watch.watch_data_dir, filename)):
response = make_response(send_from_directory(directory=watch.watch_data_dir, path=filename))
response.headers['Content-type'] = 'image/jpeg'
response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
response.headers['Pragma'] = 'no-cache'
response.headers['Expires'] = 0
return response
else:
return make_response('Unable to fetch image, is the URL correct? does the watch exist? does the step_type-n.jpeg exist?', 401)
# A request for an action was received
@login_optionally_required
@browser_steps_blueprint.route("/browsersteps_update", methods=['POST'])
def browsersteps_ui_update():
import base64
import playwright._impl._errors
import playwright._impl._api_types
global browsersteps_sessions
from changedetectionio.blueprint.browser_steps import browser_steps
@@ -169,6 +148,11 @@ def construct_blueprint(datastore: ChangeDetectionStore):
step_n = int(request.form.get('step_n'))
is_last_step = strtobool(request.form.get('is_last_step'))
if step_operation == 'Goto site':
step_operation = 'goto_url'
step_optional_value = datastore.data['watching'][uuid].get('url')
step_selector = None
# @todo try.. accept.. nice errors not popups..
try:
@@ -177,7 +161,7 @@ def construct_blueprint(datastore: ChangeDetectionStore):
optional_value=step_optional_value)
except Exception as e:
logger.error(f"Exception when calling step operation {step_operation} {str(e)}")
print("Exception when calling step operation", step_operation, str(e))
# Try to find something of value to give back to the user
return make_response(str(e).splitlines()[0], 401)
@@ -187,10 +171,8 @@ def construct_blueprint(datastore: ChangeDetectionStore):
u = browsersteps_sessions[browsersteps_session_id]['browserstepper'].page.url
if is_last_step and u:
(screenshot, xpath_data) = browsersteps_sessions[browsersteps_session_id]['browserstepper'].request_visualselector_data()
watch = datastore.data['watching'].get(uuid)
if watch:
watch.save_screenshot(screenshot=screenshot)
watch.save_xpath_data(data=xpath_data)
datastore.save_screenshot(watch_uuid=uuid, screenshot=screenshot)
datastore.save_xpath_data(watch_uuid=uuid, data=xpath_data)
# if not this_session.page:
# cleanup_playwright_session()

View File

@@ -1,13 +1,9 @@
#!/usr/bin/env python3
#!/usr/bin/python3
import os
import time
import re
from random import randint
from loguru import logger
from changedetectionio.content_fetchers.base import manage_user_agent
from changedetectionio.safe_jinja import render as jinja_render
# Two flags, tell the JS which of the "Selector" or "Value" field should be enabled in the front end
# 0- off, 1- on
@@ -49,10 +45,6 @@ browser_step_ui_config = {'Choose one': '0 0',
# ONLY works in Playwright because we need the fullscreen screenshot
class steppable_browser_interface():
page = None
start_url = None
def __init__(self, start_url):
self.start_url = start_url
# Convert and perform "Click Button" for example
def call_action(self, action_name, selector=None, optional_value=None):
@@ -61,7 +53,7 @@ class steppable_browser_interface():
if call_action_name == 'choose_one':
return
logger.debug(f"> Action calling '{call_action_name}'")
print("> action calling", call_action_name)
# https://playwright.dev/python/docs/selectors#xpath-selectors
if selector and selector.startswith('/') and not selector.startswith('//'):
selector = "xpath=" + selector
@@ -69,31 +61,29 @@ class steppable_browser_interface():
action_handler = getattr(self, "action_" + call_action_name)
# Support for Jinja2 variables in the value and selector
from jinja2 import Environment
jinja2_env = Environment(extensions=['jinja2_time.TimeExtension'])
if selector and ('{%' in selector or '{{' in selector):
selector = jinja_render(template_str=selector)
selector = str(jinja2_env.from_string(selector).render())
if optional_value and ('{%' in optional_value or '{{' in optional_value):
optional_value = jinja_render(template_str=optional_value)
optional_value = str(jinja2_env.from_string(optional_value).render())
action_handler(selector, optional_value)
self.page.wait_for_timeout(1.5 * 1000)
logger.debug(f"Call action done in {time.time()-now:.2f}s")
print("Call action done in", time.time() - now)
def action_goto_url(self, selector=None, value=None):
# self.page.set_viewport_size({"width": 1280, "height": 5000})
now = time.time()
response = self.page.goto(value, timeout=0, wait_until='load')
# Should be the same as the puppeteer_fetch.js methods, meaning: load with no timeout set (skip timeout)
#and also wait for seconds ?
#await page.waitForTimeout(1000);
#await page.waitForTimeout(extra_wait_ms);
logger.debug(f"Time to goto URL {time.time()-now:.2f}s")
return response
response = self.page.goto(value, timeout=0, wait_until='commit')
# In case they request to go back to the start
def action_goto_site(self, selector=None, value=None):
return self.action_goto_url(value=self.start_url)
# Wait_until = commit
# - `'commit'` - consider operation to be finished when network response is received and the document started loading.
# Better to not use any smarts from Playwright and just wait an arbitrary number of seconds
# This seemed to solve nearly all 'TimeoutErrors'
print("Time to goto URL ", time.time() - now)
def action_click_element_containing_text(self, selector=None, value=''):
if not len(value.strip()):
@@ -109,19 +99,18 @@ class steppable_browser_interface():
self.page.fill(selector, value, timeout=10 * 1000)
def action_execute_js(self, selector, value):
response = self.page.evaluate(value)
return response
self.page.evaluate(value)
def action_click_element(self, selector, value):
logger.debug("Clicking element")
print("Clicking element")
if not len(selector.strip()):
return
self.page.click(selector=selector, timeout=30 * 1000, delay=randint(200, 500))
def action_click_element_if_exists(self, selector, value):
import playwright._impl._errors as _api_types
logger.debug("Clicking element if exists")
import playwright._impl._api_types as _api_types
print("Clicking element if exists")
if not len(selector.strip()):
return
try:
@@ -133,9 +122,6 @@ class steppable_browser_interface():
return
def action_click_x_y(self, selector, value):
if not re.match(r'^\s?\d+\s?,\s?\d+\s?$', value):
raise Exception("'Click X,Y' step should be in the format of '100 , 90'")
x, y = value.strip().split(',')
x = int(float(x.strip()))
y = int(float(y.strip()))
@@ -152,13 +138,13 @@ class steppable_browser_interface():
def action_wait_for_text(self, selector, value):
import json
v = json.dumps(value)
self.page.wait_for_function(f'document.querySelector("body").innerText.includes({v});', timeout=30000)
self.page.wait_for_function(f'document.querySelector("body").innerText.includes({v});', timeout=90000)
def action_wait_for_text_in_element(self, selector, value):
import json
s = json.dumps(selector)
v = json.dumps(value)
self.page.wait_for_function(f'document.querySelector({s}).innerText.includes({v});', timeout=30000)
self.page.wait_for_function(f'document.querySelector({s}).innerText.includes({v});', timeout=90000)
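json.dumps above doubles as a JavaScript string-escaper so arbitrary text can be embedded safely into the page-side predicate; a quick illustration with an assumed value:

import json
v = json.dumps('50% off "today"')
# v now holds the JS string literal "50% off \"today\"" so the predicate
# passed to wait_for_function is valid JavaScript:
# document.querySelector("body").innerText.includes("50% off \"today\"");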
# @todo - in the future make some popout interface to capture what needs to be set
# https://playwright.dev/python/docs/api/class-keyboard
@@ -178,7 +164,7 @@ class steppable_browser_interface():
self.page.locator(selector, timeout=1000).uncheck(timeout=1000)
# Responsible for maintaining a live 'context' with the chrome CDP
# Responsible for maintaining a live 'context' with browserless
# @todo - how long do contexts live for anyway?
class browsersteps_live_ui(steppable_browser_interface):
context = None
@@ -187,7 +173,6 @@ class browsersteps_live_ui(steppable_browser_interface):
stale = False
# bump and kill this if idle after X sec
age_start = 0
headers = {}
# use a special driver, maybe locally etc
command_executor = os.getenv(
@@ -202,11 +187,9 @@ class browsersteps_live_ui(steppable_browser_interface):
browser_type = os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').strip('"')
def __init__(self, playwright_browser, proxy=None, headers=None, start_url=None):
self.headers = headers or {}
def __init__(self, playwright_browser, proxy=None):
self.age_start = time.time()
self.playwright_browser = playwright_browser
self.start_url = start_url
if self.context is None:
self.connect(proxy=proxy)
@@ -218,17 +201,16 @@ class browsersteps_live_ui(steppable_browser_interface):
# @todo handle multiple contexts, bind a unique id from the browser on each req?
self.context = self.playwright_browser.new_context(
accept_downloads=False, # Should never be needed
bypass_csp=True, # This is needed to enable JavaScript execution on GitHub and others
extra_http_headers=self.headers,
ignore_https_errors=True,
proxy=proxy,
service_workers=os.getenv('PLAYWRIGHT_SERVICE_WORKERS', 'allow'),
# Should be `allow` or `block` - sites like YouTube can transmit large amounts of data via Service Workers
user_agent=manage_user_agent(headers=self.headers),
# @todo
# user_agent=request_headers['User-Agent'] if request_headers.get('User-Agent') else 'Mozilla/5.0',
# proxy=self.proxy,
# This is needed to enable JavaScript execution on GitHub and others
bypass_csp=True,
# Should never be needed
accept_downloads=False,
proxy=proxy
)
self.page = self.context.new_page()
# self.page.set_default_navigation_timeout(keep_open)
@@ -241,11 +223,11 @@ class browsersteps_live_ui(steppable_browser_interface):
# Listen for all console events and handle errors
self.page.on("console", lambda msg: print(f"Browser steps console - {msg.type}: {msg.text} {msg.args}"))
logger.debug(f"Time to browser setup {time.time()-now:.2f}s")
print("Time to browser setup", time.time() - now)
self.page.wait_for_timeout(1 * 1000)
def mark_as_closed(self):
logger.debug("Page closed, cleaning up..")
print("Page closed, cleaning up..")
@property
def has_expired(self):
@@ -255,9 +237,8 @@ class browsersteps_live_ui(steppable_browser_interface):
def get_current_state(self):
"""Return the screenshot and interactive elements mapping, generally always called after action_()"""
import importlib.resources
xpath_element_js = importlib.resources.files("changedetectionio.content_fetchers.res").joinpath('xpath_element_scraper.js').read_text()
from pkg_resources import resource_string
xpath_element_js = resource_string(__name__, "../../res/xpath_element_scraper.js").decode('utf-8')
now = time.time()
self.page.wait_for_timeout(1 * 1000)
@@ -272,7 +253,7 @@ class browsersteps_live_ui(steppable_browser_interface):
xpath_data = self.page.evaluate("async () => {" + xpath_element_js + "}")
# So the JS will find the smallest one first
xpath_data['size_pos'] = sorted(xpath_data['size_pos'], key=lambda k: k['width'] * k['height'], reverse=True)
logger.debug(f"Time to complete get_current_state of browser {time.time()-now:.2f}s")
print("Time to complete get_current_state of browser", time.time() - now)
# except
# playwright._impl._api_types.Error: Browser closed.
# @todo show some countdown timer?
@@ -288,12 +269,14 @@ class browsersteps_live_ui(steppable_browser_interface):
:param current_include_filters:
:return:
"""
import importlib.resources
self.page.evaluate("var include_filters=''")
xpath_element_js = importlib.resources.files("changedetectionio.content_fetchers.res").joinpath('xpath_element_scraper.js').read_text()
from changedetectionio.content_fetchers import visualselector_xpath_selectors
from pkg_resources import resource_string
# The code that scrapes elements and makes a list of elements/size/position to click on in the VisualSelector
xpath_element_js = resource_string(__name__, "../../res/xpath_element_scraper.js").decode('utf-8')
from changedetectionio.content_fetcher import visualselector_xpath_selectors
xpath_element_js = xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors)
xpath_data = self.page.evaluate("async () => {" + xpath_element_js + "}")
screenshot = self.page.screenshot(type='jpeg', full_page=True, quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))
screenshot = self.page.screenshot(type='jpeg', full_page=True, quality=int(os.getenv("PLAYWRIGHT_SCREENSHOT_QUALITY", 72)))
return (screenshot, xpath_data)

View File

@@ -1,4 +1,5 @@
from playwright.sync_api import PlaywrightContextManager
import asyncio
# So playwright wants to run as a context manager, but we do something horrible and hacky:
# we are holding the session open for as long as possible, then shutting it down and opening a new one

View File

@@ -1,11 +1,14 @@
from concurrent.futures import ThreadPoolExecutor
from changedetectionio.store import ChangeDetectionStore
from functools import wraps
from flask import Blueprint
from flask_login import login_required
from changedetectionio.processors import text_json_diff
from changedetectionio.store import ChangeDetectionStore
STATUS_CHECKING = 0
STATUS_FAILED = 1
STATUS_OK = 2
@@ -29,19 +32,18 @@ def construct_blueprint(datastore: ChangeDetectionStore):
@threadpool
def long_task(uuid, preferred_proxy):
import time
from changedetectionio.content_fetchers import exceptions as content_fetcher_exceptions
from changedetectionio.processors.text_json_diff import text_json_diff
from changedetectionio.safe_jinja import render as jinja_render
from changedetectionio import content_fetcher
status = {'status': '', 'length': 0, 'text': ''}
from jinja2 import Environment, BaseLoader
contents = ''
now = time.time()
try:
update_handler = text_json_diff.perform_site_check(datastore=datastore, watch_uuid=uuid)
update_handler.call_browser()
update_handler = text_json_diff.perform_site_check(datastore=datastore)
changed_detected, update_obj, contents = update_handler.run(uuid, preferred_proxy=preferred_proxy, skip_when_checksum_same=False)
# title, size is len contents not len xfer
except content_fetcher_exceptions.Non200ErrorCodeReceived as e:
except content_fetcher.Non200ErrorCodeReceived as e:
if e.status_code == 404:
status.update({'status': 'OK', 'length': len(contents), 'text': f"OK but 404 (page not found)"})
elif e.status_code == 403 or e.status_code == 401:
@@ -50,23 +52,19 @@ def construct_blueprint(datastore: ChangeDetectionStore):
status.update({'status': 'ERROR', 'length': len(contents), 'text': f"Status code: {e.status_code}"})
except text_json_diff.FilterNotFoundInResponse:
status.update({'status': 'OK', 'length': len(contents), 'text': f"OK but CSS/xPath filter not found (page changed layout?)"})
except content_fetcher_exceptions.EmptyReply as e:
except content_fetcher.EmptyReply as e:
if e.status_code == 403 or e.status_code == 401:
status.update({'status': 'ERROR OTHER', 'length': len(contents), 'text': f"Got empty reply with code {e.status_code} - Access denied"})
else:
status.update({'status': 'ERROR OTHER', 'length': len(contents) if contents else 0, 'text': f"Empty reply with code {e.status_code}, needs chrome?"})
except content_fetcher_exceptions.ReplyWithContentButNoText as e:
txt = f"Got reply but with no content - Status code {e.status_code} - It's possible that the filters were found, but contained no usable text (or contained only an image)."
status.update({'status': 'ERROR', 'text': txt})
except Exception as e:
status.update({'status': 'ERROR OTHER', 'length': len(contents) if contents else 0, 'text': 'Error: '+type(e).__name__+str(e)})
status.update({'status': 'ERROR OTHER', 'length': len(contents) if contents else 0, 'text': 'Error: '+str(e)})
else:
status.update({'status': 'OK', 'length': len(contents), 'text': ''})
if status.get('text'):
# parse 'text' as text for safety
v = {'text': status['text']}
status['text'] = jinja_render(template_str='{{text|e}}', **v)
status['text'] = Environment(loader=BaseLoader()).from_string('{{text|e}}').render({'text': status['text']})
status['time'] = "{:.2f}s".format(time.time() - now)

View File

@@ -1,5 +1,5 @@
from changedetectionio.strtobool import strtobool
from distutils.util import strtobool
from flask import Blueprint, flash, redirect, url_for
from flask_login import login_required
from changedetectionio.store import ChangeDetectionStore
@@ -17,10 +17,9 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q: PriorityQueue
@price_data_follower_blueprint.route("/<string:uuid>/accept", methods=['GET'])
def accept(uuid):
datastore.data['watching'][uuid]['track_ldjson_price_data'] = PRICE_DATA_TRACK_ACCEPT
datastore.data['watching'][uuid]['processor'] = 'restock_diff'
datastore.data['watching'][uuid].clear_watch()
update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid, 'skip_when_checksum_same': False}))
return redirect(url_for("index"))
return redirect(url_for("form_watch_checknow", uuid=uuid))
@login_required
@price_data_follower_blueprint.route("/<string:uuid>/reject", methods=['GET'])

View File

@@ -1,8 +1,6 @@
from flask import Blueprint, request, render_template, flash, url_for, redirect
from flask import Blueprint, request, make_response, render_template, flash, url_for, redirect
from changedetectionio.store import ChangeDetectionStore
from changedetectionio.flask_app import login_optionally_required
from changedetectionio import login_optionally_required
def construct_blueprint(datastore: ChangeDetectionStore):
@@ -13,16 +11,9 @@ def construct_blueprint(datastore: ChangeDetectionStore):
def tags_overview_page():
from .form import SingleTag
add_form = SingleTag(request.form)
sorted_tags = sorted(datastore.data['settings']['application'].get('tags').items(), key=lambda x: x[1]['title'])
from collections import Counter
tag_count = Counter(tag for watch in datastore.data['watching'].values() if watch.get('tags') for tag in watch['tags'])
output = render_template("groups-overview.html",
available_tags=sorted_tags,
form=add_form,
tag_count=tag_count
available_tags=datastore.data['settings']['application'].get('tags', {}),
)
return output
@@ -98,55 +89,22 @@ def construct_blueprint(datastore: ChangeDetectionStore):
@tags_blueprint.route("/edit/<string:uuid>", methods=['GET'])
@login_optionally_required
def form_tag_edit(uuid):
from changedetectionio.blueprint.tags.form import group_restock_settings_form
from changedetectionio import forms
if uuid == 'first':
uuid = list(datastore.data['settings']['application']['tags'].keys()).pop()
default = datastore.data['settings']['application']['tags'].get(uuid)
form = group_restock_settings_form(formdata=request.form if request.method == 'POST' else None,
data=default,
extra_notification_tokens=datastore.get_unique_notification_tokens_available()
)
template_args = {
'data': default,
'form': form,
'watch': default,
'extra_notification_token_placeholder_info': datastore.get_unique_notification_token_placeholders_available(),
}
included_content = {}
if form.extra_form_content():
# So that the extra panels can access _helpers.html etc, we set the environment to load from templates/
# And then render the code from the module
from jinja2 import Environment, FileSystemLoader
import importlib.resources
templates_dir = str(importlib.resources.files("changedetectionio").joinpath('templates'))
env = Environment(loader=FileSystemLoader(templates_dir))
template_str = """{% from '_helpers.html' import render_field, render_checkbox_field, render_button %}
<script>
$(document).ready(function () {
toggleOpacity('#overrides_watch', '#restock-fieldset-price-group', true);
});
</script>
<fieldset>
<div class="pure-control-group">
<fieldset class="pure-group">
{{ render_checkbox_field(form.overrides_watch) }}
<span class="pure-form-message-inline">Used for watches in "Restock & Price detection" mode</span>
</fieldset>
</fieldset>
"""
template_str += form.extra_form_content()
template = env.from_string(template_str)
included_content = template.render(**template_args)
form = forms.watchForm(formdata=request.form if request.method == 'POST' else None,
data=default,
)
form.datastore=datastore # needed?
output = render_template("edit-tag.html",
data=default,
form=form,
settings_application=datastore.data['settings']['application'],
extra_tab_content=form.extra_tab_content() if form.extra_tab_content() else None,
extra_form_content=included_content,
**template_args
)
return output
@@ -155,15 +113,14 @@ def construct_blueprint(datastore: ChangeDetectionStore):
@tags_blueprint.route("/edit/<string:uuid>", methods=['POST'])
@login_optionally_required
def form_tag_edit_submit(uuid):
from changedetectionio.blueprint.tags.form import group_restock_settings_form
from changedetectionio import forms
if uuid == 'first':
uuid = list(datastore.data['settings']['application']['tags'].keys()).pop()
default = datastore.data['settings']['application']['tags'].get(uuid)
form = group_restock_settings_form(formdata=request.form if request.method == 'POST' else None,
form = forms.watchForm(formdata=request.form if request.method == 'POST' else None,
data=default,
extra_notification_tokens=datastore.get_unique_notification_tokens_available()
)
# @todo subclass form so validation works
#if not form.validate():
@@ -172,7 +129,6 @@ def construct_blueprint(datastore: ChangeDetectionStore):
# return redirect(url_for('tags.form_tag_edit_submit', uuid=uuid))
datastore.data['settings']['application']['tags'][uuid].update(form.data)
datastore.data['settings']['application']['tags'][uuid]['processor'] = 'restock_diff'
datastore.needs_write_urgent = True
flash("Updated")

View File

@@ -1,15 +1,16 @@
from wtforms import (
BooleanField,
Form,
IntegerField,
RadioField,
SelectField,
StringField,
SubmitField,
TextAreaField,
validators,
)
from wtforms.fields.simple import BooleanField
from changedetectionio.processors.restock_diff.forms import processor_settings_form as restock_settings_form
class group_restock_settings_form(restock_settings_form):
overrides_watch = BooleanField('Activate for individual watches in this tag/group?', default=False)
class SingleTag(Form):

View File

@@ -1,11 +1,7 @@
{% extends 'base.html' %}
{% block content %}
{% from '_helpers.html' import render_field, render_checkbox_field, render_button %}
{% from '_common_fields.html' import render_common_settings_form %}
<script>
const notification_base_url="{{url_for('ajax_callback_send_notification_test', mode="group-settings")}}";
</script>
{% from '_helpers.jinja' import render_field, render_checkbox_field, render_button %}
{% from '_common_fields.jinja' import render_common_settings_form %}
<script src="{{url_for('static_content', group='js', filename='tabs.js')}}" defer></script>
<script>
@@ -26,9 +22,6 @@
<ul>
<li class="tab" id=""><a href="#general">General</a></li>
<li class="tab"><a href="#filters-and-triggers">Filters &amp; Triggers</a></li>
{% if extra_tab_content %}
<li class="tab"><a href="#extras_tab">{{ extra_tab_content }}</a></li>
{% endif %}
<li class="tab"><a href="#notifications">Notifications</a></li>
</ul>
</div>
@@ -66,18 +59,17 @@ xpath://body/div/span[contains(@class, 'example-class')]",
<ul>
<li>JSONPath: Prefix with <code>json:</code>, use <code>json:$</code> to force re-formatting if required, <a href="https://jsonpath.com/" target="new">test your JSONPath here</a>.</li>
{% if jq_support %}
<li>jq: Prefix with <code>jq:</code> and <a href="https://jqplay.org/" target="new">test your jq here</a>. Using <a href="https://stedolan.github.io/jq/" target="new">jq</a> allows for complex filtering and processing of JSON data with built-in functions, regex, filtering, and more. See examples and documentation <a href="https://stedolan.github.io/jq/manual/" target="new">here</a>. Prefix <code>jqraw:</code> outputs the results as text instead of a JSON list.</li>
<li>jq: Prefix with <code>jq:</code> and <a href="https://jqplay.org/" target="new">test your jq here</a>. Using <a href="https://stedolan.github.io/jq/" target="new">jq</a> allows for complex filtering and processing of JSON data with built-in functions, regex, filtering, and more. See examples and documentation <a href="https://stedolan.github.io/jq/manual/" target="new">here</a>.</li>
{% else %}
<li>jq support not installed</li>
{% endif %}
</ul>
</li>
<li>XPath - Limit text to this XPath rule, simply start with a forward-slash. To specify XPath explicitly, or when the rule starts with an XPath function, prefix with <code>xpath:</code>
<li>XPath - Limit text to this XPath rule, simply start with a forward-slash,
<ul>
<li>Example: <code>//*[contains(@class, 'sametext')]</code> or <code>xpath:count(//*[contains(@class, 'sametext')])</code>, <a
<li>Example: <code>//*[contains(@class, 'sametext')]</code> or <code>xpath://*[contains(@class, 'sametext')]</code>, <a
href="http://xpather.com/" target="new">test your XPath here</a></li>
<li>Example: Get all titles from an RSS feed <code>//title/text()</code></li>
<li>To use XPath1.0: Prefix with <code>xpath1:</code></li>
</ul>
</li>
</ul>
@@ -100,12 +92,6 @@ nav
</div>
{# rendered sub Template #}
{% if extra_form_content %}
<div class="tab-pane-inner" id="extras_tab">
{{ extra_form_content|safe }}
</div>
{% endif %}
<div class="tab-pane-inner" id="notifications">
<fieldset>
<div class="pure-control-group inline-radio">
@@ -128,7 +114,7 @@ nav
{% endif %}
<a href="#notifications" id="notification-setting-reset-to-default" class="pure-button button-xsmall" style="right: 20px; top: 20px; position: absolute; background-color: #5f42dd; border-radius: 4px; font-size: 70%; color: #fff">Use system defaults</a>
{{ render_common_settings_form(form, emailprefix, settings_application, extra_notification_token_placeholder_info) }}
{{ render_common_settings_form(form, emailprefix, settings_application) }}
</div>
</fieldset>
</div>

View File

@@ -1,6 +1,6 @@
{% extends 'base.html' %}
{% block content %}
{% from '_helpers.html' import render_simple_field, render_field %}
{% from '_helpers.jinja' import render_simple_field, render_field %}
<script src="{{url_for('static_content', group='js', filename='jquery-3.6.0.min.js')}}"></script>
<div class="box">
@@ -27,7 +27,6 @@
<thead>
<tr>
<th></th>
<th># Watches</th>
<th>Tag / Label name</th>
<th></th>
</tr>
@@ -41,13 +40,12 @@
<td colspan="3">No website organisational tags/groups configured</td>
</tr>
{% endif %}
{% for uuid, tag in available_tags %}
{% for uuid, tag in available_tags.items() %}
<tr id="{{ uuid }}" class="{{ loop.cycle('pure-table-odd', 'pure-table-even') }}">
<td class="watch-controls">
<a class="link-mute state-{{'on' if tag.notification_muted else 'off'}}" href="{{url_for('tags.mute', uuid=tag.uuid)}}"><img src="{{url_for('static_content', group='images', filename='bell-off.svg')}}" alt="Mute notifications" title="Mute notifications" class="icon icon-mute" ></a>
</td>
<td>{{ "{:,}".format(tag_count[uuid]) if uuid in tag_count else 0 }}</td>
<td class="title-col inline"> <a href="{{url_for('index', tag=uuid) }}">{{ tag.title }}</a></td>
<td class="title-col inline">{{tag.title}}</td>
<td>
<a class="pure-button pure-button-primary" href="{{ url_for('tags.form_tag_edit', uuid=uuid) }}">Edit</a>&nbsp;
<a class="pure-button pure-button-primary" href="{{ url_for('tags.delete', uuid=uuid) }}" title="Deletes and removes tag">Delete</a>

View File

@@ -0,0 +1,153 @@
#!/usr/bin/python3
# Launch as an eventlet.wsgi server instance.
from distutils.util import strtobool
from json.decoder import JSONDecodeError
import eventlet
import eventlet.wsgi
import getopt
import os
import signal
import socket
import sys
from . import store, changedetection_app, content_fetcher
from . import __version__
# Only global so we can access it in the signal handler
app = None
datastore = None
def sigterm_handler(_signo, _stack_frame):
global app
global datastore
# app.config.exit.set()
print('Shutdown: Got SIGTERM, DB saved to disk')
datastore.sync_to_json()
# raise SystemExit
def main():
global datastore
global app
datastore_path = None
do_cleanup = False
host = ''
ipv6_enabled = False
port = os.environ.get('PORT') or 5000
ssl_mode = False
# On Windows, create and use a default path.
if os.name == 'nt':
datastore_path = os.path.expandvars(r'%APPDATA%\changedetection.io')
os.makedirs(datastore_path, exist_ok=True)
else:
# Must be absolute so that send_from_directory doesn't try to make it relative to backend/
datastore_path = os.path.join(os.getcwd(), "../datastore")
try:
opts, args = getopt.getopt(sys.argv[1:], "6Ccsd:h:p:", "port")
except getopt.GetoptError:
print('backend.py -s SSL enable -h [host] -p [port] -d [datastore path]')
sys.exit(2)
create_datastore_dir = False
for opt, arg in opts:
if opt == '-s':
ssl_mode = True
if opt == '-h':
host = arg
if opt == '-p':
port = int(arg)
if opt == '-d':
datastore_path = arg
if opt == '-6':
print ("Enabling IPv6 listen support")
ipv6_enabled = True
# Cleanup (remove text files that aren't in the index)
if opt == '-c':
do_cleanup = True
# Create the datadir if it doesn't exist
if opt == '-C':
create_datastore_dir = True
# isn't there some @thingy to attach to each route to tell it that this route needs a datastore
app_config = {'datastore_path': datastore_path}
if not os.path.isdir(app_config['datastore_path']):
if create_datastore_dir:
os.mkdir(app_config['datastore_path'])
else:
print(
"ERROR: Directory path for the datastore '{}' does not exist, cannot start, please make sure the directory exists or specify a directory with the -d option.\n"
"Or use the -C parameter to create the directory.".format(app_config['datastore_path']), file=sys.stderr)
sys.exit(2)
try:
datastore = store.ChangeDetectionStore(datastore_path=app_config['datastore_path'], version_tag=__version__)
except JSONDecodeError as e:
# Don't start if the JSON DB looks corrupt
print ("ERROR: JSON DB or Proxy List JSON at '{}' appears to be corrupt, aborting".format(app_config['datastore_path']))
print(str(e))
return
app = changedetection_app(app_config, datastore)
signal.signal(signal.SIGTERM, sigterm_handler)
# Go into cleanup mode
if do_cleanup:
datastore.remove_unused_snapshots()
app.config['datastore_path'] = datastore_path
@app.context_processor
def inject_version():
return dict(right_sticky="v{}".format(datastore.data['version_tag']),
new_version_available=app.config['NEW_VERSION_AVAILABLE'],
has_password=datastore.data['settings']['application']['password'] != False
)
# Monitored websites will not receive a Referer header when a user clicks on an outgoing link.
# @Note: Incompatible with password login (and maybe other features) for now, submit a PR!
@app.after_request
def hide_referrer(response):
if strtobool(os.getenv("HIDE_REFERER", 'false')):
response.headers["Referrer-Policy"] = "no-referrer"
return response
# Proxy sub-directory support
# Set environment var USE_X_SETTINGS=1 on this script
# And then in your proxy_pass settings
#
# proxy_set_header Host "localhost";
# proxy_set_header X-Forwarded-Prefix /app;
if os.getenv('USE_X_SETTINGS'):
print ("USE_X_SETTINGS is ENABLED\n")
from werkzeug.middleware.proxy_fix import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app, x_prefix=1, x_host=1)
s_type = socket.AF_INET6 if ipv6_enabled else socket.AF_INET
if ssl_mode:
# @todo finalise SSL config, but this should get you in the right direction if you need it.
eventlet.wsgi.server(eventlet.wrap_ssl(eventlet.listen((host, port), s_type),
certfile='cert.pem',
keyfile='privkey.pem',
server_side=True), app)
else:
eventlet.wsgi.server(eventlet.listen((host, int(port)), s_type), app)

View File

@@ -0,0 +1,747 @@
import hashlib
from abc import abstractmethod
import chardet
import json
import logging
import os
import requests
import sys
import time
visualselector_xpath_selectors = 'div,span,form,table,tbody,tr,td,a,p,ul,li,h1,h2,h3,h4, header, footer, section, article, aside, details, main, nav, section, summary'
class Non200ErrorCodeReceived(Exception):
def __init__(self, status_code, url, screenshot=None, xpath_data=None, page_html=None):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
self.screenshot = screenshot
self.xpath_data = xpath_data
self.page_text = None
if page_html:
from changedetectionio import html_tools
self.page_text = html_tools.html_to_text(page_html)
return
class checksumFromPreviousCheckWasTheSame(Exception):
def __init__(self):
return
class JSActionExceptions(Exception):
def __init__(self, status_code, url, screenshot, message=''):
self.status_code = status_code
self.url = url
self.screenshot = screenshot
self.message = message
return
class BrowserStepsStepTimout(Exception):
def __init__(self, step_n):
self.step_n = step_n
return
class PageUnloadable(Exception):
def __init__(self, status_code, url, message, screenshot=False):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
self.screenshot = screenshot
self.message = message
return
class EmptyReply(Exception):
def __init__(self, status_code, url, screenshot=None):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
self.screenshot = screenshot
return
class ScreenshotUnavailable(Exception):
def __init__(self, status_code, url, page_html=None):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
if page_html:
from html_tools import html_to_text
self.page_text = html_to_text(page_html)
return
class ReplyWithContentButNoText(Exception):
def __init__(self, status_code, url, screenshot=None):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
self.screenshot = screenshot
return
class Fetcher():
browser_steps = None
browser_steps_screenshot_path = None
content = None
error = None
fetcher_description = "No description"
headers = {}
status_code = None
webdriver_js_execute_code = None
xpath_data = None
xpath_element_js = ""
instock_data = None
instock_data_js = ""
# Will be needed in the future by the VisualSelector, always get this where possible.
screenshot = False
system_http_proxy = os.getenv('HTTP_PROXY')
system_https_proxy = os.getenv('HTTPS_PROXY')
# Time ONTOP of the system defined env minimum time
render_extract_delay = 0
def __init__(self):
from pkg_resources import resource_string
# The code that scrapes elements and makes a list of elements/size/position to click on in the VisualSelector
self.xpath_element_js = resource_string(__name__, "res/xpath_element_scraper.js").decode('utf-8')
self.instock_data_js = resource_string(__name__, "res/stock-not-in-stock.js").decode('utf-8')
@abstractmethod
def get_error(self):
return self.error
@abstractmethod
def run(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False):
# Should set self.error, self.status_code and self.content
pass
@abstractmethod
def quit(self):
return
@abstractmethod
def get_last_status_code(self):
return self.status_code
@abstractmethod
def screenshot_step(self, step_n):
return None
@abstractmethod
# Return true/false if this checker is ready to run, in the case it needs todo some special config check etc
def is_ready(self):
return True
def get_all_headers(self):
"""
Get all headers but ensure all keys are lowercase
:return:
"""
return {k.lower(): v for k, v in self.headers.items()}
def iterate_browser_steps(self):
from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface
from playwright._impl._api_types import TimeoutError
from jinja2 import Environment
jinja2_env = Environment(extensions=['jinja2_time.TimeExtension'])
step_n = 0
if self.browser_steps is not None and len(self.browser_steps):
interface = steppable_browser_interface()
interface.page = self.page
valid_steps = filter(
lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'),
self.browser_steps)
for step in valid_steps:
step_n += 1
print(">> Iterating check - browser Step n {} - {}...".format(step_n, step['operation']))
self.screenshot_step("before-" + str(step_n))
self.save_step_html("before-" + str(step_n))
try:
optional_value = step['optional_value']
selector = step['selector']
# Support for jinja2 template in step values, with date module added
if '{%' in step['optional_value'] or '{{' in step['optional_value']:
optional_value = str(jinja2_env.from_string(step['optional_value']).render())
if '{%' in step['selector'] or '{{' in step['selector']:
selector = str(jinja2_env.from_string(step['selector']).render())
getattr(interface, "call_action")(action_name=step['operation'],
selector=selector,
optional_value=optional_value)
self.screenshot_step(step_n)
self.save_step_html(step_n)
except TimeoutError as e:
print(str(e))
# Stop processing here
raise BrowserStepsStepTimout(step_n=step_n)
# It's always good to reset these
def delete_browser_steps_screenshots(self):
import glob
if self.browser_steps_screenshot_path is not None:
dest = os.path.join(self.browser_steps_screenshot_path, 'step_*.jpeg')
files = glob.glob(dest)
for f in files:
if os.path.isfile(f):
os.unlink(f)
# Maybe for the future, each fetcher provides its own diff output, could be used for text, image
# the current one would return javascript output (as we use JS to generate the diff)
#
def available_fetchers():
# See the if statement at the bottom of this file for how we switch between playwright and webdriver
import inspect
p = []
for name, obj in inspect.getmembers(sys.modules[__name__], inspect.isclass):
if inspect.isclass(obj):
# @todo html_ is maybe better as fetcher_ or something
# In this case, make sure to edit the default one in store.py and fetch_site_status.py
if name.startswith('html_'):
t = tuple([name, obj.fetcher_description])
p.append(t)
return p
class base_html_playwright(Fetcher):
fetcher_description = "Playwright {}/Javascript".format(
os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').capitalize()
)
if os.getenv("PLAYWRIGHT_DRIVER_URL"):
fetcher_description += " via '{}'".format(os.getenv("PLAYWRIGHT_DRIVER_URL"))
browser_type = ''
command_executor = ''
# Configs for Proxy setup
# In the ENV vars, is prefixed with "playwright_proxy_", so it is for example "playwright_proxy_server"
playwright_proxy_settings_mappings = ['bypass', 'server', 'username', 'password']
proxy = None
def __init__(self, proxy_override=None):
super().__init__()
# .strip('"') is going to save someone a lot of time when they accidentally wrap the env value
self.browser_type = os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').strip('"')
self.command_executor = os.getenv(
"PLAYWRIGHT_DRIVER_URL",
'ws://playwright-chrome:3000'
).strip('"')
# If any proxy settings are enabled, then we should setup the proxy object
proxy_args = {}
for k in self.playwright_proxy_settings_mappings:
v = os.getenv('playwright_proxy_' + k, False)
if v:
proxy_args[k] = v.strip('"')
if proxy_args:
self.proxy = proxy_args
# allow per-watch proxy selection override
if proxy_override:
self.proxy = {'server': proxy_override}
if self.proxy:
# Playwright needs separate username and password values
from urllib.parse import urlparse
parsed = urlparse(self.proxy.get('server'))
if parsed.username:
self.proxy['username'] = parsed.username
self.proxy['password'] = parsed.password
def screenshot_step(self, step_n=''):
screenshot = self.page.screenshot(type='jpeg', full_page=True, quality=85)
if self.browser_steps_screenshot_path is not None:
destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.jpeg'.format(step_n))
logging.debug("Saving step screenshot to {}".format(destination))
with open(destination, 'wb') as f:
f.write(screenshot)
def save_step_html(self, step_n):
content = self.page.content()
destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.html'.format(step_n))
logging.debug("Saving step HTML to {}".format(destination))
with open(destination, 'w') as f:
f.write(content)
def run_fetch_browserless_puppeteer(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False):
from pkg_resources import resource_string
extra_wait_ms = (int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay) * 1000
self.xpath_element_js = self.xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors)
code = resource_string(__name__, "res/puppeteer_fetch.js").decode('utf-8')
# In the future inject this is a proper JS package
code = code.replace('%xpath_scrape_code%', self.xpath_element_js)
code = code.replace('%instock_scrape_code%', self.instock_data_js)
from requests.exceptions import ConnectTimeout, ReadTimeout
wait_browserless_seconds = 240
browserless_function_url = os.getenv('BROWSERLESS_FUNCTION_URL')
from urllib.parse import urlparse
if not browserless_function_url:
# Convert/try to guess from PLAYWRIGHT_DRIVER_URL
o = urlparse(os.getenv('PLAYWRIGHT_DRIVER_URL'))
browserless_function_url = o._replace(scheme="http")._replace(path="/function").geturl()  # leading slash needed, urlunparse does not add one
# Append proxy connect string
if self.proxy:
import urllib.parse
# Remove username/password if it exists in the URL or you will receive "ERR_NO_SUPPORTED_PROXIES" error
# Actual authentication handled by Puppeteer/node
o = urlparse(self.proxy.get('server'))
proxy_url = urllib.parse.quote(o._replace(netloc="{}:{}".format(o.hostname, o.port)).geturl())
browserless_function_url = f"{browserless_function_url}&--proxy-server={proxy_url}&dumpio=true"
try:
amp = '&' if '?' in browserless_function_url else '?'
response = requests.request(
method="POST",
json={
"code": code,
"context": {
# Very primitive disk cache - USE WITH EXTREME CAUTION
# Run browserless container with -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]"
'disk_cache_dir': os.getenv("PUPPETEER_DISK_CACHE", False), # or path to disk cache ending in /, ie /tmp/cache/
'execute_js': self.webdriver_js_execute_code,
'extra_wait_ms': extra_wait_ms,
'include_filters': current_include_filters,
'req_headers': request_headers,
'screenshot_quality': int(os.getenv("PLAYWRIGHT_SCREENSHOT_QUALITY", 72)),
'url': url,
'user_agent': request_headers.get('User-Agent', 'Mozilla/5.0'),
'proxy_username': self.proxy.get('username','') if self.proxy else False,
'proxy_password': self.proxy.get('password', '') if self.proxy else False,
'no_cache_list': [
'twitter',
'.pdf'
],
# Could use https://github.com/easylist/easylist here, or install a plugin
'block_url_list': [
'adnxs.com',
'analytics.twitter.com',
'doubleclick.net',
'google-analytics.com',
'googletagmanager',
'trustpilot.com'
]
}
},
# @todo /function needs the ws:// to http:// scheme rewrite applied here too, rebuild this
url=browserless_function_url+f"{amp}--disable-features=AudioServiceOutOfProcess&dumpio=true&--disable-remote-fonts",
timeout=wait_browserless_seconds)
except ReadTimeout:
raise PageUnloadable(url=url, status_code=None, message=f"No response from browserless in {wait_browserless_seconds}s")
except ConnectTimeout:
raise PageUnloadable(url=url, status_code=None, message=f"Timed out connecting to browserless, retrying..")
else:
# 200 here means only that the communication to browserless worked, not the page state
if response.status_code == 200:
import base64
x = response.json()
if not x.get('screenshot'):
# https://github.com/puppeteer/puppeteer/blob/v1.0.0/docs/troubleshooting.md#tips
# https://github.com/puppeteer/puppeteer/issues/1834
# https://github.com/puppeteer/puppeteer/issues/1834#issuecomment-381047051
# Check your memory is shared and big enough
raise ScreenshotUnavailable(url=url, status_code=None)
if not x.get('content', '').strip():
raise EmptyReply(url=url, status_code=None)
if x.get('status_code', 200) != 200 and not ignore_status_codes:
raise Non200ErrorCodeReceived(url=url, status_code=x.get('status_code', 200), page_html=x['content'])
self.content = x.get('content')
self.headers = x.get('headers')
self.instock_data = x.get('instock_data')
self.screenshot = base64.b64decode(x.get('screenshot'))
self.status_code = x.get('status_code')
self.xpath_data = x.get('xpath_data')
else:
# Some other error from browserless
raise PageUnloadable(url=url, status_code=None, message=response.content.decode('utf-8'))
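# For reference, a successful /function reply parsed above has this shape
# (keys per the parsing code, example values are assumptions):
#   {'content': '<html>...', 'headers': {...}, 'status_code': 200,
#    'screenshot': '<base64-encoded JPEG>', 'xpath_data': {...}, 'instock_data': '...'}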
def run(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False):
# USE_EXPERIMENTAL_PUPPETEER_FETCH is not supported by watches with BrowserSteps (for now!)
has_browser_steps = self.browser_steps and list(filter(
lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'),
self.browser_steps))
if not has_browser_steps:
if os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH'):
# Temporary backup solution until we rewrite the playwright code
return self.run_fetch_browserless_puppeteer(
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes,
current_include_filters,
is_binary)
from playwright.sync_api import sync_playwright
import playwright._impl._api_types
self.delete_browser_steps_screenshots()
response = None
with sync_playwright() as p:
browser_type = getattr(p, self.browser_type)
# Seemed to cause a connection Exception even though I can see it connect
# self.browser = browser_type.connect(self.command_executor, timeout=timeout*1000)
# 60,000ms connection timeout only
browser = browser_type.connect_over_cdp(self.command_executor, timeout=60000)
# Set user agent to prevent Cloudflare from blocking the browser
# Use the default one configured in the App.py model that's passed from fetch_site_status.py
context = browser.new_context(
user_agent=request_headers.get('User-Agent', 'Mozilla/5.0'),
proxy=self.proxy,
# This is needed to enable JavaScript execution on GitHub and others
bypass_csp=True,
# Should be `allow` or `block` - sites like YouTube can transmit large amounts of data via Service Workers
service_workers=os.getenv('PLAYWRIGHT_SERVICE_WORKERS', 'allow'),
# Should never be needed
accept_downloads=False
)
self.page = context.new_page()
if len(request_headers):
context.set_extra_http_headers(request_headers)
self.page.set_default_navigation_timeout(90000)
self.page.set_default_timeout(90000)
# Listen for all console events and handle errors
self.page.on("console", lambda msg: print(f"Playwright console: Watch URL: {url} {msg.type}: {msg.text} {msg.args}"))
# Goto page
try:
# Wait_until = commit
# - `'commit'` - consider operation to be finished when network response is received and the document started loading.
# Better to not use any smarts from Playwright and just wait an arbitrary number of seconds
# This seemed to solve nearly all 'TimeoutErrors'
response = self.page.goto(url, wait_until='commit')
except playwright._impl._api_types.Error as e:
# Retry once - https://github.com/browserless/chrome/issues/2485
# Sometimes errors related to invalid certs and others can be random
print("Content Fetcher > retrying request got error - ", str(e))
time.sleep(1)
response = self.page.goto(url, wait_until='commit')
except Exception as e:
print("Content Fetcher > Other exception when page.goto", str(e))
context.close()
browser.close()
raise PageUnloadable(url=url, status_code=None, message=str(e))
# Execute any browser steps
try:
extra_wait = int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay
self.page.wait_for_timeout(extra_wait * 1000)
if self.webdriver_js_execute_code is not None and len(self.webdriver_js_execute_code):
self.page.evaluate(self.webdriver_js_execute_code)
except playwright._impl._api_types.TimeoutError as e:
context.close()
browser.close()
# This can be ok, we will try to grab what we could retrieve
pass
except Exception as e:
print("Content Fetcher > Other exception when executing custom JS code", str(e))
context.close()
browser.close()
raise PageUnloadable(url=url, status_code=None, message=str(e))
if response is None:
context.close()
browser.close()
print("Content Fetcher > Response object was none")
raise EmptyReply(url=url, status_code=None)
# Run Browser Steps here
self.iterate_browser_steps()
extra_wait = int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay
time.sleep(extra_wait)
self.content = self.page.content()
self.status_code = response.status
if len(self.content.strip()) == 0:
context.close()
browser.close()
print("Content Fetcher > Content was empty")
raise EmptyReply(url=url, status_code=response.status)
self.headers = response.all_headers()
# So we can find an element on the page where its selector was entered manually (maybe not xPath etc)
if current_include_filters is not None:
self.page.evaluate("var include_filters={}".format(json.dumps(current_include_filters)))
else:
self.page.evaluate("var include_filters=''")
self.xpath_data = self.page.evaluate(
"async () => {" + self.xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors) + "}")
self.instock_data = self.page.evaluate("async () => {" + self.instock_data_js + "}")
# Bug 3 in Playwright screenshot handling
# Some bug where it gives the wrong screenshot size, but making a request with the clip set first seems to solve it
# JPEG is better here because the screenshots can be very very large
# Screenshots also travel via the ws:// (websocket) meaning that the binary data is base64 encoded
# which will significantly increase the IO size between the server and client, it's recommended to use the lowest
# acceptable screenshot quality here
try:
# The actual screenshot
self.screenshot = self.page.screenshot(type='jpeg', full_page=True,
quality=int(os.getenv("PLAYWRIGHT_SCREENSHOT_QUALITY", 72)))
except Exception as e:
context.close()
browser.close()
raise ScreenshotUnavailable(url=url, status_code=None)
context.close()
browser.close()
class base_html_webdriver(Fetcher):
if os.getenv("WEBDRIVER_URL"):
fetcher_description = "WebDriver Chrome/Javascript via '{}'".format(os.getenv("WEBDRIVER_URL"))
else:
fetcher_description = "WebDriver Chrome/Javascript"
command_executor = ''
# Configs for Proxy setup
# In the ENV vars, is prefixed with "webdriver_", so it is for example "webdriver_sslProxy"
selenium_proxy_settings_mappings = ['proxyType', 'ftpProxy', 'httpProxy', 'noProxy',
'proxyAutoconfigUrl', 'sslProxy', 'autodetect',
'socksProxy', 'socksVersion', 'socksUsername', 'socksPassword']
proxy = None
def __init__(self, proxy_override=None):
super().__init__()
from selenium.webdriver.common.proxy import Proxy as SeleniumProxy
# .strip('"') is going to save someone a lot of time when they accidently wrap the env value
self.command_executor = os.getenv("WEBDRIVER_URL", 'http://browser-chrome:4444/wd/hub').strip('"')
# If any proxy settings are enabled, then we should setup the proxy object
proxy_args = {}
for k in self.selenium_proxy_settings_mappings:
v = os.getenv('webdriver_' + k, False)
if v:
proxy_args[k] = v.strip('"')
# Map back standard HTTP_ and HTTPS_PROXY to webDriver httpProxy/sslProxy
# (keys in proxy_args were stored above without the 'webdriver_' env prefix)
if not proxy_args.get('httpProxy') and self.system_http_proxy:
proxy_args['httpProxy'] = self.system_http_proxy
if not proxy_args.get('sslProxy') and self.system_https_proxy:
proxy_args['sslProxy'] = self.system_https_proxy
# Allows override the proxy on a per-request basis
if proxy_override is not None:
proxy_args['httpProxy'] = proxy_override
if proxy_args:
self.proxy = SeleniumProxy(raw=proxy_args)
def run(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False):
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import WebDriverException
# request_body, request_method unused for now, until some magic in the future happens.
# check env for WEBDRIVER_URL
self.driver = webdriver.Remote(
command_executor=self.command_executor,
desired_capabilities=DesiredCapabilities.CHROME,
proxy=self.proxy)
try:
self.driver.get(url)
except WebDriverException as e:
# Be sure we close the session window
self.quit()
raise
self.driver.set_window_size(1280, 1024)
self.driver.implicitly_wait(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)))
if self.webdriver_js_execute_code is not None:
self.driver.execute_script(self.webdriver_js_execute_code)
# Selenium doesn't automatically wait for actions as well as Playwright does, so wait again
self.driver.implicitly_wait(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)))
# @todo - how to check this? is it possible?
self.status_code = 200
# @todo somehow we should try to get this working for WebDriver
# raise EmptyReply(url=url, status_code=r.status_code)
# @todo - dom wait loaded?
time.sleep(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay)
self.content = self.driver.page_source
self.headers = {}
self.screenshot = self.driver.get_screenshot_as_png()
# Does the connection to the webdriver work? run a test connection.
def is_ready(self):
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
self.driver = webdriver.Remote(
command_executor=self.command_executor,
desired_capabilities=DesiredCapabilities.CHROME)
# driver.quit() seems to cause better exceptions
self.quit()
return True
def quit(self):
if self.driver:
try:
self.driver.quit()
except Exception as e:
print("Content Fetcher > Exception in chrome shutdown/quit" + str(e))
# "html_requests" is listed as the default fetcher in store.py!
class html_requests(Fetcher):
fetcher_description = "Basic fast Plaintext/HTTP Client"
def __init__(self, proxy_override=None):
self.proxy_override = proxy_override
def run(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False):
# Make requests use a more modern looking user-agent
if 'User-Agent' not in request_headers:
request_headers['User-Agent'] = os.getenv("DEFAULT_SETTINGS_HEADERS_USERAGENT",
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36')
proxies = {}
# Allows override the proxy on a per-request basis
if self.proxy_override:
proxies = {'http': self.proxy_override, 'https': self.proxy_override, 'ftp': self.proxy_override}
else:
if self.system_http_proxy:
proxies['http'] = self.system_http_proxy
if self.system_https_proxy:
proxies['https'] = self.system_https_proxy
r = requests.request(method=request_method,
data=request_body,
url=url,
headers=request_headers,
timeout=timeout,
proxies=proxies,
verify=False)
# If the response did not tell us what encoding format to expect, then use chardet to override what `requests` thinks.
# For example - some sites don't tell us it's utf-8, but return utf-8 content
# This seems to not occur when using webdriver/selenium, it seems to detect the text encoding more reliably.
# https://github.com/psf/requests/issues/1604 good info about requests encoding detection
if not is_binary:
# Don't run this for PDFs (or other requests identified as binary), it takes a _long_ time
if not r.headers.get('content-type') or 'charset=' not in r.headers.get('content-type'):
encoding = chardet.detect(r.content)['encoding']
if encoding:
r.encoding = encoding
if not r.content or not len(r.content):
raise EmptyReply(url=url, status_code=r.status_code)
# @todo test this
# @todo maybe you really want to test zero-byte return pages?
if r.status_code != 200 and not ignore_status_codes:
# maybe check with content works?
raise Non200ErrorCodeReceived(url=url, status_code=r.status_code, page_html=r.text)
self.status_code = r.status_code
if is_binary:
# Binary files just return their checksum until we add something smarter
self.content = hashlib.md5(r.content).hexdigest()
else:
self.content = r.text
self.headers = r.headers
self.raw_content = r.content
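# A minimal standalone sketch of the charset fallback used in run() above
# (assumes the `requests` and `chardet` packages; the URL is illustrative):
#
#   import chardet
#   import requests
#
#   r = requests.get('https://example.com')
#   if 'charset=' not in r.headers.get('content-type', ''):
#       detected = chardet.detect(r.content)['encoding']
#       if detected:
#           r.encoding = detected  # decode r.text with the sniffed encoding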
# Decide which is the 'real' HTML webdriver, this is more a system wide config
# rather than site-specific.
use_playwright_as_chrome_fetcher = os.getenv('PLAYWRIGHT_DRIVER_URL', False)
if use_playwright_as_chrome_fetcher:
html_webdriver = base_html_playwright
else:
html_webdriver = base_html_webdriver

View File

@@ -1,43 +0,0 @@
import sys
from changedetectionio.strtobool import strtobool
from loguru import logger
from changedetectionio.content_fetchers.exceptions import BrowserStepsStepException
import os
visualselector_xpath_selectors = 'div,span,form,table,tbody,tr,td,a,p,ul,li,h1,h2,h3,h4,header,footer,section,article,aside,details,main,nav,section,summary'
# available_fetchers() will scan this implementation looking for anything starting with html_
# this information is used in the form selections
from changedetectionio.content_fetchers.requests import fetcher as html_requests
def available_fetchers():
# See the if statement at the bottom of this file for how we switch between playwright and webdriver
import inspect
p = []
for name, obj in inspect.getmembers(sys.modules[__name__], inspect.isclass):
if inspect.isclass(obj):
# @todo html_ is maybe better as fetcher_ or something
# In this case, make sure to edit the default one in store.py and fetch_site_status.py
if name.startswith('html_'):
t = tuple([name, obj.fetcher_description])
p.append(t)
return p
# Decide which is the 'real' HTML webdriver, this is more a system wide config
# rather than site-specific.
use_playwright_as_chrome_fetcher = os.getenv('PLAYWRIGHT_DRIVER_URL', False)
if use_playwright_as_chrome_fetcher:
# @note - For now, browser steps always uses playwright
if not strtobool(os.getenv('FAST_PUPPETEER_CHROME_FETCHER', 'False')):
logger.debug('Using Playwright library as fetcher')
from .playwright import fetcher as html_webdriver
else:
logger.debug('Using direct Python Puppeteer library as fetcher')
from .puppeteer import fetcher as html_webdriver
else:
logger.debug("Falling back to selenium as fetcher")
from .webdriver_selenium import fetcher as html_webdriver

View File

@@ -1,179 +0,0 @@
import os
from abc import abstractmethod
from loguru import logger
from changedetectionio.content_fetchers import BrowserStepsStepException
def manage_user_agent(headers, current_ua=''):
"""
Basic setting of user-agent
NOTE!!!!!! The service that does the actual Chrome fetching should handle any anti-robot techniques
THERE ARE MANY WAYS THAT IT CAN BE DETECTED AS A ROBOT!!
This does not take care of
- Scraping of 'navigator' (platform, productSub, vendor, oscpu etc etc) browser object (navigator.appVersion) etc
- TCP/IP fingerprint JA3 etc
- Graphic rendering fingerprinting
- Your IP being obviously in a pool of bad actors
- Too many requests
- Scraping of Sec-CH-UA browser replies (thanks google!!)
- Scraping of ServiceWorker, new window calls etc
See https://filipvitas.medium.com/how-to-set-user-agent-header-with-puppeteer-js-and-not-fail-28c7a02165da
Puppeteer requests https://github.com/dgtlmoon/pyppeteerstealth
:param headers:
:param current_ua:
:return:
"""
# If a custom User-Agent was supplied, use it; otherwise, if the reported UA is obviously HeadlessChrome, switch it back to plain Chrome
ua_in_custom_headers = headers.get('User-Agent')
if ua_in_custom_headers:
return ua_in_custom_headers
if not ua_in_custom_headers and current_ua:
current_ua = current_ua.replace('HeadlessChrome', 'Chrome')
return current_ua
return None
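# Usage sketch (inputs are assumptions):
#   manage_user_agent({'User-Agent': 'curl/8.0'})                      -> 'curl/8.0' (custom header wins)
#   manage_user_agent({}, 'Mozilla/5.0 ... HeadlessChrome/120.0.0.0')  -> 'Mozilla/5.0 ... Chrome/120.0.0.0'
#   manage_user_agent({})                                              -> None (keep the browser default)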
class Fetcher():
browser_connection_is_custom = None
browser_connection_url = None
browser_steps = None
browser_steps_screenshot_path = None
content = None
error = None
fetcher_description = "No description"
headers = {}
instock_data = None
instock_data_js = ""
status_code = None
webdriver_js_execute_code = None
xpath_data = None
xpath_element_js = ""
# Will be needed in the future by the VisualSelector, always get this where possible.
screenshot = False
system_http_proxy = os.getenv('HTTP_PROXY')
system_https_proxy = os.getenv('HTTPS_PROXY')
# Time ON TOP of the system-defined env minimum time
render_extract_delay = 0
def __init__(self):
import importlib.resources
self.xpath_element_js = importlib.resources.files("changedetectionio.content_fetchers.res").joinpath('xpath_element_scraper.js').read_text()
self.instock_data_js = importlib.resources.files("changedetectionio.content_fetchers.res").joinpath('stock-not-in-stock.js').read_text()
@abstractmethod
def get_error(self):
return self.error
@abstractmethod
def run(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False,
empty_pages_are_a_change=False):
# Should set self.error, self.status_code and self.content
pass
@abstractmethod
def quit(self):
return
@abstractmethod
def get_last_status_code(self):
return self.status_code
@abstractmethod
def screenshot_step(self, step_n):
if self.browser_steps_screenshot_path and not os.path.isdir(self.browser_steps_screenshot_path):
logger.debug(f"> Creating data dir {self.browser_steps_screenshot_path}")
os.mkdir(self.browser_steps_screenshot_path)
return None
@abstractmethod
# Return true/false if this checker is ready to run, in case it needs to do some special config check etc
def is_ready(self):
return True
def get_all_headers(self):
"""
Get all headers but ensure all keys are lowercase
:return:
"""
return {k.lower(): v for k, v in self.headers.items()}
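# e.g. (illustrative) {'Content-Type': 'text/html'} -> {'content-type': 'text/html'},
# so later header comparisons can be case-insensitive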
def browser_steps_get_valid_steps(self):
if self.browser_steps is not None and len(self.browser_steps):
valid_steps = list(filter(
lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one'),
self.browser_steps))
# Just in case they selected 'Goto site' by accident with older JS
if valid_steps and valid_steps[0]['operation'] == 'Goto site':
del(valid_steps[0])
return valid_steps
return None
def iterate_browser_steps(self, start_url=None):
from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface
from playwright._impl._errors import TimeoutError, Error
from changedetectionio.safe_jinja import render as jinja_render
step_n = 0
if self.browser_steps is not None and len(self.browser_steps):
interface = steppable_browser_interface(start_url=start_url)
interface.page = self.page
valid_steps = self.browser_steps_get_valid_steps()
for step in valid_steps:
step_n += 1
logger.debug(f">> Iterating check - browser Step n {step_n} - {step['operation']}...")
self.screenshot_step("before-" + str(step_n))
self.save_step_html("before-" + str(step_n))
try:
optional_value = step['optional_value']
selector = step['selector']
# Support for jinja2 template in step values, with date module added
if '{%' in step['optional_value'] or '{{' in step['optional_value']:
optional_value = jinja_render(template_str=step['optional_value'])
if '{%' in step['selector'] or '{{' in step['selector']:
selector = jinja_render(template_str=step['selector'])
getattr(interface, "call_action")(action_name=step['operation'],
selector=selector,
optional_value=optional_value)
self.screenshot_step(step_n)
self.save_step_html(step_n)
except (Error, TimeoutError) as e:
logger.debug(str(e))
# Stop processing here
raise BrowserStepsStepException(step_n=step_n, original_e=e)
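# Example step for the Jinja2 support above (hypothetical values): any '{{ }}' or '{% %}'
# in a step's selector or optional_value is rendered before the action runs, e.g.
#   {'operation': 'Enter text in field', 'selector': '#q', 'optional_value': "{% now 'utc', '%Y-%m-%d' %}"}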
# It's always good to reset these
def delete_browser_steps_screenshots(self):
import glob
if self.browser_steps_screenshot_path is not None:
dest = os.path.join(self.browser_steps_screenshot_path, 'step_*.jpeg')
files = glob.glob(dest)
for f in files:
if os.path.isfile(f):
os.unlink(f)
def save_step_html(self, step_n):
if self.browser_steps_screenshot_path and not os.path.isdir(self.browser_steps_screenshot_path):
logger.debug(f"> Creating data dir {self.browser_steps_screenshot_path}")
os.mkdir(self.browser_steps_screenshot_path)
pass

View File

@@ -1,97 +0,0 @@
from loguru import logger
class Non200ErrorCodeReceived(Exception):
def __init__(self, status_code, url, screenshot=None, xpath_data=None, page_html=None):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
self.screenshot = screenshot
self.xpath_data = xpath_data
self.page_text = None
if page_html:
from changedetectionio import html_tools
self.page_text = html_tools.html_to_text(page_html)
return
class checksumFromPreviousCheckWasTheSame(Exception):
def __init__(self):
return
class JSActionExceptions(Exception):
def __init__(self, status_code, url, screenshot, message=''):
self.status_code = status_code
self.url = url
self.screenshot = screenshot
self.message = message
return
class BrowserConnectError(Exception):
msg = ''
def __init__(self, msg):
self.msg = msg
logger.error(f"Browser connection error {msg}")
return
class BrowserFetchTimedOut(Exception):
msg = ''
def __init__(self, msg):
self.msg = msg
logger.error(f"Browser processing took too long - {msg}")
return
class BrowserStepsStepException(Exception):
def __init__(self, step_n, original_e):
self.step_n = step_n
self.original_e = original_e
logger.debug(f"Browser Steps exception at step {self.step_n} {str(original_e)}")
return
# @todo - make base Exception class that announces via logger()
class PageUnloadable(Exception):
def __init__(self, status_code=None, url='', message='', screenshot=False):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
self.screenshot = screenshot
self.message = message
return
class BrowserStepsInUnsupportedFetcher(Exception):
def __init__(self, url):
self.url = url
return
class EmptyReply(Exception):
def __init__(self, status_code, url, screenshot=None):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
self.screenshot = screenshot
return
class ScreenshotUnavailable(Exception):
def __init__(self, status_code, url, page_html=None):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
if page_html:
from changedetectionio.html_tools import html_to_text
self.page_text = html_to_text(page_html)
return
class ReplyWithContentButNoText(Exception):
def __init__(self, status_code, url, screenshot=None, has_filters=False, html_content='', xpath_data=None):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
self.screenshot = screenshot
self.has_filters = has_filters
self.html_content = html_content
self.xpath_data = xpath_data
return

View File

@@ -1,211 +0,0 @@
import json
import os
from urllib.parse import urlparse
from loguru import logger
from changedetectionio.content_fetchers.base import Fetcher, manage_user_agent
from changedetectionio.content_fetchers.exceptions import PageUnloadable, Non200ErrorCodeReceived, EmptyReply, ScreenshotUnavailable
class fetcher(Fetcher):
fetcher_description = "Playwright {}/Javascript".format(
os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').capitalize()
)
if os.getenv("PLAYWRIGHT_DRIVER_URL"):
fetcher_description += " via '{}'".format(os.getenv("PLAYWRIGHT_DRIVER_URL"))
browser_type = ''
command_executor = ''
# Configs for Proxy setup
# In the ENV vars, is prefixed with "playwright_proxy_", so it is for example "playwright_proxy_server"
playwright_proxy_settings_mappings = ['bypass', 'server', 'username', 'password']
proxy = None
def __init__(self, proxy_override=None, custom_browser_connection_url=None):
super().__init__()
self.browser_type = os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').strip('"')
if custom_browser_connection_url:
self.browser_connection_is_custom = True
self.browser_connection_url = custom_browser_connection_url
else:
# Fallback to fetching from system
# .strip('"') is going to save someone a lot of time when they accidently wrap the env value
self.browser_connection_url = os.getenv("PLAYWRIGHT_DRIVER_URL", 'ws://playwright-chrome:3000').strip('"')
# If any proxy settings are enabled, then we should setup the proxy object
proxy_args = {}
for k in self.playwright_proxy_settings_mappings:
v = os.getenv('playwright_proxy_' + k, False)
if v:
proxy_args[k] = v.strip('"')
if proxy_args:
self.proxy = proxy_args
# allow per-watch proxy selection override
if proxy_override:
self.proxy = {'server': proxy_override}
if self.proxy:
# Playwright needs separate username and password values
parsed = urlparse(self.proxy.get('server'))
if parsed.username:
self.proxy['username'] = parsed.username
self.proxy['password'] = parsed.password
def screenshot_step(self, step_n=''):
super().screenshot_step(step_n=step_n)
screenshot = self.page.screenshot(type='jpeg', full_page=True, quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))
if self.browser_steps_screenshot_path is not None:
destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.jpeg'.format(step_n))
logger.debug(f"Saving step screenshot to {destination}")
with open(destination, 'wb') as f:
f.write(screenshot)
def save_step_html(self, step_n):
super().save_step_html(step_n=step_n)
content = self.page.content()
destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.html'.format(step_n))
logger.debug(f"Saving step HTML to {destination}")
with open(destination, 'w') as f:
f.write(content)
def run(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False,
empty_pages_are_a_change=False):
from playwright.sync_api import sync_playwright
import playwright._impl._errors
from changedetectionio.content_fetchers import visualselector_xpath_selectors
self.delete_browser_steps_screenshots()
response = None
with sync_playwright() as p:
browser_type = getattr(p, self.browser_type)
# Seemed to cause a connection Exception even though I can see it connect
# self.browser = browser_type.connect(self.command_executor, timeout=timeout*1000)
# 60,000ms connection timeout only
browser = browser_type.connect_over_cdp(self.browser_connection_url, timeout=60000)
# SOCKS5 with authentication is not supported (yet)
# https://github.com/microsoft/playwright/issues/10567
# Set user agent to prevent Cloudflare from blocking the browser
# Use the default one configured in the App.py model that's passed from fetch_site_status.py
context = browser.new_context(
accept_downloads=False, # Should never be needed
bypass_csp=True, # This is needed to enable JavaScript execution on GitHub and others
extra_http_headers=request_headers,
ignore_https_errors=True,
proxy=self.proxy,
service_workers=os.getenv('PLAYWRIGHT_SERVICE_WORKERS', 'allow'), # Should be `allow` or `block` - sites like YouTube can transmit large amounts of data via Service Workers
user_agent=manage_user_agent(headers=request_headers),
)
self.page = context.new_page()
# Listen for all console events and handle errors
self.page.on("console", lambda msg: print(f"Playwright console: Watch URL: {url} {msg.type}: {msg.text} {msg.args}"))
# Re-use as much code from browser steps as possible so it's the same
from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface
browsersteps_interface = steppable_browser_interface(start_url=url)
browsersteps_interface.page = self.page
response = browsersteps_interface.action_goto_url(value=url)
if response is None:
context.close()
browser.close()
logger.debug("Content Fetcher > Response object from the browser communication was none")
raise EmptyReply(url=url, status_code=None)
# Only safe to read headers once we know the response exists
self.headers = response.all_headers()
try:
if self.webdriver_js_execute_code is not None and len(self.webdriver_js_execute_code):
browsersteps_interface.action_execute_js(value=self.webdriver_js_execute_code, selector=None)
except playwright._impl._errors.TimeoutError as e:
context.close()
browser.close()
# This can be ok, we will try to grab what we could retrieve
pass
except Exception as e:
logger.debug(f"Content Fetcher > Other exception when executing custom JS code {str(e)}")
context.close()
browser.close()
raise PageUnloadable(url=url, status_code=None, message=str(e))
extra_wait = int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay
self.page.wait_for_timeout(extra_wait * 1000)
try:
self.status_code = response.status
except Exception as e:
# https://github.com/dgtlmoon/changedetection.io/discussions/2122#discussioncomment-8241962
logger.critical(f"Response from the browser/Playwright did not have a status_code! Response follows.")
logger.critical(response)
context.close()
browser.close()
raise PageUnloadable(url=url, status_code=None, message=str(e))
if self.status_code != 200 and not ignore_status_codes:
screenshot = self.page.screenshot(type='jpeg', full_page=True,
quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))
raise Non200ErrorCodeReceived(url=url, status_code=self.status_code, screenshot=screenshot)
if not empty_pages_are_a_change and len(self.page.content().strip()) == 0:
logger.debug("Content Fetcher > Content was empty, empty_pages_are_a_change = False")
context.close()
browser.close()
raise EmptyReply(url=url, status_code=response.status)
# Run Browser Steps here
if self.browser_steps_get_valid_steps():
self.iterate_browser_steps(start_url=url)
self.page.wait_for_timeout(extra_wait * 1000)
# So we can find an element on the page where its selector was entered manually (maybe not xPath etc)
if current_include_filters is not None:
self.page.evaluate("var include_filters={}".format(json.dumps(current_include_filters)))
else:
self.page.evaluate("var include_filters=''")
self.xpath_data = self.page.evaluate(
"async () => {" + self.xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors) + "}")
self.instock_data = self.page.evaluate("async () => {" + self.instock_data_js + "}")
self.content = self.page.content()
# Bug 3 in Playwright screenshot handling
# Some bug where it gives the wrong screenshot size, but making a request with the clip set first seems to solve it
# JPEG is better here because the screenshots can be very very large
# Screenshots also travel via the ws:// (websocket) meaning that the binary data is base64 encoded
# which will significantly increase the IO size between the server and client, it's recommended to use the lowest
# acceptable screenshot quality here
try:
# The actual screenshot - this is always base64 and needs decoding! horrible! huge CPU usage
self.screenshot = self.page.screenshot(type='jpeg',
full_page=True,
quality=int(os.getenv("SCREENSHOT_QUALITY", 72)),
)
except Exception as e:
# It's likely the screenshot was too long/big and something crashed
raise ScreenshotUnavailable(url=url, status_code=self.status_code)
finally:
context.close()
browser.close()

View File

@@ -1,272 +0,0 @@
import asyncio
import json
import os
import websockets.exceptions
from urllib.parse import urlparse
from loguru import logger
from changedetectionio.content_fetchers.base import Fetcher, manage_user_agent
from changedetectionio.content_fetchers.exceptions import PageUnloadable, Non200ErrorCodeReceived, EmptyReply, BrowserFetchTimedOut, BrowserConnectError
class fetcher(Fetcher):
fetcher_description = "Puppeteer/direct {}/Javascript".format(
os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').capitalize()
)
if os.getenv("PLAYWRIGHT_DRIVER_URL"):
fetcher_description += " via '{}'".format(os.getenv("PLAYWRIGHT_DRIVER_URL"))
browser_type = ''
command_executor = ''
proxy = None
def __init__(self, proxy_override=None, custom_browser_connection_url=None):
super().__init__()
if custom_browser_connection_url:
self.browser_connection_is_custom = True
self.browser_connection_url = custom_browser_connection_url
else:
# Fallback to fetching from system
# .strip('"') is going to save someone a lot of time when they accidently wrap the env value
self.browser_connection_url = os.getenv("PLAYWRIGHT_DRIVER_URL", 'ws://playwright-chrome:3000').strip('"')
# allow per-watch proxy selection override
# @todo check global too?
if proxy_override:
# Playwright needs separate username and password values
parsed = urlparse(proxy_override)
if parsed:
self.proxy = {'username': parsed.username, 'password': parsed.password}
# Add the proxy server chrome start option, the username and password never gets added here
# (It always goes in via await self.page.authenticate(self.proxy))
# @todo filter some injection attack?
# check scheme when no scheme
proxy_url = parsed.scheme + "://" if parsed.scheme else 'http://'
r = "?" if not '?' in self.browser_connection_url else '&'
port = ":"+str(parsed.port) if parsed.port else ''
q = "?"+parsed.query if parsed.query else ''
proxy_url += f"{parsed.hostname}{port}{parsed.path}{q}"
self.browser_connection_url += f"{r}--proxy-server={proxy_url}"
# def screenshot_step(self, step_n=''):
# screenshot = self.page.screenshot(type='jpeg', full_page=True, quality=85)
#
# if self.browser_steps_screenshot_path is not None:
# destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.jpeg'.format(step_n))
# logger.debug(f"Saving step screenshot to {destination}")
# with open(destination, 'wb') as f:
# f.write(screenshot)
#
# def save_step_html(self, step_n):
# content = self.page.content()
# destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.html'.format(step_n))
# logger.debug(f"Saving step HTML to {destination}")
# with open(destination, 'w') as f:
# f.write(content)
async def fetch_page(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes,
current_include_filters,
is_binary,
empty_pages_are_a_change
):
from changedetectionio.content_fetchers import visualselector_xpath_selectors
self.delete_browser_steps_screenshots()
extra_wait = int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay
from pyppeteer import Pyppeteer
pyppeteer_instance = Pyppeteer()
# Connect directly using the specified browser_ws_endpoint
# @todo timeout
try:
browser = await pyppeteer_instance.connect(browserWSEndpoint=self.browser_connection_url,
ignoreHTTPSErrors=True
)
except websockets.exceptions.InvalidStatusCode as e:
raise BrowserConnectError(msg=f"Error while trying to connect the browser, Code {e.status_code} (check your access, whitelist IP, password etc)")
except websockets.exceptions.InvalidURI:
raise BrowserConnectError(msg=f"Error connecting to the browser, check your browser connection address (should be ws:// or wss://")
except Exception as e:
raise BrowserConnectError(msg=f"Error connecting to the browser {str(e)}")
# Better would be to launch chrome with the URL as an arg
# non-headless - newPage() will launch an extra tab/window, .browser should already contain 1 page/tab
# headless - ask for a new page
self.page = pages[0] if (pages := await browser.pages) else await browser.newPage()  # reuse an existing tab where possible
try:
from pyppeteerstealth import inject_evasions_into_page
except ImportError:
logger.debug("pyppeteerstealth module not available, skipping")
pass
else:
# I tried hooking events via self.page.on(Events.Page.DOMContentLoaded, inject_evasions_requiring_obj_to_page)
# But I could never get it to fire reliably, so we just inject it straight after
await inject_evasions_into_page(self.page)
# This user agent is similar to what was used when tweaking the evasions in inject_evasions_into_page(..)
user_agent = None
if request_headers and request_headers.get('User-Agent'):
# request_headers should now be a CaseInsensitiveDict
# Remove it so it's not sent again with the headers afterwards
user_agent = request_headers.pop('User-Agent').strip()
await self.page.setUserAgent(user_agent)
if not user_agent:
# Attempt to strip 'HeadlessChrome' etc
await self.page.setUserAgent(manage_user_agent(headers=request_headers, current_ua=await self.page.evaluate('navigator.userAgent')))
await self.page.setBypassCSP(True)
if request_headers:
await self.page.setExtraHTTPHeaders(request_headers)
# SOCKS5 with authentication is not supported (yet)
# https://github.com/microsoft/playwright/issues/10567
self.page.setDefaultNavigationTimeout(0)
await self.page.setCacheEnabled(True)
if self.proxy and self.proxy.get('username'):
# Setting Proxy-Authentication header is deprecated, and doing so can trigger header change errors from Puppeteer
# https://github.com/puppeteer/puppeteer/issues/676 ?
# https://help.brightdata.com/hc/en-us/articles/12632549957649-Proxy-Manager-How-to-Guides#h_01HAKWR4Q0AFS8RZTNYWRDFJC2
# https://cri.dev/posts/2020-03-30-How-to-solve-Puppeteer-Chrome-Error-ERR_INVALID_ARGUMENT/
await self.page.authenticate(self.proxy)
# Re-use as much code from browser steps as possible so it's the same
# from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface
# not yet used here, we fallback to playwright when browsersteps is required
# browsersteps_interface = steppable_browser_interface()
# browsersteps_interface.page = self.page
response = await self.page.goto(url, waitUntil="load")
if response is None:
await self.page.close()
await browser.close()
logger.warning("Content Fetcher > Response object was none (as in, the response from the browser was empty, not just the content)")
raise EmptyReply(url=url, status_code=None)
self.headers = response.headers
try:
if self.webdriver_js_execute_code is not None and len(self.webdriver_js_execute_code):
await self.page.evaluate(self.webdriver_js_execute_code)
except Exception as e:
logger.warning("Got exception when running evaluate on custom JS code")
logger.error(str(e))
await self.page.close()
await browser.close()
# This can be ok, we will try to grab what we could retrieve
raise PageUnloadable(url=url, status_code=None, message=str(e))
try:
self.status_code = response.status
except Exception as e:
# https://github.com/dgtlmoon/changedetection.io/discussions/2122#discussioncomment-8241962
logger.critical(f"Response from the browser/Playwright did not have a status_code! Response follows.")
logger.critical(response)
await self.page.close()
await browser.close()
raise PageUnloadable(url=url, status_code=None, message=str(e))
if self.status_code != 200 and not ignore_status_codes:
screenshot = await self.page.screenshot(type_='jpeg',
fullPage=True,
quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))
raise Non200ErrorCodeReceived(url=url, status_code=self.status_code, screenshot=screenshot)
content = await self.page.content
if not empty_pages_are_a_change and len(content.strip()) == 0:
logger.error("Content Fetcher > Content was empty (empty_pages_are_a_change is False), closing browsers")
await self.page.close()
await browser.close()
raise EmptyReply(url=url, status_code=response.status)
# Run Browser Steps here
# @todo not yet supported, we switch to playwright in this case
# if self.browser_steps_get_valid_steps():
# self.iterate_browser_steps()
await asyncio.sleep(1 + extra_wait)
# So we can find an element on the page where its selector was entered manually (maybe not xPath etc)
# Setup the xPath/VisualSelector scraper
if current_include_filters is not None:
js = json.dumps(current_include_filters)
await self.page.evaluate(f"var include_filters={js}")
else:
await self.page.evaluate(f"var include_filters=''")
self.xpath_data = await self.page.evaluate(
"async () => {" + self.xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors) + "}")
self.instock_data = await self.page.evaluate("async () => {" + self.instock_data_js + "}")
self.content = await self.page.content
# Bug 3 in Playwright screenshot handling
# Some bug where it gives the wrong screenshot size, but making a request with the clip set first seems to solve it
# JPEG is better here because the screenshots can be very very large
# Screenshots also travel via the ws:// (websocket) meaning that the binary data is base64 encoded
# which will significantly increase the IO size between the server and client, it's recommended to use the lowest
# acceptable screenshot quality here
try:
self.screenshot = await self.page.screenshot(type_='jpeg',
fullPage=True,
quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))
except Exception as e:
logger.error("Error fetching screenshot")
# May fail on very large pages with 'WARNING: tile memory limits exceeded, some content may not draw'
# @todo after text extract, we can place some overlay text with red background to say 'cropped'
try:
self.screenshot = await self.page.screenshot(type_='jpeg',
fullPage=False,
quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))
except Exception as e:
logger.error('ERROR: Failed to get viewport-only reduced screenshot :(')
pass
finally:
# It's good to log here in the case that the browser crashes on shutting down but we still get the data we need
logger.success(f"Fetching '{url}' complete, closing page")
await self.page.close()
logger.success(f"Fetching '{url}' complete, closing browser")
await browser.close()
logger.success(f"Fetching '{url}' complete, exiting puppeteer fetch.")
async def main(self, **kwargs):
await self.fetch_page(**kwargs)
def run(self, url, timeout, request_headers, request_body, request_method, ignore_status_codes=False,
current_include_filters=None, is_binary=False, empty_pages_are_a_change=False):
#@todo make update_worker async which could run any of these content_fetchers within memory and time constraints
max_time = int(os.getenv('PUPPETEER_MAX_PROCESSING_TIMEOUT_SECONDS', 180))  # env values arrive as strings
# This will work in 3.10 but not >= 3.11 because 3.11 wants tasks only
try:
asyncio.run(asyncio.wait_for(self.main(
url=url,
timeout=timeout,
request_headers=request_headers,
request_body=request_body,
request_method=request_method,
ignore_status_codes=ignore_status_codes,
current_include_filters=current_include_filters,
is_binary=is_binary,
empty_pages_are_a_change=empty_pages_are_a_change
), timeout=max_time))
except asyncio.TimeoutError:
raise(BrowserFetchTimedOut(msg=f"Browser connected but was unable to process the page in {max_time} seconds."))

View File

@@ -1,96 +0,0 @@
from loguru import logger
import chardet
import hashlib
import os
import requests
from changedetectionio.strtobool import strtobool
from changedetectionio.content_fetchers.exceptions import BrowserStepsInUnsupportedFetcher, EmptyReply, Non200ErrorCodeReceived
from changedetectionio.content_fetchers.base import Fetcher
# "html_requests" is listed as the default fetcher in store.py!
class fetcher(Fetcher):
fetcher_description = "Basic fast Plaintext/HTTP Client"
def __init__(self, proxy_override=None, custom_browser_connection_url=None):
super().__init__()
self.proxy_override = proxy_override
# browser_connection_url is None because it's always 'launched locally'
def run(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False,
empty_pages_are_a_change=False):
if self.browser_steps_get_valid_steps():
raise BrowserStepsInUnsupportedFetcher(url=url)
proxies = {}
# Allows override the proxy on a per-request basis
# https://requests.readthedocs.io/en/latest/user/advanced/#socks
# Should also work with `socks5://user:pass@host:port` type syntax.
if self.proxy_override:
proxies = {'http': self.proxy_override, 'https': self.proxy_override, 'ftp': self.proxy_override}
else:
if self.system_http_proxy:
proxies['http'] = self.system_http_proxy
if self.system_https_proxy:
proxies['https'] = self.system_https_proxy
session = requests.Session()
if strtobool(os.getenv('ALLOW_FILE_URI', 'false')) and url.startswith('file://'):
from requests_file import FileAdapter
session.mount('file://', FileAdapter())
r = session.request(method=request_method,
data=request_body.encode('utf-8') if type(request_body) is str else request_body,
url=url,
headers=request_headers,
timeout=timeout,
proxies=proxies,
verify=False)
# If the response did not tell us what encoding format to expect, then use chardet to override what `requests` thinks.
# For example - some sites don't tell us it's utf-8, but return utf-8 content
# This seems to not occur when using webdriver/selenium, it seems to detect the text encoding more reliably.
# https://github.com/psf/requests/issues/1604 good info about requests encoding detection
if not is_binary:
# Don't run this for PDFs (or other requests identified as binary), it takes a _long_ time
if not r.headers.get('content-type') or 'charset=' not in r.headers.get('content-type'):
encoding = chardet.detect(r.content)['encoding']
if encoding:
r.encoding = encoding
self.headers = r.headers
if not r.content or not len(r.content):
if not empty_pages_are_a_change:
raise EmptyReply(url=url, status_code=r.status_code)
else:
logger.debug(f"URL {url} gave zero byte content reply with Status Code {r.status_code}, but empty_pages_are_a_change = True")
# @todo test this
# @todo maybe you really want to test zero-byte return pages?
if r.status_code != 200 and not ignore_status_codes:
# maybe check with content works?
raise Non200ErrorCodeReceived(url=url, status_code=r.status_code, page_html=r.text)
self.status_code = r.status_code
if is_binary:
# Binary files just return their checksum until we add something smarter
self.content = hashlib.md5(r.content).hexdigest()
else:
self.content = r.text
self.raw_content = r.content

View File

@@ -1 +0,0 @@
# resources for browser injection/scraping

View File

@@ -1,204 +0,0 @@
// Restock Detector
// (c) Leigh Morresi dgtlmoon@gmail.com
//
// Assumes the product is in stock to begin with, unless the following appears above the fold:
// - outOfStockTexts appears above the fold (out of stock)
// - negateOutOfStockRegex (really is in stock)
function isItemInStock() {
// @todo Pass these in so the same list can be used in non-JS fetchers
const outOfStockTexts = [
' أخبرني عندما يتوفر',
'0 in stock',
'actuellement indisponible',
'agotado',
'article épuisé',
'artikel zurzeit vergriffen',
'as soon as stock is available',
'ausverkauft', // sold out
'available for back order',
'awaiting stock',
'back in stock soon',
'back-order or out of stock',
'backordered',
'benachrichtigt mich', // notify me
'brak na stanie',
'brak w magazynie',
'coming soon',
'currently have any tickets for this',
'currently unavailable',
'dieser artikel ist bald wieder verfügbar',
'dostępne wkrótce',
'en rupture de stock',
'isn\'t in stock right now',
'isnt in stock right now',
'item is no longer available',
'let me know when it\'s available',
'mail me when available',
'message if back in stock',
'nachricht bei',
'nicht auf lager',
'nicht lagernd',
'nicht lieferbar',
'nicht verfügbar',
'nicht vorrätig',
'nicht zur verfügung',
'nie znaleziono produktów',
'niet beschikbaar',
'niet leverbaar',
'niet op voorraad',
'no disponible temporalmente',
'no longer in stock',
'no tickets available',
'not available',
'not currently available',
'not in stock',
'notify me when available',
'notify me',
'notify when available',
'não estamos a aceitar encomendas',
'out of stock',
'out-of-stock',
'prodotto esaurito',
'produkt niedostępny',
'sold out',
'sold-out',
'temporarily out of stock',
'temporarily unavailable',
'there were no search results for',
'this item is currently unavailable',
'tickets unavailable',
'tijdelijk uitverkocht',
'unavailable nearby',
'unavailable tickets',
'vergriffen',
'vorbestellen',
'vorbestellung ist bald möglich',
'we couldn\'t find any products that match',
'we do not currently have an estimate of when this product will be back in stock.',
'we don\'t know when or if this item will be back in stock.',
'we were not able to find a match',
'when this arrives in stock',
'zur zeit nicht an lager',
'品切れ',
'已售',
'已售完',
'품절'
];
const vh = Math.max(document.documentElement.clientHeight || 0, window.innerHeight || 0);
function getElementBaseText(element) {
// .textContent can include text from children which may give the wrong results
// scan only immediate TEXT_NODEs, which will be a child of the element
var text = "";
for (var i = 0; i < element.childNodes.length; ++i)
if (element.childNodes[i].nodeType === Node.TEXT_NODE)
text += element.childNodes[i].textContent;
return text.toLowerCase().trim();
}
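// e.g. (illustrative) for <div>Out of stock<span>more info</span></div> this returns
// "out of stock" - the <span> child's text is ignored, only immediate text nodes count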
const negateOutOfStockRegex = new RegExp('^([0-9] in stock|add to cart|in stock)', 'ig');
// The out-of-stock or in-stock-text is generally always above-the-fold
// and often below-the-fold is a list of related products that may or may not contain trigger text
// so it's good to filter to just the 'above the fold' elements
// and it should be at least 100px from the top to ignore items in the toolbar, sometimes menu items like "Coming soon" exist
// @todo - if it's SVG or IMG, go into image diff mode
// %ELEMENTS% replaced at injection time because different interfaces use it with different settings
console.log("Scanning %ELEMENTS%");
function collectVisibleElements(parent, visibleElements) {
if (!parent) return; // Base case: if parent is null or undefined, return
// Add the parent itself to the visible elements array if it's of the specified types
visibleElements.push(parent);
// Iterate over the parent's children
const children = parent.children;
for (let i = 0; i < children.length; i++) {
const child = children[i];
if (
child.nodeType === Node.ELEMENT_NODE &&
window.getComputedStyle(child).display !== 'none' &&
window.getComputedStyle(child).visibility !== 'hidden' &&
child.offsetWidth >= 0 &&
child.offsetHeight >= 0 &&
window.getComputedStyle(child).contentVisibility !== 'hidden'
) {
// If the child is an element and is visible, recursively collect visible elements
collectVisibleElements(child, visibleElements);
}
}
}
const elementsToScan = [];
collectVisibleElements(document.body, elementsToScan);
var elementText = "";
// REGEXS THAT REALLY MEAN IT'S IN STOCK
for (let i = elementsToScan.length - 1; i >= 0; i--) {
const element = elementsToScan[i];
// outside the 'fold' or some weird text in the heading area
// .getBoundingClientRect() was causing a crash in chrome 119, can only be run on contentVisibility != hidden
if (element.getBoundingClientRect().top + window.scrollY >= vh || element.getBoundingClientRect().top + window.scrollY <= 100) {
continue
}
elementText = "";
if (element.tagName.toLowerCase() === "input") {
elementText = element.value.toLowerCase().trim();
} else {
elementText = getElementBaseText(element);
}
if (elementText.length) {
// try which ones could mean it's in stock
if (negateOutOfStockRegex.test(elementText) && !elementText.includes('(0 products)')) {
console.log(`Negating/overriding 'Out of Stock' back to "Possibly in stock" found "${elementText}"`)
return 'Possibly in stock';
}
}
}
// OTHER STUFF THAT COULD BE THAT IT'S OUT OF STOCK
for (let i = elementsToScan.length - 1; i >= 0; i--) {
const element = elementsToScan[i];
// outside the 'fold' or some weird text in the heading area
// .getBoundingClientRect() was causing a crash in chrome 119, can only be run on contentVisibility != hidden
if (element.getBoundingClientRect().top + window.scrollY >= vh + 150 || element.getBoundingClientRect().top + window.scrollY <= 100) {
continue
}
elementText = "";
if (element.tagName.toLowerCase() === "input") {
elementText = element.value.toLowerCase().trim();
} else {
elementText = getElementBaseText(element);
}
if (elementText.length) {
// and these mean it's out of stock
for (const outOfStockText of outOfStockTexts) {
if (elementText.includes(outOfStockText)) {
console.log(`Selected 'Out of Stock' - found text "${outOfStockText}" - "${elementText}"`)
return outOfStockText; // item is out of stock
}
}
}
}
console.log(`Returning 'Possibly in stock' - can't find any useful matching text`)
return 'Possibly in stock'; // possibly in stock, can't decide otherwise.
}
// returns the element text that makes it think it's out of stock
return isItemInStock().trim()
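// Usage sketch (wiring mirrors the Python fetchers above, which read this file into instock_data_js):
//   instock_data = page.evaluate("async () => {" + instock_data_js + "}")
// The returned string is either 'Possibly in stock' or the matched out-of-stock phrase.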

View File

@@ -1,120 +0,0 @@
import os
import time
from loguru import logger
from changedetectionio.content_fetchers.base import Fetcher
class fetcher(Fetcher):
if os.getenv("WEBDRIVER_URL"):
fetcher_description = "WebDriver Chrome/Javascript via '{}'".format(os.getenv("WEBDRIVER_URL"))
else:
fetcher_description = "WebDriver Chrome/Javascript"
# Configs for Proxy setup
# In the ENV vars, is prefixed with "webdriver_", so it is for example "webdriver_sslProxy"
selenium_proxy_settings_mappings = ['proxyType', 'ftpProxy', 'httpProxy', 'noProxy',
'proxyAutoconfigUrl', 'sslProxy', 'autodetect',
'socksProxy', 'socksVersion', 'socksUsername', 'socksPassword']
proxy = None
def __init__(self, proxy_override=None, custom_browser_connection_url=None):
super().__init__()
from selenium.webdriver.common.proxy import Proxy as SeleniumProxy
# .strip('"') is going to save someone a lot of time when they accidently wrap the env value
if not custom_browser_connection_url:
self.browser_connection_url = os.getenv("WEBDRIVER_URL", 'http://browser-chrome:4444/wd/hub').strip('"')
else:
self.browser_connection_is_custom = True
self.browser_connection_url = custom_browser_connection_url
# If any proxy settings are enabled, then we should setup the proxy object
proxy_args = {}
for k in self.selenium_proxy_settings_mappings:
v = os.getenv('webdriver_' + k, False)
if v:
proxy_args[k] = v.strip('"')
# Map back standard HTTP_ and HTTPS_PROXY to webDriver httpProxy/sslProxy
# (keys in proxy_args were stored above without the 'webdriver_' env prefix)
if not proxy_args.get('httpProxy') and self.system_http_proxy:
proxy_args['httpProxy'] = self.system_http_proxy
if not proxy_args.get('sslProxy') and self.system_https_proxy:
proxy_args['sslProxy'] = self.system_https_proxy
# Allows override the proxy on a per-request basis
if proxy_override is not None:
proxy_args['httpProxy'] = proxy_override
if proxy_args:
self.proxy = SeleniumProxy(raw=proxy_args)
def run(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False,
empty_pages_are_a_change=False):
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.common.exceptions import WebDriverException
# request_body, request_method unused for now, until some magic in the future happens.
options = ChromeOptions()
if self.proxy:
options.proxy = self.proxy
self.driver = webdriver.Remote(
command_executor=self.browser_connection_url,
options=options)
try:
self.driver.get(url)
except WebDriverException as e:
# Be sure we close the session window
self.quit()
raise
self.driver.set_window_size(1280, 1024)
self.driver.implicitly_wait(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)))
if self.webdriver_js_execute_code is not None:
self.driver.execute_script(self.webdriver_js_execute_code)
# Selenium doesn't automatically wait for actions as well as Playwright does, so wait again
self.driver.implicitly_wait(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)))
# @todo - how to check this? is it possible?
self.status_code = 200
# @todo somehow we should try to get this working for WebDriver
# raise EmptyReply(url=url, status_code=r.status_code)
# @todo - dom wait loaded?
time.sleep(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay)
self.content = self.driver.page_source
self.headers = {}
self.screenshot = self.driver.get_screenshot_as_png()
# Does the connection to the webdriver work? run a test connection.
def is_ready(self):
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions
self.driver = webdriver.Remote(
command_executor=self.browser_connection_url,
options=ChromeOptions())
# driver.quit() seems to cause better exceptions
self.quit()
return True
def quit(self):
if self.driver:
try:
self.driver.quit()
except Exception as e:
logger.debug(f"Content Fetcher > Exception in chrome shutdown/quit {str(e)}")

View File

@@ -1,97 +1,58 @@
# used for the notifications, the front-end is using a JS library
import difflib
from typing import List, Iterator, Union
def same_slicer(lst: List[str], start: int, end: int) -> List[str]:
"""Return a slice of the list, or a single element if start == end."""
return lst[start:end] if start != end else [lst[start]]
def customSequenceMatcher(
before: List[str],
after: List[str],
include_equal: bool = False,
include_removed: bool = True,
include_added: bool = True,
include_replaced: bool = True,
include_change_type_prefix: bool = True
) -> Iterator[List[str]]:
"""
Compare two sequences and yield differences based on specified parameters.
Args:
before (List[str]): Original sequence
after (List[str]): Modified sequence
include_equal (bool): Include unchanged parts
include_removed (bool): Include removed parts
include_added (bool): Include added parts
include_replaced (bool): Include replaced parts
include_change_type_prefix (bool): Add prefixes to indicate change types
Yields:
List[str]: Differences between sequences
"""
cruncher = difflib.SequenceMatcher(isjunk=lambda x: x in " \t", a=before, b=after)
def same_slicer(l, a, b):
if a == b:
return [l[a]]
else:
return l[a:b]
# like .compare but a little different output
def customSequenceMatcher(before, after, include_equal=False, include_removed=True, include_added=True, include_replaced=True, include_change_type_prefix=True):
cruncher = difflib.SequenceMatcher(isjunk=lambda x: x in " \\t", a=before, b=after)
# @todo Line-by-line mode instead of bunched, including `after` that is not in `before` (maybe unset?)
for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
if include_equal and tag == 'equal':
yield before[alo:ahi]
g = before[alo:ahi]
yield g
elif include_removed and tag == 'delete':
prefix = "(removed) " if include_change_type_prefix else ''
yield [f"{prefix}{line}" for line in same_slicer(before, alo, ahi)]
row_prefix = "(removed) " if include_change_type_prefix else ''
g = [ row_prefix + i for i in same_slicer(before, alo, ahi)]
yield g
elif include_replaced and tag == 'replace':
prefix_changed = "(changed) " if include_change_type_prefix else ''
prefix_into = "(into) " if include_change_type_prefix else ''
yield [f"{prefix_changed}{line}" for line in same_slicer(before, alo, ahi)] + \
[f"{prefix_into}{line}" for line in same_slicer(after, blo, bhi)]
row_prefix = "(changed) " if include_change_type_prefix else ''
g = [row_prefix + i for i in same_slicer(before, alo, ahi)]
row_prefix = "(into) " if include_change_type_prefix else ''
g += [row_prefix + i for i in same_slicer(after, blo, bhi)]
yield g
elif include_added and tag == 'insert':
prefix = "(added) " if include_change_type_prefix else ''
yield [f"{prefix}{line}" for line in same_slicer(after, blo, bhi)]
row_prefix = "(added) " if include_change_type_prefix else ''
g = [row_prefix + i for i in same_slicer(after, blo, bhi)]
yield g
def render_diff(
previous_version_file_contents: str,
newest_version_file_contents: str,
include_equal: bool = False,
include_removed: bool = True,
include_added: bool = True,
include_replaced: bool = True,
line_feed_sep: str = "\n",
include_change_type_prefix: bool = True,
patch_format: bool = False
) -> str:
"""
Render the difference between two file contents.
Args:
previous_version_file_contents (str): Original file contents
newest_version_file_contents (str): Modified file contents
include_equal (bool): Include unchanged parts
include_removed (bool): Include removed parts
include_added (bool): Include added parts
include_replaced (bool): Include replaced parts
line_feed_sep (str): Separator for lines in output
include_change_type_prefix (bool): Add prefixes to indicate change types
patch_format (bool): Use patch format for output
Returns:
str: Rendered difference
"""
newest_lines = [line.rstrip() for line in newest_version_file_contents.splitlines()]
previous_lines = [line.rstrip() for line in previous_version_file_contents.splitlines()] if previous_version_file_contents else []
# only_differences - only return info about the differences, no context
# line_feed_sep could be "<br>" or "<li>" or "\n" etc
def render_diff(previous_version_file_contents, newest_version_file_contents, include_equal=False, include_removed=True, include_added=True, include_replaced=True, line_feed_sep="\n", include_change_type_prefix=True):
if patch_format:
patch = difflib.unified_diff(previous_lines, newest_lines)
return line_feed_sep.join(patch)
newest_version_file_contents = [line.rstrip() for line in newest_version_file_contents.splitlines()]
rendered_diff = customSequenceMatcher(
before=previous_lines,
after=newest_lines,
include_equal=include_equal,
include_removed=include_removed,
include_added=include_added,
include_replaced=include_replaced,
include_change_type_prefix=include_change_type_prefix
)
if previous_version_file_contents:
previous_version_file_contents = [line.rstrip() for line in previous_version_file_contents.splitlines()]
else:
previous_version_file_contents = ""
def flatten(lst: List[Union[str, List[str]]]) -> str:
return line_feed_sep.join(flatten(x) if isinstance(x, list) else x for x in lst)
rendered_diff = customSequenceMatcher(before=previous_version_file_contents,
after=newest_version_file_contents,
include_equal=include_equal,
include_removed=include_removed,
include_added=include_added,
include_replaced=include_replaced,
include_change_type_prefix=include_change_type_prefix)
return flatten(rendered_diff)
# Recursively join lists
f = lambda L: line_feed_sep.join([f(x) if type(x) is list else x for x in L])
p= f(rendered_diff)
return p
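A quick usage sketch against the richer (left-hand) signature, assuming the default "(changed)/(into)" prefixes shown above:
before = "price: 10\ncolour: red"
after = "price: 12\ncolour: red"
# 'colour: red' is equal and omitted; the replace opcode yields both prefixed lines
print(render_diff(before, after))
# (changed) price: 10
# (into) price: 12
# The left-hand version also supports unified-diff style output
print(render_diff(before, after, patch_format=True))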

File diff suppressed because it is too large

View File

@@ -1,7 +1,6 @@
import os
import re
from changedetectionio.strtobool import strtobool
from distutils.util import strtobool
from wtforms import (
BooleanField,
@@ -16,20 +15,14 @@ from wtforms import (
validators,
widgets
)
from flask_wtf.file import FileField, FileAllowed
from wtforms.fields import FieldList
from wtforms.validators import ValidationError
from validators.url import url as url_validator
# default
# each select <option data-enabled="enabled-0-0"
from changedetectionio.blueprint.browser_steps.browser_steps import browser_step_ui_config
from changedetectionio import html_tools, content_fetchers
from changedetectionio import content_fetcher
from changedetectionio.notification import (
valid_notification_formats,
)
@@ -44,11 +37,10 @@ valid_method = {
'PUT',
'PATCH',
'DELETE',
'OPTIONS',
}
default_method = 'GET'
allow_simplehost = not strtobool(os.getenv('BLOCK_SIMPLEHOSTS', 'False'))
class StringListField(StringField):
widget = widgets.TextArea()
@@ -168,31 +160,31 @@ class ValidateContentFetcherIsReady(object):
self.message = message
def __call__(self, form, field):
return
import urllib3.exceptions
from changedetectionio import content_fetcher
# AttributeError: module 'changedetectionio.content_fetcher' has no attribute 'extra_browser_unlocked<>ASDF213r123r'
# Better would be a radiohandler that keeps a reference to each class
# if field.data is not None and field.data != 'system':
# klass = getattr(content_fetcher, field.data)
# some_object = klass()
# try:
# ready = some_object.is_ready()
#
# except urllib3.exceptions.MaxRetryError as e:
# driver_url = some_object.command_executor
# message = field.gettext('Content fetcher \'%s\' did not respond.' % (field.data))
# message += '<br>' + field.gettext(
# 'Be sure that the selenium/webdriver runner is running and accessible via network from this container/host.')
# message += '<br>' + field.gettext('Did you follow the instructions in the wiki?')
# message += '<br><br>' + field.gettext('WebDriver Host: %s' % (driver_url))
# message += '<br><a href="https://github.com/dgtlmoon/changedetection.io/wiki/Fetching-pages-with-WebDriver">Go here for more information</a>'
# message += '<br>'+field.gettext('Content fetcher did not respond properly, unable to use it.\n %s' % (str(e)))
#
# raise ValidationError(message)
#
# except Exception as e:
# message = field.gettext('Content fetcher \'%s\' did not respond properly, unable to use it.\n %s')
# raise ValidationError(message % (field.data, e))
if field.data is not None and field.data != 'system':
klass = getattr(content_fetcher, field.data)
some_object = klass()
try:
ready = some_object.is_ready()
except urllib3.exceptions.MaxRetryError as e:
driver_url = some_object.command_executor
message = field.gettext('Content fetcher \'%s\' did not respond.' % (field.data))
message += '<br>' + field.gettext(
'Be sure that the selenium/webdriver runner is running and accessible via network from this container/host.')
message += '<br>' + field.gettext('Did you follow the instructions in the wiki?')
message += '<br><br>' + field.gettext('WebDriver Host: %s' % (driver_url))
message += '<br><a href="https://github.com/dgtlmoon/changedetection.io/wiki/Fetching-pages-with-WebDriver">Go here for more information</a>'
message += '<br>'+field.gettext('Content fetcher did not respond properly, unable to use it.\n %s' % (str(e)))
raise ValidationError(message)
except Exception as e:
message = field.gettext('Content fetcher \'%s\' did not respond properly, unable to use it.\n %s')
raise ValidationError(message % (field.data, e))
class ValidateNotificationBodyAndTitleWhenURLisSet(object):
@@ -231,33 +223,24 @@ class ValidateJinja2Template(object):
"""
Validates that a {token} is from a valid set
"""
def __init__(self, message=None):
self.message = message
def __call__(self, form, field):
from changedetectionio import notification
from jinja2 import BaseLoader, TemplateSyntaxError, UndefinedError
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2 import Environment, BaseLoader, TemplateSyntaxError
from jinja2.meta import find_undeclared_variables
import jinja2.exceptions
# Might be a list of text, or might be just text (like from the apprise url list)
joined_data = ' '.join(map(str, field.data)) if isinstance(field.data, list) else f"{field.data}"
try:
jinja2_env = ImmutableSandboxedEnvironment(loader=BaseLoader)
jinja2_env = Environment(loader=BaseLoader)
jinja2_env.globals.update(notification.valid_tokens)
# Extra validation tokens provided on the form_class(... extra_tokens={}) setup
if hasattr(field, 'extra_notification_tokens'):
jinja2_env.globals.update(field.extra_notification_tokens)
jinja2_env.from_string(joined_data).render()
rendered = jinja2_env.from_string(field.data).render()
except TemplateSyntaxError as e:
raise ValidationError(f"This is not a valid Jinja2 template: {e}") from e
except UndefinedError as e:
raise ValidationError(f"A variable or function is not defined: {e}") from e
except jinja2.exceptions.SecurityError as e:
raise ValidationError(f"This is not a valid Jinja2 template: {e}") from e
ast = jinja2_env.parse(joined_data)
ast = jinja2_env.parse(field.data)
undefined = ", ".join(find_undeclared_variables(ast))
if undefined:
raise ValidationError(
@@ -274,23 +257,19 @@ class validateURL(object):
self.message = message
def __call__(self, form, field):
# This should raise a ValidationError() or not
validate_url(field.data)
import validators
# If hosts that only contain alphanumerics are allowed ("localhost" for example)
allow_simplehost = not strtobool(os.getenv('BLOCK_SIMPLEHOSTS', 'False'))
try:
validators.url(field.data.strip(), simple_host=allow_simplehost)
except validators.ValidationFailure:
message = field.gettext('\'%s\' is not a valid URL.' % (field.data.strip()))
raise ValidationError(message)
def validate_url(test_url):
# If hosts that only contain alphanumerics are allowed ("localhost" for example)
try:
url_validator(test_url, simple_host=allow_simplehost)
except validators.ValidationError:
#@todo check for xss
message = f"'{test_url}' is not a valid URL."
# This should be wtforms.validators.
raise ValidationError(message)
from .model.Watch import is_safe_url
if not is_safe_url(field.data):
raise ValidationError('Watch protocol is not permitted by SAFE_PROTOCOL_REGEX')
from .model.Watch import is_safe_url
if not is_safe_url(test_url):
# This should be wtforms.validators.
raise ValidationError('Watch protocol is not permitted by SAFE_PROTOCOL_REGEX or incorrect URL format')
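A hedged sketch of the module-level helper in use; note that BLOCK_SIMPLEHOSTS is read once at import time into allow_simplehost above:
from changedetectionio.forms import validate_url
from wtforms.validators import ValidationError
validate_url('https://example.com/page')  # passes silently
try:
    validate_url('javascript:alert(1)')   # rejected: fails URL validation / SAFE_PROTOCOL_REGEX
except ValidationError as e:
    print(e)
# With BLOCK_SIMPLEHOSTS=True in the environment, single-word hosts such as
# 'http://localhost' would also raise ValidationError (simple_host=False).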
class ValidateListRegex(object):
"""
@@ -302,10 +281,11 @@ class ValidateListRegex(object):
def __call__(self, form, field):
for line in field.data:
if re.search(html_tools.PERL_STYLE_REGEX, line, re.IGNORECASE):
if line[0] == '/' and line[-1] == '/':
# Because internally we don't wrap in /
line = line.strip('/')
try:
regex = html_tools.perl_style_slash_enclosed_regex_to_options(line)
re.compile(regex)
re.compile(line)
except re.error:
message = field.gettext('RegEx \'%s\' is not a valid regular expression.')
raise ValidationError(message % (line))
@@ -334,30 +314,11 @@ class ValidateCSSJSONXPATHInput(object):
return
# Does it look like XPath?
if line.strip()[0] == '/' or line.strip().startswith('xpath:'):
if not self.allow_xpath:
raise ValidationError("XPath not permitted in this field!")
from lxml import etree, html
import elementpath
# xpath 2.0-3.1
from elementpath.xpath3 import XPath3Parser
tree = html.fromstring("<html></html>")
line = line.replace('xpath:', '')
try:
elementpath.select(tree, line.strip(), parser=XPath3Parser)
except elementpath.ElementPathError as e:
message = field.gettext('\'%s\' is not a valid XPath expression. (%s)')
raise ValidationError(message % (line, str(e)))
except:
raise ValidationError("A system-error occurred when validating your XPath expression")
if line.strip().startswith('xpath1:'):
if line.strip()[0] == '/':
if not self.allow_xpath:
raise ValidationError("XPath not permitted in this field!")
from lxml import etree, html
tree = html.fromstring("<html></html>")
line = re.sub(r'^xpath1:', '', line)
try:
tree.xpath(line.strip())
@@ -421,31 +382,19 @@ class quickWatchForm(Form):
# Common to a single watch and the global settings
class commonSettingsForm(Form):
from . import processors
def __init__(self, formdata=None, obj=None, prefix="", data=None, meta=None, **kwargs):
super().__init__(formdata, obj, prefix, data, meta, **kwargs)
self.notification_body.extra_notification_tokens = kwargs.get('extra_notification_tokens', {})
self.notification_title.extra_notification_tokens = kwargs.get('extra_notification_tokens', {})
self.notification_urls.extra_notification_tokens = kwargs.get('extra_notification_tokens', {})
extract_title_as_title = BooleanField('Extract <title> from document and use as watch title', default=False)
fetch_backend = RadioField(u'Fetch Method', choices=content_fetchers.available_fetchers(), validators=[ValidateContentFetcherIsReady()])
notification_urls = StringListField('Notification URL List', validators=[validators.Optional(), ValidateAppRiseServers()])
notification_title = StringField('Notification Title', default='ChangeDetection.io Notification - {{ watch_url }}', validators=[validators.Optional(), ValidateJinja2Template()])
notification_body = TextAreaField('Notification Body', default='{{ watch_url }} had a change.', validators=[validators.Optional(), ValidateJinja2Template()])
notification_format = SelectField('Notification format', choices=valid_notification_formats.keys())
notification_title = StringField('Notification Title', default='ChangeDetection.io Notification - {{ watch_url }}', validators=[validators.Optional(), ValidateJinja2Template()])
notification_urls = StringListField('Notification URL List', validators=[validators.Optional(), ValidateAppRiseServers(), ValidateJinja2Template()])
processor = RadioField( label=u"Processor - What do you want to achieve?", choices=processors.available_processors(), default="text_json_diff")
webdriver_delay = IntegerField('Wait seconds before extracting text', validators=[validators.Optional(), validators.NumberRange(min=1, message="Should contain one or more seconds")])
fetch_backend = RadioField(u'Fetch Method', choices=content_fetcher.available_fetchers(), validators=[ValidateContentFetcherIsReady()])
extract_title_as_title = BooleanField('Extract <title> from document and use as watch title', default=False)
webdriver_delay = IntegerField('Wait seconds before extracting text', validators=[validators.Optional(), validators.NumberRange(min=1,
message="Should contain one or more seconds")])
class importForm(Form):
from . import processors
processor = RadioField(u'Processor', choices=processors.available_processors(), default="text_json_diff")
urls = TextAreaField('URLs')
xlsx_file = FileField('Upload .xlsx file', validators=[FileAllowed(['xlsx'], 'Must be .xlsx file!')])
file_mapping = SelectField('File mapping', [validators.DataRequired()], choices={('wachete', 'Wachete mapping'), ('custom','Custom mapping')})
class SingleBrowserStep(Form):
@@ -458,13 +407,12 @@ class SingleBrowserStep(Form):
# remove_button = SubmitField('-', render_kw={"type": "button", "class": "pure-button pure-button-primary", 'title': 'Remove'})
# add_button = SubmitField('+', render_kw={"type": "button", "class": "pure-button pure-button-primary", 'title': 'Add new step after'})
class processor_text_json_diff_form(commonSettingsForm):
class watchForm(commonSettingsForm):
url = fields.URLField('URL', validators=[validateURL()])
tags = StringTagUUID('Group tag', [validators.Optional()], default='')
time_between_check = FormField(TimeBetweenCheckForm)
time_between_check_use_default = BooleanField('Use global settings for time between check', default=False)
include_filters = StringListField('CSS/JSONPath/JQ/XPath Filters', [ValidateCSSJSONXPATHInput()], default='')
@@ -480,12 +428,14 @@ class processor_text_json_diff_form(commonSettingsForm):
method = SelectField('Request method', choices=valid_method, default=default_method)
ignore_status_codes = BooleanField('Ignore status codes (process non-2xx status codes as normal)', default=False)
check_unique_lines = BooleanField('Only trigger when unique lines appear', default=False)
sort_text_alphabetically = BooleanField('Sort text alphabetically', default=False)
filter_text_added = BooleanField('Added lines', default=True)
filter_text_replaced = BooleanField('Replaced/changed lines', default=True)
filter_text_removed = BooleanField('Removed lines', default=True)
# @todo this class could be moved to its own text_json_diff_watchForm and this goes to restock_diff_Watchform perhaps
in_stock_only = BooleanField('Only trigger when product goes BACK to in-stock', default=True)
trigger_text = StringListField('Trigger/wait for text', [validators.Optional(), ValidateListRegex()])
if os.getenv("PLAYWRIGHT_DRIVER_URL"):
browser_steps = FieldList(FormField(SingleBrowserStep), min_entries=10)
@@ -501,12 +451,6 @@ class processor_text_json_diff_form(commonSettingsForm):
notification_muted = BooleanField('Notifications Muted / Off', default=False)
notification_screenshot = BooleanField('Attach screenshot to notification (where possible)', default=False)
def extra_tab_content(self):
return None
def extra_form_content(self):
return None
def validate(self, **kwargs):
if not super().validate():
return False
@@ -519,31 +463,24 @@ class processor_text_json_diff_form(commonSettingsForm):
result = False
# Attempt to validate jinja2 templates in the URL
from jinja2 import Environment
# Jinja2 available in URLs along with https://pypi.org/project/jinja2-time/
jinja2_env = Environment(extensions=['jinja2_time.TimeExtension'])
try:
from changedetectionio.safe_jinja import render as jinja_render
jinja_render(template_str=self.url.data)
ready_url = str(jinja2_env.from_string(self.url.data).render())
except Exception as e:
self.url.errors.append('Invalid template syntax')
result = False
return result
class SingleExtraProxy(Form):
# maybe better to set some <script>var..
proxy_name = StringField('Name', [validators.Optional()], render_kw={"placeholder": "Name"})
proxy_url = StringField('Proxy URL', [validators.Optional()], render_kw={"placeholder": "socks5:// or regular proxy http://user:pass@...:3128", "size":50})
proxy_url = StringField('Proxy URL', [validators.Optional()], render_kw={"placeholder": "http://user:pass@...:3128", "size":50})
# @todo do the validation here instead
class SingleExtraBrowser(Form):
browser_name = StringField('Name', [validators.Optional()], render_kw={"placeholder": "Name"})
browser_connection_url = StringField('Browser connection URL', [validators.Optional()], render_kw={"placeholder": "wss://brightdata... wss://oxylabs etc", "size":50})
# @todo do the validation here instead
class DefaultUAInputForm(Form):
html_requests = StringField('Plaintext requests', validators=[validators.Optional()], render_kw={"placeholder": "<default>"})
if os.getenv("PLAYWRIGHT_DRIVER_URL") or os.getenv("WEBDRIVER_URL"):
html_webdriver = StringField('Chrome requests', validators=[validators.Optional()], render_kw={"placeholder": "<default>"})
# datastore.data['settings']['requests']..
class globalSettingsRequestForm(Form):
time_between_check = FormField(TimeBetweenCheckForm)
@@ -552,9 +489,6 @@ class globalSettingsRequestForm(Form):
render_kw={"style": "width: 5em;"},
validators=[validators.NumberRange(min=0, message="Should contain zero or more seconds")])
extra_proxies = FieldList(FormField(SingleExtraProxy), min_entries=5)
extra_browsers = FieldList(FormField(SingleExtraBrowser), min_entries=5)
default_ua = FormField(DefaultUAInputForm, label="Default User-Agent overrides")
def validate_extra_proxies(self, extra_validators=None):
for e in self.data['extra_proxies']:
@@ -568,12 +502,9 @@ class globalSettingsRequestForm(Form):
class globalSettingsApplicationForm(commonSettingsForm):
api_access_token_enabled = BooleanField('API access token security check enabled', default=True, validators=[validators.Optional()])
base_url = StringField('Notification base URL override',
validators=[validators.Optional()],
render_kw={"placeholder": os.getenv('BASE_URL', 'Not set')}
)
base_url = StringField('Base URL', validators=[validators.Optional()])
empty_pages_are_a_change = BooleanField('Treat empty pages as a change?', default=False)
fetch_backend = RadioField('Fetch Method', default="html_requests", choices=content_fetchers.available_fetchers(), validators=[ValidateContentFetcherIsReady()])
fetch_backend = RadioField('Fetch Method', default="html_requests", choices=content_fetcher.available_fetchers(), validators=[ValidateContentFetcherIsReady()])
global_ignore_text = StringListField('Ignore Text', [ValidateListRegex()])
global_subtractive_selectors = StringListField('Remove elements', [ValidateCSSJSONXPATHInput(allow_xpath=False, allow_json=False)])
ignore_whitespace = BooleanField('Ignore whitespace')
@@ -585,8 +516,6 @@ class globalSettingsApplicationForm(commonSettingsForm):
removepassword_button = SubmitField('Remove password', render_kw={"class": "pure-button pure-button-primary"})
render_anchor_tag_content = BooleanField('Render anchor tag content', default=False)
shared_diff_access = BooleanField('Allow access to view diff page when password is enabled', default=False, validators=[validators.Optional()])
rss_hide_muted_watches = BooleanField('Hide muted watches from RSS feed', default=True,
validators=[validators.Optional()])
filter_failure_notification_threshold_attempts = IntegerField('Number of times the filter can be missing before sending a notification',
render_kw={"style": "width: 5em;"},
validators=[validators.NumberRange(min=0,
@@ -597,11 +526,6 @@ class globalSettingsForm(Form):
# Define these as FormFields/"sub forms", this way it matches the JSON storage
# datastore.data['settings']['application']..
# datastore.data['settings']['requests']..
def __init__(self, formdata=None, obj=None, prefix="", data=None, meta=None, **kwargs):
super().__init__(formdata, obj, prefix, data, meta, **kwargs)
self.application.notification_body.extra_notification_tokens = kwargs.get('extra_notification_tokens', {})
self.application.notification_title.extra_notification_tokens = kwargs.get('extra_notification_tokens', {})
self.application.notification_urls.extra_notification_tokens = kwargs.get('extra_notification_tokens', {})
requests = FormField(globalSettingsRequestForm)
application = FormField(globalSettingsApplicationForm)

View File

@@ -1,42 +1,23 @@
from bs4 import BeautifulSoup
from inscriptis import get_text
from inscriptis.model.config import ParserConfig
from jsonpath_ng.ext import parse
from typing import List
from inscriptis.model.config import ParserConfig
from xml.sax.saxutils import escape as xml_escape
import json
import re
# HTML added to be sure each result matching a filter (.example) gets converted to a new line by Inscriptis
TEXT_FILTER_LIST_LINE_SUFFIX = "<br>"
PERL_STYLE_REGEX = r'^/(.*?)/([a-z]*)?$'
# 'price' , 'lowPrice', 'highPrice' are usually under here
# All of those may or may not appear on different websites - I didn't find a way to do case-insensitive searching here
LD_JSON_PRODUCT_OFFER_SELECTORS = ["json:$..offers", "json:$..Offers"]
# all of those may or may not appear on different websites
LD_JSON_PRODUCT_OFFER_SELECTOR = "json:$..offers"
class JSONNotFound(ValueError):
def __init__(self, msg):
ValueError.__init__(self, msg)
# Doesn't look like python supports forward slash auto enclosure in re.findall
# So convert it to inline flag "(?i)foobar" type configuration
def perl_style_slash_enclosed_regex_to_options(regex):
res = re.search(PERL_STYLE_REGEX, regex, re.IGNORECASE)
if res:
flags = res.group(2) if res.group(2) else 'i'
regex = f"(?{flags}){res.group(1)}"
else:
# Fall back to just ignorecase as an option
regex = f"(?i){regex}"
return regex
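For illustration, three inputs and what the conversion produces:
perl_style_slash_enclosed_regex_to_options('/in stock/i')    # -> '(?i)in stock'
perl_style_slash_enclosed_regex_to_options('/Sold.?Out/sm')  # -> '(?sm)Sold.?Out'
perl_style_slash_enclosed_regex_to_options('price')          # -> '(?i)price' (fallback: ignorecase)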
# Given a CSS Rule, and a blob of HTML, return the blob of HTML that matches
def include_filters(include_filters, html_content, append_pretty_line_formatting=False):
soup = BeautifulSoup(html_content, "html.parser")
@@ -67,96 +48,12 @@ def element_removal(selectors: List[str], html_content):
selector = ",".join(selectors)
return subtractive_css_selector(selector, html_content)
def elementpath_tostring(obj):
"""
change elementpath.select results to string type
# The MIT License (MIT), Copyright (c), 2018-2021, SISSA (Scuola Internazionale Superiore di Studi Avanzati)
# https://github.com/sissaschool/elementpath/blob/dfcc2fd3d6011b16e02bf30459a7924f547b47d0/elementpath/xpath_tokens.py#L1038
"""
import elementpath
from decimal import Decimal
import math
if obj is None:
return ''
# https://elementpath.readthedocs.io/en/latest/xpath_api.html#elementpath.select
elif isinstance(obj, elementpath.XPathNode):
return obj.string_value
elif isinstance(obj, bool):
return 'true' if obj else 'false'
elif isinstance(obj, Decimal):
value = format(obj, 'f')
if '.' in value:
return value.rstrip('0').rstrip('.')
return value
elif isinstance(obj, float):
if math.isnan(obj):
return 'NaN'
elif math.isinf(obj):
return str(obj).upper()
value = str(obj)
if '.' in value:
value = value.rstrip('0').rstrip('.')
if '+' in value:
value = value.replace('+', '')
if 'e' in value:
return value.upper()
return value
return str(obj)
# Return str Utf-8 of matched rules
def xpath_filter(xpath_filter, html_content, append_pretty_line_formatting=False, is_rss=False):
from lxml import etree, html
import elementpath
# xpath 2.0-3.1
from elementpath.xpath3 import XPath3Parser
parser = etree.HTMLParser()
if is_rss:
# So that we can keep CDATA for cdata_in_document_to_text() to process
parser = etree.XMLParser(strip_cdata=False)
tree = html.fromstring(bytes(html_content, encoding='utf-8'), parser=parser)
html_block = ""
r = elementpath.select(tree, xpath_filter.strip(), namespaces={'re': 'http://exslt.org/regular-expressions'}, parser=XPath3Parser)
# @note: //title/text() won't work where <title> contains CDATA..
if type(r) != list:
r = [r]
for element in r:
# When there's more than 1 match, then add the suffix to separate each line
# And where the matched result doesn't include something that will cause Inscriptis to add a newline
# (This way each 'match' reliably has a new-line in the diff)
# Divs are converted to 4 whitespaces by inscriptis
if append_pretty_line_formatting and len(html_block) and (not hasattr( element, 'tag' ) or not element.tag in (['br', 'hr', 'div', 'p'])):
html_block += TEXT_FILTER_LIST_LINE_SUFFIX
if type(element) == str:
html_block += element
elif issubclass(type(element), etree._Element) or issubclass(type(element), etree._ElementTree):
html_block += etree.tostring(element, pretty_print=True).decode('utf-8')
else:
html_block += elementpath_tostring(element)
return html_block
# Return str Utf-8 of matched rules
# 'xpath1:'
def xpath1_filter(xpath_filter, html_content, append_pretty_line_formatting=False, is_rss=False):
def xpath_filter(xpath_filter, html_content, append_pretty_line_formatting=False):
from lxml import etree, html
parser = None
if is_rss:
# So that we can keep CDATA for cdata_in_document_to_text() to process
parser = etree.XMLParser(strip_cdata=False)
tree = html.fromstring(bytes(html_content, encoding='utf-8'), parser=parser)
tree = html.fromstring(bytes(html_content, encoding='utf-8'))
html_block = ""
r = tree.xpath(xpath_filter.strip(), namespaces={'re': 'http://exslt.org/regular-expressions'})
@@ -167,18 +64,19 @@ def xpath1_filter(xpath_filter, html_content, append_pretty_line_formatting=Fals
# And where the matched result doesn't include something that will cause Inscriptis to add a newline
# (This way each 'match' reliably has a new-line in the diff)
# Divs are converted to 4 whitespaces by inscriptis
if append_pretty_line_formatting and len(html_block) and (not hasattr(element, 'tag') or not element.tag in (['br', 'hr', 'div', 'p'])):
if append_pretty_line_formatting and len(html_block) and (not hasattr( element, 'tag' ) or not element.tag in (['br', 'hr', 'div', 'p'])):
html_block += TEXT_FILTER_LIST_LINE_SUFFIX
# Some kind of text, UTF-8 or other
if isinstance(element, (str, bytes)):
html_block += element
if type(element) == etree._ElementStringResult:
html_block += str(element)
elif type(element) == etree._ElementUnicodeResult:
html_block += str(element)
else:
# Return the HTML which will get parsed as text
html_block += etree.tostring(element, pretty_print=True).decode('utf-8')
return html_block
# Extract/find element
def extract_element(find='title', html_content=''):
@@ -194,12 +92,12 @@ def extract_element(find='title', html_content=''):
#
def _parse_json(json_data, json_filter):
if json_filter.startswith("json:"):
if 'json:' in json_filter:
jsonpath_expression = parse(json_filter.replace('json:', ''))
match = jsonpath_expression.find(json_data)
return _get_stripped_text_from_json_match(match)
if json_filter.startswith("jq:") or json_filter.startswith("jqraw:"):
if 'jq:' in json_filter:
try:
import jq
@@ -207,15 +105,10 @@ def _parse_json(json_data, json_filter):
# `jq` requires full compilation on Windows and so isn't generally available
raise Exception("jq support not found")
if json_filter.startswith("jq:"):
jq_expression = jq.compile(json_filter.removeprefix("jq:"))
match = jq_expression.input(json_data).all()
return _get_stripped_text_from_json_match(match)
jq_expression = jq.compile(json_filter.replace('jq:', ''))
match = jq_expression.input(json_data).all()
if json_filter.startswith("jqraw:"):
jq_expression = jq.compile(json_filter.removeprefix("jqraw:"))
match = jq_expression.input(json_data).all()
return '\n'.join(str(item) for item in match)
return _get_stripped_text_from_json_match(match)
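The three prefixes on the left-hand version differ only in post-processing — a hedged sketch (jq: and jqraw: require the optional jq package, and the data here is illustrative):
data = {"offers": {"price": "19.99", "priceCurrency": "EUR"}}
_parse_json(data, 'json:$..price')     # JSONPath; result goes through _get_stripped_text_from_json_match()
_parse_json(data, 'jq:.offers.price')  # jq expression; same stripped post-processing
_parse_json(data, 'jqraw:.offers')     # jq expression; returned as raw str(item) lines, no stripping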
def _get_stripped_text_from_json_match(match):
s = []
@@ -243,7 +136,7 @@ def _get_stripped_text_from_json_match(match):
# ensure_is_ldjson_info_type - str "product", optional, "@type == product" (I don't know how to do that as a json selector)
def extract_json_as_string(content, json_filter, ensure_is_ldjson_info_type=None):
stripped_text_from_html = False
# https://github.com/dgtlmoon/changedetection.io/pull/2041#issuecomment-1848397161w
# Try to parse/filter out the JSON, if we get some parser error, then maybe it's embedded within HTML tags
try:
stripped_text_from_html = _parse_json(json.loads(content), json_filter)
@@ -251,6 +144,7 @@ def extract_json_as_string(content, json_filter, ensure_is_ldjson_info_type=None
# Foreach <script json></script> blob.. just return the first that matches json_filter
# As a last resort, try to parse the whole <body>
s = []
soup = BeautifulSoup(content, 'html.parser')
if ensure_is_ldjson_info_type:
@@ -276,26 +170,13 @@ def extract_json_as_string(content, json_filter, ensure_is_ldjson_info_type=None
for json_data in bs_jsons:
stripped_text_from_html = _parse_json(json_data, json_filter)
if ensure_is_ldjson_info_type:
# Could sometimes be list, string or something else random
if isinstance(json_data, dict):
# If it has LD JSON 'key' @type, and @type is 'product', and something was found for the search
# (Some sites have multiple of the same ld+json @type='product', but some have the review part, some have the 'price' part)
# @type could also be a list although non-standard ("@type": ["Product", "SubType"],)
# LD_JSON auto-extract also requires some content PLUS the ldjson to be present
# 1833 - could be either str or dict, should not be anything else
t = json_data.get('@type')
if t and stripped_text_from_html:
if isinstance(t, str) and t.lower() == ensure_is_ldjson_info_type.lower():
break
# The non-standard part, some have a list
elif isinstance(t, list):
if ensure_is_ldjson_info_type.lower() in [x.lower().strip() for x in t]:
break
if json_data.get('@type', False) and json_data.get('@type','').lower() == ensure_is_ldjson_info_type.lower() and stripped_text_from_html:
break
elif stripped_text_from_html:
break
@@ -310,56 +191,50 @@ def extract_json_as_string(content, json_filter, ensure_is_ldjson_info_type=None
#
# wordlist - list of regex's (str) or words (str)
def strip_ignore_text(content, wordlist, mode="content"):
ignore = []
ignore_regex = []
# @todo check this runs case insensitive
for k in wordlist:
# Is it a regex?
if k[0] == '/':
ignore_regex.append(k.strip(" /"))
else:
ignore.append(k)
i = 0
output = []
ignore_text = []
ignore_regex = []
ignored_line_numbers = []
for k in wordlist:
# Is it a regex?
res = re.search(PERL_STYLE_REGEX, k, re.IGNORECASE)
if res:
ignore_regex.append(re.compile(perl_style_slash_enclosed_regex_to_options(k)))
else:
ignore_text.append(k.strip())
for line in content.splitlines():
i += 1
# Always ignore blank lines in this mode. (when this function gets called)
got_match = False
if len(line.strip()):
for l in ignore_text:
if l.lower() in line.lower():
got_match = True
regex_matches = False
if not got_match:
for r in ignore_regex:
if r.search(line):
got_match = True
# if any of these match, skip
for regex in ignore_regex:
try:
if re.search(regex, line, re.IGNORECASE):
regex_matches = True
except Exception as e:
continue
if not got_match:
# Not ignored
if not regex_matches and not any(skip_text.lower() in line.lower() for skip_text in ignore):
output.append(line.encode('utf8'))
else:
ignored_line_numbers.append(i)
# Used for finding out what to highlight
if mode == "line numbers":
return ignored_line_numbers
return "\n".encode('utf8').join(output)
def cdata_in_document_to_text(html_content: str, render_anchor_tag_content=False) -> str:
pattern = r'<!\[CDATA\[(\s*(?:.(?<!\]\]>)\s*)*)\]\]>'
def repl(m):
text = m.group(1)
return xml_escape(html_to_text(html_content=text)).strip()
return re.sub(pattern, repl, html_content)
def html_to_text(html_content: str, render_anchor_tag_content=False, is_rss=False) -> str:
def html_to_text(html_content: str, render_anchor_tag_content=False) -> str:
"""Converts html string to a string with just the text. If ignoring
rendering anchor tag content is enable, anchor tag content are also
included in the text
@@ -375,21 +250,16 @@ def html_to_text(html_content: str, render_anchor_tag_content=False, is_rss=Fals
# if anchor tag content flag is set to True define a config for
# extracting this content
if render_anchor_tag_content:
parser_config = ParserConfig(
annotation_rules={"a": ["hyperlink"]},
display_links=True
annotation_rules={"a": ["hyperlink"]}, display_links=True
)
# otherwise set config to None/default
# otherwise set config to None
else:
parser_config = None
# RSS Mode - Inscriptis will treat `title` as something else.
# Make it a regular block display element (//item/title)
# This is a bit of a hack - the real way it to use XSLT to convert it to HTML #1874
if is_rss:
html_content = re.sub(r'<title([\s>])', r'<h1\1', html_content)
html_content = re.sub(r'</title>', r'</h1>', html_content)
# get text and annotations via inscriptis
text_content = get_text(html_content, config=parser_config)
return text_content
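A small sketch of the RSS workaround above; rewriting <title> to <h1> makes Inscriptis treat it as a block element so it lands on its own line (input is illustrative):
rss = "<rss><channel><item><title>New release 1.2</title></item></channel></rss>"
# Internally becomes ...<h1>New release 1.2</h1>... before get_text() runs
print(html_to_text(rss, is_rss=True))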
@@ -398,22 +268,12 @@ def html_to_text(html_content: str, render_anchor_tag_content=False, is_rss=Fals
# Does LD+JSON exist with a @type=='product' and a .price set anywhere?
def has_ldjson_product_info(content):
try:
lc = content.lower()
if 'application/ld+json' in lc and lc.count('"price"') == 1 and '"pricecurrency"' in lc:
return True
# On some pages this is really terribly expensive when they don't really need it
# (For example you never want price monitoring, but this runs on every watch to suggest it)
# for filter in LD_JSON_PRODUCT_OFFER_SELECTORS:
# pricing_data += extract_json_as_string(content=content,
# json_filter=filter,
# ensure_is_ldjson_info_type="product")
except Exception as e:
# OK too
pricing_data = extract_json_as_string(content=content, json_filter=LD_JSON_PRODUCT_OFFER_SELECTOR, ensure_is_ldjson_info_type="product")
except JSONNotFound as e:
# Totally fine
return False
return False
x=bool(pricing_data)
return x
def workarounds_for_obfuscations(content):

View File

@@ -1,10 +1,6 @@
from abc import ABC, abstractmethod
import time
import validators
from wtforms import ValidationError
from loguru import logger
from changedetectionio.forms import validate_url
class Importer():
@@ -16,7 +12,6 @@ class Importer():
self.new_uuids = []
self.good = 0
self.remaining_data = []
self.import_profile = None
@abstractmethod
def run(self,
@@ -57,7 +52,7 @@ class import_url_list(Importer):
# Flask wtform validators wont work with basic auth, use validators package
# Up to 5000 per batch so we don't flood the server
# @todo validators.url will fail when you add your own IP etc
# @todo validators.url failed on local hostnames (such as referring to ourselves when using browserless)
if len(url) and 'http' in url.lower() and good < 5000:
extras = None
if processor:
@@ -137,167 +132,3 @@ class import_distill_io_json(Importer):
good += 1
flash("{} Imported from Distill.io in {:.2f}s, {} Skipped.".format(len(self.new_uuids), time.time() - now, len(self.remaining_data)))
class import_xlsx_wachete(Importer):
def run(self,
data,
flash,
datastore,
):
good = 0
now = time.time()
self.new_uuids = []
from openpyxl import load_workbook
try:
wb = load_workbook(data)
except Exception as e:
# @todo correct except
flash("Unable to read export XLSX file, something wrong with the file?", 'error')
return
row_id = 2
for row in wb.active.iter_rows(min_row=row_id):
try:
extras = {}
data = {}
for cell in row:
if not cell.value:
continue
column_title = wb.active.cell(row=1, column=cell.column).value.strip().lower()
data[column_title] = cell.value
# Forced switch to webdriver/playwright/etc
dynamic_wachet = str(data.get('dynamic wachet', '')).strip().lower() # Convert bool to str to cover all cases
# libreoffice and others can have it as =FALSE() =TRUE(), or bool(true)
if 'true' in dynamic_wachet or dynamic_wachet == '1':
extras['fetch_backend'] = 'html_webdriver'
elif 'false' in dynamic_wachet or dynamic_wachet == '0':
extras['fetch_backend'] = 'html_requests'
if data.get('xpath'):
# @todo split by || ?
extras['include_filters'] = [data.get('xpath')]
if data.get('name'):
extras['title'] = data.get('name').strip()
if data.get('interval (min)'):
minutes = int(data.get('interval (min)'))
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
weeks, days = divmod(days, 7)
extras['time_between_check'] = {'weeks': weeks, 'days': days, 'hours': hours, 'minutes': minutes, 'seconds': 0}
# At minimum a URL is required.
if data.get('url'):
try:
validate_url(data.get('url'))
except ValidationError as e:
logger.error(f">> Import URL error {data.get('url')} {str(e)}")
flash(f"Error processing row number {row_id}, URL value was incorrect, row was skipped.", 'error')
# Don't bother processing anything else on this row
continue
new_uuid = datastore.add_watch(url=data['url'].strip(),
extras=extras,
tag=data.get('folder'),
write_to_disk_now=False)
if new_uuid:
# Straight into the queue.
self.new_uuids.append(new_uuid)
good += 1
except Exception as e:
logger.error(e)
flash(f"Error processing row number {row_id}, check all cell data types are correct, row was skipped.", 'error')
else:
row_id += 1
flash(
"{} imported from Wachete .xlsx in {:.2f}s".format(len(self.new_uuids), time.time() - now))
class import_xlsx_custom(Importer):
def run(self,
data,
flash,
datastore,
):
good = 0
now = time.time()
self.new_uuids = []
from openpyxl import load_workbook
try:
wb = load_workbook(data)
except Exception as e:
# @todo correct except
flash("Unable to read export XLSX file, something wrong with the file?", 'error')
return
# @todo check at least 2 rows, same in other method
from .forms import validate_url
row_i = 1
try:
for row in wb.active.iter_rows():
url = None
tags = None
extras = {}
for cell in row:
if not self.import_profile.get(cell.col_idx):
continue
if not cell.value:
continue
cell_map = self.import_profile.get(cell.col_idx)
cell_val = str(cell.value).strip() # could be bool
if cell_map == 'url':
url = cell.value.strip()
try:
validate_url(url)
except ValidationError as e:
logger.error(f">> Import URL error {url} {str(e)}")
flash(f"Error processing row number {row_i}, URL value was incorrect, row was skipped.", 'error')
# Don't bother processing anything else on this row
url = None
break
elif cell_map == 'tag':
tags = cell.value.strip()
elif cell_map == 'include_filters':
# @todo validate?
extras['include_filters'] = [cell.value.strip()]
elif cell_map == 'interval_minutes':
hours, minutes = divmod(int(cell_val), 60)
days, hours = divmod(hours, 24)
weeks, days = divmod(days, 7)
extras['time_between_check'] = {'weeks': weeks, 'days': days, 'hours': hours, 'minutes': minutes, 'seconds': 0}
else:
extras[cell_map] = cell_val
# At minimum a URL is required.
if url:
new_uuid = datastore.add_watch(url=url,
extras=extras,
tag=tags,
write_to_disk_now=False)
if new_uuid:
# Straight into the queue.
self.new_uuids.append(new_uuid)
good += 1
except Exception as e:
logger.error(e)
flash(f"Error processing row number {row_i}, check all cell data types are correct, row was skipped.", 'error')
else:
row_i += 1
flash(
"{} imported from custom .xlsx in {:.2f}s".format(len(self.new_uuids), time.time() - now))

View File

@@ -5,9 +5,7 @@ from changedetectionio.notification import (
default_notification_title,
)
# Equal to or greater than this number of FilterNotFoundInResponse exceptions will trigger a filter-not-found notification
_FILTER_FAILURE_THRESHOLD_ATTEMPTS_DEFAULT = 6
DEFAULT_SETTINGS_HEADERS_USERAGENT='Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36'
class model(dict):
base_config = {
@@ -18,16 +16,11 @@ class model(dict):
},
'requests': {
'extra_proxies': [], # Configurable extra proxies via the UI
'extra_browsers': [], # Configurable extra browsers via the UI
'jitter_seconds': 0,
'proxy': None, # Preferred proxy connection
'time_between_check': {'weeks': None, 'days': None, 'hours': 3, 'minutes': None, 'seconds': None},
'timeout': int(getenv("DEFAULT_SETTINGS_REQUESTS_TIMEOUT", "45")), # Default 45 seconds
'workers': int(getenv("DEFAULT_SETTINGS_REQUESTS_WORKERS", "10")), # Number of threads, lower is better for slow connections
'default_ua': {
'html_requests': getenv("DEFAULT_SETTINGS_HEADERS_USERAGENT", DEFAULT_SETTINGS_HEADERS_USERAGENT),
'html_webdriver': None,
}
},
'application': {
# Custom notification content
@@ -47,8 +40,6 @@ class model(dict):
'pager_size': 50,
'password': False,
'render_anchor_tag_content': False,
'rss_access_token': None,
'rss_hide_muted_watches': True,
'schema_version' : 0,
'shared_diff_access': False,
'webdriver_delay': None, # Extra delay in seconds before extracting text

View File

@@ -1,14 +1,19 @@
from .Watch import base_config
import uuid
from changedetectionio.model import watch_base
class model(watch_base):
class model(dict):
def __init__(self, *arg, **kw):
super(model, self).__init__(*arg, **kw)
self['overrides_watch'] = kw.get('default', {}).get('overrides_watch')
self.update(base_config)
self['uuid'] = str(uuid.uuid4())
if kw.get('default'):
self.update(kw['default'])
del kw['default']
# Goes at the end so we update the default object with the initialiser
super(model, self).__init__(*arg, **kw)

View File

@@ -1,18 +1,72 @@
from changedetectionio.strtobool import strtobool
from changedetectionio.safe_jinja import render as jinja_render
from . import watch_base
from distutils.util import strtobool
import logging
import os
import re
from pathlib import Path
from loguru import logger
import time
import uuid
# Allowable protocols, protects against javascript: etc
# file:// is further checked by ALLOW_FILE_URI
SAFE_PROTOCOL_REGEX='^(http|https|ftp|file):'
minimum_seconds_recheck_time = int(os.getenv('MINIMUM_SECONDS_RECHECK_TIME', 3))
minimum_seconds_recheck_time = int(os.getenv('MINIMUM_SECONDS_RECHECK_TIME', 60))
mtable = {'seconds': 1, 'minutes': 60, 'hours': 3600, 'days': 86400, 'weeks': 86400 * 7}
from changedetectionio.notification import (
default_notification_format_for_watch
)
base_config = {
'body': None,
'check_unique_lines': False, # On change-detected, compare against all history if it's something new
'check_count': 0,
'date_created': None,
'consecutive_filter_failures': 0, # Every time the CSS/xPath filter cannot be located, reset when all is fine.
'extract_text': [], # Extract text by regex after filters
'extract_title_as_title': False,
'fetch_backend': 'system', # plaintext, playwright etc
'processor': 'text_json_diff', # could be restock_diff or others from .processors
'filter_failure_notification_send': strtobool(os.getenv('FILTER_FAILURE_NOTIFICATION_SEND_DEFAULT', 'True')),
'filter_text_added': True,
'filter_text_replaced': True,
'filter_text_removed': True,
'has_ldjson_price_data': None,
'track_ldjson_price_data': None,
'headers': {}, # Extra headers to send
'ignore_text': [], # List of text to ignore when calculating the comparison checksum
'in_stock_only': True, # Only trigger change on going to in-stock from out-of-stock
'include_filters': [],
'last_checked': 0,
'last_error': False,
'last_viewed': 0, # history key value of the last viewed via the [diff] link
'method': 'GET',
# Custom notification content
'notification_body': None,
'notification_format': default_notification_format_for_watch,
'notification_muted': False,
'notification_title': None,
'notification_screenshot': False, # Include the latest screenshot if available and supported by the apprise URL
'notification_urls': [], # List of URLs to add to the notification Queue (Usually AppRise)
'paused': False,
'previous_md5': False,
'previous_md5_before_filters': False, # Used for skipping changedetection entirely
'proxy': None, # Preferred proxy connection
'subtractive_selectors': [],
'tag': '', # Old system of text name for a tag, to be removed
'tags': [], # list of UUIDs to App.Tags
'text_should_not_be_present': [], # Text that should not be present
# Re #110, so then if this is set to None, we know to use the default value instead
# Requires setting to None on submit if it's the same as the default
# Should be all None by default, so we use the system default in this case.
'time_between_check': {'weeks': None, 'days': None, 'hours': None, 'minutes': None, 'seconds': None},
'title': None,
'trigger_text': [], # List of text or regex to wait for until a change is detected
'url': '',
'uuid': str(uuid.uuid4()),
'webdriver_delay': None,
'webdriver_js_execute_code': None, # Run before change-detection
}
def is_safe_url(test_url):
# See https://github.com/dgtlmoon/changedetection.io/issues/1358
@@ -29,37 +83,40 @@ def is_safe_url(test_url):
return True
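The body of is_safe_url is truncated by this hunk, but SAFE_PROTOCOL_REGEX at the top of the file shows the gatekeeping — a quick check (illustrative URLs):
import re
SAFE_PROTOCOL_REGEX = '^(http|https|ftp|file):'
bool(re.search(SAFE_PROTOCOL_REGEX, 'https://example.com'))  # True
bool(re.search(SAFE_PROTOCOL_REGEX, 'javascript:alert(1)'))  # False - rejected
# file: URLs are additionally gated behind ALLOW_FILE_URI, per the comment above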
class model(watch_base):
class model(dict):
__newest_history_key = None
__history_n = 0
jitter_seconds = 0
def __init__(self, *arg, **kw):
self.update(base_config)
self.__datastore_path = kw['datastore_path']
self['uuid'] = str(uuid.uuid4())
del kw['datastore_path']
super(model, self).__init__(*arg, **kw)
if kw.get('default'):
self.update(kw['default'])
del kw['default']
if self.get('default'):
del self['default']
# Be sure the cached timestamp is ready
bump = self.history
# Goes at the end so we update the default object with the initialiser
super(model, self).__init__(*arg, **kw)
@property
def viewed(self):
# Don't return viewed when last_viewed is 0 and newest_key is 0
if int(self['last_viewed']) and int(self['last_viewed']) >= int(self.newest_history_key) :
if int(self['last_viewed']) >= int(self.newest_history_key) :
return True
return False
def ensure_data_dir_exists(self):
if not os.path.isdir(self.watch_data_dir):
logger.debug(f"> Creating data dir {self.watch_data_dir}")
print ("> Creating data dir {}".format(self.watch_data_dir))
os.mkdir(self.watch_data_dir)
@property
@@ -71,11 +128,12 @@ class model(watch_base):
ready_url = url
if '{%' in url or '{{' in url:
from jinja2 import Environment
# Jinja2 available in URLs along with https://pypi.org/project/jinja2-time/
jinja2_env = Environment(extensions=['jinja2_time.TimeExtension'])
try:
ready_url = jinja_render(template_str=url)
ready_url = str(jinja2_env.from_string(url).render())
except Exception as e:
logger.critical(f"Invalid URL template for: '{url}' - {str(e)}")
from flask import (
flash, Markup, url_for
)
@@ -84,41 +142,8 @@ class model(watch_base):
flash(message, 'error')
return ''
if ready_url.startswith('source:'):
ready_url=ready_url.replace('source:', '')
return ready_url
def clear_watch(self):
import pathlib
# JSON Data, Screenshots, Textfiles (history index and snapshots), HTML in the future etc
for item in pathlib.Path(str(self.watch_data_dir)).rglob("*.*"):
os.unlink(item)
# Force the attr to recalculate
bump = self.history
# Do this last because it will trigger a recheck due to last_checked being zero
self.update({
'browser_steps_last_error_step': None,
'check_count': 0,
'fetch_time': 0.0,
'has_ldjson_price_data': None,
'last_checked': 0,
'last_error': False,
'last_notification_error': False,
'last_viewed': 0,
'previous_md5': False,
'previous_md5_before_filters': False,
'remote_server_reply': None,
'track_ldjson_price_data': None
})
return
@property
def is_source_type_url(self):
return self.get('url', '').startswith('source:')
@property
def get_fetch_backend(self):
"""
@@ -142,7 +167,9 @@ class model(watch_base):
@property
def label(self):
# Used for sorting
return self.get('title') if self.get('title') else self.get('url')
if self['title']:
return self['title']
return self['url']
@property
def last_changed(self):
@@ -174,7 +201,7 @@ class model(watch_base):
# Read the history file as a dict
fname = os.path.join(self.watch_data_dir, "history.txt")
if os.path.isfile(fname):
logger.debug(f"Reading watch history index for {self.get('uuid')}")
logging.debug("Reading history index " + str(time.time()))
with open(fname, "r") as f:
for i in f.readlines():
if ',' in i:
@@ -196,8 +223,6 @@ class model(watch_base):
if len(tmp_history):
self.__newest_history_key = list(tmp_history.keys())[-1]
else:
self.__newest_history_key = None
self.__history_n = len(tmp_history)
@@ -208,21 +233,6 @@ class model(watch_base):
fname = os.path.join(self.watch_data_dir, "history.txt")
return os.path.isfile(fname)
@property
def has_browser_steps(self):
has_browser_steps = self.get('browser_steps') and list(filter(
lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'),
self.get('browser_steps')))
return has_browser_steps
@property
def has_restock_info(self):
if self.get('restock') and self['restock'].get('in_stock') != None:
return True
return False
# Returns the newest key, but if there's only 1 record, then it's counted as not being new, so return 0.
@property
def newest_history_key(self):
@@ -236,38 +246,6 @@ class model(watch_base):
bump = self.history
return self.__newest_history_key
# Given an arbitrary timestamp, find the closest next key
# For example, last_viewed = 1000 so it should return the next 1001 timestamp
#
# used for the [diff] button so it can preset a smarter from_version
@property
def get_next_snapshot_key_to_last_viewed(self):
"""Unfortunately for now timestamp is stored as string key"""
keys = list(self.history.keys())
if not keys:
return None
last_viewed = int(self.get('last_viewed'))
prev_k = keys[0]
sorted_keys = sorted(keys, key=lambda x: int(x))
sorted_keys.reverse()
# When the 'last viewed' timestamp is greater than the newest snapshot, return second last
if last_viewed > int(sorted_keys[0]):
return sorted_keys[1]
for k in sorted_keys:
if int(k) < last_viewed:
if prev_k == sorted_keys[0]:
# Return the second last one so we don't recommend comparing a version against itself
return sorted_keys[1]
return prev_k
prev_k = k
return keys[0]
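A standalone restatement of the selection logic, for illustration only (keys are epoch seconds stored as strings, oldest first):
def next_key(keys, last_viewed):
    prev_k = keys[0]
    sorted_keys = sorted(keys, key=int, reverse=True)
    if last_viewed > int(sorted_keys[0]):
        return sorted_keys[1]
    for k in sorted_keys:
        if int(k) < last_viewed:
            return sorted_keys[1] if prev_k == sorted_keys[0] else prev_k
        prev_k = k
    return keys[0]
assert next_key(['1000', '2000', '3000'], 1500) == '2000'  # next snapshot after last seen
assert next_key(['1000', '2000', '3000'], 3500) == '2000'  # past newest -> second newest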
def get_history_snapshot(self, timestamp):
import brotli
filepath = self.history[timestamp]
@@ -295,10 +273,13 @@ class model(watch_base):
def save_history_text(self, contents, timestamp, snapshot_id):
import brotli
logger.trace(f"{self.get('uuid')} - Updating history.txt with timestamp {timestamp}")
self.ensure_data_dir_exists()
# Small hack so that we sleep just enough to allow 1 second between history snapshots
# this is because history.txt indexes/keys snapshots by epoch seconds and we don't want dupe keys
if self.__newest_history_key and int(timestamp) == int(self.__newest_history_key):
time.sleep(timestamp - self.__newest_history_key)
threshold = int(os.getenv('SNAPSHOT_BROTLI_COMPRESSION_THRESHOLD', 1024))
skip_brotli = strtobool(os.getenv('DISABLE_BROTLI_TEXT_SNAPSHOT', 'False'))
@@ -328,7 +309,6 @@ class model(watch_base):
# @todo bump static cache of the last timestamp so we don't need to examine the file to set a proper ''viewed'' status
return snapshot_fname
@property
@property
def has_empty_checktime(self):
# using all() + dictionary comprehension
@@ -432,17 +412,6 @@ class model(watch_base):
def toggle_mute(self):
self['notification_muted'] ^= True
def extra_notification_token_values(self):
# Used for providing extra tokens
# return {'widget': 555}
return {}
def extra_notification_token_placeholder_info(self):
# Used for providing extra tokens
# return [('widget', "Get widget amounts")]
return []
def extract_regex_from_all_history(self, regex):
import csv
import re
@@ -501,42 +470,8 @@ class model(watch_base):
# None is set
return False
def save_error_text(self, contents):
self.ensure_data_dir_exists()
target_path = os.path.join(self.watch_data_dir, "last-error.txt")
with open(target_path, 'w') as f:
f.write(contents)
def save_xpath_data(self, data, as_error=False):
import json
if as_error:
target_path = os.path.join(self.watch_data_dir, "elements-error.json")
else:
target_path = os.path.join(self.watch_data_dir, "elements.json")
self.ensure_data_dir_exists()
with open(target_path, 'w') as f:
f.write(json.dumps(data))
# Save as PNG, PNG is larger but better for doing visual diff in the future
def save_screenshot(self, screenshot: bytes, as_error=False):
if as_error:
target_path = os.path.join(self.watch_data_dir, "last-error-screenshot.png")
else:
target_path = os.path.join(self.watch_data_dir, "last-screenshot.png")
self.ensure_data_dir_exists()
with open(target_path, 'wb') as f:
f.write(screenshot)
def get_last_fetched_text_before_filters(self):
def get_last_fetched_before_filters(self):
import brotli
filepath = os.path.join(self.watch_data_dir, 'last-fetched.br')
@@ -551,62 +486,8 @@ class model(watch_base):
with open(filepath, 'rb') as f:
return(brotli.decompress(f.read()).decode('utf-8'))
def save_last_text_fetched_before_filters(self, contents):
def save_last_fetched_before_filters(self, contents):
import brotli
filepath = os.path.join(self.watch_data_dir, 'last-fetched.br')
with open(filepath, 'wb') as f:
f.write(brotli.compress(contents, mode=brotli.MODE_TEXT))
def save_last_fetched_html(self, timestamp, contents):
import brotli
self.ensure_data_dir_exists()
snapshot_fname = f"{timestamp}.html.br"
filepath = os.path.join(self.watch_data_dir, snapshot_fname)
with open(filepath, 'wb') as f:
contents = contents.encode('utf-8') if isinstance(contents, str) else contents
try:
f.write(brotli.compress(contents))
except Exception as e:
logger.warning(f"{self.get('uuid')} - Unable to compress snapshot, saving as raw data to {filepath}")
logger.warning(e)
f.write(contents)
self._prune_last_fetched_html_snapshots()
def get_fetched_html(self, timestamp):
import brotli
snapshot_fname = f"{timestamp}.html.br"
filepath = os.path.join(self.watch_data_dir, snapshot_fname)
if os.path.isfile(filepath):
with open(filepath, 'rb') as f:
return (brotli.decompress(f.read()).decode('utf-8'))
return False
def _prune_last_fetched_html_snapshots(self):
dates = list(self.history.keys())
dates.reverse()
for index, timestamp in enumerate(dates):
snapshot_fname = f"{timestamp}.html.br"
filepath = os.path.join(self.watch_data_dir, snapshot_fname)
# Keep only the first 2
if index > 1 and os.path.isfile(filepath):
os.remove(filepath)
@property
def get_browsersteps_available_screenshots(self):
"For knowing which screenshots are available to show the user in BrowserSteps UI"
available = []
for f in Path(self.watch_data_dir).glob('step_before-*.jpeg'):
step_n=re.search(r'step_before-(\d+)', f.name)
if step_n:
available.append(step_n.group(1))
return available

View File

@@ -1,73 +0,0 @@
import os
import uuid
from changedetectionio import strtobool
from changedetectionio.notification import default_notification_format_for_watch
class watch_base(dict):
def __init__(self, *arg, **kw):
self.update({
# Custom notification content
# Re #110, so then if this is set to None, we know to use the default value instead
# Requires setting to None on submit if it's the same as the default
# Should be all None by default, so we use the system default in this case.
'body': None,
'browser_steps': [],
'browser_steps_last_error_step': None,
'check_count': 0,
'check_unique_lines': False, # On change-detected, compare against all history if it's something new
'consecutive_filter_failures': 0, # Every time the CSS/xPath filter cannot be located, reset when all is fine.
'date_created': None,
'extract_text': [], # Extract text by regex after filters
'extract_title_as_title': False,
'fetch_backend': 'system', # plaintext, playwright etc
'fetch_time': 0.0,
'filter_failure_notification_send': strtobool(os.getenv('FILTER_FAILURE_NOTIFICATION_SEND_DEFAULT', 'True')),
'filter_text_added': True,
'filter_text_removed': True,
'filter_text_replaced': True,
'follow_price_changes': True,
'has_ldjson_price_data': None,
'headers': {}, # Extra headers to send
'ignore_text': [], # List of text to ignore when calculating the comparison checksum
'in_stock_only': True, # Only trigger change on going to in-stock from out-of-stock
'include_filters': [],
'last_checked': 0,
'last_error': False,
'last_viewed': 0, # history key value of the last viewed via the [diff] link
'method': 'GET',
'notification_alert_count': 0,
'notification_body': None,
'notification_format': default_notification_format_for_watch,
'notification_muted': False,
'notification_screenshot': False, # Include the latest screenshot if available and supported by the apprise URL
'notification_title': None,
'notification_urls': [], # List of URLs to add to the notification Queue (Usually AppRise)
'paused': False,
'previous_md5': False,
'previous_md5_before_filters': False, # Used for skipping changedetection entirely
'processor': 'text_json_diff', # could be restock_diff or others from .processors
'price_change_threshold_percent': None,
'proxy': None, # Preferred proxy connection
'remote_server_reply': None, # From 'server' reply header
'sort_text_alphabetically': False,
'subtractive_selectors': [],
'tag': '', # Old system of text name for a tag, to be removed
'tags': [], # list of UUIDs to App.Tags
'text_should_not_be_present': [], # Text that should not be present
'time_between_check': {'weeks': None, 'days': None, 'hours': None, 'minutes': None, 'seconds': None},
'time_between_check_use_default': True,
'title': None,
'track_ldjson_price_data': None,
'trigger_text': [], # List of text or regex to wait for until a change is detected
'url': '',
'uuid': str(uuid.uuid4()),
'webdriver_delay': None,
'webdriver_js_execute_code': None, # Run before change-detection
})
super(watch_base, self).__init__(*arg, **kw)
if self.get('default'):
del self['default']
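A minimal usage sketch (values illustrative): watch_base behaves like a dict pre-seeded with the defaults above, so caller-supplied keys override them.

w = watch_base({'url': 'https://example.com', 'paused': True})
assert w['method'] == 'GET'   # default retained
assert w['paused'] is True    # caller override wins
print(w['uuid'])              # fresh uuid4 per instance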

View File

@@ -1,8 +1,7 @@
import apprise
import time
from jinja2 import Environment, BaseLoader
from apprise import NotifyFormat
import json
from loguru import logger
valid_tokens = {
'base_url': '',
@@ -10,7 +9,6 @@ valid_tokens = {
'diff': '',
'diff_added': '',
'diff_full': '',
'diff_patch': '',
'diff_removed': '',
'diff_url': '',
'preview_url': '',
@@ -47,9 +45,6 @@ from apprise.decorators import notify
@notify(on="puts")
def apprise_custom_api_call_wrapper(body, title, notify_type, *args, **kwargs):
import requests
from apprise.utils import parse_url as apprise_parse_url
from apprise import URLBase
url = kwargs['meta'].get('url')
if url.startswith('post'):
@@ -72,56 +67,27 @@ def apprise_custom_api_call_wrapper(body, title, notify_type, *args, **kwargs):
url = url.replace('delete://', 'http://')
url = url.replace('deletes://', 'https://')
headers = {}
params = {}
auth = None
# Convert /foobar?+some-header=hello to proper header dictionary
results = apprise_parse_url(url)
if results:
# Add our headers that the user can potentially over-ride if they wish
# to our returned result set and tidy entries by unquoting them
headers = {URLBase.unquote(x): URLBase.unquote(y)
for x, y in results['qsd+'].items()}
# https://github.com/caronc/apprise/wiki/Notify_Custom_JSON#get-parameter-manipulation
# In Apprise, it relies on prefixing each request arg with "-", because it uses say &method=update as a flag for apprise
# but here we are making straight requests, so we need to convert this against apprise's logic
for k, v in results['qsd'].items():
if not k.strip('+-') in results['qsd+'].keys():
params[URLBase.unquote(k)] = URLBase.unquote(v)
# Determine Authentication
auth = ''
if results.get('user') and results.get('password'):
auth = (URLBase.unquote(results.get('user')), URLBase.unquote(results.get('password')))
elif results.get('user'):
auth = (URLBase.unquote(results.get('user')))
# Try to auto-guess if it's JSON
headers = {}
try:
json.loads(body)
headers['Content-Type'] = 'application/json; charset=utf-8'
headers = {'Content-Type': 'application/json; charset=utf-8'}
except ValueError as e:
pass
r(results.get('url'),
auth=auth,
data=body.encode('utf-8') if type(body) is str else body,
headers=headers,
params=params
)
r(url, headers=headers, data=body)
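A sketch of the '+header' query convention handled above, mirroring the same parse_url logic; the URL is an illustrative placeholder.

from apprise.utils import parse_url as apprise_parse_url

results = apprise_parse_url('http://api.example.com/hook?+X-Token=abc&mode=update')
headers = dict(results['qsd+'])   # {'X-Token': 'abc'} - '+' prefixed args become headers
params = {k: v for k, v in results['qsd'].items()
          if k.strip('+-') not in results['qsd+']}  # {'mode': 'update'} - the rest stay query params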
def process_notification(n_object, datastore):
from .safe_jinja import render as jinja_render
now = time.time()
if n_object.get('notification_timestamp'):
logger.trace(f"Time since queued {now-n_object['notification_timestamp']:.3f}s")
# Insert variables into the notification content
notification_parameters = create_notification_parameters(n_object, datastore)
# Get the notification body from datastore
jinja2_env = Environment(loader=BaseLoader)
n_body = jinja2_env.from_string(n_object.get('notification_body', default_notification_body)).render(**notification_parameters)
n_title = jinja2_env.from_string(n_object.get('notification_title', default_notification_title)).render(**notification_parameters)
n_format = valid_notification_formats.get(
n_object.get('notification_format', default_notification_format),
valid_notification_formats[default_notification_format],
@@ -132,120 +98,103 @@ def process_notification(n_object, datastore):
# Initially text or whatever
n_format = datastore.data['settings']['application'].get('notification_format', valid_notification_formats[default_notification_format])
logger.trace(f"Complete notification body including Jinja and placeholders calculated in {time.time() - now:.3f}s")
# https://github.com/caronc/apprise/wiki/Development_LogCapture
# Anything higher than or equal to WARNING (which covers things like Connection errors)
# raise it as an exception
sent_objs = []
apobjs=[]
sent_objs=[]
from .apprise_asset import asset
apobj = apprise.Apprise(debug=True, asset=asset)
for url in n_object['notification_urls']:
url = jinja2_env.from_string(url).render(**notification_parameters)
apobj = apprise.Apprise(debug=True, asset=asset)
url = url.strip()
if len(url):
print(">> Process Notification: AppRise notifying {}".format(url))
with apprise.LogCapture(level=apprise.logging.DEBUG) as logs:
# Re 323 - Limit discord length to their 2000 char limit total or it won't send.
# Because different notifications may require different pre-processing, run each sequentially :(
# 2000 bytes minus -
# 200 bytes for the overhead of the _entire_ json payload, 200 bytes for {tts, wait, content} etc headers
# Length of URL - In case they specify a longer custom avatar_url
if not n_object.get('notification_urls'):
return None
# So if no avatar_url is specified, add one so it can be correctly calculated into the total payload
k = '?' if not '?' in url else '&'
if not 'avatar_url' in url \
and not url.startswith('mail') \
and not url.startswith('post') \
and not url.startswith('get') \
and not url.startswith('delete') \
and not url.startswith('put'):
url += k + 'avatar_url=https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/changedetectionio/static/images/avatar-256x256.png'
with apprise.LogCapture(level=apprise.logging.DEBUG) as logs:
for url in n_object['notification_urls']:
if url.startswith('tgram://'):
# Telegram only supports a limited subset of HTML, remove the '<br>' we place in.
# re https://github.com/dgtlmoon/changedetection.io/issues/555
# @todo re-use an existing library we have already imported to strip all non-allowed tags
n_body = n_body.replace('<br>', '\n')
n_body = n_body.replace('</br>', '\n')
# real limit is 4096, but minus some for extra metadata
payload_max_size = 3600
body_limit = max(0, payload_max_size - len(n_title))
n_title = n_title[0:payload_max_size]
n_body = n_body[0:body_limit]
# Get the notification body from datastore
n_body = jinja_render(template_str=n_object.get('notification_body', ''), **notification_parameters)
n_title = jinja_render(template_str=n_object.get('notification_title', ''), **notification_parameters)
elif url.startswith('discord://') or url.startswith('https://discordapp.com/api/webhooks') or url.startswith('https://discord.com/api'):
# real limit is 2000, but minus some for extra metadata
payload_max_size = 1700
body_limit = max(0, payload_max_size - len(n_title))
n_title = n_title[0:payload_max_size]
n_body = n_body[0:body_limit]
url = url.strip()
if not url:
logger.warning(f"Process Notification: skipping empty notification URL.")
continue
elif url.startswith('mailto'):
# Apprise will default to HTML, so we need to override it
# So that what's generated in n_body is in line with what is going to be sent.
# https://github.com/caronc/apprise/issues/633#issuecomment-1191449321
if not 'format=' in url and (n_format == 'Text' or n_format == 'Markdown'):
prefix = '?' if not '?' in url else '&'
# Apprise format is lowercase text https://github.com/caronc/apprise/issues/633
n_format = n_format.lower()
url = "{}{}format={}".format(url, prefix, n_format)
# If n_format == HTML, then apprise email should default to text/html and we should be sending HTML only
logger.info(f">> Process Notification: AppRise notifying {url}")
url = jinja_render(template_str=url, **notification_parameters)
apobj.add(url)
# Re 323 - Limit discord length to their 2000 char limit total or it won't send.
# Because different notifications may require different pre-processing, run each sequentially :(
# 2000 bytes minus -
# 200 bytes for the overhead of the _entire_ json payload, 200 bytes for {tts, wait, content} etc headers
# Length of URL - In case they specify a longer custom avatar_url
apobj.notify(
title=n_title,
body=n_body,
body_format=n_format,
# False is not an option for AppRise, must be type None
attach=n_object.get('screenshot', None)
)
# So if no avatar_url is specified, add one so it can be correctly calculated into the total payload
k = '?' if not '?' in url else '&'
if not 'avatar_url' in url \
and not url.startswith('mail') \
and not url.startswith('post') \
and not url.startswith('get') \
and not url.startswith('delete') \
and not url.startswith('put'):
url += k + 'avatar_url=https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/changedetectionio/static/images/avatar-256x256.png'
apobj.clear()
if url.startswith('tgram://'):
# Telegram only supports a limited subset of HTML, remove the '<br>' we place in.
# re https://github.com/dgtlmoon/changedetection.io/issues/555
# @todo re-use an existing library we have already imported to strip all non-allowed tags
n_body = n_body.replace('<br>', '\n')
n_body = n_body.replace('</br>', '\n')
# real limit is 4096, but minus some for extra metadata
payload_max_size = 3600
body_limit = max(0, payload_max_size - len(n_title))
n_title = n_title[0:payload_max_size]
n_body = n_body[0:body_limit]
# In case it needs to exist in memory for a while after to process(?)
apobjs.append(apobj)
elif url.startswith('discord://') or url.startswith('https://discordapp.com/api/webhooks') or url.startswith(
'https://discord.com/api'):
# real limit is 2000, but minus some for extra metadata
payload_max_size = 1700
body_limit = max(0, payload_max_size - len(n_title))
n_title = n_title[0:payload_max_size]
n_body = n_body[0:body_limit]
elif url.startswith('mailto'):
# Apprise will default to HTML, so we need to override it
# So that what's generated in n_body is in line with what is going to be sent.
# https://github.com/caronc/apprise/issues/633#issuecomment-1191449321
if not 'format=' in url and (n_format == 'Text' or n_format == 'Markdown'):
prefix = '?' if not '?' in url else '&'
# Apprise format is lowercase text https://github.com/caronc/apprise/issues/633
n_format = n_format.lower()
url = f"{url}{prefix}format={n_format}"
# If n_format == HTML, then apprise email should default to text/html and we should be sending HTML only
apobj.add(url)
sent_objs.append({'title': n_title,
'body': n_body,
'url': url,
'body_format': n_format})
# Blast off the notifications that are set in .add()
apobj.notify(
title=n_title,
body=n_body,
body_format=n_format,
# False is not an option for AppRise, must be type None
attach=n_object.get('screenshot', None)
)
# Give apprise time to register an error
time.sleep(3)
# Returns empty string if nothing found, multi-line string otherwise
log_value = logs.getvalue()
if log_value and ('WARNING' in log_value or 'ERROR' in log_value):
logger.critical(log_value)
raise Exception(log_value)
# Returns empty string if nothing found, multi-line string otherwise
log_value = logs.getvalue()
if log_value and ('WARNING' in log_value or 'ERROR' in log_value):
raise Exception(log_value)
sent_objs.append({'title': n_title,
'body': n_body,
'url' : url,
'body_format': n_format})
# Return what was sent for better logging - after the for loop
return sent_objs
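A minimal end-to-end sketch of the Apprise flow above; the json:// endpoint is an illustrative placeholder.

import apprise
from apprise import NotifyFormat

apobj = apprise.Apprise(debug=True)
apobj.add('json://httpbin.org/post')  # placeholder endpoint
with apprise.LogCapture(level=apprise.logging.DEBUG) as logs:
    apobj.notify(title='Price changed', body='Now $10', body_format=NotifyFormat.TEXT)
    captured = logs.getvalue()
    # Mirror the behaviour above: a WARNING/ERROR in the captured log means failure
    if captured and ('WARNING' in captured or 'ERROR' in captured):
        raise Exception(captured)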
# Notification title + body content parameters get created here.
# ( Where we prepare the tokens in the notification to be replaced with actual values )
def create_notification_parameters(n_object, datastore):
from copy import deepcopy
# in the case we send a test notification from the main settings, there is no UUID.
uuid = n_object['uuid'] if 'uuid' in n_object else ''
if uuid:
if uuid != '':
watch_title = datastore.data['watching'][uuid].get('title', '')
tag_list = []
tags = datastore.get_all_tags_for_watch(uuid)
@@ -258,11 +207,15 @@ def create_notification_parameters(n_object, datastore):
watch_tag = ''
# Create URLs to customise the notification with
# active_base_url - set in store.py data property
base_url = datastore.data['settings']['application'].get('active_base_url')
base_url = datastore.data['settings']['application']['base_url']
watch_url = n_object['watch_url']
# Re #148 - Some people have just {{ base_url }} in the body or title, but this may break some notification services
# like 'Join', so it's always best to at least set something obvious so that they are not broken.
if base_url == '':
base_url = "<base-url-env-var-not-set>"
diff_url = "{}/diff/{}".format(base_url, uuid)
preview_url = "{}/preview/{}".format(base_url, uuid)
@@ -272,19 +225,19 @@ def create_notification_parameters(n_object, datastore):
# Valid_tokens also used as a field validator
tokens.update(
{
'base_url': base_url,
'base_url': base_url if base_url is not None else '',
'current_snapshot': n_object['current_snapshot'] if 'current_snapshot' in n_object else '',
'diff': n_object.get('diff', ''), # Null default in the case we use a test
'diff_added': n_object.get('diff_added', ''), # Null default in the case we use a test
'diff_full': n_object.get('diff_full', ''), # Null default in the case we use a test
'diff_removed': n_object.get('diff_removed', ''), # Null default in the case we use a test
'diff_url': diff_url,
'preview_url': preview_url,
'triggered_text': n_object.get('triggered_text', ''),
'watch_tag': watch_tag if watch_tag is not None else '',
'watch_title': watch_title if watch_title is not None else '',
'watch_url': watch_url,
'watch_uuid': uuid,
})
# n_object will contain diff, diff_added etc etc
tokens.update(n_object)
if uuid:
tokens.update(datastore.data['watching'].get(uuid).extra_notification_token_values())
return tokens
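A sketch of how the token map above feeds the notification templates; plain Jinja2 is used here, and the safe_jinja wrapper in the diff behaves similarly. Values are illustrative.

from jinja2 import Environment, BaseLoader

tokens = {'watch_url': 'https://example.com',
          'diff_url': 'https://changedetection.example/diff/1234'}
template = '{{watch_url}} had a change, see {{diff_url}}'
print(Environment(loader=BaseLoader).from_string(template).render(**tokens))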

View File

@@ -8,8 +8,4 @@ The concept here is to be able to switch between different domain specific probl
Some suggestions for the future
- `graphical`
## Todo
- Make each processor return an extra list of sub-processors (so you could configure a single processor in different ways)
- move restock_diff to its own pip/github repo
- `restock_and_price` - extract price AND stock text

View File

@@ -1,160 +1,15 @@
from abc import abstractmethod
from changedetectionio.strtobool import strtobool
from copy import deepcopy
from loguru import logger
import hashlib
import os
import re
import importlib
import pkgutil
import inspect
class difference_detection_processor():
browser_steps = None
datastore = None
fetcher = None
screenshot = None
watch = None
xpath_data = None
def __init__(self, *args, datastore, watch_uuid, **kwargs):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.datastore = datastore
self.watch = deepcopy(self.datastore.data['watching'].get(watch_uuid))
def call_browser(self):
from requests.structures import CaseInsensitiveDict
from changedetectionio.content_fetchers.exceptions import EmptyReply
# Protect against file:// access
if re.search(r'^file://', self.watch.get('url', '').strip(), re.IGNORECASE):
if not strtobool(os.getenv('ALLOW_FILE_URI', 'false')):
raise Exception(
"file:// type access is denied for security reasons."
)
url = self.watch.link
# Requests, playwright, other browser via wss:// etc, fetch_extra_something
prefer_fetch_backend = self.watch.get('fetch_backend', 'system')
# Proxy ID "key"
preferred_proxy_id = self.datastore.get_preferred_proxy_for_watch(uuid=self.watch.get('uuid'))
# Pluggable content self.fetcher
if not prefer_fetch_backend or prefer_fetch_backend == 'system':
prefer_fetch_backend = self.datastore.data['settings']['application'].get('fetch_backend')
# In the case that the preferred fetcher was a browser config with custom connection URL..
# @todo - on save watch, if its extra_browser_ then it should be obvious it will use playwright (like if its requests now..)
custom_browser_connection_url = None
if prefer_fetch_backend.startswith('extra_browser_'):
(t, key) = prefer_fetch_backend.split('extra_browser_')
connection = list(
filter(lambda s: (s['browser_name'] == key), self.datastore.data['settings']['requests'].get('extra_browsers', [])))
if connection:
prefer_fetch_backend = 'html_webdriver'
custom_browser_connection_url = connection[0].get('browser_connection_url')
# PDF should be html_requests because playwright will serve it up (so far) in an embedded page
# @todo https://github.com/dgtlmoon/changedetection.io/issues/2019
# @todo needs a test or a fix
if self.watch.is_pdf:
prefer_fetch_backend = "html_requests"
# Grab the right kind of 'fetcher', (playwright, requests, etc)
from changedetectionio import content_fetchers
if hasattr(content_fetchers, prefer_fetch_backend):
# @todo TEMPORARY HACK - SWITCH BACK TO PLAYWRIGHT FOR BROWSERSTEPS
if prefer_fetch_backend == 'html_webdriver' and self.watch.has_browser_steps:
# This is never supported in selenium anyway
logger.warning("Using playwright fetcher override for possible puppeteer request in browsersteps, because puppetteer:browser steps is incomplete.")
from changedetectionio.content_fetchers.playwright import fetcher as playwright_fetcher
fetcher_obj = playwright_fetcher
else:
fetcher_obj = getattr(content_fetchers, prefer_fetch_backend)
else:
# What it referenced doesn't exist, just use a default
fetcher_obj = getattr(content_fetchers, "html_requests")
proxy_url = None
if preferred_proxy_id:
# Custom browser endpoints should NOT have a proxy added
if not prefer_fetch_backend.startswith('extra_browser_'):
proxy_url = self.datastore.proxy_list.get(preferred_proxy_id).get('url')
logger.debug(f"Selected proxy key '{preferred_proxy_id}' as proxy URL '{proxy_url}' for {url}")
else:
logger.debug(f"Skipping adding proxy data when custom Browser endpoint is specified. ")
# Now call the fetcher (playwright/requests/etc) with arguments that only a fetcher would need.
# When browser_connection_url is None, the method should default to working out the best defaults (OS env vars etc)
self.fetcher = fetcher_obj(proxy_override=proxy_url,
custom_browser_connection_url=custom_browser_connection_url
)
if self.watch.has_browser_steps:
self.fetcher.browser_steps = self.watch.get('browser_steps', [])
self.fetcher.browser_steps_screenshot_path = os.path.join(self.datastore.datastore_path, self.watch.get('uuid'))
# Tweak the base config with the per-watch ones
request_headers = CaseInsensitiveDict()
ua = self.datastore.data['settings']['requests'].get('default_ua')
if ua and ua.get(prefer_fetch_backend):
request_headers.update({'User-Agent': ua.get(prefer_fetch_backend)})
request_headers.update(self.watch.get('headers', {}))
request_headers.update(self.datastore.get_all_base_headers())
request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=self.watch.get('uuid')))
# https://github.com/psf/requests/issues/4525
# Requests doesn't yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot
# do this by accident.
if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']:
request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '')
timeout = self.datastore.data['settings']['requests'].get('timeout')
request_body = self.watch.get('body')
request_method = self.watch.get('method')
ignore_status_codes = self.watch.get('ignore_status_codes', False)
# Configurable per-watch or global extra delay before extracting text (for webDriver types)
system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None)
if self.watch.get('webdriver_delay'):
self.fetcher.render_extract_delay = self.watch.get('webdriver_delay')
elif system_webdriver_delay is not None:
self.fetcher.render_extract_delay = system_webdriver_delay
if self.watch.get('webdriver_js_execute_code') is not None and self.watch.get('webdriver_js_execute_code').strip():
self.fetcher.webdriver_js_execute_code = self.watch.get('webdriver_js_execute_code')
# Requests for PDF's, images etc should be passed the is_binary flag
is_binary = self.watch.is_pdf
# And here we go! call the right browser with browser-specific settings
empty_pages_are_a_change = self.datastore.data['settings']['application'].get('empty_pages_are_a_change', False)
self.fetcher.run(url=url,
timeout=timeout,
request_headers=request_headers,
request_body=request_body,
request_method=request_method,
ignore_status_codes=ignore_status_codes,
current_include_filters=self.watch.get('include_filters'),
is_binary=is_binary,
empty_pages_are_a_change=empty_pages_are_a_change
)
#@todo .quit here could go on close object, so we can run JS if change-detected
self.fetcher.quit()
# After init, call run_changedetection() which will do the actual change-detection
@abstractmethod
def run_changedetection(self, watch, skip_when_checksum_same=True):
def run(self, uuid, skip_when_checksum_same=True, preferred_proxy=None):
update_obj = {'last_notification_error': False, 'last_error': False}
some_data = 'xxxxx'
update_obj["previous_md5"] = hashlib.md5(some_data.encode('utf-8')).hexdigest()
@@ -162,83 +17,8 @@ class difference_detection_processor():
return changed_detected, update_obj, ''.encode('utf-8')
def find_sub_packages(package_name):
"""
Find all sub-packages within the given package.
:param package_name: The name of the base package to scan for sub-packages.
:return: A list of sub-package names.
"""
package = importlib.import_module(package_name)
return [name for _, name, is_pkg in pkgutil.iter_modules(package.__path__) if is_pkg]
def find_processors():
"""
Find all subclasses of DifferenceDetectionProcessor in the specified package.
:param package_name: The name of the package to scan for processor modules.
:return: A list of (module, class) tuples.
"""
package_name = "changedetectionio.processors" # Name of the current package/module
processors = []
sub_packages = find_sub_packages(package_name)
for sub_package in sub_packages:
module_name = f"{package_name}.{sub_package}.processor"
try:
module = importlib.import_module(module_name)
# Iterate through all classes in the module
for name, obj in inspect.getmembers(module, inspect.isclass):
if issubclass(obj, difference_detection_processor) and obj is not difference_detection_processor:
processors.append((module, sub_package))
except (ModuleNotFoundError, ImportError) as e:
logger.warning(f"Failed to import module {module_name}: {e} (find_processors())")
return processors
def get_parent_module(module):
module_name = module.__name__
if '.' not in module_name:
return None # Top-level module has no parent
parent_module_name = module_name.rsplit('.', 1)[0]
try:
return importlib.import_module(parent_module_name)
except Exception as e:
pass
return False
def get_custom_watch_obj_for_processor(processor_name):
from changedetectionio.model import Watch
watch_class = Watch.model
processor_classes = find_processors()
custom_watch_obj = next((tpl for tpl in processor_classes if tpl[1] == processor_name), None)
if custom_watch_obj:
# Parent of .processor.py COULD have its own Watch implementation
parent_module = get_parent_module(custom_watch_obj[0])
if hasattr(parent_module, 'Watch'):
watch_class = parent_module.Watch
return watch_class
def available_processors():
"""
Get a list of processors by name and description for the UI elements
:return: A list :)
"""
processor_classes = find_processors()
available = []
for package, processor_class in processor_classes:
available.append((processor_class, package.name))
return available
from . import restock_diff, text_json_diff
x=[('text_json_diff', text_json_diff.name), ('restock_diff', restock_diff.name)]
# @todo Make this smarter with introspection of sorts.
return x
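A usage sketch for the discovery helpers above, printing what the UI dropdown would see; the output depends on which processor sub-packages are installed.

for processor_name, human_name in available_processors():
    print(f"{processor_name}: {human_name}")
# e.g. text_json_diff: Webpage Text/HTML, JSON and PDF changes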

View File

@@ -1,10 +0,0 @@
class ProcessorException(Exception):
def __init__(self, message=None, status_code=None, url=None, screenshot=None, has_filters=False, html_content='', xpath_data=None):
self.message = message
self.status_code = status_code
self.url = url
self.screenshot = screenshot
self.has_filters = has_filters
self.html_content = html_content
self.xpath_data = xpath_data
return

View File

@@ -0,0 +1,131 @@
import hashlib
import os
import re
import urllib3
from . import difference_detection_processor
from changedetectionio import content_fetcher
from copy import deepcopy
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
name = 'Re-stock detection for single product pages'
description = 'Detects if the product goes back to in-stock'
class UnableToExtractRestockData(Exception):
def __init__(self, status_code):
# Set this so we can use it in other parts of the app
self.status_code = status_code
return
class perform_site_check(difference_detection_processor):
screenshot = None
xpath_data = None
def __init__(self, *args, datastore, **kwargs):
super().__init__(*args, **kwargs)
self.datastore = datastore
def run(self, uuid, skip_when_checksum_same=True):
# DeepCopy so we can be sure we don't accidentally change anything by reference
watch = deepcopy(self.datastore.data['watching'].get(uuid))
if not watch:
raise Exception("Watch no longer exists.")
# Protect against file:// access
if re.search(r'^file', watch.get('url', ''), re.IGNORECASE) and not os.getenv('ALLOW_FILE_URI', False):
raise Exception(
"file:// type access is denied for security reasons."
)
# Unset any existing notification error
update_obj = {'last_notification_error': False, 'last_error': False}
request_headers = watch.get('headers', [])
request_headers.update(self.datastore.get_all_base_headers())
request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=uuid))
# https://github.com/psf/requests/issues/4525
# Requests doesn't yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot
# do this by accident.
if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']:
request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '')
timeout = self.datastore.data['settings']['requests'].get('timeout')
url = watch.link
request_body = self.datastore.data['watching'][uuid].get('body')
request_method = self.datastore.data['watching'][uuid].get('method')
ignore_status_codes = self.datastore.data['watching'][uuid].get('ignore_status_codes', False)
# Pluggable content fetcher
prefer_backend = watch.get_fetch_backend
if not prefer_backend or prefer_backend == 'system':
prefer_backend = self.datastore.data['settings']['application']['fetch_backend']
if hasattr(content_fetcher, prefer_backend):
klass = getattr(content_fetcher, prefer_backend)
else:
# If the klass doesn't exist, just use a default
klass = getattr(content_fetcher, "html_requests")
proxy_id = self.datastore.get_preferred_proxy_for_watch(uuid=uuid)
proxy_url = None
if proxy_id:
proxy_url = self.datastore.proxy_list.get(proxy_id).get('url')
print("UUID {} Using proxy {}".format(uuid, proxy_url))
fetcher = klass(proxy_override=proxy_url)
# Configurable per-watch or global extra delay before extracting text (for webDriver types)
system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None)
if watch['webdriver_delay'] is not None:
fetcher.render_extract_delay = watch.get('webdriver_delay')
elif system_webdriver_delay is not None:
fetcher.render_extract_delay = system_webdriver_delay
# Could be removed if requests/plaintext could also return some info?
if prefer_backend != 'html_webdriver':
raise Exception("Re-stock detection requires Chrome or compatible webdriver/playwright fetcher to work")
if watch.get('webdriver_js_execute_code') is not None and watch.get('webdriver_js_execute_code').strip():
fetcher.webdriver_js_execute_code = watch.get('webdriver_js_execute_code')
fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, watch.get('include_filters'))
fetcher.quit()
self.screenshot = fetcher.screenshot
self.xpath_data = fetcher.xpath_data
# Track the content type
update_obj['content_type'] = fetcher.headers.get('Content-Type', '')
update_obj["last_check_status"] = fetcher.get_last_status_code()
# Main detection method
fetched_md5 = None
if fetcher.instock_data:
fetched_md5 = hashlib.md5(fetcher.instock_data.encode('utf-8')).hexdigest()
# 'Possibly in stock' comes from stock-not-in-stock.js when no string found above the fold.
update_obj["in_stock"] = True if fetcher.instock_data == 'Possibly in stock' else False
else:
raise UnableToExtractRestockData(status_code=fetcher.status_code)
# The main thing that all this at the moment comes down to :)
changed_detected = False
if watch.get('previous_md5') and watch.get('previous_md5') != fetched_md5:
# Yes if we only care about it going to instock, AND we are in stock
if watch.get('in_stock_only') and update_obj["in_stock"]:
changed_detected = True
if not watch.get('in_stock_only'):
# All cases
changed_detected = True
# Always record the new checksum
update_obj["previous_md5"] = fetched_md5
return changed_detected, update_obj, fetcher.instock_data.encode('utf-8')

View File

@@ -1,80 +0,0 @@
from changedetectionio.model.Watch import model as BaseWatch
import re
from babel.numbers import parse_decimal
class Restock(dict):
def parse_currency(self, raw_value: str) -> float:
# Clean and standardize the value (i.e. 1,400.00 should be 1400.00); even better would be to store the whole thing as an integer.
standardized_value = raw_value
if ',' in standardized_value and '.' in standardized_value:
# Identify the correct decimal separator
if standardized_value.rfind('.') > standardized_value.rfind(','):
standardized_value = standardized_value.replace(',', '')
else:
standardized_value = standardized_value.replace('.', '').replace(',', '.')
else:
standardized_value = standardized_value.replace(',', '.')
# Remove any non-numeric characters except for the decimal point
standardized_value = re.sub(r'[^\d.-]', '', standardized_value)
# Convert to float
return float(parse_decimal(standardized_value, locale='en'))
def __init__(self, *args, **kwargs):
# Define default values
default_values = {
'in_stock': None,
'price': None,
'currency': None,
'original_price': None
}
# Initialize the dictionary with default values
super().__init__(default_values)
# Update with any provided positional arguments (dictionaries)
if args:
if len(args) == 1 and isinstance(args[0], dict):
self.update(args[0])
else:
raise ValueError("Only one positional argument of type 'dict' is allowed")
def __setitem__(self, key, value):
# Custom logic to handle setting price and original_price
if key == 'price' or key == 'original_price':
if isinstance(value, str):
value = self.parse_currency(raw_value=value)
super().__setitem__(key, value)
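Worked examples for the separator normalisation above, assuming babel is installed; the input values are illustrative.

r = Restock()
assert r.parse_currency('1,400.00') == 1400.0   # comma as thousands separator
assert r.parse_currency('1.400,00') == 1400.0   # European style
assert r.parse_currency('$12,50') == 12.5       # lone comma treated as decimal
r['price'] = '$1,299.00'                        # __setitem__ parses price strings
assert r['price'] == 1299.0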
class Watch(BaseWatch):
def __init__(self, *arg, **kw):
super().__init__(*arg, **kw)
self['restock'] = Restock(kw['default']['restock']) if kw.get('default') and kw['default'].get('restock') else Restock()
self['restock_settings'] = kw['default']['restock_settings'] if kw.get('default',{}).get('restock_settings') else {
'follow_price_changes': True,
'in_stock_processing' : 'in_stock_only'
} #@todo update
def clear_watch(self):
super().clear_watch()
self.update({'restock': Restock()})
def extra_notification_token_values(self):
values = super().extra_notification_token_values()
values['restock'] = self.get('restock', {})
return values
def extra_notification_token_placeholder_info(self):
values = super().extra_notification_token_placeholder_info()
values.append(('restock.price', "Price detected"))
values.append(('restock.original_price', "Original price at first check"))
return values

View File

@@ -1,81 +0,0 @@
from wtforms import (
BooleanField,
validators,
FloatField
)
from wtforms.fields.choices import RadioField
from wtforms.fields.form import FormField
from wtforms.form import Form
from changedetectionio.forms import processor_text_json_diff_form
class RestockSettingsForm(Form):
in_stock_processing = RadioField(label='Re-stock detection', choices=[
('in_stock_only', "In Stock only (Out Of Stock -> In Stock only)"),
('all_changes', "Any availability changes"),
('off', "Off, don't follow availability/restock"),
], default="in_stock_only")
price_change_min = FloatField('Below price to trigger notification', [validators.Optional()],
render_kw={"placeholder": "No limit", "size": "10"})
price_change_max = FloatField('Above price to trigger notification', [validators.Optional()],
render_kw={"placeholder": "No limit", "size": "10"})
price_change_threshold_percent = FloatField('Threshold in % for price changes since the original price', validators=[
validators.Optional(),
validators.NumberRange(min=0, max=100, message="Should be between 0 and 100"),
], render_kw={"placeholder": "0%", "size": "5"})
follow_price_changes = BooleanField('Follow price changes', default=True)
class processor_settings_form(processor_text_json_diff_form):
restock_settings = FormField(RestockSettingsForm)
def extra_tab_content(self):
return 'Restock & Price Detection'
def extra_form_content(self):
output = ""
if getattr(self, 'watch', None) and getattr(self, 'datastore'):
for tag_uuid in self.watch.get('tags'):
tag = self.datastore.data['settings']['application']['tags'].get(tag_uuid, {})
if tag.get('overrides_watch'):
# @todo - Quick and dirty, can't access 'url_for' here because it's out of scope somehow
output = f"""<p><strong>Note! A Group tag overrides the restock and price detection here.</strong></p><style>#restock-fieldset-price-group {{ opacity: 0.6; }}</style>"""
output += """
{% from '_helpers.html' import render_field, render_checkbox_field, render_button %}
<script>
$(document).ready(function () {
toggleOpacity('#restock_settings-follow_price_changes', '.price-change-minmax', true);
});
</script>
<fieldset id="restock-fieldset-price-group">
<div class="pure-control-group">
<fieldset class="pure-group inline-radio">
{{ render_field(form.restock_settings.in_stock_processing) }}
</fieldset>
<fieldset class="pure-group">
{{ render_checkbox_field(form.restock_settings.follow_price_changes) }}
<span class="pure-form-message-inline">Changes in price should trigger a notification</span>
</fieldset>
<fieldset class="pure-group price-change-minmax">
{{ render_field(form.restock_settings.price_change_min, placeholder=watch.get('restock', {}).get('price')) }}
<span class="pure-form-message-inline">Minimum amount, Trigger a change/notification when the price drops <i>below</i> this value.</span>
</fieldset>
<fieldset class="pure-group price-change-minmax">
{{ render_field(form.restock_settings.price_change_max, placeholder=watch.get('restock', {}).get('price')) }}
<span class="pure-form-message-inline">Maximum amount, Trigger a change/notification when the price rises <i>above</i> this value.</span>
</fieldset>
<fieldset class="pure-group price-change-minmax">
{{ render_field(form.restock_settings.price_change_threshold_percent) }}
<span class="pure-form-message-inline">Price must change more than this % to trigger a change since the first check.</span><br>
<span class="pure-form-message-inline">For example, If the product is $1,000 USD originally, <strong>2%</strong> would mean it has to change more than $20 since the first check.</span><br>
</fieldset>
</div>
</fieldset>
"""
return output

View File

@@ -1,263 +0,0 @@
from .. import difference_detection_processor
from ..exceptions import ProcessorException
from . import Restock
from loguru import logger
import hashlib
import re
import urllib3
import time
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
name = 'Re-stock & Price detection for single product pages'
description = 'Detects if the product goes back to in-stock'
class UnableToExtractRestockData(Exception):
def __init__(self, status_code):
# Set this so we can use it in other parts of the app
self.status_code = status_code
return
class MoreThanOnePriceFound(Exception):
def __init__(self):
return
def _search_prop_by_value(matches, value):
for properties in matches:
for prop in properties:
if value in prop[0]:
return prop[1] # Yield the desired value and exit the function
# should return Restock()
# add casting?
def get_itemprop_availability(html_content) -> Restock:
"""
Kind of funny/cool way to find price/availability in one of many different possibilities.
Use 'extruct' to find any possible RDFa/microdata/json-ld data, make a JSON string from the output then search it.
"""
from jsonpath_ng import parse
now = time.time()
import extruct
logger.trace(f"Imported extruct module in {time.time() - now:.3f}s")
value = {}
now = time.time()
# Extruct is very slow, I'm wondering if some ML is going to be faster (800ms on my i7), 'rdfa' seems to be the heaviest.
syntaxes = ['dublincore', 'json-ld', 'microdata', 'microformat', 'opengraph']
data = extruct.extract(html_content, syntaxes=syntaxes)
logger.trace(f"Extruct basic extract of all metadata done in {time.time() - now:.3f}s")
# First phase, dead simple scanning of anything that looks useful
value = Restock()
if data:
logger.debug(f"Using jsonpath to find price/availability/etc")
price_parse = parse('$..(price|Price)')
pricecurrency_parse = parse('$..(pricecurrency|currency|priceCurrency)')
availability_parse = parse('$..(availability|Availability)')
price_result = price_parse.find(data)
if price_result:
# Right now, we just support single product items, maybe we will store the whole actual metadata separately in the future and
# parse that for the UI?
prices_found = set(str(item.value).replace('$', '') for item in price_result)
if len(price_result) > 1 and len(prices_found) > 1:
# See if all prices are different, in the case that one product has many embedded data types with the same price
# One might have $121.95 and another 121.95 etc
logger.warning(f"More than one price found {prices_found}, throwing exception, can't use this plugin.")
raise MoreThanOnePriceFound()
value['price'] = price_result[0].value
pricecurrency_result = pricecurrency_parse.find(data)
if pricecurrency_result:
value['currency'] = pricecurrency_result[0].value
availability_result = availability_parse.find(data)
if availability_result:
value['availability'] = availability_result[0].value
if value.get('availability'):
value['availability'] = re.sub(r'(?i)^(https|http)://schema.org/', '',
value.get('availability').strip(' "\'').lower()) if value.get('availability') else None
# Second, go dig OpenGraph which is something that jsonpath_ng can't do because of the tuples and double-dots (:)
if not value.get('price') or value.get('availability'):
logger.debug(f"Alternatively digging through OpenGraph properties for restock/price info..")
jsonpath_expr = parse('$..properties')
for match in jsonpath_expr.find(data):
if not value.get('price'):
value['price'] = _search_prop_by_value([match.value], "price:amount")
if not value.get('availability'):
value['availability'] = _search_prop_by_value([match.value], "product:availability")
if not value.get('currency'):
value['currency'] = _search_prop_by_value([match.value], "price:currency")
logger.trace(f"Processed with Extruct in {time.time()-now:.3f}s")
return value
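A small sketch of the jsonpath scan above against a hand-rolled JSON-LD-like dict; the data shape is illustrative and requires jsonpath-ng.

from jsonpath_ng import parse

data = {'json-ld': [{'@type': 'Product',
                     'offers': {'price': '121.95',
                                'priceCurrency': 'USD',
                                'availability': 'https://schema.org/InStock'}}]}
price = parse('$..price').find(data)[0].value             # '121.95'
currency = parse('$..priceCurrency').find(data)[0].value  # 'USD'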
def is_between(number, lower=None, upper=None):
"""
Check if a number is between two values.
Parameters:
number (float): The number to check.
lower (float or None): The lower bound (inclusive). If None, no lower bound.
upper (float or None): The upper bound (inclusive). If None, no upper bound.
Returns:
bool: True if the number is between the lower and upper bounds, False otherwise.
"""
return (lower is None or lower <= number) and (upper is None or number <= upper)
class perform_site_check(difference_detection_processor):
screenshot = None
xpath_data = None
def run_changedetection(self, watch, skip_when_checksum_same=True):
if not watch:
raise Exception("Watch no longer exists.")
# Unset any existing notification error
update_obj = {'last_notification_error': False, 'last_error': False, 'restock': Restock()}
self.screenshot = self.fetcher.screenshot
self.xpath_data = self.fetcher.xpath_data
# Track the content type
update_obj['content_type'] = self.fetcher.headers.get('Content-Type', '')
update_obj["last_check_status"] = self.fetcher.get_last_status_code()
# Which restock settings to compare against?
restock_settings = watch.get('restock_settings', {})
# See if any tags have 'activate for individual watches in this tag/group?' enabled and use the first we find
for tag_uuid in watch.get('tags'):
tag = self.datastore.data['settings']['application']['tags'].get(tag_uuid, {})
if tag.get('overrides_watch'):
restock_settings = tag.get('restock_settings', {})
logger.info(f"Watch {watch.get('uuid')} - Tag '{tag.get('title')}' selected for restock settings override")
break
itemprop_availability = {}
try:
itemprop_availability = get_itemprop_availability(html_content=self.fetcher.content)
except MoreThanOnePriceFound as e:
# Add the real data
raise ProcessorException(message="Cannot run, more than one price detected, this plugin is only for product pages with ONE product, try the content-change detection mode.",
url=watch.get('url'),
status_code=self.fetcher.get_last_status_code(),
screenshot=self.fetcher.screenshot,
xpath_data=self.fetcher.xpath_data
)
# Something valid in get_itemprop_availability() by scraping metadata ?
if itemprop_availability.get('price') or itemprop_availability.get('availability'):
# Store for other usage
update_obj['restock'] = itemprop_availability
if itemprop_availability.get('availability'):
# @todo: Configurable?
if any(substring.lower() in itemprop_availability['availability'].lower() for substring in [
'instock',
'instoreonly',
'limitedavailability',
'onlineonly',
'presale']
):
update_obj['restock']['in_stock'] = True
else:
update_obj['restock']['in_stock'] = False
# Main detection method
fetched_md5 = None
# store original price if not set
if itemprop_availability and itemprop_availability.get('price') and not itemprop_availability.get('original_price'):
itemprop_availability['original_price'] = itemprop_availability.get('price')
update_obj['restock']["original_price"] = itemprop_availability.get('price')
if not self.fetcher.instock_data and not itemprop_availability.get('availability'):
raise ProcessorException(
message=f"Unable to extract restock data for this page unfortunately. (Got code {self.fetcher.get_last_status_code()} from server), no embedded stock information was found and nothing interesting in the text, try using this watch with Chrome.",
url=watch.get('url'),
status_code=self.fetcher.get_last_status_code(),
screenshot=self.fetcher.screenshot,
xpath_data=self.fetcher.xpath_data
)
# Nothing automatic in microdata found, revert to scraping the page
if self.fetcher.instock_data and itemprop_availability.get('availability') is None:
# 'Possibly in stock' comes from stock-not-in-stock.js when no string found above the fold.
# Careful! this does not really come from chrome/js when the watch is set to plaintext
update_obj['restock']["in_stock"] = True if self.fetcher.instock_data == 'Possibly in stock' else False
logger.debug(f"Watch UUID {watch.get('uuid')} restock check returned '{self.fetcher.instock_data}' from JS scraper.")
# What we store in the snapshot
price = update_obj.get('restock').get('price') if update_obj.get('restock').get('price') else ""
snapshot_content = f"In Stock: {update_obj.get('restock').get('in_stock')} - Price: {price}"
# Main detection method
fetched_md5 = hashlib.md5(snapshot_content.encode('utf-8')).hexdigest()
# The main thing that all this at the moment comes down to :)
changed_detected = False
logger.debug(f"Watch UUID {watch.get('uuid')} restock check - Previous MD5: {watch.get('previous_md5')}, Fetched MD5 {fetched_md5}")
# out of stock -> back in stock only?
if watch.get('restock') and watch['restock'].get('in_stock') != update_obj['restock'].get('in_stock'):
# Yes if we only care about it going to instock, AND we are in stock
if restock_settings.get('in_stock_processing') == 'in_stock_only' and update_obj['restock']['in_stock']:
changed_detected = True
if restock_settings.get('in_stock_processing') == 'all_changes':
# All cases
changed_detected = True
if restock_settings.get('follow_price_changes') and watch.get('restock') and update_obj.get('restock') and update_obj['restock'].get('price'):
price = float(update_obj['restock'].get('price'))
# Default to current price if no previous price found
if watch['restock'].get('original_price'):
previous_price = float(watch['restock'].get('original_price'))
# It was different, but negate it further down
if price != previous_price:
changed_detected = True
# Minimum/maximum price limit
if update_obj.get('restock') and update_obj['restock'].get('price'):
logger.debug(
f"{watch.get('uuid')} - Change was detected, 'price_change_max' is '{restock_settings.get('price_change_max', '')}' 'price_change_min' is '{restock_settings.get('price_change_min', '')}', price from website is '{update_obj['restock'].get('price', '')}'.")
if update_obj['restock'].get('price'):
min_limit = float(restock_settings.get('price_change_min')) if restock_settings.get('price_change_min') else None
max_limit = float(restock_settings.get('price_change_max')) if restock_settings.get('price_change_max') else None
price = float(update_obj['restock'].get('price'))
logger.debug(f"{watch.get('uuid')} after float conversion - Min limit: '{min_limit}' Max limit: '{max_limit}' Price: '{price}'")
if min_limit or max_limit:
if is_between(number=price, lower=min_limit, upper=max_limit):
# Price was between min/max limit, so there was nothing to do in any case
logger.trace(f"{watch.get('uuid')} {price} is between {min_limit} and {max_limit}, nothing to check, forcing changed_detected = False (was {changed_detected})")
changed_detected = False
else:
logger.trace(f"{watch.get('uuid')} {price} is between {min_limit} and {max_limit}, continuing normal comparison")
# Price comparison by %
if watch['restock'].get('original_price') and changed_detected and restock_settings.get('price_change_threshold_percent'):
previous_price = float(watch['restock'].get('original_price'))
pc = float(restock_settings.get('price_change_threshold_percent'))
change = abs((price - previous_price) / previous_price * 100)
if change and change <= pc:
logger.debug(f"{watch.get('uuid')} Override change-detected to FALSE because % threshold ({pc}%) was {change:.3f}%")
changed_detected = False
else:
logger.debug(f"{watch.get('uuid')} Price change was {change:.3f}% , (threshold {pc}%)")
# Always record the new checksum
update_obj["previous_md5"] = fetched_md5
return changed_detected, update_obj, snapshot_content.encode('utf-8').strip()
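A worked example of the percentage gate above, matching the $1,000 / 2% example given in the settings form; values are illustrative.

previous_price, price, pc = 1000.00, 1015.00, 2.0
change = abs((price - previous_price) / previous_price * 100)  # 1.5
assert change <= pc   # 1.5% is below the 2% threshold, so changed_detected is forced False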

View File

@@ -1,31 +1,27 @@
# HTML to TEXT/JSON DIFFERENCE self.fetcher
# HTML to TEXT/JSON DIFFERENCE FETCHER
import hashlib
import json
import logging
import os
import re
import urllib3
from changedetectionio.processors import difference_detection_processor
from changedetectionio.html_tools import PERL_STYLE_REGEX, cdata_in_document_to_text
from changedetectionio import html_tools, content_fetchers
from changedetectionio import content_fetcher, html_tools
from changedetectionio.blueprint.price_data_follower import PRICE_DATA_TRACK_ACCEPT, PRICE_DATA_TRACK_REJECT
from loguru import logger
from copy import deepcopy
from . import difference_detection_processor
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
name = 'Webpage Text/HTML, JSON and PDF changes'
description = 'Detects all text changes where possible'
json_filter_prefixes = ['json:', 'jq:', 'jqraw:']
class FilterNotFoundInResponse(ValueError):
def __init__(self, msg, screenshot=None, xpath_data=None):
self.screenshot = screenshot
self.xpath_data = xpath_data
def __init__(self, msg):
ValueError.__init__(self, msg)
class PDFToHTMLToolNotFound(ValueError):
def __init__(self, msg):
ValueError.__init__(self, msg)
@@ -34,35 +30,130 @@ class PDFToHTMLToolNotFound(ValueError):
# Some common stuff here that can be moved to a base class
# (set_proxy_from_list)
class perform_site_check(difference_detection_processor):
screenshot = None
xpath_data = None
def run_changedetection(self, watch, skip_when_checksum_same=True):
def __init__(self, *args, datastore, **kwargs):
super().__init__(*args, **kwargs)
self.datastore = datastore
# Doesn't look like python supports forward slash auto enclosure in re.findall
# So convert it to inline flag "foobar(?i)" type configuration
def forward_slash_enclosed_regex_to_options(self, regex):
res = re.search(r'^/(.*?)/(\w+)$', regex, re.IGNORECASE)
if res:
regex = res.group(1)
regex += '(?{})'.format(res.group(2))
else:
regex += '(?{})'.format('i')
return regex
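A standalone copy of the transform above for illustration; note that Python 3.11+ only accepts inline global flags at the very start of a pattern, so downstream callers may need to relocate the flag.

import re

def to_inline_flags(regex):
    # "/needle/flags" -> "needle(?flags)", otherwise default to case-insensitive
    res = re.search(r'^/(.*?)/(\w+)$', regex, re.IGNORECASE)
    return f"{res.group(1)}(?{res.group(2)})" if res else f"{regex}(?i)"

print(to_inline_flags('/in stock/i'))  # -> in stock(?i)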
def run(self, uuid, skip_when_checksum_same=True, preferred_proxy=None):
changed_detected = False
html_content = ""
screenshot = False # as bytes
stripped_text_from_html = ""
# DeepCopy so we can be sure we don't accidentally change anything by reference
watch = deepcopy(self.datastore.data['watching'].get(uuid))
if not watch:
raise Exception("Watch no longer exists.")
# Protect against file:// access
if re.search(r'^file', watch.get('url', ''), re.IGNORECASE) and not os.getenv('ALLOW_FILE_URI', False):
raise Exception(
"file:// type access is denied for security reasons."
)
# Unset any existing notification error
update_obj = {'last_notification_error': False, 'last_error': False}
# Tweak the base config with the per-watch ones
request_headers = watch.get('headers', [])
request_headers.update(self.datastore.get_all_base_headers())
request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=uuid))
# https://github.com/psf/requests/issues/4525
# Requests doesn't yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot
# do this by accident.
if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']:
request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '')
timeout = self.datastore.data['settings']['requests'].get('timeout')
url = watch.link
self.screenshot = self.fetcher.screenshot
self.xpath_data = self.fetcher.xpath_data
request_body = self.datastore.data['watching'][uuid].get('body')
request_method = self.datastore.data['watching'][uuid].get('method')
ignore_status_codes = self.datastore.data['watching'][uuid].get('ignore_status_codes', False)
# source: support
is_source = False
if url.startswith('source:'):
url = url.replace('source:', '')
is_source = True
# Pluggable content fetcher
prefer_backend = watch.get_fetch_backend
if not prefer_backend or prefer_backend == 'system':
prefer_backend = self.datastore.data['settings']['application']['fetch_backend']
if hasattr(content_fetcher, prefer_backend):
klass = getattr(content_fetcher, prefer_backend)
else:
# If the klass doesn't exist, just use a default
klass = getattr(content_fetcher, "html_requests")
if preferred_proxy:
proxy_id = preferred_proxy
else:
proxy_id = self.datastore.get_preferred_proxy_for_watch(uuid=uuid)
proxy_url = None
if proxy_id:
proxy_url = self.datastore.proxy_list.get(proxy_id).get('url')
print("UUID {} Using proxy {}".format(uuid, proxy_url))
fetcher = klass(proxy_override=proxy_url)
# Configurable per-watch or global extra delay before extracting text (for webDriver types)
system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None)
if watch['webdriver_delay'] is not None:
fetcher.render_extract_delay = watch.get('webdriver_delay')
elif system_webdriver_delay is not None:
fetcher.render_extract_delay = system_webdriver_delay
# Possible conflict
if prefer_backend == 'html_webdriver':
fetcher.browser_steps = watch.get('browser_steps', None)
fetcher.browser_steps_screenshot_path = os.path.join(self.datastore.datastore_path, uuid)
if watch.get('webdriver_js_execute_code') is not None and watch.get('webdriver_js_execute_code').strip():
fetcher.webdriver_js_execute_code = watch.get('webdriver_js_execute_code')
# Requests for PDFs, images etc. should be passed the is_binary flag
is_binary = watch.is_pdf
fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, watch.get('include_filters'), is_binary=is_binary)
fetcher.quit()
self.screenshot = fetcher.screenshot
self.xpath_data = fetcher.xpath_data
# Track the content type
update_obj['content_type'] = self.fetcher.get_all_headers().get('content-type', '').lower()
update_obj['content_type'] = fetcher.get_all_headers().get('content-type', '').lower()
# Watches added automatically in the queue manager will skip if it's the same checksum as the previous run
# Saves a lot of CPU
update_obj['previous_md5_before_filters'] = hashlib.md5(self.fetcher.content.encode('utf-8')).hexdigest()
update_obj['previous_md5_before_filters'] = hashlib.md5(fetcher.content.encode('utf-8')).hexdigest()
if skip_when_checksum_same:
if update_obj['previous_md5_before_filters'] == watch.get('previous_md5_before_filters'):
raise content_fetchers.exceptions.checksumFromPreviousCheckWasTheSame()
raise content_fetcher.checksumFromPreviousCheckWasTheSame()
# Fetching complete, now filters
# @todo move to class / maybe inside of fetcher abstract base?
# @note: I feel like the following should be in a more obvious chain system
# - Check filter text
@@ -71,24 +162,15 @@ class perform_site_check(difference_detection_processor):
# https://stackoverflow.com/questions/41817578/basic-method-chaining ?
# return content().textfilter().jsonextract().checksumcompare() ?
is_json = 'application/json' in self.fetcher.get_all_headers().get('content-type', '').lower()
is_json = 'application/json' in fetcher.get_all_headers().get('content-type', '').lower()
is_html = not is_json
is_rss = False
ctype_header = self.fetcher.get_all_headers().get('content-type', '').lower()
# Go into RSS preprocess for converting CDATA/comment to usable text
if any(substring in ctype_header for substring in ['application/xml', 'application/rss', 'text/xml']):
if '<rss' in self.fetcher.content[:100].lower():
self.fetcher.content = cdata_in_document_to_text(html_content=self.fetcher.content)
is_rss = True
# source: support, basically treat it as plaintext
if watch.is_source_type_url:
if is_source:
is_html = False
is_json = False
inline_pdf = self.fetcher.get_all_headers().get('content-disposition', '') and '%PDF-1' in self.fetcher.content[:10]
if watch.is_pdf or 'application/pdf' in self.fetcher.get_all_headers().get('content-type', '').lower() or inline_pdf:
if watch.is_pdf or 'application/pdf' in fetcher.get_all_headers().get('content-type', '').lower():
from shutil import which
tool = os.getenv("PDF_TO_HTML_TOOL", "pdftohtml")
if not which(tool):
@@ -99,35 +181,33 @@ class perform_site_check(difference_detection_processor):
[tool, '-stdout', '-', '-s', 'out.pdf', '-i'],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
proc.stdin.write(self.fetcher.raw_content)
proc.stdin.write(fetcher.raw_content)
proc.stdin.close()
self.fetcher.content = proc.stdout.read().decode('utf-8')
fetcher.content = proc.stdout.read().decode('utf-8')
proc.wait(timeout=60)
# Add a little metadata so we know if the file changes (like if an image changes, but the text is the same)
# @todo may cause problems with non-UTF8?
metadata = "<p>Added by changedetection.io: Document checksum - {} Filesize - {} bytes</p>".format(
hashlib.md5(self.fetcher.raw_content).hexdigest().upper(),
len(self.fetcher.content))
hashlib.md5(fetcher.raw_content).hexdigest().upper(),
len(fetcher.content))
self.fetcher.content = self.fetcher.content.replace('</body>', metadata + '</body>')
fetcher.content = fetcher.content.replace('</body>', metadata + '</body>')
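# e.g. this appends something like
# '<p>Added by changedetection.io: Document checksum - 5D41402ABC4B2A76B9719D911017C592 Filesize - 1234 bytes</p>'
# just before </body> (illustrative values only)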
# Better would be if Watch.model could access the global data also
# and then use getattr https://docs.python.org/3/reference/datamodel.html#object.__getitem__
# https://realpython.com/inherit-python-dict/ instead of doing it procedurally
include_filters_from_tags = self.datastore.get_tag_overrides_for_watch(uuid=watch.get('uuid'), attr='include_filters')
include_filters_from_tags = self.datastore.get_tag_overrides_for_watch(uuid=uuid, attr='include_filters')
include_filters_rule = [*watch.get('include_filters', []), *include_filters_from_tags]
# 1845 - remove duplicated filters in both group and watch include filter
include_filters_rule = list(dict.fromkeys(watch.get('include_filters', []) + include_filters_from_tags))
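# e.g. ['#price', '.stock', '#price'] -> ['#price', '.stock'] (dict.fromkeys keeps first-seen order)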
subtractive_selectors = [*self.datastore.get_tag_overrides_for_watch(uuid=watch.get('uuid'), attr='subtractive_selectors'),
subtractive_selectors = [*self.datastore.get_tag_overrides_for_watch(uuid=uuid, attr='subtractive_selectors'),
*watch.get("subtractive_selectors", []),
*self.datastore.data["settings"]["application"].get("global_subtractive_selectors", [])
]
# Inject a virtual LD+JSON price tracker rule
if watch.get('track_ldjson_price_data', '') == PRICE_DATA_TRACK_ACCEPT:
include_filters_rule += html_tools.LD_JSON_PRODUCT_OFFER_SELECTORS
include_filters_rule.append(html_tools.LD_JSON_PRODUCT_OFFER_SELECTOR)
has_filter_rule = len(include_filters_rule) and len(include_filters_rule[0].strip())
has_subtractive_selectors = len(subtractive_selectors) and len(subtractive_selectors[0].strip())
@@ -139,30 +219,33 @@ class perform_site_check(difference_detection_processor):
if is_json:
# Sort the JSON so we don't get false alerts when the content is just re-ordered
try:
self.fetcher.content = json.dumps(json.loads(self.fetcher.content), sort_keys=True)
fetcher.content = json.dumps(json.loads(fetcher.content), sort_keys=True)
except Exception as e:
# Might have just been a snippet, or otherwise bad JSON, continue
pass
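# e.g. '{"b": 2, "a": 1}' and '{"a": 1, "b": 2}' both normalise to '{"a": 1, "b": 2}',
# so a pure re-ordering of keys no longer looks like a change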
if has_filter_rule:
json_filter_prefixes = ['json:', 'jq:']
for filter in include_filters_rule:
if any(prefix in filter for prefix in json_filter_prefixes):
stripped_text_from_html += html_tools.extract_json_as_string(content=self.fetcher.content, json_filter=filter)
stripped_text_from_html += html_tools.extract_json_as_string(content=fetcher.content, json_filter=filter)
is_html = False
if is_html or watch.is_source_type_url:
if is_html or is_source:
# CSS Filter, extract the HTML that matches and feed that into the existing inscriptis::get_text
self.fetcher.content = html_tools.workarounds_for_obfuscations(self.fetcher.content)
html_content = self.fetcher.content
fetcher.content = html_tools.workarounds_for_obfuscations(fetcher.content)
html_content = fetcher.content
# If not JSON, and if it's not text/plain..
if 'text/plain' in self.fetcher.get_all_headers().get('content-type', '').lower():
if 'text/plain' in fetcher.get_all_headers().get('content-type', '').lower():
# Don't run get_text or xpath/css filters on plaintext
stripped_text_from_html = html_content
else:
# Does it have some ld+json price data? used for easier monitoring
update_obj['has_ldjson_price_data'] = html_tools.has_ldjson_product_info(self.fetcher.content)
update_obj['has_ldjson_price_data'] = html_tools.has_ldjson_product_info(fetcher.content)
# Then we assume HTML
if has_filter_rule:
@@ -172,65 +255,53 @@ class perform_site_check(difference_detection_processor):
# For HTML/XML we offer xpath as an option, just start a regular xPath "/.."
if filter_rule[0] == '/' or filter_rule.startswith('xpath:'):
html_content += html_tools.xpath_filter(xpath_filter=filter_rule.replace('xpath:', ''),
html_content=self.fetcher.content,
append_pretty_line_formatting=not watch.is_source_type_url,
is_rss=is_rss)
elif filter_rule.startswith('xpath1:'):
html_content += html_tools.xpath1_filter(xpath_filter=filter_rule.replace('xpath1:', ''),
html_content=self.fetcher.content,
append_pretty_line_formatting=not watch.is_source_type_url,
is_rss=is_rss)
html_content=fetcher.content,
append_pretty_line_formatting=not is_source)
else:
# CSS Filter, extract the HTML that matches and feed that into the existing inscriptis::get_text
html_content += html_tools.include_filters(include_filters=filter_rule,
html_content=self.fetcher.content,
append_pretty_line_formatting=not watch.is_source_type_url)
html_content=fetcher.content,
append_pretty_line_formatting=not is_source)
if not html_content.strip():
raise FilterNotFoundInResponse(msg=include_filters_rule, screenshot=self.fetcher.screenshot, xpath_data=self.fetcher.xpath_data)
raise FilterNotFoundInResponse(include_filters_rule)
if has_subtractive_selectors:
html_content = html_tools.element_removal(subtractive_selectors, html_content)
if watch.is_source_type_url:
if is_source:
stripped_text_from_html = html_content
else:
# extract text
do_anchor = self.datastore.data["settings"]["application"].get("render_anchor_tag_content", False)
stripped_text_from_html = \
html_tools.html_to_text(
html_content=html_content,
render_anchor_tag_content=do_anchor,
is_rss=is_rss # #1874 activate the <title workaround hack
html_content,
render_anchor_tag_content=do_anchor
)
if watch.get('sort_text_alphabetically') and stripped_text_from_html:
# Note: Because a <p>something</p> will add an extra line feed to signify the paragraph gap
# we end up with 'Some text\n\n'; sorting would move all those extra \n to the start, so we remove them here.
stripped_text_from_html = stripped_text_from_html.replace('\n\n', '\n')
stripped_text_from_html = '\n'.join( sorted(stripped_text_from_html.splitlines(), key=lambda x: x.lower() ))
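# e.g. 'pear\nApple\nbanana' -> 'Apple\nbanana\npear' (case-insensitive ordering)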
# Re #340 - return the content before the 'ignore text' was applied
text_content_before_ignored_filter = stripped_text_from_html.encode('utf-8')
# @todo whitespace coming from missing rtrim()?
# stripped_text_from_html could be based on their preferences, replace the processed text with only that which they want to know about.
# Rewrites the processed text based on only what diff result they want to see
if watch.has_special_diff_filter_options_set() and len(watch.history.keys()):
# Now the content comes from the diff-parser and not the returned HTTP traffic, so there could be some differences
from changedetectionio import diff
from .. import diff
# needs to not include (added) etc or it may get used twice
# Replace the processed text with the preferred result
rendered_diff = diff.render_diff(previous_version_file_contents=watch.get_last_fetched_text_before_filters(),
newest_version_file_contents=stripped_text_from_html,
include_equal=False, # not the same lines
include_added=watch.get('filter_text_added', True),
include_removed=watch.get('filter_text_removed', True),
include_replaced=watch.get('filter_text_replaced', True),
line_feed_sep="\n",
include_change_type_prefix=False)
rendered_diff = diff.render_diff(previous_version_file_contents=watch.get_last_fetched_before_filters(),
newest_version_file_contents=stripped_text_from_html,
include_equal=False, # not the same lines
include_added=watch.get('filter_text_added', True),
include_removed=watch.get('filter_text_removed', True),
include_replaced=watch.get('filter_text_replaced', True),
line_feed_sep="\n",
include_change_type_prefix=False)
watch.save_last_text_fetched_before_filters(text_content_before_ignored_filter)
watch.save_last_fetched_before_filters(text_content_before_ignored_filter)
if not rendered_diff and stripped_text_from_html:
# We had some content, but no differences were found
@@ -243,18 +314,12 @@ class perform_site_check(difference_detection_processor):
# Treat pages with no renderable text content as a change? No by default
empty_pages_are_a_change = self.datastore.data['settings']['application'].get('empty_pages_are_a_change', False)
if not is_json and not empty_pages_are_a_change and len(stripped_text_from_html.strip()) == 0:
raise content_fetchers.exceptions.ReplyWithContentButNoText(url=url,
status_code=self.fetcher.get_last_status_code(),
screenshot=self.fetcher.screenshot,
has_filters=has_filter_rule,
html_content=html_content,
xpath_data=self.fetcher.xpath_data
)
raise content_fetcher.ReplyWithContentButNoText(url=url, status_code=fetcher.get_last_status_code(), screenshot=screenshot)
# We rely on the actual text in the html output.. many sites have random script vars etc,
# in the future we'll implement other mechanisms.
update_obj["last_check_status"] = self.fetcher.get_last_status_code()
update_obj["last_check_status"] = fetcher.get_last_status_code()
# If there's text to skip
# @todo we could abstract out the get_text() to handle this cleaner
@@ -270,25 +335,16 @@ class perform_site_check(difference_detection_processor):
regex_matched_output = []
for s_re in extract_text:
# in case they specified something in '/.../x'
if re.search(PERL_STYLE_REGEX, s_re, re.IGNORECASE):
regex = html_tools.perl_style_slash_enclosed_regex_to_options(s_re)
result = re.findall(regex.encode('utf-8'), stripped_text_from_html)
regex = self.forward_slash_enclosed_regex_to_options(s_re)
result = re.findall(regex.encode('utf-8'), stripped_text_from_html)
for l in result:
if type(l) is tuple:
# @todo - some formatter option default (between groups)
regex_matched_output += list(l) + [b'\n']
else:
# @todo - some formatter option default (between each ungrouped result)
regex_matched_output += [l] + [b'\n']
else:
# Doesn't look like a regex, just hunt for the plaintext and return whatever matches
# `stripped_text_from_html` will be bytes, so we must encode s_re also to bytes
r = re.compile(re.escape(s_re.encode('utf-8')), re.IGNORECASE)
res = r.findall(stripped_text_from_html)
if res:
for match in res:
regex_matched_output += [match] + [b'\n']
for l in result:
if type(l) is tuple:
# @todo - some formatter option default (between groups)
regex_matched_output += list(l) + [b'\n']
else:
# @todo - some formatter option default (between each ungrouped result)
regex_matched_output += [l] + [b'\n']
# Now we will only show what the regex matched
stripped_text_from_html = b''
@@ -338,17 +394,21 @@ class perform_site_check(difference_detection_processor):
if blocked:
changed_detected = False
logger.debug(f"Watch UUID {watch.get('uuid')} content check - Previous MD5: {watch.get('previous_md5')}, Fetched MD5 {fetched_md5}")
# Extract the page <title> to use as the watch title
if is_html:
if self.datastore.data['settings']['application'].get('extract_title_as_title') or watch['extract_title_as_title']:
if not watch['title'] or not len(watch['title']):
update_obj['title'] = html_tools.extract_element(find='title', html_content=fetcher.content)
if changed_detected:
if watch.get('check_unique_lines', False):
has_unique_lines = watch.lines_contain_something_unique_compared_to_history(lines=stripped_text_from_html.splitlines())
# One or more lines? unsure?
if not has_unique_lines:
logger.debug(f"check_unique_lines: UUID {watch.get('uuid')} didnt have anything new setting change_detected=False")
logging.debug("check_unique_lines: UUID {} didnt have anything new setting change_detected=False".format(uuid))
changed_detected = False
else:
logger.debug(f"check_unique_lines: UUID {watch.get('uuid')} had unique content")
logging.debug("check_unique_lines: UUID {} had unique content".format(uuid))
# Always record the new checksum
update_obj["previous_md5"] = fetched_md5
View File
@@ -18,19 +18,12 @@ module.exports = async ({page, context}) => {
await page.setBypassCSP(true)
await page.setExtraHTTPHeaders(req_headers);
if (user_agent) {
await page.setUserAgent(user_agent);
}
await page.setUserAgent(user_agent);
// https://ourcodeworld.com/articles/read/1106/how-to-solve-puppeteer-timeouterror-navigation-timeout-of-30000-ms-exceeded
await page.setDefaultNavigationTimeout(0);
if (proxy_username) {
// Setting Proxy-Authentication header is deprecated, and doing so can trigger header change errors from Puppeteer
// https://github.com/puppeteer/puppeteer/issues/676 ?
// https://help.brightdata.com/hc/en-us/articles/12632549957649-Proxy-Manager-How-to-Guides#h_01HAKWR4Q0AFS8RZTNYWRDFJC2
// https://cri.dev/posts/2020-03-30-How-to-solve-Puppeteer-Chrome-Error-ERR_INVALID_ARGUMENT/
await page.authenticate({
username: proxy_username,
password: proxy_password
@@ -146,7 +139,7 @@ module.exports = async ({page, context}) => {
var xpath_data;
var instock_data;
try {
// Not sure the best way here, in the future this should be a new package added to npm then run in evaluatedCode
// Not sure the best way here, in the future this should be a new package added to npm then run in browserless
// (Once the old playwright is removed)
xpath_data = await page.evaluate((include_filters) => {%xpath_scrape_code%}, include_filters);
instock_data = await page.evaluate(() => {%instock_scrape_code%});
View File
@@ -0,0 +1,102 @@
function isItemInStock() {
// @todo Pass these in so the same list can be used in non-JS fetchers
const outOfStockTexts = [
'0 in stock',
'agotado',
'artikel zurzeit vergriffen',
'as soon as stock is available',
'available for back order',
'backordered',
'brak na stanie',
'brak w magazynie',
'coming soon',
'currently have any tickets for this',
'currently unavailable',
'en rupture de stock',
'item is no longer available',
'message if back in stock',
'nachricht bei',
'nicht auf lager',
'nicht lieferbar',
'nicht zur verfügung',
'no disponible temporalmente',
'no longer in stock',
'no tickets available',
'not available',
'not currently available',
'not in stock',
'notify me when available',
'não estamos a aceitar encomendas',
'out of stock',
'out-of-stock',
'produkt niedostępny',
'sold out',
'temporarily out of stock',
'temporarily unavailable',
'tickets unavailable',
'unavailable tickets',
'we do not currently have an estimate of when this product will be back in stock.',
'zur zeit nicht an lager',
];
const negateOutOfStockRegexs = [
'[0-9] in stock'
]
var negateOutOfStockRegexs_r = [];
for (let i = 0; i < negateOutOfStockRegexs.length; i++) {
negateOutOfStockRegexs_r.push(new RegExp(negateOutOfStockRegexs[i], 'g'));
}
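// Sketch of the intent: an element text like "3 in stock" matches '[0-9] in stock'
// and short-circuits to 'Possibly in stock' below. Caveat: the 'g' flag makes
// .test() stateful via lastIndex, so a non-global RegExp would be safer here.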
const elementsWithZeroChildren = Array.from(document.getElementsByTagName('*')).filter(element => element.children.length === 0);
// REGEXS THAT REALLY MEAN IT'S IN STOCK
for (let i = elementsWithZeroChildren.length - 1; i >= 0; i--) {
const element = elementsWithZeroChildren[i];
if (element.offsetWidth > 0 || element.offsetHeight > 0 || element.getClientRects().length > 0) {
var elementText="";
if (element.tagName.toLowerCase() === "input") {
elementText = element.value.toLowerCase();
} else {
elementText = element.textContent.toLowerCase();
}
if (elementText.length) {
// try the ones which could mean it's in stock
for (let i = 0; i < negateOutOfStockRegexs.length; i++) {
if (negateOutOfStockRegexs_r[i].test(elementText)) {
return 'Possibly in stock';
}
}
}
}
}
// OTHER STUFF THAT COULD MEAN IT'S OUT OF STOCK
for (let i = elementsWithZeroChildren.length - 1; i >= 0; i--) {
const element = elementsWithZeroChildren[i];
if (element.offsetWidth > 0 || element.offsetHeight > 0 || element.getClientRects().length > 0) {
var elementText="";
if (element.tagName.toLowerCase() === "input") {
elementText = element.value.toLowerCase();
} else {
elementText = element.textContent.toLowerCase();
}
if (elementText.length) {
// and these mean it's out of stock
for (const outOfStockText of outOfStockTexts) {
if (elementText.includes(outOfStockText)) {
return elementText; // item is out of stock
}
}
}
}
}
return 'Possibly in stock'; // possibly in stock, can't decide otherwise.
}
// returns the element text that makes it think it's out of stock
return isItemInStock();
View File
@@ -16,23 +16,24 @@ try {
}
// Include the getXpath script directly, easier than fetching
function getxpath(e) {
var n = e;
if (n && n.id) return '//*[@id="' + n.id + '"]';
for (var o = []; n && Node.ELEMENT_NODE === n.nodeType;) {
for (var i = 0, r = !1, d = n.previousSibling; d;) d.nodeType !== Node.DOCUMENT_TYPE_NODE && d.nodeName === n.nodeName && i++, d = d.previousSibling;
for (d = n.nextSibling; d;) {
if (d.nodeName === n.nodeName) {
r = !0;
break
var n = e;
if (n && n.id) return '//*[@id="' + n.id + '"]';
for (var o = []; n && Node.ELEMENT_NODE === n.nodeType;) {
for (var i = 0, r = !1, d = n.previousSibling; d;) d.nodeType !== Node.DOCUMENT_TYPE_NODE && d.nodeName === n.nodeName && i++, d = d.previousSibling;
for (d = n.nextSibling; d;) {
if (d.nodeName === n.nodeName) {
r = !0;
break
}
d = d.nextSibling
}
d = d.nextSibling
o.push((n.prefix ? n.prefix + ":" : "") + n.localName + (i || r ? "[" + (i + 1) + "]" : "")), n = n.parentNode
}
o.push((n.prefix ? n.prefix + ":" : "") + n.localName + (i || r ? "[" + (i + 1) + "]" : "")), n = n.parentNode
return o.length ? "/" + o.reverse().join("/") : ""
}
return o.length ? "/" + o.reverse().join("/") : ""
}
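// Sketch: an element that itself has an id resolves to '//*[@id="..."]'; anything
// else gets a positional path such as '/html/body/div/p[2]' built from sibling counts.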
const findUpTag = (el) => {
let r = el
@@ -58,14 +59,14 @@ const findUpTag = (el) => {
// Strategy 2: Keep going up until we hit an ID tag, imagine it's like #list-widget div h4
while (r.parentNode) {
if (depth === 5) {
if (depth == 5) {
break;
}
if ('' !== r.id) {
chained_css.unshift("#" + CSS.escape(r.id));
final_selector = chained_css.join(' > ');
// Be sure there's only one; some sites have multiples of the same ID tag :-(
if (window.document.querySelectorAll(final_selector).length === 1) {
if (window.document.querySelectorAll(final_selector).length == 1) {
return final_selector;
}
return null;
@@ -81,60 +82,30 @@ const findUpTag = (el) => {
// @todo - if it's SVG or IMG, go into image diff mode
// %ELEMENTS% replaced at injection time because different interfaces use it with different settings
var elements = window.document.querySelectorAll("%ELEMENTS%");
var size_pos = [];
// after page fetch, inject this JS
// build a map of all elements and their positions (maybe only those that contain text?)
var bbox;
console.log("Scanning %ELEMENTS%");
for (var i = 0; i < elements.length; i++) {
bbox = elements[i].getBoundingClientRect();
function collectVisibleElements(parent, visibleElements) {
if (!parent) return; // Base case: if parent is null or undefined, return
// Add the parent itself to the visible elements array if it's of the specified types
const tagName = parent.tagName.toLowerCase();
if ("%ELEMENTS%".split(',').includes(tagName)) {
visibleElements.push(parent);
// Exclude items that are not interactable or visible
if(elements[i].style.opacity === "0") {
continue
}
// Iterate over the parent's children
const children = parent.children;
for (let i = 0; i < children.length; i++) {
const child = children[i];
if (
child.nodeType === Node.ELEMENT_NODE &&
window.getComputedStyle(child).display !== 'none' &&
window.getComputedStyle(child).visibility !== 'hidden' &&
child.offsetWidth >= 0 &&
child.offsetHeight >= 0 &&
window.getComputedStyle(child).contentVisibility !== 'hidden'
) {
// If the child is an element and is visible, recursively collect visible elements
collectVisibleElements(child, visibleElements);
}
if(elements[i].style.display === "none" || elements[i].style.pointerEvents === "none" ) {
continue
}
}
// Create an array to hold the visible elements
const visibleElementsArray = [];
// Call collectVisibleElements with the starting parent element
collectVisibleElements(document.body, visibleElementsArray);
visibleElementsArray.forEach(function (element) {
bbox = element.getBoundingClientRect();
// Skip really small ones, and where width or height ==0
if (bbox['width'] * bbox['height'] < 10) {
return
if (bbox['width'] * bbox['height'] < 100) {
continue;
}
// Don't include elements that are offset from canvas
if (bbox['top'] + scroll_y < 0 || bbox['left'] < 0) {
return
if (bbox['top']+scroll_y < 0 || bbox['left'] < 0) {
continue;
}
// @todo the getXpath kind of sucks, it doesn't know when there is for example just one ID sometimes
@@ -143,46 +114,50 @@ visibleElementsArray.forEach(function (element) {
// 1st primitive - if it has a class, try joining it all and selecting; if there's only one.. well that's us.
xpath_result = false;
try {
var d = findUpTag(element);
var d = findUpTag(elements[i]);
if (d) {
xpath_result = d;
}
} catch (e) {
console.log(e);
}
// You could swap it and default to getXpath and then try the smarter one
// default back to the less intelligent one
if (!xpath_result) {
try {
// I've seen on FB and eBay that this doesn't work
// ReferenceError: getXPath is not defined at eval (eval at evaluate (:152:29), <anonymous>:67:20) at UtilityScript.evaluate (<anonymous>:159:18) at UtilityScript.<anonymous> (<anonymous>:1:44)
xpath_result = getxpath(element);
xpath_result = getxpath(elements[i]);
} catch (e) {
console.log(e);
return
continue;
}
}
if (window.getComputedStyle(elements[i]).visibility === "hidden") {
continue;
}
// @todo Possible to ONLY list where it's clickable to save JSON xfer size
size_pos.push({
xpath: xpath_result,
width: Math.round(bbox['width']),
height: Math.round(bbox['height']),
left: Math.floor(bbox['left']),
top: Math.floor(bbox['top']) + scroll_y,
tagName: (element.tagName) ? element.tagName.toLowerCase() : '',
tagtype: (element.tagName.toLowerCase() === 'input' && element.type) ? element.type.toLowerCase() : '',
isClickable: window.getComputedStyle(element).cursor == "pointer"
top: Math.floor(bbox['top'])+scroll_y,
tagName: (elements[i].tagName) ? elements[i].tagName.toLowerCase() : '',
tagtype: (elements[i].tagName == 'INPUT' && elements[i].type) ? elements[i].type.toLowerCase() : '',
isClickable: (elements[i].onclick) || window.getComputedStyle(elements[i]).cursor == "pointer"
});
});
}
// Inject the current one set in the include_filters, which may be a CSS rule
// used for displaying the current one in VisualSelector, where it's not one we generated.
if (include_filters.length) {
let results;
// Foreach filter, go and find it on the page and add it to the results so we can visualise it again
for (const f of include_filters) {
bbox = false;
@@ -195,64 +170,44 @@ if (include_filters.length) {
try {
// is it xpath?
if (f.startsWith('/') || f.startsWith('xpath')) {
var qry_f = f.replace(/xpath(:|\d:)/, '')
console.log("[xpath] Scanning for included filter " + qry_f)
let xpathResult = document.evaluate(qry_f, document, null, XPathResult.ORDERED_NODE_SNAPSHOT_TYPE, null);
results = [];
for (let i = 0; i < xpathResult.snapshotLength; i++) {
results.push(xpathResult.snapshotItem(i));
}
if (f.startsWith('/') || f.startsWith('xpath:')) {
q = document.evaluate(f.replace('xpath:', ''), document, null, XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue;
} else {
console.log("[css] Scanning for included filter " + f)
console.log("[css] Scanning for included filter " + f);
results = document.querySelectorAll(f);
q = document.querySelector(f);
}
} catch (e) {
// Maybe catch DOMException and alert?
console.log("xpath_element_scraper: Exception selecting element from filter " + f);
console.log("xpath_element_scraper: Exception selecting element from filter "+f);
console.log(e);
}
if (results != null && results.length) {
// Iterate over the results
results.forEach(node => {
// Try to resolve //something/text() back to its /something so we can at least get the bounding box
if (q) {
// #1231 - In the case an XPath attribute filter is applied, we will have to traverse up and find the element.
if (q.hasOwnProperty('getBoundingClientRect')) {
bbox = q.getBoundingClientRect();
console.log("xpath_element_scraper: Got filter element, scroll from top was " + scroll_y)
} else {
try {
if (typeof node.nodeName == 'string' && node.nodeName === '#text') {
node = node.parentElement
}
// Try and see if we can find its ownerElement
bbox = q.ownerElement.getBoundingClientRect();
console.log("xpath_element_scraper: Got filter by ownerElement element, scroll from top was " + scroll_y)
} catch (e) {
console.log(e)
console.log("xpath_element_scraper: #text resolver")
console.log("xpath_element_scraper: error looking up ownerElement")
}
}
}
if(!q) {
console.log("xpath_element_scraper: filter element " + f + " was not found");
}
// #1231 - In the case an XPath attribute filter is applied, we will have to traverse up and find the element.
if (typeof node.getBoundingClientRect == 'function') {
bbox = node.getBoundingClientRect();
console.log("xpath_element_scraper: Got filter element, scroll from top was " + scroll_y)
} else {
try {
// Try and see if we can find its ownerElement
bbox = node.ownerElement.getBoundingClientRect();
console.log("xpath_element_scraper: Got filter by ownerElement element, scroll from top was " + scroll_y)
} catch (e) {
console.log(e)
console.log("xpath_element_scraper: error looking up q.ownerElement")
}
}
if (bbox && bbox['width'] > 0 && bbox['height'] > 0) {
size_pos.push({
xpath: f,
width: parseInt(bbox['width']),
height: parseInt(bbox['height']),
left: parseInt(bbox['left']),
top: parseInt(bbox['top']) + scroll_y,
highlight_as_custom_filter: true
});
}
if (bbox && bbox['width'] > 0 && bbox['height'] > 0) {
size_pos.push({
xpath: f,
width: parseInt(bbox['width']),
height: parseInt(bbox['height']),
left: parseInt(bbox['left']),
top: parseInt(bbox['top'])+scroll_y
});
}
}
@@ -260,7 +215,7 @@ if (include_filters.length) {
// Sort the elements so we find the smallest one first, in other words, we find the smallest one matching in that area
// so that we dont select the wrapping element by mistake and be unable to select what we want
size_pos.sort((a, b) => (a.width * a.height > b.width * b.height) ? 1 : -1)
size_pos.sort((a, b) => (a.width*a.height > b.width*b.height) ? 1 : -1)
// Window.width required for proper scaling in the frontend
return {'size_pos': size_pos, 'browser_width': window.innerWidth};
View File
@@ -35,8 +35,4 @@ pytest tests/test_access_control.py
pytest tests/test_notification.py
pytest tests/test_backend.py
pytest tests/test_rss.py
pytest tests/test_unique_lines.py
# Check file:// will pickup a file when enabled
echo "Hello world" > /tmp/test-file.txt
ALLOW_FILE_URI=yes pytest tests/test_security.py
pytest tests/test_unique_lines.py
View File
@@ -1,46 +0,0 @@
#!/bin/bash
# Run some tests and check whether the 'custom-browser-search-string=1' connect string appeared in the correct containers
# @todo do it again but with the puppeteer one
# enable debug
set -x
# An extra browser is configured, but we never chose to use it, so it should NOT show in the logs
docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/custom_browser_url/test_custom_browser_url.py::test_request_not_via_custom_browser_url'
docker logs sockpuppetbrowser-custom-url &>log-custom.txt
grep 'custom-browser-search-string=1' log-custom.txt
if [ $? -ne 1 ]
then
echo "Saw a request in 'sockpuppetbrowser-custom-url' container with 'custom-browser-search-string=1' when I should not - log-custom.txt"
exit 1
fi
docker logs sockpuppetbrowser &>log.txt
grep 'custom-browser-search-string=1' log.txt
if [ $? -ne 1 ]
then
echo "Saw a request in 'browser' container with 'custom-browser-search-string=1' when I should not"
exit 1
fi
# Special connect string should appear in the custom-url container, but not in the 'default' one
docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/custom_browser_url/test_custom_browser_url.py::test_request_via_custom_browser_url'
docker logs sockpuppetbrowser-custom-url &>log-custom.txt
grep 'custom-browser-search-string=1' log-custom.txt
if [ $? -ne 0 ]
then
echo "Did not see request in 'sockpuppetbrowser-custom-url' container with 'custom-browser-search-string=1' when I should - log-custom.txt"
exit 1
fi
docker logs sockpuppetbrowser &>log.txt
grep 'custom-browser-search-string=1' log.txt
if [ $? -ne 1 ]
then
echo "Saw a request in 'browser' container with 'custom-browser-search-string=1' when I should not"
exit 1
fi
View File
@@ -2,15 +2,13 @@
# exit when any command fails
set -e
# enable debug
set -x
# Test proxy list handling, starting two squids on different ports
# Each squid adds a different header to the response, which is the main thing we test for.
docker run --network changedet-network -d --name squid-one --hostname squid-one --rm -v `pwd`/tests/proxy_list/squid.conf:/etc/squid/conf.d/debian.conf ubuntu/squid:4.13-21.10_edge
docker run --network changedet-network -d --name squid-two --hostname squid-two --rm -v `pwd`/tests/proxy_list/squid.conf:/etc/squid/conf.d/debian.conf ubuntu/squid:4.13-21.10_edge
# Used for configuring a custom proxy URL via the UI - with username+password auth
# Used for configuring a custom proxy URL via the UI
docker run --network changedet-network -d \
--name squid-custom \
--hostname squid-custom \
@@ -21,22 +19,21 @@ docker run --network changedet-network -d \
## 2nd test actually choose the preferred proxy from proxies.json
docker run --network changedet-network \
-v `pwd`/tests/proxy_list/proxies.json-example:/app/changedetectionio/test-datastore/proxies.json \
test-changedetectionio \
bash -c 'cd changedetectionio && pytest tests/proxy_list/test_multiple_proxy.py'
set +e
echo "- Looking for chosen.changedetection.io request in squid-one - it should NOT be here"
## Should be a request in the default "first" squid
docker logs squid-one 2>/dev/null|grep chosen.changedetection.io
if [ $? -ne 1 ]
if [ $? -ne 0 ]
then
echo "Saw a request to chosen.changedetection.io in the squid logs (while checking preferred proxy - squid one) WHEN I SHOULD NOT"
echo "Did not see a request to chosen.changedetection.io in the squid logs (while checking preferred proxy - squid one)"
exit 1
fi
set -e
echo "- Looking for chosen.changedetection.io request in squid-two"
# And one in the 'second' squid (user selects this as preferred)
docker logs squid-two 2>/dev/null|grep chosen.changedetection.io
if [ $? -ne 0 ]
@@ -45,14 +42,15 @@ then
exit 1
fi
# Test the UI configurable proxies
docker run --network changedet-network \
test-changedetectionio \
bash -c 'cd changedetectionio && pytest tests/proxy_list/test_select_custom_proxy.py'
# Should see a request for one.changedetection.io in there
echo "- Looking for .changedetection.io request in squid-custom"
docker logs squid-custom 2>/dev/null|grep "TCP_TUNNEL.200.*changedetection.io"
if [ $? -ne 0 ]
then
@@ -60,25 +58,4 @@ then
exit 1
fi
# Test "no-proxy" option
docker run --network changedet-network \
test-changedetectionio \
bash -c 'cd changedetectionio && pytest tests/proxy_list/test_noproxy.py'
# We need to handle grep returning 1
set +e
# Check request was never seen in any container
for c in $(echo "squid-one squid-two squid-custom"); do
echo ....Checking $c
docker logs $c &> $c.txt
grep noproxy $c.txt
if [ $? -ne 1 ]
then
echo "Saw request for noproxy in $c container"
cat $c.txt
exit 1
fi
done
docker kill squid-one squid-two squid-custom
View File
@@ -1,43 +0,0 @@
#!/bin/bash
# exit when any command fails
set -e
# enable debug
set -x
# SOCKS5 related - start simple Socks5 proxy server
# SOCKSTEST=xyz should show in the logs of this service to confirm it fetched
docker run --network changedet-network -d --hostname socks5proxy --rm --name socks5proxy -p 1080:1080 -e PROXY_USER=proxy_user123 -e PROXY_PASSWORD=proxy_pass123 serjs/go-socks5-proxy
docker run --network changedet-network -d --hostname socks5proxy-noauth --rm -p 1081:1080 --name socks5proxy-noauth serjs/go-socks5-proxy
echo "---------------------------------- SOCKS5 -------------------"
# SOCKS5 related - test from proxies.json
docker run --network changedet-network \
-v `pwd`/tests/proxy_socks5/proxies.json-example:/app/changedetectionio/test-datastore/proxies.json \
--rm \
-e "SOCKSTEST=proxiesjson" \
test-changedetectionio \
bash -c 'cd changedetectionio && pytest tests/proxy_socks5/test_socks5_proxy_sources.py'
# SOCKS5 related - by manually entering in UI
docker run --network changedet-network \
--rm \
-e "SOCKSTEST=manual" \
test-changedetectionio \
bash -c 'cd changedetectionio && pytest tests/proxy_socks5/test_socks5_proxy.py'
# SOCKS5 related - test from proxies.json via playwright - NOTE - PLAYWRIGHT DOESN'T SUPPORT AUTHENTICATING PROXY
docker run --network changedet-network \
-e "SOCKSTEST=manual-playwright" \
-v `pwd`/tests/proxy_socks5/proxies.json-example-noauth:/app/changedetectionio/test-datastore/proxies.json \
-e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" \
--rm \
test-changedetectionio \
bash -c 'cd changedetectionio && pytest tests/proxy_socks5/test_socks5_proxy_sources.py'
echo "socks5 server logs"
docker logs socks5proxy
echo "----------------------------------"
docker kill socks5proxy socks5proxy-noauth
View File
@@ -1,18 +0,0 @@
"""
Safe Jinja2 render with max payload sizes
See https://jinja.palletsprojects.com/en/3.1.x/sandbox/#security-considerations
"""
import jinja2.sandbox
import typing as t
import os
JINJA2_MAX_RETURN_PAYLOAD_SIZE = 1024 * int(os.getenv("JINJA2_MAX_RETURN_PAYLOAD_SIZE_KB", 1024 * 10))
def render(template_str, **args: t.Any) -> str:
jinja2_env = jinja2.sandbox.ImmutableSandboxedEnvironment(extensions=['jinja2_time.TimeExtension'])
output = jinja2_env.from_string(template_str).render(args)
return output[:JINJA2_MAX_RETURN_PAYLOAD_SIZE]
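# Illustrative usage (assumed, not part of the original file):
#   render("Hello {{ who }}", who="world")    ->  'Hello world'
#   render("{% now 'utc', '%Y-%m-%d' %}")     ->  today's date via jinja2_time
# The returned string is always truncated to JINJA2_MAX_RETURN_PAYLOAD_SIZE characters.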
View File
@@ -1,4 +1,4 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg width="15" height="16.363636" viewBox="0 0 15 16.363636" xmlns="http://www.w3.org/2000/svg" >
<svg width="15" height="16.363636" viewBox="0 0 15 16.363636" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg">
<path d="m 14.318182,11.762045 v 1.1925 H 5.4102273 L 11.849318,7.1140909 C 12.234545,9.1561364 12.54,11.181818 14.318182,11.762045 Z m -6.7984093,4.601591 c 1.0759091,0 2.0256823,-0.955909 2.0256823,-2.045454 H 5.4545455 c 0,1.089545 0.9879545,2.045454 2.0652272,2.045454 z M 15,2.8622727 0.9177273,15.636136 0,14.627045 l 1.8443182,-1.6725 h -1.1625 v -1.1925 C 4.0070455,10.677273 2.1784091,4.5388636 5.3611364,2.6897727 5.8009091,2.4347727 6.0709091,1.9609091 6.0702273,1.4488636 v -0.00205 C 6.0702273,0.64772727 6.7104545,0 7.5,0 8.2895455,0 8.9297727,0.64772727 8.9297727,1.4468182 v 0.00205 C 8.9290909,1.9602319 9.199773,2.4354591 9.638864,2.6897773 10.364318,3.111141 10.827273,3.7568228 11.1525,4.5129591 L 14.085682,1.8531818 Z M 6.8181818,1.3636364 C 6.8181818,1.74 7.1236364,2.0454545 7.5,2.0454545 7.8763636,2.0454545 8.1818182,1.74 8.1818182,1.3636364 8.1818182,0.98795455 7.8763636,0.68181818 7.5,0.68181818 c -0.3763636,0 -0.6818182,0.30613637 -0.6818182,0.68181822 z" id="path2" style="fill:#f8321b;stroke-width:0.681818;fill-opacity:1"/>
</svg>
View File
@@ -1,58 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
height="59.553207"
viewBox="-0.36 95.21 25.082135 59.553208"
width="249.99138"
version="1.1"
id="svg12"
sodipodi:docname="brightdata.svg"
inkscape:version="1.1.2 (0a00cf5339, 2022-02-04)"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns="http://www.w3.org/2000/svg"
>
<defs
id="defs16" />
<sodipodi:namedview
id="namedview14"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageshadow="2"
inkscape:pageopacity="0.0"
inkscape:pagecheckerboard="0"
showgrid="false"
fit-margin-top="0"
fit-margin-left="0"
fit-margin-right="0"
fit-margin-bottom="0"
inkscape:zoom="0.9464"
inkscape:cx="22.189349"
inkscape:cy="-90.870668"
inkscape:window-width="1920"
inkscape:window-height="1051"
inkscape:window-x="1920"
inkscape:window-y="0"
inkscape:window-maximized="1"
inkscape:current-layer="svg12" />
<path
d="m -34.416031,129.28 c -3.97,-2.43 -5.1,-6.09 -4.32,-10.35 0.81,-4.4 3.95,-6.75 8.04,-7.75 4.23,-1.04 8.44,-0.86 12.3,1.5 0.63,0.39 0.93,0.03 1.31,-0.29 1.5,-1.26 3.27,-1.72 5.189999,-1.83 0.79,-0.05 1.04,0.24 1.01,1.01 -0.05,1.31 -0.04,2.63 0,3.95 0.02,0.65 -0.19,0.93 -0.87,0.89 -0.889999,-0.04 -1.789999,0.03 -2.669999,-0.02 -0.82,-0.04 -1.08,0.1 -0.88,1.04 0.83,3.9 -0.06,7.37 -3.1,10.06 -2.76,2.44 -6.13,3.15 -9.72,3.04 -0.51,-0.02 -1.03,-0.02 -1.52,-0.13 -1.22,-0.25 -1.96,0.14 -2.19,1.41 -0.28,1.54 0.16,2.62 1.37,3.07 0.84,0.31 1.74,0.35 2.63,0.39 2.97,0.13 5.95,-0.18 8.91,0.21 2.93,0.39 5.69,1.16 6.85,4.25 1.269999,3.38 0.809999,6.62 -1.48,9.47 -2.73,3.39 -6.52,4.78 -10.66,5.33 -3.53,0.48 -7.04,0.27 -10.39,-1.11 -3.89,-1.6 -5.75,-4.95 -4.84,-8.72 0.51,-2.11 1.85,-3.58 3.69,-4.65 0.38,-0.22 0.93,-0.32 0.28,-0.96 -2.91,-2.83 -2.85,-6.16 0.1,-8.95 0.28,-0.26 0.6,-0.53 0.96,-0.86 z m 8.07,21.5 c 0.95,0.04 1.87,-0.13 2.78,-0.33 1.89,-0.42 3.51,-1.3 4.49,-3.06 1.82,-3.25 0.24,-6.2 -3.37,-6.58 -2.88,-0.3 -5.76,0.24 -8.63,-0.13 -0.53,-0.07 -0.75,0.34 -0.95,0.71 -1.16,2.24 -1.08,4.53 0,6.73 1.15,2.34 3.46,2.48 5.68,2.66 z m -5,-30.61 c -0.03,1.67 0.08,3.19 0.74,4.61 0.76,1.62 2.17,2.42 4.03,2.31 1.62,-0.1 2.9,-1.12 3.36,-2.84 0.66,-2.46 0.69,-4.95 0.01,-7.42 -0.49,-1.76 -1.7,-2.64 -3.56,-2.7 -2.08,-0.07 -3.37,0.7 -4.04,2.42 -0.47,1.21 -0.6,2.47 -0.54,3.62 z m 32.9399993,6.56 c 0,2.59 0.05,5.18 -0.02,7.77 -0.03,1.03 0.31,1.46 1.32,1.52 0.65,0.04 1.61,-0.09 1.82,0.57 0.26,0.81 0.11,1.76 0.06,2.65 -0.03,0.48 -0.81,0.39 -0.81,0.39 l -11.47,0.01 c 0,0 -0.95,-0.21 -0.88,-0.88 0.03,-0.29 0.04,-0.6 0,-0.89 -0.19,-1.24 0.21,-1.92 1.58,-1.9 0.99,0.01 1.28,-0.52 1.28,-1.53 -0.05,-8.75 -0.05,-17.49 0,-26.24 0.01,-1.15 -0.36,-1.62 -1.44,-1.67 -0.17,-0.01 -0.34,-0.04 -0.5,-0.07 -1.43,-0.22 -2.12,-1.57 -1.53,-2.91 0.15,-0.35 0.43,-0.36 0.72,-0.4 2.94,-0.41 5.88,-0.81 8.82000002,-1.23 0.81999998,-0.12 0.99999998,0.27 0.98999998,1.01 -0.02,3.35 0,6.71 0.02,10.06 0,0.35 -0.23,0.84 0.18,1.03 0.38,0.17 0.69,-0.25 0.99,-0.45 2.56,-1.74 5.33,-2.73 8.4900007,-2.56 3.51005,0.19 5.65005,1.95 6.35005,5.46 0.42,2.09 0.52,4.21 0.51,6.33 -0.02,3.86 0.05,7.73 -0.04,11.59 -0.02,1.12 0.37,1.5 1.39,1.6 0.61,0.05 1.55,-0.13 1.74,0.47 0.26,0.85 0.12,1.84 0.1,2.77 -0.01,0.41 -0.69,0.37 -0.69,0.37 l -11.4700504,0.01 c 0,0 -0.81,-0.29 -0.8,-0.85 0.01,-0.38 0.04,-0.77 -0.01,-1.15 -0.13,-1.01 0.32,-1.52 1.31,-1.56 1.0600004,-0.05 1.3800004,-0.55 1.3500004,-1.63 -0.14,-4.84 0.16,-9.68 -0.18,-14.51 -0.26,-3.66 -2.1100004,-4.95 -5.6700007,-3.99 -0.25,0.07 -0.49,0.15 -0.73,0.22 -2.57,0.8 -2.79,1.09 -2.79,3.71 0.01,2.3 0.01,4.59 0.01,6.88 z M -109.26603,122.56 c 0,-4.75 -0.02,-9.51 0.02,-14.26 0.01,-0.92 -0.17,-1.47 -1.19,-1.45 -0.16,0 -0.33,-0.07 -0.5,-0.1 -1.56,-0.27 -2.24,-1.47 -1.69,-2.92 0.14,-0.37 0.41,-0.38 0.7,-0.42 2.98,-0.41 5.97,-0.81 8.94,-1.24 0.85,-0.12 0.88,0.33 0.88,0.96 -0.01,3.01 -0.01,6.03 0,9.04 0,0.4 -0.18,0.96 0.27,1.16 0.36,0.16 0.66,-0.3 0.96,-0.52 4.729999,-3.51 12.459999,-2.61 14.889999,4.48 1.89,5.51 1.91,11.06 -0.96,16.28 -2.37,4.31 -6.19,6.49 -11.15,6.59 -3.379999,0.07 -6.679999,-0.3 -9.909999,-1.37 -0.93,-0.31 -1.3,-0.78 -1.28,-1.83 0.05,-4.81 0.02,-9.6 0.02,-14.4 z m 7.15,3.89 c 0,2.76 0.02,5.52 -0.01,8.28 -0.01,0.76 0.18,1.29 0.91,1.64 1.899999,0.9 4.299999,0.5 5.759999,-1.01 0.97,-1 1.56,-2.21 1.96,-3.52 1.03,-3.36 0.97,-6.78 0.61,-10.22 a 9.991,9.991 0 0 0 -0.93,-3.29 c -1.47,-3.06 -4.67,-3.85 -7.439999,-1.86 -0.6,0.43 -0.88,0.93 -0.87,1.7 0.04,2.76 0.01,5.52 0.01,8.28 z"
fill="#4280f6"
id="path2" />
<path
d="m 68.644019,137.2 c -1.62,1.46 -3.41,2.56 -5.62,2.96 -4.4,0.8 -8.7,-1.39 -10.49,-5.49 -2.31,-5.31 -2.3,-10.67 -0.1,-15.98 2.31,-5.58 8.29,-8.65 14.24,-7.46 1.71,0.34 1.9,0.18 1.9,-1.55 0,-0.68 -0.05,-1.36 0.01,-2.04 0.09,-1.02 -0.25,-1.54 -1.34,-1.43 -0.64,0.06 -1.26,-0.1 -1.88,-0.21 -1.32,-0.24 -1.6,-0.62 -1.37,-1.97 0.07,-0.41 0.25,-0.57 0.65,-0.62 2.63,-0.33 5.27,-0.66 7.9,-1.02 1.04,-0.14 1.17,0.37 1.17,1.25 -0.02,10.23 -0.02,20.45 -0.01,30.68 v 1.02 c 0.02,0.99 0.35,1.6 1.52,1.47 0.52,-0.06 1.35,-0.27 1.25,0.73 -0.08,0.8 0.58,1.93 -0.94,2.18 -1.29,0.22 -2.51,0.69 -3.86,0.65 -2.04,-0.06 -2.3,-0.23 -2.76,-2.19 -0.09,-0.3 0.06,-0.67 -0.27,-0.98 z m -0.07,-12.46 c 0,-2.8 -0.04,-5.6 0.02,-8.39 0.02,-0.9 -0.28,-1.47 -1.05,-1.81 -3.18,-1.4 -7.54,-0.8 -9.3,2.87 -0.83,1.74 -1.31,3.54 -1.49,5.46 -0.28,2.93 -0.38,5.83 0.61,8.65 0.73,2.09 1.81,3.9 4.11,4.67 2.49,0.83 4.55,-0.04 6.5,-1.48 0.54,-0.4 0.62,-0.95 0.61,-1.57 -0.02,-2.8 -0.01,-5.6 -0.01,-8.4 z m 28.79,2.53 c 0,3.24 0.04,5.83 -0.02,8.41 -0.02,1 0.19,1.49 1.309998,1.41 0.55,-0.04 1.460003,-0.46 1.520003,0.73 0.05,1.02 0.1,1.89 -1.330003,2.08 -1.289998,0.17 -2.559998,0.51 -3.889998,0.48 -1.88,-0.05 -2.15,-0.26 -2.42,-2.15 -0.04,-0.27 0.14,-0.65 -0.22,-0.79 -0.34,-0.13 -0.5,0.24 -0.72,0.42 -3.61,3 -8.15,3.4 -11.64,1.08 -1.61,-1.07 -2.49,-2.63 -2.67,-4.43 -0.51,-5.13 0.77,-7.91 6.3,-10.22 2.44,-1.02 5.07,-1.27 7.68,-1.49 0.77,-0.07 1.03,-0.28 1.02,-1.05 -0.03,-1.48 -0.05,-2.94 -0.64,-4.36 -0.59,-1.42 -1.67,-1.92 -3.08,-2.03 -3.04,-0.24 -5.88,0.5 -8.63,1.71 -0.51,0.23 -1.19,0.75 -1.48,-0.13 -0.26,-0.77 -1.35,-1.61 0.05,-2.47 3.27,-2 6.7,-3.44 10.61,-3.42 1.44,0.01 2.88,0.27 4.21,0.81 2.67,1.08 3.44,3.4 3.8,5.99 0.46,3.37 0.1,6.73 0.24,9.42 z m -5.09,2.9 c 0,-1.23 -0.01,-2.46 0,-3.69 0,-0.52 -0.06,-0.98 -0.75,-0.84 -1.45,0.3 -2.93,0.28 -4.37,0.69 -3.71,1.04 -5.46,4.48 -3.97,8.03 0.51,1.22 1.48,1.98 2.79,2.16 2.01,0.28 3.86,-0.29 5.6,-1.28 0.54,-0.31 0.73,-0.76 0.72,-1.37 -0.05,-1.23 -0.02,-2.47 -0.02,-3.7 z m 43.060001,-2.89 c 0,2.72 0.01,5.43 -0.01,8.15 0,0.66 0.02,1.21 0.91,1.12 0.54,-0.06 0.99,0.12 0.86,0.75 -0.15,0.71 0.56,1.7 -0.58,2.09 -1.55,0.52 -3.16,0.59 -4.77,0.4 -0.99,-0.12 -1.12,-1.01 -1.18,-1.73 -0.08,-1.15 -0.16,-1.45 -1.24,-0.54 -3.41,2.87 -8.05,3.17 -11.43,0.88 -1.75,-1.18 -2.49,-2.91 -2.7,-4.94 -0.64,-6.24 3.16,-8.74 7.83,-10.17 2.04,-0.62 4.14,-0.8 6.24,-0.99 0.81,-0.07 1,-0.36 0.98,-1.09 -0.04,-1.31 0.04,-2.62 -0.42,-3.89 -0.57,-1.57 -1.53,-2.34 -3.18,-2.45 -3.03,-0.21 -5.88,0.46 -8.64,1.66 -0.6,0.26 -1.25,0.81 -1.68,-0.2 -0.34,-0.8 -1.08,-1.61 0.16,-2.36 4.12,-2.5 8.44,-4.16 13.36,-3.07 3.21,0.71 4.89,2.91 5.26,6.34 0.18,1.69 0.22,3.37 0.22,5.07 0.01,1.66 0.01,3.32 0.01,4.97 z m -5.09,2.54 c 0,-1.27 -0.03,-2.54 0.01,-3.81 0.02,-0.74 -0.27,-1.02 -0.98,-0.92 -1.21,0.17 -2.43,0.28 -3.62,0.55 -3.72,0.83 -5.47,3.48 -4.82,7.21 0.29,1.66 1.57,2.94 3.21,3.16 2.02,0.27 3.85,-0.34 5.57,-1.34 0.49,-0.29 0.64,-0.73 0.63,-1.29 -0.02,-1.18 0,-2.37 0,-3.56 z"
fill="#c8dbfb"
id="path4" />
<path
d="m 26.314019,125.77 c 0,-2.89 -0.05,-5.77 0.02,-8.66 0.03,-1.04 -0.33,-1.39 -1.31,-1.24 a 0.7,0.7 0 0 1 -0.25,0 c -0.57,-0.18 -1.44,0.48 -1.68,-0.58 -0.35,-1.48 -0.02,-2.3 1.21,-2.7 1.3,-0.43 2.16,-1.26 2.76,-2.46 0.78,-1.56 1.44,-3.17 1.91,-4.84 0.18,-0.63 0.47,-0.86 1.15,-0.88 3.28,-0.09 3.27,-0.11 3.32,3.17 0.01,1.06 0.09,2.12 0.09,3.18 -0.01,0.67 0.27,0.89 0.91,0.88 1.61,-0.02 3.23,0.03 4.84,-0.02 0.77,-0.02 1.01,0.23 1.03,1.01 0.08,3.27 0.1,3.27 -3.09,3.27 -0.93,0 -1.87,0.03 -2.8,-0.01 -0.67,-0.02 -0.89,0.26 -0.88,0.91 0.04,5.43 0.04,10.86 0.12,16.29 0.02,1.7 0.75,2.26 2.46,2.1 1.1,-0.1 2.19,-0.26 3.23,-0.65 0.59,-0.22 0.89,-0.09 1.14,0.53 0.93,2.29 0.92,2.37 -1.32,3.52 -2.54,1.3 -5.22,1.99 -8.1,1.79 -2.27,-0.16 -3.68,-1.27 -4.35,-3.45 -0.3,-0.98 -0.41,-1.99 -0.41,-3.01 z m -97.67005,-8.99 c 0.57,-0.84 1.11,-1.74 1.76,-2.55 1.68,-2.09 3.68,-3.62 6.54,-3.66 1.08,-0.01 1.63,0.28 1.57,1.52 -0.1,2.08 -0.05,4.16 -0.02,6.24 0.01,0.74 -0.17,0.96 -0.96,0.76 -2.36,-0.59 -4.71,-0.42 -7.03,0.28 -0.8,0.24 -1.16,0.62 -1.15,1.52 0.05,4.5 0.04,9 0,13.5 -0.01,0.89 0.29,1.16 1.15,1.2 1.23,0.06 2.44,0.32 3.67,0.39 0.75,0.05 0.91,0.38 0.89,1.04 -0.06,2.86 0.29,2.28 -2.25,2.3 -4.2,0.04 -8.41,-0.02 -12.61,0.03 -0.91,0.01 -1.39,-0.18 -1.22,-1.18 0.02,-0.12 0,-0.25 0,-0.38 0.02,-2.1 -0.24,-1.88 1.77,-2.04 1.33,-0.11 1.6,-0.67 1.58,-1.9 -0.07,-5.35 -0.04,-10.7 -0.02,-16.05 0,-0.78 -0.17,-1.2 -1,-1.46 -2.21,-0.68 -2.7,-1.69 -2.22,-3.99 0.11,-0.52 0.45,-0.56 0.82,-0.62 2.22,-0.34 4.44,-0.7 6.67,-0.99 0.99,-0.13 1.82,0.7 1.84,1.76 0.03,1.4 0.03,2.8 0.04,4.2 -0.01,0.02 0.06,0.04 0.18,0.08 z m 25.24,6.59 c 0,3.69 0.04,7.38 -0.03,11.07 -0.02,1.04 0.31,1.48 1.32,1.49 0.29,0 0.59,0.12 0.88,0.13 0.93,0.01 1.18,0.47 1.16,1.37 -0.05,2.19 0,2.19 -2.24,2.19 -3.48,0 -6.96,-0.04 -10.44,0.03 -1.09,0.02 -1.47,-0.33 -1.3,-1.36 0.02,-0.12 0.02,-0.26 0,-0.38 -0.28,-1.39 0.39,-1.96 1.7,-1.9 1.36,0.06 1.76,-0.51 1.74,-1.88 -0.09,-5.17 -0.08,-10.35 0,-15.53 0.02,-1.22 -0.32,-1.87 -1.52,-2.17 -0.57,-0.14 -1.47,-0.11 -1.57,-0.85 -0.15,-1.04 -0.05,-2.11 0.01,-3.17 0.02,-0.34 0.44,-0.35 0.73,-0.39 2.81,-0.39 5.63,-0.77 8.44,-1.18 0.92,-0.14 1.15,0.2 1.14,1.09 -0.04,3.8 -0.02,7.62 -0.02,11.44 z"
fill="#4280f6"
id="path6" />
<path
d="m 101.44402,125.64 c 0,-3.18 -0.03,-6.37 0.02,-9.55 0.02,-0.94 -0.26,-1.36 -1.22,-1.22 -0.21,0.03 -0.430003,0.04 -0.630003,0 -0.51,-0.12 -1.35,0.39 -1.44,-0.55 -0.08,-0.85 -0.429998,-1.87 0.93,-2.24 2.080003,-0.57 2.720003,-2.39 3.350003,-4.17 0.31,-0.88 0.62,-1.76 0.87,-2.66 0.18,-0.64 0.52,-0.85 1.19,-0.84 2.46,0.05 2,-0.15 2.04,2.04 0.02,1.1 0.08,2.21 -0.02,3.31 -0.11,1.16 0.46,1.52 1.46,1.53 1.78,0.01 3.57,0.04 5.35,-0.01 0.82,-0.02 1.12,0.23 1.11,1.08 -0.05,2.86 0.19,2.49 -2.42,2.51 -1.53,0.01 -3.06,0.02 -4.59,-0.01 -0.65,-0.01 -0.9,0.22 -0.9,0.89 0.02,5.52 0,11.04 0.03,16.56 0,0.67 0.14,1.34 0.25,2.01 0.17,1.04 1.17,1.62 2.59,1.42 1.29,-0.19 2.57,-0.49 3.86,-0.69 0.43,-0.07 1.05,-0.47 1.19,0.4 0.12,0.75 1.05,1.61 -0.09,2.24 -2.09,1.16 -4.28,2.07 -6.71,2.16 -1.05,0.04 -2.13,0.2 -3.16,-0.14 -1.92,-0.65 -3.03,-2.28 -3.05,-4.51 -0.02,-3.19 -0.01,-6.37 -0.01,-9.56 z"
fill="#c8dbfb"
id="path8" />
<path
d="m -50.816031,95.21 c 0.19,2.160002 1.85,3.240002 2.82,4.740002 0.25,0.379998 0.48,0.109998 0.67,-0.16 0.21,-0.31 0.6,-1.21 1.15,-1.28 -0.35,1.38 -0.04,3.149998 0.16,4.449998 0.49,3.05 -1.22,5.64 -4.07,6.18 -3.38,0.65 -6.22,-2.21 -5.6,-5.62 0.23,-1.24 1.37,-2.5 0.77,-3.699998 -0.85,-1.7 0.54,-0.52 0.79,-0.22 1.04,1.199998 1.21,0.09 1.45,-0.55 0.24,-0.63 0.31,-1.31 0.47,-1.97 0.19,-0.770002 0.55,-1.400002 1.39,-1.870002 z"
fill="#4280f6"
id="path10" />
</svg>
View File
@@ -12,7 +12,7 @@
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns="http://www.w3.org/2000/svg"
><defs
xmlns:svg="http://www.w3.org/2000/svg"><defs
id="defs11" /><sodipodi:namedview
id="namedview9"
pagecolor="#ffffff"
View File
@@ -10,7 +10,7 @@
viewBox="0 0 7.1975545 4.7993639"
xml:space="preserve"
xmlns="http://www.w3.org/2000/svg"
><defs
xmlns:svg="http://www.w3.org/2000/svg"><defs
id="defs19" />
<g
id="g14"
Binary file not shown.

View File
@@ -9,7 +9,7 @@
id="svg5"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns="http://www.w3.org/2000/svg"
>
xmlns:svg="http://www.w3.org/2000/svg">
<defs
id="defs2" />
<g
File diff suppressed because one or more lines are too long

View File
@@ -3,6 +3,7 @@
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
version="1.1"
id="Capa_1"
View File
@@ -13,6 +13,7 @@
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:dc="http://purl.org/dc/elements/1.1/"><sodipodi:namedview
View File
@@ -6,7 +6,7 @@
version="1.1"
id="svg6"
xmlns="http://www.w3.org/2000/svg"
>
xmlns:svg="http://www.w3.org/2000/svg">
<defs
id="defs10" />
<path
View File
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg width="18" height="19.92" viewBox="0 0 18 19.92" xmlns="http://www.w3.org/2000/svg" >
<svg width="18" height="19.92" viewBox="0 0 18 19.92" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg">
<path d="M -3,-2 H 21 V 22 H -3 Z" fill="none" id="path2"/>
<path d="m 15,14.08 c -0.76,0 -1.44,0.3 -1.96,0.77 L 5.91,10.7 C 5.96,10.47 6,10.24 6,10 6,9.76 5.96,9.53 5.91,9.3 L 12.96,5.19 C 13.5,5.69 14.21,6 15,6 16.66,6 18,4.66 18,3 18,1.34 16.66,0 15,0 c -1.66,0 -3,1.34 -3,3 0,0.24 0.04,0.47 0.09,0.7 L 5.04,7.81 C 4.5,7.31 3.79,7 3,7 1.34,7 0,8.34 0,10 c 0,1.66 1.34,3 3,3 0.79,0 1.5,-0.31 2.04,-0.81 l 7.12,4.16 c -0.05,0.21 -0.08,0.43 -0.08,0.65 0,1.61 1.31,2.92 2.92,2.92 1.61,0 2.92,-1.31 2.92,-2.92 0,-1.61 -1.31,-2.92 -2.92,-2.92 z" id="path4" style="fill:#0078e7;fill-opacity:1"/>
</svg>
View File
@@ -1,44 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
aria-hidden="true"
viewBox="0 0 19.966091 17.999964"
class="css-1oqmxjn"
version="1.1"
id="svg4"
sodipodi:docname="steps.svg"
width="19.966091"
height="17.999964"
inkscape:version="1.1.2 (0a00cf5339, 2022-02-04)"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
<defs
id="defs8" />
<sodipodi:namedview
id="namedview6"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageshadow="2"
inkscape:pageopacity="0.0"
inkscape:pagecheckerboard="0"
showgrid="false"
fit-margin-top="0"
fit-margin-left="0"
fit-margin-right="0"
fit-margin-bottom="0"
inkscape:zoom="8.6354167"
inkscape:cx="-1.3896261"
inkscape:cy="6.1375151"
inkscape:window-width="1280"
inkscape:window-height="667"
inkscape:window-x="2419"
inkscape:window-y="250"
inkscape:window-maximized="0"
inkscape:current-layer="svg4" />
<path
d="m 16.95807,12.000003 c -0.7076,0.0019 -1.3917,0.2538 -1.9316,0.7113 -0.5398,0.4575 -0.9005,1.091 -1.0184,1.7887 H 5.60804 c -0.80847,0.0297 -1.60693,-0.1865 -2.29,-0.62 -0.26632,-0.1847 -0.48375,-0.4315 -0.63356,-0.7189 -0.14982,-0.2874 -0.22753,-0.607 -0.22644,-0.9311 -0.02843,-0.3931 0.03646,-0.7873 0.1894,-1.1505 0.15293,-0.3632 0.38957,-0.6851 0.6906,-0.9395 0.66628,-0.4559004 1.4637,-0.6807004 2.27,-0.6400004 h 8.35003 c 0.8515,-0.0223 1.6727,-0.3206 2.34,-0.85 0.3971,-0.3622 0.7076,-0.8091 0.9084,-1.3077 0.2008,-0.49857 0.2868,-1.03596 0.2516,-1.57229 0.0113,-0.47161 -0.0887,-0.93924 -0.292,-1.36493 -0.2033,-0.4257 -0.5041,-0.79745 -0.878,-1.08507 -0.7801,-0.55815 -1.7212,-0.84609 -2.68,-0.82 H 5.95804 c -0.12537,-0.7417 -0.5248,-1.40924 -1.11913,-1.87032996 -0.59434,-0.46108 -1.3402,-0.68207 -2.08979,-0.61917 -0.74958,0.06291 -1.44818,0.40512 -1.95736,0.95881 C 0.28259,1.5230126 0,2.2477926 0,3.0000126 c 0,0.75222 0.28259,1.47699 0.79176,2.03068 0.50918,0.55369 1.20778,0.8959 1.95736,0.95881 0.74959,0.0629 1.49545,-0.15808 2.08979,-0.61917 0.59433,-0.46109 0.99376,-1.12863 1.11913,-1.87032 h 7.70003 c 0.7353,-0.03061 1.4599,0.18397 2.06,0.61 0.2548,0.19335 0.4595,0.445 0.597,0.73385 0.1375,0.28884 0.2036,0.60644 0.193,0.92615 0.0316,0.38842 -0.0247,0.77898 -0.165,1.14258 -0.1402,0.36361 -0.3607,0.69091 -0.645,0.95741 -0.5713,0.4398 -1.2799,0.663 -2,0.63 H 5.69804 c -1.03259,-0.0462 -2.05065,0.2568 -2.89,0.86 -0.43755,0.3361 -0.78838,0.7720004 -1.02322,1.2712004 -0.23484,0.4993 -0.34688,1.0474 -0.32678,1.5988 -0.00726,0.484 0.10591,0.9622 0.32934,1.3916 0.22344,0.4295 0.55012,0.7966 0.95066,1.0684 0.85039,0.5592 1.85274,0.8421 2.87,0.81 h 8.40003 c 0.0954,0.5643 0.3502,1.0896 0.7343,1.5138 0.3842,0.4242 0.8817,0.7297 1.4338,0.8803 0.5521,0.1507 1.1358,0.1403 1.6822,-0.0299 0.5464,-0.1702 1.0328,-0.4932 1.4016,-0.9308 0.3688,-0.4376 0.6048,-0.9716 0.6801,-1.5389 0.0752,-0.5673 -0.0134,-1.1444 -0.2554,-1.663 -0.242,-0.5186 -0.6273,-0.9572 -1.1104,-1.264 -0.4831,-0.3068 -1.0439,-0.469 -1.6162,-0.4675 z m 0,5 c -0.3956,0 -0.7823,-0.1173 -1.1112,-0.3371 -0.3289,-0.2197 -0.5852,-0.5321 -0.7366,-0.8975 -0.1514,-0.3655 -0.191,-0.7676 -0.1138,-1.1556 0.0772,-0.3879 0.2677,-0.7443 0.5474,-1.024 0.2797,-0.2797 0.636,-0.4702 1.024,-0.5474 0.388,-0.0771 0.7901,-0.0375 1.1555,0.1138 0.3655,0.1514 0.6778,0.4078 0.8976,0.7367 0.2198,0.3289 0.3371,0.7155 0.3371,1.1111 0,0.5304 -0.2107,1.0391 -0.5858,1.4142 -0.3751,0.3751 -0.8838,0.5858 -1.4142,0.5858 z"
id="path2"
style="fill:#777777;fill-opacity:1" />
</svg>
View File
@@ -1,7 +1,16 @@
$(document).ready(function () {
// duplicate
var csrftoken = $('input[name=csrf_token]').val();
$.ajaxSetup({
beforeSend: function (xhr, settings) {
if (!/^(GET|HEAD|OPTIONS|TRACE)$/i.test(settings.type) && !this.crossDomain) {
xhr.setRequestHeader("X-CSRFToken", csrftoken)
}
}
})
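// Sketch: every later non-GET $.ajax()/$.post() from this page now carries the
// X-CSRFToken header automatically; safe methods (GET/HEAD/OPTIONS/TRACE) are untouched.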
var browsersteps_session_id;
var browser_interface_seconds_remaining = 0;
var browserless_seconds_remaining = 0;
var apply_buttons_disabled = false;
var include_text_elements = $("#include_text_elements");
var xpath_data = false;
@@ -17,8 +26,7 @@ $(document).ready(function () {
set_scale();
});
// Should always be disabled
$('#browser_steps-0-operation option[value="Goto site"]').prop("selected", "selected");
$('#browser_steps-0-operation').attr('disabled', 'disabled');
$('#browser_steps >li:first-child select').val('Goto site').attr('disabled', 'disabled');
$('#browsersteps-click-start').click(function () {
$("#browsersteps-click-start").fadeOut();
@@ -41,7 +49,7 @@ $(document).ready(function () {
$('#browsersteps-img').removeAttr('src');
$("#browsersteps-click-start").show();
$("#browsersteps-selector-wrapper .spinner").hide();
browser_interface_seconds_remaining = 0;
browserless_seconds_remaining = 0;
browsersteps_session_id = false;
apply_buttons_disabled = false;
ctx.clearRect(0, 0, c.width, c.height);
@@ -53,12 +61,12 @@ $(document).ready(function () {
$('#browser_steps >li:first-child').css('opacity', '0.5');
}
// Show seconds remaining until the browser interface needs to restart the session
// Show seconds remaining until playwright/browserless needs to restart the session
// (See comment at the top of changedetectionio/blueprint/browser_steps/__init__.py )
setInterval(() => {
if (browser_interface_seconds_remaining >= 1) {
document.getElementById('browser-seconds-remaining').innerText = browser_interface_seconds_remaining + " seconds remaining in session";
browser_interface_seconds_remaining -= 1;
if (browserless_seconds_remaining >= 1) {
document.getElementById('browserless-seconds-remaining').innerText = browserless_seconds_remaining + " seconds remaining in session";
browserless_seconds_remaining -= 1;
}
}, "1000")
@@ -152,12 +160,6 @@ $(document).ready(function () {
e.offsetX > item.left * y_scale && e.offsetX < item.left * y_scale + item.width * y_scale
) {
// Ignore really large ones, because we are scraping 'div' also from xpath_element_scraper but
// that div or whatever could be some wrapper and would generally make you select the whole page
if (item.width > 800 && item.height > 400) {
return
}
// There could be many elements here, record them all and then we'll find out which is the most 'useful'
// (input, textarea, button, A etc)
if (item.width < xpath_data['browser_width']) {
@@ -206,7 +208,7 @@ $(document).ready(function () {
console.log(x);
if (x && first_available.length) {
// @todo will it let you click an element that has a layer on top? probably not.
if (x['tagtype'] === 'text' || x['tagtype'] === 'number' || x['tagtype'] === 'email' || x['tagName'] === 'textarea' || x['tagtype'] === 'password' || x['tagtype'] === 'search') {
if (x['tagtype'] === 'text' || x['tagtype'] === 'email' || x['tagName'] === 'textarea' || x['tagtype'] === 'password' || x['tagtype'] === 'search') {
$('select', first_available).val('Enter text in field').change();
$('input[type=text]', first_available).first().val(x['xpath']);
$('input[placeholder="Value"]', first_available).addClass('ok').click().focus();
@@ -259,7 +261,7 @@ $(document).ready(function () {
// This should trigger 'Goto site'
console.log("Got startup response, requesting Goto-Site (first) step fake click");
$('#browser_steps >li:first-child .apply').click();
browser_interface_seconds_remaining = 500;
browserless_seconds_remaining = 500;
set_first_gotosite_disabled();
}).fail(function (data) {
console.log(data);
@@ -319,14 +321,8 @@ $(document).ready(function () {
var s = '<div class="control">' + '<a data-step-index=' + i + ' class="pure-button button-secondary button-green button-xsmall apply" >Apply</a>&nbsp;';
if (i > 0) {
// The first step never gets these (Goto-site)
s += `<a data-step-index="${i}" class="pure-button button-secondary button-xsmall clear" >Clear</a>&nbsp;` +
`<a data-step-index="${i}" class="pure-button button-secondary button-red button-xsmall remove" >Remove</a>`;
// if a screenshot is available
if (browser_steps_available_screenshots.includes(i.toString())) {
var d = (browser_steps_last_error_step === i+1) ? 'before' : 'after';
s += `&nbsp;<a data-step-index="${i}" class="pure-button button-secondary button-xsmall show-screenshot" title="Show screenshot from last run" data-type="${d}">Pic</a>&nbsp;`;
}
s += '<a data-step-index=' + i + ' class="pure-button button-secondary button-xsmall clear" >Clear</a>&nbsp;' +
'<a data-step-index=' + i + ' class="pure-button button-secondary button-red button-xsmall remove" >Remove</a>';
}
s += '</div>';
$(this).append(s)
@@ -441,24 +437,6 @@ $(document).ready(function () {
});
$('ul#browser_steps li .control .show-screenshot').click(function (element) {
var step_n = $(event.currentTarget).data('step-index');
w = window.open(this.href, "_blank", "width=640,height=480");
const t = $(event.currentTarget).data('type');
const url = browser_steps_fetch_screenshot_image_url + `&step_n=${step_n}&type=${t}`;
w.document.body.innerHTML = `<!DOCTYPE html>
<html lang="en">
<body>
<img src="${url}" style="width: 100%" alt="Browser Step at step ${step_n} from last run." title="Browser Step at step ${step_n} from last run."/>
</body>
</html>`;
w.document.title = `Browser Step at step ${step_n} from last run.`;
});
if (browser_steps_last_error_step) {
$("ul#browser_steps>li:nth-child("+browser_steps_last_error_step+")").addClass("browser-step-with-error");
}
$("ul#browser_steps select").change(function () {
set_greyed_state();
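The "// duplicate" marker near the top of this file is accurate: the same CSRF bootstrap appears in several of these scripts. The pattern is the standard jQuery + Flask-WTF one, read the token from the hidden form input the server renders, then attach it as an X-CSRFToken header on every state-changing request. A minimal self-contained sketch of that pattern, taken from the code above:

    // Attach the CSRF token to all state-changing jQuery AJAX requests.
    // Assumes the page renders <input name="csrf_token" value="..."> somewhere.
    $(document).ready(function () {
        var csrftoken = $('input[name=csrf_token]').val();
        $.ajaxSetup({
            beforeSend: function (xhr, settings) {
                // Safe methods (GET/HEAD/OPTIONS/TRACE) and cross-domain
                // requests are left untouched.
                if (!/^(GET|HEAD|OPTIONS|TRACE)$/i.test(settings.type) && !this.crossDomain) {
                    xhr.setRequestHeader("X-CSRFToken", csrftoken);
                }
            }
        });
    });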

View File

@@ -1,10 +0,0 @@
$(document).ready(function () {
$.ajaxSetup({
beforeSend: function (xhr, settings) {
if (!/^(GET|HEAD|OPTIONS|TRACE)$/i.test(settings.type) && !this.crossDomain) {
xhr.setRequestHeader("X-CSRFToken", csrftoken)
}
}
})
});

View File

@@ -1,11 +1,4 @@
$(document).ready(function () {
$('.needs-localtime').each(function () {
for (var option of this.options) {
var dateObject = new Date(option.value * 1000);
option.label = dateObject.toLocaleString(undefined, {dateStyle: "full", timeStyle: "medium"});
}
});
// Load it when the #screenshot tab is in use, so we don't give a slow experience when waiting for the text diff to load
window.addEventListener('hashchange', function (e) {
toggle(location.hash);
@@ -22,82 +15,11 @@ $(document).ready(function () {
$("#settings").hide();
} else if (hash_name === '#extract') {
$("#settings").hide();
} else {
}
else {
$("#settings").show();
}
}
const article = $('.highlightable-filter')[0];
// We could also add the 'touchend' event for touch devices, but since
// most iOS/Android browsers already show a dialog when you select
// text (often with a Share option) we'll skip that
article.addEventListener('mouseup', dragTextHandler, false);
article.addEventListener('mousedown', clean, false);
function clean(event) {
$("#highlightSnippet").remove();
}
// Listen for Escape key press
window.addEventListener('keydown', function (e) {
if (e.key === 'Escape') {
clean();
}
}, false);
function dragTextHandler(event) {
console.log('mouseupped');
// Check if any text was selected
if (window.getSelection().toString().length > 0) {
// Find out how much (if any) user has scrolled
var scrollTop = (window.pageYOffset !== undefined) ? window.pageYOffset : (document.documentElement || document.body.parentNode || document.body).scrollTop;
// Get cursor position
const posX = event.clientX;
const posY = event.clientY + 20 + scrollTop;
// Append HTML to the body, create the "Tweet Selection" dialog
document.body.insertAdjacentHTML('beforeend', '<div id="highlightSnippet" style="position: absolute; top: ' + posY + 'px; left: ' + posX + 'px;"><div class="pure-form-message-inline" style="font-size: 70%">Ignore any change on any line which contains the selected text.</div><br><a data-mode="exact" href="javascript:void(0);" class="pure-button button-secondary button-xsmall">Ignore exact text</a>&nbsp;</div>');
if (/\d/.test(window.getSelection().toString())) {
// Offer regex replacement
document.getElementById("highlightSnippet").insertAdjacentHTML('beforeend', '<a data-mode="digit-regex" href="javascript:void(0);" class="pure-button button-secondary button-xsmall">Ignore text including number changes</a>');
}
$('#highlightSnippet a').bind('click', function (e) {
if(!window.getSelection().toString().trim().length) {
alert('Oops no text selected!');
return;
}
$.ajax({
type: "POST",
url: highlight_submit_ignore_url,
data: {'mode': $(this).data('mode'), 'selection': window.getSelection().toString()},
statusCode: {
400: function () {
// More than likely the CSRF token was lost when the server restarted
alert("There was a problem processing the request, please reload the page.");
}
}
}).done(function (data) {
$("#highlightSnippet").html(data)
}).fail(function (data) {
console.log(data);
alert('There was an error communicating with the server.');
});
});
}
}
$('#diff-form').on('submit', function (e) {
if ($('select[name=from_version]').val() === $('select[name=to_version]').val()) {
e.preventDefault();
alert('Error - You are trying to compare the same version.');
}
});
});
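dragTextHandler above is the core of the highlight-to-ignore feature: on mouseup it reads window.getSelection(), computes the cursor position plus the scroll offset, and injects a small action dialog at that point. A minimal sketch of just the positioning part; it uses pageX/pageY (which already include the scroll offset the original computes by hand), and the box id and text are illustrative:

    // Show a floating action box just below the text the user selected.
    document.addEventListener('mouseup', function (event) {
        var selection = window.getSelection().toString();
        if (!selection.length) return;
        var box = document.createElement('div');
        box.id = 'highlightSnippet';
        box.textContent = 'Ignore changes containing: ' + selection;
        box.style.position = 'absolute';
        box.style.left = event.pageX + 'px';        // pageX/Y include scroll
        box.style.top = (event.pageY + 20) + 'px';  // 20px below the cursor
        document.body.appendChild(box);
    });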

View File

@@ -1,115 +1,110 @@
$(document).ready(function () {
var a = document.getElementById("a");
var b = document.getElementById("b");
var result = document.getElementById("result");
var inputs;
var a = document.getElementById("a");
var b = document.getElementById("b");
var result = document.getElementById("result");
$('#jump-next-diff').click(function () {
function changed() {
// https://github.com/kpdecker/jsdiff/issues/389
// I would love to use `{ignoreWhitespace: true}` here but it breaks the formatting
options = {
ignoreWhitespace: document.getElementById("ignoreWhitespace").checked,
};
var element = inputs[inputs.current];
var headerOffset = 80;
var elementPosition = element.getBoundingClientRect().top;
var offsetPosition = elementPosition - headerOffset + window.scrollY;
window.scrollTo({
top: offsetPosition,
behavior: "smooth",
});
inputs.current++;
if (inputs.current >= inputs.length) {
inputs.current = 0;
}
});
function changed() {
// https://github.com/kpdecker/jsdiff/issues/389
// I would love to use `{ignoreWhitespace: true}` here but it breaks the formatting
options = {
ignoreWhitespace: document.getElementById("ignoreWhitespace").checked,
};
var diff = Diff[window.diffType](a.textContent, b.textContent, options);
var fragment = document.createDocumentFragment();
for (var i = 0; i < diff.length; i++) {
if (diff[i].added && diff[i + 1] && diff[i + 1].removed) {
var swap = diff[i];
diff[i] = diff[i + 1];
diff[i + 1] = swap;
}
var node;
if (diff[i].removed) {
node = document.createElement("del");
node.classList.add("change");
const wrapper = node.appendChild(document.createElement("span"));
wrapper.appendChild(document.createTextNode(diff[i].value));
} else if (diff[i].added) {
node = document.createElement("ins");
node.classList.add("change");
const wrapper = node.appendChild(document.createElement("span"));
wrapper.appendChild(document.createTextNode(diff[i].value));
} else {
node = document.createTextNode(diff[i].value);
}
fragment.appendChild(node);
}
result.textContent = "";
result.appendChild(fragment);
// For nice mouse-over hover/title information
const removed_current_option = $('#diff-version option:selected')
if (removed_current_option) {
$('del').each(function () {
$(this).prop('title', 'Removed '+removed_current_option[0].label);
});
}
const inserted_current_option = $('#current-version option:selected')
if (removed_current_option) {
$('ins').each(function () {
$(this).prop('title', 'Inserted '+inserted_current_option[0].label);
});
}
// Set the list of possible differences to jump to
inputs = document.querySelectorAll('#diff-ui .change')
// Set the "current" diff pointer
inputs.current = 0;
// Goto diff
$('#jump-next-diff').click();
var diff = Diff[window.diffType](a.textContent, b.textContent, options);
var fragment = document.createDocumentFragment();
for (var i = 0; i < diff.length; i++) {
if (diff[i].added && diff[i + 1] && diff[i + 1].removed) {
var swap = diff[i];
diff[i] = diff[i + 1];
diff[i + 1] = swap;
}
onDiffTypeChange(
document.querySelector('#settings [name="diff_type"]:checked'),
);
changed();
a.onpaste = a.onchange = b.onpaste = b.onchange = changed;
if ("oninput" in a) {
a.oninput = b.oninput = changed;
var node;
if (diff[i].removed) {
node = document.createElement("del");
node.classList.add("change");
const wrapper = node.appendChild(document.createElement("span"));
wrapper.appendChild(document.createTextNode(diff[i].value));
} else if (diff[i].added) {
node = document.createElement("ins");
node.classList.add("change");
const wrapper = node.appendChild(document.createElement("span"));
wrapper.appendChild(document.createTextNode(diff[i].value));
} else {
a.onkeyup = b.onkeyup = changed;
node = document.createTextNode(diff[i].value);
}
fragment.appendChild(node);
}
function onDiffTypeChange(radio) {
window.diffType = radio.value;
// Not necessary
// document.title = "Diff " + radio.value.slice(4);
result.textContent = "";
result.appendChild(fragment);
// Jump at start
inputs.current = 0;
next_diff();
}
window.onload = function () {
/* Convert what is options from UTC time.time() to local browser time */
var diffList = document.getElementById("diff-version");
if (typeof diffList != "undefined" && diffList != null) {
for (var option of diffList.options) {
var dateObject = new Date(option.value * 1000);
option.label = dateObject.toLocaleString();
}
}
var radio = document.getElementsByName("diff_type");
for (var i = 0; i < radio.length; i++) {
radio[i].onchange = function (e) {
onDiffTypeChange(e.target);
changed();
};
}
/* Set current version date as local time in the browser also */
var current_v = document.getElementById("current-v-date");
var dateObject = new Date(newest_version_timestamp * 1000);
current_v.innerHTML = dateObject.toLocaleString();
onDiffTypeChange(
document.querySelector('#settings [name="diff_type"]:checked'),
);
changed();
};
document.getElementById("ignoreWhitespace").onchange = function (e) {
changed();
};
a.onpaste = a.onchange = b.onpaste = b.onchange = changed;
});
if ("oninput" in a) {
a.oninput = b.oninput = changed;
} else {
a.onkeyup = b.onkeyup = changed;
}
function onDiffTypeChange(radio) {
window.diffType = radio.value;
// Not necessary
// document.title = "Diff " + radio.value.slice(4);
}
var radio = document.getElementsByName("diff_type");
for (var i = 0; i < radio.length; i++) {
radio[i].onchange = function (e) {
onDiffTypeChange(e.target);
changed();
};
}
document.getElementById("ignoreWhitespace").onchange = function (e) {
changed();
};
var inputs = document.getElementsByClassName("change");
inputs.current = 0;
function next_diff() {
var element = inputs[inputs.current];
var headerOffset = 80;
var elementPosition = element.getBoundingClientRect().top;
var offsetPosition = elementPosition - headerOffset + window.scrollY;
window.scrollTo({
top: offsetPosition,
behavior: "smooth",
});
inputs.current++;
if (inputs.current >= inputs.length) {
inputs.current = 0;
}
}
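Both versions of diff.js above lean on the jsdiff library: Diff[window.diffType] picks the granularity (diffChars, diffWords, diffLines), and each returned change object carries a value plus added/removed flags. A stripped-down sketch of the same render loop, assuming jsdiff is loaded as the global Diff; the function name renderDiff is illustrative:

    // Render a word-level diff of two strings into <ins>/<del> markup.
    function renderDiff(oldText, newText, target) {
        var fragment = document.createDocumentFragment();
        Diff.diffWords(oldText, newText).forEach(function (part) {
            var node;
            if (part.removed) {
                node = document.createElement('del');
                node.appendChild(document.createTextNode(part.value));
            } else if (part.added) {
                node = document.createElement('ins');
                node.appendChild(document.createTextNode(part.value));
            } else {
                node = document.createTextNode(part.value);
            }
            fragment.appendChild(node);
        });
        target.textContent = '';
        target.appendChild(fragment);
    }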

View File

@@ -1,4 +1,19 @@
$(document).ready(function () {
function toggle() {
if ($('input[name="application-fetch_backend"]:checked').val() != 'html_requests') {
$('#requests-override-options').hide();
$('#webdriver-override-options').show();
} else {
$('#requests-override-options').show();
$('#webdriver-override-options').hide();
}
}
$('input[name="application-fetch_backend"]').click(function (e) {
toggle();
});
toggle();
$("#api-key").hover(
function () {
$("#api-key-copy").html('copy').fadeIn();
@@ -17,10 +32,5 @@ $(document).ready(function () {
window.getSelection().removeAllRanges();
});
$("#notification-token-toggle").click(function (e) {
e.preventDefault();
$('#notification-tokens-info').toggle();
});
});

View File

@@ -13,15 +13,26 @@ $(document).ready(function() {
$('#send-test-notification').click(function (e) {
e.preventDefault();
data = {
notification_body: $('#notification_body').val(),
notification_format: $('#notification_format').val(),
notification_title: $('#notification_title').val(),
notification_urls: $('.notification-urls').val(),
tags: $('#tags').val(),
window_url: window.location.href,
}
// this can be global
var csrftoken = $('input[name=csrf_token]').val();
$.ajaxSetup({
beforeSend: function(xhr, settings) {
if (!/^(GET|HEAD|OPTIONS|TRACE)$/i.test(settings.type) && !this.crossDomain) {
xhr.setRequestHeader("X-CSRFToken", csrftoken)
}
}
})
data = {
window_url : window.location.href,
notification_urls : $('.notification-urls').val(),
}
for (key in data) {
if (!data[key].length) {
alert(key+" is empty, cannot send test.")
return;
}
}
$.ajax({
type: "POST",
@@ -35,7 +46,7 @@ $(document).ready(function() {
}
}).done(function(data){
console.log(data);
alert(data);
alert('Sent');
}).fail(function(data){
console.log(data);
alert('There was an error communicating with the server.');

View File

@@ -1,53 +0,0 @@
function redirect_to_version(version) {
var currentUrl = window.location.href;
var baseUrl = currentUrl.split('?')[0]; // Base URL without query parameters
var anchor = '';
// Check if there is an anchor
if (baseUrl.indexOf('#') !== -1) {
anchor = baseUrl.substring(baseUrl.indexOf('#'));
baseUrl = baseUrl.substring(0, baseUrl.indexOf('#'));
}
window.location.href = baseUrl + '?version=' + version + anchor;
}
document.addEventListener('keydown', function (event) {
var selectElement = document.getElementById('preview-version');
if (selectElement) {
var selectedOption = selectElement.querySelector('option:checked');
if (selectedOption) {
if (event.key === 'ArrowLeft') {
if (selectedOption.previousElementSibling) {
redirect_to_version(selectedOption.previousElementSibling.value);
}
} else if (event.key === 'ArrowRight') {
if (selectedOption.nextElementSibling) {
redirect_to_version(selectedOption.nextElementSibling.value);
}
}
}
}
});
document.getElementById('preview-version').addEventListener('change', function () {
redirect_to_version(this.value);
});
var selectElement = document.getElementById('preview-version');
if (selectElement) {
var selectedOption = selectElement.querySelector('option:checked');
if (selectedOption) {
if (selectedOption.previousElementSibling) {
document.getElementById('btn-previous').href = "?version=" + selectedOption.previousElementSibling.value;
} else {
document.getElementById('btn-previous').remove()
}
if (selectedOption.nextElementSibling) {
document.getElementById('btn-next').href = "?version=" + selectedOption.nextElementSibling.value;
} else {
document.getElementById('btn-next').remove()
}
}
}
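redirect_to_version above splits the URL on '?' and re-appends the anchor by hand. For reference, the same behaviour falls out of the standard URL API, which preserves the hash fragment automatically; this is an alternative sketch, not the code the project ships:

    // Equivalent of redirect_to_version() using the URL API.
    function redirectToVersion(version) {
        var url = new URL(window.location.href);
        url.searchParams.set('version', version); // replaces any existing ?version=
        window.location.href = url.toString();    // the #anchor survives the round trip
    }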

View File

@@ -3,50 +3,45 @@
* Toggles theme between light and dark mode.
*/
$(document).ready(function () {
const button = document.getElementById("toggle-light-mode");
const button = document.getElementById("toggle-light-mode");
button.onclick = () => {
const htmlElement = document.getElementsByTagName("html");
const isDarkMode = htmlElement[0].dataset.darkmode === "true";
htmlElement[0].dataset.darkmode = !isDarkMode;
setCookieValue(!isDarkMode);
};
button.onclick = () => {
const htmlElement = document.getElementsByTagName("html");
const isDarkMode = htmlElement[0].dataset.darkmode === "true";
htmlElement[0].dataset.darkmode = !isDarkMode;
setCookieValue(!isDarkMode);
};
const setCookieValue = (value) => {
document.cookie = `css_dark_mode=${value};max-age=31536000;path=/`
}
const setCookieValue = (value) => {
document.cookie = `css_dark_mode=${value};max-age=31536000;path=/`
}
// Search input box behaviour
// Search input box behaviour
const toggle_search = document.getElementById("toggle-search");
const search_q = document.getElementById("search-q");
if(search_q) {
window.addEventListener('keydown', function (e) {
if (e.altKey == true && e.keyCode == 83) {
search_q.classList.toggle('expanded');
search_q.focus();
}
});
const search_q = document.getElementById("search-q");
window.addEventListener('keydown', function (e) {
search_q.onkeydown = (e) => {
var key = e.keyCode || e.which;
if (key === 13) {
document.searchForm.submit();
}
};
toggle_search.onclick = () => {
// Could be that they want to search something once text is in there
if (search_q.value.length) {
document.searchForm.submit();
} else {
// If not..
search_q.classList.toggle('expanded');
search_q.focus();
}
};
if (e.altKey == true && e.keyCode == 83)
search_q.classList.toggle('expanded');
search_q.focus();
});
search_q.onkeydown = (e) => {
var key = e.keyCode || e.which;
if (key === 13) {
document.searchForm.submit();
}
};
toggle_search.onclick = () => {
// Could be that they want to search something once text is in there
if (search_q.value.length) {
document.searchForm.submit();
} else {
// If not..
search_q.classList.toggle('expanded');
search_q.focus();
}
};
$('#heart-us').click(function () {
$("#overlay").toggleClass('visible');
heartpath.style.fill = document.getElementById("overlay").classList.contains("visible") ? '#ff0000' : 'var(--color-background)';
});
});
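The toggle above writes the preference into a css_dark_mode cookie (one-year max-age) and flips the html element's data-darkmode attribute. Restoring it on the next page load is the mirror image; a minimal sketch, with the cookie name taken from the code above, while the regex-based read is an assumption (in practice the server may render the attribute directly):

    // Restore the saved theme from the css_dark_mode cookie before first paint.
    (function () {
        var match = document.cookie.match(/(?:^|;\s*)css_dark_mode=(true|false)/);
        if (match) {
            document.documentElement.dataset.darkmode = match[1];
        }
    })();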

View File

@@ -1,29 +0,0 @@
$(document).ready(function () {
// Lazy Hide/Show elements mechanism
$('[data-visible-for]').hide();
function show_related_elem(e) {
var n = $(e).attr('name') + "=" + $(e).val();
if (n === 'fetch_backend=system') {
n = "fetch_backend=" + default_system_fetch_backend;
}
$(`[data-visible-for~="${n}"]`).show();
}
$(':radio').on('keyup keypress blur change click', function (e) {
$(`[data-visible-for]`).hide();
$('.advanced-options').hide();
show_related_elem(this);
});
$(':radio:checked').each(function (e) {
show_related_elem(this);
})
// Show advanced
$('.show-advanced').click(function (e) {
$(this).closest('.tab-pane-inner').find('.advanced-options').each(function (e) {
$(this).toggle();
})
});
});
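The data-visible-for convention above lets form markup declare which radio value it belongs to, and the script shows or hides it whenever a radio changes. A condensed sketch of the same mechanism, with the markup convention shown in a comment and the helper name showRelated being illustrative:

    // Reveal every element tagged for the currently checked radio value.
    // Markup convention: <div data-visible-for="fetch_backend=html_webdriver">...</div>
    function showRelated(radio) {
        var key = $(radio).attr('name') + '=' + $(radio).val();
        $('[data-visible-for]').hide();
        $('[data-visible-for~="' + key + '"]').show();
    }
    $(':radio').on('change', function () { showRelated(this); });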

View File

@@ -2,258 +2,250 @@
// All rights reserved.
// yes - this is really a hack, if you are a front-ender and want to help, please get in touch!
let runInClearMode = false;
$(document).ready(function () {
$(document).ready(() => {
let currentSelections = [];
let currentSelection = null;
let appendToList = false;
let c, xctx, ctx;
let xScale = 1, yScale = 1;
let selectorImage, selectorImageRect, selectorData;
var current_selected_i;
var state_clicked = false;
var c;
// Global jQuery selectors with "Elem" appended
const $selectorCanvasElem = $('#selector-canvas');
const $includeFiltersElem = $("#include_filters");
const $selectorBackgroundElem = $("img#selector-background");
const $selectorCurrentXpathElem = $("#selector-current-xpath span");
const $fetchingUpdateNoticeElem = $('.fetching-update-notice');
const $selectorWrapperElem = $("#selector-wrapper");
// greyed out fill context
var xctx;
// redline highlight context
var ctx;
// Color constants
const FILL_STYLE_HIGHLIGHT = 'rgba(205,0,0,0.35)';
const FILL_STYLE_GREYED_OUT = 'rgba(205,205,205,0.95)';
const STROKE_STYLE_HIGHLIGHT = 'rgba(255,0,0, 0.9)';
const FILL_STYLE_REDLINE = 'rgba(255,0,0, 0.1)';
const STROKE_STYLE_REDLINE = 'rgba(225,0,0,0.9)';
var current_default_xpath = [];
var x_scale = 1;
var y_scale = 1;
var selector_image;
var selector_image_rect;
var selector_data;
$('#visualselector-tab').click(() => {
$selectorBackgroundElem.off('load');
currentSelections = [];
bootstrapVisualSelector();
$('#visualselector-tab').click(function () {
$("img#selector-background").off('load');
state_clicked = false;
current_selected_i = false;
bootstrap_visualselector();
});
function clearReset() {
ctx.clearRect(0, 0, c.width, c.height);
if ($includeFiltersElem.val().length) {
alert("Existing filters under the 'Filters & Triggers' tab were cleared.");
}
$includeFiltersElem.val('');
currentSelections = [];
// Means we ignore the xpaths from the scraper marked as sel.highlight_as_custom_filter (it matched a previous selector)
runInClearMode = true;
highlightCurrentSelected();
}
function splitToList(v) {
return v.split('\n').map(line => line.trim()).filter(line => line.length > 0);
}
function sortScrapedElementsBySize() {
// Sort the currentSelections array by area (width * height) in descending order
selectorData['size_pos'].sort((a, b) => {
const areaA = a.width * a.height;
const areaB = b.width * b.height;
return areaB - areaA;
});
}
$(document).on('keydown keyup', (event) => {
if (event.code === 'ShiftLeft' || event.code === 'ShiftRight') {
appendToList = event.type === 'keydown';
}
if (event.type === 'keydown') {
if ($selectorBackgroundElem.is(":visible") && event.key === "Escape") {
clearReset();
$(document).on('keydown', function (event) {
if ($("img#selector-background").is(":visible")) {
if (event.key == "Escape") {
state_clicked = false;
ctx.clearRect(0, 0, c.width, c.height);
}
}
});
$('#clear-selector').on('click', () => {
clearReset();
});
// So if they start switching between visualSelector and manual filters, stop it from rendering old filters
$('li.tab a').on('click', () => {
runInClearMode = true;
});
if (!window.location.hash || window.location.hash !== '#visualselector') {
$selectorBackgroundElem.attr('src', '');
// For when the page loads
if (!window.location.hash || window.location.hash != '#visualselector') {
$("img#selector-background").attr('src', '');
return;
}
bootstrapVisualSelector();
// Handle clearing button/link
$('#clear-selector').on('click', function (event) {
if (!state_clicked) {
alert('Oops, Nothing selected!');
}
state_clicked = false;
ctx.clearRect(0, 0, c.width, c.height);
xctx.clearRect(0, 0, c.width, c.height);
$("#include_filters").val('');
});
function bootstrapVisualSelector() {
$selectorBackgroundElem
.on("error", () => {
$fetchingUpdateNoticeElem.html("<strong>Ooops!</strong> The VisualSelector tool needs at least one fetched page, please unpause the watch and/or wait for the watch to complete fetching and then reload this page.")
.css('color', '#bb0000');
$('#selector-current-xpath, #clear-selector').hide();
})
.on('load', () => {
bootstrap_visualselector();
function bootstrap_visualselector() {
if (1) {
// bootstrap it, this will trigger everything else
$("img#selector-background").on("error", function () {
$('.fetching-update-notice').html("<strong>Ooops!</strong> The VisualSelector tool needs atleast one fetched page, please unpause the watch and/or wait for the watch to complete fetching and then reload this page.");
$('.fetching-update-notice').css('color','#bb0000');
$('#selector-current-xpath').hide();
$('#clear-selector').hide();
}).bind('load', function () {
console.log("Loaded background...");
c = document.getElementById("selector-canvas");
// greyed out fill context
xctx = c.getContext("2d");
// redline highlight context
ctx = c.getContext("2d");
fetchData();
$selectorCanvasElem.off("mousemove mousedown");
})
.attr("src", screenshot_url);
let s = `${$selectorBackgroundElem.attr('src')}?${new Date().getTime()}`;
$selectorBackgroundElem.attr('src', s);
}
function alertIfFilterNotFound() {
let existingFilters = splitToList($includeFiltersElem.val());
let sizePosXpaths = selectorData['size_pos'].map(sel => sel.xpath);
for (let filter of existingFilters) {
if (!sizePosXpaths.includes(filter)) {
alert(`One or more of your existing filters was not found and will be removed when a new filter is selected.`);
break;
}
if ($("#include_filters").val().trim().length) {
current_default_xpath = $("#include_filters").val().split(/\r?\n/g);
} else {
current_default_xpath = [];
}
fetch_data();
$('#selector-canvas').off("mousemove mousedown");
// screenshot_url defined in the edit.html template
}).attr("src", screenshot_url);
}
// Tell visualSelector that the image should update
var s = $("img#selector-background").attr('src') + "?" + new Date().getTime();
$("img#selector-background").attr('src', s)
}
function fetchData() {
$fetchingUpdateNoticeElem.html("Fetching element data..");
// This is fired once the img src is loaded in bootstrap_visualselector()
function fetch_data() {
// Image is ready
$('.fetching-update-notice').html("Fetching element data..");
$.ajax({
url: watch_visual_selector_data_url,
context: document.body
}).done((data) => {
$fetchingUpdateNoticeElem.html("Rendering..");
selectorData = data;
sortScrapedElementsBySize();
console.log(`Reported browser width from backend: ${data['browser_width']}`);
// Little sanity check for the user, alert them if something missing
alertIfFilterNotFound();
setScale();
reflowSelector();
$fetchingUpdateNoticeElem.fadeOut();
}).done(function (data) {
$('.fetching-update-notice').html("Rendering..");
selector_data = data;
console.log("Reported browser width from backend: " + data['browser_width']);
state_clicked = false;
set_scale();
reflow_selector();
$('.fetching-update-notice').fadeOut();
});
}
function updateFiltersText() {
// Assuming currentSelections is already defined and contains the selections
let uniqueSelections = new Set(currentSelections.map(sel => (sel[0] === '/' ? `xpath:${sel.xpath}` : sel.xpath)));
if (currentSelections.length > 0) {
// Convert the Set back to an array and join with newline characters
let textboxFilterText = Array.from(uniqueSelections).join("\n");
$includeFiltersElem.val(textboxFilterText);
}
}
function set_scale() {
function setScale() {
$selectorWrapperElem.show();
selectorImage = $selectorBackgroundElem[0];
selectorImageRect = selectorImage.getBoundingClientRect();
// some things to check if the scaling doesn't work
// - that the widths/sizes really are about the actual screen size cat elements.json |grep -o width......|sort|uniq
$("#selector-wrapper").show();
selector_image = $("img#selector-background")[0];
selector_image_rect = selector_image.getBoundingClientRect();
$selectorCanvasElem.attr({
'height': selectorImageRect.height,
'width': selectorImageRect.width
});
$selectorWrapperElem.attr('width', selectorImageRect.width);
$('#visual-selector-heading').css('max-width', selectorImageRect.width + "px")
xScale = selectorImageRect.width / selectorImage.naturalWidth;
yScale = selectorImageRect.height / selectorImage.naturalHeight;
ctx.strokeStyle = STROKE_STYLE_HIGHLIGHT;
ctx.fillStyle = FILL_STYLE_REDLINE;
// make the canvas the same size as the image
$('#selector-canvas').attr('height', selector_image_rect.height);
$('#selector-canvas').attr('width', selector_image_rect.width);
$('#selector-wrapper').attr('width', selector_image_rect.width);
x_scale = selector_image_rect.width / selector_data['browser_width'];
y_scale = selector_image_rect.height / selector_image.naturalHeight;
ctx.strokeStyle = 'rgba(255,0,0, 0.9)';
ctx.fillStyle = 'rgba(255,0,0, 0.1)';
ctx.lineWidth = 3;
console.log("Scaling set x: " + xScale + " by y:" + yScale);
$("#selector-current-xpath").css('max-width', selectorImageRect.width);
console.log("scaling set x: " + x_scale + " by y:" + y_scale);
$("#selector-current-xpath").css('max-width', selector_image_rect.width);
}
function reflowSelector() {
$(window).resize(() => {
setScale();
highlightCurrentSelected();
function reflow_selector() {
$(window).resize(function () {
set_scale();
highlight_current_selected_i();
});
var selector_currnt_xpath_text = $("#selector-current-xpath span");
setScale();
set_scale();
console.log(selectorData['size_pos'].length + " selectors found");
console.log(selector_data['size_pos'].length + " selectors found");
let existingFilters = splitToList($includeFiltersElem.val());
selectorData['size_pos'].forEach(sel => {
if ((!runInClearMode && sel.highlight_as_custom_filter) || existingFilters.includes(sel.xpath)) {
console.log("highlighting " + c);
currentSelections.push(sel);
// highlight the default one if we can find it in the xPath list
// or the xpath matches the default one
found = false;
if (current_default_xpath.length) {
// Find the first one that matches
// @todo In the future paint all that match
for (const c of current_default_xpath) {
for (var i = selector_data['size_pos'].length; i !== 0; i--) {
if (selector_data['size_pos'][i - 1].xpath === c) {
console.log("highlighting " + c);
current_selected_i = i - 1;
highlight_current_selected_i();
found = true;
break;
}
}
if (found) {
break;
}
}
});
if (!found) {
alert("Unfortunately your existing CSS/xPath Filter was no longer found!");
}
}
highlightCurrentSelected();
updateFiltersText();
$('#selector-canvas').bind('mousemove', function (e) {
if (state_clicked) {
return;
}
ctx.clearRect(0, 0, c.width, c.height);
current_selected_i = null;
$selectorCanvasElem.bind('mousemove', handleMouseMove.debounce(5));
$selectorCanvasElem.bind('mousedown', handleMouseDown.debounce(5));
$selectorCanvasElem.bind('mouseleave', highlightCurrentSelected.debounce(5));
function handleMouseMove(e) {
if (!e.offsetX && !e.offsetY) {
const targetOffset = $(e.target).offset();
// Add in offset
if ((typeof e.offsetX === "undefined" || typeof e.offsetY === "undefined") || (e.offsetX === 0 && e.offsetY === 0)) {
var targetOffset = $(e.target).offset();
e.offsetX = e.pageX - targetOffset.left;
e.offsetY = e.pageY - targetOffset.top;
}
ctx.fillStyle = FILL_STYLE_HIGHLIGHT;
// Reverse order - the most specific one should be deeper/"laster"
// Basically, find the most 'deepest'
var found = 0;
ctx.fillStyle = 'rgba(205,0,0,0.35)';
// Will be sorted by smallest width*height first
for (var i = 0; i <= selector_data['size_pos'].length; i++) {
// draw all of them? let them choose somehow?
var sel = selector_data['size_pos'][i];
// If we are in a bounding-box
if (e.offsetY > sel.top * y_scale && e.offsetY < sel.top * y_scale + sel.height * y_scale
&&
e.offsetX > sel.left * y_scale && e.offsetX < sel.left * y_scale + sel.width * y_scale
selectorData['size_pos'].forEach(sel => {
if (e.offsetY > sel.top * yScale && e.offsetY < sel.top * yScale + sel.height * yScale &&
e.offsetX > sel.left * yScale && e.offsetX < sel.left * yScale + sel.width * yScale) {
setCurrentSelectedText(sel.xpath);
drawHighlight(sel);
currentSelections.push(sel);
currentSelection = sel;
highlightCurrentSelected();
currentSelections.pop();
) {
// FOUND ONE
set_current_selected_text(sel.xpath);
ctx.strokeRect(sel.left * x_scale, sel.top * y_scale, sel.width * x_scale, sel.height * y_scale);
ctx.fillRect(sel.left * x_scale, sel.top * y_scale, sel.width * x_scale, sel.height * y_scale);
// no need to keep digging
// @todo or, O to go out/up, I to go in
// or double click to go up/out the selector?
current_selected_i = i;
found += 1;
break;
}
})
}
}.debounce(5));
function set_current_selected_text(s) {
selector_currnt_xpath_text[0].innerHTML = s;
}
function highlight_current_selected_i() {
if (state_clicked) {
state_clicked = false;
xctx.clearRect(0, 0, c.width, c.height);
return;
}
var sel = selector_data['size_pos'][current_selected_i];
if (sel[0] == '/') {
// @todo - not sure just checking / is right
$("#include_filters").val('xpath:' + sel.xpath);
} else {
$("#include_filters").val(sel.xpath);
}
xctx.fillStyle = 'rgba(205,205,205,0.95)';
xctx.strokeStyle = 'rgba(225,0,0,0.9)';
xctx.lineWidth = 3;
xctx.fillRect(0, 0, c.width, c.height);
// Clear out what only should be seen (make a clear/clean spot)
xctx.clearRect(sel.left * x_scale, sel.top * y_scale, sel.width * x_scale, sel.height * y_scale);
xctx.strokeRect(sel.left * x_scale, sel.top * y_scale, sel.width * x_scale, sel.height * y_scale);
state_clicked = true;
set_current_selected_text(sel.xpath);
}
function setCurrentSelectedText(s) {
$selectorCurrentXpathElem[0].innerHTML = s;
}
function drawHighlight(sel) {
ctx.strokeRect(sel.left * xScale, sel.top * yScale, sel.width * xScale, sel.height * yScale);
ctx.fillRect(sel.left * xScale, sel.top * yScale, sel.width * xScale, sel.height * yScale);
}
function handleMouseDown() {
// If we are in 'appendToList' mode, grow the list, if not, just 1
currentSelections = appendToList ? [...currentSelections, currentSelection] : [currentSelection];
highlightCurrentSelected();
updateFiltersText();
}
}
function highlightCurrentSelected() {
xctx.fillStyle = FILL_STYLE_GREYED_OUT;
xctx.strokeStyle = STROKE_STYLE_REDLINE;
xctx.lineWidth = 3;
xctx.clearRect(0, 0, c.width, c.height);
currentSelections.forEach(sel => {
//xctx.clearRect(sel.left * xScale, sel.top * yScale, sel.width * xScale, sel.height * yScale);
xctx.strokeRect(sel.left * xScale, sel.top * yScale, sel.width * xScale, sel.height * yScale);
$('#selector-canvas').bind('mousedown', function (e) {
highlight_current_selected_i();
});
}
});
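Both versions of the visual selector share one core calculation: the scraper reports element boxes in the page's natural pixel coordinates, so each box must be scaled by displayed-size / natural-size before hit-testing or drawing on the canvas (the older code divides by the backend-reported browser_width instead of naturalWidth). A minimal sketch of that mapping; scaleBox is an illustrative name:

    // Map an element box from natural page coordinates onto the scaled screenshot.
    function scaleBox(sel, img) {
        var rect = img.getBoundingClientRect();
        var xScale = rect.width / img.naturalWidth;
        var yScale = rect.height / img.naturalHeight;
        return {
            left: sel.left * xScale,
            top: sel.top * yScale,
            width: sel.width * xScale,
            height: sel.height * yScale
        };
    }
    // e.g. var box = scaleBox(sel, document.getElementById('selector-background'));
    //      ctx.strokeRect(box.left, box.top, box.width, box.height);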

View File

@@ -4,14 +4,6 @@ $(function () {
$(this).closest('.unviewed').removeClass('unviewed');
});
$('td[data-timestamp]').each(function () {
$(this).prop('title', new Intl.DateTimeFormat(undefined,
{
dateStyle: 'full',
timeStyle: 'long'
}).format($(this).data('timestamp') * 1000));
})
$("#checkbox-assign-tag").click(function (e) {
$('#op_extradata').val(prompt("Enter a tag name"));
});
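The timestamp tooltip above is a one-liner with Intl.DateTimeFormat: the data-timestamp attribute holds unix seconds, while Intl (like Date) expects milliseconds. As a standalone sketch, with formatTimestamp being an illustrative name:

    // Format a unix timestamp (seconds) as a localized date/time string.
    function formatTimestamp(seconds) {
        return new Intl.DateTimeFormat(undefined, {
            dateStyle: 'full',
            timeStyle: 'long'
        }).format(seconds * 1000); // Intl.DateTimeFormat takes milliseconds
    }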

View File

@@ -1,18 +1,40 @@
function toggleOpacity(checkboxSelector, fieldSelector, inverted) {
const checkbox = document.querySelector(checkboxSelector);
const fields = document.querySelectorAll(fieldSelector);
function updateOpacity() {
const opacityValue = !checkbox.checked ? (inverted ? 0.6 : 1) : (inverted ? 1 : 0.6);
fields.forEach(field => {
field.style.opacity = opacityValue;
});
}
// Initial setup
updateOpacity();
checkbox.addEventListener('change', updateOpacity);
}
$(document).ready(function () {
function toggle() {
if ($('input[name="fetch_backend"]:checked').val() == 'html_webdriver') {
if (playwright_enabled) {
// playwright supports headers, so hide everything else
// See #664
$('#requests-override-options #request-method').hide();
$('#requests-override-options #request-body').hide();
// @todo connect this one up
$('#ignore-status-codes-option').hide();
} else {
// selenium/webdriver doesn't support anything afaik, hide it all
$('#requests-override-options').hide();
}
$('#webdriver-override-options').show();
} else if ($('input[name="fetch_backend"]:checked').val() == 'system') {
$('#requests-override-options #request-method').hide();
$('#requests-override-options #request-body').hide();
$('#ignore-status-codes-option').hide();
$('#requests-override-options').hide();
$('#webdriver-override-options').hide();
} else {
$('#requests-override-options').show();
$('#requests-override-options *:hidden').show();
$('#webdriver-override-options').hide();
}
}
$('input[name="fetch_backend"]').click(function (e) {
toggle();
});
toggle();
$('#notification-setting-reset-to-default').click(function (e) {
$('#notification_title').val('');
$('#notification_body').val('');
@@ -20,12 +42,4 @@ $(document).ready(function () {
$('#notification_urls').val('');
e.preventDefault();
});
$("#notification-token-toggle").click(function (e) {
e.preventDefault();
$('#notification-tokens-info').toggle();
});
toggleOpacity('#time_between_check_use_default', '#time_between_check', false);
});

View File

@@ -68,7 +68,7 @@
--color-last-checked: #bbb;
--color-text-footer: #444;
--color-border-watch-table-cell: #eee;
--color-text-watch-tag-list: rgba(231, 0, 105, 0.4);
--color-text-watch-tag-list: #e70069;
--color-background-new-watch-form: rgba(0, 0, 0, 0.05);
--color-background-new-watch-input: var(--color-white);
--color-text-new-watch-input: var(--color-text);
@@ -111,7 +111,7 @@ html[data-darkmode="true"] {
--color-background-input: var(--color-grey-350);
--color-text-input-description: var(--color-grey-600);
--color-text-input-placeholder: var(--color-grey-600);
--color-text-watch-tag-list: rgba(250, 62, 146, 0.4);
--color-text-watch-tag-list: #fa3e92;
--color-background-code: var(--color-grey-200);
--color-background-tab: rgba(0, 0, 0, 0.2);
--color-background-tab-hover: rgba(0, 0, 0, 0.5);
@@ -126,8 +126,6 @@ html[data-darkmode="true"] {
html[data-darkmode="true"] .watch-table .title-col a[target="_blank"]::after,
html[data-darkmode="true"] .watch-table .current-diff-url::after {
filter: invert(0.5) hue-rotate(10deg) brightness(2); }
html[data-darkmode="true"] .watch-table .status-browsersteps {
filter: invert(0.5) hue-rotate(10deg) brightness(1.5); }
html[data-darkmode="true"] .watch-table .watch-controls .state-off img {
opacity: 0.3; }
html[data-darkmode="true"] .watch-table .watch-controls .state-on img {
@@ -189,10 +187,6 @@ ins {
padding: 0.5em; }
#settings ins {
padding: 0.5em; }
#settings option:checked {
font-weight: bold; }
#settings [type=radio], #settings [type=checkbox] {
vertical-align: middle; }
.source {
position: absolute;
@@ -224,10 +218,3 @@ td#diff-col div {
text-align: center; }
.tab-pane-inner#screenshot img {
max-width: 99%; }
#highlightSnippet {
background: var(--color-background);
padding: 1em;
border-radius: 5px;
background: var(--color-background);
box-shadow: 1px 1px 4px var(--color-shadow-jump); }

View File

@@ -77,13 +77,6 @@ ins {
ins {
padding: 0.5em;
}
option:checked {
font-weight: bold;
}
[type=radio],[type=checkbox] {
vertical-align: middle;
}
}
.source {
@@ -126,11 +119,3 @@ td#diff-col div {
max-width: 99%;
}
}
#highlightSnippet {
background: var(--color-background);
padding: 1em;
border-radius: 5px;
background: var(--color-background);
box-shadow: 1px 1px 4px var(--color-shadow-jump);
}

View File

@@ -6,10 +6,6 @@
}
li {
&.browser-step-with-error {
background-color: #ffd6d6;
border-radius: 4px;
}
&:not(:first-child) {
&:hover {
opacity: 1.0;
@@ -48,7 +44,7 @@
#browser-steps .flex-wrapper {
display: flex;
flex-flow: row;
height: 70vh;
height: 600px; /*@todo make this dynamic */
}
/* this is duplicate :( */

View File

@@ -1,6 +1,6 @@
#toggle-light-mode {
/* width: 3rem;*/
width: 3rem;
/* default */
.icon-dark {
display: none;

View File

@@ -1,24 +0,0 @@
ul#requests-extra_browsers {
list-style: none;
/* tidy up the table to look more "inline" */
li {
> label {
display: none;
}
}
/* each proxy entry is a `table` */
table {
tr {
display: inline;
}
}
}
#extra-browsers-setting {
border: 1px solid var(--color-grey-800);
border-radius: 4px;
margin: 1em;
padding: 1em;
}

View File

@@ -42,28 +42,4 @@ body.proxy-check-active {
color: var(--color-link);
}
}
}
#recommended-proxy {
display: grid;
gap: 2rem;
@media (min-width: 991px) {
grid-template-columns: repeat(2, 1fr);
}
> div {
border: 1px #aaa solid;
border-radius: 4px;
padding: 1em;
}
padding-bottom: 1em;
}
#extra-proxies-setting {
border: 1px solid var(--color-grey-800);
border-radius: 4px;
margin: 1em;
padding: 1em;
}
}

View File

@@ -1,38 +0,0 @@
#overlay {
opacity: 0.95;
position: fixed;
width: 350px;
max-width: 100%;
height: 100%;
top: 0;
right: -350px;
background-color: var(--color-table-stripe);
z-index: 2;
transform: translateX(0);
transition: transform .5s ease;
&.visible {
transform: translateX(-100%);
}
.content {
font-size: 0.875rem;
padding: 1rem;
margin-top: 5rem;
max-width: 400px;
color: var(--color-watch-table-row-text);
}
}
#heartpath {
&:hover {
fill: #ff0000 !important;
transition: all ease 0.3s !important;
}
transition: all ease 0.3s !important;
}

View File

@@ -1,25 +0,0 @@
.pure-menu-link {
padding: 0.5rem 1em;
line-height: 1.2rem;
}
.pure-menu-item {
svg {
height: 1.2rem;
}
* {
vertical-align: middle;
}
.github-link {
height: 1.8rem;
display: block;
svg {
height: 100%;
}
}
.bi-heart {
&:hover {
cursor: pointer;
}
}
}

Some files were not shown because too many files have changed in this diff.