mirror of https://github.com/dgtlmoon/changedetection.io.git
synced 2025-10-30 14:17:40 +00:00

Compare commits: browserste...master (313 commits)
Commit SHA1 list:

33ab4c8891 e1028f822d ae1cd61e61 a5fe1a771f b0980f45b8 afadaf5467 db11f601a1 ef04840dd2
1628586553 a23c07ba94 431fd168a1 7dbd0b75b2 ae532c82e8 ab0b85d088 66aec365c2 e09cea60ef
f304ae19db 2116b2cb93 8f580ac96b a8cadc3d16 c9290d73e0 2db5e906e9 0751bd371a 3ffa0805e9
3335270692 a7573b10ec df945ad743 4536e95205 1479d7bd46 9ba2094f75 8aa012ba8e 8bc6b10db1
76d799c95b 7c8bdfcc9f 01a938d7ce e44853c439 3830bec891 88ab663330 68335b95c3 7bbfa0ef32
e233d52931 181d32e82a a51614f83d 07f98d6bd3 f71550da4d 8c3d0d7e31 46658a85d6 d699652955
9e88db5d9b 5d9c102aff cb1c36d97d cc29ba5ea9 6f371b1bc6 785dabd071 09914d54a0 58b5586674
cb02ccc8b4 ec692ed727 2fb2ea573e ada2dc6112 ad9024a4f0 047c10e23c 4f83164544 6f926ed595
249dc55212 46252bc6f3 64350a2e78 2902c63a3b 55b8588f1f 02ecc4ae9a 3ee50b7832 66ddd87ee4
233189e4f7 b237fd7201 3c81efe2f4 0fcfb94690 bb6d4c2756 b59ce190ac 80be1a30f2 93b4f79006
3009e46617 8f040a1a84 4dbab8d77a cde42c8a49 3b9d19df43 6ad4acc9fc 3e59521f48 0970c087c8
676c550e6e 78fa47f6f8 4aa5bb6da3 f7dfc9bbb8 584b6e378d 754febfd33 0c9c475f32 e4baca1127
bb61a35a54 4b9ae5a97c c8caa0662d f4e8d1963f 45d5e961dc 45f2863966 01c1ac4c0c b2f9aec383
a95aa67aef cbeefeccbb 2b72d38235 8fe7aec3c6 6e1f5a8503 b74b76c9f9 a27265450c cc5455c3dc
9db7fb83eb f0061110c9 a13fedc0d6 7576bec66a 7672190923 0ade4307b0 8c03b65dc6 8a07459e43
cd8e115118 4ff7b20fcf 8120f00148 127abf49f1 db81c3c5e2 9952af7a52 790577c1b6 bab362fb7d
a177d02406 8b8f280565 e752875504 0a4562fc09 c84ac2eab1 3ae07ac633 8379fdb1f8 3f77e075b9
685bd01156 20bcca578a f05f143b46 d7f00679a0 b7da6f0ca7 e4a81ebe08 a4edc46af0 767db3b79b
4f6e9dcc56 aa4e182549 fe1f7c30e1 e5ed1ae349 d1b1dd70f4 93b14c9fc8 c9c5de20d8 011fa3540e
c3c3671f8b 5980bd9bcd 438871429c 173ce5bfa2 106b1f85fa a5c7f343d0 401886bcda c66fca9de9
daee4c5c17 af5d0b6963 f92dd81c8f 55cdcfe3ea 2f7520a6c5 4fdc5d7da2 308f30b2e8 4fa2042d12
2a4e1bad4e 8a317eead5 b58094877f afe252126c 342e6119f1 e4ff87e970 e45a544f15 9a5abaa17a
b8ecfff861 58e2a41c95 a7214db9c3 b9da4af64f b77105be7b 3d5a544ea6 4f362385e1 a01d6169d2
9beda3911d 5ed596bfa9 99ca8787ab 8f1a6feb90 c0e229201b 66bc7fbc04 530bd40ca5 36004cf74b
c7374245e1 59df59e9cd c0c2898b91 abac660bac 26de64d873 79d9a8ca28 5c391fbcad d7e24f64a5
d6427d823f 47eb874f47 37019355fd a8e7f8236e 2414b61fcb a63ffa89b1 59e93c29d0 d7173bb96e
d544e11a20 7f0c19c61c 30e84f1030 d5af91d8f7 4b18c633ba 08728d7d03 73f3beda00 7b8d335c43
ba0b6071e6 a6603d5ad6 26833781a7 f3ed9bdbb5 0f65178190 a58fc82575 2575c03ae0 9b7372fff0
fcd6ebe0ee c162ec9d52 bb7f7f473b a9ca511004 8df61f5eaa 162f573967 eada0ef08d f57bc10973
d2e8f822d6 5fd8200fd9 d0da8c9825 fd7574d21b c70706a27b 968c364999 031cb76b7d af568d064c
a75f57de43 72a1c3dda1 ffde79ecac 66ad43b2df 6b0e56ca80 5a2d84d8b4 a941156f26 a1fdeeaa29
40ea2604a7 ceda526093 4197254c53 a0b7efb436 5f5e8ede6c 52ca855a29 079efd0a85 3a583a4e5d
cfb4decf67 8067d5170b 5551acf67d 45a030bac6 96dc49e229 5f43d988a3 4269079c54 cdfb3f206c
9f326783e5 4e6e680d79 1378b5b2ff 456c6e3f58 61be7f68db 0e38a3c881 2c630e9853 786e0d1fab
78b7aee512 9d9d01863a 108cdf84a5 8c6f6f1578 df4ffaaff8 d522c65e50 c3b2a8b019 28d3151090
2a1c832f8d 0170adb171 cb62404b8c 8f9c46bd3f 97291ce6d0 f689e5418e f751f0b0ef ea9ba3bb2e
c7ffebce2a 54b7c070f7 6c1b687cd1 e850540a91 d4bc9dfc50 f26ea55e9c b53e1985ac 302ef80d95
5b97c29714 64075c87ee d58a71cffc 036b006226 f29f89d078 289f118581 10b2bbea83 32d110b92f
860a5f5c1a 70a18ee4b5 73189672c3 7e7d5dc383 1c2cfc37aa 0634fe021d 04934b6b3b ff00417bc5
849c5b2293
.dockerignore
@@ -29,3 +29,34 @@ venv/

# Visual Studio
.vscode/

# Test and development files
test-datastore/
tests/
*.md
!README.md

# Temporary and log files
*.log
*.tmp
tmp/
temp/

# Training data and large files
train-data/
works-data/

# Container files
Dockerfile*
docker-compose*.yml
.dockerignore

# Development certificates and keys
*.pem
*.key
*.crt
profile_output.prof

# Large binary files that shouldn't be in container
*.pdf
chrome.json
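A quick way to confirm what these rules exclude is to copy the build context into a throwaway image and list it (a sketch; the temporary Dockerfile path and the ctx-check tag are placeholders, not part of the repository):

    # Build a throwaway image whose only layer is the build context, then list
    # it; anything matched by .dockerignore should be absent from /ctx.
    cat > /tmp/ctx-check.Dockerfile <<'EOF'
    FROM busybox
    COPY . /ctx
    EOF
    docker build -q -f /tmp/ctx-check.Dockerfile -t ctx-check .
    docker run --rm ctx-check find /ctx -maxdepth 1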
51  .github/actions/extract-memory-report/action.yml  (vendored, new file)
@@ -0,0 +1,51 @@
name: 'Extract Memory Test Report'
description: 'Extracts and displays memory test report from a container'
inputs:
  container-name:
    description: 'Name of the container to extract logs from'
    required: true
  python-version:
    description: 'Python version for artifact naming'
    required: true
  output-dir:
    description: 'Directory to store output logs'
    required: false
    default: 'output-logs'

runs:
  using: "composite"
  steps:
    - name: Create output directory
      shell: bash
      run: |
        mkdir -p ${{ inputs.output-dir }}

    - name: Dump container log
      shell: bash
      run: |
        echo "Disabled for now"
        # return
        # docker logs ${{ inputs.container-name }} > ${{ inputs.output-dir }}/${{ inputs.container-name }}-stdout-${{ inputs.python-version }}.txt 2>&1 || echo "Could not get stdout"
        # docker logs ${{ inputs.container-name }} 2> ${{ inputs.output-dir }}/${{ inputs.container-name }}-stderr-${{ inputs.python-version }}.txt || echo "Could not get stderr"

    - name: Extract and display memory test report
      shell: bash
      run: |
        echo "Disabled for now"
        # echo "Extracting test-memory.log from container..."
        # docker cp ${{ inputs.container-name }}:/app/changedetectionio/test-memory.log ${{ inputs.output-dir }}/test-memory-${{ inputs.python-version }}.log || echo "test-memory.log not found in container"
        #
        # echo "=== Top 10 Highest Peak Memory Tests ==="
        # if [ -f ${{ inputs.output-dir }}/test-memory-${{ inputs.python-version }}.log ]; then
        #   grep "Peak memory:" ${{ inputs.output-dir }}/test-memory-${{ inputs.python-version }}.log | \
        #     sed 's/.*Peak memory: //' | \
        #     paste -d'|' - <(grep "Peak memory:" ${{ inputs.output-dir }}/test-memory-${{ inputs.python-version }}.log) | \
        #     sort -t'|' -k1 -nr | \
        #     cut -d'|' -f2 | \
        #     head -10
        #   echo ""
        #   echo "=== Full Memory Test Report ==="
        #   cat ${{ inputs.output-dir }}/test-memory-${{ inputs.python-version }}.log
        # else
        #   echo "No memory log available"
        # fi
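The disabled report step amounts to a rank-by-peak-memory pipeline; a minimal local sketch of the same idea, assuming a ./test-memory.log whose lines end in "Peak memory: <number>":

    LOG=test-memory.log
    # Pair each line's numeric peak value with the full line, sort numerically
    # descending on the value, then keep the ten heaviest tests.
    grep "Peak memory:" "$LOG" \
      | sed 's/.*Peak memory: //' \
      | paste -d'|' - <(grep "Peak memory:" "$LOG") \
      | sort -t'|' -k1 -nr \
      | cut -d'|' -f2 \
      | head -10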
8  .github/dependabot.yml  (vendored)
@@ -4,11 +4,11 @@ updates:
    directory: /
    schedule:
      interval: "weekly"
    "caronc/apprise":
      versioning-strategy: "increase"
      schedule:
        interval: "daily"
    groups:
      all:
        patterns:
          - "*"
  - package-ecosystem: pip
    directory: /
    schedule:
      interval: "weekly"
6  .github/test/Dockerfile-alpine  (vendored)
@@ -2,7 +2,7 @@
# Test that we can still build on Alpine (musl modified libc https://musl.libc.org/)
# Some packages won't install via pypi because they don't have a wheel available under this architecture.

FROM ghcr.io/linuxserver/baseimage-alpine:3.21
FROM ghcr.io/linuxserver/baseimage-alpine:3.22
ENV PYTHONUNBUFFERED=1

COPY requirements.txt /requirements.txt
@@ -18,17 +18,19 @@ RUN \
    libxslt-dev \
    openssl-dev \
    python3-dev \
    file \
    zip \
    zlib-dev && \
  apk add --update --no-cache \
    libjpeg \
    libxslt \
    file \
    nodejs \
    poppler-utils \
    python3 && \
  echo "**** pip3 install test of changedetection.io ****" && \
  python3 -m venv /lsiopy && \
  pip install -U pip wheel setuptools && \
  pip install -U --no-cache-dir --find-links https://wheel-index.linuxserver.io/alpine-3.21/ -r /requirements.txt && \
  pip install -U --no-cache-dir --find-links https://wheel-index.linuxserver.io/alpine-3.22/ -r /requirements.txt && \
  apk del --purge \
    build-dependencies
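The same musl check can be reproduced outside CI; a sketch, assuming Docker is available and the command is run from the repository root (the cdio-alpine-test tag is a placeholder):

    docker build -f .github/test/Dockerfile-alpine -t cdio-alpine-test .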
8  .github/workflows/codeql-analysis.yml  (vendored)
@@ -30,11 +30,11 @@ jobs:

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        uses: actions/checkout@v5

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v3
        uses: github/codeql-action/init@v4
        with:
          languages: ${{ matrix.language }}
          # If you wish to specify custom queries, you can do so here or in a config file.
@@ -45,7 +45,7 @@ jobs:
      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below)
      - name: Autobuild
        uses: github/codeql-action/autobuild@v3
        uses: github/codeql-action/autobuild@v4

      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 https://git.io/JvXDl
@@ -59,4 +59,4 @@ jobs:
      #   make release

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v3
        uses: github/codeql-action/analyze@v4
30  .github/workflows/containers.yml  (vendored)
@@ -39,12 +39,20 @@ jobs:
    # Or if we are in a tagged release scenario.
    if: ${{ github.event.workflow_run.conclusion == 'success' }} || ${{ github.event.release.tag_name }} != ''
    steps:
      - uses: actions/checkout@v4
      - uses: actions/checkout@v5
      - name: Set up Python 3.11
        uses: actions/setup-python@v5
        uses: actions/setup-python@v6
        with:
          python-version: 3.11

      - name: Cache pip packages
        uses: actions/cache@v4
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }}
          restore-keys: |
            ${{ runner.os }}-pip-

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
@@ -95,7 +103,7 @@ jobs:
          push: true
          tags: |
            ${{ secrets.DOCKER_HUB_USERNAME }}/changedetection.io:dev,ghcr.io/${{ github.repository }}:dev
          platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v8,linux/arm64/v8
          platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v8
          cache-from: type=gha
          cache-to: type=gha,mode=max

@@ -103,6 +111,13 @@ jobs:
      #    provenance: false

      # A new tagged release is required, which builds :tag and :latest
      - name: Debug release info
        if: github.event_name == 'release' && startsWith(github.event.release.tag_name, '0.')
        run: |
          echo "Release tag: ${{ github.event.release.tag_name }}"
          echo "Github ref: ${{ github.ref }}"
          echo "Github ref name: ${{ github.ref_name }}"

      - name: Docker meta :tag
        if: github.event_name == 'release' && startsWith(github.event.release.tag_name, '0.')
        uses: docker/metadata-action@v5
@@ -112,9 +127,10 @@ jobs:
            ${{ secrets.DOCKER_HUB_USERNAME }}/changedetection.io
            ghcr.io/dgtlmoon/changedetection.io
          tags: |
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=semver,pattern={{version}},value=${{ github.event.release.tag_name }}
            type=semver,pattern={{major}}.{{minor}},value=${{ github.event.release.tag_name }}
            type=semver,pattern={{major}},value=${{ github.event.release.tag_name }}
            type=raw,value=latest

      - name: Build and push :tag
        id: docker_build_tag_release
@@ -125,7 +141,7 @@ jobs:
          file: ./Dockerfile
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v8,linux/arm64/v8
          platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v8
          cache-from: type=gha
          cache-to: type=gha,mode=max
          # Looks like this was disabled
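Dropping linux/arm64/v8 removes a duplicate rather than a target, since buildx resolves arm64/v8 to the same platform as linux/arm64. A rough local equivalent of the trimmed multi-arch build (a sketch; assumes buildx and QEMU binfmt handlers are installed):

    docker buildx create --use 2>/dev/null || true
    docker buildx build \
      --platform linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v8 \
      -t changedetection.io:dev .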
41  .github/workflows/pypi-release.yml  (vendored)
@@ -7,9 +7,9 @@ jobs:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4
      - uses: actions/checkout@v5
      - name: Set up Python
        uses: actions/setup-python@v5
        uses: actions/setup-python@v6
        with:
          python-version: "3.11"
      - name: Install pypa/build
@@ -21,35 +21,60 @@ jobs:
      - name: Build a binary wheel and a source tarball
        run: python3 -m build
      - name: Store the distribution packages
        uses: actions/upload-artifact@v4
        uses: actions/upload-artifact@v5
        with:
          name: python-package-distributions
          path: dist/


  test-pypi-package:
    name: Test the built 📦 package works basically.
    name: Test the built package works basically.
    runs-on: ubuntu-latest
    needs:
      - build
    steps:
      - name: Download all the dists
        uses: actions/download-artifact@v4
        uses: actions/download-artifact@v6
        with:
          name: python-package-distributions
          path: dist/
      - name: Set up Python 3.11
        uses: actions/setup-python@v5
        uses: actions/setup-python@v6
        with:
          python-version: '3.11'

      - name: Test that the basic pip built package runs without error
        run: |
          set -ex
          pip3 install dist/changedetection.io*.whl
          ls -alR

          # Install the first wheel found in dist/
          WHEEL=$(find dist -type f -name "*.whl" -print -quit)
          echo Installing $WHEEL
          python3 -m pip install --upgrade pip
          python3 -m pip install "$WHEEL"
          changedetection.io -d /tmp -p 10000 &

          sleep 3
          curl --retry-connrefused --retry 6 http://127.0.0.1:10000/static/styles/pure-min.css >/dev/null
          curl --retry-connrefused --retry 6 http://127.0.0.1:10000/ >/dev/null

          # --- API test ---
          # This also means that the docs/api-spec.yml was shipped and could be read
          test -f /tmp/url-watches.json
          API_KEY=$(jq -r '.. | .api_access_token? // empty' /tmp/url-watches.json)
          echo Test API KEY is $API_KEY
          curl -X POST "http://127.0.0.1:10000/api/v1/watch" \
            -H "x-api-key: ${API_KEY}" \
            -H "Content-Type: application/json" \
            --show-error --fail \
            --retry 6 --retry-delay 1 --retry-connrefused \
            -d '{
              "url": "https://example.com",
              "title": "Example Site Monitor",
              "time_between_check": { "hours": 1 }
            }'

          killall changedetection.io


@@ -68,7 +93,7 @@ jobs:

    steps:
      - name: Download all the dists
        uses: actions/download-artifact@v4
        uses: actions/download-artifact@v6
        with:
          name: python-package-distributions
          path: dist/
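The API-key lookup in that step leans on jq's recursive descent and is worth unpacking (a sketch over the datastore file the test just created):

    # '..' visits every value in the JSON document, '.api_access_token?' picks
    # that field wherever it appears (the '?' suppresses errors on non-objects),
    # and '// empty' drops the misses, leaving just the token.
    API_KEY=$(jq -r '.. | .api_access_token? // empty' /tmp/url-watches.json)
    echo "$API_KEY"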
52  .github/workflows/test-container-build.yml  (vendored)
@@ -23,15 +23,41 @@ on:
# Changes to requirements.txt packages and Dockerfile may or may not always be compatible with arm etc, so worth testing
# @todo: some kind of path filter for requirements.txt and Dockerfile
jobs:
  test-container-build:
  builder:
    name: Build ${{ matrix.platform }} (${{ matrix.dockerfile == './Dockerfile' && 'main' || 'alpine' }})
    runs-on: ubuntu-latest
    strategy:
      matrix:
        include:
          # Main Dockerfile platforms
          - platform: linux/amd64
            dockerfile: ./Dockerfile
          - platform: linux/arm64
            dockerfile: ./Dockerfile
          - platform: linux/arm/v7
            dockerfile: ./Dockerfile
          - platform: linux/arm/v8
            dockerfile: ./Dockerfile
          # Alpine Dockerfile platforms (musl via alpine check)
          - platform: linux/amd64
            dockerfile: ./.github/test/Dockerfile-alpine
          - platform: linux/arm64
            dockerfile: ./.github/test/Dockerfile-alpine
    steps:
      - uses: actions/checkout@v4
      - uses: actions/checkout@v5
      - name: Set up Python 3.11
        uses: actions/setup-python@v5
        uses: actions/setup-python@v6
        with:
          python-version: 3.11

      - name: Cache pip packages
        uses: actions/cache@v4
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }}
          restore-keys: |
            ${{ runner.os }}-pip-

      # Just test that the build works, some libraries won't compile on ARM/rPi etc
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
@@ -47,24 +73,14 @@ jobs:
          version: latest
          driver-opts: image=moby/buildkit:master

      # https://github.com/dgtlmoon/changedetection.io/pull/1067
      # Check we can still build under alpine/musl
      - name: Test that the docker containers can build (musl via alpine check)
        id: docker_build_musl
        uses: docker/build-push-action@v6
        with:
          context: ./
          file: ./.github/test/Dockerfile-alpine
          platforms: linux/amd64,linux/arm64

      - name: Test that the docker containers can build
      - name: Test that the docker containers can build (${{ matrix.platform }} - ${{ matrix.dockerfile }})
        id: docker_build
        uses: docker/build-push-action@v6
        # https://github.com/docker/build-push-action#customizing
        with:
          context: ./
          file: ./Dockerfile
          platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v8,linux/arm64/v8
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache
          file: ${{ matrix.dockerfile }}
          platforms: ${{ matrix.platform }}
          cache-from: type=gha
          cache-to: type=gha,mode=max
29  .github/workflows/test-only.yml  (vendored)
@@ -7,16 +7,22 @@ jobs:
  lint-code:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Lint with flake8
      - uses: actions/checkout@v5
      - name: Lint with Ruff
        run: |
          pip3 install flake8
          # stop the build if there are Python syntax errors or undefined names
          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
          # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
          flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
          pip install ruff
          # Check for syntax errors and undefined names
          ruff check . --select E9,F63,F7,F82
          # Complete check with errors treated as warnings
          ruff check . --exit-zero
      - name: Validate OpenAPI spec
        run: |
          pip install openapi-spec-validator
          python3 -c "from openapi_spec_validator import validate_spec; import yaml; validate_spec(yaml.safe_load(open('docs/api-spec.yaml')))"

  test-application-3-10:
    # Only run on push to master (including PR merges)
    if: github.event_name == 'push' && github.ref == 'refs/heads/master'
    needs: lint-code
    uses: ./.github/workflows/test-stack-reusable-workflow.yml
    with:
@@ -24,13 +30,15 @@ jobs:


  test-application-3-11:
    # Always run
    needs: lint-code
    uses: ./.github/workflows/test-stack-reusable-workflow.yml
    with:
      python-version: '3.11'
      skip-pypuppeteer: true

  test-application-3-12:
    # Only run on push to master (including PR merges)
    if: github.event_name == 'push' && github.ref == 'refs/heads/master'
    needs: lint-code
    uses: ./.github/workflows/test-stack-reusable-workflow.yml
    with:
@@ -38,9 +46,10 @@ jobs:
      skip-pypuppeteer: true

  test-application-3-13:
    # Only run on push to master (including PR merges)
    if: github.event_name == 'push' && github.ref == 'refs/heads/master'
    needs: lint-code
    uses: ./.github/workflows/test-stack-reusable-workflow.yml
    with:
      python-version: '3.13'
      skip-pypuppeteer: true

      skip-pypuppeteer: true
461  .github/workflows/test-stack-reusable-workflow.yml  (vendored)
@@ -7,7 +7,7 @@ on:
      description: 'Python version to use'
      required: true
      type: string
      default: '3.10'
      default: '3.11'
    skip-pypuppeteer:
      description: 'Skip PyPuppeteer (not supported in 3.11/3.12)'
      required: false
@@ -15,138 +15,294 @@
    default: false

jobs:
  test-application:
  # Build the Docker image once and share it with all test jobs
  build:
    runs-on: ubuntu-latest
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v4
      - uses: actions/checkout@v5

      # Mainly just for link/flake8
      - name: Set up Python ${{ env.PYTHON_VERSION }}
        uses: actions/setup-python@v5
        uses: actions/setup-python@v6
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Cache pip packages
        uses: actions/cache@v4
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-py${{ env.PYTHON_VERSION }}-${{ hashFiles('requirements.txt') }}
          restore-keys: |
            ${{ runner.os }}-pip-py${{ env.PYTHON_VERSION }}-
            ${{ runner.os }}-pip-

      - name: Build changedetection.io container for testing under Python ${{ env.PYTHON_VERSION }}
        run: |
          echo "---- Building for Python ${{ env.PYTHON_VERSION }} -----"
          # Build a changedetection.io container and start testing inside
          docker build --build-arg PYTHON_VERSION=${{ env.PYTHON_VERSION }} --build-arg LOGGER_LEVEL=TRACE -t test-changedetectionio .
          # Debug info
          docker run test-changedetectionio bash -c 'pip list'
          docker run test-changedetectionio bash -c 'pip list'

      - name: We should be Python ${{ env.PYTHON_VERSION }} ...
        run: |
          docker run test-changedetectionio bash -c 'python3 --version'

      - name: Spin up ancillary testable services
        run: |

          docker network create changedet-network

          # Selenium
          docker run --network changedet-network -d --hostname selenium -p 4444:4444 --rm --shm-size="2g" selenium/standalone-chrome:4

          # SocketPuppetBrowser + Extra for custom browser test
          docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser --hostname sockpuppetbrowser --rm -p 3000:3000 dgtlmoon/sockpuppetbrowser:latest
          docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser-custom-url --hostname sockpuppetbrowser-custom-url -p 3001:3000 --rm dgtlmoon/sockpuppetbrowser:latest
          docker run test-changedetectionio bash -c 'python3 --version'

      - name: Spin up ancillary SMTP+Echo message test server
      - name: Save Docker image
        run: |
          # Debug SMTP server/echo message back server
          docker run --network changedet-network -d -p 11025:11025 -p 11080:11080 --hostname mailserver test-changedetectionio bash -c 'pip3 install aiosmtpd && python changedetectionio/tests/smtp/smtp-test-server.py'
          docker ps
          docker save test-changedetectionio -o /tmp/test-changedetectionio.tar

      - name: Show docker container state and other debug info
      - name: Upload Docker image artifact
        uses: actions/upload-artifact@v5
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp/test-changedetectionio.tar
          retention-days: 1

  # Unit tests (lightweight, no ancillary services needed)
  unit-tests:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 10
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v5

      - name: Download Docker image artifact
        uses: actions/download-artifact@v5
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp

      - name: Load Docker image
        run: |
          set -x
          echo "Running processes in docker..."
          docker ps
          docker load -i /tmp/test-changedetectionio.tar

      - name: Run Unit Tests
        run: |
          # Unit tests
          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_notification_diff'
          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_watch_model'
          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_jinja2_security'
          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_semver'
          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_notification_diff'
          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_watch_model'
          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_jinja2_security'
          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_semver'

      - name: Test built container with Pytest (generally as requests/plaintext fetching)
  # Basic pytest tests with ancillary services
  basic-tests:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 25
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v5

      - name: Download Docker image artifact
        uses: actions/download-artifact@v5
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp

      - name: Load Docker image
        run: |
          # All tests
          echo "run test with pytest"
          # The default pytest logger_level is TRACE
          # To change logger_level for pytest(test/conftest.py),
          # append the docker option. e.g. '-e LOGGER_LEVEL=DEBUG'
          docker run --name test-cdio-basic-tests --network changedet-network test-changedetectionio bash -c 'cd changedetectionio && ./run_basic_tests.sh'
          docker load -i /tmp/test-changedetectionio.tar

      # PLAYWRIGHT/NODE-> CDP
      - name: Playwright and SocketPuppetBrowser - Specific tests in built container
      - name: Test built container with Pytest
        run: |
          # Playwright via Sockpuppetbrowser fetch
          # tests/visualselector/test_fetch_data.py will do browser steps
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_content.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_errorhandling.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/visualselector/test_fetch_data.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_custom_js_before_content.py'
          docker network inspect changedet-network >/dev/null 2>&1 || docker network create changedet-network
          docker run --name test-cdio-basic-tests --network changedet-network test-changedetectionio bash -c 'cd changedetectionio && ./run_basic_tests.sh'

      - name: Extract memory report and logs
        if: always()
        uses: ./.github/actions/extract-memory-report
        with:
          container-name: test-cdio-basic-tests
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Playwright and SocketPuppetBrowser - Headers and requests
        run: |
          # Settings headers playwright tests - Call back in from Sockpuppetbrowser, check headers
          docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000?dumpio=true" --network changedet-network test-changedetectionio bash -c 'find .; cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py; pwd;find .'
      - name: Store test artifacts
        if: always()
        uses: actions/upload-artifact@v5
        with:
          name: test-cdio-basic-tests-output-py${{ env.PYTHON_VERSION }}
          path: output-logs

      - name: Playwright and SocketPuppetBrowser - Restock detection
        run: |
          # restock detection via playwright - added name=changedet here so that playwright and sockpuppetbrowser can connect to it
          docker run --rm --name "changedet" -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-port=5004 --live-server-host=0.0.0.0 tests/restock/test_restock.py'
  # Playwright tests
  playwright-tests:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 10
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v5

      # STRAIGHT TO CDP
      - name: Pyppeteer and SocketPuppetBrowser - Specific tests in built container
        if: ${{ inputs.skip-pypuppeteer == false }}
      - name: Download Docker image artifact
        uses: actions/download-artifact@v5
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp

      - name: Load Docker image
        run: |
          # Playwright via Sockpuppetbrowser fetch
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_content.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_errorhandling.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/visualselector/test_fetch_data.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_custom_js_before_content.py'
          docker load -i /tmp/test-changedetectionio.tar

      - name: Pyppeteer and SocketPuppetBrowser - Headers and requests checks
        if: ${{ inputs.skip-pypuppeteer == false }}
      - name: Spin up ancillary services
        run: |
          # Settings headers playwright tests - Call back in from Sockpuppetbrowser, check headers
          docker run --name "changedet" --hostname changedet --rm -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000?dumpio=true" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'
          docker network create changedet-network
          docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser --hostname sockpuppetbrowser --rm -p 3000:3000 dgtlmoon/sockpuppetbrowser:latest
          docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser-custom-url --hostname sockpuppetbrowser-custom-url -p 3001:3000 --rm dgtlmoon/sockpuppetbrowser:latest

      - name: Pyppeteer and SocketPuppetBrowser - Restock detection
        if: ${{ inputs.skip-pypuppeteer == false }}
        run: |
          # restock detection via playwright - added name=changedet here so that playwright and sockpuppetbrowser can connect to it
          docker run --rm --name "changedet" -e "FLASK_SERVER_NAME=changedet" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-port=5004 --live-server-host=0.0.0.0 tests/restock/test_restock.py'
      - name: Playwright - Specific tests in built container
        run: |
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest -vv --capture=tee-sys --showlocals --tb=long --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_content.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest -vv --capture=tee-sys --showlocals --tb=long --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_errorhandling.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest -vv --capture=tee-sys --showlocals --tb=long --live-server-host=0.0.0.0 --live-server-port=5004 tests/visualselector/test_fetch_data.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest -vv --capture=tee-sys --showlocals --tb=long --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_custom_js_before_content.py'

      - name: Playwright - Headers and requests
        run: |
          docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000?dumpio=true" --network changedet-network test-changedetectionio bash -c 'find .; cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py; pwd;find .'

      - name: Playwright - Restock detection
        run: |
          docker run --rm --name "changedet" -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-port=5004 --live-server-host=0.0.0.0 tests/restock/test_restock.py'

  # Pyppeteer tests
  pyppeteer-tests:
    runs-on: ubuntu-latest
    needs: build
    if: ${{ inputs.skip-pypuppeteer == false }}
    timeout-minutes: 10
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v5

      - name: Download Docker image artifact
        uses: actions/download-artifact@v5
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp

      - name: Load Docker image
        run: |
          docker load -i /tmp/test-changedetectionio.tar

      - name: Spin up ancillary services
        run: |
          docker network create changedet-network
          docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser --hostname sockpuppetbrowser --rm -p 3000:3000 dgtlmoon/sockpuppetbrowser:latest

      - name: Pyppeteer - Specific tests in built container
        run: |
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_content.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_errorhandling.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/visualselector/test_fetch_data.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_custom_js_before_content.py'

      - name: Pyppeteer - Headers and requests checks
        run: |
          docker run --name "changedet" --hostname changedet --rm -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000?dumpio=true" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'

      - name: Pyppeteer - Restock detection
        run: |
          docker run --rm --name "changedet" -e "FLASK_SERVER_NAME=changedet" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-port=5004 --live-server-host=0.0.0.0 tests/restock/test_restock.py'

  # Selenium tests
  selenium-tests:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 10
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v5

      - name: Download Docker image artifact
        uses: actions/download-artifact@v5
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp

      - name: Load Docker image
        run: |
          docker load -i /tmp/test-changedetectionio.tar

      - name: Spin up ancillary services
        run: |
          docker network create changedet-network
          docker run --network changedet-network -d --hostname selenium -p 4444:4444 --rm --shm-size="2g" selenium/standalone-chrome:4
          sleep 3

      - name: Specific tests for headers and requests checks with Selenium
        run: |

          docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "WEBDRIVER_URL=http://selenium:4444/wd/hub" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'

      # SELENIUM
      - name: Specific tests in built container for Selenium
        run: |
          # Selenium fetch
          docker run --rm -e "WEBDRIVER_URL=http://selenium:4444/wd/hub" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/fetchers/test_content.py && pytest tests/test_errorhandling.py'
          docker run --rm -e "WEBDRIVER_URL=http://selenium:4444/wd/hub" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/fetchers/test_content.py && pytest tests/test_errorhandling.py'

      - name: Specific tests in built container for headers and requests checks with Selenium

  # SMTP tests
  smtp-tests:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 10
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v5

      - name: Download Docker image artifact
        uses: actions/download-artifact@v5
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp

      - name: Load Docker image
        run: |
          docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "WEBDRIVER_URL=http://selenium:4444/wd/hub" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'
          docker load -i /tmp/test-changedetectionio.tar

      - name: Spin up SMTP test server
        run: |
          docker network create changedet-network
          docker run --network changedet-network -d -p 11025:11025 -p 11080:11080 --hostname mailserver test-changedetectionio bash -c 'pip3 install aiosmtpd && python changedetectionio/tests/smtp/smtp-test-server.py'

      # OTHER STUFF
      - name: Test SMTP notification mime types
        run: |
          # SMTP content types - needs the 'Debug SMTP server/echo message back server' container from above
          # "mailserver" hostname defined above
          docker run --rm --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/smtp/test_notification_smtp.py'

      # @todo Add a test via playwright/puppeteer
      # squid with auth is tested in run_proxy_tests.sh -> tests/proxy_list/test_select_custom_proxy.py
      - name: Test proxy squid style interaction
  # Proxy tests
  proxy-tests:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 10
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v5

      - name: Download Docker image artifact
        uses: actions/download-artifact@v5
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp

      - name: Load Docker image
        run: |
          docker load -i /tmp/test-changedetectionio.tar

      - name: Spin up services
        run: |
          docker network create changedet-network
          docker run --network changedet-network -d --hostname selenium -p 4444:4444 --rm --shm-size="2g" selenium/standalone-chrome:4
          docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser --hostname sockpuppetbrowser --rm -p 3000:3000 dgtlmoon/sockpuppetbrowser:latest
          docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser-custom-url --hostname sockpuppetbrowser-custom-url -p 3001:3000 --rm dgtlmoon/sockpuppetbrowser:latest

      - name: Test proxy Squid style interaction
        run: |
          cd changedetectionio
          ./run_proxy_tests.sh
          docker ps
          cd ..

      - name: Test proxy SOCKS5 style interaction
@@ -155,87 +311,132 @@ jobs:
          ./run_socks_proxy_tests.sh
          cd ..

  # Custom browser URL tests
  custom-browser-tests:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 10
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v5

      - name: Download Docker image artifact
        uses: actions/download-artifact@v5
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp

      - name: Load Docker image
        run: |
          docker load -i /tmp/test-changedetectionio.tar

      - name: Spin up ancillary services
        run: |
          docker network create changedet-network
          docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser --hostname sockpuppetbrowser --rm -p 3000:3000 dgtlmoon/sockpuppetbrowser:latest
          docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser-custom-url --hostname sockpuppetbrowser-custom-url -p 3001:3000 --rm dgtlmoon/sockpuppetbrowser:latest

      - name: Test custom browser URL
        run: |
          cd changedetectionio
          ./run_custom_browser_url_tests.sh
          cd ..

      - name: Test changedetection.io container starts+runs basically without error
  # Container startup tests
  container-tests:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 10
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v5

      - name: Download Docker image artifact
        uses: actions/download-artifact@v5
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp

      - name: Load Docker image
        run: |
          docker run --name test-changedetectionio -p 5556:5000 -d test-changedetectionio
          docker load -i /tmp/test-changedetectionio.tar

      - name: Test container starts+runs basically without error
        run: |
          docker run --name test-changedetectionio -p 5556:5000 -d test-changedetectionio
          sleep 3
          # Should return 0 (no error) when grep finds it
          curl --retry-connrefused --retry 6 -s http://localhost:5556 |grep -q checkbox-uuid

          # and IPv6
          curl --retry-connrefused --retry 6 -s -g -6 "http://[::1]:5556"|grep -q checkbox-uuid

          # Check whether TRACE log is enabled.
          # Also, check whether TRACE came from STDERR
          docker logs test-changedetectionio 2>&1 1>/dev/null | grep 'TRACE log is enabled' || exit 1
          # Check whether DEBUG came from STDOUT
          curl --retry-connrefused --retry 6 -s http://localhost:5556 |grep -q checkbox-uuid
          curl --retry-connrefused --retry 6 -s -g -6 "http://[::1]:5556"|grep -q checkbox-uuid
          docker logs test-changedetectionio 2>/dev/null | grep 'TRACE log is enabled' || exit 1
          docker logs test-changedetectionio 2>/dev/null | grep 'DEBUG' || exit 1

          docker kill test-changedetectionio

      - name: Test changedetection.io SIGTERM and SIGINT signal shutdown
      - name: Test HTTPS SSL mode
        run: |
          openssl req -x509 -newkey rsa:4096 -keyout privkey.pem -out cert.pem -days 365 -nodes -subj "/CN=localhost"
          docker run --name test-changedetectionio-ssl --rm -e SSL_CERT_FILE=cert.pem -e SSL_PRIVKEY_FILE=privkey.pem -p 5000:5000 -v ./cert.pem:/app/cert.pem -v ./privkey.pem:/app/privkey.pem -d test-changedetectionio
          sleep 3
          curl --retry-connrefused --retry 6 -k https://localhost:5000 -v|grep -q checkbox-uuid
          docker kill test-changedetectionio-ssl

      - name: Test IPv6 Mode
        run: |
          docker run --name test-changedetectionio-ipv6 --rm -p 5000:5000 -e LISTEN_HOST=:: -d test-changedetectionio
          sleep 3
          curl --retry-connrefused --retry 6 http://[::1]:5000 -v|grep -q checkbox-uuid
          docker kill test-changedetectionio-ipv6

  # Signal tests
  signal-tests:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 10
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v5

      - name: Download Docker image artifact
        uses: actions/download-artifact@v5
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp

      - name: Load Docker image
        run: |
          docker load -i /tmp/test-changedetectionio.tar

      - name: Test SIGTERM and SIGINT signal shutdown
        run: |

          echo SIGINT Shutdown request test
          docker run --name sig-test -d test-changedetectionio
          sleep 3
          echo ">>> Sending SIGINT to sig-test container"
          docker kill --signal=SIGINT sig-test
          sleep 3
          # invert the check (it should be not 0/not running)
          docker ps
          # check signal catch(STDERR) log. Because of
          # changedetectionio/__init__.py: logger.add(sys.stderr, level=logger_level)
          docker logs sig-test 2>&1 | grep 'Shutdown: Got Signal - SIGINT' || exit 1
          test -z "`docker ps|grep sig-test`"
          if [ $? -ne 0 ]
          then
          if [ $? -ne 0 ]; then
            echo "Looks like container was running when it shouldn't be"
            docker ps
            exit 1
          fi

          # @todo - scan the container log to see the right "graceful shutdown" text exists
          docker rm sig-test


          echo SIGTERM Shutdown request test
          docker run --name sig-test -d test-changedetectionio
          sleep 3
          echo ">>> Sending SIGTERM to sig-test container"
          docker kill --signal=SIGTERM sig-test
          sleep 3
          # invert the check (it should be not 0/not running)
          docker ps
          # check signal catch(STDERR) log. Because of
          # changedetectionio/__init__.py: logger.add(sys.stderr, level=logger_level)
          docker logs sig-test 2>&1 | grep 'Shutdown: Got Signal - SIGTERM' || exit 1
          test -z "`docker ps|grep sig-test`"
          if [ $? -ne 0 ]
          then
          if [ $? -ne 0 ]; then
            echo "Looks like container was running when it shouldn't be"
            docker ps
            exit 1
          fi

          # @todo - scan the container log to see the right "graceful shutdown" text exists
          docker rm sig-test

      - name: Dump container log
        if: always()
        run: |
          mkdir output-logs
          docker logs test-cdio-basic-tests > output-logs/test-cdio-basic-tests-stdout-${{ env.PYTHON_VERSION }}.txt
          docker logs test-cdio-basic-tests 2> output-logs/test-cdio-basic-tests-stderr-${{ env.PYTHON_VERSION }}.txt

      - name: Store everything including test-datastore
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: test-cdio-basic-tests-output-py${{ env.PYTHON_VERSION }}
          path: .
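The restructure hinges on building the image once and fanning it out to every test job; stripped of the CI plumbing, the hand-off is just docker save and docker load with an artifact in between (a sketch):

    docker build -t test-changedetectionio .
    docker save test-changedetectionio -o /tmp/test-changedetectionio.tar
    # upload-artifact / download-artifact move the tarball between jobs in CI
    docker load -i /tmp/test-changedetectionio.tar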
1  .gitignore  (vendored)
@@ -16,6 +16,7 @@ dist/
.env
.venv/
venv/
.python-version

# IDEs
.idea
9  .pre-commit-config.yaml  (new file)
@@ -0,0 +1,9 @@
repos:
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.11.2
    hooks:
      # Lint (and apply safe fixes)
      - id: ruff
        args: [--fix]
      # Format
      - id: ruff-format
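To get the same checks locally before CI sees a commit, the hooks can be wired into git (a sketch; assumes pre-commit is installed via pip):

    pip install pre-commit
    pre-commit install          # run ruff + ruff-format on every commit
    pre-commit run --all-files  # one-off pass over the whole tree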
48  .ruff.toml  (new file)
@@ -0,0 +1,48 @@
# Minimum supported version
target-version = "py310"

# Formatting options
line-length = 100
indent-width = 4

exclude = [
    "__pycache__",
    ".eggs",
    ".git",
    ".tox",
    ".venv",
    "*.egg-info",
    "*.pyc",
]

[lint]
# https://docs.astral.sh/ruff/rules/
select = [
    "B",  # flake8-bugbear
    "B9",
    "C",
    "E",  # pycodestyle
    "F",  # Pyflakes
    "I",  # isort
    "N",  # pep8-naming
    "UP", # pyupgrade
    "W",  # pycodestyle
]
ignore = [
    "B007", # unused-loop-control-variable
    "B909", # loop-iterator-mutation
    "E203", # whitespace-before-punctuation
    "E266", # multiple-leading-hashes-for-block-comment
    "E501", # line-too-long
    "F403", # undefined-local-with-import-star
    "N802", # invalid-function-name
    "N806", # non-lowercase-variable-in-function
    "N815", # mixed-case-variable-in-class-scope
]

[lint.mccabe]
max-complexity = 12

[format]
indent-style = "space"
quote-style = "preserve"
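A quick way to exercise this config from the repository root (a sketch; both commands pick up .ruff.toml automatically):

    ruff check .          # lint with the select/ignore lists above
    ruff format --diff .  # show what the formatter would change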
49  Dockerfile
@@ -1,14 +1,10 @@
# pip dependencies install stage

# @NOTE! I would love to move to 3.11 but it breaks the async handler in changedetectionio/content_fetchers/puppeteer.py
# If you know how to fix it, please do! and test it for both 3.10 and 3.11

ARG PYTHON_VERSION=3.11

FROM python:${PYTHON_VERSION}-slim-bookworm AS builder

# See `cryptography` pin comment in requirements.txt
ARG CRYPTOGRAPHY_DONT_BUILD_RUST=1

RUN apt-get update && apt-get install -y --no-install-recommends \
    g++ \
@@ -19,6 +15,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
    libssl-dev \
    libxslt-dev \
    make \
    patch \
    pkg-config \
    zlib1g-dev

RUN mkdir /install
@@ -26,14 +24,37 @@ WORKDIR /install

COPY requirements.txt /requirements.txt

# --extra-index-url https://www.piwheels.org/simple is for cryptography module to be prebuilt (or rustc etc needs to be installed)
RUN pip install --extra-index-url https://www.piwheels.org/simple --target=/dependencies -r /requirements.txt
# Use cache mounts and multiple wheel sources for faster ARM builds
ENV PIP_CACHE_DIR=/tmp/pip-cache
# Help Rust find OpenSSL for cryptography package compilation on ARM
ENV PKG_CONFIG_PATH="/usr/lib/pkgconfig:/usr/lib/arm-linux-gnueabihf/pkgconfig:/usr/lib/aarch64-linux-gnu/pkgconfig"
ENV PKG_CONFIG_ALLOW_SYSTEM_CFLAGS=1
ENV OPENSSL_DIR="/usr"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
ENV OPENSSL_INCLUDE_DIR="/usr/include/openssl"
# Additional environment variables for cryptography Rust build
ENV CRYPTOGRAPHY_DONT_BUILD_RUST=1
RUN --mount=type=cache,id=pip,sharing=locked,target=/tmp/pip-cache \
    pip install \
    --prefer-binary \
    --extra-index-url https://www.piwheels.org/simple \
    --extra-index-url https://pypi.anaconda.org/ARM-software/simple \
    --cache-dir=/tmp/pip-cache \
    --target=/dependencies \
    -r /requirements.txt


# Playwright is an alternative to Selenium
# Excluded this package from requirements.txt to prevent arm/v6 and arm/v7 builds from failing
# https://github.com/dgtlmoon/changedetection.io/pull/1067 also musl/alpine (not supported)
RUN pip install --target=/dependencies playwright~=1.48.0 \
    || echo "WARN: Failed to install Playwright. The application can still run, but the Playwright option will be disabled."
RUN --mount=type=cache,id=pip,sharing=locked,target=/tmp/pip-cache \
    pip install \
    --prefer-binary \
    --cache-dir=/tmp/pip-cache \
    --target=/dependencies \
    playwright~=1.48.0 \
    || echo "WARN: Failed to install Playwright. The application can still run, but the Playwright option will be disabled."


# Final image stage
FROM python:${PYTHON_VERSION}-slim-bookworm
@@ -45,6 +66,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
    locales \
    # For pdftohtml
    poppler-utils \
    # favicon type detection and other uses
    file \
    zlib1g \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

@@ -65,13 +88,21 @@ EXPOSE 5000

# The actual flask app module
COPY changedetectionio /app/changedetectionio

# Also for OpenAPI validation wrapper - needs the YML
RUN [ ! -d "/app/docs" ] && mkdir /app/docs
COPY docs/api-spec.yaml /app/docs/api-spec.yaml

# Starting wrapper
COPY changedetection.py /app/changedetection.py

# Github Action test purpose(test-only.yml).
# On production, it is effectively LOGGER_LEVEL=''.
ARG LOGGER_LEVEL=''
ENV LOGGER_LEVEL "$LOGGER_LEVEL"
ENV LOGGER_LEVEL="$LOGGER_LEVEL"

# Default
ENV LC_ALL=en_US.UTF-8

WORKDIR /app
CMD ["python", "./changedetection.py", "-d", "/datastore"]
LICENSE (2 changes)
@@ -186,7 +186,7 @@
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]
   Copyright 2025 Web Technologies s.r.o.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
MANIFEST.in
@@ -1,12 +1,17 @@
recursive-include changedetectionio/api *
recursive-include changedetectionio/apprise_plugin *
include docs/api-spec.yaml
recursive-include changedetectionio/blueprint *
recursive-include changedetectionio/conditions *
recursive-include changedetectionio/content_fetchers *
recursive-include changedetectionio/jinja2_custom *
recursive-include changedetectionio/model *
recursive-include changedetectionio/notification *
recursive-include changedetectionio/processors *
recursive-include changedetectionio/realtime *
recursive-include changedetectionio/static *
recursive-include changedetectionio/templates *
recursive-include changedetectionio/tests *
recursive-include changedetectionio/widgets *
prune changedetectionio/static/package-lock.json
prune changedetectionio/static/styles/node_modules
prune changedetectionio/static/styles/package-lock.json
@@ -1,11 +1,21 @@
## Web Site Change Detection, Monitoring and Notification.
# Monitor website changes

Live your data-life pro-actively, track website content changes and receive notifications via Discord, Email, Slack, Telegram and 70+ more
Detect WebPage Changes Automatically — Monitor Web Page Changes in Real Time

Monitor websites for updates — get notified via Discord, Email, Slack, Telegram, Webhook and many more.

Detect web page content changes and get instant alerts.


[Changedetection.io is the best tool to monitor web-pages for changes](https://changedetection.io) Track website content changes and receive notifications via Discord, Email, Slack, Telegram and 90+ more

Ideal for monitoring price changes, content edits, conditional changes and more.

[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot.png" style="max-width:100%;" alt="Self-hosted web page change monitoring, list of websites with changes" title="Self-hosted web page change monitoring, list of websites with changes" />](https://changedetection.io)


[**Don't have time? Let us host it for you! try our extremely affordable subscription use our proxies and support!**](https://changedetection.io)
[**Don't have time? Try our extremely affordable subscription use our proxies and support!**](https://changedetection.io)



### Target specific parts of the webpage using the Visual Selector tool.
README.md (28 changes)
@@ -1,11 +1,13 @@
## Web Site Change Detection, Restock monitoring and notifications.
# Detect Website Changes Automatically — Monitor Web Page Changes in Real Time

**_Detect website content changes and perform meaningful actions - trigger notifications via Discord, Email, Slack, Telegram, API calls and many more._**
Monitor websites for updates — get notified via Discord, Email, Slack, Telegram, Webhook and many more.

_Live your data-life pro-actively._
**Detect web page content changes and get instant alerts.**

Ideal for monitoring price changes, content edits, conditional changes and more.

[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot.png" style="max-width:100%;" alt="Self-hosted web site page change monitoring" title="Self-hosted web site page change monitoring" />](https://changedetection.io?src=github)
[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot.png" style="max-width:100%;" alt="Web site page change monitoring" title="Web site page change monitoring" />](https://changedetection.io?src=github)

[![Release Version][release-shield]][release-link] [![Docker Pulls][docker-pulls]][docker-link] [![License][license-shield]](LICENSE.md)

@@ -13,6 +15,7 @@ _Live your data-life pro-actively._

[**Get started with website page change monitoring straight away. Don't have time? Try our $8.99/month subscription, use our proxies and support!**](https://changedetection.io) , _half the price of other website change monitoring services!_

- Chrome browser included.
- Nothing to install, access via browser login after signup.
- Super fast, no registration needed setup.
@@ -89,7 +92,7 @@ _Need an actual Chrome runner with Javascript support? We support fetching via W
#### Key Features

- Lots of trigger filters, such as "Trigger on text", "Remove text by selector", "Ignore text", "Extract text", also using regular-expressions!
- Target elements with xPath(1.0) and CSS Selectors, Easily monitor complex JSON with JSONPath or jq
- Target elements with xPath 1 and xPath 2, CSS Selectors, Easily monitor complex JSON with JSONPath or jq
- Switch between fast non-JS and Chrome JS based "fetchers"
- Track changes in PDF files (Monitor text changed in the PDF, Also monitor PDF filesize and checksums)
- Easily specify how often a site should be checked
@@ -99,12 +102,16 @@ _Need an actual Chrome runner with Javascript support? We support fetching via W
- Configurable [proxy per watch](https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration)
- Send a screenshot with the notification when a change is detected in the web page

We [recommend and use Bright Data](https://brightdata.grsm.io/n0r16zf7eivq) global proxy services, Bright Data will match any first deposit up to $100 using our signup link.

[Oxylabs](https://oxylabs.go2cloud.org/SH2d) is also an excellent proxy provider and well worth using, they offer Residental, ISP, Rotating and many other proxy types to suit your project.
We [recommend and use Bright Data](https://brightdata.grsm.io/n0r16zf7eivq) global proxy services, Bright Data will match any first deposit up to $150 using our signup link.

Please :star: star :star: this project and help it grow! https://github.com/dgtlmoon/changedetection.io/

### Conditional web page changes

Easily [configure conditional actions](https://changedetection.io/tutorial/conditional-actions-web-page-changes), for example, only trigger when a price is above or below a preset amount, or [when a web page includes (or does not include) a keyword](https://changedetection.io/tutorial/how-monitor-keywords-any-website)

<img src="./docs/web-page-change-conditions.png" style="max-width:80%;" alt="Conditional web page changes" title="Conditional web page changes" />

### Schedule web page watches in any timezone, limit by day of week and time.

Easily set a re-check schedule, for example you could limit the web page change detection to only operate during business hours.
@@ -273,7 +280,10 @@ Excel import is recommended - that way you can better organise tags/groups of we

## API Support

Supports managing the website watch list [via our API](https://changedetection.io/docs/api_v1/index.html)
Full REST API for programmatic management of watches, tags, notifications and more.

- **[Interactive API Documentation](https://changedetection.io/docs/api_v1/index.html)** - Complete API reference with live testing
- **[OpenAPI Specification](docs/api-spec.yaml)** - Generate SDKs for any programming language

## Support us
changedetection.py
@@ -3,4 +3,6 @@
# Only exists for direct CLI usage

import changedetectionio
changedetectionio.main()

if __name__ == '__main__':
    changedetectionio.main()
changedetectionio/PLUGIN_README.md (new file, 98 lines)
@@ -0,0 +1,98 @@
# Creating Plugins for changedetection.io

This document describes how to create plugins for changedetection.io. Plugins can be used to extend the functionality of the application in various ways.

## Plugin Types

### UI Stats Tab Plugins

These plugins can add content to the Stats tab in the Edit page. This is useful for adding custom statistics or visualizations about a watch.

#### Creating a UI Stats Tab Plugin

1. Create a Python file in a directory that will be loaded by the plugin system.

2. Use the `global_hookimpl` decorator to implement the `ui_edit_stats_extras` hook:

```python
import pluggy
from loguru import logger

global_hookimpl = pluggy.HookimplMarker("changedetectionio")

@global_hookimpl
def ui_edit_stats_extras(watch):
    """Add custom content to the stats tab"""
    # Calculate or retrieve your stats
    my_stat = calculate_something(watch)

    # Return HTML content as a string
    html = f"""
    <div class="my-plugin-stats">
        <h4>My Plugin Statistics</h4>
        <p>My statistic: {my_stat}</p>
    </div>
    """
    return html
```

3. The HTML you return will be included in the Stats tab.

## Plugin Loading

Plugins can be loaded from:

1. Built-in plugin directories in the codebase
2. External packages using setuptools entry points

To add a new plugin directory, modify the `plugin_dirs` dictionary in `pluggy_interface.py`.
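
For option 2, an external package can expose its hook module through a setuptools entry point. The sketch below is an editor's illustration, not from this repository: the entry-point group name `changedetectionio` and all package/module names are assumptions, so check how `pluggy_interface.py` loads entry points before relying on it.

```python
# Hypothetical setup.py for an external plugin package.
# ASSUMPTION: the plugin manager loads the "changedetectionio" entry-point
# group (verify the group name used in pluggy_interface.py).
from setuptools import setup

setup(
    name="changedetectionio-wordcount-plugin",   # hypothetical package name
    version="0.1.0",
    py_modules=["wordcount_plugin"],             # module holding the @global_hookimpl functions
    entry_points={
        "changedetectionio": ["wordcount = wordcount_plugin"],
    },
)
```
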
## Example Plugin

Here's a simple example of a plugin that adds a word count statistic to the Stats tab:

```python
import pluggy
from loguru import logger

global_hookimpl = pluggy.HookimplMarker("changedetectionio")

def count_words_in_history(watch):
    """Count words in the latest snapshot"""
    try:
        if not watch.history.keys():
            return 0

        latest_key = list(watch.history.keys())[-1]
        latest_content = watch.get_history_snapshot(latest_key)
        return len(latest_content.split())
    except Exception as e:
        logger.error(f"Error counting words: {str(e)}")
        return 0

@global_hookimpl
def ui_edit_stats_extras(watch):
    """Add word count to the Stats tab"""
    word_count = count_words_in_history(watch)

    html = f"""
    <div class="word-count-stats">
        <h4>Content Analysis</h4>
        <table class="pure-table">
            <tbody>
                <tr>
                    <td>Word count (latest snapshot)</td>
                    <td>{word_count}</td>
                </tr>
            </tbody>
        </table>
    </div>
    """
    return html
```

## Testing Your Plugin

1. Place your plugin in one of the directories scanned by the plugin system
2. Restart changedetection.io
3. Go to the Edit page of a watch and check the Stats tab to see your content
changedetectionio/__init__.py
@@ -2,24 +2,23 @@

# Read more https://github.com/dgtlmoon/changedetection.io/wiki

__version__ = '0.49.3'
__version__ = '0.50.38'

from changedetectionio.strtobool import strtobool
from json.decoder import JSONDecodeError
import os
os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
import eventlet
import eventlet.wsgi
import getopt
import platform
import signal
import socket
import sys

# Eventlet completely removed - using threading mode for SocketIO
# This provides better Python 3.12+ compatibility and eliminates eventlet/asyncio conflicts
from changedetectionio import store
from changedetectionio.flask_app import changedetection_app
from loguru import logger


# Only global so we can access it in the signal handler
app = None
datastore = None
@@ -29,16 +28,44 @@ def get_version():

# Parent wrapper or OS sends us a SIGTERM/SIGINT, do everything required for a clean shutdown
def sigshutdown_handler(_signo, _stack_frame):
    global app
    global datastore
    name = signal.Signals(_signo).name
    logger.critical(f'Shutdown: Got Signal - {name} ({_signo}), Saving DB to disk and calling shutdown')
    datastore.sync_to_json()
    logger.success('Sync JSON to disk complete.')
    # This will throw a SystemExit exception, because eventlet.wsgi.server doesn't know how to deal with it.
    # Solution: move to gevent or other server in the future (#2014)
    datastore.stop_thread = True
    logger.critical(f'Shutdown: Got Signal - {name} ({_signo}), Fast shutdown initiated')

    # Set exit flag immediately to stop all loops
    app.config.exit.set()
    datastore.stop_thread = True

    # Shutdown workers and queues immediately
    try:
        from changedetectionio import worker_handler
        worker_handler.shutdown_workers()
    except Exception as e:
        logger.error(f"Error shutting down workers: {str(e)}")

    # Close janus queues properly
    try:
        from changedetectionio.flask_app import update_q, notification_q
        update_q.close()
        notification_q.close()
        logger.debug("Janus queues closed successfully")
    except Exception as e:
        logger.critical(f"CRITICAL: Failed to close janus queues: {e}")

    # Shutdown socketio server fast
    from changedetectionio.flask_app import socketio_server
    if socketio_server and hasattr(socketio_server, 'shutdown'):
        try:
            socketio_server.shutdown()
        except Exception as e:
            logger.error(f"Error shutting down Socket.IO server: {str(e)}")

    # Save data quickly
    try:
        datastore.sync_to_json()
        logger.success('Fast sync to disk complete.')
    except Exception as e:
        logger.error(f"Error syncing to disk: {str(e)}")

    sys.exit()

def main():
@@ -47,9 +74,8 @@ def main():

    datastore_path = None
    do_cleanup = False
    host = ''
    ipv6_enabled = False
    port = os.environ.get('PORT') or 5000
    host = os.environ.get("LISTEN_HOST", "0.0.0.0").strip()
    port = int(os.environ.get('PORT', 5000))
    ssl_mode = False

    # On Windows, create and use a default path.
@@ -90,10 +116,6 @@ def main():
        if opt == '-d':
            datastore_path = arg

        if opt == '-6':
            logger.success("Enabling IPv6 listen support")
            ipv6_enabled = True

        # Cleanup (remove text files that arent in the index)
        if opt == '-c':
            do_cleanup = True
@@ -105,10 +127,24 @@ def main():
        if opt == '-l':
            logger_level = int(arg) if arg.isdigit() else arg.upper()


    logger.success(f"changedetection.io version {get_version()} starting.")
    # Launch using SocketIO run method for proper integration (if enabled)
    ssl_cert_file = os.getenv("SSL_CERT_FILE", 'cert.pem')
    ssl_privkey_file = os.getenv("SSL_PRIVKEY_FILE", 'privkey.pem')
    if os.getenv("SSL_CERT_FILE") and os.getenv("SSL_PRIVKEY_FILE"):
        ssl_mode = True

    # SSL mode could have been set by -s too, therefor fallback to default values
    if ssl_mode:
        if not os.path.isfile(ssl_cert_file) or not os.path.isfile(ssl_privkey_file):
            logger.critical(f"Cannot start SSL/HTTPS mode, Please be sure that {ssl_cert_file}' and '{ssl_privkey_file}' exist in in {os.getcwd()}")
            os._exit(2)

    # Without this, a logger will be duplicated
    logger.remove()
    try:
        log_level_for_stdout = { 'DEBUG', 'SUCCESS' }
        log_level_for_stdout = { 'TRACE', 'DEBUG', 'INFO', 'SUCCESS' }
        logger.configure(handlers=[
            {"sink": sys.stdout, "level": logger_level,
             "filter" : lambda record: record['level'].name in log_level_for_stdout},
@@ -145,8 +181,26 @@ def main():

    app = changedetection_app(app_config, datastore)

    # Get the SocketIO instance from the Flask app (created in flask_app.py)
    from changedetectionio.flask_app import socketio_server
    global socketio
    socketio = socketio_server

    signal.signal(signal.SIGTERM, sigshutdown_handler)
    signal.signal(signal.SIGINT, sigshutdown_handler)

    # Custom signal handler for memory cleanup
    def sigusr_clean_handler(_signo, _stack_frame):
        from changedetectionio.gc_cleanup import memory_cleanup
        logger.info('SIGUSR1 received: Running memory cleanup')
        return memory_cleanup(app)

    # Register the SIGUSR1 signal handler
    # Only register the signal handler if running on Linux
    if platform.system() == "Linux":
        signal.signal(signal.SIGUSR1, sigusr_clean_handler)
    else:
        logger.info("SIGUSR1 handler only registered on Linux, skipped.")
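
Since the handler above is only registered on Linux, the cleanup path can be exercised from outside the process; a minimal sketch, assuming you know the server's PID (from a shell, `kill -USR1 <pid>` is equivalent):

```python
# Editor's sketch: trigger the SIGUSR1 memory-cleanup path from another process.
import os
import signal

server_pid = 12345  # hypothetical PID of the running changedetection.io process
os.kill(server_pid, signal.SIGUSR1)  # runs sigusr_clean_handler -> memory_cleanup(app)
```
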
    # Go into cleanup mode
@@ -156,10 +210,11 @@ def main():


    @app.context_processor
    def inject_version():
    def inject_template_globals():
        return dict(right_sticky="v{}".format(datastore.data['version_tag']),
                    new_version_available=app.config['NEW_VERSION_AVAILABLE'],
                    has_password=datastore.data['settings']['application']['password'] != False
                    has_password=datastore.data['settings']['application']['password'] != False,
                    socket_io_enabled=datastore.data['settings']['application']['ui'].get('socket_io_enabled', True)
                    )

    # Monitored websites will not receive a Referer header when a user clicks on an outgoing link.
@@ -183,15 +238,21 @@ def main():
    from werkzeug.middleware.proxy_fix import ProxyFix
    app.wsgi_app = ProxyFix(app.wsgi_app, x_prefix=1, x_host=1)

    s_type = socket.AF_INET6 if ipv6_enabled else socket.AF_INET

    if ssl_mode:
        # @todo finalise SSL config, but this should get you in the right direction if you need it.
        eventlet.wsgi.server(eventlet.wrap_ssl(eventlet.listen((host, port), s_type),
                                               certfile='cert.pem',
                                               keyfile='privkey.pem',
                                               server_side=True), app)

    # SocketIO instance is already initialized in flask_app.py
    if socketio_server:
        if ssl_mode:
            logger.success(f"SSL mode enabled, attempting to start with '{ssl_cert_file}' and '{ssl_privkey_file}' in {os.getcwd()}")
            socketio.run(app, host=host, port=int(port), debug=False,
                         ssl_context=(ssl_cert_file, ssl_privkey_file), allow_unsafe_werkzeug=True)
        else:
            socketio.run(app, host=host, port=int(port), debug=False, allow_unsafe_werkzeug=True)
    else:
        eventlet.wsgi.server(eventlet.listen((host, int(port)), s_type), app)

        # Run Flask app without Socket.IO if disabled
        logger.info("Starting Flask app without Socket.IO server")
        if ssl_mode:
            logger.success(f"SSL mode enabled, attempting to start with '{ssl_cert_file}' and '{ssl_privkey_file}' in {os.getcwd()}")
            app.run(host=host, port=int(port), debug=False,
                    ssl_context=(ssl_cert_file, ssl_privkey_file))
        else:
            app.run(host=host, port=int(port), debug=False)
changedetectionio/api/Import.py (new file, 67 lines)
@@ -0,0 +1,67 @@
from changedetectionio.strtobool import strtobool
from flask_restful import abort, Resource
from flask import request
from functools import wraps
from . import auth, validate_openapi_request
from ..validate_url import is_safe_valid_url


def default_content_type(content_type='text/plain'):
    """Decorator to set a default Content-Type header if none is provided."""
    def decorator(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            if not request.content_type:
                # Set default content type in the request environment
                request.environ['CONTENT_TYPE'] = content_type
            return f(*args, **kwargs)
        return wrapper
    return decorator


class Import(Resource):
    def __init__(self, **kwargs):
        # datastore is a black box dependency
        self.datastore = kwargs['datastore']

    @auth.check_token
    @default_content_type('text/plain') #3547 #3542
    @validate_openapi_request('importWatches')
    def post(self):
        """Import a list of watched URLs."""

        extras = {}

        if request.args.get('proxy'):
            plist = self.datastore.proxy_list
            if not request.args.get('proxy') in plist:
                return "Invalid proxy choice, currently supported proxies are '{}'".format(', '.join(plist)), 400
            else:
                extras['proxy'] = request.args.get('proxy')

        dedupe = strtobool(request.args.get('dedupe', 'true'))

        tags = request.args.get('tag')
        tag_uuids = request.args.get('tag_uuids')

        if tag_uuids:
            tag_uuids = tag_uuids.split(',')

        urls = request.get_data().decode('utf8').splitlines()
        added = []
        for url in urls:
            url = url.strip()
            if not len(url):
                continue

            # If hosts that only contain alphanumerics are allowed ("localhost" for example)
            if not is_safe_valid_url(url):
                return f"Invalid or unsupported URL - {url}", 400

            if dedupe and self.datastore.url_exists(url):
                continue

            new_uuid = self.datastore.add_watch(url=url, extras=extras, tag=tags, tag_uuids=tag_uuids)
            added.append(new_uuid)

        return added
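
A rough client-side sketch of this endpoint: the `/api/v1/import` path and the API key value are assumptions for illustration (the key format follows the curl examples further down), the body is newline-separated URLs sent as text/plain, and the response is the list of created watch UUIDs.

```python
# Editor's sketch of calling the import endpoint; URL path and API key are
# assumptions for illustration.
import requests

r = requests.post(
    "http://localhost:5000/api/v1/import",
    params={"tag": "pricing", "dedupe": "true"},
    headers={"x-api-key": "813031b16330fe25e3780cf0325daa45"},
    data="https://example.com\nhttps://example.org\n",  # one URL per line, text/plain
)
print(r.status_code, r.json())  # 200 and a list of new watch UUIDs on success
```
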
changedetectionio/api/Notifications.py (new file, 108 lines)
@@ -0,0 +1,108 @@
from flask_expects_json import expects_json
from flask_restful import Resource, abort
from flask import request
from . import auth, validate_openapi_request
from . import schema_create_notification_urls, schema_delete_notification_urls

class Notifications(Resource):
    def __init__(self, **kwargs):
        # datastore is a black box dependency
        self.datastore = kwargs['datastore']

    @auth.check_token
    @validate_openapi_request('getNotifications')
    def get(self):
        """Return Notification URL List."""

        notification_urls = self.datastore.data.get('settings', {}).get('application', {}).get('notification_urls', [])

        return {
            'notification_urls': notification_urls,
        }, 200

    @auth.check_token
    @validate_openapi_request('addNotifications')
    @expects_json(schema_create_notification_urls)
    def post(self):
        """Create Notification URLs."""

        json_data = request.get_json()
        notification_urls = json_data.get("notification_urls", [])

        from wtforms import ValidationError
        try:
            validate_notification_urls(notification_urls)
        except ValidationError as e:
            return str(e), 400

        added_urls = []

        for url in notification_urls:
            clean_url = url.strip()
            added_url = self.datastore.add_notification_url(clean_url)
            if added_url:
                added_urls.append(added_url)

        if not added_urls:
            return "No valid notification URLs were added", 400

        return {'notification_urls': added_urls}, 201

    @auth.check_token
    @validate_openapi_request('replaceNotifications')
    @expects_json(schema_create_notification_urls)
    def put(self):
        """Replace Notification URLs."""
        json_data = request.get_json()
        notification_urls = json_data.get("notification_urls", [])

        from wtforms import ValidationError
        try:
            validate_notification_urls(notification_urls)
        except ValidationError as e:
            return str(e), 400

        if not isinstance(notification_urls, list):
            return "Invalid input format", 400

        clean_urls = [url.strip() for url in notification_urls if isinstance(url, str)]
        self.datastore.data['settings']['application']['notification_urls'] = clean_urls
        self.datastore.needs_write = True

        return {'notification_urls': clean_urls}, 200

    @auth.check_token
    @validate_openapi_request('deleteNotifications')
    @expects_json(schema_delete_notification_urls)
    def delete(self):
        """Delete Notification URLs."""

        json_data = request.get_json()
        urls_to_delete = json_data.get("notification_urls", [])
        if not isinstance(urls_to_delete, list):
            abort(400, message="Expected a list of notification URLs.")

        notification_urls = self.datastore.data['settings']['application'].get('notification_urls', [])
        deleted = []

        for url in urls_to_delete:
            clean_url = url.strip()
            if clean_url in notification_urls:
                notification_urls.remove(clean_url)
                deleted.append(clean_url)

        if not deleted:
            abort(400, message="No matching notification URLs found.")

        self.datastore.data['settings']['application']['notification_urls'] = notification_urls
        self.datastore.needs_write = True

        return 'OK', 204

def validate_notification_urls(notification_urls):
    from changedetectionio.forms import ValidateAppRiseServers
    validator = ValidateAppRiseServers()
    class DummyForm: pass
    dummy_form = DummyForm()
    field = type("Field", (object,), {"data": notification_urls, "gettext": lambda self, x: x})()
    validator(dummy_form, field)
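
To illustrate the resource above from a client: POST appends URLs, PUT replaces the whole list, and DELETE removes matching entries. The `/api/v1/notifications` mount point, hostname, and key below are assumptions for illustration; the notification URL uses Apprise's mailto scheme.

```python
# Editor's sketch: manage the global notification URL list over the API.
# Path and API key are assumptions for illustration.
import requests

base = "http://localhost:5000/api/v1/notifications"
headers = {"x-api-key": "813031b16330fe25e3780cf0325daa45"}

r = requests.post(base, json={"notification_urls": ["mailto://user:pass@example.com"]}, headers=headers)
print(r.status_code, r.json())  # expect 201 and {'notification_urls': [...]}

print(requests.get(base, headers=headers).json())  # the current list
```
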
changedetectionio/api/Search.py (new file, 39 lines)
@@ -0,0 +1,39 @@
from flask_restful import Resource, abort
from flask import request
from . import auth, validate_openapi_request

class Search(Resource):
    def __init__(self, **kwargs):
        # datastore is a black box dependency
        self.datastore = kwargs['datastore']

    @auth.check_token
    @validate_openapi_request('searchWatches')
    def get(self):
        """Search for watches by URL or title text."""
        query = request.args.get('q', '').strip()
        tag_limit = request.args.get('tag', '').strip()
        from changedetectionio.strtobool import strtobool
        partial = bool(strtobool(request.args.get('partial', '0'))) if 'partial' in request.args else False

        # Require a search query
        if not query:
            abort(400, message="Search query 'q' parameter is required")

        # Use the search function from the datastore
        matching_uuids = self.datastore.search_watches_for_url(query=query, tag_limit=tag_limit, partial=partial)

        # Build the response with watch details
        results = {}
        for uuid in matching_uuids:
            watch = self.datastore.data['watching'].get(uuid)
            results[uuid] = {
                'last_changed': watch.last_changed,
                'last_checked': watch['last_checked'],
                'last_error': watch['last_error'],
                'title': watch['title'],
                'url': watch['url'],
                'viewed': watch.viewed
            }

        return results, 200
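
Client-side, the search resource reduces to a single GET; the `/api/v1/search` path and API key below are assumptions for illustration.

```python
# Editor's sketch: substring/partial search across watch URLs and titles.
import requests

r = requests.get(
    "http://localhost:5000/api/v1/search",
    params={"q": "example.com", "partial": "1", "tag": "pricing"},  # tag filter is optional
    headers={"x-api-key": "813031b16330fe25e3780cf0325daa45"},
)
for uuid, watch in r.json().items():
    print(uuid, watch["url"], watch["last_checked"])
```
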
changedetectionio/api/SystemInfo.py (new file, 40 lines)
@@ -0,0 +1,40 @@
from flask_restful import Resource
from . import auth, validate_openapi_request


class SystemInfo(Resource):
    def __init__(self, **kwargs):
        # datastore is a black box dependency
        self.datastore = kwargs['datastore']
        self.update_q = kwargs['update_q']

    @auth.check_token
    @validate_openapi_request('getSystemInfo')
    def get(self):
        """Return system info."""
        import time
        overdue_watches = []

        # Check all watches and report which have not been checked but should have been

        for uuid, watch in self.datastore.data.get('watching', {}).items():
            # see if now - last_checked is greater than the time that should have been
            # this is not super accurate (maybe they just edited it) but better than nothing
            t = watch.threshold_seconds()
            if not t:
                # Use the system wide default
                t = self.datastore.threshold_seconds

            time_since_check = time.time() - watch.get('last_checked')

            # Allow 5 minutes of grace time before we decide it's overdue
            if time_since_check - (5 * 60) > t:
                overdue_watches.append(uuid)
        from changedetectionio import __version__ as main_version
        return {
            'queue_size': self.update_q.qsize(),
            'overdue_watches': overdue_watches,
            'uptime': round(time.time() - self.datastore.start_time, 2),
            'watch_count': len(self.datastore.data.get('watching', {})),
            'version': main_version
        }, 200
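
The endpoint lends itself to a simple health probe; a sketch, assuming a `/api/v1/systeminfo` mount point and a placeholder API key:

```python
# Editor's sketch: poll system info and flag overdue watches.
import requests

r = requests.get("http://localhost:5000/api/v1/systeminfo",
                 headers={"x-api-key": "813031b16330fe25e3780cf0325daa45"})
info = r.json()
if info["overdue_watches"]:
    print(f"{len(info['overdue_watches'])} watches overdue, queue size {info['queue_size']}")
print(f"version {info['version']}, uptime {info['uptime']}s, {info['watch_count']} watches")
```
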
changedetectionio/api/Tags.py (new file, 119 lines)
@@ -0,0 +1,119 @@
from changedetectionio import queuedWatchMetaData
from changedetectionio import worker_handler
from flask_expects_json import expects_json
from flask_restful import abort, Resource

from flask import request
from . import auth

# Import schemas from __init__.py
from . import schema_tag, schema_create_tag, schema_update_tag, validate_openapi_request


class Tag(Resource):
    def __init__(self, **kwargs):
        # datastore is a black box dependency
        self.datastore = kwargs['datastore']
        self.update_q = kwargs['update_q']

    # Get information about a single tag
    # curl http://localhost:5000/api/v1/tag/<string:uuid>
    @auth.check_token
    @validate_openapi_request('getTag')
    def get(self, uuid):
        """Get data for a single tag/group, toggle notification muting, or recheck all."""
        from copy import deepcopy
        tag = deepcopy(self.datastore.data['settings']['application']['tags'].get(uuid))
        if not tag:
            abort(404, message=f'No tag exists with the UUID of {uuid}')

        if request.args.get('recheck'):
            # Recheck all, including muted
            # Get most overdue first
            i = 0
            for k in sorted(self.datastore.data['watching'].items(), key=lambda item: item[1].get('last_checked', 0)):
                watch_uuid = k[0]
                watch = k[1]
                if not watch['paused'] and tag['uuid'] not in watch['tags']:
                    continue
                worker_handler.queue_item_async_safe(self.update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': watch_uuid}))
                i += 1

            return f"OK, {i} watches queued", 200

        if request.args.get('muted', '') == 'muted':
            self.datastore.data['settings']['application']['tags'][uuid]['notification_muted'] = True
            return "OK", 200
        elif request.args.get('muted', '') == 'unmuted':
            self.datastore.data['settings']['application']['tags'][uuid]['notification_muted'] = False
            return "OK", 200

        return tag

    @auth.check_token
    @validate_openapi_request('deleteTag')
    def delete(self, uuid):
        """Delete a tag/group and remove it from all watches."""
        if not self.datastore.data['settings']['application']['tags'].get(uuid):
            abort(400, message='No tag exists with the UUID of {}'.format(uuid))

        # Delete the tag, and any tag reference
        del self.datastore.data['settings']['application']['tags'][uuid]

        # Remove tag from all watches
        for watch_uuid, watch in self.datastore.data['watching'].items():
            if watch.get('tags') and uuid in watch['tags']:
                watch['tags'].remove(uuid)

        return 'OK', 204

    @auth.check_token
    @validate_openapi_request('updateTag')
    @expects_json(schema_update_tag)
    def put(self, uuid):
        """Update tag information."""
        tag = self.datastore.data['settings']['application']['tags'].get(uuid)
        if not tag:
            abort(404, message='No tag exists with the UUID of {}'.format(uuid))

        tag.update(request.json)
        self.datastore.needs_write_urgent = True

        return "OK", 200


    @auth.check_token
    @validate_openapi_request('createTag')
    # Only cares for {'title': 'xxxx'}
    def post(self):
        """Create a single tag/group."""

        json_data = request.get_json()
        title = json_data.get("title", '').strip()


        new_uuid = self.datastore.add_tag(title=title)
        if new_uuid:
            return {'uuid': new_uuid}, 201
        else:
            return "Invalid or unsupported tag", 400

class Tags(Resource):
    def __init__(self, **kwargs):
        # datastore is a black box dependency
        self.datastore = kwargs['datastore']

    @auth.check_token
    @validate_openapi_request('listTags')
    def get(self):
        """List tags/groups."""
        result = {}
        for uuid, tag in self.datastore.data['settings']['application']['tags'].items():
            result[uuid] = {
                'date_created': tag.get('date_created', 0),
                'notification_muted': tag.get('notification_muted', False),
                'title': tag.get('title', ''),
                'uuid': tag.get('uuid')
            }

        return result, 200
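
Tying the two resources together from a client, using the `/api/v1/tag` path shown in the curl comment above (the `/api/v1/tags` listing path and the API key are assumptions for illustration):

```python
# Editor's sketch: create a tag, mute its notifications, then list all tags.
import requests

headers = {"x-api-key": "813031b16330fe25e3780cf0325daa45"}
r = requests.post("http://localhost:5000/api/v1/tag", json={"title": "pricing"}, headers=headers)
tag_uuid = r.json()["uuid"]  # 201 on success

requests.get(f"http://localhost:5000/api/v1/tag/{tag_uuid}", params={"muted": "muted"}, headers=headers)
print(requests.get("http://localhost:5000/api/v1/tags", headers=headers).json())
```
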
changedetectionio/api/Watch.py (new file, 292 lines)
@@ -0,0 +1,292 @@
import os

from changedetectionio.validate_url import is_safe_valid_url

from flask_expects_json import expects_json
from changedetectionio import queuedWatchMetaData
from changedetectionio import worker_handler
from flask_restful import abort, Resource
from flask import request, make_response, send_from_directory
from . import auth
import copy

# Import schemas from __init__.py
from . import schema, schema_create_watch, schema_update_watch, validate_openapi_request


def validate_time_between_check_required(json_data):
    """
    Validate that at least one time interval is specified when not using default settings.
    Returns None if valid, or error message string if invalid.
    Defaults to using global settings if time_between_check_use_default is not provided.
    """
    # Default to using global settings if not specified
    use_default = json_data.get('time_between_check_use_default', True)

    # If using default settings, no validation needed
    if use_default:
        return None

    # If not using defaults, check if time_between_check exists and has at least one non-zero value
    time_check = json_data.get('time_between_check')
    if not time_check:
        # No time_between_check provided and not using defaults - this is an error
        return "At least one time interval (weeks, days, hours, minutes, or seconds) must be specified when not using global settings."

    # time_between_check exists, check if it has at least one non-zero value
    if any([
        (time_check.get('weeks') or 0) > 0,
        (time_check.get('days') or 0) > 0,
        (time_check.get('hours') or 0) > 0,
        (time_check.get('minutes') or 0) > 0,
        (time_check.get('seconds') or 0) > 0
    ]):
        return None

    # time_between_check exists but all values are 0 or empty - this is an error
    return "At least one time interval (weeks, days, hours, minutes, or seconds) must be specified when not using global settings."
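
A quick behaviour sketch of the validator above (editor's illustration, runnable once the function is imported):

```python
# Editor's illustration of validate_time_between_check_required() semantics.
assert validate_time_between_check_required({}) is None  # defaults to global settings
assert validate_time_between_check_required(
    {'time_between_check_use_default': False, 'time_between_check': {'hours': 1}}) is None
# Defaults disabled and every interval zero or missing -> error string returned
assert validate_time_between_check_required(
    {'time_between_check_use_default': False, 'time_between_check': {'hours': 0}}) is not None
```
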
class Watch(Resource):
    def __init__(self, **kwargs):
        # datastore is a black box dependency
        self.datastore = kwargs['datastore']
        self.update_q = kwargs['update_q']

    # Get information about a single watch, excluding the history list (can be large)
    # curl http://localhost:5000/api/v1/watch/<string:uuid>
    # @todo - version2 - ?muted and ?paused should be able to be called together, return the watch struct not "OK"
    # ?recheck=true
    @auth.check_token
    @validate_openapi_request('getWatch')
    def get(self, uuid):
        """Get information about a single watch, recheck, pause, or mute."""
        from copy import deepcopy
        watch = deepcopy(self.datastore.data['watching'].get(uuid))
        if not watch:
            abort(404, message='No watch exists with the UUID of {}'.format(uuid))

        if request.args.get('recheck'):
            worker_handler.queue_item_async_safe(self.update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))
            return "OK", 200
        if request.args.get('paused', '') == 'paused':
            self.datastore.data['watching'].get(uuid).pause()
            return "OK", 200
        elif request.args.get('paused', '') == 'unpaused':
            self.datastore.data['watching'].get(uuid).unpause()
            return "OK", 200
        if request.args.get('muted', '') == 'muted':
            self.datastore.data['watching'].get(uuid).mute()
            return "OK", 200
        elif request.args.get('muted', '') == 'unmuted':
            self.datastore.data['watching'].get(uuid).unmute()
            return "OK", 200

        # Return without history, get that via another API call
        # Properties are not returned as a JSON, so add the required props manually
        watch['history_n'] = watch.history_n
        # attr .last_changed will check for the last written text snapshot on change
        watch['last_changed'] = watch.last_changed
        watch['viewed'] = watch.viewed
        watch['link'] = watch.link

        return watch

    @auth.check_token
    @validate_openapi_request('deleteWatch')
    def delete(self, uuid):
        """Delete a watch and related history."""
        if not self.datastore.data['watching'].get(uuid):
            abort(400, message='No watch exists with the UUID of {}'.format(uuid))

        self.datastore.delete(uuid)
        return 'OK', 204

    @auth.check_token
    @validate_openapi_request('updateWatch')
    @expects_json(schema_update_watch)
    def put(self, uuid):
        """Update watch information."""
        watch = self.datastore.data['watching'].get(uuid)
        if not watch:
            abort(404, message='No watch exists with the UUID of {}'.format(uuid))

        if request.json.get('proxy'):
            plist = self.datastore.proxy_list
            if not request.json.get('proxy') in plist:
                return "Invalid proxy choice, currently supported proxies are '{}'".format(', '.join(plist)), 400

        # Validate time_between_check when not using defaults
        validation_error = validate_time_between_check_required(request.json)
        if validation_error:
            return validation_error, 400

        # XSS etc protection
        if request.json.get('url') and not is_safe_valid_url(request.json.get('url')):
            return "Invalid URL", 400

        watch.update(request.json)

        return "OK", 200


class WatchHistory(Resource):
    def __init__(self, **kwargs):
        # datastore is a black box dependency
        self.datastore = kwargs['datastore']

    # Get a list of available history for a watch by UUID
    # curl http://localhost:5000/api/v1/watch/<string:uuid>/history
    @auth.check_token
    @validate_openapi_request('getWatchHistory')
    def get(self, uuid):
        """Get a list of all historical snapshots available for a watch."""
        watch = self.datastore.data['watching'].get(uuid)
        if not watch:
            abort(404, message='No watch exists with the UUID of {}'.format(uuid))
        return watch.history, 200


class WatchSingleHistory(Resource):
    def __init__(self, **kwargs):
        # datastore is a black box dependency
        self.datastore = kwargs['datastore']

    @auth.check_token
    @validate_openapi_request('getWatchSnapshot')
    def get(self, uuid, timestamp):
        """Get single snapshot from watch."""
        watch = self.datastore.data['watching'].get(uuid)
        if not watch:
            abort(404, message=f"No watch exists with the UUID of {uuid}")

        if not len(watch.history):
            abort(404, message=f"Watch found but no history exists for the UUID {uuid}")

        if timestamp == 'latest':
            timestamp = list(watch.history.keys())[-1]

        if request.args.get('html'):
            content = watch.get_fetched_html(timestamp)
            if content:
                response = make_response(content, 200)
                response.mimetype = "text/html"
            else:
                response = make_response("No content found", 404)
                response.mimetype = "text/plain"
        else:
            content = watch.get_history_snapshot(timestamp)
            response = make_response(content, 200)
            response.mimetype = "text/plain"

        return response

class WatchFavicon(Resource):
    def __init__(self, **kwargs):
        # datastore is a black box dependency
        self.datastore = kwargs['datastore']

    @auth.check_token
    @validate_openapi_request('getWatchFavicon')
    def get(self, uuid):
        """Get favicon for a watch."""
        watch = self.datastore.data['watching'].get(uuid)
        if not watch:
            abort(404, message=f"No watch exists with the UUID of {uuid}")

        favicon_filename = watch.get_favicon_filename()
        if favicon_filename:
            try:
                import magic
                mime = magic.from_file(
                    os.path.join(watch.watch_data_dir, favicon_filename),
                    mime=True
                )
            except ImportError:
                # Fallback, no python-magic
                import mimetypes
                mime, encoding = mimetypes.guess_type(favicon_filename)

            response = make_response(send_from_directory(watch.watch_data_dir, favicon_filename))
            response.headers['Content-type'] = mime
            response.headers['Cache-Control'] = 'max-age=300, must-revalidate'  # Cache for 5 minutes, then revalidate
            return response

        abort(404, message=f'No Favicon available for {uuid}')


class CreateWatch(Resource):
    def __init__(self, **kwargs):
        # datastore is a black box dependency
        self.datastore = kwargs['datastore']
        self.update_q = kwargs['update_q']

    @auth.check_token
    @validate_openapi_request('createWatch')
    @expects_json(schema_create_watch)
    def post(self):
        """Create a single watch."""

        json_data = request.get_json()
        url = json_data['url'].strip()

        if not is_safe_valid_url(url):
            return "Invalid or unsupported URL", 400

        if json_data.get('proxy'):
            plist = self.datastore.proxy_list
            if not json_data.get('proxy') in plist:
                return "Invalid proxy choice, currently supported proxies are '{}'".format(', '.join(plist)), 400

        # Validate time_between_check when not using defaults
        validation_error = validate_time_between_check_required(json_data)
        if validation_error:
            return validation_error, 400

        extras = copy.deepcopy(json_data)

        # Because we renamed 'tag' to 'tags' but don't want to change the API (can do this in v2 of the API)
        tags = None
        if extras.get('tag'):
            tags = extras.get('tag')
            del extras['tag']

        del extras['url']

        new_uuid = self.datastore.add_watch(url=url, extras=extras, tag=tags)
        if new_uuid:
            worker_handler.queue_item_async_safe(self.update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': new_uuid}))
            return {'uuid': new_uuid}, 201
        else:
            return "Invalid or unsupported URL", 400

    @auth.check_token
    @validate_openapi_request('listWatches')
    def get(self):
        """List watches."""
        list = {}

        tag_limit = request.args.get('tag', '').lower()
        for uuid, watch in self.datastore.data['watching'].items():
            # Watch tags by name (replace the other calls?)
            tags = self.datastore.get_all_tags_for_watch(uuid=uuid)
            if tag_limit and not any(v.get('title').lower() == tag_limit for k, v in tags.items()):
                continue

            list[uuid] = {
                'last_changed': watch.last_changed,
                'last_checked': watch['last_checked'],
                'last_error': watch['last_error'],
                'link': watch.link,
                'page_title': watch['page_title'],
                'title': watch['title'],
                'url': watch['url'],
                'viewed': watch.viewed
            }

        if request.args.get('recheck_all'):
            for uuid in self.datastore.data['watching'].keys():
                worker_handler.queue_item_async_safe(self.update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))
            return {'status': "OK"}, 200

        return list, 200
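
End to end, the watch lifecycle above looks roughly like this from a client. The paths follow the curl comments in the code; the `history/latest` form and the API key are assumptions for illustration.

```python
# Editor's sketch: create a watch, then fetch its latest text snapshot.
import requests

base = "http://localhost:5000/api/v1"
headers = {"x-api-key": "813031b16330fe25e3780cf0325daa45"}

r = requests.post(f"{base}/watch", json={"url": "https://example.com", "tag": "pricing"}, headers=headers)
uuid = r.json()["uuid"]  # 201; the new watch is queued for its first check

snapshot = requests.get(f"{base}/watch/{uuid}/history/latest", headers=headers)
print(snapshot.text[:200])  # plain-text snapshot; add ?html=1 for the fetched HTML
```
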
changedetectionio/api/__init__.py (new file, 86 lines)
@@ -0,0 +1,86 @@
import copy
import functools
from flask import request, abort
from loguru import logger
from . import api_schema
from ..model import watch_base

# Build a JSON Schema atleast partially based on our Watch model
watch_base_config = watch_base()
schema = api_schema.build_watch_json_schema(watch_base_config)

schema_create_watch = copy.deepcopy(schema)
schema_create_watch['required'] = ['url']
del schema_create_watch['properties']['last_viewed']

schema_update_watch = copy.deepcopy(schema)
schema_update_watch['additionalProperties'] = False

# Tag schema is also based on watch_base since Tag inherits from it
schema_tag = copy.deepcopy(schema)
schema_create_tag = copy.deepcopy(schema_tag)
schema_create_tag['required'] = ['title']
schema_update_tag = copy.deepcopy(schema_tag)
schema_update_tag['additionalProperties'] = False

schema_notification_urls = copy.deepcopy(schema)
schema_create_notification_urls = copy.deepcopy(schema_notification_urls)
schema_create_notification_urls['required'] = ['notification_urls']
schema_delete_notification_urls = copy.deepcopy(schema_notification_urls)
schema_delete_notification_urls['required'] = ['notification_urls']

@functools.cache
def get_openapi_spec():
    """Lazy load OpenAPI spec and dependencies only when validation is needed."""
    import os
    import yaml  # Lazy import - only loaded when API validation is actually used
    from openapi_core import OpenAPI  # Lazy import - saves ~10.7 MB on startup

    spec_path = os.path.join(os.path.dirname(__file__), '../../docs/api-spec.yaml')
    if not os.path.exists(spec_path):
        # Possibly for pip3 packages
        spec_path = os.path.join(os.path.dirname(__file__), '../docs/api-spec.yaml')

    with open(spec_path, 'r') as f:
        spec_dict = yaml.safe_load(f)
    _openapi_spec = OpenAPI.from_dict(spec_dict)
    return _openapi_spec

def validate_openapi_request(operation_id):
    """Decorator to validate incoming requests against OpenAPI spec."""
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                # Skip OpenAPI validation for GET requests since they don't have request bodies
                if request.method.upper() != 'GET':
                    # Lazy import - only loaded when actually validating a request
                    from openapi_core.contrib.flask import FlaskOpenAPIRequest

                    spec = get_openapi_spec()
                    openapi_request = FlaskOpenAPIRequest(request)
                    result = spec.unmarshal_request(openapi_request)
                    if result.errors:
                        from werkzeug.exceptions import BadRequest
                        error_details = []
                        for error in result.errors:
                            error_details.append(str(error))
                        raise BadRequest(f"OpenAPI validation failed: {error_details}")
            except BadRequest:
                # Re-raise BadRequest exceptions (validation failures)
                raise
            except Exception as e:
                # If OpenAPI spec loading fails, log but don't break existing functionality
                logger.critical(f"OpenAPI validation warning for {operation_id}: {e}")
                abort(500)
            return f(*args, **kwargs)
        return wrapper
    return decorator

# Import all API resources
from .Watch import Watch, WatchHistory, WatchSingleHistory, CreateWatch, WatchFavicon
from .Tags import Tags, Tag
from .Import import Import
from .SystemInfo import SystemInfo
from .Notifications import Notifications
changedetectionio/api/api_schema.py
@@ -1,5 +1,7 @@
# Responsible for building the storage dict into a set of rules ("JSON Schema") acceptable via the API
# Probably other ways to solve this when the backend switches to some ORM
from changedetectionio.notification import valid_notification_formats


def build_time_between_check_json_schema():
    # Setup time between check schema
@@ -76,6 +78,13 @@ def build_watch_json_schema(d):
    ]:
        schema['properties'][v]['anyOf'].append({'type': 'string', "maxLength": 5000})

    for v in ['last_viewed']:
        schema['properties'][v] = {
            "type": "integer",
            "description": "Unix timestamp in seconds of the last time the watch was viewed.",
            "minimum": 0
        }

    # None or Boolean
    schema['properties']['track_ldjson_price_data']['anyOf'].append({'type': 'boolean'})

@@ -98,8 +107,6 @@ def build_watch_json_schema(d):
        }
    }

    from changedetectionio.notification import valid_notification_formats

    schema['properties']['notification_format'] = {'type': 'string',
                                                   'enum': list(valid_notification_formats.keys())
                                                   }
@@ -112,6 +119,12 @@ def build_watch_json_schema(d):

    schema['properties']['time_between_check'] = build_time_between_check_json_schema()

    schema['properties']['time_between_check_use_default'] = {
        "type": "boolean",
        "default": True,
        "description": "Whether to use global settings for time between checks - defaults to true if not set"
    }

    schema['properties']['browser_steps'] = {
        "anyOf": [
            {
@@ -1,416 +0,0 @@
|
||||
import os
|
||||
from changedetectionio.strtobool import strtobool
|
||||
|
||||
from flask_expects_json import expects_json
|
||||
from changedetectionio import queuedWatchMetaData
|
||||
from flask_restful import abort, Resource
|
||||
from flask import request, make_response
|
||||
import validators
|
||||
from . import auth
|
||||
import copy
|
||||
|
||||
# See docs/README.md for rebuilding the docs/apidoc information
|
||||
|
||||
from . import api_schema
|
||||
from ..model import watch_base
|
||||
|
||||
# Build a JSON Schema atleast partially based on our Watch model
|
||||
watch_base_config = watch_base()
|
||||
schema = api_schema.build_watch_json_schema(watch_base_config)
|
||||
|
||||
schema_create_watch = copy.deepcopy(schema)
|
||||
schema_create_watch['required'] = ['url']
|
||||
|
||||
schema_update_watch = copy.deepcopy(schema)
|
||||
schema_update_watch['additionalProperties'] = False

class Watch(Resource):
    def __init__(self, **kwargs):
        # datastore is a black box dependency
        self.datastore = kwargs['datastore']
        self.update_q = kwargs['update_q']

    # Get information about a single watch, excluding the history list (can be large)
    # curl http://localhost:5000/api/v1/watch/<string:uuid>
    # @todo - version2 - ?muted and ?paused should be able to be called together, return the watch struct not "OK"
    # ?recheck=true
    @auth.check_token
    def get(self, uuid):
        """
        @api {get} /api/v1/watch/:uuid Single watch - get data, recheck, pause, mute.
        @apiDescription Retrieve watch information and set muted/paused status
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -H"x-api-key:813031b16330fe25e3780cf0325daa45"
            curl "http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091?muted=unmuted" -H"x-api-key:813031b16330fe25e3780cf0325daa45"
            curl "http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091?paused=unpaused" -H"x-api-key:813031b16330fe25e3780cf0325daa45"
        @apiName Watch
        @apiGroup Watch
        @apiParam {uuid} uuid Watch unique ID.
        @apiQuery {Boolean} [recheck] Recheck this watch `recheck=1`
        @apiQuery {String} [paused] =`paused` or =`unpaused` , Sets the PAUSED state
        @apiQuery {String} [muted] =`muted` or =`unmuted` , Sets the MUTE NOTIFICATIONS state
        @apiSuccess (200) {String} OK When paused/muted/recheck operation OR full JSON object of the watch
        @apiSuccess (200) {JSON} WatchJSON JSON Full JSON object of the watch
        """
        from copy import deepcopy
        watch = deepcopy(self.datastore.data['watching'].get(uuid))
        if not watch:
            abort(404, message='No watch exists with the UUID of {}'.format(uuid))

        if request.args.get('recheck'):
            self.update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))
            return "OK", 200
        if request.args.get('paused', '') == 'paused':
            self.datastore.data['watching'].get(uuid).pause()
            return "OK", 200
        elif request.args.get('paused', '') == 'unpaused':
            self.datastore.data['watching'].get(uuid).unpause()
            return "OK", 200
        if request.args.get('muted', '') == 'muted':
            self.datastore.data['watching'].get(uuid).mute()
            return "OK", 200
        elif request.args.get('muted', '') == 'unmuted':
            self.datastore.data['watching'].get(uuid).unmute()
            return "OK", 200

        # Return without history, get that via another API call
        # Properties are not returned as a JSON, so add the required props manually
        watch['history_n'] = watch.history_n
        # attr .last_changed will check for the last written text snapshot on change
        watch['last_changed'] = watch.last_changed
        watch['viewed'] = watch.viewed
        return watch
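
For readers scripting against this endpoint, a short sketch using the `requests` library; the host, API key and UUID are the placeholder values from the apidoc examples above:

# Sketch only: exercising the GET endpoint above from Python.
import requests

BASE = "http://localhost:5000/api/v1"
HEADERS = {"x-api-key": "813031b16330fe25e3780cf0325daa45"}
uuid = "cc0cfffa-f449-477b-83ea-0caafd1dc091"

watch = requests.get(f"{BASE}/watch/{uuid}", headers=HEADERS).json()
requests.get(f"{BASE}/watch/{uuid}", params={"recheck": 1}, headers=HEADERS)       # queue a recheck
requests.get(f"{BASE}/watch/{uuid}", params={"muted": "muted"}, headers=HEADERS)   # mute notifications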

    @auth.check_token
    def delete(self, uuid):
        """
        @api {delete} /api/v1/watch/:uuid Delete a watch and related history
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -X DELETE -H"x-api-key:813031b16330fe25e3780cf0325daa45"
        @apiParam {uuid} uuid Watch unique ID.
        @apiName Delete
        @apiGroup Watch
        @apiSuccess (200) {String} OK Was deleted
        """
        if not self.datastore.data['watching'].get(uuid):
            abort(400, message='No watch exists with the UUID of {}'.format(uuid))

        self.datastore.delete(uuid)
        return 'OK', 204

    @auth.check_token
    @expects_json(schema_update_watch)
    def put(self, uuid):
        """
        @api {put} /api/v1/watch/:uuid Update watch information
        @apiExample {curl} Example usage:
            Update (PUT)
            curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -X PUT -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json" -d '{"url": "https://my-nice.com" , "tag": "new list"}'

        @apiDescription Updates an existing watch using JSON, accepts the same structure as returned in <a href="#api-Watch-Watch">get single watch information</a>
        @apiParam {uuid} uuid Watch unique ID.
        @apiName Update a watch
        @apiGroup Watch
        @apiSuccess (200) {String} OK Was updated
        @apiSuccess (500) {String} ERR Some other error
        """
        watch = self.datastore.data['watching'].get(uuid)
        if not watch:
            abort(404, message='No watch exists with the UUID of {}'.format(uuid))

        if request.json.get('proxy'):
            plist = self.datastore.proxy_list
            if request.json.get('proxy') not in plist:
                return "Invalid proxy choice, currently supported proxies are '{}'".format(', '.join(plist)), 400

        watch.update(request.json)

        return "OK", 200


class WatchHistory(Resource):
    def __init__(self, **kwargs):
        # datastore is a black box dependency
        self.datastore = kwargs['datastore']

    # Get a list of available history for a watch by UUID
    # curl http://localhost:5000/api/v1/watch/<string:uuid>/history
    @auth.check_token
    def get(self, uuid):
        """
        @api {get} /api/v1/watch/<string:uuid>/history Get a list of all historical snapshots available for a watch
        @apiDescription Requires `uuid`, returns list
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091/history -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json"
            {
                "1676649279": "/tmp/data/6a4b7d5c-fee4-4616-9f43-4ac97046b595/cb7e9be8258368262246910e6a2a4c30.txt",
                "1677092785": "/tmp/data/6a4b7d5c-fee4-4616-9f43-4ac97046b595/e20db368d6fc633e34f559ff67bb4044.txt",
                "1677103794": "/tmp/data/6a4b7d5c-fee4-4616-9f43-4ac97046b595/02efdd37dacdae96554a8cc85dc9c945.txt"
            }
        @apiName Get list of available stored snapshots for watch
        @apiGroup Watch History
        @apiSuccess (200) {String} OK
        @apiSuccess (404) {String} ERR Not found
        """
        watch = self.datastore.data['watching'].get(uuid)
        if not watch:
            abort(404, message='No watch exists with the UUID of {}'.format(uuid))
        return watch.history, 200


class WatchSingleHistory(Resource):
    def __init__(self, **kwargs):
        # datastore is a black box dependency
        self.datastore = kwargs['datastore']

    @auth.check_token
    def get(self, uuid, timestamp):
        """
        @api {get} /api/v1/watch/<string:uuid>/history/<int:timestamp> Get single snapshot from watch
        @apiDescription Requires watch `uuid` and `timestamp`. `timestamp` of "`latest`" for latest available snapshot, or <a href="#api-Watch_History-Get_list_of_available_stored_snapshots_for_watch">use the list returned here</a>
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091/history/1677092977 -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json"
        @apiName Get single snapshot content
        @apiGroup Watch History
        @apiParam {String} [html] Optional Set to =1 to return the last HTML (only stores last 2 snapshots, use `latest` as timestamp)
        @apiSuccess (200) {String} OK
        @apiSuccess (404) {String} ERR Not found
        """
        watch = self.datastore.data['watching'].get(uuid)
        if not watch:
            abort(404, message=f"No watch exists with the UUID of {uuid}")

        if not len(watch.history):
            abort(404, message=f"Watch found but no history exists for the UUID {uuid}")

        if timestamp == 'latest':
            timestamp = list(watch.history.keys())[-1]

        if request.args.get('html'):
            content = watch.get_fetched_html(timestamp)
            if content:
                response = make_response(content, 200)
                response.mimetype = "text/html"
            else:
                response = make_response("No content found", 404)
                response.mimetype = "text/plain"
        else:
            content = watch.get_history_snapshot(timestamp)
            response = make_response(content, 200)
            response.mimetype = "text/plain"

        return response
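
A note on the `latest` shortcut above: `watch.history` behaves like a dict keyed by epoch-second timestamps, so `list(watch.history.keys())[-1]` returns the newest key, on the assumption that snapshots are appended in time order. A minimal illustration using the sample data from the history apidoc:

# Sketch only: why timestamp == 'latest' resolves to the newest snapshot.
history = {
    "1676649279": ".../cb7e9be8258368262246910e6a2a4c30.txt",
    "1677092785": ".../e20db368d6fc633e34f559ff67bb4044.txt",
    "1677103794": ".../02efdd37dacdae96554a8cc85dc9c945.txt",
}
latest = list(history.keys())[-1]
assert latest == "1677103794"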


class CreateWatch(Resource):
    def __init__(self, **kwargs):
        # datastore is a black box dependency
        self.datastore = kwargs['datastore']
        self.update_q = kwargs['update_q']

    @auth.check_token
    @expects_json(schema_create_watch)
    def post(self):
        """
        @api {post} /api/v1/watch Create a single watch
        @apiDescription Requires at least `url` set, can accept the same structure as <a href="#api-Watch-Watch">get single watch information</a> to create.
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/watch -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json" -d '{"url": "https://my-nice.com" , "tag": "nice list"}'
        @apiName Create
        @apiGroup Watch
        @apiSuccess (200) {String} OK Was created
        @apiSuccess (500) {String} ERR Some other error
        """

        json_data = request.get_json()
        url = json_data['url'].strip()

        # If hosts that only contain alphanumerics are allowed ("localhost" for example)
        allow_simplehost = not strtobool(os.getenv('BLOCK_SIMPLEHOSTS', 'False'))
        if not validators.url(url, simple_host=allow_simplehost):
            return "Invalid or unsupported URL", 400

        if json_data.get('proxy'):
            plist = self.datastore.proxy_list
            if json_data.get('proxy') not in plist:
                return "Invalid proxy choice, currently supported proxies are '{}'".format(', '.join(plist)), 400

        extras = copy.deepcopy(json_data)

        # Because we renamed 'tag' to 'tags' but don't want to change the API (can do this in v2 of the API)
        tags = None
        if extras.get('tag'):
            tags = extras.get('tag')
            del extras['tag']

        del extras['url']

        new_uuid = self.datastore.add_watch(url=url, extras=extras, tag=tags)
        if new_uuid:
            self.update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': new_uuid}))
            return {'uuid': new_uuid}, 201
        else:
            return "Invalid or unsupported URL", 400

    @auth.check_token
    def get(self):
        """
        @api {get} /api/v1/watch List watches
        @apiDescription Return concise list of available watches and some very basic info
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/watch -H"x-api-key:813031b16330fe25e3780cf0325daa45"
            {
                "6a4b7d5c-fee4-4616-9f43-4ac97046b595": {
                    "last_changed": 1677103794,
                    "last_checked": 1677103794,
                    "last_error": false,
                    "title": "",
                    "url": "http://www.quotationspage.com/random.php"
                },
                "e6f5fd5c-dbfe-468b-b8f3-f9d6ff5ad69b": {
                    "last_changed": 0,
                    "last_checked": 1676662819,
                    "last_error": false,
                    "title": "QuickLook",
                    "url": "https://github.com/QL-Win/QuickLook/tags"
                }
            }

        @apiParam {String} [recheck_all] Optional Set to =1 to force recheck of all watches
        @apiParam {String} [tag] Optional name of tag to limit results
        @apiName ListWatches
        @apiGroup Watch Management
        @apiSuccess (200) {String} OK JSON dict
        """
        list = {}

        tag_limit = request.args.get('tag', '').lower()

        for uuid, watch in self.datastore.data['watching'].items():
            # Watch tags by name (replace the other calls?)
            tags = self.datastore.get_all_tags_for_watch(uuid=uuid)
            if tag_limit and not any(v.get('title').lower() == tag_limit for k, v in tags.items()):
                continue

            list[uuid] = {
                'last_changed': watch.last_changed,
                'last_checked': watch['last_checked'],
                'last_error': watch['last_error'],
                'title': watch['title'],
                'url': watch['url'],
                'viewed': watch.viewed
            }

        if request.args.get('recheck_all'):
            for uuid in self.datastore.data['watching'].keys():
                self.update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))
            return {'status': "OK"}, 200

        return list, 200


class Import(Resource):
    def __init__(self, **kwargs):
        # datastore is a black box dependency
        self.datastore = kwargs['datastore']

    @auth.check_token
    def post(self):
        """
        @api {post} /api/v1/import Import a list of watched URLs
        @apiDescription Accepts a line-feed separated list of URLs to import (one URL per line), additionally with ?tag_uuids=(tag id), ?tag=(name), ?proxy={key}, ?dedupe=true (default true)
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/import --data-binary @list-of-sites.txt -H"x-api-key:8a111a21bc2f8f1dd9b9353bbd46049a"
        @apiName Import
        @apiGroup Watch
        @apiSuccess (200) {List} OK List of watch UUIDs added
        @apiSuccess (500) {String} ERR Some other error
        """

        extras = {}

        if request.args.get('proxy'):
            plist = self.datastore.proxy_list
            if request.args.get('proxy') not in plist:
                return "Invalid proxy choice, currently supported proxies are '{}'".format(', '.join(plist)), 400
            else:
                extras['proxy'] = request.args.get('proxy')

        dedupe = strtobool(request.args.get('dedupe', 'true'))

        tags = request.args.get('tag')
        tag_uuids = request.args.get('tag_uuids')

        if tag_uuids:
            tag_uuids = tag_uuids.split(',')

        urls = request.get_data().decode('utf8').splitlines()
        added = []
        allow_simplehost = not strtobool(os.getenv('BLOCK_SIMPLEHOSTS', 'False'))
        for url in urls:
            url = url.strip()
            if not len(url):
                continue

            # If hosts that only contain alphanumerics are allowed ("localhost" for example)
            if not validators.url(url, simple_host=allow_simplehost):
                return f"Invalid or unsupported URL - {url}", 400

            if dedupe and self.datastore.url_exists(url):
                continue

            new_uuid = self.datastore.add_watch(url=url, extras=extras, tag=tags, tag_uuids=tag_uuids)
            added.append(new_uuid)

        return added
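
A sketch of driving the import endpoint from Python (placeholder key; URLs are illustrative). The request body is plain text with one URL per line, while the tag, proxy and dedupe options travel in the query string:

# Sketch only: bulk-importing URLs via the endpoint above.
import requests

urls = "https://example.com/a\nhttps://example.com/b\n"
r = requests.post(
    "http://localhost:5000/api/v1/import",
    params={"tag": "imported", "dedupe": "true"},
    data=urls.encode("utf8"),
    headers={"x-api-key": "8a111a21bc2f8f1dd9b9353bbd46049a"},
)
print(r.json())  # list of newly created watch UUIDs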


class SystemInfo(Resource):
    def __init__(self, **kwargs):
        # datastore is a black box dependency
        self.datastore = kwargs['datastore']
        self.update_q = kwargs['update_q']

    @auth.check_token
    def get(self):
        """
        @api {get} /api/v1/systeminfo Return system info
        @apiDescription Return some info about the current system state
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/systeminfo -H"x-api-key:813031b16330fe25e3780cf0325daa45"
            HTTP/1.0 200
            {
                'queue_size': 10 ,
                'overdue_watches': ["watch-uuid-list"],
                'uptime': 38344.55,
                'watch_count': 800,
                'version': "0.40.1"
            }
        @apiName Get Info
        @apiGroup System Information
        """
        import time
        overdue_watches = []

        # Check all watches and report which have not been checked but should have been

        for uuid, watch in self.datastore.data.get('watching', {}).items():
            # Check whether the time since the last check exceeds the interval that should have applied;
            # this is not super accurate (maybe they just edited it) but better than nothing
            t = watch.threshold_seconds()
            if not t:
                # Use the system wide default
                t = self.datastore.threshold_seconds

            time_since_check = time.time() - watch.get('last_checked')

            # Allow 5 minutes of grace time before we decide it's overdue
            if time_since_check - (5 * 60) > t:
                overdue_watches.append(uuid)
        from changedetectionio import __version__ as main_version
        return {
            'queue_size': self.update_q.qsize(),
            'overdue_watches': overdue_watches,
            'uptime': round(time.time() - self.datastore.start_time, 2),
            'watch_count': len(self.datastore.data.get('watching', {})),
            'version': main_version
        }, 200
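
The overdue test above is easier to see with numbers. With a one-hour interval (t = 3600), the five-minute grace window means a watch only counts as overdue once more than 3900 seconds have passed since its last check:

# Worked example of the overdue calculation above.
t = 3600  # check interval in seconds
for time_since_check in (3800, 4000):
    # 3800 - 300 = 3500 -> not overdue; 4000 - 300 = 3700 -> overdue
    print(time_since_check, time_since_check - (5 * 60) > t)
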
@@ -11,22 +11,14 @@ def check_token(f):
        datastore = args[0].datastore

        config_api_token_enabled = datastore.data['settings']['application'].get('api_access_token_enabled')
        if not config_api_token_enabled:
            return

        try:
            api_key_header = request.headers['x-api-key']
        except KeyError:
            return make_response(
                jsonify("No authorization x-api-key header."), 403
            )

        config_api_token = datastore.data['settings']['application'].get('api_access_token')

        if api_key_header != config_api_token:
            return make_response(
                jsonify("Invalid access - API key invalid."), 403
            )
        # config_api_token_enabled - a UI option in settings if access should obey the key or not
        if config_api_token_enabled:
            if request.headers.get('x-api-key') != config_api_token:
                return make_response(
                    jsonify("Invalid access - API key invalid."), 403
                )

        return f(*args, **kwargs)

@@ -1,12 +0,0 @@
from changedetectionio import apprise_plugin
import apprise

# Create our AppriseAsset and populate it with some of our new values:
# https://github.com/caronc/apprise/wiki/Development_API#the-apprise-asset-object
asset = apprise.AppriseAsset(
    image_url_logo='https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/changedetectionio/static/images/avatar-256x256.png'
)

asset.app_id = "changedetection.io"
asset.app_desc = "ChangeDetection.io best and simplest website monitoring and change detection"
asset.app_url = "https://changedetection.io"
@@ -1,98 +0,0 @@
# include the decorator
from apprise.decorators import notify
from loguru import logger
from requests.structures import CaseInsensitiveDict


@notify(on="delete")
@notify(on="deletes")
@notify(on="get")
@notify(on="gets")
@notify(on="post")
@notify(on="posts")
@notify(on="put")
@notify(on="puts")
def apprise_custom_api_call_wrapper(body, title, notify_type, *args, **kwargs):
    import requests
    import json
    import re

    from urllib.parse import unquote_plus
    from apprise.utils.parse import parse_url as apprise_parse_url

    url = kwargs['meta'].get('url')
    schema = kwargs['meta'].get('schema').lower().strip()

    # Choose POST, GET etc from requests
    method = re.sub(r's$', '', schema)
    requests_method = getattr(requests, method)

    params = CaseInsensitiveDict({})  # Added to requests
    auth = None
    has_error = False

    # Convert /foobar?+some-header=hello to proper header dictionary
    results = apprise_parse_url(url)

    # Add our headers that the user can potentially over-ride if they wish
    # to our returned result set and tidy entries by unquoting them
    headers = CaseInsensitiveDict({unquote_plus(x): unquote_plus(y)
                                   for x, y in results['qsd+'].items()})

    # https://github.com/caronc/apprise/wiki/Notify_Custom_JSON#get-parameter-manipulation
    # In Apprise, each request arg is prefixed with "-" because flags such as &method=update have meaning to Apprise itself,
    # but here we are making straight requests, so we need to convert this against Apprise's logic
    for k, v in results['qsd'].items():
        if k.strip('+-') not in results['qsd+'].keys():
            params[unquote_plus(k)] = unquote_plus(v)

    # Determine Authentication
    auth = ''
    if results.get('user') and results.get('password'):
        auth = (unquote_plus(results.get('user')), unquote_plus(results.get('password')))
    elif results.get('user'):
        auth = (unquote_plus(results.get('user')))

    # If it smells like it could be JSON and no content-type was already set, offer a default content type.
    if body and '{' in body[:100] and not headers.get('Content-Type'):
        json_header = 'application/json; charset=utf-8'
        try:
            # Try if it's JSON
            json.loads(body)
            headers['Content-Type'] = json_header
        except ValueError as e:
            logger.warning(f"Could not automatically add '{json_header}' header to the notification because the document failed to parse as JSON: {e}")
            pass

    # POSTS -> HTTPS etc
    if schema.lower().endswith('s'):
        url = re.sub(rf'^{schema}', 'https', results.get('url'))
    else:
        url = re.sub(rf'^{schema}', 'http', results.get('url'))

    status_str = ''
    try:
        r = requests_method(url,
                            auth=auth,
                            data=body.encode('utf-8') if type(body) is str else body,
                            headers=headers,
                            params=params
                            )

        if not (200 <= r.status_code < 300):
            status_str = f"Error sending '{method.upper()}' request to {url} - Status: {r.status_code}: '{r.reason}'"
            logger.error(status_str)
            has_error = True
        else:
            logger.info(f"Sent '{method.upper()}' request to {url}")
            has_error = False

    except requests.RequestException as e:
        status_str = f"Error sending '{method.upper()}' request to {url} - {str(e)}"
        logger.error(status_str)
        has_error = True

    if has_error:
        raise TypeError(status_str)

    return True
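
Putting the URL-parsing rules above together, a worked example of how one custom notification URL maps onto the final request (all values are illustrative):

# posts://hooks.example.com/notify?+x-api-key=secret&verbose=1
# is handled by the wrapper above as:
#   method  = 'post'   ('posts' with the trailing 's' stripped; the 's' selects HTTPS)
#   url     = 'https://hooks.example.com/notify'
#   headers = {'x-api-key': 'secret'}   # '+'-prefixed query args become request headers
#   params  = {'verbose': '1'}          # remaining query args stay as query parameters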

496 changedetectionio/async_update_worker.py Normal file
@@ -0,0 +1,496 @@
from .processors.exceptions import ProcessorException
import changedetectionio.content_fetchers.exceptions as content_fetchers_exceptions
from changedetectionio.processors.text_json_diff.processor import FilterNotFoundInResponse
from changedetectionio import html_tools
from changedetectionio.flask_app import watch_check_update

import asyncio
import importlib
import os
import queue
import time

from loguru import logger

# Async version of update_worker
# Processes jobs from AsyncSignalPriorityQueue instead of threaded queue

async def async_update_worker(worker_id, q, notification_q, app, datastore):
    """
    Async worker function that processes watch check jobs from the queue.

    Args:
        worker_id: Unique identifier for this worker
        q: AsyncSignalPriorityQueue containing jobs to process
        notification_q: Standard queue for notifications
        app: Flask application instance
        datastore: Application datastore
    """
    # Set a descriptive name for this task
    task = asyncio.current_task()
    if task:
        task.set_name(f"async-worker-{worker_id}")

    logger.info(f"Starting async worker {worker_id}")

    while not app.config.exit.is_set():
        update_handler = None
        watch = None

        try:
            # Use native janus async interface - no threads needed!
            queued_item_data = await asyncio.wait_for(q.async_get(), timeout=1.0)

        except asyncio.TimeoutError:
            # No jobs available, continue loop
            continue
        except Exception as e:
            logger.critical(f"CRITICAL: Worker {worker_id} failed to get queue item: {type(e).__name__}: {e}")

            # Log queue health for debugging
            try:
                queue_size = q.qsize()
                is_empty = q.empty()
                logger.critical(f"CRITICAL: Worker {worker_id} queue health - size: {queue_size}, empty: {is_empty}")
            except Exception as health_e:
                logger.critical(f"CRITICAL: Worker {worker_id} queue health check failed: {health_e}")

            await asyncio.sleep(0.1)
            continue

        uuid = queued_item_data.item.get('uuid')
        fetch_start_time = round(time.time())

        # Mark this UUID as being processed
        from changedetectionio import worker_handler
        worker_handler.set_uuid_processing(uuid, processing=True)

        try:
            if uuid in list(datastore.data['watching'].keys()) and datastore.data['watching'][uuid].get('url'):
                changed_detected = False
                contents = b''
                process_changedetection_results = True
                update_obj = {}

                # Clear last errors
                datastore.data['watching'][uuid]['browser_steps_last_error_step'] = None
                datastore.data['watching'][uuid]['last_checked'] = fetch_start_time

                watch = datastore.data['watching'].get(uuid)

                logger.info(f"Worker {worker_id} processing watch UUID {uuid} Priority {queued_item_data.priority} URL {watch['url']}")

                try:
                    watch_check_update.send(watch_uuid=uuid)

                    # Processor is what we are using for detecting the "Change"
                    processor = watch.get('processor', 'text_json_diff')

                    # Init a new 'difference_detection_processor'
                    processor_module_name = f"changedetectionio.processors.{processor}.processor"
                    try:
                        processor_module = importlib.import_module(processor_module_name)
                    except ModuleNotFoundError as e:
                        logger.error(f"Processor module '{processor}' not found.")
                        raise e

                    update_handler = processor_module.perform_site_check(datastore=datastore,
                                                                         watch_uuid=uuid)

                    # All fetchers are now async, so call directly
                    await update_handler.call_browser()

                    # Run change detection (this is synchronous)
                    changed_detected, update_obj, contents = update_handler.run_changedetection(watch=watch)

                except PermissionError as e:
                    logger.critical(f"File permission error updating file, watch: {uuid}")
                    logger.critical(str(e))
                    process_changedetection_results = False

                except ProcessorException as e:
                    if e.screenshot:
                        watch.save_screenshot(screenshot=e.screenshot)
                    if e.xpath_data:
                        watch.save_xpath_data(data=e.xpath_data)
                    datastore.update_watch(uuid=uuid, update_obj={'last_error': e.message})
                    process_changedetection_results = False

                except content_fetchers_exceptions.ReplyWithContentButNoText as e:
                    extra_help = ""
                    if e.has_filters:
                        has_img = html_tools.include_filters(include_filters='img',
                                                             html_content=e.html_content)
                        if has_img:
                            extra_help = ", it's possible that the filters you have give an empty result or contain only an image."
                        else:
                            extra_help = ", it's possible that the filters were found, but contained no usable text."

                    datastore.update_watch(uuid=uuid, update_obj={
                        'last_error': f"Got HTML content but no text found (With {e.status_code} reply code){extra_help}"
                    })

                    if e.screenshot:
                        watch.save_screenshot(screenshot=e.screenshot, as_error=True)

                    if e.xpath_data:
                        watch.save_xpath_data(data=e.xpath_data)

                    process_changedetection_results = False

                except content_fetchers_exceptions.Non200ErrorCodeReceived as e:
                    if e.status_code == 403:
                        err_text = "Error - 403 (Access denied) received"
                    elif e.status_code == 404:
                        err_text = "Error - 404 (Page not found) received"
                    elif e.status_code == 407:
                        err_text = "Error - 407 (Proxy authentication required) received, did you need a username and password for the proxy?"
                    elif e.status_code == 500:
                        err_text = "Error - 500 (Internal server error) received from the web site"
                    else:
                        extra = ' (Access denied or blocked)' if str(e.status_code).startswith('4') else ''
                        err_text = f"Error - Request returned a HTTP error code {e.status_code}{extra}"

                    if e.screenshot:
                        watch.save_screenshot(screenshot=e.screenshot, as_error=True)
                    if e.xpath_data:
                        watch.save_xpath_data(data=e.xpath_data, as_error=True)
                    if e.page_text:
                        watch.save_error_text(contents=e.page_text)

                    datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text})
                    process_changedetection_results = False

                except FilterNotFoundInResponse as e:
                    if not datastore.data['watching'].get(uuid):
                        continue

                    err_text = "Warning, no filters were found, no change detection ran - Did the page change layout? Update your Visual Filter if necessary."
                    datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text})

                    # Filter wasn't found, but we should still update the visual selector so that they can have a chance to set it up again
                    if e.screenshot:
                        watch.save_screenshot(screenshot=e.screenshot)

                    if e.xpath_data:
                        watch.save_xpath_data(data=e.xpath_data)

                    # Only when enabled, send the notification
                    if watch.get('filter_failure_notification_send', False):
                        c = watch.get('consecutive_filter_failures', 0)
                        c += 1
                        # Send notification if we reached the threshold?
                        threshold = datastore.data['settings']['application'].get('filter_failure_notification_threshold_attempts', 0)
                        logger.debug(f"Filter for {uuid} not found, consecutive_filter_failures: {c} of threshold {threshold}")
                        if c >= threshold:
                            if not watch.get('notification_muted'):
                                logger.debug(f"Sending filter failed notification for {uuid}")
                                await send_filter_failure_notification(uuid, notification_q, datastore)
                            c = 0
                            logger.debug("Reset filter failure count back to zero")

                        datastore.update_watch(uuid=uuid, update_obj={'consecutive_filter_failures': c})
                    else:
                        logger.trace(f"{uuid} - filter_failure_notification_send not enabled, skipping")

                    process_changedetection_results = False

                except content_fetchers_exceptions.checksumFromPreviousCheckWasTheSame as e:
                    # Yes fine, so nothing to do, don't continue to process.
                    process_changedetection_results = False
                    changed_detected = False

                except content_fetchers_exceptions.BrowserConnectError as e:
                    datastore.update_watch(uuid=uuid,
                                           update_obj={'last_error': e.msg})
                    process_changedetection_results = False

                except content_fetchers_exceptions.BrowserFetchTimedOut as e:
                    datastore.update_watch(uuid=uuid,
                                           update_obj={'last_error': e.msg})
                    process_changedetection_results = False

                except content_fetchers_exceptions.BrowserStepsStepException as e:
                    if not datastore.data['watching'].get(uuid):
                        continue

                    error_step = e.step_n + 1
                    from playwright._impl._errors import TimeoutError, Error

                    # Generally enough info for TimeoutError (couldn't locate the element after default seconds)
                    err_text = f"Browser step at position {error_step} could not run, check the watch, add a delay if necessary, view Browser Steps to see screenshot at that step."

                    if e.original_e.name == "TimeoutError":
                        # Just the first line is enough, the rest is the stack trace
                        err_text += " Could not find the target."
                    else:
                        # Other Error, more info is good.
                        err_text += " " + str(e.original_e).splitlines()[0]

                    logger.debug(f"BrowserSteps exception at step {error_step} {str(e.original_e)}")

                    datastore.update_watch(uuid=uuid,
                                           update_obj={'last_error': err_text,
                                                       'browser_steps_last_error_step': error_step})

                    if watch.get('filter_failure_notification_send', False):
                        c = watch.get('consecutive_filter_failures', 0)
                        c += 1
                        # Send notification if we reached the threshold?
                        threshold = datastore.data['settings']['application'].get('filter_failure_notification_threshold_attempts', 0)
                        logger.error(f"Step for {uuid} not found, consecutive_filter_failures: {c}")
                        if threshold > 0 and c >= threshold:
                            if not watch.get('notification_muted'):
                                await send_step_failure_notification(watch_uuid=uuid, step_n=e.step_n, notification_q=notification_q, datastore=datastore)
                            c = 0

                        datastore.update_watch(uuid=uuid, update_obj={'consecutive_filter_failures': c})

                    process_changedetection_results = False

                except content_fetchers_exceptions.EmptyReply as e:
                    # Some kind of custom to-str handler in the exception handler that does this?
                    err_text = "EmptyReply - try increasing 'Wait seconds before extracting text', Status Code {}".format(e.status_code)
                    datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text,
                                                                  'last_check_status': e.status_code})
                    process_changedetection_results = False

                except content_fetchers_exceptions.ScreenshotUnavailable as e:
                    err_text = "Screenshot unavailable, page did not render fully in the expected time or page was too long - try increasing 'Wait seconds before extracting text'"
                    datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text,
                                                                  'last_check_status': e.status_code})
                    process_changedetection_results = False

                except content_fetchers_exceptions.JSActionExceptions as e:
                    err_text = "Error running JS Actions - Page request - " + e.message
                    if e.screenshot:
                        watch.save_screenshot(screenshot=e.screenshot, as_error=True)
                    datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text,
                                                                  'last_check_status': e.status_code})
                    process_changedetection_results = False

                except content_fetchers_exceptions.PageUnloadable as e:
                    err_text = "Page request from server didn't respond correctly"
                    if e.message:
                        err_text = "{} - {}".format(err_text, e.message)

                    if e.screenshot:
                        watch.save_screenshot(screenshot=e.screenshot, as_error=True)

                    datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text,
                                                                  'last_check_status': e.status_code,
                                                                  'has_ldjson_price_data': None})
                    process_changedetection_results = False

                except content_fetchers_exceptions.BrowserStepsInUnsupportedFetcher as e:
                    err_text = "This watch has Browser Steps configured and so it cannot run with the 'Basic fast Plaintext/HTTP Client', either remove the Browser Steps or select a Chrome fetcher."
                    datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text})
                    process_changedetection_results = False
                    logger.error(f"Exception (BrowserStepsInUnsupportedFetcher) reached processing watch UUID: {uuid}")

                except Exception as e:
                    logger.error(f"Worker {worker_id} exception processing watch UUID: {uuid}")
                    logger.error(str(e))
                    datastore.update_watch(uuid=uuid, update_obj={'last_error': "Exception: " + str(e)})
                    process_changedetection_results = False

                else:
                    if not datastore.data['watching'].get(uuid):
                        continue

                    update_obj['content-type'] = update_handler.fetcher.get_all_headers().get('content-type', '').lower()

                    if not watch.get('ignore_status_codes'):
                        update_obj['consecutive_filter_failures'] = 0

                    update_obj['last_error'] = False
                    cleanup_error_artifacts(uuid, datastore)

                if not datastore.data['watching'].get(uuid):
                    continue

                if process_changedetection_results:
                    try:
                        datastore.update_watch(uuid=uuid, update_obj=update_obj)

                        if changed_detected or not watch.history_n:
                            if update_handler.screenshot:
                                watch.save_screenshot(screenshot=update_handler.screenshot)

                            if update_handler.xpath_data:
                                watch.save_xpath_data(data=update_handler.xpath_data)

                            # Ensure unique timestamp for history
                            if watch.newest_history_key and int(fetch_start_time) == int(watch.newest_history_key):
                                logger.warning(f"Timestamp {fetch_start_time} already exists, waiting 1 second")
                                fetch_start_time += 1
                                await asyncio.sleep(1)

                            watch.save_history_text(contents=contents,
                                                    timestamp=int(fetch_start_time),
                                                    snapshot_id=update_obj.get('previous_md5', 'none'))

                            empty_pages_are_a_change = datastore.data['settings']['application'].get('empty_pages_are_a_change', False)
                            if update_handler.fetcher.content or (not update_handler.fetcher.content and empty_pages_are_a_change):
                                watch.save_last_fetched_html(contents=update_handler.fetcher.content, timestamp=int(fetch_start_time))

                            # Explicitly delete large content variables to free memory IMMEDIATELY after saving
                            # These are no longer needed after being saved to history
                            del contents

                            # Send notifications on second+ check
                            if watch.history_n >= 2:
                                logger.info(f"Change detected in UUID {uuid} - {watch['url']}")
                                if not watch.get('notification_muted'):
                                    await send_content_changed_notification(uuid, notification_q, datastore)

                    except Exception as e:
                        logger.critical(f"Worker {worker_id} exception in process_changedetection_results")
                        logger.critical(str(e))
                        datastore.update_watch(uuid=uuid, update_obj={'last_error': str(e)})

                # Always record attempt count
                count = watch.get('check_count', 0) + 1

                # Always record page title (used in notifications, and can change even when the content is the same)
                try:
                    page_title = html_tools.extract_title(data=update_handler.fetcher.content)
                    logger.debug(f"UUID: {uuid} Page <title> is '{page_title}'")
                    datastore.update_watch(uuid=uuid, update_obj={'page_title': page_title})
                except Exception as e:
                    logger.warning(f"UUID: {uuid} Exception when extracting <title> - {str(e)}")

                # Record server header
                try:
                    server_header = update_handler.fetcher.headers.get('server', '').strip().lower()[:255]
                    datastore.update_watch(uuid=uuid, update_obj={'remote_server_reply': server_header})
                except Exception as e:
                    pass

                # Store favicon if necessary
                if update_handler.fetcher.favicon_blob and update_handler.fetcher.favicon_blob.get('base64'):
                    watch.bump_favicon(url=update_handler.fetcher.favicon_blob.get('url'),
                                       favicon_base_64=update_handler.fetcher.favicon_blob.get('base64')
                                       )

                datastore.update_watch(uuid=uuid, update_obj={'fetch_time': round(time.time() - fetch_start_time, 3),
                                                              'check_count': count})

                # NOW clear fetcher content - after all processing is complete
                # This is the last point where we need the fetcher data
                if update_handler and hasattr(update_handler, 'fetcher') and update_handler.fetcher:
                    update_handler.fetcher.clear_content()
                    logger.debug(f"Cleared fetcher content for UUID {uuid}")

        except Exception as e:
            logger.error(f"Worker {worker_id} unexpected error processing {uuid}: {e}")
            logger.error(f"Worker {worker_id} traceback:", exc_info=True)

            # Also update the watch with error information
            if datastore and uuid in datastore.data['watching']:
                datastore.update_watch(uuid=uuid, update_obj={'last_error': f"Worker error: {str(e)}"})

        finally:
            # Always cleanup - this runs whether there was an exception or not
            if uuid:
                try:
                    # Mark UUID as no longer being processed
                    worker_handler.set_uuid_processing(uuid, processing=False)

                    # Send completion signal
                    if watch:
                        #logger.info(f"Worker {worker_id} sending completion signal for UUID {watch['uuid']}")
                        watch_check_update.send(watch_uuid=watch['uuid'])

                    # Explicitly clean up update_handler and all its references
                    if update_handler:
                        # Clear fetcher content using the proper method
                        if hasattr(update_handler, 'fetcher') and update_handler.fetcher:
                            update_handler.fetcher.clear_content()

                        # Clear processor references
                        if hasattr(update_handler, 'content_processor'):
                            update_handler.content_processor = None

                        update_handler = None

                    # Clear local contents variable if it still exists
                    if 'contents' in locals():
                        del contents

                    # Note: We don't set watch = None here because:
                    # 1. watch is just a local reference to datastore.data['watching'][uuid]
                    # 2. Setting it to None doesn't affect the datastore
                    # 3. GC can't collect the object anyway (still referenced by datastore)
                    # 4. It would just cause confusion

                    logger.debug(f"Worker {worker_id} completed watch {uuid} in {time.time()-fetch_start_time:.2f}s")
                except Exception as cleanup_error:
                    logger.error(f"Worker {worker_id} error during cleanup: {cleanup_error}")

            # Brief pause before continuing to avoid tight error loops (only on error)
            if 'e' in locals():
                await asyncio.sleep(1.0)
            else:
                # Small yield for normal completion
                await asyncio.sleep(0.01)

            # Check if we should exit
            if app.config.exit.is_set():
                break

    # Check if we're in pytest environment - if so, be more gentle with logging
    import sys
    in_pytest = "pytest" in sys.modules or "PYTEST_CURRENT_TEST" in os.environ

    if not in_pytest:
        logger.info(f"Worker {worker_id} shutting down")


def cleanup_error_artifacts(uuid, datastore):
    """Helper function to clean up error artifacts"""
    cleanup_files = ["last-error-screenshot.png", "last-error.txt"]
    for f in cleanup_files:
        full_path = os.path.join(datastore.datastore_path, uuid, f)
        if os.path.isfile(full_path):
            os.unlink(full_path)


async def send_content_changed_notification(watch_uuid, notification_q, datastore):
    """Helper function to queue notifications using the new notification service"""
    try:
        from changedetectionio.notification_service import create_notification_service

        # Create notification service instance
        notification_service = create_notification_service(datastore, notification_q)

        notification_service.send_content_changed_notification(watch_uuid)
    except Exception as e:
        logger.error(f"Error sending notification for {watch_uuid}: {e}")


async def send_filter_failure_notification(watch_uuid, notification_q, datastore):
    """Helper function to send filter failure notifications using the new notification service"""
    try:
        from changedetectionio.notification_service import create_notification_service

        # Create notification service instance
        notification_service = create_notification_service(datastore, notification_q)

        notification_service.send_filter_failure_notification(watch_uuid)
    except Exception as e:
        logger.error(f"Error sending filter failure notification for {watch_uuid}: {e}")


async def send_step_failure_notification(watch_uuid, step_n, notification_q, datastore):
    """Helper function to send step failure notifications using the new notification service"""
    try:
        from changedetectionio.notification_service import create_notification_service

        # Create notification service instance
        notification_service = create_notification_service(datastore, notification_q)

        notification_service.send_step_failure_notification(watch_uuid, step_n)
    except Exception as e:
        logger.error(f"Error sending step failure notification for {watch_uuid}: {e}")

33 changedetectionio/auth_decorator.py Normal file
@@ -0,0 +1,33 @@
import os
from functools import wraps
from flask import current_app, redirect, request
from loguru import logger

def login_optionally_required(func):
    """
    If password authentication is enabled, verify the user is logged in.
    To be used as a decorator for routes that should optionally require login.
    This version is blueprint-friendly as it uses current_app instead of directly accessing app.
    """
    @wraps(func)
    def decorated_view(*args, **kwargs):
        import flask_login
        from flask_login import current_user

        # Access datastore through the app config
        datastore = current_app.config['DATASTORE']
        has_password_enabled = datastore.data['settings']['application'].get('password') or os.getenv("SALTED_PASS", False)

        # Permitted
        if request.endpoint and 'diff_history_page' in request.endpoint and datastore.data['settings']['application'].get('shared_diff_access'):
            return func(*args, **kwargs)
        elif request.method in flask_login.config.EXEMPT_METHODS:
            return func(*args, **kwargs)
        elif current_app.config.get('LOGIN_DISABLED'):
            return func(*args, **kwargs)
        elif has_password_enabled and not current_user.is_authenticated:
            return current_app.login_manager.unauthorized()

        return func(*args, **kwargs)
    return decorated_view
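
A sketch of applying the decorator above to a route; the blueprint and endpoint are hypothetical. If a password is configured, anonymous users are bounced to the login handler, otherwise the view runs as normal:

# Sketch only: typical usage of login_optionally_required.
from flask import Blueprint

demo_blueprint = Blueprint('demo', __name__)

@demo_blueprint.route("/settings-page")
@login_optionally_required
def settings_page():
    return "only shown to logged-in users when a password is set"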
@@ -138,7 +138,7 @@ def construct_blueprint(datastore: ChangeDetectionStore):
        return send_from_directory(os.path.abspath(datastore.datastore_path), filename, as_attachment=True)

    @login_optionally_required
    @backups_blueprint.route("/", methods=['GET'])
    @backups_blueprint.route("", methods=['GET'])
    def index():
        backups = find_backups()
        output = render_template("overview.html",

@@ -22,45 +22,56 @@ from loguru import logger

browsersteps_sessions = {}
io_interface_context = None
import json
import hashlib
from flask import Response
import asyncio
import threading

def run_async_in_browser_loop(coro):
    """Run async coroutine using the existing async worker event loop"""
    from changedetectionio import worker_handler

    # Use the existing async worker event loop instead of creating a new one
    if worker_handler.USE_ASYNC_WORKERS and worker_handler.async_loop and not worker_handler.async_loop.is_closed():
        logger.debug("Browser steps using existing async worker event loop")
        future = asyncio.run_coroutine_threadsafe(coro, worker_handler.async_loop)
        return future.result()
    else:
        # Fallback: create a new event loop (for sync workers or if async loop not available)
        logger.debug("Browser steps creating temporary event loop")
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            return loop.run_until_complete(coro)
        finally:
            loop.close()
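
Note that `run_coroutine_threadsafe` returns a `concurrent.futures.Future`, so `.result()` blocks the calling Flask request thread until the coroutine finishes on the worker loop. A sketch of what callers look like; `demo_coro` is illustrative:

# Sketch only: bridging any coroutine from synchronous Flask handlers
# into the shared worker event loop.
async def demo_coro():
    return "done"

result = run_async_in_browser_loop(demo_coro())  # blocks until the coroutine completes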
|
||||
|
||||
def construct_blueprint(datastore: ChangeDetectionStore):
|
||||
browser_steps_blueprint = Blueprint('browser_steps', __name__, template_folder="templates")
|
||||
|
||||
def start_browsersteps_session(watch_uuid):
|
||||
from . import nonContext
|
||||
async def start_browsersteps_session(watch_uuid):
|
||||
from . import browser_steps
|
||||
import time
|
||||
global browsersteps_sessions
|
||||
global io_interface_context
|
||||
|
||||
from playwright.async_api import async_playwright
|
||||
|
||||
# We keep the playwright session open for many minutes
|
||||
keepalive_seconds = int(os.getenv('BROWSERSTEPS_MINUTES_KEEPALIVE', 10)) * 60
|
||||
|
||||
browsersteps_start_session = {'start_time': time.time()}
|
||||
|
||||
# You can only have one of these running
|
||||
# This should be very fine to leave running for the life of the application
|
||||
# @idea - Make it global so the pool of watch fetchers can use it also
|
||||
if not io_interface_context:
|
||||
io_interface_context = nonContext.c_sync_playwright()
|
||||
# Start the Playwright context, which is actually a nodejs sub-process and communicates over STDIN/STDOUT pipes
|
||||
io_interface_context = io_interface_context.start()
|
||||
# Create a new async playwright instance for browser steps
|
||||
playwright_instance = async_playwright()
|
||||
playwright_context = await playwright_instance.start()
|
||||
|
||||
keepalive_ms = ((keepalive_seconds + 3) * 1000)
|
||||
base_url = os.getenv('PLAYWRIGHT_DRIVER_URL', '').strip('"')
|
||||
a = "?" if not '?' in base_url else '&'
|
||||
base_url += a + f"timeout={keepalive_ms}"
|
||||
|
||||
try:
|
||||
browsersteps_start_session['browser'] = io_interface_context.chromium.connect_over_cdp(base_url)
|
||||
except Exception as e:
|
||||
if 'ECONNREFUSED' in str(e):
|
||||
return make_response('Unable to start the Playwright Browser session, is it running?', 401)
|
||||
else:
|
||||
# Other errors, bad URL syntax, bad reply etc
|
||||
return make_response(str(e), 401)
|
||||
browser = await playwright_context.chromium.connect_over_cdp(base_url, timeout=keepalive_ms)
|
||||
browsersteps_start_session['browser'] = browser
|
||||
browsersteps_start_session['playwright_context'] = playwright_context
|
||||
|
||||
proxy_id = datastore.get_preferred_proxy_for_watch(uuid=watch_uuid)
|
||||
proxy = None
|
||||
@@ -82,15 +93,20 @@ def construct_blueprint(datastore: ChangeDetectionStore):
|
||||
logger.debug(f"Browser Steps: UUID {watch_uuid} selected proxy {proxy_url}")
|
||||
|
||||
# Tell Playwright to connect to Chrome and setup a new session via our stepper interface
|
||||
browsersteps_start_session['browserstepper'] = browser_steps.browsersteps_live_ui(
|
||||
playwright_browser=browsersteps_start_session['browser'],
|
||||
browserstepper = browser_steps.browsersteps_live_ui(
|
||||
playwright_browser=browser,
|
||||
proxy=proxy,
|
||||
start_url=datastore.data['watching'][watch_uuid].get('url'),
|
||||
start_url=datastore.data['watching'][watch_uuid].link,
|
||||
headers=datastore.data['watching'][watch_uuid].get('headers')
|
||||
)
|
||||
|
||||
# Initialize the async connection
|
||||
await browserstepper.connect(proxy=proxy)
|
||||
|
||||
browsersteps_start_session['browserstepper'] = browserstepper
|
||||
|
||||
# For test
|
||||
#browsersteps_start_session['browserstepper'].action_goto_url(value="http://example.com?time="+str(time.time()))
|
||||
#await browsersteps_start_session['browserstepper'].action_goto_url(value="http://example.com?time="+str(time.time()))
|
||||
|
||||
return browsersteps_start_session
|
||||
|
||||
@@ -99,10 +115,8 @@ def construct_blueprint(datastore: ChangeDetectionStore):
|
||||
@browser_steps_blueprint.route("/browsersteps_start_session", methods=['GET'])
|
||||
def browsersteps_start_session():
|
||||
# A new session was requested, return sessionID
|
||||
|
||||
import asyncio
|
||||
import uuid
|
||||
global browsersteps_sessions
|
||||
|
||||
browsersteps_session_id = str(uuid.uuid4())
|
||||
watch_uuid = request.args.get('uuid')
|
||||
|
||||
@@ -111,7 +125,19 @@ def construct_blueprint(datastore: ChangeDetectionStore):
|
||||
|
||||
logger.debug("Starting connection with playwright")
|
||||
logger.debug("browser_steps.py connecting")
|
||||
browsersteps_sessions[browsersteps_session_id] = start_browsersteps_session(watch_uuid)
|
||||
|
||||
try:
|
||||
# Run the async function in the dedicated browser steps event loop
|
||||
browsersteps_sessions[browsersteps_session_id] = run_async_in_browser_loop(
|
||||
start_browsersteps_session(watch_uuid)
|
||||
)
|
||||
except Exception as e:
|
||||
if 'ECONNREFUSED' in str(e):
|
||||
return make_response('Unable to start the Playwright Browser session, is sockpuppetbrowser running? Network configuration is OK?', 401)
|
||||
else:
|
||||
# Other errors, bad URL syntax, bad reply etc
|
||||
return make_response(str(e), 401)
|
||||
|
||||
logger.debug("Starting connection with playwright - done")
|
||||
return {'browsersteps_session_id': browsersteps_session_id}
|
||||
|
||||
@@ -146,7 +172,6 @@ def construct_blueprint(datastore: ChangeDetectionStore):
|
||||
def browsersteps_ui_update():
|
||||
import base64
|
||||
import playwright._impl._errors
|
||||
global browsersteps_sessions
|
||||
from changedetectionio.blueprint.browser_steps import browser_steps
|
||||
|
||||
remaining =0
|
||||
@@ -160,38 +185,30 @@ def construct_blueprint(datastore: ChangeDetectionStore):
|
||||
if not browsersteps_sessions.get(browsersteps_session_id):
|
||||
return make_response('No session exists under that ID', 500)
|
||||
|
||||
|
||||
is_last_step = False
|
||||
# Actions - step/apply/etc, do the thing and return state
|
||||
if request.method == 'POST':
|
||||
# @todo - should always be an existing session
|
||||
step_operation = request.form.get('operation')
|
||||
step_selector = request.form.get('selector')
|
||||
step_optional_value = request.form.get('optional_value')
|
||||
step_n = int(request.form.get('step_n'))
|
||||
is_last_step = strtobool(request.form.get('is_last_step'))
|
||||
|
||||
# @todo try.. accept.. nice errors not popups..
|
||||
try:
|
||||
|
||||
browsersteps_sessions[browsersteps_session_id]['browserstepper'].call_action(action_name=step_operation,
|
||||
selector=step_selector,
|
||||
optional_value=step_optional_value)
|
||||
# Run the async call_action method in the dedicated browser steps event loop
|
||||
run_async_in_browser_loop(
|
||||
browsersteps_sessions[browsersteps_session_id]['browserstepper'].call_action(
|
||||
action_name=step_operation,
|
||||
selector=step_selector,
|
||||
optional_value=step_optional_value
|
||||
)
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Exception when calling step operation {step_operation} {str(e)}")
|
||||
# Try to find something of value to give back to the user
|
||||
return make_response(str(e).splitlines()[0], 401)
|
||||
|
||||
# Get visual selector ready/update its data (also use the current filter info from the page?)
|
||||
# When the last 'apply' button was pressed
|
||||
# @todo this adds overhead because the xpath selection is happening twice
|
||||
u = browsersteps_sessions[browsersteps_session_id]['browserstepper'].page.url
|
||||
if is_last_step and u:
|
||||
(screenshot, xpath_data) = browsersteps_sessions[browsersteps_session_id]['browserstepper'].request_visualselector_data()
|
||||
watch = datastore.data['watching'].get(uuid)
|
||||
if watch:
|
||||
watch.save_screenshot(screenshot=screenshot)
|
||||
watch.save_xpath_data(data=xpath_data)
|
||||
|
||||
# if not this_session.page:
|
||||
# cleanup_playwright_session()
|
||||
@@ -199,31 +216,36 @@ def construct_blueprint(datastore: ChangeDetectionStore):
|
||||
|
||||
# Screenshots and other info only needed on requesting a step (POST)
|
||||
try:
|
||||
state = browsersteps_sessions[browsersteps_session_id]['browserstepper'].get_current_state()
|
||||
except playwright._impl._api_types.Error as e:
|
||||
return make_response("Browser session ran out of time :( Please reload this page."+str(e), 401)
|
||||
# Run the async get_current_state method in the dedicated browser steps event loop
|
||||
(screenshot, xpath_data) = run_async_in_browser_loop(
|
||||
browsersteps_sessions[browsersteps_session_id]['browserstepper'].get_current_state()
|
||||
)
|
||||
|
||||
if is_last_step:
|
||||
watch = datastore.data['watching'].get(uuid)
|
||||
u = browsersteps_sessions[browsersteps_session_id]['browserstepper'].page.url
|
||||
if watch and u:
|
||||
watch.save_screenshot(screenshot=screenshot)
|
||||
watch.save_xpath_data(data=xpath_data)
|
||||
|
||||
# Use send_file() which is way faster than read/write loop on bytes
|
||||
import json
|
||||
from tempfile import mkstemp
|
||||
from flask import send_file
|
||||
tmp_fd, tmp_file = mkstemp(text=True, suffix=".json", prefix="changedetectionio-")
|
||||
except Exception as e:
|
||||
return make_response(f"Error fetching screenshot and element data - {str(e)}", 401)
|
||||
|
||||
output = json.dumps({'screenshot': "data:image/jpeg;base64,{}".format(
|
||||
base64.b64encode(state[0]).decode('ascii')),
|
||||
'xpath_data': state[1],
|
||||
'session_age_start': browsersteps_sessions[browsersteps_session_id]['browserstepper'].age_start,
|
||||
'browser_time_remaining': round(remaining)
|
||||
})
|
||||
# SEND THIS BACK TO THE BROWSER
|
||||
output = {
|
||||
"screenshot": f"data:image/jpeg;base64,{base64.b64encode(screenshot).decode('ascii')}",
|
||||
"xpath_data": xpath_data,
|
||||
"session_age_start": browsersteps_sessions[browsersteps_session_id]['browserstepper'].age_start,
|
||||
"browser_time_remaining": round(remaining)
|
||||
}
|
||||
json_data = json.dumps(output)
|
||||
|
||||
with os.fdopen(tmp_fd, 'w') as f:
|
||||
f.write(output)
|
||||
# Generate an ETag (hash of the response body)
|
||||
etag_hash = hashlib.md5(json_data.encode('utf-8')).hexdigest()
|
||||
|
||||
response = make_response(send_file(path_or_file=tmp_file,
|
||||
mimetype='application/json; charset=UTF-8',
|
||||
etag=True))
|
||||
# No longer needed
|
||||
os.unlink(tmp_file)
|
||||
# Create the response with ETag
|
||||
response = Response(json_data, mimetype="application/json; charset=UTF-8")
|
||||
response.set_etag(etag_hash)
|
||||
|
||||
return response
|
||||
|
||||
|
||||
@@ -1,13 +1,14 @@
#!/usr/bin/env python3

import os
import time
import re
from random import randint
from loguru import logger

from changedetectionio.content_fetchers import SCREENSHOT_MAX_HEIGHT_DEFAULT
from changedetectionio.content_fetchers.base import manage_user_agent
from changedetectionio.safe_jinja import render as jinja_render
from changedetectionio.jinja2_custom import render as jinja_render


# Two flags, tell the JS which of the "Selector" or "Value" field should be enabled in the front end
# 0- off, 1- on
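# Illustration (hypothetical selector and value): the renamed jinja2_custom render()
# import above is what expands Jinja2 expressions inside step selectors/values, e.g.:
#   selector = jinja_render(template_str="//a[contains(text(), '{{ 'login'|upper }}')]")
#   optional_value = jinja_render(template_str="{% if 1 %}hello{% endif %}")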
@@ -34,6 +35,7 @@ browser_step_ui_config = {'Choose one': '0 0',
'Make all child elements visible': '1 0',
'Press Enter': '0 0',
'Select by label': '1 1',
'<select> by option text': '1 1',
'Scroll down': '0 0',
'Uncheck checkbox': '1 0',
'Wait for seconds': '0 1',
@@ -53,14 +55,17 @@ browser_step_ui_config = {'Choose one': '0 0',
class steppable_browser_interface():
page = None
start_url = None

action_timeout = 10 * 1000

def __init__(self, start_url):
self.start_url = start_url

# Convert and perform "Click Button" for example
def call_action(self, action_name, selector=None, optional_value=None):
async def call_action(self, action_name, selector=None, optional_value=None):
if self.page is None:
logger.warning("Cannot call action on None page object")
return

now = time.time()
call_action_name = re.sub('[^0-9a-zA-Z]+', '_', action_name.lower())
if call_action_name == 'choose_one':
@@ -71,136 +76,216 @@ class steppable_browser_interface():
if selector and selector.startswith('/') and not selector.startswith('//'):
selector = "xpath=" + selector

# Check if action handler exists
if not hasattr(self, "action_" + call_action_name):
logger.warning(f"Action handler for '{call_action_name}' not found")
return

action_handler = getattr(self, "action_" + call_action_name)

# Support for Jinja2 variables in the value and selector

if selector and ('{%' in selector or '{{' in selector):
selector = jinja_render(template_str=selector)

if optional_value and ('{%' in optional_value or '{{' in optional_value):
optional_value = jinja_render(template_str=optional_value)

action_handler(selector, optional_value)
self.page.wait_for_timeout(1.5 * 1000)
# Trigger click and cautiously handle potential navigation
# This means the page redirects/reloads/changes JS etc etc
if call_action_name.startswith('click_'):
try:
# Set up navigation expectation before the click (like sync version)
async with self.page.expect_event("framenavigated", timeout=3000) as navigation_info:
await action_handler(selector, optional_value)

# Check if navigation actually occurred
try:
await navigation_info.value # This waits for the navigation promise
logger.debug(f"Navigation occurred on {call_action_name}.")
except Exception:
logger.debug(f"No navigation occurred within timeout when calling {call_action_name}, that's OK, continuing.")

except Exception as e:
# If expect_event itself times out, that means no navigation occurred - that's OK
if "framenavigated" in str(e) and "exceeded" in str(e):
logger.debug(f"No navigation occurred within timeout when calling {call_action_name}, that's OK, continuing.")
else:
raise e
else:
# Some other action where a navigation is probably not expected
await action_handler(selector, optional_value)


# Safely wait for timeout
await self.page.wait_for_timeout(1.5 * 1000)
logger.debug(f"Call action done in {time.time()-now:.2f}s")

def action_goto_url(self, selector=None, value=None):
# self.page.set_viewport_size({"width": 1280, "height": 5000})
async def action_goto_url(self, selector=None, value=None):
if not value:
logger.warning("No URL provided for goto_url action")
return None

now = time.time()
response = self.page.goto(value, timeout=0, wait_until='load')
# Should be the same as the puppeteer_fetch.js methods, means, load with no timeout set (skip timeout)
#and also wait for seconds ?
#await page.waitForTimeout(1000);
#await page.waitForTimeout(extra_wait_ms);
response = await self.page.goto(value, timeout=0, wait_until='load')
logger.debug(f"Time to goto URL {time.time()-now:.2f}s")
return response

# Incase they request to go back to the start
def action_goto_site(self, selector=None, value=None):
return self.action_goto_url(value=self.start_url)
async def action_goto_site(self, selector=None, value=None):
return await self.action_goto_url(value=re.sub(r'^source:', '', self.start_url, flags=re.IGNORECASE))

def action_click_element_containing_text(self, selector=None, value=''):
async def action_click_element_containing_text(self, selector=None, value=''):
logger.debug("Clicking element containing text")
if not len(value.strip()):
if not value or not len(value.strip()):
return

elem = self.page.get_by_text(value)
if elem.count():
elem.first.click(delay=randint(200, 500), timeout=self.action_timeout)
if await elem.count():
await elem.first.click(delay=randint(200, 500), timeout=self.action_timeout)

def action_click_element_containing_text_if_exists(self, selector=None, value=''):

async def action_click_element_containing_text_if_exists(self, selector=None, value=''):
logger.debug("Clicking element containing text if exists")
if not len(value.strip()):
if not value or not len(value.strip()):
return

elem = self.page.get_by_text(value)
logger.debug(f"Clicking element containing text - {elem.count()} elements found")
if elem.count():
elem.first.click(delay=randint(200, 500), timeout=self.action_timeout)
else:
count = await elem.count()
logger.debug(f"Clicking element containing text - {count} elements found")
if count:
await elem.first.click(delay=randint(200, 500), timeout=self.action_timeout)


async def action_enter_text_in_field(self, selector, value):
if not selector or not len(selector.strip()):
return

def action_enter_text_in_field(self, selector, value):
if not len(selector.strip()):
return
await self.page.fill(selector, value, timeout=self.action_timeout)

self.page.fill(selector, value, timeout=self.action_timeout)
async def action_execute_js(self, selector, value):
if not value:
return None

return await self.page.evaluate(value)

def action_execute_js(self, selector, value):
response = self.page.evaluate(value)
return response

def action_click_element(self, selector, value):
async def action_click_element(self, selector, value):
logger.debug("Clicking element")
if not len(selector.strip()):
if not selector or not len(selector.strip()):
return

self.page.click(selector=selector, timeout=self.action_timeout + 20 * 1000, delay=randint(200, 500))
await self.page.click(selector=selector, timeout=self.action_timeout + 20 * 1000, delay=randint(200, 500))

def action_click_element_if_exists(self, selector, value):
async def action_click_element_if_exists(self, selector, value):
import playwright._impl._errors as _api_types
logger.debug("Clicking element if exists")
if not len(selector.strip()):
if not selector or not len(selector.strip()):
return

try:
self.page.click(selector, timeout=self.action_timeout, delay=randint(200, 500))
except _api_types.TimeoutError as e:
await self.page.click(selector, timeout=self.action_timeout, delay=randint(200, 500))
except _api_types.TimeoutError:
return
except _api_types.Error as e:
except _api_types.Error:
# Element was there, but page redrew and now its long long gone
return


def action_click_x_y(self, selector, value):
if not re.match(r'^\s?\d+\s?,\s?\d+\s?$', value):
raise Exception("'Click X,Y' step should be in the format of '100 , 90'")
async def action_click_x_y(self, selector, value):
if not value or not re.match(r'^\s?\d+\s?,\s?\d+\s?$', value):
logger.warning("'Click X,Y' step should be in the format of '100 , 90'")
return

x, y = value.strip().split(',')
x = int(float(x.strip()))
y = int(float(y.strip()))
self.page.mouse.click(x=x, y=y, delay=randint(200, 500))
try:
x, y = value.strip().split(',')
x = int(float(x.strip()))
y = int(float(y.strip()))

await self.page.mouse.click(x=x, y=y, delay=randint(200, 500))

except Exception as e:
logger.error(f"Error parsing x,y coordinates: {str(e)}")

def action_scroll_down(self, selector, value):
async def action__select_by_option_text(self, selector, value):
if not selector or not len(selector.strip()):
return

await self.page.select_option(selector, label=value, timeout=self.action_timeout)

async def action_scroll_down(self, selector, value):
# Some sites this doesnt work on for some reason
self.page.mouse.wheel(0, 600)
self.page.wait_for_timeout(1000)
await self.page.mouse.wheel(0, 600)
await self.page.wait_for_timeout(1000)

def action_wait_for_seconds(self, selector, value):
self.page.wait_for_timeout(float(value.strip()) * 1000)
async def action_wait_for_seconds(self, selector, value):
try:
seconds = float(value.strip()) if value else 1.0
await self.page.wait_for_timeout(seconds * 1000)
except (ValueError, TypeError) as e:
logger.error(f"Invalid value for wait_for_seconds: {str(e)}")

def action_wait_for_text(self, selector, value):
async def action_wait_for_text(self, selector, value):
if not value:
return

import json
v = json.dumps(value)
self.page.wait_for_function(f'document.querySelector("body").innerText.includes({v});', timeout=30000)
await self.page.wait_for_function(
f'document.querySelector("body").innerText.includes({v});',
timeout=30000
)


def action_wait_for_text_in_element(self, selector, value):
async def action_wait_for_text_in_element(self, selector, value):
if not selector or not value:
return

import json
s = json.dumps(selector)
v = json.dumps(value)
self.page.wait_for_function(f'document.querySelector({s}).innerText.includes({v});', timeout=30000)

await self.page.wait_for_function(
f'document.querySelector({s}).innerText.includes({v});',
timeout=30000
)

# @todo - in the future make some popout interface to capture what needs to be set
# https://playwright.dev/python/docs/api/class-keyboard
def action_press_enter(self, selector, value):
self.page.keyboard.press("Enter", delay=randint(200, 500))
async def action_press_enter(self, selector, value):
await self.page.keyboard.press("Enter", delay=randint(200, 500))


def action_press_page_up(self, selector, value):
self.page.keyboard.press("PageUp", delay=randint(200, 500))
async def action_press_page_up(self, selector, value):
await self.page.keyboard.press("PageUp", delay=randint(200, 500))

def action_press_page_down(self, selector, value):
self.page.keyboard.press("PageDown", delay=randint(200, 500))
async def action_press_page_down(self, selector, value):
await self.page.keyboard.press("PageDown", delay=randint(200, 500))

def action_check_checkbox(self, selector, value):
self.page.locator(selector).check(timeout=self.action_timeout)
async def action_check_checkbox(self, selector, value):
if not selector:
return

def action_uncheck_checkbox(self, selector, value):
self.page.locator(selector).uncheck(timeout=self.action_timeout)
await self.page.locator(selector).check(timeout=self.action_timeout)

def action_remove_elements(self, selector, value):
async def action_uncheck_checkbox(self, selector, value):
if not selector:
return

await self.page.locator(selector).uncheck(timeout=self.action_timeout)


async def action_remove_elements(self, selector, value):
"""Removes all elements matching the given selector from the DOM."""
self.page.locator(selector).evaluate_all("els => els.forEach(el => el.remove())")
if not selector:
return

await self.page.locator(selector).evaluate_all("els => els.forEach(el => el.remove())")

def action_make_all_child_elements_visible(self, selector, value):
async def action_make_all_child_elements_visible(self, selector, value):
"""Recursively makes all child elements inside the given selector fully visible."""
self.page.locator(selector).locator("*").evaluate_all("""
if not selector:
return

await self.page.locator(selector).locator("*").evaluate_all("""
els => els.forEach(el => {
el.style.display = 'block'; // Forces it to be displayed
el.style.visibility = 'visible'; // Ensures it's not hidden
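(For orientation, a hedged usage sketch of the async call_action() dispatch above — the step names are real entries from browser_step_ui_config, but the surrounding driver code is illustrative only:)

async def run_example_steps(browser_interface):
    # call_action() normalises "Press Enter" -> action_press_enter, etc., via getattr()
    await browser_interface.call_action('Goto site')
    await browser_interface.call_action('Enter text in field', selector='input[name="q"]', optional_value='example')
    await browser_interface.call_action('Press Enter')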
@@ -223,7 +308,9 @@ class browsersteps_live_ui(steppable_browser_interface):
# bump and kill this if idle after X sec
age_start = 0
headers = {}

# Track if resources are properly cleaned up
_is_cleaned_up = False

# use a special driver, maybe locally etc
command_executor = os.getenv(
"PLAYWRIGHT_BROWSERSTEPS_DRIVER_URL"
@@ -242,17 +329,23 @@ class browsersteps_live_ui(steppable_browser_interface):
self.age_start = time.time()
self.playwright_browser = playwright_browser
self.start_url = start_url
if self.context is None:
self.connect(proxy=proxy)
self._is_cleaned_up = False
self.proxy = proxy
# Note: connect() is now async and must be called separately

def __del__(self):
# Ensure cleanup happens if object is garbage collected
# Note: cleanup is now async, so we can only mark as cleaned up here
self._is_cleaned_up = True

# Connect and setup a new context
def connect(self, proxy=None):
async def connect(self, proxy=None):
# Should only get called once - test that
keep_open = 1000 * 60 * 5
now = time.time()

# @todo handle multiple contexts, bind a unique id from the browser on each req?
self.context = self.playwright_browser.new_context(
self.context = await self.playwright_browser.new_context(
accept_downloads=False, # Should never be needed
bypass_csp=True, # This is needed to enable JavaScript execution on GitHub and others
extra_http_headers=self.headers,
@@ -263,72 +356,142 @@ class browsersteps_live_ui(steppable_browser_interface):
user_agent=manage_user_agent(headers=self.headers),
)


self.page = self.context.new_page()
self.page = await self.context.new_page()

# self.page.set_default_navigation_timeout(keep_open)
self.page.set_default_timeout(keep_open)
# @todo probably this doesnt work
self.page.on(
"close",
self.mark_as_closed,
)
# Set event handlers
self.page.on("close", self.mark_as_closed)
# Listen for all console events and handle errors
self.page.on("console", lambda msg: print(f"Browser steps console - {msg.type}: {msg.text} {msg.args}"))

logger.debug(f"Time to browser setup {time.time()-now:.2f}s")
self.page.wait_for_timeout(1 * 1000)
await self.page.wait_for_timeout(1 * 1000)

def mark_as_closed(self):
logger.debug("Page closed, cleaning up..")
# Note: This is called from a sync context (event handler)
# so we'll just mark as cleaned up and let __del__ handle the rest
self._is_cleaned_up = True

async def cleanup(self):
"""Properly clean up all resources to prevent memory leaks"""
if self._is_cleaned_up:
return

logger.debug("Cleaning up browser steps resources")

# Clean up page
if hasattr(self, 'page') and self.page is not None:
try:
# Force garbage collection before closing
await self.page.request_gc()
except Exception as e:
logger.debug(f"Error during page garbage collection: {str(e)}")

try:
# Remove event listeners before closing
self.page.remove_listener("close", self.mark_as_closed)
except Exception as e:
logger.debug(f"Error removing event listeners: {str(e)}")

try:
await self.page.close()
except Exception as e:
logger.debug(f"Error closing page: {str(e)}")

self.page = None

# Clean up context
if hasattr(self, 'context') and self.context is not None:
try:
await self.context.close()
except Exception as e:
logger.debug(f"Error closing context: {str(e)}")

self.context = None

self._is_cleaned_up = True
logger.debug("Browser steps resources cleanup complete")

@property
def has_expired(self):
if not self.page:
if not self.page or self._is_cleaned_up:
return True

# Check if session has expired based on age
max_age_seconds = int(os.getenv("BROWSER_STEPS_MAX_AGE_SECONDS", 60 * 10)) # Default 10 minutes
if (time.time() - self.age_start) > max_age_seconds:
logger.debug(f"Browser steps session expired after {max_age_seconds} seconds")
return True

return False


def get_current_state(self):
async def get_current_state(self):
"""Return the screenshot and interactive elements mapping, generally always called after action_()"""
import importlib.resources
import json
# because we for now only run browser steps in playwright mode (not puppeteer mode)
from changedetectionio.content_fetchers.playwright import capture_full_page_async

# Safety check - don't proceed if resources are cleaned up
if self._is_cleaned_up or self.page is None:
logger.warning("Attempted to get current state after cleanup")
return (None, None)

xpath_element_js = importlib.resources.files("changedetectionio.content_fetchers.res").joinpath('xpath_element_scraper.js').read_text()

now = time.time()
self.page.wait_for_timeout(1 * 1000)
await self.page.wait_for_timeout(1 * 1000)

# The actual screenshot
screenshot = self.page.screenshot(type='jpeg', full_page=True, quality=40)
screenshot = None
xpath_data = None

try:
# Get screenshot first
screenshot = await capture_full_page_async(page=self.page)
if not screenshot:
logger.error("No screenshot was retrieved :((")

self.page.evaluate("var include_filters=''")
# Go find the interactive elements
# @todo in the future, something smarter that can scan for elements with .click/focus etc event handlers?
elements = 'a,button,input,select,textarea,i,th,td,p,li,h1,h2,h3,h4,div,span'
xpath_element_js = xpath_element_js.replace('%ELEMENTS%', elements)
xpath_data = self.page.evaluate("async () => {" + xpath_element_js + "}")
# So the JS will find the smallest one first
xpath_data['size_pos'] = sorted(xpath_data['size_pos'], key=lambda k: k['width'] * k['height'], reverse=True)
logger.debug(f"Time to complete get_current_state of browser {time.time()-now:.2f}s")
# except
# playwright._impl._api_types.Error: Browser closed.
# @todo show some countdown timer?
logger.debug(f"Time to get screenshot from browser {time.time() - now:.2f}s")

# Then get interactive elements
now = time.time()
await self.page.evaluate("var include_filters=''")
await self.page.request_gc()

scan_elements = 'a,button,input,select,textarea,i,th,td,p,li,h1,h2,h3,h4,div,span'

MAX_TOTAL_HEIGHT = int(os.getenv("SCREENSHOT_MAX_HEIGHT", SCREENSHOT_MAX_HEIGHT_DEFAULT))
xpath_data = json.loads(await self.page.evaluate(xpath_element_js, {
"visualselector_xpath_selectors": scan_elements,
"max_height": MAX_TOTAL_HEIGHT
}))
await self.page.request_gc()

# Sort elements by size
xpath_data['size_pos'] = sorted(xpath_data['size_pos'], key=lambda k: k['width'] * k['height'], reverse=True)
logger.debug(f"Time to scrape xPath element data in browser {time.time()-now:.2f}s")

except Exception as e:
logger.error(f"Error getting current state: {str(e)}")
# If the page has navigated (common with logins) then the context is destroyed on navigation, continue
# I'm not sure that this is required anymore because we have the "expect navigation wrapper" at the top
if "Execution context was destroyed" in str(e):
logger.debug("Execution context was destroyed, most likely because of navigation, continuing...")
pass

# Attempt recovery - force garbage collection
try:
await self.page.request_gc()
except:
pass

# Request garbage collection one final time
try:
await self.page.request_gc()
except:
pass

return (screenshot, xpath_data)

def request_visualselector_data(self):
"""
Does the same that the playwright operation in content_fetcher does
This is used to just bump the VisualSelector data so it's ready to go if they click on the tab
@todo refactor and remove duplicate code, add include_filters
:param xpath_data:
:param screenshot:
:param current_include_filters:
:return:
"""
import importlib.resources
self.page.evaluate("var include_filters=''")
xpath_element_js = importlib.resources.files("changedetectionio.content_fetchers.res").joinpath('xpath_element_scraper.js').read_text()
from changedetectionio.content_fetchers import visualselector_xpath_selectors
xpath_element_js = xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors)
xpath_data = self.page.evaluate("async () => {" + xpath_element_js + "}")
screenshot = self.page.screenshot(type='jpeg', full_page=True, quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))

return (screenshot, xpath_data)
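(Worth noting: the new get_current_state() passes an arguments object to page.evaluate() instead of splicing %ELEMENTS% into the script string. A minimal standalone sketch of that Playwright pattern — the script and values here are made up for illustration:)

# The JS arrow function receives the dict as a single argument
count = await page.evaluate(
    "(opts) => document.querySelectorAll(opts.visualselector_xpath_selectors).length",
    {"visualselector_xpath_selectors": "a,button", "max_height": 16000},
)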
@@ -1,17 +0,0 @@
from playwright.sync_api import PlaywrightContextManager

# So playwright wants to run as a context manager, but we do something horrible and hacky
# we are holding the session open for as long as possible, then shutting it down, and opening a new one
# So it means we don't get to use PlaywrightContextManager' __enter__ __exit__
# To work around this, make goodbye() act the same as the __exit__()
#
# But actually I think this is because the context is opened correctly with __enter__() but we timeout the connection
# then theres some lock condition where we cant destroy it without it hanging

class c_PlaywrightContextManager(PlaywrightContextManager):

def goodbye(self) -> None:
self.__exit__()

def c_sync_playwright() -> PlaywrightContextManager:
return c_PlaywrightContextManager()
@@ -33,7 +33,7 @@ def construct_blueprint(datastore: ChangeDetectionStore):
def long_task(uuid, preferred_proxy):
import time
from changedetectionio.content_fetchers import exceptions as content_fetcher_exceptions
from changedetectionio.safe_jinja import render as jinja_render
from changedetectionio.jinja2_custom import render as jinja_render

status = {'status': '', 'length': 0, 'text': ''}
changedetectionio/blueprint/imports/__init__.py (new file)
@@ -0,0 +1,75 @@
from flask import Blueprint, request, redirect, url_for, flash, render_template
from changedetectionio.store import ChangeDetectionStore
from changedetectionio.auth_decorator import login_optionally_required
from changedetectionio import worker_handler
from changedetectionio.blueprint.imports.importer import (
import_url_list,
import_distill_io_json,
import_xlsx_wachete,
import_xlsx_custom
)

def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMetaData):
import_blueprint = Blueprint('imports', __name__, template_folder="templates")

@import_blueprint.route("/import", methods=['GET', 'POST'])
@login_optionally_required
def import_page():
remaining_urls = []
from changedetectionio import forms

if request.method == 'POST':
# URL List import
if request.values.get('urls') and len(request.values.get('urls').strip()):
# Import and push into the queue for immediate update check
importer_handler = import_url_list()
importer_handler.run(data=request.values.get('urls'), flash=flash, datastore=datastore, processor=request.values.get('processor', 'text_json_diff'))
for uuid in importer_handler.new_uuids:
worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))

if len(importer_handler.remaining_data) == 0:
return redirect(url_for('watchlist.index'))
else:
remaining_urls = importer_handler.remaining_data

# Distill.io import
if request.values.get('distill-io') and len(request.values.get('distill-io').strip()):
# Import and push into the queue for immediate update check
d_importer = import_distill_io_json()
d_importer.run(data=request.values.get('distill-io'), flash=flash, datastore=datastore)
for uuid in d_importer.new_uuids:
worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))

# XLSX importer
if request.files and request.files.get('xlsx_file'):
file = request.files['xlsx_file']

if request.values.get('file_mapping') == 'wachete':
w_importer = import_xlsx_wachete()
w_importer.run(data=file, flash=flash, datastore=datastore)
else:
w_importer = import_xlsx_custom()
# Building mapping of col # to col # type
map = {}
for i in range(10):
c = request.values.get(f"custom_xlsx[col_{i}]")
v = request.values.get(f"custom_xlsx[col_type_{i}]")
if c and v:
map[int(c)] = v

w_importer.import_profile = map
w_importer.run(data=file, flash=flash, datastore=datastore)

for uuid in w_importer.new_uuids:
worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))

# Could be some remaining, or we could be on GET
form = forms.importForm(formdata=request.form if request.method == 'POST' else None)
output = render_template("import.html",
form=form,
import_url_list_remaining="\n".join(remaining_urls),
original_distill_json=''
)
return output

return import_blueprint
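(A hedged sketch of how this new imports blueprint might be wired into the main Flask app — construct_blueprint() and its arguments come from the file above, but the registration call itself is an assumption:)

from changedetectionio.blueprint.imports import construct_blueprint as construct_imports_blueprint

# The blueprint already defines its own /import route, so no url_prefix is assumed here
app.register_blueprint(construct_imports_blueprint(datastore, update_q, queuedWatchMetaData))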
@@ -1,6 +1,5 @@
from abc import ABC, abstractmethod
from abc import abstractmethod
import time
import validators
from wtforms import ValidationError
from loguru import logger

@@ -241,7 +240,7 @@ class import_xlsx_custom(Importer):
return

# @todo check at least 2 rows, same in other method
from .forms import validate_url
from changedetectionio.forms import validate_url
row_i = 1

try:
@@ -300,4 +299,4 @@ class import_xlsx_custom(Importer):
row_i += 1

flash(
"{} imported from custom .xlsx in {:.2f}s".format(len(self.new_uuids), time.time() - now))
"{} imported from custom .xlsx in {:.2f}s".format(len(self.new_uuids), time.time() - now))
@@ -13,29 +13,27 @@
</div>

<div class="box-wrap inner">
<form class="pure-form" action="{{url_for('import_page')}}" method="POST" enctype="multipart/form-data">
<form class="pure-form" action="{{url_for('imports.import_page')}}" method="POST" enctype="multipart/form-data">
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}">
<div class="tab-pane-inner" id="url-list">
<legend>
<div class="pure-control-group">
Enter one URL per line, and optionally add tags for each URL after a space, delineated by comma
(,):
<br>
<code>https://example.com tag1, tag2, last tag</code>
<br>
<p><strong>Example: </strong><code>https://example.com tag1, tag2, last tag</code></p>
URLs which do not pass validation will stay in the textarea.
</legend>
</div>
{{ render_field(form.processor, class="processor") }}


<div class="pure-control-group">
<textarea name="urls" class="pure-input-1-2" placeholder="https://"
style="width: 100%;
font-family:monospace;
white-space: pre;
overflow-wrap: normal;
overflow-x: scroll;" rows="25">{{ import_url_list_remaining }}</textarea>

<div id="quick-watch-processor-type">

</div>
</div>
<div id="quick-watch-processor-type"></div>

</div>

@@ -43,7 +41,7 @@



<legend>
<div class="pure-control-group">
Copy and Paste your Distill.io watch 'export' file, this should be a JSON file.<br>
This is <i>experimental</i>, supported fields are <code>name</code>, <code>uri</code>, <code>tags</code>, <code>config:selections</code>, the rest (including <code>schedule</code>) are ignored.
<br>
@@ -51,7 +49,7 @@
How to export? <a href="https://distill.io/docs/web-monitor/how-export-and-import-monitors/">https://distill.io/docs/web-monitor/how-export-and-import-monitors/</a><br>
Be sure to set your default fetcher to Chrome if required.<br>
</p>
</legend>
</div>


<textarea name="distill-io" class="pure-input-1-2" style="width: 100%;
@@ -122,4 +120,4 @@
</div>
</div>

{% endblock %}
{% endblock %}
@@ -4,6 +4,7 @@ from flask import Blueprint, flash, redirect, url_for
from flask_login import login_required
from changedetectionio.store import ChangeDetectionStore
from changedetectionio import queuedWatchMetaData
from changedetectionio import worker_handler
from queue import PriorityQueue

PRICE_DATA_TRACK_ACCEPT = 'accepted'
@@ -19,14 +20,14 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q: PriorityQueue
datastore.data['watching'][uuid]['track_ldjson_price_data'] = PRICE_DATA_TRACK_ACCEPT
datastore.data['watching'][uuid]['processor'] = 'restock_diff'
datastore.data['watching'][uuid].clear_watch()
update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))
return redirect(url_for("index"))
worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))
return redirect(url_for("watchlist.index"))

@login_required
@price_data_follower_blueprint.route("/<string:uuid>/reject", methods=['GET'])
def reject(uuid):
datastore.data['watching'][uuid]['track_ldjson_price_data'] = PRICE_DATA_TRACK_REJECT
return redirect(url_for("index"))
return redirect(url_for("watchlist.index"))


return price_data_follower_blueprint
changedetectionio/blueprint/rss/__init__.py (new file)
@@ -0,0 +1 @@
RSS_FORMAT_TYPES = [('plaintext', 'Plain text'), ('html', 'HTML Color')]

changedetectionio/blueprint/rss/blueprint.py (new file)
@@ -0,0 +1,155 @@
from changedetectionio.jinja2_custom import render as jinja_render
from changedetectionio.notification.handler import apply_service_tweaks
from changedetectionio.store import ChangeDetectionStore
from feedgen.feed import FeedGenerator
from flask import Blueprint, make_response, request, url_for, redirect
from loguru import logger
import datetime
import pytz
import re
import time


BAD_CHARS_REGEX=r'[\x00-\x08\x0B\x0C\x0E-\x1F]'

# Anything that is not text/UTF-8 should be stripped before it breaks feedgen (such as binary data etc)
def scan_invalid_chars_in_rss(content):
for match in re.finditer(BAD_CHARS_REGEX, content):
i = match.start()
bad_char = content[i]
hex_value = f"0x{ord(bad_char):02x}"
# Grab context
start = max(0, i - 20)
end = min(len(content), i + 21)
context = content[start:end].replace('\n', '\\n').replace('\r', '\\r')
logger.warning(f"Invalid char {hex_value} at pos {i}: ...{context}...")
# First match is enough
return True

return False


def clean_entry_content(content):
cleaned = re.sub(BAD_CHARS_REGEX, '', content)
return cleaned

def construct_blueprint(datastore: ChangeDetectionStore):
rss_blueprint = Blueprint('rss', __name__)

# Some RSS reader situations ended up with rss/ (forward slash after RSS) due
# to some earlier blueprint rerouting work, it should goto feed.
@rss_blueprint.route("/", methods=['GET'])
def extraslash():
return redirect(url_for('rss.feed'))

# Import the login decorator if needed
# from changedetectionio.auth_decorator import login_optionally_required
@rss_blueprint.route("", methods=['GET'])
def feed():
now = time.time()
# Always requires token set
app_rss_token = datastore.data['settings']['application'].get('rss_access_token')
rss_url_token = request.args.get('token')
if rss_url_token != app_rss_token:
return "Access denied, bad token", 403

from changedetectionio import diff
limit_tag = request.args.get('tag', '').lower().strip()
# Be sure limit_tag is a uuid
for uuid, tag in datastore.data['settings']['application'].get('tags', {}).items():
if limit_tag == tag.get('title', '').lower().strip():
limit_tag = uuid

# Sort by last_changed and add the uuid which is usually the key..
sorted_watches = []

# @todo needs a .itemsWithTag() or something - then we can use that in Jinja2 and throw this away
for uuid, watch in datastore.data['watching'].items():
# @todo tag notification_muted skip also (improve Watch model)
if datastore.data['settings']['application'].get('rss_hide_muted_watches') and watch.get('notification_muted'):
continue
if limit_tag and not limit_tag in watch['tags']:
continue
watch['uuid'] = uuid
sorted_watches.append(watch)

sorted_watches.sort(key=lambda x: x.last_changed, reverse=False)

fg = FeedGenerator()
fg.title('changedetection.io')
fg.description('Feed description')
fg.link(href='https://changedetection.io')

html_colour_enable = False
if datastore.data['settings']['application'].get('rss_content_format') == 'html':
html_colour_enable = True

for watch in sorted_watches:

dates = list(watch.history.keys())
# Re #521 - Don't bother processing this one if theres less than 2 snapshots, means we never had a change detected.
if len(dates) < 2:
continue

if not watch.viewed:
# Re #239 - GUID needs to be individual for each event
# @todo In the future make this a configurable link back (see work on BASE_URL https://github.com/dgtlmoon/changedetection.io/pull/228)
guid = "{}/{}".format(watch['uuid'], watch.last_changed)
fe = fg.add_entry()

# Include a link to the diff page, they will have to login here to see if password protection is enabled.
# Description is the page you watch, link takes you to the diff JS UI page
# Dict val base_url will get overriden with the env var if it is set.
ext_base_url = datastore.data['settings']['application'].get('active_base_url')
# @todo fix

# Because we are called via whatever web server, flask should figure out the right path (
diff_link = {'href': url_for('ui.ui_views.diff_history_page', uuid=watch['uuid'], _external=True)}

fe.link(link=diff_link)

# Same logic as watch-overview.html
if datastore.data['settings']['application']['ui'].get('use_page_title_in_list') or watch.get('use_page_title_in_list'):
watch_label = watch.label
else:
watch_label = watch.get('url')

fe.title(title=watch_label)
try:

html_diff = diff.render_diff(previous_version_file_contents=watch.get_history_snapshot(dates[-2]),
newest_version_file_contents=watch.get_history_snapshot(dates[-1]),
include_equal=False,
line_feed_sep="<br>"
)


requested_output_format = 'htmlcolor' if html_colour_enable else 'html'
html_diff = apply_service_tweaks(url='', n_body=html_diff, n_title=None, requested_output_format=requested_output_format)

except FileNotFoundError as e:
html_diff = f"History snapshot file for watch {watch.get('uuid')}@{watch.last_changed} - '{watch.get('title')} not found."

# @todo Make this configurable and also consider html-colored markup
# @todo User could decide if <link> goes to the diff page, or to the watch link
rss_template = "<html><body>\n<h4><a href=\"{{watch_url}}\">{{watch_title}}</a></h4>\n<p>{{html_diff}}</p>\n</body></html>\n"

content = jinja_render(template_str=rss_template, watch_title=watch_label, html_diff=html_diff, watch_url=watch.link)

# Out of range chars could also break feedgen
if scan_invalid_chars_in_rss(content):
content = clean_entry_content(content)

fe.content(content=content, type='CDATA')
fe.guid(guid, permalink=False)
dt = datetime.datetime.fromtimestamp(int(watch.newest_history_key))
dt = dt.replace(tzinfo=pytz.UTC)
fe.pubDate(dt)

response = make_response(fg.rss_str())
response.headers.set('Content-Type', 'application/rss+xml;charset=utf-8')
logger.trace(f"RSS generated in {time.time() - now:.3f}s")
return response

return rss_blueprint
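(A tiny usage illustration of the invalid-character guard defined above — the sample string is made up:)

content = "ok\x00bad"
if scan_invalid_chars_in_rss(content):      # logs the offending 0x00 with surrounding context
    content = clean_entry_content(content)  # -> "okbad", safe to hand to feedgen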
changedetectionio/blueprint/settings/__init__.py (new file)
@@ -0,0 +1,145 @@
import os
from copy import deepcopy
from datetime import datetime
from zoneinfo import ZoneInfo, available_timezones
import secrets
import flask_login
from flask import Blueprint, render_template, request, redirect, url_for, flash

from changedetectionio.store import ChangeDetectionStore
from changedetectionio.auth_decorator import login_optionally_required


def construct_blueprint(datastore: ChangeDetectionStore):
settings_blueprint = Blueprint('settings', __name__, template_folder="templates")

@settings_blueprint.route("", methods=['GET', "POST"])
@login_optionally_required
def settings_page():
from changedetectionio import forms

default = deepcopy(datastore.data['settings'])
if datastore.proxy_list is not None:
available_proxies = list(datastore.proxy_list.keys())
# When enabled
system_proxy = datastore.data['settings']['requests']['proxy']
# In the case it doesnt exist anymore
if not system_proxy in available_proxies:
system_proxy = None

default['requests']['proxy'] = system_proxy if system_proxy is not None else available_proxies[0]
# Used by the form handler to keep or remove the proxy settings
default['proxy_list'] = available_proxies[0]

# Don't use form.data on POST so that it doesnt override the checkbox status from the POST status
form = forms.globalSettingsForm(formdata=request.form if request.method == 'POST' else None,
data=default,
extra_notification_tokens=datastore.get_unique_notification_tokens_available()
)

# Remove the last option 'System default'
form.application.form.notification_format.choices.pop()

if datastore.proxy_list is None:
# @todo - Couldn't get setattr() etc dynamic addition working, so remove it instead
del form.requests.form.proxy
else:
form.requests.form.proxy.choices = []
for p in datastore.proxy_list:
form.requests.form.proxy.choices.append(tuple((p, datastore.proxy_list[p]['label'])))

if request.method == 'POST':
# Password unset is a GET, but we can lock the session to a salted env password to always need the password
if form.application.form.data.get('removepassword_button', False):
# SALTED_PASS means the password is "locked" to what we set in the Env var
if not os.getenv("SALTED_PASS", False):
datastore.remove_password()
flash("Password protection removed.", 'notice')
flask_login.logout_user()
return redirect(url_for('settings.settings_page'))

if form.validate():
# Don't set password to False when a password is set - should be only removed with the `removepassword` button
app_update = dict(deepcopy(form.data['application']))

# Never update password with '' or False (Added by wtforms when not in submission)
if 'password' in app_update and not app_update['password']:
del (app_update['password'])

datastore.data['settings']['application'].update(app_update)

# Handle dynamic worker count adjustment
old_worker_count = datastore.data['settings']['requests'].get('workers', 1)
new_worker_count = form.data['requests'].get('workers', 1)

datastore.data['settings']['requests'].update(form.data['requests'])

# Adjust worker count if it changed
if new_worker_count != old_worker_count:
from changedetectionio import worker_handler
from changedetectionio.flask_app import update_q, notification_q, app, datastore as ds

result = worker_handler.adjust_async_worker_count(
new_count=new_worker_count,
update_q=update_q,
notification_q=notification_q,
app=app,
datastore=ds
)

if result['status'] == 'success':
flash(f"Worker count adjusted: {result['message']}", 'notice')
elif result['status'] == 'not_supported':
flash("Dynamic worker adjustment not supported for sync workers", 'warning')
elif result['status'] == 'error':
flash(f"Error adjusting workers: {result['message']}", 'error')

if not os.getenv("SALTED_PASS", False) and len(form.application.form.password.encrypted_password):
datastore.data['settings']['application']['password'] = form.application.form.password.encrypted_password
datastore.needs_write_urgent = True
flash("Password protection enabled.", 'notice')
flask_login.logout_user()
return redirect(url_for('watchlist.index'))

datastore.needs_write_urgent = True
flash("Settings updated.")

else:
flash("An error occurred, please see below.", "error")

# Convert to ISO 8601 format, all date/time relative events stored as UTC time
utc_time = datetime.now(ZoneInfo("UTC")).isoformat()

output = render_template("settings.html",
api_key=datastore.data['settings']['application'].get('api_access_token'),
available_timezones=sorted(available_timezones()),
emailprefix=os.getenv('NOTIFICATION_MAIL_BUTTON_PREFIX', False),
extra_notification_token_placeholder_info=datastore.get_unique_notification_token_placeholders_available(),
form=form,
hide_remove_pass=os.getenv("SALTED_PASS", False),
min_system_recheck_seconds=int(os.getenv('MINIMUM_SECONDS_RECHECK_TIME', 3)),
settings_application=datastore.data['settings']['application'],
timezone_default_config=datastore.data['settings']['application'].get('scheduler_timezone_default'),
utc_time=utc_time,
)

return output

@settings_blueprint.route("/reset-api-key", methods=['GET'])
@login_optionally_required
def settings_reset_api_key():
secret = secrets.token_hex(16)
datastore.data['settings']['application']['api_access_token'] = secret
datastore.needs_write_urgent = True
flash("API Key was regenerated.")
return redirect(url_for('settings.settings_page')+'#api')

@settings_blueprint.route("/notification-logs", methods=['GET'])
@login_optionally_required
def notification_logs():
from changedetectionio.flask_app import notification_debug_log
output = render_template("notification-log.html",
logs=notification_debug_log if len(notification_debug_log) else ["Notification logs are empty - no notifications sent yet."])
return output

return settings_blueprint
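(The templates below call url_for('settings.settings_page') and url_for('settings.settings_reset_api_key'), which implies this blueprint is registered under the name 'settings'; a hedged registration sketch — the url_prefix is an assumption:)

from changedetectionio.blueprint.settings import construct_blueprint as construct_settings_blueprint

app.register_blueprint(construct_settings_blueprint(datastore), url_prefix='/settings')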
@@ -1,10 +1,10 @@
{% extends 'base.html' %}

{% block content %}
{% from '_helpers.html' import render_field, render_checkbox_field, render_button, render_time_schedule_form %}
{% from '_helpers.html' import render_field, render_checkbox_field, render_button, render_time_schedule_form, render_ternary_field, render_fieldlist_with_inline_errors %}
{% from '_common_fields.html' import render_common_settings_form %}
<script>
const notification_base_url="{{url_for('ajax_callback_send_notification_test', mode="global-settings")}}";
const notification_base_url="{{url_for('ui.ui_notification.ajax_callback_send_notification_test', mode="global-settings")}}";
{% if emailprefix %}
const email_notification_prefix=JSON.parse('{{emailprefix|tojson}}');
{% endif %}
@@ -22,13 +22,14 @@
<li class="tab"><a href="#notifications">Notifications</a></li>
<li class="tab"><a href="#fetching">Fetching</a></li>
<li class="tab"><a href="#filters">Global Filters</a></li>
<li class="tab"><a href="#ui-options">UI Options</a></li>
<li class="tab"><a href="#api">API</a></li>
<li class="tab"><a href="#timedate">Time & Date</a></li>
<li class="tab"><a href="#proxies">CAPTCHA & Proxies</a></li>
</ul>
</div>
<div class="box-wrap inner">
<form class="pure-form pure-form-stacked settings" action="{{url_for('settings_page')}}" method="POST">
<form class="pure-form pure-form-stacked settings" action="{{url_for('settings.settings_page')}}" method="POST">
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" >
<div class="tab-pane-inner" id="general">
<fieldset>
@@ -71,30 +72,23 @@
<span class="pure-form-message-inline">Allow access to view watch diff page when password is enabled (Good for sharing the diff page)
</span>
</div>
<div class="pure-control-group">
{{ render_checkbox_field(form.application.form.rss_hide_muted_watches) }}
</div>
<div class="pure-control-group">
{{ render_field(form.application.form.pager_size) }}
<span class="pure-form-message-inline">Number of items per page in the watch overview list, 0 to disable.</span>
</div>

<div class="pure-control-group">
{{ render_checkbox_field(form.application.form.extract_title_as_title) }}
<span class="pure-form-message-inline">Note: This will automatically apply to all existing watches.</span>
</div>
<div class="pure-control-group">
{{ render_checkbox_field(form.application.form.empty_pages_are_a_change) }}
<span class="pure-form-message-inline">When a request returns no content, or the HTML does not contain any text, is this considered a change?</span>
</div>
{% if form.requests.proxy %}
<div class="pure-control-group inline-radio">
{{ render_field(form.requests.form.proxy, class="fetch-backend-proxy") }}
<span class="pure-form-message-inline">
Choose a default proxy for all watches
</span>
<div class="grey-form-border">
<div class="pure-control-group">
{{ render_checkbox_field(form.application.form.rss_hide_muted_watches) }}
</div>
<div class="pure-control-group">
{{ render_field(form.application.form.rss_content_format) }}
<span class="pure-form-message-inline">Love RSS? Does your reader support HTML? Set it here</span>
</div>
<div class="pure-control-group">
{{ render_checkbox_field(form.application.form.rss_reader_mode) }}
<span class="pure-form-message-inline">Transforms RSS/RDF feed watches into beautiful text only</span>
</div>
</div>
{% endif %}
</fieldset>
</div>

@@ -131,6 +125,16 @@
{{ render_field(form.application.form.webdriver_delay) }}
</div>
</fieldset>
<div class="pure-control-group">
{{ render_field(form.requests.form.workers) }}
{% set worker_info = get_worker_status_info() %}
<span class="pure-form-message-inline">Number of concurrent workers to process watches. More workers = faster processing but higher memory usage.<br>
Currently running: <strong>{{ worker_info.count }}</strong> operational {{ worker_info.type }} workers{% if worker_info.active_workers > 0 %} ({{ worker_info.active_workers }} actively processing){% endif %}.</span>
</div>
<div class="pure-control-group">
{{ render_field(form.requests.form.timeout) }}
<span class="pure-form-message-inline">For regular plain requests (not chrome based), maximum number of seconds until timeout, 1-999.<br>
</div>
<div class="pure-control-group inline-radio">
{{ render_field(form.requests.form.default_ua) }}
<span class="pure-form-message-inline">
@@ -189,11 +193,17 @@ nav
</ul>
</span>
</fieldset>
<fieldset class="pure-group">
{{ render_checkbox_field(form.application.form.strip_ignored_lines) }}
<span class="pure-form-message-inline">Remove any text that appears in the "Ignore text" from the output (otherwise its just ignored for change-detection)<br>
<i>Note:</i> Changing this will change the status of your existing watches, possibly trigger alerts etc.
</span>
</fieldset>
</div>

<div class="tab-pane-inner" id="api">
<h4>API Access</h4>
<p>Drive your changedetection.io via API, More about <a href="https://github.com/dgtlmoon/changedetection.io/wiki/API-Reference">API access here</a></p>
<p>Drive your changedetection.io via API, More about <a href="https://changedetection.io/docs/api_v1/index.html">API access and examples here</a>.</p>

<div class="pure-control-group">
{{ render_checkbox_field(form.application.form.api_access_token_enabled) }}
@@ -203,7 +213,7 @@ nav
</div>
</div>
<div class="pure-control-group">
<a href="{{url_for('settings_reset_api_key')}}" class="pure-button button-small button-cancel">Regenerate API key</a>
<a href="{{url_for('settings.settings_reset_api_key')}}" class="pure-button button-small button-cancel">Regenerate API key</a>
</div>
<div class="pure-control-group">
<h4>Chrome Extension</h4>
@@ -214,7 +224,7 @@ nav
<a id="chrome-extension-link"
title="Try our new Chrome Extension!"
href="https://chromewebstore.google.com/detail/changedetectionio-website/kefcfmgmlhmankjmnbijimhofdjekbop">
<img alt="Chrome store icon" src="{{ url_for('static_content', group='images', filename='Google-Chrome-icon.png') }}" alt="Chrome">
<img alt="Chrome store icon" src="{{ url_for('static_content', group='images', filename='google-chrome-icon.png') }}" alt="Chrome">
Chrome Webstore
</a>
</p>
@@ -228,15 +238,35 @@ nav
<p><strong>UTC Time & Date from Server:</strong> <span id="utc-time" >{{ utc_time }}</span></p>
<p><strong>Local Time & Date in Browser:</strong> <span class="local-time" data-utc="{{ utc_time }}"></span></p>
<p>
{{ render_field(form.application.form.timezone) }}
{{ render_field(form.application.form.scheduler_timezone_default) }}
<datalist id="timezones" style="display: none;">
{% for tz_name in available_timezones %}
<option value="{{ tz_name }}">{{ tz_name }}</option>
{% endfor %}
{%- for timezone in available_timezones -%}<option value="{{ timezone }}">{{ timezone }}</option>{%- endfor -%}
</datalist>
</p>
</div>
</div>
<div class="tab-pane-inner" id="ui-options">
<div class="pure-control-group">
{{ render_checkbox_field(form.application.form.ui.form.open_diff_in_new_tab, class="open_diff_in_new_tab") }}
<span class="pure-form-message-inline">Enable this setting to open the diff page in a new tab. If disabled, the diff page will open in the current tab.</span>
</div>
<div class="pure-control-group">
{{ render_checkbox_field(form.application.form.ui.form.socket_io_enabled, class="socket_io_enabled") }}
<span class="pure-form-message-inline">Realtime UI Updates Enabled - (Restart required if this is changed)</span>
</div>
<div class="pure-control-group">
{{ render_checkbox_field(form.application.form.ui.form.favicons_enabled, class="") }}
<span class="pure-form-message-inline">Enable or Disable Favicons next to the watch list</span>
</div>
<div class="pure-control-group">
{{ render_checkbox_field(form.application.form.ui.use_page_title_in_list) }}
</div>
<div class="pure-control-group">
{{ render_field(form.application.form.pager_size) }}
<span class="pure-form-message-inline">Number of items per page in the watch overview list, 0 to disable.</span>
</div>

</div>
<div class="tab-pane-inner" id="proxies">
<div id="recommended-proxy">
<div>
@@ -284,23 +314,33 @@ nav
<p><strong>Tip</strong>: "Residential" and "Mobile" proxy types can be more successful than "Data Center" for blocked websites.
|
||||
|
||||
<div class="pure-control-group" id="extra-proxies-setting">
{{ render_field(form.requests.form.extra_proxies) }}
{{ render_fieldlist_with_inline_errors(form.requests.form.extra_proxies) }}
<span class="pure-form-message-inline">"Name" will be used for selecting the proxy in the Watch Edit settings</span><br>
<span class="pure-form-message-inline">SOCKS5 proxies with authentication are only supported with the 'plain requests' fetcher, for other fetchers you should whitelist the IP access instead</span>
{% if form.requests.proxy %}
<div>
<br>
<div class="inline-radio">
{{ render_field(form.requests.form.proxy, class="fetch-backend-proxy") }}
<span class="pure-form-message-inline">Choose a default proxy for all watches</span>
</div>
</div>
{% endif %}
</div>
<div class="pure-control-group" id="extra-browsers-setting">
<p>
<span class="pure-form-message-inline"><i>Extra Browsers</i> can be attached to further defeat CAPTCHAs on websites that are particularly hard to scrape.</span><br>
<span class="pure-form-message-inline">Simply paste the connection address into the box, <a href="https://changedetection.io/tutorial/using-bright-datas-scraping-browser-pass-captchas-and-other-protection-when-monitoring">More instructions and examples here</a> </span>
</p>
{{ render_field(form.requests.form.extra_browsers) }}
{{ render_fieldlist_with_inline_errors(form.requests.form.extra_browsers) }}
</div>

</div>
<div id="actions">
<div class="pure-control-group">
{{ render_button(form.save_button) }}
<a href="{{url_for('index')}}" class="pure-button button-small button-cancel">Back</a>
<a href="{{url_for('clear_all_history')}}" class="pure-button button-small button-error">Clear Snapshot History</a>
<a href="{{url_for('watchlist.index')}}" class="pure-button button-cancel">Back</a>
<a href="{{url_for('ui.clear_all_history')}}" class="pure-button button-error">Clear Snapshot History</a>
</div>
</div>
</form>
@@ -104,6 +104,9 @@ def construct_blueprint(datastore: ChangeDetectionStore):
    uuid = list(datastore.data['settings']['application']['tags'].keys()).pop()

    default = datastore.data['settings']['application']['tags'].get(uuid)
    if not default:
        flash("Tag not found", "error")
        return redirect(url_for('watchlist.index'))

    form = group_restock_settings_form(
        formdata=request.form if request.method == 'POST' else None,
@@ -1,9 +1,9 @@
{% extends 'base.html' %}
{% block content %}
{% from '_helpers.html' import render_field, render_checkbox_field, render_button %}
{% from '_helpers.html' import render_field, render_checkbox_field, render_button, render_ternary_field %}
{% from '_common_fields.html' import render_common_settings_form %}
<script>
const notification_base_url="{{url_for('ajax_callback_send_notification_test', mode="group-settings")}}";
const notification_base_url="{{url_for('ui.ui_notification.ajax_callback_send_notification_test', mode="group-settings")}}";
</script>

<script src="{{url_for('static_content', group='js', filename='tabs.js')}}" defer></script>
@@ -13,6 +13,7 @@
/*const email_notification_prefix=JSON.parse('{{ emailprefix|tojson }}');*/
/*{% endif %}*/

{% set has_tag_filters_extra='' %}

</script>

@@ -46,59 +47,12 @@
</div>

<div class="tab-pane-inner" id="filters-and-triggers">
<div class="pure-control-group">
{% set field = render_field(form.include_filters,
rows=5,
placeholder="#example
xpath://body/div/span[contains(@class, 'example-class')]",
class="m-d")
%}
{{ field }}
{% if '/text()' in field %}
<span class="pure-form-message-inline"><strong>Note!: //text() function does not work where the <element> contains <![CDATA[]]></strong></span><br>
{% endif %}
<span class="pure-form-message-inline">One CSS, xPath, JSON Path/JQ selector per line, <i>any</i> rule that matches will be used.<br>
<div data-target="#advanced-help-selectors" class="toggle-show pure-button button-tag button-xsmall">Show advanced help and tips</div>
<ul id="advanced-help-selectors">
<li>CSS - Limit text to this CSS rule, only text matching this CSS rule is included.</li>
<li>JSON - Limit text to this JSON rule, using either <a href="https://pypi.org/project/jsonpath-ng/" target="new">JSONPath</a> or <a href="https://stedolan.github.io/jq/" target="new">jq</a> (if installed).
<ul>
<li>JSONPath: Prefix with <code>json:</code>, use <code>json:$</code> to force re-formatting if required, <a href="https://jsonpath.com/" target="new">test your JSONPath here</a>.</li>
{% if jq_support %}
<li>jq: Prefix with <code>jq:</code> and <a href="https://jqplay.org/" target="new">test your jq here</a>. Using <a href="https://stedolan.github.io/jq/" target="new">jq</a> allows for complex filtering and processing of JSON data with built-in functions, regex, filtering, and more. See examples and documentation <a href="https://stedolan.github.io/jq/manual/" target="new">here</a>. Prefix <code>jqraw:</code> outputs the results as text instead of a JSON list.</li>
{% else %}
<li>jq support not installed</li>
{% endif %}
</ul>
</li>
<li>XPath - Limit text to this XPath rule, simply start with a forward-slash. To specify XPath to be used explicitly or the XPath rule starts with an XPath function: Prefix with <code>xpath:</code>
<ul>
<li>Example: <code>//*[contains(@class, 'sametext')]</code> or <code>xpath:count(//*[contains(@class, 'sametext')])</code>, <a
href="http://xpather.com/" target="new">test your XPath here</a></li>
<li>Example: Get all titles from an RSS feed <code>//title/text()</code></li>
<li>To use XPath1.0: Prefix with <code>xpath1:</code></li>
</ul>
</li>
</ul>
Please be sure that you thoroughly understand how to write CSS, JSONPath, XPath{% if jq_support %}, or jq selector{%endif%} rules before filing an issue on GitHub! <a
href="https://github.com/dgtlmoon/changedetection.io/wiki/CSS-Selector-help">here for more CSS selector help</a>.<br>
</span>
</div>
<fieldset class="pure-control-group">
{{ render_field(form.subtractive_selectors, rows=5, placeholder="header
footer
nav
.stockticker
//*[contains(text(), 'Advertisement')]") }}
<span class="pure-form-message-inline">
<ul>
<li> Remove HTML element(s) by CSS and XPath selectors before text conversion. </li>
<li> Don't paste HTML here, use only CSS and XPath selectors </li>
<li> Add multiple elements, CSS or XPath selectors per line to ignore multiple parts of the HTML. </li>
</ul>
</span>
</fieldset>

<p>These settings are <strong><i>added</i></strong> to any existing watch configurations.</p>
{% include "edit/include_subtract.html" %}
<div class="text-filtering border-fieldset">
<h3>Text filtering</h3>
{% include "edit/text-options.html" %}
</div>
</div>

{# rendered sub Template #}
@@ -110,9 +64,9 @@ nav
<div class="tab-pane-inner" id="notifications">
<fieldset>
<div class="pure-control-group inline-radio">
{{ render_checkbox_field(form.notification_muted) }}
{{ render_ternary_field(form.notification_muted, BooleanField=True) }}
</div>
{% if is_html_webdriver %}
{% if 1 %}
<div class="pure-control-group inline-radio">
{{ render_checkbox_field(form.notification_screenshot) }}
<span class="pure-form-message-inline">
@@ -124,7 +78,7 @@ nav
{% if has_default_notification_urls %}
<div class="inline-warning">
<img class="inline-warning-icon" src="{{url_for('static_content', group='images', filename='notice.svg')}}" alt="Look out!" title="Lookout!" >
There are <a href="{{ url_for('settings_page')}}#notifications">system-wide notification URLs enabled</a>, this form will override notification settings for this watch only ‐ an empty Notification URL list here will still send notifications.
There are <a href="{{ url_for('settings.settings_page')}}#notifications">system-wide notification URLs enabled</a>, this form will override notification settings for this watch only ‐ an empty Notification URL list here will still send notifications.
</div>
{% endif %}
<a href="#notifications" id="notification-setting-reset-to-default" class="pure-button button-xsmall" style="right: 20px; top: 20px; position: absolute; background-color: #5f42dd; border-radius: 4px; font-size: 70%; color: #fff">Use system defaults</a>
@@ -10,7 +10,7 @@
<legend>Add a new organisational tag</legend>
<div id="watch-add-wrapper-zone">
<div>
{{ render_simple_field(form.name, placeholder="watch label / tag") }}
{{ render_simple_field(form.name, placeholder="Watch group / tag") }}
</div>
<div>
{{ render_simple_field(form.save_button, title="Save" ) }}
@@ -47,7 +47,7 @@
<a class="link-mute state-{{'on' if tag.notification_muted else 'off'}}" href="{{url_for('tags.mute', uuid=tag.uuid)}}"><img src="{{url_for('static_content', group='images', filename='bell-off.svg')}}" alt="Mute notifications" title="Mute notifications" class="icon icon-mute" ></a>
</td>
<td>{{ "{:,}".format(tag_count[uuid]) if uuid in tag_count else 0 }}</td>
<td class="title-col inline"> <a href="{{url_for('index', tag=uuid) }}">{{ tag.title }}</a></td>
<td class="title-col inline"> <a href="{{url_for('watchlist.index', tag=uuid) }}">{{ tag.title }}</a></td>
<td>
<a class="pure-button pure-button-primary" href="{{ url_for('tags.form_tag_edit', uuid=uuid) }}">Edit</a>
<a class="pure-button pure-button-primary" href="{{ url_for('tags.delete', uuid=uuid) }}" title="Deletes and removes tag">Delete</a>

changedetectionio/blueprint/ui/__init__.py (Normal file, 325 lines)
@@ -0,0 +1,325 @@
import time
from flask import Blueprint, request, redirect, url_for, flash, render_template, session
from loguru import logger

from changedetectionio.store import ChangeDetectionStore
from changedetectionio.blueprint.ui.edit import construct_blueprint as construct_edit_blueprint
from changedetectionio.blueprint.ui.notification import construct_blueprint as construct_notification_blueprint
from changedetectionio.blueprint.ui.views import construct_blueprint as construct_views_blueprint

def _handle_operations(op, uuids, datastore, worker_handler, update_q, queuedWatchMetaData, watch_check_update, extra_data=None, emit_flash=True):
    from flask import request, flash

    if op == 'delete':
        for uuid in uuids:
            if datastore.data['watching'].get(uuid):
                datastore.delete(uuid)
        if emit_flash:
            flash(f"{len(uuids)} watches deleted")

    elif op == 'pause':
        for uuid in uuids:
            if datastore.data['watching'].get(uuid):
                datastore.data['watching'][uuid]['paused'] = True
        if emit_flash:
            flash(f"{len(uuids)} watches paused")

    elif op == 'unpause':
        for uuid in uuids:
            if datastore.data['watching'].get(uuid):
                datastore.data['watching'][uuid.strip()]['paused'] = False
        if emit_flash:
            flash(f"{len(uuids)} watches unpaused")

    elif (op == 'mark-viewed'):
        for uuid in uuids:
            if datastore.data['watching'].get(uuid):
                datastore.set_last_viewed(uuid, int(time.time()))
        if emit_flash:
            flash(f"{len(uuids)} watches updated")

    elif (op == 'mute'):
        for uuid in uuids:
            if datastore.data['watching'].get(uuid):
                datastore.data['watching'][uuid]['notification_muted'] = True
        if emit_flash:
            flash(f"{len(uuids)} watches muted")

    elif (op == 'unmute'):
        for uuid in uuids:
            if datastore.data['watching'].get(uuid):
                datastore.data['watching'][uuid]['notification_muted'] = False
        if emit_flash:
            flash(f"{len(uuids)} watches un-muted")

    elif (op == 'recheck'):
        for uuid in uuids:
            if datastore.data['watching'].get(uuid):
                # Recheck and require a full reprocessing
                worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))
        if emit_flash:
            flash(f"{len(uuids)} watches queued for rechecking")

    elif (op == 'clear-errors'):
        for uuid in uuids:
            if datastore.data['watching'].get(uuid):
                datastore.data['watching'][uuid]["last_error"] = False
        if emit_flash:
            flash(f"{len(uuids)} watches errors cleared")

    elif (op == 'clear-history'):
        for uuid in uuids:
            if datastore.data['watching'].get(uuid):
                datastore.clear_watch_history(uuid)
        if emit_flash:
            flash(f"{len(uuids)} watches cleared/reset.")

    elif (op == 'notification-default'):
        from changedetectionio.notification import (
            USE_SYSTEM_DEFAULT_NOTIFICATION_FORMAT_FOR_WATCH
        )
        for uuid in uuids:
            if datastore.data['watching'].get(uuid):
                datastore.data['watching'][uuid]['notification_title'] = None
                datastore.data['watching'][uuid]['notification_body'] = None
                datastore.data['watching'][uuid]['notification_urls'] = []
                datastore.data['watching'][uuid]['notification_format'] = USE_SYSTEM_DEFAULT_NOTIFICATION_FORMAT_FOR_WATCH
        if emit_flash:
            flash(f"{len(uuids)} watches set to use default notification settings")

    elif (op == 'assign-tag'):
        op_extradata = extra_data
        if op_extradata:
            tag_uuid = datastore.add_tag(title=op_extradata)
            if op_extradata and tag_uuid:
                for uuid in uuids:
                    if datastore.data['watching'].get(uuid):
                        # Bug in old versions caused by bad edit page/tag handler
                        if isinstance(datastore.data['watching'][uuid]['tags'], str):
                            datastore.data['watching'][uuid]['tags'] = []

                        datastore.data['watching'][uuid]['tags'].append(tag_uuid)
        if emit_flash:
            flash(f"{len(uuids)} watches were tagged")

    if uuids:
        for uuid in uuids:
            watch_check_update.send(watch_uuid=uuid)

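# Sketch (editor's example, not part of this module): _handle_operations is a
# plain dispatcher. The checkbox-operations route below feeds it the 'op'
# string from the form plus the selected UUIDs; it mutates the datastore and
# then pings watch_check_update once per watch. With emit_flash=False it can
# run outside a Flask request context, e.g. in a test. FakeSignal and the
# 'abc-123' UUID are assumptions for illustration only.
def _example_pause_one_watch(datastore):
    class FakeSignal:
        def send(self, watch_uuid=None):
            print(f"watch_check_update fired for {watch_uuid}")

    _handle_operations(
        op='pause',
        uuids=['abc-123'],               # assumed existing watch UUID
        datastore=datastore,
        worker_handler=None,             # unused by 'pause'
        update_q=None,                   # unused by 'pause'
        queuedWatchMetaData=None,        # unused by 'pause'
        watch_check_update=FakeSignal(),
        emit_flash=False,                # flash() would need a request context
    )
    assert datastore.data['watching']['abc-123']['paused'] is True
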
def construct_blueprint(datastore: ChangeDetectionStore, update_q, worker_handler, queuedWatchMetaData, watch_check_update):
    ui_blueprint = Blueprint('ui', __name__, template_folder="templates")

    # Register the edit blueprint
    edit_blueprint = construct_edit_blueprint(datastore, update_q, queuedWatchMetaData)
    ui_blueprint.register_blueprint(edit_blueprint)

    # Register the notification blueprint
    notification_blueprint = construct_notification_blueprint(datastore)
    ui_blueprint.register_blueprint(notification_blueprint)

    # Register the views blueprint
    views_blueprint = construct_views_blueprint(datastore, update_q, queuedWatchMetaData, watch_check_update)
    ui_blueprint.register_blueprint(views_blueprint)

    # Import the login decorator
    from changedetectionio.auth_decorator import login_optionally_required

    @ui_blueprint.route("/clear_history/<string:uuid>", methods=['GET'])
    @login_optionally_required
    def clear_watch_history(uuid):
        try:
            datastore.clear_watch_history(uuid)
        except KeyError:
            flash('Watch not found', 'error')
        else:
            flash("Cleared snapshot history for watch {}".format(uuid))
        return redirect(url_for('watchlist.index'))

    @ui_blueprint.route("/clear_history", methods=['GET', 'POST'])
    @login_optionally_required
    def clear_all_history():
        if request.method == 'POST':
            confirmtext = request.form.get('confirmtext')

            if confirmtext == 'clear':
                for uuid in datastore.data['watching'].keys():
                    datastore.clear_watch_history(uuid)
                flash("Cleared snapshot history for all watches")
            else:
                flash('Incorrect confirmation text.', 'error')

            return redirect(url_for('watchlist.index'))

        output = render_template("clear_all_history.html")
        return output

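    # Sketch (editor's example, not part of this module): the confirm gate
    # above means anything except the literal text 'clear' is rejected. With
    # a Flask test client (how it is built depends on the app factory, and
    # CSRF is assumed disabled here), the behaviour can be checked like so:
    def _example_confirm_gate(client):
        resp = client.post("/clear_history", data={"confirmtext": "clear"})
        assert resp.status_code == 302   # redirected to the watch list
        resp = client.post("/clear_history", data={"confirmtext": "nope"})
        assert resp.status_code == 302   # redirected, but an error was flashed
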
    # Clear all statuses, so we do not see the 'unviewed' class
    @ui_blueprint.route("/form/mark-all-viewed", methods=['GET'])
    @login_optionally_required
    def mark_all_viewed():
        # Save the current newest history as the most recently viewed
        with_errors = request.args.get('with_errors') == "1"
        tag_limit = request.args.get('tag')
        logger.debug(f"Limiting to tag {tag_limit}")
        now = int(time.time())
        for watch_uuid, watch in datastore.data['watching'].items():
            if with_errors and not watch.get('last_error'):
                continue

            if tag_limit and ( not watch.get('tags') or tag_limit not in watch['tags'] ):
                logger.debug(f"Skipping watch {watch_uuid}")
                continue

            datastore.set_last_viewed(watch_uuid, now)

        return redirect(url_for('watchlist.index', tag=tag_limit))

    @ui_blueprint.route("/delete", methods=['GET'])
    @login_optionally_required
    def form_delete():
        uuid = request.args.get('uuid')

        if uuid != 'all' and uuid not in datastore.data['watching'].keys():
            flash('The watch by UUID {} does not exist.'.format(uuid), 'error')
            return redirect(url_for('watchlist.index'))

        # More for testing, possible to return the first/only
        if uuid == 'first':
            uuid = list(datastore.data['watching'].keys()).pop()
        datastore.delete(uuid)
        flash('Deleted.')

        return redirect(url_for('watchlist.index'))

    @ui_blueprint.route("/clone", methods=['GET'])
    @login_optionally_required
    def form_clone():
        uuid = request.args.get('uuid')
        # More for testing, possible to return the first/only
        if uuid == 'first':
            uuid = list(datastore.data['watching'].keys()).pop()

        new_uuid = datastore.clone(uuid)

        if not datastore.data['watching'].get(uuid).get('paused'):
            worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=5, item={'uuid': new_uuid}))

        flash('Cloned, you are editing the new watch.')

        return redirect(url_for("ui.ui_edit.edit_page", uuid=new_uuid))

    @ui_blueprint.route("/checknow", methods=['GET'])
    @login_optionally_required
    def form_watch_checknow():
        # A forced recheck will skip the 'skip if content is the same' rule
        tag = request.args.get('tag')
        uuid = request.args.get('uuid')
        with_errors = request.args.get('with_errors') == "1"

        i = 0

        running_uuids = worker_handler.get_running_uuids()

        if uuid:
            if uuid not in running_uuids:
                worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))
            i += 1

        else:
            # Recheck all, including muted
            # Get most overdue first
            for k in sorted(datastore.data['watching'].items(), key=lambda item: item[1].get('last_checked', 0)):
                watch_uuid = k[0]
                watch = k[1]
                if not watch['paused']:
                    if watch_uuid not in running_uuids:
                        if with_errors and not watch.get('last_error'):
                            continue

                        if tag is not None and tag not in watch['tags']:
                            continue

                        worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': watch_uuid}))
                        i += 1

        if i == 1:
            flash("Queued 1 watch for rechecking.")
        if i > 1:
            flash(f"Queued {i} watches for rechecking.")
        if i == 0:
            flash("No watches available to recheck.")

        return redirect(url_for('watchlist.index'))

    @ui_blueprint.route("/form/checkbox-operations", methods=['POST'])
    @login_optionally_required
    def form_watch_list_checkbox_operations():
        op = request.form['op']
        uuids = [u.strip() for u in request.form.getlist('uuids') if u]
        extra_data = request.form.get('op_extradata', '').strip()
        _handle_operations(
            datastore=datastore,
            extra_data=extra_data,
            queuedWatchMetaData=queuedWatchMetaData,
            uuids=uuids,
            worker_handler=worker_handler,
            update_q=update_q,
            watch_check_update=watch_check_update,
            op=op,
        )

        return redirect(url_for('watchlist.index'))


    @ui_blueprint.route("/share-url/<string:uuid>", methods=['GET'])
    @login_optionally_required
    def form_share_put_watch(uuid):
        """Given a watch UUID, upload the info and return a share-link
        the share-link can be imported/added"""
        import requests
        import json
        from copy import deepcopy

        # more for testing
        if uuid == 'first':
            uuid = list(datastore.data['watching'].keys()).pop()

        # Copy it to memory and trim off what we don't need (history)
        watch = deepcopy(datastore.data['watching'].get(uuid))
        # For older versions that are not a @property
        if watch.get('history'):
            del watch['history']

        # for safety/privacy
        for k in list(watch.keys()):
            if k.startswith('notification_'):
                del watch[k]

        for r in ['uuid', 'last_checked', 'last_changed']:
            if watch.get(r):
                del watch[r]

        # Add the global stuff which may have an impact
        watch['ignore_text'] += datastore.data['settings']['application']['global_ignore_text']
        watch['subtractive_selectors'] += datastore.data['settings']['application']['global_subtractive_selectors']

        watch_json = json.dumps(watch)

        try:
            r = requests.request(method="POST",
                                 data={'watch': watch_json},
                                 url="https://changedetection.io/share/share",
                                 headers={'App-Guid': datastore.data['app_guid']})
            res = r.json()

            # Add to the flask session
            session['share-link'] = f"https://changedetection.io/share/{res['share_key']}"

        except Exception as e:
            logger.error(f"Error sharing - {str(e)}")
            flash(f"Could not share, something went wrong while communicating with the share server - {str(e)}", 'error')

        return redirect(url_for('watchlist.index'))

    return ui_blueprint
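Because edit, notification and views are registered as child blueprints of 'ui', every endpoint gets a dotted, namespaced name, which is exactly what the template changes in this set switch to (ui.clear_all_history, ui.ui_edit.edit_page, ui.ui_notification.ajax_callback_send_notification_test). A rough sketch of how the composed blueprint hangs off a Flask app; the collaborator objects are illustrative placeholders, not the project's real wiring:

    from flask import Flask

    app = Flask(__name__)

    # datastore, update_q, worker_handler, queuedWatchMetaData and
    # watch_check_update stand in for the real collaborator objects.
    ui = construct_blueprint(datastore, update_q, worker_handler,
                             queuedWatchMetaData, watch_check_update)
    app.register_blueprint(ui)

    # Nested registration yields dotted endpoint names:
    #   url_for('ui.clear_all_history')
    #   url_for('ui.ui_edit.edit_page', uuid=some_uuid)
    #   url_for('ui.ui_notification.ajax_callback_send_notification_test')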
changedetectionio/blueprint/ui/edit.py (Normal file, 340 lines)
@@ -0,0 +1,340 @@
import time
from copy import deepcopy
import os
import importlib.resources
from flask import Blueprint, request, redirect, url_for, flash, render_template, make_response, send_from_directory, abort
from loguru import logger
from jinja2 import Environment, FileSystemLoader

from changedetectionio.store import ChangeDetectionStore
from changedetectionio.auth_decorator import login_optionally_required
from changedetectionio.time_handler import is_within_schedule
from changedetectionio import worker_handler

def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMetaData):
    edit_blueprint = Blueprint('ui_edit', __name__, template_folder="../ui/templates")

    def _watch_has_tag_options_set(watch):
        """This should be fixed better so that Tag is some proper Model, a tag is just a Watch also"""
        for tag_uuid, tag in datastore.data['settings']['application'].get('tags', {}).items():
            if tag_uuid in watch.get('tags', []) and (tag.get('include_filters') or tag.get('subtractive_selectors')):
                return True

    @edit_blueprint.route("/edit/<string:uuid>", methods=['GET', 'POST'])
    @login_optionally_required
    # https://stackoverflow.com/questions/42984453/wtforms-populate-form-with-data-if-data-exists
    # https://wtforms.readthedocs.io/en/3.0.x/forms/#wtforms.form.Form.populate_obj ?
    def edit_page(uuid):
        from changedetectionio import forms
        from changedetectionio.blueprint.browser_steps.browser_steps import browser_step_ui_config
        from changedetectionio import processors
        import importlib

        # More for testing, possible to return the first/only
        if not datastore.data['watching'].keys():
            flash("No watches to edit", "error")
            return redirect(url_for('watchlist.index'))

        if uuid == 'first':
            uuid = list(datastore.data['watching'].keys()).pop()

        if uuid not in datastore.data['watching']:
            flash("No watch with the UUID %s found." % (uuid), "error")
            return redirect(url_for('watchlist.index'))

        switch_processor = request.args.get('switch_processor')
        if switch_processor:
            for p in processors.available_processors():
                if p[0] == switch_processor:
                    datastore.data['watching'][uuid]['processor'] = switch_processor
                    flash(f"Switched to mode - {p[1]}.")
                    datastore.clear_watch_history(uuid)
                    redirect(url_for('ui_edit.edit_page', uuid=uuid))

        # Be sure we update with a copy instead of accidentally editing the live object by reference
        default = deepcopy(datastore.data['watching'][uuid])

        # Defaults for proxy choice
        if datastore.proxy_list is not None:  # When enabled
            # @todo
            # Radio needs '' not None, also in case the chosen one no longer exists
            if default['proxy'] is None or not any(default['proxy'] in tup for tup in datastore.proxy_list):
                default['proxy'] = ''
        # proxy_override set to the json/text list of the items

        # Does it use some custom form? does one exist?
        processor_name = datastore.data['watching'][uuid].get('processor', '')
        processor_classes = next((tpl for tpl in processors.find_processors() if tpl[1] == processor_name), None)
        if not processor_classes:
            flash(f"Cannot load the edit form for processor/plugin '{processor_name}', plugin missing?", 'error')
            return redirect(url_for('watchlist.index'))

        parent_module = processors.get_parent_module(processor_classes[0])

        try:
            # Get the parent of "processor.py", go up one, get the form (kinda spaghetti but it's reusing existing code)
            forms_module = importlib.import_module(f"{parent_module.__name__}.forms")
            # Access the 'processor_settings_form' class from the 'forms' module
            form_class = getattr(forms_module, 'processor_settings_form')
        except ModuleNotFoundError as e:
            # .forms didn't exist
            form_class = forms.processor_text_json_diff_form
        except AttributeError as e:
            # .forms exists but no useful form
            form_class = forms.processor_text_json_diff_form

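        # Sketch (editor's example, not part of this module): the resolution
        # above is purely by convention - a processor package may ship a
        # sibling `forms` module containing a class named exactly
        # 'processor_settings_form', and anything else falls back to the
        # generic text/JSON diff form. A conforming, hypothetical plugin
        # module could look like:
        #
        #   # changedetectionio/processors/my_processor/forms.py  (invented path)
        #   from wtforms import FloatField, validators
        #   from changedetectionio import forms
        #
        #   class processor_settings_form(forms.processor_text_json_diff_form):
        #       price_change_threshold = FloatField(
        #           'Alert when price moves more than (%)',
        #           [validators.Optional(), validators.NumberRange(min=0)])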
        form = form_class(formdata=request.form if request.method == 'POST' else None,
                          data=default,
                          extra_notification_tokens=default.extra_notification_token_values(),
                          default_system_settings=datastore.data['settings']
                          )

        # For the form widget tag UUID back to "string name" for the field
        form.tags.datastore = datastore

        # Used by some forms that need to dig deeper
        form.datastore = datastore
        form.watch = default

        for p in datastore.extra_browsers:
            form.fetch_backend.choices.append(p)

        form.fetch_backend.choices.append(("system", 'System settings default'))

        # form.browser_steps[0] can be assumed that we 'goto url' first

        if datastore.proxy_list is None:
            # @todo - Couldn't get setattr() etc dynamic addition working, so remove it instead
            del form.proxy
        else:
            form.proxy.choices = [('', 'Default')]
            for p in datastore.proxy_list:
                form.proxy.choices.append(tuple((p, datastore.proxy_list[p]['label'])))

        if request.method == 'POST' and form.validate():

            # If they changed processor, it makes sense to reset it.
            if datastore.data['watching'][uuid].get('processor') != form.data.get('processor'):
                datastore.data['watching'][uuid].clear_watch()
                flash("Reset watch history due to change of processor")

            extra_update_obj = {
                'consecutive_filter_failures': 0,
                'last_error' : False
            }

            if request.args.get('unpause_on_save'):
                extra_update_obj['paused'] = False

            extra_update_obj['time_between_check'] = form.time_between_check.data

            # Ignore text
            form_ignore_text = form.ignore_text.data
            datastore.data['watching'][uuid]['ignore_text'] = form_ignore_text

            # Be sure proxy value is None
            if datastore.proxy_list is not None and form.data['proxy'] == '':
                extra_update_obj['proxy'] = None

            # Unsetting all filter_text methods should make it go back to default
            # This particularly affects tests running
            if 'filter_text_added' in form.data and not form.data.get('filter_text_added') \
                    and 'filter_text_replaced' in form.data and not form.data.get('filter_text_replaced') \
                    and 'filter_text_removed' in form.data and not form.data.get('filter_text_removed'):
                extra_update_obj['filter_text_added'] = True
                extra_update_obj['filter_text_replaced'] = True
                extra_update_obj['filter_text_removed'] = True

            # Because wtforms doesn't support accessing other data in process_, we convert the CSV list of tags back to a list of UUIDs here
            tag_uuids = []
            if form.data.get('tags'):
                # Sometimes in testing this can be a list, don't know why
                if type(form.data.get('tags')) == list:
                    extra_update_obj['tags'] = form.data.get('tags')
                else:
                    for t in form.data.get('tags').split(','):
                        tag_uuids.append(datastore.add_tag(title=t))
                    extra_update_obj['tags'] = tag_uuids

            datastore.data['watching'][uuid].update(form.data)
            datastore.data['watching'][uuid].update(extra_update_obj)

            if not datastore.data['watching'][uuid].get('tags'):
                # Force it to be a list, because form.data['tags'] will be a string if nothing was found
                # And del(form.data['tags']) won't work either for some reason
                datastore.data['watching'][uuid]['tags'] = []

            # Recast it if need be to the right Watch data handler
            watch_class = processors.get_custom_watch_obj_for_processor(form.data.get('processor'))
            datastore.data['watching'][uuid] = watch_class(datastore_path=datastore.datastore_path, default=datastore.data['watching'][uuid])
            flash("Updated watch - unpaused!" if request.args.get('unpause_on_save') else "Updated watch.")

            # Re #286 - We wait for syncing new data to disk in another thread every 60 seconds
            # But in the case something is added we should save straight away
            datastore.needs_write_urgent = True

            # Do not queue on edit if it's not within the time range

            # @todo maybe it should never queue anyway on edit...
            is_in_schedule = True
            watch = datastore.data['watching'].get(uuid)

            if watch.get('time_between_check_use_default'):
                time_schedule_limit = datastore.data['settings']['requests'].get('time_schedule_limit', {})
            else:
                time_schedule_limit = watch.get('time_schedule_limit')

            tz_name = time_schedule_limit.get('timezone')
            if not tz_name:
                tz_name = datastore.data['settings']['application'].get('scheduler_timezone_default', os.getenv('TZ', 'UTC').strip())

            if time_schedule_limit and time_schedule_limit.get('enabled'):
                try:
                    is_in_schedule = is_within_schedule(time_schedule_limit=time_schedule_limit,
                                                        default_tz=tz_name
                                                        )
                except Exception as e:
                    logger.error(
                        f"{uuid} - Recheck scheduler, error handling timezone, check skipped - TZ name '{tz_name}' - {str(e)}")
                    return False

            #############################
            if not datastore.data['watching'][uuid].get('paused') and is_in_schedule:
                # Queue the watch for immediate recheck, with a higher priority
                worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))

            # Diff page [edit] link should go back to diff page
            if request.args.get("next") and request.args.get("next") == 'diff':
                return redirect(url_for('ui.ui_views.diff_history_page', uuid=uuid))

            return redirect(url_for('watchlist.index', tag=request.args.get("tag",'')))

        else:
            if request.method == 'POST' and not form.validate():
                flash("An error occurred, please see below.", "error")

            # JQ is difficult to install on windows and must be manually added (outside requirements.txt)
            jq_support = True
            try:
                import jq
            except ModuleNotFoundError:
                jq_support = False

            watch = datastore.data['watching'].get(uuid)

            # If the system or the watch is configured to need a chrome-type browser
            system_uses_webdriver = datastore.data['settings']['application']['fetch_backend'] == 'html_webdriver'
            watch_needs_selenium_or_playwright = False
            if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver' or watch.get('fetch_backend', '').startswith('extra_browser_'):
                watch_needs_selenium_or_playwright = True

            from zoneinfo import available_timezones

            # Only works reliably with Playwright

            # Import the global plugin system
            from changedetectionio.pluggy_interface import collect_ui_edit_stats_extras

            template_args = {
                'available_processors': processors.available_processors(),
                'available_timezones': sorted(available_timezones()),
                'browser_steps_config': browser_step_ui_config,
                'emailprefix': os.getenv('NOTIFICATION_MAIL_BUTTON_PREFIX', False),
                'extra_classes': 'checking-now' if worker_handler.is_watch_running(uuid) else '',
                'extra_notification_token_placeholder_info': datastore.get_unique_notification_token_placeholders_available(),
                'extra_processor_config': form.extra_tab_content(),
                'extra_title': f" - Edit - {watch.label}",
                'form': form,
                'has_default_notification_urls': True if len(datastore.data['settings']['application']['notification_urls']) else False,
                'has_extra_headers_file': len(datastore.get_all_headers_in_textfile_for_watch(uuid=uuid)) > 0,
                'has_special_tag_options': _watch_has_tag_options_set(watch=watch),
                'jq_support': jq_support,
                'playwright_enabled': os.getenv('PLAYWRIGHT_DRIVER_URL', False),
                'settings_application': datastore.data['settings']['application'],
                'system_has_playwright_configured': os.getenv('PLAYWRIGHT_DRIVER_URL'),
                'system_has_webdriver_configured': os.getenv('WEBDRIVER_URL'),
                'ui_edit_stats_extras': collect_ui_edit_stats_extras(watch),
                'visual_selector_data_ready': datastore.visualselector_data_is_ready(watch_uuid=uuid),
                'timezone_default_config': datastore.data['settings']['application'].get('scheduler_timezone_default'),
                'using_global_webdriver_wait': not default['webdriver_delay'],
                'uuid': uuid,
                'watch': watch,
                'watch_needs_selenium_or_playwright': watch_needs_selenium_or_playwright,
            }

            included_content = None
            if form.extra_form_content():
                # So that the extra panels can access _helpers.html etc, we set the environment to load from templates/
                # And then render the code from the module
                templates_dir = str(importlib.resources.files("changedetectionio").joinpath('templates'))
                env = Environment(loader=FileSystemLoader(templates_dir))
                template = env.from_string(form.extra_form_content())
                included_content = template.render(**template_args)

            output = render_template("edit.html",
                                     extra_tab_content=form.extra_tab_content() if form.extra_tab_content() else None,
                                     extra_form_content=included_content,
                                     **template_args
                                     )

            return output

    @edit_blueprint.route("/edit/<string:uuid>/get-html", methods=['GET'])
    @login_optionally_required
    def watch_get_latest_html(uuid):
        from io import BytesIO
        from flask import send_file
        import brotli

        watch = datastore.data['watching'].get(uuid)
        if watch and watch.history.keys() and os.path.isdir(watch.watch_data_dir):
            latest_filename = list(watch.history.keys())[-1]
            html_fname = os.path.join(watch.watch_data_dir, f"{latest_filename}.html.br")
            with open(html_fname, 'rb') as f:
                if html_fname.endswith('.br'):
                    # Read and decompress the Brotli file
                    decompressed_data = brotli.decompress(f.read())
                else:
                    decompressed_data = f.read()

            buffer = BytesIO(decompressed_data)

            return send_file(buffer, as_attachment=True, download_name=f"{latest_filename}.html", mimetype='text/html')

        # Return a 500 error
        abort(500)

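    # Sketch (editor's example, not part of this module): snapshots are stored
    # Brotli-compressed as "<timestamp>.html.br" in the watch's data directory,
    # which is why the route above decompresses before serving. A round-trip,
    # with an illustrative path:
    def _example_snapshot_roundtrip(tmp_path="/tmp/1700000000.html.br"):
        import brotli
        html = b"<html><body>hello</body></html>"
        with open(tmp_path, "wb") as f:
            f.write(brotli.compress(html))
        with open(tmp_path, "rb") as f:
            assert brotli.decompress(f.read()) == html
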
    # Ajax callback
    @edit_blueprint.route("/edit/<string:uuid>/preview-rendered", methods=['POST'])
    @login_optionally_required
    def watch_get_preview_rendered(uuid):
        '''For when viewing the "preview" of the rendered text from inside of Edit'''
        from flask import jsonify
        from changedetectionio.processors.text_json_diff import prepare_filter_prevew
        result = prepare_filter_prevew(watch_uuid=uuid, form_data=request.form, datastore=datastore)
        return jsonify(result)

    @edit_blueprint.route("/highlight_submit_ignore_url", methods=['POST'])
    @login_optionally_required
    def highlight_submit_ignore_url():
        import re
        mode = request.form.get('mode')
        selection = request.form.get('selection')

        uuid = request.args.get('uuid','')
        if datastore.data["watching"].get(uuid):
            if mode == 'exact':
                for l in selection.splitlines():
                    datastore.data["watching"][uuid]['ignore_text'].append(l.strip())
            elif mode == 'digit-regex':
                for l in selection.splitlines():
                    # Replace any series of numbers with a regex
                    s = re.escape(l.strip())
                    s = re.sub(r'[0-9]+', r'\\d+', s)
                    datastore.data["watching"][uuid]['ignore_text'].append('/' + s + '/')

        return f"<a href={url_for('ui.ui_views.preview_page', uuid=uuid)}>Click to preview</a>"

    return edit_blueprint
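The 'digit-regex' mode in highlight_submit_ignore_url above turns a selected line into an ignore rule that still matches when the numbers change: the line is regex-escaped, every digit run is replaced with \d+, and the result is wrapped in /.../ so the ignore engine treats it as a regular expression. A quick, self-contained illustration of that transformation:

    import re

    line = "Viewed 1,024 times on 2024-05-01"
    s = re.escape(line.strip())
    s = re.sub(r'[0-9]+', r'\\d+', s)
    rule = '/' + s + '/'
    print(rule)
    # -> /Viewed\ \d+,\d+\ times\ on\ \d+\-\d+\-\d+/
    # The same page line still matches tomorrow when every number has moved.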
changedetectionio/blueprint/ui/notification.py (Normal file, 149 lines)
@@ -0,0 +1,149 @@
from flask import Blueprint, request, make_response
import random
from loguru import logger

from changedetectionio.notification_service import NotificationContextData, set_basic_notification_vars
from changedetectionio.store import ChangeDetectionStore
from changedetectionio.auth_decorator import login_optionally_required

def construct_blueprint(datastore: ChangeDetectionStore):
    notification_blueprint = Blueprint('ui_notification', __name__, template_folder="../ui/templates")

    # AJAX endpoint for sending a test
    @notification_blueprint.route("/notification/send-test/<string:watch_uuid>", methods=['POST'])
    @notification_blueprint.route("/notification/send-test", methods=['POST'])
    @notification_blueprint.route("/notification/send-test/", methods=['POST'])
    @login_optionally_required
    def ajax_callback_send_notification_test(watch_uuid=None):

        # watch_uuid could be unset in the case it's used in the tag editor or global settings
        import apprise
        from changedetectionio.notification.handler import process_notification
        from changedetectionio.notification.apprise_plugin.assets import apprise_asset
        from changedetectionio.jinja2_custom import render as jinja_render

        from changedetectionio.notification.apprise_plugin.custom_handlers import apprise_http_custom_handler

        apobj = apprise.Apprise(asset=apprise_asset)

        is_global_settings_form = request.args.get('mode', '') == 'global-settings'
        is_group_settings_form = request.args.get('mode', '') == 'group-settings'

        # Use an existing random one on the global/main settings form
        if not watch_uuid and (is_global_settings_form or is_group_settings_form) \
                and datastore.data.get('watching'):
            watch_uuid = random.choice(list(datastore.data['watching'].keys()))
            logger.debug(f"Send test notification - Choosing random Watch {watch_uuid}")

        if not watch_uuid:
            return make_response("Error: You must have at least one watch configured for 'test notification' to work", 400)

        watch = datastore.data['watching'].get(watch_uuid)
        notification_urls = request.form.get('notification_urls','').strip().splitlines()

        if not notification_urls:
            logger.debug("Test notification - Trying by group/tag in the edit form if available")
            # On an edit page, we should also fire off to the tags if they have notifications
            if request.form.get('tags') and request.form['tags'].strip():
                for k in request.form['tags'].split(','):
                    tag = datastore.tag_exists_by_name(k.strip())
                    notification_urls = tag.get('notifications_urls') if tag and tag.get('notifications_urls') else None

        if not notification_urls and not is_global_settings_form and not is_group_settings_form:
            # In the global settings, use only what is typed currently in the text box
            logger.debug("Test notification - Trying by global system settings notifications")
            if datastore.data['settings']['application'].get('notification_urls'):
                notification_urls = datastore.data['settings']['application']['notification_urls']

        if not notification_urls:
            return 'Error: No Notification URLs set/found'

        for n_url in notification_urls:
            # We are ONLY validating the apprise:// part here, convert all tags to something so as not to break apprise URLs
            generic_notification_context_data = NotificationContextData()
            generic_notification_context_data.set_random_for_validation()
            n_url = jinja_render(template_str=n_url, **generic_notification_context_data).strip()
            if len(n_url.strip()):
                if not apobj.add(n_url):
                    return f'Error: {n_url} is not a valid AppRise URL.'

        try:
            # Use the same as when it is triggered, but then override it with the form test values
            n_object = NotificationContextData({
                'watch_url': request.form.get('window_url', "https://changedetection.io"),
                'notification_urls': notification_urls
            })

            # Only use if present, if not set in n_object it should use the default system value
            if 'notification_format' in request.form and request.form['notification_format'].strip():
                n_object['notification_format'] = request.form.get('notification_format', '').strip()
            else:
                n_object['notification_format'] = datastore.data['settings']['application'].get('notification_format')

            if 'notification_title' in request.form and request.form['notification_title'].strip():
                n_object['notification_title'] = request.form.get('notification_title', '').strip()
            elif datastore.data['settings']['application'].get('notification_title'):
                n_object['notification_title'] = datastore.data['settings']['application'].get('notification_title')
            else:
                n_object['notification_title'] = "Test title"

            if 'notification_body' in request.form and request.form['notification_body'].strip():
                n_object['notification_body'] = request.form.get('notification_body', '').strip()
            elif datastore.data['settings']['application'].get('notification_body'):
                n_object['notification_body'] = datastore.data['settings']['application'].get('notification_body')
            else:
                n_object['notification_body'] = "Test body"

            n_object['as_async'] = False

            # Same as in the notification service, should be refactored
            dates = []
            trigger_text = ''
            snapshot_contents = ''
            if watch:
                watch_history = watch.history
                dates = list(watch_history.keys())
                trigger_text = watch.get('trigger_text', [])
                # Add text that was triggered
                if len(dates):
                    snapshot_contents = watch.get_history_snapshot(dates[-1])
                else:
                    snapshot_contents = "No snapshot/history available, the watch should fetch at least once."

                if len(trigger_text):
                    from . import html_tools
                    triggered_text = html_tools.get_triggered_text(content=snapshot_contents, trigger_text=trigger_text)
                    if triggered_text:
                        triggered_text = '\n'.join(triggered_text)

            # Could be called as a 'test notification' with only 1 snapshot available
            prev_snapshot = "Example text: example test\nExample text: change detection is cool\nExample text: some more examples\n"
            current_snapshot = "Example text: example test\nExample text: change detection is fantastic\nExample text: even more examples\nExample text: a lot more examples"

            if len(dates) > 1:
                prev_snapshot = watch.get_history_snapshot(dates[-2])
                current_snapshot = watch.get_history_snapshot(dates[-1])

            n_object.update(set_basic_notification_vars(snapshot_contents=snapshot_contents,
                                                        current_snapshot=current_snapshot,
                                                        prev_snapshot=prev_snapshot,
                                                        watch=watch,
                                                        triggered_text=trigger_text))

            sent_obj = process_notification(n_object, datastore)

        except Exception as e:
            e_str = str(e)
            # Remove this text which is not important and floods the container
            e_str = e_str.replace(
                "DEBUG - <class 'apprise.decorators.base.CustomNotifyPlugin.instantiate_plugin.<locals>.CustomNotifyPluginWrapper'>",
                '')

            return make_response(e_str, 400)

        return 'OK - Sent test notifications'

    return notification_blueprint
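A note on how the test endpoint resolves where to send: it prefers the URLs typed into the form, then any URLs on the watch's tags, then the system-wide notification URLs, and it returns a 400 if there is no watch it can sample at all. A sketch of calling it with a Flask test client; 'client' and the mounting prefix are assumptions here, and CSRF handling is elided:

    # mode=global-settings lets the endpoint pick a random existing watch
    resp = client.post(
        "/notification/send-test?mode=global-settings",
        data={
            "notification_urls": "json://localhost:8080/test",  # any apprise-style URL
            "notification_title": "Test title",
            "notification_body": "Test body",
        },
    )
    print(resp.status_code, resp.get_data(as_text=True))
    # 200 with body 'OK - Sent test notifications' on success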
@@ -3,7 +3,7 @@
|
||||
<div class="box-wrap inner">
|
||||
<form
|
||||
class="pure-form pure-form-stacked"
|
||||
action="{{url_for('clear_all_history')}}"
|
||||
action="{{url_for('ui.clear_all_history')}}"
|
||||
method="POST"
|
||||
>
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" >
|
||||
@@ -37,7 +37,7 @@
|
||||
</div>
|
||||
<br />
|
||||
<div class="pure-control-group">
|
||||
<a href="{{url_for('index')}}" class="pure-button button-cancel"
|
||||
<a href="{{url_for('watchlist.index')}}" class="pure-button button-cancel"
|
||||
>Cancel</a
|
||||
>
|
||||
</div>
|
||||
@@ -1,11 +1,14 @@
|
||||
{% extends 'base.html' %}
|
||||
{% block content %}
|
||||
{% from '_helpers.html' import render_field, render_checkbox_field, render_button, render_time_schedule_form, playwright_warning, only_webdriver_type_watches_warning %}
|
||||
{% from '_helpers.html' import render_field, render_checkbox_field, render_button, render_time_schedule_form, playwright_warning, only_playwright_type_watches_warning, render_conditions_fieldlist_of_formfields_as_table, render_ternary_field %}
|
||||
{% from '_common_fields.html' import render_common_settings_form %}
|
||||
<script src="{{url_for('static_content', group='js', filename='tabs.js')}}" defer></script>
|
||||
<script src="{{url_for('static_content', group='js', filename='vis.js')}}" defer></script>
|
||||
<script src="{{url_for('static_content', group='js', filename='global-settings.js')}}" defer></script>
|
||||
<script src="{{url_for('static_content', group='js', filename='scheduler.js')}}" defer></script>
|
||||
<script src="{{url_for('static_content', group='js', filename='conditions.js')}}" defer></script>
|
||||
|
||||
|
||||
<script>
|
||||
const browser_steps_available_screenshots=JSON.parse('{{ watch.get_browsersteps_available_screenshots|tojson }}');
|
||||
const browser_steps_config=JSON.parse('{{ browser_steps_config|tojson }}');
|
||||
@@ -17,7 +20,7 @@
|
||||
{% if emailprefix %}
|
||||
const email_notification_prefix=JSON.parse('{{ emailprefix|tojson }}');
|
||||
{% endif %}
|
||||
const notification_base_url="{{url_for('ajax_callback_send_notification_test', watch_uuid=uuid)}}";
|
||||
const notification_base_url="{{url_for('ui.ui_notification.ajax_callback_send_notification_test', watch_uuid=uuid)}}";
|
||||
const playwright_enabled={% if playwright_enabled %}true{% else %}false{% endif %};
|
||||
const recheck_proxy_start_url="{{url_for('check_proxies.start_check', uuid=uuid)}}";
|
||||
const proxy_recheck_status_url="{{url_for('check_proxies.get_recheck_status', uuid=uuid)}}";
|
||||
@@ -50,6 +53,7 @@
|
||||
{% if watch['processor'] == 'text_json_diff' %}
|
||||
<li class="tab"><a id="visualselector-tab" href="#visualselector">Visual Filter Selector</a></li>
|
||||
<li class="tab" id="filters-and-triggers-tab"><a href="#filters-and-triggers">Filters & Triggers</a></li>
|
||||
<li class="tab" id="conditions-tab"><a href="#conditions">Conditions</a></li>
|
||||
{% endif %}
|
||||
<li class="tab"><a href="#notifications">Notifications</a></li>
|
||||
<li class="tab"><a href="#stats">Stats</a></li>
|
||||
@@ -58,7 +62,7 @@
|
||||
|
||||
<div class="box-wrap inner">
|
||||
<form class="pure-form pure-form-stacked"
|
||||
action="{{ url_for('edit_page', uuid=uuid, next = request.args.get('next'), unpause_on_save = request.args.get('unpause_on_save'), tag = request.args.get('tag')) }}" method="POST">
|
||||
action="{{ url_for('ui.ui_edit.edit_page', uuid=uuid, next = request.args.get('next'), unpause_on_save = request.args.get('unpause_on_save'), tag = request.args.get('tag')) }}" method="POST">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}">
|
||||
|
||||
<div class="tab-pane-inner" id="general">
|
||||
@@ -68,15 +72,16 @@
|
||||
<div class="pure-form-message">Some sites use JavaScript to create the content, for this you should <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Fetching-pages-with-WebDriver">use the Chrome/WebDriver Fetcher</a></div>
|
||||
<div class="pure-form-message">Variables are supported in the URL (<a href="https://github.com/dgtlmoon/changedetection.io/wiki/Handling-variables-in-the-watched-URL">help and examples here</a>).</div>
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_field(form.tags) }}
|
||||
<span class="pure-form-message-inline">Organisational tag/group name used in the main listing page</span>
|
||||
</div>
|
||||
<div class="pure-control-group inline-radio">
|
||||
{{ render_field(form.processor) }}
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_field(form.title, class="m-d") }}
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_field(form.tags) }}
|
||||
<span class="pure-form-message-inline">Organisational tag/group name used in the main listing page</span>
|
||||
{{ render_field(form.title, class="m-d", placeholder=watch.label) }}
|
||||
<span class="pure-form-message-inline">Automatically uses the page title if found, you can also use your own title/description here</span>
|
||||
</div>
|
||||
<div class="pure-control-group time-between-check border-fieldset">
|
||||
|
||||
@@ -97,15 +102,16 @@
|
||||
</div>
|
||||
<br>
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_checkbox_field(form.extract_title_as_title) }}
|
||||
</div>
|
||||
|
||||
<div class="pure-control-group">
|
||||
{{ render_checkbox_field(form.filter_failure_notification_send) }}
|
||||
<span class="pure-form-message-inline">
|
||||
Sends a notification when the filter can no longer be seen on the page, good for knowing when the page changed and your filter will not work anymore.
|
||||
</span>
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_ternary_field(form.use_page_title_in_list) }}
|
||||
</div>
|
||||
</fieldset>
|
||||
</div>
|
||||
|
||||
@@ -200,7 +206,9 @@ Math: {{ 1 + 1 }}") }}
|
||||
</div>
|
||||
|
||||
<div class="tab-pane-inner" id="browser-steps">
|
||||
{% if playwright_enabled and watch_uses_webdriver %}
|
||||
{% if watch_needs_selenium_or_playwright %}
|
||||
{# Only works with playwright #}
|
||||
{% if system_has_playwright_configured %}
|
||||
<img class="beta-logo" src="{{url_for('static_content', group='images', filename='beta-logo.png')}}" alt="New beta functionality">
|
||||
<fieldset>
|
||||
<div class="pure-control-group">
|
||||
@@ -219,7 +227,6 @@ Math: {{ 1 + 1 }}") }}
|
||||
<div class="flex-wrapper" >
|
||||
|
||||
<div id="browser-steps-ui" class="noselect">
|
||||
|
||||
<div class="noselect" id="browsersteps-selector-wrapper" style="width: 100%">
|
||||
<span class="loader" >
|
||||
<span id="browsersteps-click-start">
|
||||
@@ -234,31 +241,32 @@ Math: {{ 1 + 1 }}") }}
|
||||
</div>
|
||||
</div>
|
||||
<div id="browser-steps-fieldlist" >
|
||||
<span id="browser-seconds-remaining">Loading</span> <span style="font-size: 80%;"> (<a target="newwindow" href="https://github.com/dgtlmoon/changedetection.io/pull/478/files#diff-1a79d924d1840c485238e66772391268a89c95b781d69091384cf1ea1ac146c9R4">?</a>) </span>
|
||||
<span id="browser-seconds-remaining">Press "Play" to start.</span> <span style="font-size: 80%;"> (<a target="newwindow" href="https://github.com/dgtlmoon/changedetection.io/pull/478/files#diff-1a79d924d1840c485238e66772391268a89c95b781d69091384cf1ea1ac146c9R4">?</a>) </span>
|
||||
{{ render_field(form.browser_steps) }}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</fieldset>
|
||||
{% else %}
|
||||
<span class="pure-form-message-inline">
|
||||
{% if not watch_uses_webdriver %}
|
||||
{{ only_webdriver_type_watches_warning() }}
|
||||
{% endif %}
|
||||
{% if not playwright_enabled %}
|
||||
{{ playwright_warning() }}
|
||||
{% endif %}
|
||||
</span>
|
||||
{# it's configured to use selenium or chrome but system says its not configured #}
|
||||
{{ playwright_warning() }}
|
||||
{% if system_has_webdriver_configured %}
|
||||
<strong>Selenium/Webdriver cant be used here because it wont fetch screenshots reliably.</strong>
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
{% else %}
|
||||
{# "This functionality needs chrome.." #}
|
||||
{{ only_playwright_type_watches_warning() }}
|
||||
{% endif %}
|
||||
</div>
|
||||
|
||||
|
||||
<div class="tab-pane-inner" id="notifications">
|
||||
<fieldset>
|
||||
<div class="pure-control-group inline-radio">
|
||||
{{ render_checkbox_field(form.notification_muted) }}
|
||||
{{ render_ternary_field(form.notification_muted, BooleanField=true) }}
|
||||
</div>
|
||||
{% if watch_uses_webdriver %}
|
||||
{% if watch_needs_selenium_or_playwright %}
|
||||
<div class="pure-control-group inline-radio">
|
||||
{{ render_checkbox_field(form.notification_screenshot) }}
|
||||
<span class="pure-form-message-inline">
|
||||
@@ -270,17 +278,31 @@ Math: {{ 1 + 1 }}") }}
|
||||
{% if has_default_notification_urls %}
|
||||
<div class="inline-warning">
|
||||
<img class="inline-warning-icon" src="{{url_for('static_content', group='images', filename='notice.svg')}}" alt="Look out!" title="Lookout!" >
|
||||
There are <a href="{{ url_for('settings_page')}}#notifications">system-wide notification URLs enabled</a>, this form will override notification settings for this watch only ‐ an empty Notification URL list here will still send notifications.
|
||||
There are <a href="{{ url_for('settings.settings_page')}}#notifications">system-wide notification URLs enabled</a>, this form will override notification settings for this watch only ‐ an empty Notification URL list here will still send notifications.
|
||||
</div>
|
||||
{% endif %}
|
||||
<a href="#notifications" id="notification-setting-reset-to-default" class="pure-button button-xsmall" style="right: 20px; top: 20px; position: absolute; background-color: #5f42dd; border-radius: 4px; font-size: 70%; color: #fff">Use system defaults</a>
|
||||
|
||||
{{ render_common_settings_form(form, emailprefix, settings_application, extra_notification_token_placeholder_info) }}
|
||||
</div>
|
||||
</fieldset>
|
||||
</div>
|
||||
|
||||
{% if watch['processor'] == 'text_json_diff' %}
|
||||

<div class="tab-pane-inner" id="conditions">
<script>
const verify_condition_rule_url="{{url_for('conditions.verify_condition_single_rule', watch_uuid=uuid)}}";
</script>
<div class="pure-control-group">
{{ render_field(form.conditions_match_logic) }}
{{ render_conditions_fieldlist_of_formfields_as_table(form.conditions) }}
<div class="pure-form-message-inline">
<p id="verify-state-text">Use the verify (✓) button to test if a condition passes against the current snapshot.</p>
Read a quick tutorial about <a href="https://changedetection.io/tutorial/conditional-actions-web-page-changes">using conditional web page changes here</a>.<br>
</div>
</div>
</div>
<div class="tab-pane-inner" id="filters-and-triggers">
<span id="activate-text-preview" class="pure-button pure-button-primary button-xsmall">Activate preview</span>
<div>
@@ -296,61 +318,8 @@ Math: {{ 1 + 1 }}") }}
</li>
</ul>
</div>
<div class="pure-control-group">
{% set field = render_field(form.include_filters,
rows=5,
placeholder=has_tag_filters_extra+"#example
xpath://body/div/span[contains(@class, 'example-class')]",
class="m-d")
%}
{{ field }}
{% if '/text()' in field %}
<span class="pure-form-message-inline"><strong>Note: the //text() function does not work where the <element> contains <![CDATA[]]></strong></span><br>
{% endif %}
<span class="pure-form-message-inline">One CSS, XPath, JSONPath/jq selector per line, <i>any</i> rule that matches will be used.<br>
<span data-target="#advanced-help-selectors" class="toggle-show pure-button button-tag button-xsmall">Show advanced help and tips</span><br>
<ul id="advanced-help-selectors" style="display: none;">
<li>CSS - Limit text to this CSS rule, only text matching this CSS rule is included.</li>
<li>JSON - Limit text to this JSON rule, using either <a href="https://pypi.org/project/jsonpath-ng/" target="new">JSONPath</a> or <a href="https://stedolan.github.io/jq/" target="new">jq</a> (if installed).
<ul>
<li>JSONPath: Prefix with <code>json:</code>, use <code>json:$</code> to force re-formatting if required, <a href="https://jsonpath.com/" target="new">test your JSONPath here</a>.</li>
{% if jq_support %}
<li>jq: Prefix with <code>jq:</code> and <a href="https://jqplay.org/" target="new">test your jq here</a>. Using <a href="https://stedolan.github.io/jq/" target="new">jq</a> allows for complex filtering and processing of JSON data with built-in functions, regex, filtering, and more. See examples and documentation <a href="https://stedolan.github.io/jq/manual/" target="new">here</a>. The <code>jqraw:</code> prefix outputs the results as text instead of a JSON list.</li>
{% else %}
<li>jq support not installed</li>
{% endif %}
</ul>
</li>
<li>XPath - Limit text to this XPath rule, simply start with a forward-slash. To specify XPath to be used explicitly, or where the XPath rule starts with an XPath function: Prefix with <code>xpath:</code>
<ul>
<li>Example: <code>//*[contains(@class, 'sametext')]</code> or <code>xpath:count(//*[contains(@class, 'sametext')])</code>, <a
href="http://xpather.com/" target="new">test your XPath here</a></li>
<li>Example: Get all titles from an RSS feed <code>//title/text()</code></li>
<li>To use XPath 1.0: Prefix with <code>xpath1:</code></li>
</ul>
</li>
<li>
Please be sure that you thoroughly understand how to write CSS, JSONPath, XPath{% if jq_support %}, or jq selector{%endif%} rules before filing an issue on GitHub! See <a
href="https://github.com/dgtlmoon/changedetection.io/wiki/CSS-Selector-help">here for more CSS selector help</a>.<br>
</li>
</ul>

</span>
</div>
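To make the selector prefixes described above concrete, a combined filter list for this field might look like the following, one selector per line. The values are illustrative only and are not taken from this template:

#price .value
json:$.products[0].price
jq:.products[] | select(.in_stock) | .price
xpath://div[@id='content']//span[contains(@class, 'price')]
xpath1://title/text()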
<fieldset class="pure-control-group">
{{ render_field(form.subtractive_selectors, rows=5, placeholder=has_tag_filters_extra+"header
footer
nav
.stockticker
//*[contains(text(), 'Advertisement')]") }}
<span class="pure-form-message-inline">
<ul>
<li> Remove HTML element(s) by CSS and XPath selectors before text conversion. </li>
<li> Don't paste HTML here, use only CSS and XPath selectors </li>
<li> Add multiple CSS or XPath selectors, one per line, to ignore multiple parts of the HTML. </li>
</ul>
</span>
</fieldset>
{% include "edit/include_subtract.html" %}
<div class="text-filtering border-fieldset">
<fieldset class="pure-group" id="text-filtering-type-options">
<h3>Text filtering</h3>
@@ -378,79 +347,12 @@ nav
{{ render_checkbox_field(form.trim_text_whitespace) }}
<span class="pure-form-message-inline">Remove any whitespace before and after each line of text</span>
</fieldset>
<fieldset>
<div class="pure-control-group">
{{ render_field(form.trigger_text, rows=5, placeholder="Some text to wait for in a line
/some.regex\d{2}/ for case-INsensitive regex
") }}
<span class="pure-form-message-inline">
<ul>
<li>Text to wait for before triggering a change/notification, all text and regex are tested <i>case-insensitive</i>.</li>
<li>Trigger text is processed from the result-text that comes out of any CSS/JSON filters for this watch</li>
<li>Each line is processed separately (think of each line as "OR")</li>
<li>Note: Wrap in forward slashes / to use regex, example: <code>/foo\d/</code></li>
</ul>
</span>
</div>
</fieldset>
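As an illustration of the rules above (each line stands alone as an "OR", matching is case-insensitive, and slashes mark a regex), a trigger text value might look like this; both lines are made-up examples:

Back in stock
/ships within \d+ days/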
<fieldset class="pure-group">
{{ render_field(form.ignore_text, rows=5, placeholder="Some text to ignore in a line
/some.regex\d{2}/ for case-INsensitive regex
") }}
<span class="pure-form-message-inline">
<ul>
<li>Matching text will be <strong>ignored</strong> in the text snapshot (you can still see it but it won't trigger a change)</li>
<li>Each line processed separately, any line matching will be ignored (removed before creating the checksum)</li>
<li>Regular Expression support, wrap the entire line in forward slashes <code>/regex/</code></li>
<li>Changing this will affect the comparison checksum which may trigger an alert</li>
</ul>
</span>

</fieldset>

<fieldset>
<div class="pure-control-group">
{{ render_field(form.text_should_not_be_present, rows=5, placeholder="For example: Out of stock
Sold out
Not in stock
Unavailable") }}
<span class="pure-form-message-inline">
<ul>
<li>Block change-detection while this text is on the page, all text and regex are tested <i>case-insensitive</i>, good for waiting for when a product is available again</li>
<li>Block text is processed from the result-text that comes out of any CSS/JSON filters for this watch</li>
<li>All lines here must not exist (think of each line as "OR")</li>
<li>Note: Wrap in forward slashes / to use regex, example: <code>/foo\d/</code></li>
</ul>
</span>
</div>
</fieldset>
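By the same logic, an illustrative value for the field above could be the following; change detection stays blocked while any one of these lines still matches (example values only):

Out of stock
/pre-?order/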
<fieldset>
<div class="pure-control-group">
{{ render_field(form.extract_text, rows=5, placeholder="/.+?\d+ comments.+?/
or
keyword") }}
<span class="pure-form-message-inline">
<ul>
<li>Extracts text in the final output (line by line) after other filters using regular expressions or string match;
<ul>
<li>Regular expression ‐ example <code>/reports.+?2022/i</code></li>
<li>Don't forget to consider the white-space at the start of a line <code>/.+?reports.+?2022/i</code></li>
<li>Use <code>/(?aiLmsux)/</code> type flags (more <a href="https://docs.python.org/3/library/re.html#index-15">information here</a>)<br></li>
<li>Keyword match ‐ example <code>Out of stock</code></li>
<li>Use groups to extract just that text ‐ example <code>/reports.+?(\d+)/i</code> returns a list of years only</li>
<li>Example - match lines containing a keyword <code>/.*icecream.*/</code></li>
</ul>
</li>
<li>One line per regular-expression/string match</li>
</ul>
</span>
</div>
</fieldset>
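The behaviour described above (slash-wrapped lines treated as regex with optional trailing flags, plain lines as keywords, capture groups narrowing the output) can be sketched in a few lines of Python. This is only an illustration of the documented semantics, not the project's actual implementation; the function name and rule values are invented:

import re

def extract_lines(rules, text):
    # Illustrative sketch of the "Extract text" semantics described above.
    results = []
    for rule in rules:
        m = re.fullmatch(r'/(.+)/(\w*)', rule)
        if m:
            pattern, flag_chars = m.groups()
            flags = re.IGNORECASE if 'i' in flag_chars else 0
            for line in text.splitlines():
                hit = re.search(pattern, line, flags)
                if hit:
                    # If the regex captured groups, keep only the group contents
                    results.append(''.join(hit.groups()) if hit.groups() else hit.group(0))
        else:
            # Plain keyword: keep any line containing it
            results.extend(line for line in text.splitlines() if rule in line)
    return results

print(extract_lines([r'/reports.+?(\d+)/i'], "Annual Reports 2022\nnothing here"))  # ['2022']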
{% include "edit/text-options.html" %}
</div>
</div>
</div>
<div id="text-preview" style="display: none;" >
<script>
const preview_text_edit_filters_url="{{url_for('watch_get_preview_rendered', uuid=uuid)}}";
const preview_text_edit_filters_url="{{url_for('ui.ui_edit.watch_get_preview_rendered', uuid=uuid)}}";
</script>
<br>
{#<div id="text-preview-controls"><span id="text-preview-refresh" class="pure-button button-xsmall">Refresh</span></div>#}
@@ -481,13 +383,15 @@ keyword") }}

<fieldset>
<div class="pure-control-group">
{% if playwright_enabled and watch_uses_webdriver %}
{% if watch_needs_selenium_or_playwright %}
{% if system_has_playwright_configured %}
<span class="pure-form-message-inline" id="visual-selector-heading">
The Visual Selector tool lets you select the <i>text</i> elements that will be used for the change detection. It automatically fills in the filters in the "CSS/JSONPath/JQ/XPath Filters" box of the <a href="#filters-and-triggers">Filters & Triggers</a> tab. Use <strong>Shift+Click</strong> to select multiple items.
</span>

<div id="selector-header">
<a id="clear-selector" class="pure-button button-secondary button-xsmall" style="font-size: 70%">Clear selection</a>
<!-- visual selector IMG will try to load, it will either replace this or on error replace it with some handy text -->
<i class="fetching-update-notice" style="font-size: 80%;">One moment, fetching screenshot and element information...</i>
</div>
<div id="selector-wrapper" style="display: none">
@@ -499,13 +403,16 @@ keyword") }}
</div>
<div id="selector-current-xpath" style="overflow-x: hidden"><strong>Currently:</strong> <span class="text">Loading...</span></div>
{% else %}
{% if not watch_uses_webdriver %}
{{ only_webdriver_type_watches_warning() }}
{% endif %}
{% if not playwright_enabled %}
{{ playwright_warning() }}
{% endif %}
{# The watch needed chrome but the system says that playwright is not ready #}
{{ playwright_warning() }}
{% endif %}
{% if system_has_webdriver_configured %}
<strong>Selenium/Webdriver can't be used here because it won't fetch screenshots reliably.</strong>
{% endif %}
{% else %}
{# "This functionality needs chrome.." #}
{{ only_playwright_type_watches_warning() }}
{% endif %}
</div>
</fieldset>
</div>
@@ -545,9 +452,16 @@ keyword") }}
</tr>
</tbody>
</table>

{% if ui_edit_stats_extras %}
<div class="plugin-stats-extras"> <!-- from pluggy plugin -->
{{ ui_edit_stats_extras|safe }}
</div>
{% endif %}

{% if watch.history_n %}
<p>
<a href="{{url_for('watch_get_latest_html', uuid=uuid)}}" class="pure-button button-small">Download latest HTML snapshot</a>
<a href="{{url_for('ui.ui_edit.watch_get_latest_html', uuid=uuid)}}" class="pure-button button-small">Download latest HTML snapshot</a>
</p>
{% endif %}

@@ -556,12 +470,12 @@ keyword") }}
<div id="actions">
<div class="pure-control-group">
{{ render_button(form.save_button) }}
<a href="{{url_for('form_delete', uuid=uuid)}}"
class="pure-button button-small button-error ">Delete</a>
<a href="{{url_for('clear_watch_history', uuid=uuid)}}"
class="pure-button button-small button-error ">Clear History</a>
<a href="{{url_for('form_clone', uuid=uuid)}}"
class="pure-button button-small ">Create Copy</a>
<a href="{{url_for('ui.form_delete', uuid=uuid)}}"
class="pure-button button-error ">Delete</a>
{% if watch.history_n %}<a href="{{url_for('ui.clear_watch_history', uuid=uuid)}}"
class="pure-button button-error">Clear History</a>{% endif %}
<a href="{{url_for('ui.form_clone', uuid=uuid)}}"
class="pure-button">Clone & Edit</a>
</div>
</div>
</form>
changedetectionio/blueprint/ui/views.py (new file, 236 lines)
@@ -0,0 +1,236 @@
from flask import Blueprint, request, redirect, url_for, flash, render_template, make_response, send_from_directory, abort
import os
import time
from loguru import logger

from changedetectionio.store import ChangeDetectionStore
from changedetectionio.auth_decorator import login_optionally_required
from changedetectionio import html_tools
from changedetectionio import worker_handler

def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMetaData, watch_check_update):
    views_blueprint = Blueprint('ui_views', __name__, template_folder="../ui/templates")

    @views_blueprint.route("/preview/<string:uuid>", methods=['GET'])
    @login_optionally_required
    def preview_page(uuid):
        content = []
        versions = []
        timestamp = None

        # More for testing, possible to return the first/only
        if uuid == 'first':
            uuid = list(datastore.data['watching'].keys()).pop()

        try:
            watch = datastore.data['watching'][uuid]
        except KeyError:
            flash("No history found for the specified link, bad link?", "error")
            return redirect(url_for('watchlist.index'))

        system_uses_webdriver = datastore.data['settings']['application']['fetch_backend'] == 'html_webdriver'
        extra_stylesheets = [url_for('static_content', group='styles', filename='diff.css')]

        is_html_webdriver = False
        if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver' or watch.get('fetch_backend', '').startswith('extra_browser_'):
            is_html_webdriver = True
        triggered_line_numbers = []
        if datastore.data['watching'][uuid].history_n == 0 and (watch.get_error_text() or watch.get_error_snapshot()):
            flash("Preview unavailable - No fetch/check completed or triggers not reached", "error")
        else:
            # So prepare the latest preview or not
            preferred_version = request.args.get('version')
            versions = list(watch.history.keys())
            timestamp = versions[-1]
            if preferred_version and preferred_version in versions:
                timestamp = preferred_version

            try:
                versions = list(watch.history.keys())
                content = watch.get_history_snapshot(timestamp)

                triggered_line_numbers = html_tools.strip_ignore_text(content=content,
                                                                      wordlist=watch['trigger_text'],
                                                                      mode='line numbers'
                                                                      )

            except Exception as e:
                content.append({'line': f"File doesn't exist or unable to read timestamp {timestamp}", 'classes': ''})

        output = render_template("preview.html",
                                 content=content,
                                 current_version=timestamp,
                                 history_n=watch.history_n,
                                 extra_stylesheets=extra_stylesheets,
                                 extra_title=f" - Diff - {watch.label} @ {timestamp}",
                                 triggered_line_numbers=triggered_line_numbers,
                                 current_diff_url=watch['url'],
                                 screenshot=watch.get_screenshot(),
                                 watch=watch,
                                 uuid=uuid,
                                 is_html_webdriver=is_html_webdriver,
                                 last_error=watch['last_error'],
                                 last_error_text=watch.get_error_text(),
                                 last_error_screenshot=watch.get_error_snapshot(),
                                 versions=versions
                                 )

        return output

    @views_blueprint.route("/diff/<string:uuid>", methods=['POST'])
    @login_optionally_required
    def diff_history_page_build_report(uuid):
        from changedetectionio import forms

        # More for testing, possible to return the first/only
        if uuid == 'first':
            uuid = list(datastore.data['watching'].keys()).pop()

        try:
            watch = datastore.data['watching'][uuid]
        except KeyError:
            flash("No history found for the specified link, bad link?", "error")
            return redirect(url_for('watchlist.index'))

        # For submission of requesting an extract
        extract_form = forms.extractDataForm(formdata=request.form,
                                             data={'extract_regex': request.form.get('extract_regex', '')}
                                             )
        if not extract_form.validate():
            flash("An error occurred, please see below.", "error")
            return _render_diff_template(uuid, extract_form)

        else:
            extract_regex = request.form.get('extract_regex', '').strip()
            output = watch.extract_regex_from_all_history(extract_regex)
            if output:
                watch_dir = os.path.join(datastore.datastore_path, uuid)
                response = make_response(send_from_directory(directory=watch_dir, path=output, as_attachment=True))
                response.headers['Content-type'] = 'text/csv'
                response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
                response.headers['Pragma'] = 'no-cache'
                response.headers['Expires'] = "0"
                return response

            flash('No matches found while scanning all of the watch history for that RegEx.', 'error')
            return redirect(url_for('ui.ui_views.diff_history_page', uuid=uuid) + '#extract')

    def _render_diff_template(uuid, extract_form=None):
        """Helper function to render the diff template with all required data"""
        from changedetectionio import forms

        # More for testing, possible to return the first/only
        if uuid == 'first':
            uuid = list(datastore.data['watching'].keys()).pop()

        extra_stylesheets = [url_for('static_content', group='styles', filename='diff.css')]
        try:
            watch = datastore.data['watching'][uuid]
        except KeyError:
            flash("No history found for the specified link, bad link?", "error")
            return redirect(url_for('watchlist.index'))

        # Use provided form or create a new one
        if extract_form is None:
            extract_form = forms.extractDataForm(formdata=request.form,
                                                 data={'extract_regex': request.form.get('extract_regex', '')}
                                                 )

        history = watch.history
        dates = list(history.keys())

        # If a "from_version" was requested, then find it (or the closest one)
        # Also set "from version" to be the closest version to the one that was last viewed.

        best_last_viewed_timestamp = watch.get_from_version_based_on_last_viewed
        from_version_timestamp = best_last_viewed_timestamp if best_last_viewed_timestamp else dates[-2]
        from_version = request.args.get('from_version', from_version_timestamp)

        # Use the current one if nothing was specified
        to_version = request.args.get('to_version', str(dates[-1]))

        try:
            to_version_file_contents = watch.get_history_snapshot(timestamp=to_version)
        except Exception as e:
            logger.error(f"Unable to read watch history to-version for version {to_version}: {str(e)}")
            to_version_file_contents = f"Unable to read to-version at {to_version}.\n"

        try:
            from_version_file_contents = watch.get_history_snapshot(timestamp=from_version)
        except Exception as e:
            logger.error(f"Unable to read watch history from-version for version {from_version}: {str(e)}")
            from_version_file_contents = f"Unable to read from-version at {from_version}.\n"

        screenshot_url = watch.get_screenshot()

        system_uses_webdriver = datastore.data['settings']['application']['fetch_backend'] == 'html_webdriver'

        is_html_webdriver = False
        if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver' or watch.get('fetch_backend', '').startswith('extra_browser_'):
            is_html_webdriver = True

        password_enabled_and_share_is_off = False
        if datastore.data['settings']['application'].get('password') or os.getenv("SALTED_PASS", False):
            password_enabled_and_share_is_off = not datastore.data['settings']['application'].get('shared_diff_access')

        datastore.set_last_viewed(uuid, time.time())

        return render_template("diff.html",
                               current_diff_url=watch['url'],
                               from_version=str(from_version),
                               to_version=str(to_version),
                               extra_stylesheets=extra_stylesheets,
                               extra_title=f" - Diff - {watch.label}",
                               extract_form=extract_form,
                               is_html_webdriver=is_html_webdriver,
                               last_error=watch['last_error'],
                               last_error_screenshot=watch.get_error_snapshot(),
                               last_error_text=watch.get_error_text(),
                               left_sticky=True,
                               newest=to_version_file_contents,
                               newest_version_timestamp=dates[-1],
                               password_enabled_and_share_is_off=password_enabled_and_share_is_off,
                               from_version_file_contents=from_version_file_contents,
                               to_version_file_contents=to_version_file_contents,
                               screenshot=screenshot_url,
                               uuid=uuid,
                               versions=dates,  # All except current/last
                               watch_a=watch
                               )

    @views_blueprint.route("/diff/<string:uuid>", methods=['GET'])
    @login_optionally_required
    def diff_history_page(uuid):
        return _render_diff_template(uuid)

    @views_blueprint.route("/form/add/quickwatch", methods=['POST'])
    @login_optionally_required
    def form_quick_watch_add():
        from changedetectionio import forms
        form = forms.quickWatchForm(request.form)

        if not form.validate():
            for widget, l in form.errors.items():
                flash(','.join(l), 'error')
            return redirect(url_for('watchlist.index'))

        url = request.form.get('url').strip()
        if datastore.url_exists(url):
            flash(f'Warning, URL {url} already exists', "notice")

        add_paused = request.form.get('edit_and_watch_submit_button') is not None
        processor = request.form.get('processor', 'text_json_diff')
        new_uuid = datastore.add_watch(url=url, tag=request.form.get('tags').strip(), extras={'paused': add_paused, 'processor': processor})

        if new_uuid:
            if add_paused:
                flash('Watch added in Paused state, saving will unpause.')
                return redirect(url_for('ui.ui_edit.edit_page', uuid=new_uuid, unpause_on_save=1, tag=request.args.get('tag')))
            else:
                # Straight into the queue.
                worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': new_uuid}))
                flash("Watch added.")

        return redirect(url_for('watchlist.index', tag=request.args.get('tag', '')))

    return views_blueprint
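A short sketch of how a factory like construct_blueprint() above is typically wired up. The nesting under a parent 'ui' blueprint is an assumption made to explain the 'ui.ui_views.*' endpoint names the templates reference; datastore, update_q, queuedWatchMetaData and watch_check_update stand for the application's own objects:

from flask import Flask, Blueprint

app = Flask(__name__)
ui_blueprint = Blueprint('ui', __name__)
# Nested registration (Flask >= 2.0) yields endpoint names like "ui.ui_views.preview_page"
ui_blueprint.register_blueprint(construct_blueprint(datastore, update_q, queuedWatchMetaData, watch_check_update))
app.register_blueprint(ui_blueprint, url_prefix='/ui')  # the prefix is assumed, not taken from this diff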
changedetectionio/blueprint/watchlist/__init__.py (new file, 116 lines)
@@ -0,0 +1,116 @@
import os
import time

from flask import Blueprint, request, make_response, render_template, redirect, url_for, flash, session
from flask_login import current_user
from flask_paginate import Pagination, get_page_parameter

from changedetectionio import forms
from changedetectionio.store import ChangeDetectionStore
from changedetectionio.auth_decorator import login_optionally_required

def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMetaData):
    watchlist_blueprint = Blueprint('watchlist', __name__, template_folder="templates")

    @watchlist_blueprint.route("/", methods=['GET'])
    @login_optionally_required
    def index():
        active_tag_req = request.args.get('tag', '').lower().strip()
        active_tag_uuid = active_tag = None

        # Be sure the requested tag is resolved to its UUID
        if active_tag_req:
            for uuid, tag in datastore.data['settings']['application'].get('tags', {}).items():
                if active_tag_req == tag.get('title', '').lower().strip() or active_tag_req == uuid:
                    active_tag = tag
                    active_tag_uuid = uuid
                    break

        # Redirect for the old rss path which used the /?rss=true
        if request.args.get('rss'):
            return redirect(url_for('rss.feed', tag=active_tag_uuid))

        op = request.args.get('op')
        if op:
            uuid = request.args.get('uuid')
            if op == 'pause':
                datastore.data['watching'][uuid].toggle_pause()
            elif op == 'mute':
                datastore.data['watching'][uuid].toggle_mute()

            datastore.needs_write = True
            return redirect(url_for('watchlist.index', tag=active_tag_uuid))

        # Sort by last_changed and add the uuid which is usually the key..
        sorted_watches = []
        with_errors = request.args.get('with_errors') == "1"
        unread_only = request.args.get('unread') == "1"
        errored_count = 0
        search_q = request.args.get('q').strip().lower() if request.args.get('q') else False
        for uuid, watch in datastore.data['watching'].items():
            if with_errors and not watch.get('last_error'):
                continue

            if unread_only and (watch.viewed or watch.last_changed == 0):
                continue

            if active_tag_uuid and active_tag_uuid not in watch['tags']:
                continue
            if watch.get('last_error'):
                errored_count += 1

            if search_q:
                if (watch.get('title') and search_q in watch.get('title').lower()) or search_q in watch.get('url', '').lower():
                    sorted_watches.append(watch)
                elif watch.get('last_error') and search_q in watch.get('last_error').lower():
                    sorted_watches.append(watch)
            else:
                sorted_watches.append(watch)

        form = forms.quickWatchForm(request.form)
        page = request.args.get(get_page_parameter(), type=int, default=1)
        total_count = len(sorted_watches)

        pagination = Pagination(page=page,
                                total=total_count,
                                per_page=datastore.data['settings']['application'].get('pager_size', 50), css_framework="semantic")

        sorted_tags = sorted(datastore.data['settings']['application'].get('tags').items(), key=lambda x: x[1]['title'])

        output = render_template(
            "watch-overview.html",
            active_tag=active_tag,
            active_tag_uuid=active_tag_uuid,
            app_rss_token=datastore.data['settings']['application'].get('rss_access_token'),
            datastore=datastore,
            errored_count=errored_count,
            form=form,
            guid=datastore.data['app_guid'],
            has_proxies=datastore.proxy_list,
            hosted_sticky=os.getenv("SALTED_PASS", False) == False,
            now_time_server=round(time.time()),
            pagination=pagination,
            queued_uuids=[q_uuid.item['uuid'] for q_uuid in update_q.queue],
            search_q=request.args.get('q', '').strip(),
            sort_attribute=request.args.get('sort') if request.args.get('sort') else request.cookies.get('sort'),
            sort_order=request.args.get('order') if request.args.get('order') else request.cookies.get('order'),
            system_default_fetcher=datastore.data['settings']['application'].get('fetch_backend'),
            tags=sorted_tags,
            unread_changes_count=datastore.unread_changes_count,
            watches=sorted_watches
        )

        if session.get('share-link'):
            del session['share-link']

        resp = make_response(output)

        # The template can run on cookie or url query info
        if request.args.get('sort'):
            resp.set_cookie('sort', request.args.get('sort'))
        if request.args.get('order'):
            resp.set_cookie('order', request.args.get('order'))

        return resp

    return watchlist_blueprint
@@ -0,0 +1,273 @@
{%- extends 'base.html' -%}
{%- block content -%}
{%- from '_helpers.html' import render_simple_field, render_field, render_nolabel_field, sort_by_title -%}
<script src="{{url_for('static_content', group='js', filename='jquery-3.6.0.min.js')}}"></script>
<script src="{{url_for('static_content', group='js', filename='watch-overview.js')}}" defer></script>
<script>let nowtimeserver={{ now_time_server }};</script>
<script>let favicon_baseURL="{{ url_for('static_content', group='favicon', filename="PLACEHOLDER")}}";</script>
<script>
// Initialize Feather icons after the page loads
document.addEventListener('DOMContentLoaded', function() {
feather.replace();
});
</script>
<style>
.checking-now .last-checked {
background-image: linear-gradient(to bottom, transparent 0%, rgba(0,0,0,0.05) 40%, rgba(0,0,0,0.1) 100%);
background-size: 0 100%;
background-repeat: no-repeat;
transition: background-size 0.9s ease
}
</style>
<div class="box" id="form-quick-watch-add">

<form class="pure-form" action="{{ url_for('ui.ui_views.form_quick_watch_add', tag=active_tag_uuid) }}" method="POST" id="new-watch-form">
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" >
<fieldset>
<legend>Add a new web page change detection watch</legend>
<div id="watch-add-wrapper-zone">
{{ render_nolabel_field(form.url, placeholder="https://...", required=true) }}
{{ render_nolabel_field(form.watch_submit_button, title="Watch this URL!" ) }}
{{ render_nolabel_field(form.edit_and_watch_submit_button, title="Edit first then Watch") }}
</div>
<div id="watch-group-tag">
{{ render_field(form.tags, value=active_tag.title if active_tag_uuid else '', placeholder="Watch group / tag", class="transparent-field") }}
</div>
<div id="quick-watch-processor-type">
{{ render_simple_field(form.processor) }}
</div>

</fieldset>
<span style="color:#eee; font-size: 80%;"><img alt="Create a shareable link" style="height: 1em;display:inline-block;" src="{{url_for('static_content', group='images', filename='spread-white.svg')}}" > Tip: You can also add 'shared' watches. <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Sharing-a-Watch">More info</a></span>
</form>
</div>
<div class="box">
<form class="pure-form" action="{{ url_for('ui.form_watch_list_checkbox_operations') }}" method="POST" id="watch-list-form">
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" >
<input type="hidden" id="op_extradata" name="op_extradata" value="" >
<div id="checkbox-operations">
<button class="pure-button button-secondary button-xsmall" name="op" value="pause"><i data-feather="pause" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Pause</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="unpause"><i data-feather="play" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>UnPause</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="mute"><i data-feather="volume-x" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Mute</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="unmute"><i data-feather="volume-2" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>UnMute</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="recheck"><i data-feather="refresh-cw" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Recheck</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="assign-tag" id="checkbox-assign-tag"><i data-feather="tag" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Tag</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="mark-viewed"><i data-feather="eye" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Mark viewed</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="notification-default"><i data-feather="bell" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Use default notification</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="clear-errors"><i data-feather="x-circle" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Clear errors</button>
<button class="pure-button button-secondary button-xsmall" style="background: #dd4242;" name="op" value="clear-history"><i data-feather="trash-2" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Clear/reset history</button>
<button class="pure-button button-secondary button-xsmall" style="background: #dd4242;" name="op" value="delete"><i data-feather="trash" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Delete</button>
</div>
{%- if watches|length >= pagination.per_page -%}
{{ pagination.info }}
{%- endif -%}
{%- if search_q -%}<div id="search-result-info">Searching "<strong><i>{{search_q}}</i></strong>"</div>{%- endif -%}
<div>
<a href="{{url_for('watchlist.index')}}" class="pure-button button-tag {{'active' if not active_tag_uuid }}">All</a>

<!-- tag list -->
{%- for uuid, tag in tags -%}
{%- if tag != "" -%}
<a href="{{url_for('watchlist.index', tag=uuid) }}" class="pure-button button-tag {{'active' if active_tag_uuid == uuid }}">{{ tag.title }}</a>
{%- endif -%}
{%- endfor -%}
</div>

{%- set sort_order = sort_order or 'asc' -%}
{%- set sort_attribute = sort_attribute or 'last_changed' -%}
{%- set pagination_page = request.args.get('page', 0) -%}
{%- set cols_required = 6 -%}
{%- set any_has_restock_price_processor = datastore.any_watches_have_processor_by_name("restock_diff") -%}
{%- if any_has_restock_price_processor -%}
{%- set cols_required = cols_required + 1 -%}
{%- endif -%}
{%- set ui_settings = datastore.data['settings']['application']['ui'] -%}
{%- set wrapper_classes = [
'has-unread-changes' if unread_changes_count else '',
'has-error' if errored_count else '',
] -%}
<div id="watch-table-wrapper" class="{{ wrapper_classes | reject('equalto', '') | join(' ') }}">
{%- set table_classes = [
'favicon-enabled' if 'favicons_enabled' not in ui_settings or ui_settings['favicons_enabled'] else 'favicon-not-enabled',
] -%}
<table class="pure-table pure-table-striped watch-table {{ table_classes | reject('equalto', '') | join(' ') }}">
<thead>
<tr>
{%- set link_order = "desc" if sort_order == 'asc' else "asc" -%}
{%- set arrow_span = "" -%}
<th><input style="vertical-align: middle" type="checkbox" id="check-all" > <a class="{{ 'active '+link_order if sort_attribute == 'date_created' else 'inactive' }}" href="{{url_for('watchlist.index', sort='date_created', order=link_order, tag=active_tag_uuid)}}"># <span class='arrow {{link_order}}'></span></a></th>
<th>
<a class="{{ 'active '+link_order if sort_attribute == 'paused' else 'inactive' }}" href="{{url_for('watchlist.index', sort='paused', order=link_order, tag=active_tag_uuid)}}"><i data-feather="pause" style="vertical-align: bottom; width: 14px; height: 14px; margin-right: 4px;"></i><span class='arrow {{link_order}}'></span></a>

<a class="{{ 'active '+link_order if sort_attribute == 'notification_muted' else 'inactive' }}" href="{{url_for('watchlist.index', sort='notification_muted', order=link_order, tag=active_tag_uuid)}}"><i data-feather="volume-2" style="vertical-align: bottom; width: 14px; height: 14px; margin-right: 4px;"></i><span class='arrow {{link_order}}'></span></a>
</th>
<th><a class="{{ 'active '+link_order if sort_attribute == 'label' else 'inactive' }}" href="{{url_for('watchlist.index', sort='label', order=link_order, tag=active_tag_uuid)}}">Website <span class='arrow {{link_order}}'></span></a></th>
{%- if any_has_restock_price_processor -%}
<th>Restock & Price</th>
{%- endif -%}
<th><a class="{{ 'active '+link_order if sort_attribute == 'last_checked' else 'inactive' }}" href="{{url_for('watchlist.index', sort='last_checked', order=link_order, tag=active_tag_uuid)}}"><span class="hide-on-mobile">Last</span> Checked <span class='arrow {{link_order}}'></span></a></th>
<th><a class="{{ 'active '+link_order if sort_attribute == 'last_changed' else 'inactive' }}" href="{{url_for('watchlist.index', sort='last_changed', order=link_order, tag=active_tag_uuid)}}"><span class="hide-on-mobile">Last</span> Changed <span class='arrow {{link_order}}'></span></a></th>
<th class="empty-cell"></th>
</tr>
</thead>
<tbody>
{%- if not watches|length -%}
<tr>
<td colspan="{{ cols_required }}" style="text-wrap: wrap;">No website watches configured, please add a URL in the box above, or <a href="{{ url_for('imports.import_page')}}" >import a list</a>.</td>
</tr>
{%- endif -%}

{%- for watch in (watches|sort(attribute=sort_attribute, reverse=sort_order == 'asc'))|pagination_slice(skip=pagination.skip) -%}
{%- set checking_now = is_checking_now(watch) -%}
{%- set history_n = watch.history_n -%}
{%- set favicon = watch.get_favicon_filename() -%}
{%- set system_use_url_watchlist = datastore.data['settings']['application']['ui'].get('use_page_title_in_list') -%}
{# Class settings mirrored in changedetectionio/static/js/realtime.js for the frontend #}
{%- set row_classes = [
loop.cycle('pure-table-odd', 'pure-table-even'),
'processor-' ~ watch['processor'],
'has-error' if watch.compile_error_texts()|length > 2 else '',
'paused' if watch.paused is defined and watch.paused != False else '',
'unviewed' if watch.has_unviewed else '',
'has-restock-info' if watch.has_restock_info else 'no-restock-info',
'has-favicon' if favicon else '',
'in-stock' if watch.has_restock_info and watch['restock']['in_stock'] else '',
'not-in-stock' if watch.has_restock_info and not watch['restock']['in_stock'] else '',
'queued' if watch.uuid in queued_uuids else '',
'checking-now' if checking_now else '',
'notification_muted' if watch.notification_muted else '',
'single-history' if history_n == 1 else '',
'multiple-history' if history_n >= 2 else '',
'use-html-title' if system_use_url_watchlist else 'no-html-title',
] -%}
<tr id="{{ watch.uuid }}" data-watch-uuid="{{ watch.uuid }}" class="{{ row_classes | reject('equalto', '') | join(' ') }}">
<td class="inline checkbox-uuid" ><div><input name="uuids" type="checkbox" value="{{ watch.uuid}} " > <span class="counter-i">{{ loop.index+pagination.skip }}</span></div></td>
<td class="inline watch-controls">
<div>
<a class="ajax-op state-off pause-toggle" data-op="pause" href="{{url_for('watchlist.index', op='pause', uuid=watch.uuid, tag=active_tag_uuid)}}"><img src="{{url_for('static_content', group='images', filename='pause.svg')}}" alt="Pause checks" title="Pause checks" class="icon icon-pause" ></a>
<a class="ajax-op state-on pause-toggle" data-op="pause" style="display: none" href="{{url_for('watchlist.index', op='pause', uuid=watch.uuid, tag=active_tag_uuid)}}"><img src="{{url_for('static_content', group='images', filename='play.svg')}}" alt="UnPause checks" title="UnPause checks" class="icon icon-unpause" ></a>
<a class="ajax-op state-off mute-toggle" data-op="mute" href="{{url_for('watchlist.index', op='mute', uuid=watch.uuid, tag=active_tag_uuid)}}"><img src="{{url_for('static_content', group='images', filename='bell-off.svg')}}" alt="Mute notification" title="Mute notification" class="icon icon-mute" ></a>
<a class="ajax-op state-on mute-toggle" data-op="mute" style="display: none" href="{{url_for('watchlist.index', op='mute', uuid=watch.uuid, tag=active_tag_uuid)}}"><img src="{{url_for('static_content', group='images', filename='bell-off.svg')}}" alt="UnMute notification" title="UnMute notification" class="icon icon-mute" ></a>
</div>
</td>

<td class="title-col inline">
<div class="flex-wrapper">
{% if 'favicons_enabled' not in ui_settings or ui_settings['favicons_enabled'] %}
<div>{# A page might have hundreds of these images, so set IMG options for lazy loading; don't set SRC if we don't have it, so it doesn't fetch the placeholder #}
<img alt="Favicon thumbnail" class="favicon" loading="lazy" decoding="async" fetchpriority="low" {% if favicon %} src="{{url_for('static_content', group='favicon', filename=watch.uuid)}}" {% else %} src='data:image/svg+xml;utf8,%3Csvg xmlns="http://www.w3.org/2000/svg" width="7.087" height="7.087" viewBox="0 0 7.087 7.087"%3E%3Ccircle cx="3.543" cy="3.543" r="3.279" stroke="%23e1e1e1" stroke-width="0.45" fill="none" opacity="0.74"/%3E%3C/svg%3E' {% endif %} />
</div>
{% endif %}
<div>
<span class="watch-title">
{% if system_use_url_watchlist or watch.get('use_page_title_in_list') %}
{{ watch.label }}
{% else %}
{{ watch.get('title') or watch.link }}
{% endif %}
<a class="external" target="_blank" rel="noopener" href="{{ watch.link.replace('source:','') }}"> </a>
</span>
<div class="error-text" style="display:none;">{{ watch.compile_error_texts(has_proxies=datastore.proxy_list) }}</div>
{%- if watch['processor'] == 'text_json_diff' -%}
{%- if watch['has_ldjson_price_data'] and not watch['track_ldjson_price_data'] -%}
<div class="ldjson-price-track-offer">Switch to Restock & Price watch mode? <a href="{{url_for('price_data_follower.accept', uuid=watch.uuid)}}" class="pure-button button-xsmall">Yes</a> <a href="{{url_for('price_data_follower.reject', uuid=watch.uuid)}}" class="">No</a></div>
{%- endif -%}
{%- endif -%}
{%- if watch['processor'] == 'restock_diff' -%}
<span class="tracking-ldjson-price-data" title="Automatically following embedded price information"><img src="{{url_for('static_content', group='images', filename='price-tag-icon.svg')}}" class="status-icon price-follow-tag-icon" > Price</span>
{%- endif -%}
{%- for watch_tag_uuid, watch_tag in datastore.get_all_tags_for_watch(watch['uuid']).items() -%}
<span class="watch-tag-list">{{ watch_tag.title }}</span>
{%- endfor -%}
</div>
<div class="status-icons">
<a class="link-spread" href="{{url_for('ui.form_share_put_watch', uuid=watch.uuid)}}"><img src="{{url_for('static_content', group='images', filename='spread.svg')}}" class="status-icon icon icon-spread" title="Create a link to share watch config with others" ></a>
{%- if watch.get_fetch_backend == "html_webdriver"
or ( watch.get_fetch_backend == "system" and system_default_fetcher == 'html_webdriver' )
or "extra_browser_" in watch.get_fetch_backend
-%}
<img class="status-icon" src="{{url_for('static_content', group='images', filename='google-chrome-icon.png')}}" alt="Using a Chrome browser" title="Using a Chrome browser" >
{%- endif -%}
{%- if watch.is_pdf -%}<img class="status-icon" src="{{url_for('static_content', group='images', filename='pdf-icon.svg')}}" alt="Converting PDF to text" >{%- endif -%}
{%- if watch.has_browser_steps -%}<img class="status-icon status-browsersteps" src="{{url_for('static_content', group='images', filename='steps.svg')}}" alt="Browser Steps is enabled" >{%- endif -%}

</div>
</div>
</td>
{%- if any_has_restock_price_processor -%}
<td class="restock-and-price">
{%- if watch['processor'] == 'restock_diff' -%}
{%- if watch.has_restock_info -%}
<span class="restock-label {{'in-stock' if watch['restock']['in_stock'] else 'not-in-stock' }}" title="Detecting restock and price">
<!-- maybe some object watch['processor'][restock_diff] or.. -->
{%- if watch['restock']['in_stock'] -%} In stock {%- else -%} Not in stock {%- endif -%}
</span>
{%- endif -%}

{%- if watch.get('restock') and watch['restock']['price'] != None -%}
{%- if watch['restock']['price'] != None -%}
<span class="restock-label price" title="Price">
{{ watch['restock']['price']|format_number_locale }} {{ watch['restock']['currency'] }}
</span>
{%- endif -%}
{%- elif not watch.has_restock_info -%}
<span class="restock-label error">No information</span>
{%- endif -%}
{%- endif -%}
</td>
{%- endif -%}
{# last_checked becomes fetch-start-time #}
<td class="last-checked" data-timestamp="{{ watch.last_checked }}" data-fetchduration={{ watch.fetch_time }} data-eta_complete="{{ watch.last_checked+watch.fetch_time }}" >
<div class="spinner-wrapper" style="display:none;" >
<span class="spinner"></span><span> Checking now</span>
</div>
<span class="innertext">{{watch|format_last_checked_time|safe}}</span>
</td>
<td class="last-changed" data-timestamp="{{ watch.last_changed }}">{%- if watch.history_n >=2 and watch.last_changed >0 -%}
{{watch.last_changed|format_timestamp_timeago}}
{%- else -%}
Not yet
{%- endif -%}
</td>
<td class="buttons">
<div>
{%- set target_attr = ' target="' ~ watch.uuid ~ '"' if datastore.data['settings']['application']['ui'].get('open_diff_in_new_tab') else '' -%}
<a href="" class="already-in-queue-button recheck pure-button pure-button-primary" style="display: none;" disabled="disabled">Queued</a>
<a href="{{ url_for('ui.form_watch_checknow', uuid=watch.uuid, tag=request.args.get('tag')) }}" data-op='recheck' class="ajax-op recheck pure-button pure-button-primary">Recheck</a>
<a href="{{ url_for('ui.ui_edit.edit_page', uuid=watch.uuid, tag=active_tag_uuid)}}#general" class="pure-button pure-button-primary">Edit</a>
<a href="{{ url_for('ui.ui_views.diff_history_page', uuid=watch.uuid)}}" {{target_attr}} class="pure-button pure-button-primary history-link" style="display: none;">History</a>
<a href="{{ url_for('ui.ui_views.preview_page', uuid=watch.uuid)}}" {{target_attr}} class="pure-button pure-button-primary preview-link" style="display: none;">Preview</a>
</div>
</td>
</tr>
{%- endfor -%}
</tbody>
</table>
<ul id="post-list-buttons">
<li id="post-list-with-errors" style="display: none;" >
<a href="{{url_for('watchlist.index', with_errors=1, tag=request.args.get('tag')) }}" class="pure-button button-tag button-error">With errors ({{ errored_count }})</a>
</li>
<li id="post-list-mark-views" style="display: none;" >
<a href="{{url_for('ui.mark_all_viewed', with_errors=request.args.get('with_errors',0)) }}" class="pure-button button-tag " id="mark-all-viewed">Mark all viewed</a>
</li>
{%- if active_tag_uuid -%}
<li id="post-list-mark-views-tag">
<a href="{{url_for('ui.mark_all_viewed', tag=active_tag_uuid) }}" class="pure-button button-tag " id="mark-all-viewed">Mark all viewed in '{{active_tag.title}}'</a>
</li>
{%- endif -%}
<li id="post-list-unread" style="display: none;" >
<a href="{{url_for('watchlist.index', unread=1, tag=request.args.get('tag')) }}" class="pure-button button-tag">Unread (<span id="unread-tab-counter">{{ unread_changes_count }}</span>)</a>
</li>
<li>
<a href="{{ url_for('ui.form_watch_checknow', tag=active_tag_uuid, with_errors=request.args.get('with_errors',0)) }}" class="pure-button button-tag" id="recheck-all">Recheck
all {% if active_tag_uuid %} in '{{active_tag.title}}'{%endif%}</a>
</li>
<li>
<a href="{{ url_for('rss.feed', tag=active_tag_uuid, token=app_rss_token)}}"><img alt="RSS Feed" id="feed-icon" src="{{url_for('static_content', group='images', filename='generic_feed-icon.svg')}}" height="15"></a>
</li>
</ul>
{{ pagination.links }}
</div>
</form>
</div>
{%- endblock -%}
changedetectionio/conditions/__init__.py (new file, 167 lines)
@@ -0,0 +1,167 @@
from json_logic.builtins import BUILTINS

from .exceptions import EmptyConditionRuleRowNotUsable
from .pluggy_interface import plugin_manager  # Import the pluggy plugin manager
from . import default_plugin
from loguru import logger
# List of all supported JSON Logic operators
operator_choices = [
    (None, "Choose one - Operator"),
    (">", "Greater Than"),
    ("<", "Less Than"),
    (">=", "Greater Than or Equal To"),
    ("<=", "Less Than or Equal To"),
    ("==", "Equals"),
    ("!=", "Not Equals"),
    ("in", "Contains"),
]

# Fields available in the rules
field_choices = [
    (None, "Choose one - Field"),
]

# The data we will feed the JSON Logic rules to see if it passes the test/conditions or not
EXECUTE_DATA = {}


# Define the extended operations dictionary
CUSTOM_OPERATIONS = {
    **BUILTINS,  # Include all standard operators
}

def filter_complete_rules(ruleset):
    rules = [
        rule for rule in ruleset
        if all(value not in ("", False, "None", None) for value in [rule["operator"], rule["field"], rule["value"]])
    ]
    return rules

def convert_to_jsonlogic(logic_operator: str, rule_dict: list):
    """
    Convert a structured rule dict into a JSON Logic rule.

    :param logic_operator: "and"/"or" operator used to join multiple conditions.
    :param rule_dict: List of dictionaries containing conditions.
    :return: JSON Logic rule as a dictionary.
    """

    json_logic_conditions = []

    for condition in rule_dict:
        operator = condition["operator"]
        field = condition["field"]
        value = condition["value"]

        if not operator or operator == 'None' or not value or not field:
            raise EmptyConditionRuleRowNotUsable()

        # Convert value to int/float if possible
        try:
            if isinstance(value, str) and "." in value and value != "None":
                value = float(value)
            else:
                value = int(value)
        except (ValueError, TypeError):
            pass  # Keep as a string if conversion fails

        # Handle different JSON Logic operators properly
        if operator == "in":
            json_logic_conditions.append({"in": [value, {"var": field}]})  # value first
        elif operator in ("!", "!!", "-"):
            json_logic_conditions.append({operator: [{"var": field}]})  # Unary operators
        elif operator in ("min", "max", "cat"):
            json_logic_conditions.append({operator: value})  # Multi-argument operators
        else:
            json_logic_conditions.append({operator: [{"var": field}, value]})  # Standard binary operators

    return {logic_operator: json_logic_conditions} if len(json_logic_conditions) > 1 else json_logic_conditions[0]
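For example, under the conversion rules above, a two-row ruleset joined with "and" converts as follows (the field names and values here are invented for illustration):

rules = [
    {"field": "page_filtered_text_count", "operator": ">", "value": "100"},
    {"field": "page_title", "operator": "in", "value": "Example"},
]
print(convert_to_jsonlogic("and", rules))
# {'and': [{'>': [{'var': 'page_filtered_text_count'}, 100]},
#          {'in': ['Example', {'var': 'page_title'}]}]}
# "100" was coerced to the int 100, and the "in" operator puts the value first.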


def execute_ruleset_against_all_plugins(current_watch_uuid: str, application_datastruct, ephemeral_data={}):
    """
    Build our data and options by calling our plugins, then pass it to jsonlogic and see if the conditions pass.

    :param current_watch_uuid: UUID of the watch whose conditions should be tested.
    :param application_datastruct: The application data structure containing the 'watching' dict.
    :param ephemeral_data: Extra data (for example the freshly filtered text) made available to the plugins.
    :return: Dictionary of plugin results.
    """
    from json_logic import jsonLogic

    EXECUTE_DATA = {}
    result = True

    watch = application_datastruct['watching'].get(current_watch_uuid)

    if watch and watch.get("conditions"):
        logic_operator = "and" if watch.get("conditions_match_logic", "ALL") == "ALL" else "or"
        complete_rules = filter_complete_rules(watch['conditions'])
        if complete_rules:
            # Give all plugins a chance to update the data dict again (that we will test the conditions against)
            for plugin in plugin_manager.get_plugins():
                try:
                    import concurrent.futures
                    import time

                    with concurrent.futures.ThreadPoolExecutor() as executor:
                        future = executor.submit(
                            plugin.add_data,
                            current_watch_uuid=current_watch_uuid,
                            application_datastruct=application_datastruct,
                            ephemeral_data=ephemeral_data
                        )
                        logger.debug(f"Trying plugin {plugin}....")

                        # Set a timeout of 10 seconds
                        try:
                            new_execute_data = future.result(timeout=10)
                            if new_execute_data and isinstance(new_execute_data, dict):
                                EXECUTE_DATA.update(new_execute_data)

                        except concurrent.futures.TimeoutError:
                            # The plugin took too long, abort processing for this watch
                            raise Exception(f"Plugin {plugin.__class__.__name__} took more than 10 seconds to run.")
                except Exception as e:
                    # Log the error but continue with the next plugin
                    import logging
                    logging.error(f"Error executing plugin {plugin.__class__.__name__}: {str(e)}")
                    continue

            # Create the ruleset
            ruleset = convert_to_jsonlogic(logic_operator=logic_operator, rule_dict=complete_rules)

            # Pass the custom operations dictionary to jsonLogic
            if not jsonLogic(logic=ruleset, data=EXECUTE_DATA, operations=CUSTOM_OPERATIONS):
                result = False

    return {'executed_data': EXECUTE_DATA, 'result': result}

# Load plugins dynamically
for plugin in plugin_manager.get_plugins():
    new_ops = plugin.register_operators()
    if isinstance(new_ops, dict):
        CUSTOM_OPERATIONS.update(new_ops)

    new_operator_choices = plugin.register_operator_choices()
    if isinstance(new_operator_choices, list):
        operator_choices.extend(new_operator_choices)

    new_field_choices = plugin.register_field_choices()
    if isinstance(new_field_choices, list):
        field_choices.extend(new_field_choices)

def collect_ui_edit_stats_extras(watch):
    """Collect and combine HTML content from all plugins that implement ui_edit_stats_extras"""
    extras_content = []

    for plugin in plugin_manager.get_plugins():
        try:
            content = plugin.ui_edit_stats_extras(watch=watch)
            if content:
                extras_content.append(content)
        except Exception as e:
            # Skip plugins that don't implement the hook or have errors
            pass

    return "\n".join(extras_content) if extras_content else ""
changedetectionio/conditions/blueprint.py (new file, 81 lines)
@@ -0,0 +1,81 @@
# Flask Blueprint Definition
import json

from flask import Blueprint

from changedetectionio.conditions import execute_ruleset_against_all_plugins


def construct_blueprint(datastore):
    from changedetectionio.flask_app import login_optionally_required

    conditions_blueprint = Blueprint('conditions', __name__, template_folder="templates")

    @conditions_blueprint.route("/<string:watch_uuid>/verify-condition-single-rule", methods=['POST'])
    @login_optionally_required
    def verify_condition_single_rule(watch_uuid):
        """Verify a single condition rule against the current snapshot"""
        from changedetectionio.processors.text_json_diff import prepare_filter_prevew
        from flask import request, jsonify
        from copy import deepcopy

        ephemeral_data = {}

        # Get the watch data
        watch = datastore.data['watching'].get(watch_uuid)
        if not watch:
            return jsonify({'status': 'error', 'message': 'Watch not found'}), 404

        # First use prepare_filter_prevew to process the form data.
        # It returns text_after_filter, which is the text after all current form settings are applied.
        # Create ephemeral data with the text from the current snapshot.

        try:
            # Call prepare_filter_prevew to get a processed version of the content with current form settings.
            # We'll ignore the returned response and just use the datastore, which is modified by the function.

            # This should apply all filters etc., so we can then run the CONDITIONS against the final output text.
            result = prepare_filter_prevew(datastore=datastore,
                                           form_data=request.form,
                                           watch_uuid=watch_uuid)

            ephemeral_data['text'] = result.get('after_filter', '')
            # Create a temporary watch data structure with this single rule
            tmp_watch_data = deepcopy(datastore.data['watching'].get(watch_uuid))

            # Override the conditions in the temporary watch
            rule_json = request.args.get("rule")
            rule = json.loads(rule_json) if rule_json else None

            # Should be key/value of field, operator, value
            tmp_watch_data['conditions'] = [rule]
            tmp_watch_data['conditions_match_logic'] = "ALL"  # Single rule, so use ALL

            # Create a temporary application data structure for the rule check
            temp_app_data = {
                'watching': {
                    watch_uuid: tmp_watch_data
                }
            }

            # Execute the rule against the current snapshot with form data
            result = execute_ruleset_against_all_plugins(
                current_watch_uuid=watch_uuid,
                application_datastruct=temp_app_data,
                ephemeral_data=ephemeral_data
            )

            return jsonify({
                'status': 'success',
                'result': result.get('result'),
                'data': result.get('executed_data'),
                'message': 'Condition passes' if result.get('result') else 'Condition does not pass'
            })

        except Exception as e:
            return jsonify({
                'status': 'error',
                'message': f'Error verifying condition: {str(e)}'
            }), 500

    return conditions_blueprint
|
||||
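
A quick way to exercise this endpoint by hand (host, port, and disabled auth are assumptions for a local instance; note the rule travels as a JSON string in the query args while the watch's current edit-form fields travel in the POST body):

import json
import requests

rule = {"field": "page_filtered_text", "operator": "contains_regex", "value": "in stock"}

resp = requests.post(
    "http://localhost:5000/conditions/<watch-uuid>/verify-condition-single-rule",  # placeholder UUID
    params={"rule": json.dumps(rule)},
    data={},  # the edit form's current filter settings would normally be posted here
)
print(resp.json())
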
changedetectionio/conditions/default_plugin.py  (new file, 83 lines)
@@ -0,0 +1,83 @@
import re

import pluggy
from price_parser import Price
from loguru import logger

hookimpl = pluggy.HookimplMarker("changedetectionio_conditions")


@hookimpl
def register_operators():
    def starts_with(_, text, prefix):
        return text.lower().strip().startswith(str(prefix).strip().lower())

    def ends_with(_, text, suffix):
        return text.lower().strip().endswith(str(suffix).strip().lower())

    def length_min(_, text, strlen):
        return len(text) >= int(strlen)

    def length_max(_, text, strlen):
        return len(text) <= int(strlen)

    # Custom function for case-insensitive regex matching
    def contains_regex(_, text, pattern):
        """Returns True if `text` contains `pattern` (case-insensitive regex match)."""
        return bool(re.search(pattern, str(text), re.IGNORECASE))

    # Custom function for NOT matching case-insensitive regex
    def not_contains_regex(_, text, pattern):
        """Returns True if `text` does NOT contain `pattern` (case-insensitive regex match)."""
        return not bool(re.search(pattern, str(text), re.IGNORECASE))

    def not_contains(_, text, pattern):
        return pattern not in text

    return {
        "!in": not_contains,
        "!contains_regex": not_contains_regex,
        "contains_regex": contains_regex,
        "ends_with": ends_with,
        "length_max": length_max,
        "length_min": length_min,
        "starts_with": starts_with,
    }


@hookimpl
def register_operator_choices():
    return [
        ("!in", "Does NOT Contain"),
        ("starts_with", "Text Starts With"),
        ("ends_with", "Text Ends With"),
        ("length_min", "Length minimum"),
        ("length_max", "Length maximum"),
        ("contains_regex", "Text Matches Regex"),
        ("!contains_regex", "Text Does NOT Match Regex"),
    ]


@hookimpl
def register_field_choices():
    return [
        ("extracted_number", "Extracted number after 'Filters & Triggers'"),
        # ("meta_description", "Meta Description"),
        # ("meta_keywords", "Meta Keywords"),
        ("page_filtered_text", "Page text after 'Filters & Triggers'"),
        # ("page_title", "Page <title>"),  # actual page title <title>
    ]


@hookimpl
def add_data(current_watch_uuid, application_datastruct, ephemeral_data):

    res = {}
    if 'text' in ephemeral_data:
        res['page_filtered_text'] = ephemeral_data['text']

    # Better to not wrap this in try/except so that the UI can see any errors
    price = Price.fromstring(ephemeral_data.get('text'))
    if price and price.amount is not None:
        # This is slightly misleading, it's extracting a PRICE not a Number..
        res['extracted_number'] = float(price.amount)
        logger.debug(f"Extracted number result: '{price}' - returning float({res['extracted_number']})")

    return res
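
These operators are merged into CUSTOM_OPERATIONS and handed to the jsonLogic(...) call shown earlier. A minimal sketch of how one of them participates in a ruleset (the call shape mirrors the one above; treat it as illustrative rather than the exact library import):

import re

def contains_regex(_, text, pattern):
    # Same signature as above: the first argument is supplied by the evaluator
    return bool(re.search(pattern, str(text), re.IGNORECASE))

CUSTOM_OPERATIONS = {"contains_regex": contains_regex}
ruleset = {"and": [{"contains_regex": [{"var": "page_filtered_text"}, r"sold\s+out"]}]}
data = {"page_filtered_text": "Currently SOLD OUT, check back later"}

# jsonLogic(logic=ruleset, data=data, operations=CUSTOM_OPERATIONS) -> True
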
changedetectionio/conditions/exceptions.py  (new file, 6 lines)
@@ -0,0 +1,6 @@
class EmptyConditionRuleRowNotUsable(Exception):
    def __init__(self):
        super().__init__("One of the 'conditions' rulesets is incomplete, cannot run.")

    def __str__(self):
        return self.args[0]
changedetectionio/conditions/form.py  (new file, 44 lines)
@@ -0,0 +1,44 @@
# Condition Rule Form (for each rule row)
from wtforms import Form, SelectField, StringField, validators


class ConditionFormRow(Form):

    # ✅ Ensure Plugins Are Loaded BEFORE Importing Choices
    from changedetectionio.conditions import plugin_manager
    from changedetectionio.conditions import operator_choices, field_choices

    field = SelectField(
        "Field",
        choices=field_choices,
        validators=[validators.Optional()]
    )

    operator = SelectField(
        "Operator",
        choices=operator_choices,
        validators=[validators.Optional()]
    )

    value = StringField("Value", validators=[validators.Optional()], render_kw={"placeholder": "A value"})

    def validate(self, extra_validators=None):
        # First, run the default validators
        if not super().validate(extra_validators):
            return False

        # Custom validation logic
        # If any of the operator/field/value is set, then they must all be set
        if any(value not in ("", False, "None", None) for value in [self.operator.data, self.field.data, self.value.data]):
            if not self.operator.data or self.operator.data == 'None':
                self.operator.errors.append("Operator is required.")
                return False

            if not self.field.data or self.field.data == 'None':
                self.field.errors.append("Field is required.")
                return False

            if not self.value.data:
                self.value.errors.append("Value is required.")
                return False

        return True  # Only return True if all conditions pass
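
A quick sketch of how one rule row validates in isolation, assuming the usual wtforms pattern of passing formdata as a werkzeug MultiDict (the exact error messages for the select fields depend on the registered choices):

from werkzeug.datastructures import MultiDict

# An incomplete row: operator and value are set but no field was chosen
form = ConditionFormRow(formdata=MultiDict({
    "operator": "contains_regex",
    "value": "error 404",
}))
if not form.validate():
    print(form.errors)  # expect a 'Field is required.' style error for the empty field
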
changedetectionio/conditions/pluggy_interface.py  (new file, 74 lines)
@@ -0,0 +1,74 @@
import pluggy
import os
import importlib
from . import default_plugin

# ✅ Ensure that the namespace in HookspecMarker matches PluginManager
PLUGIN_NAMESPACE = "changedetectionio_conditions"

hookspec = pluggy.HookspecMarker(PLUGIN_NAMESPACE)
hookimpl = pluggy.HookimplMarker(PLUGIN_NAMESPACE)


class ConditionsSpec:
    """Hook specifications for extending JSON Logic conditions."""

    @hookspec
    def register_operators():
        """Return a dictionary of new JSON Logic operators."""
        pass

    @hookspec
    def register_operator_choices():
        """Return a list of new operator choices."""
        pass

    @hookspec
    def register_field_choices():
        """Return a list of new field choices."""
        pass

    @hookspec
    def add_data(current_watch_uuid, application_datastruct, ephemeral_data):
        """Add to the datadict"""
        pass

    @hookspec
    def ui_edit_stats_extras(watch):
        """Return HTML content to add to the stats tab in the edit view"""
        pass

# ✅ Set up Pluggy Plugin Manager
plugin_manager = pluggy.PluginManager(PLUGIN_NAMESPACE)

# ✅ Register hookspecs (Ensures they are detected)
plugin_manager.add_hookspecs(ConditionsSpec)

# ✅ Register built-in plugins manually
plugin_manager.register(default_plugin, "default_plugin")

# ✅ Load plugins from the plugins directory
def load_plugins_from_directory():
    plugins_dir = os.path.join(os.path.dirname(__file__), 'plugins')
    if not os.path.exists(plugins_dir):
        return

    # Get all Python files (excluding __init__.py)
    for filename in os.listdir(plugins_dir):
        if filename.endswith(".py") and filename != "__init__.py":
            module_name = filename[:-3]  # Remove .py extension
            module_path = f"changedetectionio.conditions.plugins.{module_name}"

            try:
                module = importlib.import_module(module_path)
                # Register the plugin with pluggy
                plugin_manager.register(module, module_name)
            except (ImportError, AttributeError) as e:
                print(f"Error loading plugin {module_name}: {e}")

# Load plugins from the plugins directory
load_plugins_from_directory()

# ✅ Discover installed plugins from external packages (if any)
plugin_manager.load_setuptools_entrypoints(PLUGIN_NAMESPACE)
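
Because the manager also scans setuptools entry points under the changedetectionio_conditions namespace, a third-party package can ship a plugin without touching the plugins directory. A minimal sketch of such a module (the module name and entry-point wiring are assumptions for illustration):

# my_cdio_plugin.py - hypothetical module, exposed via an entry point
# registered under the "changedetectionio_conditions" group in its packaging metadata
import pluggy

hookimpl = pluggy.HookimplMarker("changedetectionio_conditions")

@hookimpl
def register_operators():
    def is_palindrome(_, text, _value):
        s = ''.join(ch.lower() for ch in str(text) if ch.isalnum())
        return s == s[::-1]
    return {"is_palindrome": is_palindrome}

@hookimpl
def register_operator_choices():
    return [("is_palindrome", "Text is a palindrome")]
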
changedetectionio/conditions/plugins/__init__.py  (new file, 1 line)
@@ -0,0 +1 @@
# Import plugins package to make them discoverable
changedetectionio/conditions/plugins/levenshtein_plugin.py  (new file, 119 lines)
@@ -0,0 +1,119 @@
import pluggy
from loguru import logger

LEVENSHTEIN_MAX_LEN_FOR_EDIT_STATS = 100000

# Support both plugin systems
conditions_hookimpl = pluggy.HookimplMarker("changedetectionio_conditions")
global_hookimpl = pluggy.HookimplMarker("changedetectionio")

def levenshtein_ratio_recent_history(watch, incoming_text=None):
    try:
        from Levenshtein import ratio, distance
        k = list(watch.history.keys())
        a = None
        b = None

        # When called from ui_edit_stats_extras, we don't have incoming_text
        if incoming_text is None:
            a = watch.get_history_snapshot(timestamp=k[-1])  # Latest snapshot
            b = watch.get_history_snapshot(timestamp=k[-2])  # Previous snapshot

        # Needs at least one snapshot to compare against
        elif len(k) >= 1:
            a = watch.get_history_snapshot(timestamp=k[-1])  # Latest saved snapshot
            b = incoming_text if incoming_text else k[-2]

        if a and b:
            distance_value = distance(a, b)
            ratio_value = ratio(a, b)
            return {
                'distance': distance_value,
                'ratio': ratio_value,
                'percent_similar': round(ratio_value * 100, 2)
            }
    except Exception as e:
        logger.warning(f"Unable to calc similarity: {str(e)}")

    return ''

@conditions_hookimpl
def register_operators():
    pass

@conditions_hookimpl
def register_operator_choices():
    pass


@conditions_hookimpl
def register_field_choices():
    return [
        ("levenshtein_ratio", "Levenshtein - Text similarity ratio"),
        ("levenshtein_distance", "Levenshtein - Text change distance"),
    ]

@conditions_hookimpl
def add_data(current_watch_uuid, application_datastruct, ephemeral_data):
    res = {}
    watch = application_datastruct['watching'].get(current_watch_uuid)
    # ephemeral_data['text'] will be the current text after filters; they may have edited filters but not saved them yet etc

    if watch and 'text' in ephemeral_data:
        lev_data = levenshtein_ratio_recent_history(watch, ephemeral_data.get('text', ''))
        if isinstance(lev_data, dict):
            res['levenshtein_ratio'] = lev_data.get('ratio', 0)
            res['levenshtein_similarity'] = lev_data.get('percent_similar', 0)
            res['levenshtein_distance'] = lev_data.get('distance', 0)

    return res

@global_hookimpl
def ui_edit_stats_extras(watch):
    """Generate the HTML for Levenshtein stats in the UI, via the global plugin system"""
    if len(watch.history.keys()) < 2:
        return "<p>Not enough history to calculate Levenshtein metrics</p>"

    # Protection against the algorithm getting stuck on huge documents
    k = list(watch.history.keys())
    if any(
        len(watch.get_history_snapshot(timestamp=k[idx])) > LEVENSHTEIN_MAX_LEN_FOR_EDIT_STATS
        for idx in (-1, -2)
        if len(k) >= abs(idx)
    ):
        return "<p>Snapshot too large for edit statistics, skipping.</p>"

    try:
        lev_data = levenshtein_ratio_recent_history(watch)
        if not lev_data or not isinstance(lev_data, dict):
            return "<p>Unable to calculate Levenshtein metrics</p>"

        html = f"""
        <div class="levenshtein-stats">
            <h4>Levenshtein Text Similarity Details</h4>
            <table class="pure-table">
                <tbody>
                    <tr>
                        <td>Raw distance (edits needed)</td>
                        <td>{lev_data['distance']}</td>
                    </tr>
                    <tr>
                        <td>Similarity ratio</td>
                        <td>{lev_data['ratio']:.4f}</td>
                    </tr>
                    <tr>
                        <td>Percent similar</td>
                        <td>{lev_data['percent_similar']}%</td>
                    </tr>
                </tbody>
            </table>
            <p style="font-size: 80%;">Levenshtein metrics compare the last two snapshots, measuring how many character edits are needed to transform one into the other.</p>
        </div>
        """
        return html
    except Exception as e:
        logger.error(f"Error generating Levenshtein UI extras: {str(e)}")
        return "<p>Error calculating Levenshtein metrics</p>"
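
For reference, the two metrics come straight from the python-Levenshtein package: distance counts single-character edits, and ratio normalises similarity into 0..1:

from Levenshtein import distance, ratio

a = "The price is $49.99"
b = "The price is $54.99"

print(distance(a, b))               # 2 edits
print(round(ratio(a, b) * 100, 2))  # 89.47 (ratio counts a substitution as two edits)
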
changedetectionio/conditions/plugins/wordcount_plugin.py  (new file, 82 lines)
@@ -0,0 +1,82 @@
import pluggy
from loguru import logger

# Support both plugin systems
conditions_hookimpl = pluggy.HookimplMarker("changedetectionio_conditions")
global_hookimpl = pluggy.HookimplMarker("changedetectionio")

def count_words_in_history(watch, incoming_text=None):
    """Count words in snapshot text"""
    try:
        if incoming_text is not None:
            # When called from add_data with incoming text
            return len(incoming_text.split())
        elif watch.history.keys():
            # When called from UI extras to count latest snapshot
            latest_key = list(watch.history.keys())[-1]
            latest_content = watch.get_history_snapshot(latest_key)
            return len(latest_content.split())
        return 0
    except Exception as e:
        logger.error(f"Error counting words: {str(e)}")
        return 0

# Implement condition plugin hooks
@conditions_hookimpl
def register_operators():
    # No custom operators needed
    return {}

@conditions_hookimpl
def register_operator_choices():
    # No custom operator choices needed
    return []

@conditions_hookimpl
def register_field_choices():
    # Add a field that will be available in conditions
    return [
        ("word_count", "Word count of content"),
    ]

@conditions_hookimpl
def add_data(current_watch_uuid, application_datastruct, ephemeral_data):
    """Add word count data for conditions"""
    result = {}
    watch = application_datastruct['watching'].get(current_watch_uuid)

    if watch and 'text' in ephemeral_data:
        word_count = count_words_in_history(watch, ephemeral_data['text'])
        result['word_count'] = word_count

    return result

def _generate_stats_html(watch):
    """Generate the HTML content for the stats tab"""
    word_count = count_words_in_history(watch)

    html = f"""
    <div class="word-count-stats">
        <h4>Content Analysis</h4>
        <table class="pure-table">
            <tbody>
                <tr>
                    <td>Word count (latest snapshot)</td>
                    <td>{word_count}</td>
                </tr>
            </tbody>
        </table>
        <p style="font-size: 80%;">Word count is a simple measure of content length, calculated by splitting text on whitespace.</p>
    </div>
    """
    return html

# Registered under both plugin systems; stacking the two markers on one
# function avoids the second module-level definition shadowing the first.
@conditions_hookimpl
@global_hookimpl
def ui_edit_stats_extras(watch):
    """Add word count stats to the UI through both plugin systems"""
    return _generate_stats_html(watch)
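
With word_count registered, a condition row referencing it has the same field/operator/value shape the blueprint above expects, and json-logic's built-in numeric comparisons apply:

rule = {
    "field": "word_count",
    "operator": ">=",   # json-logic ships numeric comparisons; no custom operator needed
    "value": "100",
}
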
@@ -7,11 +7,30 @@ import os
 # Visual Selector scraper - 'Button' is there because some sites have <button>OUT OF STOCK</button>.
 visualselector_xpath_selectors = 'div,span,form,table,tbody,tr,td,a,p,ul,li,h1,h2,h3,h4,header,footer,section,article,aside,details,main,nav,section,summary,button'

+SCREENSHOT_MAX_HEIGHT_DEFAULT = 20000
+SCREENSHOT_DEFAULT_QUALITY = 40
+
+# Maximum total height for the final image (when in stitch mode).
+# We limit this to 16000px due to the huge amount of RAM that was being used.
+# Example: 16000 × 1400 × 3 = 67,200,000 bytes ≈ 64.1 MB (not including buffers in PIL etc)
+SCREENSHOT_MAX_TOTAL_HEIGHT = int(os.getenv("SCREENSHOT_MAX_HEIGHT", SCREENSHOT_MAX_HEIGHT_DEFAULT))
+
+# The size at which we will switch to the stitching method; when below this (and
+# below MAX_TOTAL_HEIGHT, which can be set by a user) we will use the default
+# screenshot method.
+SCREENSHOT_SIZE_STITCH_THRESHOLD = 8000
+
 # available_fetchers() will scan this implementation looking for anything starting with html_
 # this information is used in the form selections
 from changedetectionio.content_fetchers.requests import fetcher as html_requests

+
+import importlib.resources
+XPATH_ELEMENT_JS = importlib.resources.files("changedetectionio.content_fetchers.res").joinpath('xpath_element_scraper.js').read_text(encoding='utf-8')
+INSTOCK_DATA_JS = importlib.resources.files("changedetectionio.content_fetchers.res").joinpath('stock-not-in-stock.js').read_text(encoding='utf-8')
+FAVICON_FETCHER_JS = importlib.resources.files("changedetectionio.content_fetchers.res").joinpath('favicon-fetcher.js').read_text(encoding='utf-8')
+

 def available_fetchers():
     # See the if statement at the bottom of this file for how we switch between playwright and webdriver
     import inspect
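
The RAM figure in the comment is just height x width x 3 bytes for the decoded RGB bitmap; a quick check:

height, width = 16000, 1400
raw_bytes = height * width * 3      # 67,200,000 bytes
print(raw_bytes / (1024 * 1024))    # ~64.09 MB, before PIL's own buffers
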
@@ -48,6 +48,7 @@ class Fetcher():
     error = None
     fetcher_description = "No description"
     headers = {}
+    favicon_blob = None
     instock_data = None
     instock_data_js = ""
     status_code = None
@@ -63,31 +64,40 @@
     # Time ON TOP of the system-defined env minimum time
     render_extract_delay = 0

-    def __init__(self):
-        import importlib.resources
-        self.xpath_element_js = importlib.resources.files("changedetectionio.content_fetchers.res").joinpath('xpath_element_scraper.js').read_text(encoding='utf-8')
-        self.instock_data_js = importlib.resources.files("changedetectionio.content_fetchers.res").joinpath('stock-not-in-stock.js').read_text(encoding='utf-8')
+    def clear_content(self):
+        """
+        Explicitly clear all content from memory to free up heap space.
+        Call this after content has been saved to disk.
+        """
+        self.content = None
+        if hasattr(self, 'raw_content'):
+            self.raw_content = None
+        self.screenshot = None
+        self.xpath_data = None
+        # Keep headers and status_code as they're small

     @abstractmethod
     def get_error(self):
         return self.error

     @abstractmethod
-    def run(self,
-            url,
-            timeout,
-            request_headers,
-            request_body,
-            request_method,
-            ignore_status_codes=False,
-            current_include_filters=None,
-            is_binary=False,
-            empty_pages_are_a_change=False):
+    async def run(self,
+                  fetch_favicon=True,
+                  current_include_filters=None,
+                  empty_pages_are_a_change=False,
+                  ignore_status_codes=False,
+                  is_binary=False,
+                  request_body=None,
+                  request_headers=None,
+                  request_method=None,
+                  timeout=None,
+                  url=None,
+                  ):
         # Should set self.error, self.status_code and self.content
         pass

     @abstractmethod
-    def quit(self):
+    def quit(self, watch=None):
         return

@@ -127,10 +137,10 @@

         return None

-    def iterate_browser_steps(self, start_url=None):
+    async def iterate_browser_steps(self, start_url=None):
         from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface
         from playwright._impl._errors import TimeoutError, Error
-        from changedetectionio.safe_jinja import render as jinja_render
+        from changedetectionio.jinja2_custom import render as jinja_render
         step_n = 0

         if self.browser_steps is not None and len(self.browser_steps):
@@ -141,8 +151,9 @@
             for step in valid_steps:
                 step_n += 1
                 logger.debug(f">> Iterating check - browser Step n {step_n} - {step['operation']}...")
-                self.screenshot_step("before-" + str(step_n))
-                self.save_step_html("before-" + str(step_n))
+                await self.screenshot_step("before-" + str(step_n))
+                await self.save_step_html("before-" + str(step_n))

                 try:
                     optional_value = step['optional_value']
                     selector = step['selector']
@@ -152,11 +163,11 @@
                     if '{%' in step['selector'] or '{{' in step['selector']:
                         selector = jinja_render(template_str=step['selector'])

-                    getattr(interface, "call_action")(action_name=step['operation'],
+                    await getattr(interface, "call_action")(action_name=step['operation'],
                                                       selector=selector,
                                                       optional_value=optional_value)
-                    self.screenshot_step(step_n)
-                    self.save_step_html(step_n)
+                    await self.screenshot_step(step_n)
+                    await self.save_step_html(step_n)
                 except (Error, TimeoutError) as e:
                     logger.debug(str(e))
                     # Stop processing here
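
Since run() is now a coroutine taking keyword arguments, callers await it from an async worker instead of calling it directly. A minimal sketch (the fetcher construction and URL are placeholders):

import asyncio

async def check_once(f):
    await f.run(
        url="https://example.com",   # placeholder
        timeout=30,
        request_method="GET",
        request_headers={},
        fetch_favicon=False,
    )
    return f.status_code, f.content

# asyncio.run(check_once(some_fetcher_instance))
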
@@ -4,9 +4,75 @@ from urllib.parse import urlparse

 from loguru import logger

+from changedetectionio.content_fetchers import SCREENSHOT_MAX_HEIGHT_DEFAULT, visualselector_xpath_selectors, \
+    SCREENSHOT_SIZE_STITCH_THRESHOLD, SCREENSHOT_MAX_TOTAL_HEIGHT, XPATH_ELEMENT_JS, INSTOCK_DATA_JS, FAVICON_FETCHER_JS
 from changedetectionio.content_fetchers.base import Fetcher, manage_user_agent
 from changedetectionio.content_fetchers.exceptions import PageUnloadable, Non200ErrorCodeReceived, EmptyReply, ScreenshotUnavailable

+async def capture_full_page_async(page):
+    import os
+    import time
+    from multiprocessing import Process, Pipe
+
+    start = time.time()
+
+    page_height = await page.evaluate("document.documentElement.scrollHeight")
+    page_width = await page.evaluate("document.documentElement.scrollWidth")
+    original_viewport = page.viewport_size
+
+    logger.debug(f"Playwright viewport size {page.viewport_size} page height {page_height} page width {page_width}")
+
+    # Use an approach similar to puppeteer: set a larger viewport and take screenshots in chunks
+    step_size = SCREENSHOT_SIZE_STITCH_THRESHOLD  # Size that won't cause the GPU to overflow
+    screenshot_chunks = []
+    y = 0
+
+    if page_height > page.viewport_size['height']:
+        if page_height < step_size:
+            step_size = page_height  # In case the page is bigger than the default viewport but smaller than the proposed step size
+        logger.debug(f"Setting bigger viewport to step through large page width W{page.viewport_size['width']}xH{step_size} because page_height > viewport_size")
+        # Set viewport to a larger size to capture more content at once
+        await page.set_viewport_size({'width': page.viewport_size['width'], 'height': step_size})
+
+    # Capture screenshots in chunks up to the max total height
+    while y < min(page_height, SCREENSHOT_MAX_TOTAL_HEIGHT):
+        await page.request_gc()
+        await page.evaluate(f"window.scrollTo(0, {y})")
+        await page.request_gc()
+        screenshot_chunks.append(await page.screenshot(
+            type="jpeg",
+            full_page=False,
+            quality=int(os.getenv("SCREENSHOT_QUALITY", 72))
+        ))
+        y += step_size
+        await page.request_gc()
+
+    # Restore original viewport size
+    await page.set_viewport_size({'width': original_viewport['width'], 'height': original_viewport['height']})
+
+    # If we have multiple chunks, stitch them together
+    if len(screenshot_chunks) > 1:
+        from changedetectionio.content_fetchers.screenshot_handler import stitch_images_worker
+        logger.debug(f"Screenshot stitching {len(screenshot_chunks)} chunks together")
+        parent_conn, child_conn = Pipe()
+        p = Process(target=stitch_images_worker, args=(child_conn, screenshot_chunks, page_height, SCREENSHOT_MAX_TOTAL_HEIGHT))
+        p.start()
+        screenshot = parent_conn.recv_bytes()
+        p.join()
+        logger.debug(
+            f"Screenshot (chunked/stitched) - Page height: {page_height} Capture height: {SCREENSHOT_MAX_TOTAL_HEIGHT} - Stitched together in {time.time() - start:.2f}s")
+        # Explicit cleanup
+        del screenshot_chunks
+        del p
+        del parent_conn, child_conn
+        screenshot_chunks = None
+        return screenshot
+
+    logger.debug(
+        f"Screenshot Page height: {page_height} Capture height: {SCREENSHOT_MAX_TOTAL_HEIGHT} - Single capture in {time.time() - start:.2f}s")
+
+    return screenshot_chunks[0]
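
stitch_images_worker lives in changedetectionio.content_fetchers.screenshot_handler and runs in a child process so PIL's memory spike is reclaimed when the process exits. Roughly, such a worker pastes the JPEG chunks onto one canvas and sends the encoded result back over the pipe; the following is an illustrative sketch, not the project's actual implementation:

from io import BytesIO

def stitch_images_sketch(conn, chunks, page_height, max_height):
    # Hypothetical stand-in for stitch_images_worker
    from PIL import Image
    images = [Image.open(BytesIO(c)) for c in chunks]
    width = images[0].width
    canvas = Image.new("RGB", (width, min(page_height, max_height)))
    y = 0
    for im in images:
        canvas.paste(im, (0, y))  # anything past the canvas bottom is clipped
        y += im.height
    out = BytesIO()
    canvas.save(out, format="JPEG", quality=72)
    conn.send_bytes(out.getvalue())
    conn.close()
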
class fetcher(Fetcher):
    fetcher_description = "Playwright {}/Javascript".format(
        os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').capitalize()
@@ -57,9 +123,10 @@ class fetcher(Fetcher):
             self.proxy['username'] = parsed.username
             self.proxy['password'] = parsed.password

-    def screenshot_step(self, step_n=''):
+    async def screenshot_step(self, step_n=''):
         super().screenshot_step(step_n=step_n)
-        screenshot = self.page.screenshot(type='jpeg', full_page=True, quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))
+        screenshot = await capture_full_page_async(page=self.page)

         if self.browser_steps_screenshot_path is not None:
             destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.jpeg'.format(step_n))
@@ -67,45 +134,47 @@ class fetcher(Fetcher):
             with open(destination, 'wb') as f:
                 f.write(screenshot)

-    def save_step_html(self, step_n):
+    async def save_step_html(self, step_n):
         super().save_step_html(step_n=step_n)
-        content = self.page.content()
+        content = await self.page.content()
         destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.html'.format(step_n))
         logger.debug(f"Saving step HTML to {destination}")
         with open(destination, 'w') as f:
             f.write(content)

-    def run(self,
-            url,
-            timeout,
-            request_headers,
-            request_body,
-            request_method,
-            ignore_status_codes=False,
-            current_include_filters=None,
-            is_binary=False,
-            empty_pages_are_a_change=False):
+    async def run(self,
+                  fetch_favicon=True,
+                  current_include_filters=None,
+                  empty_pages_are_a_change=False,
+                  ignore_status_codes=False,
+                  is_binary=False,
+                  request_body=None,
+                  request_headers=None,
+                  request_method=None,
+                  timeout=None,
+                  url=None,
+                  ):

-        from playwright.sync_api import sync_playwright
+        from playwright.async_api import async_playwright
         import playwright._impl._errors
         from changedetectionio.content_fetchers import visualselector_xpath_selectors
         import time
         self.delete_browser_steps_screenshots()
         response = None

-        with sync_playwright() as p:
+        async with async_playwright() as p:
             browser_type = getattr(p, self.browser_type)

             # Seemed to cause a connection Exception even though I can see it connect
             # self.browser = browser_type.connect(self.command_executor, timeout=timeout*1000)
             # 60,000 connection timeout only
-            browser = browser_type.connect_over_cdp(self.browser_connection_url, timeout=60000)
+            browser = await browser_type.connect_over_cdp(self.browser_connection_url, timeout=60000)

             # SOCKS5 with authentication is not supported (yet)
             # https://github.com/microsoft/playwright/issues/10567

             # Set user agent to prevent Cloudflare from blocking the browser
             # Use the default one configured in the App.py model that's passed from fetch_site_status.py
-            context = browser.new_context(
+            context = await browser.new_context(
                 accept_downloads=False,  # Should never be needed
                 bypass_csp=True,  # This is needed to enable JavaScript execution on GitHub and others
                 extra_http_headers=request_headers,
@@ -115,41 +184,47 @@ class fetcher(Fetcher):
                 user_agent=manage_user_agent(headers=request_headers),
             )

-            self.page = context.new_page()
+            self.page = await context.new_page()

             # Listen for all console events and handle errors
-            self.page.on("console", lambda msg: print(f"Playwright console: Watch URL: {url} {msg.type}: {msg.text} {msg.args}"))
+            self.page.on("console", lambda msg: logger.debug(f"Playwright console: Watch URL: {url} {msg.type}: {msg.text} {msg.args}"))

             # Re-use as much code from browser steps as possible so it's the same
             from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface
             browsersteps_interface = steppable_browser_interface(start_url=url)
             browsersteps_interface.page = self.page

-            response = browsersteps_interface.action_goto_url(value=url)
-            self.headers = response.all_headers()
+            response = await browsersteps_interface.action_goto_url(value=url)

             if response is None:
-                context.close()
-                browser.close()
+                await context.close()
+                await browser.close()
                 logger.debug("Content Fetcher > Response object from the browser communication was none")
                 raise EmptyReply(url=url, status_code=None)

+            # In async_playwright, all_headers() returns a coroutine
+            try:
+                self.headers = await response.all_headers()
+            except TypeError:
+                # Fallback for sync version
+                self.headers = response.all_headers()

             try:
                 if self.webdriver_js_execute_code is not None and len(self.webdriver_js_execute_code):
-                    browsersteps_interface.action_execute_js(value=self.webdriver_js_execute_code, selector=None)
+                    await browsersteps_interface.action_execute_js(value=self.webdriver_js_execute_code, selector=None)
             except playwright._impl._errors.TimeoutError as e:
-                context.close()
-                browser.close()
+                await context.close()
+                await browser.close()
                 # This can be ok, we will try to grab what we could retrieve
                 pass
             except Exception as e:
                 logger.debug(f"Content Fetcher > Other exception when executing custom JS code {str(e)}")
-                context.close()
-                browser.close()
+                await context.close()
+                await browser.close()
                 raise PageUnloadable(url=url, status_code=None, message=str(e))

             extra_wait = int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay
-            self.page.wait_for_timeout(extra_wait * 1000)
+            await self.page.wait_for_timeout(extra_wait * 1000)

             try:
                 self.status_code = response.status
@@ -157,39 +232,58 @@ class fetcher(Fetcher):
                 # https://github.com/dgtlmoon/changedetection.io/discussions/2122#discussioncomment-8241962
                 logger.critical(f"Response from the browser/Playwright did not have a status_code! Response follows.")
                 logger.critical(response)
-                context.close()
-                browser.close()
+                await context.close()
+                await browser.close()
                 raise PageUnloadable(url=url, status_code=None, message=str(e))

+            if fetch_favicon:
+                try:
+                    self.favicon_blob = await self.page.evaluate(FAVICON_FETCHER_JS)
+                    await self.page.request_gc()
+                except Exception as e:
+                    logger.error(f"Error fetching FavIcon info {str(e)}, continuing.")

             if self.status_code != 200 and not ignore_status_codes:
-                screenshot = self.page.screenshot(type='jpeg', full_page=True,
-                                                  quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))
+                screenshot = await capture_full_page_async(self.page)
                 raise Non200ErrorCodeReceived(url=url, status_code=self.status_code, screenshot=screenshot)

-            if not empty_pages_are_a_change and len(self.page.content().strip()) == 0:
+            if not empty_pages_are_a_change and len((await self.page.content()).strip()) == 0:
                 logger.debug("Content Fetcher > Content was empty, empty_pages_are_a_change = False")
-                context.close()
-                browser.close()
+                await context.close()
+                await browser.close()
                 raise EmptyReply(url=url, status_code=response.status)

             # Run Browser Steps here
             if self.browser_steps_get_valid_steps():
-                self.iterate_browser_steps(start_url=url)
+                await self.iterate_browser_steps(start_url=url)

-            self.page.wait_for_timeout(extra_wait * 1000)
+            await self.page.wait_for_timeout(extra_wait * 1000)

             now = time.time()
             # So we can find an element on the page where its selector was entered manually (maybe not xPath etc)
             if current_include_filters is not None:
-                self.page.evaluate("var include_filters={}".format(json.dumps(current_include_filters)))
+                await self.page.evaluate("var include_filters={}".format(json.dumps(current_include_filters)))
             else:
-                self.page.evaluate("var include_filters=''")
+                await self.page.evaluate("var include_filters=''")
+            await self.page.request_gc()

+            # request_gc before and after evaluate to free up memory
+            # @todo browsersteps etc
+            MAX_TOTAL_HEIGHT = int(os.getenv("SCREENSHOT_MAX_HEIGHT", SCREENSHOT_MAX_HEIGHT_DEFAULT))
+            self.xpath_data = await self.page.evaluate(XPATH_ELEMENT_JS, {
+                "visualselector_xpath_selectors": visualselector_xpath_selectors,
+                "max_height": MAX_TOTAL_HEIGHT
+            })
+            await self.page.request_gc()

+            self.instock_data = await self.page.evaluate(INSTOCK_DATA_JS)
+            await self.page.request_gc()

+            self.content = await self.page.content()
+            await self.page.request_gc()
+            logger.debug(f"Scrape xPath element data in browser done in {time.time() - now:.2f}s")

-            self.xpath_data = self.page.evaluate(
-                "async () => {" + self.xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors) + "}")
-            self.instock_data = self.page.evaluate("async () => {" + self.instock_data_js + "}")

-            self.content = self.page.content()
             # Bug 3 in Playwright screenshot handling
             # Some bug where it gives the wrong screenshot size, but making a request with the clip set first seems to solve it
             # JPEG is better here because the screenshots can be very very large
@@ -199,13 +293,41 @@ class fetcher(Fetcher):
             # acceptable screenshot quality here
             try:
-                # The actual screenshot - this is always base64 and needs decoding! horrible! huge CPU usage
-                self.screenshot = self.page.screenshot(type='jpeg',
-                                                       full_page=True,
-                                                       quality=int(os.getenv("SCREENSHOT_QUALITY", 72)),
-                                                       )
+                self.screenshot = await capture_full_page_async(page=self.page)

             except Exception as e:
                 # It's likely the screenshot was too long/big and something crashed
                 raise ScreenshotUnavailable(url=url, status_code=self.status_code)
             finally:
-                context.close()
-                browser.close()
+                # Request garbage collection one more time before closing
+                try:
+                    await self.page.request_gc()
+                except:
+                    pass
+
+                # Clean up resources properly
+                try:
+                    await self.page.request_gc()
+                except:
+                    pass
+
+                try:
+                    await self.page.close()
+                except:
+                    pass
+                self.page = None
+
+                try:
+                    await context.close()
+                except:
+                    pass
+                context = None
+
+                try:
+                    await browser.close()
+                except:
+                    pass
+                browser = None
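
The same connect-over-CDP flow in isolation, useful for experimenting against a running browser container (the ws:// endpoint is a placeholder for whatever your browser exposes):

import asyncio
from playwright.async_api import async_playwright

async def fetch(url, cdp_url):
    async with async_playwright() as p:
        browser = await p.chromium.connect_over_cdp(cdp_url, timeout=60000)
        context = await browser.new_context(bypass_csp=True)
        page = await context.new_page()
        response = await page.goto(url)
        html = await page.content()
        await context.close()
        await browser.close()
        return response.status, html

# asyncio.run(fetch("https://example.com", "ws://localhost:3000"))
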
@@ -6,8 +6,85 @@ from urllib.parse import urlparse

 from loguru import logger

+from changedetectionio.content_fetchers import SCREENSHOT_MAX_HEIGHT_DEFAULT, visualselector_xpath_selectors, \
+    SCREENSHOT_SIZE_STITCH_THRESHOLD, SCREENSHOT_DEFAULT_QUALITY, XPATH_ELEMENT_JS, INSTOCK_DATA_JS, \
+    SCREENSHOT_MAX_TOTAL_HEIGHT, FAVICON_FETCHER_JS
 from changedetectionio.content_fetchers.base import Fetcher, manage_user_agent
-from changedetectionio.content_fetchers.exceptions import PageUnloadable, Non200ErrorCodeReceived, EmptyReply, BrowserFetchTimedOut, BrowserConnectError
+from changedetectionio.content_fetchers.exceptions import PageUnloadable, Non200ErrorCodeReceived, EmptyReply, BrowserFetchTimedOut, \
+    BrowserConnectError


+# Bug 3 in Playwright screenshot handling
+# Some bug where it gives the wrong screenshot size, but making a request with the clip set first seems to solve it
+# JPEG is better here because the screenshots can be very very large
+
+# Screenshots also travel via the ws:// (websocket) meaning that the binary data is base64 encoded
+# which will significantly increase the IO size between the server and client, it's recommended to use the lowest
+# acceptable screenshot quality here
+async def capture_full_page(page):
+    import os
+    import time
+    from multiprocessing import Process, Pipe
+
+    start = time.time()
+
+    page_height = await page.evaluate("document.documentElement.scrollHeight")
+    page_width = await page.evaluate("document.documentElement.scrollWidth")
+    original_viewport = page.viewport
+
+    logger.debug(f"Puppeteer viewport size {page.viewport} page height {page_height} page width {page_width}")
+
+    step_size = SCREENSHOT_SIZE_STITCH_THRESHOLD  # Something that will not cause the GPU to overflow when taking the screenshot
+    screenshot_chunks = []
+    y = 0
+    if page_height > page.viewport['height']:
+        if page_height < step_size:
+            step_size = page_height  # In case the page is bigger than the default viewport but smaller than the proposed step size
+        await page.setViewport({'width': page.viewport['width'], 'height': step_size})
+
+    while y < min(page_height, SCREENSHOT_MAX_TOTAL_HEIGHT):
+        # better than scrollTo in case they override it in the page
+        await page.evaluate(
+            """(y) => {
+                document.documentElement.scrollTop = y;
+                document.body.scrollTop = y;
+            }""",
+            y
+        )
+
+        screenshot_chunks.append(await page.screenshot(type_='jpeg',
+                                                       fullPage=False,
+                                                       quality=int(os.getenv("SCREENSHOT_QUALITY", 72))))
+        y += step_size
+
+    await page.setViewport({'width': original_viewport['width'], 'height': original_viewport['height']})
+
+    if len(screenshot_chunks) > 1:
+        from changedetectionio.content_fetchers.screenshot_handler import stitch_images_worker
+        logger.debug(f"Screenshot stitching {len(screenshot_chunks)} chunks together")
+        parent_conn, child_conn = Pipe()
+        p = Process(target=stitch_images_worker, args=(child_conn, screenshot_chunks, page_height, SCREENSHOT_MAX_TOTAL_HEIGHT))
+        p.start()
+        screenshot = parent_conn.recv_bytes()
+        p.join()
+        logger.debug(
+            f"Screenshot (chunked/stitched) - Page height: {page_height} Capture height: {SCREENSHOT_MAX_TOTAL_HEIGHT} - Stitched together in {time.time() - start:.2f}s")
+
+        screenshot_chunks = None
+        return screenshot
+
+    logger.debug(
+        f"Screenshot Page height: {page_height} Capture height: {SCREENSHOT_MAX_TOTAL_HEIGHT} - Single capture in {time.time() - start:.2f}s")
+    return screenshot_chunks[0]


 class fetcher(Fetcher):
     fetcher_description = "Puppeteer/direct {}/Javascript".format(
@@ -68,20 +145,24 @@ class fetcher(Fetcher):
     #                 f.write(content)

     async def fetch_page(self,
-                         url,
-                         timeout,
-                         request_headers,
-                         request_body,
-                         request_method,
-                         ignore_status_codes,
-                         current_include_filters,
-                         is_binary,
-                         empty_pages_are_a_change
+                         current_include_filters,
+                         empty_pages_are_a_change,
+                         fetch_favicon,
+                         ignore_status_codes,
+                         is_binary,
+                         request_body,
+                         request_headers,
+                         request_method,
+                         timeout,
+                         url,
                          ):

         from changedetectionio.content_fetchers import visualselector_xpath_selectors
+        import re
         self.delete_browser_steps_screenshots()
-        extra_wait = int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay
+
+        n = int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay
+        extra_wait = min(n, 15)
+
+        logger.debug(f"Extra wait set to {extra_wait}s, requested was {n}s.")

         from pyppeteer import Pyppeteer
         pyppeteer_instance = Pyppeteer()
@@ -97,12 +178,24 @@ class fetcher(Fetcher):
         except websockets.exceptions.InvalidURI:
             raise BrowserConnectError(msg=f"Error connecting to the browser, check your browser connection address (should be ws:// or wss://)")
         except Exception as e:
-            raise BrowserConnectError(msg=f"Error connecting to the browser {str(e)}")
+            raise BrowserConnectError(msg=f"Error connecting to the browser - Exception '{str(e)}'")

-        # Better is to launch chrome with the URL as arg
-        # non-headless - newPage() will launch an extra tab/window, .browser should already contain 1 page/tab
-        # headless - ask a new page
-        self.page = (pages := await browser.pages) and len(pages) or await browser.newPage()
+        # more reliable is to just request a new page
+        self.page = await browser.newPage()

+        # Add console handler to capture console.log from favicon fetcher
+        #self.page.on('console', lambda msg: logger.debug(f"Browser console [{msg.type}]: {msg.text}"))

+        if '--window-size' in self.browser_connection_url:
+            # Be sure the viewport is always the window-size, this is often not the same thing
+            match = re.search(r'--window-size=(\d+),(\d+)', self.browser_connection_url)
+            if match:
+                logger.debug(f"Setting viewport to same as --window-size in browser connection URL {int(match.group(1))},{int(match.group(2))}")
+                await self.page.setViewport({
+                    "width": int(match.group(1)),
+                    "height": int(match.group(2))
+                })
+        logger.debug(f"Puppeteer viewport size {self.page.viewport}")

         try:
             from pyppeteerstealth import inject_evasions_into_page
@@ -148,14 +241,35 @@ class fetcher(Fetcher):
         # browsersteps_interface = steppable_browser_interface()
         # browsersteps_interface.page = self.page

-        response = await self.page.goto(url, waitUntil="load")
+        async def handle_frame_navigation(event):
+            logger.debug(f"Frame navigated: {event}")
+            w = extra_wait - 2 if extra_wait > 4 else 2
+            logger.debug(f"Waiting {w} seconds before calling Page.stopLoading...")
+            await asyncio.sleep(w)
+            logger.debug("Issuing stopLoading command...")
+            await self.page._client.send('Page.stopLoading')
+            logger.debug("stopLoading command sent!")

+        self.page._client.on('Page.frameStartedNavigating', lambda event: asyncio.create_task(handle_frame_navigation(event)))
+        self.page._client.on('Page.frameStartedLoading', lambda event: asyncio.create_task(handle_frame_navigation(event)))
+        self.page._client.on('Page.frameStoppedLoading', lambda event: logger.debug(f"Frame stopped loading: {event}"))

-        if response is None:
-            await self.page.close()
-            await browser.close()
-            logger.warning("Content Fetcher > Response object was none (as in, the response from the browser was empty, not just the content)")
-            raise EmptyReply(url=url, status_code=None)
+        response = None
+        attempt = 0
+        while not response:
+            logger.debug(f"Attempting page fetch {url} attempt {attempt}")
+            response = await self.page.goto(url)
+            await asyncio.sleep(1 + extra_wait)
+            if response:
+                break
+            if not response:
+                logger.warning("Page did not fetch! trying again!")
+            if response is None and attempt >= 2:
+                await self.page.close()
+                await browser.close()
+                logger.warning(f"Content Fetcher > Response object was none (as in, the response from the browser was empty, not just the content) exiting attempt {attempt}")
+                raise EmptyReply(url=url, status_code=None)
+            attempt += 1

         self.headers = response.headers
@@ -180,12 +294,17 @@ class fetcher(Fetcher):
             await browser.close()
             raise PageUnloadable(url=url, status_code=None, message=str(e))

+        if fetch_favicon:
+            try:
+                self.favicon_blob = await self.page.evaluate(FAVICON_FETCHER_JS)
+            except Exception as e:
+                logger.error(f"Error fetching FavIcon info {str(e)}, continuing.")

         if self.status_code != 200 and not ignore_status_codes:
-            screenshot = await self.page.screenshot(type_='jpeg',
-                                                    fullPage=True,
-                                                    quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))
+            screenshot = await capture_full_page(page=self.page)

             raise Non200ErrorCodeReceived(url=url, status_code=self.status_code, screenshot=screenshot)

         content = await self.page.content

         if not empty_pages_are_a_change and len(content.strip()) == 0:
@@ -199,74 +318,69 @@ class fetcher(Fetcher):
         # if self.browser_steps_get_valid_steps():
         #     self.iterate_browser_steps()

         await asyncio.sleep(1 + extra_wait)

-        # So we can find an element on the page where its selector was entered manually (maybe not xPath etc)
-        if current_include_filters is not None:
+        # Setup the xPath/VisualSelector scraper
+        if current_include_filters:
             js = json.dumps(current_include_filters)
             await self.page.evaluate(f"var include_filters={js}")
         else:
             await self.page.evaluate(f"var include_filters=''")

-        self.xpath_data = await self.page.evaluate(
-            "async () => {" + self.xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors) + "}")
-        self.instock_data = await self.page.evaluate("async () => {" + self.instock_data_js + "}")
+        MAX_TOTAL_HEIGHT = int(os.getenv("SCREENSHOT_MAX_HEIGHT", SCREENSHOT_MAX_HEIGHT_DEFAULT))
+        self.xpath_data = await self.page.evaluate(XPATH_ELEMENT_JS, {
+            "visualselector_xpath_selectors": visualselector_xpath_selectors,
+            "max_height": MAX_TOTAL_HEIGHT
+        })
+        if not self.xpath_data:
+            raise Exception(f"Content Fetcher > xPath scraper failed. Please report this URL so we can fix it :)")

+        self.instock_data = await self.page.evaluate(INSTOCK_DATA_JS)

         self.content = await self.page.content
         # Bug 3 in Playwright screenshot handling
         # Some bug where it gives the wrong screenshot size, but making a request with the clip set first seems to solve it
         # JPEG is better here because the screenshots can be very very large

         # Screenshots also travel via the ws:// (websocket) meaning that the binary data is base64 encoded
         # which will significantly increase the IO size between the server and client, it's recommended to use the lowest
         # acceptable screenshot quality here
-        try:
-            self.screenshot = await self.page.screenshot(type_='jpeg',
-                                                         fullPage=True,
-                                                         quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))
-        except Exception as e:
-            logger.error("Error fetching screenshot")
-            # // May fail on very large pages with 'WARNING: tile memory limits exceeded, some content may not draw'
-            # // @todo after text extract, we can place some overlay text with red background to say 'cropped'
-            logger.error('ERROR: content-fetcher page was maybe too large for a screenshot, reverting to viewport only screenshot')
-            try:
-                self.screenshot = await self.page.screenshot(type_='jpeg',
-                                                             fullPage=False,
-                                                             quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))
-            except Exception as e:
-                logger.error('ERROR: Failed to get viewport-only reduced screenshot :(')
-                pass
-        finally:
-            # It's good to log here in the case that the browser crashes on shutting down but we still get the data we need
-            logger.success(f"Fetching '{url}' complete, closing page")
-            await self.page.close()
-            logger.success(f"Fetching '{url}' complete, closing browser")
-            await browser.close()
+        self.screenshot = await capture_full_page(page=self.page)

+        # It's good to log here in the case that the browser crashes on shutting down but we still get the data we need
+        logger.success(f"Fetching '{url}' complete, closing page")
+        await self.page.close()
+        logger.success(f"Fetching '{url}' complete, closing browser")
+        await browser.close()
+        logger.success(f"Fetching '{url}' complete, exiting puppeteer fetch.")

     async def main(self, **kwargs):
         await self.fetch_page(**kwargs)

-    def run(self, url, timeout, request_headers, request_body, request_method, ignore_status_codes=False,
-            current_include_filters=None, is_binary=False, empty_pages_are_a_change=False):
+    async def run(self,
+                  fetch_favicon=True,
+                  current_include_filters=None,
+                  empty_pages_are_a_change=False,
+                  ignore_status_codes=False,
+                  is_binary=False,
+                  request_body=None,
+                  request_headers=None,
+                  request_method=None,
+                  timeout=None,
+                  url=None,
+                  ):

         #@todo make update_worker async which could run any of these content_fetchers within memory and time constraints
-        max_time = os.getenv('PUPPETEER_MAX_PROCESSING_TIMEOUT_SECONDS', 180)
+        max_time = int(os.getenv('PUPPETEER_MAX_PROCESSING_TIMEOUT_SECONDS', 180))

-        # This will work in 3.10 but not >= 3.11 because 3.11 wants tasks only
+        # Now we run this properly in async context since we're called from async worker
         try:
-            asyncio.run(asyncio.wait_for(self.main(
-                url=url,
-                timeout=timeout,
-                request_headers=request_headers,
-                request_body=request_body,
-                request_method=request_method,
-                ignore_status_codes=ignore_status_codes,
-                current_include_filters=current_include_filters,
-                is_binary=is_binary,
-                empty_pages_are_a_change=empty_pages_are_a_change
-            ), timeout=max_time))
+            await asyncio.wait_for(self.main(
+                current_include_filters=current_include_filters,
+                empty_pages_are_a_change=empty_pages_are_a_change,
+                fetch_favicon=fetch_favicon,
+                ignore_status_codes=ignore_status_codes,
+                is_binary=is_binary,
+                request_body=request_body,
+                request_headers=request_headers,
+                request_method=request_method,
+                timeout=timeout,
+                url=url,
+                ), timeout=max_time
+            )
         except asyncio.TimeoutError:
-            raise(BrowserFetchTimedOut(msg=f"Browser connected but was unable to process the page in {max_time} seconds."))
+            raise (BrowserFetchTimedOut(msg=f"Browser connected but was unable to process the page in {max_time} seconds."))
@@ -1,6 +1,7 @@
from loguru import logger
import hashlib
import os
import asyncio
from changedetectionio import strtobool
from changedetectionio.content_fetchers.exceptions import BrowserStepsInUnsupportedFetcher, EmptyReply, Non200ErrorCodeReceived
from changedetectionio.content_fetchers.base import Fetcher
@@ -15,7 +16,7 @@ class fetcher(Fetcher):
        self.proxy_override = proxy_override
        # browser_connection_url is none because its always 'launched locally'

    def run(self,
    def _run_sync(self,
            url,
            timeout,
            request_headers,
@@ -25,9 +26,11 @@ class fetcher(Fetcher):
            current_include_filters=None,
            is_binary=False,
            empty_pages_are_a_change=False):
        """Synchronous version of run - the original requests implementation"""

        import chardet
        import requests
        from requests.exceptions import ProxyError, ConnectionError, RequestException

        if self.browser_steps_get_valid_steps():
            raise BrowserStepsInUnsupportedFetcher(url=url)
@@ -35,7 +38,6 @@ class fetcher(Fetcher):
        proxies = {}

        # Allows override the proxy on a per-request basis

        # https://requests.readthedocs.io/en/latest/user/advanced/#socks
        # Should also work with `socks5://user:pass@host:port` type syntax.

@@ -49,17 +51,23 @@ class fetcher(Fetcher):

        session = requests.Session()


        if strtobool(os.getenv('ALLOW_FILE_URI', 'false')) and url.startswith('file://'):
            from requests_file import FileAdapter
            session.mount('file://', FileAdapter())

        r = session.request(method=request_method,
                            data=request_body.encode('utf-8') if type(request_body) is str else request_body,
                            url=url,
                            headers=request_headers,
                            timeout=timeout,
                            proxies=proxies,
                            verify=False)
        try:
            r = session.request(method=request_method,
                                data=request_body.encode('utf-8') if type(request_body) is str else request_body,
                                url=url,
                                headers=request_headers,
                                timeout=timeout,
                                proxies=proxies,
                                verify=False)
        except Exception as e:
            msg = str(e)
            if proxies and 'SOCKSHTTPSConnectionPool' in msg:
                msg = f"Proxy connection failed? {msg}"
            raise Exception(msg) from e

        # If the response did not tell us what encoding format to expect, Then use chardet to override what `requests` thinks.
        # For example - some sites don't tell us it's utf-8, but return utf-8 content
@@ -94,5 +102,50 @@ class fetcher(Fetcher):
        else:
            self.content = r.text


        self.raw_content = r.content

    async def run(self,
                  fetch_favicon=True,
                  current_include_filters=None,
                  empty_pages_are_a_change=False,
                  ignore_status_codes=False,
                  is_binary=False,
                  request_body=None,
                  request_headers=None,
                  request_method=None,
                  timeout=None,
                  url=None,
                  ):
        """Async wrapper that runs the synchronous requests code in a thread pool"""

        loop = asyncio.get_event_loop()

        # Run the synchronous _run_sync in a thread pool to avoid blocking the event loop
        await loop.run_in_executor(
            None,  # Use default ThreadPoolExecutor
            lambda: self._run_sync(
                url=url,
                timeout=timeout,
                request_headers=request_headers,
                request_body=request_body,
                request_method=request_method,
                ignore_status_codes=ignore_status_codes,
                current_include_filters=current_include_filters,
                is_binary=is_binary,
                empty_pages_are_a_change=empty_pages_are_a_change
            )
        )

    def quit(self, watch=None):

        # In case they switched to `requests` fetcher from something else
        # Then the screenshot could be old, in any case, it's not used here.
        # REMOVE_REQUESTS_OLD_SCREENSHOTS - Mainly used for testing
        if strtobool(os.getenv("REMOVE_REQUESTS_OLD_SCREENSHOTS", 'true')):
            screenshot = watch.get_screenshot()
            if screenshot:
                try:
                    os.unlink(screenshot)
                except Exception as e:
                    logger.warning(f"Failed to unlink screenshot: {screenshot} - {e}")
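The new async run above is a thin wrapper: the blocking requests code stays synchronous and is pushed onto the default ThreadPoolExecutor so the event loop keeps servicing other watches. A runnable sketch of the same pattern (the function names here are illustrative stand-ins):

    import asyncio
    import time

    def _run_sync(url, timeout):
        # Stand-in for the blocking requests-based fetch
        time.sleep(0.1)
        return f"fetched {url} with timeout={timeout}"

    async def run(url, timeout=10):
        loop = asyncio.get_event_loop()
        # None = use the loop's default ThreadPoolExecutor
        return await loop.run_in_executor(None, lambda: _run_sync(url=url, timeout=timeout))

    print(asyncio.run(run("https://example.com")))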
changedetectionio/content_fetchers/res/favicon-fetcher.js (new file, 101 lines)
@@ -0,0 +1,101 @@
(async () => {
    // Define the function inside the IIFE for console testing
    window.getFaviconAsBlob = async function() {
        const links = Array.from(document.querySelectorAll(
            'link[rel~="apple-touch-icon"], link[rel~="icon"]'
        ));

        const icons = links.map(link => {
            const sizesStr = link.getAttribute('sizes');
            let size = 0;
            if (sizesStr) {
                const [w] = sizesStr.split('x').map(Number);
                if (!isNaN(w)) size = w;
            } else {
                size = 16;
            }
            return {
                size,
                rel: link.getAttribute('rel'),
                href: link.href,
                hasSizes: !!sizesStr
            };
        });

        // If no icons found, add fallback favicon.ico
        if (icons.length === 0) {
            icons.push({
                size: 16,
                rel: 'icon',
                href: '/favicon.ico',
                hasSizes: false
            });
        }

        // sort preference: highest resolution first, then apple-touch-icon, then regular icons
        icons.sort((a, b) => {
            // First priority: actual size (highest first)
            if (a.size !== b.size) {
                return b.size - a.size;
            }

            // Second priority: apple-touch-icon over regular icon
            const isAppleA = /apple-touch-icon/.test(a.rel);
            const isAppleB = /apple-touch-icon/.test(b.rel);
            if (isAppleA && !isAppleB) return -1;
            if (!isAppleA && isAppleB) return 1;

            // Third priority: icons with no size attribute (fallback icons) last
            const hasNoSizeA = !a.hasSizes;
            const hasNoSizeB = !b.hasSizes;
            if (hasNoSizeA && !hasNoSizeB) return 1;
            if (!hasNoSizeA && hasNoSizeB) return -1;

            return 0;
        });

        const timeoutMs = 2000;

        for (const icon of icons) {
            try {
                const controller = new AbortController();
                const timeout = setTimeout(() => controller.abort(), timeoutMs);

                const resp = await fetch(icon.href, {
                    signal: controller.signal,
                    redirect: 'follow'
                });

                clearTimeout(timeout);

                if (!resp.ok) {
                    continue;
                }

                const blob = await resp.blob();

                // Convert blob to base64
                const reader = new FileReader();
                return await new Promise(resolve => {
                    reader.onloadend = () => {
                        resolve({
                            url: icon.href,
                            base64: reader.result.split(",")[1]
                        });
                    };
                    reader.readAsDataURL(blob);
                });

            } catch (e) {
                continue;
            }
        }

        // nothing found
        return null;
    };

    // Auto-execute and return result for page.evaluate()
    return await window.getFaviconAsBlob();
})();
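favicon-fetcher.js is written as an IIFE so the same file works both pasted into a browser console and handed to page.evaluate(), which awaits the returned promise. One way the Python side might consume it, as a sketch under the assumption of a Playwright page object (the helper name grab_favicon is made up for illustration):

    import base64

    async def grab_favicon(page, script_path="changedetectionio/content_fetchers/res/favicon-fetcher.js"):
        with open(script_path) as f:
            js = f.read()
        # The IIFE resolves to {'url': ..., 'base64': ...} or null
        result = await page.evaluate(js)
        if not result:
            return None
        return result['url'], base64.b64decode(result['base64'])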
@@ -1,190 +0,0 @@
module.exports = async ({page, context}) => {

    var {
        url,
        execute_js,
        user_agent,
        extra_wait_ms,
        req_headers,
        include_filters,
        xpath_element_js,
        screenshot_quality,
        proxy_username,
        proxy_password,
        disk_cache_dir,
        no_cache_list,
        block_url_list,
    } = context;

    await page.setBypassCSP(true)
    await page.setExtraHTTPHeaders(req_headers);

    if (user_agent) {
        await page.setUserAgent(user_agent);
    }
    // https://ourcodeworld.com/articles/read/1106/how-to-solve-puppeteer-timeouterror-navigation-timeout-of-30000-ms-exceeded

    await page.setDefaultNavigationTimeout(0);

    if (proxy_username) {
        // Setting Proxy-Authentication header is deprecated, and doing so can trigger header change errors from Puppeteer
        // https://github.com/puppeteer/puppeteer/issues/676 ?
        // https://help.brightdata.com/hc/en-us/articles/12632549957649-Proxy-Manager-How-to-Guides#h_01HAKWR4Q0AFS8RZTNYWRDFJC2
        // https://cri.dev/posts/2020-03-30-How-to-solve-Puppeteer-Chrome-Error-ERR_INVALID_ARGUMENT/
        await page.authenticate({
            username: proxy_username,
            password: proxy_password
        });
    }

    await page.setViewport({
        width: 1024,
        height: 768,
        deviceScaleFactor: 1,
    });

    await page.setRequestInterception(true);
    if (disk_cache_dir) {
        console.log(">>>>>>>>>>>>>>> LOCAL DISK CACHE ENABLED <<<<<<<<<<<<<<<<<<<<<");
    }
    const fs = require('fs');
    const crypto = require('crypto');

    function file_is_expired(file_path) {
        if (!fs.existsSync(file_path)) {
            return true;
        }
        var stats = fs.statSync(file_path);
        const now_date = new Date();
        const expire_seconds = 300;
        if ((now_date / 1000) - (stats.mtime.getTime() / 1000) > expire_seconds) {
            console.log("CACHE EXPIRED: " + file_path);
            return true;
        }
        return false;

    }

    page.on('request', async (request) => {
        // General blocking of requests that waste traffic
        if (block_url_list.some(substring => request.url().toLowerCase().includes(substring))) return request.abort();

        if (disk_cache_dir) {
            const url = request.url();
            const key = crypto.createHash('md5').update(url).digest("hex");
            const dir_path = disk_cache_dir + key.slice(0, 1) + '/' + key.slice(1, 2) + '/' + key.slice(2, 3) + '/';

            // https://stackoverflow.com/questions/4482686/check-synchronously-if-file-directory-exists-in-node-js

            if (fs.existsSync(dir_path + key)) {
                console.log("* CACHE HIT , using - " + dir_path + key + " - " + url);
                const cached_data = fs.readFileSync(dir_path + key);
                // @todo headers can come from dir_path+key+".meta" json file
                request.respond({
                    status: 200,
                    //contentType: 'text/html', //@todo
                    body: cached_data
                });
                return;
            }
        }
        request.continue();
    });


    if (disk_cache_dir) {
        page.on('response', async (response) => {
            const url = response.url();
            // Basic filtering for sane responses
            if (response.request().method() != 'GET' || response.request().resourceType() == 'xhr' || response.request().resourceType() == 'document' || response.status() != 200) {
                console.log("Skipping (not useful) - Status:" + response.status() + " Method:" + response.request().method() + " ResourceType:" + response.request().resourceType() + " " + url);
                return;
            }
            if (no_cache_list.some(substring => url.toLowerCase().includes(substring))) {
                console.log("Skipping (no_cache_list) - " + url);
                return;
            }
            if (url.toLowerCase().includes('data:')) {
                console.log("Skipping (embedded-data) - " + url);
                return;
            }
            response.buffer().then(buffer => {
                if (buffer.length > 100) {
                    console.log("Cache - Saving " + response.request().method() + " - " + url + " - " + response.request().resourceType());

                    const key = crypto.createHash('md5').update(url).digest("hex");
                    const dir_path = disk_cache_dir + key.slice(0, 1) + '/' + key.slice(1, 2) + '/' + key.slice(2, 3) + '/';

                    if (!fs.existsSync(dir_path)) {
                        fs.mkdirSync(dir_path, {recursive: true})
                    }

                    if (fs.existsSync(dir_path + key)) {
                        if (file_is_expired(dir_path + key)) {
                            fs.writeFileSync(dir_path + key, buffer);
                        }
                    } else {
                        fs.writeFileSync(dir_path + key, buffer);
                    }
                }
            });
        });
    }

    const r = await page.goto(url, {
        waitUntil: 'load'
    });

    await page.waitForTimeout(1000);
    await page.waitForTimeout(extra_wait_ms);

    if (execute_js) {
        await page.evaluate(execute_js);
        await page.waitForTimeout(200);
    }

    var xpath_data;
    var instock_data;
    try {
        // Not sure the best way here, in the future this should be a new package added to npm then run in evaluatedCode
        // (Once the old playwright is removed)
        xpath_data = await page.evaluate((include_filters) => {%xpath_scrape_code%}, include_filters);
        instock_data = await page.evaluate(() => {%instock_scrape_code%});
    } catch (e) {
        console.log(e);
    }

    // Protocol error (Page.captureScreenshot): Cannot take screenshot with 0 width can come from a proxy auth failure
    // Wrap it here (for now)

    var b64s = false;
    try {
        b64s = await page.screenshot({encoding: "base64", fullPage: true, quality: screenshot_quality, type: 'jpeg'});
    } catch (e) {
        console.log(e);
    }

    // May fail on very large pages with 'WARNING: tile memory limits exceeded, some content may not draw'
    if (!b64s) {
        // @todo after text extract, we can place some overlay text with red background to say 'croppped'
        console.error('ERROR: content-fetcher page was maybe too large for a screenshot, reverting to viewport only screenshot');
        try {
            b64s = await page.screenshot({encoding: "base64", quality: screenshot_quality, type: 'jpeg'});
        } catch (e) {
            console.log(e);
        }
    }

    var html = await page.content();
    return {
        data: {
            'content': html,
            'headers': r.headers(),
            'instock_data': instock_data,
            'screenshot': b64s,
            'status_code': r.status(),
            'xpath_data': xpath_data
        },
        type: 'application/json',
    };
};
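The removed script's disk cache keyed each entry by the md5 of the URL, sharded into three single-character directory levels so no one directory grows too large. The same layout expressed in Python, as a sketch (the cache root here is an assumption):

    import hashlib
    import os

    def cache_path_for(url, disk_cache_dir="/tmp/cdio-cache"):
        key = hashlib.md5(url.encode()).hexdigest()
        # Shard by the first three hex chars, e.g. <root>/a/b/c/abc123...
        return os.path.join(disk_cache_dir, key[0], key[1], key[2], key)

    print(cache_path_for("https://example.com/app.js"))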
@@ -1,217 +1,239 @@
// Restock Detector
// (c) Leigh Morresi dgtlmoon@gmail.com
//
// Assumes the product is in stock to begin with, unless the following appears above the fold ;
// - outOfStockTexts appears above the fold (out of stock)
// - negateOutOfStockRegex (really is in stock)
async () => {

    function isItemInStock() {
        // @todo Pass these in so the same list can be used in non-JS fetchers
        const outOfStockTexts = [
            ' أخبرني عندما يتوفر',
            '0 in stock',
            'actuellement indisponible',
            'agotado',
            'article épuisé',
            'artikel zurzeit vergriffen',
            'as soon as stock is available',
            'ausverkauft', // sold out
            'available for back order',
            'awaiting stock',
            'back in stock soon',
            'back-order or out of stock',
            'backordered',
            'benachrichtigt mich', // notify me
            'brak na stanie',
            'brak w magazynie',
            'coming soon',
            'currently have any tickets for this',
            'currently unavailable',
            'dieser artikel ist bald wieder verfügbar',
            'dostępne wkrótce',
            'en rupture de stock',
            'esgotado',
            'indisponível',
            'isn\'t in stock right now',
            'isnt in stock right now',
            'isn’t in stock right now',
            'item is no longer available',
            'let me know when it\'s available',
            'mail me when available',
            'message if back in stock',
            'mevcut değil',
            'nachricht bei',
            'nicht auf lager',
            'nicht lagernd',
            'nicht lieferbar',
            'nicht verfügbar',
            'nicht vorrätig',
            'nicht zur verfügung',
            'nie znaleziono produktów',
            'niet beschikbaar',
            'niet leverbaar',
            'niet op voorraad',
            'no disponible',
            'non disponibile',
            'no longer in stock',
            'no tickets available',
            'not available',
            'not currently available',
            'not in stock',
            'notify me when available',
            'notify me',
            'notify when available',
            'não disponível',
            'não estamos a aceitar encomendas',
            'out of stock',
            'out-of-stock',
            'prodotto esaurito',
            'produkt niedostępny',
            'sold out',
            'sold-out',
            'stokta yok',
            'temporarily out of stock',
            'temporarily unavailable',
            'there were no search results for',
            'this item is currently unavailable',
            'tickets unavailable',
            'tijdelijk uitverkocht',
            'tükendi',
            'unavailable nearby',
            'unavailable tickets',
            'vergriffen',
            'vorbestellen',
            'vorbestellung ist bald möglich',
            'we don\'t currently have any',
            'we couldn\'t find any products that match',
            'we do not currently have an estimate of when this product will be back in stock.',
            'we don\'t know when or if this item will be back in stock.',
            'we were not able to find a match',
            'when this arrives in stock',
            'zur zeit nicht an lager',
            '品切れ',
            '已售',
            '已售完',
            '품절'
        ];
    function isItemInStock() {
        // @todo Pass these in so the same list can be used in non-JS fetchers
        const outOfStockTexts = [
            ' أخبرني عندما يتوفر',
            '0 in stock',
            'actuellement indisponible',
            'agotado',
            'article épuisé',
            'artikel zurzeit vergriffen',
            'as soon as stock is available',
            'aucune offre n\'est disponible',
            'ausverkauft', // sold out
            'available for back order',
            'awaiting stock',
            'back in stock soon',
            'back-order or out of stock',
            'backordered',
            'backorder',
            'benachrichtigt mich', // notify me
            'binnenkort leverbaar', // coming soon
            'brak na stanie',
            'brak w magazynie',
            'coming soon',
            'currently have any tickets for this',
            'currently unavailable',
            'dieser artikel ist bald wieder verfügbar',
            'dostępne wkrótce',
            'en rupture',
            'esgotado',
            'in kürze lieferbar',
            'indisponible',
            'indisponível',
            'isn\'t in stock right now',
            'isnt in stock right now',
            'isn’t in stock right now',
            'item is no longer available',
            'let me know when it\'s available',
            'mail me when available',
            'message if back in stock',
            'mevcut değil',
            'more on order',
            'nachricht bei',
            'nicht auf lager',
            'nicht lagernd',
            'nicht lieferbar',
            'nicht verfügbar',
            'nicht vorrätig',
            'nicht mehr lieferbar',
            'nicht zur verfügung',
            'nie znaleziono produktów',
            'niet beschikbaar',
            'niet leverbaar',
            'niet op voorraad',
            'no disponible',
            'no featured offers available',
            'no longer available',
            'no longer in stock',
            'no tickets available',
            'non disponibile',
            'non disponible',
            'not available',
            'not currently available',
            'not in stock',
            'notify me when available',
            'notify me',
            'notify when available',
            'não disponível',
            'não estamos a aceitar encomendas',
            'out of stock',
            'out-of-stock',
            'plus disponible',
            'prodotto esaurito',
            'produkt niedostępny',
            'rupture',
            'sold out',
            'sold-out',
            'stok habis',
            'stok kosong',
            'stok varian ini habis',
            'stokta yok',
            'temporarily out of stock',
            'temporarily unavailable',
            'there were no search results for',
            'this item is currently unavailable',
            'tickets unavailable',
            'tidak dijual',
            'tidak tersedia',
            'tijdelijk uitverkocht',
            'tiket tidak tersedia',
            'to subscribe to back in stock',
            'tükendi',
            'unavailable nearby',
            'unavailable tickets',
            'vergriffen',
            'vorbestellen',
            'vorbestellung ist bald möglich',
            'we couldn\'t find any products that match',
            'we do not currently have an estimate of when this product will be back in stock.',
            'we don\'t currently have any',
            'we don\'t know when or if this item will be back in stock.',
            'we were not able to find a match',
            'when this arrives in stock',
            'when this item is available to order',
            'zur zeit nicht an lager',
            'épuisé',
            '品切れ',
            '已售',
            '已售完',
            '품절'
        ];


        const vh = Math.max(document.documentElement.clientHeight || 0, window.innerHeight || 0);

        function getElementBaseText(element) {
            // .textContent can include text from children which may give the wrong results
            // scan only immediate TEXT_NODEs, which will be a child of the element
            var text = "";
            for (var i = 0; i < element.childNodes.length; ++i)
                if (element.childNodes[i].nodeType === Node.TEXT_NODE)
                    text += element.childNodes[i].textContent;
            return text.toLowerCase().trim();
        }

        const negateOutOfStockRegex = new RegExp('^([0-9] in stock|add to cart|in stock)', 'ig');

        // The out-of-stock or in-stock-text is generally always above-the-fold
        // and often below-the-fold is a list of related products that may or may not contain trigger text
        // so it's good to filter to just the 'above the fold' elements
        // and it should be atleast 100px from the top to ignore items in the toolbar, sometimes menu items like "Coming soon" exist
        const negateOutOfStockRegex = new RegExp('^([0-9] in stock|add to cart|in stock|arrives approximately)', 'ig');

        function elementIsInEyeBallRange(element) {
            // outside the 'fold' or some weird text in the heading area
            // .getBoundingClientRect() was causing a crash in chrome 119, can only be run on contentVisibility != hidden
            // Note: theres also an automated test that places the 'out of stock' text fairly low down
            // Skip text that could be in the header area
            if (element.getBoundingClientRect().bottom + window.scrollY <= 300 ) {
                return false;
            }
            // Skip text that could be much further down (like a list of "you may like" products that have 'sold out' in there
            if (element.getBoundingClientRect().bottom + window.scrollY >= 1300 ) {
                return false;
            }
            return true;
        }

        // @todo - if it's SVG or IMG, go into image diff mode
        // %ELEMENTS% replaced at injection time because different interfaces use it with different settings

        console.log("Scanning %ELEMENTS%");
        function collectVisibleElements(parent, visibleElements) {
            if (!parent) return; // Base case: if parent is null or undefined, return

            // Add the parent itself to the visible elements array if it's of the specified types
            visibleElements.push(parent);

            // Iterate over the parent's children
            const children = parent.children;
            for (let i = 0; i < children.length; i++) {
                const child = children[i];
                if (
                    child.nodeType === Node.ELEMENT_NODE &&
                    window.getComputedStyle(child).display !== 'none' &&
                    window.getComputedStyle(child).visibility !== 'hidden' &&
                    child.offsetWidth >= 0 &&
                    child.offsetHeight >= 0 &&
                    window.getComputedStyle(child).contentVisibility !== 'hidden'
                ) {
                    // If the child is an element and is visible, recursively collect visible elements
                    collectVisibleElements(child, visibleElements);
                }
            }
        }

        const elementsToScan = [];
        collectVisibleElements(document.body, elementsToScan);

        var elementText = "";

        // REGEXS THAT REALLY MEAN IT'S IN STOCK
        for (let i = elementsToScan.length - 1; i >= 0; i--) {
            const element = elementsToScan[i];

            // outside the 'fold' or some weird text in the heading area
            // .getBoundingClientRect() was causing a crash in chrome 119, can only be run on contentVisibility != hidden
            if (element.getBoundingClientRect().top + window.scrollY >= vh || element.getBoundingClientRect().top + window.scrollY <= 100) {
                continue
            if (!elementIsInEyeBallRange(element)) {
                continue
            }

            elementText = "";
            try {
                if (element.tagName.toLowerCase() === "input") {
                    elementText = element.value.toLowerCase().trim();
                } else {
                    elementText = getElementBaseText(element);
                }
            } catch (e) {
                console.warn('stock-not-in-stock.js scraper - handling element for gettext failed', e);
            }
            if (elementText.length) {
                // try which ones could mean its in stock
                if (negateOutOfStockRegex.test(elementText) && !elementText.includes('(0 products)')) {
                    console.log(`Negating/overriding 'Out of Stock' back to "Possibly in stock" found "${elementText}"`)
                    element.style.border = "2px solid green"; // highlight the element that was detected as in stock
                    return 'Possibly in stock';
                }
            }
        }

        elementText = "";
        try {
            // OTHER STUFF THAT COULD BE THAT IT'S OUT OF STOCK
            for (let i = elementsToScan.length - 1; i >= 0; i--) {
                const element = elementsToScan[i];

                if (!elementIsInEyeBallRange(element)) {
                    continue
                }
                elementText = "";
                if (element.tagName.toLowerCase() === "input") {
                    elementText = element.value.toLowerCase().trim();
                } else {
                    elementText = getElementBaseText(element);
                }
        } catch (e) {
            console.warn('stock-not-in-stock.js scraper - handling element for gettext failed', e);
        }

            if (elementText.length) {
                // try which ones could mean its in stock
                if (negateOutOfStockRegex.test(elementText) && !elementText.includes('(0 products)')) {
                    console.log(`Negating/overriding 'Out of Stock' back to "Possibly in stock" found "${elementText}"`)
                    return 'Possibly in stock';
                }
            }
        }

        // OTHER STUFF THAT COULD BE THAT IT'S OUT OF STOCK
        for (let i = elementsToScan.length - 1; i >= 0; i--) {
            const element = elementsToScan[i];
            // outside the 'fold' or some weird text in the heading area
            // .getBoundingClientRect() was causing a crash in chrome 119, can only be run on contentVisibility != hidden
            // Note: theres also an automated test that places the 'out of stock' text fairly low down
            if (element.getBoundingClientRect().top + window.scrollY >= vh + 250 || element.getBoundingClientRect().top + window.scrollY <= 100) {
                continue
            }
            elementText = "";
            if (element.tagName.toLowerCase() === "input") {
                elementText = element.value.toLowerCase().trim();
            } else {
                elementText = getElementBaseText(element);
            }

            if (elementText.length) {
                // and these mean its out of stock
                for (const outOfStockText of outOfStockTexts) {
                    if (elementText.includes(outOfStockText)) {
                        console.log(`Selected 'Out of Stock' - found text "${outOfStockText}" - "${elementText}" - offset top ${element.getBoundingClientRect().top}, page height is ${vh}`)
                        return outOfStockText; // item is out of stock
            if (elementText.length) {
                // and these mean its out of stock
                for (const outOfStockText of outOfStockTexts) {
                    if (elementText.includes(outOfStockText)) {
                        console.log(`Selected 'Out of Stock' - found text "${outOfStockText}" - "${elementText}" - offset top ${element.getBoundingClientRect().top}, page height is ${vh}`)
                        element.style.border = "2px solid red"; // highlight the element that was detected as out of stock
                        return outOfStockText; // item is out of stock
                    }
                }
            }
        }

        console.log(`Returning 'Possibly in stock' - cant' find any useful matching text`)
        return 'Possibly in stock'; // possibly in stock, cant decide otherwise.
    }

    // returns the element text that makes it think it's out of stock
    return isItemInStock().trim()
}
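The script's own @todo notes that the phrase list should eventually be shared with the non-JS fetchers. A sketch of what that server-side check could look like in Python (the helper is hypothetical and the phrase list is truncated for illustration):

    OUT_OF_STOCK_TEXTS = ['0 in stock', 'ausverkauft', 'out of stock', 'sold out']

    def find_out_of_stock_phrase(element_text):
        # Mirrors the JS side: lowercase, trim, then substring match against the list
        t = element_text.lower().strip()
        return next((p for p in OUT_OF_STOCK_TEXTS if p in t), None)

    print(find_out_of_stock_phrase("Sorry - SOLD OUT"))  # 'sold out'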
@@ -1,282 +1,284 @@
// Copyright (C) 2021 Leigh Morresi (dgtlmoon@gmail.com)
// All rights reserved.
async (options) => {

    // @file Scrape the page looking for elements of concern (%ELEMENTS%)
    // http://matatk.agrip.org.uk/tests/position-and-width/
    // https://stackoverflow.com/questions/26813480/when-is-element-getboundingclientrect-guaranteed-to-be-updated-accurate
    //
    // Some pages like https://www.londonstockexchange.com/stock/NCCL/ncondezi-energy-limited/analysis
    // will automatically force a scroll somewhere, so include the position offset
    // Lets hope the position doesnt change while we iterate the bbox's, but this is better than nothing
    var scroll_y = 0;
    try {
        scroll_y = +document.documentElement.scrollTop || document.body.scrollTop
    } catch (e) {
        console.log(e);
    }
    let visualselector_xpath_selectors = options.visualselector_xpath_selectors
    let max_height = options.max_height


    // Include the getXpath script directly, easier than fetching
    function getxpath(e) {
        var n = e;
        if (n && n.id) return '//*[@id="' + n.id + '"]';
        for (var o = []; n && Node.ELEMENT_NODE === n.nodeType;) {
            for (var i = 0, r = !1, d = n.previousSibling; d;) d.nodeType !== Node.DOCUMENT_TYPE_NODE && d.nodeName === n.nodeName && i++, d = d.previousSibling;
            for (d = n.nextSibling; d;) {
                if (d.nodeName === n.nodeName) {
                    r = !0;
                    break
                }
                d = d.nextSibling
            }
            o.push((n.prefix ? n.prefix + ":" : "") + n.localName + (i || r ? "[" + (i + 1) + "]" : "")), n = n.parentNode
        }
        return o.length ? "/" + o.reverse().join("/") : ""
    }

    const findUpTag = (el) => {
        let r = el
        chained_css = [];
        depth = 0;

        // Strategy 1: If it's an input, with name, and there's only one, prefer that
        if (el.name !== undefined && el.name.length) {
            var proposed = el.tagName + "[name=" + el.name + "]";
            var proposed_element = window.document.querySelectorAll(proposed);
            if (proposed_element.length) {
                if (proposed_element.length === 1) {
                    return proposed;
                } else {
                    // Some sites change ID but name= stays the same, we can hit it if we know the index
                    // Find all the elements that match and work out the input[n]
                    var n = Array.from(proposed_element).indexOf(el);
                    // Return a Playwright selector for nthinput[name=zipcode]
                    return proposed + " >> nth=" + n;
                }
            }
        }

        // Strategy 2: Keep going up until we hit an ID tag, imagine it's like #list-widget div h4
        while (r.parentNode) {
            if (depth === 5) {
                break;
            }
            if ('' !== r.id) {
                chained_css.unshift("#" + CSS.escape(r.id));
                final_selector = chained_css.join(' > ');
                // Be sure theres only one, some sites have multiples of the same ID tag :-(
                if (window.document.querySelectorAll(final_selector).length === 1) {
                    return final_selector;
                }
                return null;
            } else {
                chained_css.unshift(r.tagName.toLowerCase());
            }
            r = r.parentNode;
            depth += 1;
        }
        return null;
    }


    // @todo - if it's SVG or IMG, go into image diff mode
    // %ELEMENTS% replaced at injection time because different interfaces use it with different settings

    var size_pos = [];
    // after page fetch, inject this JS
    // build a map of all elements and their positions (maybe that only include text?)
    var bbox;
    console.log("Scanning %ELEMENTS%");

    function collectVisibleElements(parent, visibleElements) {
        if (!parent) return; // Base case: if parent is null or undefined, return


        // Add the parent itself to the visible elements array if it's of the specified types
        const tagName = parent.tagName.toLowerCase();
        if ("%ELEMENTS%".split(',').includes(tagName)) {
            visibleElements.push(parent);
        }

        // Iterate over the parent's children
        const children = parent.children;
        for (let i = 0; i < children.length; i++) {
            const child = children[i];
            if (
                child.nodeType === Node.ELEMENT_NODE &&
                window.getComputedStyle(child).display !== 'none' &&
                window.getComputedStyle(child).visibility !== 'hidden' &&
                child.offsetWidth >= 0 &&
                child.offsetHeight >= 0 &&
                window.getComputedStyle(child).contentVisibility !== 'hidden'
            ) {
                // If the child is an element and is visible, recursively collect visible elements
                collectVisibleElements(child, visibleElements);
            }
        }
    }

    // Create an array to hold the visible elements
    const visibleElementsArray = [];

    // Call collectVisibleElements with the starting parent element
    collectVisibleElements(document.body, visibleElementsArray);


    visibleElementsArray.forEach(function (element) {

        bbox = element.getBoundingClientRect();

        // Skip really small ones, and where width or height ==0
        if (bbox['width'] * bbox['height'] < 10) {
            return
        }

        // Don't include elements that are offset from canvas
        if (bbox['top'] + scroll_y < 0 || bbox['left'] < 0) {
            return
        }

        // @todo the getXpath kind of sucks, it doesnt know when there is for example just one ID sometimes
        // it should not traverse when we know we can anchor off just an ID one level up etc..
        // maybe, get current class or id, keep traversing up looking for only class or id until there is just one match

        // 1st primitive - if it has class, try joining it all and select, if theres only one.. well thats us.
        xpath_result = false;
    var scroll_y = 0;
        try {
            var d = findUpTag(element);
            if (d) {
                xpath_result = d;
            }
        scroll_y = +document.documentElement.scrollTop || document.body.scrollTop
        } catch (e) {
            console.log(e);
        }
        // You could swap it and default to getXpath and then try the smarter one
        // default back to the less intelligent one
        if (!xpath_result) {
            try {
                // I've seen on FB and eBay that this doesnt work
                // ReferenceError: getXPath is not defined at eval (eval at evaluate (:152:29), <anonymous>:67:20) at UtilityScript.evaluate (<anonymous>:159:18) at UtilityScript.<anonymous> (<anonymous>:1:44)
                xpath_result = getxpath(element);
            } catch (e) {
                console.log(e);
                return

    // Include the getXpath script directly, easier than fetching
    function getxpath(e) {
        var n = e;
        if (n && n.id) return '//*[@id="' + n.id + '"]';
        for (var o = []; n && Node.ELEMENT_NODE === n.nodeType;) {
            for (var i = 0, r = !1, d = n.previousSibling; d;) d.nodeType !== Node.DOCUMENT_TYPE_NODE && d.nodeName === n.nodeName && i++, d = d.previousSibling;
            for (d = n.nextSibling; d;) {
                if (d.nodeName === n.nodeName) {
                    r = !0;
                    break
                }
                d = d.nextSibling
            }
            o.push((n.prefix ? n.prefix + ":" : "") + n.localName + (i || r ? "[" + (i + 1) + "]" : "")), n = n.parentNode
        }
        return o.length ? "/" + o.reverse().join("/") : ""
    }

    const findUpTag = (el) => {
        let r = el
        chained_css = [];
        depth = 0;

        // Strategy 1: If it's an input, with name, and there's only one, prefer that
        if (el.name !== undefined && el.name.length) {
            var proposed = el.tagName + "[name=\"" + CSS.escape(el.name) + "\"]";
            var proposed_element = window.document.querySelectorAll(proposed);
            if (proposed_element.length) {
                if (proposed_element.length === 1) {
                    return proposed;
                } else {
                    // Some sites change ID but name= stays the same, we can hit it if we know the index
                    // Find all the elements that match and work out the input[n]
                    var n = Array.from(proposed_element).indexOf(el);
                    // Return a Playwright selector for nthinput[name=zipcode]
                    return proposed + " >> nth=" + n;
                }
            }
        }

        // Strategy 2: Keep going up until we hit an ID tag, imagine it's like #list-widget div h4
        while (r.parentNode) {
            if (depth === 5) {
                break;
            }
            if ('' !== r.id) {
                chained_css.unshift("#" + CSS.escape(r.id));
                final_selector = chained_css.join(' > ');
                // Be sure theres only one, some sites have multiples of the same ID tag :-(
                if (window.document.querySelectorAll(final_selector).length === 1) {
                    return final_selector;
                }
                return null;
            } else {
                chained_css.unshift(r.tagName.toLowerCase());
            }
            r = r.parentNode;
            depth += 1;
        }
        return null;
    }


    // @todo - if it's SVG or IMG, go into image diff mode

    var size_pos = [];
    // after page fetch, inject this JS
    // build a map of all elements and their positions (maybe that only include text?)
    var bbox;
    console.log(`Scanning for "${visualselector_xpath_selectors}"`);

    function collectVisibleElements(parent, visibleElements) {
        if (!parent) return; // Base case: if parent is null or undefined, return


        // Add the parent itself to the visible elements array if it's of the specified types
        const tagName = parent.tagName.toLowerCase();
        if (visualselector_xpath_selectors.split(',').includes(tagName)) {
            visibleElements.push(parent);
        }

        // Iterate over the parent's children
        const children = parent.children;
        for (let i = 0; i < children.length; i++) {
            const child = children[i];
            const computedStyle = window.getComputedStyle(child);

            if (
                child.nodeType === Node.ELEMENT_NODE &&
                computedStyle.display !== 'none' &&
                computedStyle.visibility !== 'hidden' &&
                child.offsetWidth >= 0 &&
                child.offsetHeight >= 0 &&
                computedStyle.contentVisibility !== 'hidden'
            ) {
                // If the child is an element and is visible, recursively collect visible elements
                collectVisibleElements(child, visibleElements);
            }
        }
    }

        let label = "not-interesting" // A placeholder, the actual labels for training are done by hand for now
    // Create an array to hold the visible elements
    const visibleElementsArray = [];

        let text = element.textContent.trim().slice(0, 30).trim();
        while (/\n{2,}|\t{2,}/.test(text)) {
            text = text.replace(/\n{2,}/g, '\n').replace(/\t{2,}/g, '\t')
        }
    // Call collectVisibleElements with the starting parent element
    collectVisibleElements(document.body, visibleElementsArray);

        // Try to identify any possible currency amounts "Sale: 4000" or "Sale now 3000 Kc", can help with the training.
        const hasDigitCurrency = (/\d/.test(text.slice(0, 6)) || /\d/.test(text.slice(-6)) ) && /([€£$¥₩₹]|USD|AUD|EUR|Kč|kr|SEK|,–)/.test(text) ;

        size_pos.push({
            xpath: xpath_result,
            width: Math.round(bbox['width']),
            height: Math.round(bbox['height']),
            left: Math.floor(bbox['left']),
            top: Math.floor(bbox['top']) + scroll_y,
            // tagName used by Browser Steps
            tagName: (element.tagName) ? element.tagName.toLowerCase() : '',
            // tagtype used by Browser Steps
            tagtype: (element.tagName.toLowerCase() === 'input' && element.type) ? element.type.toLowerCase() : '',
            isClickable: window.getComputedStyle(element).cursor === "pointer",
            // Used by the keras trainer
            fontSize: window.getComputedStyle(element).getPropertyValue('font-size'),
            fontWeight: window.getComputedStyle(element).getPropertyValue('font-weight'),
            hasDigitCurrency: hasDigitCurrency,
            label: label,
    visibleElementsArray.forEach(function (element) {

        bbox = element.getBoundingClientRect();

        // Skip really small ones, and where width or height ==0
        if (bbox['width'] * bbox['height'] < 10) {
            return
        }

        // Don't include elements that are offset from canvas
        if (bbox['top'] + scroll_y < 0 || bbox['left'] < 0) {
            return
        }

        // @todo the getXpath kind of sucks, it doesnt know when there is for example just one ID sometimes
        // it should not traverse when we know we can anchor off just an ID one level up etc..
        // maybe, get current class or id, keep traversing up looking for only class or id until there is just one match

        // 1st primitive - if it has class, try joining it all and select, if theres only one.. well thats us.
        xpath_result = false;
        try {
            var d = findUpTag(element);
            if (d) {
                xpath_result = d;
            }
        } catch (e) {
            console.log(e);
        }
        // You could swap it and default to getXpath and then try the smarter one
        // default back to the less intelligent one
        if (!xpath_result) {
            try {
                // I've seen on FB and eBay that this doesnt work
                // ReferenceError: getXPath is not defined at eval (eval at evaluate (:152:29), <anonymous>:67:20) at UtilityScript.evaluate (<anonymous>:159:18) at UtilityScript.<anonymous> (<anonymous>:1:44)
                xpath_result = getxpath(element);
            } catch (e) {
                console.log(e);
                return
            }
        }

        let label = "not-interesting" // A placeholder, the actual labels for training are done by hand for now

        let text = element.textContent.trim().slice(0, 30).trim();
        while (/\n{2,}|\t{2,}/.test(text)) {
            text = text.replace(/\n{2,}/g, '\n').replace(/\t{2,}/g, '\t')
        }

        // Try to identify any possible currency amounts "Sale: 4000" or "Sale now 3000 Kc", can help with the training.
        const hasDigitCurrency = (/\d/.test(text.slice(0, 6)) || /\d/.test(text.slice(-6))) && /([€£$¥₩₹]|USD|AUD|EUR|Kč|kr|SEK|,–)/.test(text);
        const computedStyle = window.getComputedStyle(element);

        if (Math.floor(bbox['top']) + scroll_y > max_height) {
            return
        }

        size_pos.push({
            xpath: xpath_result,
            width: Math.round(bbox['width']),
            height: Math.round(bbox['height']),
            left: Math.floor(bbox['left']),
            top: Math.floor(bbox['top']) + scroll_y,
            // tagName used by Browser Steps
            tagName: (element.tagName) ? element.tagName.toLowerCase() : '',
            // tagtype used by Browser Steps
            tagtype: (element.tagName.toLowerCase() === 'input' && element.type) ? element.type.toLowerCase() : '',
            isClickable: computedStyle.cursor === "pointer",
            // Used by the keras trainer
            fontSize: computedStyle.getPropertyValue('font-size'),
            fontWeight: computedStyle.getPropertyValue('font-weight'),
            hasDigitCurrency: hasDigitCurrency,
            label: label,
        });

    });

    });


    // Inject the current one set in the include_filters, which may be a CSS rule
    // used for displaying the current one in VisualSelector, where its not one we generated.
    if (include_filters.length) {
        let results;
        // Foreach filter, go and find it on the page and add it to the results so we can visualise it again
        for (const f of include_filters) {
            bbox = false;
            q = false;
    if (include_filters.length) {
        let results;
        // Foreach filter, go and find it on the page and add it to the results so we can visualise it again
        for (const f of include_filters) {
            bbox = false;

            if (!f.length) {
                console.log("xpath_element_scraper: Empty filter, skipping");
                continue;
            }

            try {
                // is it xpath?
                if (f.startsWith('/') || f.startsWith('xpath')) {
                    var qry_f = f.replace(/xpath(:|\d:)/, '')
                    console.log("[xpath] Scanning for included filter " + qry_f)
                    let xpathResult = document.evaluate(qry_f, document, null, XPathResult.ORDERED_NODE_SNAPSHOT_TYPE, null);
                    results = [];
                    for (let i = 0; i < xpathResult.snapshotLength; i++) {
                        results.push(xpathResult.snapshotItem(i));
                    }
                } else {
                    console.log("[css] Scanning for included filter " + f)
                    console.log("[css] Scanning for included filter " + f);
                    results = document.querySelectorAll(f);
            if (!f.length) {
                console.log("xpath_element_scraper: Empty filter, skipping");
                continue;
            }
            } catch (e) {
                // Maybe catch DOMException and alert?
                console.log("xpath_element_scraper: Exception selecting element from filter " + f);
                console.log(e);
            }

            if (results != null && results.length) {

                // Iterate over the results
                results.forEach(node => {
                    // Try to resolve //something/text() back to its /something so we can atleast get the bounding box
                    try {
                        if (typeof node.nodeName == 'string' && node.nodeName === '#text') {
                            node = node.parentElement
            try {
                // is it xpath?
                if (f.startsWith('/') || f.startsWith('xpath')) {
                    var qry_f = f.replace(/xpath(:|\d:)/, '')
                    console.log("[xpath] Scanning for included filter " + qry_f)
                    let xpathResult = document.evaluate(qry_f, document, null, XPathResult.ORDERED_NODE_SNAPSHOT_TYPE, null);
                    results = [];
                    for (let i = 0; i < xpathResult.snapshotLength; i++) {
                        results.push(xpathResult.snapshotItem(i));
                    }
                    } catch (e) {
                        console.log(e)
                        console.log("xpath_element_scraper: #text resolver")
                    }

                    // #1231 - IN the case XPath attribute filter is applied, we will have to traverse up and find the element.
                    if (typeof node.getBoundingClientRect == 'function') {
                        bbox = node.getBoundingClientRect();
                        console.log("xpath_element_scraper: Got filter element, scroll from top was " + scroll_y)
                    } else {
                } else {
                    console.log("[css] Scanning for included filter " + f)
                    console.log("[css] Scanning for included filter " + f);
                    results = document.querySelectorAll(f);
                }
            } catch (e) {
                // Maybe catch DOMException and alert?
                console.log("xpath_element_scraper: Exception selecting element from filter " + f);
                console.log(e);
            }

            if (results != null && results.length) {

                // Iterate over the results
                results.forEach(node => {
                    // Try to resolve //something/text() back to its /something so we can atleast get the bounding box
                    try {
                        // Try and see we can find its ownerElement
                        bbox = node.ownerElement.getBoundingClientRect();
                        console.log("xpath_element_scraper: Got filter by ownerElement element, scroll from top was " + scroll_y)
                        if (typeof node.nodeName == 'string' && node.nodeName === '#text') {
                            node = node.parentElement
                        }
                    } catch (e) {
                        console.log(e)
                        console.log("xpath_element_scraper: error looking up q.ownerElement")
                        console.log("xpath_element_scraper: #text resolver")
                    }

                    if (bbox && bbox['width'] > 0 && bbox['height'] > 0) {
                        size_pos.push({
                            xpath: f,
                            width: parseInt(bbox['width']),
                            height: parseInt(bbox['height']),
                            left: parseInt(bbox['left']),
                            top: parseInt(bbox['top']) + scroll_y,
                            highlight_as_custom_filter: true
                        });
                    }
                });
                    // #1231 - IN the case XPath attribute filter is applied, we will have to traverse up and find the element.
                    if (typeof node.getBoundingClientRect == 'function') {
                        bbox = node.getBoundingClientRect();
                        console.log("xpath_element_scraper: Got filter element, scroll from top was " + scroll_y)
                    } else {
                        try {
                            // Try and see we can find its ownerElement
                            bbox = node.ownerElement.getBoundingClientRect();
                            console.log("xpath_element_scraper: Got filter by ownerElement element, scroll from top was " + scroll_y)
                        } catch (e) {
                            console.log(e)
                            console.log("xpath_element_scraper: error looking up node.ownerElement")
                        }
                    }

                    if (bbox && bbox['width'] > 0 && bbox['height'] > 0) {
                        size_pos.push({
                            xpath: f,
                            width: parseInt(bbox['width']),
                            height: parseInt(bbox['height']),
                            left: parseInt(bbox['left']),
                            top: parseInt(bbox['top']) + scroll_y,
                            highlight_as_custom_filter: true
                        });
                    }
                });
            }
        }
    }
}

    // Sort the elements so we find the smallest one first, in other words, we find the smallest one matching in that area
    // so that we dont select the wrapping element by mistake and be unable to select what we want
    size_pos.sort((a, b) => (a.width * a.height > b.width * b.height) ? 1 : -1)

    // browser_width required for proper scaling in the frontend
    // Return as a string to save playwright for juggling thousands of objects
    return JSON.stringify({'size_pos': size_pos, 'browser_width': window.innerWidth});
}

// Window.width required for proper scaling in the frontend
return {'size_pos': size_pos, 'browser_width': window.innerWidth};
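A notable change in this hunk: the scraper now returns JSON.stringify(...) instead of a raw object, so the browser driver marshals one string rather than thousands of element records. The Python side then has to decode it; a sketch (the scaling step is an assumption based on the "browser_width required for proper scaling" comment):

    import json

    def decode_size_pos(raw, screenshot_width):
        data = json.loads(raw)
        # Scale element coordinates from the browser viewport to the rendered screenshot
        scale = screenshot_width / data['browser_width']
        return [
            {**el,
             'left': round(el['left'] * scale), 'top': round(el['top'] * scale),
             'width': round(el['width'] * scale), 'height': round(el['height'] * scale)}
            for el in data['size_pos']
        ]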
changedetectionio/content_fetchers/screenshot_handler.py (new file, 73 lines)
@@ -0,0 +1,73 @@
# Pages with a vertical height longer than this will use the 'stitch together' method.

# - Many GPUs have a max texture size of 16384x16384px (or lower on older devices).
# - If a page is taller than ~8000–10000px, it risks exceeding GPU memory limits.
# - This is especially important on headless Chromium, where Playwright may fail to allocate a massive full-page buffer.

from loguru import logger

from changedetectionio.content_fetchers import SCREENSHOT_MAX_HEIGHT_DEFAULT, SCREENSHOT_DEFAULT_QUALITY


def stitch_images_worker(pipe_conn, chunks_bytes, original_page_height, capture_height):
    import os
    import io
    from PIL import Image, ImageDraw, ImageFont

    try:

        # Load images from byte chunks
        images = [Image.open(io.BytesIO(b)) for b in chunks_bytes]
        total_height = sum(im.height for im in images)
        max_width = max(im.width for im in images)

        # Create stitched image
        stitched = Image.new('RGB', (max_width, total_height))
        y_offset = 0
        for im in images:
            stitched.paste(im, (0, y_offset))
            y_offset += im.height

        # Draw caption on top (overlaid, not extending canvas)
        draw = ImageDraw.Draw(stitched)

        if original_page_height > capture_height:
            caption_text = f"WARNING: Screenshot was {original_page_height}px but trimmed to {capture_height}px because it was too long"
            padding = 10
            font_size = 35
            font_color = (255, 0, 0)
            background_color = (255, 255, 255)


            # Try to load a proper font
            try:
                font = ImageFont.truetype("arial.ttf", font_size)
            except IOError:
                font = ImageFont.load_default()

            bbox = draw.textbbox((0, 0), caption_text, font=font)
            text_width = bbox[2] - bbox[0]
            text_height = bbox[3] - bbox[1]

            # Draw white rectangle background behind text
            rect_top = 0
            rect_bottom = text_height + 2 * padding
            draw.rectangle([(0, rect_top), (max_width, rect_bottom)], fill=background_color)

            # Draw text centered horizontally, 10px padding from top of the rectangle
            text_x = (max_width - text_width) // 2
            text_y = padding
            draw.text((text_x, text_y), caption_text, font=font, fill=font_color)

        # Encode and send image
        output = io.BytesIO()
        stitched.save(output, format="JPEG", quality=int(os.getenv("SCREENSHOT_QUALITY", SCREENSHOT_DEFAULT_QUALITY)))
        pipe_conn.send_bytes(output.getvalue())

        stitched.close()
    except Exception as e:
        pipe_conn.send(f"error:{e}")
    finally:
        pipe_conn.close()
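stitch_images_worker is written to run in a child process and report back over a multiprocessing pipe: stitched JPEG bytes on success, an "error:..." string on failure. A usage sketch (the chunk filenames and page heights here are assumptions):

    from multiprocessing import Pipe, Process

    from changedetectionio.content_fetchers.screenshot_handler import stitch_images_worker

    parent_conn, child_conn = Pipe()
    chunks = [open(p, 'rb').read() for p in ('chunk-0.jpg', 'chunk-1.jpg')]

    # Original page was 20000px tall, only the first 16000px were captured
    p = Process(target=stitch_images_worker, args=(child_conn, chunks, 20000, 16000))
    p.start()
    stitched = parent_conn.recv_bytes()  # on failure the worker sends an "error:..." string instead
    p.join()

    with open('screenshot.jpg', 'wb') as f:
        f.write(stitched)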
@@ -4,22 +4,20 @@ import time
from loguru import logger
from changedetectionio.content_fetchers.base import Fetcher


class fetcher(Fetcher):
    if os.getenv("WEBDRIVER_URL"):
        fetcher_description = "WebDriver Chrome/Javascript via '{}'".format(os.getenv("WEBDRIVER_URL"))
        fetcher_description = f"WebDriver Chrome/Javascript via \"{os.getenv('WEBDRIVER_URL', '')}\""
    else:
        fetcher_description = "WebDriver Chrome/Javascript"

    # Configs for Proxy setup
    # In the ENV vars, is prefixed with "webdriver_", so it is for example "webdriver_sslProxy"
    selenium_proxy_settings_mappings = ['proxyType', 'ftpProxy', 'httpProxy', 'noProxy',
                                        'proxyAutoconfigUrl', 'sslProxy', 'autodetect',
                                        'socksProxy', 'socksVersion', 'socksUsername', 'socksPassword']
    proxy = None
    proxy_url = None

    def __init__(self, proxy_override=None, custom_browser_connection_url=None):
        super().__init__()
        from selenium.webdriver.common.proxy import Proxy as SeleniumProxy
        from urllib.parse import urlparse
        from selenium.webdriver.common.proxy import Proxy

        # .strip('"') is going to save someone a lot of time when they accidently wrap the env value
        if not custom_browser_connection_url:
@@ -28,93 +26,118 @@ class fetcher(Fetcher):
            self.browser_connection_is_custom = True
            self.browser_connection_url = custom_browser_connection_url

        # If any proxy settings are enabled, then we should setup the proxy object
        proxy_args = {}
        for k in self.selenium_proxy_settings_mappings:
            v = os.getenv('webdriver_' + k, False)
            if v:
                proxy_args[k] = v.strip('"')
        ##### PROXY SETUP #####

        # Map back standard HTTP_ and HTTPS_PROXY to webDriver httpProxy/sslProxy
        if not proxy_args.get('webdriver_httpProxy') and self.system_http_proxy:
            proxy_args['httpProxy'] = self.system_http_proxy
        if not proxy_args.get('webdriver_sslProxy') and self.system_https_proxy:
            proxy_args['httpsProxy'] = self.system_https_proxy
        proxy_sources = [
            self.system_http_proxy,
            self.system_https_proxy,
            os.getenv('webdriver_proxySocks'),
            os.getenv('webdriver_socksProxy'),
            os.getenv('webdriver_proxyHttp'),
            os.getenv('webdriver_httpProxy'),
            os.getenv('webdriver_proxyHttps'),
            os.getenv('webdriver_httpsProxy'),
            os.getenv('webdriver_sslProxy'),
            proxy_override,  # last one should override
        ]
        # The built in selenium proxy handling is super unreliable!!! so we just grab which ever proxy setting we can find and throw it in --proxy-server=
        for k in filter(None, proxy_sources):
            if not k:
                continue
            self.proxy_url = k.strip()

        # Allows override the proxy on a per-request basis
        if proxy_override is not None:
            proxy_args['httpProxy'] = proxy_override
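The rewritten proxy setup abandons Selenium's Proxy object in favour of a plain precedence scan: each candidate is visited in order and every non-empty value overwrites the previous one, so later sources (ending with the per-request override) win. The same logic isolated as a sketch:

    import os

    def resolve_proxy_url(system_http_proxy=None, system_https_proxy=None, proxy_override=None):
        proxy_sources = [
            system_http_proxy,
            system_https_proxy,
            os.getenv('webdriver_socksProxy'),
            os.getenv('webdriver_httpProxy'),
            os.getenv('webdriver_sslProxy'),
            proxy_override,  # last one should override
        ]
        proxy_url = None
        for k in filter(None, proxy_sources):
            proxy_url = k.strip()
        return proxy_url

    # The per-request override beats any environment setting
    print(resolve_proxy_url(system_http_proxy='http://proxy-a:3128', proxy_override='socks5://proxy-b:1080'))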
    async def run(self,
                  fetch_favicon=True,
                  current_include_filters=None,
                  empty_pages_are_a_change=False,
                  ignore_status_codes=False,
                  is_binary=False,
                  request_body=None,
                  request_headers=None,
                  request_method=None,
                  timeout=None,
                  url=None,
                  ):

        if proxy_args:
            self.proxy = SeleniumProxy(raw=proxy_args)
        import asyncio

    def run(self,
            url,
            timeout,
            request_headers,
            request_body,
            request_method,
            ignore_status_codes=False,
            current_include_filters=None,
            is_binary=False,
            empty_pages_are_a_change=False):
        # Wrap the entire selenium operation in a thread executor
        def _run_sync():
            from selenium.webdriver.chrome.options import Options as ChromeOptions
            # request_body, request_method unused for now, until some magic in the future happens.

        from selenium import webdriver
        from selenium.webdriver.chrome.options import Options as ChromeOptions
        from selenium.common.exceptions import WebDriverException
            # request_body, request_method unused for now, until some magic in the future happens.
            options = ChromeOptions()

        options = ChromeOptions()
        if self.proxy:
            options.proxy = self.proxy
            # Load Chrome options from env
            CHROME_OPTIONS = [
                line.strip()
                for line in os.getenv("CHROME_OPTIONS", "").strip().splitlines()
                if line.strip()
            ]

        self.driver = webdriver.Remote(
            command_executor=self.browser_connection_url,
            options=options)
            for opt in CHROME_OPTIONS:
                options.add_argument(opt)

        try:
            self.driver.get(url)
        except WebDriverException as e:
            # Be sure we close the session window
            self.quit()
            raise
            # 1. proxy_config /Proxy(proxy_config) selenium object is REALLY unreliable
            # 2. selenium-wire cant be used because the websocket version conflicts with pypeteer-ng
            # 3. selenium only allows ONE runner at a time by default!
            # 4. driver must use quit() or it will continue to block/hold the selenium process!!

        self.driver.set_window_size(1280, 1024)
        self.driver.implicitly_wait(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)))
            if self.proxy_url:
                options.add_argument(f'--proxy-server={self.proxy_url}')

        if self.webdriver_js_execute_code is not None:
            self.driver.execute_script(self.webdriver_js_execute_code)
            # Selenium doesn't automatically wait for actions as good as Playwright, so wait again
            self.driver.implicitly_wait(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)))

        # @todo - how to check this? is it possible?
        self.status_code = 200
        # @todo somehow we should try to get this working for WebDriver
        # raise EmptyReply(url=url, status_code=r.status_code)

        # @todo - dom wait loaded?
        time.sleep(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay)
        self.content = self.driver.page_source
        self.headers = {}

        self.screenshot = self.driver.get_screenshot_as_png()

    # Does the connection to the webdriver work? run a test connection.
    def is_ready(self):
        from selenium import webdriver
        from selenium.webdriver.chrome.options import Options as ChromeOptions

        self.driver = webdriver.Remote(
            command_executor=self.command_executor,
            options=ChromeOptions())

        # driver.quit() seems to cause better exceptions
        self.quit()
        return True

    def quit(self):
        if self.driver:
            from selenium.webdriver.remote.remote_connection import RemoteConnection
            from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
            driver = None
            try:
                self.driver.quit()
                # Create the RemoteConnection and set timeout (e.g., 30 seconds)
                remote_connection = RemoteConnection(
                    self.browser_connection_url,
                )
                remote_connection.set_timeout(30)  # seconds

                # Now create the driver with the RemoteConnection
                driver = RemoteWebDriver(
                    command_executor=remote_connection,
                    options=options
                )

                driver.set_page_load_timeout(int(os.getenv("WEBDRIVER_PAGELOAD_TIMEOUT", 45)))
            except Exception as e:
                logger.debug(f"Content Fetcher > Exception in chrome shutdown/quit {str(e)}")
                if driver:
                    driver.quit()
                raise e

            try:
driver.get(url)
|
||||
|
||||
if not "--window-size" in os.getenv("CHROME_OPTIONS", ""):
|
||||
driver.set_window_size(1280, 1024)
|
||||
|
||||
driver.implicitly_wait(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)))
|
||||
|
||||
if self.webdriver_js_execute_code is not None:
|
||||
driver.execute_script(self.webdriver_js_execute_code)
|
||||
# Selenium doesn't automatically wait for actions as good as Playwright, so wait again
|
||||
driver.implicitly_wait(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)))
|
||||
|
||||
# @todo - how to check this? is it possible?
|
||||
self.status_code = 200
|
||||
# @todo somehow we should try to get this working for WebDriver
|
||||
# raise EmptyReply(url=url, status_code=r.status_code)
|
||||
|
||||
# @todo - dom wait loaded?
|
||||
import time
|
||||
time.sleep(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay)
|
||||
self.content = driver.page_source
|
||||
self.headers = {}
|
||||
self.screenshot = driver.get_screenshot_as_png()
|
||||
except Exception as e:
|
||||
driver.quit()
|
||||
raise e
|
||||
|
||||
driver.quit()
|
||||
|
||||
# Run the selenium operations in a thread pool to avoid blocking the event loop
|
||||
loop = asyncio.get_event_loop()
|
||||
await loop.run_in_executor(None, _run_sync)
|
||||
|
||||
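The numbered caveats above (an unreliable SeleniumProxy object, only one runner at a time, and a mandatory quit()) all point at one pattern: resolve whatever proxy URL is available and hand it straight to Chrome, bypassing Selenium's proxy plumbing. A minimal sketch of that approach under those assumptions; resolve_proxy_url() is a hypothetical helper and is not part of this codebase:

import os
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions

def resolve_proxy_url(proxy_override=None):
    # Later entries win, mirroring the "last one should override" ordering above.
    candidates = [
        os.getenv('HTTP_PROXY'),
        os.getenv('HTTPS_PROXY'),
        os.getenv('webdriver_httpProxy'),
        os.getenv('webdriver_sslProxy'),
        proxy_override,
    ]
    proxy_url = None
    for candidate in filter(None, candidates):
        proxy_url = candidate.strip()
    return proxy_url

options = ChromeOptions()
proxy_url = resolve_proxy_url()
if proxy_url:
    # Skip Selenium's Proxy object entirely; hand the URL straight to Chrome.
    options.add_argument(f'--proxy-server={proxy_url}')
# driver = webdriver.Remote(command_executor='http://localhost:4444', options=options)
# ...then driver.get(url), and always driver.quit(), per caveat 4 above.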
535 changedetectionio/custom_queue.py Normal file
@@ -0,0 +1,535 @@
import queue
import asyncio
from blinker import signal
from loguru import logger


class NotificationQueue(queue.Queue):
    """
    Extended Queue that sends a 'notification_event' signal when notifications are added.

    This class extends the standard Queue and adds a signal emission after a notification
    is put into the queue. The signal includes the watch UUID if available.
    """

    def __init__(self, maxsize=0):
        super().__init__(maxsize)
        try:
            self.notification_event_signal = signal('notification_event')
        except Exception as e:
            logger.critical(f"Exception creating notification_event signal: {e}")

    def put(self, item, block=True, timeout=None):
        # Call the parent's put method first
        super().put(item, block, timeout)

        # After putting the notification in the queue, emit signal with watch UUID
        try:
            if self.notification_event_signal and isinstance(item, dict):
                watch_uuid = item.get('uuid')
                if watch_uuid:
                    # Send the notification_event signal with the watch UUID
                    self.notification_event_signal.send(watch_uuid=watch_uuid)
                    logger.trace(f"NotificationQueue: Emitted notification_event signal for watch UUID {watch_uuid}")
                else:
                    # Send signal without UUID for system notifications
                    self.notification_event_signal.send()
                    logger.trace("NotificationQueue: Emitted notification_event signal for system notification")
        except Exception as e:
            logger.error(f"Exception emitting notification_event signal: {e}")

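Any component can react to these queue events by connecting to the same named blinker signal. A minimal sketch, assuming a NotificationQueue instance as defined above; the listener function itself is hypothetical:

from blinker import signal

# Any callable accepting (sender, **kwargs) works as a blinker receiver.
def on_notification_event(sender, watch_uuid=None, **kwargs):
    print(f"notification queued for watch: {watch_uuid}")

signal('notification_event').connect(on_notification_event)

q = NotificationQueue()
q.put({'uuid': 'abc-123'})  # fires the signal with watch_uuid='abc-123'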
class SignalPriorityQueue(queue.PriorityQueue):
    """
    Extended PriorityQueue that sends a signal when items with a UUID are added.

    This class extends the standard PriorityQueue and adds a signal emission
    after an item is put into the queue. If the item contains a UUID, the signal
    is sent with that UUID as a parameter.
    """

    def __init__(self, maxsize=0):
        super().__init__(maxsize)
        try:
            self.queue_length_signal = signal('queue_length')
        except Exception as e:
            logger.critical(f"Exception: {e}")

    def put(self, item, block=True, timeout=None):
        # Call the parent's put method first
        super().put(item, block, timeout)

        # After putting the item in the queue, check if it has a UUID and emit signal
        if hasattr(item, 'item') and isinstance(item.item, dict) and 'uuid' in item.item:
            uuid = item.item['uuid']
            # Get the signal and send it if it exists
            watch_check_update = signal('watch_check_update')
            if watch_check_update:
                # Send the watch_uuid parameter
                watch_check_update.send(watch_uuid=uuid)

        # Send queue_length signal with current queue size
        try:
            if self.queue_length_signal:
                self.queue_length_signal.send(length=self.qsize())
        except Exception as e:
            logger.critical(f"Exception: {e}")

    def get(self, block=True, timeout=None):
        # Call the parent's get method first
        item = super().get(block, timeout)

        # Send queue_length signal with current queue size
        try:
            if self.queue_length_signal:
                self.queue_length_signal.send(length=self.qsize())
        except Exception as e:
            logger.critical(f"Exception: {e}")
        return item

    def get_uuid_position(self, target_uuid):
        """
        Find the position of a watch UUID in the priority queue.
        Optimized for large queues - O(n) complexity instead of O(n log n).

        Args:
            target_uuid: The UUID to search for

        Returns:
            dict: Contains position info or None if not found
            - position: 0-based position in queue (0 = next to be processed)
            - total_items: total number of items in queue
            - priority: the priority value of the found item
        """
        with self.mutex:
            queue_list = list(self.queue)
            total_items = len(queue_list)

            if total_items == 0:
                return {
                    'position': None,
                    'total_items': 0,
                    'priority': None,
                    'found': False
                }

            # Find the target item and its priority first - O(n)
            target_item = None
            target_priority = None

            for item in queue_list:
                if (hasattr(item, 'item') and
                        isinstance(item.item, dict) and
                        item.item.get('uuid') == target_uuid):
                    target_item = item
                    target_priority = item.priority
                    break

            if target_item is None:
                return {
                    'position': None,
                    'total_items': total_items,
                    'priority': None,
                    'found': False
                }

            # Count how many items have higher priority (lower numbers) - O(n)
            position = 0
            for item in queue_list:
                # Items with lower priority numbers are processed first
                if item.priority < target_priority:
                    position += 1
                elif item.priority == target_priority and item != target_item:
                    # For same priority, count items that come before this one
                    # (Note: this is approximate since heap order isn't guaranteed for equal priorities)
                    position += 1

            return {
                'position': position,
                'total_items': total_items,
                'priority': target_priority,
                'found': True
            }

    def get_all_queued_uuids(self, limit=None, offset=0):
        """
        Get UUIDs currently in the queue with their positions.
        For large queues, use limit/offset for pagination.

        Args:
            limit: Maximum number of items to return (None = all)
            offset: Number of items to skip (for pagination)

        Returns:
            dict: Contains items and metadata
            - items: List of dicts with uuid, position, and priority
            - total_items: Total number of items in queue
            - returned_items: Number of items returned
            - has_more: Whether there are more items after this page
        """
        with self.mutex:
            queue_list = list(self.queue)
            total_items = len(queue_list)

            if total_items == 0:
                return {
                    'items': [],
                    'total_items': 0,
                    'returned_items': 0,
                    'has_more': False
                }

            # For very large queues, warn about performance
            if total_items > 1000 and limit is None:
                logger.warning(f"Getting all {total_items} queued items without limit - this may be slow")

            # Sort only if we need exact positions (expensive for large queues)
            if limit is not None and limit <= 100:
                # For small requests, we can afford to sort
                queue_items = sorted(queue_list)
                end_idx = min(offset + limit, len(queue_items)) if limit else len(queue_items)
                items_to_process = queue_items[offset:end_idx]

                result = []
                for position, item in enumerate(items_to_process, start=offset):
                    if (hasattr(item, 'item') and
                            isinstance(item.item, dict) and
                            'uuid' in item.item):

                        result.append({
                            'uuid': item.item['uuid'],
                            'position': position,
                            'priority': item.priority
                        })

                return {
                    'items': result,
                    'total_items': total_items,
                    'returned_items': len(result),
                    'has_more': (offset + len(result)) < total_items
                }
            else:
                # For large requests, return items with approximate positions
                # This is much faster O(n) instead of O(n log n)
                result = []
                processed = 0
                skipped = 0

                for item in queue_list:
                    if (hasattr(item, 'item') and
                            isinstance(item.item, dict) and
                            'uuid' in item.item):

                        if skipped < offset:
                            skipped += 1
                            continue

                        if limit and processed >= limit:
                            break

                        # Approximate position based on priority comparison
                        approx_position = sum(1 for other in queue_list if other.priority < item.priority)

                        result.append({
                            'uuid': item.item['uuid'],
                            'position': approx_position,  # Approximate
                            'priority': item.priority
                        })
                        processed += 1

                return {
                    'items': result,
                    'total_items': total_items,
                    'returned_items': len(result),
                    'has_more': (offset + len(result)) < total_items,
                    'note': 'Positions are approximate for performance with large queues'
                }

    def get_queue_summary(self):
        """
        Get a quick summary of queue state without expensive operations.
        O(n) complexity - fast even for large queues.

        Returns:
            dict: Queue summary statistics
        """
        with self.mutex:
            queue_list = list(self.queue)
            total_items = len(queue_list)

            if total_items == 0:
                return {
                    'total_items': 0,
                    'priority_breakdown': {},
                    'immediate_items': 0,
                    'clone_items': 0,
                    'scheduled_items': 0
                }

            # Count items by priority type - O(n)
            immediate_items = 0   # priority 1
            clone_items = 0       # priority 5
            scheduled_items = 0   # priority > 100 (timestamps)
            priority_counts = {}

            for item in queue_list:
                priority = item.priority
                priority_counts[priority] = priority_counts.get(priority, 0) + 1

                if priority == 1:
                    immediate_items += 1
                elif priority == 5:
                    clone_items += 1
                elif priority > 100:
                    scheduled_items += 1

            return {
                'total_items': total_items,
                'priority_breakdown': priority_counts,
                'immediate_items': immediate_items,
                'clone_items': clone_items,
                'scheduled_items': scheduled_items,
                'min_priority': min(priority_counts.keys()) if priority_counts else None,
                'max_priority': max(priority_counts.keys()) if priority_counts else None
            }

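To illustrate the priority semantics the summary method relies on (1 = immediate, 5 = clone, large timestamp-like values = scheduled), here is a hedged sketch; PrioritizedItem is a hypothetical stand-in for the project's real queued-item wrapper, which similarly exposes .priority and .item:

from dataclasses import dataclass, field

@dataclass(order=True)
class PrioritizedItem:
    priority: int
    item: dict = field(compare=False)  # payload excluded from heap ordering

q = SignalPriorityQueue()
q.put(PrioritizedItem(priority=1, item={'uuid': 'watch-a'}))           # immediate
q.put(PrioritizedItem(priority=5, item={'uuid': 'watch-b'}))           # clone
q.put(PrioritizedItem(priority=1700000000, item={'uuid': 'watch-c'}))  # scheduled (timestamp)

print(q.get_uuid_position('watch-b'))
# -> {'position': 1, 'total_items': 3, 'priority': 5, 'found': True}
print(q.get_queue_summary()['immediate_items'])  # -> 1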
class AsyncSignalPriorityQueue(asyncio.PriorityQueue):
    """
    Async version of SignalPriorityQueue that sends signals when items are added/removed.

    This class extends asyncio.PriorityQueue and maintains the same signal behavior
    as the synchronous version for real-time UI updates.
    """

    def __init__(self, maxsize=0):
        super().__init__(maxsize)
        try:
            self.queue_length_signal = signal('queue_length')
        except Exception as e:
            logger.critical(f"Exception: {e}")

    async def put(self, item):
        # Call the parent's put method first
        await super().put(item)

        # After putting the item in the queue, check if it has a UUID and emit signal
        if hasattr(item, 'item') and isinstance(item.item, dict) and 'uuid' in item.item:
            uuid = item.item['uuid']
            # Get the signal and send it if it exists
            watch_check_update = signal('watch_check_update')
            if watch_check_update:
                # Send the watch_uuid parameter
                watch_check_update.send(watch_uuid=uuid)

        # Send queue_length signal with current queue size
        try:
            if self.queue_length_signal:
                self.queue_length_signal.send(length=self.qsize())
        except Exception as e:
            logger.critical(f"Exception: {e}")

    async def get(self):
        # Call the parent's get method first
        item = await super().get()

        # Send queue_length signal with current queue size
        try:
            if self.queue_length_signal:
                self.queue_length_signal.send(length=self.qsize())
        except Exception as e:
            logger.critical(f"Exception: {e}")
        return item

    @property
    def queue(self):
        """
        Provide compatibility with sync PriorityQueue.queue access
        Returns the internal queue for template access
        """
        return self._queue if hasattr(self, '_queue') else []

    def get_uuid_position(self, target_uuid):
        """
        Find the position of a watch UUID in the async priority queue.
        Optimized for large queues - O(n) complexity instead of O(n log n).

        Args:
            target_uuid: The UUID to search for

        Returns:
            dict: Contains position info or None if not found
            - position: 0-based position in queue (0 = next to be processed)
            - total_items: total number of items in queue
            - priority: the priority value of the found item
        """
        queue_list = list(self._queue)
        total_items = len(queue_list)

        if total_items == 0:
            return {
                'position': None,
                'total_items': 0,
                'priority': None,
                'found': False
            }

        # Find the target item and its priority first - O(n)
        target_item = None
        target_priority = None

        for item in queue_list:
            if (hasattr(item, 'item') and
                    isinstance(item.item, dict) and
                    item.item.get('uuid') == target_uuid):
                target_item = item
                target_priority = item.priority
                break

        if target_item is None:
            return {
                'position': None,
                'total_items': total_items,
                'priority': None,
                'found': False
            }

        # Count how many items have higher priority (lower numbers) - O(n)
        position = 0
        for item in queue_list:
            if item.priority < target_priority:
                position += 1
            elif item.priority == target_priority and item != target_item:
                position += 1

        return {
            'position': position,
            'total_items': total_items,
            'priority': target_priority,
            'found': True
        }

    def get_all_queued_uuids(self, limit=None, offset=0):
        """
        Get UUIDs currently in the async queue with their positions.
        For large queues, use limit/offset for pagination.

        Args:
            limit: Maximum number of items to return (None = all)
            offset: Number of items to skip (for pagination)

        Returns:
            dict: Contains items and metadata (same structure as sync version)
        """
        queue_list = list(self._queue)
        total_items = len(queue_list)

        if total_items == 0:
            return {
                'items': [],
                'total_items': 0,
                'returned_items': 0,
                'has_more': False
            }

        # Same logic as sync version but without mutex
        if limit is not None and limit <= 100:
            queue_items = sorted(queue_list)
            end_idx = min(offset + limit, len(queue_items)) if limit else len(queue_items)
            items_to_process = queue_items[offset:end_idx]

            result = []
            for position, item in enumerate(items_to_process, start=offset):
                if (hasattr(item, 'item') and
                        isinstance(item.item, dict) and
                        'uuid' in item.item):

                    result.append({
                        'uuid': item.item['uuid'],
                        'position': position,
                        'priority': item.priority
                    })

            return {
                'items': result,
                'total_items': total_items,
                'returned_items': len(result),
                'has_more': (offset + len(result)) < total_items
            }
        else:
            # Fast approximate positions for large queues
            result = []
            processed = 0
            skipped = 0

            for item in queue_list:
                if (hasattr(item, 'item') and
                        isinstance(item.item, dict) and
                        'uuid' in item.item):

                    if skipped < offset:
                        skipped += 1
                        continue

                    if limit and processed >= limit:
                        break

                    approx_position = sum(1 for other in queue_list if other.priority < item.priority)

                    result.append({
                        'uuid': item.item['uuid'],
                        'position': approx_position,
                        'priority': item.priority
                    })
                    processed += 1

            return {
                'items': result,
                'total_items': total_items,
                'returned_items': len(result),
                'has_more': (offset + len(result)) < total_items,
                'note': 'Positions are approximate for performance with large queues'
            }

    def get_queue_summary(self):
        """
        Get a quick summary of async queue state.
        O(n) complexity - fast even for large queues.
        """
        queue_list = list(self._queue)
        total_items = len(queue_list)

        if total_items == 0:
            return {
                'total_items': 0,
                'priority_breakdown': {},
                'immediate_items': 0,
                'clone_items': 0,
                'scheduled_items': 0
            }

        immediate_items = 0
        clone_items = 0
        scheduled_items = 0
        priority_counts = {}

        for item in queue_list:
            priority = item.priority
            priority_counts[priority] = priority_counts.get(priority, 0) + 1

            if priority == 1:
                immediate_items += 1
            elif priority == 5:
                clone_items += 1
            elif priority > 100:
                scheduled_items += 1

        return {
            'total_items': total_items,
            'priority_breakdown': priority_counts,
            'immediate_items': immediate_items,
            'clone_items': clone_items,
            'scheduled_items': scheduled_items,
            'min_priority': min(priority_counts.keys()) if priority_counts else None,
            'max_priority': max(priority_counts.keys()) if priority_counts else None
        }
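Usage of the async variant mirrors the sync one but must run inside an event loop. A minimal sketch, reusing the hypothetical PrioritizedItem wrapper from the earlier example:

import asyncio

async def demo():
    q = AsyncSignalPriorityQueue()
    await q.put(PrioritizedItem(priority=1, item={'uuid': 'watch-a'}))
    print(q.get_uuid_position('watch-a')['position'])  # 0 - next to be processed
    nxt = await q.get()  # also emits the queue_length signal
    print(nxt.item['uuid'])  # watch-a

asyncio.run(demo())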
@@ -1,8 +1,32 @@
import difflib
from typing import List, Iterator, Union

REMOVED_STYLE = "background-color: #fadad7; color: #b30000;"
ADDED_STYLE = "background-color: #eaf2c2; color: #406619;"
# https://github.com/dgtlmoon/changedetection.io/issues/821#issuecomment-1241837050
#HTML_ADDED_STYLE = "background-color: #d2f7c2; color: #255d00;"
#HTML_CHANGED_INTO_STYLE = "background-color: #dafbe1; color: #116329;"
#HTML_CHANGED_STYLE = "background-color: #ffd6cc; color: #7a2000;"
#HTML_REMOVED_STYLE = "background-color: #ffebe9; color: #82071e;"

# @todo - In the future we can make this configurable
HTML_ADDED_STYLE = "background-color: #eaf2c2; color: #406619"
HTML_REMOVED_STYLE = "background-color: #fadad7; color: #b30000"
HTML_CHANGED_STYLE = HTML_REMOVED_STYLE
HTML_CHANGED_INTO_STYLE = HTML_ADDED_STYLE


# These get set to html or telegram type or discord compatible or whatever in handler.py
# Something that can't get escaped to HTML by accident
REMOVED_PLACEMARKER_OPEN = '@removed_PLACEMARKER_OPEN'
REMOVED_PLACEMARKER_CLOSED = '@removed_PLACEMARKER_CLOSED'

ADDED_PLACEMARKER_OPEN = '@added_PLACEMARKER_OPEN'
ADDED_PLACEMARKER_CLOSED = '@added_PLACEMARKER_CLOSED'

CHANGED_PLACEMARKER_OPEN = '@changed_PLACEMARKER_OPEN'
CHANGED_PLACEMARKER_CLOSED = '@changed_PLACEMARKER_CLOSED'

CHANGED_INTO_PLACEMARKER_OPEN = '@changed_into_PLACEMARKER_OPEN'
CHANGED_INTO_PLACEMARKER_CLOSED = '@changed_into_PLACEMARKER_CLOSED'

def same_slicer(lst: List[str], start: int, end: int) -> List[str]:
    """Return a slice of the list, or a single element if start == end."""
@@ -15,8 +39,7 @@ def customSequenceMatcher(
    include_removed: bool = True,
    include_added: bool = True,
    include_replaced: bool = True,
    include_change_type_prefix: bool = True,
    html_colour: bool = False
    include_change_type_prefix: bool = True
) -> Iterator[List[str]]:
    """
    Compare two sequences and yield differences based on specified parameters.
@@ -29,8 +52,6 @@ def customSequenceMatcher(
        include_added (bool): Include added parts
        include_replaced (bool): Include replaced parts
        include_change_type_prefix (bool): Add prefixes to indicate change types
        html_colour (bool): Use HTML background colors for differences

    Yields:
        List[str]: Differences between sequences
    """
@@ -42,22 +63,22 @@
        if include_equal and tag == 'equal':
            yield before[alo:ahi]
        elif include_removed and tag == 'delete':
            if html_colour:
                yield [f'<span style="{REMOVED_STYLE}">{line}</span>' for line in same_slicer(before, alo, ahi)]
            if include_change_type_prefix:
                yield [f'{REMOVED_PLACEMARKER_OPEN}{line}{REMOVED_PLACEMARKER_CLOSED}' for line in same_slicer(before, alo, ahi)]
            else:
                yield [f"(removed) {line}" for line in same_slicer(before, alo, ahi)] if include_change_type_prefix else same_slicer(before, alo, ahi)
                yield same_slicer(before, alo, ahi)
        elif include_replaced and tag == 'replace':
            if html_colour:
                yield [f'<span style="{REMOVED_STYLE}">{line}</span>' for line in same_slicer(before, alo, ahi)] + \
                      [f'<span style="{ADDED_STYLE}">{line}</span>' for line in same_slicer(after, blo, bhi)]
            if include_change_type_prefix:
                yield [f'{CHANGED_PLACEMARKER_OPEN}{line}{CHANGED_PLACEMARKER_CLOSED}' for line in same_slicer(before, alo, ahi)] + \
                      [f'{CHANGED_INTO_PLACEMARKER_OPEN}{line}{CHANGED_INTO_PLACEMARKER_CLOSED}' for line in same_slicer(after, blo, bhi)]
            else:
                yield [f"(changed) {line}" for line in same_slicer(before, alo, ahi)] + \
                      [f"(into) {line}" for line in same_slicer(after, blo, bhi)] if include_change_type_prefix else same_slicer(before, alo, ahi) + same_slicer(after, blo, bhi)
                yield same_slicer(before, alo, ahi) + same_slicer(after, blo, bhi)
        elif include_added and tag == 'insert':
            if html_colour:
                yield [f'<span style="{ADDED_STYLE}">{line}</span>' for line in same_slicer(after, blo, bhi)]
            if include_change_type_prefix:
                yield [f'{ADDED_PLACEMARKER_OPEN}{line}{ADDED_PLACEMARKER_CLOSED}' for line in same_slicer(after, blo, bhi)]
            else:
                yield [f"(added) {line}" for line in same_slicer(after, blo, bhi)] if include_change_type_prefix else same_slicer(after, blo, bhi)
                yield same_slicer(after, blo, bhi)


def render_diff(
    previous_version_file_contents: str,
@@ -68,8 +89,7 @@ def render_diff(
    include_replaced: bool = True,
    line_feed_sep: str = "\n",
    include_change_type_prefix: bool = True,
    patch_format: bool = False,
    html_colour: bool = False
    patch_format: bool = False
) -> str:
    """
    Render the difference between two file contents.
@@ -84,8 +104,6 @@ def render_diff(
        line_feed_sep (str): Separator for lines in output
        include_change_type_prefix (bool): Add prefixes to indicate change types
        patch_format (bool): Use patch format for output
        html_colour (bool): Use HTML background colors for differences

    Returns:
        str: Rendered difference
    """
@@ -103,8 +121,7 @@ def render_diff(
        include_removed=include_removed,
        include_added=include_added,
        include_replaced=include_replaced,
        include_change_type_prefix=include_change_type_prefix,
        html_colour=html_colour
        include_change_type_prefix=include_change_type_prefix
    )

def flatten(lst: List[Union[str, List[str]]]) -> str:
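The placemarker constants above let the diff be computed once and rendered later per output format, as the handler.py comment suggests. A hedged sketch of that two-phase idea using difflib opcodes directly (illustrative only, not the project's exact handler code):

import difflib

before = ["price: 10 GBP", "in stock"]
after = ["price: 12 GBP", "in stock"]

# Phase 1: compute the diff once, tagging changes with neutral placemarkers.
marked = []
sm = difflib.SequenceMatcher(isjunk=False, a=before, b=after)
for tag, alo, ahi, blo, bhi in sm.get_opcodes():
    if tag == 'equal':
        marked.extend(before[alo:ahi])
    elif tag == 'replace':
        marked.extend(f'{CHANGED_PLACEMARKER_OPEN}{line}{CHANGED_PLACEMARKER_CLOSED}' for line in before[alo:ahi])
        marked.extend(f'{CHANGED_INTO_PLACEMARKER_OPEN}{line}{CHANGED_INTO_PLACEMARKER_CLOSED}' for line in after[blo:bhi])

# Phase 2: swap the placemarkers for one concrete output format, e.g. inline-styled HTML.
html = "\n".join(marked) \
    .replace(CHANGED_PLACEMARKER_OPEN, f'<span style="{HTML_CHANGED_STYLE}">') \
    .replace(CHANGED_PLACEMARKER_CLOSED, '</span>') \
    .replace(CHANGED_INTO_PLACEMARKER_OPEN, f'<span style="{HTML_CHANGED_INTO_STYLE}">') \
    .replace(CHANGED_INTO_PLACEMARKER_CLOSED, '</span>')
print(html)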
File diff suppressed because it is too large
@@ -3,6 +3,9 @@ import re
from loguru import logger
from wtforms.widgets.core import TimeInput

from changedetectionio.blueprint.rss import RSS_FORMAT_TYPES
from changedetectionio.conditions.form import ConditionFormRow
from changedetectionio.notification_service import NotificationContextData
from changedetectionio.strtobool import strtobool

from wtforms import (
@@ -21,11 +24,11 @@ from wtforms import (
)
from flask_wtf.file import FileField, FileAllowed
from wtforms.fields import FieldList
from wtforms.utils import unset_value

from wtforms.validators import ValidationError

from validators.url import url as url_validator

from changedetectionio.widgets import TernaryNoneBooleanField

# default
# each select <option data-enabled="enabled-0-0"
@@ -52,6 +55,8 @@ valid_method = {

default_method = 'GET'
allow_simplehost = not strtobool(os.getenv('BLOCK_SIMPLEHOSTS', 'False'))
REQUIRE_ATLEAST_ONE_TIME_PART_MESSAGE_DEFAULT = 'At least one time interval (weeks, days, hours, minutes, or seconds) must be specified.'
REQUIRE_ATLEAST_ONE_TIME_PART_WHEN_NOT_GLOBAL_DEFAULT = 'At least one time interval (weeks, days, hours, minutes, or seconds) must be specified when not using global settings.'

class StringListField(StringField):
    widget = widgets.TextArea()
@@ -208,6 +213,35 @@ class ScheduleLimitForm(Form):
        self.sunday.form.enabled.label.text = "Sunday"


def validate_time_between_check_has_values(form):
    """
    Custom validation function for TimeBetweenCheckForm.
    Returns True if at least one time interval field has a value > 0.
    """
    res = any([
        form.weeks.data and int(form.weeks.data) > 0,
        form.days.data and int(form.days.data) > 0,
        form.hours.data and int(form.hours.data) > 0,
        form.minutes.data and int(form.minutes.data) > 0,
        form.seconds.data and int(form.seconds.data) > 0
    ])

    return res


class RequiredTimeInterval(object):
    """
    WTForms validator that ensures at least one time interval field has a value > 0.
    Use this with FormField(TimeBetweenCheckForm, validators=[RequiredTimeInterval()]).
    """
    def __init__(self, message=None):
        self.message = message or 'At least one time interval (weeks, days, hours, minutes, or seconds) must be specified.'

    def __call__(self, form, field):
        if not validate_time_between_check_has_values(field.form):
            raise ValidationError(self.message)


class TimeBetweenCheckForm(Form):
    weeks = IntegerField('Weeks', validators=[validators.Optional(), validators.NumberRange(min=0, message="Should contain zero or more seconds")])
    days = IntegerField('Days', validators=[validators.Optional(), validators.NumberRange(min=0, message="Should contain zero or more seconds")])
@@ -216,33 +250,160 @@ class TimeBetweenCheckForm(Form):
    seconds = IntegerField('Seconds', validators=[validators.Optional(), validators.NumberRange(min=0, message="Should contain zero or more seconds")])
    # @todo add total seconds minimum validator = minimum_seconds_recheck_time

    def __init__(self, formdata=None, obj=None, prefix="", data=None, meta=None, **kwargs):
        super().__init__(formdata, obj, prefix, data, meta, **kwargs)
        self.require_at_least_one = kwargs.get('require_at_least_one', False)
        self.require_at_least_one_message = kwargs.get('require_at_least_one_message', REQUIRE_ATLEAST_ONE_TIME_PART_MESSAGE_DEFAULT)

    def validate(self, **kwargs):
        """Custom validation that can optionally require at least one time interval."""
        # Run normal field validation first
        if not super().validate(**kwargs):
            return False

        # Apply optional "at least one" validation
        if self.require_at_least_one:
            if not validate_time_between_check_has_values(self):
                # Add error to the form's general errors (not field-specific)
                if not hasattr(self, '_formdata_errors'):
                    self._formdata_errors = []
                self._formdata_errors.append(self.require_at_least_one_message)
                return False

        return True
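A quick sketch of the helper's contract, using the form class defined above; note the helper coerces values with int(), so string inputs like '1' from form submissions also count:

empty = TimeBetweenCheckForm(data={'weeks': 0, 'days': 0, 'hours': 0, 'minutes': 0, 'seconds': 0})
print(validate_time_between_check_has_values(empty))   # False - no interval set

hourly = TimeBetweenCheckForm(data={'hours': 1})
print(validate_time_between_check_has_values(hourly))  # True - hours > 0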
class EnhancedFormField(FormField):
    """
    An enhanced FormField that supports conditional validation with top-level error messages.
    Adds a 'top_errors' property for validation errors at the FormField level.
    """

    def __init__(self, form_class, label=None, validators=None, separator="-",
                 conditional_field=None, conditional_message=None, conditional_test_function=None, **kwargs):
        """
        Initialize EnhancedFormField with optional conditional validation.

        :param conditional_field: Name of the field this FormField depends on (e.g. 'time_between_check_use_default')
        :param conditional_message: Error message to show when validation fails
        :param conditional_test_function: Custom function to test if FormField has valid values.
                                          Should take self.form as parameter and return True if valid.
        """
        super().__init__(form_class, label, validators, separator, **kwargs)
        self.top_errors = []
        self.conditional_field = conditional_field
        self.conditional_message = conditional_message or "At least one field must have a value when not using defaults."
        self.conditional_test_function = conditional_test_function

    def validate(self, form, extra_validators=()):
        """
        Custom validation that supports conditional logic and stores top-level errors.
        """
        self.top_errors = []

        # First run the normal FormField validation
        base_valid = super().validate(form, extra_validators)

        # Apply conditional validation if configured
        if self.conditional_field and hasattr(form, self.conditional_field):
            conditional_field_obj = getattr(form, self.conditional_field)

            # If the conditional field is False/unchecked, check if this FormField has any values
            if not conditional_field_obj.data:
                # Use custom test function if provided, otherwise use generic fallback
                if self.conditional_test_function:
                    has_any_value = self.conditional_test_function(self.form)
                else:
                    # Generic fallback - check if any field has truthy data
                    has_any_value = any(field.data for field in self.form if hasattr(field, 'data') and field.data)

                if not has_any_value:
                    self.top_errors.append(self.conditional_message)
                    base_valid = False

        return base_valid


class RequiredFormField(FormField):
    """
    A FormField that passes require_at_least_one=True to TimeBetweenCheckForm.
    Use this when you want the sub-form to always require at least one value.
    """

    def __init__(self, form_class, label=None, validators=None, separator="-", **kwargs):
        super().__init__(form_class, label, validators, separator, **kwargs)

    def process(self, formdata, data=unset_value, extra_filters=None):
        if extra_filters:
            raise TypeError(
                "FormField cannot take filters, as the encapsulated"
                " data is not mutable."
            )

        if data is unset_value:
            try:
                data = self.default()
            except TypeError:
                data = self.default
            self._obj = data

        self.object_data = data

        prefix = self.name + self.separator
        # Pass require_at_least_one=True to the sub-form
        if isinstance(data, dict):
            self.form = self.form_class(formdata=formdata, prefix=prefix, require_at_least_one=True, **data)
        else:
            self.form = self.form_class(formdata=formdata, obj=data, prefix=prefix, require_at_least_one=True)

    @property
    def errors(self):
        """Include sub-form validation errors"""
        form_errors = self.form.errors
        # Add any general form errors to a special 'form' key
        if hasattr(self.form, '_formdata_errors') and self.form._formdata_errors:
            form_errors = dict(form_errors)  # Make a copy
            form_errors['form'] = self.form._formdata_errors
        return form_errors


# Separated by key:value
class StringDictKeyValue(StringField):
    widget = widgets.TextArea()

    def _value(self):
        if self.data:
            output = u''
            for k in self.data.keys():
                output += "{}: {}\r\n".format(k, self.data[k])

            output = ''
            for k, v in self.data.items():
                output += f"{k}: {v}\r\n"
            return output
        else:
            return u''
            return ''

    # incoming
    # incoming data processing + validation
    def process_formdata(self, valuelist):
        self.data = {}
        errors = []
        if valuelist:
            self.data = {}
            # Remove empty strings
            cleaned = list(filter(None, valuelist[0].split("\n")))
            for s in cleaned:
                parts = s.strip().split(':', 1)
                if len(parts) == 2:
                    self.data.update({parts[0].strip(): parts[1].strip()})
            # Remove empty strings (blank lines)
            cleaned = [line.strip() for line in valuelist[0].split("\n") if line.strip()]
            for idx, s in enumerate(cleaned, start=1):
                if ':' not in s:
                    errors.append(f"Line {idx} is missing a ':' separator.")
                    continue
                parts = s.split(':', 1)
                key = parts[0].strip()
                value = parts[1].strip()

        else:
            self.data = {}
                if not key:
                    errors.append(f"Line {idx} has an empty key.")
                if not value:
                    errors.append(f"Line {idx} has an empty value.")

                self.data[key] = value

        if errors:
            raise ValidationError("Invalid input:\n" + "\n".join(errors))
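The parsing rules above (one "key: value" pair per line, per-line error reporting) can be shown in isolation. A standalone sketch of the same logic, not the project's code:

def parse_key_value_lines(text):
    """Sketch of the same parsing rules as StringDictKeyValue.process_formdata."""
    data, errors = {}, []
    cleaned = [line.strip() for line in text.split("\n") if line.strip()]
    for idx, s in enumerate(cleaned, start=1):
        if ':' not in s:
            errors.append(f"Line {idx} is missing a ':' separator.")
            continue
        key, value = (part.strip() for part in s.split(':', 1))
        if not key:
            errors.append(f"Line {idx} has an empty key.")
        if not value:
            errors.append(f"Line {idx} has an empty value.")
        data[key] = value
    return data, errors

print(parse_key_value_lines("User-Agent: Mozilla/5.0\nbroken-line"))
# ({'User-Agent': 'Mozilla/5.0'}, ["Line 2 is missing a ':' separator."])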
class ValidateContentFetcherIsReady(object):
    """
@@ -304,11 +465,18 @@ class ValidateAppRiseServers(object):

    def __call__(self, form, field):
        import apprise
        apobj = apprise.Apprise()
        # so that the custom endpoints are registered
        from changedetectionio.apprise_plugin import apprise_custom_api_call_wrapper
        from .notification.apprise_plugin.assets import apprise_asset
        from .notification.apprise_plugin.custom_handlers import apprise_http_custom_handler  # noqa: F401
        from changedetectionio.jinja2_custom import render as jinja_render

        apobj = apprise.Apprise(asset=apprise_asset)

        for server_url in field.data:
            url = server_url.strip()
            generic_notification_context_data = NotificationContextData()
            # Make sure something is at least set in all those regular token fields
            generic_notification_context_data.set_random_for_validation()

            url = jinja_render(template_str=server_url.strip(), **generic_notification_context_data).strip()
            if url.startswith("#"):
                continue

@@ -322,9 +490,8 @@ class ValidateJinja2Template(object):
    """
    def __call__(self, form, field):
        from changedetectionio import notification

        from changedetectionio.jinja2_custom import create_jinja_env
        from jinja2 import BaseLoader, TemplateSyntaxError, UndefinedError
        from jinja2.sandbox import ImmutableSandboxedEnvironment
        from jinja2.meta import find_undeclared_variables
        import jinja2.exceptions

@@ -332,9 +499,11 @@ class ValidateJinja2Template(object):
        joined_data = ' '.join(map(str, field.data)) if isinstance(field.data, list) else f"{field.data}"

        try:
            jinja2_env = ImmutableSandboxedEnvironment(loader=BaseLoader)
            jinja2_env.globals.update(notification.valid_tokens)
            # Extra validation tokens provided on the form_class(... extra_tokens={}) setup
            # Use the shared helper to create a properly configured environment
            jinja2_env = create_jinja_env(loader=BaseLoader)

            # Add notification tokens for validation
            jinja2_env.globals.update(NotificationContextData())
            if hasattr(field, 'extra_notification_tokens'):
                jinja2_env.globals.update(field.extra_notification_tokens)

@@ -346,6 +515,7 @@ class ValidateJinja2Template(object):
        except jinja2.exceptions.SecurityError as e:
            raise ValidationError(f"This is not a valid Jinja2 template: {e}") from e

        # Check for undeclared variables
        ast = jinja2_env.parse(joined_data)
        undefined = ", ".join(find_undeclared_variables(ast))
        if undefined:
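The undeclared-variables check above is what catches typo'd tokens in notification templates. A minimal standalone sketch of the same jinja2.meta technique; watch_url here is just a sample global, not the project's full token set:

from jinja2 import BaseLoader
from jinja2.sandbox import ImmutableSandboxedEnvironment
from jinja2.meta import find_undeclared_variables

env = ImmutableSandboxedEnvironment(loader=BaseLoader)
env.globals.update({'watch_url': 'https://example.com'})

ast = env.parse("{{ watch_url }} changed, see {{ dif_url }}")
# Globals are excluded, so only the typo'd token is reported as undeclared:
print(find_undeclared_variables(ast))  # {'dif_url'}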
@@ -368,19 +538,23 @@ class validateURL(object):


def validate_url(test_url):
    # If hosts that only contain alphanumerics are allowed ("localhost" for example)
    try:
        url_validator(test_url, simple_host=allow_simplehost)
    except validators.ValidationError:
        # @todo check for xss
        message = f"'{test_url}' is not a valid URL."
    from changedetectionio.validate_url import is_safe_valid_url
    if not is_safe_valid_url(test_url):
        # This should be wtforms.validators.
        raise ValidationError(message)
        raise ValidationError('Watch protocol is not permitted or invalid URL format')


class ValidateSinglePythonRegexString(object):
    def __init__(self, message=None):
        self.message = message

    def __call__(self, form, field):
        try:
            re.compile(field.data)
        except re.error:
            message = field.gettext('RegEx \'%s\' is not a valid regular expression.')
            raise ValidationError(message % (field.data))

    from .model.Watch import is_safe_url
    if not is_safe_url(test_url):
        # This should be wtforms.validators.
        raise ValidationError('Watch protocol is not permitted by SAFE_PROTOCOL_REGEX or incorrect URL format')

class ValidateListRegex(object):
    """
@@ -400,6 +574,7 @@ class ValidateListRegex(object):
        message = field.gettext('RegEx \'%s\' is not a valid regular expression.')
        raise ValidationError(message % (line))


class ValidateCSSJSONXPATHInput(object):
    """
    Filter validation
@@ -499,6 +674,51 @@ class ValidateCSSJSONXPATHInput(object):
        except:
            raise ValidationError("A system-error occurred when validating your jq expression")

class ValidateSimpleURL:
    """Validate that the value can be parsed by urllib.parse.urlparse() and has a scheme/netloc."""
    def __init__(self, message=None):
        self.message = message or "Invalid URL."

    def __call__(self, form, field):
        data = (field.data or "").strip()
        if not data:
            return  # empty is OK — pair with validators.Optional()
        from urllib.parse import urlparse

        parsed = urlparse(data)
        if not parsed.scheme or not parsed.netloc:
            raise ValidationError(self.message)

class ValidateStartsWithRegex(object):
    def __init__(self, regex, *, flags=0, message=None, allow_empty=True, split_lines=True):
        # compile with given flags (we’ll pass re.IGNORECASE below)
        self.pattern = re.compile(regex, flags) if isinstance(regex, str) else regex
        self.message = message
        self.allow_empty = allow_empty
        self.split_lines = split_lines

    def __call__(self, form, field):
        data = field.data
        if not data:
            return

        # normalize into list of lines
        if isinstance(data, str) and self.split_lines:
            lines = data.splitlines()
        elif isinstance(data, (list, tuple)):
            lines = data
        else:
            lines = [data]

        for line in lines:
            stripped = line.strip()
            if not stripped:
                if self.allow_empty:
                    continue
                raise ValidationError(self.message or "Empty value not allowed.")
            if not self.pattern.match(stripped):
                raise ValidationError(self.message or "Invalid value.")
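A minimal sketch exercising the validator above directly; FakeField is a hypothetical stand-in for a bound WTForms field:

import re

class FakeField:
    def __init__(self, data):
        self.data = data

starts_with = ValidateStartsWithRegex(
    regex=r'^(https?|socks5)://',
    flags=re.IGNORECASE,
    message='Proxy URLs must start with http://, https:// or socks5://',
)

starts_with(None, FakeField('SOCKS5://user:pass@10.0.0.1:1080'))  # passes (case-insensitive)
try:
    starts_with(None, FakeField('ftp://example.com'))
except ValidationError as e:
    print(e)  # Proxy URLs must start with http://, https:// or socks5://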
class quickWatchForm(Form):
|
||||
from . import processors
|
||||
|
||||
@@ -519,16 +739,23 @@ class commonSettingsForm(Form):
|
||||
self.notification_title.extra_notification_tokens = kwargs.get('extra_notification_tokens', {})
|
||||
self.notification_urls.extra_notification_tokens = kwargs.get('extra_notification_tokens', {})
|
||||
|
||||
extract_title_as_title = BooleanField('Extract <title> from document and use as watch title', default=False)
|
||||
fetch_backend = RadioField(u'Fetch Method', choices=content_fetchers.available_fetchers(), validators=[ValidateContentFetcherIsReady()])
|
||||
notification_body = TextAreaField('Notification Body', default='{{ watch_url }} had a change.', validators=[validators.Optional(), ValidateJinja2Template()])
|
||||
notification_format = SelectField('Notification format', choices=valid_notification_formats.keys())
|
||||
notification_format = SelectField('Notification format', choices=list(valid_notification_formats.items()))
|
||||
notification_title = StringField('Notification Title', default='ChangeDetection.io Notification - {{ watch_url }}', validators=[validators.Optional(), ValidateJinja2Template()])
|
||||
notification_urls = StringListField('Notification URL List', validators=[validators.Optional(), ValidateAppRiseServers(), ValidateJinja2Template()])
|
||||
processor = RadioField( label=u"Processor - What do you want to achieve?", choices=processors.available_processors(), default="text_json_diff")
|
||||
timezone = StringField("Timezone for watch schedule", render_kw={"list": "timezones"}, validators=[validateTimeZoneName()])
|
||||
scheduler_timezone_default = StringField("Default timezone for watch check scheduler", render_kw={"list": "timezones"}, validators=[validateTimeZoneName()])
|
||||
webdriver_delay = IntegerField('Wait seconds before extracting text', validators=[validators.Optional(), validators.NumberRange(min=1, message="Should contain one or more seconds")])
|
||||
|
||||
# Not true anymore but keep the validate_ hook for future use, we convert color tags
|
||||
# def validate_notification_urls(self, field):
|
||||
# """Validate that HTML Color format is not used with Telegram"""
|
||||
# if self.notification_format.data == 'HTML Color' and field.data:
|
||||
# for url in field.data:
|
||||
# if url and ('tgram://' in url or 'discord://' in url or 'discord.com/api/webhooks' in url):
|
||||
# raise ValidationError('HTML Color format is not supported by Telegram and Discord. Please choose another Notification Format (Plain Text, HTML, or Markdown to HTML).')
|
||||
|
||||
|
||||
class importForm(Form):
|
||||
from . import processors
|
||||
@@ -553,11 +780,16 @@ class processor_text_json_diff_form(commonSettingsForm):
|
||||
url = fields.URLField('URL', validators=[validateURL()])
|
||||
tags = StringTagUUID('Group tag', [validators.Optional()], default='')
|
||||
|
||||
time_between_check = FormField(TimeBetweenCheckForm)
|
||||
time_between_check = EnhancedFormField(
|
||||
TimeBetweenCheckForm,
|
||||
conditional_field='time_between_check_use_default',
|
||||
conditional_message=REQUIRE_ATLEAST_ONE_TIME_PART_WHEN_NOT_GLOBAL_DEFAULT,
|
||||
conditional_test_function=validate_time_between_check_has_values
|
||||
)
|
||||
|
||||
time_schedule_limit = FormField(ScheduleLimitForm)
|
||||
|
||||
time_between_check_use_default = BooleanField('Use global settings for time between check', default=False)
|
||||
time_between_check_use_default = BooleanField('Use global settings for time between check and scheduler.', default=False)
|
||||
|
||||
include_filters = StringListField('CSS/JSONPath/JQ/XPath Filters', [ValidateCSSJSONXPATHInput()], default='')
|
||||
|
||||
@@ -575,27 +807,32 @@ class processor_text_json_diff_form(commonSettingsForm):
|
||||
check_unique_lines = BooleanField('Only trigger when unique lines appear in all history', default=False)
|
||||
remove_duplicate_lines = BooleanField('Remove duplicate lines of text', default=False)
|
||||
sort_text_alphabetically = BooleanField('Sort text alphabetically', default=False)
|
||||
strip_ignored_lines = TernaryNoneBooleanField('Strip ignored lines', default=None)
|
||||
trim_text_whitespace = BooleanField('Trim whitespace before and after text', default=False)
|
||||
|
||||
filter_text_added = BooleanField('Added lines', default=True)
|
||||
filter_text_replaced = BooleanField('Replaced/changed lines', default=True)
|
||||
filter_text_removed = BooleanField('Removed lines', default=True)
|
||||
|
||||
trigger_text = StringListField('Trigger/wait for text', [validators.Optional(), ValidateListRegex()])
|
||||
trigger_text = StringListField('Keyword triggers - Trigger/wait for text', [validators.Optional(), ValidateListRegex()])
|
||||
if os.getenv("PLAYWRIGHT_DRIVER_URL"):
|
||||
browser_steps = FieldList(FormField(SingleBrowserStep), min_entries=10)
|
||||
text_should_not_be_present = StringListField('Block change-detection while text matches', [validators.Optional(), ValidateListRegex()])
|
||||
webdriver_js_execute_code = TextAreaField('Execute JavaScript before change detection', render_kw={"rows": "5"}, validators=[validators.Optional()])
|
||||
|
||||
save_button = SubmitField('Save', render_kw={"class": "pure-button button-small pure-button-primary"})
|
||||
save_button = SubmitField('Save', render_kw={"class": "pure-button pure-button-primary"})
|
||||
|
||||
proxy = RadioField('Proxy')
|
||||
# filter_failure_notification_send @todo make ternary
|
||||
filter_failure_notification_send = BooleanField(
|
||||
'Send a notification when the filter can no longer be found on the page', default=False)
|
||||
|
||||
notification_muted = BooleanField('Notifications Muted / Off', default=False)
|
||||
notification_muted = TernaryNoneBooleanField('Notifications', default=None, yes_text="Muted", no_text="On")
|
||||
notification_screenshot = BooleanField('Attach screenshot to notification (where possible)', default=False)
|
||||
|
||||
conditions_match_logic = RadioField(u'Match', choices=[('ALL', 'Match all of the following'),('ANY', 'Match any of the following')], default='ALL')
|
||||
conditions = FieldList(FormField(ConditionFormRow), min_entries=1) # Add rule logic here
|
||||
use_page_title_in_list = TernaryNoneBooleanField('Use page <title> in list', default=None)
|
||||
|
||||
def extra_tab_content(self):
|
||||
return None
|
||||
|
||||
@@ -606,7 +843,7 @@ class processor_text_json_diff_form(commonSettingsForm):
|
||||
if not super().validate():
|
||||
return False
|
||||
|
||||
from changedetectionio.safe_jinja import render as jinja_render
|
||||
from changedetectionio.jinja2_custom import render as jinja_render
|
||||
result = True
|
||||
|
||||
# Fail form validation when a body is set for a GET
|
||||
@@ -669,23 +906,36 @@ class processor_text_json_diff_form(commonSettingsForm):
|
||||
):
|
||||
super().__init__(formdata, obj, prefix, data, meta, **kwargs)
|
||||
if kwargs and kwargs.get('default_system_settings'):
|
||||
default_tz = kwargs.get('default_system_settings').get('application', {}).get('timezone')
|
||||
default_tz = kwargs.get('default_system_settings').get('application', {}).get('scheduler_timezone_default')
|
||||
if default_tz:
|
||||
self.time_schedule_limit.form.timezone.render_kw['placeholder'] = default_tz
|
||||
|
||||
|
||||
|
||||
class SingleExtraProxy(Form):
|
||||
|
||||
# maybe better to set some <script>var..
|
||||
proxy_name = StringField('Name', [validators.Optional()], render_kw={"placeholder": "Name"})
|
||||
proxy_url = StringField('Proxy URL', [validators.Optional()], render_kw={"placeholder": "socks5:// or regular proxy http://user:pass@...:3128", "size":50})
|
||||
# @todo do the validation here instead
|
||||
proxy_url = StringField('Proxy URL', [
|
||||
validators.Optional(),
|
||||
ValidateStartsWithRegex(
|
||||
regex=r'^(https?|socks5)://', # ✅ main pattern
|
||||
flags=re.IGNORECASE, # ✅ makes it case-insensitive
|
||||
message='Proxy URLs must start with http://, https:// or socks5://',
|
||||
),
|
||||
ValidateSimpleURL()
|
||||
], render_kw={"placeholder": "socks5:// or regular proxy http://user:pass@...:3128", "size":50})
|
||||
|
||||
class SingleExtraBrowser(Form):
|
||||
browser_name = StringField('Name', [validators.Optional()], render_kw={"placeholder": "Name"})
|
||||
browser_connection_url = StringField('Browser connection URL', [validators.Optional()], render_kw={"placeholder": "wss://brightdata... wss://oxylabs etc", "size":50})
|
||||
# @todo do the validation here instead
|
||||
browser_connection_url = StringField('Browser connection URL', [
|
||||
validators.Optional(),
|
||||
ValidateStartsWithRegex(
|
||||
regex=r'^(wss?|ws)://',
|
||||
flags=re.IGNORECASE,
|
||||
message='Browser URLs must start with wss:// or ws://'
|
||||
),
|
||||
ValidateSimpleURL()
|
||||
], render_kw={"placeholder": "wss://brightdata... wss://oxylabs etc", "size":50})
|
||||
|
||||
class DefaultUAInputForm(Form):
|
||||
html_requests = StringField('Plaintext requests', validators=[validators.Optional()], render_kw={"placeholder": "<default>"})
@@ -694,12 +944,23 @@ class DefaultUAInputForm(Form):


# datastore.data['settings']['requests']..
class globalSettingsRequestForm(Form):
    time_between_check = FormField(TimeBetweenCheckForm)
    time_between_check = RequiredFormField(TimeBetweenCheckForm)
    time_schedule_limit = FormField(ScheduleLimitForm)
    proxy = RadioField('Proxy')
    proxy = RadioField('Default proxy')
    jitter_seconds = IntegerField('Random jitter seconds ± check',
                                  render_kw={"style": "width: 5em;"},
                                  validators=[validators.NumberRange(min=0, message="Should contain zero or more seconds")])

    workers = IntegerField('Number of fetch workers',
                           render_kw={"style": "width: 5em;"},
                           validators=[validators.NumberRange(min=1, max=50,
                                                              message="Should be between 1 and 50")])

    timeout = IntegerField('Requests timeout in seconds',
                           render_kw={"style": "width: 5em;"},
                           validators=[validators.NumberRange(min=1, max=999,
                                                              message="Should be between 1 and 999")])

    extra_proxies = FieldList(FormField(SingleExtraProxy), min_entries=5)
    extra_browsers = FieldList(FormField(SingleExtraBrowser), min_entries=5)

@@ -712,6 +973,11 @@ class globalSettingsRequestForm(Form):
            self.extra_proxies.errors.append('Both a name, and a Proxy URL is required.')
            return False

class globalSettingsApplicationUIForm(Form):
    open_diff_in_new_tab = BooleanField("Open 'History' page in a new tab", default=True, validators=[validators.Optional()])
    socket_io_enabled = BooleanField('Realtime UI Updates Enabled', default=True, validators=[validators.Optional()])
    favicons_enabled = BooleanField('Favicons Enabled', default=True, validators=[validators.Optional()])
    use_page_title_in_list = BooleanField('Use page <title> in watch overview list')

# datastore.data['settings']['application']..
class globalSettingsApplicationForm(commonSettingsForm):
@@ -731,15 +997,24 @@ class globalSettingsApplicationForm(commonSettingsForm):
                                         render_kw={"style": "width: 5em;"},
                                         validators=[validators.NumberRange(min=0,
                                                                            message="Should be at least zero (disabled)")])

    rss_content_format = SelectField('RSS Content format', choices=RSS_FORMAT_TYPES)

    removepassword_button = SubmitField('Remove password', render_kw={"class": "pure-button pure-button-primary"})
    render_anchor_tag_content = BooleanField('Render anchor tag content', default=False)
    shared_diff_access = BooleanField('Allow access to view diff page when password is enabled', default=False, validators=[validators.Optional()])
    shared_diff_access = BooleanField('Allow anonymous access to watch history page when password is enabled', default=False, validators=[validators.Optional()])
    strip_ignored_lines = BooleanField('Strip ignored lines')
    rss_hide_muted_watches = BooleanField('Hide muted watches from RSS feed', default=True,
                                          validators=[validators.Optional()])

    rss_reader_mode = BooleanField('RSS reader mode', default=False,
                                   validators=[validators.Optional()])

    filter_failure_notification_threshold_attempts = IntegerField('Number of times the filter can be missing before sending a notification',
                                                                  render_kw={"style": "width: 5em;"},
                                                                  validators=[validators.NumberRange(min=0,
                                                                                                     message="Should contain zero or more attempts")])
    ui = FormField(globalSettingsApplicationUIForm)


class globalSettingsForm(Form):
@@ -754,9 +1029,9 @@ class globalSettingsForm(Form):

    requests = FormField(globalSettingsRequestForm)
    application = FormField(globalSettingsApplicationForm)
    save_button = SubmitField('Save', render_kw={"class": "pure-button button-small pure-button-primary"})
    save_button = SubmitField('Save', render_kw={"class": "pure-button pure-button-primary"})


class extractDataForm(Form):
    extract_regex = StringField('RegEx to extract', validators=[validators.Length(min=1, message="Needs a RegEx")])
    extract_regex = StringField('RegEx to extract', validators=[validators.DataRequired(), ValidateSinglePythonRegexString()])
    extract_submit_button = SubmitField('Extract as CSV', render_kw={"class": "pure-button pure-button-primary"})
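Editor's note: the extract_regex change above swaps a plain length check for DataRequired() plus a custom ValidateSinglePythonRegexString validator. As a rough, hypothetical sketch of how such a WTForms validator is typically written (the real implementation lives elsewhere in forms.py and may differ):

import re
from wtforms import ValidationError

class ValidateSinglePythonRegexString(object):
    """Hypothetical sketch: reject input that is not a compilable Python regex."""
    def __init__(self, message=None):
        self.message = message

    def __call__(self, form, field):
        try:
            re.compile(field.data)
        except re.error:
            # Surface a form error instead of letting a bad pattern crash later
            raise ValidationError(self.message or 'Not a valid regular expression')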
changedetectionio/gc_cleanup.py (new file, 162 lines)
@@ -0,0 +1,162 @@
#!/usr/bin/env python3

import ctypes
import gc
import re
import psutil
import sys
import threading
import importlib
from loguru import logger

def memory_cleanup(app=None):
    """
    Perform comprehensive memory cleanup operations and log memory usage
    at each step with nicely formatted numbers.

    Args:
        app: Optional Flask app instance for clearing Flask-specific caches

    Returns:
        str: Status message
    """
    # Get current process
    process = psutil.Process()

    # Log initial memory usage with nicely formatted numbers
    current_memory = process.memory_info().rss / 1024 / 1024
    logger.debug(f"Memory cleanup started - Current memory usage: {current_memory:,.2f} MB")

    # 1. Standard garbage collection - force full collection on all generations
    gc.collect(0)  # Collect youngest generation
    gc.collect(1)  # Collect middle generation
    gc.collect(2)  # Collect oldest generation

    # Run full collection again to ensure maximum cleanup
    gc.collect()
    current_memory = process.memory_info().rss / 1024 / 1024
    logger.debug(f"After full gc.collect() - Memory usage: {current_memory:,.2f} MB")

    # 3. Call libc's malloc_trim to release memory back to the OS
    libc = ctypes.CDLL("libc.so.6")
    libc.malloc_trim(0)
    current_memory = process.memory_info().rss / 1024 / 1024
    logger.debug(f"After malloc_trim(0) - Memory usage: {current_memory:,.2f} MB")

    # 4. Clear Python's regex cache
    re.purge()
    current_memory = process.memory_info().rss / 1024 / 1024
    logger.debug(f"After re.purge() - Memory usage: {current_memory:,.2f} MB")

    # 5. Reset thread-local storage
    # Create a new thread local object to encourage cleanup of old ones
    threading.local()
    current_memory = process.memory_info().rss / 1024 / 1024
    logger.debug(f"After threading.local() - Memory usage: {current_memory:,.2f} MB")

    # 6. Clear sys.intern cache if Python version supports it
    try:
        sys.intern.clear()
        current_memory = process.memory_info().rss / 1024 / 1024
        logger.debug(f"After sys.intern.clear() - Memory usage: {current_memory:,.2f} MB")
    except (AttributeError, TypeError):
        logger.debug("sys.intern.clear() not supported in this Python version")

    # 7. Clear XML/lxml caches if available
    try:
        # Check if lxml.etree is in use
        lxml_etree = sys.modules.get('lxml.etree')
        if lxml_etree:
            # Clear module-level caches
            if hasattr(lxml_etree, 'clear_error_log'):
                lxml_etree.clear_error_log()

            # Check for _ErrorLog and _RotatingErrorLog objects and clear them
            for obj in gc.get_objects():
                if hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'):
                    class_name = obj.__class__.__name__
                    if class_name in ('_ErrorLog', '_RotatingErrorLog', '_DomainErrorLog') and hasattr(obj, 'clear'):
                        try:
                            obj.clear()
                        except (AttributeError, TypeError):
                            pass

                    # Clear Element objects which can hold references to documents
                    elif class_name in ('_Element', 'ElementBase') and hasattr(obj, 'clear'):
                        try:
                            obj.clear()
                        except (AttributeError, TypeError):
                            pass

            current_memory = process.memory_info().rss / 1024 / 1024
            logger.debug(f"After lxml.etree cleanup - Memory usage: {current_memory:,.2f} MB")

        # Check if lxml.html is in use
        lxml_html = sys.modules.get('lxml.html')
        if lxml_html:
            # Clear HTML-specific element types
            for obj in gc.get_objects():
                if hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'):
                    class_name = obj.__class__.__name__
                    if class_name in ('HtmlElement', 'FormElement', 'InputElement',
                                      'SelectElement', 'TextareaElement', 'CheckboxGroup',
                                      'RadioGroup', 'MultipleSelectOptions', 'FieldsDict') and hasattr(obj, 'clear'):
                        try:
                            obj.clear()
                        except (AttributeError, TypeError):
                            pass

            current_memory = process.memory_info().rss / 1024 / 1024
            logger.debug(f"After lxml.html cleanup - Memory usage: {current_memory:,.2f} MB")
    except (ImportError, AttributeError):
        logger.debug("lxml cleanup not applicable")

    # 8. Clear JSON parser caches if applicable
    try:
        # Check if json module is being used and try to clear its cache
        json_module = sys.modules.get('json')
        if json_module and hasattr(json_module, '_default_encoder'):
            json_module._default_encoder.markers.clear()
            current_memory = process.memory_info().rss / 1024 / 1024
            logger.debug(f"After JSON parser cleanup - Memory usage: {current_memory:,.2f} MB")
    except (AttributeError, KeyError):
        logger.debug("JSON cleanup not applicable")

    # 9. Force Python's memory allocator to release unused memory
    try:
        if hasattr(sys, 'pypy_version_info'):
            # PyPy has different memory management
            gc.collect()
        else:
            # CPython - try to release unused memory
            ctypes.pythonapi.PyGC_Collect()
        current_memory = process.memory_info().rss / 1024 / 1024
        logger.debug(f"After PyGC_Collect - Memory usage: {current_memory:,.2f} MB")
    except (AttributeError, TypeError):
        logger.debug("PyGC_Collect not supported")

    # 10. Clear Flask-specific caches if applicable
    if app:
        try:
            # Clear Flask caches if they exist
            for key in list(app.config.get('_cache', {}).keys()):
                app.config['_cache'].pop(key, None)

            # Clear Jinja2 template cache if available
            if hasattr(app, 'jinja_env') and hasattr(app.jinja_env, 'cache'):
                app.jinja_env.cache.clear()

            current_memory = process.memory_info().rss / 1024 / 1024
            logger.debug(f"After Flask cache clear - Memory usage: {current_memory:,.2f} MB")
        except (AttributeError, KeyError):
            logger.debug("No Flask cache to clear")

    # Final garbage collection pass
    gc.collect()
    libc.malloc_trim(0)

    # Log final memory usage
    final_memory = process.memory_info().rss / 1024 / 1024
    logger.info(f"Memory cleanup completed - Final memory usage: {final_memory:,.2f} MB")
    return "cleaned"
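Editor's note: a usage sketch (not part of the diff) showing how memory_cleanup() might be invoked, e.g. from an operator-only endpoint or a periodic timer. Note that ctypes.CDLL("libc.so.6") assumes glibc, so this module is Linux-specific as written.

from changedetectionio.gc_cleanup import memory_cleanup

# One-off call, without Flask-specific cache clearing:
status = memory_cleanup()   # returns "cleaned"

# Or pass the Flask app so Jinja2 template caches are cleared too
# (flask_app is assumed to exist in the caller):
# status = memory_cleanup(app=flask_app)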
@@ -1,6 +1,8 @@
from functools import lru_cache

from loguru import logger
from lxml import etree
from typing import List
import html
import json
import re

@@ -9,6 +11,10 @@ TEXT_FILTER_LIST_LINE_SUFFIX = "<br>"
TRANSLATE_WHITESPACE_TABLE = str.maketrans('', '', '\r\n\t ')
PERL_STYLE_REGEX = r'^/(.*?)/([a-z]*)?$'

TITLE_RE = re.compile(r"<title[^>]*>(.*?)</title>", re.I | re.S)
META_CS = re.compile(r'<meta[^>]+charset=["\']?\s*([a-z0-9_\-:+.]+)', re.I)
META_CT = re.compile(r'<meta[^>]+http-equiv=["\']?content-type["\']?[^>]*content=["\'][^>]*charset=([a-z0-9_\-:+.]+)', re.I)

# 'price' , 'lowPrice', 'highPrice' are usually under here
# All of those may or may not appear on different websites - I didn't find a way to do case-insensitive searching here
LD_JSON_PRODUCT_OFFER_SELECTORS = ["json:$..offers", "json:$..Offers"]
@@ -17,9 +23,9 @@ class JSONNotFound(ValueError):
    def __init__(self, msg):
        ValueError.__init__(self, msg)


# Doesn't look like python supports forward slash auto enclosure in re.findall
# So convert it to inline flag "(?i)foobar" type configuration
@lru_cache(maxsize=100)
def perl_style_slash_enclosed_regex_to_options(regex):

    res = re.search(PERL_STYLE_REGEX, regex, re.IGNORECASE)

@@ -52,13 +58,17 @@ def include_filters(include_filters, html_content, append_pretty_line_formatting

    return html_block

def subtractive_css_selector(css_selector, html_content):
def subtractive_css_selector(css_selector, content):
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(html_content, "html.parser")
    soup = BeautifulSoup(content, "html.parser")

    # So that the elements don't shift their index, build a list of elements here which will be pointers to their place in the DOM
    elements_to_remove = soup.select(css_selector)

    if not elements_to_remove:
        # Better to return the original than rebuild with BeautifulSoup
        return content

    # Then, remove them in a separate loop
    for item in elements_to_remove:
        item.decompose()
@@ -66,6 +76,7 @@ def subtractive_css_selector(css_selector, html_content):
    return str(soup)

def subtractive_xpath_selector(selectors: List[str], html_content: str) -> str:
    from lxml import etree
    # Parse the HTML content using lxml
    html_tree = etree.HTML(html_content)

@@ -77,6 +88,10 @@ def subtractive_xpath_selector(selectors: List[str], html_content: str) -> str:
        # Collect elements for each selector
        elements_to_remove.extend(html_tree.xpath(selector))

    # If no elements were found, return the original HTML content
    if not elements_to_remove:
        return html_content

    # Then, remove them in a separate loop
    for element in elements_to_remove:
        if element.getparent() is not None:  # Ensure the element has a parent before removing
@@ -94,7 +109,7 @@ def element_removal(selectors: List[str], html_content):
    xpath_selectors = []

    for selector in selectors:
        if selector.startswith(('xpath:', 'xpath1:', '//')):
        if selector.strip().startswith(('xpath:', 'xpath1:', '//')):
            # Handle XPath selectors separately
            xpath_selector = selector.removeprefix('xpath:').removeprefix('xpath1:')
            xpath_selectors.append(xpath_selector)
@@ -171,8 +186,21 @@ def xpath_filter(xpath_filter, html_content, append_pretty_line_formatting=False
    tree = html.fromstring(bytes(html_content, encoding='utf-8'), parser=parser)
    html_block = ""

    r = elementpath.select(tree, xpath_filter.strip(), namespaces={'re': 'http://exslt.org/regular-expressions'}, parser=XPath3Parser)
    #@note: //title/text() won't work where <title>CDATA..
    # Build namespace map for XPath queries
    namespaces = {'re': 'http://exslt.org/regular-expressions'}

    # Handle default namespace in documents (common in RSS/Atom feeds, but can occur in any XML)
    # XPath spec: unprefixed element names have no namespace, not the default namespace
    # Solution: Register the default namespace with empty string prefix in elementpath
    # This is primarily for RSS/Atom feeds but works for any XML with default namespace
    if hasattr(tree, 'nsmap') and tree.nsmap and None in tree.nsmap:
        # Register the default namespace with empty string prefix for elementpath
        # This allows //title to match elements in the default namespace
        namespaces[''] = tree.nsmap[None]

    r = elementpath.select(tree, xpath_filter.strip(), namespaces=namespaces, parser=XPath3Parser)
    #@note: //title/text() now works with default namespaces (fixed by registering '' prefix)
    #@note: //title/text() won't work where <title>CDATA.. (use cdata_in_document_to_text first)

    if type(r) != list:
        r = [r]
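Editor's note: an illustrative sketch of the behaviour this hunk enables. With a default-namespaced Atom document, registering the default namespace under the '' prefix lets an unprefixed //title query match (elementpath supports this; lxml's native xpath() does not, see the next hunk):

import elementpath
from elementpath.xpath3 import XPath3Parser
from lxml import etree

doc = b'<feed xmlns="http://www.w3.org/2005/Atom"><title>Example feed</title></feed>'
tree = etree.fromstring(doc)

namespaces = {}
if tree.nsmap and None in tree.nsmap:
    namespaces[''] = tree.nsmap[None]   # bind default namespace to the empty prefix

print(elementpath.select(tree, '//title/text()', namespaces=namespaces, parser=XPath3Parser))
# Expected: ['Example feed']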
@@ -207,8 +235,19 @@ def xpath1_filter(xpath_filter, html_content, append_pretty_line_formatting=Fals
    tree = html.fromstring(bytes(html_content, encoding='utf-8'), parser=parser)
    html_block = ""

    r = tree.xpath(xpath_filter.strip(), namespaces={'re': 'http://exslt.org/regular-expressions'})
    #@note: //title/text() won't work where <title>CDATA..
    # Build namespace map for XPath queries
    namespaces = {'re': 'http://exslt.org/regular-expressions'}

    # NOTE: lxml's native xpath() does NOT support empty string prefix for default namespace
    # For documents with default namespace (RSS/Atom feeds), users must use:
    # - local-name(): //*[local-name()='title']/text()
    # - Or use xpath_filter (not xpath1_filter) which supports default namespaces
    # XPath spec: unprefixed element names have no namespace, not the default namespace

    r = tree.xpath(xpath_filter.strip(), namespaces=namespaces)
    #@note: xpath1 (lxml) does NOT automatically handle default namespaces
    #@note: Use //*[local-name()='element'] or switch to xpath_filter for default namespace support
    #@note: //title/text() won't work where <title>CDATA.. (use cdata_in_document_to_text first)

    for element in r:
        # When there's more than 1 match, then add the suffix to separate each line
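Editor's note: a short illustration of the xpath1 limitation noted above. lxml's xpath() rejects an empty-string namespace prefix, so default-namespaced documents need local-name() instead:

from lxml import etree

doc = b'<feed xmlns="http://www.w3.org/2005/Atom"><title>Example feed</title></feed>'
tree = etree.fromstring(doc)

print(tree.xpath('//title/text()'))                    # [] - unprefixed name has no namespace
print(tree.xpath("//*[local-name()='title']/text()"))  # ['Example feed']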
@@ -289,70 +328,92 @@ def _get_stripped_text_from_json_match(match):

    return stripped_text_from_html

def extract_json_blob_from_html(content, ensure_is_ldjson_info_type, json_filter):
    from bs4 import BeautifulSoup
    stripped_text_from_html = ''

    # Foreach <script json></script> blob.. just return the first that matches json_filter
    # As a last resort, try to parse the whole <body>
    soup = BeautifulSoup(content, 'html.parser')

    if ensure_is_ldjson_info_type:
        bs_result = soup.find_all('script', {"type": "application/ld+json"})
    else:
        bs_result = soup.find_all('script')
        bs_result += soup.find_all('body')

    bs_jsons = []

    for result in bs_result:
        # result.text is how bs4 magically strips JSON from the body
        content_start = result.text.lstrip("\ufeff").strip()[:100] if result.text else ''
        # Skip empty tags, and things that don't even look like JSON
        if not result.text or not (content_start[0] == '{' or content_start[0] == '['):
            continue
        try:
            json_data = json.loads(result.text)
            bs_jsons.append(json_data)
        except json.JSONDecodeError:
            # Skip objects which cannot be parsed
            continue

    if not bs_jsons:
        raise JSONNotFound("No parsable JSON found in this document")

    for json_data in bs_jsons:
        stripped_text_from_html = _parse_json(json_data, json_filter)

        if ensure_is_ldjson_info_type:
            # Could sometimes be list, string or something else random
            if isinstance(json_data, dict):
                # If it has LD JSON 'key' @type, and @type is 'product', and something was found for the search
                # (Some sites have multiple of the same ld+json @type='product', but some have the review part, some have the 'price' part)
                # @type could also be a list although non-standard ("@type": ["Product", "SubType"],)
                # LD_JSON auto-extract also requires some content PLUS the ldjson to be present
                # 1833 - could be either str or dict, should not be anything else

                t = json_data.get('@type')
                if t and stripped_text_from_html:
                    if isinstance(t, str) and t.lower() == ensure_is_ldjson_info_type.lower():
                        break
                    # The non-standard part, some have a list
                    elif isinstance(t, list):
                        if ensure_is_ldjson_info_type.lower() in [x.lower().strip() for x in t]:
                            break

        elif stripped_text_from_html:
            break

    return stripped_text_from_html

# content - json
# json_filter - ie json:$..price
# ensure_is_ldjson_info_type - str "product", optional, "@type == product" (I don't know how to do that as a json selector)
def extract_json_as_string(content, json_filter, ensure_is_ldjson_info_type=None):
    from bs4 import BeautifulSoup

    stripped_text_from_html = False
    # https://github.com/dgtlmoon/changedetection.io/pull/2041#issuecomment-1848397161w
    # Try to parse/filter out the JSON, if we get some parser error, then maybe it's embedded within HTML tags
    try:
        # .lstrip("\ufeff") strips the ByteOrderMark from UTF8 and still lets the UTF work
        stripped_text_from_html = _parse_json(json.loads(content.lstrip("\ufeff")), json_filter)
    except json.JSONDecodeError as e:
        logger.warning(str(e))

        # Foreach <script json></script> blob.. just return the first that matches json_filter
        # As a last resort, try to parse the whole <body>
        soup = BeautifulSoup(content, 'html.parser')
    # Looks like clean JSON, don't bother extracting from HTML

        if ensure_is_ldjson_info_type:
            bs_result = soup.findAll('script', {"type": "application/ld+json"})
        else:
            bs_result = soup.findAll('script')
            bs_result += soup.findAll('body')
    content_start = content.lstrip("\ufeff").strip()[:100]

        bs_jsons = []
        for result in bs_result:
            # Skip empty tags, and things that don't even look like JSON
            if not result.text or '{' not in result.text:
                continue
            try:
                json_data = json.loads(result.text)
                bs_jsons.append(json_data)
            except json.JSONDecodeError:
                # Skip objects which cannot be parsed
                continue

        if not bs_jsons:
            raise JSONNotFound("No parsable JSON found in this document")

        for json_data in bs_jsons:
            stripped_text_from_html = _parse_json(json_data, json_filter)

            if ensure_is_ldjson_info_type:
                # Could sometimes be list, string or something else random
                if isinstance(json_data, dict):
                    # If it has LD JSON 'key' @type, and @type is 'product', and something was found for the search
                    # (Some sites have multiple of the same ld+json @type='product', but some have the review part, some have the 'price' part)
                    # @type could also be a list although non-standard ("@type": ["Product", "SubType"],)
                    # LD_JSON auto-extract also requires some content PLUS the ldjson to be present
                    # 1833 - could be either str or dict, should not be anything else

                    t = json_data.get('@type')
                    if t and stripped_text_from_html:
                        if isinstance(t, str) and t.lower() == ensure_is_ldjson_info_type.lower():
                            break
                        # The non-standard part, some have a list
                        elif isinstance(t, list):
                            if ensure_is_ldjson_info_type.lower() in [x.lower().strip() for x in t]:
                                break

            elif stripped_text_from_html:
                break
    if content_start[0] == '{' or content_start[0] == '[':
        try:
            # .lstrip("\ufeff") strips the ByteOrderMark from UTF8 and still lets the UTF work
            stripped_text_from_html = _parse_json(json.loads(content.lstrip("\ufeff")), json_filter)
        except json.JSONDecodeError as e:
            logger.warning(f"Error processing JSON {content[:20]}...{str(e)})")
    else:
        # Probably something else, go fish inside for it
        try:
            stripped_text_from_html = extract_json_blob_from_html(content=content,
                                                                  ensure_is_ldjson_info_type=ensure_is_ldjson_info_type,
                                                                  json_filter=json_filter)
        except json.JSONDecodeError as e:
            logger.warning(f"Error processing JSON while extracting JSON from HTML blob {content[:20]}...{str(e)})")

    if not stripped_text_from_html:
        # Re 265 - Just return an empty string when filter not found
@@ -366,22 +427,44 @@ def extract_json_as_string(content, json_filter, ensure_is_ldjson_info_type=None
# wordlist - list of regexes (str) or words (str)
# Preserves all linefeeds and other whitespacing, it's not the job of this to remove that
def strip_ignore_text(content, wordlist, mode="content"):
    i = 0
    output = []
    ignore_text = []
    ignore_regex = []
    ignored_line_numbers = []
    ignore_regex_multiline = []
    ignored_lines = []

    for k in wordlist:
        # Skip empty strings to avoid matching everything
        if not k or not k.strip():
            continue
        # Is it a regex?
        res = re.search(PERL_STYLE_REGEX, k, re.IGNORECASE)
        if res:
            ignore_regex.append(re.compile(perl_style_slash_enclosed_regex_to_options(k)))
            res = re.compile(perl_style_slash_enclosed_regex_to_options(k))
            if res.flags & re.DOTALL or res.flags & re.MULTILINE:
                ignore_regex_multiline.append(res)
            else:
                ignore_regex.append(res)
        else:
            ignore_text.append(k.strip())

    for line in content.splitlines(keepends=True):
        i += 1
    for r in ignore_regex_multiline:
        for match in r.finditer(content):
            content_lines = content[:match.end()].splitlines(keepends=True)
            match_lines = content[match.start():match.end()].splitlines(keepends=True)

            end_line = len(content_lines)
            start_line = end_line - len(match_lines)

            if end_line - start_line <= 1:
                # Match is empty or in the middle of the line
                ignored_lines.append(start_line)
            else:
                for i in range(start_line, end_line):
                    ignored_lines.append(i)

    line_index = 0
    lines = content.splitlines(keepends=True)
    for line in lines:
        # Always ignore blank lines in this mode. (when this function gets called)
        got_match = False
        for l in ignore_text:
@@ -393,17 +476,19 @@ def strip_ignore_text(content, wordlist, mode="content"):
            if r.search(line):
                got_match = True

        if not got_match:
            # Not ignored, and should preserve "keepends"
            output.append(line)
        else:
            ignored_line_numbers.append(i)
        if got_match:
            ignored_lines.append(line_index)

        line_index += 1

    ignored_lines = set([i for i in ignored_lines if i >= 0 and i < len(lines)])

    # Used for finding out what to highlight
    if mode == "line numbers":
        return ignored_line_numbers
        return [i + 1 for i in ignored_lines]

    return ''.join(output)
    output_lines = set(range(len(lines))) - ignored_lines
    return ''.join([lines[i] for i in output_lines])

def cdata_in_document_to_text(html_content: str, render_anchor_tag_content=False) -> str:
    from xml.sax.saxutils import escape as xml_escape
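Editor's note: a behaviour sketch (my reading of the reworked strip_ignore_text() above, not part of the diff) contrasting a plain per-line rule with a multiline Perl-style /.../s rule, which now removes the whole matched span of lines:

# strip_ignore_text("line 1\nline 2\nline 3\n", ['line 2'])
# -> "line 1\nline 3\n"          plain text match, one line removed
#
# strip_ignore_text("a\nSTART\nmiddle\nEND\nb\n", ['/START.*END/s'])
# -> "a\nb\n"                    /.../s compiles with re.DOTALL, so it goes into
#                                ignore_regex_multiline and lines 2-4 are all dropped
#
# strip_ignore_text("a\nSTART\nmiddle\nEND\nb\n", ['/START.*END/s'], mode="line numbers")
# -> [2, 3, 4]                   1-based line numbers, used for highlighting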
@@ -414,50 +499,36 @@ def cdata_in_document_to_text(html_content: str, render_anchor_tag_content=False

    return re.sub(pattern, repl, html_content)

def html_to_text(html_content: str, render_anchor_tag_content=False, is_rss=False) -> str:

# NOTE!! ANYTHING LIBXML, HTML5LIB ETC WILL CAUSE SOME SMALL MEMORY LEAK IN THE LOCAL "LIB" IMPLEMENTATION OUTSIDE PYTHON


def html_to_text(html_content: str, render_anchor_tag_content=False, is_rss=False, timeout=10) -> str:
    from inscriptis import get_text
    from inscriptis.model.config import ParserConfig

    """Converts an html string to a string with just the text. If rendering
    anchor tag content is enabled, anchor tag content is also
    included in the text

    :param html_content: string with html content
    :param render_anchor_tag_content: boolean flag indicating whether to extract
        hyperlinks (the anchor tag content) together with text. This refers to the
        'href' inside 'a' tags.
        Anchor tag content is rendered in the following manner:
        '[ text ](anchor tag content)'
    :return: extracted text from the HTML
    """
    # if anchor tag content flag is set to True define a config for
    # extracting this content
    if render_anchor_tag_content:
        parser_config = ParserConfig(
            annotation_rules={"a": ["hyperlink"]},
            display_links=True
        )
    # otherwise set config to None/default
    else:
        parser_config = None

    # RSS Mode - Inscriptis will treat `title` as something else.
    # Make it a regular block display element (//item/title)
    # This is a bit of a hack - the real way is to use XSLT to convert it to HTML #1874
    if is_rss:
        html_content = re.sub(r'<title([\s>])', r'<h1\1', html_content)
        html_content = re.sub(r'</title>', r'</h1>', html_content)

    text_content = get_text(html_content, config=parser_config)

    return text_content
# Does LD+JSON exist with a @type=='product' and a .price set anywhere?
def has_ldjson_product_info(content):
    try:
        lc = content.lower()
        if 'application/ld+json' in lc and lc.count('"price"') == 1 and '"pricecurrency"' in lc:
        # Better than .lower() which can use a lot of ram
        if (re.search(r'application/ld\+json', content, re.IGNORECASE) and
                re.search(r'"price"', content, re.IGNORECASE) and
                re.search(r'"pricecurrency"', content, re.IGNORECASE)):
            return True

    # On some pages this is really terribly expensive when they don't really need it
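Editor's note: the hunk above replaces content.lower() (which allocates a full lowercased copy of the page) with case-insensitive regex searches that scan the string in place. Note a small behavioural loosening: the old code also required exactly one "price" occurrence, while the new version only checks presence. A rough equivalence sketch:

import re

content = '<script type="application/LD+JSON">{"price": "9.99", "priceCurrency": "USD"}</script>'

# Old approach - allocates a full lowercased copy just to substring-test it:
found_old = 'application/ld+json' in content.lower()

# New approach - re.IGNORECASE scans the original string, no copy:
found_new = bool(re.search(r'application/ld\+json', content, re.IGNORECASE))

assert found_old is True and found_new is True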
@@ -503,3 +574,43 @@ def get_triggered_text(content, trigger_text):
        i += 1

    return triggered_text


def extract_title(data: bytes | str, sniff_bytes: int = 2048, scan_chars: int = 8192) -> str | None:
    try:
        # Only decode/process the prefix we need for title extraction
        match data:
            case bytes() if data.startswith((b"\xff\xfe", b"\xfe\xff")):
                prefix = data[:scan_chars * 2].decode("utf-16", errors="replace")
            case bytes() if data.startswith((b"\xff\xfe\x00\x00", b"\x00\x00\xfe\xff")):
                prefix = data[:scan_chars * 4].decode("utf-32", errors="replace")
            case bytes():
                try:
                    prefix = data[:scan_chars].decode("utf-8")
                except UnicodeDecodeError:
                    try:
                        head = data[:sniff_bytes].decode("ascii", errors="ignore")
                        if m := (META_CS.search(head) or META_CT.search(head)):
                            enc = m.group(1).lower()
                        else:
                            enc = "cp1252"
                        prefix = data[:scan_chars * 2].decode(enc, errors="replace")
                    except Exception as e:
                        logger.error(f"Title extraction encoding detection failed: {e}")
                        return None
            case str():
                prefix = data[:scan_chars] if len(data) > scan_chars else data
            case _:
                logger.error(f"Title extraction received unsupported data type: {type(data)}")
                return None

        # Search only in the prefix
        if m := TITLE_RE.search(prefix):
            title = html.unescape(" ".join(m.group(1).split())).strip()
            # Some safe limit
            return title[:2000]
        return None

    except Exception as e:
        logger.error(f"Title extraction failed: {e}")
        return None
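Editor's note: a usage sketch for the new extract_title() helper above. It decodes only a bounded prefix of the response (BOM-sniffed UTF-16/32, then UTF-8, then a <meta> charset fallback), so even a large body stays cheap:

# extract_title is defined above in html_tools
html_bytes = b'<html><head><title>  Example &amp; Co  </title></head><body>...</body></html>'
print(extract_title(html_bytes))   # 'Example & Co' - whitespace collapsed, entities unescaped

print(extract_title('<html><title>str input works too</title></html>'))
print(extract_title(b'\x00\x01binary with no title'))   # None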
changedetectionio/jinja2_custom/__init__.py (new file, 22 lines)
@@ -0,0 +1,22 @@
"""
Jinja2 custom extensions and safe rendering utilities.
"""
from .extensions.TimeExtension import TimeExtension
from .safe_jinja import (
    render,
    render_fully_escaped,
    create_jinja_env,
    JINJA2_MAX_RETURN_PAYLOAD_SIZE,
    DEFAULT_JINJA2_EXTENSIONS,
)
from .plugins.regex import regex_replace

__all__ = [
    'TimeExtension',
    'render',
    'render_fully_escaped',
    'create_jinja_env',
    'JINJA2_MAX_RETURN_PAYLOAD_SIZE',
    'DEFAULT_JINJA2_EXTENSIONS',
    'regex_replace',
]
changedetectionio/jinja2_custom/extensions/TimeExtension.py (new file, 221 lines)
@@ -0,0 +1,221 @@
"""
Jinja2 TimeExtension - Custom date/time handling for templates.

This extension provides the {% now %} tag for Jinja2 templates, offering timezone-aware
date/time formatting with support for time offsets.

Why This Extension Exists:
    The Arrow library has a now() function (arrow.now()), but Jinja2 templates cannot
    directly call Python functions - they need extensions or filters to expose functionality.

    This TimeExtension serves as a Jinja2-to-Arrow bridge that:

    1. Makes Arrow accessible in templates - Jinja2 requires registering functions/tags
       through extensions. You cannot use arrow.now() directly in a template.

    2. Provides template-friendly syntax - Instead of complex Python code, you get clean tags:
       {% now 'UTC' %}
       {% now 'UTC' + 'hours=2' %}
       {% now 'Europe/London', '%Y-%m-%d' %}

    3. Adds convenience features on top of Arrow:
       - Default timezone from environment variable (TZ) or config
       - Default datetime format configuration
       - Offset syntax parsing: 'hours=2,minutes=30' → shift(hours=2, minutes=30)
       - Empty string timezone support to use configured defaults

    4. Maintains security - Works within Jinja2's sandboxed environment so users
       cannot access arbitrary Python code or objects.

Essentially, this is a Jinja2 wrapper around arrow.now() and arrow.shift() that
provides user-friendly template syntax while maintaining security.

Basic Usage:
    {% now 'UTC' %}
    # Output: Wed, 09 Dec 2015 23:33:01

Custom Format:
    {% now 'UTC', '%Y-%m-%d %H:%M:%S' %}
    # Output: 2015-12-09 23:33:01

Timezone Support:
    {% now 'America/New_York' %}
    {% now 'Europe/London' %}
    {% now '' %}  # Uses default timezone from environment.default_timezone

Time Offsets (Addition):
    {% now 'UTC' + 'hours=2' %}
    {% now 'UTC' + 'hours=2,minutes=30' %}
    {% now 'UTC' + 'days=1,hours=2,minutes=15,seconds=10' %}

Time Offsets (Subtraction):
    {% now 'UTC' - 'minutes=11' %}
    {% now 'UTC' - 'days=2,minutes=33,seconds=1' %}

Time Offsets with Custom Format:
    {% now 'UTC' + 'hours=2', '%Y-%m-%d %H:%M:%S' %}
    # Output: 2015-12-10 01:33:01

Weekday Support (for finding next/previous weekday):
    {% now 'UTC' + 'weekday=0' %}  # Next Monday (0=Monday, 6=Sunday)
    {% now 'UTC' + 'weekday=4' %}  # Next Friday

Configuration:
    - Default timezone: Set via TZ environment variable or override environment.default_timezone
    - Default format: '%a, %d %b %Y %H:%M:%S' (can be overridden via environment.datetime_format)

Environment Customization:
    from changedetectionio.jinja2_custom import create_jinja_env

    jinja2_env = create_jinja_env()
    jinja2_env.default_timezone = 'America/New_York'  # Override default timezone
    jinja2_env.datetime_format = '%Y-%m-%d %H:%M'     # Override default format

Supported Offset Parameters:
    - years, months, weeks, days
    - hours, minutes, seconds, microseconds
    - weekday (0=Monday through 6=Sunday, must be integer)

Note:
    This extension uses the Arrow library for timezone-aware datetime handling.
    All timezone names should be valid IANA timezone identifiers (e.g., 'America/New_York').
"""
import arrow

from jinja2 import nodes
from jinja2.ext import Extension
import os

class TimeExtension(Extension):
    """
    Jinja2 Extension providing the {% now %} tag for timezone-aware date/time rendering.

    This extension adds two attributes to the Jinja2 environment:
    - datetime_format: Default strftime format string (default: '%a, %d %b %Y %H:%M:%S')
    - default_timezone: Default timezone for rendering (default: TZ env var or 'UTC')

    Both can be overridden after environment creation by setting the attributes directly.
    """

    tags = {'now'}

    def __init__(self, environment):
        """Jinja2 Extension constructor."""
        super().__init__(environment)

        environment.extend(
            datetime_format='%a, %d %b %Y %H:%M:%S',
            default_timezone=os.getenv('TZ', 'UTC').strip()
        )

    def _datetime(self, timezone, operator, offset, datetime_format):
        """
        Get current datetime with time offset applied.

        Args:
            timezone: IANA timezone identifier (e.g., 'UTC', 'America/New_York') or empty string for default
            operator: '+' for addition or '-' for subtraction
            offset: Comma-separated offset parameters (e.g., 'hours=2,minutes=30')
            datetime_format: strftime format string or None to use environment default

        Returns:
            Formatted datetime string with offset applied

        Example:
            _datetime('UTC', '+', 'hours=2,minutes=30', '%Y-%m-%d %H:%M:%S')
            # Returns current time + 2.5 hours
        """
        # Use default timezone if none specified
        if not timezone or timezone == '':
            timezone = self.environment.default_timezone

        d = arrow.now(timezone)

        # parse shift params from offset and include operator
        shift_params = {}
        for param in offset.split(','):
            interval, value = param.split('=')
            shift_params[interval.strip()] = float(operator + value.strip())

        # The weekday parameter cannot be a float
        if 'weekday' in shift_params:
            shift_params['weekday'] = int(shift_params['weekday'])

        d = d.shift(**shift_params)

        if datetime_format is None:
            datetime_format = self.environment.datetime_format
        return d.strftime(datetime_format)

    def _now(self, timezone, datetime_format):
        """
        Get current datetime without any offset.

        Args:
            timezone: IANA timezone identifier (e.g., 'UTC', 'America/New_York') or empty string for default
            datetime_format: strftime format string or None to use environment default

        Returns:
            Formatted datetime string for current time

        Example:
            _now('America/New_York', '%Y-%m-%d %H:%M:%S')
            # Returns current time in New York timezone
        """
        # Use default timezone if none specified
        if not timezone or timezone == '':
            timezone = self.environment.default_timezone

        if datetime_format is None:
            datetime_format = self.environment.datetime_format
        return arrow.now(timezone).strftime(datetime_format)

    def parse(self, parser):
        """
        Parse the {% now %} tag and generate appropriate AST nodes.

        This method is called by Jinja2 when it encounters a {% now %} tag.
        It parses the tag syntax and determines whether to call _now() or _datetime()
        based on whether offset operations (+ or -) are present.

        Supported syntax:
            {% now 'timezone' %}                       -> calls _now()
            {% now 'timezone', 'format' %}             -> calls _now()
            {% now 'timezone' + 'offset' %}            -> calls _datetime()
            {% now 'timezone' + 'offset', 'format' %}  -> calls _datetime()
            {% now 'timezone' - 'offset', 'format' %}  -> calls _datetime()

        Args:
            parser: Jinja2 parser instance

        Returns:
            nodes.Output: AST output node containing the formatted datetime string
        """
        lineno = next(parser.stream).lineno

        node = parser.parse_expression()

        if parser.stream.skip_if('comma'):
            datetime_format = parser.parse_expression()
        else:
            datetime_format = nodes.Const(None)

        if isinstance(node, nodes.Add):
            call_method = self.call_method(
                '_datetime',
                [node.left, nodes.Const('+'), node.right, datetime_format],
                lineno=lineno,
            )
        elif isinstance(node, nodes.Sub):
            call_method = self.call_method(
                '_datetime',
                [node.left, nodes.Const('-'), node.right, datetime_format],
                lineno=lineno,
            )
        else:
            call_method = self.call_method(
                '_now',
                [node, datetime_format],
                lineno=lineno,
            )
        return nodes.Output([call_method], lineno=lineno)
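Editor's note: a small end-to-end sketch of the {% now %} tag provided by this extension, using the sandboxed environment factory added in safe_jinja.py further below:

from changedetectionio.jinja2_custom import create_jinja_env

env = create_jinja_env()
print(env.from_string("{% now 'UTC', '%Y-%m-%d' %}").render())
print(env.from_string("{% now 'UTC' + 'days=1,hours=2', '%Y-%m-%d %H:%M' %}").render())
print(env.from_string("{% now '' %}").render())   # '' falls back to env.default_timezone (TZ or UTC)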
changedetectionio/jinja2_custom/plugins/__init__.py (new file, 6 lines)
@@ -0,0 +1,6 @@
"""
Jinja2 custom filter plugins for changedetection.io
"""
from .regex import regex_replace

__all__ = ['regex_replace']
changedetectionio/jinja2_custom/plugins/regex.py (new file, 98 lines)
@@ -0,0 +1,98 @@
"""
Regex filter plugin for Jinja2 templates.

Provides regex_replace filter for pattern-based string replacements in templates.
"""
import re
import signal
from loguru import logger


def regex_replace(value: str, pattern: str, replacement: str = '', count: int = 0) -> str:
    """
    Replace occurrences of a regex pattern in a string.

    Security: Protected against ReDoS (Regular Expression Denial of Service) attacks:
    - Limits input value size to prevent excessive processing
    - Uses timeout mechanism to prevent runaway regex operations
    - Validates pattern complexity to prevent catastrophic backtracking

    Args:
        value: The input string to perform replacements on
        pattern: The regex pattern to search for
        replacement: The replacement string (default: '')
        count: Maximum number of replacements (0 = replace all, default: 0)

    Returns:
        String with replacements applied, or original value on error

    Example:
        {{ "hello world" | regex_replace("world", "universe") }}
        {{ diff | regex_replace("<td>([^<]+)</td><td>([^<]+)</td>", "Label1: \\1\\nLabel2: \\2") }}

    Security limits:
        - Maximum input size: 10MB
        - Maximum pattern length: 500 characters
        - Operation timeout: 10 seconds
        - Dangerous nested quantifier patterns are rejected
    """
    # Security limits
    MAX_INPUT_SIZE = 1024 * 1024 * 10  # 10MB max input size
    MAX_PATTERN_LENGTH = 500  # Maximum regex pattern length
    REGEX_TIMEOUT_SECONDS = 10  # Maximum time for regex operation

    # Validate input sizes
    value_str = str(value)
    if len(value_str) > MAX_INPUT_SIZE:
        logger.warning(f"regex_replace: Input too large ({len(value_str)} bytes), truncating")
        value_str = value_str[:MAX_INPUT_SIZE]

    if len(pattern) > MAX_PATTERN_LENGTH:
        logger.warning(f"regex_replace: Pattern too long ({len(pattern)} chars), rejecting")
        return value_str

    # Check for potentially dangerous patterns (basic checks)
    # Nested quantifiers like (a+)+ can cause catastrophic backtracking
    dangerous_patterns = [
        r'\([^)]*\+[^)]*\)\+',  # (x+)+
        r'\([^)]*\*[^)]*\)\+',  # (x*)+
        r'\([^)]*\+[^)]*\)\*',  # (x+)*
        r'\([^)]*\*[^)]*\)\*',  # (x*)*
    ]

    for dangerous in dangerous_patterns:
        if re.search(dangerous, pattern):
            logger.warning(f"regex_replace: Potentially dangerous pattern detected: {pattern}")
            return value_str

    def timeout_handler(signum, frame):
        raise TimeoutError("Regex operation timed out")

    try:
        # Set up timeout for regex operation (Unix-like systems only)
        # This prevents ReDoS attacks
        old_handler = None
        if hasattr(signal, 'SIGALRM'):
            old_handler = signal.signal(signal.SIGALRM, timeout_handler)
            signal.alarm(REGEX_TIMEOUT_SECONDS)

        try:
            result = re.sub(pattern, replacement, value_str, count=count)
        finally:
            # Cancel the alarm
            if hasattr(signal, 'SIGALRM'):
                signal.alarm(0)
                if old_handler is not None:
                    signal.signal(signal.SIGALRM, old_handler)

        return result

    except TimeoutError:
        logger.error(f"regex_replace: Regex operation timed out - possible ReDoS attack. Pattern: {pattern}")
        return value_str
    except re.error as e:
        logger.warning(f"regex_replace: Invalid regex pattern: {e}")
        return value_str
    except Exception as e:
        logger.error(f"regex_replace: Unexpected error: {e}")
        return value_str
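Editor's note: a behaviour sketch for the filter above. It fails soft, returning the input unchanged when a pattern is rejected or invalid:

print(regex_replace("hello world", "world", "universe"))   # 'hello universe'
print(regex_replace("aaaa!", r"(a+)+", "x"))                # 'aaaa!' - nested quantifier rejected
print(regex_replace("abc", r"[unclosed", "x"))              # 'abc'  - invalid pattern, logged and ignored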
changedetectionio/jinja2_custom/safe_jinja.py (new file, 58 lines)
@@ -0,0 +1,58 @@
"""
Safe Jinja2 render with max payload sizes

See https://jinja.palletsprojects.com/en/3.1.x/sandbox/#security-considerations
"""

import jinja2.sandbox
import typing as t
import os
from .extensions.TimeExtension import TimeExtension
from .plugins import regex_replace

JINJA2_MAX_RETURN_PAYLOAD_SIZE = 1024 * int(os.getenv("JINJA2_MAX_RETURN_PAYLOAD_SIZE_KB", 1024 * 10))

# Default extensions - can be overridden in create_jinja_env()
DEFAULT_JINJA2_EXTENSIONS = [TimeExtension]

def create_jinja_env(extensions=None, **kwargs) -> jinja2.sandbox.ImmutableSandboxedEnvironment:
    """
    Create a sandboxed Jinja2 environment with our custom extensions and default timezone.

    Args:
        extensions: List of extension classes to use (defaults to DEFAULT_JINJA2_EXTENSIONS)
        **kwargs: Additional arguments to pass to ImmutableSandboxedEnvironment

    Returns:
        Configured Jinja2 environment
    """
    if extensions is None:
        extensions = DEFAULT_JINJA2_EXTENSIONS

    jinja2_env = jinja2.sandbox.ImmutableSandboxedEnvironment(
        extensions=extensions,
        **kwargs
    )

    # Get default timezone from environment variable
    default_timezone = os.getenv('TZ', 'UTC').strip()
    jinja2_env.default_timezone = default_timezone

    # Register custom filters
    jinja2_env.filters['regex_replace'] = regex_replace

    return jinja2_env


# This is used for notifications etc, so actually it's OK to send custom HTML such as <a href> etc, but it should limit what data is available.
# (Which also limits available functions that could be called)
def render(template_str, **args: t.Any) -> str:
    jinja2_env = create_jinja_env()
    output = jinja2_env.from_string(template_str).render(args)
    return output[:JINJA2_MAX_RETURN_PAYLOAD_SIZE]

def render_fully_escaped(content):
    env = jinja2.sandbox.ImmutableSandboxedEnvironment(autoescape=True)
    template = env.from_string("{{ some_html|e }}")
    return template.render(some_html=content)
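Editor's note: a usage sketch for the two render helpers above. render() is for notification bodies (HTML allowed, output capped at JINJA2_MAX_RETURN_PAYLOAD_SIZE); render_fully_escaped() is for reflecting untrusted content back into a page:

from changedetectionio.jinja2_custom import render, render_fully_escaped

print(render("Watch changed at {% now 'UTC' %}"))
print(render("{{ diff | regex_replace('foo', 'bar') }}", diff="foo baz"))   # 'bar baz'
print(render_fully_escaped('<script>alert(1)</script>'))                    # '&lt;script&gt;alert(1)&lt;/script&gt;'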
@@ -1,4 +1,8 @@
from os import getenv
from copy import deepcopy

from changedetectionio.blueprint.rss import RSS_FORMAT_TYPES

from changedetectionio.notification import (
    default_notification_body,
    default_notification_format,
@@ -9,6 +13,8 @@ from changedetectionio.notification import (
_FILTER_FAILURE_THRESHOLD_ATTEMPTS_DEFAULT = 6
DEFAULT_SETTINGS_HEADERS_USERAGENT='Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36'


class model(dict):
    base_config = {
        'note': "Hello! If you change this file manually, please be sure to restart your changedetection.io instance!",
@@ -34,12 +40,12 @@ class model(dict):
                'api_access_token_enabled': True,
                'base_url' : None,
                'empty_pages_are_a_change': False,
                'extract_title_as_title': False,
                'fetch_backend': getenv("DEFAULT_FETCH_BACKEND", "html_requests"),
                'filter_failure_notification_threshold_attempts': _FILTER_FAILURE_THRESHOLD_ATTEMPTS_DEFAULT,
                'global_ignore_text': [],  # List of text to ignore when calculating the comparison checksum
                'global_subtractive_selectors': [],
                'ignore_whitespace': True,
                'ignore_status_codes': False,  #@todo implement, as ternary.
                'notification_body': default_notification_body,
                'notification_format': default_notification_format,
                'notification_title': default_notification_title,
@@ -48,19 +54,29 @@ class model(dict):
                'password': False,
                'render_anchor_tag_content': False,
                'rss_access_token': None,
                'rss_content_format': RSS_FORMAT_TYPES[0][0],
                'rss_hide_muted_watches': True,
                'rss_reader_mode': False,
                'scheduler_timezone_default': None,  # Default IANA timezone name
                'schema_version' : 0,
                'shared_diff_access': False,
                'webdriver_delay': None,  # Extra delay in seconds before extracting text
                'strip_ignored_lines': False,
                'tags': {},  #@todo use Tag.model initialisers
                'timezone': None,  # Default IANA timezone name
                'webdriver_delay': None,  # Extra delay in seconds before extracting text
                'ui': {
                    'use_page_title_in_list': True,
                    'open_diff_in_new_tab': True,
                    'socket_io_enabled': True,
                    'favicons_enabled': True
                },
            }
        }
    }

    def __init__(self, *arg, **kw):
        super(model, self).__init__(*arg, **kw)
        self.update(self.base_config)
        # CRITICAL: deepcopy to avoid sharing mutable objects between instances
        self.update(deepcopy(self.base_config))


def parse_headers_from_text_file(filepath):
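Editor's note: the deepcopy change above matters because base_config is a class attribute holding nested dicts and lists; self.update(self.base_config) makes every instance share those inner objects. A minimal, self-contained illustration of the bug being fixed (names here are hypothetical):

from copy import deepcopy

class Settings(dict):
    base_config = {'application': {'tags': {}}}

    def __init__(self, safe=True):
        super().__init__()
        # safe=False reproduces the old behaviour: inner dicts are shared
        self.update(deepcopy(self.base_config) if safe else self.base_config)

a, b = Settings(safe=False), Settings(safe=False)
a['application']['tags']['news'] = {}
print('news' in b['application']['tags'])   # True - leaked into the other instance!

c, d = Settings(safe=True), Settings(safe=True)
c['application']['tags']['news'] = {}
print('news' in d['application']['tags'])   # False - deepcopy keeps instances independent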
@@ -1,37 +1,24 @@
from blinker import signal
from changedetectionio.validate_url import is_safe_valid_url

from changedetectionio.strtobool import strtobool
from changedetectionio.safe_jinja import render as jinja_render
from changedetectionio.jinja2_custom import render as jinja_render
from . import watch_base
import os
import re
from pathlib import Path
from loguru import logger

from .. import jinja2_custom as safe_jinja
from ..diff import ADDED_PLACEMARKER_OPEN
from ..html_tools import TRANSLATE_WHITESPACE_TABLE

# Allowable protocols, protects against javascript: etc
# file:// is further checked by ALLOW_FILE_URI
SAFE_PROTOCOL_REGEX='^(http|https|ftp|file):'
FAVICON_RESAVE_THRESHOLD_SECONDS=86400


minimum_seconds_recheck_time = int(os.getenv('MINIMUM_SECONDS_RECHECK_TIME', 3))
mtable = {'seconds': 1, 'minutes': 60, 'hours': 3600, 'days': 86400, 'weeks': 86400 * 7}


def is_safe_url(test_url):
    # See https://github.com/dgtlmoon/changedetection.io/issues/1358

    # Remove 'source:' prefix so we don't get 'source:javascript:' etc
    # 'source:' is a valid way to tell us to return the source

    r = re.compile(re.escape('source:'), re.IGNORECASE)
    test_url = r.sub('', test_url)

    pattern = re.compile(os.getenv('SAFE_PROTOCOL_REGEX', SAFE_PROTOCOL_REGEX), re.IGNORECASE)
    if not pattern.match(test_url.strip()):
        return False

    return True


class model(watch_base):
    __newest_history_key = None
    __history_n = 0
@@ -41,6 +28,7 @@ class model(watch_base):
        self.__datastore_path = kw.get('datastore_path')
        if kw.get('datastore_path'):
            del kw['datastore_path']

        super(model, self).__init__(*arg, **kw)
        if kw.get('default'):
            self.update(kw['default'])
@@ -60,6 +48,10 @@ class model(watch_base):

        return False

    @property
    def has_unviewed(self):
        return int(self.newest_history_key) > int(self['last_viewed']) and self.__history_n >= 2

    def ensure_data_dir_exists(self):
        if not os.path.isdir(self.watch_data_dir):
            logger.debug(f"> Creating data dir {self.watch_data_dir}")
@@ -69,7 +61,7 @@ class model(watch_base):
    def link(self):

        url = self.get('url', '')
        if not is_safe_url(url):
        if not is_safe_valid_url(url):
            return 'DISABLED'

        ready_url = url
@@ -79,11 +71,10 @@ class model(watch_base):
            ready_url = jinja_render(template_str=url)
        except Exception as e:
            logger.critical(f"Invalid URL template for: '{url}' - {str(e)}")
            from flask import (
                flash, Markup, url_for
            )
            from flask import flash, url_for
            from markupsafe import Markup
            message = Markup('<a href="{}#general">The URL {} is invalid and cannot be used, click to edit</a>'.format(
                url_for('edit_page', uuid=self.get('uuid')), self.get('url', '')))
                url_for('ui.ui_edit.edit_page', uuid=self.get('uuid')), self.get('url', '')))
            flash(message, 'error')
            return ''

@@ -91,10 +82,17 @@ class model(watch_base):
            ready_url=ready_url.replace('source:', '')

        # Also double check it after any Jinja2 formatting just in case
        if not is_safe_url(ready_url):
        if not is_safe_valid_url(ready_url):
            return 'DISABLED'
        return ready_url

    @property
    def domain_only_from_link(self):
        from urllib.parse import urlparse
        parsed = urlparse(self.link)
        domain = parsed.hostname
        return domain

    def clear_watch(self):
        import pathlib

@@ -120,6 +118,10 @@ class model(watch_base):
            'remote_server_reply': None,
            'track_ldjson_price_data': None
        })
        watch_check_update = signal('watch_check_update')
        if watch_check_update:
            watch_check_update.send(watch_uuid=self.get('uuid'))

        return

    @property
@@ -148,8 +150,8 @@ class model(watch_base):

    @property
    def label(self):
        # Used for sorting
        return self.get('title') if self.get('title') else self.get('url')
        # Used for sorting, display, etc
        return self.get('title') or self.get('page_title') or self.link

    @property
    def last_changed(self):
@@ -296,11 +298,11 @@ class model(watch_base):
        with open(filepath, 'r', encoding='utf-8', errors='ignore') as f:
            return f.read()

    # Save some text file to the appropriate path and bump the history
    # result_obj from fetch_site_status.run()
    def save_history_text(self, contents, timestamp, snapshot_id):
        import brotli
        import tempfile
        logger.trace(f"{self.get('uuid')} - Updating history.txt with timestamp {timestamp}")

        self.ensure_data_dir_exists()
@@ -308,26 +310,37 @@ class model(watch_base):
        threshold = int(os.getenv('SNAPSHOT_BROTLI_COMPRESSION_THRESHOLD', 1024))
        skip_brotli = strtobool(os.getenv('DISABLE_BROTLI_TEXT_SNAPSHOT', 'False'))

        # Decide on snapshot filename and destination path
        if not skip_brotli and len(contents) > threshold:
            snapshot_fname = f"{snapshot_id}.txt.br"
            dest = os.path.join(self.watch_data_dir, snapshot_fname)
            if not os.path.exists(dest):
                with open(dest, 'wb') as f:
                    f.write(brotli.compress(contents.encode('utf-8'), mode=brotli.MODE_TEXT))
            encoded_data = brotli.compress(contents.encode('utf-8'), mode=brotli.MODE_TEXT)
        else:
            snapshot_fname = f"{snapshot_id}.txt"
            dest = os.path.join(self.watch_data_dir, snapshot_fname)
            if not os.path.exists(dest):
                with open(dest, 'wb') as f:
                    f.write(contents.encode('utf-8'))
            encoded_data = contents.encode('utf-8')

        # Append to index
        # @todo check last char was \n
        dest = os.path.join(self.watch_data_dir, snapshot_fname)

        # Write snapshot file atomically if it doesn't exist
        if not os.path.exists(dest):
            with tempfile.NamedTemporaryFile('wb', delete=False, dir=self.watch_data_dir) as tmp:
                tmp.write(encoded_data)
                tmp.flush()
                os.fsync(tmp.fileno())
                tmp_path = tmp.name
            os.rename(tmp_path, dest)

        # Append to history.txt atomically
        index_fname = os.path.join(self.watch_data_dir, "history.txt")
        with open(index_fname, 'a') as f:
            f.write("{},{}\n".format(timestamp, snapshot_fname))
            f.close()
        index_line = f"{timestamp},{snapshot_fname}\n"

        # Let's try force flush here since it's usually a very small file
        # If this still fails in the future then try reading all to memory first, re-writing etc
        with open(index_fname, 'a', encoding='utf-8') as f:
            f.write(index_line)
            f.flush()
            os.fsync(f.fileno())

        # Update internal state
        self.__newest_history_key = timestamp
        self.__history_n += 1
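Editor's note: the save_history_text() rewrite above follows the standard write-to-temp / fsync / rename recipe, so a crash mid-write can never leave a truncated snapshot behind. The pattern in isolation (a minimal sketch, with a hypothetical helper name):

import os
import tempfile

def atomic_write(dest_path: str, data: bytes) -> None:
    """Write data so readers see either the old file or the complete new one."""
    dirname = os.path.dirname(dest_path)
    # The temp file must live on the same filesystem as dest_path for rename() to be atomic
    with tempfile.NamedTemporaryFile('wb', delete=False, dir=dirname) as tmp:
        tmp.write(data)
        tmp.flush()
        os.fsync(tmp.fileno())   # force bytes to disk before the rename
        tmp_path = tmp.name
    os.rename(tmp_path, dest_path)   # atomic on POSIX; os.replace() would also cover Windows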
@@ -390,6 +403,154 @@ class model(watch_base):
|
||||
# False is not an option for AppRise, must be type None
|
||||
return None
|
||||
|
||||
def favicon_is_expired(self):
|
||||
favicon_fname = self.get_favicon_filename()
|
||||
import glob
|
||||
import time
|
||||
|
||||
if not favicon_fname:
|
||||
return True
|
||||
try:
|
||||
fname = next(iter(glob.glob(os.path.join(self.watch_data_dir, "favicon.*"))), None)
|
||||
logger.trace(f"Favicon file maybe found at {fname}")
|
||||
if os.path.isfile(fname):
|
||||
file_age = int(time.time() - os.path.getmtime(fname))
|
||||
logger.trace(f"Favicon file age is {file_age}s")
|
||||
if file_age < FAVICON_RESAVE_THRESHOLD_SECONDS:
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.critical(f"Exception checking Favicon age {str(e)}")
|
||||
return True
|
||||
|
||||
# Also in the case that the file didnt exist
|
||||
return True
|
||||
|
||||
    def bump_favicon(self, url, favicon_base_64: str) -> None:
        from urllib.parse import urlparse
        import base64
        import binascii
        decoded = None

        if url:
            try:
                parsed = urlparse(url)
                filename = os.path.basename(parsed.path)
                (base, extension) = filename.lower().strip().rsplit('.', 1)
            except ValueError:
                logger.error(f"UUID: {self.get('uuid')} Can't work out file extension from '{url}'")
                return None
        else:
            # Assume favicon.ico
            base = "favicon"
            extension = "ico"

        fname = os.path.join(self.watch_data_dir, f"favicon.{extension}")

        try:
            # validate=True makes sure the string only contains valid base64 chars
            decoded = base64.b64decode(favicon_base_64, validate=True)
        except (binascii.Error, ValueError) as e:
            logger.warning(f"UUID: {self.get('uuid')} FavIcon save data (Base64) corrupt? {str(e)}")
        else:
            if decoded:
                try:
                    with open(fname, 'wb') as f:
                        f.write(decoded)
                    # A signal that could trigger the socket server to update the browser also
                    watch_check_update = signal('watch_favicon_bump')
                    if watch_check_update:
                        watch_check_update.send(watch_uuid=self.get('uuid'))

                except Exception as e:
                    logger.warning(f"UUID: {self.get('uuid')} error saving FavIcon to {fname} - {str(e)}")

        # @todo - Store some checksum and only write when it's different
        logger.debug(f"UUID: {self.get('uuid')} updated favicon at {fname}")
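The validate=True flag used above is what turns corrupt favicon payloads into a catchable error: by default b64decode() silently discards characters outside the base64 alphabet. A small illustration using only the standard library:

import base64
import binascii

good = base64.b64encode(b'\x89PNG fake favicon bytes').decode()
print(base64.b64decode(good, validate=True))  # round-trips fine

corrupt = good + '$$$$'  # '$' is not in the base64 alphabet
try:
    base64.b64decode(corrupt, validate=True)  # raises binascii.Error
except (binascii.Error, ValueError) as e:
    print(f"rejected corrupt favicon payload: {e}")

# Without validate=True the '$' characters are silently dropped and the
# decode succeeds, so damaged input could be written to disk unnoticed.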
    def get_favicon_filename(self) -> str | None:
        """
        Find any favicon.* file in the watch data directory
        and return the filename of the newest one.

        Returns:
            str: Filename of the newest favicon file, or None if not found.
        """
        import glob

        # Search for all favicon.* files
        files = glob.glob(os.path.join(self.watch_data_dir, "favicon.*"))

        if not files:
            return None

        # Find the newest by modification time
        newest_file = max(files, key=os.path.getmtime)
        return os.path.basename(newest_file)
    def get_screenshot_as_thumbnail(self, max_age=3200):
        """Return the path to a square thumbnail of the most recent screenshot.

        Creates a 350x350 pixel thumbnail from the top portion of the screenshot.

        Args:
            max_age: Maximum age in seconds before recreating the thumbnail

        Returns:
            Path to the thumbnail, or None if no screenshot exists
        """
        import os
        import time

        thumbnail_path = os.path.join(self.watch_data_dir, "thumbnail.jpeg")
        top_trim = 500  # Pixels from the top of the screenshot to use

        screenshot_path = self.get_screenshot()
        if not screenshot_path:
            return None

        # Reuse the thumbnail if it's fresh and the screenshot hasn't changed
        if os.path.isfile(thumbnail_path):
            thumbnail_mtime = os.path.getmtime(thumbnail_path)
            screenshot_mtime = os.path.getmtime(screenshot_path)

            if screenshot_mtime <= thumbnail_mtime and time.time() - thumbnail_mtime < max_age:
                return thumbnail_path

        try:
            from PIL import Image

            with Image.open(screenshot_path) as img:
                # Crop the top portion first (full width, top_trim height)
                top_crop_height = min(top_trim, img.height)
                img = img.crop((0, 0, img.width, top_crop_height))

                # Create a smaller intermediate image (to reduce memory usage)
                aspect = img.width / img.height
                interim_width = min(top_trim, img.width)
                interim_height = int(interim_width / aspect) if aspect > 0 else top_trim
                img = img.resize((interim_width, interim_height), Image.NEAREST)

                # Convert to RGB if needed
                if img.mode != 'RGB':
                    img = img.convert('RGB')

                # Crop to a square from the top center
                square_size = min(img.width, img.height)
                left = (img.width - square_size) // 2
                img = img.crop((left, 0, left + square_size, square_size))

                # Final resize to the exact thumbnail size with a better filter
                img = img.resize((350, 350), Image.BILINEAR)

                # Save with optimized settings
                img.save(thumbnail_path, "JPEG", quality=75, optimize=True)

            return thumbnail_path

        except Exception as e:
            logger.error(f"Error creating thumbnail for {self.get('uuid')}: {str(e)}")
            return None
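To make the resize arithmetic concrete, here is what a hypothetical 1200x3000 screenshot goes through under the settings above (a sketch that only assumes Pillow is installed):

from PIL import Image

top_trim = 500
img = Image.new('RGB', (1200, 3000), 'white')  # stand-in for a tall page screenshot

# 1. Keep only the top strip: 1200x500
img = img.crop((0, 0, img.width, min(top_trim, img.height)))

# 2. Cheap NEAREST shrink to bound memory: aspect 1200/500 = 2.4, so 500x208
aspect = img.width / img.height
interim_width = min(top_trim, img.width)
interim_height = int(interim_width / aspect)
img = img.resize((interim_width, interim_height), Image.NEAREST)

# 3. Square crop from the top center: 208x208
square = min(img.width, img.height)
left = (img.width - square) // 2
img = img.crop((left, 0, left + square, square))

# 4. Final quality resize to the fixed output size
img = img.resize((350, 350), Image.BILINEAR)
print(img.size)  # (350, 350)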
    def __get_file_ctime(self, filename):
        fname = os.path.join(self.watch_data_dir, filename)
        if os.path.isfile(fname):
@@ -483,7 +644,7 @@ class model(watch_base):
        if res:
            if not csv_writer:
                # A file on the disk can be transferred much faster via flask than a string reply
-                csv_output_filename = 'report.csv'
+                csv_output_filename = f"report-{self.get('uuid')}.csv"
                f = open(os.path.join(self.watch_data_dir, csv_output_filename), 'w')
                # @todo some headers in the future
                #fieldnames = ['Epoch seconds', 'Date']
@@ -527,7 +688,7 @@ class model(watch_base):
    def save_error_text(self, contents):
        self.ensure_data_dir_exists()
        target_path = os.path.join(self.watch_data_dir, "last-error.txt")
-        with open(target_path, 'w') as f:
+        with open(target_path, 'w', encoding='utf-8') as f:
            f.write(contents)

    def save_xpath_data(self, data, as_error=False):
@@ -542,7 +703,10 @@ class model(watch_base):
        self.ensure_data_dir_exists()

        with open(target_path, 'wb') as f:
-            f.write(zlib.compress(json.dumps(data).encode()))
+            if not isinstance(data, str):
+                f.write(zlib.compress(json.dumps(data).encode()))
+            else:
+                f.write(zlib.compress(data.encode()))
            f.close()

        # Save as PNG, PNG is larger but better for doing visual diff in the future
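Either branch above leaves the file holding zlib-compressed UTF-8; only whether it was JSON-serialised first differs. A reader-side sketch (the filename is hypothetical):

import json
import zlib

def read_xpath_data(path: str):
    """Decompress the stored xpath data, deserialising it when it was JSON."""
    with open(path, 'rb') as f:
        raw = zlib.decompress(f.read()).decode('utf-8')
    try:
        return json.loads(raw)  # saved via json.dumps(data)
    except json.JSONDecodeError:
        return raw              # saved as a plain string

# data = read_xpath_data('elements.deflate')  # hypothetical path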
@@ -564,7 +728,7 @@ class model(watch_base):
        import brotli
        filepath = os.path.join(self.watch_data_dir, 'last-fetched.br')

-        if not os.path.isfile(filepath):
+        if not os.path.isfile(filepath) or os.path.getsize(filepath) == 0:
            # If a previous attempt doesn't yet exist, just snarf the previous snapshot instead
            dates = list(self.history.keys())
            if len(dates):
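The added os.path.getsize() check treats a zero-byte last-fetched.br the same as a missing one, since an empty file cannot be a valid brotli stream. A sketch of the read-with-fallback idea, assuming the brotli package and a caller-supplied fallback function:

import os
import brotli

def read_last_fetched(filepath: str, fallback_snapshot_text) -> str:
    # A zero-byte file is as useless as a missing one: decompressing it
    # fails, so fall back to the previous snapshot in both cases.
    if not os.path.isfile(filepath) or os.path.getsize(filepath) == 0:
        return fallback_snapshot_text()
    with open(filepath, 'rb') as f:
        return brotli.decompress(f.read()).decode('utf-8')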
@@ -634,3 +798,44 @@ class model(watch_base):
            if step_n:
                available.append(step_n.group(1))
        return available

    def compile_error_texts(self, has_proxies=None):
        """Compile error texts for this watch.
        Accepts a has_proxies parameter so it works even outside an app context."""
        from flask import url_for
        from markupsafe import Markup

        output = []  # Initialize as a list since we're using append
        last_error = self.get('last_error', '')

        try:
            url_for('settings.settings_page')
        except Exception as e:
            has_app_context = False
        else:
            has_app_context = True

        # Has app+request context, so we can use url_for()
        if has_app_context:
            if last_error:
                if '403' in last_error:
                    if has_proxies:
                        output.append(str(Markup(f"{last_error} - <a href=\"{url_for('settings.settings_page', uuid=self.get('uuid'))}\">Try other proxies/location</a> '")))
                    else:
                        output.append(str(Markup(f"{last_error} - <a href=\"{url_for('settings.settings_page', uuid=self.get('uuid'))}\">Try adding external proxies/locations</a> '")))
                else:
                    output.append(str(Markup(last_error)))

            if self.get('last_notification_error'):
                output.append(str(Markup(f"<div class=\"notification-error\"><a href=\"{url_for('settings.notification_logs')}\">{ self.get('last_notification_error') }</a></div>")))

        else:
            # Lo-fi version - no app context, can't rely on Jinja2 Markup
            if last_error:
                output.append(safe_jinja.render_fully_escaped(last_error))
            if self.get('last_notification_error'):
                output.append(safe_jinja.render_fully_escaped(self.get('last_notification_error')))

        res = "\n".join(output)
        return res
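The try/except around url_for() is a cheap context probe: Flask's url_for() raises RuntimeError when called outside an application/request context, so its failure doubles as the signal to fall back to plain escaped text. The same idea in isolation (the endpoint name here is arbitrary):

from flask import Flask, url_for

app = Flask(__name__)

@app.route('/settings')
def settings_page():
    return 'settings'

def have_app_context() -> bool:
    try:
        url_for('settings_page')
    except Exception:
        return False  # no app/request context: use the lo-fi escaped output
    return True

print(have_app_context())      # False - called outside any context
with app.test_request_context():
    print(have_app_context())  # True - url_for() can build URLs here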
@@ -2,7 +2,8 @@ import os
import uuid

from changedetectionio import strtobool
-from changedetectionio.notification import default_notification_format_for_watch
+USE_SYSTEM_DEFAULT_NOTIFICATION_FORMAT_FOR_WATCH = 'System default'
CONDITIONS_MATCH_LOGIC_DEFAULT = 'ALL'

class watch_base(dict):

@@ -15,13 +16,14 @@ class watch_base(dict):
            'body': None,
            'browser_steps': [],
            'browser_steps_last_error_step': None,
            'conditions': {},
            'conditions_match_logic': CONDITIONS_MATCH_LOGIC_DEFAULT,
            'check_count': 0,
            'check_unique_lines': False,  # On change-detected, compare against all history if it's something new
            'consecutive_filter_failures': 0,  # Every time the CSS/xPath filter cannot be located, reset when all is fine
            'content-type': None,
            'date_created': None,
            'extract_text': [],  # Extract text by regex after filters
            'extract_title_as_title': False,
            'fetch_backend': 'system',  # plaintext, playwright etc
            'fetch_time': 0.0,
            'filter_failure_notification_send': strtobool(os.getenv('FILTER_FAILURE_NOTIFICATION_SEND_DEFAULT', 'True')),

@@ -32,19 +34,22 @@ class watch_base(dict):
            'has_ldjson_price_data': None,
            'headers': {},  # Extra headers to send
            'ignore_text': [],  # List of text to ignore when calculating the comparison checksum
            'ignore_status_codes': None,
            'in_stock_only': True,  # Only trigger change on going to in-stock from out-of-stock
            'include_filters': [],
            'last_checked': 0,
            'last_error': False,
            'last_notification_error': None,
            'last_viewed': 0,  # history key value of the last viewed via the [diff] link
            'method': 'GET',
            'notification_alert_count': 0,
            'notification_body': None,
-            'notification_format': default_notification_format_for_watch,
+            'notification_format': USE_SYSTEM_DEFAULT_NOTIFICATION_FORMAT_FOR_WATCH,
            'notification_muted': False,
            'notification_screenshot': False,  # Include the latest screenshot if available and supported by the apprise URL
            'notification_title': None,
            'notification_urls': [],  # List of URLs to add to the notification Queue (usually AppRise)
            'page_title': None,  # <title> from the page
            'paused': False,
            'previous_md5': False,
            'previous_md5_before_filters': False,  # Used for skipping changedetection entirely

@@ -53,6 +58,7 @@ class watch_base(dict):
            'proxy': None,  # Preferred proxy connection
            'remote_server_reply': None,  # From 'server' reply header
            'sort_text_alphabetically': False,
            'strip_ignored_lines': None,
            'subtractive_selectors': [],
            'tag': '',  # Old system of text name for a tag, to be removed
            'tags': [],  # list of UUIDs to App.Tags

@@ -118,12 +124,13 @@ class watch_base(dict):
                    }
                },
            },
-            'title': None,
+            'title': None,  # An arbitrary field that overrides 'page_title'
            'track_ldjson_price_data': None,
            'trim_text_whitespace': False,
            'remove_duplicate_lines': False,
            'trigger_text': [],  # List of text or regex to wait for until a change is detected
            'url': '',
            'use_page_title_in_list': None,  # None = use system settings
            'uuid': str(uuid.uuid4()),
            'webdriver_delay': None,
            'webdriver_js_execute_code': None,  # Run before change-detection
@@ -1,226 +0,0 @@

import time
from apprise import NotifyFormat
import apprise
from loguru import logger


valid_tokens = {
    'base_url': '',
    'current_snapshot': '',
    'diff': '',
    'diff_added': '',
    'diff_full': '',
    'diff_patch': '',
    'diff_removed': '',
    'diff_url': '',
    'preview_url': '',
    'triggered_text': '',
    'watch_tag': '',
    'watch_title': '',
    'watch_url': '',
    'watch_uuid': '',
}

default_notification_format_for_watch = 'System default'
default_notification_format = 'HTML Color'
default_notification_body = '{{watch_url}} had a change.\n---\n{{diff}}\n---\n'
default_notification_title = 'ChangeDetection.io Notification - {{watch_url}}'

valid_notification_formats = {
    'Text': NotifyFormat.TEXT,
    'Markdown': NotifyFormat.MARKDOWN,
    'HTML': NotifyFormat.HTML,
    'HTML Color': 'htmlcolor',
    # Used only for editing a watch (not for global)
    default_notification_format_for_watch: default_notification_format_for_watch
}


def process_notification(n_object, datastore):
    # so that the custom endpoints are registered
    from changedetectionio.apprise_plugin import apprise_custom_api_call_wrapper

    from .safe_jinja import render as jinja_render
    now = time.time()
    if n_object.get('notification_timestamp'):
        logger.trace(f"Time since queued {now-n_object['notification_timestamp']:.3f}s")
    # Insert variables into the notification content
    notification_parameters = create_notification_parameters(n_object, datastore)

    n_format = valid_notification_formats.get(
        n_object.get('notification_format', default_notification_format),
        valid_notification_formats[default_notification_format],
    )

    # If we arrived with 'System default' then look it up
    if n_format == default_notification_format_for_watch and datastore.data['settings']['application'].get('notification_format') != default_notification_format_for_watch:
        # Initially text or whatever
        n_format = datastore.data['settings']['application'].get('notification_format', valid_notification_formats[default_notification_format])

    logger.trace(f"Complete notification body including Jinja and placeholders calculated in {time.time() - now:.3f}s")

    # https://github.com/caronc/apprise/wiki/Development_LogCapture
    # Anything higher than or equal to WARNING (which covers things like connection errors)
    # is raised as an exception

    sent_objs = []
    from .apprise_asset import asset

    if 'as_async' in n_object:
        asset.async_mode = n_object.get('as_async')

    apobj = apprise.Apprise(debug=True, asset=asset)

    if not n_object.get('notification_urls'):
        return None

    with apprise.LogCapture(level=apprise.logging.DEBUG) as logs:
        for url in n_object['notification_urls']:

            # Get the notification body from the datastore
            n_body = jinja_render(template_str=n_object.get('notification_body', ''), **notification_parameters)
            if n_object.get('notification_format', '').startswith('HTML'):
                n_body = n_body.replace("\n", '<br>')

            n_title = jinja_render(template_str=n_object.get('notification_title', ''), **notification_parameters)

            url = url.strip()
            if url.startswith('#'):
                logger.trace(f"Skipping commented out notification URL - {url}")
                continue

            if not url:
                logger.warning(f"Process Notification: skipping empty notification URL.")
                continue

            logger.info(f">> Process Notification: AppRise notifying {url}")
            url = jinja_render(template_str=url, **notification_parameters)

            # Re 323 - Limit Discord length to their 2000 char total limit or it won't send.
            # Because different notifications may require different pre-processing, run each sequentially :(
            # 2000 bytes minus -
            # 200 bytes for the overhead of the _entire_ json payload, 200 bytes for {tts, wait, content} etc headers
            # Length of URL - in case they specify a longer custom avatar_url

            # So if no avatar_url is specified, add one so it can be correctly calculated into the total payload
            k = '?' if not '?' in url else '&'
            if not 'avatar_url' in url \
                    and not url.startswith('mail') \
                    and not url.startswith('post') \
                    and not url.startswith('get') \
                    and not url.startswith('delete') \
                    and not url.startswith('put'):
                url += k + 'avatar_url=https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/changedetectionio/static/images/avatar-256x256.png'

            if url.startswith('tgram://'):
                # Telegram only supports a limited subset of HTML, remove the '<br>' we place in.
                # re https://github.com/dgtlmoon/changedetection.io/issues/555
                # @todo re-use an existing library we have already imported to strip all non-allowed tags
                n_body = n_body.replace('<br>', '\n')
                n_body = n_body.replace('</br>', '\n')
                # real limit is 4096, but minus some for extra metadata
                payload_max_size = 3600
                body_limit = max(0, payload_max_size - len(n_title))
                n_title = n_title[0:payload_max_size]
                n_body = n_body[0:body_limit]

            elif url.startswith('discord://') or url.startswith('https://discordapp.com/api/webhooks') or url.startswith(
                    'https://discord.com/api'):
                # real limit is 2000, but minus some for extra metadata
                payload_max_size = 1700
                body_limit = max(0, payload_max_size - len(n_title))
                n_title = n_title[0:payload_max_size]
                n_body = n_body[0:body_limit]

            elif url.startswith('mailto'):
                # Apprise will default to HTML, so we need to override it
                # so that what's generated in n_body is in line with what is going to be sent.
                # https://github.com/caronc/apprise/issues/633#issuecomment-1191449321
                if not 'format=' in url and (n_format == 'Text' or n_format == 'Markdown'):
                    prefix = '?' if not '?' in url else '&'
                    # Apprise format is lowercase text https://github.com/caronc/apprise/issues/633
                    n_format = n_format.lower()
                    url = f"{url}{prefix}format={n_format}"
                # If n_format == HTML, then apprise email should default to text/html and we should be sending HTML only

            apobj.add(url)

            sent_objs.append({'title': n_title,
                              'body': n_body,
                              'url': url,
                              'body_format': n_format})

            # Blast off the notifications that are set in .add()
            apobj.notify(
                title=n_title,
                body=n_body,
                body_format=n_format,
                # False is not an option for AppRise, must be type None
                attach=n_object.get('screenshot', None)
            )

    # Returns empty string if nothing found, multi-line string otherwise
    log_value = logs.getvalue()

    if log_value and ('WARNING' in log_value or 'ERROR' in log_value):
        logger.critical(log_value)
        raise Exception(log_value)

    # Return what was sent for better logging - after the for loop
    return sent_objs
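The per-service truncation in the (now deleted) function above follows one rule: cap the title at the payload budget, then give the body whatever budget remains, so title plus body never exceed the service limit. The arithmetic in isolation, using the Discord numbers from the code:

def truncate_for_service(title: str, body: str, payload_max_size: int) -> tuple[str, str]:
    """Cap the title first, then give the body the remaining budget."""
    body_limit = max(0, payload_max_size - len(title))
    return title[0:payload_max_size], body[0:body_limit]

# Discord: 2000 char hard limit minus ~300 reserved for JSON overhead
title, body = truncate_for_service('T' * 100, 'B' * 5000, payload_max_size=1700)
print(len(title), len(body))  # 100 1600 - together they stay within 1700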
# Notification title + body content parameters get created here.
# (Where we prepare the tokens in the notification to be replaced with actual values)
def create_notification_parameters(n_object, datastore):
    from copy import deepcopy

    # In the case we send a test notification from the main settings, there is no UUID.
    uuid = n_object['uuid'] if 'uuid' in n_object else ''

    if uuid:
        watch_title = datastore.data['watching'][uuid].get('title', '')
        tag_list = []
        tags = datastore.get_all_tags_for_watch(uuid)
        if tags:
            for tag_uuid, tag in tags.items():
                tag_list.append(tag.get('title'))
        watch_tag = ', '.join(tag_list)
    else:
        watch_title = 'Change Detection'
        watch_tag = ''

    # Create URLs to customise the notification with
    # active_base_url - set in store.py data property
    base_url = datastore.data['settings']['application'].get('active_base_url')

    watch_url = n_object['watch_url']

    diff_url = "{}/diff/{}".format(base_url, uuid)
    preview_url = "{}/preview/{}".format(base_url, uuid)

    # Not sure deepcopy is needed here, but why not
    tokens = deepcopy(valid_tokens)

    # valid_tokens is also used as a field validator
    tokens.update(
        {
            'base_url': base_url,
            'diff_url': diff_url,
            'preview_url': preview_url,
            'watch_tag': watch_tag if watch_tag is not None else '',
            'watch_title': watch_title if watch_title is not None else '',
            'watch_url': watch_url,
            'watch_uuid': uuid,
        })

    # n_object will contain diff, diff_added etc etc
    tokens.update(n_object)

    if uuid:
        tokens.update(datastore.data['watching'].get(uuid).extra_notification_token_values())

    return tokens
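The returned token dictionary is what the notification_title and notification_body Jinja2 templates render against. A sketch of that final step with plain jinja2 instead of the project's safe_jinja wrapper (the token values here are made up):

from jinja2 import Template

tokens = {
    'watch_url': 'https://example.com/pricing',
    'diff': '-  $10\n+  $12',
    'diff_url': 'https://changedetection.example/diff/abc-123',
}

template = '{{watch_url}} had a change.\n---\n{{diff}}\n---\n{{diff_url}}'
print(Template(template).render(**tokens))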
changedetectionio/notification/__init__.py
@@ -0,0 +1,16 @@
from changedetectionio.model import USE_SYSTEM_DEFAULT_NOTIFICATION_FORMAT_FOR_WATCH

default_notification_format = 'htmlcolor'
default_notification_body = '{{watch_url}} had a change.\n---\n{{diff}}\n---\n'
default_notification_title = 'ChangeDetection.io Notification - {{watch_url}}'

# The values (markdown etc) are from apprise NotifyFormat,
# but to avoid importing the whole heavy module just use the same strings here.
valid_notification_formats = {
    'text': 'Plain Text',
    'html': 'HTML',
    'htmlcolor': 'HTML Color',
    'markdown': 'Markdown to HTML',
    # Used only for editing a watch (not for global)
    USE_SYSTEM_DEFAULT_NOTIFICATION_FORMAT_FOR_WATCH: USE_SYSTEM_DEFAULT_NOTIFICATION_FORMAT_FOR_WATCH
}
changedetectionio/notification/apprise_plugin/assets.py
@@ -0,0 +1,16 @@
from apprise import AppriseAsset

# Refer to:
# https://github.com/caronc/apprise/wiki/Development_API#the-apprise-asset-object

APPRISE_APP_ID = "changedetection.io"
APPRISE_APP_DESC = "ChangeDetection.io best and simplest website monitoring and change detection"
APPRISE_APP_URL = "https://changedetection.io"
APPRISE_AVATAR_URL = "https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/changedetectionio/static/images/avatar-256x256.png"

apprise_asset = AppriseAsset(
    app_id=APPRISE_APP_ID,
    app_desc=APPRISE_APP_DESC,
    app_url=APPRISE_APP_URL,
    image_url_logo=APPRISE_AVATAR_URL,
)
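Passing this asset once when constructing the Apprise object is how the branding above reaches every notification; plugins that support app names and logos pick it up automatically. A usage sketch (the endpoint is a placeholder):

import apprise
from changedetectionio.notification.apprise_plugin.assets import apprise_asset

apobj = apprise.Apprise(asset=apprise_asset)
apobj.add('json://example.com/webhook')  # placeholder endpoint
apobj.notify(title='Price changed', body='Something on the watched page changed')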
changedetectionio/notification/apprise_plugin/custom_handlers.py
@@ -0,0 +1,210 @@
"""
Custom Apprise HTTP Handlers with format= Parameter Support

IMPORTANT: This module works around a limitation in Apprise's @notify decorator.

THE PROBLEM:
------------
When using Apprise's @notify decorator to create custom notification handlers, the
decorator creates a CustomNotifyPlugin that uses parse_url(..., simple=True) to parse
URLs. This simple parsing mode does NOT extract the format= query parameter from the URL
and set it as a top-level parameter that NotifyBase.__init__ can use to set notify_format.

As a result:
1. URL: post://example.com/webhook?format=html
2. Apprise parses this and sees format=html in qsd (the query string dictionary)
3. But it does NOT extract it and pass it to NotifyBase.__init__
4. NotifyBase defaults to notify_format=TEXT
5. When you call apobj.notify(body="<html>...", body_format="html"):
   - Apprise sees: input format = html, output format (notify_format) = text
   - Apprise calls convert_between("html", "text", body)
   - This strips all HTML tags, leaving only plain text
6. Your custom handler receives stripped plain text instead of HTML

THE SOLUTION:
-------------
Instead of using the @notify decorator directly, we:
1. Manually register custom plugins using plugins.N_MGR.add()
2. Create a CustomHTTPHandler class that extends CustomNotifyPlugin
3. Override __init__ to extract format= from qsd and set it as kwargs['format']
4. Call NotifyBase.__init__, which properly sets notify_format from kwargs['format']
5. Set up _default_args like CustomNotifyPlugin does, for compatibility

This ensures that when format=html is in the URL:
- notify_format is set to HTML
- Apprise sees: input format = html, output format = html
- No conversion happens (convert_between returns the content unchanged)
- Your custom handler receives the original HTML intact

TESTING:
--------
To verify this works:
>>> apobj = apprise.Apprise()
>>> apobj.add('post://localhost:5005/test?format=html')
>>> for server in apobj:
...     print(server.notify_format)  # Should print: html (not text)
>>> apobj.notify(body='<span>Test</span>', body_format='html')
# Your handler should receive '<span>Test</span>' not 'Test'
"""

import json
import re
from urllib.parse import unquote_plus

import requests
from apprise import plugins
from apprise.decorators.base import CustomNotifyPlugin
from apprise.utils.parse import parse_url as apprise_parse_url, url_assembly
from apprise.utils.logic import dict_full_update
from loguru import logger
from requests.structures import CaseInsensitiveDict

SUPPORTED_HTTP_METHODS = {"get", "post", "put", "delete", "patch", "head"}
def notify_supported_methods(func):
    """Register custom HTTP method handlers that properly support the format= parameter."""
    for method in SUPPORTED_HTTP_METHODS:
        _register_http_handler(method, func)
        _register_http_handler(f"{method}s", func)
    return func


def _register_http_handler(schema, send_func):
    """Register a custom HTTP handler that extracts format= from the URL query parameters."""

    # Parse the base URL
    base_url = f"{schema}://"
    base_args = apprise_parse_url(base_url, default_schema=schema, verify_host=False, simple=True)

    class CustomHTTPHandler(CustomNotifyPlugin):
        secure_protocol = schema
        service_name = f"Custom HTTP - {schema.upper()}"
        _base_args = base_args

        def __init__(self, **kwargs):
            # Extract format from qsd and set it as a top-level kwarg
            # This allows NotifyBase.__init__ to properly set notify_format
            if 'qsd' in kwargs and 'format' in kwargs['qsd']:
                kwargs['format'] = kwargs['qsd']['format']

            # Call NotifyBase.__init__ (skip CustomNotifyPlugin.__init__)
            super(CustomNotifyPlugin, self).__init__(**kwargs)

            # Set up _default_args like CustomNotifyPlugin does
            self._default_args = {}
            kwargs.pop("secure", None)
            dict_full_update(self._default_args, self._base_args)
            dict_full_update(self._default_args, kwargs)
            self._default_args["url"] = url_assembly(**self._default_args)

        __send = staticmethod(send_func)

        def send(self, body, title="", notify_type="info", *args, **kwargs):
            """Call the custom send function."""
            try:
                result = self.__send(
                    body, title, notify_type,
                    *args,
                    meta=self._default_args,
                    **kwargs
                )
                return True if result is None else bool(result)
            except Exception as e:
                self.logger.warning(f"Exception in custom HTTP handler: {e}")
                return False

    # Register the plugin
    plugins.N_MGR.add(
        plugin=CustomHTTPHandler,
        schemas=schema,
        send_func=send_func,
        url=base_url,
    )
def _get_auth(parsed_url: dict) -> str | tuple[str, str]:
    user: str | None = parsed_url.get("user")
    password: str | None = parsed_url.get("password")

    if user is not None and password is not None:
        return (unquote_plus(user), unquote_plus(password))

    if user is not None:
        return unquote_plus(user)

    return ""
def _get_headers(parsed_url: dict, body: str) -> CaseInsensitiveDict:
    headers = CaseInsensitiveDict(
        {unquote_plus(k).title(): unquote_plus(v) for k, v in parsed_url["qsd+"].items()}
    )

    # If Content-Type is not specified, guess whether the body is valid JSON
    if headers.get("Content-Type") is None:
        try:
            json.loads(body)
            headers["Content-Type"] = "application/json; charset=utf-8"
        except Exception:
            pass

    return headers
def _get_params(parsed_url: dict) -> CaseInsensitiveDict:
    # https://github.com/caronc/apprise/wiki/Notify_Custom_JSON#get-parameter-manipulation
    # Apprise relies on prefixing each request arg with "-" (and headers with "+"), because it
    # uses e.g. &method=update as a flag for apprise itself. Here we are making plain requests,
    # so we need to convert this back out of Apprise's logic.
    params = CaseInsensitiveDict(
        {
            unquote_plus(k): unquote_plus(v)
            for k, v in parsed_url["qsd"].items()
            if k.strip("-") not in parsed_url["qsd-"]
            and k.strip("+") not in parsed_url["qsd+"]
        }
    )

    return params
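Together, _get_auth(), _get_headers() and _get_params() carve one notification URL into requests-ready pieces: userinfo becomes auth, '+' prefixed pairs become headers, and unprefixed pairs pass through as query parameters. A sketch against a hypothetical URL, assuming Apprise's parse_url populates qsd/qsd+ the way the code above relies on:

from urllib.parse import unquote_plus
from apprise.utils.parse import parse_url as apprise_parse_url

parsed = apprise_parse_url('post://user:secret@api.example.com/hook?+X-Api-Key=abc123&foo=bar')

print(parsed['user'], parsed['password'])  # 'user' 'secret'         -> _get_auth()
print(parsed['qsd+'])                      # {'X-Api-Key': 'abc123'} -> _get_headers()
print({unquote_plus(k): unquote_plus(v)
       for k, v in parsed['qsd'].items()
       if k.strip('+') not in parsed['qsd+']})  # {'foo': 'bar'}     -> _get_params()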
@notify_supported_methods
def apprise_http_custom_handler(
    body: str,
    title: str,
    notify_type: str,
    meta: dict,
    body_format: str = None,
    *args,
    **kwargs,
) -> bool:

    url: str = meta.get("url")
    schema: str = meta.get("schema")
    method: str = re.sub(r"s$", "", schema).upper()

    # Convert /foobar?+some-header=hello to a proper header dictionary
    parsed_url: dict[str, str | dict | None] | None = apprise_parse_url(url)
    if parsed_url is None:
        return False

    auth = _get_auth(parsed_url=parsed_url)
    headers = _get_headers(parsed_url=parsed_url, body=body)
    params = _get_params(parsed_url=parsed_url)

    url = re.sub(rf"^{schema}", "https" if schema.endswith("s") else "http", parsed_url.get("url"))

    response = requests.request(
        method=method,
        url=url,
        auth=auth,
        headers=headers,
        params=params,
        data=body.encode("utf-8") if isinstance(body, str) else body,
    )

    response.raise_for_status()

    logger.info(f"Successfully sent custom notification to {url}")
    return True
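End to end, the handler maps the pseudo-scheme back onto real HTTP: a trailing 's' selects https, and the scheme minus that 's' becomes the request method. The mapping in isolation:

import re

def scheme_to_request(schema: str, url: str) -> tuple[str, str]:
    """Mirror the handler's logic: 'posts' -> ('POST', 'https://...')."""
    method = re.sub(r"s$", "", schema).upper()
    target = re.sub(rf"^{schema}", "https" if schema.endswith("s") else "http", url)
    return method, target

print(scheme_to_request('post', 'post://example.com/hook'))
# ('POST', 'http://example.com/hook')
print(scheme_to_request('posts', 'posts://example.com/hook'))
# ('POST', 'https://example.com/hook')
print(scheme_to_request('gets', 'gets://example.com/health'))
# ('GET', 'https://example.com/health')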
Some files were not shown because too many files have changed in this diff.