Mirror of https://github.com/dgtlmoon/changedetection.io.git (synced 2026-03-18 17:58:04 +00:00)

Compare commits: 8 commits, 0.54.6 ... openai-int
| SHA1 |
|---|
| df9258a8f7 |
| c070265668 |
| 48921c878d |
| 44384386cc |
| 3513676bc6 |
| 559b729475 |
| 8937df7b0b |
| 9a015041a5 |
@@ -33,6 +33,7 @@ venv/
# Test and development files
test-datastore/
tests/
docs/
*.md
!README.md
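Note: the `!README.md` line only takes effect because ignore-file rules are evaluated top to bottom, with later lines overriding earlier ones; this hunk appears to belong to a `.dockerignore`-style ignore file. A minimal sketch of the pattern:

    *.md          # exclude all markdown from the build context
    !README.md    # then re-include README.md; this must come after *.md to win
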
.github/actions/extract-memory-report/action.yml — 51 changed lines (vendored)
@@ -1,51 +0,0 @@
name: 'Extract Memory Test Report'
description: 'Extracts and displays memory test report from a container'
inputs:
  container-name:
    description: 'Name of the container to extract logs from'
    required: true
  python-version:
    description: 'Python version for artifact naming'
    required: true
  output-dir:
    description: 'Directory to store output logs'
    required: false
    default: 'output-logs'

runs:
  using: "composite"
  steps:
    - name: Create output directory
      shell: bash
      run: |
        mkdir -p ${{ inputs.output-dir }}

    - name: Dump container log
      shell: bash
      run: |
        echo "Disabled for now"
        # return
        # docker logs ${{ inputs.container-name }} > ${{ inputs.output-dir }}/${{ inputs.container-name }}-stdout-${{ inputs.python-version }}.txt 2>&1 || echo "Could not get stdout"
        # docker logs ${{ inputs.container-name }} 2> ${{ inputs.output-dir }}/${{ inputs.container-name }}-stderr-${{ inputs.python-version }}.txt || echo "Could not get stderr"

    - name: Extract and display memory test report
      shell: bash
      run: |
        echo "Disabled for now"
        # echo "Extracting test-memory.log from container..."
        # docker cp ${{ inputs.container-name }}:/app/changedetectionio/test-memory.log ${{ inputs.output-dir }}/test-memory-${{ inputs.python-version }}.log || echo "test-memory.log not found in container"
        #
        # echo "=== Top 10 Highest Peak Memory Tests ==="
        # if [ -f ${{ inputs.output-dir }}/test-memory-${{ inputs.python-version }}.log ]; then
        #   grep "Peak memory:" ${{ inputs.output-dir }}/test-memory-${{ inputs.python-version }}.log | \
        #     sed 's/.*Peak memory: //' | \
        #     paste -d'|' - <(grep "Peak memory:" ${{ inputs.output-dir }}/test-memory-${{ inputs.python-version }}.log) | \
        #     sort -t'|' -k1 -nr | \
        #     cut -d'|' -f2 | \
        #     head -10
        #   echo ""
        #   echo "=== Full Memory Test Report ==="
        #   cat ${{ inputs.output-dir }}/test-memory-${{ inputs.python-version }}.log
        # else
        #   echo "No memory log available"
        # fi
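Note: the commented-out report step above is a decorate-sort-undecorate pipeline — pair each log line with its numeric peak value, sort numerically, strip the pairing. A standalone sketch of the same idea, assuming log lines shaped like `test_x ... Peak memory: 42.1 MB`:

    # decorate: prefix each matching line with its numeric value ("42.1 MB|<full line>"),
    # sort numerically descending on that prefix, strip it, keep the top 10
    grep "Peak memory:" test-memory.log \
      | awk -F'Peak memory: ' '{ printf "%s|%s\n", $2, $0 }' \
      | sort -t'|' -k1,1 -nr \
      | cut -d'|' -f2- \
      | head -10
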
.github/dependabot.yml — 8 changed lines (vendored)
@@ -4,11 +4,11 @@ updates:
directory: /
schedule:
interval: "weekly"
"caronc/apprise":
versioning-strategy: "increase"
schedule:
interval: "daily"
groups:
all:
patterns:
- "*"
- package-ecosystem: pip
directory: /
schedule:
interval: "weekly"
.github/nginx-reverse-proxy-test.conf — 33 changed lines (vendored)
@@ -1,33 +0,0 @@
server {
    listen 80;
    server_name localhost;

    # Test basic reverse proxy to changedetection.io
    location / {
        proxy_pass http://changedet-app:5000;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        # WebSocket support
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }

    # Test subpath deployment with X-Forwarded-Prefix
    location /changedet-sub/ {
        proxy_pass http://changedet-app:5000/;
        proxy_set_header X-Forwarded-Prefix /changedet-sub;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        # WebSocket support
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}
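Note: the subpath location uses `proxy_pass http://changedet-app:5000/;` with a trailing slash, so nginx strips the matched `/changedet-sub/` prefix before forwarding, and the app relies on the `X-Forwarded-Prefix` header to generate externally-correct URLs. A rough smoke test against the proxy as the nginx-reverse-proxy job below runs it (port 8080 per that job):

    curl -s http://localhost:8080/ | grep -q checkbox-uuid                 # root-path proxying
    curl -s http://localhost:8080/changedet-sub/ | grep -q checkbox-uuid   # prefix stripped, app sees "/"
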
.github/test/Dockerfile-alpine — 21 changed lines (vendored)
@@ -2,13 +2,11 @@
# Test that we can still build on Alpine (musl modified libc https://musl.libc.org/)
# Some packages wont install via pypi because they dont have a wheel available under this architecture.

FROM ghcr.io/linuxserver/baseimage-alpine:3.22
FROM ghcr.io/linuxserver/baseimage-alpine:3.21
ENV PYTHONUNBUFFERED=1

COPY requirements.txt /requirements.txt

ARG TARGETPLATFORM

RUN \
  apk add --update --no-cache --virtual=build-dependencies \
    build-base \
@@ -26,25 +24,12 @@ RUN \
  apk add --update --no-cache \
    libjpeg \
    libxslt \
    file \
    nodejs \
    poppler-utils \
    python3 \
    glib \
    libsm \
    libxext \
    libxrender && \
  case "$TARGETPLATFORM" in \
    linux/arm/v7|linux/arm/v8) \
      echo "INFO: Skipping py3-opencv on $TARGETPLATFORM (using pixelmatch fallback)" \
      ;; \
    *) \
      apk add --update --no-cache py3-opencv || echo "WARN: py3-opencv install failed, using pixelmatch fallback" \
      ;; \
  esac && \
  python3 && \
  echo "**** pip3 install test of changedetection.io ****" && \
  python3 -m venv /lsiopy && \
  pip install -U pip wheel setuptools && \
  pip install -U --no-cache-dir --find-links https://wheel-index.linuxserver.io/alpine-3.22/ -r /requirements.txt && \
  pip install -U --no-cache-dir --find-links https://wheel-index.linuxserver.io/alpine-3.21/ -r /requirements.txt && \
  apk del --purge \
    build-dependencies
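Note: `TARGETPLATFORM` is populated automatically by BuildKit/buildx for each platform in a multi-arch build (e.g. `docker buildx build --platform linux/amd64,linux/arm/v7 ...`), which is what lets the RUN step branch per architecture. The same gate can be emulated locally; a sketch (the assignment below just simulates what buildx would inject):

    TARGETPLATFORM=linux/arm/v7    # simulated; buildx supplies this during the real build
    case "$TARGETPLATFORM" in
      linux/arm/v7|linux/arm/v8) echo "skip py3-opencv, use pixelmatch fallback" ;;
      *) echo "would run: apk add py3-opencv" ;;
    esac
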
.github/workflows/codeql-analysis.yml — 8 changed lines (vendored)
@@ -30,11 +30,11 @@ jobs:

    steps:
      - name: Checkout repository
        uses: actions/checkout@v6
        uses: actions/checkout@v4

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v4
        uses: github/codeql-action/init@v3
        with:
          languages: ${{ matrix.language }}
          # If you wish to specify custom queries, you can do so here or in a config file.
@@ -45,7 +45,7 @@ jobs:
    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
    # If this step fails, then you should remove it and run the build manually (see below)
    - name: Autobuild
      uses: github/codeql-action/autobuild@v4
      uses: github/codeql-action/autobuild@v3

    # ℹ️ Command-line programs to run using the OS shell.
    # 📚 https://git.io/JvXDl
@@ -59,4 +59,4 @@ jobs:
    #   make release

    - name: Perform CodeQL Analysis
      uses: github/codeql-action/analyze@v4
      uses: github/codeql-action/analyze@v3
.github/workflows/containers.yml — 67 changed lines (vendored)
@@ -39,20 +39,12 @@ jobs:
    # Or if we are in a tagged release scenario.
    if: ${{ github.event.workflow_run.conclusion == 'success' }} || ${{ github.event.release.tag_name }} != ''
    steps:
      - uses: actions/checkout@v6
      - uses: actions/checkout@v4
      - name: Set up Python 3.11
        uses: actions/setup-python@v6
        uses: actions/setup-python@v5
        with:
          python-version: 3.11

      - name: Cache pip packages
        uses: actions/cache@v5
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }}
          restore-keys: |
            ${{ runner.os }}-pip-
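Note: the `key`/`restore-keys` pair gives actions/cache exact-then-prefix lookup — an exact hit requires an unchanged requirements.txt hash; otherwise the most recent cache whose key starts with the prefix is restored, and a fresh cache is saved under the exact key when the job ends. Illustration of the keys involved (hash shortened for readability):

    # key:          Linux-pip-3f2a...   <- exact match wins
    # restore-keys: Linux-pip-          <- prefix fallback after requirements.txt changes
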
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
@@ -66,63 +58,44 @@ jobs:
          echo ${{ github.ref }} > changedetectionio/tag.txt

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v4
        uses: docker/setup-qemu-action@v3
        with:
          image: tonistiigi/binfmt:latest
          platforms: all

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v4
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Login to Docker Hub Container Registry
        uses: docker/login-action@v4
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_HUB_USERNAME }}
          password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}

      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v4
        uses: docker/setup-buildx-action@v3
        with:
          install: true
          version: latest
          driver-opts: image=moby/buildkit:master

      # master branch -> :dev container tag
      - name: Docker meta :dev
        if: ${{ github.ref == 'refs/heads/master' && github.event_name != 'release' }}
        uses: docker/metadata-action@v6
        id: meta_dev
        with:
          images: |
            ${{ secrets.DOCKER_HUB_USERNAME }}/changedetection.io
            ghcr.io/${{ github.repository }}
          tags: |
            type=raw,value=dev
          labels: |
            org.opencontainers.image.created=${{ github.event.release.published_at }}
            org.opencontainers.image.description=Website, webpage change detection, monitoring and notifications.
            org.opencontainers.image.documentation=https://changedetection.io
            org.opencontainers.image.revision=${{ github.sha }}
            org.opencontainers.image.source=https://github.com/dgtlmoon/changedetection.io
            org.opencontainers.image.title=changedetection.io
            org.opencontainers.image.url=https://changedetection.io

      - name: Build and push :dev
        id: docker_build
        if: ${{ github.ref == 'refs/heads/master' && github.event_name != 'release' }}
        uses: docker/build-push-action@v7
        if: ${{ github.ref }} == "refs/heads/master"
        uses: docker/build-push-action@v6
        with:
          context: ./
          file: ./Dockerfile
          push: true
          tags: ${{ steps.meta_dev.outputs.tags }}
          labels: ${{ steps.meta_dev.outputs.labels }}
          platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v8
          tags: |
            ${{ secrets.DOCKER_HUB_USERNAME }}/changedetection.io:dev,ghcr.io/${{ github.repository }}:dev
          platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v8,linux/arm64/v8
          cache-from: type=gha
          cache-to: type=gha,mode=max

@@ -136,10 +109,10 @@ jobs:
          echo "Release tag: ${{ github.event.release.tag_name }}"
          echo "Github ref: ${{ github.ref }}"
          echo "Github ref name: ${{ github.ref_name }}"

      - name: Docker meta :tag
        if: github.event_name == 'release' && startsWith(github.event.release.tag_name, '0.')
        uses: docker/metadata-action@v6
        uses: docker/metadata-action@v5
        id: meta
        with:
          images: |
@@ -150,27 +123,17 @@
            type=semver,pattern={{major}}.{{minor}},value=${{ github.event.release.tag_name }}
            type=semver,pattern={{major}},value=${{ github.event.release.tag_name }}
            type=raw,value=latest
          labels: |
            org.opencontainers.image.created=${{ github.event.release.published_at }}
            org.opencontainers.image.description=Website, webpage change detection, monitoring and notifications.
            org.opencontainers.image.documentation=https://changedetection.io
            org.opencontainers.image.revision=${{ github.sha }}
            org.opencontainers.image.source=https://github.com/dgtlmoon/changedetection.io
            org.opencontainers.image.title=changedetection.io
            org.opencontainers.image.url=https://changedetection.io
            org.opencontainers.image.version=${{ github.event.release.tag_name }}

      - name: Build and push :tag
        id: docker_build_tag_release
        if: github.event_name == 'release' && startsWith(github.event.release.tag_name, '0.')
        uses: docker/build-push-action@v7
        uses: docker/build-push-action@v6
        with:
          context: ./
          file: ./Dockerfile
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v8
          platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v8,linux/arm64/v8
          cache-from: type=gha
          cache-to: type=gha,mode=max
          # Looks like this was disabled
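Note: docker/metadata-action expands the `type=semver` patterns above against the release tag, so a release like the 0.54.6 side of this compare (which passes the `startsWith(..., '0.')` guard) would publish four tags. Sketch of the expansion:

    # value=0.54.6 with the patterns above yields:
    #   {{version}}           -> 0.54.6
    #   {{major}}.{{minor}}   -> 0.54
    #   {{major}}             -> 0
    #   type=raw,value=latest -> latest
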
.github/workflows/pypi-release.yml — 39 changed lines (vendored)
@@ -7,9 +7,9 @@ jobs:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v6
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v6
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      - name: Install pypa/build
@@ -21,60 +21,39 @@ jobs:
      - name: Build a binary wheel and a source tarball
        run: python3 -m build
      - name: Store the distribution packages
        uses: actions/upload-artifact@v7
        uses: actions/upload-artifact@v4
        with:
          name: python-package-distributions
          path: dist/

  test-pypi-package:
    name: Test the built package works basically.
    name: Test the built 📦 package works basically.
    runs-on: ubuntu-latest
    needs:
      - build
    steps:
      - name: Download all the dists
        uses: actions/download-artifact@v8
        uses: actions/download-artifact@v4
        with:
          name: python-package-distributions
          path: dist/
      - name: Set up Python 3.11
        uses: actions/setup-python@v6
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Test that the basic pip built package runs without error
        run: |
          set -ex
          ls -alR

          # Install the first wheel found in dist/
          WHEEL=$(find dist -type f -name "*.whl" -print -quit)
          echo Installing $WHEEL
          python3 -m pip install --upgrade pip
          python3 -m pip install "$WHEEL"
          # Find and install the first .whl file
          find dist -type f -name "*.whl" -exec pip3 install {} \; -quit
          changedetection.io -d /tmp -p 10000 &

          sleep 3
          curl --retry-connrefused --retry 6 http://127.0.0.1:10000/static/styles/pure-min.css >/dev/null
          curl --retry-connrefused --retry 6 http://127.0.0.1:10000/ >/dev/null

          # --- API test ---
          # This also means that the docs/api-spec.yml was shipped and could be read
          test -f /tmp/changedetection.json
          API_KEY=$(jq -r '.. | .api_access_token? // empty' /tmp/changedetection.json)
          echo Test API KEY is $API_KEY
          curl -X POST "http://127.0.0.1:10000/api/v1/watch" \
            -H "x-api-key: ${API_KEY}" \
            -H "Content-Type: application/json" \
            --show-error --fail \
            --retry 6 --retry-delay 1 --retry-connrefused \
            -d '{
              "url": "https://example.com",
              "title": "Example Site Monitor",
              "time_between_check": { "hours": 1 }
            }'

          killall changedetection.io
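Note on the `jq` filter in the API test: `..` walks every value in the settings JSON recursively, `.api_access_token?` probes each one for that key (the trailing `?` suppresses errors on non-objects), and `// empty` discards the misses. A self-contained sketch with a made-up document shape:

    echo '{"settings": {"application": {"api_access_token": "abc123"}}}' \
      | jq -r '.. | .api_access_token? // empty'
    # prints: abc123, no matter where the key nests
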
@@ -93,7 +72,7 @@ jobs:

    steps:
      - name: Download all the dists
        uses: actions/download-artifact@v8
        uses: actions/download-artifact@v4
        with:
          name: python-package-distributions
          path: dist/
.github/workflows/test-container-build.yml — 20 changed lines (vendored)
@@ -38,36 +38,30 @@ jobs:
            dockerfile: ./Dockerfile
          - platform: linux/arm/v8
            dockerfile: ./Dockerfile
          - platform: linux/arm64/v8
            dockerfile: ./Dockerfile
          # Alpine Dockerfile platforms (musl via alpine check)
          - platform: linux/amd64
            dockerfile: ./.github/test/Dockerfile-alpine
          - platform: linux/arm64
            dockerfile: ./.github/test/Dockerfile-alpine
    steps:
      - uses: actions/checkout@v6
      - uses: actions/checkout@v4
      - name: Set up Python 3.11
        uses: actions/setup-python@v6
        uses: actions/setup-python@v5
        with:
          python-version: 3.11

      - name: Cache pip packages
        uses: actions/cache@v5
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }}
          restore-keys: |
            ${{ runner.os }}-pip-

      # Just test that the build works, some libraries won't compile on ARM/rPi etc
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v4
        uses: docker/setup-qemu-action@v3
        with:
          image: tonistiigi/binfmt:latest
          platforms: all

      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v4
        uses: docker/setup-buildx-action@v3
        with:
          install: true
          version: latest
@@ -75,7 +69,7 @@ jobs:

      - name: Test that the docker containers can build (${{ matrix.platform }} - ${{ matrix.dockerfile }})
        id: docker_build
        uses: docker/build-push-action@v7
        uses: docker/build-push-action@v6
        # https://github.com/docker/build-push-action#customizing
        with:
          context: ./
.github/workflows/test-only.yml — 24 changed lines (vendored)
@@ -7,7 +7,7 @@ jobs:
  lint-code:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v6
      - uses: actions/checkout@v4
      - name: Lint with Ruff
        run: |
          pip install ruff
@@ -15,14 +15,8 @@ jobs:
          ruff check . --select E9,F63,F7,F82
          # Complete check with errors treated as warnings
          ruff check . --exit-zero
|
||||
run: |
|
||||
pip install openapi-spec-validator
|
||||
python3 -c "from openapi_spec_validator import validate_spec; import yaml; validate_spec(yaml.safe_load(open('docs/api-spec.yaml')))"
|
||||
|
||||
test-application-3-10:
|
||||
# Only run on push to master (including PR merges)
|
||||
if: github.event_name == 'push' && github.ref == 'refs/heads/master'
|
||||
needs: lint-code
|
||||
uses: ./.github/workflows/test-stack-reusable-workflow.yml
|
||||
with:
|
||||
@@ -30,15 +24,12 @@ jobs:
|
||||
|
||||
|
||||
test-application-3-11:
|
||||
# Always run
|
||||
needs: lint-code
|
||||
uses: ./.github/workflows/test-stack-reusable-workflow.yml
|
||||
with:
|
||||
python-version: '3.11'
|
||||
|
||||
test-application-3-12:
|
||||
# Only run on push to master (including PR merges)
|
||||
if: github.event_name == 'push' && github.ref == 'refs/heads/master'
|
||||
needs: lint-code
|
||||
uses: ./.github/workflows/test-stack-reusable-workflow.yml
|
||||
with:
|
||||
@@ -46,19 +37,8 @@ jobs:
|
||||
skip-pypuppeteer: true
|
||||
|
||||
test-application-3-13:
|
||||
# Only run on push to master (including PR merges)
|
||||
if: github.event_name == 'push' && github.ref == 'refs/heads/master'
|
||||
needs: lint-code
|
||||
uses: ./.github/workflows/test-stack-reusable-workflow.yml
|
||||
with:
|
||||
python-version: '3.13'
|
||||
skip-pypuppeteer: true
|
||||
|
||||
|
||||
test-application-3-14:
|
||||
#if: github.event_name == 'push' && github.ref == 'refs/heads/master'
|
||||
needs: lint-code
|
||||
uses: ./.github/workflows/test-stack-reusable-workflow.yml
|
||||
with:
|
||||
python-version: '3.14'
|
||||
skip-pypuppeteer: false
|
||||
skip-pypuppeteer: true
|
||||
.github/workflows/test-stack-reusable-workflow.yml — 847 changed lines (vendored)
@@ -15,516 +15,138 @@ on:
        default: false

jobs:
  # Build the Docker image once and share it with all test jobs
  build:
  test-application:
    runs-on: ubuntu-latest
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v6
      - uses: actions/checkout@v4

      # Mainly just for link/flake8
      - name: Set up Python ${{ env.PYTHON_VERSION }}
        uses: actions/setup-python@v6
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Cache pip packages
        uses: actions/cache@v5
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-py${{ env.PYTHON_VERSION }}-${{ hashFiles('requirements.txt') }}
          restore-keys: |
            ${{ runner.os }}-pip-py${{ env.PYTHON_VERSION }}-
            ${{ runner.os }}-pip-

      - name: Get current date for cache key
        id: date
        run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v4

      - name: Build changedetection.io container for testing under Python ${{ env.PYTHON_VERSION }}
        uses: docker/build-push-action@v7
        with:
          context: ./
          file: ./Dockerfile
          build-args: |
            PYTHON_VERSION=${{ env.PYTHON_VERSION }}
            LOGGER_LEVEL=TRACE
          tags: test-changedetectionio
          load: true
          cache-from: type=gha,scope=build-${{ github.ref_name }}-py${{ env.PYTHON_VERSION }}-${{ hashFiles('requirements.txt', 'Dockerfile') }}-${{ steps.date.outputs.date }}
          cache-to: type=gha,mode=max,scope=build-${{ github.ref_name }}-py${{ env.PYTHON_VERSION }}-${{ hashFiles('requirements.txt', 'Dockerfile') }}-${{ steps.date.outputs.date }}

      - name: Verify build
        run: |
          echo "---- Built for Python ${{ env.PYTHON_VERSION }} -----"
          docker run test-changedetectionio bash -c 'pip list'
          echo "---- Building for Python ${{ env.PYTHON_VERSION }} -----"
          # Build a changedetection.io container and start testing inside
          docker build --build-arg PYTHON_VERSION=${{ env.PYTHON_VERSION }} --build-arg LOGGER_LEVEL=TRACE -t test-changedetectionio .
          # Debug info
          docker run test-changedetectionio bash -c 'pip list'

      - name: We should be Python ${{ env.PYTHON_VERSION }} ...
        run: |
          docker run test-changedetectionio bash -c 'python3 --version'

      - name: Spin up ancillary testable services
        run: |
          docker run test-changedetectionio bash -c 'python3 --version'

          docker network create changedet-network

          # Selenium
          docker run --network changedet-network -d --hostname selenium -p 4444:4444 --rm --shm-size="2g" selenium/standalone-chrome:4

          # SocketPuppetBrowser + Extra for custom browser test
          docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser --hostname sockpuppetbrowser --rm -p 3000:3000 dgtlmoon/sockpuppetbrowser:latest
          docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser-custom-url --hostname sockpuppetbrowser-custom-url -p 3001:3000 --rm dgtlmoon/sockpuppetbrowser:latest

      - name: Save Docker image
      - name: Spin up ancillary SMTP+Echo message test server
        run: |
          docker save test-changedetectionio -o /tmp/test-changedetectionio.tar
          # Debug SMTP server/echo message back server
          docker run --network changedet-network -d -p 11025:11025 -p 11080:11080 --hostname mailserver test-changedetectionio bash -c 'pip3 install aiosmtpd && python changedetectionio/tests/smtp/smtp-test-server.py'
          docker ps

      - name: Upload Docker image artifact
        uses: actions/upload-artifact@v7
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp/test-changedetectionio.tar
          retention-days: 1
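Note: the build/test split on the 0.54.6 side hinges on the save-as-artifact pattern — the image is built once, serialized with `docker save`, uploaded as an artifact, and every downstream job rehydrates it with `docker load` instead of rebuilding. The skeleton of that round trip:

    # build job
    docker save test-changedetectionio -o /tmp/test-changedetectionio.tar   # then actions/upload-artifact
    # every test job (after actions/download-artifact)
    docker load -i /tmp/test-changedetectionio.tar
    docker run test-changedetectionio ...
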
  # Unit tests (lightweight, no ancillary services needed)
  unit-tests:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 10
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v6

      - name: Download Docker image artifact
        uses: actions/download-artifact@v8
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp

      - name: Load Docker image
      - name: Show docker container state and other debug info
        run: |
          docker load -i /tmp/test-changedetectionio.tar
          set -x
          echo "Running processes in docker..."
          docker ps

      - name: Run Unit Tests
        run: |
          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_notification_diff'
          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_watch_model'
          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_jinja2_security'
          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_semver'
          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_html_to_text'
          # Unit tests
          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_notification_diff'
          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_watch_model'
          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_jinja2_security'
          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_semver'

  # Basic pytest tests with ancillary services
  basic-tests:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 25
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v6

      - name: Download Docker image artifact
        uses: actions/download-artifact@v8
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp

      - name: Load Docker image
      - name: Test built container with Pytest (generally as requests/plaintext fetching)
        run: |
          docker load -i /tmp/test-changedetectionio.tar
          # All tests
          echo "run test with pytest"
          # The default pytest logger_level is TRACE
          # To change logger_level for pytest(test/conftest.py),
          # append the docker option. e.g. '-e LOGGER_LEVEL=DEBUG'
          docker run --name test-cdio-basic-tests --network changedet-network test-changedetectionio bash -c 'cd changedetectionio && ./run_basic_tests.sh'

      - name: Test built container with Pytest
      # PLAYWRIGHT/NODE-> CDP
      - name: Playwright and SocketPuppetBrowser - Specific tests in built container
        run: |
          docker network inspect changedet-network >/dev/null 2>&1 || docker network create changedet-network
          docker run --name test-cdio-basic-tests --network changedet-network test-changedetectionio bash -c 'cd changedetectionio && ./run_basic_tests.sh'
          # Playwright via Sockpuppetbrowser fetch
          # tests/visualselector/test_fetch_data.py will do browser steps
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest -vv --capture=tee-sys --showlocals --tb=long --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_content.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest -vv --capture=tee-sys --showlocals --tb=long --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_errorhandling.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest -vv --capture=tee-sys --showlocals --tb=long --live-server-host=0.0.0.0 --live-server-port=5004 tests/visualselector/test_fetch_data.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest -vv --capture=tee-sys --showlocals --tb=long --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_custom_js_before_content.py'

      - name: Test CLI options

      - name: Playwright and SocketPuppetBrowser - Headers and requests
        run: |
          # Settings headers playwright tests - Call back in from Sockpuppetbrowser, check headers
          docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000?dumpio=true" --network changedet-network test-changedetectionio bash -c 'find .; cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py; pwd;find .'

      - name: Playwright and SocketPuppetBrowser - Restock detection
        run: |
          # restock detection via playwright - added name=changedet here so that playwright and sockpuppetbrowser can connect to it
          docker run --rm --name "changedet" -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-port=5004 --live-server-host=0.0.0.0 tests/restock/test_restock.py'

      # STRAIGHT TO CDP
      - name: Pyppeteer and SocketPuppetBrowser - Specific tests in built container
        if: ${{ inputs.skip-pypuppeteer == false }}
        run: |
          docker network inspect changedet-network >/dev/null 2>&1 || docker network create changedet-network
          docker run --name test-cdio-cli-opts --network changedet-network test-changedetectionio bash -c 'changedetectionio/test_cli_opts.sh' &> cli-opts-output.txt
          echo "=== CLI Options Test Output ==="
          cat cli-opts-output.txt
          # Playwright via Sockpuppetbrowser fetch
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_content.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_errorhandling.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/visualselector/test_fetch_data.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_custom_js_before_content.py'

      - name: CLI Memory Test
      - name: Pyppeteer and SocketPuppetBrowser - Headers and requests checks
        if: ${{ inputs.skip-pypuppeteer == false }}
        run: |
          echo "=== Checking CLI batch mode memory usage ==="
          # Extract RSS memory value from output
          RSS_MB=$(grep -oP "Memory consumption before worker shutdown: RSS=\K[\d.]+" cli-opts-output.txt | head -1 || echo "0")
          echo "RSS Memory: ${RSS_MB} MB"
          # Settings headers playwright tests - Call back in from Sockpuppetbrowser, check headers
          docker run --name "changedet" --hostname changedet --rm -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000?dumpio=true" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'

          # Check if RSS is less than 100MB
          if [ -n "$RSS_MB" ]; then
            if (( $(echo "$RSS_MB < 100" | bc -l) )); then
              echo "✓ Memory usage is acceptable: ${RSS_MB} MB < 100 MB"
            else
              echo "✗ Memory usage too high: ${RSS_MB} MB >= 100 MB"
              exit 1
            fi
          else
            echo "⚠ Could not extract memory usage, skipping check"
          fi

      - name: Extract memory report and logs
        if: always()
        uses: ./.github/actions/extract-memory-report
        with:
          container-name: test-cdio-basic-tests
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Store test artifacts
        if: always()
        uses: actions/upload-artifact@v7
        with:
          name: test-cdio-basic-tests-output-py${{ env.PYTHON_VERSION }}
          path: output-logs

      - name: Store CLI test output
        if: always()
        uses: actions/upload-artifact@v7
        with:
          name: test-cdio-cli-opts-output-py${{ env.PYTHON_VERSION }}
          path: cli-opts-output.txt

  # Playwright tests
  playwright-tests:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 10
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v6

      - name: Download Docker image artifact
        uses: actions/download-artifact@v8
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp

      - name: Load Docker image
        run: |
          docker load -i /tmp/test-changedetectionio.tar

      - name: Spin up ancillary services
        run: |
          docker network create changedet-network
          docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser --hostname sockpuppetbrowser --rm -p 3000:3000 dgtlmoon/sockpuppetbrowser:latest
          docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser-custom-url --hostname sockpuppetbrowser-custom-url -p 3001:3000 --rm dgtlmoon/sockpuppetbrowser:latest

      - name: Playwright - Specific tests in built container
        run: |
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest -vv --capture=tee-sys --showlocals --tb=long --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_content.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest -vv --capture=tee-sys --showlocals --tb=long --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_errorhandling.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest -vv --capture=tee-sys --showlocals --tb=long --live-server-host=0.0.0.0 --live-server-port=5004 tests/visualselector/test_fetch_data.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest -vv --capture=tee-sys --showlocals --tb=long --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_custom_js_before_content.py'

      - name: Playwright - Headers and requests
        run: |
          docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000?dumpio=true" --network changedet-network test-changedetectionio bash -c 'find .; cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py; pwd;find .'

      - name: Playwright - Restock detection
        run: |
          docker run --rm --name "changedet" -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-port=5004 --live-server-host=0.0.0.0 tests/restock/test_restock.py'

  # Pyppeteer tests
  pyppeteer-tests:
    runs-on: ubuntu-latest
    needs: build
    if: ${{ inputs.skip-pypuppeteer == false }}
    timeout-minutes: 10
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v6

      - name: Download Docker image artifact
        uses: actions/download-artifact@v8
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp

      - name: Load Docker image
        run: |
          docker load -i /tmp/test-changedetectionio.tar

      - name: Spin up ancillary services
        run: |
          docker network create changedet-network
          docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser --hostname sockpuppetbrowser --rm -p 3000:3000 dgtlmoon/sockpuppetbrowser:latest

      - name: Pyppeteer - Specific tests in built container
        run: |
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_content.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_errorhandling.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/visualselector/test_fetch_data.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_custom_js_before_content.py'

      - name: Pyppeteer - Headers and requests checks
        run: |
          docker run --name "changedet" --hostname changedet --rm -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000?dumpio=true" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'

      - name: Pyppeteer - Restock detection
        run: |
          docker run --rm --name "changedet" -e "FLASK_SERVER_NAME=changedet" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-port=5004 --live-server-host=0.0.0.0 tests/restock/test_restock.py'

  # Selenium tests
  selenium-tests:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 10
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v6

      - name: Download Docker image artifact
        uses: actions/download-artifact@v8
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp

      - name: Load Docker image
        run: |
          docker load -i /tmp/test-changedetectionio.tar

      - name: Spin up ancillary services
        run: |
          docker network create changedet-network
          docker run --network changedet-network -d --hostname selenium -p 4444:4444 --rm --shm-size="2g" selenium/standalone-chrome:4
          sleep 3

      - name: Specific tests for headers and requests checks with Selenium
        run: |

          docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "WEBDRIVER_URL=http://selenium:4444/wd/hub" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'
      - name: Pyppeteer and SocketPuppetBrowser - Restock detection
        if: ${{ inputs.skip-pypuppeteer == false }}
        run: |
          # restock detection via playwright - added name=changedet here so that playwright and sockpuppetbrowser can connect to it
          docker run --rm --name "changedet" -e "FLASK_SERVER_NAME=changedet" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-port=5004 --live-server-host=0.0.0.0 tests/restock/test_restock.py'

      # SELENIUM
      - name: Specific tests in built container for Selenium
        run: |
          docker run --rm -e "WEBDRIVER_URL=http://selenium:4444/wd/hub" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/fetchers/test_content.py && pytest tests/test_errorhandling.py'
          # Selenium fetch
          docker run --rm -e "WEBDRIVER_URL=http://selenium:4444/wd/hub" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/fetchers/test_content.py && pytest tests/test_errorhandling.py'


  # SMTP tests
  smtp-tests:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 10
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v6

      - name: Download Docker image artifact
        uses: actions/download-artifact@v8
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp

      - name: Load Docker image
      - name: Specific tests in built container for headers and requests checks with Selenium
        run: |
          docker load -i /tmp/test-changedetectionio.tar

      - name: Spin up SMTP test server
        run: |
          docker network create changedet-network
          docker run --network changedet-network -d -p 11025:11025 -p 11080:11080 --hostname mailserver test-changedetectionio bash -c 'pip3 install aiosmtpd && python changedetectionio/tests/smtp/smtp-test-server.py'
          docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "WEBDRIVER_URL=http://selenium:4444/wd/hub" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'

      # OTHER STUFF
      - name: Test SMTP notification mime types
        run: |
          # SMTP content types - needs the 'Debug SMTP server/echo message back server' container from above
          # "mailserver" hostname defined above
          docker run --rm --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/smtp/test_notification_smtp.py'

  nginx-reverse-proxy:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 10
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v6

      - name: Download Docker image artifact
        uses: actions/download-artifact@v8
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp

      - name: Load Docker image
        run: |
          docker load -i /tmp/test-changedetectionio.tar

      - name: Spin up services
        run: |
          docker network create changedet-network

          # Start changedetection.io container with X-Forwarded headers support
          docker run --name changedet-app --hostname changedet-app --network changedet-network \
            -e USE_X_SETTINGS=true \
            -d test-changedetectionio
          sleep 3

      - name: Start nginx reverse proxy
        run: |
          # Start nginx with our test configuration
          docker run --name nginx-proxy --network changedet-network -d -p 8080:80 --rm \
            -v ${{ github.workspace }}/.github/nginx-reverse-proxy-test.conf:/etc/nginx/conf.d/default.conf:ro \
            nginx:alpine
          sleep 2

      - name: Test reverse proxy - root path
        run: |
          echo "=== Testing nginx reverse proxy at root path ==="
          curl --retry-connrefused --retry 6 -s http://localhost:8080/ > /tmp/nginx-test-root.html

          # Check for changedetection.io UI elements
          if grep -q "checkbox-uuid" /tmp/nginx-test-root.html; then
            echo "✓ Found checkbox-uuid in response"
          else
            echo "ERROR: checkbox-uuid not found in response"
            cat /tmp/nginx-test-root.html
            exit 1
          fi

          # Check for watchlist content
          if grep -q -i "watch" /tmp/nginx-test-root.html; then
            echo "✓ Found watch/watchlist content in response"
          else
            echo "ERROR: watchlist content not found"
            cat /tmp/nginx-test-root.html
            exit 1
          fi

          echo "✓ Root path reverse proxy working correctly"

      - name: Test reverse proxy - subpath with X-Forwarded-Prefix
        run: |
          echo "=== Testing nginx reverse proxy at subpath /changedet-sub/ ==="
          curl --retry-connrefused --retry 6 -s http://localhost:8080/changedet-sub/ > /tmp/nginx-test-subpath.html

          # Check for changedetection.io UI elements
          if grep -q "checkbox-uuid" /tmp/nginx-test-subpath.html; then
            echo "✓ Found checkbox-uuid in subpath response"
          else
            echo "ERROR: checkbox-uuid not found in subpath response"
            cat /tmp/nginx-test-subpath.html
            exit 1
          fi

          echo "✓ Subpath reverse proxy working correctly"

      - name: Test API through reverse proxy subpath
        run: |
          echo "=== Testing API endpoints through nginx subpath /changedet-sub/ ==="

          # Extract API key from the changedetection.io datastore
          API_KEY=$(docker exec changedet-app cat /datastore/changedetection.json | grep -o '"api_access_token": *"[^"]*"' | cut -d'"' -f4)

          if [ -z "$API_KEY" ]; then
            echo "ERROR: Could not extract API key from datastore"
            docker exec changedet-app cat /datastore/changedetection.json
            exit 1
          fi

          echo "✓ Extracted API key: ${API_KEY:0:8}..."

          # Create a watch via API through nginx proxy subpath
          echo "Creating watch via POST to /changedet-sub/api/v1/watch"
          RESPONSE=$(curl -s -w "\n%{http_code}" -X POST "http://localhost:8080/changedet-sub/api/v1/watch" \
            -H "x-api-key: ${API_KEY}" \
            -H "Content-Type: application/json" \
            -d '{
              "url": "https://example.com/test-nginx-proxy",
              "tag": "nginx-test"
            }')

          HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
          BODY=$(echo "$RESPONSE" | head -n-1)

          if [ "$HTTP_CODE" != "201" ]; then
            echo "ERROR: Expected HTTP 201, got $HTTP_CODE"
            echo "Response: $BODY"
            exit 1
          fi

          echo "✓ Watch created successfully (HTTP 201)"

          # Extract the watch UUID from response
          WATCH_UUID=$(echo "$BODY" | grep -o '"uuid": *"[^"]*"' | cut -d'"' -f4)
          echo "✓ Watch UUID: $WATCH_UUID"

          # Update the watch via PUT through nginx proxy subpath
          echo "Updating watch via PUT to /changedet-sub/api/v1/watch/${WATCH_UUID}"
          RESPONSE=$(curl -s -w "\n%{http_code}" -X PUT "http://localhost:8080/changedet-sub/api/v1/watch/${WATCH_UUID}" \
            -H "x-api-key: ${API_KEY}" \
            -H "Content-Type: application/json" \
            -d '{
              "paused": true
            }')

          HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
          BODY=$(echo "$RESPONSE" | head -n-1)

          if [ "$HTTP_CODE" != "200" ]; then
            echo "ERROR: Expected HTTP 200, got $HTTP_CODE"
            echo "Response: $BODY"
            exit 1
          fi

          if echo "$BODY" | grep -q 'OK'; then
            echo "✓ Watch updated successfully (HTTP 200, response: OK)"
          else
            echo "ERROR: Expected response 'OK', got: $BODY"
            echo "Response: $BODY"
            exit 1
          fi

          # Verify the watch is paused via GET
          echo "Verifying watch is paused via GET"
          RESPONSE=$(curl -s "http://localhost:8080/changedet-sub/api/v1/watch/${WATCH_UUID}" \
            -H "x-api-key: ${API_KEY}")

          if echo "$RESPONSE" | grep -q '"paused": *true'; then
            echo "✓ Watch is paused as expected"
          else
            echo "ERROR: Watch paused state not confirmed"
            echo "Response: $RESPONSE"
            exit 1
          fi

          echo "✓ API tests through nginx subpath completed successfully"
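Note: the POST/PUT checks above reuse a handy one-request pattern — append the status code on its own line with `-w "\n%{http_code}"`, then split body and code apart. As a standalone sketch:

    RESPONSE=$(curl -s -w "\n%{http_code}" "http://localhost:8080/changedet-sub/api/v1/watch" -H "x-api-key: $API_KEY")
    HTTP_CODE=$(echo "$RESPONSE" | tail -n1)    # last line: the status code
    BODY=$(echo "$RESPONSE" | head -n -1)       # everything before it: the body
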
      - name: Cleanup nginx test
        if: always()
        run: |
          docker logs nginx-proxy || true
          docker logs changedet-app || true
          docker stop nginx-proxy changedet-app || true
          docker rm nginx-proxy changedet-app || true


  # Proxy tests
  proxy-tests:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 10
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v6

      - name: Download Docker image artifact
        uses: actions/download-artifact@v8
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp

      - name: Load Docker image
        run: |
          docker load -i /tmp/test-changedetectionio.tar

      - name: Spin up services
        run: |
          docker network create changedet-network
          docker run --network changedet-network -d --hostname selenium -p 4444:4444 --rm --shm-size="2g" selenium/standalone-chrome:4
          docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser --hostname sockpuppetbrowser --rm -p 3000:3000 dgtlmoon/sockpuppetbrowser:latest
          docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser-custom-url --hostname sockpuppetbrowser-custom-url -p 3001:3000 --rm dgtlmoon/sockpuppetbrowser:latest

      - name: Test proxy Squid style interaction
      # @todo Add a test via playwright/puppeteer
      # squid with auth is tested in run_proxy_tests.sh -> tests/proxy_list/test_select_custom_proxy.py
      - name: Test proxy squid style interaction
        run: |
          cd changedetectionio
          ./run_proxy_tests.sh
          docker ps
          cd ..

      - name: Test proxy SOCKS5 style interaction
@@ -533,88 +155,28 @@ jobs:
          ./run_socks_proxy_tests.sh
          cd ..

  # Custom browser URL tests
  custom-browser-tests:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 10
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v6

      - name: Download Docker image artifact
        uses: actions/download-artifact@v8
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp

      - name: Load Docker image
        run: |
          docker load -i /tmp/test-changedetectionio.tar

      - name: Spin up ancillary services
        run: |
          docker network create changedet-network
          docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser --hostname sockpuppetbrowser --rm -p 3000:3000 dgtlmoon/sockpuppetbrowser:latest
          docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser-custom-url --hostname sockpuppetbrowser-custom-url -p 3001:3000 --rm dgtlmoon/sockpuppetbrowser:latest

      - name: Test custom browser URL
        run: |
          cd changedetectionio
          ./run_custom_browser_url_tests.sh
          cd ..

  processor-plugin-tests:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 20
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v6

      - name: Download Docker image artifact
        uses: actions/download-artifact@v8
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp

      - name: Load Docker image
      - name: Test changedetection.io container starts+runs basically without error
        run: |
          docker load -i /tmp/test-changedetectionio.tar

      - name: Basic processor plugin registration and checks
        run: |
          docker run -e EXTRA_PACKAGES=changedetection.io-osint-processor test-changedetectionio bash -c 'cd changedetectionio;pytest -vvv -s tests/plugins/test_processor.py::test_check_plugin_processor'

  # Container startup tests
  container-tests:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 10
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v6

      - name: Download Docker image artifact
        uses: actions/download-artifact@v8
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp

      - name: Load Docker image
        run: |
          docker load -i /tmp/test-changedetectionio.tar

      - name: Test container starts+runs basically without error
        run: |
          docker run --name test-changedetectionio -p 5556:5000 -d test-changedetectionio
          docker run --name test-changedetectionio -p 5556:5000 -d test-changedetectionio
          sleep 3
          curl --retry-connrefused --retry 6 -s http://localhost:5556 |grep -q checkbox-uuid
          curl --retry-connrefused --retry 6 -s -g -6 "http://[::1]:5556"|grep -q checkbox-uuid
          # Should return 0 (no error) when grep finds it
          curl --retry-connrefused --retry 6 -s http://localhost:5556 |grep -q checkbox-uuid

          # and IPv6
          curl --retry-connrefused --retry 6 -s -g -6 "http://[::1]:5556"|grep -q checkbox-uuid

          # Check whether TRACE log is enabled.
          # Also, check whether TRACE came from STDOUT
          docker logs test-changedetectionio 2>/dev/null | grep 'TRACE log is enabled' || exit 1
          # Check whether DEBUG is came from STDOUT
          docker logs test-changedetectionio 2>/dev/null | grep 'DEBUG' || exit 1

          docker kill test-changedetectionio

      - name: Test HTTPS SSL mode
@@ -622,217 +184,78 @@ jobs:
          openssl req -x509 -newkey rsa:4096 -keyout privkey.pem -out cert.pem -days 365 -nodes -subj "/CN=localhost"
          docker run --name test-changedetectionio-ssl --rm -e SSL_CERT_FILE=cert.pem -e SSL_PRIVKEY_FILE=privkey.pem -p 5000:5000 -v ./cert.pem:/app/cert.pem -v ./privkey.pem:/app/privkey.pem -d test-changedetectionio
          sleep 3
          # Should return 0 (no error) when grep finds it
          # -k because its self-signed
          curl --retry-connrefused --retry 6 -k https://localhost:5000 -v|grep -q checkbox-uuid

          docker kill test-changedetectionio-ssl

      - name: Test IPv6 Mode
        run: |
          # IPv6 - :: bind to all interfaces inside container (like 0.0.0.0), ::1 would be localhost only
          docker run --name test-changedetectionio-ipv6 --rm -p 5000:5000 -e LISTEN_HOST=:: -d test-changedetectionio
          sleep 3
          # Should return 0 (no error) when grep finds it on localhost
          curl --retry-connrefused --retry 6 http://[::1]:5000 -v|grep -q checkbox-uuid
          docker kill test-changedetectionio-ipv6
  # Signal tests
  signal-tests:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 10
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v6

      - name: Download Docker image artifact
        uses: actions/download-artifact@v8
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp

      - name: Load Docker image
        run: |
          docker load -i /tmp/test-changedetectionio.tar

      - name: Test SIGTERM and SIGINT signal shutdown
      - name: Test changedetection.io SIGTERM and SIGINT signal shutdown
        run: |

          echo SIGINT Shutdown request test
          docker run --name sig-test -d test-changedetectionio
          sleep 3
          echo ">>> Sending SIGINT to sig-test container"
          docker kill --signal=SIGINT sig-test
          sleep 3
          # invert the check (it should be not 0/not running)
          docker ps
          # check signal catch(STDERR) log. Because of
          # changedetectionio/__init__.py: logger.add(sys.stderr, level=logger_level)
          docker logs sig-test 2>&1 | grep 'Shutdown: Got Signal - SIGINT' || exit 1
          test -z "`docker ps|grep sig-test`"
          if [ $? -ne 0 ]; then
            echo "Looks like container was running when it shouldn't be"
            docker ps
            exit 1
          fi

          # @todo - scan the container log to see the right "graceful shutdown" text exists
          docker rm sig-test


          echo SIGTERM Shutdown request test
          docker run --name sig-test -d test-changedetectionio
          sleep 3
          echo ">>> Sending SIGTERM to sig-test container"
          docker kill --signal=SIGTERM sig-test
          sleep 3
          # invert the check (it should be not 0/not running)
          docker ps
          # check signal catch(STDERR) log. Because of
          # changedetectionio/__init__.py: logger.add(sys.stderr, level=logger_level)
          docker logs sig-test 2>&1 | grep 'Shutdown: Got Signal - SIGTERM' || exit 1
          test -z "`docker ps|grep sig-test`"
          if [ $? -ne 0 ]; then
            echo "Looks like container was running when it shouldn't be"
            docker ps
            exit 1
          fi

          # @todo - scan the container log to see the right "graceful shutdown" text exists
          docker rm sig-test

  # Upgrade path test
  upgrade-path-test:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 25
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v6
        with:
          fetch-depth: 0 # Fetch all history and tags for upgrade testing

      - name: Set up Python ${{ env.PYTHON_VERSION }}
        uses: actions/setup-python@v6
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Check upgrade works without error
        run: |
          echo "=== Testing upgrade path from 0.49.1 to ${{ github.ref_name }} (${{ github.sha }}) ==="
          sudo apt-get update && sudo apt-get install -y --no-install-recommends \
            g++ \
            gcc \
            libc-dev \
            libffi-dev \
            libjpeg-dev \
            libssl-dev \
            libxslt-dev \
            make \
            patch \
            pkg-config \
            zlib1g-dev

          # Checkout old version and create datastore
          git checkout 0.49.1
          python3 -m venv .venv
          source .venv/bin/activate
          pip install -r requirements.txt
          pip install 'pyOpenSSL>=23.2.0'

          echo "=== Running version 0.49.1 to create datastore ==="
          ALLOW_IANA_RESTRICTED_ADDRESSES=true python3 ./changedetection.py -C -d /tmp/data &
          APP_PID=$!

          # Wait for app to be ready
          echo "Waiting for 0.49.1 to be ready..."
          sleep 6

          # Extract API key from datastore (0.49.1 uses url-watches.json)
          API_KEY=$(jq -r '.settings.application.api_access_token // empty' /tmp/data/url-watches.json)
          echo "API Key: ${API_KEY:0:8}..."

          # Create a watch with tag "github-group-test" via API
          echo "Creating test watch with tag via API..."
          curl -X POST "http://127.0.0.1:5000/api/v1/watch" \
            -H "x-api-key: ${API_KEY}" \
            -H "Content-Type: application/json" \
            --show-error --fail \
            --retry 6 --retry-delay 1 --retry-connrefused \
            -d '{
              "url": "https://example.com/upgrade-test",
              "tag": "github-group-test"
            }'

          echo "✓ Created watch with tag 'github-group-test'"

          # Create a specific test URL watch
          echo "Creating test URL watch via API..."
          curl -X POST "http://127.0.0.1:5000/api/v1/watch" \
            -H "x-api-key: ${API_KEY}" \
            -H "Content-Type: application/json" \
            --show-error --fail \
            -d '{
              "url": "http://localhost/test.txt"
            }'

          echo "✓ Created watch for 'http://localhost/test.txt' in version 0.49.1"

          # Stop the old version gracefully
          kill $APP_PID
          wait $APP_PID || true
          echo "✓ Version 0.49.1 stopped"

          # Upgrade to current version (use commit SHA since we're in detached HEAD)
          echo "Upgrading to commit ${{ github.sha }}"
          git checkout ${{ github.sha }}
          pip install -r requirements.txt

          echo "=== Running current version (commit ${{ github.sha }}) with old datastore (testing mode) ==="
          ALLOW_IANA_RESTRICTED_ADDRESSES=true TESTING_SHUTDOWN_AFTER_DATASTORE_LOAD=1 python3 ./changedetection.py -d /tmp/data > /tmp/upgrade-test.log 2>&1

          echo "=== Upgrade test output ==="
          cat /tmp/upgrade-test.log
          echo "✓ Datastore upgraded successfully"

          # Now start the current version normally to verify the tag survived
          echo "=== Starting current version to verify tag exists after upgrade ==="
          ALLOW_IANA_RESTRICTED_ADDRESSES=true timeout 20 python3 ./changedetection.py -d /tmp/data > /tmp/ui-test.log 2>&1 &
          APP_PID=$!

          # Wait for app to be ready and fetch UI
          echo "Waiting for current version to be ready..."
          sleep 5
          curl --retry 6 --retry-delay 1 --retry-connrefused --silent http://127.0.0.1:5000 > /tmp/ui-output.html

          # Verify tag exists in UI
          if grep -q "github-group-test" /tmp/ui-output.html; then
            echo "✓ Tag 'github-group-test' found in UI after upgrade"
          else
            echo "ERROR: Tag 'github-group-test' not found in UI after upgrade"
            echo "=== UI Output ==="
            cat /tmp/ui-output.html
            echo "=== App Log ==="
            cat /tmp/ui-test.log
            kill $APP_PID || true
            exit 1
          fi

          # Verify test URL exists in UI
          if grep -q "http://localhost/test.txt" /tmp/ui-output.html; then
            echo "✓ Watch URL 'http://localhost/test.txt' found in UI after upgrade"
          else
            echo "ERROR: Watch URL 'http://localhost/test.txt' not found in UI after upgrade"
            echo "=== UI Output ==="
            cat /tmp/ui-output.html
            echo "=== App Log ==="
            cat /tmp/ui-test.log
            kill $APP_PID || true
            exit 1
          fi

          # Cleanup
          kill $APP_PID || true
          wait $APP_PID || true

          echo ""
          echo "✓✓✓ Upgrade test passed: 0.49.1 → ${{ github.ref_name }} ✓✓✓"
          echo " - Commit: ${{ github.sha }}"
          echo " - Datastore migrated successfully"
          echo " - Tag 'github-group-test' survived upgrade"
          echo " - Watch URL 'http://localhost/test.txt' survived upgrade"

          echo "✓ Upgrade test passed: 0.49.1 → ${{ github.ref_name }}"

      - name: Upload upgrade test logs
      - name: Dump container log
        if: always()
        uses: actions/upload-artifact@v7
        run: |
          mkdir output-logs
          docker logs test-cdio-basic-tests > output-logs/test-cdio-basic-tests-stdout-${{ env.PYTHON_VERSION }}.txt
          docker logs test-cdio-basic-tests 2> output-logs/test-cdio-basic-tests-stderr-${{ env.PYTHON_VERSION }}.txt

      - name: Store everything including test-datastore
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: upgrade-test-logs-py${{ env.PYTHON_VERSION }}
          path: /tmp/upgrade-test.log
          name: test-cdio-basic-tests-output-py${{ env.PYTHON_VERSION }}
          path: .

2 .gitignore vendored

@@ -21,7 +21,6 @@ venv/
# IDEs
.idea
.vscode/settings.json
*~

# Datastore files
datastore/
@@ -29,4 +28,3 @@ test-datastore/

# Memory consumption log
test-memory.log
tests/logs/

90 Dockerfile

@@ -5,6 +5,7 @@ ARG PYTHON_VERSION=3.11
FROM python:${PYTHON_VERSION}-slim-bookworm AS builder

# See `cryptography` pin comment in requirements.txt
ARG CRYPTOGRAPHY_DONT_BUILD_RUST=1

RUN apt-get update && apt-get install -y --no-install-recommends \
    g++ \
@@ -16,7 +17,6 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
    libxslt-dev \
    make \
    patch \
    pkg-config \
    zlib1g-dev

RUN mkdir /install
@@ -26,64 +26,27 @@ COPY requirements.txt /requirements.txt

# Use cache mounts and multiple wheel sources for faster ARM builds
ENV PIP_CACHE_DIR=/tmp/pip-cache
# Help Rust find OpenSSL for cryptography package compilation on ARM
ENV PKG_CONFIG_PATH="/usr/lib/pkgconfig:/usr/lib/arm-linux-gnueabihf/pkgconfig:/usr/lib/aarch64-linux-gnu/pkgconfig"
ENV PKG_CONFIG_ALLOW_SYSTEM_CFLAGS=1
ENV OPENSSL_DIR="/usr"
ENV OPENSSL_LIB_DIR="/usr/lib/arm-linux-gnueabihf"
ENV OPENSSL_INCLUDE_DIR="/usr/include/openssl"
# Additional environment variables for cryptography Rust build
ENV CRYPTOGRAPHY_DONT_BUILD_RUST=1

RUN --mount=type=cache,id=pip,sharing=locked,target=/tmp/pip-cache \
    pip install \
    --prefer-binary \
    --extra-index-url https://www.piwheels.org/simple \
    --extra-index-url https://pypi.anaconda.org/ARM-software/simple \
    --cache-dir=/tmp/pip-cache \
    --target=/dependencies \
    -r /requirements.txt
RUN --mount=type=cache,target=/tmp/pip-cache \
    pip install \
    --extra-index-url https://www.piwheels.org/simple \
    --extra-index-url https://pypi.anaconda.org/ARM-software/simple \
    --cache-dir=/tmp/pip-cache \
    --target=/dependencies \
    -r /requirements.txt

# Playwright is an alternative to Selenium
# Excluded this package from requirements.txt to prevent arm/v6 and arm/v7 builds from failing
# https://github.com/dgtlmoon/changedetection.io/pull/1067 also musl/alpine (not supported)
RUN --mount=type=cache,id=pip,sharing=locked,target=/tmp/pip-cache \
    pip install \
    --prefer-binary \
    --cache-dir=/tmp/pip-cache \
    --target=/dependencies \
    playwright~=1.56.0 \
    || echo "WARN: Failed to install Playwright. The application can still run, but the Playwright option will be disabled."

# OpenCV is optional for fast image comparison (pixelmatch is the fallback)
# Skip on arm/v7 and arm/v8 where builds take weeks - excluded from requirements.txt
ARG TARGETPLATFORM
RUN --mount=type=cache,id=pip,sharing=locked,target=/tmp/pip-cache \
    case "$TARGETPLATFORM" in \
    linux/arm/v7|linux/arm/v8) \
        echo "INFO: Skipping OpenCV on $TARGETPLATFORM (build takes too long), using pixelmatch fallback" \
        ;; \
    *) \
        pip install \
        --prefer-binary \
        --extra-index-url https://www.piwheels.org/simple \
        --cache-dir=/tmp/pip-cache \
        --target=/dependencies \
        opencv-python-headless>=4.8.0.76 \
        || echo "WARN: OpenCV install failed, will use pixelmatch fallback" \
        ;; \
    esac

RUN --mount=type=cache,target=/tmp/pip-cache \
    pip install \
    --cache-dir=/tmp/pip-cache \
    --target=/dependencies \
    playwright~=1.48.0 \
    || echo "WARN: Failed to install Playwright. The application can still run, but the Playwright option will be disabled."

# Final image stage
FROM python:${PYTHON_VERSION}-slim-bookworm
LABEL org.opencontainers.image.source="https://github.com/dgtlmoon/changedetection.io"
LABEL org.opencontainers.image.url="https://changedetection.io"
LABEL org.opencontainers.image.documentation="https://changedetection.io/tutorials"
LABEL org.opencontainers.image.title="changedetection.io"
LABEL org.opencontainers.image.description="Self-hosted web page change monitoring and notification service"
LABEL org.opencontainers.image.licenses="Apache-2.0"
LABEL org.opencontainers.image.vendor="changedetection.io"

RUN apt-get update && apt-get install -y --no-install-recommends \
    libxslt1.1 \
@@ -94,11 +57,6 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
    # favicon type detection and other uses
    file \
    zlib1g \
    # OpenCV dependencies for image processing
    libglib2.0-0 \
    libsm6 \
    libxext6 \
    libxrender-dev \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

@@ -118,14 +76,6 @@ EXPOSE 5000

# The actual flask app module
COPY changedetectionio /app/changedetectionio

# Compile translation files for i18n support
RUN pybabel compile -d /app/changedetectionio/translations

# Also for OpenAPI validation wrapper - needs the YML
RUN [ ! -d "/app/docs" ] && mkdir /app/docs
COPY docs/api-spec.yaml /app/docs/api-spec.yaml

# Starting wrapper
COPY changedetection.py /app/changedetection.py

@@ -134,19 +84,7 @@ COPY changedetection.py /app/changedetection.py
ARG LOGGER_LEVEL=''
ENV LOGGER_LEVEL="$LOGGER_LEVEL"

# Default
ENV LC_ALL=en_US.UTF-8

WORKDIR /app

# Copy and set up entrypoint script for installing extra packages
COPY docker-entrypoint.sh /docker-entrypoint.sh
RUN chmod +x /docker-entrypoint.sh

# Set entrypoint to handle EXTRA_PACKAGES env var
ENTRYPOINT ["/docker-entrypoint.sh"]

# Default command (can be overridden in docker-compose.yml)
CMD ["python", "./changedetection.py", "-d", "/datastore"]

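A hypothetical invocation (not part of the diff) showing how the TARGETPLATFORM switch above would take the OpenCV-skipping branch during a cross-build; the tag name is illustrative:

docker buildx build --platform linux/arm/v7 --build-arg PYTHON_VERSION=3.11 -t changedetection.io:armv7 .
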
@@ -1,23 +1,17 @@
recursive-include changedetectionio/api *
include docs/api-spec.yaml
recursive-include changedetectionio/blueprint *
recursive-include changedetectionio/conditions *
recursive-include changedetectionio/content_fetchers *
recursive-include changedetectionio/jinja2_custom *
recursive-include changedetectionio/conditions *
recursive-include changedetectionio/model *
recursive-include changedetectionio/notification *
recursive-include changedetectionio/processors *
recursive-include changedetectionio/realtime *
recursive-include changedetectionio/static *
recursive-include changedetectionio/store *
recursive-include changedetectionio/templates *
recursive-include changedetectionio/tests *
recursive-include changedetectionio/translations *
recursive-include changedetectionio/widgets *
prune changedetectionio/static/package-lock.json
prune changedetectionio/static/styles/node_modules
prune changedetectionio/static/styles/package-lock.json
include changedetectionio/favicon_utils.py
include changedetection.py
include requirements.txt
include README-pip.md

@@ -14,7 +14,7 @@ Ideal for monitoring price changes, content edits, conditional changes and more.

[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot.png" style="max-width:100%;" alt="Self-hosted web page change monitoring, list of websites with changes" title="Self-hosted web page change monitoring, list of websites with changes" />](https://changedetection.io)


[**Don't have time? Try our extremely affordable subscription, use our proxies and support!**](https://changedetection.io)


@@ -31,7 +31,7 @@ Available when connected to a <a href="https://github.com/dgtlmoon/changedetecti

### Perform interactive browser steps

Fill in text boxes, click buttons and more, setup your changedetection scenario.

Using the **Browser Steps** configuration, add basic steps before performing change detection, such as logging into websites, adding a product to a cart, accept cookie logins, entering dates and refining searches.

@@ -54,7 +54,7 @@ Requires Playwright to be enabled.
- Know when your favourite whiskey is on sale, or other special deals are announced before anyone else
- COVID related news from government websites
- University/organisation news from their website
- Detect and monitor changes in JSON API responses
- JSON API monitoring and alerting
- Changes in legal and other documents
- Trigger API calls via notifications when text appears on a website
@@ -86,7 +86,7 @@ _Need an actual Chrome runner with Javascript support? We support fetching via W

We [recommend and use Bright Data](https://brightdata.grsm.io/n0r16zf7eivq) global proxy services, Bright Data will match any first deposit up to $100 using our signup link.

[Oxylabs](https://oxylabs.go2cloud.org/SH2d) is also an excellent proxy provider and well worth using, they offer Residential, ISP, Rotating and many other proxy types to suit your project.
[Oxylabs](https://oxylabs.go2cloud.org/SH2d) is also an excellent proxy provider and well worth using, they offer Residental, ISP, Rotating and many other proxy types to suit your project.

Please :star: star :star: this project and help it grow! https://github.com/dgtlmoon/changedetection.io/

@@ -106,3 +106,4 @@ $ changedetection.io -d /path/to/empty/data/dir -p 5000

Then visit http://127.0.0.1:5000 , you should now be able to access the UI.

See https://changedetection.io for more information.


@@ -183,9 +183,6 @@ docker compose pull && docker compose up -d

See the wiki for more information https://github.com/dgtlmoon/changedetection.io/wiki

## Different browser viewport sizes (mobile, desktop etc)

If you are using the recommended `sockpuppetbrowser` (which is in the docker-compose.yml as a setting to be uncommented) you can easily set different viewport sizes for your web page change detection, [see more information here about setting up different viewport sizes](https://github.com/dgtlmoon/sockpuppetbrowser?tab=readme-ov-file#setting-viewport-size).

## Filters

@@ -283,10 +280,7 @@ Excel import is recommended - that way you can better organise tags/groups of we
## API Support

Full REST API for programmatic management of watches, tags, notifications and more.

- **[Interactive API Documentation](https://changedetection.io/docs/api_v1/index.html)** - Complete API reference with live testing
- **[OpenAPI Specification](docs/api-spec.yaml)** - Generate SDKs for any programming language
Supports managing the website watch list [via our API](https://changedetection.io/docs/api_v1/index.html)

## Support us

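For example, creating a watch over the API looks like this (a sketch adapted from the upgrade-test calls earlier in this diff; the key and URL are placeholders):

curl -X POST "http://127.0.0.1:5000/api/v1/watch" \
  -H "x-api-key: YOUR_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{"url": "https://example.com", "tag": "example"}'
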
@@ -1,5 +0,0 @@
[python: **.py]
keywords = _:1,_l:1,gettext:1

[jinja2: **/templates/**.html]
encoding = utf-8

@@ -64,7 +64,7 @@ def count_words_in_history(watch):
            return 0

        latest_key = list(watch.history.keys())[-1]
        latest_content = watch.get_history_snapshot(timestamp=latest_key)
        latest_content = watch.get_history_snapshot(latest_key)
        return len(latest_content.split())
    except Exception as e:
        logger.error(f"Error counting words: {str(e)}")

@@ -1,91 +1,23 @@
#!/usr/bin/env python3

# Read more https://github.com/dgtlmoon/changedetection.io/wiki
# Semver means never use .01, or 00. Should be .1.
__version__ = '0.54.6'
__version__ = '0.50.7'

from changedetectionio.strtobool import strtobool
from json.decoder import JSONDecodeError

from loguru import logger
import getopt
import logging
import os
import platform
import signal
import threading
import time

import sys

# Eventlet completely removed - using threading mode for SocketIO
# This provides better Python 3.12+ compatibility and eliminates eventlet/asyncio conflicts
# Note: store and changedetection_app are imported inside main() to avoid
# initialization before argument parsing (allows --help to work without loading everything)

# ==============================================================================
# Multiprocessing Configuration - CRITICAL for Thread Safety
# ==============================================================================
#
# PROBLEM: Python 3.12+ warns about fork() with multi-threaded processes:
#   "This process is multi-threaded, use of fork() may lead to deadlocks"
#
# WHY IT'S DANGEROUS:
# 1. This Flask app has multiple threads (HTTP handlers, workers, SocketIO)
# 2. fork() copies ONLY the calling thread to the child process
# 3. BUT fork() also copies all locks/mutexes in their current state
# 4. If another thread held a lock during fork() → child has locked lock with no owner
# 5. Result: PERMANENT DEADLOCK if child tries to acquire that lock
#
# SOLUTION: Use 'spawn' instead of 'fork'
# - spawn starts a fresh Python interpreter (no inherited threads or locks)
# - Slower (~200ms vs ~1ms) but safe with multi-threaded parent
# - Consistent across all platforms (Windows already uses spawn by default)
#
# IMPLEMENTATION:
# 1. Explicit contexts everywhere (primary protection):
#    - playwright.py: ctx = multiprocessing.get_context('spawn')
#    - puppeteer.py: ctx = multiprocessing.get_context('spawn')
#    - isolated_opencv.py: ctx = multiprocessing.get_context('spawn')
#    - isolated_libvips.py: ctx = multiprocessing.get_context('spawn')
#
# 2. Global default (defense-in-depth, below):
#    - Safety net if future code forgets explicit context
#    - Protects against third-party libraries using Process()
#    - Costs nothing (explicit contexts always override it)
#
# WHY BOTH?
# - Explicit contexts: Clear, self-documenting, always works
# - Global default: Safety net for forgotten contexts or library code
# - If someone writes "Process()" instead of "ctx.Process()", still safe!
#
# See: https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
# ==============================================================================

import multiprocessing
import os
import sys

# Limit glibc malloc arena count to prevent RSS growth from concurrent requests.
# Default: glibc creates up to 8×CPU_cores arenas. Each concurrent thread/connection
# can trigger a new arena, and freed memory stays mapped in those arenas as RSS forever.
# With MALLOC_ARENA_MAX=2, at most 2 arenas are used; freed pages return to the OS faster.
# Must be set before worker threads start; env var is read lazily by glibc on first arena creation.
if 'MALLOC_ARENA_MAX' not in os.environ:
    os.environ['MALLOC_ARENA_MAX'] = '2'
    try:
        import ctypes as _ctypes
        _ctypes.CDLL('libc.so.6').mallopt(-8, 2)  # M_ARENA_MAX = -8
    except Exception:
        pass

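Since glibc reads the variable lazily, the same cap can also be supplied from outside the process, for instance at container level (hypothetical deployment command, image name as published by this project):

docker run -e MALLOC_ARENA_MAX=2 -d dgtlmoon/changedetection.io
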
# Set spawn as global default (safety net - all our code uses explicit contexts anyway)
# Skip in tests to avoid breaking pytest-flask's LiveServer fixture (uses unpicklable local functions)
if 'pytest' not in sys.modules:
    try:
        if multiprocessing.get_start_method(allow_none=True) is None:
            multiprocessing.set_start_method('spawn', force=False)
            logger.debug("Set multiprocessing default to 'spawn' for thread safety (explicit contexts used everywhere)")
    except RuntimeError:
        logger.debug(f"Multiprocessing start method already set: {multiprocessing.get_start_method()}")
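A minimal standalone sketch (illustrative only, not part of the diff) of the explicit-context pattern the comments above describe; the child starts in a fresh interpreter, so no locks held by the parent's worker threads can leak into it:

import multiprocessing

def probe(q):
    # Runs in a freshly spawned interpreter - nothing inherited from parent threads
    q.put('child started cleanly')

if __name__ == '__main__':
    ctx = multiprocessing.get_context('spawn')  # explicit context, as in playwright.py etc.
    q = ctx.Queue()
    p = ctx.Process(target=probe, args=(q,))
    p.start()
    print(q.get())
    p.join()
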
from changedetectionio import store
from changedetectionio.flask_app import changedetection_app
from loguru import logger

# Only global so we can access it in the signal handler
app = None
@@ -98,38 +30,18 @@ def get_version():
def sigshutdown_handler(_signo, _stack_frame):
    name = signal.Signals(_signo).name
    logger.critical(f'Shutdown: Got Signal - {name} ({_signo}), Fast shutdown initiated')

    # Set exit flag immediately to stop all loops
    app.config.exit.set()
    datastore.stop_thread = True

    # Log memory consumption before shutting down workers (cross-platform)
    # Shutdown workers immediately
    try:
        import psutil
        process = psutil.Process()
        mem_info = process.memory_info()
        rss_mb = mem_info.rss / 1024 / 1024
        vms_mb = mem_info.vms / 1024 / 1024
        logger.info(f"Memory consumption before worker shutdown: RSS={rss_mb:,.2f} MB, VMS={vms_mb:,.2f} MB")
    except Exception as e:
        logger.warning(f"Could not retrieve memory stats: {str(e)}")

    # Shutdown workers and queues immediately
    try:
        from changedetectionio import worker_pool
        worker_pool.shutdown_workers()
        from changedetectionio import worker_handler
        worker_handler.shutdown_workers()
    except Exception as e:
        logger.error(f"Error shutting down workers: {str(e)}")

    # Close janus queues properly
    try:
        from changedetectionio.flask_app import update_q, notification_q
        update_q.close()
        notification_q.close()
        logger.debug("Queues closed successfully")
    except Exception as e:
        logger.critical(f"CRITICAL: Failed to close queues: {e}")

    # Shutdown socketio server fast
    from changedetectionio.flask_app import socketio_server
    if socketio_server and hasattr(socketio_server, 'shutdown'):
@@ -138,80 +50,25 @@ def sigshutdown_handler(_signo, _stack_frame):
    except Exception as e:
        logger.error(f"Error shutting down Socket.IO server: {str(e)}")

    # With immediate persistence, all data is already saved
    logger.success('All data already persisted (immediate commits enabled).')

    # Save data quickly
    try:
        datastore.sync_to_json()
        logger.success('Fast sync to disk complete.')
    except Exception as e:
        logger.error(f"Error syncing to disk: {str(e)}")

    sys.exit()

def print_help():
    """Print help text for command line options"""
    print('Usage: changedetection.py [options]')
    print('')
    print('Standard options:')
    print('  -s SSL enable')
    print('  -h HOST Listen host (default: 0.0.0.0)')
    print('  -p PORT Listen port (default: 5000)')
    print('  -d PATH Datastore path')
    print('  -l LEVEL Log level (TRACE, DEBUG, INFO, SUCCESS, WARNING, ERROR, CRITICAL)')
    print('  -c Cleanup unused snapshots')
    print('  -C Create datastore directory if it doesn\'t exist')
    print('  -P true/false Set all watches paused (true) or active (false)')
    print('')
    print('Add URLs on startup:')
    print('  -u URL Add URL to watch (can be used multiple times)')
    print('  -u0 \'JSON\' Set options for first -u URL (e.g. \'{"processor":"text_json_diff"}\')')
    print('  -u1 \'JSON\' Set options for second -u URL (0-indexed)')
    print('  -u2 \'JSON\' Set options for third -u URL, etc.')
    print('      Available options: processor, fetch_backend, headers, method, etc.')
    print('      See model/Watch.py for all available options')
    print('')
    print('Recheck on startup:')
    print('  -r all Queue all watches for recheck on startup')
    print('  -r UUID,... Queue specific watches (comma-separated UUIDs)')
    print('  -r all N Queue all watches, wait for completion, repeat N times')
    print('  -r UUID,... N Queue specific watches, wait for completion, repeat N times')
    print('')
    print('Batch mode:')
    print('  -b Run in batch mode (process queue then exit)')
    print('     Useful for CI/CD, cron jobs, or one-time checks')
    print('     NOTE: Batch mode checks if Flask is running and aborts if port is in use')
    print('     Use -p PORT to specify a different port if needed')
    print('')
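Putting the flags above together, a couple of hypothetical invocations (URLs and paths are placeholders):

# Add two URLs, give the first one explicit options, then run once and exit (batch mode)
python3 ./changedetection.py -C -d /tmp/data -b \
    -u https://example.com -u0 '{"processor": "text_json_diff"}' \
    -u https://example.org

# Queue every existing watch for recheck, wait for completion, repeat 3 times
python3 ./changedetection.py -d /tmp/data -r all 3
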
def main():
    global datastore
    global app

    # Early help/version check before any initialization
    if '--help' in sys.argv or '-help' in sys.argv:
        print_help()
        sys.exit(0)

    if '--version' in sys.argv or '-v' in sys.argv:
        print(f'changedetection.io {__version__}')
        sys.exit(0)

    # Import heavy modules after help/version checks to keep startup fast for those flags
    from changedetectionio import store
    from changedetectionio.flask_app import changedetection_app

    datastore_path = None
    # Set a default logger level
    logger_level = 'DEBUG'
    include_default_watches = True
    all_paused = None  # None means don't change, True/False to set

    do_cleanup = False
    host = os.environ.get("LISTEN_HOST", "0.0.0.0").strip()
    port = int(os.environ.get('PORT', 5000))
    ssl_mode = False

    # Lists for multiple URLs and their options
    urls_to_add = []
    url_options = {}  # Key: index (0-based), Value: dict of options
    recheck_watches = None  # None, 'all', or list of UUIDs
    recheck_repeat_count = 1  # Number of times to repeat recheck cycle
    batch_mode = False  # Run once then exit when queue is empty

    # On Windows, create and use a default path.
    if os.name == 'nt':
        datastore_path = os.path.expandvars(r'%APPDATA%\changedetection.io')
@@ -220,72 +77,16 @@ def main():
        # Must be absolute so that send_from_directory doesn't try to make it relative to backend/
        datastore_path = os.path.join(os.getcwd(), "../datastore")

    # Pre-process arguments to extract -u, -u<N>, and -r options before getopt
    # This allows unlimited -u0, -u1, -u2, ... options without predefining them
    cleaned_argv = ['changedetection.py']  # Start with program name
    i = 1
    while i < len(sys.argv):
        arg = sys.argv[i]

        # Handle -u (add URL)
        if arg == '-u' and i + 1 < len(sys.argv):
            urls_to_add.append(sys.argv[i + 1])
            i += 2
            continue

        # Handle -u<N> (set options for URL at index N)
        if arg.startswith('-u') and len(arg) > 2 and arg[2:].isdigit():
            idx = int(arg[2:])
            if i + 1 < len(sys.argv):
                try:
                    import json
                    url_options[idx] = json.loads(sys.argv[i + 1])
                except json.JSONDecodeError as e:
                    print(f'Error: Invalid JSON for {arg}: {sys.argv[i + 1]}')
                    print(f'JSON decode error: {e}')
                    sys.exit(2)
            i += 2
            continue

        # Handle -r (recheck watches)
        if arg == '-r' and i + 1 < len(sys.argv):
            recheck_arg = sys.argv[i + 1]
            if recheck_arg.lower() == 'all':
                recheck_watches = 'all'
            else:
                # Parse comma-separated list of UUIDs
                recheck_watches = [uuid.strip() for uuid in recheck_arg.split(',') if uuid.strip()]

            # Check for optional repeat count as third argument
            if i + 2 < len(sys.argv) and sys.argv[i + 2].isdigit():
                recheck_repeat_count = int(sys.argv[i + 2])
                if recheck_repeat_count < 1:
                    print(f'Error: Repeat count must be at least 1, got {recheck_repeat_count}')
                    sys.exit(2)
                i += 3
            else:
                i += 2
            continue

        # Handle -b (batch mode - run once and exit)
        if arg == '-b':
            batch_mode = True
            i += 1
            continue

        # Keep other arguments for getopt
        cleaned_argv.append(arg)
        i += 1

    try:
        opts, args = getopt.getopt(cleaned_argv[1:], "6Csd:h:p:l:P:", "port")
    except getopt.GetoptError as e:
        print_help()
        print(f'Error: {e}')
        opts, args = getopt.getopt(sys.argv[1:], "6Ccsd:h:p:l:", "port")
    except getopt.GetoptError:
        print('backend.py -s SSL enable -h [host] -p [port] -d [datastore path] -l [debug level - TRACE, DEBUG(default), INFO, SUCCESS, WARNING, ERROR, CRITICAL]')
        sys.exit(2)

    create_datastore_dir = False

    # Set a default logger level
    logger_level = 'DEBUG'
    # Set a logger level via shell env variable
    # Used: Dockerfile for CICD
    # To set logger level for pytest, see the app function in tests/conftest.py
@@ -306,6 +107,10 @@ def main():
        if opt == '-d':
            datastore_path = arg

        # Cleanup (remove text files that aren't in the index)
        if opt == '-c':
            do_cleanup = True

        # Create the datadir if it doesn't exist
        if opt == '-C':
            create_datastore_dir = True
@@ -313,18 +118,6 @@ def main():
        if opt == '-l':
            logger_level = int(arg) if arg.isdigit() else arg.upper()

        if opt == '-P':
            try:
                all_paused = bool(strtobool(arg))
            except ValueError:
                print(f'Error: Invalid value for -P option: {arg}')
                print('Expected: true, false, yes, no, 1, or 0')
                sys.exit(2)

    # If URLs are provided, don't include default watches
    if urls_to_add:
        include_default_watches = False

    logger.success(f"changedetection.io version {get_version()} starting.")
    # Launch using SocketIO run method for proper integration (if enabled)
@@ -355,22 +148,12 @@ def main():
                      " WARNING, ERROR, CRITICAL")
        sys.exit(2)

    # Disable verbose pyppeteer logging to prevent memory leaks from large CDP messages
    # Set both parent and child loggers since pyppeteer hardcodes DEBUG level
    logging.getLogger('pyppeteer.connection').setLevel(logging.WARNING)
    logging.getLogger('pyppeteer.connection.Connection').setLevel(logging.WARNING)

    # isn't there some @thingy to attach to each route to tell it, that this route needs a datastore
    app_config = {
        'datastore_path': datastore_path,
        'batch_mode': batch_mode,
        'recheck_watches': recheck_watches,
        'recheck_repeat_count': recheck_repeat_count
    }
    app_config = {'datastore_path': datastore_path}

    if not os.path.isdir(app_config['datastore_path']):
        if create_datastore_dir:
            os.makedirs(app_config['datastore_path'], exist_ok=True)
            os.mkdir(app_config['datastore_path'])
        else:
            logger.critical(
                f"ERROR: Directory path for the datastore '{app_config['datastore_path']}'"
@@ -380,224 +163,15 @@ def main():
            sys.exit(2)

    try:
        datastore = store.ChangeDetectionStore(datastore_path=app_config['datastore_path'], version_tag=__version__, include_default_watches=include_default_watches)
        datastore = store.ChangeDetectionStore(datastore_path=app_config['datastore_path'], version_tag=__version__)
    except JSONDecodeError as e:
        # Don't start if the JSON DB looks corrupt
        logger.critical(f"ERROR: JSON DB or Proxy List JSON at '{app_config['datastore_path']}' appears to be corrupt, aborting.")
        logger.critical(str(e))
        sys.exit(1)

    # Testing mode: Exit cleanly after datastore initialization (for CI/CD upgrade tests)
    if os.environ.get('TESTING_SHUTDOWN_AFTER_DATASTORE_LOAD'):
        logger.success(f"TESTING MODE: Datastore loaded successfully from {app_config['datastore_path']}")
        logger.success(f"TESTING MODE: Schema version: {datastore.data['settings']['application'].get('schema_version', 'unknown')}")
        logger.success(f"TESTING MODE: Loaded {len(datastore.data['watching'])} watches")
        logger.success("TESTING MODE: Exiting cleanly (TESTING_SHUTDOWN_AFTER_DATASTORE_LOAD is set)")
        sys.exit(0)

    # Apply all_paused setting if specified via CLI
    if all_paused is not None:
        datastore.data['settings']['application']['all_paused'] = all_paused
        logger.info(f"Setting all watches paused: {all_paused}")

    # Inject datastore into plugins that need access to settings
    from changedetectionio.pluggy_interface import inject_datastore_into_plugins
    inject_datastore_into_plugins(datastore)

    # Step 1: Add URLs with their options (if provided via -u flags)
    added_watch_uuids = []
    if urls_to_add:
        logger.info(f"Adding {len(urls_to_add)} URL(s) from command line")
        for idx, url in enumerate(urls_to_add):
            extras = url_options.get(idx, {})
            if extras:
                logger.debug(f"Adding watch {idx}: {url} with options: {extras}")
            else:
                logger.debug(f"Adding watch {idx}: {url}")

            new_uuid = datastore.add_watch(url=url, extras=extras)
            if new_uuid:
                added_watch_uuids.append(new_uuid)
                logger.success(f"Added watch: {url} (UUID: {new_uuid})")
            else:
                logger.error(f"Failed to add watch: {url}")
                return
    app = changedetection_app(app_config, datastore)

    # Step 2: Queue newly added watches (if -u was provided in batch mode)
    # This must happen AFTER app initialization so update_q is available
    if batch_mode and added_watch_uuids:
        from changedetectionio.flask_app import update_q
        from changedetectionio import queuedWatchMetaData, worker_pool

        logger.info(f"Batch mode: Queuing {len(added_watch_uuids)} newly added watches")
        for watch_uuid in added_watch_uuids:
            try:
                worker_pool.queue_item_async_safe(
                    update_q,
                    queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': watch_uuid})
                )
                logger.debug(f"Queued newly added watch: {watch_uuid}")
            except Exception as e:
                logger.error(f"Failed to queue watch {watch_uuid}: {e}")

    # Step 3: Queue watches for recheck (if -r was provided)
    # This must happen AFTER app initialization so update_q is available
    if recheck_watches is not None:
        from changedetectionio.flask_app import update_q
        from changedetectionio import queuedWatchMetaData, worker_pool

        watches_to_queue = []
        if recheck_watches == 'all':
            # Queue all watches, excluding those already queued in batch mode
            all_watches = list(datastore.data['watching'].keys())
            if batch_mode and added_watch_uuids:
                # Exclude newly added watches that were already queued in batch mode
                watches_to_queue = [uuid for uuid in all_watches if uuid not in added_watch_uuids]
                logger.info(f"Queuing {len(watches_to_queue)} existing watches for recheck ({len(added_watch_uuids)} newly added watches already queued)")
            else:
                watches_to_queue = all_watches
                logger.info(f"Queuing all {len(watches_to_queue)} watches for recheck")
        else:
            # Queue specific UUIDs
            watches_to_queue = recheck_watches
            logger.info(f"Queuing {len(watches_to_queue)} specific watches for recheck")

        queued_count = 0
        for watch_uuid in watches_to_queue:
            if watch_uuid in datastore.data['watching']:
                try:
                    worker_pool.queue_item_async_safe(
                        update_q,
                        queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': watch_uuid})
                    )
                    queued_count += 1
                    logger.debug(f"Queued watch for recheck: {watch_uuid}")
                except Exception as e:
                    logger.error(f"Failed to queue watch {watch_uuid}: {e}")
            else:
                logger.warning(f"Watch UUID not found in datastore: {watch_uuid}")

        logger.success(f"Successfully queued {queued_count} watches for recheck")
    # Step 4: Setup batch mode monitor (if -b was provided)
    if batch_mode:
        from changedetectionio.flask_app import update_q

        # Safety check: Ensure Flask app is not already running on this port
        # Batch mode should never run alongside the web server
        import socket
        test_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

        try:
            # Try to bind to the configured host:port (no SO_REUSEADDR - strict check)
            test_socket.bind((host, port))
            test_socket.close()
            logger.debug(f"Batch mode: Port {port} is available (Flask app not running)")
        except OSError as e:
            test_socket.close()
            # errno 98 = EADDRINUSE (Linux)
            # errno 48 = EADDRINUSE (macOS)
            # errno 10048 = WSAEADDRINUSE (Windows)
            if e.errno in (48, 98, 10048) or "Address already in use" in str(e) or "already in use" in str(e).lower():
                logger.critical(f"ERROR: Batch mode cannot run - port {port} is already in use")
                logger.critical(f"The Flask web server appears to be running on {host}:{port}")
                logger.critical(f"Batch mode is designed for standalone operation (CI/CD, cron jobs, etc.)")
                logger.critical(f"Please either stop the Flask web server, or use a different port with -p PORT")
                sys.exit(1)
            else:
                # Some other socket error - log but continue (might be network configuration issue)
                logger.warning(f"Port availability check failed with unexpected error: {e}")
                logger.warning(f"Continuing with batch mode anyway - be aware of potential conflicts")
        def queue_watches_for_recheck(datastore, iteration):
            """Helper function to queue watches for recheck"""
            watches_to_queue = []
            if recheck_watches == 'all':
                all_watches = list(datastore.data['watching'].keys())
                if batch_mode and added_watch_uuids and iteration == 1:
                    # Only exclude newly added watches on first iteration
                    watches_to_queue = [uuid for uuid in all_watches if uuid not in added_watch_uuids]
                else:
                    watches_to_queue = all_watches
                logger.info(f"Batch mode (iteration {iteration}): Queuing all {len(watches_to_queue)} watches")
            elif recheck_watches:
                watches_to_queue = recheck_watches
                logger.info(f"Batch mode (iteration {iteration}): Queuing {len(watches_to_queue)} specific watches")

            queued_count = 0
            for watch_uuid in watches_to_queue:
                if watch_uuid in datastore.data['watching']:
                    try:
                        worker_pool.queue_item_async_safe(
                            update_q,
                            queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': watch_uuid})
                        )
                        queued_count += 1
                    except Exception as e:
                        logger.error(f"Failed to queue watch {watch_uuid}: {e}")
                else:
                    logger.warning(f"Watch UUID not found in datastore: {watch_uuid}")
            logger.success(f"Batch mode (iteration {iteration}): Successfully queued {queued_count} watches")
            return queued_count

        def batch_mode_monitor():
            """Monitor queue and workers, shutdown or repeat when work is complete"""
            import time

            # Track iterations if repeat mode is enabled
            current_iteration = 1
            total_iterations = recheck_repeat_count if recheck_watches and recheck_repeat_count > 1 else 1

            if total_iterations > 1:
                logger.info(f"Batch mode: Will repeat recheck {total_iterations} times")
            else:
                logger.info("Batch mode: Waiting for all queued items to complete...")

            # Wait a bit for workers to start processing
            time.sleep(3)

            try:
                while current_iteration <= total_iterations:
                    logger.info(f"Batch mode: Waiting for iteration {current_iteration}/{total_iterations} to complete...")

                    # Use the shared wait_for_all_checks function
                    completed = worker_pool.wait_for_all_checks(update_q, timeout=300)

                    if not completed:
                        logger.warning(f"Batch mode: Iteration {current_iteration} timed out after 300 seconds")

                    logger.success(f"Batch mode: Iteration {current_iteration}/{total_iterations} completed")

                    # Check if we need to repeat
                    if current_iteration < total_iterations:
                        logger.info(f"Batch mode: Starting iteration {current_iteration + 1}...")
                        current_iteration += 1

                        # Re-queue watches for next iteration
                        queue_watches_for_recheck(datastore, current_iteration)

                        # Brief pause before continuing
                        time.sleep(2)
                    else:
                        # All iterations complete
                        logger.success(f"Batch mode: All {total_iterations} iterations completed, initiating shutdown")
                        # Trigger shutdown
                        import os, signal
                        os.kill(os.getpid(), signal.SIGTERM)
                        return

            except Exception as e:
                logger.error(f"Batch mode monitor error: {e}")
                logger.error(f"Initiating emergency shutdown")
                import os, signal
                os.kill(os.getpid(), signal.SIGTERM)

        # Start monitor in background thread
        monitor_thread = threading.Thread(target=batch_mode_monitor, daemon=True, name="BatchModeMonitor")
        monitor_thread.start()
        logger.info("Batch mode enabled: Will exit after all queued items are processed")

    # Get the SocketIO instance from the Flask app (created in flask_app.py)
    from changedetectionio.flask_app import socketio_server
    global socketio
@@ -619,17 +193,19 @@ def main():
    else:
        logger.info("SIGUSR1 handler only registered on Linux, skipped.")

    # Go into cleanup mode
    if do_cleanup:
        datastore.remove_unused_snapshots()

    app.config['datastore_path'] = datastore_path

    @app.context_processor
    def inject_template_globals():
        return dict(right_sticky="v"+__version__,
        return dict(right_sticky="v{}".format(datastore.data['version_tag']),
                    new_version_available=app.config['NEW_VERSION_AVAILABLE'],
                    has_password=datastore.data['settings']['application']['password'] != False,
                    socket_io_enabled=datastore.data['settings']['application'].get('ui', {}).get('socket_io_enabled', True),
                    all_paused=datastore.data['settings']['application'].get('all_paused', False),
                    all_muted=datastore.data['settings']['application'].get('all_muted', False)
                    socket_io_enabled=datastore.data['settings']['application']['ui'].get('socket_io_enabled', True)
                    )

    # Monitored websites will not receive a Referer header when a user clicks on an outgoing link.
@@ -651,43 +227,23 @@ def main():
    if os.getenv('USE_X_SETTINGS'):
        logger.info("USE_X_SETTINGS is ENABLED")
        from werkzeug.middleware.proxy_fix import ProxyFix
        app.wsgi_app = ProxyFix(
            app.wsgi_app,
            x_for=1,     # X-Forwarded-For (client IP)
            x_proto=1,   # X-Forwarded-Proto (http/https)
            x_host=1,    # X-Forwarded-Host (original host)
            x_port=1,    # X-Forwarded-Port (original port)
            x_prefix=1   # X-Forwarded-Prefix (URL prefix)
        )
        app.wsgi_app = ProxyFix(app.wsgi_app, x_prefix=1, x_host=1)

    # In batch mode, skip starting the HTTP server - just keep workers running
    if batch_mode:
        logger.info("Batch mode: Skipping HTTP server startup, workers will process queue")
        logger.info("Batch mode: Main thread will wait for shutdown signal")
        # Keep main thread alive until batch monitor triggers shutdown
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            logger.info("Batch mode: Keyboard interrupt received")
            pass
    else:
        # Normal mode: Start HTTP server
        # SocketIO instance is already initialized in flask_app.py
        if socketio_server:
            if ssl_mode:
                logger.success(f"SSL mode enabled, attempting to start with '{ssl_cert_file}' and '{ssl_privkey_file}' in {os.getcwd()}")
                socketio.run(app, host=host, port=int(port), debug=False,
                             ssl_context=(ssl_cert_file, ssl_privkey_file), allow_unsafe_werkzeug=True)
            else:
                socketio.run(app, host=host, port=int(port), debug=False, allow_unsafe_werkzeug=True)
    # SocketIO instance is already initialized in flask_app.py
    if socketio_server:
        if ssl_mode:
            logger.success(f"SSL mode enabled, attempting to start with '{ssl_cert_file}' and '{ssl_privkey_file}' in {os.getcwd()}")
            socketio.run(app, host=host, port=int(port), debug=False,
                         ssl_context=(ssl_cert_file, ssl_privkey_file), allow_unsafe_werkzeug=True)
        else:
            # Run Flask app without Socket.IO if disabled
            logger.info("Starting Flask app without Socket.IO server")
            if ssl_mode:
                logger.success(f"SSL mode enabled, attempting to start with '{ssl_cert_file}' and '{ssl_privkey_file}' in {os.getcwd()}")
                app.run(host=host, port=int(port), debug=False,
                        ssl_context=(ssl_cert_file, ssl_privkey_file))
            else:
                app.run(host=host, port=int(port), debug=False)
            socketio.run(app, host=host, port=int(port), debug=False, allow_unsafe_werkzeug=True)
    else:
        # Run Flask app without Socket.IO if disabled
        logger.info("Starting Flask app without Socket.IO server")
        if ssl_mode:
            logger.success(f"SSL mode enabled, attempting to start with '{ssl_cert_file}' and '{ssl_privkey_file}' in {os.getcwd()}")
            app.run(host=host, port=int(port), debug=False,
                    ssl_context=(ssl_cert_file, ssl_privkey_file))
        else:
            app.run(host=host, port=int(port), debug=False)

@@ -1,96 +1,9 @@
import os
from changedetectionio.strtobool import strtobool
from flask_restful import abort, Resource
from flask import request
from functools import wraps
from . import auth, validate_openapi_request
from ..validate_url import is_safe_valid_url
import json

# Number of URLs above which import switches to background processing
IMPORT_SWITCH_TO_BACKGROUND_THRESHOLD = 20


def default_content_type(content_type='text/plain'):
    """Decorator to set a default Content-Type header if none is provided."""
    def decorator(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            if not request.content_type:
                # Set default content type in the request environment
                request.environ['CONTENT_TYPE'] = content_type
            return f(*args, **kwargs)
        return wrapper
    return decorator
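A hypothetical sketch (not from the diff) of the decorator in use on a flask-restful resource, so a POST body sent without any Content-Type header is still parsed as text/plain; the resource name is illustrative:

class ExampleImport(Resource):  # hypothetical resource for illustration
    @default_content_type('text/plain')
    def post(self):
        # request.content_type is now 'text/plain' if the client omitted the header
        return request.get_data().decode('utf8').splitlines(), 200
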
def convert_query_param_to_type(value, schema_property):
    """
    Convert a query parameter string to the appropriate type based on schema definition.

    Args:
        value: String value from query parameter
        schema_property: Schema property definition with 'type' or 'anyOf' field

    Returns:
        Converted value in the appropriate type

    Supports both OpenAPI 3.1 formats:
    - type: [string, 'null'] (array format)
    - anyOf: [{type: string}, {type: null}] (anyOf format)
    """
    prop_type = schema_property.get('type')

    # Handle OpenAPI 3.1 type arrays: type: [string, 'null']
    if isinstance(prop_type, list):
        # Use the first non-null type from the array
        for t in prop_type:
            if t != 'null':
                prop_type = t
                break
        else:
            prop_type = None

    # Handle anyOf schemas (older format)
    elif 'anyOf' in schema_property:
        # Use the first non-null type from anyOf
        for option in schema_property['anyOf']:
            if option.get('type') and option.get('type') != 'null':
                prop_type = option.get('type')
                break
        else:
            prop_type = None

    # Handle array type (e.g., notification_urls)
    if prop_type == 'array':
        # Support both comma-separated and JSON array format
        if value.startswith('['):
            try:
                return json.loads(value)
            except json.JSONDecodeError:
                return [v.strip() for v in value.split(',')]
        return [v.strip() for v in value.split(',')]

    # Handle object type (e.g., time_between_check, headers)
    elif prop_type == 'object':
        try:
            return json.loads(value)
        except json.JSONDecodeError:
            raise ValueError(f"Invalid JSON object for field: {value}")

    # Handle boolean type
    elif prop_type == 'boolean':
        return strtobool(value)

    # Handle integer type
    elif prop_type == 'integer':
        return int(value)

    # Handle number type (float)
    elif prop_type == 'number':
        return float(value)

    # Default: return as string
    return value
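For reference, a few illustrative calls (not part of the diff; the boolean result assumes the project's strtobool returns 1/0 as distutils did):

convert_query_param_to_type('5', {'type': 'integer'})                    # -> 5
convert_query_param_to_type('a,b', {'type': 'array'})                    # -> ['a', 'b']
convert_query_param_to_type('["a","b"]', {'type': 'array'})              # -> ['a', 'b']
convert_query_param_to_type('true', {'type': ['boolean', 'null']})       # -> 1
convert_query_param_to_type('{"x": 1}', {'anyOf': [{'type': 'object'}]}) # -> {'x': 1}
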
import validators
from . import auth


class Import(Resource):
@@ -99,131 +12,51 @@ class Import(Resource):
        self.datastore = kwargs['datastore']

    @auth.check_token
    @default_content_type('text/plain') #3547 #3542
    @validate_openapi_request('importWatches')
    def post(self):
        """Import a list of watched URLs with optional watch configuration."""
        from . import get_watch_schema_properties
        # Special parameters that are NOT watch configuration
        special_params = {'tag', 'tag_uuids', 'dedupe', 'proxy'}
        """
        @api {post} /api/v1/import Import a list of watched URLs
        @apiDescription Accepts a line-feed separated list of URLs to import, additionally with ?tag_uuids=(tag id), ?tag=(name), ?proxy={key}, ?dedupe=true (default true) one URL per line.
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/import --data-binary @list-of-sites.txt -H"x-api-key:8a111a21bc2f8f1dd9b9353bbd46049a"
        @apiName Import
        @apiGroup Watch
        @apiSuccess (200) {List} OK List of watch UUIDs added
        @apiSuccess (500) {String} ERR Some other error
        """

        extras = {}

        # Handle special 'proxy' parameter
        if request.args.get('proxy'):
            plist = self.datastore.proxy_list
            if not request.args.get('proxy') in plist:
                proxy_list_str = ', '.join(plist) if plist else 'none configured'
                return f"Invalid proxy choice, currently supported proxies are '{proxy_list_str}'", 400
                return "Invalid proxy choice, currently supported proxies are '{}'".format(', '.join(plist)), 400
            else:
                extras['proxy'] = request.args.get('proxy')

        # Handle special 'dedupe' parameter
        dedupe = strtobool(request.args.get('dedupe', 'true'))

        # Handle special 'tag' and 'tag_uuids' parameters
        tags = request.args.get('tag')
        tag_uuids = request.args.get('tag_uuids')

        if tag_uuids:
            tag_uuids = tag_uuids.split(',')

        # Extract ALL other query parameters as watch configuration
        # Get schema from OpenAPI spec (replaces old schema_create_watch)
        schema_properties = get_watch_schema_properties()
        for param_name, param_value in request.args.items():
            # Skip special parameters
            if param_name in special_params:
                continue

            # Skip if not in schema (unknown parameter)
            if param_name not in schema_properties:
                return f"Unknown watch configuration parameter: {param_name}", 400

            # Convert to appropriate type based on schema
            try:
                converted_value = convert_query_param_to_type(param_value, schema_properties[param_name])
                extras[param_name] = converted_value
            except (ValueError, json.JSONDecodeError) as e:
                return f"Invalid value for parameter '{param_name}': {str(e)}", 400

        # Validate processor if provided
        if 'processor' in extras:
            from changedetectionio.processors import available_processors
            available = [p[0] for p in available_processors()]
            if extras['processor'] not in available:
                return f"Invalid processor '{extras['processor']}'. Available processors: {', '.join(available)}", 400

        # Validate fetch_backend if provided
        if 'fetch_backend' in extras:
            from changedetectionio.content_fetchers import available_fetchers
            available = [f[0] for f in available_fetchers()]
            # Also allow 'system' and extra_browser_* patterns
            is_valid = (
                extras['fetch_backend'] == 'system' or
                extras['fetch_backend'] in available or
                extras['fetch_backend'].startswith('extra_browser_')
            )
            if not is_valid:
                return f"Invalid fetch_backend '{extras['fetch_backend']}'. Available: system, {', '.join(available)}", 400

        # Validate notification_urls if provided
        if 'notification_urls' in extras:
            from wtforms import ValidationError
            from changedetectionio.api.Notifications import validate_notification_urls
            try:
                validate_notification_urls(extras['notification_urls'])
            except ValidationError as e:
                return f"Invalid notification_urls: {str(e)}", 400

        urls = request.get_data().decode('utf8').splitlines()
        # Clean and validate URLs upfront
        urls_to_import = []
        added = []
        allow_simplehost = not strtobool(os.getenv('BLOCK_SIMPLEHOSTS', 'False'))
        for url in urls:
            url = url.strip()
            if not len(url):
                continue

            # Validate URL
            if not is_safe_valid_url(url):
                # If hosts that only contain alphanumerics are allowed ("localhost" for example)
                if not validators.url(url, simple_host=allow_simplehost):
                    return f"Invalid or unsupported URL - {url}", 400

            # Check for duplicates if dedupe is enabled
            if dedupe and self.datastore.url_exists(url):
                continue

            urls_to_import.append(url)
            new_uuid = self.datastore.add_watch(url=url, extras=extras, tag=tags, tag_uuids=tag_uuids)
            added.append(new_uuid)

        # For small imports, process synchronously for immediate feedback
        if len(urls_to_import) < IMPORT_SWITCH_TO_BACKGROUND_THRESHOLD:
            added = []
            for url in urls_to_import:
                new_uuid = self.datastore.add_watch(url=url, extras=extras, tag=tags, tag_uuids=tag_uuids)
                added.append(new_uuid)
            return added, 200

        # For large imports (>= 20), process in background thread
        else:
            import threading
            from loguru import logger

            def import_watches_background():
                """Background thread to import watches - discarded after completion."""
                try:
                    added_count = 0
                    for url in urls_to_import:
                        try:
                            self.datastore.add_watch(url=url, extras=extras, tag=tags, tag_uuids=tag_uuids)
|
||||
added_count += 1
|
||||
except Exception as e:
|
||||
logger.error(f"Error importing URL {url}: {e}")
|
||||
|
||||
logger.info(f"Background import complete: {added_count} watches created")
|
||||
except Exception as e:
|
||||
logger.error(f"Error in background import: {e}")
|
||||
|
||||
# Start background thread and return immediately
|
||||
thread = threading.Thread(target=import_watches_background, daemon=True, name="ImportWatches-Background")
|
||||
thread.start()
|
||||
|
||||
return {'status': f'Importing {len(urls_to_import)} URLs in background', 'count': len(urls_to_import)}, 202
|
||||
return added
|
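# --- Editor's note: a hedged client-side sketch (not part of the diff) ---
# Demonstrates the query-parameter-driven import described above: any non-special
# query parameter is coerced against the OpenAPI watch schema. Host, API key and
# parameter values below are placeholders/assumptions.
import requests

resp = requests.post(
    "http://localhost:5000/api/v1/import",
    params={
        "tag": "pricing",                      # special parameter: tag by name
        "dedupe": "true",                      # special parameter: skip already-watched URLs
        "fetch_backend": "system",             # watch config, validated against the schema
        "time_between_check_use_default": "false",
        "time_between_check": '{"hours": 6}',  # object type: JSON-encoded in the query string
    },
    data="https://example.com/page1\nhttps://example.com/page2",
    headers={"x-api-key": "YOUR-API-KEY", "Content-Type": "text/plain"},
)
print(resp.status_code, resp.text)  # 200 with UUID list, or 202 for large background imports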
@@ -1,6 +1,10 @@
from flask_restful import Resource, abort
from flask_expects_json import expects_json
from flask_restful import Resource
from . import auth
from flask_restful import abort, Resource
from flask import request
from . import auth, validate_openapi_request
from . import auth
from . import schema_create_notification_urls, schema_delete_notification_urls

class Notifications(Resource):
    def __init__(self, **kwargs):
@@ -8,9 +12,19 @@ class Notifications(Resource):
        self.datastore = kwargs['datastore']

    @auth.check_token
    @validate_openapi_request('getNotifications')
    def get(self):
        """Return Notification URL List."""
        """
        @api {get} /api/v1/notifications Return Notification URL List
        @apiDescription Return the Notification URL List from the configuration
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/notifications -H"x-api-key:813031b16330fe25e3780cf0325daa45"
            HTTP/1.0 200
            {
                'notification_urls': ["notification-urls-list"]
            }
        @apiName Get
        @apiGroup Notifications
        """

        notification_urls = self.datastore.data.get('settings', {}).get('application', {}).get('notification_urls', [])

@@ -19,9 +33,18 @@ class Notifications(Resource):
        }, 200

    @auth.check_token
    @validate_openapi_request('addNotifications')
    @expects_json(schema_create_notification_urls)
    def post(self):
        """Create Notification URLs."""
        """
        @api {post} /api/v1/notifications Create Notification URLs
        @apiDescription Add one or more notification URLs from the configuration
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/notifications/batch -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json" -d '{"notification_urls": ["url1", "url2"]}'
        @apiName CreateBatch
        @apiGroup Notifications
        @apiSuccess (201) {Object[]} notification_urls List of added notification URLs
        @apiError (400) {String} Invalid input
        """

        json_data = request.get_json()
        notification_urls = json_data.get("notification_urls", [])
@@ -46,9 +69,18 @@ class Notifications(Resource):
        return {'notification_urls': added_urls}, 201

    @auth.check_token
    @validate_openapi_request('replaceNotifications')
    @expects_json(schema_create_notification_urls)
    def put(self):
        """Replace Notification URLs."""
        """
        @api {put} /api/v1/notifications Replace Notification URLs
        @apiDescription Replace all notification URLs with the provided list (can be empty)
        @apiExample {curl} Example usage:
            curl -X PUT http://localhost:5000/api/v1/notifications -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json" -d '{"notification_urls": ["url1", "url2"]}'
        @apiName Replace
        @apiGroup Notifications
        @apiSuccess (200) {Object[]} notification_urls List of current notification URLs
        @apiError (400) {String} Invalid input
        """
        json_data = request.get_json()
        notification_urls = json_data.get("notification_urls", [])

@@ -63,14 +95,24 @@ class Notifications(Resource):

        clean_urls = [url.strip() for url in notification_urls if isinstance(url, str)]
        self.datastore.data['settings']['application']['notification_urls'] = clean_urls
        self.datastore.commit()
        self.datastore.needs_write = True

        return {'notification_urls': clean_urls}, 200

    @auth.check_token
    @validate_openapi_request('deleteNotifications')
    @expects_json(schema_delete_notification_urls)
    def delete(self):
        """Delete Notification URLs."""
        """
        @api {delete} /api/v1/notifications Delete Notification URLs
        @apiDescription Deletes one or more notification URLs from the configuration
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/notifications -X DELETE -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json" -d '{"notification_urls": ["url1", "url2"]}'
        @apiParam {String[]} notification_urls The notification URLs to delete.
        @apiName Delete
        @apiGroup Notifications
        @apiSuccess (204) {String} OK Deleted
        @apiError (400) {String} No matching notification URLs found.
        """

        json_data = request.get_json()
        urls_to_delete = json_data.get("notification_urls", [])
@@ -90,7 +132,7 @@ class Notifications(Resource):
        abort(400, message="No matching notification URLs found.")

        self.datastore.data['settings']['application']['notification_urls'] = notification_urls
        self.datastore.commit()
        self.datastore.needs_write = True

        return 'OK', 204

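# --- Editor's note: a hedged usage sketch (not part of the diff) ---
# Round-trip for the Notifications endpoints above (POST adds, PUT replaces,
# DELETE removes). Host, key and URLs are placeholders; apprise-style URLs such
# as "mailto://..." are typical values.
import requests

base = "http://localhost:5000/api/v1/notifications"
headers = {"x-api-key": "YOUR-API-KEY"}

requests.post(base, json={"notification_urls": ["mailto://user:pass@example.com"]}, headers=headers)
requests.put(base, json={"notification_urls": []}, headers=headers)  # replace with an empty list
print(requests.get(base, headers=headers).json())                    # {'notification_urls': [...]}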
@@ -1,6 +1,6 @@
from flask_restful import Resource, abort
from flask import request
from . import auth, validate_openapi_request
from . import auth

class Search(Resource):
    def __init__(self, **kwargs):
@@ -8,9 +8,21 @@ class Search(Resource):
        self.datastore = kwargs['datastore']

    @auth.check_token
    @validate_openapi_request('searchWatches')
    def get(self):
        """Search for watches by URL or title text."""
        """
        @api {get} /api/v1/search Search for watches
        @apiDescription Search watches by URL or title text
        @apiExample {curl} Example usage:
            curl "http://localhost:5000/api/v1/search?q=https://example.com/page1" -H"x-api-key:813031b16330fe25e3780cf0325daa45"
            curl "http://localhost:5000/api/v1/search?q=https://example.com/page1&tag=Favourites" -H"x-api-key:813031b16330fe25e3780cf0325daa45"
            curl "http://localhost:5000/api/v1/search?q=https://example.com&partial=true" -H"x-api-key:813031b16330fe25e3780cf0325daa45"
        @apiName Search
        @apiGroup Watch Management
        @apiQuery {String} q Search query to match against watch URLs and titles
        @apiQuery {String} [tag] Optional name of tag to limit results (name not UUID)
        @apiQuery {String} [partial] Allow partial matching of URL query
        @apiSuccess (200) {Object} JSON Object containing matched watches
        """
        query = request.args.get('q', '').strip()
        tag_limit = request.args.get('tag', '').strip()
        from changedetectionio.strtobool import strtobool

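# --- Editor's note: a hedged usage sketch (not part of the diff) ---
# Query the search endpoint above; `partial` enables substring matching and
# `tag` filters by tag name (not UUID). Host and key are placeholders.
import requests

resp = requests.get(
    "http://localhost:5000/api/v1/search",
    params={"q": "example.com", "partial": "true", "tag": "Favourites"},
    headers={"x-api-key": "YOUR-API-KEY"},
)
print(resp.json())  # dict of matching watches keyed by UUID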
@@ -1,21 +0,0 @@
import functools
from flask import make_response
from flask_restful import Resource


@functools.cache
def _get_spec_yaml():
    """Build and cache the merged spec as a YAML string (only serialized once per process)."""
    import yaml
    from changedetectionio.api import build_merged_spec_dict
    return yaml.dump(build_merged_spec_dict(), default_flow_style=False, allow_unicode=True)


class Spec(Resource):
    def get(self):
        """Return the merged OpenAPI spec including all registered processor extensions."""
        return make_response(
            _get_spec_yaml(),
            200,
            {'Content-Type': 'application/yaml'}
        )
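# --- Editor's note: a hedged usage sketch (not part of the diff) ---
# The Spec resource above serves the merged OpenAPI document as YAML; note it
# carries no @auth.check_token decorator, so no API key should be needed. The
# mount path below is an assumption (the route registration is not shown here).
import requests
import yaml

spec = yaml.safe_load(requests.get("http://localhost:5000/api/v1/spec").text)  # path assumed
print(sorted(spec.get("components", {}).get("schemas", {}).keys()))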
@@ -1,5 +1,5 @@
from flask_restful import Resource
from . import auth, validate_openapi_request
from . import auth


class SystemInfo(Resource):
@@ -9,9 +9,23 @@ class SystemInfo(Resource):
        self.update_q = kwargs['update_q']

    @auth.check_token
    @validate_openapi_request('getSystemInfo')
    def get(self):
        """Return system info."""
        """
        @api {get} /api/v1/systeminfo Return system info
        @apiDescription Return some info about the current system state
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/systeminfo -H"x-api-key:813031b16330fe25e3780cf0325daa45"
            HTTP/1.0 200
            {
                'queue_size': 10,
                'overdue_watches': ["watch-uuid-list"],
                'uptime': 38344.55,
                'watch_count': 800,
                'version': "0.40.1"
            }
        @apiName Get Info
        @apiGroup System Information
        """
        import time
        overdue_watches = []


@@ -1,189 +1,117 @@
from changedetectionio import queuedWatchMetaData
from changedetectionio import worker_pool
from flask_expects_json import expects_json
from flask_restful import abort, Resource
from loguru import logger

import threading
from flask import request
from . import auth

from . import validate_openapi_request
# Import schemas from __init__.py
from . import schema_tag, schema_create_tag, schema_update_tag


class Tag(Resource):
    def __init__(self, **kwargs):
        # datastore is a black box dependency
        self.datastore = kwargs['datastore']
        self.update_q = kwargs['update_q']

    # Get information about a single tag
    # curl http://localhost:5000/api/v1/tag/<uuid_str:uuid>
    # curl http://localhost:5000/api/v1/tag/<string:uuid>
    @auth.check_token
    @validate_openapi_request('getTag')
    def get(self, uuid):
        """Get data for a single tag/group, toggle notification muting, or recheck all."""
        tag = self.datastore.data['settings']['application']['tags'].get(uuid)
        """
        @api {get} /api/v1/tag/:uuid Single tag - get data or toggle notification muting.
        @apiDescription Retrieve tag information and set notification_muted status
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/tag/cc0cfffa-f449-477b-83ea-0caafd1dc091 -H"x-api-key:813031b16330fe25e3780cf0325daa45"
            curl "http://localhost:5000/api/v1/tag/cc0cfffa-f449-477b-83ea-0caafd1dc091?muted=muted" -H"x-api-key:813031b16330fe25e3780cf0325daa45"
        @apiName Tag
        @apiGroup Tag
        @apiParam {uuid} uuid Tag unique ID.
        @apiQuery {String} [muted] =`muted` or =`unmuted`, sets the MUTE NOTIFICATIONS state
        @apiSuccess (200) {String} OK When muted operation OR full JSON object of the tag
        @apiSuccess (200) {JSON} TagJSON Full JSON object of the tag
        """
        from copy import deepcopy
        tag = deepcopy(self.datastore.data['settings']['application']['tags'].get(uuid))
        if not tag:
            abort(404, message=f'No tag exists with the UUID of {uuid}')

        if request.args.get('recheck'):
            # Recheck all watches with this tag, including muted
            # First collect watches to queue
            watches_to_queue = []
            for k in sorted(self.datastore.data['watching'].items(), key=lambda item: item[1].get('last_checked', 0)):
                watch_uuid = k[0]
                watch = k[1]
                if not watch['paused'] and tag['uuid'] in watch['tags']:
                    watches_to_queue.append(watch_uuid)

            # If fewer than 20 watches, queue synchronously for immediate feedback
            if len(watches_to_queue) < 20:
                for watch_uuid in watches_to_queue:
                    worker_pool.queue_item_async_safe(self.update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': watch_uuid}))
                return {'status': f'OK, queued {len(watches_to_queue)} watches for rechecking'}, 200
            else:
                # 20+ watches - queue in background thread to avoid blocking API response
                def queue_watches_background():
                    """Background thread to queue watches - discarded after completion."""
                    try:
                        for watch_uuid in watches_to_queue:
                            worker_pool.queue_item_async_safe(self.update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': watch_uuid}))
                        logger.info(f"Background queueing complete for tag {tag['uuid']}: {len(watches_to_queue)} watches queued")
                    except Exception as e:
                        logger.error(f"Error in background queueing for tag {tag['uuid']}: {e}")

                # Start background thread and return immediately
                thread = threading.Thread(target=queue_watches_background, daemon=True, name=f"QueueTag-{tag['uuid'][:8]}")
                thread.start()

                return {'status': f'OK, queueing {len(watches_to_queue)} watches in background'}, 202

        if request.args.get('muted', '') == 'muted':
            tag['notification_muted'] = True
            tag.commit()
            self.datastore.data['settings']['application']['tags'][uuid]['notification_muted'] = True
            return "OK", 200
        elif request.args.get('muted', '') == 'unmuted':
            tag['notification_muted'] = False
            tag.commit()
            self.datastore.data['settings']['application']['tags'][uuid]['notification_muted'] = False
            return "OK", 200

        # Filter out Watch-specific runtime fields that don't apply to Tags (yet)
        # TODO: Future enhancement - aggregate these values from all Watches that have this tag:
        # - check_count: sum of all watches' check_count
        # - last_checked: most recent last_checked from all watches
        # - last_changed: most recent last_changed from all watches
        # - consecutive_filter_failures: count of watches with failures
        # - etc.
        # These come from watch_base inheritance but currently have no meaningful value for Tags
        watch_only_fields = {
            'browser_steps_last_error_step', 'check_count', 'consecutive_filter_failures',
            'content-type', 'fetch_time', 'last_changed', 'last_checked', 'last_error',
            'last_notification_error', 'last_viewed', 'notification_alert_count',
            'page_title', 'previous_md5', 'remote_server_reply'
        }

        # Create clean tag dict without Watch-specific fields
        clean_tag = {k: v for k, v in tag.items() if k not in watch_only_fields}

        return clean_tag
        return tag

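# --- Editor's note: a hedged usage sketch (not part of the diff) ---
# Exercises the Tag GET modes above: plain fetch, mute toggling, and the recheck
# path that queues watches (synchronously under 20 watches, otherwise via the
# background thread returning HTTP 202). UUID and key are placeholders.
import requests

base = "http://localhost:5000/api/v1/tag/cc0cfffa-f449-477b-83ea-0caafd1dc091"
headers = {"x-api-key": "YOUR-API-KEY"}

print(requests.get(base, headers=headers).json())               # tag as JSON, Watch-only fields stripped
requests.get(base, params={"muted": "muted"}, headers=headers)  # mute notifications
r = requests.get(base, params={"recheck": "1"}, headers=headers)
print(r.status_code, r.json())                                  # 200 queued, or 202 backgrounded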
    @auth.check_token
    @validate_openapi_request('deleteTag')
    def delete(self, uuid):
        """Delete a tag/group and remove it from all watches."""
        """
        @api {delete} /api/v1/tag/:uuid Delete a tag and remove it from all watches
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/tag/cc0cfffa-f449-477b-83ea-0caafd1dc091 -X DELETE -H"x-api-key:813031b16330fe25e3780cf0325daa45"
        @apiParam {uuid} uuid Tag unique ID.
        @apiName DeleteTag
        @apiGroup Tag
        @apiSuccess (200) {String} OK Was deleted
        """
        if not self.datastore.data['settings']['application']['tags'].get(uuid):
            abort(400, message='No tag exists with the UUID of {}'.format(uuid))

        # Delete the tag, and any tag reference
        del self.datastore.data['settings']['application']['tags'][uuid]


        # Remove tag from all watches
        for watch_uuid, watch in self.datastore.data['watching'].items():
            if watch.get('tags') and uuid in watch['tags']:
                watch['tags'].remove(uuid)
                watch.commit()

        return 'OK', 204

    @auth.check_token
    @validate_openapi_request('updateTag')
    @expects_json(schema_update_tag)
    def put(self, uuid):
        """Update tag information."""
        """
        @api {put} /api/v1/tag/:uuid Update tag information
        @apiExample {curl} Example usage:
            Update (PUT)
            curl http://localhost:5000/api/v1/tag/cc0cfffa-f449-477b-83ea-0caafd1dc091 -X PUT -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json" -d '{"title": "New Tag Title"}'

        @apiDescription Updates an existing tag using JSON
        @apiParam {uuid} uuid Tag unique ID.
        @apiName UpdateTag
        @apiGroup Tag
        @apiSuccess (200) {String} OK Was updated
        @apiSuccess (500) {String} ERR Some other error
        """
        tag = self.datastore.data['settings']['application']['tags'].get(uuid)
        if not tag:
            abort(404, message='No tag exists with the UUID of {}'.format(uuid))

        # Make a mutable copy of request.json for modification
        json_data = dict(request.json)

        # Validate notification_urls if provided
        if 'notification_urls' in json_data:
            from wtforms import ValidationError
            from changedetectionio.api.Notifications import validate_notification_urls
            try:
                notification_urls = json_data.get('notification_urls', [])
                validate_notification_urls(notification_urls)
            except ValidationError as e:
                return str(e), 400

        # Filter out readOnly fields (extracted from OpenAPI spec Tag schema)
        # These are system-managed fields that should never be user-settable
        from . import get_readonly_tag_fields
        readonly_fields = get_readonly_tag_fields()

        # Tag model inherits from watch_base but has no @property attributes of its own
        # So we only need to filter readOnly fields
        for field in readonly_fields:
            json_data.pop(field, None)

        # Validate remaining fields - reject truly unknown fields
        # Get valid fields from Tag schema
        from . import get_tag_schema_properties
        valid_fields = set(get_tag_schema_properties().keys())

        # Check for unknown fields
        unknown_fields = set(json_data.keys()) - valid_fields
        if unknown_fields:
            return f"Unknown field(s): {', '.join(sorted(unknown_fields))}", 400

        tag.update(json_data)
        tag.commit()

        # Clear checksums for all watches using this tag to force reprocessing
        # Tag changes affect inherited configuration
        cleared_count = self.datastore.clear_checksums_for_tag(uuid)
        logger.info(f"Tag {uuid} updated via API, cleared {cleared_count} watch checksums")
        tag.update(request.json)
        self.datastore.needs_write_urgent = True

        return "OK", 200


    @auth.check_token
    @validate_openapi_request('createTag')
    # Only cares for {'title': 'xxxx'}
    def post(self):
        """Create a single tag/group."""
        """
        @api {post} /api/v1/tag Create a single tag
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/tag -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json" -d '{"title": "Work related"}'
        @apiName Create
        @apiGroup Tag
        @apiSuccess (200) {String} OK Was created
        @apiSuccess (500) {String} ERR Some other error
        """

        json_data = request.get_json()
        title = json_data.get("title", '').strip()

        # Validate that only valid fields are provided
        # Get valid fields from Tag schema
        from . import get_tag_schema_properties
        valid_fields = set(get_tag_schema_properties().keys())

        # Check for unknown fields
        unknown_fields = set(json_data.keys()) - valid_fields
        if unknown_fields:
            return f"Unknown field(s): {', '.join(sorted(unknown_fields))}", 400

        new_uuid = self.datastore.add_tag(title=title)
        if new_uuid:
            # Apply any extra fields (e.g. processor_config_restock_diff) beyond just title
            extra = {k: v for k, v in json_data.items() if k != 'title'}
            if extra:
                tag = self.datastore.data['settings']['application']['tags'].get(new_uuid)
                if tag:
                    tag.update(extra)
                    tag.commit()
            return {'uuid': new_uuid}, 201
        else:
            return "Invalid or unsupported tag", 400
@@ -194,9 +122,28 @@ class Tags(Resource):
        self.datastore = kwargs['datastore']

    @auth.check_token
    @validate_openapi_request('listTags')
    def get(self):
        """List tags/groups."""
        """
        @api {get} /api/v1/tags List tags
        @apiDescription Return list of available tags
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/tags -H"x-api-key:813031b16330fe25e3780cf0325daa45"
            {
                "cc0cfffa-f449-477b-83ea-0caafd1dc091": {
                    "title": "Tech News",
                    "notification_muted": false,
                    "date_created": 1677103794
                },
                "e6f5fd5c-dbfe-468b-b8f3-f9d6ff5ad69b": {
                    "title": "Shopping",
                    "notification_muted": true,
                    "date_created": 1676662819
                }
            }
        @apiName ListTags
        @apiGroup Tag Management
        @apiSuccess (200) {String} OK JSON dict
        """
        result = {}
        for uuid, tag in self.datastore.data['settings']['application']['tags'].items():
            result[uuid] = {

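# --- Editor's note: a hedged usage sketch (not part of the diff) ---
# Creates a tag via the POST handler above; only fields present in the Tag
# schema are accepted, and schema-backed extras (e.g. a processor_config_*
# block) are applied after creation. Key and title are placeholders.
import requests

r = requests.post(
    "http://localhost:5000/api/v1/tag",
    json={"title": "Work related"},
    headers={"x-api-key": "YOUR-API-KEY"},
)
print(r.status_code, r.json())  # 201, {'uuid': '...'}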
@@ -1,53 +1,17 @@
import os
import threading
from changedetectionio.strtobool import strtobool

from changedetectionio.validate_url import is_safe_valid_url
from changedetectionio.favicon_utils import get_favicon_mime_type

from . import auth
from changedetectionio import queuedWatchMetaData, strtobool
from changedetectionio import worker_pool
from flask import request, make_response, send_from_directory
from flask_expects_json import expects_json
from changedetectionio import queuedWatchMetaData
from changedetectionio import worker_handler
from flask_restful import abort, Resource
from loguru import logger
from flask import request, make_response, send_from_directory
import validators
from . import auth
import copy

from . import validate_openapi_request, get_readonly_watch_fields
from ..notification import valid_notification_formats
from ..notification.handler import newline_re


def validate_time_between_check_required(json_data):
    """
    Validate that at least one time interval is specified when not using default settings.
    Returns None if valid, or error message string if invalid.
    Defaults to using global settings if time_between_check_use_default is not provided.
    """
    # Default to using global settings if not specified
    use_default = json_data.get('time_between_check_use_default', True)

    # If using default settings, no validation needed
    if use_default:
        return None

    # If not using defaults, check if time_between_check exists and has at least one non-zero value
    time_check = json_data.get('time_between_check')
    if not time_check:
        # No time_between_check provided and not using defaults - this is an error
        return "At least one time interval (weeks, days, hours, minutes, or seconds) must be specified when not using global settings."

    # time_between_check exists, check if it has at least one non-zero value
    if any([
        (time_check.get('weeks') or 0) > 0,
        (time_check.get('days') or 0) > 0,
        (time_check.get('hours') or 0) > 0,
        (time_check.get('minutes') or 0) > 0,
        (time_check.get('seconds') or 0) > 0
    ]):
        return None

    # time_between_check exists but all values are 0 or empty - this is an error
    return "At least one time interval (weeks, days, hours, minutes, or seconds) must be specified when not using global settings."
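# --- Editor's note: a minimal behaviour sketch (not part of the diff) ---
# How the validator above treats typical payloads; it returns None when valid,
# otherwise the error string.
#
#   validate_time_between_check_required({})                                         # None (defaults apply)
#   validate_time_between_check_required({'time_between_check_use_default': True})   # None
#   validate_time_between_check_required({'time_between_check_use_default': False})  # error string
#   validate_time_between_check_required({'time_between_check_use_default': False,
#                                          'time_between_check': {'hours': 6}})      # None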
# Import schemas from __init__.py
from . import schema, schema_create_watch, schema_update_watch


class Watch(Resource):
@@ -57,60 +21,67 @@ class Watch(Resource):
        self.update_q = kwargs['update_q']

    # Get information about a single watch, excluding the history list (can be large)
    # curl http://localhost:5000/api/v1/watch/<uuid_str:uuid>
    # curl http://localhost:5000/api/v1/watch/<string:uuid>
    # @todo - version2 - ?muted and ?paused should be able to be called together, return the watch struct not "OK"
    # ?recheck=true
    @auth.check_token
    @validate_openapi_request('getWatch')
    def get(self, uuid):
        """Get information about a single watch, recheck, pause, or mute."""
        # Get watch reference first (for pause/mute operations)
        watch_obj = self.datastore.data['watching'].get(uuid)
        if not watch_obj:
        """
        @api {get} /api/v1/watch/:uuid Single watch - get data, recheck, pause, mute.
        @apiDescription Retrieve watch information and set muted/paused status
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -H"x-api-key:813031b16330fe25e3780cf0325daa45"
            curl "http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091?muted=unmuted" -H"x-api-key:813031b16330fe25e3780cf0325daa45"
            curl "http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091?paused=unpaused" -H"x-api-key:813031b16330fe25e3780cf0325daa45"
        @apiName Watch
        @apiGroup Watch
        @apiParam {uuid} uuid Watch unique ID.
        @apiQuery {Boolean} [recheck] Recheck this watch `recheck=1`
        @apiQuery {String} [paused] =`paused` or =`unpaused`, sets the PAUSED state
        @apiQuery {String} [muted] =`muted` or =`unmuted`, sets the MUTE NOTIFICATIONS state
        @apiSuccess (200) {String} OK When paused/muted/recheck operation OR full JSON object of the watch
        @apiSuccess (200) {JSON} WatchJSON Full JSON object of the watch
        """
        from copy import deepcopy
        watch = deepcopy(self.datastore.data['watching'].get(uuid))
        if not watch:
            abort(404, message='No watch exists with the UUID of {}'.format(uuid))

        # Create a dict copy for JSON response (with lock for thread safety)
        # This is much faster than deepcopy and doesn't copy the datastore reference
        # WARNING: dict() is a SHALLOW copy - nested dicts are shared with original!
        # Only safe because we only ADD scalar properties (line 97-101), never modify nested dicts
        # If you need to modify nested dicts, use: from copy import deepcopy; watch = deepcopy(dict(watch_obj))
        with self.datastore.lock:
            watch = dict(watch_obj)

        if request.args.get('recheck'):
            worker_pool.queue_item_async_safe(self.update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))
            worker_handler.queue_item_async_safe(self.update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))
            return "OK", 200
        if request.args.get('paused', '') == 'paused':
            watch_obj.pause()
            watch_obj.commit()
            self.datastore.data['watching'].get(uuid).pause()
            return "OK", 200
        elif request.args.get('paused', '') == 'unpaused':
            watch_obj.unpause()
            watch_obj.commit()
            self.datastore.data['watching'].get(uuid).unpause()
            return "OK", 200
        if request.args.get('muted', '') == 'muted':
            watch_obj.mute()
            watch_obj.commit()
            self.datastore.data['watching'].get(uuid).mute()
            return "OK", 200
        elif request.args.get('muted', '') == 'unmuted':
            watch_obj.unmute()
            watch_obj.commit()
            self.datastore.data['watching'].get(uuid).unmute()
            return "OK", 200

        # Return without history, get that via another API call
        # Properties are not returned as a JSON, so add the required props manually
        watch['history_n'] = watch_obj.history_n
        watch['history_n'] = watch.history_n
        # attr .last_changed will check for the last written text snapshot on change
        watch['last_changed'] = watch_obj.last_changed
        watch['viewed'] = watch_obj.viewed
        watch['link'] = watch_obj.link

        watch['last_changed'] = watch.last_changed
        watch['viewed'] = watch.viewed
        return watch

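# --- Editor's note: a hedged usage sketch (not part of the diff) ---
# Drives the single-watch GET modes above. Note the @todo in the source: the
# paused/muted/recheck switches each return plain "OK" rather than the watch.
import requests

base = "http://localhost:5000/api/v1/watch/6a4b7d5c-fee4-4616-9f43-4ac97046b595"
headers = {"x-api-key": "YOUR-API-KEY"}

requests.get(base, params={"paused": "paused"}, headers=headers)  # pause
requests.get(base, params={"muted": "unmuted"}, headers=headers)  # unmute
watch = requests.get(base, headers=headers).json()                # full JSON incl. history_n, last_changed
print(watch.get("history_n"), watch.get("viewed"))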
    @auth.check_token
    @validate_openapi_request('deleteWatch')
    def delete(self, uuid):
        """Delete a watch and related history."""
        """
        @api {delete} /api/v1/watch/:uuid Delete a watch and related history
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -X DELETE -H"x-api-key:813031b16330fe25e3780cf0325daa45"
        @apiParam {uuid} uuid Watch unique ID.
        @apiName Delete
        @apiGroup Watch
        @apiSuccess (200) {String} OK Was deleted
        """
        if not self.datastore.data['watching'].get(uuid):
            abort(400, message='No watch exists with the UUID of {}'.format(uuid))

@@ -118,95 +89,31 @@ class Watch(Resource):
        return 'OK', 204

    @auth.check_token
    @validate_openapi_request('updateWatch')
    @expects_json(schema_update_watch)
    def put(self, uuid):
        """Update watch information."""
        """
        @api {put} /api/v1/watch/:uuid Update watch information
        @apiExample {curl} Example usage:
            Update (PUT)
            curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -X PUT -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json" -d '{"url": "https://my-nice.com" , "tag": "new list"}'

        @apiDescription Updates an existing watch using JSON, accepts the same structure as returned in <a href="#api-Watch-Watch">get single watch information</a>
        @apiParam {uuid} uuid Watch unique ID.
        @apiName Update a watch
        @apiGroup Watch
        @apiSuccess (200) {String} OK Was updated
        @apiSuccess (500) {String} ERR Some other error
        """
        watch = self.datastore.data['watching'].get(uuid)
        if not watch:
            abort(404, message='No watch exists with the UUID of {}'.format(uuid))

        if request.json.get('proxy'):
            plist = self.datastore.proxy_list
            if not plist or request.json.get('proxy') not in plist:
                proxy_list_str = ', '.join(plist) if plist else 'none configured'
                return f"Invalid proxy choice, currently supported proxies are '{proxy_list_str}'", 400
            if not request.json.get('proxy') in plist:
                return "Invalid proxy choice, currently supported proxies are '{}'".format(', '.join(plist)), 400

        # Validate time_between_check when not using defaults
        validation_error = validate_time_between_check_required(request.json)
        if validation_error:
            return validation_error, 400

        # Validate notification_urls if provided
        if 'notification_urls' in request.json:
            from wtforms import ValidationError
            from changedetectionio.api.Notifications import validate_notification_urls
            try:
                notification_urls = request.json.get('notification_urls', [])
                validate_notification_urls(notification_urls)
            except ValidationError as e:
                return str(e), 400

        # XSS etc protection - validate URL if it's being updated
        if 'url' in request.json:
            new_url = request.json.get('url')

            # URL must be a non-empty string
            if new_url is None:
                return "URL cannot be null", 400

            if not isinstance(new_url, str):
                return "URL must be a string", 400

            if not new_url.strip():
                return "URL cannot be empty or whitespace only", 400

            if not is_safe_valid_url(new_url.strip()):
                return "Invalid or unsupported URL format. URL must use http://, https://, or ftp:// protocol", 400

        # Handle processor-config-* fields separately (save to JSON, not datastore)
        from changedetectionio import processors

        # Make a mutable copy of request.json for modification
        json_data = dict(request.json)

        # Extract and remove processor config fields from json_data
        processor_config_data = processors.extract_processor_config_from_form_data(json_data)

        # Filter out readOnly fields (extracted from OpenAPI spec Watch schema)
        # These are system-managed fields that should never be user-settable
        readonly_fields = get_readonly_watch_fields()

        # Also filter out @property attributes (computed/derived values from the model)
        # These are not stored and should be ignored in PUT requests
        from changedetectionio.model.Watch import model as WatchModel
        property_fields = WatchModel.get_property_names()

        # Combine both sets of fields to ignore
        fields_to_ignore = readonly_fields | property_fields

        # Remove all ignored fields from update data
        for field in fields_to_ignore:
            json_data.pop(field, None)

        # Validate remaining fields - reject truly unknown fields
        # Get valid fields from WatchBase schema
        from . import get_watch_schema_properties
        valid_fields = set(get_watch_schema_properties().keys())

        # Also allow last_viewed (explicitly defined in UpdateWatch schema)
        valid_fields.add('last_viewed')

        # Check for unknown fields
        unknown_fields = set(json_data.keys()) - valid_fields
        if unknown_fields:
            return f"Unknown field(s): {', '.join(sorted(unknown_fields))}", 400

        # Update watch with regular (non-processor-config) fields
        watch.update(json_data)
        watch.commit()

        # Save processor config to JSON file
        processors.save_processor_config(self.datastore, uuid, processor_config_data)
        watch.update(request.json)

        return "OK", 200

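# --- Editor's note: a hedged usage sketch (not part of the diff) ---
# Updates a watch via the PUT handler above. readOnly and @property-derived
# fields are silently stripped server-side; unknown fields are rejected with a
# 400. Values below are placeholders.
import requests

r = requests.put(
    "http://localhost:5000/api/v1/watch/6a4b7d5c-fee4-4616-9f43-4ac97046b595",
    json={
        "url": "https://example.com/pricing",
        "time_between_check_use_default": False,
        "time_between_check": {"hours": 6},
        "notification_urls": ["mailto://user:pass@example.com"],
    },
    headers={"x-api-key": "YOUR-API-KEY"},
)
print(r.status_code, r.text)  # 200 OK, or 400 with a validation message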
@@ -217,11 +124,24 @@ class WatchHistory(Resource):
        self.datastore = kwargs['datastore']

    # Get a list of available history for a watch by UUID
    # curl http://localhost:5000/api/v1/watch/<uuid_str:uuid>/history
    # curl http://localhost:5000/api/v1/watch/<string:uuid>/history
    @auth.check_token
    @validate_openapi_request('getWatchHistory')
    def get(self, uuid):
        """Get a list of all historical snapshots available for a watch."""
        """
        @api {get} /api/v1/watch/<string:uuid>/history Get a list of all historical snapshots available for a watch
        @apiDescription Requires `uuid`, returns list
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091/history -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json"
            {
                "1676649279": "/tmp/data/6a4b7d5c-fee4-4616-9f43-4ac97046b595/cb7e9be8258368262246910e6a2a4c30.txt",
                "1677092785": "/tmp/data/6a4b7d5c-fee4-4616-9f43-4ac97046b595/e20db368d6fc633e34f559ff67bb4044.txt",
                "1677103794": "/tmp/data/6a4b7d5c-fee4-4616-9f43-4ac97046b595/02efdd37dacdae96554a8cc85dc9c945.txt"
            }
        @apiName Get list of available stored snapshots for watch
        @apiGroup Watch History
        @apiSuccess (200) {String} OK
        @apiSuccess (404) {String} ERR Not found
        """
        watch = self.datastore.data['watching'].get(uuid)
        if not watch:
            abort(404, message='No watch exists with the UUID of {}'.format(uuid))
@@ -234,9 +154,18 @@ class WatchSingleHistory(Resource):
        self.datastore = kwargs['datastore']

    @auth.check_token
    @validate_openapi_request('getWatchSnapshot')
    def get(self, uuid, timestamp):
        """Get single snapshot from watch."""
        """
        @api {get} /api/v1/watch/<string:uuid>/history/<int:timestamp> Get single snapshot from watch
        @apiDescription Requires watch `uuid` and `timestamp`. `timestamp` of "`latest`" for latest available snapshot, or <a href="#api-Watch_History-Get_list_of_available_stored_snapshots_for_watch">use the list returned here</a>
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091/history/1677092977 -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json"
        @apiName Get single snapshot content
        @apiGroup Watch History
        @apiParam {String} [html] Optional Set to =1 to return the last HTML (only stores last 2 snapshots, use `latest` as timestamp)
        @apiSuccess (200) {String} OK
        @apiSuccess (404) {String} ERR Not found
        """
        watch = self.datastore.data['watching'].get(uuid)
        if not watch:
            abort(404, message=f"No watch exists with the UUID of {uuid}")
@@ -247,10 +176,6 @@ class WatchSingleHistory(Resource):
        if timestamp == 'latest':
            timestamp = list(watch.history.keys())[-1]

        # Validate that the timestamp exists in history
        if timestamp not in watch.history:
            abort(404, message=f"No history snapshot found for timestamp '{timestamp}'")

        if request.args.get('html'):
            content = watch.get_fetched_html(timestamp)
            if content:
@@ -260,150 +185,47 @@ class WatchSingleHistory(Resource):
                response = make_response("No content found", 404)
                response.mimetype = "text/plain"
        else:
            content = watch.get_history_snapshot(timestamp=timestamp)
            content = watch.get_history_snapshot(timestamp)
            response = make_response(content, 200)
            response.mimetype = "text/plain"

        return response

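# --- Editor's note: a hedged usage sketch (not part of the diff) ---
# Lists snapshots, then fetches the newest one using the 'latest' keyword
# handled above; add html=1 to retrieve the stored HTML instead of text.
import requests

uuid = "6a4b7d5c-fee4-4616-9f43-4ac97046b595"
headers = {"x-api-key": "YOUR-API-KEY"}

history = requests.get(f"http://localhost:5000/api/v1/watch/{uuid}/history", headers=headers).json()
print(sorted(history.keys())[-1])  # newest timestamp

latest = requests.get(f"http://localhost:5000/api/v1/watch/{uuid}/history/latest", headers=headers)
print(latest.text[:200])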
class WatchHistoryDiff(Resource):
    """
    Generate diff between two historical snapshots.

    Note: This API endpoint currently returns text-based diffs and works best
    with the text_json_diff processor. Future processor types (like image_diff,
    restock_diff) may want to implement their own specialized API endpoints
    for returning processor-specific data (e.g., price charts, image comparisons).

    The web UI diff page (/diff/<uuid>) is processor-aware and delegates rendering
    to processors/{type}/difference.py::render() for processor-specific visualizations.
    """
    def __init__(self, **kwargs):
        # datastore is a black box dependency
        self.datastore = kwargs['datastore']

    @auth.check_token
    @validate_openapi_request('getWatchHistoryDiff')
    def get(self, uuid, from_timestamp, to_timestamp):
        """Generate diff between two historical snapshots."""
        from changedetectionio import diff
        from changedetectionio.notification.handler import apply_service_tweaks

        watch = self.datastore.data['watching'].get(uuid)
        if not watch:
            abort(404, message=f"No watch exists with the UUID of {uuid}")

        if not len(watch.history):
            abort(404, message=f"Watch found but no history exists for the UUID {uuid}")

        history_keys = list(watch.history.keys())

        # Handle 'latest' keyword for to_timestamp
        if to_timestamp == 'latest':
            to_timestamp = history_keys[-1]

        # Handle 'previous' keyword for from_timestamp (second-most-recent)
        if from_timestamp == 'previous':
            if len(history_keys) < 2:
                abort(404, message="Not enough history entries. Need at least 2 snapshots for 'previous'")
            from_timestamp = history_keys[-2]

        # Validate timestamps exist
        if from_timestamp not in watch.history:
            abort(404, message=f"From timestamp {from_timestamp} not found in watch history")
        if to_timestamp not in watch.history:
            abort(404, message=f"To timestamp {to_timestamp} not found in watch history")

        # Get the format parameter (default to 'text')
        output_format = request.args.get('format', 'text').lower()

        # Validate format
        if output_format not in valid_notification_formats.keys():
            abort(400, message=f"Invalid format. Must be one of: {', '.join(valid_notification_formats.keys())}")

        # Get the word_diff parameter (default to False - line-level mode)
        word_diff = strtobool(request.args.get('word_diff', 'false'))

        # Get the no_markup parameter (default to False)
        no_markup = strtobool(request.args.get('no_markup', 'false'))

        # Retrieve snapshot contents
        from_version_file_contents = watch.get_history_snapshot(from_timestamp)
        to_version_file_contents = watch.get_history_snapshot(to_timestamp)

        # Get diff preferences from query parameters (matching UI preferences in DIFF_PREFERENCES_CONFIG)
        # Support both 'type' (UI parameter) and 'word_diff' (API parameter) for backward compatibility
        diff_type = request.args.get('type', 'diffLines')
        if diff_type == 'diffWords':
            word_diff = True

        # Get boolean diff preferences with defaults from DIFF_PREFERENCES_CONFIG
        changes_only = strtobool(request.args.get('changesOnly', 'false'))
        ignore_whitespace = strtobool(request.args.get('ignoreWhitespace', 'false'))
        include_removed = strtobool(request.args.get('removed', 'true'))
        include_added = strtobool(request.args.get('added', 'true'))
        include_replaced = strtobool(request.args.get('replaced', 'true'))

        # Generate the diff with all preferences
        content = diff.render_diff(
            previous_version_file_contents=from_version_file_contents,
            newest_version_file_contents=to_version_file_contents,
            ignore_junk=ignore_whitespace,
            include_equal=not changes_only,
            include_removed=include_removed,
            include_added=include_added,
            include_replaced=include_replaced,
            word_diff=word_diff,
        )

        # Skip formatting if no_markup is set
        if no_markup:
            mimetype = "text/plain"
        else:
            # Apply formatting based on the requested format
            if output_format == 'htmlcolor':
                from changedetectionio.notification.handler import apply_html_color_to_body
                content = apply_html_color_to_body(n_body=content)
                mimetype = "text/html"
            else:
                # Apply service tweaks for text/html formats
                # Pass empty URL and title as they're not used for the placeholder replacement we need
                _, content, _ = apply_service_tweaks(
                    url='',
                    n_body=content,
                    n_title='',
                    requested_output_format=output_format
                )
                mimetype = "text/html" if output_format == 'html' else "text/plain"

            if 'html' in output_format:
                content = newline_re.sub('<br>\r\n', content)

        response = make_response(content, 200)
        response.mimetype = mimetype
        return response


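# --- Editor's note: a hedged usage sketch (not part of the diff) ---
# Calls the history-diff handler above using its 'previous'/'latest' keywords
# and the query switches it parses. The exact URL route is NOT shown in this
# diff; the path below is an assumption based on the (uuid, from_timestamp,
# to_timestamp) signature and may differ from the registered route.
import requests

uuid = "6a4b7d5c-fee4-4616-9f43-4ac97046b595"
r = requests.get(
    f"http://localhost:5000/api/v1/watch/{uuid}/history/previous/diff/latest",  # route assumed
    params={"format": "text", "type": "diffWords", "changesOnly": "true", "ignoreWhitespace": "true"},
    headers={"x-api-key": "YOUR-API-KEY"},
)
print(r.text)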
class WatchFavicon(Resource):
    def __init__(self, **kwargs):
        # datastore is a black box dependency
        self.datastore = kwargs['datastore']

    @auth.check_token
    @validate_openapi_request('getWatchFavicon')
    def get(self, uuid):
        """Get favicon for a watch."""
        """
        @api {get} /api/v1/watch/<string:uuid>/favicon Get Favicon for a watch
        @apiDescription Requires watch `uuid`
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091/favicon -H"x-api-key:813031b16330fe25e3780cf0325daa45"
        @apiName Get latest Favicon
        @apiGroup Watch History
        @apiSuccess (200) {String} OK
        @apiSuccess (404) {String} ERR Not found
        """
        watch = self.datastore.data['watching'].get(uuid)
        if not watch:
            abort(404, message=f"No watch exists with the UUID of {uuid}")

        favicon_filename = watch.get_favicon_filename()
        if favicon_filename:
            # Use cached MIME type detection
            filepath = os.path.join(watch.data_dir, favicon_filename)
            mime = get_favicon_mime_type(filepath)
            try:
                import magic
                mime = magic.from_file(
                    os.path.join(watch.watch_data_dir, favicon_filename),
                    mime=True
                )
            except ImportError:
                # Fallback, no python-magic
                import mimetypes
                mime, encoding = mimetypes.guess_type(favicon_filename)

            response = make_response(send_from_directory(watch.data_dir, favicon_filename))
            response = make_response(send_from_directory(watch.watch_data_dir, favicon_filename))
            response.headers['Content-type'] = mime
            response.headers['Cache-Control'] = 'max-age=300, must-revalidate'  # Cache for 5 minutes, then revalidate
            return response
@@ -418,45 +240,34 @@ class CreateWatch(Resource):
        self.update_q = kwargs['update_q']

    @auth.check_token
    @validate_openapi_request('createWatch')
    @expects_json(schema_create_watch)
    def post(self):
        """Create a single watch."""
        """
        @api {post} /api/v1/watch Create a single watch
        @apiDescription Requires at least `url` set, can accept the same structure as <a href="#api-Watch-Watch">get single watch information</a> to create.
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/watch -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json" -d '{"url": "https://my-nice.com" , "tag": "nice list"}'
        @apiName Create
        @apiGroup Watch
        @apiSuccess (200) {String} OK Was created
        @apiSuccess (500) {String} ERR Some other error
        """

        json_data = request.get_json()
        url = json_data['url'].strip()

        if not is_safe_valid_url(url):
            # If hosts that only contain alphanumerics are allowed ("localhost" for example)
            allow_simplehost = not strtobool(os.getenv('BLOCK_SIMPLEHOSTS', 'False'))
            if not validators.url(url, simple_host=allow_simplehost):
                return "Invalid or unsupported URL", 400

        if json_data.get('proxy'):
            plist = self.datastore.proxy_list
            if not plist or json_data.get('proxy') not in plist:
                proxy_list_str = ', '.join(plist) if plist else 'none configured'
                return f"Invalid proxy choice, currently supported proxies are '{proxy_list_str}'", 400

        # Validate time_between_check when not using defaults
        validation_error = validate_time_between_check_required(json_data)
        if validation_error:
            return validation_error, 400

        # Validate notification_urls if provided
        if 'notification_urls' in json_data:
            from wtforms import ValidationError
            from changedetectionio.api.Notifications import validate_notification_urls
            try:
                notification_urls = json_data.get('notification_urls', [])
                validate_notification_urls(notification_urls)
            except ValidationError as e:
                return str(e), 400

        # Handle processor-config-* fields separately (save to JSON, not watch)
        from changedetectionio import processors
            if not json_data.get('proxy') in plist:
                return "Invalid proxy choice, currently supported proxies are '{}'".format(', '.join(plist)), 400

        extras = copy.deepcopy(json_data)

        # Extract and remove processor config fields from extras
        processor_config_data = processors.extract_processor_config_from_form_data(extras)

        # Because we renamed 'tag' to 'tags' but don't want to change the API (can do this in v2 of the API)
        tags = None
        if extras.get('tag'):
@@ -466,31 +277,42 @@ class CreateWatch(Resource):
        del extras['url']

        new_uuid = self.datastore.add_watch(url=url, extras=extras, tag=tags)

        # Save processor config to separate JSON file
        if new_uuid and processor_config_data:
            processors.save_processor_config(self.datastore, new_uuid, processor_config_data)
        if new_uuid:
            # Don't queue because the scheduler will check that it hasn't been checked before anyway
            # worker_pool.queue_item_async_safe(self.update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': new_uuid}))
            worker_handler.queue_item_async_safe(self.update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': new_uuid}))
            return {'uuid': new_uuid}, 201
        else:
            # Check if it was a limit issue
            page_watch_limit = os.getenv('PAGE_WATCH_LIMIT')
            if page_watch_limit:
                try:
                    page_watch_limit = int(page_watch_limit)
                    current_watch_count = len(self.datastore.data['watching'])
                    if current_watch_count >= page_watch_limit:
                        return f"Watch limit reached ({current_watch_count}/{page_watch_limit} watches). Cannot add more watches.", 429
                except ValueError:
                    pass
            return "Invalid or unsupported URL", 400

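# --- Editor's note: a hedged usage sketch (not part of the diff) ---
# Creates a watch through the POST handler above; a 201 returns the new UUID,
# while a full datastore with PAGE_WATCH_LIMIT set can return 429. The 'tag'
# field maps to 'tags' internally, as the source comments note.
import requests

r = requests.post(
    "http://localhost:5000/api/v1/watch",
    json={"url": "https://example.com/pricing", "tag": "nice list"},
    headers={"x-api-key": "YOUR-API-KEY"},
)
print(r.status_code, r.json() if r.ok else r.text)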
|
||||
@auth.check_token
|
||||
@validate_openapi_request('listWatches')
|
||||
def get(self):
|
||||
"""List watches."""
|
||||
"""
|
||||
@api {get} /api/v1/watch List watches
|
||||
@apiDescription Return concise list of available watches and some very basic info
|
||||
@apiExample {curl} Example usage:
|
||||
curl http://localhost:5000/api/v1/watch -H"x-api-key:813031b16330fe25e3780cf0325daa45"
|
||||
{
|
||||
"6a4b7d5c-fee4-4616-9f43-4ac97046b595": {
|
||||
"last_changed": 1677103794,
|
||||
"last_checked": 1677103794,
|
||||
"last_error": false,
|
||||
"title": "",
|
||||
"url": "http://www.quotationspage.com/random.php"
|
||||
},
|
||||
"e6f5fd5c-dbfe-468b-b8f3-f9d6ff5ad69b": {
|
||||
"last_changed": 0,
|
||||
"last_checked": 1676662819,
|
||||
"last_error": false,
|
||||
"title": "QuickLook",
|
||||
"url": "https://github.com/QL-Win/QuickLook/tags"
|
||||
}
|
||||
}
|
||||
|
||||
@apiParam {String} [recheck_all] Optional Set to =1 to force recheck of all watches
|
||||
@apiParam {String} [tag] Optional name of tag to limit results
|
||||
@apiName ListWatches
|
||||
@apiGroup Watch Management
|
||||
@apiSuccess (200) {String} OK JSON dict
|
||||
"""
|
||||
list = {}
|
||||
|
||||
tag_limit = request.args.get('tag', '').lower()
|
||||
@@ -504,67 +326,14 @@ class CreateWatch(Resource):
'last_changed': watch.last_changed,
'last_checked': watch['last_checked'],
'last_error': watch['last_error'],
'link': watch.link,
'page_title': watch['page_title'],
'tags': [*tags],  # Unpack dict keys to list (can't use list() since a variable is named 'list')
'title': watch['title'],
'url': watch['url'],
'viewed': watch.viewed
}

if request.args.get('recheck_all'):
# Collect all watches to queue
watches_to_queue = self.datastore.data['watching'].keys()
for uuid in self.datastore.data['watching'].keys():
worker_handler.queue_item_async_safe(self.update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))
return {'status': "OK"}, 200

# If fewer than 20 watches, queue synchronously for immediate feedback
if len(watches_to_queue) < 20:
# Get already queued/running UUIDs once (efficient)
queued_uuids = set(self.update_q.get_queued_uuids())
running_uuids = set(worker_pool.get_running_uuids())

# Filter out watches that are already queued or running
watches_to_queue_filtered = [
uuid for uuid in watches_to_queue
if uuid not in queued_uuids and uuid not in running_uuids
]

# Queue only the filtered watches
for uuid in watches_to_queue_filtered:
worker_pool.queue_item_async_safe(self.update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))

# Provide feedback about skipped watches
skipped_count = len(watches_to_queue) - len(watches_to_queue_filtered)
if skipped_count > 0:
return {'status': f'OK, queued {len(watches_to_queue_filtered)} watches for rechecking ({skipped_count} already queued or running)'}, 200
else:
return {'status': f'OK, queued {len(watches_to_queue_filtered)} watches for rechecking'}, 200
else:
# 20+ watches - queue in a background thread to avoid blocking the API response
# Capture queued/running state before the background thread starts
queued_uuids = set(self.update_q.get_queued_uuids())
running_uuids = set(worker_pool.get_running_uuids())

def queue_all_watches_background():
"""Background thread to queue all watches - discarded after completion."""
try:
queued_count = 0
skipped_count = 0
for uuid in watches_to_queue:
# Check if already queued or running (state captured at start)
if uuid not in queued_uuids and uuid not in running_uuids:
worker_pool.queue_item_async_safe(self.update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))
queued_count += 1
else:
skipped_count += 1

logger.info(f"Background queueing complete: {queued_count} watches queued, {skipped_count} skipped (already queued/running)")
except Exception as e:
logger.error(f"Error in background queueing all watches: {e}")

# Start the background thread and return immediately
thread = threading.Thread(target=queue_all_watches_background, daemon=True, name="QueueAllWatches-Background")
thread.start()

return {'status': f'OK, queueing {len(watches_to_queue)} watches in background'}, 202

return list, 200
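# Illustrative usage sketch (not taken from the diff above): triggering a recheck
# of every watch through the list endpoint. The base URL, API key value and the
# /api/v1/watch path are assumptions for the example - adjust them to your install.
import requests

resp = requests.get(
    "http://localhost:5000/api/v1/watch",
    params={"recheck_all": "1"},           # same flag as the @apiParam above
    headers={"x-api-key": "YOUR_API_KEY"}  # API access key from the settings page
)
print(resp.status_code, resp.json().get('status'))
# With 20+ watches the handler above returns 202 and queues in the background.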
@@ -1,196 +1,33 @@
import functools
from flask import request, abort
from loguru import logger
import copy
from . import api_schema
from ..model import watch_base

@functools.cache
def build_merged_spec_dict():
"""
Load the base OpenAPI spec and merge in any per-processor api.yaml extensions.
# Build a JSON Schema at least partially based on our Watch model
watch_base_config = watch_base()
schema = api_schema.build_watch_json_schema(watch_base_config)

Each processor can provide an api.yaml file alongside its __init__.py that defines
additional schemas (e.g., processor_config_restock_diff). These are merged into
WatchBase.properties so the spec accurately reflects what the API accepts.
schema_create_watch = copy.deepcopy(schema)
schema_create_watch['required'] = ['url']

Plugin processors (via pluggy) are also supported - they just need an api.yaml
next to their processor module.
schema_update_watch = copy.deepcopy(schema)
schema_update_watch['additionalProperties'] = False

Returns the merged dict (cached - do not mutate the returned value).
"""
import os
import yaml
# Tag schema is also based on watch_base since Tag inherits from it
schema_tag = copy.deepcopy(schema)
schema_create_tag = copy.deepcopy(schema_tag)
schema_create_tag['required'] = ['title']
schema_update_tag = copy.deepcopy(schema_tag)
schema_update_tag['additionalProperties'] = False

spec_path = os.path.join(os.path.dirname(__file__), '../../docs/api-spec.yaml')
if not os.path.exists(spec_path):
spec_path = os.path.join(os.path.dirname(__file__), '../docs/api-spec.yaml')

with open(spec_path, 'r', encoding='utf-8') as f:
spec_dict = yaml.safe_load(f)

try:
from changedetectionio.processors import find_processors, get_parent_module
for module, proc_name in find_processors():
parent = get_parent_module(module)
if not parent or not hasattr(parent, '__file__'):
continue
api_yaml_path = os.path.join(os.path.dirname(parent.__file__), 'api.yaml')
if not os.path.exists(api_yaml_path):
continue
with open(api_yaml_path, 'r', encoding='utf-8') as f:
proc_spec = yaml.safe_load(f)
# Merge schemas
proc_schemas = proc_spec.get('components', {}).get('schemas', {})
spec_dict['components']['schemas'].update(proc_schemas)
# Inject processor_config_{name} into WatchBase if the schema is defined
schema_key = f'processor_config_{proc_name}'
if schema_key in proc_schemas:
spec_dict['components']['schemas']['WatchBase']['properties'][schema_key] = {
'$ref': f'#/components/schemas/{schema_key}'
}
# Append x-code-samples from processor paths into existing path operations
for path, path_item in proc_spec.get('paths', {}).items():
if path not in spec_dict.get('paths', {}):
continue
for method, operation in path_item.items():
if method not in spec_dict['paths'][path]:
continue
if 'x-code-samples' in operation:
existing = spec_dict['paths'][path][method].get('x-code-samples', [])
spec_dict['paths'][path][method]['x-code-samples'] = existing + operation['x-code-samples']
except Exception as e:
logger.warning(f"Failed to merge processor API specs: {e}")

return spec_dict


@functools.cache
def get_openapi_spec():
"""Lazy load the OpenAPI spec and its dependencies only when validation is needed."""
from openapi_core import OpenAPI  # Lazy import - saves ~10.7 MB on startup
return OpenAPI.from_dict(build_merged_spec_dict())

@functools.cache
def get_openapi_schema_dict():
"""
Get the raw OpenAPI spec dictionary for schema access.

Used by the Import endpoint to validate and convert query parameters.
Returns the merged YAML dict (not the OpenAPI object).
"""
return build_merged_spec_dict()

@functools.cache
def _resolve_schema_properties(schema_name):
"""
Generic helper to resolve schema properties, including allOf inheritance.

Args:
schema_name: Name of the schema (e.g., 'WatchBase', 'Watch', 'Tag')

Returns:
dict: All properties, including inherited ones from $ref schemas
"""
spec_dict = get_openapi_schema_dict()
schema = spec_dict['components']['schemas'].get(schema_name, {})

properties = {}

# Handle allOf (schema inheritance)
if 'allOf' in schema:
for item in schema['allOf']:
# Resolve $ref to the parent schema
if '$ref' in item:
ref_path = item['$ref'].split('/')[-1]
ref_schema = spec_dict['components']['schemas'].get(ref_path, {})
properties.update(ref_schema.get('properties', {}))
# Add schema-specific properties
if 'properties' in item:
properties.update(item['properties'])
else:
# Direct properties (no inheritance)
properties = schema.get('properties', {})

return properties


@functools.cache
def get_watch_schema_properties():
"""
Extract watch schema properties from the OpenAPI spec for the Import endpoint.

Returns WatchBase properties (all writable Watch fields).
"""
return _resolve_schema_properties('WatchBase')

# Import readonly field utilities from the shared module (avoids circular dependencies with the model layer)
from changedetectionio.model.schema_utils import get_readonly_watch_fields, get_readonly_tag_fields

@functools.cache
def get_tag_schema_properties():
"""
Extract Tag schema properties from the OpenAPI spec.

Returns WatchBase properties + Tag-specific properties (overrides_watch).
"""
return _resolve_schema_properties('Tag')

def validate_openapi_request(operation_id):
"""Decorator to validate incoming requests against the OpenAPI spec."""
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
from werkzeug.exceptions import BadRequest
try:
# Skip OpenAPI validation for GET requests since they don't have request bodies
if request.method.upper() != 'GET':
# Lazy import - only loaded when actually validating a request
from openapi_core.contrib.flask import FlaskOpenAPIRequest
from openapi_core.templating.paths.exceptions import ServerNotFound, PathNotFound, PathError

spec = get_openapi_spec()
openapi_request = FlaskOpenAPIRequest(request)
result = spec.unmarshal_request(openapi_request)
if result.errors:
error_details = []
for error in result.errors:
# Skip path/server validation errors for reverse proxy compatibility.
# Flask routing already validates that endpoints exist (returns 404 if not).
# OpenAPI validation here is primarily for request body schema validation.
# When behind nginx/a reverse proxy, URLs may have path prefixes that don't
# match the OpenAPI server definitions, causing false positives.
if isinstance(error, PathError):
logger.debug(f"API Call - Skipping path/server validation (delegated to Flask): {error}")
continue

error_str = str(error)
# Extract detailed schema errors from __cause__
if hasattr(error, '__cause__') and hasattr(error.__cause__, 'schema_errors'):
for schema_error in error.__cause__.schema_errors:
field = '.'.join(str(p) for p in schema_error.path) if schema_error.path else 'body'
msg = schema_error.message if hasattr(schema_error, 'message') else str(schema_error)
error_details.append(f"{field}: {msg}")
else:
error_details.append(error_str)

# Only raise if we have actual validation errors (not path/server issues)
if error_details:
logger.error(f"API Call - Validation failed: {'; '.join(error_details)}")
raise BadRequest(f"Validation failed: {'; '.join(error_details)}")
except BadRequest:
# Re-raise BadRequest exceptions (validation failures)
raise
except Exception as e:
# If OpenAPI spec loading fails, log but don't break existing functionality
logger.critical(f"OpenAPI validation warning for {operation_id}: {e}")
abort(500)
return f(*args, **kwargs)
return wrapper
return decorator
schema_notification_urls = copy.deepcopy(schema)
schema_create_notification_urls = copy.deepcopy(schema_notification_urls)
schema_create_notification_urls['required'] = ['notification_urls']
schema_delete_notification_urls = copy.deepcopy(schema_notification_urls)
schema_delete_notification_urls['required'] = ['notification_urls']

# Import all API resources
from .Watch import Watch, WatchHistory, WatchSingleHistory, WatchHistoryDiff, CreateWatch, WatchFavicon
from .Watch import Watch, WatchHistory, WatchSingleHistory, CreateWatch, WatchFavicon
from .Tags import Tags, Tag
from .Import import Import
from .SystemInfo import SystemInfo
from .Spec import Spec
from .Notifications import Notifications
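# Illustrative usage sketch (the resource class and operation_id are assumed, not
# from the diff): how validate_openapi_request() would typically wrap a
# Flask-RESTful handler so the body is checked against the merged spec first.
from flask_restful import Resource

class ExampleWatchResource(Resource):
    @validate_openapi_request('createWatch')  # operation_id must exist in the spec
    def post(self):
        # Reaching this point means the body already passed schema validation
        # (GET requests skip validation entirely, as in the decorator above).
        return {'status': 'OK'}, 201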
146 changedetectionio/api/api_schema.py Normal file
@@ -0,0 +1,146 @@
|
||||
# Responsible for building the storage dict into a set of rules ("JSON Schema") acceptable via the API
|
||||
# Probably other ways to solve this when the backend switches to some ORM
|
||||
from changedetectionio.notification import valid_notification_formats
|
||||
|
||||
|
||||
def build_time_between_check_json_schema():
|
||||
# Setup time between check schema
|
||||
schema_properties_time_between_check = {
|
||||
"type": "object",
|
||||
"additionalProperties": False,
|
||||
"properties": {}
|
||||
}
|
||||
for p in ['weeks', 'days', 'hours', 'minutes', 'seconds']:
|
||||
schema_properties_time_between_check['properties'][p] = {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"type": "null"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
return schema_properties_time_between_check
|
||||
|
||||
def build_watch_json_schema(d):
|
||||
# Base JSON schema
|
||||
schema = {
|
||||
'type': 'object',
|
||||
'properties': {},
|
||||
}
|
||||
|
||||
for k, v in d.items():
|
||||
# @todo 'integer' is not covered here because its almost always for internal usage
|
||||
|
||||
if isinstance(v, type(None)):
|
||||
schema['properties'][k] = {
|
||||
"anyOf": [
|
||||
{"type": "null"},
|
||||
]
|
||||
}
|
||||
elif isinstance(v, list):
|
||||
schema['properties'][k] = {
|
||||
"anyOf": [
|
||||
{"type": "array",
|
||||
# Always is an array of strings, like text or regex or something
|
||||
"items": {
|
||||
"type": "string",
|
||||
"maxLength": 5000
|
||||
}
|
||||
},
|
||||
]
|
||||
}
|
||||
elif isinstance(v, bool):
|
||||
schema['properties'][k] = {
|
||||
"anyOf": [
|
||||
{"type": "boolean"},
|
||||
]
|
||||
}
|
||||
elif isinstance(v, str):
|
||||
schema['properties'][k] = {
|
||||
"anyOf": [
|
||||
{"type": "string",
|
||||
"maxLength": 5000},
|
||||
]
|
||||
}
|
||||
|
||||
# Can also be a string (or None by default above)
|
||||
for v in ['body',
|
||||
'notification_body',
|
||||
'notification_format',
|
||||
'notification_title',
|
||||
'proxy',
|
||||
'tag',
|
||||
'title',
|
||||
'webdriver_js_execute_code'
|
||||
]:
|
||||
schema['properties'][v]['anyOf'].append({'type': 'string', "maxLength": 5000})
|
||||
|
||||
# None or Boolean
|
||||
schema['properties']['track_ldjson_price_data']['anyOf'].append({'type': 'boolean'})
|
||||
|
||||
schema['properties']['method'] = {"type": "string",
|
||||
"enum": ["GET", "POST", "DELETE", "PUT"]
|
||||
}
|
||||
|
||||
schema['properties']['fetch_backend']['anyOf'].append({"type": "string",
|
||||
"enum": ["html_requests", "html_webdriver"]
|
||||
})
|
||||
|
||||
|
||||
|
||||
# All headers must be key/value type dict
|
||||
schema['properties']['headers'] = {
|
||||
"type": "object",
|
||||
"patternProperties": {
|
||||
# Should always be a string:string type value
|
||||
".*": {"type": "string"},
|
||||
}
|
||||
}
|
||||
|
||||
schema['properties']['notification_format'] = {'type': 'string',
|
||||
'enum': list(valid_notification_formats.keys())
|
||||
}
|
||||
|
||||
# Stuff that shouldn't be available but is just state-storage
|
||||
for v in ['previous_md5', 'last_error', 'has_ldjson_price_data', 'previous_md5_before_filters', 'uuid']:
|
||||
del schema['properties'][v]
|
||||
|
||||
schema['properties']['webdriver_delay']['anyOf'].append({'type': 'integer'})
|
||||
|
||||
schema['properties']['time_between_check'] = build_time_between_check_json_schema()
|
||||
|
||||
schema['properties']['browser_steps'] = {
|
||||
"anyOf": [
|
||||
{
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"operation": {
|
||||
"type": ["string", "null"],
|
||||
"maxLength": 5000 # Allows null and any string up to 5000 chars (including "")
|
||||
},
|
||||
"selector": {
|
||||
"type": ["string", "null"],
|
||||
"maxLength": 5000
|
||||
},
|
||||
"optional_value": {
|
||||
"type": ["string", "null"],
|
||||
"maxLength": 5000
|
||||
}
|
||||
},
|
||||
"required": ["operation", "selector", "optional_value"],
|
||||
"additionalProperties": False # No extra keys allowed
|
||||
}
|
||||
},
|
||||
{"type": "null"}, # Allows null for `browser_steps`
|
||||
{"type": "array", "maxItems": 0} # Allows empty array []
|
||||
]
|
||||
}
|
||||
|
||||
# headers ?
|
||||
return schema
|
||||
|
||||
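# Illustrative sketch (assumes the third-party `jsonschema` package is installed;
# the sample payloads are invented): validating input against the schema that
# build_time_between_check_json_schema() above produces.
import jsonschema

tbc_schema = build_time_between_check_json_schema()
jsonschema.validate(instance={'hours': 6, 'minutes': None}, schema=tbc_schema)  # passes
# jsonschema.validate(instance={'hourz': 6}, schema=tbc_schema)  # would raise
# ValidationError, since "additionalProperties": False rejects unknown keys.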
@@ -1,15 +1,12 @@
from blinker import signal
from .processors.exceptions import ProcessorException
import changedetectionio.content_fetchers.exceptions as content_fetchers_exceptions
from changedetectionio.processors.text_json_diff.processor import FilterNotFoundInResponse
from changedetectionio import html_tools
from changedetectionio import worker_pool
from changedetectionio.queuedWatchMetaData import PrioritizedItem
from changedetectionio.pluggy_interface import apply_update_handler_alter, apply_update_finalize
from changedetectionio.flask_app import watch_check_update

import asyncio
import importlib
import os
import sys
import time

from loguru import logger
@@ -17,109 +14,46 @@ from loguru import logger
# Async version of update_worker
# Processes jobs from AsyncSignalPriorityQueue instead of a threaded queue

IN_PYTEST = "pytest" in sys.modules or "PYTEST_CURRENT_TEST" in os.environ
DEFER_SLEEP_TIME_ALREADY_QUEUED = 0.3 if IN_PYTEST else 10.0

async def async_update_worker(worker_id, q, notification_q, app, datastore, executor=None):
async def async_update_worker(worker_id, q, notification_q, app, datastore):
"""
Async worker function that processes watch check jobs from the queue.

Args:
worker_id: Unique identifier for this worker
q: AsyncSignalPriorityQueue containing jobs to process
notification_q: Standard queue for notifications
app: Flask application instance
datastore: Application datastore
executor: ThreadPoolExecutor for queue operations (optional)

Returns:
"restart" if the worker should restart, "shutdown" for a clean exit
"""
# Set a descriptive name for this task
task = asyncio.current_task()
if task:
task.set_name(f"async-worker-{worker_id}")

# Read the restart policy from the environment
max_jobs = int(os.getenv("WORKER_MAX_JOBS", "10"))
max_runtime_seconds = int(os.getenv("WORKER_MAX_RUNTIME", "3600"))  # 1 hour default

jobs_processed = 0
start_time = time.time()

# Log the thread name for debugging
import threading
thread_name = threading.current_thread().name
logger.info(f"Starting async worker {worker_id} on thread '{thread_name}' (max_jobs={max_jobs}, max_runtime={max_runtime_seconds}s)")

logger.info(f"Starting async worker {worker_id}")
while not app.config.exit.is_set():
update_handler = None
watch = None
processing_exception = None  # Reset at the start of each iteration to prevent state bleeding

try:
# Efficient blocking via run_in_executor (no polling overhead!)
# The worker blocks in threading.Queue.get(), which uses Condition.wait()
# The executor must be sized to match the worker count (see worker_pool.py: 50 threads default)
# A single timeout (no double-timeout wrapper) = no race condition
queued_item_data = await q.async_get(executor=executor, timeout=1.0)
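# Illustrative standalone sketch (invented names, not the project's queue class):
# the "block in a thread, await in the event loop" pattern the comments above
# describe, using only the standard library. queue.Empty propagates to the
# awaiting caller if the timeout expires.
import asyncio, queue
from concurrent.futures import ThreadPoolExecutor

async def get_blocking(q: queue.Queue, executor: ThreadPoolExecutor, timeout: float):
    loop = asyncio.get_running_loop()
    # queue.Queue.get() blocks its thread cheaply via Condition.wait();
    # run_in_executor turns that blocking call into an awaitable without busy-polling.
    return await loop.run_in_executor(executor, lambda: q.get(timeout=timeout))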
# CRITICAL: Claim the UUID immediately after getting it from the queue, to prevent a race condition
# in wait_for_all_checks(), which checks qsize() and running_uuids separately
uuid = queued_item_data.item.get('uuid')
if not worker_pool.claim_uuid_for_processing(uuid, worker_id):
# Already being processed - re-queue and continue
logger.trace(f"Worker {worker_id} detected UUID {uuid} already processing during claim - deferring")
await asyncio.sleep(DEFER_SLEEP_TIME_ALREADY_QUEUED)
deferred_priority = max(1000, queued_item_data.priority * 10)
deferred_item = PrioritizedItem(priority=deferred_priority, item=queued_item_data.item)
worker_pool.queue_item_async_safe(q, deferred_item, silent=True)
continue

# Use asyncio.wait_for to make queue.get() cancellable
queued_item_data = await asyncio.wait_for(q.get(), timeout=1.0)
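# Illustrative sketch (invented registry, not worker_pool's real implementation):
# the claim/defer idea above reduces to an atomic "claim this UUID or back off"
# check around a shared set.
import threading

_in_flight = set()
_in_flight_lock = threading.Lock()

def claim(uuid: str) -> bool:
    """Return True if we won the claim; False means another worker has it."""
    with _in_flight_lock:
        if uuid in _in_flight:
            return False
        _in_flight.add(uuid)
        return True

def release(uuid: str):
    with _in_flight_lock:
        _in_flight.discard(uuid)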
except asyncio.TimeoutError:
# No jobs available - check if we should restart based on time while idle
runtime = time.time() - start_time
if runtime >= max_runtime_seconds:
logger.info(f"Worker {worker_id} idle and reached max runtime ({runtime:.0f}s), restarting")
return "restart"
continue
except RuntimeError as e:
# Handle executor shutdown gracefully - this is expected during shutdown
if "cannot schedule new futures after shutdown" in str(e):
# Executor shut down - exit gracefully without logging in pytest
if not IN_PYTEST:
logger.debug(f"Worker {worker_id} detected executor shutdown, exiting")
break
# Other RuntimeError - log and continue
logger.error(f"Worker {worker_id} runtime error: {e}")
await asyncio.sleep(0.1)
# No jobs available, continue the loop
continue
except Exception as e:
# Handle the expected Empty exception from a queue timeout
import queue
if isinstance(e, queue.Empty):
# Queue is empty, normal behavior - just continue
continue

# Unexpected exception - log as critical
logger.critical(f"CRITICAL: Worker {worker_id} failed to get queue item: {type(e).__name__}: {e}")

# Log queue health for debugging
try:
queue_size = q.qsize()
is_empty = q.empty()
logger.critical(f"CRITICAL: Worker {worker_id} queue health - size: {queue_size}, empty: {is_empty}")
except Exception as health_e:
logger.critical(f"CRITICAL: Worker {worker_id} queue health check failed: {health_e}")

logger.error(f"Worker {worker_id} error getting queue item: {e}")
await asyncio.sleep(0.1)
continue

# The UUID was already claimed above, immediately after getting it from the queue,
# to prevent a race condition with wait_for_all_checks()

uuid = queued_item_data.item.get('uuid')
fetch_start_time = round(time.time())

# Mark this UUID as being processed
from changedetectionio import worker_handler
worker_handler.set_uuid_processing(uuid, processing=True)

try:
if uuid in list(datastore.data['watching'].keys()) and datastore.data['watching'][uuid].get('url'):
changed_detected = False
@@ -136,43 +70,27 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore, exec
logger.info(f"Worker {worker_id} processing watch UUID {uuid} Priority {queued_item_data.priority} URL {watch['url']}")

try:
# Retrieve the signal by name to ensure thread-safe access across worker threads
watch_check_update = signal('watch_check_update')
watch_check_update.send(watch_uuid=uuid)

# The processor is what we are using for detecting the "change"
processor = watch.get('processor', 'text_json_diff')

# Init a new 'difference_detection_processor'
# Use get_processor_module() to support both built-in and plugin processors
from changedetectionio.processors import get_processor_module
processor_module = get_processor_module(processor)

if not processor_module:
error_msg = f"Processor module '{processor}' not found."
logger.error(error_msg)
raise ModuleNotFoundError(error_msg)
processor_module_name = f"changedetectionio.processors.{processor}.processor"
try:
processor_module = importlib.import_module(processor_module_name)
except ModuleNotFoundError as e:
print(f"Processor module '{processor}' not found.")
raise e

update_handler = processor_module.perform_site_check(datastore=datastore,
watch_uuid=uuid)

# Allow plugins to modify/wrap the update_handler
update_handler = apply_update_handler_alter(update_handler, watch, datastore)

update_signal = signal('watch_small_status_comment')
update_signal.send(watch_uuid=uuid, status="Fetching page..")

# All fetchers are now async, so call directly
await update_handler.call_browser()

# Run change detection in an executor to avoid blocking the event loop.
# This includes CPU-intensive operations like HTML parsing (lxml/inscriptis),
# which can take 2-10ms and cause GIL contention across workers
loop = asyncio.get_event_loop()
changed_detected, update_obj, contents = await loop.run_in_executor(
executor,
lambda: update_handler.run_changedetection(watch=watch)
)
# Run change detection (this is synchronous)
changed_detected, update_obj, contents = update_handler.run_changedetection(watch=watch)

except PermissionError as e:
logger.critical(f"File permission error updating file, watch: {uuid}")
@@ -182,10 +100,8 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore, exec
except ProcessorException as e:
if e.screenshot:
watch.save_screenshot(screenshot=e.screenshot)
e.screenshot = None  # Free memory immediately
if e.xpath_data:
watch.save_xpath_data(data=e.xpath_data)
e.xpath_data = None  # Free memory immediately
datastore.update_watch(uuid=uuid, update_obj={'last_error': e.message})
process_changedetection_results = False

@@ -205,11 +121,9 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore, exec

if e.screenshot:
watch.save_screenshot(screenshot=e.screenshot, as_error=True)
e.screenshot = None  # Free memory immediately

if e.xpath_data:
watch.save_xpath_data(data=e.xpath_data)
e.xpath_data = None  # Free memory immediately

process_changedetection_results = False

@@ -228,10 +142,8 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore, exec

if e.screenshot:
watch.save_screenshot(screenshot=e.screenshot, as_error=True)
e.screenshot = None  # Free memory immediately
if e.xpath_data:
watch.save_xpath_data(data=e.xpath_data, as_error=True)
e.xpath_data = None  # Free memory immediately
if e.page_text:
watch.save_error_text(contents=e.page_text)

@@ -241,7 +153,6 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore, exec
except FilterNotFoundInResponse as e:
if not datastore.data['watching'].get(uuid):
continue
logger.debug(f"Received FilterNotFoundInResponse exception for {uuid}")

err_text = "Warning, no filters were found, no change detection ran - Did the page change layout? Update your Visual Filter if necessary."
datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text})
@@ -249,11 +160,9 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore, exec
# The filter wasn't found, but we should still update the visual selector so that they have a chance to set it up again
if e.screenshot:
watch.save_screenshot(screenshot=e.screenshot)
e.screenshot = None  # Free memory immediately

if e.xpath_data:
watch.save_xpath_data(data=e.xpath_data)
e.xpath_data = None  # Free memory immediately

# Only when enabled, send the notification
if watch.get('filter_failure_notification_send', False):
@@ -261,19 +170,17 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore, exec
c += 1
# Send a notification if we reached the threshold?
threshold = datastore.data['settings']['application'].get('filter_failure_notification_threshold_attempts', 0)
logger.debug(f"FilterNotFoundInResponse - Filter for {uuid} not found, consecutive_filter_failures: {c} of threshold {threshold}")
logger.debug(f"Filter for {uuid} not found, consecutive_filter_failures: {c} of threshold {threshold}")
if c >= threshold:
if not watch.get('notification_muted'):
logger.debug(f"FilterNotFoundInResponse - Sending filter failed notification for {uuid}")
logger.debug(f"Sending filter failed notification for {uuid}")
await send_filter_failure_notification(uuid, notification_q, datastore)
c = 0
logger.debug(f"FilterNotFoundInResponse - Reset filter failure count back to zero")
else:
logger.debug(f"FilterNotFoundInResponse - {c} of threshold {threshold}..")
logger.debug(f"Reset filter failure count back to zero")

datastore.update_watch(uuid=uuid, update_obj={'consecutive_filter_failures': c})
else:
logger.trace(f"FilterNotFoundInResponse - {uuid} - filter_failure_notification_send not enabled, skipping")
logger.trace(f"{uuid} - filter_failure_notification_send not enabled, skipping")

process_changedetection_results = False
@@ -281,9 +188,6 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore, exec
# Yes fine, so nothing to do, don't continue to process.
process_changedetection_results = False
changed_detected = False
logger.debug(f'[{uuid}] - checksumFromPreviousCheckWasTheSame - Checksum from the previous check was the same, nothing to do here.')
# Reset the edited flag since we successfully completed the check
watch.reset_watch_edited_flag()

except content_fetchers_exceptions.BrowserConnectError as e:
datastore.update_watch(uuid=uuid,
@@ -350,7 +254,6 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore, exec
err_text = "Error running JS Actions - Page request - "+e.message
if e.screenshot:
watch.save_screenshot(screenshot=e.screenshot, as_error=True)
e.screenshot = None  # Free memory immediately
datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text,
'last_check_status': e.status_code})
process_changedetection_results = False
@@ -362,7 +265,6 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore, exec

if e.screenshot:
watch.save_screenshot(screenshot=e.screenshot, as_error=True)
e.screenshot = None  # Free memory immediately

datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text,
'last_check_status': e.status_code,
@@ -376,9 +278,8 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore, exec
logger.error(f"Exception (BrowserStepsInUnsupportedFetcher) reached processing watch UUID: {uuid}")

except Exception as e:
import traceback
logger.error(f"Worker {worker_id} exception processing watch UUID: {uuid}")
logger.exception(f"Worker {worker_id} full exception details:")
logger.error(str(e))
datastore.update_watch(uuid=uuid, update_obj={'last_error': "Exception: " + str(e)})
process_changedetection_results = False

@@ -386,7 +287,7 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore, exec
if not datastore.data['watching'].get(uuid):
continue

update_obj['content-type'] = str(update_handler.fetcher.get_all_headers().get('content-type', '') or "").lower()
update_obj['content-type'] = update_handler.fetcher.get_all_headers().get('content-type', '').lower()

if not watch.get('ignore_status_codes'):
update_obj['consecutive_filter_failures'] = 0
@@ -397,27 +298,25 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore, exec
if not datastore.data['watching'].get(uuid):
continue

logger.debug(f"Processing watch UUID: {uuid} - xpath_data length returned {len(update_handler.xpath_data) if update_handler and update_handler.xpath_data else 'empty.'}")
if update_handler and process_changedetection_results:
if process_changedetection_results:
# Extract the title if needed
if datastore.data['settings']['application'].get('extract_title_as_title') or watch['extract_title_as_title']:
if not watch['title'] or not len(watch['title']):
try:
update_obj['title'] = html_tools.extract_element(find='title', html_content=update_handler.fetcher.content)
logger.info(f"UUID: {uuid} Extract <title> updated title to '{update_obj['title']}'")
except Exception as e:
logger.warning(f"UUID: {uuid} Extract <title> as watch title was enabled, but couldn't find a <title>.")

try:
# Reset the edited flag BEFORE update_watch (which calls watch.update() and would set it again)
watch.reset_watch_edited_flag()
datastore.update_watch(uuid=uuid, update_obj=update_obj)

if changed_detected or not watch.history_n:
if update_handler.screenshot:
watch.save_screenshot(screenshot=update_handler.screenshot)
# Free the screenshot memory immediately after saving
update_handler.screenshot = None
if hasattr(update_handler, 'fetcher') and hasattr(update_handler.fetcher, 'screenshot'):
update_handler.fetcher.screenshot = None

if update_handler.xpath_data:
watch.save_xpath_data(data=update_handler.xpath_data)
# Free the xpath data memory
update_handler.xpath_data = None
if hasattr(update_handler, 'fetcher') and hasattr(update_handler.fetcher, 'xpath_data'):
update_handler.fetcher.xpath_data = None

# Ensure a unique timestamp for history
if watch.newest_history_key and int(fetch_start_time) == int(watch.newest_history_key):
@@ -425,7 +324,7 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore, exec
fetch_start_time += 1
await asyncio.sleep(1)

watch.save_history_blob(contents=contents,
watch.save_history_text(contents=contents,
timestamp=int(fetch_start_time),
snapshot_id=update_obj.get('previous_md5', 'none'))

@@ -433,10 +332,6 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore, exec
if update_handler.fetcher.content or (not update_handler.fetcher.content and empty_pages_are_a_change):
watch.save_last_fetched_html(contents=update_handler.fetcher.content, timestamp=int(fetch_start_time))

# Explicitly delete large content variables to free memory IMMEDIATELY after saving
# These are no longer needed after being saved to history
del contents

# Send notifications on the second+ check
if watch.history_n >= 2:
logger.info(f"Change detected in UUID {uuid} - {watch['url']}")
@@ -444,69 +339,33 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore, exec
await send_content_changed_notification(uuid, notification_q, datastore)

except Exception as e:

logger.critical(f"Worker {worker_id} exception in process_changedetection_results")
logger.exception(f"Worker {worker_id} full exception details:")
logger.critical(str(e))
datastore.update_watch(uuid=uuid, update_obj={'last_error': str(e)})


# Always record the attempt count
count = watch.get('check_count', 0) + 1

final_updates = {'fetch_time': round(time.time() - fetch_start_time, 3),
'check_count': count,
}
# Record the server header
try:
server_header = str(update_handler.fetcher.get_all_headers().get('server', '') or "").strip().lower()[:255]
if server_header:
final_updates['remote_server_reply'] = server_header
server_header = update_handler.fetcher.headers.get('server', '').strip().lower()[:255]
datastore.update_watch(uuid=uuid, update_obj={'remote_server_reply': server_header})
except Exception as e:
server_header = None
pass

if update_handler:  # Could be None or empty if the processor was not found
# Always record the page title (used in notifications, and it can change even when the content is the same)
if update_obj.get('content-type') and 'html' in update_obj.get('content-type'):
try:
page_title = html_tools.extract_title(data=update_handler.fetcher.content)
if page_title:
page_title = page_title.strip()[:2000]
logger.debug(f"UUID: {uuid} Page <title> is '{page_title}'")
final_updates['page_title'] = page_title
except Exception as e:
logger.exception(f"Worker {worker_id} full exception details:")
logger.warning(f"UUID: {uuid} Exception when extracting <title> - {str(e)}")

# Store the favicon if necessary
if update_handler.fetcher.favicon_blob and update_handler.fetcher.favicon_blob.get('base64'):
watch.bump_favicon(url=update_handler.fetcher.favicon_blob.get('url'),
favicon_base_64=update_handler.fetcher.favicon_blob.get('base64')
)

datastore.update_watch(uuid=uuid, update_obj=final_updates)

# NOW clear the fetcher content - after all processing is complete
# This is the last point where we need the fetcher data
if update_handler and hasattr(update_handler, 'fetcher') and update_handler.fetcher:
update_handler.fetcher.clear_content()

# Explicitly delete update_handler to free all references
if update_handler:
del update_handler
update_handler = None

# Force garbage collection
import gc
gc.collect()
datastore.update_watch(uuid=uuid, update_obj={'fetch_time': round(time.time() - fetch_start_time, 3),
'check_count': count})

except Exception as e:
# Store the processing exception for the plugin finalization hook
processing_exception = e

logger.error(f"Worker {worker_id} unexpected error processing {uuid}: {e}")
logger.exception(f"Worker {worker_id} full exception details:")

logger.error(f"Worker {worker_id} traceback:", exc_info=True)

# Also update the watch with error information
if datastore and uuid in datastore.data['watching']:
datastore.update_watch(uuid=uuid, update_obj={'last_error': f"Worker error: {str(e)}"})
@@ -514,75 +373,20 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore, exec
finally:
# Always clean up - this runs whether there was an exception or not
if uuid:
# Capture references for the plugin finalize hook BEFORE cleanup
# (cleanup may delete these variables, but plugins need the original references)
finalize_handler = update_handler  # Capture now, before cleanup deletes it
finalize_watch = watch  # Capture now, before any modifications

# Call quit() as a backup (Puppeteer/Playwright have internal cleanup, but this acts as a safety net)
try:
if update_handler and hasattr(update_handler, 'fetcher') and update_handler.fetcher:
await update_handler.fetcher.quit(watch=watch)
except Exception as e:
logger.error(f"Exception while cleaning/quit after calling browser: {e}")
logger.exception(f"Worker {worker_id} full exception details:")

try:

# Clean up all memory references BEFORE garbage collection
if update_handler:
if hasattr(update_handler, 'fetcher') and update_handler.fetcher:
update_handler.fetcher.clear_content()
if hasattr(update_handler, 'content_processor'):
update_handler.content_processor = None
del update_handler
update_handler = None

# Clear large content variables
if 'contents' in locals():
del contents

# Force garbage collection after all references are cleared
import gc
gc.collect()
# Mark the UUID as no longer being processed
worker_handler.set_uuid_processing(uuid, processing=False)

# Send the completion signal
if watch:
#logger.info(f"Worker {worker_id} sending completion signal for UUID {watch['uuid']}")
watch_check_update.send(watch_uuid=watch['uuid'])

update_handler = None
logger.debug(f"Worker {worker_id} completed watch {uuid} in {time.time()-fetch_start_time:.2f}s")
except Exception as cleanup_error:
logger.error(f"Worker {worker_id} error during cleanup: {cleanup_error}")
logger.exception(f"Worker {worker_id} full exception details:")

# Call the plugin finalization hook after all cleanup is done
# Use the references captured before cleanup
try:
apply_update_finalize(
update_handler=finalize_handler,
watch=finalize_watch,
datastore=datastore,
processing_exception=processing_exception
)
except Exception as finalize_error:
logger.error(f"Worker {worker_id} error in finalize hook: {finalize_error}")
logger.exception(f"Worker {worker_id} full exception details:")
finally:
# Clean up the captured references to allow immediate garbage collection
del finalize_handler
del finalize_watch

# Release the UUID from processing AFTER all cleanup and hooks complete (thread-safe)
# This ensures wait_for_all_checks() waits for finalize hooks to complete
try:
worker_pool.release_uuid_from_processing(uuid, worker_id=worker_id)
except Exception as release_error:
logger.error(f"Worker {worker_id} error releasing UUID: {release_error}")
logger.exception(f"Worker {worker_id} full exception details:")
finally:
# Send the completion signal - retrieve it by name to ensure thread-safe access
if watch:
watch_check_update = signal('watch_check_update')
watch_check_update.send(watch_uuid=watch['uuid'])

del (uuid)


# Brief pause before continuing to avoid tight error loops (only on error)
if 'e' in locals():
await asyncio.sleep(1.0)
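# Illustrative plugin sketch (the pluggy project name and hook name here are
# assumptions - only the apply_update_finalize() keyword arguments above are
# taken from the code): what a finalize-step hookimpl could look like.
import pluggy

hookimpl = pluggy.HookimplMarker("changedetectionio")  # assumed marker name

class MyFinalizePlugin:
    @hookimpl
    def update_finalize(self, update_handler, watch, datastore, processing_exception):  # assumed hook name
        # Runs after all worker cleanup; processing_exception is None on a clean check.
        if processing_exception is not None:
            print(f"watch {watch.get('uuid')} failed: {processing_exception}")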
@@ -590,19 +394,6 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore, exec
# Small yield for normal completion
await asyncio.sleep(0.01)

# Job completed - increment the counter and check the restart conditions
jobs_processed += 1
runtime = time.time() - start_time

# Check if we should restart (only when idle, between jobs)
should_restart_jobs = jobs_processed >= max_jobs
should_restart_time = runtime >= max_runtime_seconds

if should_restart_jobs or should_restart_time:
reason = f"{jobs_processed} jobs" if should_restart_jobs else f"{runtime:.0f}s runtime"
logger.info(f"Worker {worker_id} restarting after {reason} ({jobs_processed} jobs, {runtime:.0f}s runtime)")
return "restart"

# Check if we should exit
if app.config.exit.is_set():
break
@@ -610,12 +401,10 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore, exec
# Check if we're in a pytest environment - if so, be more gentle with logging
import sys
in_pytest = "pytest" in sys.modules or "PYTEST_CURRENT_TEST" in os.environ

if not in_pytest:
logger.info(f"Worker {worker_id} shutting down")

return "shutdown"
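# Illustrative supervisor sketch (invented, not from the diff): how a caller can
# honour the "restart"/"shutdown" return values above by re-launching the
# coroutine until it asks to stop.
import asyncio

async def supervise_worker(worker_id, q, notification_q, app, datastore):
    while True:
        result = await async_update_worker(worker_id, q, notification_q, app, datastore)
        if result == "restart":
            # Worker hit WORKER_MAX_JOBS or WORKER_MAX_RUNTIME - start a fresh one
            continue
        break  # "shutdown" (or anything else) ends supervision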
def cleanup_error_artifacts(uuid, datastore):
"""Helper function to clean up error artifacts"""
@@ -3,7 +3,6 @@ import glob
import threading

from flask import Blueprint, render_template, send_from_directory, flash, url_for, redirect, abort
from flask_babel import gettext
import os

from changedetectionio.store import ChangeDetectionStore
@@ -13,7 +12,7 @@ from loguru import logger
BACKUP_FILENAME_FORMAT = "changedetection-backup-{}.zip"


def create_backup(datastore_path, watches: dict, tags: dict = None):
def create_backup(datastore_path, watches: dict):
logger.debug("Creating backup...")
import zipfile
from pathlib import Path
@@ -27,31 +26,15 @@ def create_backup(datastore_path, watches: dict, tags: dict = None):
compression=zipfile.ZIP_DEFLATED,
compresslevel=8) as zipObj:

# Add the settings file (supports both formats)
# New format: changedetection.json
changedetection_json = os.path.join(datastore_path, "changedetection.json")
if os.path.isfile(changedetection_json):
zipObj.write(changedetection_json, arcname="changedetection.json")
logger.debug("Added changedetection.json to backup")
# Add the index
zipObj.write(os.path.join(datastore_path, "url-watches.json"), arcname="url-watches.json")

# Legacy format: url-watches.json (for backward compatibility)
url_watches_json = os.path.join(datastore_path, "url-watches.json")
if os.path.isfile(url_watches_json):
zipObj.write(url_watches_json, arcname="url-watches.json")
logger.debug("Added url-watches.json to backup")

# Add the tag data directories (each tag has its own {uuid}/tag.json)
for uuid, tag in (tags or {}).items():
for f in Path(tag.data_dir).glob('*'):
zipObj.write(f,
arcname=os.path.join(f.parts[-2], f.parts[-1]),
compress_type=zipfile.ZIP_DEFLATED,
compresslevel=8)
logger.debug(f"Added tag '{tag.get('title')}' ({uuid}) to backup")
# Add the flask app secret
zipObj.write(os.path.join(datastore_path, "secret.txt"), arcname="secret.txt")

# Add any data in the watch data directory.
for uuid, w in watches.items():
for f in Path(w.data_dir).glob('*'):
for f in Path(w.watch_data_dir).glob('*'):
zipObj.write(f,
# Use the full path to access the file, but make the file 'relative' in the zip.
arcname=os.path.join(f.parts[-2], f.parts[-1]),
@@ -92,36 +75,28 @@ def create_backup(datastore_path, watches: dict, tags: dict = None):


def construct_blueprint(datastore: ChangeDetectionStore):
from .restore import construct_restore_blueprint

backups_blueprint = Blueprint('backups', __name__, template_folder="templates")
backups_blueprint.register_blueprint(construct_restore_blueprint(datastore))
backup_threads = []

@login_optionally_required
@backups_blueprint.route("/request-backup", methods=['GET'])
def request_backup():
if any(thread.is_alive() for thread in backup_threads):
flash(gettext("A backup is already running, check back in a few minutes"), "error")
return redirect(url_for('backups.create'))
flash("A backup is already running, check back in a few minutes", "error")
return redirect(url_for('backups.index'))

if len(find_backups()) > int(os.getenv("MAX_NUMBER_BACKUPS", 100)):
flash(gettext("Maximum number of backups reached, please remove some"), "error")
return redirect(url_for('backups.create'))
flash("Maximum number of backups reached, please remove some", "error")
return redirect(url_for('backups.index'))

# With immediate persistence, all data is already saved
zip_thread = threading.Thread(
target=create_backup,
args=(datastore.datastore_path, datastore.data.get("watching")),
kwargs={'tags': datastore.data['settings']['application'].get('tags', {})},
daemon=True,
name="BackupCreator"
)
# Be sure we're written fresh
datastore.sync_to_json()
zip_thread = threading.Thread(target=create_backup, args=(datastore.datastore_path, datastore.data.get("watching")))
zip_thread.start()
backup_threads.append(zip_thread)
flash(gettext("Backup building in background, check back in a few minutes."))
flash("Backup building in background, check back in a few minutes.")

return redirect(url_for('backups.create'))
return redirect(url_for('backups.index'))

def find_backups():
backup_filepath = os.path.join(datastore.datastore_path, BACKUP_FILENAME_FORMAT.format("*"))
@@ -146,34 +121,31 @@ def construct_blueprint(datastore: ChangeDetectionStore):
def download_backup(filename):
import re
filename = filename.strip()
backup_filename_regex = BACKUP_FILENAME_FORMAT.format(r"\d+")
backup_filename_regex = BACKUP_FILENAME_FORMAT.format("\d+")

full_path = os.path.join(os.path.abspath(datastore.datastore_path), filename)
if not full_path.startswith(os.path.abspath(datastore.datastore_path)):
abort(404)

# Resolve 'latest' before any validation so the checks run against the real filename.
if filename == 'latest':
backups = find_backups()
if not backups:
abort(404)
filename = backups[0]['filename']

if not re.match(r"^" + backup_filename_regex + "$", filename):
abort(400)  # Bad Request if the filename doesn't match the pattern

full_path = os.path.join(os.path.abspath(datastore.datastore_path), filename)
if not full_path.startswith(os.path.abspath(datastore.datastore_path) + os.sep):
abort(404)

logger.debug(f"Backup download request for '{full_path}'")
return send_from_directory(os.path.abspath(datastore.datastore_path), filename, as_attachment=True)

@login_optionally_required
@backups_blueprint.route("/", methods=['GET'])
@backups_blueprint.route("/create", methods=['GET'])
def create():
@backups_blueprint.route("", methods=['GET'])
def index():
backups = find_backups()
output = render_template("backup_create.html",
output = render_template("overview.html",
available_backups=backups,
backup_running=any(thread.is_alive() for thread in backup_threads)
)

return output

@login_optionally_required
@@ -185,8 +157,8 @@ def construct_blueprint(datastore: ChangeDetectionStore):
for backup in backups:
os.unlink(backup)

flash(gettext("Backups were deleted."))
flash("Backups were deleted.")

return redirect(url_for('backups.create'))
return redirect(url_for('backups.index'))

return backups_blueprint
@@ -1,248 +0,0 @@
import io
import json
import os
import re
import shutil
import tempfile
import threading
import zipfile

from flask import Blueprint, render_template, flash, url_for, redirect, request
from flask_babel import gettext, lazy_gettext as _l
from wtforms import Form, BooleanField, SubmitField
from flask_wtf.file import FileField, FileAllowed
from loguru import logger

from changedetectionio.flask_app import login_optionally_required

# Maximum size of the uploaded zip file. Override via the env var MAX_RESTORE_UPLOAD_MB.
_MAX_UPLOAD_BYTES = int(os.getenv("MAX_RESTORE_UPLOAD_MB", 256)) * 1024 * 1024
# Maximum total uncompressed size of all entries (zip-bomb guard). Override via MAX_RESTORE_DECOMPRESSED_MB.
_MAX_DECOMPRESSED_BYTES = int(os.getenv("MAX_RESTORE_DECOMPRESSED_MB", 1024)) * 1024 * 1024
# Only top-level directories whose name is a valid UUID are treated as watch/tag entries.
_UUID_RE = re.compile(
r'^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$',
re.IGNORECASE,
)


class RestoreForm(Form):
zip_file = FileField(_l('Backup zip file'), validators=[
FileAllowed(['zip'], _l('Must be a .zip backup file!'))
])
include_groups = BooleanField(_l('Include groups'), default=True)
include_groups_replace_existing = BooleanField(_l('Replace existing groups of the same UUID'), default=True)
include_watches = BooleanField(_l('Include watches'), default=True)
include_watches_replace_existing = BooleanField(_l('Replace existing watches of the same UUID'), default=True)
submit = SubmitField(_l('Restore backup'))


def import_from_zip(zip_stream, datastore, include_groups, include_groups_replace, include_watches, include_watches_replace):
"""
Extract and import watches and groups from a backup zip stream.

Mirrors the store's _load_watches / _load_tags loading pattern:
- UUID dirs with tag.json → Tag.model + tag_obj.commit()
- UUID dirs with watch.json → rehydrate_entity + watch_obj.commit()

Returns a dict with counts: restored_groups, skipped_groups, restored_watches, skipped_watches.
Raises zipfile.BadZipFile if the stream is not a valid zip.
"""
from changedetectionio.model import Tag

restored_groups = 0
skipped_groups = 0
restored_watches = 0
skipped_watches = 0

current_tags = datastore.data['settings']['application'].get('tags', {})
current_watches = datastore.data['watching']

with tempfile.TemporaryDirectory() as tmpdir:
logger.debug(f"Restore: extracting zip to {tmpdir}")
with zipfile.ZipFile(zip_stream, 'r') as zf:
total_uncompressed = sum(m.file_size for m in zf.infolist())
if total_uncompressed > _MAX_DECOMPRESSED_BYTES:
raise ValueError(
f"Backup archive decompressed size ({total_uncompressed // (1024 * 1024)} MB) "
f"exceeds the {_MAX_DECOMPRESSED_BYTES // (1024 * 1024)} MB limit"
)
resolved_dest = os.path.realpath(tmpdir)
for member in zf.infolist():
member_dest = os.path.realpath(os.path.join(resolved_dest, member.filename))
if not member_dest.startswith(resolved_dest + os.sep) and member_dest != resolved_dest:
raise ValueError(f"Zip Slip path traversal detected in backup archive: {member.filename!r}")
zf.extract(member, tmpdir)
logger.debug("Restore: zip extracted, scanning UUID directories")

for entry in os.scandir(tmpdir):
if not entry.is_dir():
continue

uuid = entry.name
if not _UUID_RE.match(uuid):
logger.warning(f"Restore: skipping non-UUID directory {uuid!r}")
continue
tag_json_path = os.path.join(entry.path, 'tag.json')
watch_json_path = os.path.join(entry.path, 'watch.json')

# --- Tags (groups) ---
if include_groups and os.path.exists(tag_json_path):
if uuid in current_tags and not include_groups_replace:
logger.debug(f"Restore: skipping existing group {uuid} (replace not requested)")
skipped_groups += 1
continue

try:
with open(tag_json_path, 'r', encoding='utf-8') as f:
tag_data = json.load(f)
except (json.JSONDecodeError, IOError) as e:
logger.error(f"Restore: failed to read tag.json for {uuid}: {e}")
continue

title = tag_data.get('title', uuid)
logger.debug(f"Restore: importing group '{title}' ({uuid})")

# Mirror _load_tags: set the uuid and force the processor
tag_data['uuid'] = uuid
tag_data['processor'] = 'restock_diff'

# Copy the UUID directory so data_dir exists for commit()
dst_dir = os.path.join(datastore.datastore_path, uuid)
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
shutil.copytree(entry.path, dst_dir)

tag_obj = Tag.model(
datastore_path=datastore.datastore_path,
__datastore=datastore.data,
default=tag_data
)
current_tags[uuid] = tag_obj
tag_obj.commit()
restored_groups += 1
logger.success(f"Restore: group '{title}' ({uuid}) restored")

# --- Watches ---
elif include_watches and os.path.exists(watch_json_path):
if uuid in current_watches and not include_watches_replace:
logger.debug(f"Restore: skipping existing watch {uuid} (replace not requested)")
skipped_watches += 1
continue

try:
with open(watch_json_path, 'r', encoding='utf-8') as f:
watch_data = json.load(f)
except (json.JSONDecodeError, IOError) as e:
logger.error(f"Restore: failed to read watch.json for {uuid}: {e}")
continue

url = watch_data.get('url', uuid)
logger.debug(f"Restore: importing watch '{url}' ({uuid})")

# Copy the UUID directory first so data_dir and history files exist
dst_dir = os.path.join(datastore.datastore_path, uuid)
if os.path.exists(dst_dir):
shutil.rmtree(dst_dir)
shutil.copytree(entry.path, dst_dir)

# Mirror _load_watches / rehydrate_entity
watch_data['uuid'] = uuid
watch_obj = datastore.rehydrate_entity(uuid, watch_data)
current_watches[uuid] = watch_obj
watch_obj.commit()
restored_watches += 1
logger.success(f"Restore: watch '{url}' ({uuid}) restored")

logger.debug(f"Restore: scan complete - groups {restored_groups} restored / {skipped_groups} skipped, "
f"watches {restored_watches} restored / {skipped_watches} skipped")

# Persist changedetection.json (includes the updated tags dict)
logger.debug("Restore: committing datastore settings")
datastore.commit()

return {
'restored_groups': restored_groups,
'skipped_groups': skipped_groups,
'restored_watches': restored_watches,
'skipped_watches': skipped_watches,
}


def construct_restore_blueprint(datastore):
restore_blueprint = Blueprint('restore', __name__, template_folder="templates")
restore_threads = []

@login_optionally_required
@restore_blueprint.route("/restore", methods=['GET'])
def restore():
form = RestoreForm()
return render_template("backup_restore.html",
form=form,
restore_running=any(t.is_alive() for t in restore_threads),
max_upload_mb=_MAX_UPLOAD_BYTES // (1024 * 1024),
max_decompressed_mb=_MAX_DECOMPRESSED_BYTES // (1024 * 1024))

@login_optionally_required
@restore_blueprint.route("/restore/start", methods=['POST'])
def backups_restore_start():
if any(t.is_alive() for t in restore_threads):
flash(gettext("A restore is already running, check back in a few minutes"), "error")
return redirect(url_for('backups.restore.restore'))

zip_file = request.files.get('zip_file')
if not zip_file or not zip_file.filename:
flash(gettext("No file uploaded"), "error")
return redirect(url_for('backups.restore.restore'))

if not zip_file.filename.lower().endswith('.zip'):
flash(gettext("File must be a .zip backup file"), "error")
return redirect(url_for('backups.restore.restore'))

# Reject oversized uploads before reading the stream into memory.
content_length = request.content_length
if content_length and content_length > _MAX_UPLOAD_BYTES:
flash(gettext("Backup file is too large (max %(mb)s MB)", mb=_MAX_UPLOAD_BYTES // (1024 * 1024)), "error")
return redirect(url_for('backups.restore.restore'))

# Read into memory now - the request stream is gone once we return.
# Read one byte beyond the limit so we can detect truncated-but-still-oversized streams.
try:
raw = zip_file.read(_MAX_UPLOAD_BYTES + 1)
if len(raw) > _MAX_UPLOAD_BYTES:
flash(gettext("Backup file is too large (max %(mb)s MB)", mb=_MAX_UPLOAD_BYTES // (1024 * 1024)), "error")
return redirect(url_for('backups.restore.restore'))
zip_bytes = io.BytesIO(raw)
with zipfile.ZipFile(zip_bytes):  # quick validity check before spawning
pass
zip_bytes.seek(0)
except zipfile.BadZipFile:
flash(gettext("Invalid or corrupted zip file"), "error")
return redirect(url_for('backups.restore.restore'))

include_groups = request.form.get('include_groups') == 'y'
include_groups_replace = request.form.get('include_groups_replace_existing') == 'y'
include_watches = request.form.get('include_watches') == 'y'
include_watches_replace = request.form.get('include_watches_replace_existing') == 'y'

restore_thread = threading.Thread(
target=import_from_zip,
kwargs={
'zip_stream': zip_bytes,
'datastore': datastore,
'include_groups': include_groups,
'include_groups_replace': include_groups_replace,
'include_watches': include_watches,
'include_watches_replace': include_watches_replace,
},
daemon=True,
name="BackupRestore"
)
restore_thread.start()
restore_threads[:] = [t for t in restore_threads if t.is_alive()]
restore_threads.append(restore_thread)
flash(gettext("Restore started in background, check back in a few minutes."))
return redirect(url_for('backups.restore.restore'))

return restore_blueprint
@@ -1,49 +0,0 @@
{% extends 'base.html' %}
{% block content %}
{% from '_helpers.html' import render_simple_field, render_field %}

<div class="edit-form">
    <div class="tabs collapsable">
        <ul>
            <li class="tab active" id=""><a href="{{ url_for('backups.create') }}">{{ _('Create') }}</a></li>
            <li class="tab"><a href="{{ url_for('backups.restore.restore') }}">{{ _('Restore') }}</a></li>
        </ul>
    </div>
    <div class="box-wrap inner">
        <div id="general">
            {% if backup_running %}
            <p>
                <span class="spinner"></span> <strong>{{ _('A backup is running!') }}</strong>
            </p>
            {% endif %}

            <p>
                {{ _('Here you can download and request a new backup, when a backup is completed you will see it listed below.') }}
            </p>
            <br>
            {% if available_backups %}
            <ul>
                {% for backup in available_backups %}
                <li>
                    <a href="{{ url_for('backups.download_backup', filename=backup["filename"]) }}">{{ backup["filename"] }}</a> {{ backup["filesize"] }} {{ _('Mb') }}
                </li>
                {% endfor %}
            </ul>
            {% else %}
            <p>
                <strong>{{ _('No backups found.') }}</strong>
            </p>
            {% endif %}

            <a class="pure-button pure-button-primary"
               href="{{ url_for('backups.request_backup') }}">{{ _('Create backup') }}</a>
            {% if available_backups %}
            <a class="pure-button button-small button-error "
               href="{{ url_for('backups.remove_backups') }}">{{ _('Remove backups') }}</a>
            {% endif %}

        </div>

    </div>
</div>
{% endblock %}
@@ -1,62 +0,0 @@
{% extends 'base.html' %}
{% block content %}
{% from '_helpers.html' import render_field, render_checkbox_field %}

<div class="edit-form">
    <div class="tabs collapsable">
        <ul>
            <li class="tab"><a href="{{ url_for('backups.create') }}">{{ _('Create') }}</a></li>
            <li class="tab active"><a href="{{ url_for('backups.restore.restore') }}">{{ _('Restore') }}</a></li>
        </ul>
    </div>
    <div class="box-wrap inner">
        <div id="general">
            {% if restore_running %}
            <p>
                <span class="spinner"></span> <strong>{{ _('A restore is running!') }}</strong>
            </p>
            {% endif %}

            <p>{{ _('Restore a backup. Must be a .zip backup file created on/after v0.53.1 (new database layout).') }}</p>
            <p>{{ _('Note: This does not override the main application settings, only watches and groups.') }}</p>
            <p class="pure-form-message">
                {{ _('Max upload size: %(upload)s MB · Max decompressed size: %(decomp)s MB',
                     upload=max_upload_mb, decomp=max_decompressed_mb) }}
            </p>

            <form class="pure-form pure-form-stacked settings"
                  action="{{ url_for('backups.restore.backups_restore_start') }}"
                  method="POST"
                  enctype="multipart/form-data">
                <input type="hidden" name="csrf_token" value="{{ csrf_token() }}">

                <div class="pure-control-group">
                    {{ render_checkbox_field(form.include_groups) }}
                    <span class="pure-form-message-inline">{{ _('Include all groups found in backup?') }}</span>
                </div>
                <div class="pure-control-group">
                    {{ render_checkbox_field(form.include_groups_replace_existing) }}
                    <span class="pure-form-message-inline">{{ _('Replace any existing groups of the same UUID?') }}</span>
                </div>

                <div class="pure-control-group">
                    {{ render_checkbox_field(form.include_watches) }}
                    <span class="pure-form-message-inline">{{ _('Include all watches found in backup?') }}</span>
                </div>
                <div class="pure-control-group">
                    {{ render_checkbox_field(form.include_watches_replace_existing) }}
                    <span class="pure-form-message-inline">{{ _('Replace any existing watches of the same UUID?') }}</span>
                </div>

                <div class="pure-control-group">
                    {{ render_field(form.zip_file) }}
                </div>

                <div class="pure-controls">
                    <button type="submit" class="pure-button pure-button-primary">{{ _('Restore backup') }}</button>
                </div>
            </form>
        </div>
    </div>
</div>
{% endblock %}
changedetectionio/blueprint/backups/templates/overview.html (new file, 36 lines)
@@ -0,0 +1,36 @@
{% extends 'base.html' %}
{% block content %}
{% from '_helpers.html' import render_simple_field, render_field %}
<div class="edit-form">
    <div class="box-wrap inner">
        <h4>Backups</h4>
        {% if backup_running %}
        <p>
            <strong>A backup is running!</strong>
        </p>
        {% endif %}
        <p>
            Here you can download and request a new backup, when a backup is completed you will see it listed below.
        </p>
        <br>
        {% if available_backups %}
        <ul>
            {% for backup in available_backups %}
            <li><a href="{{ url_for('backups.download_backup', filename=backup["filename"]) }}">{{ backup["filename"] }}</a> {{ backup["filesize"] }} Mb</li>
            {% endfor %}
        </ul>
        {% else %}
        <p>
            <strong>No backups found.</strong>
        </p>
        {% endif %}

        <a class="pure-button pure-button-primary" href="{{ url_for('backups.request_backup') }}">Create backup</a>
        {% if available_backups %}
        <a class="pure-button button-small button-error " href="{{ url_for('backups.remove_backups') }}">Remove backups</a>
        {% endif %}
    </div>
</div>


{% endblock %}
@@ -21,250 +21,93 @@ from changedetectionio.flask_app import login_optionally_required
from loguru import logger

browsersteps_sessions = {}
browsersteps_watch_to_session = {}  # Maps watch_uuid -> browsersteps_session_id
io_interface_context = None
import json
import hashlib
from flask import Response
import asyncio
import threading
import time

# Dedicated event loop for ALL browser steps sessions
_browser_steps_loop = None
_browser_steps_thread = None
_browser_steps_loop_lock = threading.Lock()

def _start_browser_steps_loop():
    """Start a dedicated event loop for browser steps in its own thread"""
    global _browser_steps_loop

    # Create and set the event loop for this thread
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    _browser_steps_loop = loop

    logger.debug("Browser steps event loop started")

    try:
        # Run the loop forever - handles all browsersteps sessions
        loop.run_forever()
    except Exception as e:
        logger.error(f"Browser steps event loop error: {e}")
    finally:
        try:
            # Cancel all remaining tasks
            pending = asyncio.all_tasks(loop)
            for task in pending:
                task.cancel()

            # Wait for tasks to finish cancellation
            if pending:
                loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))
        except Exception as e:
            logger.debug(f"Error during browser steps loop cleanup: {e}")
        finally:
            loop.close()
            logger.debug("Browser steps event loop closed")

def _ensure_browser_steps_loop():
    """Ensure the browser steps event loop is running"""
    global _browser_steps_loop, _browser_steps_thread

    with _browser_steps_loop_lock:
        if _browser_steps_thread is None or not _browser_steps_thread.is_alive():
            logger.debug("Starting browser steps event loop thread")
            _browser_steps_thread = threading.Thread(
                target=_start_browser_steps_loop,
                daemon=True,
                name="BrowserStepsEventLoop"
            )
            _browser_steps_thread.start()

            # Wait for the loop to be ready
            timeout = 5.0
            start_time = time.time()
            while _browser_steps_loop is None:
                if time.time() - start_time > timeout:
                    raise RuntimeError("Browser steps event loop failed to start")
                time.sleep(0.01)

            logger.debug("Browser steps event loop thread started and ready")
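
# --- Illustrative sketch (not from this changeset) --------------------------
# The two helpers above follow the standard asyncio pattern: own one long-lived
# event loop in a background thread, then submit coroutines to it from
# synchronous code. Stripped to its essentials (all names here are made up):
#
#   import asyncio, threading
#   _loop = asyncio.new_event_loop()
#   threading.Thread(target=_loop.run_forever, daemon=True).start()
#
#   async def fetch_title():
#       await asyncio.sleep(0.1)                   # stand-in for real Playwright work
#       return "example"
#
#   future = asyncio.run_coroutine_threadsafe(fetch_title(), _loop)
#   print(future.result(timeout=5))                # blocks the caller, not the loop
# -----------------------------------------------------------------------------
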
def run_async_in_browser_loop(coro):
    """Run async coroutine using the dedicated browser steps event loop"""
    _ensure_browser_steps_loop()

    if _browser_steps_loop and not _browser_steps_loop.is_closed():
        logger.debug("Browser steps using dedicated event loop")
        future = asyncio.run_coroutine_threadsafe(coro, _browser_steps_loop)
        return future.result()
    else:
        raise RuntimeError("Browser steps event loop is not available")

# The variant on the other side of this hunk ran the coroutine on the shared
# async worker loop, with a temporary event loop as a last-resort fallback:
def run_async_in_browser_loop(coro):
    """Run async coroutine using the existing async worker event loop"""
    from changedetectionio import worker_handler

    # Use the existing async worker event loop instead of creating a new one
    if worker_handler.USE_ASYNC_WORKERS and worker_handler.async_loop and not worker_handler.async_loop.is_closed():
        logger.debug("Browser steps using existing async worker event loop")
        future = asyncio.run_coroutine_threadsafe(coro, worker_handler.async_loop)
        return future.result()
    else:
        # Fallback: create a new event loop (for sync workers or if async loop not available)
        logger.debug("Browser steps creating temporary event loop")
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            return loop.run_until_complete(coro)
        finally:
            loop.close()

async def _close_session_resources(session_data, label=''):
    """Close all browser resources for a session in the correct order.

    browserstepper.cleanup() closes page+context but not the browser itself.
    For CloakBrowser, browser.close() is what stops the local Chromium process via pw.stop().
    For the default CDP path, playwright_context.stop() shuts down the playwright instance.
    """
    browserstepper = session_data.get('browserstepper')
    if browserstepper:
        try:
            await browserstepper.cleanup()
        except Exception as e:
            logger.error(f"Error cleaning up browserstepper{label}: {e}")

    browser = session_data.get('browser')
    if browser:
        try:
            await asyncio.wait_for(browser.close(), timeout=5.0)
        except Exception as e:
            logger.warning(f"Error closing browser{label}: {e}")

    playwright_context = session_data.get('playwright_context')
    if playwright_context:
        try:
            await playwright_context.stop()
        except Exception as e:
            logger.warning(f"Error stopping playwright context{label}: {e}")


def cleanup_expired_sessions():
    """Remove expired browsersteps sessions and cleanup their resources"""
    global browsersteps_sessions, browsersteps_watch_to_session

    expired_session_ids = []

    # Find expired sessions
    for session_id, session_data in browsersteps_sessions.items():
        browserstepper = session_data.get('browserstepper')
        if browserstepper and browserstepper.has_expired:
            expired_session_ids.append(session_id)

    # Cleanup expired sessions
    for session_id in expired_session_ids:
        logger.debug(f"Cleaning up expired browsersteps session {session_id}")
        session_data = browsersteps_sessions[session_id]

        try:
            run_async_in_browser_loop(_close_session_resources(session_data, label=f" for session {session_id}"))
        except Exception as e:
            logger.error(f"Error cleaning up session {session_id}: {e}")

        # Remove from sessions dict
        del browsersteps_sessions[session_id]

        # Remove from watch mapping
        for watch_uuid, mapped_session_id in list(browsersteps_watch_to_session.items()):
            if mapped_session_id == session_id:
                del browsersteps_watch_to_session[watch_uuid]
                break

    if expired_session_ids:
        logger.info(f"Cleaned up {len(expired_session_ids)} expired browsersteps session(s)")

def cleanup_session_for_watch(watch_uuid):
    """Cleanup a specific browsersteps session for a watch UUID"""
    global browsersteps_sessions, browsersteps_watch_to_session

    session_id = browsersteps_watch_to_session.get(watch_uuid)
    if not session_id:
        logger.debug(f"No browsersteps session found for watch {watch_uuid}")
        return

    logger.debug(f"Cleaning up browsersteps session {session_id} for watch {watch_uuid}")

    session_data = browsersteps_sessions.get(session_id)
    if session_data:
        try:
            run_async_in_browser_loop(_close_session_resources(session_data, label=f" for watch {watch_uuid}"))
        except Exception as e:
            logger.error(f"Error cleaning up session {session_id} for watch {watch_uuid}: {e}")

        # Remove from sessions dict
        del browsersteps_sessions[session_id]

    # Remove from watch mapping
    del browsersteps_watch_to_session[watch_uuid]

    logger.debug(f"Cleaned up session for watch {watch_uuid}")

    # Opportunistically cleanup any other expired sessions
    cleanup_expired_sessions()
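
# --- Illustrative sketch (not from this changeset) --------------------------
# Typical callers of the cleanup helpers above; the periodic-timer wiring is
# an assumption for illustration, only the two function names come from the
# code above:
#
#   import threading
#   def _sweep():
#       cleanup_expired_sessions()                 # drops sessions where browserstepper.has_expired
#       threading.Timer(60, _sweep).start()        # re-arm; daemon-style housekeeping
#   _sweep()
#
#   # and, when a watch is deleted or its editor is closed:
#   # cleanup_session_for_watch(watch_uuid)
# -----------------------------------------------------------------------------
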
def construct_blueprint(datastore: ChangeDetectionStore):
    browser_steps_blueprint = Blueprint('browser_steps', __name__, template_folder="templates")

    async def start_browsersteps_session(watch_uuid):
        from changedetectionio.browser_steps import browser_steps
        from . import browser_steps
        import time
        from playwright.async_api import async_playwright

        # We keep the playwright session open for many minutes
        keepalive_seconds = int(os.getenv('BROWSERSTEPS_MINUTES_KEEPALIVE', 10)) * 60
        keepalive_ms = ((keepalive_seconds + 3) * 1000)

        browsersteps_start_session = {'start_time': time.time()}

        # Build proxy dict first — needed by both the CDP path and fetcher-specific launchers
        proxy_id = datastore.get_preferred_proxy_for_watch(uuid=watch_uuid)
        proxy = None
        if proxy_id:
            proxy_url = datastore.proxy_list.get(proxy_id, {}).get('url')
            if proxy_url:
                from urllib.parse import urlparse
                parsed = urlparse(proxy_url)
                proxy = {'server': proxy_url}
                if parsed.username:
                    proxy['username'] = parsed.username
                if parsed.password:
                    proxy['password'] = parsed.password
                logger.debug(f"Browser Steps: UUID {watch_uuid} selected proxy {proxy_url}")
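
        # Illustrative values only (not from this changeset): what the split above
        # produces for a credentialled proxy URL, since Playwright wants the
        # username/password as separate keys rather than embedded in 'server':
        #   urlparse('http://user:pass@proxy.example:3128')
        #   -> proxy == {'server': 'http://user:pass@proxy.example:3128',
        #                'username': 'user', 'password': 'pass'}
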
        # Create a new async playwright instance for browser steps
        playwright_instance = async_playwright()
        playwright_context = await playwright_instance.start()

        # Resolve the fetcher class for this watch so we can ask it to launch its own browser
        # if it supports that (e.g. CloakBrowser, which runs locally rather than via CDP)
        watch = datastore.data['watching'][watch_uuid]
        from changedetectionio import content_fetchers
        fetcher_name = watch.get_fetch_backend or 'system'
        if fetcher_name == 'system':
            fetcher_name = datastore.data['settings']['application'].get('fetch_backend', 'html_requests')
        fetcher_class = getattr(content_fetchers, fetcher_name, None)

        browser = None
        playwright_context = None

        # If the fetcher has its own browser launch for the live steps UI, use it.
        # get_browsersteps_browser(proxy, keepalive_ms) returns (browser, playwright_context_or_None)
        # or None to fall back to the default CDP path.
        if fetcher_class and hasattr(fetcher_class, 'get_browsersteps_browser'):
            result = await fetcher_class.get_browsersteps_browser(proxy=proxy, keepalive_ms=keepalive_ms)
            if result is not None:
                browser, playwright_context = result
                logger.debug(f"Browser Steps: using fetcher-specific browser for '{fetcher_name}'")

        # Default: connect to the remote Playwright/sockpuppetbrowser via CDP
        if browser is None:
            playwright_instance = async_playwright()
            playwright_context = await playwright_instance.start()
            base_url = os.getenv('PLAYWRIGHT_DRIVER_URL', '').strip('"')
            a = "?" if '?' not in base_url else '&'
            base_url += a + f"timeout={keepalive_ms}"
            browser = await playwright_context.chromium.connect_over_cdp(base_url, timeout=keepalive_ms)
            logger.debug(f"Browser Steps: using CDP connection to {base_url}")

        keepalive_ms = ((keepalive_seconds + 3) * 1000)
        base_url = os.getenv('PLAYWRIGHT_DRIVER_URL', '').strip('"')
        a = "?" if not '?' in base_url else '&'
        base_url += a + f"timeout={keepalive_ms}"

        browser = await playwright_context.chromium.connect_over_cdp(base_url, timeout=keepalive_ms)
        browsersteps_start_session['browser'] = browser
        browsersteps_start_session['playwright_context'] = playwright_context

        proxy_id = datastore.get_preferred_proxy_for_watch(uuid=watch_uuid)
        proxy = None
        if proxy_id:
            proxy_url = datastore.proxy_list.get(proxy_id).get('url')
            if proxy_url:

                # Playwright needs separate username and password values
                from urllib.parse import urlparse
                parsed = urlparse(proxy_url)
                proxy = {'server': proxy_url}

                if parsed.username:
                    proxy['username'] = parsed.username

                if parsed.password:
                    proxy['password'] = parsed.password

                logger.debug(f"Browser Steps: UUID {watch_uuid} selected proxy {proxy_url}")

        # Tell Playwright to connect to Chrome and setup a new session via our stepper interface
        browserstepper = browser_steps.browsersteps_live_ui(
            playwright_browser=browser,
            proxy=proxy,
            start_url=watch.link,
            headers=watch.get('headers')
            start_url=datastore.data['watching'][watch_uuid].link,
            headers=datastore.data['watching'][watch_uuid].get('headers')
        )

        # Initialize the async connection
        await browserstepper.connect(proxy=proxy)

        browsersteps_start_session['browserstepper'] = browserstepper

        # For test
        #await browsersteps_start_session['browserstepper'].action_goto_url(value="http://example.com?time="+str(time.time()))

        return browsersteps_start_session
@@ -272,6 +115,7 @@ def construct_blueprint(datastore: ChangeDetectionStore):
    @browser_steps_blueprint.route("/browsersteps_start_session", methods=['GET'])
    def browsersteps_start_session():
        # A new session was requested, return sessionID
        import asyncio
        import uuid
        browsersteps_session_id = str(uuid.uuid4())
        watch_uuid = request.args.get('uuid')
@@ -279,9 +123,6 @@ def construct_blueprint(datastore: ChangeDetectionStore):
        if not watch_uuid:
            return make_response('No Watch UUID specified', 500)

        # Cleanup any existing session for this watch
        cleanup_session_for_watch(watch_uuid)

        logger.debug("Starting connection with playwright")
        logger.debug("browser_steps.py connecting")

@@ -290,10 +131,6 @@ def construct_blueprint(datastore: ChangeDetectionStore):
            browsersteps_sessions[browsersteps_session_id] = run_async_in_browser_loop(
                start_browsersteps_session(watch_uuid)
            )

            # Store the mapping of watch_uuid -> browsersteps_session_id
            browsersteps_watch_to_session[watch_uuid] = browsersteps_session_id

        except Exception as e:
            if 'ECONNREFUSED' in str(e):
                return make_response('Unable to start the Playwright Browser session, is sockpuppetbrowser running? Network configuration is OK?', 401)
@@ -318,8 +155,8 @@ def construct_blueprint(datastore: ChangeDetectionStore):
        watch = datastore.data['watching'].get(uuid)
        filename = f"step_before-{step_n}.jpeg" if request.args.get('type', '') == 'before' else f"step_{step_n}.jpeg"

        if step_n and watch and os.path.isfile(os.path.join(watch.data_dir, filename)):
            response = make_response(send_from_directory(directory=watch.data_dir, path=filename))
        if step_n and watch and os.path.isfile(os.path.join(watch.watch_data_dir, filename)):
            response = make_response(send_from_directory(directory=watch.watch_data_dir, path=filename))
            response.headers['Content-type'] = 'image/jpeg'
            response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
            response.headers['Pragma'] = 'no-cache'
@@ -334,10 +171,11 @@ def construct_blueprint(datastore: ChangeDetectionStore):
    @browser_steps_blueprint.route("/browsersteps_update", methods=['POST'])
    def browsersteps_ui_update():
        import base64
        import playwright._impl._errors
        from changedetectionio.blueprint.browser_steps import browser_steps

        remaining = 0
        remaining =0
        uuid = request.args.get('uuid')
        goto_website_url_first_step = request.args.get('goto_website_url_first_step')

        browsersteps_session_id = request.args.get('browsersteps_session_id')

@@ -348,33 +186,33 @@ def construct_blueprint(datastore: ChangeDetectionStore):
            return make_response('No session exists under that ID', 500)

        is_last_step = False

        # @todo - should always be an existing session
        if goto_website_url_first_step:
            logger.debug("Going to site (requested automatically before stepping)..")
            step_operation = "Goto site"
            step_selector = None
            step_optional_value = None
        else:
            # Actions - step/apply/etc, do the thing and return state
            if request.method == 'POST':
                # @todo - should always be an existing session
                step_operation = request.form.get('operation')
                step_selector = request.form.get('selector')
                step_optional_value = request.form.get('optional_value')
                is_last_step = strtobool(request.form.get('is_last_step'))

                try:
                    # Run the async call_action method in the dedicated browser steps event loop
                    run_async_in_browser_loop(
                        browsersteps_sessions[browsersteps_session_id]['browserstepper'].call_action(
                            action_name=step_operation,
                            selector=step_selector,
                            optional_value=step_optional_value
                        )
                    )
                except Exception as e:
                    logger.error(f"Exception when calling step operation {step_operation} {str(e)}")
                    # Try to find something of value to give back to the user
                    return make_response(str(e).splitlines()[0], 401)

        try:
            # Run the async call_action method in the dedicated browser steps event loop
            run_async_in_browser_loop(
                browsersteps_sessions[browsersteps_session_id]['browserstepper'].call_action(
                    action_name=step_operation,
                    selector=step_selector,
                    optional_value=step_optional_value
                )
            )
        except Exception as e:
            logger.error(f"Exception when calling step operation {step_operation} {str(e)}")
            # Try to find something of value to give back to the user
            return make_response(str(e).splitlines()[0], 401)

        # if not this_session.page:
        #     cleanup_playwright_session()
        #     return make_response('Browser session ran out of time :( Please reload this page.', 401)

        # Screenshots and other info only needed on requesting a step (POST)
        try:
@@ -382,7 +220,7 @@ def construct_blueprint(datastore: ChangeDetectionStore):
            (screenshot, xpath_data) = run_async_in_browser_loop(
                browsersteps_sessions[browsersteps_session_id]['browserstepper'].get_current_state()
            )

            if is_last_step:
                watch = datastore.data['watching'].get(uuid)
                u = browsersteps_sessions[browsersteps_session_id]['browserstepper'].page.url
@@ -6,19 +6,8 @@ from loguru import logger

from changedetectionio.content_fetchers import SCREENSHOT_MAX_HEIGHT_DEFAULT
from changedetectionio.content_fetchers.base import manage_user_agent
from changedetectionio.jinja2_custom import render as jinja_render
from changedetectionio.safe_jinja import render as jinja_render

def browser_steps_get_valid_steps(browser_steps: list):
    if browser_steps is not None and len(browser_steps):
        valid_steps = list(filter(
            lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one'), browser_steps))

        # Just in case they selected Goto site by accident with older JS
        if valid_steps and valid_steps[0]['operation'] == 'Goto site':
            del(valid_steps[0])

        return valid_steps
    return []


# Two flags, tell the JS which of the "Selector" or "Value" field should be enabled in the front end
@@ -450,7 +439,7 @@ class browsersteps_live_ui(steppable_browser_interface):
            logger.warning("Attempted to get current state after cleanup")
            return (None, None)

        xpath_element_js = importlib.resources.files("changedetectionio.content_fetchers.res").joinpath('xpath_element_scraper.js').read_text(encoding="utf-8")
        xpath_element_js = importlib.resources.files("changedetectionio.content_fetchers.res").joinpath('xpath_element_scraper.js').read_text()

        now = time.time()
        await self.page.wait_for_timeout(1 * 1000)
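
# --- Illustrative sketch (not from this changeset) --------------------------
# What browser_steps_get_valid_steps() filters, with made-up step dicts:
#
#   steps = [
#       {'operation': 'Goto site', 'selector': '', 'optional_value': ''},    # stripped when first
#       {'operation': 'Choose one', 'selector': '', 'optional_value': ''},   # placeholder, filtered out
#       {'operation': 'Click element', 'selector': 'button#accept', 'optional_value': ''},
#   ]
#   browser_steps_get_valid_steps(steps)
#   # -> [{'operation': 'Click element', 'selector': 'button#accept', 'optional_value': ''}]
# -----------------------------------------------------------------------------
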
@@ -33,7 +33,7 @@ def construct_blueprint(datastore: ChangeDetectionStore):
    def long_task(uuid, preferred_proxy):
        import time
        from changedetectionio.content_fetchers import exceptions as content_fetcher_exceptions
        from changedetectionio.jinja2_custom import render as jinja_render
        from changedetectionio.safe_jinja import render as jinja_render

        status = {'status': '', 'length': 0, 'text': ''}

@@ -94,13 +94,13 @@ def construct_blueprint(datastore: ChangeDetectionStore):
        return results

    @login_required
    @check_proxies_blueprint.route("/<uuid_str:uuid>/status", methods=['GET'])
    @check_proxies_blueprint.route("/<string:uuid>/status", methods=['GET'])
    def get_recheck_status(uuid):
        results = _recalc_check_status(uuid=uuid)
        return results

    @login_required
    @check_proxies_blueprint.route("/<uuid_str:uuid>/start", methods=['GET'])
    @check_proxies_blueprint.route("/<string:uuid>/start", methods=['GET'])
    def start_check(uuid):

        if not datastore.proxy_list:
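
# --- Illustrative sketch (not from this changeset) --------------------------
# The hunks above swap Flask's generic <string:uuid> converter for a custom
# <uuid_str:uuid> one. Registering such a converter looks roughly like this;
# the regex and class name are assumptions, not this project's actual code:
#
#   from flask import Flask
#   from werkzeug.routing import BaseConverter
#
#   class UuidStrConverter(BaseConverter):
#       # Only canonical hex-and-dashes UUIDs match; anything else 404s at routing time.
#       regex = r'[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'
#
#   app = Flask(__name__)
#   app.url_map.converters['uuid_str'] = UuidStrConverter
# -----------------------------------------------------------------------------
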
@@ -1,8 +1,13 @@
from flask import Blueprint, request, redirect, url_for, flash, render_template
from loguru import logger

from changedetectionio.store import ChangeDetectionStore
from changedetectionio.auth_decorator import login_optionally_required
from changedetectionio import worker_handler
from changedetectionio.blueprint.imports.importer import (
    import_url_list,
    import_distill_io_json,
    import_xlsx_wachete,
    import_xlsx_custom
)

def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMetaData):
    import_blueprint = Blueprint('imports', __name__, template_folder="templates")
@@ -12,27 +17,15 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe
    def import_page():
        remaining_urls = []
        from changedetectionio import forms
        #

        if request.method == 'POST':
            # from changedetectionio import worker_pool

            from changedetectionio.blueprint.imports.importer import (
                import_url_list,
                import_distill_io_json,
                import_xlsx_wachete,
                import_xlsx_custom
            )

            # URL List import
            if request.values.get('urls') and len(request.values.get('urls').strip()):
                # Import and push into the queue for immediate update check
                from changedetectionio import processors
                importer_handler = import_url_list()
                importer_handler.run(data=request.values.get('urls'), flash=flash, datastore=datastore, processor=request.values.get('processor', processors.get_default_processor()))
                logger.debug(f"Imported {len(importer_handler.new_uuids)} new UUIDs")
                # Don't add to queue because scheduler can see that they haven't been checked and will add them to the queue
                # for uuid in importer_handler.new_uuids:
                #     worker_pool.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))
                importer_handler.run(data=request.values.get('urls'), flash=flash, datastore=datastore, processor=request.values.get('processor', 'text_json_diff'))
                for uuid in importer_handler.new_uuids:
                    worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))

                if len(importer_handler.remaining_data) == 0:
                    return redirect(url_for('watchlist.index'))
@@ -44,10 +37,8 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe
                # Import and push into the queue for immediate update check
                d_importer = import_distill_io_json()
                d_importer.run(data=request.values.get('distill-io'), flash=flash, datastore=datastore)
                # Don't add to queue because scheduler can see that they haven't been checked and will add them to the queue
                # for uuid in importer_handler.new_uuids:
                #     worker_pool.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))

                for uuid in d_importer.new_uuids:
                    worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))

            # XLSX importer
            if request.files and request.files.get('xlsx_file'):
@@ -69,10 +60,8 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe
                w_importer.import_profile = map
                w_importer.run(data=file, flash=flash, datastore=datastore)

                # Don't add to queue because scheduler can see that they haven't been checked and will add them to the queue
                # for uuid in importer_handler.new_uuids:
                #     worker_pool.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))

                for uuid in w_importer.new_uuids:
                    worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))

        # Could be some remaining, or we could be on GET
        form = forms.importForm(formdata=request.form if request.method == 'POST' else None)
@@ -2,7 +2,6 @@ from abc import abstractmethod
import time
from wtforms import ValidationError
from loguru import logger
from flask_babel import gettext

from changedetectionio.forms import validate_url

@@ -42,7 +41,7 @@ class import_url_list(Importer):
        now = time.time()

        if (len(urls) > 5000):
            flash(gettext("Importing 5,000 of the first URLs from your list, the rest can be imported again."))
            flash("Importing 5,000 of the first URLs from your list, the rest can be imported again.")

        for url in urls:
            url = url.strip()
@@ -62,7 +61,7 @@ class import_url_list(Importer):
            extras = None
            if processor:
                extras = {'processor': processor}
            new_uuid = datastore.add_watch(url=url.strip(), tag=tags, save_immediately=False, extras=extras)
            new_uuid = datastore.add_watch(url=url.strip(), tag=tags, write_to_disk_now=False, extras=extras)

            if new_uuid:
                # Straight into the queue.
@@ -75,7 +74,7 @@ class import_url_list(Importer):
                self.remaining_data = []
            self.remaining_data.append(url)

        flash(gettext("{} Imported from list in {:.2f}s, {} Skipped.").format(good, time.time() - now, len(self.remaining_data)))
        flash("{} Imported from list in {:.2f}s, {} Skipped.".format(good, time.time() - now, len(self.remaining_data)))
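
# --- Illustrative sketch (not from this changeset) --------------------------
# The pattern in this hunk: every user-facing flash() string gains a
# flask_babel.gettext wrapper so it can be extracted for translation. The two
# parameterisation styles seen in this diff (example values made up):
#
#   from flask_babel import gettext
#   # positional .format() applied after translation, as in the import summaries:
#   flash(gettext("{} Imported from list in {:.2f}s, {} Skipped.").format(5, 1.23, 0))
#   # named %(name)s parameters resolved by gettext itself, as in the upload-size errors:
#   flash(gettext("Backup file is too large (max %(mb)s MB)", mb=100), "error")
# -----------------------------------------------------------------------------
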
class import_distill_io_json(Importer):
@@ -95,11 +94,11 @@ class import_distill_io_json(Importer):
        try:
            data = json.loads(data.strip())
        except json.decoder.JSONDecodeError:
            flash(gettext("Unable to read JSON file, was it broken?"), 'error')
            flash("Unable to read JSON file, was it broken?", 'error')
            return

        if not data.get('data'):
            flash(gettext("JSON structure looks invalid, was it broken?"), 'error')
            flash("JSON structure looks invalid, was it broken?", 'error')
            return

        for d in data.get('data'):
@@ -129,14 +128,14 @@ class import_distill_io_json(Importer):
            new_uuid = datastore.add_watch(url=d['uri'].strip(),
                                           tag=",".join(d.get('tags', [])),
                                           extras=extras,
                                           save_immediately=False)
                                           write_to_disk_now=False)

            if new_uuid:
                # Straight into the queue.
                self.new_uuids.append(new_uuid)
                good += 1

        flash(gettext("{} Imported from Distill.io in {:.2f}s, {} Skipped.").format(len(self.new_uuids), time.time() - now, len(self.remaining_data)))
        flash("{} Imported from Distill.io in {:.2f}s, {} Skipped.".format(len(self.new_uuids), time.time() - now, len(self.remaining_data)))


class import_xlsx_wachete(Importer):
@@ -157,7 +156,7 @@ class import_xlsx_wachete(Importer):
            wb = load_workbook(data)
        except Exception as e:
            # @todo correct except
            flash(gettext("Unable to read export XLSX file, something wrong with the file?"), 'error')
            flash("Unable to read export XLSX file, something wrong with the file?", 'error')
            return

        row_id = 2
@@ -197,25 +196,26 @@ class import_xlsx_wachete(Importer):
                    validate_url(data.get('url'))
                except ValidationError as e:
                    logger.error(f">> Import URL error {data.get('url')} {str(e)}")
                    flash(gettext("Error processing row number {}, URL value was incorrect, row was skipped.").format(row_id), 'error')
                    flash(f"Error processing row number {row_id}, URL value was incorrect, row was skipped.", 'error')
                    # Don't bother processing anything else on this row
                    continue

                new_uuid = datastore.add_watch(url=data['url'].strip(),
                                               extras=extras,
                                               tag=data.get('folder'),
                                               save_immediately=False)
                                               write_to_disk_now=False)
                if new_uuid:
                    # Straight into the queue.
                    self.new_uuids.append(new_uuid)
                    good += 1
            except Exception as e:
                logger.error(e)
                flash(gettext("Error processing row number {}, check all cell data types are correct, row was skipped.").format(row_id), 'error')
                flash(f"Error processing row number {row_id}, check all cell data types are correct, row was skipped.", 'error')
            else:
                row_id += 1

        flash(gettext("{} imported from Wachete .xlsx in {:.2f}s").format(len(self.new_uuids), time.time() - now))
        flash(
            "{} imported from Wachete .xlsx in {:.2f}s".format(len(self.new_uuids), time.time() - now))


class import_xlsx_custom(Importer):
@@ -236,7 +236,7 @@ class import_xlsx_custom(Importer):
            wb = load_workbook(data)
        except Exception as e:
            # @todo correct except
            flash(gettext("Unable to read export XLSX file, something wrong with the file?"), 'error')
            flash("Unable to read export XLSX file, something wrong with the file?", 'error')
            return

        # @todo check at least 2 rows, same in other method
@@ -265,7 +265,7 @@ class import_xlsx_custom(Importer):
                    validate_url(url)
                except ValidationError as e:
                    logger.error(f">> Import URL error {url} {str(e)}")
                    flash(gettext("Error processing row number {}, URL value was incorrect, row was skipped.").format(row_i), 'error')
                    flash(f"Error processing row number {row_i}, URL value was incorrect, row was skipped.", 'error')
                    # Don't bother processing anything else on this row
                    url = None
                    break
@@ -287,15 +287,16 @@ class import_xlsx_custom(Importer):
                new_uuid = datastore.add_watch(url=url,
                                               extras=extras,
                                               tag=tags,
                                               save_immediately=False)
                                               write_to_disk_now=False)
                if new_uuid:
                    # Straight into the queue.
                    self.new_uuids.append(new_uuid)
                    good += 1
            except Exception as e:
                logger.error(e)
                flash(gettext("Error processing row number {}, check all cell data types are correct, row was skipped.").format(row_i), 'error')
                flash(f"Error processing row number {row_i}, check all cell data types are correct, row was skipped.", 'error')
            else:
                row_i += 1

        flash(gettext("{} imported from custom .xlsx in {:.2f}s").format(len(self.new_uuids), time.time() - now))
        flash(
            "{} imported from custom .xlsx in {:.2f}s".format(len(self.new_uuids), time.time() - now))
@@ -6,9 +6,9 @@

    <div class="tabs collapsable">
        <ul>
            <li class="tab" id=""><a href="#url-list">{{ _('URL List') }}</a></li>
            <li class="tab"><a href="#distill-io">{{ _('Distill.io') }}</a></li>
            <li class="tab"><a href="#xlsx">{{ _('.XLSX & Wachete') }}</a></li>
            <li class="tab" id=""><a href="#url-list">URL List</a></li>
            <li class="tab"><a href="#distill-io">Distill.io</a></li>
            <li class="tab"><a href="#xlsx">.XLSX & Wachete</a></li>
        </ul>
    </div>

@@ -16,16 +16,12 @@
    <form class="pure-form" action="{{url_for('imports.import_page')}}" method="POST" enctype="multipart/form-data">
        <input type="hidden" name="csrf_token" value="{{ csrf_token() }}">
        <div class="tab-pane-inner" id="url-list">

            <p>
                {{ _('Restoring changedetection.io backups is in the') }}<a href="{{ url_for('backups.restore.restore') }}"> {{ _('backups section') }}</a>.
                <br>
            </p>
            <div class="pure-control-group">
                {{ _('Enter one URL per line, and optionally add tags for each URL after a space, delineated by comma (,):') }}
                Enter one URL per line, and optionally add tags for each URL after a space, delineated by comma
                (,):
                <br>
                <p><strong>{{ _('Example:') }} </strong><code>https://example.com tag1, tag2, last tag</code></p>
                {{ _('URLs which do not pass validation will stay in the textarea.') }}
                <p><strong>Example: </strong><code>https://example.com tag1, tag2, last tag</code></p>
                URLs which do not pass validation will stay in the textarea.
            </div>
            {{ render_field(form.processor, class="processor") }}

@@ -42,15 +38,20 @@
        </div>

        <div class="tab-pane-inner" id="distill-io">



            <div class="pure-control-group">
                {{ _('Copy and Paste your Distill.io watch \'export\' file, this should be a JSON file.') }}<br>
                {{ _('This is') }} <i>{{ _('experimental') }}</i>, {{ _('supported fields are') }} <code>name</code>, <code>uri</code>, <code>tags</code>, <code>config:selections</code>, {{ _('the rest (including') }} <code>schedule</code>) {{ _('are ignored.') }}
                Copy and Paste your Distill.io watch 'export' file, this should be a JSON file.<br>
                This is <i>experimental</i>, supported fields are <code>name</code>, <code>uri</code>, <code>tags</code>, <code>config:selections</code>, the rest (including <code>schedule</code>) are ignored.
                <br>
                <p>
                    {{ _('How to export?') }} <a href="https://distill.io/docs/web-monitor/how-export-and-import-monitors/">https://distill.io/docs/web-monitor/how-export-and-import-monitors/</a><br>
                    {{ _('Be sure to set your default fetcher to Chrome if required.') }}<br>
                    How to export? <a href="https://distill.io/docs/web-monitor/how-export-and-import-monitors/">https://distill.io/docs/web-monitor/how-export-and-import-monitors/</a><br>
                    Be sure to set your default fetcher to Chrome if required.<br>
                </p>
            </div>


            <textarea name="distill-io" class="pure-input-1-2" style="width: 100%;
                      font-family:monospace;
                      white-space: pre;
@@ -88,33 +89,32 @@
            </fieldset>
            <div class="pure-control-group">
                <span class="pure-form-message-inline">
                    {{ _('Table of custom column and data types mapping for the') }} <strong>{{ _('Custom mapping') }}</strong> {{ _('File mapping type.') }}
                    Table of custom column and data types mapping for the <strong>Custom mapping</strong> File mapping type.
                </span>
                <table style="border: 1px solid #aaa; padding: 0.5rem; border-radius: 4px;">
                    <tr>
                        <td><strong>{{ _('Column #') }}</strong></td>
                        <td><strong>Column #</strong></td>
                        {% for n in range(4) %}
                        <td><input type="number" name="custom_xlsx[col_{{n}}]" style="width: 4rem;" min="1"></td>
                        {% endfor %}
                    </tr>
                    <tr>
                        <td><strong>{{ _('Type') }}</strong></td>
                        <td><strong>Type</strong></td>
                        {% for n in range(4) %}
                        <td><select name="custom_xlsx[col_type_{{n}}]">
                            <option value="" style="color: #aaa"> -- {{ _('none') }} --</option>
                            <option value="url">{{ _('URL') }}</option>
                            <option value="title">{{ _('Title') }}</option>
                            <option value="include_filters">{{ _('CSS/xPath filter') }}</option>
                            <option value="tag">{{ _('Group / Tag name(s)') }}</option>
                            <option value="interval_minutes">{{ _('Recheck time (minutes)') }}</option>
                            <option value="" style="color: #aaa"> -- none --</option>
                            <option value="url">URL</option>
                            <option value="title">Title</option>
                            <option value="include_filters">CSS/xPath filter</option>
                            <option value="tag">Group / Tag name(s)</option>
                            <option value="interval_minutes">Recheck time (minutes)</option>
                        </select></td>
                        {% endfor %}
                    </tr>
                </table>
            </div>
        </div>
        <button type="submit" class="pure-button pure-input-1-2 pure-button-primary">{{ _('Import') }}</button>

        <button type="submit" class="pure-button pure-input-1-2 pure-button-primary">Import</button>
    </form>

</div>
@@ -4,7 +4,7 @@ from flask import Blueprint, flash, redirect, url_for
from flask_login import login_required
from changedetectionio.store import ChangeDetectionStore
from changedetectionio import queuedWatchMetaData
from changedetectionio import worker_pool
from changedetectionio import worker_handler
from queue import PriorityQueue

PRICE_DATA_TRACK_ACCEPT = 'accepted'
@@ -15,20 +15,18 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q: PriorityQueue
    price_data_follower_blueprint = Blueprint('price_data_follower', __name__)

    @login_required
    @price_data_follower_blueprint.route("/<uuid_str:uuid>/accept", methods=['GET'])
    @price_data_follower_blueprint.route("/<string:uuid>/accept", methods=['GET'])
    def accept(uuid):
        datastore.data['watching'][uuid]['track_ldjson_price_data'] = PRICE_DATA_TRACK_ACCEPT
        datastore.data['watching'][uuid]['processor'] = 'restock_diff'
        datastore.data['watching'][uuid].clear_watch()
        datastore.data['watching'][uuid].commit()
        worker_pool.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))
        worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))
        return redirect(url_for("watchlist.index"))

    @login_required
    @price_data_follower_blueprint.route("/<uuid_str:uuid>/reject", methods=['GET'])
    @price_data_follower_blueprint.route("/<string:uuid>/reject", methods=['GET'])
    def reject(uuid):
        datastore.data['watching'][uuid]['track_ldjson_price_data'] = PRICE_DATA_TRACK_REJECT
        datastore.data['watching'][uuid].commit()
        return redirect(url_for("watchlist.index"))
@@ -1,27 +1 @@
from copy import deepcopy
from loguru import logger

from changedetectionio.model import USE_SYSTEM_DEFAULT_NOTIFICATION_FORMAT_FOR_WATCH
from changedetectionio.notification import valid_notification_formats

RSS_CONTENT_FORMAT_DEFAULT = 'text'

# Some stuff not related
RSS_FORMAT_TYPES = deepcopy(valid_notification_formats)
if RSS_FORMAT_TYPES.get('markdown'):
    del RSS_FORMAT_TYPES['markdown']

if RSS_FORMAT_TYPES.get(USE_SYSTEM_DEFAULT_NOTIFICATION_FORMAT_FOR_WATCH):
    del RSS_FORMAT_TYPES[USE_SYSTEM_DEFAULT_NOTIFICATION_FORMAT_FOR_WATCH]

if not RSS_FORMAT_TYPES.get(RSS_CONTENT_FORMAT_DEFAULT):
    logger.critical(f"RSS_CONTENT_FORMAT_DEFAULT not in the acceptable list {RSS_CONTENT_FORMAT_DEFAULT}")

RSS_TEMPLATE_TYPE_OPTIONS = {'system_default': 'System default', 'notification_body': 'Notification body'}

# @note: We use <pre> because nearly all RSS readers render only HTML (Thunderbird for example can't do just plaintext)
RSS_TEMPLATE_PLAINTEXT_DEFAULT = "<pre>{{watch_label}} had a change.\n\n{{diff}}\n</pre>"

# @todo add some [edit]/[history]/[goto] etc links
# @todo need {{watch_edit_link}} + delete + history link token
RSS_TEMPLATE_HTML_DEFAULT = "<html><body>\n<h4><a href=\"{{watch_url}}\">{{watch_label}}</a></h4>\n<p>{{diff}}</p>\n</body></html>\n"
RSS_FORMAT_TYPES = [('plaintext', 'Plain text'), ('html', 'HTML Color')]
@@ -1,156 +0,0 @@
"""
Utility functions for RSS feed generation.
"""

from changedetectionio.notification.handler import process_notification
from changedetectionio.notification_service import NotificationContextData, _check_cascading_vars
from loguru import logger
import datetime
import pytz
import re


BAD_CHARS_REGEX = r'[\x00-\x08\x0B\x0C\x0E-\x1F]'


def scan_invalid_chars_in_rss(content):
    """
    Scan for invalid characters in RSS content.
    Returns True if invalid characters are found.
    """
    for match in re.finditer(BAD_CHARS_REGEX, content):
        i = match.start()
        bad_char = content[i]
        hex_value = f"0x{ord(bad_char):02x}"
        # Grab context
        start = max(0, i - 20)
        end = min(len(content), i + 21)
        context = content[start:end].replace('\n', '\\n').replace('\r', '\\r')
        logger.warning(f"Invalid char {hex_value} at pos {i}: ...{context}...")
        # First match is enough
        return True

    return False


def clean_entry_content(content):
    """
    Remove invalid characters from RSS content.
    """
    cleaned = re.sub(BAD_CHARS_REGEX, '', content)
    return cleaned
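
# --- Illustrative sketch (not from this changeset) --------------------------
# How the two helpers above combine before content reaches feedgen:
#
#   content = "price dropped\x00 to 9.99"         # NUL byte would break feedgen
#   if scan_invalid_chars_in_rss(content):        # logs the offending char with context
#       content = clean_entry_content(content)    # strips every BAD_CHARS_REGEX match
#   assert content == "price dropped to 9.99"
# -----------------------------------------------------------------------------
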
def generate_watch_guid(watch, timestamp):
    """
    Generate a unique GUID for a watch RSS entry.

    Args:
        watch: The watch object
        timestamp: The timestamp of the specific change this entry represents
    """
    return f"{watch['uuid']}/{timestamp}"


def validate_rss_token(datastore, request):
    """
    Validate the RSS access token from the request.

    Returns:
        tuple: (is_valid, error_response) where error_response is None if valid
    """
    app_rss_token = datastore.data['settings']['application'].get('rss_access_token')
    rss_url_token = request.args.get('token')

    if rss_url_token != app_rss_token:
        return False, ("Access denied, bad token", 403)

    return True, None


def get_rss_template(datastore, watch, rss_content_format, default_html, default_plaintext):
    """Get the appropriate template for RSS content."""
    if datastore.data['settings']['application'].get('rss_template_type') == 'notification_body':
        return _check_cascading_vars(datastore=datastore, var_name='notification_body', watch=watch)

    override = datastore.data['settings']['application'].get('rss_template_override')
    if override and override.strip():
        return override
    elif 'text' in rss_content_format:
        return default_plaintext
    else:
        return default_html


def get_watch_label(datastore, watch):
    """Get the label for a watch based on settings."""
    if datastore.data['settings']['application']['ui'].get('use_page_title_in_list') or watch.get('use_page_title_in_list'):
        return watch.label
    else:
        return watch.get('url')


def add_watch_categories(fe, watch, datastore):
    """Add category tags to a feed entry based on watch tags."""
    for tag_uuid in watch.get('tags', []):
        tag = datastore.data['settings']['application'].get('tags', {}).get(tag_uuid)
        if tag and tag.get('title'):
            fe.category(term=tag.get('title'))


def build_notification_context(watch, timestamp_from, timestamp_to, watch_label,
                               n_body_template, rss_content_format):
    """Build the notification context object."""
    return NotificationContextData(initial_data={
        'notification_urls': ['null://just-sending-a-null-test-for-the-render-in-RSS'],
        'notification_body': n_body_template,
        'timestamp_to': timestamp_to,
        'timestamp_from': timestamp_from,
        'watch_label': watch_label,
        'notification_format': rss_content_format
    })


def render_notification(n_object, notification_service, watch, datastore,
                        date_index_from=None, date_index_to=None):
    """Process and render the notification content."""
    kwargs = {'n_object': n_object, 'watch': watch}

    if date_index_from is not None and date_index_to is not None:
        kwargs['date_index_from'] = date_index_from
        kwargs['date_index_to'] = date_index_to

    n_object = notification_service.queue_notification_for_watch(**kwargs)
    n_object['watch_mime_type'] = None

    res = process_notification(n_object=n_object, datastore=datastore)
    return res[0]


def populate_feed_entry(fe, watch, content, guid, timestamp, link=None, title_suffix=None):
    """Populate a feed entry with content and metadata."""
    watch_label = watch.get('url')  # Already determined by caller

    # Set link
    if link:
        fe.link(link=link)

    # Set title
    if title_suffix:
        fe.title(title=f"{watch_label} - {title_suffix}")
    else:
        fe.title(title=watch_label)

    # Clean and set content
    if scan_invalid_chars_in_rss(content):
        content = clean_entry_content(content)
    fe.content(content=content, type='CDATA')

    # Set GUID
    fe.guid(guid, permalink=False)

    # Set pubDate using the timestamp of this specific change
    dt = datetime.datetime.fromtimestamp(int(timestamp))
    dt = dt.replace(tzinfo=pytz.UTC)
    fe.pubDate(dt)
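
# --- Illustrative sketch (not from this changeset) --------------------------
# How the removed utils above fitted together around feedgen; 'watch' and the
# timestamp are stand-ins, the link is an assumed value:
#
#   from feedgen.feed import FeedGenerator
#   fg = FeedGenerator()
#   fg.title('changedetection.io')
#   fg.link(href='https://changedetection.io')
#   fg.description('Feed description')
#
#   fe = fg.add_entry()
#   guid = generate_watch_guid(watch, 1700000000)          # -> "<uuid>/1700000000"
#   populate_feed_entry(fe, watch, content="<p>diff</p>",
#                       guid=guid, timestamp=1700000000,
#                       link="https://example.com/diff")   # assumed link
#   xml = fg.rss_str(pretty=True)
# -----------------------------------------------------------------------------
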
@@ -1,26 +1,147 @@
|
||||
|
||||
from changedetectionio.safe_jinja import render as jinja_render
|
||||
from changedetectionio.store import ChangeDetectionStore
|
||||
from flask import Blueprint
|
||||
from feedgen.feed import FeedGenerator
|
||||
from flask import Blueprint, make_response, request, url_for, redirect
|
||||
from loguru import logger
|
||||
import datetime
|
||||
import pytz
|
||||
import re
|
||||
import time
|
||||
|
||||
from . import tag as tag_routes
|
||||
from . import main_feed
|
||||
from . import single_watch
|
||||
|
||||
BAD_CHARS_REGEX=r'[\x00-\x08\x0B\x0C\x0E-\x1F]'
|
||||
|
||||
# Anything that is not text/UTF-8 should be stripped before it breaks feedgen (such as binary data etc)
|
||||
def scan_invalid_chars_in_rss(content):
|
||||
for match in re.finditer(BAD_CHARS_REGEX, content):
|
||||
i = match.start()
|
||||
bad_char = content[i]
|
||||
hex_value = f"0x{ord(bad_char):02x}"
|
||||
# Grab context
|
||||
start = max(0, i - 20)
|
||||
end = min(len(content), i + 21)
|
||||
context = content[start:end].replace('\n', '\\n').replace('\r', '\\r')
|
||||
logger.warning(f"Invalid char {hex_value} at pos {i}: ...{context}...")
|
||||
# First match is enough
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def clean_entry_content(content):
|
||||
cleaned = re.sub(BAD_CHARS_REGEX, '', content)
|
||||
return cleaned
|
||||
|
def construct_blueprint(datastore: ChangeDetectionStore):
    """
    Construct and configure the RSS blueprint with all routes.

    Args:
        datastore: The ChangeDetectionStore instance

    Returns:
        The configured Flask blueprint
    """
    rss_blueprint = Blueprint('rss', __name__)

    # Register all route modules
    main_feed.construct_main_feed_routes(rss_blueprint, datastore)
    single_watch.construct_single_watch_routes(rss_blueprint, datastore)
    tag_routes.construct_tag_routes(rss_blueprint, datastore)
    # Some RSS reader situations ended up with rss/ (forward slash after RSS) due
    # to some earlier blueprint rerouting work, it should goto feed.
    @rss_blueprint.route("/", methods=['GET'])
    def extraslash():
        return redirect(url_for('rss.feed'))

    # Import the login decorator if needed
    # from changedetectionio.auth_decorator import login_optionally_required
    @rss_blueprint.route("", methods=['GET'])
    def feed():
        now = time.time()
        # Always requires token set
        app_rss_token = datastore.data['settings']['application'].get('rss_access_token')
        rss_url_token = request.args.get('token')
        if rss_url_token != app_rss_token:
            return "Access denied, bad token", 403

        from changedetectionio import diff
        limit_tag = request.args.get('tag', '').lower().strip()
        # Be sure limit_tag is a uuid
        for uuid, tag in datastore.data['settings']['application'].get('tags', {}).items():
            if limit_tag == tag.get('title', '').lower().strip():
                limit_tag = uuid

        # Sort by last_changed and add the uuid which is usually the key..
        sorted_watches = []

        # @todo needs a .itemsWithTag() or something - then we can use that in Jinja2 and throw this away
        for uuid, watch in datastore.data['watching'].items():
            # @todo tag notification_muted skip also (improve Watch model)
            if datastore.data['settings']['application'].get('rss_hide_muted_watches') and watch.get('notification_muted'):
                continue
            if limit_tag and not limit_tag in watch['tags']:
                continue
            watch['uuid'] = uuid
            sorted_watches.append(watch)

        sorted_watches.sort(key=lambda x: x.last_changed, reverse=False)

        fg = FeedGenerator()
        fg.title('changedetection.io')
        fg.description('Feed description')
        fg.link(href='https://changedetection.io')

        html_colour_enable = False
        if datastore.data['settings']['application'].get('rss_content_format') == 'html':
            html_colour_enable = True

        for watch in sorted_watches:

            dates = list(watch.history.keys())
            # Re #521 - Don't bother processing this one if there's less than 2 snapshots, means we never had a change detected.
            if len(dates) < 2:
                continue

            if not watch.viewed:
                # Re #239 - GUID needs to be individual for each event
                # @todo In the future make this a configurable link back (see work on BASE_URL https://github.com/dgtlmoon/changedetection.io/pull/228)
                guid = "{}/{}".format(watch['uuid'], watch.last_changed)
                fe = fg.add_entry()

                # Include a link to the diff page, they will have to login here to see if password protection is enabled.
                # Description is the page you watch, link takes you to the diff JS UI page
                # Dict val base_url will get overridden with the env var if it is set.
                ext_base_url = datastore.data['settings']['application'].get('active_base_url')
                # @todo fix

                # Because we are called via whatever web server, flask should figure out the right path (
                diff_link = {'href': url_for('ui.ui_views.diff_history_page', uuid=watch['uuid'], _external=True)}

                fe.link(link=diff_link)

                # @todo watch should be a getter - watch.get('title') (internally if URL else..)

                watch_title = watch.get('title') if watch.get('title') else watch.get('url')
                fe.title(title=watch_title)
                try:

                    html_diff = diff.render_diff(previous_version_file_contents=watch.get_history_snapshot(dates[-2]),
                                                 newest_version_file_contents=watch.get_history_snapshot(dates[-1]),
                                                 include_equal=False,
                                                 line_feed_sep="<br>",
                                                 html_colour=html_colour_enable
                                                 )
                except FileNotFoundError as e:
                    html_diff = f"History snapshot file for watch {watch.get('uuid')}@{watch.last_changed} - '{watch.get('title')}' not found."

                # @todo Make this configurable and also consider html-colored markup
                # @todo User could decide if <link> goes to the diff page, or to the watch link
                rss_template = "<html><body>\n<h4><a href=\"{{watch_url}}\">{{watch_title}}</a></h4>\n<p>{{html_diff}}</p>\n</body></html>\n"

                content = jinja_render(template_str=rss_template, watch_title=watch_title, html_diff=html_diff, watch_url=watch.link)

                # Out of range chars could also break feedgen
                if scan_invalid_chars_in_rss(content):
                    content = clean_entry_content(content)

                fe.content(content=content, type='CDATA')
                fe.guid(guid, permalink=False)
                dt = datetime.datetime.fromtimestamp(int(watch.newest_history_key))
                dt = dt.replace(tzinfo=pytz.UTC)
                fe.pubDate(dt)

        response = make_response(fg.rss_str())
        response.headers.set('Content-Type', 'application/rss+xml;charset=utf-8')
        logger.trace(f"RSS generated in {time.time() - now:.3f}s")
        return response

    return rss_blueprint
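A client-side sketch of consuming this feed: the route above requires the application's rss_access_token as a ?token= query parameter and answers 403 otherwise. The base URL and the /rss mount point are assumptions for the example:

import requests

def fetch_rss(base_url="http://localhost:5000", token="<rss_access_token>", tag=""):
    # A bad token returns 403 "Access denied, bad token"; raise_for_status() surfaces it
    r = requests.get(f"{base_url}/rss", params={'token': token, 'tag': tag}, timeout=30)
    r.raise_for_status()
    return r.text  # application/rss+xml;charset=utf-8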
@@ -1,105 +0,0 @@
from flask import make_response, request, url_for, redirect


def construct_main_feed_routes(rss_blueprint, datastore):
    """
    Construct the main RSS feed routes.

    Args:
        rss_blueprint: The Flask blueprint to add routes to
        datastore: The ChangeDetectionStore instance
    """

    # Some RSS reader situations ended up with rss/ (forward slash after RSS) due
    # to some earlier blueprint rerouting work, it should goto feed.
    @rss_blueprint.route("/", methods=['GET'])
    def extraslash():
        return redirect(url_for('rss.feed'))

    # Import the login decorator if needed
    # from changedetectionio.auth_decorator import login_optionally_required
    @rss_blueprint.route("", methods=['GET'])
    def feed():
        from feedgen.feed import FeedGenerator
        from loguru import logger
        import time

        from . import RSS_TEMPLATE_HTML_DEFAULT, RSS_TEMPLATE_PLAINTEXT_DEFAULT
        from ._util import (validate_rss_token, generate_watch_guid, get_rss_template,
                            get_watch_label, build_notification_context, render_notification,
                            populate_feed_entry, add_watch_categories)
        from ...notification_service import NotificationService

        now = time.time()

        # Validate token
        is_valid, error = validate_rss_token(datastore, request)
        if not is_valid:
            return error

        rss_content_format = datastore.data['settings']['application'].get('rss_content_format')

        limit_tag = request.args.get('tag', '').lower().strip()
        # Be sure limit_tag is a uuid
        for uuid, tag in datastore.data['settings']['application'].get('tags', {}).items():
            if limit_tag == tag.get('title', '').lower().strip():
                limit_tag = uuid

        # Sort by last_changed and add the uuid which is usually the key..
        sorted_watches = []

        # @todo needs a .itemsWithTag() or something - then we can use that in Jinja2 and throw this away
        for uuid, watch in datastore.data['watching'].items():
            # @todo tag notification_muted skip also (improve Watch model)
            if datastore.data['settings']['application'].get('rss_hide_muted_watches') and watch.get('notification_muted'):
                continue
            if limit_tag and not limit_tag in watch['tags']:
                continue
            sorted_watches.append(watch)

        sorted_watches.sort(key=lambda x: x.last_changed, reverse=False)

        fg = FeedGenerator()
        fg.title('changedetection.io')
        fg.description('Feed description')
        fg.link(href='https://changedetection.io')
        notification_service = NotificationService(datastore=datastore, notification_q=False)

        for watch in sorted_watches:

            dates = list(watch.history.keys())
            # Re #521 - Don't bother processing this one if there's less than 2 snapshots, means we never had a change detected.
            if len(dates) < 2:
                continue

            if not watch.viewed:
                # Re #239 - GUID needs to be individual for each event
                # @todo In the future make this a configurable link back (see work on BASE_URL https://github.com/dgtlmoon/changedetection.io/pull/228)
                watch_label = get_watch_label(datastore, watch)
                timestamp_to = dates[-1]
                timestamp_from = dates[-2]
                guid = generate_watch_guid(watch, timestamp_to)
                # Because we are called via whatever web server, flask should figure out the right path
                diff_link = {'href': url_for('ui.ui_diff.diff_history_page', uuid=watch['uuid'], _external=True)}

                # Get template and build notification context
                n_body_template = get_rss_template(datastore, watch, rss_content_format,
                                                   RSS_TEMPLATE_HTML_DEFAULT, RSS_TEMPLATE_PLAINTEXT_DEFAULT)

                n_object = build_notification_context(watch, timestamp_from, timestamp_to,
                                                      watch_label, n_body_template, rss_content_format)

                # Render notification
                res = render_notification(n_object, notification_service, watch, datastore)

                # Create and populate feed entry
                fe = fg.add_entry()
                populate_feed_entry(fe, watch, res['body'], guid, timestamp_to, link=diff_link)
                fe.title(title=watch_label)  # Override title to not include suffix
                add_watch_categories(fe, watch, datastore)

        response = make_response(fg.rss_str())
        response.headers.set('Content-Type', 'application/rss+xml;charset=utf-8')
        logger.trace(f"RSS generated in {time.time() - now:.3f}s")
        return response
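validate_rss_token() is imported from ._util above but its body is not part of this diff; a plausible sketch, based on the inline token check in the old rss/__init__.py feed() shown earlier, would be:

def validate_rss_token(datastore, request):
    """Return (True, None) when the ?token= query arg matches the stored access token."""
    app_rss_token = datastore.data['settings']['application'].get('rss_access_token')
    if request.args.get('token') != app_rss_token:
        # Same response the old inline check produced
        return False, ("Access denied, bad token", 403)
    return True, None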
@@ -1,115 +0,0 @@


def construct_single_watch_routes(rss_blueprint, datastore):
    """
    Construct RSS feed routes for single watches.

    Args:
        rss_blueprint: The Flask blueprint to add routes to
        datastore: The ChangeDetectionStore instance
    """

    @rss_blueprint.route("/watch/<uuid_str:uuid>", methods=['GET'])
    def rss_single_watch(uuid):
        import time

        from flask import make_response, request, Response
        from flask_babel import lazy_gettext as _l
        from feedgen.feed import FeedGenerator
        from loguru import logger

        from . import RSS_TEMPLATE_HTML_DEFAULT, RSS_TEMPLATE_PLAINTEXT_DEFAULT
        from ._util import (validate_rss_token, get_rss_template, get_watch_label,
                            build_notification_context, render_notification,
                            populate_feed_entry, add_watch_categories)
        from ...notification_service import NotificationService

        """
        Display the most recent changes for a single watch as RSS feed.
        Returns RSS XML with multiple entries showing diffs between consecutive snapshots.
        The number of entries is controlled by the rss_diff_length setting.
        """
        now = time.time()

        # Validate token
        is_valid, error = validate_rss_token(datastore, request)
        if not is_valid:
            return error

        rss_content_format = datastore.data['settings']['application'].get('rss_content_format')

        if uuid == 'first':
            uuid = list(datastore.data['watching'].keys()).pop()
        # Get the watch by UUID
        watch = datastore.data['watching'].get(uuid)
        if not watch:
            return Response(_l("Watch with UUID %(uuid)s not found", uuid=uuid), status=404, mimetype='text/plain')

        # Check if watch has at least 2 history snapshots
        dates = list(watch.history.keys())
        if len(dates) < 2:
            return Response(_l("Watch %(uuid)s does not have enough history snapshots to show changes (need at least 2)", uuid=uuid), status=400, mimetype='text/plain')

        # Get the number of diffs to include (default: 5)
        rss_diff_length = datastore.data['settings']['application'].get('rss_diff_length', 5)

        # Calculate how many diffs we can actually show (limited by available history)
        # We need at least 2 snapshots to create 1 diff
        max_possible_diffs = len(dates) - 1
        num_diffs = min(rss_diff_length, max_possible_diffs) if rss_diff_length > 0 else max_possible_diffs

        # Create RSS feed
        fg = FeedGenerator()

        # Set title: use "label (url)" if label differs from url, otherwise just url
        watch_url = watch.get('url', '')
        watch_label = get_watch_label(datastore, watch)

        if watch_label != watch_url:
            feed_title = f'changedetection.io - {watch_label} ({watch_url})'
        else:
            feed_title = f'changedetection.io - {watch_url}'

        fg.title(feed_title)
        fg.description('Changes')
        fg.link(href='https://changedetection.io')

        # Loop through history and create RSS entries for each diff
        # Add entries in reverse order because feedgen reverses them
        # This way, the newest change appears first in the final RSS

        notification_service = NotificationService(datastore=datastore, notification_q=False)
        for i in range(num_diffs - 1, -1, -1):
            # Calculate indices for this diff (working backwards from newest)
            # i=0: compare dates[-2] to dates[-1] (most recent change)
            # i=1: compare dates[-3] to dates[-2] (previous change)
            # etc.
            date_index_to = -(i + 1)
            date_index_from = -(i + 2)
            timestamp_to = dates[date_index_to]
            timestamp_from = dates[date_index_from]

            # Get template and build notification context
            n_body_template = get_rss_template(datastore, watch, rss_content_format,
                                               RSS_TEMPLATE_HTML_DEFAULT, RSS_TEMPLATE_PLAINTEXT_DEFAULT)

            n_object = build_notification_context(watch, timestamp_from, timestamp_to,
                                                  watch_label, n_body_template, rss_content_format)

            # Render notification with date indices
            res = render_notification(n_object, notification_service, watch, datastore,
                                      date_index_from, date_index_to)

            # Create and populate feed entry
            guid = f"{uuid}/{timestamp_to}"
            fe = fg.add_entry()
            title_suffix = f"Change @ {res['original_context']['change_datetime']}"
            populate_feed_entry(fe, watch, res.get('body', ''), guid, timestamp_to,
                                link={'href': watch.get('url')}, title_suffix=title_suffix)
            add_watch_categories(fe, watch, datastore)

        response = make_response(fg.rss_str())
        response.headers.set('Content-Type', 'application/rss+xml;charset=utf-8')
        logger.debug(f"RSS Single watch built in {time.time()-now:.2f}s")

        return response
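A worked example of the index arithmetic in the loop above, with four snapshots and num_diffs = 3 (the timestamps are illustrative):

dates = ['t1', 't2', 't3', 't4']
pairs = []
for i in range(3 - 1, -1, -1):        # i = 2, 1, 0
    date_index_to = -(i + 1)
    date_index_from = -(i + 2)
    pairs.append((dates[date_index_from], dates[date_index_to]))

# pairs == [('t1', 't2'), ('t2', 't3'), ('t3', 't4')]
# The oldest pair is added first; feedgen reverses entries, so the newest
# change ('t3' -> 't4') ends up first in the final RSS output.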
@@ -1,95 +0,0 @@
def construct_tag_routes(rss_blueprint, datastore):
    """
    Construct RSS feed routes for tags.

    Args:
        rss_blueprint: The Flask blueprint to add routes to
        datastore: The ChangeDetectionStore instance
    """

    @rss_blueprint.route("/tag/<uuid_str:tag_uuid>", methods=['GET'])
    def rss_tag_feed(tag_uuid):

        from flask import make_response, request, url_for
        from feedgen.feed import FeedGenerator

        from . import RSS_TEMPLATE_HTML_DEFAULT, RSS_TEMPLATE_PLAINTEXT_DEFAULT
        from ._util import (validate_rss_token, generate_watch_guid, get_rss_template,
                            get_watch_label, build_notification_context, render_notification,
                            populate_feed_entry, add_watch_categories)
        from ...notification_service import NotificationService

        """
        Display an RSS feed for all unviewed watches that belong to a specific tag.
        Returns RSS XML with entries for each unviewed watch with sufficient history.
        """
        # Validate token
        is_valid, error = validate_rss_token(datastore, request)
        if not is_valid:
            return error

        rss_content_format = datastore.data['settings']['application'].get('rss_content_format')

        # Verify tag exists
        tag = datastore.data['settings']['application'].get('tags', {}).get(tag_uuid)
        if not tag:
            return f"Tag with UUID {tag_uuid} not found", 404

        tag_title = tag.get('title', 'Unknown Tag')

        # Create RSS feed
        fg = FeedGenerator()
        fg.title(f'changedetection.io - {tag_title}')
        fg.description(f'Changes for watches tagged with {tag_title}')
        fg.link(href='https://changedetection.io')
        notification_service = NotificationService(datastore=datastore, notification_q=False)
        # Find all watches with this tag
        for uuid, watch in datastore.data['watching'].items():
            # @todo This is wrong, it needs to sort by most recently changed and then limit it datastore.data['watching'].items().sorted(?)
            # So get all watches in this tag then sort

            # Skip if watch doesn't have this tag
            if tag_uuid not in watch.get('tags', []):
                continue

            # Skip muted watches if configured
            if datastore.data['settings']['application'].get('rss_hide_muted_watches') and watch.get('notification_muted'):
                continue

            # Check if watch has at least 2 history snapshots
            dates = list(watch.history.keys())
            if len(dates) < 2:
                continue

            # Only include unviewed watches
            if not watch.viewed:
                # Include a link to the diff page (use uuid from loop, don't modify watch dict)
                diff_link = {'href': url_for('ui.ui_diff.diff_history_page', uuid=uuid, _external=True)}

                # Get watch label
                watch_label = get_watch_label(datastore, watch)

                # Get template and build notification context
                timestamp_to = dates[-1]
                timestamp_from = dates[-2]

                # Generate GUID for this entry
                guid = generate_watch_guid(watch, timestamp_to)
                n_body_template = get_rss_template(datastore, watch, rss_content_format,
                                                   RSS_TEMPLATE_HTML_DEFAULT, RSS_TEMPLATE_PLAINTEXT_DEFAULT)

                n_object = build_notification_context(watch, timestamp_from, timestamp_to,
                                                      watch_label, n_body_template, rss_content_format)

                # Render notification
                res = render_notification(n_object, notification_service, watch, datastore)

                # Create and populate feed entry
                fe = fg.add_entry()
                title_suffix = f"Change @ {res['original_context']['change_datetime']}"
                populate_feed_entry(fe, watch, res['body'], guid, timestamp_to, link=diff_link, title_suffix=title_suffix)
                add_watch_categories(fe, watch, datastore)

        response = make_response(fg.rss_str())
        response.headers.set('Content-Type', 'application/rss+xml;charset=utf-8')
        return response
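The @todo above notes that the tag feed should really collect the tagged watches first, sort by most recent change, then limit. A possible sketch of that idea (the helper name and the limit value are assumptions, not part of the diff):

def watches_for_tag_sorted(datastore, tag_uuid, limit=50):
    # Collect only the watches carrying this tag, newest change first
    tagged = [watch for watch in datastore.data['watching'].values()
              if tag_uuid in watch.get('tags', [])]
    tagged.sort(key=lambda watch: watch.last_changed, reverse=True)
    return tagged[:limit]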
@@ -1,12 +1,10 @@
import os
from copy import deepcopy
from datetime import datetime, timedelta
from datetime import datetime
from zoneinfo import ZoneInfo, available_timezones
import secrets
import time
import flask_login
from flask import Blueprint, render_template, request, redirect, url_for, flash
from flask_babel import gettext

from changedetectionio.store import ChangeDetectionStore
from changedetectionio.auth_decorator import login_optionally_required
@@ -19,12 +17,6 @@ def construct_blueprint(datastore: ChangeDetectionStore):
    @login_optionally_required
    def settings_page():
        from changedetectionio import forms
        from changedetectionio.pluggy_interface import (
            get_plugin_settings_tabs,
            load_plugin_settings,
            save_plugin_settings
        )


        default = deepcopy(datastore.data['settings'])
        if datastore.proxy_list is not None:
@@ -62,7 +54,7 @@ def construct_blueprint(datastore: ChangeDetectionStore):
            # SALTED_PASS means the password is "locked" to what we set in the Env var
            if not os.getenv("SALTED_PASS", False):
                datastore.remove_password()
                flash(gettext("Password protection removed."), 'notice')
                flash("Password protection removed.", 'notice')
                flask_login.logout_user()
                return redirect(url_for('settings.settings_page'))

@@ -75,100 +67,51 @@ def construct_blueprint(datastore: ChangeDetectionStore):
                    del (app_update['password'])

                datastore.data['settings']['application'].update(app_update)


                # Handle dynamic worker count adjustment
                old_worker_count = datastore.data['settings']['requests'].get('workers', 1)
                new_worker_count = form.data['requests'].get('workers', 1)


                datastore.data['settings']['requests'].update(form.data['requests'])
                datastore.commit()

                # Clear all checksums to force reprocessing with new settings
                # Global settings can affect watch behavior (filters, rendering, etc.)
                datastore.clear_all_last_checksums()


                # Adjust worker count if it changed
                if new_worker_count != old_worker_count:
                    from changedetectionio import worker_pool
                    from changedetectionio import worker_handler
                    from changedetectionio.flask_app import update_q, notification_q, app, datastore as ds

                    # Check CPU core availability and warn if worker count is high
                    cpu_count = os.cpu_count()
                    if cpu_count and new_worker_count >= (cpu_count * 0.9):
                        flash(gettext("Warning: Worker count ({}) is close to or exceeds available CPU cores ({})").format(
                            new_worker_count, cpu_count), 'warning')

                    result = worker_pool.adjust_async_worker_count(

                    result = worker_handler.adjust_async_worker_count(
                        new_count=new_worker_count,
                        update_q=update_q,
                        notification_q=notification_q,
                        app=app,
                        datastore=ds
                    )


                    if result['status'] == 'success':
                        flash(gettext("Worker count adjusted: {}").format(result['message']), 'notice')
                        flash(f"Worker count adjusted: {result['message']}", 'notice')
                    elif result['status'] == 'not_supported':
                        flash(gettext("Dynamic worker adjustment not supported for sync workers"), 'warning')
                        flash("Dynamic worker adjustment not supported for sync workers", 'warning')
                    elif result['status'] == 'error':
                        flash(gettext("Error adjusting workers: {}").format(result['message']), 'error')
                        flash(f"Error adjusting workers: {result['message']}", 'error')

                if not os.getenv("SALTED_PASS", False) and len(form.application.form.password.encrypted_password):
                    datastore.data['settings']['application']['password'] = form.application.form.password.encrypted_password
                    datastore.commit()
                    flash(gettext("Password protection enabled."), 'notice')
                    datastore.needs_write_urgent = True
                    flash("Password protection enabled.", 'notice')
                    flask_login.logout_user()
                    return redirect(url_for('watchlist.index'))

                # Also save plugin settings from the same form submission
                plugin_tabs_list = get_plugin_settings_tabs()
                for tab in plugin_tabs_list:
                    plugin_id = tab['plugin_id']
                    form_class = tab['form_class']

                    # Instantiate plugin form with POST data
                    plugin_form = form_class(formdata=request.form)

                    # Save plugin settings (validation is optional for plugins)
                    if plugin_form.data:
                        save_plugin_settings(datastore.datastore_path, plugin_id, plugin_form.data)

                flash(gettext("Settings updated."))
                datastore.needs_write_urgent = True
                flash("Settings updated.")

            else:
                flash(gettext("An error occurred, please see below."), "error")
                flash("An error occurred, please see below.", "error")

        # Convert to ISO 8601 format, all date/time relative events stored as UTC time
        utc_time = datetime.now(ZoneInfo("UTC")).isoformat()

        # Get active plugins
        from changedetectionio.pluggy_interface import get_active_plugins
        import sys
        active_plugins = get_active_plugins()
        python_version = f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"

        # Calculate uptime in seconds
        uptime_seconds = time.time() - datastore.start_time

        # Get plugin settings tabs and instantiate forms
        plugin_tabs = get_plugin_settings_tabs()
        plugin_forms = {}

        for tab in plugin_tabs:
            plugin_id = tab['plugin_id']
            form_class = tab['form_class']

            # Load existing settings
            settings = load_plugin_settings(datastore.datastore_path, plugin_id)

            # Instantiate the form with existing settings
            plugin_forms[plugin_id] = form_class(data=settings)

        output = render_template("settings.html",
                                 active_plugins=active_plugins,
                                 api_key=datastore.data['settings']['application'].get('api_access_token'),
                                 python_version=python_version,
                                 uptime_seconds=uptime_seconds,
                                 available_timezones=sorted(available_timezones()),
                                 emailprefix=os.getenv('NOTIFICATION_MAIL_BUTTON_PREFIX', False),
                                 extra_notification_token_placeholder_info=datastore.get_unique_notification_token_placeholders_available(),
@@ -176,10 +119,8 @@ def construct_blueprint(datastore: ChangeDetectionStore):
                                 hide_remove_pass=os.getenv("SALTED_PASS", False),
                                 min_system_recheck_seconds=int(os.getenv('MINIMUM_SECONDS_RECHECK_TIME', 3)),
                                 settings_application=datastore.data['settings']['application'],
                                 timezone_default_config=datastore.data['settings']['application'].get('scheduler_timezone_default'),
                                 timezone_default_config=datastore.data['settings']['application'].get('timezone'),
                                 utc_time=utc_time,
                                 plugin_tabs=plugin_tabs,
                                 plugin_forms=plugin_forms,
                                 )

        return output
@@ -189,8 +130,8 @@ def construct_blueprint(datastore: ChangeDetectionStore):
    def settings_reset_api_key():
        secret = secrets.token_hex(16)
        datastore.data['settings']['application']['api_access_token'] = secret
        datastore.commit()
        flash(gettext("API Key was regenerated."))
        datastore.needs_write_urgent = True
        flash("API Key was regenerated.")
        return redirect(url_for('settings.settings_page')+'#api')

    @settings_blueprint.route("/notification-logs", methods=['GET'])
@@ -201,32 +142,4 @@ def construct_blueprint(datastore: ChangeDetectionStore):
                                logs=notification_debug_log if len(notification_debug_log) else ["Notification logs are empty - no notifications sent yet."])
        return output

    @settings_blueprint.route("/toggle-all-paused", methods=['GET'])
    @login_optionally_required
    def toggle_all_paused():
        current_state = datastore.data['settings']['application'].get('all_paused', False)
        datastore.data['settings']['application']['all_paused'] = not current_state
        datastore.commit()

        if datastore.data['settings']['application']['all_paused']:
            flash(gettext("Automatic scheduling paused - checks will not be queued."), 'notice')
        else:
            flash(gettext("Automatic scheduling resumed - checks will be queued normally."), 'notice')

        return redirect(url_for('watchlist.index'))

    @settings_blueprint.route("/toggle-all-muted", methods=['GET'])
    @login_optionally_required
    def toggle_all_muted():
        current_state = datastore.data['settings']['application'].get('all_muted', False)
        datastore.data['settings']['application']['all_muted'] = not current_state
        datastore.commit()

        if datastore.data['settings']['application']['all_muted']:
            flash(gettext("All notifications muted."), 'notice')
        else:
            flash(gettext("All notifications unmuted."), 'notice')

        return redirect(url_for('watchlist.index'))

    return settings_blueprint
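The settings page above consumes plugin tabs as dicts carrying 'plugin_id', 'form_class', 'tab_label' and an optional 'template_path', instantiating the form class with formdata= on save and data= on display. A minimal sketch of what a plugin might register; the WTForms class and field names are assumptions for illustration:

from wtforms import Form, BooleanField, StringField

class ExamplePluginSettingsForm(Form):
    enabled = BooleanField('Enable example plugin', default=False)
    api_endpoint = StringField('API endpoint', default='https://example.invalid')

example_tab = {
    'plugin_id': 'example_plugin',
    'tab_label': 'Example Plugin',
    'form_class': ExamplePluginSettingsForm,
    'template_path': None,  # None falls back to the default field rendering
}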
@@ -4,7 +4,7 @@
<div class="edit-form">
    <div class="inner">

        <h4 style="margin-top: 0px;">{{ _('Notification debug log') }}</h4>
        <h4 style="margin-top: 0px;">Notification debug log</h4>
        <div id="notification-error-log">
            <ul style="font-size: 80%; margin:0px; padding: 0 0 0 7px">
                {% for log in logs|reverse %}

||||
@@ -1,8 +1,8 @@
|
||||
{% extends 'base.html' %}
|
||||
|
||||
{% block content %}
|
||||
{% from '_helpers.html' import render_field, render_checkbox_field, render_button, render_time_schedule_form, render_ternary_field, render_fieldlist_with_inline_errors %}
|
||||
{% from '_common_fields.html' import render_common_settings_form, show_token_placeholders %}
|
||||
{% from '_helpers.html' import render_field, render_checkbox_field, render_simple_field, render_button, render_time_schedule_form %}
|
||||
{% from '_common_fields.html' import render_common_settings_form %}
|
||||
<script>
|
||||
const notification_base_url="{{url_for('ui.ui_notification.ajax_callback_send_notification_test', mode="global-settings")}}";
|
||||
{% if emailprefix %}
|
||||
@@ -18,22 +18,15 @@
|
||||
<div class="edit-form">
|
||||
<div class="tabs collapsable">
|
||||
<ul>
|
||||
<li class="tab" id=""><a href="#general">{{ _('General') }}</a></li>
|
||||
<li class="tab"><a href="#notifications">{{ _('Notifications') }}</a></li>
|
||||
<li class="tab"><a href="#fetching">{{ _('Fetching') }}</a></li>
|
||||
<li class="tab"><a href="#filters">{{ _('Global Filters') }}</a></li>
|
||||
<li class="tab"><a href="#ui-options">{{ _('UI Options') }}</a></li>
|
||||
<li class="tab"><a href="#api">{{ _('API') }}</a></li>
|
||||
<li class="tab"><a href="#rss">{{ _('RSS') }}</a></li>
|
||||
<li class="tab"><a href="{{ url_for('backups.create') }}">{{ _('Backups') }}</a></li>
|
||||
<li class="tab"><a href="#timedate">{{ _('Time & Date') }}</a></li>
|
||||
<li class="tab"><a href="#proxies">{{ _('CAPTCHA & Proxies') }}</a></li>
|
||||
{% if plugin_tabs %}
|
||||
{% for tab in plugin_tabs %}
|
||||
<li class="tab"><a href="#plugin-{{ tab.plugin_id }}">{{ tab.tab_label }}</a></li>
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
<li class="tab"><a href="#info">{{ _('Info') }}</a></li>
|
||||
<li class="tab" id=""><a href="#general">General</a></li>
|
||||
<li class="tab"><a href="#notifications">Notifications</a></li>
|
||||
<li class="tab"><a href="#fetching">Fetching</a></li>
|
||||
<li class="tab"><a href="#filters">Global Filters</a></li>
|
||||
<li class="tab"><a href="#ui-options">UI Options</a></li>
|
||||
<li class="tab"><a href="#ai-options"><i data-feather="aperture" style="width: 14px; height: 14px; margin-right: 4px;"></i> AI</a></li>
|
||||
<li class="tab"><a href="#api">API</a></li>
|
||||
<li class="tab"><a href="#timedate">Time & Date</a></li>
|
||||
<li class="tab"><a href="#proxies">CAPTCHA & Proxies</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
<div class="box-wrap inner">
|
||||
@@ -43,73 +36,84 @@
|
||||
<fieldset>
|
||||
<div class="pure-control-group">
|
||||
{{ render_field(form.requests.form.time_between_check, class="time-check-widget") }}
|
||||
|
||||
<span class="pure-form-message-inline">{{ _('Default recheck time for all watches, current system minimum is') }} <i>{{min_system_recheck_seconds}}</i> {{ _('seconds') }} (<a href="https://github.com/dgtlmoon/changedetection.io/wiki/Misc-system-settings#enviroment-variables">{{ _('more info') }}</a>).</span>
|
||||
<span class="pure-form-message-inline">Default recheck time for all watches, current system minimum is <i>{{min_system_recheck_seconds}}</i> seconds (<a href="https://github.com/dgtlmoon/changedetection.io/wiki/Misc-system-settings#enviroment-variables">more info</a>).</span>
|
||||
<div id="time-between-check-schedule">
|
||||
<!-- Start Time and End Time {{ timezone_default_config }} -->
|
||||
<!-- Start Time and End Time -->
|
||||
<div id="limit-between-time">
|
||||
{{ render_time_schedule_form(form.requests, available_timezones, timezone_default_config) }}
|
||||
{{ render_time_schedule_form(form.requests, available_timezones, timezone_default_config) }}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_field(form.application.form.filter_failure_notification_threshold_attempts, class="filter_failure_notification_threshold_attempts") }}
|
||||
<span class="pure-form-message-inline">{{ _('After this many consecutive times that the CSS/xPath filter is missing, send a notification') }}
|
||||
<br>
|
||||
{{ _('Set to') }} <strong>0</strong> {{ _('to disable') }}
|
||||
</span>
|
||||
{{ render_field(form.requests.form.jitter_seconds, class="jitter_seconds") }}
|
||||
<span class="pure-form-message-inline">Example - 3 seconds random jitter could trigger up to 3 seconds earlier or up to 3 seconds later</span>
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_field(form.application.form.history_snapshot_max_length, class="history_snapshot_max_length") }}
|
||||
<span class="pure-form-message-inline">{{ _('Limit collection of history snapshots for each watch to this number of history items.') }}
|
||||
{{ render_field(form.application.form.filter_failure_notification_threshold_attempts, class="filter_failure_notification_threshold_attempts") }}
|
||||
<span class="pure-form-message-inline">After this many consecutive times that the CSS/xPath filter is missing, send a notification
|
||||
<br>
|
||||
{{ _('Set to empty to disable / no limit') }}
|
||||
Set to <strong>0</strong> to disable
|
||||
</span>
|
||||
</div>
|
||||
|
||||
<div class="pure-control-group">
|
||||
{% if not hide_remove_pass %}
|
||||
{% if current_user.is_authenticated %}
|
||||
{{ render_button(form.application.form.removepassword_button) }}
|
||||
{% else %}
|
||||
{{ render_field(form.application.form.password) }}
|
||||
<span class="pure-form-message-inline">{{ _('Password protection for your changedetection.io application.') }}</span>
|
||||
<span class="pure-form-message-inline">Password protection for your changedetection.io application.</span>
|
||||
{% endif %}
|
||||
{% else %}
|
||||
<span class="pure-form-message-inline">{{ _('Password is locked.') }}</span>
|
||||
<span class="pure-form-message-inline">Password is locked.</span>
|
||||
{% endif %}
|
||||
</div>
|
||||
|
||||
<div class="pure-control-group">
|
||||
{{ render_checkbox_field(form.application.form.shared_diff_access, class="shared_diff_access") }}
|
||||
<span class="pure-form-message-inline">{{ _('Allow access to the watch change history page when password is enabled (Good for sharing the diff page)') }}</span>
|
||||
<span class="pure-form-message-inline">Allow access to view watch diff page when password is enabled (Good for sharing the diff page)
|
||||
</span>
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_checkbox_field(form.application.form.rss_hide_muted_watches) }}
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_field(form.application.form.pager_size) }}
|
||||
<span class="pure-form-message-inline">Number of items per page in the watch overview list, 0 to disable.</span>
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_field(form.application.form.rss_content_format) }}
|
||||
<span class="pure-form-message-inline">Love RSS? Does your reader support HTML? Set it here</span>
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_checkbox_field(form.application.form.extract_title_as_title) }}
|
||||
<span class="pure-form-message-inline">Note: This will automatically apply to all existing watches.</span>
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_checkbox_field(form.application.form.empty_pages_are_a_change) }}
|
||||
<span class="pure-form-message-inline">{{ _('When a request returns no content, or the HTML does not contain any text, is this considered a change?') }}</span>
|
||||
<span class="pure-form-message-inline">When a request returns no content, or the HTML does not contain any text, is this considered a change?</span>
|
||||
</div>
|
||||
{% if form.requests.proxy %}
|
||||
<div>
|
||||
<br>
|
||||
<div class="inline-radio">
|
||||
{{ render_field(form.requests.form.proxy, class="fetch-backend-proxy") }}
|
||||
<span class="pure-form-message-inline">{{ _('Choose a default proxy for all watches') }}</span>
|
||||
</div>
|
||||
<div class="pure-control-group inline-radio">
|
||||
{{ render_field(form.requests.form.proxy, class="fetch-backend-proxy") }}
|
||||
<span class="pure-form-message-inline">
|
||||
Choose a default proxy for all watches
|
||||
</span>
|
||||
</div>
|
||||
{% endif %}
|
||||
|
||||
</fieldset>
|
||||
</div>
|
||||
|
||||
<div class="tab-pane-inner" id="notifications">
|
||||
<fieldset>
|
||||
{{ render_common_settings_form(form.application.form, emailprefix, settings_application, extra_notification_token_placeholder_info) }}
|
||||
<div class="field-group">
|
||||
{{ render_common_settings_form(form.application.form, emailprefix, settings_application, extra_notification_token_placeholder_info) }}
|
||||
</div>
|
||||
</fieldset>
|
||||
<div class="pure-control-group" id="notification-base-url">
|
||||
{{ render_field(form.application.form.base_url, class="m-d") }}
|
||||
<span class="pure-form-message-inline">
|
||||
{{ _('Base URL used for the') }} <code>{{ '{{ base_url }}' }}</code> {{ _('token in notification links.') }}<br>
|
||||
{{ _('Default value is the system environment variable') }} '<code>BASE_URL</code>' - <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Configurable-BASE_URL-setting">{{ _('read more here') }}</a>.
|
||||
Base URL used for the <code>{{ '{{ base_url }}' }}</code> token in notification links.<br>
|
||||
Default value is the system environment variable '<code>BASE_URL</code>' - <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Configurable-BASE_URL-setting">read more here</a>.
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
@@ -118,15 +122,15 @@
|
||||
<div class="pure-control-group inline-radio">
|
||||
{{ render_field(form.application.form.fetch_backend, class="fetch-backend") }}
|
||||
<span class="pure-form-message-inline">
|
||||
<p>{{ _('Use the') }} <strong>{{ _('Basic') }}</strong> {{ _('method (default) where your watched sites don\'t need Javascript to render.') }}</p>
|
||||
<p>{{ _('The') }} <strong>{{ _('Chrome/Javascript') }}</strong> {{ _('method requires a network connection to a running WebDriver+Chrome server, set by the ENV var') }} 'WEBDRIVER_URL'. </p>
|
||||
<p>Use the <strong>Basic</strong> method (default) where your watched sites don't need Javascript to render.</p>
|
||||
<p>The <strong>Chrome/Javascript</strong> method requires a network connection to a running WebDriver+Chrome server, set by the ENV var 'WEBDRIVER_URL'. </p>
|
||||
</span>
|
||||
</div>
|
||||
<fieldset class="pure-group" id="webdriver-override-options" data-visible-for="application-fetch_backend=html_webdriver">
|
||||
<div class="pure-form-message-inline">
|
||||
<strong>{{ _('If you\'re having trouble waiting for the page to be fully rendered (text missing etc), try increasing the \'wait\' time here.') }}</strong>
|
||||
<strong>If you're having trouble waiting for the page to be fully rendered (text missing etc), try increasing the 'wait' time here.</strong>
|
||||
<br>
|
||||
{{ _('This will wait') }} <i>n</i> {{ _('seconds before extracting the text.') }}
|
||||
This will wait <i>n</i> seconds before extracting the text.
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_field(form.application.form.webdriver_delay) }}
|
||||
@@ -135,27 +139,19 @@
|
||||
<div class="pure-control-group">
|
||||
{{ render_field(form.requests.form.workers) }}
|
||||
{% set worker_info = get_worker_status_info() %}
|
||||
<span class="pure-form-message-inline">{{ _('Number of concurrent workers to process watches. More workers = faster processing but higher memory usage.') }}<br>
|
||||
{{ _('Currently running:') }} <strong>{{ worker_info.count }}</strong> {{ _('operational') }} {{ worker_info.type }} {{ _('workers') }}{% if worker_info.active_workers > 0 %} ({{ worker_info.active_workers }} {{ _('actively processing') }}){% endif %}.</span>
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_field(form.requests.form.jitter_seconds, class="jitter_seconds") }}
|
||||
<span class="pure-form-message-inline">{{ _('Example - 3 seconds random jitter could trigger up to 3 seconds earlier or up to 3 seconds later') }}</span>
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_field(form.requests.form.timeout) }}
|
||||
<span class="pure-form-message-inline">{{ _('For regular plain requests (not chrome based), maximum number of seconds until timeout, 1-999.') }}</span><br>
|
||||
<span class="pure-form-message-inline">Number of concurrent workers to process watches. More workers = faster processing but higher memory usage.<br>
|
||||
Currently running: <strong>{{ worker_info.count }}</strong> operational {{ worker_info.type }} workers{% if worker_info.active_workers > 0 %} ({{ worker_info.active_workers }} actively processing){% endif %}.</span>
|
||||
</div>
|
||||
<div class="pure-control-group inline-radio">
|
||||
{{ render_field(form.requests.form.default_ua) }}
|
||||
<span class="pure-form-message-inline">
|
||||
{{ _('Applied to all requests.') }}<br><br>
|
||||
{{ _('Note: Simply changing the User-Agent often does not defeat anti-robot technologies, it\'s important to consider') }} <a href="https://changedetection.io/tutorial/what-are-main-types-anti-robot-mechanisms">{{ _('all of the ways that the browser is detected') }}</a>.
|
||||
Applied to all requests.<br><br>
|
||||
Note: Simply changing the User-Agent often does not defeat anti-robot technologies, it's important to consider <a href="https://changedetection.io/tutorial/what-are-main-types-anti-robot-mechanisms">all of the ways that the browser is detected</a>.
|
||||
</span>
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
<br>
|
||||
{{ _('Tip:') }} <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration#brightdata-proxy-support">{{ _('Connect using Bright Data and Oxylabs Proxies, find out more here.') }}</a>
|
||||
Tip: <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration#brightdata-proxy-support">Connect using Bright Data and Oxylabs Proxies, find out more here.</a>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
@@ -164,15 +160,15 @@
|
||||
|
||||
<fieldset class="pure-group">
|
||||
{{ render_checkbox_field(form.application.form.ignore_whitespace) }}
|
||||
<span class="pure-form-message-inline">{{ _('Ignore whitespace, tabs and new-lines/line-feeds when considering if a change was detected.') }}<br>
|
||||
<i>{{ _('Note:') }}</i> {{ _('Changing this will change the status of your existing watches, possibly trigger alerts etc.') }}
|
||||
<span class="pure-form-message-inline">Ignore whitespace, tabs and new-lines/line-feeds when considering if a change was detected.<br>
|
||||
<i>Note:</i> Changing this will change the status of your existing watches, possibly trigger alerts etc.
|
||||
</span>
|
||||
</fieldset>
|
||||
<fieldset class="pure-group">
|
||||
{{ render_checkbox_field(form.application.form.render_anchor_tag_content) }}
|
||||
<span class="pure-form-message-inline">{{ _('Render anchor tag content, default disabled, when enabled renders links as') }} <code>(link text)[https://somesite.com]</code>
|
||||
<span class="pure-form-message-inline">Render anchor tag content, default disabled, when enabled renders links as <code>(link text)[https://somesite.com]</code>
|
||||
<br>
|
||||
<i>{{ _('Note:') }}</i> {{ _('Changing this could affect the content of your existing watches, possibly trigger alerts etc.') }}
|
||||
<i>Note:</i> Changing this could affect the content of your existing watches, possibly trigger alerts etc.
|
||||
</span>
|
||||
</fieldset>
|
||||
<fieldset class="pure-group">
|
||||
@@ -183,9 +179,9 @@ nav
|
||||
//*[contains(text(), 'Advertisement')]") }}
|
||||
<span class="pure-form-message-inline">
|
||||
<ul>
|
||||
<li> {{ _('Remove HTML element(s) by CSS and XPath selectors before text conversion.') }} </li>
|
||||
<li> {{ _('Don\'t paste HTML here, use only CSS and XPath selectors') }} </li>
|
||||
<li> {{ _('Add multiple elements, CSS or XPath selectors per line to ignore multiple parts of the HTML.') }} </li>
|
||||
<li> Remove HTML element(s) by CSS and XPath selectors before text conversion. </li>
|
||||
<li> Don't paste HTML here, use only CSS and XPath selectors </li>
|
||||
<li> Add multiple elements, CSS or XPath selectors per line to ignore multiple parts of the HTML. </li>
|
||||
</ul>
|
||||
</span>
|
||||
</fieldset>
|
||||
@@ -193,121 +189,98 @@ nav
|
||||
{{ render_field(form.application.form.global_ignore_text, rows=5, placeholder="Some text to ignore in a line
|
||||
/some.regex\d{2}/ for case-INsensitive regex
|
||||
") }}
|
||||
<span class="pure-form-message-inline">{{ _('Note: This is applied globally in addition to the per-watch rules.') }}</span><br>
|
||||
<span class="pure-form-message-inline">Note: This is applied globally in addition to the per-watch rules.</span><br>
|
||||
<span class="pure-form-message-inline">
|
||||
<ul>
|
||||
<li>{{ _('Matching text will be') }} <strong>{{ _('ignored') }}</strong> {{ _('in the text snapshot (you can still see it but it wont trigger a change)') }}</li>
|
||||
<li>{{ _('Note: This is applied globally in addition to the per-watch rules.') }}</li>
|
||||
<li>{{ _('Each line processed separately, any line matching will be ignored (removed before creating the checksum)') }}</li>
|
||||
<li>{{ _('Regular Expression support, wrap the entire line in forward slash') }} <code>/regex/</code></li>
|
||||
<li>{{ _('Changing this will affect the comparison checksum which may trigger an alert') }}</li>
|
||||
<li>Matching text will be <strong>ignored</strong> in the text snapshot (you can still see it but it wont trigger a change)</li>
|
||||
<li>Note: This is applied globally in addition to the per-watch rules.</li>
|
||||
<li>Each line processed separately, any line matching will be ignored (removed before creating the checksum)</li>
|
||||
<li>Regular Expression support, wrap the entire line in forward slash <code>/regex/</code></li>
|
||||
<li>Changing this will affect the comparison checksum which may trigger an alert</li>
|
||||
</ul>
|
||||
</span>
|
||||
</fieldset>
|
||||
<fieldset class="pure-group">
|
||||
{{ render_checkbox_field(form.application.form.strip_ignored_lines) }}
|
||||
<span class="pure-form-message-inline">{{ _('Remove any text that appears in the "Ignore text" from the output (otherwise its just ignored for change-detection)') }}<br>
|
||||
<i>{{ _('Note:') }}</i> {{ _('Changing this will change the status of your existing watches, possibly trigger alerts etc.') }}
|
||||
</span>
|
||||
</fieldset>
|
||||
</div>
|
||||
|
||||
<div class="tab-pane-inner" id="api">
|
||||
<h4>{{ _('API Access') }}</h4>
|
||||
<p>{{ _('Drive your changedetection.io via API, More about') }} <a href="https://changedetection.io/docs/api_v1/index.html">{{ _('API access and examples here') }}</a>.</p>
|
||||
<h4>API Access</h4>
|
||||
<p>Drive your changedetection.io via API, More about <a href="https://github.com/dgtlmoon/changedetection.io/wiki/API-Reference">API access here</a></p>
|
||||
|
||||
<div class="pure-control-group">
|
||||
{{ render_checkbox_field(form.application.form.api_access_token_enabled) }}
|
||||
<div class="pure-form-message-inline">{{ _('Restrict API access limit by using') }} <code>x-api-key</code> {{ _('header - required for the Chrome Extension to work') }}</div><br>
|
||||
<div class="pure-form-message-inline"><br>{{ _('API Key') }} <span id="api-key">{{api_key}}</span>
|
||||
<span style="display:none;" id="api-key-copy" >{{ _('copy') }}</span>
|
||||
<div class="pure-form-message-inline">Restrict API access limit by using <code>x-api-key</code> header - required for the Chrome Extension to work</div><br>
|
||||
<div class="pure-form-message-inline"><br>API Key <span id="api-key">{{api_key}}</span>
|
||||
<span style="display:none;" id="api-key-copy" >copy</span>
|
||||
</div>
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
<a href="{{url_for('settings.settings_reset_api_key')}}" class="pure-button button-small button-cancel">{{ _('Regenerate API key') }}</a>
|
||||
<a href="{{url_for('settings.settings_reset_api_key')}}" class="pure-button button-small button-cancel">Regenerate API key</a>
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
<h4>{{ _('Chrome Extension') }}</h4>
|
||||
<p>{{ _('Easily add any web-page to your changedetection.io installation from within Chrome.') }}</p>
|
||||
<strong>{{ _('Step 1') }}</strong> {{ _('Install the extension,') }} <strong>{{ _('Step 2') }}</strong> {{ _('Navigate to this page,') }}
|
||||
<strong>{{ _('Step 3') }}</strong> {{ _('Open the extension from the toolbar and click') }} "<i>{{ _('Sync API Access') }}</i>"
|
||||
<h4>Chrome Extension</h4>
|
||||
<p>Easily add any web-page to your changedetection.io installation from within Chrome.</p>
|
||||
<strong>Step 1</strong> Install the extension, <strong>Step 2</strong> Navigate to this page,
|
||||
<strong>Step 3</strong> Open the extension from the toolbar and click "<i>Sync API Access</i>"
|
||||
<p>
|
||||
<a id="chrome-extension-link"
|
||||
title="{{ _('Try our new Chrome Extension!') }}"
|
||||
title="Try our new Chrome Extension!"
|
||||
href="https://chromewebstore.google.com/detail/changedetectionio-website/kefcfmgmlhmankjmnbijimhofdjekbop">
|
||||
<img alt="{{ _('Chrome store icon') }}" src="{{ url_for('static_content', group='images', filename='google-chrome-icon.png') }}" >
|
||||
{{ _('Chrome Webstore') }}
|
||||
<img alt="Chrome store icon" src="{{ url_for('static_content', group='images', filename='google-chrome-icon.png') }}" alt="Chrome">
|
||||
Chrome Webstore
|
||||
</a>
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="tab-pane-inner" id="rss">
|
||||
<div class="tab-pane-inner" id="timedate">
|
||||
<div class="pure-control-group">
|
||||
{{ render_checkbox_field(form.application.form.rss_hide_muted_watches) }}
|
||||
Ensure the settings below are correct, they are used to manage the time schedule for checking your web page watches.
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_field(form.application.form.rss_diff_length) }}
|
||||
<span class="pure-form-message-inline">{{ _('Maximum number of history snapshots to include in the watch specific RSS feed.') }}</span>
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_checkbox_field(form.application.form.rss_reader_mode) }}
|
||||
<span class="pure-form-message-inline">{{ _('For watching other RSS feeds - When watching RSS/Atom feeds, convert them into clean text for better change detection.') }}</span>
|
||||
</div>
|
||||
<div class="pure-control-group grey-form-border">
|
||||
<div class="pure-control-group">
|
||||
{{ render_field(form.application.form.rss_content_format) }}
|
||||
<span class="pure-form-message-inline">{{ _('Does your reader support HTML? Set it here') }}</span>
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_field(form.application.form.rss_template_type) }}
|
||||
<span class="pure-form-message-inline">{{ _('\'System default\' for the same template for all items, or re-use your "Notification Body" as the template.') }}</span>
|
||||
</div>
|
||||
<div>
|
||||
{{ render_field(form.application.form.rss_template_override) }}
|
||||
{{ show_token_placeholders(extra_notification_token_placeholder_info=extra_notification_token_placeholder_info, suffix="-rss") }}
|
||||
</div>
|
||||
</div>
|
||||
<br>
|
||||
|
||||
|
||||
</div>
|
||||
<div class="tab-pane-inner" id="timedate">
|
||||
<div class="pure-control-group">
|
||||
{{ _('Ensure the settings below are correct, they are used to manage the time schedule for checking your web page watches.') }}
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
<p><strong>{{ _('UTC Time & Date from Server:') }}</strong> <span id="utc-time" >{{ utc_time }}</span></p>
|
||||
<p><strong>{{ _('Local Time & Date in Browser:') }}</strong> <span class="local-time" data-utc="{{ utc_time }}"></span></p>
|
||||
<div>
|
||||
{{ render_field(form.application.form.scheduler_timezone_default) }}
|
||||
<p><strong>UTC Time & Date from Server:</strong> <span id="utc-time" >{{ utc_time }}</span></p>
|
||||
<p><strong>Local Time & Date in Browser:</strong> <span class="local-time" data-utc="{{ utc_time }}"></span></p>
|
||||
<p>
|
||||
{{ render_field(form.application.form.timezone) }}
|
||||
<datalist id="timezones" style="display: none;">
|
||||
{%- for timezone in available_timezones -%}<option value="{{ timezone }}">{{ timezone }}</option>{%- endfor -%}
|
||||
{% for tz_name in available_timezones %}
|
||||
<option value="{{ tz_name }}">{{ tz_name }}</option>
|
||||
{% endfor %}
|
||||
</datalist>
|
||||
</div>
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="tab-pane-inner" id="ui-options">
|
||||
<div class="pure-control-group">
|
||||
{{ render_checkbox_field(form.application.form.ui.form.open_diff_in_new_tab, class="open_diff_in_new_tab") }}
|
||||
<span class="pure-form-message-inline">{{ _('Enable this setting to open the diff page in a new tab. If disabled, the diff page will open in the current tab.') }}</span>
|
||||
<span class="pure-form-message-inline">Enable this setting to open the diff page in a new tab. If disabled, the diff page will open in the current tab.</span>
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_checkbox_field(form.application.form.ui.form.socket_io_enabled, class="socket_io_enabled") }}
|
||||
<span class="pure-form-message-inline">{{ _('Realtime UI Updates Enabled - (Restart required if this is changed)') }}</span>
|
||||
<span class="pure-form-message-inline">Realtime UI Updates Enabled - (Restart required if this is changed)</span>
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_checkbox_field(form.application.form.ui.form.favicons_enabled, class="") }}
|
||||
<span class="pure-form-message-inline">{{ _('Enable or Disable Favicons next to the watch list') }}</span>
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_checkbox_field(form.application.form.ui.use_page_title_in_list) }}
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_field(form.application.form.pager_size) }}
|
||||
<span class="pure-form-message-inline">{{ _('Number of items per page in the watch overview list, 0 to disable.') }}</span>
|
||||
<span class="pure-form-message-inline">Enable or Disable Favicons next to the watch list</span>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
<div class="tab-pane-inner" id="ai-options">
|
||||
<p><strong>New:</strong> click here (link to changedetection.io tutorial page) find out how to setup and example</p>
|
||||
<br>
|
||||
key fields should be some password type field so you can see its set but doesnt contain the key on view and doesnt lose it on save<br>
|
||||
|
||||
<div class="pure-control-group inline-radio">
|
||||
{{ render_simple_field(form.application.form.ai.form.LLM_backend) }}
|
||||
<span class="pure-form-message-inline">Preferred LLM connection</span>
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_checkbox_field(form.application.form.ai.form.API_keys.form.openai) }}
|
||||
<span class="pure-form-message-inline">Go here to read more about OpenAI integration</span>
|
||||
</div>
|
||||
<div class="pure-control-group">
|
||||
{{ render_checkbox_field(form.application.form.ai.form.API_keys.form.gemini) }}
|
||||
<span class="pure-form-message-inline">Go here to read more about Google Gemini integration</span>
|
||||
</div>
|
||||
</div>
|
||||
<div class="tab-pane-inner" id="proxies">
|
||||
<div id="recommended-proxy">
|
||||
<div>
|
||||
@@ -352,66 +325,26 @@ nav
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<p><strong>{{ _('Tip') }}</strong>: {{ _('"Residential" and "Mobile" proxy type can be more successfull than "Data Center" for blocked websites.') }}</p>
|
||||
<p><strong>Tip</strong>: "Residential" and "Mobile" proxy type can be more successfull than "Data Center" for blocked websites.
|
||||
|
||||
<div class="pure-control-group" id="extra-proxies-setting">
|
||||
{{ render_fieldlist_with_inline_errors(form.requests.form.extra_proxies) }}
|
||||
<span class="pure-form-message-inline">{{ _('"Name" will be used for selecting the proxy in the Watch Edit settings') }}</span><br>
|
||||
<span class="pure-form-message-inline">{{ _('SOCKS5 proxies with authentication are only supported with \'plain requests\' fetcher, for other fetchers you should whitelist the IP access instead') }}</span>
|
||||
{{ render_field(form.requests.form.extra_proxies) }}
|
||||
<span class="pure-form-message-inline">"Name" will be used for selecting the proxy in the Watch Edit settings</span><br>
|
||||
<span class="pure-form-message-inline">SOCKS5 proxies with authentication are only supported with 'plain requests' fetcher, for other fetchers you should whitelist the IP access instead</span>
|
||||
</div>
|
||||
<div class="pure-control-group" id="extra-browsers-setting">
|
||||
<p>
|
||||
<span class="pure-form-message-inline"><i>Extra Browsers</i> can be attached to further defeat CAPTCHA's on websites that are particularly hard to scrape.</span><br>
|
||||
<span class="pure-form-message-inline">Simply paste the connection address into the box, <a href="https://changedetection.io/tutorial/using-bright-datas-scraping-browser-pass-captchas-and-other-protection-when-monitoring">More instructions and examples here</a> </span>
|
||||
</p>
|
||||
{{ render_fieldlist_with_inline_errors(form.requests.form.extra_browsers) }}
|
||||
{{ render_field(form.requests.form.extra_browsers) }}
|
||||
</div>
|
||||
</div>
|
||||
{% if plugin_tabs %}
{% for tab in plugin_tabs %}
<div class="tab-pane-inner" id="plugin-{{ tab.plugin_id }}">
{% set plugin_form = plugin_forms[tab.plugin_id] %}
{% if tab.template_path %}
{# Plugin provides custom template - include it directly (no separate form) #}
{% include tab.template_path with context %}
{% else %}
{# Default form rendering - fields only, no submit button #}
<fieldset>
{% for field in plugin_form %}
{% if field.type != 'CSRFToken' and field.type != 'SubmitField' %}
<div class="pure-control-group">
{% if field.type == 'BooleanField' %}
{{ render_checkbox_field(field) }}
{% else %}
{{ render_field(field) }}
{% endif %}
</div>
{% endif %}
{% endfor %}
</fieldset>
{% endif %}
</div>
{% endfor %}
{% endif %}
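The loop above expects each plugin_tabs entry to expose a plugin_id and an optional template_path, with a matching entry in the plugin_forms mapping. The registration code is outside this diff; a hedged sketch of the data shape the template appears to consume:

# Sketch of the data this template appears to consume; names are
# illustrative, the real plugin registration is not shown in this diff.
plugin_tabs = [
    {'plugin_id': 'example_plugin', 'template_path': None},  # None -> default field rendering
]
plugin_forms = {
    'example_plugin': ExamplePluginSettingsForm(),  # hypothetical WTForms form
}
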
<div class="tab-pane-inner" id="info">
|
||||
<p><strong>{{ _('Uptime:') }}</strong> {{ uptime_seconds|format_duration }}</p>
|
||||
<p><strong>{{ _('Python version:') }}</strong> {{ python_version }}</p>
|
||||
<p><strong>{{ _('Plugins active:') }}</strong></p>
|
||||
{% if active_plugins %}
|
||||
<ul>
|
||||
{% for plugin in active_plugins %}
|
||||
<li><strong>{{ plugin.name }}</strong> - {{ plugin.description }}</li>
|
||||
{% endfor %}
|
||||
</ul>
|
||||
{% else %}
|
||||
<p>{{ _('No plugins active') }}</p>
|
||||
{% endif %}
|
||||
</div>
|
||||
<div id="actions">
|
||||
<div class="pure-control-group">
|
||||
{{ render_button(form.save_button) }}
|
||||
<a href="{{url_for('watchlist.index')}}" class="pure-button button-cancel">{{ _('Back') }}</a>
|
||||
<a href="{{url_for('ui.clear_all_history')}}" class="pure-button button-error">{{ _('Clear Snapshot History') }}</a>
|
||||
<a href="{{url_for('watchlist.index')}}" class="pure-button button-small button-cancel">Back</a>
|
||||
<a href="{{url_for('ui.clear_all_history')}}" class="pure-button button-small button-error">Clear Snapshot History</a>
|
||||
</div>
|
||||
</div>
|
||||
</form>
|
||||
|
||||
@@ -1,7 +1,5 @@
import threading
from flask import Blueprint, request, render_template, flash, url_for, redirect
from flask_babel import gettext
from loguru import logger


from changedetectionio.store import ChangeDetectionStore
from changedetectionio.flask_app import login_optionally_required
@@ -23,10 +21,9 @@ def construct_blueprint(datastore: ChangeDetectionStore):
    tag_count = Counter(tag for watch in datastore.data['watching'].values() if watch.get('tags') for tag in watch['tags'])

    output = render_template("groups-overview.html",
                             app_rss_token=datastore.data['settings']['application'].get('rss_access_token'),
                             available_tags=sorted_tags,
                             form=add_form,
                             tag_count=tag_count,
                             tag_count=tag_count
                             )

    return output
@@ -45,103 +42,61 @@ def construct_blueprint(datastore: ChangeDetectionStore):
    title = request.form.get('name').strip()

    if datastore.tag_exists_by_name(title):
        flash(gettext('The tag "{}" already exists').format(title), "error")
        flash(f'The tag "{title}" already exists', "error")
        return redirect(url_for('tags.tags_overview_page'))

    datastore.add_tag(title)
    flash(gettext("Tag added"))
    flash("Tag added")


    return redirect(url_for('tags.tags_overview_page'))

@tags_blueprint.route("/mute/<uuid_str:uuid>", methods=['GET'])
|
||||
@tags_blueprint.route("/mute/<string:uuid>", methods=['GET'])
|
||||
@login_optionally_required
|
||||
def mute(uuid):
|
||||
tag = datastore.data['settings']['application']['tags'].get(uuid)
|
||||
if tag:
|
||||
tag['notification_muted'] = not tag['notification_muted']
|
||||
tag.commit()
|
||||
if datastore.data['settings']['application']['tags'].get(uuid):
|
||||
datastore.data['settings']['application']['tags'][uuid]['notification_muted'] = not datastore.data['settings']['application']['tags'][uuid]['notification_muted']
|
||||
return redirect(url_for('tags.tags_overview_page'))
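Several routes in this diff move from Flask's built-in <string:uuid> converter to a custom <uuid_str:uuid> one. The converter's registration is not part of this diff; a custom Werkzeug converter of that kind is usually wired up roughly like this sketch (class name and regex are assumptions):

# Sketch only - the project's actual uuid_str converter is not in this diff.
from werkzeug.routing import BaseConverter

class UUIDStrConverter(BaseConverter):
    # Accept canonical hex-and-dashes UUIDs, kept as plain strings
    regex = r'[0-9a-fA-F]{8}-(?:[0-9a-fA-F]{4}-){3}[0-9a-fA-F]{12}'

# Registered against the app before the blueprints are attached:
# app.url_map.converters['uuid_str'] = UUIDStrConverter
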
@tags_blueprint.route("/delete/<uuid_str:uuid>", methods=['GET'])
|
||||
@tags_blueprint.route("/delete/<string:uuid>", methods=['GET'])
|
||||
@login_optionally_required
|
||||
def delete(uuid):
|
||||
# Delete the tag from settings immediately
|
||||
removed = 0
|
||||
# Delete the tag, and any tag reference
|
||||
if datastore.data['settings']['application']['tags'].get(uuid):
|
||||
del datastore.data['settings']['application']['tags'][uuid]
|
||||
|
||||
# Remove tag from all watches in background thread to avoid blocking
|
||||
def remove_tag_background(tag_uuid):
|
||||
"""Background thread to remove tag from watches - discarded after completion."""
|
||||
removed_count = 0
|
||||
try:
|
||||
for watch_uuid, watch in datastore.data['watching'].items():
|
||||
if watch.get('tags') and tag_uuid in watch['tags']:
|
||||
watch['tags'].remove(tag_uuid)
|
||||
watch.commit()
|
||||
removed_count += 1
|
||||
logger.info(f"Background: Tag {tag_uuid} removed from {removed_count} watches")
|
||||
except Exception as e:
|
||||
logger.error(f"Error removing tag from watches: {e}")
|
||||
for watch_uuid, watch in datastore.data['watching'].items():
|
||||
if watch.get('tags') and uuid in watch['tags']:
|
||||
removed += 1
|
||||
watch['tags'].remove(uuid)
|
||||
|
||||
# Start daemon thread
|
||||
threading.Thread(target=remove_tag_background, args=(uuid,), daemon=True).start()
|
||||
|
||||
flash(gettext("Tag deleted, removing from watches in background"))
|
||||
flash(f"Tag deleted and removed from {removed} watches")
|
||||
return redirect(url_for('tags.tags_overview_page'))

@tags_blueprint.route("/unlink/<uuid_str:uuid>", methods=['GET'])
@tags_blueprint.route("/unlink/<string:uuid>", methods=['GET'])
@login_optionally_required
def unlink(uuid):
    # Unlink tag from all watches in background thread to avoid blocking
    def unlink_tag_background(tag_uuid):
        """Background thread to unlink tag from watches - discarded after completion."""
        unlinked_count = 0
        try:
            for watch_uuid, watch in datastore.data['watching'].items():
                if watch.get('tags') and tag_uuid in watch['tags']:
                    watch['tags'].remove(tag_uuid)
                    watch.commit()
                    unlinked_count += 1
            logger.info(f"Background: Tag {tag_uuid} unlinked from {unlinked_count} watches")
        except Exception as e:
            logger.error(f"Error unlinking tag from watches: {e}")
    unlinked = 0
    for watch_uuid, watch in datastore.data['watching'].items():
        if watch.get('tags') and uuid in watch['tags']:
            unlinked += 1
            watch['tags'].remove(uuid)

    # Start daemon thread
    threading.Thread(target=unlink_tag_background, args=(uuid,), daemon=True).start()

    flash(gettext("Unlinking tag from watches in background"))
    flash(f"Tag unlinked removed from {unlinked} watches")
    return redirect(url_for('tags.tags_overview_page'))

@tags_blueprint.route("/delete_all", methods=['GET'])
@login_optionally_required
def delete_all():
    for watch_uuid, watch in datastore.data['watching'].items():
        watch['tags'] = []
    datastore.data['settings']['application']['tags'] = {}

    for tag_uuid in list(datastore.data['settings']['application']['tags'].keys()):
        # TagsDict 'del' handler will remove the dir
        del datastore.data['settings']['application']['tags'][tag_uuid]


    # Clear tags from all watches in background thread to avoid blocking
    def clear_all_tags_background():
        """Background thread to clear tags from all watches - discarded after completion."""
        cleared_count = 0
        try:
            for watch_uuid, watch in datastore.data['watching'].items():
                watch['tags'] = []
                watch.commit()
                cleared_count += 1
            logger.info(f"Background: Cleared tags from {cleared_count} watches")
        except Exception as e:
            logger.error(f"Error clearing tags from watches: {e}")

    # Start daemon thread
    threading.Thread(target=clear_all_tags_background, daemon=True).start()

    flash(gettext("All tags deleted, clearing from watches in background"))
    flash(f"All tags deleted")
    return redirect(url_for('tags.tags_overview_page'))

@tags_blueprint.route("/edit/<uuid_str:uuid>", methods=['GET'])
@tags_blueprint.route("/edit/<string:uuid>", methods=['GET'])
@login_optionally_required
def form_tag_edit(uuid):
    from changedetectionio.blueprint.tags.form import group_restock_settings_form
@@ -150,7 +105,7 @@ def construct_blueprint(datastore: ChangeDetectionStore):

    default = datastore.data['settings']['application']['tags'].get(uuid)
    if not default:
        flash(gettext("Tag not found"), "error")
        flash("Tag not found", "error")
        return redirect(url_for('watchlist.index'))

    form = group_restock_settings_form(
@@ -160,21 +115,6 @@ def construct_blueprint(datastore: ChangeDetectionStore):
                                       default_system_settings = datastore.data['settings'],
                                       )

    # Bridge API-stored processor_config_* values into the form's FormField sub-forms.
    # The API stores processor_config_restock_diff in the tag dict; find the matching
    # FormField by checking which one's sub-fields cover the config keys.
    from wtforms.fields.form import FormField as WTFormField
    for key, value in default.items():
        if not key.startswith('processor_config_') or not isinstance(value, dict):
            continue
        for form_field in form:
            if isinstance(form_field, WTFormField) and all(k in form_field.form._fields for k in value):
                for sub_key, sub_value in value.items():
                    sub_field = form_field.form._fields.get(sub_key)
                    if sub_field is not None:
                        sub_field.data = sub_value
            break
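For illustration, a tag record coming from the API might carry a nested config like the sketch below; the bridging loop above copies each inner key onto the matching FormField sub-form. Only the processor_config_restock_diff key itself appears in this diff, the sub-field names here are hypothetical:

# Hypothetical shape of the data the bridging loop expects.
tag = {
    'title': 'Electronics',
    'processor_config_restock_diff': {
        'price_change_min': 10.0,   # assumed sub-field name
        'in_stock_only': True,      # assumed sub-field name
    },
}
# Every key of the inner dict must exist as a sub-field on one of the
# form's FormField entries for the loop above to bind it.
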
    template_args = {
        'data': default,
        'form': form,
@@ -209,26 +149,26 @@ def construct_blueprint(datastore: ChangeDetectionStore):
    included_content = template.render(**template_args)

    output = render_template("edit-tag.html",
                             extra_form_content=included_content,
                             extra_tab_content=form.extra_tab_content() if form.extra_tab_content() else None,
                             settings_application=datastore.data['settings']['application'],
                             extra_tab_content=form.extra_tab_content() if form.extra_tab_content() else None,
                             extra_form_content=included_content,
                             **template_args
                             )

    return output


@tags_blueprint.route("/edit/<uuid_str:uuid>", methods=['POST'])
@tags_blueprint.route("/edit/<string:uuid>", methods=['POST'])
@login_optionally_required
def form_tag_edit_submit(uuid):
    from changedetectionio.blueprint.tags.form import group_restock_settings_form
    if uuid == 'first':
        uuid = list(datastore.data['settings']['application']['tags'].keys()).pop()

    tag = datastore.data['settings']['application']['tags'].get(uuid)
    default = datastore.data['settings']['application']['tags'].get(uuid)

    form = group_restock_settings_form(formdata=request.form if request.method == 'POST' else None,
                                       data=tag,
                                       data=default,
                                       extra_notification_tokens=datastore.get_unique_notification_tokens_available()
                                       )
    # @todo subclass form so validation works
@@ -237,18 +177,15 @@ def construct_blueprint(datastore: ChangeDetectionStore):
    # flash(','.join(l), 'error')
    # return redirect(url_for('tags.form_tag_edit_submit', uuid=uuid))

    tag.update(form.data)
    tag['processor'] = 'restock_diff'
    tag.commit()

    # Clear checksums for all watches using this tag to force reprocessing
    # Tag changes affect inherited configuration
    cleared_count = datastore.clear_checksums_for_tag(uuid)
    logger.info(f"Tag {uuid} updated, cleared {cleared_count} watch checksums")

    flash(gettext("Updated"))
    datastore.data['settings']['application']['tags'][uuid].update(form.data)
    datastore.data['settings']['application']['tags'][uuid]['processor'] = 'restock_diff'
    datastore.needs_write_urgent = True
    flash("Updated")

    return redirect(url_for('tags.tags_overview_page'))


@tags_blueprint.route("/delete/<string:uuid>", methods=['GET'])
def form_tag_delete(uuid):
    return redirect(url_for('tags.tags_overview_page'))
return tags_blueprint

@@ -1,6 +1,6 @@
{% extends 'base.html' %}
{% block content %}
{% from '_helpers.html' import render_field, render_checkbox_field, render_button, render_ternary_field %}
{% from '_helpers.html' import render_field, render_checkbox_field, render_button %}
{% from '_common_fields.html' import render_common_settings_form %}
<script>
const notification_base_url="{{url_for('ui.ui_notification.ajax_callback_send_notification_test', mode="group-settings")}}";
@@ -24,12 +24,12 @@

<div class="tabs collapsable">
<ul>
<li class="tab" id=""><a href="#general">{{ _('General') }}</a></li>
<li class="tab"><a href="#filters-and-triggers">{{ _('Filters & Triggers') }}</a></li>
<li class="tab" id=""><a href="#general">General</a></li>
<li class="tab"><a href="#filters-and-triggers">AI, Filters & Triggers</a></li>
{% if extra_tab_content %}
<li class="tab"><a href="#extras_tab">{{ extra_tab_content }}</a></li>
{% endif %}
<li class="tab"><a href="#notifications">{{ _('Notifications') }}</a></li>
<li class="tab"><a href="#notifications">Notifications</a></li>
</ul>
</div>

@@ -47,10 +47,10 @@
</div>

<div class="tab-pane-inner" id="filters-and-triggers">
<p>{{ _('These settings are') }} <strong><i>{{ _('added') }}</i></strong> {{ _('to any existing watch configurations.') }}</p>
<p>These settings are <strong><i>added</i></strong> to any existing watch configurations.</p>
{% include "edit/include_subtract.html" %}
<div class="text-filtering border-fieldset">
<h3>{{ _('Text filtering') }}</h3>
<h3>Text filtering</h3>
{% include "edit/text-options.html" %}
</div>
</div>
@@ -64,24 +64,24 @@
<div class="tab-pane-inner" id="notifications">
<fieldset>
<div class="pure-control-group inline-radio">
{{ render_ternary_field(form.notification_muted, BooleanField=True) }}
{{ render_checkbox_field(form.notification_muted) }}
</div>
{% if 1 %}
<div class="pure-control-group inline-radio">
{{ render_checkbox_field(form.notification_screenshot) }}
<span class="pure-form-message-inline">
<strong>{{ _('Use with caution!') }}</strong> {{ _('This will easily fill up your email storage quota or flood other storages.') }}
<strong>Use with caution!</strong> This will easily fill up your email storage quota or flood other storages.
</span>
</div>
{% endif %}
<div class="field-group" id="notification-field-group">
{% if has_default_notification_urls %}
<div class="inline-warning">
<img class="inline-warning-icon" src="{{url_for('static_content', group='images', filename='notice.svg')}}" alt="{{ _('Look out!') }}" title="{{ _('Lookout!') }}" >
{{ _('There are') }} <a href="{{ url_for('settings.settings_page')}}#notifications">{{ _('system-wide notification URLs enabled') }}</a>, {{ _('this form will override notification settings for this watch only') }} ‐ {{ _('an empty Notification URL list here will still send notifications.') }}
<img class="inline-warning-icon" src="{{url_for('static_content', group='images', filename='notice.svg')}}" alt="Look out!" title="Lookout!" >
There are <a href="{{ url_for('settings.settings_page')}}#notifications">system-wide notification URLs enabled</a>, this form will override notification settings for this watch only ‐ an empty Notification URL list here will still send notifications.
</div>
{% endif %}
<a href="#notifications" id="notification-setting-reset-to-default" class="pure-button button-xsmall" style="right: 20px; top: 20px; position: absolute; background-color: #5f42dd; border-radius: 4px; font-size: 70%; color: #fff">{{ _('Use system defaults') }}</a>
<a href="#notifications" id="notification-setting-reset-to-default" class="pure-button button-xsmall" style="right: 20px; top: 20px; position: absolute; background-color: #5f42dd; border-radius: 4px; font-size: 70%; color: #fff">Use system defaults</a>

{{ render_common_settings_form(form, emailprefix, settings_application, extra_notification_token_placeholder_info) }}
</div>

@@ -2,23 +2,22 @@
{% block content %}
{% from '_helpers.html' import render_simple_field, render_field %}
<script src="{{url_for('static_content', group='js', filename='jquery-3.6.0.min.js')}}"></script>
<script src="{{url_for('static_content', group='js', filename='modal.js')}}"></script>

<div class="box">
<form class="pure-form" action="{{ url_for('tags.form_tag_add') }}" method="POST" id="new-watch-form">
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" >
<fieldset>
<legend>{{ _('Add a new organisational tag') }}</legend>
<legend>Add a new organisational tag</legend>
<div id="watch-add-wrapper-zone">
<div>
{{ render_simple_field(form.name, placeholder=_("Watch group / tag")) }}
{{ render_simple_field(form.name, placeholder="Watch group / tag") }}
</div>
<div>
{{ render_simple_field(form.save_button, title=_("Save") ) }}
{{ render_simple_field(form.save_button, title="Save" ) }}
</div>
</div>
<br>
<div style="color: #fff;">{{ _('Groups allow you to manage filters and notifications for multiple watches under a single organisational tag.') }}</div>
<div style="color: #fff;">Groups allow you to manage filters and notifications for multiple watches under a single organisational tag.</div>
</fieldset>
</form>
<!-- @todo maybe some overview matrix, 'tick' with which has notification, filter rules etc -->
@@ -28,8 +27,8 @@
<thead>
<tr>
<th></th>
<th>{{ _('# Watches') }}</th>
<th>{{ _('Tag / Label name') }}</th>
<th># Watches</th>
<th>Tag / Label name</th>
<th></th>
</tr>
</thead>
@@ -39,7 +38,7 @@
--->
{% if not available_tags|length %}
<tr>
<td colspan="3">{{ _('No website organisational tags/groups configured') }}</td>
<td colspan="3">No website organisational tags/groups configured</td>
</tr>
{% endif %}
{% for uuid, tag in available_tags %}
@@ -50,25 +49,9 @@
<td>{{ "{:,}".format(tag_count[uuid]) if uuid in tag_count else 0 }}</td>
<td class="title-col inline"> <a href="{{url_for('watchlist.index', tag=uuid) }}">{{ tag.title }}</a></td>
<td>
<a class="pure-button pure-button-primary" href="{{ url_for('tags.form_tag_edit', uuid=uuid) }}">{{ _('Edit') }}</a>
<a href="{{ url_for('ui.form_watch_checknow', tag=uuid) }}" class="pure-button pure-button-primary" >{{ _('Recheck') }}</a>
<a class="pure-button button-error"
href="{{ url_for('tags.delete', uuid=uuid) }}"
data-requires-confirm
data-confirm-type="danger"
data-confirm-title="{{ _('Delete Group?') }}"
data-confirm-message="{{ _('<p>Are you sure you want to delete group <strong>%(title)s</strong>?</p><p>This action cannot be undone.</p>', title=tag.title) }}"
data-confirm-button="{{ _('Delete') }}"
title="{{ _('Deletes and removes tag') }}">{{ _('Delete') }}</a>
<a class="pure-button button-warning"
href="{{ url_for('tags.unlink', uuid=uuid) }}"
data-requires-confirm
data-confirm-type="warning"
data-confirm-title="{{ _('Unlink Group?') }}"
data-confirm-message="{{ _('<p>Are you sure you want to unlink all watches from group <strong>%(title)s</strong>?</p><p>The tag will be kept but watches will be removed from it.</p>', title=tag.title) }}"
data-confirm-button="{{ _('Unlink') }}"
title="{{ _('Keep the tag but unlink any watches') }}">{{ _('Unlink') }}</a>
<a href="{{ url_for('rss.rss_tag_feed', tag_uuid=uuid, token=app_rss_token)}}"><img alt="{{ _('RSS Feed for this watch') }}" style="padding-left: 1em;" src="{{url_for('static_content', group='images', filename='generic_feed-icon.svg')}}" height="15"></a>
<a class="pure-button pure-button-primary" href="{{ url_for('tags.form_tag_edit', uuid=uuid) }}">Edit</a>
<a class="pure-button pure-button-primary" href="{{ url_for('tags.delete', uuid=uuid) }}" title="Deletes and removes tag">Delete</a>
<a class="pure-button pure-button-primary" href="{{ url_for('tags.unlink', uuid=uuid) }}" title="Keep the tag but unlink any watches">Unlink</a>
</td>
</tr>
{% endfor %}

@@ -1,16 +1,13 @@
import time
import threading
from flask import Blueprint, request, redirect, url_for, flash, render_template, session, current_app
from flask_babel import gettext
from flask import Blueprint, request, redirect, url_for, flash, render_template, session
from loguru import logger

from changedetectionio.store import ChangeDetectionStore
from changedetectionio.blueprint.ui.edit import construct_blueprint as construct_edit_blueprint
from changedetectionio.blueprint.ui.notification import construct_blueprint as construct_notification_blueprint
from changedetectionio.blueprint.ui.views import construct_blueprint as construct_views_blueprint
from changedetectionio.blueprint.ui import diff, preview

def _handle_operations(op, uuids, datastore, worker_pool, update_q, queuedWatchMetaData, watch_check_update, extra_data=None, emit_flash=True):
def _handle_operations(op, uuids, datastore, worker_handler, update_q, queuedWatchMetaData, watch_check_update, extra_data=None, emit_flash=True):
    from flask import request, flash

    if op == 'delete':
@@ -18,83 +15,77 @@ def _handle_operations(op, uuids, datastore, worker_pool, update_q, queuedWatchM
            if datastore.data['watching'].get(uuid):
                datastore.delete(uuid)
        if emit_flash:
            flash(gettext("{} watches deleted").format(len(uuids)))
            flash(f"{len(uuids)} watches deleted")

    elif op == 'pause':
        for uuid in uuids:
            if datastore.data['watching'].get(uuid):
                datastore.data['watching'][uuid]['paused'] = True
                datastore.data['watching'][uuid].commit()
        if emit_flash:
            flash(gettext("{} watches paused").format(len(uuids)))
            flash(f"{len(uuids)} watches paused")

    elif op == 'unpause':
        for uuid in uuids:
            if datastore.data['watching'].get(uuid):
                datastore.data['watching'][uuid.strip()]['paused'] = False
                datastore.data['watching'][uuid].commit()
        if emit_flash:
            flash(gettext("{} watches unpaused").format(len(uuids)))
            flash(f"{len(uuids)} watches unpaused")

    elif (op == 'mark-viewed'):
        for uuid in uuids:
            if datastore.data['watching'].get(uuid):
                datastore.set_last_viewed(uuid, int(time.time()))
        if emit_flash:
            flash(gettext("{} watches updated").format(len(uuids)))
            flash(f"{len(uuids)} watches updated")

    elif (op == 'mute'):
        for uuid in uuids:
            if datastore.data['watching'].get(uuid):
                datastore.data['watching'][uuid]['notification_muted'] = True
                datastore.data['watching'][uuid].commit()
        if emit_flash:
            flash(gettext("{} watches muted").format(len(uuids)))
            flash(f"{len(uuids)} watches muted")

    elif (op == 'unmute'):
        for uuid in uuids:
            if datastore.data['watching'].get(uuid):
                datastore.data['watching'][uuid]['notification_muted'] = False
                datastore.data['watching'][uuid].commit()
        if emit_flash:
            flash(gettext("{} watches un-muted").format(len(uuids)))
            flash(f"{len(uuids)} watches un-muted")

    elif (op == 'recheck'):
        for uuid in uuids:
            if datastore.data['watching'].get(uuid):
                # Recheck and require a full reprocessing
                worker_pool.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))
                worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))
        if emit_flash:
            flash(gettext("{} watches queued for rechecking").format(len(uuids)))
            flash(f"{len(uuids)} watches queued for rechecking")

    elif (op == 'clear-errors'):
        for uuid in uuids:
            if datastore.data['watching'].get(uuid):
                datastore.data['watching'][uuid]["last_error"] = False
                datastore.data['watching'][uuid].commit()
        if emit_flash:
            flash(gettext("{} watches errors cleared").format(len(uuids)))
            flash(f"{len(uuids)} watches errors cleared")

    elif (op == 'clear-history'):
        for uuid in uuids:
            if datastore.data['watching'].get(uuid):
                datastore.clear_watch_history(uuid)
        if emit_flash:
            flash(gettext("{} watches cleared/reset.").format(len(uuids)))
            flash(f"{len(uuids)} watches cleared/reset.")

    elif (op == 'notification-default'):
        from changedetectionio.notification import (
            USE_SYSTEM_DEFAULT_NOTIFICATION_FORMAT_FOR_WATCH
            default_notification_format_for_watch
        )
        for uuid in uuids:
            if datastore.data['watching'].get(uuid):
                datastore.data['watching'][uuid]['notification_title'] = None
                datastore.data['watching'][uuid]['notification_body'] = None
                datastore.data['watching'][uuid]['notification_urls'] = []
                datastore.data['watching'][uuid]['notification_format'] = USE_SYSTEM_DEFAULT_NOTIFICATION_FORMAT_FOR_WATCH
                datastore.data['watching'][uuid].commit()
                datastore.data['watching'][uuid]['notification_format'] = default_notification_format_for_watch
        if emit_flash:
            flash(gettext("{} watches set to use default notification settings").format(len(uuids)))
            flash(f"{len(uuids)} watches set to use default notification settings")

    elif (op == 'assign-tag'):
        op_extradata = extra_data
@@ -108,15 +99,14 @@ def _handle_operations(op, uuids, datastore, worker_pool, update_q, queuedWatchM
                datastore.data['watching'][uuid]['tags'] = []

            datastore.data['watching'][uuid]['tags'].append(tag_uuid)
            datastore.data['watching'][uuid].commit()
        if emit_flash:
            flash(gettext("{} watches were tagged").format(len(uuids)))
            flash(f"{len(uuids)} watches were tagged")

    if uuids:
        for uuid in uuids:
            watch_check_update.send(watch_uuid=uuid)
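Watches are enqueued throughout as queuedWatchMetaData.PrioritizedItem(priority=..., item={'uuid': ...}). The class itself is outside this diff; entries of that shape for a priority queue are typically dataclasses ordered by priority alone, as in this sketch (not the project's actual definition):

# Sketch of a priority-queue entry matching the call sites above.
from dataclasses import dataclass, field
from typing import Any

@dataclass(order=True)
class PrioritizedItem:
    priority: int
    item: Any = field(compare=False)  # payload excluded from ordering comparisons
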
def construct_blueprint(datastore: ChangeDetectionStore, update_q, worker_pool, queuedWatchMetaData, watch_check_update):
def construct_blueprint(datastore: ChangeDetectionStore, update_q, worker_handler, queuedWatchMetaData, watch_check_update):
    ui_blueprint = Blueprint('ui', __name__, template_folder="templates")

    # Register the edit blueprint
@@ -131,54 +121,32 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, worker_pool,
    views_blueprint = construct_views_blueprint(datastore, update_q, queuedWatchMetaData, watch_check_update)
    ui_blueprint.register_blueprint(views_blueprint)

    # Register diff and preview blueprints
    diff_blueprint = diff.construct_blueprint(datastore)
    ui_blueprint.register_blueprint(diff_blueprint)

    preview_blueprint = preview.construct_blueprint(datastore)
    ui_blueprint.register_blueprint(preview_blueprint)

    # Import the login decorator
    from changedetectionio.auth_decorator import login_optionally_required

@ui_blueprint.route("/clear_history/<uuid_str:uuid>", methods=['GET'])
@ui_blueprint.route("/clear_history/<string:uuid>", methods=['GET'])
@login_optionally_required
def clear_watch_history(uuid):
    try:
        datastore.clear_watch_history(uuid)
    except KeyError:
        flash(gettext('Watch not found'), 'error')
        flash('Watch not found', 'error')
    else:
        flash(gettext("Cleared snapshot history for watch {}").format(uuid))
        flash("Cleared snapshot history for watch {}".format(uuid))
    return redirect(url_for('watchlist.index'))

@ui_blueprint.route("/clear_history", methods=['GET', 'POST'])
@login_optionally_required
def clear_all_history():
    if request.method == 'POST':
        confirmtext = request.form.get('confirmtext', '')
        confirmtext = request.form.get('confirmtext')

        if confirmtext.strip().lower() == gettext('clear').strip().lower():
            # Run in background thread to avoid blocking
            def clear_history_background():
                # Capture UUIDs first to avoid race conditions
                watch_uuids = list(datastore.data['watching'].keys())
                logger.info(f"Background: Clearing history for {len(watch_uuids)} watches")

                for uuid in watch_uuids:
                    try:
                        datastore.clear_watch_history(uuid)
                    except Exception as e:
                        logger.error(f"Error clearing history for watch {uuid}: {e}")

                logger.info("Background: Completed clearing history")

            # Start daemon thread
            threading.Thread(target=clear_history_background, daemon=True).start()

            flash(gettext("History clearing started in background"))
        if confirmtext == 'clear':
            for uuid in datastore.data['watching'].keys():
                datastore.clear_watch_history(uuid)
            flash("Cleared snapshot history for all watches")
        else:
            flash(gettext('Incorrect confirmation text.'), 'error')
            flash('Incorrect confirmation text.', 'error')

    return redirect(url_for('watchlist.index'))

@@ -192,37 +160,17 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, worker_pool,
    # Save the current newest history as the most recently viewed
    with_errors = request.args.get('with_errors') == "1"
    tag_limit = request.args.get('tag')
    logger.debug(f"Limiting to tag {tag_limit}")
    now = int(time.time())
    for watch_uuid, watch in datastore.data['watching'].items():
        if with_errors and not watch.get('last_error'):
            continue

    # Mark watches as viewed - use background thread only for large watch counts
    def mark_viewed_impl():
        """Mark watches as viewed - can run synchronously or in background thread."""
        marked_count = 0
        try:
            for watch_uuid, watch in datastore.data['watching'].items():
                if with_errors and not watch.get('last_error'):
                    continue
                if tag_limit and ( not watch.get('tags') or tag_limit not in watch['tags'] ):
                    logger.debug(f"Skipping watch {watch_uuid}")
                    continue

        if tag_limit and (not watch.get('tags') or tag_limit not in watch['tags']):
            continue

                datastore.set_last_viewed(watch_uuid, now)
                marked_count += 1

            logger.info(f"Marking complete: {marked_count} watches marked as viewed")
        except Exception as e:
            logger.error(f"Error marking as viewed: {e}")

    # For small watch counts (< 10), run synchronously to avoid race conditions in tests
    # For larger counts, use background thread to avoid blocking the UI
    watch_count = len(datastore.data['watching'])
    if watch_count < 10:
        # Run synchronously for small watch counts
        mark_viewed_impl()
    else:
        # Start background thread for large watch counts
        thread = threading.Thread(target=mark_viewed_impl, daemon=True)
        thread.start()
        datastore.set_last_viewed(watch_uuid, now)

    return redirect(url_for('watchlist.index', tag=tag_limit))

@@ -230,16 +178,16 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, worker_pool,
@login_optionally_required
def form_delete():
    uuid = request.args.get('uuid')

    if uuid != 'all' and not uuid in datastore.data['watching'].keys():
        flash('The watch by UUID {} does not exist.'.format(uuid), 'error')
        return redirect(url_for('watchlist.index'))

    # More for testing, possible to return the first/only
    if uuid == 'first':
        uuid = list(datastore.data['watching'].keys()).pop()

    if uuid != 'all' and not uuid in datastore.data['watching'].keys():
        flash(gettext('The watch by UUID {} does not exist.').format(uuid), 'error')
        return redirect(url_for('watchlist.index'))

    datastore.delete(uuid)
    flash(gettext('Deleted.'))
    flash('Deleted.')

    return redirect(url_for('watchlist.index'))

@@ -247,16 +195,16 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, worker_pool,
@login_optionally_required
def form_clone():
    uuid = request.args.get('uuid')

    # More for testing, possible to return the first/only
    if uuid == 'first':
        uuid = list(datastore.data['watching'].keys()).pop()

    new_uuid = datastore.clone(uuid)

    if not datastore.data['watching'].get(uuid).get('paused'):
        worker_pool.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=5, item={'uuid': new_uuid}))
        worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=5, item={'uuid': new_uuid}))

    flash(gettext('Cloned, you are editing the new watch.'))
    flash('Cloned, you are editing the new watch.')

    return redirect(url_for("ui.ui_edit.edit_page", uuid=new_uuid))

@@ -268,83 +216,40 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, worker_pool,
    uuid = request.args.get('uuid')
    with_errors = request.args.get('with_errors') == "1"

    i = 0

    running_uuids = worker_handler.get_running_uuids()

    if uuid:
        # Single watch - check if already queued or running
        if worker_pool.is_watch_running(uuid) or uuid in update_q.get_queued_uuids():
            flash(gettext("Watch is already queued or being checked."))
        else:
            worker_pool.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))
            flash(gettext("Queued 1 watch for rechecking."))
        if uuid not in running_uuids:
            worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))
            i += 1

    else:
        # Multiple watches - first count how many need to be queued
        watches_to_queue = []
        # Recheck all, including muted
        # Get most overdue first
        for k in sorted(datastore.data['watching'].items(), key=lambda item: item[1].get('last_checked', 0)):
            watch_uuid = k[0]
            watch = k[1]
            if not watch['paused'] and watch_uuid:
                if with_errors and not watch.get('last_error'):
                    continue
                if tag != None and tag not in watch['tags']:
                    continue
                watches_to_queue.append(watch_uuid)
            if not watch['paused']:
                if watch_uuid not in running_uuids:
                    if with_errors and not watch.get('last_error'):
                        continue

        # If less than 20 watches, queue synchronously for immediate feedback
        if len(watches_to_queue) < 20:
            # Get already queued/running UUIDs once (efficient)
            queued_uuids = set(update_q.get_queued_uuids())
            running_uuids = set(worker_pool.get_running_uuids())
                    if tag != None and tag not in watch['tags']:
                        continue

            # Filter out watches that are already queued or running
            watches_to_queue_filtered = []
            for watch_uuid in watches_to_queue:
                if watch_uuid not in queued_uuids and watch_uuid not in running_uuids:
                    watches_to_queue_filtered.append(watch_uuid)
                    worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': watch_uuid}))
                    i += 1

            # Queue only the filtered watches
            for watch_uuid in watches_to_queue_filtered:
                worker_pool.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': watch_uuid}))
    if i == 1:
        flash("Queued 1 watch for rechecking.")
    if i > 1:
        flash(f"Queued {i} watches for rechecking.")
    if i == 0:
        flash("No watches available to recheck.")

            # Provide feedback about skipped watches
            skipped_count = len(watches_to_queue) - len(watches_to_queue_filtered)
            if skipped_count > 0:
                flash(gettext("Queued {} watches for rechecking ({} already queued or running).").format(
                    len(watches_to_queue_filtered), skipped_count))
            else:
                if len(watches_to_queue_filtered) == 1:
                    flash(gettext("Queued 1 watch for rechecking."))
                else:
                    flash(gettext("Queued {} watches for rechecking.").format(len(watches_to_queue_filtered)))
        else:
            # 20+ watches - queue in background thread to avoid blocking HTTP response
            # Capture queued/running state before background thread
            queued_uuids = set(update_q.get_queued_uuids())
            running_uuids = set(worker_pool.get_running_uuids())

            def queue_watches_background():
                """Background thread to queue watches - discarded after completion."""
                try:
                    queued_count = 0
                    skipped_count = 0
                    for watch_uuid in watches_to_queue:
                        # Check if already queued or running (state captured at start)
                        if watch_uuid not in queued_uuids and watch_uuid not in running_uuids:
                            worker_pool.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': watch_uuid}))
                            queued_count += 1
                        else:
                            skipped_count += 1

                    logger.info(f"Background queueing complete: {queued_count} watches queued, {skipped_count} skipped (already queued/running)")
                except Exception as e:
                    logger.error(f"Error in background queueing: {e}")

            # Start background thread and return immediately
            thread = threading.Thread(target=queue_watches_background, daemon=True, name="QueueWatches-Background")
            thread.start()

            # Return immediately with approximate message
            flash(gettext("Queueing watches for rechecking in background..."))

    return redirect(url_for('watchlist.index', **({'tag': tag} if tag else {})))
    return redirect(url_for('watchlist.index'))

@ui_blueprint.route("/form/checkbox-operations", methods=['POST'])
|
||||
@login_optionally_required
|
||||
@@ -357,7 +262,7 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, worker_pool,
|
||||
extra_data=extra_data,
|
||||
queuedWatchMetaData=queuedWatchMetaData,
|
||||
uuids=uuids,
|
||||
worker_pool=worker_pool,
|
||||
worker_handler=worker_handler,
|
||||
update_q=update_q,
|
||||
watch_check_update=watch_check_update,
|
||||
op=op,
|
||||
@@ -366,7 +271,7 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, worker_pool,
|
||||
return redirect(url_for('watchlist.index'))
|
||||
|
||||
|
||||
@ui_blueprint.route("/share-url/<uuid_str:uuid>", methods=['GET'])
|
||||
@ui_blueprint.route("/share-url/<string:uuid>", methods=['GET'])
|
||||
@login_optionally_required
|
||||
def form_share_put_watch(uuid):
|
||||
"""Given a watch UUID, upload the info and return a share-link
|
||||
@@ -375,6 +280,9 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, worker_pool,
|
||||
import json
|
||||
from copy import deepcopy
|
||||
|
||||
# more for testing
|
||||
if uuid == 'first':
|
||||
uuid = list(datastore.data['watching'].keys()).pop()
|
||||
|
||||
# copy it to memory as trim off what we dont need (history)
|
||||
watch = deepcopy(datastore.data['watching'].get(uuid))
|
||||
@@ -410,29 +318,8 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, worker_pool,
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error sharing -{str(e)}")
|
||||
flash(gettext("Could not share, something went wrong while communicating with the share server - {}").format(str(e)), 'error')
|
||||
flash(f"Could not share, something went wrong while communicating with the share server - {str(e)}", 'error')
|
||||
|
||||
return redirect(url_for('watchlist.index'))
|
||||
|
||||
@ui_blueprint.route("/language/auto-detect", methods=['GET'])
|
||||
def delete_locale_language_session_var_if_it_exists():
|
||||
"""Clear the session locale preference to auto-detect from browser Accept-Language header"""
|
||||
if 'locale' in session:
|
||||
session.pop('locale', None)
|
||||
# Refresh Flask-Babel to clear cached locale
|
||||
from flask_babel import refresh
|
||||
refresh()
|
||||
flash(gettext("Language set to auto-detect from browser"))
|
||||
|
||||
# Check if there's a redirect parameter to return to the same page
|
||||
redirect_url = request.args.get('redirect')
|
||||
|
||||
# If redirect is provided and safe, use it
|
||||
from changedetectionio.is_safe_url import is_safe_url
|
||||
if redirect_url and is_safe_url(redirect_url, current_app):
|
||||
return redirect(redirect_url)
|
||||
|
||||
# Otherwise redirect to watchlist
|
||||
return redirect(url_for('watchlist.index'))
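is_safe_url() is imported from changedetectionio.is_safe_url but its body is outside this diff. Open-redirect guards of this kind conventionally resolve the target against the current host and require the scheme and network location to match; a common sketch (the project's version also receives the app object, which this simplified form omits):

# Common open-redirect guard pattern - a sketch, not the project's code.
from urllib.parse import urljoin, urlparse
from flask import request

def is_safe_url(target):
    ref_url = urlparse(request.host_url)
    test_url = urlparse(urljoin(request.host_url, target))
    # Only allow http(s) targets that stay on the same host
    return test_url.scheme in ('http', 'https') and ref_url.netloc == test_url.netloc
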
return ui_blueprint
@@ -1,305 +0,0 @@
from flask import Blueprint, request, redirect, url_for, flash, render_template, make_response, send_from_directory
from flask_babel import gettext

import re
import importlib
from loguru import logger
from markupsafe import Markup

from changedetectionio.diff import (
    REMOVED_STYLE, ADDED_STYLE, REMOVED_INNER_STYLE, ADDED_INNER_STYLE,
    REMOVED_PLACEMARKER_OPEN, REMOVED_PLACEMARKER_CLOSED,
    ADDED_PLACEMARKER_OPEN, ADDED_PLACEMARKER_CLOSED,
    CHANGED_PLACEMARKER_OPEN, CHANGED_PLACEMARKER_CLOSED,
    CHANGED_INTO_PLACEMARKER_OPEN, CHANGED_INTO_PLACEMARKER_CLOSED
)
from changedetectionio.store import ChangeDetectionStore
from changedetectionio.auth_decorator import login_optionally_required


def construct_blueprint(datastore: ChangeDetectionStore):
    diff_blueprint = Blueprint('ui_diff', __name__, template_folder="../ui/templates")

    @diff_blueprint.app_template_filter('diff_unescape_difference_spans')
    def diff_unescape_difference_spans(content):
        """Emulate Jinja2's auto-escape, then selectively unescape our diff spans."""
        from markupsafe import escape

        if not content:
            return Markup('')

        # Step 1: Escape everything like Jinja2 would (this makes it XSS-safe)
        escaped_content = escape(str(content))

        # Step 2: Unescape only our exact diff spans generated by apply_html_color_to_body()
        # Pattern matches the exact structure:
        # <span style="{STYLE}" role="{ROLE}" aria-label="{LABEL}" title="{TITLE}">

        # Unescape outer span opening tags with full attributes (role, aria-label, title)
        # Matches removed/added/changed/changed_into spans
        result = re.sub(
            rf'&lt;span style=&#34;({re.escape(REMOVED_STYLE)}|{re.escape(ADDED_STYLE)})&#34; '
            rf'role=&#34;(deletion|insertion|note)&#34; '
            rf'aria-label=&#34;([^&]+?)&#34; '
            rf'title=&#34;([^&]+?)&#34;&gt;',
            r'<span style="\1" role="\2" aria-label="\3" title="\4">',
            str(escaped_content),
            flags=re.IGNORECASE
        )

        # Unescape inner span opening tags (without additional attributes)
        # This matches the darker background styles for changed parts within lines
        result = re.sub(
            rf'&lt;span style=&#34;({re.escape(REMOVED_INNER_STYLE)}|{re.escape(ADDED_INNER_STYLE)})&#34;&gt;',
            r'<span style="\1">',
            result,
            flags=re.IGNORECASE
        )

        # Unescape closing tags (but only as many as we opened)
        open_count = result.count('<span style=')
        close_count = str(escaped_content).count('&lt;/span&gt;')

        # Replace up to the number of spans we opened
        for _ in range(min(open_count, close_count)):
            result = result.replace('&lt;/span&gt;', '</span>', 1)

        return Markup(result)
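Templates consume the filter registered above in place of a blanket |safe; an illustrative use (not a line from this diff) would be:

{# Illustrative usage of the diff_unescape_difference_spans filter #}
<td class="diff-content">{{ diff_output | diff_unescape_difference_spans }}</td>

This keeps arbitrary page content escaped while the filter re-enables only the exact span markup the diff renderer itself emitted.
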
@diff_blueprint.route("/diff/<uuid_str:uuid>", methods=['GET'])
|
||||
@login_optionally_required
|
||||
def diff_history_page(uuid):
|
||||
"""
|
||||
Render the history/diff page for a watch.
|
||||
|
||||
This route is processor-aware: it delegates rendering to the processor's
|
||||
difference.py module, allowing different processor types to provide
|
||||
custom visualizations:
|
||||
- text_json_diff: Text/HTML diff with syntax highlighting
|
||||
- restock_diff: Could show price charts and stock history
|
||||
- image_diff: Could show image comparison slider/overlay
|
||||
|
||||
Each processor implements processors/{type}/difference.py::render()
|
||||
If a processor doesn't have a difference module, falls back to text_json_diff.
|
||||
"""
|
||||
|
||||
if uuid == 'first':
|
||||
uuid = list(datastore.data['watching'].keys()).pop()
|
||||
|
||||
try:
|
||||
watch = datastore.data['watching'][uuid]
|
||||
except KeyError:
|
||||
flash(gettext("No history found for the specified link, bad link?"), "error")
|
||||
return redirect(url_for('watchlist.index'))
|
||||
|
||||
dates = list(watch.history.keys())
|
||||
if not dates or len(dates) < 2:
|
||||
flash(gettext("Not enough history (2 snapshots required) to show difference page for this watch."), "error")
|
||||
return redirect(url_for('watchlist.index'))
|
||||
|
||||
# Get the processor type for this watch
|
||||
processor_name = watch.get('processor', 'text_json_diff')
|
||||
|
||||
# Try to get the processor's difference module (works for both built-in and plugin processors)
|
||||
from changedetectionio.processors import get_processor_submodule
|
||||
processor_module = get_processor_submodule(processor_name, 'difference')
|
||||
|
||||
# Call the processor's render() function
|
||||
if processor_module and hasattr(processor_module, 'render'):
|
||||
return processor_module.render(
|
||||
watch=watch,
|
||||
datastore=datastore,
|
||||
request=request,
|
||||
url_for=url_for,
|
||||
render_template=render_template,
|
||||
flash=flash,
|
||||
redirect=redirect
|
||||
)
|
||||
|
||||
# Fallback: if processor doesn't have difference module, use text_json_diff as default
|
||||
from changedetectionio.processors.text_json_diff.difference import render as default_render
|
||||
return default_render(
|
||||
watch=watch,
|
||||
datastore=datastore,
|
||||
request=request,
|
||||
url_for=url_for,
|
||||
render_template=render_template,
|
||||
flash=flash,
|
||||
redirect=redirect
|
||||
)
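Per the docstring above, a plugin processor opts into this page by shipping a difference module with a render() function of the same shape. A minimal sketch of that contract (module path, template name and history handling are illustrative, not taken from this diff):

# processors/my_processor/difference.py - sketch of the render() contract
def render(watch, datastore, request, url_for, render_template, flash, redirect):
    dates = list(watch.history.keys())
    # Hand the two newest snapshot keys to a processor-specific template
    return render_template('my-processor-diff.html',
                           watch=watch,
                           previous_version=dates[-2],
                           current_version=dates[-1])
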
@diff_blueprint.route("/diff/<uuid_str:uuid>/extract", methods=['GET'])
|
||||
@login_optionally_required
|
||||
def diff_history_page_extract_GET(uuid):
|
||||
"""
|
||||
Render the data extraction form for a watch.
|
||||
|
||||
This route is processor-aware: it delegates to the processor's
|
||||
extract.py module, allowing different processor types to provide
|
||||
custom extraction interfaces.
|
||||
|
||||
Each processor implements processors/{type}/extract.py::render_form()
|
||||
If a processor doesn't have an extract module, falls back to text_json_diff.
|
||||
"""
|
||||
|
||||
|
||||
if uuid == 'first':
|
||||
uuid = list(datastore.data['watching'].keys()).pop()
|
||||
try:
|
||||
watch = datastore.data['watching'][uuid]
|
||||
except KeyError:
|
||||
flash(gettext("No history found for the specified link, bad link?"), "error")
|
||||
return redirect(url_for('watchlist.index'))
|
||||
|
||||
# Get the processor type for this watch
|
||||
processor_name = watch.get('processor', 'text_json_diff')
|
||||
|
||||
# Try to get the processor's extract module (works for both built-in and plugin processors)
|
||||
from changedetectionio.processors import get_processor_submodule
|
||||
processor_module = get_processor_submodule(processor_name, 'extract')
|
||||
|
||||
# Call the processor's render_form() function
|
||||
if processor_module and hasattr(processor_module, 'render_form'):
|
||||
return processor_module.render_form(
|
||||
watch=watch,
|
||||
datastore=datastore,
|
||||
request=request,
|
||||
url_for=url_for,
|
||||
render_template=render_template,
|
||||
flash=flash,
|
||||
redirect=redirect
|
||||
)
|
||||
|
||||
# Fallback: if processor doesn't have extract module, use base processors.extract as default
|
||||
from changedetectionio.processors.extract import render_form as default_render_form
|
||||
return default_render_form(
|
||||
watch=watch,
|
||||
datastore=datastore,
|
||||
request=request,
|
||||
url_for=url_for,
|
||||
render_template=render_template,
|
||||
flash=flash,
|
||||
redirect=redirect
|
||||
)
|
||||
|
||||
@diff_blueprint.route("/diff/<uuid_str:uuid>/extract", methods=['POST'])
|
||||
@login_optionally_required
|
||||
def diff_history_page_extract_POST(uuid):
|
||||
"""
|
||||
Process the data extraction request.
|
||||
|
||||
This route is processor-aware: it delegates to the processor's
|
||||
extract.py module, allowing different processor types to provide
|
||||
custom extraction logic.
|
||||
|
||||
Each processor implements processors/{type}/extract.py::process_extraction()
|
||||
If a processor doesn't have an extract module, falls back to text_json_diff.
|
||||
"""
|
||||
|
||||
if uuid == 'first':
|
||||
uuid = list(datastore.data['watching'].keys()).pop()
|
||||
|
||||
try:
|
||||
watch = datastore.data['watching'][uuid]
|
||||
except KeyError:
|
||||
flash(gettext("No history found for the specified link, bad link?"), "error")
|
||||
return redirect(url_for('watchlist.index'))
|
||||
|
||||
# Get the processor type for this watch
|
||||
processor_name = watch.get('processor', 'text_json_diff')
|
||||
|
||||
# Try to get the processor's extract module (works for both built-in and plugin processors)
|
||||
from changedetectionio.processors import get_processor_submodule
|
||||
processor_module = get_processor_submodule(processor_name, 'extract')
|
||||
|
||||
# Call the processor's process_extraction() function
|
||||
if processor_module and hasattr(processor_module, 'process_extraction'):
|
||||
return processor_module.process_extraction(
|
||||
watch=watch,
|
||||
datastore=datastore,
|
||||
request=request,
|
||||
url_for=url_for,
|
||||
make_response=make_response,
|
||||
send_from_directory=send_from_directory,
|
||||
flash=flash,
|
||||
redirect=redirect
|
||||
)
|
||||
|
||||
# Fallback: if processor doesn't have extract module, use base processors.extract as default
|
||||
from changedetectionio.processors.extract import process_extraction as default_process_extraction
|
||||
return default_process_extraction(
|
||||
watch=watch,
|
||||
datastore=datastore,
|
||||
request=request,
|
||||
url_for=url_for,
|
||||
make_response=make_response,
|
||||
send_from_directory=send_from_directory,
|
||||
flash=flash,
|
||||
redirect=redirect
|
||||
)
|
||||
|
||||
@diff_blueprint.route("/diff/<uuid_str:uuid>/processor-asset/<string:asset_name>", methods=['GET'])
|
||||
@login_optionally_required
|
||||
def processor_asset(uuid, asset_name):
|
||||
"""
|
||||
Serve processor-specific binary assets (images, files, etc.).
|
||||
|
||||
This route is processor-aware: it delegates to the processor's
|
||||
difference.py module, allowing different processor types to serve
|
||||
custom assets without embedding them as base64 in templates.
|
||||
|
||||
This solves memory issues with large binary data (e.g., screenshots)
|
||||
by streaming them as separate HTTP responses instead of embedding
|
||||
in the HTML template.
|
||||
|
||||
Each processor implements processors/{type}/difference.py::get_asset()
|
||||
which returns (binary_data, content_type, cache_control_header).
|
||||
|
||||
Example URLs:
|
||||
- /diff/{uuid}/processor-asset/before
|
||||
- /diff/{uuid}/processor-asset/after
|
||||
- /diff/{uuid}/processor-asset/rendered_diff
|
||||
"""
|
||||
|
||||
if uuid == 'first':
|
||||
uuid = list(datastore.data['watching'].keys()).pop()
|
||||
|
||||
try:
|
||||
watch = datastore.data['watching'][uuid]
|
||||
except KeyError:
|
||||
flash(gettext("No history found for the specified link, bad link?"), "error")
|
||||
return redirect(url_for('watchlist.index'))
|
||||
|
||||
# Get the processor type for this watch
|
||||
processor_name = watch.get('processor', 'text_json_diff')
|
||||
|
||||
# Try to get the processor's difference module (works for both built-in and plugin processors)
|
||||
from changedetectionio.processors import get_processor_submodule
|
||||
processor_module = get_processor_submodule(processor_name, 'difference')
|
||||
|
||||
# Call the processor's get_asset() function
|
||||
if processor_module and hasattr(processor_module, 'get_asset'):
|
||||
result = processor_module.get_asset(
|
||||
asset_name=asset_name,
|
||||
watch=watch,
|
||||
datastore=datastore,
|
||||
request=request
|
||||
)
|
||||
|
||||
if result is None:
|
||||
from flask import abort
|
||||
abort(404, description=f"Asset '{asset_name}' not found")
|
||||
|
||||
binary_data, content_type, cache_control = result
|
||||
|
||||
response = make_response(binary_data)
|
||||
response.headers['Content-Type'] = content_type
|
||||
if cache_control:
|
||||
response.headers['Cache-Control'] = cache_control
|
||||
return response
|
||||
else:
|
||||
logger.warning(f"Processor {processor_name} does not implement get_asset()")
|
||||
from flask import abort
|
||||
abort(404, description=f"Processor '{processor_name}' does not support assets")
|
||||
|
||||
return diff_blueprint
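The get_asset() contract described in the docstring above returns a (binary_data, content_type, cache_control_header) tuple, or None for a 404. A sketch of a processor-side implementation (the storage layout and asset names are hypothetical, not from this diff):

# processors/my_processor/difference.py - sketch of the get_asset() contract
import os

def get_asset(asset_name, watch, datastore, request):
    # Hypothetical storage layout; the real processor decides where assets live
    path = os.path.join(datastore.datastore_path, watch.get('uuid'), f"{asset_name}.png")
    if asset_name in ('before', 'after') and os.path.isfile(path):
        with open(path, 'rb') as f:
            return f.read(), 'image/png', 'max-age=300'
    return None  # unknown asset - the route above responds with 404
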
@@ -1,15 +1,15 @@
import time
from copy import deepcopy
import os
import importlib.resources
from flask import Blueprint, request, redirect, url_for, flash, render_template, abort
from flask_babel import gettext
from flask import Blueprint, request, redirect, url_for, flash, render_template, make_response, send_from_directory, abort
from loguru import logger
from jinja2 import Environment, FileSystemLoader

from changedetectionio.store import ChangeDetectionStore
from changedetectionio.auth_decorator import login_optionally_required
from changedetectionio.time_handler import is_within_schedule
from changedetectionio import worker_pool
from changedetectionio import worker_handler

def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMetaData):
    edit_blueprint = Blueprint('ui_edit', __name__, template_folder="../ui/templates")
@@ -20,25 +20,26 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe
        if tag_uuid in watch.get('tags', []) and (tag.get('include_filters') or tag.get('subtractive_selectors')):
            return True

    @edit_blueprint.route("/edit/<uuid_str:uuid>", methods=['GET', 'POST'])
    @edit_blueprint.route("/edit/<string:uuid>", methods=['GET', 'POST'])
    @login_optionally_required
    # https://stackoverflow.com/questions/42984453/wtforms-populate-form-with-data-if-data-exists
    # https://wtforms.readthedocs.io/en/3.0.x/forms/#wtforms.form.Form.populate_obj ?
    def edit_page(uuid):
        from changedetectionio import forms
        from changedetectionio.browser_steps.browser_steps import browser_step_ui_config
        from changedetectionio.blueprint.browser_steps.browser_steps import browser_step_ui_config
        from changedetectionio import processors
        import importlib

        if uuid == 'first':
            uuid = list(datastore.data['watching'].keys()).pop()
        # More for testing, possible to return the first/only
        if not datastore.data['watching'].keys():
            flash(gettext("No watches to edit"), "error")
            flash("No watches to edit", "error")
            return redirect(url_for('watchlist.index'))

        if uuid == 'first':
            uuid = list(datastore.data['watching'].keys()).pop()

        if not uuid in datastore.data['watching']:
            flash(gettext("No watch with the UUID {} found.").format(uuid), "error")
            flash("No watch with the UUID %s found." % (uuid), "error")
            return redirect(url_for('watchlist.index'))

        switch_processor = request.args.get('switch_processor')
@@ -46,18 +47,12 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe
        for p in processors.available_processors():
            if p[0] == switch_processor:
                datastore.data['watching'][uuid]['processor'] = switch_processor
                flash(gettext("Switched to mode - {}.").format(p[1]))
                flash(f"Switched to mode - {p[1]}.")
                datastore.clear_watch_history(uuid)
                redirect(url_for('ui_edit.edit_page', uuid=uuid))

        # be sure we update with a copy instead of accidentally editing the live object by reference
        default = None
        while not default:
            try:
                default = deepcopy(datastore.data['watching'][uuid])
            except RuntimeError as e:
                # Dictionary changed
                continue
        default = deepcopy(datastore.data['watching'][uuid])

        # Defaults for proxy choice
        if datastore.proxy_list is not None:  # When enabled
@@ -71,13 +66,8 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe
        processor_name = datastore.data['watching'][uuid].get('processor', '')
        processor_classes = next((tpl for tpl in processors.find_processors() if tpl[1] == processor_name), None)
        if not processor_classes:
            flash(gettext("Could not load '{}' processor, processor plugin might be missing. Please select a different processor.").format(processor_name), 'error')
            # Fall back to default processor so user can still edit and change processor
            processor_classes = next((tpl for tpl in processors.find_processors() if tpl[1] == 'text_json_diff'), None)
            if not processor_classes:
                # If even text_json_diff is missing, something is very wrong
                flash(gettext("Could not load '{}' processor, processor plugin might be missing.").format(processor_name), 'error')
                return redirect(url_for('watchlist.index'))
            flash(f"Cannot load the edit form for processor/plugin '{processor_classes[1]}', plugin missing?", 'error')
            return redirect(url_for('watchlist.index'))

        parent_module = processors.get_parent_module(processor_classes[0])

@@ -106,39 +96,6 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe
        form.datastore = datastore
        form.watch = default

        # Load processor-specific config from JSON file for GET requests
        if request.method == 'GET' and processor_name:
            try:
                from changedetectionio.processors.base import difference_detection_processor
                # Create a processor instance to access config methods
                processor_instance = difference_detection_processor(datastore, uuid)
                # Use processor name as filename so each processor keeps its own config
                config_filename = f'{processor_name}.json'
                processor_config = processor_instance.get_extra_watch_config(config_filename)

                if processor_config:
                    from wtforms.fields.form import FormField
                    # Populate processor-config-* fields from JSON
                    for config_key, config_value in processor_config.items():
                        if not isinstance(config_value, dict):
                            continue
                        # Try exact API-named field first (e.g., processor_config_restock_diff)
                        target_field = getattr(form, f'processor_config_{config_key}', None)
                        # Fallback: find any FormField sub-form whose fields cover config_value keys
                        if target_field is None:
                            for form_field in form:
                                if isinstance(form_field, FormField) and all(k in form_field.form._fields for k in config_value):
                                    target_field = form_field
                                    break
                        if target_field is not None:
                            for sub_key, sub_value in config_value.items():
                                sub_field = target_field.form._fields.get(sub_key)
                                if sub_field is not None:
                                    sub_field.data = sub_value
                                    logger.debug(f"Loaded processor config from {config_filename}: {sub_key} = {sub_value}")
            except Exception as e:
                logger.warning(f"Failed to load processor config: {e}")

        for p in datastore.extra_browsers:
            form.fetch_backend.choices.append(p)

@@ -157,6 +114,11 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe

        if request.method == 'POST' and form.validate():

            # If they changed processor, it makes sense to reset it.
            if datastore.data['watching'][uuid].get('processor') != form.data.get('processor'):
                datastore.data['watching'][uuid].clear_watch()
                flash("Reset watch history due to change of processor")

            extra_update_obj = {
                'consecutive_filter_failures': 0,
                'last_error' : False
@@ -167,12 +129,7 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe

            extra_update_obj['time_between_check'] = form.time_between_check.data

            # Handle processor-config-* fields separately (save to JSON, not datastore)
            # IMPORTANT: These must NOT be saved to url-watches.json, only to the processor-specific JSON file
            processor_config_data = processors.extract_processor_config_from_form_data(form.data)
            processors.save_processor_config(datastore, uuid, processor_config_data)

            # Ignore text
            form_ignore_text = form.ignore_text.data
            datastore.data['watching'][uuid]['ignore_text'] = form_ignore_text

@@ -210,19 +167,12 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe

            # Recast it if need be to right data Watch handler
            watch_class = processors.get_custom_watch_obj_for_processor(form.data.get('processor'))
            datastore.data['watching'][uuid] = watch_class(datastore_path=datastore.datastore_path, __datastore=datastore.data, default=datastore.data['watching'][uuid])
            datastore.data['watching'][uuid] = watch_class(datastore_path=datastore.datastore_path, default=datastore.data['watching'][uuid])
            flash("Updated watch - unpaused!" if request.args.get('unpause_on_save') else "Updated watch.")

            # Save the watch immediately
            datastore.data['watching'][uuid].commit()

            flash(gettext("Updated watch - unpaused!") if request.args.get('unpause_on_save') else gettext("Updated watch."))

            # Cleanup any browsersteps session for this watch
            try:
                from changedetectionio.blueprint.browser_steps import cleanup_session_for_watch
                cleanup_session_for_watch(uuid)
            except Exception as e:
                logger.debug(f"Error cleaning up browsersteps session: {e}")
            # Re #286 - We wait for syncing new data to disk in another thread every 60 seconds
            # But in the case something is added we should save straight away
            datastore.needs_write_urgent = True

            # Do not queue on edit if it's not within the time range

@@ -237,7 +187,7 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe

            tz_name = time_schedule_limit.get('timezone')
            if not tz_name:
                tz_name = datastore.data['settings']['application'].get('scheduler_timezone_default', os.getenv('TZ', 'UTC').strip())
                tz_name = datastore.data['settings']['application'].get('timezone', 'UTC')

            if time_schedule_limit and time_schedule_limit.get('enabled'):
                try:
@@ -252,17 +202,17 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe
            #############################
            if not datastore.data['watching'][uuid].get('paused') and is_in_schedule:
                # Queue the watch for immediate recheck, with a higher priority
                worker_pool.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))
                worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))

            # Diff page [edit] link should go back to diff page
            if request.args.get("next") and request.args.get("next") == 'diff':
                return redirect(url_for('ui.ui_diff.diff_history_page', uuid=uuid))
                return redirect(url_for('ui.ui_views.diff_history_page', uuid=uuid))

            return redirect(url_for('watchlist.index', tag=request.args.get("tag",'')))

        else:
            if request.method == 'POST' and not form.validate():
                flash(gettext("An error occurred, please see below."), "error")
                flash("An error occurred, please see below.", "error")

            # JQ is difficult to install on windows and must be manually added (outside requirements.txt)
            jq_support = True
@@ -273,32 +223,25 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe

            watch = datastore.data['watching'].get(uuid)

            # if system or watch is configured to need a chrome type browser
            system_uses_webdriver = datastore.data['settings']['application']['fetch_backend'] == 'html_webdriver'
            watch_needs_selenium_or_playwright = False
            if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver' or watch.get('fetch_backend', '').startswith('extra_browser_'):
                watch_needs_selenium_or_playwright = True


            from zoneinfo import available_timezones

            # Only works reliably with Playwright

            # Import the global plugin system
            from changedetectionio.pluggy_interface import collect_ui_edit_stats_extras, get_fetcher_capabilities

            # Get fetcher capabilities instead of hardcoded logic
            capabilities = get_fetcher_capabilities(watch, datastore)

            # Add processor capabilities from module
            capabilities['supports_visual_selector'] = getattr(parent_module, 'supports_visual_selector', False)
            capabilities['supports_text_filters_and_triggers'] = getattr(parent_module, 'supports_text_filters_and_triggers', False)
            capabilities['supports_text_filters_and_triggers_elements'] = getattr(parent_module, 'supports_text_filters_and_triggers_elements', False)
            capabilities['supports_request_type'] = getattr(parent_module, 'supports_request_type', False)

            app_rss_token = datastore.data['settings']['application'].get('rss_access_token'),

            c = [f"processor-{watch.get('processor')}"]
            if worker_pool.is_watch_running(uuid):
                c.append('checking-now')

            from changedetectionio.pluggy_interface import collect_ui_edit_stats_extras

            template_args = {
                'available_processors': processors.available_processors(),
                'available_timezones': sorted(available_timezones()),
                'browser_steps_config': browser_step_ui_config,
                'emailprefix': os.getenv('NOTIFICATION_MAIL_BUTTON_PREFIX', False),
                'extra_classes': ' '.join(c),
                'extra_notification_token_placeholder_info': datastore.get_unique_notification_token_placeholders_available(),
                'extra_processor_config': form.extra_tab_content(),
                'extra_title': f" - Edit - {watch.label}",
@@ -308,19 +251,16 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe
                'has_special_tag_options': _watch_has_tag_options_set(watch=watch),
                'jq_support': jq_support,
                'playwright_enabled': os.getenv('PLAYWRIGHT_DRIVER_URL', False),
                'app_rss_token': app_rss_token,
                'rss_uuid_feed' : {
                    'label': watch.label,
                    'url': url_for('rss.rss_single_watch', uuid=watch['uuid'], token=app_rss_token)
                },
                'settings_application': datastore.data['settings']['application'],
                'system_has_playwright_configured': os.getenv('PLAYWRIGHT_DRIVER_URL'),
                'system_has_webdriver_configured': os.getenv('WEBDRIVER_URL'),
                'ui_edit_stats_extras': collect_ui_edit_stats_extras(watch),
                'visual_selector_data_ready': datastore.visualselector_data_is_ready(watch_uuid=uuid),
                'timezone_default_config': datastore.data['settings']['application'].get('scheduler_timezone_default'),
                'timezone_default_config': datastore.data['settings']['application'].get('timezone'),
                'using_global_webdriver_wait': not default['webdriver_delay'],
                'uuid': uuid,
                'watch': watch,
                'capabilities': capabilities
                'watch_needs_selenium_or_playwright': watch_needs_selenium_or_playwright,
            }

            included_content = None
@@ -340,19 +280,17 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe

            return output

    @edit_blueprint.route("/edit/<uuid_str:uuid>/get-html", methods=['GET'])
    @edit_blueprint.route("/edit/<string:uuid>/get-html", methods=['GET'])
    @login_optionally_required
    def watch_get_latest_html(uuid):
        from io import BytesIO
        from flask import send_file
        import brotli

        if uuid == 'first':
            uuid = list(datastore.data['watching'].keys()).pop()
        watch = datastore.data['watching'].get(uuid)
        if watch and watch.history.keys() and os.path.isdir(watch.data_dir):
        if watch and watch.history.keys() and os.path.isdir(watch.watch_data_dir):
            latest_filename = list(watch.history.keys())[-1]
            html_fname = os.path.join(watch.data_dir, f"{latest_filename}.html.br")
            html_fname = os.path.join(watch.watch_data_dir, f"{latest_filename}.html.br")
            with open(html_fname, 'rb') as f:
                if html_fname.endswith('.br'):
                    # Read and decompress the Brotli file
@@ -367,68 +305,34 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe
            # Return a 500 error
            abort(500)

    @edit_blueprint.route("/edit/<uuid_str:uuid>/get-data-package", methods=['GET'])
    @login_optionally_required
    def watch_get_data_package(uuid):
        """Download all data for a single watch as a zip file"""
        from io import BytesIO
        from flask import send_file
        import zipfile
        from pathlib import Path
        import datetime

        watch = datastore.data['watching'].get(uuid)
        if not watch:
            abort(404)

        # Create zip in memory
        memory_file = BytesIO()

        with zipfile.ZipFile(memory_file, 'w',
                             compression=zipfile.ZIP_DEFLATED,
                             compresslevel=8) as zipObj:

            # Add the watch's JSON file if it exists
            watch_json_path = os.path.join(watch.data_dir, 'watch.json')
            if os.path.isfile(watch_json_path):
                zipObj.write(watch_json_path,
                             arcname=os.path.join(uuid, 'watch.json'),
                             compress_type=zipfile.ZIP_DEFLATED,
                             compresslevel=8)

            # Add all files in the watch data directory
            if os.path.isdir(watch.data_dir):
                for f in Path(watch.data_dir).glob('*'):
                    if f.is_file() and f.name != 'watch.json':  # Skip watch.json since we already added it
                        zipObj.write(f,
                                     arcname=os.path.join(uuid, f.name),
                                     compress_type=zipfile.ZIP_DEFLATED,
                                     compresslevel=8)

        # Seek to beginning of file
        memory_file.seek(0)

        # Generate filename with timestamp
        timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
        filename = f"watch-data-{uuid[:8]}-{timestamp}.zip"

        return send_file(memory_file,
                         as_attachment=True,
                         download_name=filename,
                         mimetype='application/zip')

    # Ajax callback
    @edit_blueprint.route("/edit/<uuid_str:uuid>/preview-rendered", methods=['POST'])
    @edit_blueprint.route("/edit/<string:uuid>/preview-rendered", methods=['POST'])
    @login_optionally_required
    def watch_get_preview_rendered(uuid):
        '''For when viewing the "preview" of the rendered text from inside of Edit'''
        from flask import jsonify

        if uuid == 'first':
            uuid = list(datastore.data['watching'].keys()).pop()
        from changedetectionio.processors.text_json_diff import prepare_filter_prevew
        result = prepare_filter_prevew(watch_uuid=uuid, form_data=request.form, datastore=datastore)
        return jsonify(result)

        watch = datastore.data["watching"].get(uuid)

        if not watch:
            return jsonify({
                "error": "Watch not found",
                "code": 400
            }), 400

        if not watch.history_n:
            return jsonify({
                "error": "Watch has empty history, at least one fetch of the page is required.",
                "code": 400
            }), 400
        #
        try:
            result = prepare_filter_prevew(watch_uuid=uuid, form_data=request.form, datastore=datastore)
            return jsonify(result)
        except Exception as e:
            return abort(500, str(e))


    @edit_blueprint.route("/highlight_submit_ignore_url", methods=['POST'])
    @login_optionally_required
@@ -449,9 +353,6 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe
        s = re.sub(r'[0-9]+', r'\\d+', s)
        datastore.data["watching"][uuid]['ignore_text'].append('/' + s + '/')

        # Save the updated ignore_text
        datastore.data["watching"][uuid].commit()

        return f"<a href={url_for('ui.ui_preview.preview_page', uuid=uuid)}>Click to preview</a>"
        return f"<a href={url_for('ui.ui_views.preview_page', uuid=uuid)}>Click to preview</a>"

    return edit_blueprint
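For orientation (not part of this diff): per the loading loop above, the per-processor config file is named after the processor (e.g. text_json_diff.json or restock_diff.json) and maps a config key to a dict of sub-form field values. A hypothetical restock_diff.json could look like:

{
    "restock_diff": {
        "price_change_threshold": 10,
        "in_stock_only": true
    }
}

The sub-keys here are invented for the example; the real keys are whatever fields the processor's FormField sub-form defines, and the save path goes through extract_processor_config_from_form_data() / save_processor_config() as shown above.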
@@ -14,12 +14,11 @@ def construct_blueprint(datastore: ChangeDetectionStore):
    @notification_blueprint.route("/notification/send-test/", methods=['POST'])
    @login_optionally_required
    def ajax_callback_send_notification_test(watch_uuid=None):
        from changedetectionio.notification_service import NotificationContextData, set_basic_notification_vars

        # watch_uuid could be unset in the case it's used in the tag editor or global settings
        import apprise
        from changedetectionio.notification.handler import process_notification
        from changedetectionio.notification.apprise_plugin.assets import apprise_asset
        from changedetectionio.jinja2_custom import render as jinja_render

        from changedetectionio.notification.apprise_plugin.custom_handlers import apprise_http_custom_handler

@@ -38,7 +37,11 @@ def construct_blueprint(datastore: ChangeDetectionStore):
            return make_response("Error: You must have at least one watch configured for 'test notification' to work", 400)

        watch = datastore.data['watching'].get(watch_uuid)
        notification_urls = request.form.get('notification_urls','').strip().splitlines()

        notification_urls = None

        if request.form.get('notification_urls'):
            notification_urls = request.form['notification_urls'].strip().splitlines()

        if not notification_urls:
            logger.debug("Test notification - Trying by group/tag in the edit form if available")
@@ -58,26 +61,20 @@ def construct_blueprint(datastore: ChangeDetectionStore):
            return 'Error: No Notification URLs set/found'

        for n_url in notification_urls:
            # We are ONLY validating the apprise:// part here, convert all tags to something so as not to break apprise URLs
            generic_notification_context_data = NotificationContextData()
            generic_notification_context_data.set_random_for_validation()
            n_url = jinja_render(template_str=n_url, **generic_notification_context_data).strip()
            if len(n_url.strip()):
                if not apobj.add(n_url):
                    return f'Error: {n_url} is not a valid AppRise URL.'

        try:
            # use the same as when it is triggered, but then override it with the form test values
            n_object = NotificationContextData({
            n_object = {
                'watch_url': request.form.get('window_url', "https://changedetection.io"),
                'notification_urls': notification_urls
            })
            }

            # Only use if present, if not set in n_object it should use the default system value
            if 'notification_format' in request.form and request.form['notification_format'].strip():
                n_object['notification_format'] = request.form.get('notification_format', '').strip()
            else:
                n_object['notification_format'] = datastore.data['settings']['application'].get('notification_format')

            if 'notification_title' in request.form and request.form['notification_title'].strip():
                n_object['notification_title'] = request.form.get('notification_title', '').strip()
@@ -94,31 +91,10 @@ def construct_blueprint(datastore: ChangeDetectionStore):
                n_object['notification_body'] = "Test body"

            n_object['as_async'] = False

            # Same as in the notification service, should be refactored
            dates = list(watch.history.keys())
            trigger_text = ''
            snapshot_contents = ''

            # Could be called as a 'test notification' with only 1 snapshot available
            prev_snapshot = "Example text: example test\nExample text: change detection is cool\nExample text: some more examples\n"
            current_snapshot = "Example text: example test\nExample text: change detection is fantastic\nExample text: even more examples\nExample text: a lot more examples"

            if len(dates) > 1:
                prev_snapshot = watch.get_history_snapshot(timestamp=dates[-2])
                current_snapshot = watch.get_history_snapshot(timestamp=dates[-1])

            n_object.update(set_basic_notification_vars(current_snapshot=current_snapshot,
                                                        prev_snapshot=prev_snapshot,
                                                        watch=watch,
                                                        triggered_text=trigger_text,
                                                        timestamp_changed=dates[-1] if dates else None))


            n_object.update(watch.extra_notification_token_values())
            sent_obj = process_notification(n_object, datastore)

        except Exception as e:
            logger.error(e)
            e_str = str(e)
            # Remove this text which is not important and floods the container
            e_str = e_str.replace(
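To exercise this test endpoint from outside the UI, a request might look like the following sketch. The host and port are assumptions, and CSRF protection plus login (if enabled) would also have to be satisfied; the route and form field names come from the handler above.

import requests

# Hypothetical local instance; auth and CSRF handling omitted for brevity
resp = requests.post(
    "http://127.0.0.1:5000/notification/send-test/",
    data={
        "notification_urls": "mailto://user:pass@example.com",
        "notification_title": "Test title",
        "notification_body": "Test body",
    },
)
print(resp.status_code, resp.text)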
@@ -1,188 +0,0 @@
from flask import Blueprint, request, url_for, flash, render_template, redirect
from flask_babel import gettext
import time
from loguru import logger

from changedetectionio.store import ChangeDetectionStore
from changedetectionio.auth_decorator import login_optionally_required
from changedetectionio import html_tools

def construct_blueprint(datastore: ChangeDetectionStore):
    preview_blueprint = Blueprint('ui_preview', __name__, template_folder="../ui/templates")


    @preview_blueprint.route("/preview/<uuid_str:uuid>", methods=['GET', 'POST'])
    @login_optionally_required
    def preview_page(uuid):
        """
        Render the preview page for a watch.

        This route is processor-aware: it delegates rendering to the processor's
        preview.py module, allowing different processor types to provide
        custom visualizations:
        - text_json_diff: Text preview with syntax highlighting
        - image_ssim_diff: Image preview with proper rendering
        - restock_diff: Could show latest price/stock data

        Each processor implements processors/{type}/preview.py::render()
        If a processor doesn't have a preview module, falls back to default text preview.
        """

        if uuid == 'first':
            uuid = list(datastore.data['watching'].keys()).pop()
        try:
            watch = datastore.data['watching'][uuid]
        except KeyError:
            flash(gettext("No history found for the specified link, bad link?"), "error")
            return redirect(url_for('watchlist.index'))

        # Get the processor type for this watch
        processor_name = watch.get('processor', 'text_json_diff')

        # Try to get the processor's preview module (works for both built-in and plugin processors)
        from changedetectionio.processors import get_processor_submodule
        processor_module = get_processor_submodule(processor_name, 'preview')

        # Call the processor's render() function
        if processor_module and hasattr(processor_module, 'render'):
            return processor_module.render(
                watch=watch,
                datastore=datastore,
                request=request,
                url_for=url_for,
                render_template=render_template,
                flash=flash,
                redirect=redirect
            )

        # Fallback: if processor doesn't have a preview module, use default text preview
        content = []
        versions = []
        timestamp = None

        extra_stylesheets = [url_for('static_content', group='styles', filename='diff.css')]
        is_html_webdriver = watch.fetcher_supports_screenshots

        triggered_line_numbers = []
        ignored_line_numbers = []
        blocked_line_numbers = []

        if datastore.data['watching'][uuid].history_n == 0 and (watch.get_error_text() or watch.get_error_snapshot()):
            flash(gettext("Preview unavailable - No fetch/check completed or triggers not reached"), "error")
        else:
            # So prepare the latest preview or not
            preferred_version = request.values.get('version') if request.method == 'POST' else request.args.get('version')


            versions = list(watch.history.keys())
            timestamp = versions[-1]
            if preferred_version and preferred_version in versions:
                timestamp = preferred_version

            try:
                versions = list(watch.history.keys())
                content = watch.get_history_snapshot(timestamp=timestamp)

                triggered_line_numbers = html_tools.strip_ignore_text(content=content,
                                                                      wordlist=watch.get('trigger_text'),
                                                                      mode='line numbers'
                                                                      )
                ignored_line_numbers = html_tools.strip_ignore_text(content=content,
                                                                    wordlist=watch.get('ignore_text'),
                                                                    mode='line numbers'
                                                                    )
                blocked_line_numbers = html_tools.strip_ignore_text(content=content,
                                                                    wordlist=watch.get("text_should_not_be_present"),
                                                                    mode='line numbers'
                                                                    )
            except Exception as e:
                content.append({'line': f"File doesn't exist or unable to read timestamp {timestamp}", 'classes': ''})

        from changedetectionio.pluggy_interface import get_fetcher_capabilities
        capabilities = get_fetcher_capabilities(watch, datastore)

        output = render_template("preview.html",
                                 capabilities=capabilities,
                                 content=content,
                                 current_diff_url=watch['url'],
                                 current_version=timestamp,
                                 extra_stylesheets=extra_stylesheets,
                                 extra_title=f" - Diff - {watch.label} @ {timestamp}",
                                 highlight_ignored_line_numbers=ignored_line_numbers,
                                 highlight_triggered_line_numbers=triggered_line_numbers,
                                 highlight_blocked_line_numbers=blocked_line_numbers,
                                 history_n=watch.history_n,
                                 is_html_webdriver=is_html_webdriver,
                                 last_error=watch['last_error'],
                                 last_error_screenshot=watch.get_error_snapshot(),
                                 last_error_text=watch.get_error_text(),
                                 screenshot=watch.get_screenshot(),
                                 uuid=uuid,
                                 versions=versions,
                                 )

        return output

    @preview_blueprint.route("/preview/<uuid_str:uuid>/processor-asset/<string:asset_name>", methods=['GET'])
    @login_optionally_required
    def processor_asset(uuid, asset_name):
        """
        Serve processor-specific binary assets for preview (images, files, etc.).

        This route is processor-aware: it delegates to the processor's
        preview.py module, allowing different processor types to serve
        custom assets without embedding them as base64 in templates.

        This solves memory issues with large binary data by streaming them
        as separate HTTP responses instead of embedding them in the HTML template.

        Each processor implements processors/{type}/preview.py::get_asset()
        which returns (binary_data, content_type, cache_control_header).

        Example URLs:
        - /preview/{uuid}/processor-asset/screenshot?version=123456789
        """
        from flask import make_response

        if uuid == 'first':
            uuid = list(datastore.data['watching'].keys()).pop()
        try:
            watch = datastore.data['watching'][uuid]
        except KeyError:
            flash(gettext("No history found for the specified link, bad link?"), "error")
            return redirect(url_for('watchlist.index'))

        # Get the processor type for this watch
        processor_name = watch.get('processor', 'text_json_diff')

        # Try to get the processor's preview module (works for both built-in and plugin processors)
        from changedetectionio.processors import get_processor_submodule
        processor_module = get_processor_submodule(processor_name, 'preview')

        # Call the processor's get_asset() function
        if processor_module and hasattr(processor_module, 'get_asset'):
            result = processor_module.get_asset(
                asset_name=asset_name,
                watch=watch,
                datastore=datastore,
                request=request
            )

            if result is None:
                from flask import abort
                abort(404, description=f"Asset '{asset_name}' not found")

            binary_data, content_type, cache_control = result

            response = make_response(binary_data)
            response.headers['Content-Type'] = content_type
            if cache_control:
                response.headers['Cache-Control'] = cache_control
            return response
        else:
            logger.warning(f"Processor {processor_name} does not implement get_asset()")
            from flask import abort
            abort(404, description=f"Processor '{processor_name}' does not support assets")

    return preview_blueprint
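The per-processor preview contract described in the docstring above can be pictured with a sketch like this; the module path comes from the docstring, while the template name and context keys are assumptions made for the example.

# processors/my_processor/preview.py -- hypothetical example, not part of this diff
def render(watch, datastore, request, url_for, render_template, flash, redirect):
    """Return a fully rendered preview page for this processor type."""
    versions = list(watch.history.keys())
    if not versions:
        flash("No history yet for this watch", "error")
        return redirect(url_for('watchlist.index'))

    # Same ?version= selection convention as the fallback preview above
    timestamp = request.args.get('version') or versions[-1]

    return render_template("my_processor_preview.html",  # assumed template name
                           watch=watch,
                           current_version=timestamp,
                           versions=versions)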
@@ -9,12 +9,13 @@
  <input type="hidden" name="csrf_token" value="{{ csrf_token() }}" >
  <fieldset>
    <div class="pure-control-group">
      {{ _('This will remove version history (snapshots) for ALL watches, but keep your list of URLs!') }} <br />
      {{ _('You may like to use the') }} <strong>{{ _('BACKUP') }}</strong> {{ _('link first.') }}<br />
      This will remove version history (snapshots) for ALL watches, but keep
      your list of URLs! <br />
      You may like to use the <strong>BACKUP</strong> link first.<br />
    </div>
    <br />
    <div class="pure-control-group">
      <label for="confirmtext">{{ _('Confirmation text') }}</label>
      <label for="confirmtext">Confirmation text</label>
      <input
        type="text"
        id="confirmtext"
@@ -24,19 +25,20 @@
        size="10"
      />
      <span class="pure-form-message-inline"
        >{{ _('Type in the word') }} <strong>{{ _('clear') }}</strong> {{ _('to confirm that you understand.') }}</span
        >Type in the word <strong>clear</strong> to confirm that you
        understand.</span
      >
    </div>
    <br />
    <div class="pure-control-group">
      <button type="submit" class="pure-button pure-button-primary">
        {{ _('Clear History!') }}
        Clear History!
      </button>
    </div>
    <br />
    <div class="pure-control-group">
      <a href="{{url_for('watchlist.index')}}" class="pure-button button-cancel"
        >{{ _('Cancel') }}</a
        >Cancel</a
      >
    </div>
  </fieldset>

@@ -1,12 +0,0 @@
<ul id="highlightSnippetActions">
  <li>
    <button class="pure-button pure-button-primary" onclick="diffToJpeg()" title="{{ _('Share diff as image') }}">{{ _('Share as Image') }}</button>
  </li>
  <li>
    <a class="pure-button pure-button-primary" data-mode="exact" href="javascript:void(0);">{{ _('Ignore any lines matching') }}</a>
  </li>
  <li>
    <a class="pure-button pure-button-primary" data-mode="digit-regex" href="javascript:void(0);" >{{ _('Ignore any lines matching excluding digits') }}</a>
  </li>
</ul>

@@ -1,166 +0,0 @@
{% extends 'base.html' %}
{% from '_helpers.html' import render_field, render_checkbox_field, render_button %}
{% block content %}
<script>
  const screenshot_url="{{url_for('static_content', group='screenshot', filename=uuid)}}";
  {% if last_error_screenshot %}
  const error_screenshot_url="{{url_for('static_content', group='screenshot', filename=uuid, error_screenshot=1) }}";
  {% endif %}

  const highlight_submit_ignore_url="{{url_for('ui.ui_edit.highlight_submit_ignore_url', uuid=uuid)}}";
  const watch_url= {{watch_a.link|tojson}};

  // Initial scroll position: if set, scroll to this line number in #difference on page load
  const initialScrollToLineNumber = {{ initial_scroll_line_number|default('null') }};
</script>
<script src="https://cdn.jsdelivr.net/npm/html2canvas@1.4.1/dist/html2canvas.min.js"></script>
<script src="{{url_for('static_content', group='js', filename='plugins.js')}}"></script>
<script src="https://cdn.jsdelivr.net/npm/piexifjs@1.0.6/piexif.min.js"></script>
<script src="{{url_for('static_content', group='js', filename='snippet-to-image.js')}}"></script>
<script src="{{url_for('static_content', group='js', filename='diff-overview.js')}}" defer></script>


<div id="settings">
  <form class="pure-form " action="{{ url_for("ui.ui_diff.diff_history_page", uuid=uuid) }}" method="GET" id="diff-form">
    <fieldset class="diff-fieldset">
      {% if versions|length >= 1 %}
      <span style="white-space: nowrap;">
        <label id="change-from" for="diff-from-version" class="from-to-label">{{ _('From') }}</label>
        <select id="diff-from-version" name="from_version" class="needs-localtime">
          {%- for version in versions|reverse -%}
          <option value="{{ version }}" {% if version== from_version %} selected="" {% endif %}>
            {{ version }}{#{% if loop.index == 2 %} (Previous){% endif %}#}
          </option>
          {%- endfor -%}
        </select>
      </span>
      <span style="white-space: nowrap;">
        <label id="change-to" for="diff-to-version" class="from-to-label">{{ _('To') }}</label>
        <select id="diff-to-version" name="to_version" class="needs-localtime">
          {%- for version in versions|reverse -%}
          <option value="{{ version }}" {% if version== to_version %} selected="" {% endif %}>
            {{ version }}{#{% if loop.first %} (Current){% endif %}#}
          </option>
          {%- endfor -%}
        </select>
      </span>
      {#<button type="submit" class="pure-button pure-button-primary reset-margin">Go</button>#}
      {% endif %}
    </fieldset>
    <fieldset id="diff-style">
      <span>
        <label for="diffWords" class="pure-checkbox">
          <input type="radio" name="type" id="diffWords" value="diffWords" {% if diff_prefs.type == 'diffWords' %}checked=""{% endif %}> {{ _('Words') }}</label>
      </span>
      <span>
        <label for="diffLines" class="pure-checkbox">
          <input type="radio" name="type" id="diffLines" value="diffLines" {% if diff_prefs.type == 'diffLines' %}checked=""{% endif %}> {{ _('Lines') }}</label>
      </span>
      <span>
        <label for="ignoreWhitespace" class="pure-checkbox" id="label-diff-ignorewhitespace">
          <input type="checkbox" id="ignoreWhitespace" name="ignoreWhitespace" {% if diff_prefs.ignoreWhitespace %}checked=""{% endif %}> {{ _('Ignore Whitespace') }}</label>
      </span>
      <span>
        <label for="changesOnly" class="pure-checkbox" id="label-diff-changes">
          <input type="checkbox" id="changesOnly" name="changesOnly" {% if diff_prefs.changesOnly %}checked=""{% endif %}> {{ _('Same/non-changed') }}</label>
      </span>
      <span>
        <label for="removed" class="pure-checkbox" id="label-diff-removed">
          <input type="checkbox" id="removed" name="removed" {% if diff_prefs.removed %}checked=""{% endif %}> {{ _('Removed') }}</label>
      </span>
      <span>
        <label for="added" class="pure-checkbox" id="label-diff-added">
          <input type="checkbox" id="added" name="added" {% if diff_prefs.added %}checked=""{% endif %}> {{ _('Added') }}</label>
      </span>
      <span>
        <label for="replaced" class="pure-checkbox" id="label-diff-replaced">
          <input type="checkbox" id="replaced" name="replaced" {% if diff_prefs.replaced %}checked=""{% endif %}> {{ _('Replaced') }}</label>
      </span>
    </fieldset>
    {%- if versions|length >= 2 -%}
    <div id="keyboard-nav">
      <strong>{{ _('Keyboard:') }} </strong>
      <a href="" class="pure-button pure-button-primary" id="btn-previous"> ← {{ _('Previous') }}</a>
      <a class="pure-button pure-button-primary" id="btn-next" href=""> → {{ _('Next') }}</a>
    </div>
    {%- endif -%}
  </form>
</div>

<div id="diff-jump" style="display:none;"><!-- disabled for now -->
  <a id="jump-next-diff" title="{{ _('Jump to next difference') }}">{{ _('Jump') }}</a>
</div>

<script src="{{url_for('static_content', group='js', filename='tabs.js')}}" defer></script>
<div class="tabs">
  <ul>
    {% if last_error_text %}<li class="tab" id="error-text-tab"><a href="#error-text">{{ _('Error Text') }}</a></li> {% endif %}
    {% if last_error_screenshot %}<li class="tab" id="error-screenshot-tab"><a href="#error-screenshot">{{ _('Error Screenshot') }}</a></li> {% endif %}
    <li class="tab" id="text-tab"><a href="#text">{{ _('Text') }}</a></li>
    <li class="tab" id="screenshot-tab"><a href="#screenshot">{{ _('Current screenshot') }}</a></li>
    <li class="tab" id="extract-tab"><a href="{{ url_for('ui.ui_diff.diff_history_page_extract_GET', uuid=uuid)}}">{{ _('Extract Data') }}</a></li>
  </ul>
</div>

<div id="diff-ui">
  <div class="tab-pane-inner" id="error-text">
    <div class="snapshot-age error">{{watch_a.error_text_ctime|format_seconds_ago}} {{ _('seconds ago.') }}</div>
    <pre>
{{ last_error_text }}
    </pre>
  </div>

  <div class="tab-pane-inner" id="error-screenshot">
    <div class="snapshot-age error">{{watch_a.snapshot_error_screenshot_ctime|format_seconds_ago}} {{ _('seconds ago') }}</div>
    <img id="error-screenshot-img" style="max-width: 80%" alt="{{ _('Current error-ing screenshot from most recent request') }}" >
  </div>

  <div class="tab-pane-inner" id="text">
    {%- if (content | default('')).split('\n') | length > 100 -%}
    <div id="cell-diff-jump-visualiser" style="user-select: none;">
      {%- for cell in diff_cell_grid -%}
      <div{% if cell.class %} class="{{ cell.class }}"{% endif %}></div>
      {%- endfor -%}
    </div>
    {%- endif -%}
    {%- if password_enabled_and_share_is_off -%}
    <div class="tip">{{ _('Pro-tip: You can enable') }} <strong>{{ _('"share access when password is enabled"') }}</strong> {{ _('from settings.') }}
    </div>
    {%- endif -%}
    <div id="text-diff-heading-area" style="user-select: none;">
      <div class="snapshot-age"><span>{{ from_version|format_timestamp_timeago }}</span>
        {%- if note -%}<span class="note"><strong>{{ note }}</strong></span>{%- endif -%}
        <a href="{{ url_for("ui.ui_preview.preview_page", uuid=uuid) }}">{{ _('Goto single snapshot') }}</a>
      </div>
    </div>
    <pre id="difference" style="border-left: 2px solid #ddd;">{{ content| diff_unescape_difference_spans }}</pre>
    <div id="diff-visualiser-area-after" style="user-select: none;">
      <strong>{{ _('Tip:') }}</strong> {{ _('Highlight text to share or add to ignore lists.') }}
    </div>
  </div>

  <div class="tab-pane-inner" id="screenshot">
    <div class="tip">
      {{ _('For now, Differences are performed on text, not graphically, only the latest screenshot is available.') }}
    </div>
    {% if is_html_webdriver %}
      {% if screenshot %}
      <div class="snapshot-age">{{watch_a.snapshot_screenshot_ctime|format_timestamp_timeago}}</div>
      <img style="max-width: 80%" id="screenshot-img" alt="{{ _('Current screenshot from most recent request') }}" >
      {% else %}
      {{ _('No screenshot available just yet! Try rechecking the page.') }}
      {% endif %}
    {% else %}
    <strong>{{ _('Screenshot requires Playwright/WebDriver enabled') }}</strong>
    {% endif %}
  </div>

</div>

<script>
  const newest_version_timestamp = {{newest_version_timestamp}};
</script>
<script src="{{url_for('static_content', group='js', filename='diff-render.js')}}"></script>


{% endblock %}
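A side note on this template, since the form above submits with method="GET": any rendered diff view is addressable by URL, for example (the uuid and timestamps are placeholders):

    /diff/0e5bbe9a-0000-0000-0000-000000000000?from_version=1700000000&to_version=1700003600&type=diffWords&ignoreWhitespace=on

The query parameter names (from_version, to_version, type, and the checkbox names) are exactly those defined by the inputs in the template; the "on" value is the standard serialization of a checked HTML checkbox.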
@@ -1,12 +1,199 @@
from flask import Blueprint, request, redirect, url_for, flash
from flask_babel import gettext
from flask import Blueprint, request, redirect, url_for, flash, render_template, make_response, send_from_directory, abort
import os
import time
from loguru import logger

from changedetectionio.store import ChangeDetectionStore
from changedetectionio.auth_decorator import login_optionally_required
from changedetectionio import worker_pool

from changedetectionio import html_tools
from changedetectionio import worker_handler

def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMetaData, watch_check_update):
    views_blueprint = Blueprint('ui_views', __name__, template_folder="../ui/templates")

    @views_blueprint.route("/preview/<string:uuid>", methods=['GET'])
    @login_optionally_required
    def preview_page(uuid):
        content = []
        versions = []
        timestamp = None

        # More for testing, possible to return the first/only
        if uuid == 'first':
            uuid = list(datastore.data['watching'].keys()).pop()

        try:
            watch = datastore.data['watching'][uuid]
        except KeyError:
            flash("No history found for the specified link, bad link?", "error")
            return redirect(url_for('watchlist.index'))

        system_uses_webdriver = datastore.data['settings']['application']['fetch_backend'] == 'html_webdriver'
        extra_stylesheets = [url_for('static_content', group='styles', filename='diff.css')]

        is_html_webdriver = False
        if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver' or watch.get('fetch_backend', '').startswith('extra_browser_'):
            is_html_webdriver = True
        triggered_line_numbers = []
        if datastore.data['watching'][uuid].history_n == 0 and (watch.get_error_text() or watch.get_error_snapshot()):
            flash("Preview unavailable - No fetch/check completed or triggers not reached", "error")
        else:
            # So prepare the latest preview or not
            preferred_version = request.args.get('version')
            versions = list(watch.history.keys())
            timestamp = versions[-1]
            if preferred_version and preferred_version in versions:
                timestamp = preferred_version

            try:
                versions = list(watch.history.keys())
                content = watch.get_history_snapshot(timestamp)

                triggered_line_numbers = html_tools.strip_ignore_text(content=content,
                                                                      wordlist=watch['trigger_text'],
                                                                      mode='line numbers'
                                                                      )

            except Exception as e:
                content.append({'line': f"File doesn't exist or unable to read timestamp {timestamp}", 'classes': ''})

        output = render_template("preview.html",
                                 content=content,
                                 current_version=timestamp,
                                 history_n=watch.history_n,
                                 extra_stylesheets=extra_stylesheets,
                                 extra_title=f" - Diff - {watch.label} @ {timestamp}",
                                 triggered_line_numbers=triggered_line_numbers,
                                 current_diff_url=watch['url'],
                                 screenshot=watch.get_screenshot(),
                                 watch=watch,
                                 uuid=uuid,
                                 is_html_webdriver=is_html_webdriver,
                                 last_error=watch['last_error'],
                                 last_error_text=watch.get_error_text(),
                                 last_error_screenshot=watch.get_error_snapshot(),
                                 versions=versions
                                 )

        return output

    @views_blueprint.route("/diff/<string:uuid>", methods=['POST'])
    @login_optionally_required
    def diff_history_page_build_report(uuid):
        from changedetectionio import forms

        # More for testing, possible to return the first/only
        if uuid == 'first':
            uuid = list(datastore.data['watching'].keys()).pop()

        try:
            watch = datastore.data['watching'][uuid]
        except KeyError:
            flash("No history found for the specified link, bad link?", "error")
            return redirect(url_for('watchlist.index'))

        # For submission of requesting an extract
        extract_form = forms.extractDataForm(request.form)
        if not extract_form.validate():
            flash("An error occurred, please see below.", "error")

        else:
            extract_regex = request.form.get('extract_regex').strip()
            output = watch.extract_regex_from_all_history(extract_regex)
            if output:
                watch_dir = os.path.join(datastore.datastore_path, uuid)
                response = make_response(send_from_directory(directory=watch_dir, path=output, as_attachment=True))
                response.headers['Content-type'] = 'text/csv'
                response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
                response.headers['Pragma'] = 'no-cache'
                response.headers['Expires'] = "0"
                return response

            flash('Nothing matches that RegEx', 'error')
            redirect(url_for('ui_views.diff_history_page', uuid=uuid) + '#extract')

    @views_blueprint.route("/diff/<string:uuid>", methods=['GET'])
    @login_optionally_required
    def diff_history_page(uuid):
        from changedetectionio import forms

        # More for testing, possible to return the first/only
        if uuid == 'first':
            uuid = list(datastore.data['watching'].keys()).pop()

        extra_stylesheets = [url_for('static_content', group='styles', filename='diff.css')]
        try:
            watch = datastore.data['watching'][uuid]
        except KeyError:
            flash("No history found for the specified link, bad link?", "error")
            return redirect(url_for('watchlist.index'))

        # For submission of requesting an extract
        extract_form = forms.extractDataForm(request.form)

        history = watch.history
        dates = list(history.keys())

        # If a "from_version" was requested, then find it (or the closest one)
        # Also set "from version" to be the closest version to the one that was last viewed.

        best_last_viewed_timestamp = watch.get_from_version_based_on_last_viewed
        from_version_timestamp = best_last_viewed_timestamp if best_last_viewed_timestamp else dates[-2]
        from_version = request.args.get('from_version', from_version_timestamp )

        # Use the current one if nothing was specified
        to_version = request.args.get('to_version', str(dates[-1]))

        try:
            to_version_file_contents = watch.get_history_snapshot(timestamp=to_version)
        except Exception as e:
            logger.error(f"Unable to read watch history to-version for version {to_version}: {str(e)}")
            to_version_file_contents = f"Unable to read to-version at {to_version}.\n"

        try:
            from_version_file_contents = watch.get_history_snapshot(timestamp=from_version)
        except Exception as e:
            logger.error(f"Unable to read watch history from-version for version {from_version}: {str(e)}")
            from_version_file_contents = f"Unable to read from-version {from_version}.\n"

        screenshot_url = watch.get_screenshot()

        system_uses_webdriver = datastore.data['settings']['application']['fetch_backend'] == 'html_webdriver'

        is_html_webdriver = False
        if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver' or watch.get('fetch_backend', '').startswith('extra_browser_'):
            is_html_webdriver = True

        password_enabled_and_share_is_off = False
        if datastore.data['settings']['application'].get('password') or os.getenv("SALTED_PASS", False):
            password_enabled_and_share_is_off = not datastore.data['settings']['application'].get('shared_diff_access')

        datastore.set_last_viewed(uuid, time.time())

        output = render_template("diff.html",
                                 current_diff_url=watch['url'],
                                 from_version=str(from_version),
                                 to_version=str(to_version),
                                 extra_stylesheets=extra_stylesheets,
                                 extra_title=f" - Diff - {watch.label}",
                                 extract_form=extract_form,
                                 is_html_webdriver=is_html_webdriver,
                                 last_error=watch['last_error'],
                                 last_error_screenshot=watch.get_error_snapshot(),
                                 last_error_text=watch.get_error_text(),
                                 left_sticky=True,
                                 newest=to_version_file_contents,
                                 newest_version_timestamp=dates[-1],
                                 password_enabled_and_share_is_off=password_enabled_and_share_is_off,
                                 from_version_file_contents=from_version_file_contents,
                                 to_version_file_contents=to_version_file_contents,
                                 screenshot=screenshot_url,
                                 uuid=uuid,
                                 versions=dates,  # All except current/last
                                 watch_a=watch
                                 )

        return output

    @views_blueprint.route("/form/add/quickwatch", methods=['POST'])
    @login_optionally_required
@@ -21,22 +208,28 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe

        url = request.form.get('url').strip()
        if datastore.url_exists(url):
            flash(gettext('Warning, URL {} already exists').format(url), "notice")
            flash(f'Warning, URL {url} already exists', "notice")

        add_paused = request.form.get('edit_and_watch_submit_button') != None
        from changedetectionio import processors
        processor = request.form.get('processor', processors.get_default_processor())
        new_uuid = datastore.add_watch(url=url, tag=request.form.get('tags','').strip(), extras={'paused': add_paused, 'processor': processor})
        processor = request.form.get('processor', 'text_json_diff')
        extras = {'paused': add_paused, 'processor': processor}

        LLM_prompt = request.form.get('LLM_prompt', '').strip()
        if LLM_prompt:
            extras['LLM_prompt'] = LLM_prompt
            extras['LLM_send_type'] = request.form.get('LLM_send_type', 'text')

        new_uuid = datastore.add_watch(url=url, tag=request.form.get('tags').strip(), extras=extras)

        if new_uuid:
            if add_paused:
                flash(gettext('Watch added in Paused state, saving will unpause.'))
                flash('Watch added in Paused state, saving will unpause.')
                return redirect(url_for('ui.ui_edit.edit_page', uuid=new_uuid, unpause_on_save=1, tag=request.args.get('tag')))
            else:
                # Straight into the queue.
                worker_pool.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': new_uuid}))
                flash(gettext("Watch added."))
                worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': new_uuid}))
                flash("Watch added.")

        return redirect(url_for('watchlist.index', tag=request.args.get('tag','')))

    return views_blueprint
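Since the LLM_prompt / LLM_send_type handling above is the headline change on this branch, here is a sketch of driving the quickwatch endpoint directly. Host, port, auth and CSRF handling are assumptions omitted for brevity; the route and form field names come from the handler above.

import requests

# Hypothetical local instance
requests.post(
    "http://127.0.0.1:5000/form/add/quickwatch",
    data={
        "url": "https://example.com/pricing",
        "tags": "pricing",
        "LLM_prompt": "Tell me when the monthly price changes",
        "LLM_send_type": "text",  # the handler's default when a prompt is given
    },
)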
@@ -2,11 +2,10 @@ import os
|
||||
import time
|
||||
|
||||
from flask import Blueprint, request, make_response, render_template, redirect, url_for, flash, session
|
||||
from flask_login import current_user
|
||||
from flask_paginate import Pagination, get_page_parameter
|
||||
from flask_babel import gettext as _
|
||||
|
||||
from changedetectionio import forms
|
||||
from changedetectionio import processors
|
||||
from changedetectionio.store import ChangeDetectionStore
|
||||
from changedetectionio.auth_decorator import login_optionally_required
|
||||
|
||||
@@ -39,22 +38,18 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe
elif op == 'mute':
datastore.data['watching'][uuid].toggle_mute()

datastore.data['watching'][uuid].commit()
datastore.needs_write = True
return redirect(url_for('watchlist.index', tag = active_tag_uuid))

# Sort by last_changed and add the uuid which is usually the key..
sorted_watches = []
with_errors = request.args.get('with_errors') == "1"
unread_only = request.args.get('unread') == "1"
errored_count = 0
search_q = request.args.get('q').strip().lower() if request.args.get('q') else False
for uuid, watch in datastore.data['watching'].items():
if with_errors and not watch.get('last_error'):
continue

if unread_only and (watch.viewed or watch.last_changed == 0) :
continue

if active_tag_uuid and not active_tag_uuid in watch['tags']:
continue
if watch.get('last_error'):
@@ -74,14 +69,10 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe

pagination = Pagination(page=page,
total=total_count,
per_page=datastore.data['settings']['application'].get('pager_size', 50),
css_framework="semantic",
display_msg=_('displaying <b>{start} - {end}</b> {record_name} in total <b>{total}</b>'),
record_name=_('records'))
per_page=datastore.data['settings']['application'].get('pager_size', 50), css_framework="semantic")

sorted_tags = sorted(datastore.data['settings']['application'].get('tags').items(), key=lambda x: x[1]['title'])

proxy_list = datastore.proxy_list
output = render_template(
"watch-overview.html",
active_tag=active_tag,
@@ -89,38 +80,22 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe
app_rss_token=datastore.data['settings']['application'].get('rss_access_token'),
datastore=datastore,
errored_count=errored_count,
extra_classes='has-queue' if not update_q.empty() else '',
form=form,
generate_tag_colors=processors.generate_processor_badge_colors,
guid=datastore.data['app_guid'],
has_proxies=proxy_list,
has_proxies=datastore.proxy_list,
has_unviewed=datastore.has_unviewed,
hosted_sticky=os.getenv("SALTED_PASS", False) == False,
now_time_server=round(time.time()),
pagination=pagination,
processor_badge_css=processors.get_processor_badge_css(),
processor_badge_texts=processors.get_processor_badge_texts(),
processor_descriptions=processors.get_processor_descriptions(),
queue_size=update_q.qsize(),
queued_uuids=update_q.get_queued_uuids(),
queued_uuids=[q_uuid.item['uuid'] for q_uuid in update_q.queue],
search_q=request.args.get('q', '').strip(),
sort_attribute=request.args.get('sort') if request.args.get('sort') else request.cookies.get('sort'),
sort_order=request.args.get('order') if request.args.get('order') else request.cookies.get('order'),
system_default_fetcher=datastore.data['settings']['application'].get('fetch_backend'),
tags=sorted_tags,
unread_changes_count=datastore.unread_changes_count,
watches=sorted_watches
)

# Return freed template-building memory to the OS immediately.
# render_template allocates ~20MB of intermediate strings that are freed on return,
# but glibc keeps those pages mapped in its arenas as RSS. malloc_trim() forces
# glibc to release them, preventing RSS growth from concurrent Chrome connections.
try:
import ctypes
ctypes.CDLL('libc.so.6').malloc_trim(0)
except Exception:
pass
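The malloc_trim() trick above is glibc-specific. A standalone sketch of the same call, hedged so it silently no-ops on platforms without libc.so.6 (musl, macOS, Windows):

    import ctypes

    def release_free_heap_to_os():
        """Ask glibc to return free arena pages to the OS (reduces RSS).
        Does nothing where libc.so.6 or malloc_trim is unavailable."""
        try:
            libc = ctypes.CDLL('libc.so.6')
            libc.malloc_trim(0)  # 0 = trim as much as possible from the top of the heap
        except (OSError, AttributeError):
            pass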

if session.get('share-link'):
del (session['share-link'])

@@ -1,61 +1,11 @@
{%- extends 'base.html' -%}
{%- block content -%}
{%- set tips = [
_("Changedetection.io can monitor more than just web-pages! See our plugins!") ~ ' <a href="https://changedetection.io/plugins">' ~ _('More info') ~ '</a>',
_("You can also add 'shared' watches.") ~ ' <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Sharing-a-Watch">' ~ _('More info') ~ '</a>'
] -%}
{%- from '_helpers.html' import render_simple_field, render_field, render_nolabel_field, sort_by_title -%}
<script src="{{url_for('static_content', group='js', filename='jquery-3.6.0.min.js')}}"></script>
<script src="{{url_for('static_content', group='js', filename='watch-overview.js')}}" defer></script>
<script src="{{url_for('static_content', group='js', filename='modal.js')}}"></script>
<script>let nowtimeserver={{ now_time_server }};</script>
<script>let favicon_baseURL="{{ url_for('static_content', group='favicon', filename="PLACEHOLDER")}}";</script>
<script>
// Initialize Feather icons after the page loads
document.addEventListener('DOMContentLoaded', function() {
feather.replace();

// Intersection Observer for lazy loading favicons
// Only load favicon images when they enter the viewport
if ('IntersectionObserver' in window) {
const faviconObserver = new IntersectionObserver((entries, observer) => {
entries.forEach(entry => {
if (entry.isIntersecting) {
const img = entry.target;
const src = img.getAttribute('data-src');

if (src) {
// Load the actual favicon
img.src = src;
img.removeAttribute('data-src');
}

// Stop observing this image
observer.unobserve(img);
}
});
}, {
// Start loading slightly before the image enters viewport
rootMargin: '50px',
threshold: 0.01
});

// Observe all lazy favicon images
document.querySelectorAll('.lazy-favicon').forEach(img => {
faviconObserver.observe(img);
});
} else {
// Fallback for older browsers: load all favicons immediately
document.querySelectorAll('.lazy-favicon').forEach(img => {
const src = img.getAttribute('data-src');
if (src) {
img.src = src;
img.removeAttribute('data-src');
}
});
}
});
</script>
<style>
.checking-now .last-checked {
background-image: linear-gradient(to bottom, transparent 0%, rgba(0,0,0,0.05) 40%, rgba(0,0,0,0.1) 100%);
@@ -63,59 +13,31 @@ document.addEventListener('DOMContentLoaded', function() {
background-repeat: no-repeat;
transition: background-size 0.9s ease
}

/* Auto-generated processor badge colors */
{{ processor_badge_css|safe }}

/* Auto-generated tag colors */
{%- for uuid, tag in tags -%}
{%- if tag and tag.title -%}
{%- set class_name = tag.title|sanitize_tag_class -%}
{%- set colors = generate_tag_colors(tag.title) -%}
.button-tag.tag-{{ class_name }} {
background-color: {{ colors['light']['bg'] }};
color: {{ colors['light']['color'] }};
}

.watch-tag-list.tag-{{ class_name }} {
background-color: {{ colors['light']['bg'] }};
color: {{ colors['light']['color'] }};
}

html[data-darkmode="true"] .button-tag.tag-{{ class_name }} {
background-color: {{ colors['dark']['bg'] }};
color: {{ colors['dark']['color'] }};
}

html[data-darkmode="true"] .watch-tag-list.tag-{{ class_name }} {
background-color: {{ colors['dark']['bg'] }};
color: {{ colors['dark']['color'] }};
}
{%- endif -%}
{%- endfor -%}
</style>
<div class="box" id="form-quick-watch-add">

<form class="pure-form" action="{{ url_for('ui.ui_views.form_quick_watch_add', tag=active_tag_uuid) }}" method="POST" id="new-watch-form">
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" >
<fieldset>
<legend>{{ _('Add a new web page change detection watch') }}</legend>
<legend>Add a new web page change detection watch</legend>
<div id="watch-add-wrapper-zone">
{{ render_nolabel_field(form.url, placeholder="https://...", required=true) }}
{{ render_nolabel_field(form.watch_submit_button, title=_("Watch this URL!") ) }}
{{ render_nolabel_field(form.edit_and_watch_submit_button, title=_("Edit first then Watch") ) }}
{{ render_nolabel_field(form.watch_submit_button, title="Watch this URL!" ) }}
{{ render_nolabel_field(form.edit_and_watch_submit_button, title="Edit first then Watch") }}
</div>
<div id="watch-group-tag">
{{ render_field(form.tags, value=active_tag.title if active_tag_uuid else '', placeholder=_("Watch group / tag"), class="transparent-field") }}
<i data-feather="tag" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>
{{ render_field(form.tags, value=active_tag.title if active_tag_uuid else '', placeholder="Watch group / tag", class="transparent-field") }}
</div>

{%- include 'edit/llm_prompt.html' -%}

<div id="quick-watch-processor-type">
{{ render_simple_field(form.processor) }}
</div>

</fieldset>
<span style="color:#eee; font-size: 80%;">
<strong>Tip: </strong> {{ tips | random | safe }}
</span>
<span style="color:#eee; font-size: 80%;"><img alt="Create a shareable link" style="height: 1em;display:inline-block;" src="{{url_for('static_content', group='images', filename='spread-white.svg')}}" > Tip: You can also add 'shared' watches. <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Sharing-a-Watch">More info</a></span>
</form>
</div>
<div class="box">
@@ -123,44 +45,29 @@ html[data-darkmode="true"] .watch-tag-list.tag-{{ class_name }} {
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" >
<input type="hidden" id="op_extradata" name="op_extradata" value="" >
<div id="checkbox-operations">
<button class="pure-button button-secondary button-xsmall" name="op" value="pause"><i data-feather="pause" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>{{ _('Pause') }}</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="unpause"><i data-feather="play" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>{{ _('UnPause') }}</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="mute"><i data-feather="volume-x" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>{{ _('Mute') }}</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="unmute"><i data-feather="volume-2" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>{{ _('UnMute') }}</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="recheck"><i data-feather="refresh-cw" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>{{ _('Recheck') }}</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="assign-tag" id="checkbox-assign-tag"><i data-feather="tag" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>{{ _('Tag') }}</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="mark-viewed"><i data-feather="eye" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>{{ _('Mark viewed') }}</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="notification-default"><i data-feather="bell" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>{{ _('Use default notification') }}</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="clear-errors"><i data-feather="x-circle" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>{{ _('Clear errors') }}</button>
<button class="pure-button button-secondary button-xsmall" style="background: #dd4242;" name="op" value="clear-history"
data-requires-confirm
data-confirm-type="danger"
data-confirm-title="{{ _('Clear Histories') }}"
data-confirm-message="{{ _('<p>Are you sure you want to clear history for the selected items?</p><p>This action cannot be undone.</p>') }}"
data-confirm-button="{{ _('OK') }}"><i data-feather="trash-2" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>{{ _('Clear/reset history') }}</button>
<button class="pure-button button-secondary button-xsmall" style="background: #dd4242;" name="op" value="delete"
data-requires-confirm
data-confirm-type="danger"
data-confirm-title="{{ _('Delete Watches?') }}"
data-confirm-message="{{ _('<p>Are you sure you want to delete the selected watches?</strong></p><p>This action cannot be undone.</p>') }}"
data-confirm-button="{{ _('Delete') }}"><i data-feather="trash" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>{{ _('Delete') }}</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="pause"><i data-feather="pause" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Pause</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="unpause"><i data-feather="play" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>UnPause</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="mute"><i data-feather="volume-x" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Mute</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="unmute"><i data-feather="volume-2" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>UnMute</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="recheck"><i data-feather="refresh-cw" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Recheck</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="assign-tag" id="checkbox-assign-tag"><i data-feather="tag" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Tag</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="mark-viewed"><i data-feather="eye" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Mark viewed</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="notification-default"><i data-feather="bell" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Use default notification</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="clear-errors"><i data-feather="x-circle" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Clear errors</button>
<button class="pure-button button-secondary button-xsmall" style="background: #dd4242;" name="op" value="clear-history"><i data-feather="trash-2" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Clear/reset history</button>
<button class="pure-button button-secondary button-xsmall" style="background: #dd4242;" name="op" value="delete"><i data-feather="trash" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>Delete</button>
</div>

<div id="stats_row">
<div class="left">{%- if watches|length >= pagination.per_page -%}{{ pagination.info }}{%- endif -%}</div>
<div class="right" >{{ _('Queued size') }}: <span id="queue-size-int">{{ queue_size }}</span></div>
</div>

{%- if search_q -%}<div id="search-result-info">{{ _('Searching') }} "<strong><i>{{search_q}}</i></strong>"</div>{%- endif -%}
{%- if watches|length >= pagination.per_page -%}
{{ pagination.info }}
{%- endif -%}
{%- if search_q -%}<div id="search-result-info">Searching "<strong><i>{{search_q}}</i></strong>"</div>{%- endif -%}
<div>
<a href="{{url_for('watchlist.index')}}" class="pure-button button-tag {{'active' if not active_tag_uuid }}">{{ _('All') }}</a>
<a href="{{url_for('watchlist.index')}}" class="pure-button button-tag {{'active' if not active_tag_uuid }}">All</a>

<!-- tag list -->
{%- for uuid, tag in tags -%}
{%- if tag != "" -%}
<a href="{{url_for('watchlist.index', tag=uuid) }}" class="pure-button button-tag tag-{{ tag.title|sanitize_tag_class }} {{'active' if active_tag_uuid == uuid }}">{{ tag.title }}</a>
<a href="{{url_for('watchlist.index', tag=uuid) }}" class="pure-button button-tag {{'active' if active_tag_uuid == uuid }}">{{ tag.title }}</a>
{%- endif -%}
{%- endfor -%}
</div>
@@ -174,11 +81,8 @@ html[data-darkmode="true"] .watch-tag-list.tag-{{ class_name }} {
{%- set cols_required = cols_required + 1 -%}
{%- endif -%}
{%- set ui_settings = datastore.data['settings']['application']['ui'] -%}
{%- set wrapper_classes = [
'has-unread-changes' if unread_changes_count else '',
'has-error' if errored_count else '',
] -%}
<div id="watch-table-wrapper" class="{{ wrapper_classes | reject('equalto', '') | join(' ') }}">

<div id="watch-table-wrapper">
{%- set table_classes = [
'favicon-enabled' if 'favicons_enabled' not in ui_settings or ui_settings['favicons_enabled'] else 'favicon-not-enabled',
] -%}
@@ -193,19 +97,19 @@ html[data-darkmode="true"] .watch-tag-list.tag-{{ class_name }} {

<a class="{{ 'active '+link_order if sort_attribute == 'notification_muted' else 'inactive' }}" href="{{url_for('watchlist.index', sort='notification_muted', order=link_order, tag=active_tag_uuid)}}"><i data-feather="volume-2" style="vertical-align: bottom; width: 14px; height: 14px; margin-right: 4px;"></i><span class='arrow {{link_order}}'></span></a>
</th>
<th><a class="{{ 'active '+link_order if sort_attribute == 'label' else 'inactive' }}" href="{{url_for('watchlist.index', sort='label', order=link_order, tag=active_tag_uuid)}}">{{ _('Website') }} <span class='arrow {{link_order}}'></span></a></th>
<th><a class="{{ 'active '+link_order if sort_attribute == 'label' else 'inactive' }}" href="{{url_for('watchlist.index', sort='label', order=link_order, tag=active_tag_uuid)}}">Website <span class='arrow {{link_order}}'></span></a></th>
{%- if any_has_restock_price_processor -%}
<th>{{ _('Restock & Price') }}</th>
<th>Restock & Price</th>
{%- endif -%}
<th><a class="{{ 'active '+link_order if sort_attribute == 'last_checked' else 'inactive' }}" href="{{url_for('watchlist.index', sort='last_checked', order=link_order, tag=active_tag_uuid)}}"><span class="hide-on-mobile">{{ _('Last') }}</span> {{ _('Checked') }} <span class='arrow {{link_order}}'></span></a></th>
<th><a class="{{ 'active '+link_order if sort_attribute == 'last_changed' else 'inactive' }}" href="{{url_for('watchlist.index', sort='last_changed', order=link_order, tag=active_tag_uuid)}}"><span class="hide-on-mobile">{{ _('Last') }}</span> {{ _('Changed') }} <span class='arrow {{link_order}}'></span></a></th>
<th><a class="{{ 'active '+link_order if sort_attribute == 'last_checked' else 'inactive' }}" href="{{url_for('watchlist.index', sort='last_checked', order=link_order, tag=active_tag_uuid)}}"><span class="hide-on-mobile">Last</span> Checked <span class='arrow {{link_order}}'></span></a></th>
<th><a class="{{ 'active '+link_order if sort_attribute == 'last_changed' else 'inactive' }}" href="{{url_for('watchlist.index', sort='last_changed', order=link_order, tag=active_tag_uuid)}}"><span class="hide-on-mobile">Last</span> Changed <span class='arrow {{link_order}}'></span></a></th>
<th class="empty-cell"></th>
</tr>
</thead>
<tbody>
{%- if not watches|length -%}
<tr>
<td colspan="{{ cols_required }}" style="text-wrap: wrap;">{{ _('No web page change detection watches configured, please add a URL in the box above, or') }} <a href="{{ url_for('imports.import_page')}}" >{{ _('import a list') }}</a>.</td>
<td colspan="{{ cols_required }}" style="text-wrap: wrap;">No website watches configured, please add a URL in the box above, or <a href="{{ url_for('imports.import_page')}}" >import a list</a>.</td>
</tr>
{%- endif -%}

@@ -213,13 +117,11 @@ html[data-darkmode="true"] .watch-tag-list.tag-{{ class_name }} {
{%- set checking_now = is_checking_now(watch) -%}
{%- set history_n = watch.history_n -%}
{%- set favicon = watch.get_favicon_filename() -%}
{%- set error_texts = watch.compile_error_texts(has_proxies=has_proxies) -%}
{%- set system_use_url_watchlist = datastore.data['settings']['application']['ui'].get('use_page_title_in_list') -%}
{# Class settings mirrored in changedetectionio/static/js/realtime.js for the frontend #}
{# Mirror in changedetectionio/static/js/realtime.js for the frontend #}
{%- set row_classes = [
loop.cycle('pure-table-odd', 'pure-table-even'),
'processor-' ~ watch['processor'],
'has-error' if error_texts|length > 2 else '',
'has-error' if watch.compile_error_texts()|length > 2 else '',
'paused' if watch.paused is defined and watch.paused != False else '',
'unviewed' if watch.has_unviewed else '',
'has-restock-info' if watch.has_restock_info else 'no-restock-info',
@@ -230,8 +132,7 @@ html[data-darkmode="true"] .watch-tag-list.tag-{{ class_name }} {
'checking-now' if checking_now else '',
'notification_muted' if watch.notification_muted else '',
'single-history' if history_n == 1 else '',
'multiple-history' if history_n >= 2 else '',
'use-html-title' if system_use_url_watchlist else 'no-html-title',
'multiple-history' if history_n >= 2 else '',
] -%}
<tr id="{{ watch.uuid }}" data-watch-uuid="{{ watch.uuid }}" class="{{ row_classes | reject('equalto', '') | join(' ') }}">
<td class="inline checkbox-uuid" ><div><input name="uuids" type="checkbox" value="{{ watch.uuid}} " > <span class="counter-i">{{ loop.index+pagination.skip }}</span></div></td>
@@ -247,47 +148,34 @@ html[data-darkmode="true"] .watch-tag-list.tag-{{ class_name }} {
<td class="title-col inline">
<div class="flex-wrapper">
{% if 'favicons_enabled' not in ui_settings or ui_settings['favicons_enabled'] %}
<div>
{# Intersection Observer lazy loading: store real URL in data-src, load only when visible in viewport #}
<img alt="Favicon thumbnail"
class="favicon lazy-favicon"
loading="lazy"
decoding="async"
fetchpriority="low"
{% if favicon %}
data-src="{{url_for('static_content', group='favicon', filename=watch.uuid)}}"
{% endif %}
src='data:image/svg+xml;utf8,%3Csvg xmlns="http://www.w3.org/2000/svg" width="7.087" height="7.087" viewBox="0 0 7.087 7.087"%3E%3Ccircle cx="3.543" cy="3.543" r="3.279" stroke="%23e1e1e1" stroke-width="0.45" fill="none" opacity="0.74"/%3E%3C/svg%3E'>
<div>{# A page might have hundreds of these images, set IMG options for lazy loading, don't set SRC if we dont have it so it doesnt fetch the placeholder' #}
<img alt="Favicon thumbnail" class="favicon" loading="lazy" decoding="async" fetchpriority="low" {% if favicon %} src="{{url_for('static_content', group='favicon', filename=watch.uuid)}}" {% else %} src='data:image/svg+xml;utf8,%3Csvg xmlns="http://www.w3.org/2000/svg" width="7.087" height="7.087" viewBox="0 0 7.087 7.087"%3E%3Ccircle cx="3.543" cy="3.543" r="3.279" stroke="%23e1e1e1" stroke-width="0.45" fill="none" opacity="0.74"/%3E%3C/svg%3E' {% endif %} />
</div>
{% endif %}
<div>
{%- if watch['processor'] and watch['processor'] in processor_badge_texts -%}
<span class="processor-badge processor-badge-{{ watch['processor'] }}" title="{{ processor_descriptions.get(watch['processor'], watch['processor']) }}">{{ processor_badge_texts[watch['processor']] }}</span>
{%- endif -%}
<span class="watch-title">
{% if system_use_url_watchlist or watch.get('use_page_title_in_list') %}
{{ watch.label }}
{% else %}
{{ watch.get('title') or watch.link }}
{% endif %}
<a class="external" target="_blank" rel="noopener" href="{{ watch.link.replace('source:','') }}"> </a>
</span>
<div class="error-text" style="display:none;">{{ error_texts|safe }}</div>
<span class="watch-title">
{{watch.title if watch.title is not none and watch.title|length > 0 else watch.url}} <a class="external" target="_blank" rel="noopener" href="{{ watch.link.replace('source:','') }}"> </a>
</span>
<div class="error-text" style="display:none;">{{ watch.compile_error_texts(has_proxies=datastore.proxy_list) }}</div>
{%- if watch['processor'] == 'text_json_diff' -%}
{%- if watch['has_ldjson_price_data'] and not watch['track_ldjson_price_data'] -%}
<div class="ldjson-price-track-offer">Switch to Restock & Price watch mode? <a href="{{url_for('price_data_follower.accept', uuid=watch.uuid)}}" class="pure-button button-xsmall">Yes</a> <a href="{{url_for('price_data_follower.reject', uuid=watch.uuid)}}" class="">No</a></div>
{%- endif -%}
{%- endif -%}

{%- if watch['processor'] == 'restock_diff' -%}
<span class="tracking-ldjson-price-data" title="Automatically following embedded price information"><img src="{{url_for('static_content', group='images', filename='price-tag-icon.svg')}}" class="status-icon price-follow-tag-icon" > Price</span>
{%- endif -%}
{%- for watch_tag_uuid, watch_tag in datastore.get_all_tags_for_watch(watch['uuid']).items() -%}
<a href="{{url_for('watchlist.index', tag=watch_tag_uuid) }}" class="watch-tag-list tag-{{ watch_tag.title|sanitize_tag_class }}">{{ watch_tag.title }}</a>
<span class="watch-tag-list">{{ watch_tag.title }}</span>
{%- endfor -%}
</div>
<div class="status-icons">
<a class="link-spread" href="{{url_for('ui.form_share_put_watch', uuid=watch.uuid)}}"><img src="{{url_for('static_content', group='images', filename='spread.svg')}}" class="status-icon icon icon-spread" title="Create a link to share watch config with others" ></a>
{%- set effective_fetcher = watch.get_fetch_backend if watch.get_fetch_backend != "system" else system_default_fetcher -%}
{%- if effective_fetcher and ("html_webdriver" in effective_fetcher or "html_" in effective_fetcher or "extra_browser_" in effective_fetcher) -%}
{{ effective_fetcher|fetcher_status_icons }}
{%- if watch.get_fetch_backend == "html_webdriver"
or ( watch.get_fetch_backend == "system" and system_default_fetcher == 'html_webdriver' )
or "extra_browser_" in watch.get_fetch_backend
-%}
<img class="status-icon" src="{{url_for('static_content', group='images', filename='google-chrome-icon.png')}}" alt="Using a Chrome browser" title="Using a Chrome browser" >
{%- endif -%}
{%- if watch.is_pdf -%}<img class="status-icon" src="{{url_for('static_content', group='images', filename='pdf-icon.svg')}}" alt="Converting PDF to text" >{%- endif -%}
{%- if watch.has_browser_steps -%}<img class="status-icon status-browsersteps" src="{{url_for('static_content', group='images', filename='steps.svg')}}" alt="Browser Steps is enabled" >{%- endif -%}
@@ -299,29 +187,20 @@ html[data-darkmode="true"] .watch-tag-list.tag-{{ class_name }} {
<td class="restock-and-price">
{%- if watch['processor'] == 'restock_diff' -%}
{%- if watch.has_restock_info -%}
<span class="restock-label {{'in-stock' if watch['restock']['in_stock'] else 'not-in-stock' }}" title="{{ _('Detecting restock and price') }}">
<span class="restock-label {{'in-stock' if watch['restock']['in_stock'] else 'not-in-stock' }}" title="Detecting restock and price">
<!-- maybe some object watch['processor'][restock_diff] or.. -->
{%- if watch['restock']['in_stock']-%} {{ _('In stock') }} {%- else-%} {{ _('Not in stock') }} {%- endif -%}
{%- if watch['restock']['in_stock']-%} In stock {%- else-%} Not in stock {%- endif -%}
</span>
{%- endif -%}

{%- if watch.get('restock') and watch['restock'].get('price') -%}
{%- set restock = watch['restock'] -%}
{%- set price = restock.get('price') -%}
{%- set cur = restock.get('currency','') -%}

{%- if price is not none and (price|string)|regex_search('\d') -%}
<span class="restock-label price" title="{{ _('Price') }}">
{# @todo: make parse_currency/parse_decimal aware of the locale of the actual web page and use that instead changedetectionio/processors/restock_diff/__init__.py #}
{%- if price is number -%}{# It's a number so we can convert it to their locale' #}
{{ price|format_number_locale }} {{ cur }}<!-- as number -->
{%- else -%}{# It's totally fine if it arrives as something else, the website might be something weird in this field #}
{{ price }} {{ cur }}<!-- as string -->
{%- endif -%}
</span>
{%- if watch.get('restock') and watch['restock']['price'] != None -%}
{%- if watch['restock']['price'] != None -%}
<span class="restock-label price" title="Price">
{{ watch['restock']['price']|format_number_locale }} {{ watch['restock']['currency'] }}
</span>
{%- endif -%}
{%- elif not watch.has_restock_info -%}
<span class="restock-label error">{{ _('No information') }}</span>
<span class="restock-label error">No information</span>
{%- endif -%}
{%- endif -%}
</td>
@@ -329,24 +208,24 @@ html[data-darkmode="true"] .watch-tag-list.tag-{{ class_name }} {
{#last_checked becomes fetch-start-time#}
<td class="last-checked" data-timestamp="{{ watch.last_checked }}" data-fetchduration={{ watch.fetch_time }} data-eta_complete="{{ watch.last_checked+watch.fetch_time }}" >
<div class="spinner-wrapper" style="display:none;" >
<span class="spinner"></span><span class="status-text"> {{ _('Checking now') }}</span>
<span class="spinner"></span><span> Checking now</span>
</div>
<span class="innertext">{{watch|format_last_checked_time|safe}}</span>
</td>
<td class="last-changed" data-timestamp="{{ watch.last_changed }}">{%- if watch.history_n >=2 and watch.last_changed >0 -%}
{{watch.last_changed|format_timestamp_timeago}}
{%- else -%}
{{ _('Not yet') }}
Not yet
{%- endif -%}
</td>
<td class="buttons">
<div>
{%- set target_attr = ' target="' ~ watch.uuid ~ '"' if datastore.data['settings']['application']['ui'].get('open_diff_in_new_tab') else '' -%}
<a href="" class="already-in-queue-button recheck pure-button pure-button-primary" style="display: none;" disabled="disabled">{{ _('Queued') }}</a>
<a href="{{ url_for('ui.form_watch_checknow', uuid=watch.uuid, tag=request.args.get('tag')) }}" data-op='recheck' class="ajax-op recheck pure-button pure-button-primary">{{ _('Recheck') }}</a>
<a href="{{ url_for('ui.ui_edit.edit_page', uuid=watch.uuid, tag=active_tag_uuid)}}#general" class="pure-button pure-button-primary">{{ _('Edit') }}</a>
<a href="{{ url_for('ui.ui_diff.diff_history_page', uuid=watch.uuid)}}" {{target_attr}} class="pure-button pure-button-primary history-link" style="display: none;">{{ _('History') }}</a>
<a href="{{ url_for('ui.ui_preview.preview_page', uuid=watch.uuid)}}" {{target_attr}} class="pure-button pure-button-primary preview-link" style="display: none;">{{ _('Preview') }}</a>
<a href="" class="already-in-queue-button recheck pure-button pure-button-primary" style="display: none;" disabled="disabled">Queued</a>
<a href="{{ url_for('ui.form_watch_checknow', uuid=watch.uuid, tag=request.args.get('tag')) }}" data-op='recheck' class="ajax-op recheck pure-button pure-button-primary">Recheck</a>
<a href="{{ url_for('ui.ui_edit.edit_page', uuid=watch.uuid, tag=active_tag_uuid)}}#general" class="pure-button pure-button-primary">Edit</a>
<a href="{{ url_for('ui.ui_views.diff_history_page', uuid=watch.uuid)}}" {{target_attr}} class="pure-button pure-button-primary history-link" style="display: none;">History</a>
<a href="{{ url_for('ui.ui_views.preview_page', uuid=watch.uuid)}}" {{target_attr}} class="pure-button pure-button-primary preview-link" style="display: none;">Preview</a>
</div>
</td>
</tr>
@@ -354,22 +233,20 @@ html[data-darkmode="true"] .watch-tag-list.tag-{{ class_name }} {
</tbody>
</table>
<ul id="post-list-buttons">
<li id="post-list-with-errors" style="display: none;" >
<a href="{{url_for('watchlist.index', with_errors=1, tag=request.args.get('tag')) }}" class="pure-button button-tag button-error">{{ _('With errors') }} ({{ errored_count }})</a>
<li id="post-list-with-errors" class="{%- if errored_count -%}has-error{%- endif -%}" style="display: none;" >
<a href="{{url_for('watchlist.index', with_errors=1, tag=request.args.get('tag')) }}" class="pure-button button-tag button-error">With errors ({{ errored_count }})</a>
</li>
<li id="post-list-mark-views" style="display: none;" >
<a href="{{url_for('ui.mark_all_viewed',with_errors=request.args.get('with_errors',0)) }}" class="pure-button button-tag " id="mark-all-viewed">{{ _('Mark all viewed') }}</a>
<li id="post-list-mark-views" class="{%- if has_unviewed -%}has-unviewed{%- endif -%}" style="display: none;" >
<a href="{{url_for('ui.mark_all_viewed',with_errors=request.args.get('with_errors',0)) }}" class="pure-button button-tag " id="mark-all-viewed">Mark all viewed</a>
</li>
{%- if active_tag_uuid -%}
<li id="post-list-mark-views-tag">
<a href="{{url_for('ui.mark_all_viewed', tag=active_tag_uuid) }}" class="pure-button button-tag " id="mark-all-viewed">{{ _("Mark all viewed in '%(title)s'", title=active_tag.title) }}</a>
<a href="{{url_for('ui.mark_all_viewed', tag=active_tag_uuid) }}" class="pure-button button-tag " id="mark-all-viewed">Mark all viewed in '{{active_tag.title}}'</a>
</li>
{%- endif -%}
<li id="post-list-unread" style="display: none;" >
<a href="{{url_for('watchlist.index', unread=1, tag=request.args.get('tag')) }}" class="pure-button button-tag">{{ _('Unread') }} (<span id="unread-tab-counter">{{ unread_changes_count }}</span>)</a>
</li>
<li>
<a href="{{ url_for('ui.form_watch_checknow', tag=active_tag_uuid, with_errors=request.args.get('with_errors',0)) }}" class="pure-button button-tag" id="recheck-all">{{ _('Recheck all') }} {% if active_tag_uuid %} {{ _("in '%(title)s'", title=active_tag.title) }}{%endif%}</a>
<a href="{{ url_for('ui.form_watch_checknow', tag=active_tag_uuid, with_errors=request.args.get('with_errors',0)) }}" class="pure-button button-tag" id="recheck-all">Recheck
all {% if active_tag_uuid %} in '{{active_tag.title}}'{%endif%}</a>
</li>
<li>
<a href="{{ url_for('rss.feed', tag=active_tag_uuid, token=app_rss_token)}}"><img alt="RSS Feed" id="feed-icon" src="{{url_for('static_content', group='images', filename='generic_feed-icon.svg')}}" height="15"></a>
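Most of the template churn in this hunk is internationalisation: literal UI strings on one side become {{ _('...') }} lookups on the other, backed by flask_babel. A minimal sketch of the Flask-Babel wiring that makes such lookups work (the app, locale list and selector are illustrative assumptions, not taken from this repository):

    from flask import Flask, request
    from flask_babel import Babel, gettext

    app = Flask(__name__)

    def select_locale():
        # Pick the best match from the browser's Accept-Language header
        return request.accept_languages.best_match(['en', 'de', 'fr']) or 'en'

    babel = Babel(app, locale_selector=select_locale)

    @app.route('/')
    def index():
        # The same machinery the template's {{ _('...') }} calls resolve through
        return gettext('Watch added.')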
@@ -1,7 +1,3 @@
"""
Levenshtein distance and similarity plugin for text change detection.
Provides metrics for measuring text similarity between snapshots.
"""
import pluggy
from loguru import logger
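For context on what the plugin's docstring promises, a self-contained sketch of Levenshtein distance and the derived similarity score (pure Python, independent of the plugin's real implementation):

    def levenshtein_distance(a: str, b: str) -> int:
        # Classic dynamic-programming edit distance, O(len(a) * len(b))
        prev = list(range(len(b) + 1))
        for i, ca in enumerate(a, start=1):
            cur = [i]
            for j, cb in enumerate(b, start=1):
                cur.append(min(prev[j] + 1,                  # deletion
                               cur[j - 1] + 1,               # insertion
                               prev[j - 1] + (ca != cb)))    # substitution
            prev = cur
        return prev[-1]

    def similarity(a: str, b: str) -> float:
        # 1.0 = identical snapshots, 0.0 = completely different
        if not a and not b:
            return 1.0
        return 1.0 - levenshtein_distance(a, b) / max(len(a), len(b))

    print(similarity("price: 10 EUR", "price: 12 EUR"))  # ~0.92, one substitution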
@@ -1,7 +1,3 @@
"""
Word count plugin for content analysis.
Provides word count metrics for snapshot content.
"""
import pluggy
from loguru import logger

@@ -18,7 +14,7 @@ def count_words_in_history(watch, incoming_text=None):
elif watch.history.keys():
# When called from UI extras to count latest snapshot
latest_key = list(watch.history.keys())[-1]
latest_content = watch.get_history_snapshot(timestamp=latest_key)
latest_content = watch.get_history_snapshot(latest_key)
return len(latest_content.split())
return 0
except Exception as e:
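Both plugin modules import pluggy and register through hookimpl. A rough, self-contained sketch of how such a hook spec and implementation pair up (the hook name and project marker here are illustrative, not the project's real hookspec):

    import pluggy

    hookspec = pluggy.HookspecMarker("changedetectionio")
    hookimpl = pluggy.HookimplMarker("changedetectionio")

    class Spec:
        @hookspec
        def extra_watch_stats(self, text):
            """Return a dict of extra metrics for a snapshot."""

    class WordCountPlugin:
        @hookimpl
        def extra_watch_stats(self, text):
            return {"word_count": len(text.split())}

    pm = pluggy.PluginManager("changedetectionio")
    pm.add_hookspecs(Spec)
    pm.register(WordCountPlugin())
    # Hooks are called with keyword arguments; all plugin results come back as a list
    print(pm.hook.extra_watch_stats(text="one two three"))  # [{'word_count': 3}]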
@@ -7,9 +7,6 @@ import os
# Visual Selector scraper - 'Button' is there because some sites have <button>OUT OF STOCK</button>.
visualselector_xpath_selectors = 'div,span,form,table,tbody,tr,td,a,p,ul,li,h1,h2,h3,h4,header,footer,section,article,aside,details,main,nav,section,summary,button'

# Import hookimpl from centralized pluggy interface
from changedetectionio.pluggy_interface import hookimpl

SCREENSHOT_MAX_HEIGHT_DEFAULT = 20000
SCREENSHOT_DEFAULT_QUALITY = 40

@@ -21,9 +18,7 @@ SCREENSHOT_MAX_TOTAL_HEIGHT = int(os.getenv("SCREENSHOT_MAX_HEIGHT", SCREENSHOT_
# The size at which we will switch to stitching method, when below this (and
# MAX_TOTAL_HEIGHT which can be set by a user) we will use the default
# screenshot method.
# Increased from 8000 to 10000 for better performance (fewer chunks = faster)
# Most modern GPUs support 16384x16384 textures, so 1280x10000 is safe
SCREENSHOT_SIZE_STITCH_THRESHOLD = int(os.getenv("SCREENSHOT_CHUNK_HEIGHT", 10000))
SCREENSHOT_SIZE_STITCH_THRESHOLD = 8000

# available_fetchers() will scan this implementation looking for anything starting with html_
# this information is used in the form selections
@@ -40,54 +35,17 @@ def available_fetchers():
# See the if statement at the bottom of this file for how we switch between playwright and webdriver
import inspect
p = []

# Get built-in fetchers (but skip plugin fetchers that were added via setattr)
for name, obj in inspect.getmembers(sys.modules[__name__], inspect.isclass):
if inspect.isclass(obj):
# @todo html_ is maybe better as fetcher_ or something
# In this case, make sure to edit the default one in store.py and fetch_site_status.py
if name.startswith('html_'):
# Skip plugin fetchers that were already registered
if name not in _plugin_fetchers:
t = tuple([name, obj.fetcher_description])
p.append(t)

# Get plugin fetchers from cache (already loaded at module init)
for name, fetcher_class in _plugin_fetchers.items():
if hasattr(fetcher_class, 'fetcher_description'):
t = tuple([name, fetcher_class.fetcher_description])
p.append(t)
else:
logger.warning(f"Plugin fetcher '{name}' does not have fetcher_description attribute")
t = tuple([name, obj.fetcher_description])
p.append(t)

return p

def get_plugin_fetchers():
"""Load and return all plugin fetchers from the centralized plugin manager."""
from changedetectionio.pluggy_interface import plugin_manager

fetchers = {}
try:
# Call the register_content_fetcher hook from all registered plugins
results = plugin_manager.hook.register_content_fetcher()
for result in results:
if result:
name, fetcher_class = result
fetchers[name] = fetcher_class
# Register in current module so hasattr() checks work
setattr(sys.modules[__name__], name, fetcher_class)
logger.info(f"Registered plugin fetcher: {name} - {getattr(fetcher_class, 'fetcher_description', 'No description')}")
except Exception as e:
logger.error(f"Error loading plugin fetchers: {e}")

return fetchers

# Initialize plugins at module load time
_plugin_fetchers = get_plugin_fetchers()

# Decide which is the 'real' HTML webdriver, this is more a system wide config
# rather than site-specific.
use_playwright_as_chrome_fetcher = os.getenv('PLAYWRIGHT_DRIVER_URL', False)
@@ -104,8 +62,3 @@ else:
logger.debug("Falling back to selenium as fetcher")
from .webdriver_selenium import fetcher as html_webdriver

# Register built-in fetchers as plugins after all imports are complete
from changedetectionio.pluggy_interface import register_builtin_fetchers
register_builtin_fetchers()
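get_plugin_fetchers() above collects (name, class) pairs from the register_content_fetcher hook and exposes each class on this module via setattr. A sketch of what a matching plugin side could look like (the fetcher class body and its name are invented for illustration; only the hookimpl import and the (name, class) return shape come from the hunk above):

    from changedetectionio.pluggy_interface import hookimpl

    class html_example(object):
        # available_fetchers() surfaces this text in the fetcher selection form
        fetcher_description = "Example fetcher (plugin)"

        async def run(self, **kwargs):
            # A real fetcher would set self.content / self.status_code here
            raise NotImplementedError

    @hookimpl
    def register_content_fetcher():
        # Returned as (name, class); the loader stores it in _plugin_fetchers
        # and setattr()s it onto the content_fetchers module
        return ("html_example", html_example)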
@@ -38,6 +38,7 @@ def manage_user_agent(headers, current_ua=''):

return None

class Fetcher():
browser_connection_is_custom = None
browser_connection_url = None
@@ -50,7 +51,6 @@ class Fetcher():
favicon_blob = None
instock_data = None
instock_data_js = ""
screenshot_format = None
status_code = None
webdriver_js_execute_code = None
xpath_data = None
@@ -64,79 +64,26 @@ class Fetcher():
# Time ONTOP of the system defined env minimum time
render_extract_delay = 0

# Fetcher capability flags - subclasses should override these
# These indicate what features the fetcher supports
supports_browser_steps = False # Can execute browser automation steps
supports_screenshots = False # Can capture page screenshots
supports_xpath_element_data = False # Can extract xpath element positions/data for visual selector

# Screenshot element locking - prevents layout shifts during screenshot capture
# Only needed for visual comparison (image_ssim_diff processor)
# Locks element dimensions in the first viewport to prevent headers/ads from resizing
lock_viewport_elements = False # Default: disabled for performance

def __init__(self, **kwargs):
if kwargs and 'screenshot_format' in kwargs:
self.screenshot_format = kwargs.get('screenshot_format')

# Allow lock_viewport_elements to be set via kwargs
if kwargs and 'lock_viewport_elements' in kwargs:
self.lock_viewport_elements = kwargs.get('lock_viewport_elements')

@classmethod
def get_status_icon_data(cls):
"""Return data for status icon to display in the watch overview.

This method can be overridden by subclasses to provide custom status icons.

Returns:
dict or None: Dictionary with icon data:
{
'filename': 'icon-name.svg', # Icon filename
'alt': 'Alt text', # Alt attribute
'title': 'Tooltip text', # Title attribute
'style': 'height: 1em;' # Optional inline CSS
}
Or None if no icon
"""
return None

def clear_content(self):
"""
Explicitly clear all content from memory to free up heap space.
Call this after content has been saved to disk.
"""
self.content = None
if hasattr(self, 'raw_content'):
self.raw_content = None
self.screenshot = None
self.xpath_data = None
# Keep headers and status_code as they're small

@abstractmethod
def get_error(self):
return self.error

@abstractmethod
async def run(self,
fetch_favicon=True,
current_include_filters=None,
empty_pages_are_a_change=False,
ignore_status_codes=False,
is_binary=False,
request_body=None,
request_headers=None,
request_method=None,
timeout=None,
url=None,
watch_uuid=None,
):
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False,
empty_pages_are_a_change=False):
# Should set self.error, self.status_code and self.content
pass

@abstractmethod
async def quit(self, watch=None):
def quit(self, watch=None):
return

@abstractmethod
@@ -162,16 +109,30 @@ class Fetcher():
"""
return {k.lower(): v for k, v in self.headers.items()}

def browser_steps_get_valid_steps(self):
if self.browser_steps is not None and len(self.browser_steps):
valid_steps = list(filter(
lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one'),
self.browser_steps))

# Just incase they selected Goto site by accident with older JS
if valid_steps and valid_steps[0]['operation'] == 'Goto site':
del(valid_steps[0])

return valid_steps

return None

async def iterate_browser_steps(self, start_url=None):
from changedetectionio.browser_steps.browser_steps import steppable_browser_interface, browser_steps_get_valid_steps
from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface
from playwright._impl._errors import TimeoutError, Error
from changedetectionio.jinja2_custom import render as jinja_render
from changedetectionio.safe_jinja import render as jinja_render
step_n = 0

if self.browser_steps:
if self.browser_steps is not None and len(self.browser_steps):
interface = steppable_browser_interface(start_url=start_url)
interface.page = self.page
valid_steps = browser_steps_get_valid_steps(self.browser_steps)
valid_steps = self.browser_steps_get_valid_steps()

for step in valid_steps:
step_n += 1
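The new base-class surface in this hunk (capability flags plus get_status_icon_data()) lets the UI describe a fetcher without starting a browser. A sketch of a minimal subclass that uses only attributes visible above (the class name, description and icon filename are illustrative):

    class simple_text_fetcher(Fetcher):
        fetcher_description = "Plain HTTP fetcher (sketch)"

        # Capability flags from the base class; a non-browser fetcher
        # cannot run browser steps or capture screenshots
        supports_browser_steps = False
        supports_screenshots = False
        supports_xpath_element_data = False

        @classmethod
        def get_status_icon_data(cls):
            # Shape matches the base-class docstring above
            return {
                'filename': 'requests-icon.svg',   # illustrative filename
                'alt': 'Plain HTTP request',
                'title': 'Plain HTTP request',
            }

        async def run(self, url=None, timeout=None, request_headers=None,
                      request_body=None, request_method=None, **kwargs):
            # Should set self.error, self.status_code and self.content
            ...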
@@ -1,5 +1,3 @@
import asyncio
import gc
import json
import os
from urllib.parse import urlparse
@@ -9,141 +7,69 @@ from loguru import logger
from changedetectionio.content_fetchers import SCREENSHOT_MAX_HEIGHT_DEFAULT, visualselector_xpath_selectors, \
SCREENSHOT_SIZE_STITCH_THRESHOLD, SCREENSHOT_MAX_TOTAL_HEIGHT, XPATH_ELEMENT_JS, INSTOCK_DATA_JS, FAVICON_FETCHER_JS
from changedetectionio.content_fetchers.base import Fetcher, manage_user_agent
from changedetectionio.content_fetchers.exceptions import PageUnloadable, Non200ErrorCodeReceived, EmptyReply, ScreenshotUnavailable, \
BrowserStepsStepException
from changedetectionio.content_fetchers.exceptions import PageUnloadable, Non200ErrorCodeReceived, EmptyReply, ScreenshotUnavailable

async def capture_full_page_async(page, screenshot_format='JPEG', watch_uuid=None, lock_viewport_elements=False):
async def capture_full_page_async(page):
import os
import time
from multiprocessing import Process, Pipe

start = time.time()
watch_info = f"[{watch_uuid}] " if watch_uuid else ""

setup_start = time.time()
page_height = await page.evaluate("document.documentElement.scrollHeight")
page_width = await page.evaluate("document.documentElement.scrollWidth")
original_viewport = page.viewport_size
dimensions_time = time.time() - setup_start

logger.debug(f"{watch_info}Playwright viewport size {page.viewport_size} page height {page_height} page width {page_width} (got dimensions in {dimensions_time:.2f}s)")
logger.debug(f"Playwright viewport size {page.viewport_size} page height {page_height} page width {page_width}")

# Use an approach similar to puppeteer: set a larger viewport and take screenshots in chunks
step_size = SCREENSHOT_SIZE_STITCH_THRESHOLD # Size that won't cause GPU to overflow
screenshot_chunks = []
y = 0
elements_locked = False

# Only lock viewport elements if explicitly enabled (for image_ssim_diff processor)
# This prevents headers/ads from resizing when viewport changes
if lock_viewport_elements and page_height > page.viewport_size['height']:
lock_start = time.time()
lock_elements_js_path = os.path.join(os.path.dirname(__file__), 'res', 'lock-elements-sizing.js')
with open(lock_elements_js_path, 'r') as f:
lock_elements_js = f.read()
await page.evaluate(lock_elements_js)
elements_locked = True
lock_time = time.time() - lock_start
logger.debug(f"{watch_info}Viewport element locking enabled (took {lock_time:.2f}s)")

if page_height > page.viewport_size['height']:
if page_height < step_size:
step_size = page_height # Incase page is bigger than default viewport but smaller than proposed step size
viewport_start = time.time()
logger.debug(f"{watch_info}Setting bigger viewport to step through large page width W{page.viewport_size['width']}xH{step_size} because page_height > viewport_size")
logger.debug(f"Setting bigger viewport to step through large page width W{page.viewport_size['width']}xH{step_size} because page_height > viewport_size")
# Set viewport to a larger size to capture more content at once
await page.set_viewport_size({'width': page.viewport_size['width'], 'height': step_size})
viewport_time = time.time() - viewport_start
logger.debug(f"{watch_info}Viewport changed to {page.viewport_size['width']}x{step_size} (took {viewport_time:.2f}s)")

# Capture screenshots in chunks up to the max total height
capture_start = time.time()
chunk_times = []
# Use PNG for better quality (no compression artifacts), JPEG for smaller size
screenshot_type = screenshot_format.lower() if screenshot_format else 'jpeg'
# PNG should use quality 100, JPEG uses configurable quality
screenshot_quality = 100 if screenshot_type == 'png' else int(os.getenv("SCREENSHOT_QUALITY", 72))

while y < min(page_height, SCREENSHOT_MAX_TOTAL_HEIGHT):
# Only scroll if not at the top (y > 0)
if y > 0:
await page.evaluate(f"window.scrollTo(0, {y})")

# Request GC only before screenshot (not 3x per chunk)
await page.request_gc()

screenshot_kwargs = {
'type': screenshot_type,
'full_page': False
}
# Only pass quality parameter for jpeg (PNG doesn't support it in Playwright)
if screenshot_type == 'jpeg':
screenshot_kwargs['quality'] = screenshot_quality

chunk_start = time.time()
screenshot_chunks.append(await page.screenshot(**screenshot_kwargs))
chunk_time = time.time() - chunk_start
chunk_times.append(chunk_time)
logger.debug(f"{watch_info}Chunk {len(screenshot_chunks)} captured in {chunk_time:.2f}s")
await page.evaluate(f"window.scrollTo(0, {y})")
await page.request_gc()
screenshot_chunks.append(await page.screenshot(
type="jpeg",
full_page=False,
quality=int(os.getenv("SCREENSHOT_QUALITY", 72))
))
y += step_size
await page.request_gc()

# Restore original viewport size
await page.set_viewport_size({'width': original_viewport['width'], 'height': original_viewport['height']})

# Unlock element dimensions if they were locked
if elements_locked:
unlock_elements_js_path = os.path.join(os.path.dirname(__file__), 'res', 'unlock-elements-sizing.js')
with open(unlock_elements_js_path, 'r') as f:
unlock_elements_js = f.read()
await page.evaluate(unlock_elements_js)
logger.debug(f"{watch_info}Element dimensions unlocked after screenshot capture")

capture_time = time.time() - capture_start
total_capture_time = sum(chunk_times)
logger.debug(f"{watch_info}All {len(screenshot_chunks)} chunks captured in {capture_time:.2f}s (total chunk time: {total_capture_time:.2f}s)")

# If we have multiple chunks, stitch them together
if len(screenshot_chunks) > 1:
stitch_start = time.time()
logger.debug(f"{watch_info}Starting stitching of {len(screenshot_chunks)} chunks")

# Always use spawn subprocess for ANY stitching (2+ chunks)
# PIL allocates at C level and Python GC never releases it - subprocess exit forces OS to reclaim
# Trade-off: 35MB resource_tracker vs 500MB+ PIL leak in main process
from changedetectionio.content_fetchers.screenshot_handler import stitch_images_worker_raw_bytes
import multiprocessing
import struct

ctx = multiprocessing.get_context('spawn')
parent_conn, child_conn = ctx.Pipe()
p = ctx.Process(target=stitch_images_worker_raw_bytes, args=(child_conn, page_height, SCREENSHOT_MAX_TOTAL_HEIGHT))
from changedetectionio.content_fetchers.screenshot_handler import stitch_images_worker
logger.debug(f"Screenshot stitching {len(screenshot_chunks)} chunks together")
parent_conn, child_conn = Pipe()
p = Process(target=stitch_images_worker, args=(child_conn, screenshot_chunks, page_height, SCREENSHOT_MAX_TOTAL_HEIGHT))
p.start()

# Send via raw bytes (no pickle)
parent_conn.send_bytes(struct.pack('I', len(screenshot_chunks)))
for chunk in screenshot_chunks:
parent_conn.send_bytes(chunk)

screenshot = parent_conn.recv_bytes()
p.join()

parent_conn.close()
child_conn.close()
del p, parent_conn, child_conn

stitch_time = time.time() - stitch_start
total_time = time.time() - start
setup_time = total_time - capture_time - stitch_time
logger.debug(
f"{watch_info}Screenshot complete - Page height: {page_height}px, Capture height: {SCREENSHOT_MAX_TOTAL_HEIGHT}px | "
f"Setup: {setup_time:.2f}s, Capture: {capture_time:.2f}s, Stitching: {stitch_time:.2f}s, Total: {total_time:.2f}s")
f"Screenshot (chunked/stitched) - Page height: {page_height} Capture height: {SCREENSHOT_MAX_TOTAL_HEIGHT} - Stitched together in {time.time() - start:.2f}s")
# Explicit cleanup
del screenshot_chunks
del p
del parent_conn, child_conn
screenshot_chunks = None
return screenshot

total_time = time.time() - start
setup_time = total_time - capture_time
logger.debug(
f"{watch_info}Screenshot complete - Page height: {page_height}px, Capture height: {SCREENSHOT_MAX_TOTAL_HEIGHT}px | "
f"Setup: {setup_time:.2f}s, Single chunk: {capture_time:.2f}s, Total: {total_time:.2f}s")
f"Screenshot Page height: {page_height} Capture height: {SCREENSHOT_MAX_TOTAL_HEIGHT} - Stitched together in {time.time() - start:.2f}s")

return screenshot_chunks[0]
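The stitching path above hands the JPEG chunks to a spawned child process over a Pipe as raw bytes (a packed chunk count, then each chunk) rather than pickling them, so the parent never holds the child's PIL allocations. The same wire protocol in isolation, with a trivial join standing in for the real image stitcher:

    import multiprocessing
    import struct

    def worker(conn):
        # Read the chunk count, then each chunk, exactly as the parent sent them
        (n,) = struct.unpack('I', conn.recv_bytes())
        chunks = [conn.recv_bytes() for _ in range(n)]
        # A real worker would stitch images here (e.g. with PIL) and then exit,
        # letting the OS reclaim all of its C-level allocations at once
        conn.send_bytes(b''.join(chunks))
        conn.close()

    if __name__ == '__main__':
        ctx = multiprocessing.get_context('spawn')
        parent_conn, child_conn = ctx.Pipe()
        p = ctx.Process(target=worker, args=(child_conn,))
        p.start()
        chunks = [b'chunk-one', b'chunk-two']
        parent_conn.send_bytes(struct.pack('I', len(chunks)))
        for c in chunks:
            parent_conn.send_bytes(c)
        print(parent_conn.recv_bytes())  # b'chunk-onechunk-two'
        p.join()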
@@ -163,22 +89,8 @@ class fetcher(Fetcher):
|
||||
|
||||
proxy = None
|
||||
|
||||
# Capability flags
|
||||
supports_browser_steps = True
|
||||
supports_screenshots = True
|
||||
supports_xpath_element_data = True
|
||||
|
||||
@classmethod
|
||||
def get_status_icon_data(cls):
|
||||
"""Return Chrome browser icon data for Playwright fetcher."""
|
||||
return {
|
||||
'filename': 'google-chrome-icon.png',
|
||||
'alt': 'Using a Chrome browser',
|
||||
'title': 'Using a Chrome browser'
|
||||
}
|
||||
|
||||
def __init__(self, proxy_override=None, custom_browser_connection_url=None, **kwargs):
|
||||
super().__init__(**kwargs)
|
||||
def __init__(self, proxy_override=None, custom_browser_connection_url=None):
|
||||
super().__init__()
|
||||
|
||||
self.browser_type = os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').strip('"')
|
||||
|
||||
@@ -213,57 +125,38 @@ class fetcher(Fetcher):
|
||||
|
||||
async def screenshot_step(self, step_n=''):
|
||||
super().screenshot_step(step_n=step_n)
|
||||
watch_uuid = getattr(self, 'watch_uuid', None)
|
||||
screenshot = await capture_full_page_async(page=self.page, screenshot_format=self.screenshot_format, watch_uuid=watch_uuid, lock_viewport_elements=self.lock_viewport_elements)
|
||||
screenshot = await capture_full_page_async(page=self.page)
|
||||
|
||||
# Request GC immediately after screenshot to free memory
|
||||
# Screenshots can be large and browser steps take many of them
|
||||
await self.page.request_gc()
|
||||
|
||||
if self.browser_steps_screenshot_path is not None:
|
||||
destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.jpeg'.format(step_n))
|
||||
logger.debug(f"Saving step screenshot to {destination}")
|
||||
with open(destination, 'wb') as f:
|
||||
f.write(screenshot)
|
||||
# Clear local reference to allow screenshot bytes to be collected
|
||||
del screenshot
|
||||
gc.collect()
|
||||
|
||||
async def save_step_html(self, step_n):
|
||||
super().save_step_html(step_n=step_n)
|
||||
content = await self.page.content()
|
||||
|
||||
# Request GC after getting page content
|
||||
await self.page.request_gc()
|
||||
|
||||
destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.html'.format(step_n))
|
||||
logger.debug(f"Saving step HTML to {destination}")
|
||||
with open(destination, 'w', encoding='utf-8') as f:
|
||||
with open(destination, 'w') as f:
|
||||
f.write(content)
|
||||
# Clear local reference
|
||||
del content
|
||||
gc.collect()
|
||||
|
||||
async def run(self,
|
||||
fetch_favicon=True,
|
||||
current_include_filters=None,
|
||||
empty_pages_are_a_change=False,
|
||||
ignore_status_codes=False,
|
||||
is_binary=False,
|
||||
request_body=None,
|
||||
request_headers=None,
|
||||
request_method=None,
|
||||
screenshot_format=None,
|
||||
timeout=None,
|
||||
url=None,
|
||||
watch_uuid=None,
|
||||
):
|
||||
url,
|
||||
timeout,
|
||||
request_headers,
|
||||
request_body,
|
||||
request_method,
|
||||
ignore_status_codes=False,
|
||||
current_include_filters=None,
|
||||
is_binary=False,
|
||||
empty_pages_are_a_change=False):
|
||||
|
||||
from playwright.async_api import async_playwright
|
||||
import playwright._impl._errors
|
||||
import time
|
||||
self.delete_browser_steps_screenshots()
|
||||
self.watch_uuid = watch_uuid # Store for use in screenshot_step
|
||||
response = None
|
||||
|
||||
async with async_playwright() as p:
|
||||
@@ -295,7 +188,7 @@ class fetcher(Fetcher):
self.page.on("console", lambda msg: logger.debug(f"Playwright console: Watch URL: {url} {msg.type}: {msg.text} {msg.args}"))

# Re-use as much code from browser steps as possible so it's the same
from changedetectionio.browser_steps.browser_steps import steppable_browser_interface
from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface
browsersteps_interface = steppable_browser_interface(start_url=url)
browsersteps_interface.page = self.page

@@ -341,16 +234,14 @@ class fetcher(Fetcher):
await browser.close()
raise PageUnloadable(url=url, status_code=None, message=str(e))

if fetch_favicon:
try:
self.favicon_blob = await self.page.evaluate(FAVICON_FETCHER_JS)
await self.page.request_gc()
except Exception as e:
logger.error(f"Error fetching FavIcon info {str(e)}, continuing.")
try:
self.favicon_blob = await self.page.evaluate(FAVICON_FETCHER_JS)
await self.page.request_gc()
except Exception as e:
logger.error(f"Error fetching FavIcon info {str(e)}, continuing.")

if self.status_code != 200 and not ignore_status_codes:
screenshot = await capture_full_page_async(self.page, screenshot_format=self.screenshot_format, watch_uuid=watch_uuid, lock_viewport_elements=self.lock_viewport_elements)
# Finally block will handle cleanup
screenshot = await capture_full_page_async(self.page)
raise Non200ErrorCodeReceived(url=url, status_code=self.status_code, screenshot=screenshot)

if not empty_pages_are_a_change and len((await self.page.content()).strip()) == 0:
@@ -359,113 +250,81 @@ class fetcher(Fetcher):
await browser.close()
raise EmptyReply(url=url, status_code=response.status)

# Wrap remaining operations in try/finally to ensure cleanup
# Run Browser Steps here
if self.browser_steps_get_valid_steps():
await self.iterate_browser_steps(start_url=url)

await self.page.wait_for_timeout(extra_wait * 1000)

now = time.time()
# So we can find an element on the page where its selector was entered manually (maybe not xPath etc)
if current_include_filters is not None:
await self.page.evaluate("var include_filters={}".format(json.dumps(current_include_filters)))
else:
await self.page.evaluate("var include_filters=''")
await self.page.request_gc()

# request_gc before and after evaluate to free up memory
# @todo browsersteps etc
MAX_TOTAL_HEIGHT = int(os.getenv("SCREENSHOT_MAX_HEIGHT", SCREENSHOT_MAX_HEIGHT_DEFAULT))
self.xpath_data = await self.page.evaluate(XPATH_ELEMENT_JS, {
"visualselector_xpath_selectors": visualselector_xpath_selectors,
"max_height": MAX_TOTAL_HEIGHT
})
await self.page.request_gc()

self.instock_data = await self.page.evaluate(INSTOCK_DATA_JS)
await self.page.request_gc()

self.content = await self.page.content()
await self.page.request_gc()
logger.debug(f"Scrape xPath element data in browser done in {time.time() - now:.2f}s")


# Bug 3 in Playwright screenshot handling
# Some bug where it gives the wrong screenshot size, but making a request with the clip set first seems to solve it
# JPEG is better here because the screenshots can be very very large

# Screenshots also travel via the ws:// (websocket) meaning that the binary data is base64 encoded
# which will significantly increase the IO size between the server and client, it's recommended to use the lowest
# acceptable screenshot quality here
try:
# Run Browser Steps here
if self.browser_steps:
try:
await self.iterate_browser_steps(start_url=url)
except BrowserStepsStepException:
# Finally block will handle cleanup
raise

await self.page.wait_for_timeout(extra_wait * 1000)

now = time.time()
# So we can find an element on the page where its selector was entered manually (maybe not xPath etc)
if current_include_filters is not None:
await self.page.evaluate("var include_filters={}".format(json.dumps(current_include_filters)))
else:
await self.page.evaluate("var include_filters=''")
await self.page.request_gc()

# request_gc before and after evaluate to free up memory
# @todo browsersteps etc
MAX_TOTAL_HEIGHT = int(os.getenv("SCREENSHOT_MAX_HEIGHT", SCREENSHOT_MAX_HEIGHT_DEFAULT))
self.xpath_data = await self.page.evaluate(XPATH_ELEMENT_JS, {
"visualselector_xpath_selectors": visualselector_xpath_selectors,
"max_height": MAX_TOTAL_HEIGHT
})
await self.page.request_gc()

self.instock_data = await self.page.evaluate(INSTOCK_DATA_JS)
await self.page.request_gc()

self.content = await self.page.content()
await self.page.request_gc()
logger.debug(f"Scrape xPath element data in browser done in {time.time() - now:.2f}s")


# Bug 3 in Playwright screenshot handling
# Some bug where it gives the wrong screenshot size, but making a request with the clip set first seems to solve it
# JPEG is better here because the screenshots can be very very large

# Screenshots also travel via the ws:// (websocket) meaning that the binary data is base64 encoded
# which will significantly increase the IO size between the server and client, it's recommended to use the lowest
# acceptable screenshot quality here
# The actual screenshot - this always base64 and needs decoding! horrible! huge CPU usage
self.screenshot = await capture_full_page_async(page=self.page, screenshot_format=self.screenshot_format, watch_uuid=watch_uuid, lock_viewport_elements=self.lock_viewport_elements)
self.screenshot = await capture_full_page_async(page=self.page)

# Force aggressive memory cleanup - screenshots are large and base64 decode creates temporary buffers
await self.page.request_gc()
gc.collect()

except ScreenshotUnavailable:
# Re-raise screenshot unavailable exceptions
except Exception as e:
# It's likely the screenshot was too long/big and something crashed
raise ScreenshotUnavailable(url=url, status_code=self.status_code)

finally:
# Clean up resources properly with timeouts to prevent hanging
# Request garbage collection one more time before closing
try:
if hasattr(self, 'page') and self.page:
await self.page.request_gc()
await asyncio.wait_for(self.page.close(), timeout=5.0)
logger.debug(f"Successfully closed page for {url}")
except asyncio.TimeoutError:
logger.warning(f"Timed out closing page for {url} (5s)")
except Exception as e:
logger.warning(f"Error closing page for {url}: {e}")
finally:
self.page = None
await self.page.request_gc()
except:
pass

# Clean up resources properly
try:
await self.page.request_gc()
except:
pass

try:
if context:
await asyncio.wait_for(context.close(), timeout=5.0)
logger.debug(f"Successfully closed context for {url}")
except asyncio.TimeoutError:
logger.warning(f"Timed out closing context for {url} (5s)")
except Exception as e:
logger.warning(f"Error closing context for {url}: {e}")
finally:
context = None
await self.page.close()
except:
pass
self.page = None

try:
if browser:
await asyncio.wait_for(browser.close(), timeout=5.0)
logger.debug(f"Successfully closed browser connection for {url}")
except asyncio.TimeoutError:
logger.warning(f"Timed out closing browser connection for {url} (5s)")
except Exception as e:
logger.warning(f"Error closing browser for {url}: {e}")
finally:
browser = None
await context.close()
except:
pass
context = None

# Force Python GC to release Playwright resources immediately
# Playwright objects can have circular references that delay cleanup
gc.collect()

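The page/context/browser teardown above repeats one pattern three times: await close() under asyncio.wait_for with a 5-second cap, log on timeout, and drop the reference in a finally. A minimal sketch of that pattern as a reusable helper (the name close_with_timeout is hypothetical, not part of the codebase):

import asyncio
from loguru import logger

async def close_with_timeout(resource, label, url, timeout=5.0):
    """Close a browser resource, but never hang longer than `timeout` seconds (sketch)."""
    try:
        if resource:
            await asyncio.wait_for(resource.close(), timeout=timeout)
            logger.debug(f"Successfully closed {label} for {url}")
    except asyncio.TimeoutError:
        logger.warning(f"Timed out closing {label} for {url} ({timeout}s)")
    except Exception as e:
        logger.warning(f"Error closing {label} for {url}: {e}")
    # The caller still drops its own reference afterwards (e.g. self.page = None)
    # so the object can actually be garbage collected.
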
# Plugin registration for built-in fetcher
class PlaywrightFetcherPlugin:
"""Plugin class that registers the Playwright fetcher as a built-in plugin."""

def register_content_fetcher(self):
"""Register the Playwright fetcher"""
return ('html_webdriver', fetcher)


# Create module-level instance for plugin registration
playwright_plugin = PlaywrightFetcherPlugin()
try:
await browser.close()
except:
pass
browser = None


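Each fetcher module in this branch exposes the same tiny plugin surface: a class whose register_content_fetcher() returns a (name, class) tuple, plus a module-level instance (the Puppeteer and requests fetchers below follow the identical pattern). A hedged sketch of how a registry could consume them; collect_fetchers is illustrative, not the project's actual plugin loader:

# Illustrative only - not the project's actual plugin loader
def collect_fetchers(plugins):
    fetchers = {}
    for plugin in plugins:
        name, cls = plugin.register_content_fetcher()
        # Both Playwright and Puppeteer register under 'html_webdriver',
        # so whichever plugin is loaded last wins that key
        fetchers[name] = cls
    return fetchers

# e.g. collect_fetchers([playwright_plugin, puppeteer_plugin, requests_plugin])
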
@@ -1,5 +1,4 @@
import asyncio
import gc
import json
import os
import websockets.exceptions
@@ -21,20 +20,18 @@ from changedetectionio.content_fetchers.exceptions import PageUnloadable, Non200
# Screenshots also travel via the ws:// (websocket) meaning that the binary data is base64 encoded
# which will significantly increase the IO size between the server and client, it's recommended to use the lowest
# acceptable screenshot quality here
async def capture_full_page(page, screenshot_format='JPEG', watch_uuid=None, lock_viewport_elements=False):
async def capture_full_page(page):
import os
import time
from multiprocessing import Process, Pipe

start = time.time()
watch_info = f"[{watch_uuid}] " if watch_uuid else ""

setup_start = time.time()
page_height = await page.evaluate("document.documentElement.scrollHeight")
page_width = await page.evaluate("document.documentElement.scrollWidth")
original_viewport = page.viewport
dimensions_time = time.time() - setup_start

logger.debug(f"{watch_info}Puppeteer viewport size {page.viewport} page height {page_height} page width {page_width} (got dimensions in {dimensions_time:.2f}s)")
logger.debug(f"Puppeteer viewport size {page.viewport} page height {page_height} page width {page_width}")

# Bug 3 in Playwright screenshot handling
# Some bug where it gives the wrong screenshot size, but making a request with the clip set first seems to solve it
@@ -44,124 +41,48 @@ async def capture_full_page(page, screenshot_format='JPEG', watch_uuid=None, loc
# which will significantly increase the IO size between the server and client, it's recommended to use the lowest
# acceptable screenshot quality here

# Use PNG for better quality (no compression artifacts), JPEG for smaller size
screenshot_type = screenshot_format.lower() if screenshot_format else 'jpeg'
# PNG should use quality 100, JPEG uses configurable quality
screenshot_quality = 100 if screenshot_type == 'png' else int(os.getenv("SCREENSHOT_QUALITY", 72))

step_size = SCREENSHOT_SIZE_STITCH_THRESHOLD # Something that will not cause the GPU to overflow when taking the screenshot
screenshot_chunks = []
y = 0
elements_locked = False

# Only lock viewport elements if explicitly enabled (for image_ssim_diff processor)
# This prevents headers/ads from resizing when viewport changes
if lock_viewport_elements and page_height > page.viewport['height']:
lock_start = time.time()
lock_elements_js_path = os.path.join(os.path.dirname(__file__), 'res', 'lock-elements-sizing.js')
file_read_start = time.time()
with open(lock_elements_js_path, 'r') as f:
lock_elements_js = f.read()
file_read_time = time.time() - file_read_start

evaluate_start = time.time()
await page.evaluate(lock_elements_js)
evaluate_time = time.time() - evaluate_start

elements_locked = True
lock_time = time.time() - lock_start
logger.debug(f"{watch_info}Viewport element locking enabled - File read: {file_read_time:.3f}s, Browser evaluate: {evaluate_time:.2f}s, Total: {lock_time:.2f}s")

if page_height > page.viewport['height']:
if page_height < step_size:
step_size = page_height # In case the page is bigger than the default viewport but smaller than the proposed step size
viewport_start = time.time()
await page.setViewport({'width': page.viewport['width'], 'height': step_size})
viewport_time = time.time() - viewport_start
logger.debug(f"{watch_info}Viewport changed to {page.viewport['width']}x{step_size} (took {viewport_time:.2f}s)")

capture_start = time.time()
chunk_times = []
while y < min(page_height, SCREENSHOT_MAX_TOTAL_HEIGHT):
# better than scrollTo in case they override it in the page
await page.evaluate(
"""(y) => {
const el = document.scrollingElement;
if (el) el.scrollTop = y;
document.documentElement.scrollTop = y;
document.body.scrollTop = y;
}""",
y
)

screenshot_kwargs = {
'type_': screenshot_type,
'fullPage': False
}
# PNG doesn't support quality parameter in Puppeteer
if screenshot_type == 'jpeg':
screenshot_kwargs['quality'] = screenshot_quality

chunk_start = time.time()
screenshot_chunks.append(await page.screenshot(**screenshot_kwargs))
chunk_time = time.time() - chunk_start
chunk_times.append(chunk_time)
logger.debug(f"{watch_info}Chunk {len(screenshot_chunks)} captured in {chunk_time:.2f}s")
screenshot_chunks.append(await page.screenshot(type_='jpeg',
fullPage=False,
quality=int(os.getenv("SCREENSHOT_QUALITY", 72))))
y += step_size

await page.setViewport({'width': original_viewport['width'], 'height': original_viewport['height']})

# Unlock element dimensions if they were locked
if elements_locked:
unlock_elements_js_path = os.path.join(os.path.dirname(__file__), 'res', 'unlock-elements-sizing.js')
with open(unlock_elements_js_path, 'r') as f:
unlock_elements_js = f.read()
await page.evaluate(unlock_elements_js)
logger.debug(f"{watch_info}Element dimensions unlocked after screenshot capture")

capture_time = time.time() - capture_start
total_capture_time = sum(chunk_times)
logger.debug(f"{watch_info}All {len(screenshot_chunks)} chunks captured in {capture_time:.2f}s (total chunk time: {total_capture_time:.2f}s)")

if len(screenshot_chunks) > 1:
stitch_start = time.time()
logger.debug(f"{watch_info}Starting stitching of {len(screenshot_chunks)} chunks")

# Always use spawn subprocess for ANY stitching (2+ chunks)
# PIL allocates at C level and Python GC never releases it - subprocess exit forces OS to reclaim
# Trade-off: 35MB resource_tracker vs 500MB+ PIL leak in main process
from changedetectionio.content_fetchers.screenshot_handler import stitch_images_worker_raw_bytes
import multiprocessing
import struct

ctx = multiprocessing.get_context('spawn')
parent_conn, child_conn = ctx.Pipe()
p = ctx.Process(target=stitch_images_worker_raw_bytes, args=(child_conn, page_height, SCREENSHOT_MAX_TOTAL_HEIGHT))
from changedetectionio.content_fetchers.screenshot_handler import stitch_images_worker
logger.debug(f"Screenshot stitching {len(screenshot_chunks)} chunks together")
parent_conn, child_conn = Pipe()
p = Process(target=stitch_images_worker, args=(child_conn, screenshot_chunks, page_height, SCREENSHOT_MAX_TOTAL_HEIGHT))
p.start()

# Send via raw bytes (no pickle)
parent_conn.send_bytes(struct.pack('I', len(screenshot_chunks)))
for chunk in screenshot_chunks:
parent_conn.send_bytes(chunk)

screenshot = parent_conn.recv_bytes()
p.join()

parent_conn.close()
child_conn.close()
del p, parent_conn, child_conn

stitch_time = time.time() - stitch_start
total_time = time.time() - start
setup_time = total_time - capture_time - stitch_time
logger.debug(
f"{watch_info}Screenshot complete - Page height: {page_height}px, Capture height: {SCREENSHOT_MAX_TOTAL_HEIGHT}px | "
f"Setup: {setup_time:.2f}s, Capture: {capture_time:.2f}s, Stitching: {stitch_time:.2f}s, Total: {total_time:.2f}s")
f"Screenshot (chunked/stitched) - Page height: {page_height} Capture height: {SCREENSHOT_MAX_TOTAL_HEIGHT} - Stitched together in {time.time() - start:.2f}s")

screenshot_chunks = None
return screenshot

total_time = time.time() - start
setup_time = total_time - capture_time
logger.debug(
f"{watch_info}Screenshot complete - Page height: {page_height}px, Capture height: {SCREENSHOT_MAX_TOTAL_HEIGHT}px | "
f"Setup: {setup_time:.2f}s, Single chunk: {capture_time:.2f}s, Total: {total_time:.2f}s")
f"Screenshot Page height: {page_height} Capture height: {SCREENSHOT_MAX_TOTAL_HEIGHT} - Stitched together in {time.time() - start:.2f}s")
return screenshot_chunks[0]


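The stitching hand-off above avoids pickle entirely: the parent sends a struct-packed chunk count, then each JPEG's raw bytes, and reads one raw-bytes reply. A standalone, runnable sketch of that framing, with dummy payloads standing in for real screenshot chunks (echo_worker is a stand-in for the real stitching worker):

import multiprocessing
import struct

def echo_worker(conn):
    # Reads the same framing the parent writes: 4-byte count, then raw chunks
    count = struct.unpack('I', conn.recv_bytes())[0]
    chunks = [conn.recv_bytes() for _ in range(count)]
    conn.send_bytes(b''.join(chunks))  # stand-in for the stitched JPEG bytes

if __name__ == '__main__':
    ctx = multiprocessing.get_context('spawn')
    parent_conn, child_conn = ctx.Pipe()
    p = ctx.Process(target=echo_worker, args=(child_conn,))
    p.start()
    chunks = [b'chunk-one', b'chunk-two']
    parent_conn.send_bytes(struct.pack('I', len(chunks)))
    for chunk in chunks:
        parent_conn.send_bytes(chunk)
    result = parent_conn.recv_bytes()
    p.join()
    assert result == b'chunk-onechunk-two'
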
@@ -172,27 +93,13 @@ class fetcher(Fetcher):
if os.getenv("PLAYWRIGHT_DRIVER_URL"):
fetcher_description += " via '{}'".format(os.getenv("PLAYWRIGHT_DRIVER_URL"))

browser = None
browser_type = ''
command_executor = ''

proxy = None

# Capability flags
supports_browser_steps = True
supports_screenshots = True
supports_xpath_element_data = True

@classmethod
def get_status_icon_data(cls):
"""Return Chrome browser icon data for Puppeteer fetcher."""
return {
'filename': 'google-chrome-icon.png',
'alt': 'Using a Chrome browser',
'title': 'Using a Chrome browser'
}

def __init__(self, proxy_override=None, custom_browser_connection_url=None, **kwargs):
super().__init__(**kwargs)
def __init__(self, proxy_override=None, custom_browser_connection_url=None):
super().__init__()

if custom_browser_connection_url:
self.browser_connection_is_custom = True
@@ -221,56 +128,37 @@ class fetcher(Fetcher):
proxy_url += f"{parsed.hostname}{port}{parsed.path}{q}"
self.browser_connection_url += f"{r}--proxy-server={proxy_url}"

async def quit(self, watch=None):
watch_uuid = watch.get('uuid') if watch else 'unknown'

# Close page
try:
if hasattr(self, 'page') and self.page:
await asyncio.wait_for(self.page.close(), timeout=5.0)
logger.debug(f"[{watch_uuid}] Page closed successfully")
except asyncio.TimeoutError:
logger.warning(f"[{watch_uuid}] Timed out closing page (5s)")
except Exception as e:
logger.warning(f"[{watch_uuid}] Error closing page: {e}")
finally:
self.page = None

# Close browser connection
try:
if hasattr(self, 'browser') and self.browser:
await asyncio.wait_for(self.browser.close(), timeout=5.0)
logger.debug(f"[{watch_uuid}] Browser closed successfully")
except asyncio.TimeoutError:
logger.warning(f"[{watch_uuid}] Timed out closing browser (5s)")
except Exception as e:
logger.warning(f"[{watch_uuid}] Error closing browser: {e}")
finally:
self.browser = None

logger.info(f"[{watch_uuid}] Cleanup puppeteer complete")

# Force garbage collection to release resources
gc.collect()
# def screenshot_step(self, step_n=''):
#     screenshot = self.page.screenshot(type='jpeg', full_page=True, quality=85)
#
#     if self.browser_steps_screenshot_path is not None:
#         destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.jpeg'.format(step_n))
#         logger.debug(f"Saving step screenshot to {destination}")
#         with open(destination, 'wb') as f:
#             f.write(screenshot)
#
# def save_step_html(self, step_n):
#     content = self.page.content()
#     destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.html'.format(step_n))
#     logger.debug(f"Saving step HTML to {destination}")
#     with open(destination, 'w') as f:
#         f.write(content)

async def fetch_page(self,
current_include_filters,
empty_pages_are_a_change,
fetch_favicon,
ignore_status_codes,
is_binary,
request_body,
request_headers,
request_method,
screenshot_format,
timeout,
url,
watch_uuid
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes,
current_include_filters,
is_binary,
empty_pages_are_a_change
):
import re
self.delete_browser_steps_screenshots()

n = int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 12)) + self.render_extract_delay
n = int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay
extra_wait = min(n, 15)

logger.debug(f"Extra wait set to {extra_wait}s, requested was {n}s.")
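A worked example of the min() cap above, assuming the 12-second default on this branch:

# WEBDRIVER_DELAY_BEFORE_CONTENT_READY=12, render_extract_delay=0:
n = 12 + 0               # requested wait
extra_wait = min(n, 15)  # -> 12s, used as-is
# With render_extract_delay=10: n = 22, capped to extra_wait = 15s
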
@@ -281,11 +169,9 @@ class fetcher(Fetcher):
# Connect directly using the specified browser_ws_endpoint
# @todo timeout
try:
logger.debug(f"[{watch_uuid}] Connecting to browser at {self.browser_connection_url}")
self.browser = await pyppeteer_instance.connect(browserWSEndpoint=self.browser_connection_url,
ignoreHTTPSErrors=True
)
logger.debug(f"[{watch_uuid}] Browser connected successfully")
browser = await pyppeteer_instance.connect(browserWSEndpoint=self.browser_connection_url,
ignoreHTTPSErrors=True
)
except websockets.exceptions.InvalidStatusCode as e:
raise BrowserConnectError(msg=f"Error while trying to connect the browser, Code {e.status_code} (check your access, whitelist IP, password etc)")
except websockets.exceptions.InvalidURI:
@@ -294,23 +180,7 @@ class fetcher(Fetcher):
raise BrowserConnectError(msg=f"Error connecting to the browser - Exception '{str(e)}'")

# more reliable is to just request a new page
try:
logger.debug(f"[{watch_uuid}] Creating new page")
self.page = await self.browser.newPage()
logger.debug(f"[{watch_uuid}] Page created successfully")
except Exception as e:
logger.error(f"[{watch_uuid}] Failed to create new page: {e}")
# Browser is connected but page creation failed - must cleanup browser
try:
await asyncio.wait_for(self.browser.close(), timeout=3.0)
except Exception as cleanup_error:
logger.error(f"[{watch_uuid}] Failed to cleanup browser after page creation failure: {cleanup_error}")
finally:
self.browser = None
raise

# Add console handler to capture console.log from favicon fetcher
#self.page.on('console', lambda msg: logger.debug(f"Browser console [{msg.type}]: {msg.text}"))
self.page = await browser.newPage()

if '--window-size' in self.browser_connection_url:
# Be sure the viewport is always the window-size, this is often not the same thing
@@ -322,6 +192,7 @@ class fetcher(Fetcher):
"height": int(match.group(2))
})
logger.debug(f"Puppeteer viewport size {self.page.viewport}")

try:
from pyppeteerstealth import inject_evasions_into_page
except ImportError:
@@ -366,57 +237,33 @@ class fetcher(Fetcher):
# browsersteps_interface = steppable_browser_interface()
# browsersteps_interface.page = self.page

# Enable Network domain to detect when first bytes arrive
await self.page._client.send('Network.enable')

# Now set up the frame navigation handlers
async def handle_frame_navigation(event=None):
# Wait n seconds after the frameStartedLoading, not from any frameStartedLoading/frameStartedNavigating
async def handle_frame_navigation(event):
logger.debug(f"Frame navigated: {event}")
w = extra_wait - 2 if extra_wait > 4 else 2
logger.debug(f"Waiting {w} seconds before calling Page.stopLoading...")
await asyncio.sleep(w)

# Check if page still exists (might have been closed due to error during sleep)
if not self.page or not hasattr(self.page, '_client'):
logger.debug("Page already closed, skipping stopLoading")
return

logger.debug("Issuing stopLoading command...")
await self.page._client.send('Page.stopLoading')
logger.debug("stopLoading command sent!")

async def setup_frame_handlers_on_first_response(event):
# Only trigger for the main document response
if event.get('type') == 'Document':
logger.debug("First response received, setting up frame handlers for forced page stop load.")
self.page._client.on('Page.frameStartedNavigating', lambda e: asyncio.create_task(handle_frame_navigation(e)))
self.page._client.on('Page.frameStartedLoading', lambda e: asyncio.create_task(handle_frame_navigation(e)))
self.page._client.on('Page.frameStoppedLoading', lambda e: logger.debug(f"Frame stopped loading: {e}"))
logger.debug("First response received, setting up frame handlers for forced page stop load DONE SETUP")
# De-register this listener - we only need it once
self.page._client.remove_listener('Network.responseReceived', setup_frame_handlers_on_first_response)

# Listen for first response to trigger frame handler setup
self.page._client.on('Network.responseReceived', setup_frame_handlers_on_first_response)
self.page._client.on('Page.frameStartedNavigating', lambda event: asyncio.create_task(handle_frame_navigation(event)))
self.page._client.on('Page.frameStartedLoading', lambda event: asyncio.create_task(handle_frame_navigation(event)))
self.page._client.on('Page.frameStoppedLoading', lambda event: logger.debug(f"Frame stopped loading: {event}"))

response = None
attempt=0
while not response:
logger.debug(f"Attempting page fetch {url} attempt {attempt}")
asyncio.create_task(handle_frame_navigation())
response = await self.page.goto(url, timeout=0)
response = await self.page.goto(url)
await asyncio.sleep(1 + extra_wait)
# Check if page still exists before sending command
if self.page and hasattr(self.page, '_client'):
await self.page._client.send('Page.stopLoading')

if response:
break
if not response:
logger.warning("Page did not fetch! trying again!")
if response is None and attempt>=2:
logger.warning(f"Content Fetcher > Response object was none (as in, the response from the browser was empty, not just the content) exiting attempt {attempt}")
await self.page.close()
await browser.close()
logger.warning(f"Content Fetcher > Response object was none (as in, the response from the browser was empty, not just the content) exiting attempt {attempt}")
raise EmptyReply(url=url, status_code=None)
attempt+=1

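The retry loop above leans on raw CDP commands through pyppeteer's private _client: Network.enable to see activity, then a delayed Page.stopLoading so goto(timeout=0) can't hang forever. A compressed sketch of the same idea; the helper names are illustrative and assume an already-connected pyppeteer page:

import asyncio

async def force_stop_after(page, wait_seconds):
    # Sleep, then ask Chrome to stop loading if the page still exists
    await asyncio.sleep(wait_seconds)
    if page and hasattr(page, '_client'):
        await page._client.send('Page.stopLoading')

async def goto_with_forced_stop(page, url, extra_wait):
    await page._client.send('Network.enable')
    # Fire-and-forget watchdog; goto(timeout=0) never times out on its own
    asyncio.ensure_future(force_stop_after(page, extra_wait))
    return await page.goto(url, timeout=0)
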
@@ -428,6 +275,8 @@ class fetcher(Fetcher):
except Exception as e:
logger.warning("Got exception when running evaluate on custom JS code")
logger.error(str(e))
await self.page.close()
await browser.close()
# This can be ok, we will try to grab what we could retrieve
raise PageUnloadable(url=url, status_code=None, message=str(e))

@@ -437,16 +286,17 @@ class fetcher(Fetcher):
# https://github.com/dgtlmoon/changedetection.io/discussions/2122#discussioncomment-8241962
logger.critical(f"Response from the browser/Playwright did not have a status_code! Response follows.")
logger.critical(response)
await self.page.close()
await browser.close()
raise PageUnloadable(url=url, status_code=None, message=str(e))

if fetch_favicon:
try:
self.favicon_blob = await self.page.evaluate(FAVICON_FETCHER_JS)
except Exception as e:
logger.error(f"Error fetching FavIcon info {str(e)}, continuing.")
try:
self.favicon_blob = await self.page.evaluate(FAVICON_FETCHER_JS)
except Exception as e:
logger.error(f"Error fetching FavIcon info {str(e)}, continuing.")

if self.status_code != 200 and not ignore_status_codes:
screenshot = await capture_full_page(page=self.page, screenshot_format=self.screenshot_format, watch_uuid=watch_uuid, lock_viewport_elements=self.lock_viewport_elements)
screenshot = await capture_full_page(page=self.page)

raise Non200ErrorCodeReceived(url=url, status_code=self.status_code, screenshot=screenshot)

@@ -454,11 +304,13 @@ class fetcher(Fetcher):

if not empty_pages_are_a_change and len(content.strip()) == 0:
logger.error("Content Fetcher > Content was empty (empty_pages_are_a_change is False), closing browsers")
await self.page.close()
await browser.close()
raise EmptyReply(url=url, status_code=response.status)

# Run Browser Steps here
# @todo not yet supported, we switch to playwright in this case
# if self.browser_steps:
#     if self.browser_steps_get_valid_steps():
#         self.iterate_browser_steps()


@@ -471,16 +323,6 @@ class fetcher(Fetcher):
await self.page.evaluate(f"var include_filters=''")

MAX_TOTAL_HEIGHT = int(os.getenv("SCREENSHOT_MAX_HEIGHT", SCREENSHOT_MAX_HEIGHT_DEFAULT))

self.content = await self.page.content

# Now take screenshot (scrolling may trigger layout changes, but measurements are already captured)
logger.debug(f"Screenshot format {self.screenshot_format}")
self.screenshot = await capture_full_page(page=self.page, screenshot_format=self.screenshot_format, watch_uuid=watch_uuid, lock_viewport_elements=self.lock_viewport_elements)

# Force garbage collection - pyppeteer base64 decode creates temporary buffers
import gc
gc.collect()
self.xpath_data = await self.page.evaluate(XPATH_ELEMENT_JS, {
"visualselector_xpath_selectors": visualselector_xpath_selectors,
"max_height": MAX_TOTAL_HEIGHT
@@ -488,29 +330,24 @@ class fetcher(Fetcher):
if not self.xpath_data:
raise Exception(f"Content Fetcher > xPath scraper failed. Please report this URL so we can fix it :)")


self.instock_data = await self.page.evaluate(INSTOCK_DATA_JS)

self.content = await self.page.content

self.screenshot = await capture_full_page(page=self.page)

# It's good to log here in the case that the browser crashes on shutting down but we still get the data we need
logger.success(f"Fetching '{url}' complete, closing page")
await self.page.close()
logger.success(f"Fetching '{url}' complete, closing browser")
await browser.close()
logger.success(f"Fetching '{url}' complete, exiting puppeteer fetch.")

async def main(self, **kwargs):
await self.fetch_page(**kwargs)

async def run(self,
fetch_favicon=True,
current_include_filters=None,
empty_pages_are_a_change=False,
ignore_status_codes=False,
is_binary=False,
request_body=None,
request_headers=None,
request_method=None,
screenshot_format=None,
timeout=None,
url=None,
watch_uuid=None,
):
async def run(self, url, timeout, request_headers, request_body, request_method, ignore_status_codes=False,
current_include_filters=None, is_binary=False, empty_pages_are_a_change=False):

#@todo make update_worker async which could run any of these content_fetchers within memory and time constraints
max_time = int(os.getenv('PUPPETEER_MAX_PROCESSING_TIMEOUT_SECONDS', 180))
@@ -518,40 +355,16 @@ class fetcher(Fetcher):
# Now we run this properly in async context since we're called from async worker
try:
await asyncio.wait_for(self.main(
current_include_filters=current_include_filters,
empty_pages_are_a_change=empty_pages_are_a_change,
fetch_favicon=fetch_favicon,
ignore_status_codes=ignore_status_codes,
is_binary=is_binary,
request_body=request_body,
request_headers=request_headers,
request_method=request_method,
screenshot_format=None,
timeout=timeout,
url=url,
watch_uuid=watch_uuid,
), timeout=max_time
)
timeout=timeout,
request_headers=request_headers,
request_body=request_body,
request_method=request_method,
ignore_status_codes=ignore_status_codes,
current_include_filters=current_include_filters,
is_binary=is_binary,
empty_pages_are_a_change=empty_pages_are_a_change
), timeout=max_time)
except asyncio.TimeoutError:
raise (BrowserFetchTimedOut(msg=f"Browser connected but was unable to process the page in {max_time} seconds."))
finally:
# Internal cleanup on any exception/timeout - call quit() immediately
# This prevents connection leaks during exception bursts
# Worker.py's quit() call becomes a redundant safety net (idempotent)
try:
await self.quit(watch={'uuid': watch_uuid} if watch_uuid else None)
except Exception as cleanup_error:
logger.error(f"[{watch_uuid}] Error during internal quit() cleanup: {cleanup_error}")
raise(BrowserFetchTimedOut(msg=f"Browser connected but was unable to process the page in {max_time} seconds."))


# Plugin registration for built-in fetcher
class PuppeteerFetcherPlugin:
"""Plugin class that registers the Puppeteer fetcher as a built-in plugin."""

def register_content_fetcher(self):
"""Register the Puppeteer fetcher"""
return ('html_webdriver', fetcher)


# Create module-level instance for plugin registration
puppeteer_plugin = PuppeteerFetcherPlugin()

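The run() wrapper above is the whole fetch lifecycle in miniature: bound the work with asyncio.wait_for, translate TimeoutError into a domain exception, and always call quit() in a finally so connections cannot leak even when the worker's own cleanup never runs. A minimal sketch of that shape (names are illustrative):

import asyncio

class FetchTimedOut(Exception):
    pass

async def run_bounded(fetch_coro, cleanup, max_time):
    """Run fetch_coro with a hard deadline; `cleanup` is an idempotent async callable."""
    try:
        await asyncio.wait_for(fetch_coro, timeout=max_time)
    except asyncio.TimeoutError:
        raise FetchTimedOut(f"unable to process the page in {max_time} seconds")
    finally:
        # Safe to call even if the fetch already closed everything
        try:
            await cleanup()
        except Exception as e:
            print(f"cleanup error: {e}")
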
@@ -1,22 +1,18 @@
from loguru import logger
from urllib.parse import urljoin, urlparse
import hashlib
import os
import re
import asyncio

from changedetectionio import strtobool
from changedetectionio.content_fetchers.exceptions import BrowserStepsInUnsupportedFetcher, EmptyReply, Non200ErrorCodeReceived
from changedetectionio.content_fetchers.base import Fetcher
from changedetectionio.validate_url import is_private_hostname


# "html_requests" is listed as the default fetcher in store.py!
class fetcher(Fetcher):
fetcher_description = "Basic fast Plaintext/HTTP Client"

def __init__(self, proxy_override=None, custom_browser_connection_url=None, **kwargs):
super().__init__(**kwargs)
def __init__(self, proxy_override=None, custom_browser_connection_url=None):
super().__init__()
self.proxy_override = proxy_override
# browser_connection_url is None because it's always 'launched locally'

@@ -29,16 +25,14 @@ class fetcher(Fetcher):
ignore_status_codes=False,
current_include_filters=None,
is_binary=False,
empty_pages_are_a_change=False,
watch_uuid=None,
):
empty_pages_are_a_change=False):
"""Synchronous version of run - the original requests implementation"""

import chardet
import requests
from requests.exceptions import ProxyError, ConnectionError, RequestException

if self.browser_steps:
if self.browser_steps_get_valid_steps():
raise BrowserStepsInUnsupportedFetcher(url=url)

proxies = {}
@@ -57,72 +51,17 @@ class fetcher(Fetcher):

session = requests.Session()

# Configure retry adapter for low-level network errors only
# Retries connection timeouts, read timeouts, connection resets - not HTTP status codes
# Especially helpful in parallel test execution when servers are slow/overloaded
# Configurable via REQUESTS_RETRY_MAX_COUNT (default: 6 attempts)
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

max_retries = int(os.getenv("REQUESTS_RETRY_MAX_COUNT", "6"))
retry_strategy = Retry(
total=max_retries,
connect=max_retries, # Retry connection timeouts
read=max_retries, # Retry read timeouts
status=0, # Don't retry on HTTP status codes
backoff_factor=0.5, # Exponential backoff: roughly 0.5s, 1s, 2s between retries
allowed_methods=["HEAD", "GET", "OPTIONS", "POST"],
raise_on_status=False
)
adapter = HTTPAdapter(max_retries=retry_strategy)
session.mount("http://", adapter)
session.mount("https://", adapter)

if strtobool(os.getenv('ALLOW_FILE_URI', 'false')) and url.startswith('file://'):
from requests_file import FileAdapter
session.mount('file://', FileAdapter())

allow_iana_restricted = strtobool(os.getenv('ALLOW_IANA_RESTRICTED_ADDRESSES', 'false'))

try:
# Fresh DNS check at fetch time — catches DNS rebinding regardless of add-time cache.
if not allow_iana_restricted:
parsed_initial = urlparse(url)
if parsed_initial.hostname and is_private_hostname(parsed_initial.hostname):
raise Exception(f"Fetch blocked: '{url}' resolves to a private/reserved IP address. "
f"Set ALLOW_IANA_RESTRICTED_ADDRESSES=true to allow.")

r = session.request(method=request_method,
data=request_body.encode('utf-8') if type(request_body) is str else request_body,
url=url,
headers=request_headers,
timeout=timeout,
proxies=proxies,
verify=False,
allow_redirects=False)

# Manually follow redirects so each hop's resolved IP can be validated,
# preventing SSRF via an open redirect on a public host.
current_url = url
for _ in range(10):
if not r.is_redirect:
break
location = r.headers.get('Location', '')
redirect_url = urljoin(current_url, location)
if not allow_iana_restricted:
parsed_redirect = urlparse(redirect_url)
if parsed_redirect.hostname and is_private_hostname(parsed_redirect.hostname):
raise Exception(f"Redirect blocked: '{redirect_url}' resolves to a private/reserved IP address.")
current_url = redirect_url
r = session.request('GET', redirect_url,
headers=request_headers,
timeout=timeout,
proxies=proxies,
verify=False,
allow_redirects=False)
else:
raise Exception("Too many redirects")

verify=False)
except Exception as e:
msg = str(e)
if proxies and 'SOCKSHTTPSConnectionPool' in msg:
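is_private_hostname() is imported from changedetectionio.validate_url and its implementation is not shown in this diff. A plausible minimal sketch of what such a check involves: resolve the hostname freshly on every call (that is what defeats DNS rebinding) and test every returned address against the reserved ranges. Illustrative only:

import ipaddress
import socket

def is_private_hostname_sketch(hostname):
    """Resolve hostname now and flag private/reserved addresses (illustrative only)."""
    try:
        infos = socket.getaddrinfo(hostname, None)
    except socket.gaierror:
        return True  # conservatively treat unresolvable hosts as blocked
    for info in infos:
        # strip any IPv6 zone id (e.g. 'fe80::1%eth0') before parsing
        ip = ipaddress.ip_address(info[4][0].split('%')[0])
        if ip.is_private or ip.is_loopback or ip.is_link_local or ip.is_reserved:
            return True
    return False
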
@@ -136,44 +75,9 @@ class fetcher(Fetcher):
if not is_binary:
# Don't run this for PDFs (and requests identified as binary) - it takes a _long_ time
if not r.headers.get('content-type') or not 'charset=' in r.headers.get('content-type'):
# For XML/RSS feeds, check the XML declaration for encoding attribute
# This is more reliable than chardet which can misdetect UTF-8 as MacRoman
content_type = r.headers.get('content-type', '').lower()
if 'xml' in content_type or 'rss' in content_type:
# Look for <?xml version="1.0" encoding="UTF-8"?>
xml_encoding_match = re.search(rb'<\?xml[^>]+encoding=["\']([^"\']+)["\']', r.content[:200])
if xml_encoding_match:
r.encoding = xml_encoding_match.group(1).decode('ascii')
else:
# Default to UTF-8 for XML if no encoding found
r.encoding = 'utf-8'
else:
# No charset in HTTP header - sniff encoding in priority order matching browsers
# (WHATWG encoding sniffing algorithm):
# 1. BOM - highest confidence, check before anything else
# 2. <meta charset> in first 2kb
# 3. chardet statistical detection - last resort
# See: https://github.com/dgtlmoon/changedetection.io/issues/3952
boms = [
(b'\xef\xbb\xbf', 'utf-8-sig'),
(b'\xff\xfe', 'utf-16-le'),
(b'\xfe\xff', 'utf-16-be'),
]
bom_encoding = next((enc for bom, enc in boms if r.content.startswith(bom)), None)
if bom_encoding:
logger.info(f"URL: {url} Using encoding '{bom_encoding}' detected from BOM")
r.encoding = bom_encoding
else:
meta_charset_match = re.search(rb'<meta[^>]+charset\s*=\s*["\']?\s*([^"\'\s;>]+)', r.content[:2000], re.IGNORECASE)
if meta_charset_match:
encoding = meta_charset_match.group(1).decode('ascii', errors='ignore')
logger.info(f"URL: {url} No content-type encoding in HTTP headers - Using encoding '{encoding}' from HTML meta charset tag")
r.encoding = encoding
else:
encoding = chardet.detect(r.content)['encoding']
logger.warning(f"URL: {url} No charset in headers or meta tag, guessed encoding as '{encoding}' via chardet")
if encoding:
r.encoding = encoding
encoding = chardet.detect(r.content)['encoding']
if encoding:
r.encoding = encoding

self.headers = r.headers

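The sniffing branch above can be read as one small pure function: BOM first, then a <meta charset> scan of the first 2kb, then chardet as the statistical fallback. A condensed sketch of that priority order (chardet is already a dependency of this fetcher; the function name is illustrative):

import re
import chardet

def sniff_encoding(body: bytes):
    """Guess an encoding for `body` in browser-like priority order (sketch)."""
    boms = [(b'\xef\xbb\xbf', 'utf-8-sig'), (b'\xff\xfe', 'utf-16-le'), (b'\xfe\xff', 'utf-16-be')]
    for bom, enc in boms:
        if body.startswith(bom):
            return enc  # 1. BOM wins outright
    m = re.search(rb'<meta[^>]+charset\s*=\s*["\']?\s*([^"\'\s;>]+)', body[:2000], re.IGNORECASE)
    if m:
        return m.group(1).decode('ascii', errors='ignore')  # 2. declared in the HTML itself
    return chardet.detect(body)['encoding']  # 3. statistical last resort (may be None)
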
@@ -199,32 +103,21 @@ class fetcher(Fetcher):

self.raw_content = r.content

# If the content is an image, set it as screenshot for SSIM/visual comparison
content_type = r.headers.get('content-type', '').lower()
if 'image/' in content_type:
self.screenshot = r.content
logger.debug(f"Image content detected ({content_type}), set as screenshot for comparison")

async def run(self,
fetch_favicon=True,
current_include_filters=None,
empty_pages_are_a_change=False,
ignore_status_codes=False,
is_binary=False,
request_body=None,
request_headers=None,
request_method=None,
screenshot_format=None,
timeout=None,
url=None,
watch_uuid=None,
):
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False,
empty_pages_are_a_change=False):
"""Async wrapper that runs the synchronous requests code in a thread pool"""


loop = asyncio.get_event_loop()


# Run the synchronous _run_sync in a thread pool to avoid blocking the event loop
# Retry logic is handled by requests' HTTPAdapter (see _run_sync for configuration)
await loop.run_in_executor(
None, # Use default ThreadPoolExecutor
lambda: self._run_sync(
@@ -236,12 +129,12 @@ class fetcher(Fetcher):
ignore_status_codes=ignore_status_codes,
current_include_filters=current_include_filters,
is_binary=is_binary,
empty_pages_are_a_change=empty_pages_are_a_change,
watch_uuid=watch_uuid,
empty_pages_are_a_change=empty_pages_are_a_change
)
)

async def quit(self, watch=None):
def quit(self, watch=None):

# In case they switched to `requests` fetcher from something else
# Then the screenshot could be old, in any case, it's not used here.
# REMOVE_REQUESTS_OLD_SCREENSHOTS - Mainly used for testing
@@ -253,15 +146,3 @@ class fetcher(Fetcher):
except Exception as e:
logger.warning(f"Failed to unlink screenshot: {screenshot} - {e}")


# Plugin registration for built-in fetcher
class RequestsFetcherPlugin:
"""Plugin class that registers the requests fetcher as a built-in plugin."""

def register_content_fetcher(self):
"""Register the requests fetcher"""
return ('html_requests', fetcher)


# Create module-level instance for plugin registration
requests_plugin = RequestsFetcherPlugin()

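The async-wrapper pattern used by this fetcher - loop.run_in_executor(None, lambda: ...) around a blocking requests call - generalizes to any synchronous client. A minimal self-contained sketch:

import asyncio
import requests

def fetch_sync(url, timeout=10):
    # Blocking call; would stall the event loop if awaited directly
    return requests.get(url, timeout=timeout).status_code

async def fetch_async(url):
    loop = asyncio.get_event_loop()
    # None selects the default ThreadPoolExecutor; the lambda defers argument binding
    return await loop.run_in_executor(None, lambda: fetch_sync(url))

# usage: asyncio.run(fetch_async('https://example.com'))
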
@@ -1,101 +1,79 @@
(async () => {
// Define the function inside the IIFE for console testing
window.getFaviconAsBlob = async function() {
const links = Array.from(document.querySelectorAll(
'link[rel~="apple-touch-icon"], link[rel~="icon"]'
));
const links = Array.from(document.querySelectorAll(
'link[rel~="apple-touch-icon"], link[rel~="icon"]'
));

const icons = links.map(link => {
const sizesStr = link.getAttribute('sizes');
let size = 0;
if (sizesStr) {
const [w] = sizesStr.split('x').map(Number);
if (!isNaN(w)) size = w;
} else {
size = 16;
}
return {
size,
rel: link.getAttribute('rel'),
href: link.href,
hasSizes: !!sizesStr
};
});

// If no icons found, add fallback favicon.ico
if (icons.length === 0) {
icons.push({
size: 16,
rel: 'icon',
href: '/favicon.ico',
hasSizes: false
});
const icons = links.map(link => {
const sizesStr = link.getAttribute('sizes');
let size = 0;
if (sizesStr) {
const [w] = sizesStr.split('x').map(Number);
if (!isNaN(w)) size = w;
} else {
size = 16;
}
return {
size,
rel: link.getAttribute('rel'),
href: link.href
};
});

// sort preference: highest resolution first, then apple-touch-icon, then regular icons
icons.sort((a, b) => {
// First priority: actual size (highest first)
if (a.size !== b.size) {
return b.size - a.size;
}

// Second priority: apple-touch-icon over regular icon
const isAppleA = /apple-touch-icon/.test(a.rel);
const isAppleB = /apple-touch-icon/.test(b.rel);
if (isAppleA && !isAppleB) return -1;
if (!isAppleA && isAppleB) return 1;

// Third priority: icons with no size attribute (fallback icons) last
const hasNoSizeA = !a.hasSizes;
const hasNoSizeB = !b.hasSizes;
if (hasNoSizeA && !hasNoSizeB) return 1;
if (!hasNoSizeA && hasNoSizeB) return -1;

return 0;
// If no icons found, add fallback favicon.ico
if (icons.length === 0) {
icons.push({
size: 16,
rel: 'icon',
href: '/favicon.ico'
});
}

const timeoutMs = 2000;
// sort preference
icons.sort((a, b) => {
const isAppleA = /apple-touch-icon/.test(a.rel);
const isAppleB = /apple-touch-icon/.test(b.rel);
if (isAppleA && !isAppleB) return -1;
if (!isAppleA && isAppleB) return 1;
return b.size - a.size;
});

for (const icon of icons) {
try {
const controller = new AbortController();
const timeout = setTimeout(() => controller.abort(), timeoutMs);
const timeoutMs = 2000;

const resp = await fetch(icon.href, {
signal: controller.signal,
redirect: 'follow'
});
for (const icon of icons) {
try {
const controller = new AbortController();
const timeout = setTimeout(() => controller.abort(), timeoutMs);

clearTimeout(timeout);
const resp = await fetch(icon.href, {
signal: controller.signal,
redirect: 'follow'
});

if (!resp.ok) {
continue;
}
clearTimeout(timeout);

const blob = await resp.blob();

// Convert blob to base64
const reader = new FileReader();
return await new Promise(resolve => {
reader.onloadend = () => {
resolve({
url: icon.href,
base64: reader.result.split(",")[1]
});
};
reader.readAsDataURL(blob);
});

} catch (e) {
if (!resp.ok) {
continue;
}

const blob = await resp.blob();

// Convert blob to base64
const reader = new FileReader();
return await new Promise(resolve => {
reader.onloadend = () => {
resolve({
url: icon.href,
base64: reader.result.split(",")[1]
});
};
reader.readAsDataURL(blob);
});

} catch (e) {
continue;
}
}

// nothing found
return null;
};

// Auto-execute and return result for page.evaluate()
return await window.getFaviconAsBlob();
// nothing found
return null;
})();


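The reworked sort above is a three-key comparison: size descending, then apple-touch-icon preference, then explicitly-sized icons before fallback ones. The same ordering expressed as a Python sort key, for comparison (the icon dicts simply mirror what the JS builds):

def icon_sort_key(icon):
    # Larger size first; apple-touch-icon before plain icon;
    # icons with an explicit sizes attribute before fallbacks
    is_apple = 'apple-touch-icon' in icon['rel']
    return (-icon['size'], not is_apple, not icon['hasSizes'])

icons = [
    {'size': 16, 'rel': 'icon', 'hasSizes': False, 'href': '/favicon.ico'},
    {'size': 180, 'rel': 'apple-touch-icon', 'hasSizes': True, 'href': '/apple.png'},
]
icons.sort(key=icon_sort_key)  # the 180px apple-touch-icon sorts first
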
@@ -1,107 +0,0 @@
/**
 * Lock Element Dimensions for Screenshot Capture (First Viewport Only)
 *
 * THE PROBLEM:
 * When taking full-page screenshots of tall pages, Chrome/Puppeteer/Playwright need to:
 * 1. Temporarily change the viewport height to a large value (e.g., 800px → 3809px)
 * 2. Take screenshots in chunks while scrolling
 * 3. Stitch the chunks together
 *
 * However, changing the viewport height triggers CSS media queries like:
 * @media (min-height: 860px) { .ad { height: 250px; } }
 *
 * This causes elements (especially ads/headers) to resize during screenshot capture.
 *
 * THE SOLUTION:
 * Lock element dimensions in the FIRST VIEWPORT ONLY with !important inline styles.
 * This prevents headers, navigation, and top ads from resizing when viewport changes.
 * We only lock the visible portion because:
 * - Most layout shifts happen in headers/navbars/top ads
 * - Locking only visible elements is 100x+ faster (100-200 elements vs 10,000+)
 * - Below-fold content shifts don't affect visual comparison accuracy
 *
 * WHAT THIS SCRIPT DOES:
 * 1. Gets current viewport height
 * 2. Finds elements within first viewport (top of page to bottom of screen)
 * 3. Locks their dimensions with !important inline styles
 * 4. Disables ResizeObserver API (for JS-based resizing)
 *
 * USAGE:
 * Execute this script BEFORE calling capture_full_page() / screenshot functions.
 * Only enabled for image_ssim_diff processor (visual comparison).
 * Default: OFF for performance.
 *
 * PERFORMANCE:
 * - Only processes 100-300 elements (first viewport) vs 10,000+ (entire page)
 * - Typically completes in 10-50ms
 * - 100x+ faster than locking entire page
 *
 * @see https://github.com/dgtlmoon/changedetection.io/issues/XXXX
 */

(() => {
// Store original styles in a global WeakMap for later restoration
window.__elementSizingRestore = new WeakMap();

const start = performance.now();

// Get current viewport height (visible portion of page)
const viewportHeight = window.innerHeight;

// Get all elements and filter to FIRST VIEWPORT ONLY
// This dramatically reduces elements to process (100-300 vs 10,000+)
const allElements = Array.from(document.querySelectorAll('*'));

// BATCH READ PHASE: Get bounding rects and filter to viewport
const measurements = allElements.map(el => {
const rect = el.getBoundingClientRect();
const computed = window.getComputedStyle(el);

// Only lock elements in the first viewport (visible on initial page load)
// rect.top < viewportHeight means element starts within visible area
const inViewport = rect.top < viewportHeight && rect.top >= 0;
const hasSize = rect.height > 0 && rect.width > 0;

return inViewport && hasSize ? { el, computed, rect } : null;
}).filter(Boolean); // Remove null entries

const elapsed = performance.now() - start;
console.log(`Locked first viewport elements: ${measurements.length} of ${allElements.length} total elements (viewport height: ${viewportHeight}px, took ${elapsed.toFixed(0)}ms)`);

// BATCH WRITE PHASE: Apply all inline styles without triggering layout
// No interleaved reads means browser can optimize style application
measurements.forEach(({el, computed, rect}) => {
// Save original inline style values BEFORE locking
const properties = ['height', 'min-height', 'max-height', 'width', 'min-width', 'max-width'];
const originalStyles = {};
properties.forEach(prop => {
originalStyles[prop] = {
value: el.style.getPropertyValue(prop),
priority: el.style.getPropertyPriority(prop)
};
});
window.__elementSizingRestore.set(el, originalStyles);

// Lock dimensions with !important to override media queries
if (rect.height > 0) {
el.style.setProperty('height', computed.height, 'important');
el.style.setProperty('min-height', computed.height, 'important');
el.style.setProperty('max-height', computed.height, 'important');
}
if (rect.width > 0) {
el.style.setProperty('width', computed.width, 'important');
el.style.setProperty('min-width', computed.width, 'important');
el.style.setProperty('max-width', computed.width, 'important');
}
});

// Also disable ResizeObserver for JS-based resizing
window.ResizeObserver = class {
constructor() {}
observe() {}
unobserve() {}
disconnect() {}
};

console.log(`✓ Element dimensions locked (${measurements.length} elements) to prevent media query changes during screenshot`);
})();
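Taken together with capture_full_page() above, the lock/unlock scripts bracket the screenshot like this - a sketch of the call order only, with illustrative names; the real logic lives inside the fetchers:

import os

async def capture_with_locked_layout(page, capture_fn, res_dir):
    """Lock first-viewport element sizes, screenshot, then restore (sketch)."""
    with open(os.path.join(res_dir, 'lock-elements-sizing.js')) as f:
        await page.evaluate(f.read())      # freeze sizes before viewport changes
    try:
        return await capture_fn(page)      # chunked capture + stitching happens here
    finally:
        with open(os.path.join(res_dir, 'unlock-elements-sizing.js')) as f:
            await page.evaluate(f.read())  # restore normal responsive behaviour
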
@@ -47,7 +47,6 @@ async () => {
'nicht lieferbar',
'nicht verfügbar',
'nicht vorrätig',
'nicht mehr lieferbar',
'nicht zur verfügung',
'nie znaleziono produktów',
'niet beschikbaar',

@@ -1,52 +0,0 @@
/**
 * Unlock Element Dimensions After Screenshot Capture
 *
 * This script removes the inline !important styles that were applied by lock-elements-sizing.js
 * and restores elements to their original state using the WeakMap created during locking.
 *
 * USAGE:
 * Execute this script AFTER completing screenshot capture and restoring the viewport.
 * This allows the page to return to its normal responsive behavior.
 *
 * WHAT THIS SCRIPT DOES:
 * 1. Iterates through every element that was locked
 * 2. Reads original style values from the global WeakMap
 * 3. Restores original inline styles (or removes them if they weren't set originally)
 * 4. Cleans up the WeakMap
 *
 * @see lock-elements-sizing.js for the locking mechanism
 */

(() => {
// Check if the restore map exists
if (!window.__elementSizingRestore) {
console.log('⚠ Element sizing restore map not found - elements may not have been locked');
return;
}

// Restore all locked dimension styles to their original state
document.querySelectorAll('*').forEach(el => {
const originalStyles = window.__elementSizingRestore.get(el);

if (originalStyles) {
const properties = ['height', 'min-height', 'max-height', 'width', 'min-width', 'max-width'];

properties.forEach(prop => {
const original = originalStyles[prop];

if (original.value) {
// Restore original value with original priority
el.style.setProperty(prop, original.value, original.priority || '');
} else {
// Was not set originally, so remove it
el.style.removeProperty(prop);
}
});
}
});

// Clean up the global WeakMap
delete window.__elementSizingRestore;

console.log('✓ Element dimensions unlocked - page restored to original state');
})();
@@ -8,42 +8,16 @@ from loguru import logger

from changedetectionio.content_fetchers import SCREENSHOT_MAX_HEIGHT_DEFAULT, SCREENSHOT_DEFAULT_QUALITY

def stitch_images_worker_raw_bytes(pipe_conn, original_page_height, capture_height):
    """
    Stitch image chunks together in a separate process.

    Uses spawn multiprocessing to isolate PIL's C-level memory allocation.
    When the subprocess exits, the OS reclaims ALL memory including C-level allocations
    that Python's GC cannot release. This prevents the ~50MB per stitch from accumulating
    in the main process.

    Trade-off: Adds 35MB resource_tracker subprocess, but prevents 500MB+ memory leak
    in main process (much better at scale: 35GB vs 500GB for 1000 instances).

    Args:
        pipe_conn: Pipe connection to receive data and send result
        original_page_height: Original page height in pixels
        capture_height: Maximum capture height
    """
def stitch_images_worker(pipe_conn, chunks_bytes, original_page_height, capture_height):
    import os
    import io
    import struct
    from PIL import Image, ImageDraw, ImageFont

    try:
        # Receive chunk count as 4-byte integer (no pickle!)
        count_bytes = pipe_conn.recv_bytes()
        chunk_count = struct.unpack('I', count_bytes)[0]

        # Receive each chunk as raw bytes (no pickle!)
        chunks_bytes = []
        for _ in range(chunk_count):
            chunks_bytes.append(pipe_conn.recv_bytes())

        # Load images from byte chunks
        images = [Image.open(io.BytesIO(b)) for b in chunks_bytes]
        del chunks_bytes

        total_height = sum(im.height for im in images)
        max_width = max(im.width for im in images)

@@ -53,42 +27,47 @@ def stitch_images_worker_raw_bytes(pipe_conn, original_page_height, capture_heig
        for im in images:
            stitched.paste(im, (0, y_offset))
            y_offset += im.height
            im.close()
        del images

        # Draw caption only if page was trimmed
        # Draw caption on top (overlaid, not extending canvas)
        draw = ImageDraw.Draw(stitched)

        if original_page_height > capture_height:
            draw = ImageDraw.Draw(stitched)
            caption_text = f"WARNING: Screenshot was {original_page_height}px but trimmed to {capture_height}px because it was too long"
            padding = 10
            font_size = 35
            font_color = (255, 0, 0)
            background_color = (255, 255, 255)

            # Try to load a proper font
            try:
                font = ImageFont.truetype("arial.ttf", 35)
                font = ImageFont.truetype("arial.ttf", font_size)
            except IOError:
                font = ImageFont.load_default()

            bbox = draw.textbbox((0, 0), caption_text, font=font)
            text_width = bbox[2] - bbox[0]
            text_height = bbox[3] - bbox[1]
            draw.rectangle([(0, 0), (max_width, text_height + 2 * padding)], fill=(255, 255, 255))
            text_x = (max_width - text_width) // 2
            draw.text((text_x, padding), caption_text, font=font, fill=(255, 0, 0))

        # Encode and send
            # Draw white rectangle background behind text
            rect_top = 0
            rect_bottom = text_height + 2 * padding
            draw.rectangle([(0, rect_top), (max_width, rect_bottom)], fill=background_color)

            # Draw text centered horizontally, 10px padding from top of the rectangle
            text_x = (max_width - text_width) // 2
            text_y = padding
            draw.text((text_x, text_y), caption_text, font=font, fill=font_color)

        # Encode and send image
        output = io.BytesIO()
        stitched.save(output, format="JPEG", quality=int(os.getenv("SCREENSHOT_QUALITY", SCREENSHOT_DEFAULT_QUALITY)), optimize=True)
        result_bytes = output.getvalue()
        stitched.save(output, format="JPEG", quality=int(os.getenv("SCREENSHOT_QUALITY", SCREENSHOT_DEFAULT_QUALITY)))
        pipe_conn.send_bytes(output.getvalue())

        stitched.close()
        del stitched
        output.close()
        del output

        pipe_conn.send_bytes(result_bytes)
        del result_bytes

    except Exception as e:
        logger.error(f"Error in stitch_images_worker_raw_bytes: {e}")
        error_msg = f"error:{e}".encode('utf-8')
        pipe_conn.send_bytes(error_msg)
        pipe_conn.send(f"error:{e}")
    finally:
        pipe_conn.close()

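# --- Editor's note: a minimal sketch (not part of this diff) of how the parent
# process is assumed to drive stitch_images_worker_raw_bytes over the Pipe: the
# chunk count goes first as a struct-packed 4-byte integer, then each image chunk
# as raw bytes, and the stitched JPEG (or an "error:..." payload) comes back.
# The helper name and call site are hypothetical.
#
# import multiprocessing
# import struct
#
# def stitch_in_subprocess(chunks, page_height, capture_height):
#     ctx = multiprocessing.get_context('spawn')  # spawn isolates PIL's C-level memory
#     parent_conn, child_conn = ctx.Pipe(duplex=True)
#     p = ctx.Process(target=stitch_images_worker_raw_bytes,
#                     args=(child_conn, page_height, capture_height))
#     p.start()
#     parent_conn.send_bytes(struct.pack('I', len(chunks)))  # chunk count, no pickle
#     for c in chunks:
#         parent_conn.send_bytes(c)                          # raw JPEG/PNG bytes, no pickle
#     result = parent_conn.recv_bytes()                      # stitched image or b"error:..."
#     p.join()
#     return result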
@@ -4,32 +4,17 @@ import time
from loguru import logger
from changedetectionio.content_fetchers.base import Fetcher


class fetcher(Fetcher):
    if os.getenv("WEBDRIVER_URL"):
        fetcher_description = f"WebDriver Chrome/Javascript via \"{os.getenv('WEBDRIVER_URL', '')}\""
        fetcher_description = "WebDriver Chrome/Javascript via '{}'".format(os.getenv("WEBDRIVER_URL"))
    else:
        fetcher_description = "WebDriver Chrome/Javascript"

    proxy = None
    proxy_url = None

    # Capability flags
    supports_browser_steps = False
    supports_screenshots = True
    supports_xpath_element_data = True

    @classmethod
    def get_status_icon_data(cls):
        """Return Chrome browser icon data for WebDriver fetcher."""
        return {
            'filename': 'google-chrome-icon.png',
            'alt': 'Using a Chrome browser',
            'title': 'Using a Chrome browser'
        }

    def __init__(self, proxy_override=None, custom_browser_connection_url=None, **kwargs):
        super().__init__(**kwargs)
    def __init__(self, proxy_override=None, custom_browser_connection_url=None):
        super().__init__()
        from urllib.parse import urlparse
        from selenium.webdriver.common.proxy import Proxy

@@ -40,6 +25,7 @@ class fetcher(Fetcher):
            self.browser_connection_is_custom = True
            self.browser_connection_url = custom_browser_connection_url


        ##### PROXY SETUP #####

        proxy_sources = [
@@ -52,7 +38,7 @@ class fetcher(Fetcher):
            os.getenv('webdriver_proxyHttps'),
            os.getenv('webdriver_httpsProxy'),
            os.getenv('webdriver_sslProxy'),
            proxy_override,  # last one should override
            proxy_override,  # last one should override
        ]
        # The built-in Selenium proxy handling is very unreliable, so we just grab whichever proxy setting we can find and pass it via --proxy-server=
        for k in filter(None, proxy_sources):
@@ -60,23 +46,20 @@ class fetcher(Fetcher):
                continue
            self.proxy_url = k.strip()

    async def run(self,
                  fetch_favicon=True,
                  current_include_filters=None,
                  empty_pages_are_a_change=False,
                  ignore_status_codes=False,
                  is_binary=False,
                  request_body=None,
                  request_headers=None,
                  request_method=None,
                  screenshot_format=None,
                  timeout=None,
                  url=None,
                  watch_uuid=None,
                  ):
                  url,
                  timeout,
                  request_headers,
                  request_body,
                  request_method,
                  ignore_status_codes=False,
                  current_include_filters=None,
                  is_binary=False,
                  empty_pages_are_a_change=False):

        import asyncio

        # Wrap the entire selenium operation in a thread executor
        def _run_sync():
            from selenium.webdriver.chrome.options import Options as ChromeOptions
@@ -147,34 +130,7 @@ class fetcher(Fetcher):
            time.sleep(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay)
            self.content = driver.page_source
            self.headers = {}

            # Selenium always captures as PNG, convert to JPEG if needed
            screenshot_png = driver.get_screenshot_as_png()

            # Convert to JPEG if requested (for smaller file size)
            if self.screenshot_format and self.screenshot_format.upper() == 'JPEG':
                from PIL import Image
                import io
                img = Image.open(io.BytesIO(screenshot_png))
                # Convert to RGB if needed (JPEG doesn't support transparency)
                # Always convert non-RGB modes to RGB to ensure JPEG compatibility
                if img.mode in ('RGBA', 'LA', 'P', 'PA'):
                    # Handle transparency by compositing onto white background
                    if img.mode == 'P':
                        img = img.convert('RGBA')
                    background = Image.new('RGB', img.size, (255, 255, 255))
                    if img.mode in ('RGBA', 'LA', 'PA'):
                        background.paste(img, mask=img.split()[-1])  # Use alpha channel as mask
                    img = background
                elif img.mode != 'RGB':
                    # For other modes, direct conversion
                    img = img.convert('RGB')
                jpeg_buffer = io.BytesIO()
                img.save(jpeg_buffer, format='JPEG', quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))
                self.screenshot = jpeg_buffer.getvalue()
                img.close()
            else:
                self.screenshot = screenshot_png
            self.screenshot = driver.get_screenshot_as_png()
        except Exception as e:
            driver.quit()
            raise e
@@ -185,15 +141,3 @@ class fetcher(Fetcher):
        loop = asyncio.get_event_loop()
        await loop.run_in_executor(None, _run_sync)


# Plugin registration for built-in fetcher
class WebDriverSeleniumFetcherPlugin:
    """Plugin class that registers the WebDriver Selenium fetcher as a built-in plugin."""

    def register_content_fetcher(self):
        """Register the WebDriver Selenium fetcher"""
        return ('html_webdriver', fetcher)


# Create module-level instance for plugin registration
webdriver_selenium_plugin = WebDriverSeleniumFetcherPlugin()

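# --- Editor's note: an illustrative sketch (assumed, not from this diff) of how a
# third-party fetcher could register through the same hook that
# WebDriverSeleniumFetcherPlugin exposes above. The hook name
# register_content_fetcher comes from this file; the project name passed to
# HookimplMarker and the fetcher class are hypothetical.
#
# import pluggy
#
# hookimpl = pluggy.HookimplMarker("changedetectionio")  # project name assumed
#
# class MyCustomFetcherPlugin:
#     @hookimpl
#     def register_content_fetcher(self):
#         # Return (fetcher_key, fetcher_class), mirroring the built-in plugin
#         return ('html_my_custom', MyCustomFetcher)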
@@ -57,15 +57,14 @@ class SignalPriorityQueue(queue.PriorityQueue):
    def put(self, item, block=True, timeout=None):
        # Call the parent's put method first
        super().put(item, block, timeout)

        # After putting the item in the queue, check if it has a UUID and emit signal
        if hasattr(item, 'item') and isinstance(item.item, dict) and 'uuid' in item.item:
            uuid = item.item['uuid']
            # Get the signal and send it if it exists
            watch_check_update = signal('watch_check_update')
            if watch_check_update:
                # NOTE: This would block other workers from .put/.get while this signal sends
                # Signal handlers may iterate the queue/datastore while holding locks
                # Send the watch_uuid parameter
                watch_check_update.send(watch_uuid=uuid)

        # Send queue_length signal with current queue size
@@ -313,15 +312,14 @@ class AsyncSignalPriorityQueue(asyncio.PriorityQueue):
    async def put(self, item):
        # Call the parent's put method first
        await super().put(item)

        # After putting the item in the queue, check if it has a UUID and emit signal
        if hasattr(item, 'item') and isinstance(item.item, dict) and 'uuid' in item.item:
            uuid = item.item['uuid']
            # Get the signal and send it if it exists
            watch_check_update = signal('watch_check_update')
            if watch_check_update:
                # NOTE: This would block other workers from .put/.get while this signal sends
                # Signal handlers may iterate the queue/datastore while holding locks
                # Send the watch_uuid parameter
                watch_check_update.send(watch_uuid=uuid)

        # Send queue_length signal with current queue size

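# --- Editor's note: a minimal sketch (assumed usage, not from this diff) of a
# consumer subscribing to the blinker signal that both put() overrides emit:
#
# from blinker import signal
#
# def on_watch_queued(sender, watch_uuid=None):
#     print(f"watch {watch_uuid} was queued for checking")
#
# signal('watch_check_update').connect(on_watch_queued)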
changedetectionio/diff.py (Normal file, 113 lines)
@@ -0,0 +1,113 @@
import difflib
from typing import List, Iterator, Union

REMOVED_STYLE = "background-color: #fadad7; color: #b30000;"
ADDED_STYLE = "background-color: #eaf2c2; color: #406619;"

def same_slicer(lst: List[str], start: int, end: int) -> List[str]:
    """Return a slice of the list, or a single element if start == end."""
    return lst[start:end] if start != end else [lst[start]]

def customSequenceMatcher(
    before: List[str],
    after: List[str],
    include_equal: bool = False,
    include_removed: bool = True,
    include_added: bool = True,
    include_replaced: bool = True,
    include_change_type_prefix: bool = True,
    html_colour: bool = False
) -> Iterator[List[str]]:
    """
    Compare two sequences and yield differences based on specified parameters.

    Args:
        before (List[str]): Original sequence
        after (List[str]): Modified sequence
        include_equal (bool): Include unchanged parts
        include_removed (bool): Include removed parts
        include_added (bool): Include added parts
        include_replaced (bool): Include replaced parts
        include_change_type_prefix (bool): Add prefixes to indicate change types
        html_colour (bool): Use HTML background colors for differences

    Yields:
        List[str]: Differences between sequences
    """
    cruncher = difflib.SequenceMatcher(isjunk=lambda x: x in " \t", a=before, b=after)

    for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
        if include_equal and tag == 'equal':
            yield before[alo:ahi]
        elif include_removed and tag == 'delete':
            if html_colour:
                yield [f'<span style="{REMOVED_STYLE}">{line}</span>' for line in same_slicer(before, alo, ahi)]
            else:
                yield [f"(removed) {line}" for line in same_slicer(before, alo, ahi)] if include_change_type_prefix else same_slicer(before, alo, ahi)
        elif include_replaced and tag == 'replace':
            if html_colour:
                yield [f'<span style="{REMOVED_STYLE}">{line}</span>' for line in same_slicer(before, alo, ahi)] + \
                      [f'<span style="{ADDED_STYLE}">{line}</span>' for line in same_slicer(after, blo, bhi)]
            else:
                yield [f"(changed) {line}" for line in same_slicer(before, alo, ahi)] + \
                      [f"(into) {line}" for line in same_slicer(after, blo, bhi)] if include_change_type_prefix else same_slicer(before, alo, ahi) + same_slicer(after, blo, bhi)
        elif include_added and tag == 'insert':
            if html_colour:
                yield [f'<span style="{ADDED_STYLE}">{line}</span>' for line in same_slicer(after, blo, bhi)]
            else:
                yield [f"(added) {line}" for line in same_slicer(after, blo, bhi)] if include_change_type_prefix else same_slicer(after, blo, bhi)

def render_diff(
    previous_version_file_contents: str,
    newest_version_file_contents: str,
    include_equal: bool = False,
    include_removed: bool = True,
    include_added: bool = True,
    include_replaced: bool = True,
    line_feed_sep: str = "\n",
    include_change_type_prefix: bool = True,
    patch_format: bool = False,
    html_colour: bool = False
) -> str:
    """
    Render the difference between two file contents.

    Args:
        previous_version_file_contents (str): Original file contents
        newest_version_file_contents (str): Modified file contents
        include_equal (bool): Include unchanged parts
        include_removed (bool): Include removed parts
        include_added (bool): Include added parts
        include_replaced (bool): Include replaced parts
        line_feed_sep (str): Separator for lines in output
        include_change_type_prefix (bool): Add prefixes to indicate change types
        patch_format (bool): Use patch format for output
        html_colour (bool): Use HTML background colors for differences

    Returns:
        str: Rendered difference
    """
    newest_lines = [line.rstrip() for line in newest_version_file_contents.splitlines()]
    previous_lines = [line.rstrip() for line in previous_version_file_contents.splitlines()] if previous_version_file_contents else []

    if patch_format:
        patch = difflib.unified_diff(previous_lines, newest_lines)
        return line_feed_sep.join(patch)

    rendered_diff = customSequenceMatcher(
        before=previous_lines,
        after=newest_lines,
        include_equal=include_equal,
        include_removed=include_removed,
        include_added=include_added,
        include_replaced=include_replaced,
        include_change_type_prefix=include_change_type_prefix,
        html_colour=html_colour
    )

    def flatten(lst: List[Union[str, List[str]]]) -> str:
        return line_feed_sep.join(flatten(x) if isinstance(x, list) else x for x in lst)

    return flatten(rendered_diff)
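# --- Editor's note: a minimal usage sketch of the new render_diff() above
# (inputs and output are illustrative, derived from the default arguments):
#
# from changedetectionio.diff import render_diff
#
# print(render_diff("price: 63\nin stock", "price: 66\nin stock"))
# # (changed) price: 63
# # (into) price: 66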
@@ -1,479 +0,0 @@
"""
Diff rendering module for change detection.

This module provides functions for rendering differences between text content,
with support for various output formats and tokenization strategies.
"""

import difflib
from typing import List, Iterator, Union
from loguru import logger
import diff_match_patch as dmp_module
import re
import time

from .tokenizers import TOKENIZERS, tokenize_words_and_html

# Remember! Gmail, Outlook etc don't support <style>; styles must be inline.
# Gmail: strips <ins> and <del> tags entirely.
# This is for the WHOLE line background style
REMOVED_STYLE = "background-color: #fadad7; color: #b30000;"
ADDED_STYLE = "background-color: #eaf2c2; color: #406619;"
HTML_REMOVED_STYLE = REMOVED_STYLE  # Export alias for handler.py
HTML_ADDED_STYLE = ADDED_STYLE  # Export alias for handler.py

# Darker backgrounds for nested highlighting (changed parts within lines)
REMOVED_INNER_STYLE = "background-color: #ff867a; color: #111;"
ADDED_INNER_STYLE = "background-color: #b2e841; color: #444;"
HTML_CHANGED_STYLE = REMOVED_STYLE
HTML_CHANGED_INTO_STYLE = ADDED_STYLE

# Placemarker constants - these get replaced by apply_service_tweaks() in handler.py
# Something that can't get escaped to HTML by accident
REMOVED_PLACEMARKER_OPEN = '@removed_PLACEMARKER_OPEN'
REMOVED_PLACEMARKER_CLOSED = '@removed_PLACEMARKER_CLOSED'

ADDED_PLACEMARKER_OPEN = '@added_PLACEMARKER_OPEN'
ADDED_PLACEMARKER_CLOSED = '@added_PLACEMARKER_CLOSED'

CHANGED_PLACEMARKER_OPEN = '@changed_PLACEMARKER_OPEN'
CHANGED_PLACEMARKER_CLOSED = '@changed_PLACEMARKER_CLOSED'

CHANGED_INTO_PLACEMARKER_OPEN = '@changed_into_PLACEMARKER_OPEN'
CHANGED_INTO_PLACEMARKER_CLOSED = '@changed_into_PLACEMARKER_CLOSED'

# Compiled regex patterns for performance
WHITESPACE_NORMALIZE_RE = re.compile(r'\s+')


def render_inline_word_diff(before_line: str, after_line: str, ignore_junk: bool = False, markdown_style: str = None, tokenizer: str = 'words_and_html') -> tuple[str, bool]:
    """
    Render word-level differences between two lines inline using diff-match-patch library.

    Args:
        before_line: Original line text
        after_line: Modified line text
        ignore_junk: Ignore whitespace-only changes
        markdown_style: Unused (kept for backwards compatibility)
        tokenizer: Name of tokenizer to use from TOKENIZERS registry (default: 'words_and_html')

    Returns:
        tuple[str, bool]: (diff output with inline word-level highlighting, has_changes flag)
    """
    # Normalize whitespace if ignore_junk is enabled
    if ignore_junk:
        # Normalize whitespace: replace multiple spaces/tabs with single space
        before_normalized = WHITESPACE_NORMALIZE_RE.sub(' ', before_line)
        after_normalized = WHITESPACE_NORMALIZE_RE.sub(' ', after_line)
    else:
        before_normalized = before_line
        after_normalized = after_line

    # Use diff-match-patch with word-level tokenization
    # Strategy: Use linesToChars to treat words as atomic units
    dmp = dmp_module.diff_match_patch()

    # Get the tokenizer function from the registry
    tokenizer_func = TOKENIZERS.get(tokenizer, tokenize_words_and_html)

    # Tokenize both lines using the selected tokenizer
    before_tokens = tokenizer_func(before_normalized)
    after_tokens = tokenizer_func(after_normalized or ' ')

    # Create mappings for linesToChars (using it for word-mode)
    # Join tokens with newline so each "line" is a token
    before_text = '\n'.join(before_tokens)
    after_text = '\n'.join(after_tokens)

    # Use linesToChars for word-mode diffing
    lines_result = dmp.diff_linesToChars(before_text, after_text)
    line_before, line_after, line_array = lines_result

    # Perform diff on the encoded strings
    diffs = dmp.diff_main(line_before, line_after, False)

    # Convert back to original text
    dmp.diff_charsToLines(diffs, line_array)

    # Remove the newlines we added for tokenization
    diffs = [(op, text.replace('\n', '')) for op, text in diffs]

    # DON'T apply semantic cleanup here - it would break token boundaries
    # (e.g., "63" -> "66" would become "6" + "3" vs "6" + "6")
    # We want to preserve the tokenizer's word boundaries

    # Check if there are any changes
    has_changes = any(op != 0 for op, _ in diffs)

    if ignore_junk and not has_changes:
        return after_line, False

    # Check if the whole line is replaced (no unchanged content)
    whole_line_replaced = not any(op == 0 and text.strip() for op, text in diffs)

    # Build the output using placemarkers
    # When whole line is replaced, wrap entire removed content once and entire added content once
    if whole_line_replaced:
        removed_tokens = []
        added_tokens = []

        for op, text in diffs:
            if op == 0:  # Equal (e.g., whitespace tokens in common positions)
                # Include in both removed and added to preserve spacing
                removed_tokens.append(text)
                added_tokens.append(text)
            elif op == -1:  # Deletion
                removed_tokens.append(text)
            elif op == 1:  # Insertion
                added_tokens.append(text)

        # Join all tokens and wrap the entire string once for removed, once for added
        result_parts = []

        if removed_tokens:
            removed_full = ''.join(removed_tokens).rstrip()
            trailing_removed = ''.join(removed_tokens)[len(removed_full):] if len(''.join(removed_tokens)) > len(removed_full) else ''
            result_parts.append(f'{CHANGED_PLACEMARKER_OPEN}{removed_full}{CHANGED_PLACEMARKER_CLOSED}{trailing_removed}')

        if added_tokens:
            if result_parts:  # Add newline between removed and added
                result_parts.append('\n')
            added_full = ''.join(added_tokens).rstrip()
            trailing_added = ''.join(added_tokens)[len(added_full):] if len(''.join(added_tokens)) > len(added_full) else ''
            result_parts.append(f'{CHANGED_INTO_PLACEMARKER_OPEN}{added_full}{CHANGED_INTO_PLACEMARKER_CLOSED}{trailing_added}')

        return ''.join(result_parts), has_changes
    else:
        # Inline changes within the line
        result_parts = []
        for op, text in diffs:
            if op == 0:  # Equal
                result_parts.append(text)
            elif op == 1:  # Insertion
                # Don't wrap empty content (e.g., whitespace-only tokens after rstrip)
                content = text.rstrip()
                trailing = text[len(content):] if len(text) > len(content) else ''
                if content:
                    result_parts.append(f'{ADDED_PLACEMARKER_OPEN}{content}{ADDED_PLACEMARKER_CLOSED}{trailing}')
                else:
                    result_parts.append(trailing)
            elif op == -1:  # Deletion
                # Don't wrap empty content (e.g., whitespace-only tokens after rstrip)
                content = text.rstrip()
                trailing = text[len(content):] if len(text) > len(content) else ''
                if content:
                    result_parts.append(f'{REMOVED_PLACEMARKER_OPEN}{content}{REMOVED_PLACEMARKER_CLOSED}{trailing}')
                else:
                    result_parts.append(trailing)

        return ''.join(result_parts), has_changes

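# --- Editor's note: an approximate worked example of the placemarker output above
# (assumed; exact spacing depends on the tokenizer, and apply_service_tweaks() in
# handler.py later swaps the markers for service-specific markup):
#
# out, changed = render_inline_word_diff("price 63 EUR", "price 66 EUR")
# # out is roughly "price @removed_PLACEMARKER_OPEN63@removed_PLACEMARKER_CLOSED"
# #                "@added_PLACEMARKER_OPEN66@added_PLACEMARKER_CLOSED EUR"
# # changed == True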
def render_nested_line_diff(before_line: str, after_line: str, ignore_junk: bool = False, tokenizer: str = 'words_and_html') -> tuple[str, str, bool]:
    """
    Render line-level differences with nested highlighting for changed parts.

    Returns two separate lines:
    - Before line: light red background with dark red on removed parts
    - After line: light green background with dark green on added parts

    Args:
        before_line: Original line text
        after_line: Modified line text
        ignore_junk: Ignore whitespace-only changes
        tokenizer: Name of tokenizer to use from TOKENIZERS registry

    Returns:
        tuple[str, str, bool]: (before_with_highlights, after_with_highlights, has_changes)
    """
    # Normalize whitespace if ignore_junk is enabled
    if ignore_junk:
        before_normalized = WHITESPACE_NORMALIZE_RE.sub(' ', before_line)
        after_normalized = WHITESPACE_NORMALIZE_RE.sub(' ', after_line)
    else:
        before_normalized = before_line
        after_normalized = after_line

    # Use diff-match-patch with word-level tokenization
    dmp = dmp_module.diff_match_patch()

    # Get the tokenizer function from the registry
    tokenizer_func = TOKENIZERS.get(tokenizer, tokenize_words_and_html)

    # Tokenize both lines
    before_tokens = tokenizer_func(before_normalized)
    after_tokens = tokenizer_func(after_normalized or ' ')

    # Create mappings for linesToChars
    before_text = '\n'.join(before_tokens)
    after_text = '\n'.join(after_tokens)

    # Use linesToChars for word-mode diffing
    lines_result = dmp.diff_linesToChars(before_text, after_text)
    line_before, line_after, line_array = lines_result

    # Perform diff on the encoded strings
    diffs = dmp.diff_main(line_before, line_after, False)

    # Convert back to original text
    dmp.diff_charsToLines(diffs, line_array)

    # Remove the newlines we added for tokenization
    diffs = [(op, text.replace('\n', '')) for op, text in diffs]

    # DON'T apply semantic cleanup here - it would break token boundaries
    # (e.g., "63" -> "66" would become "6" + "3" vs "6" + "6")
    # We want to preserve the tokenizer's word boundaries

    # Check if there are any changes
    has_changes = any(op != 0 for op, _ in diffs)

    if ignore_junk and not has_changes:
        return before_line, after_line, False

    # Build the before line (with nested highlighting for removed parts)
    before_parts = []
    for op, text in diffs:
        if op == 0:  # Equal
            before_parts.append(text)
        elif op == -1:  # Deletion (in before)
            before_parts.append(f'<span style="{REMOVED_INNER_STYLE}">{text}</span>')
        # Skip insertions (op == 1) for the before line

    before_content = ''.join(before_parts)

    # Build the after line (with nested highlighting for added parts)
    after_parts = []
    for op, text in diffs:
        if op == 0:  # Equal
            after_parts.append(text)
        elif op == 1:  # Insertion (in after)
            after_parts.append(f'<span style="{ADDED_INNER_STYLE}">{text}</span>')
        # Skip deletions (op == -1) for the after line

    after_content = ''.join(after_parts)

    # Wrap content with placemarkers (inner HTML highlighting is preserved)
    before_html = f'{CHANGED_PLACEMARKER_OPEN}{before_content}{CHANGED_PLACEMARKER_CLOSED}'
    after_html = f'{CHANGED_INTO_PLACEMARKER_OPEN}{after_content}{CHANGED_INTO_PLACEMARKER_CLOSED}'

    return before_html, after_html, has_changes


def same_slicer(lst: List[str], start: int, end: int) -> List[str]:
    """Return a slice of the list, or a single element if start == end."""
    return lst[start:end] if start != end else [lst[start]]

def customSequenceMatcher(
    before: List[str],
    after: List[str],
    include_equal: bool = False,
    include_removed: bool = True,
    include_added: bool = True,
    include_replaced: bool = True,
    include_change_type_prefix: bool = True,
    word_diff: bool = False,
    context_lines: int = 0,
    case_insensitive: bool = False,
    ignore_junk: bool = False,
    tokenizer: str = 'words_and_html'
) -> Iterator[List[str]]:
    """
    Compare two sequences and yield differences based on specified parameters.

    Args:
        before (List[str]): Original sequence
        after (List[str]): Modified sequence
        include_equal (bool): Include unchanged parts
        include_removed (bool): Include removed parts
        include_added (bool): Include added parts
        include_replaced (bool): Include replaced parts
        include_change_type_prefix (bool): Add prefixes to indicate change types
        word_diff (bool): Use word-level diffing for replaced lines (controls inline rendering)
        context_lines (int): Number of unchanged lines to show around changes (like grep -C)
        case_insensitive (bool): Perform case-insensitive comparison
        ignore_junk (bool): Ignore whitespace-only changes
        tokenizer (str): Name of tokenizer to use from TOKENIZERS registry (default: 'words_and_html')

    Yields:
        List[str]: Differences between sequences
    """
    # Prepare sequences for comparison (lowercase if case-insensitive, normalize whitespace if ignore_junk)
    def prepare_line(line):
        if case_insensitive:
            line = line.lower()
        if ignore_junk:
            # Normalize whitespace: replace multiple spaces/tabs with single space
            line = WHITESPACE_NORMALIZE_RE.sub(' ', line)
        return line

    compare_before = [prepare_line(line) for line in before]
    compare_after = [prepare_line(line) for line in after]

    cruncher = difflib.SequenceMatcher(isjunk=lambda x: x in " \t", a=compare_before, b=compare_after)

    # When context_lines is set and include_equal is False, we need to track which equal lines to include
    if context_lines > 0 and not include_equal:
        opcodes = list(cruncher.get_opcodes())
        # Mark equal ranges that should be included based on context
        included_equal_ranges = set()

        for i, (tag, alo, ahi, blo, bhi) in enumerate(opcodes):
            if tag != 'equal':
                # Include context lines before this change
                for j in range(max(0, i - 1), i):
                    if opcodes[j][0] == 'equal':
                        prev_alo, prev_ahi = opcodes[j][1], opcodes[j][2]
                        # Include last N lines of the previous equal block
                        context_start = max(prev_alo, prev_ahi - context_lines)
                        for line_num in range(context_start, prev_ahi):
                            included_equal_ranges.add(line_num)

                # Include context lines after this change
                for j in range(i + 1, min(len(opcodes), i + 2)):
                    if opcodes[j][0] == 'equal':
                        next_alo, next_ahi = opcodes[j][1], opcodes[j][2]
                        # Include first N lines of the next equal block
                        context_end = min(next_ahi, next_alo + context_lines)
                        for line_num in range(next_alo, context_end):
                            included_equal_ranges.add(line_num)

    # Remember! Gmail, Outlook etc don't support <style>; styles must be inline.
    # Gmail: strips <ins> and <del> tags entirely.
    for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
        if tag == 'equal':
            if include_equal:
                yield before[alo:ahi]
            elif context_lines > 0:
                # Only include equal lines that are in the context range
                context_lines_to_include = [before[i] for i in range(alo, ahi) if i in included_equal_ranges]
                if context_lines_to_include:
                    yield context_lines_to_include
        elif include_removed and tag == 'delete':
            if include_change_type_prefix:
                yield [f'{REMOVED_PLACEMARKER_OPEN}{line}{REMOVED_PLACEMARKER_CLOSED}' for line in same_slicer(before, alo, ahi)]
            else:
                yield same_slicer(before, alo, ahi)
        elif include_replaced and tag == 'replace':
            before_lines = same_slicer(before, alo, ahi)
            after_lines = same_slicer(after, blo, bhi)

            # Use inline word-level diff for single line replacements when word_diff is enabled
            if word_diff and len(before_lines) == 1 and len(after_lines) == 1:
                inline_diff, has_changes = render_inline_word_diff(before_lines[0], after_lines[0], ignore_junk=ignore_junk, tokenizer=tokenizer)
                # Check if there are any actual changes (not just whitespace when ignore_junk is enabled)
                if ignore_junk and not has_changes:
                    # No real changes, skip this line
                    continue
                yield [inline_diff]
            else:
                # Fall back to line-level diff for multi-line changes
                if include_change_type_prefix:
                    yield [f'{CHANGED_PLACEMARKER_OPEN}{line}{CHANGED_PLACEMARKER_CLOSED}' for line in before_lines] + \
                          [f'{CHANGED_INTO_PLACEMARKER_OPEN}{line}{CHANGED_INTO_PLACEMARKER_CLOSED}' for line in after_lines]
                else:
                    yield before_lines + after_lines
        elif include_added and tag == 'insert':
            if include_change_type_prefix:
                yield [f'{ADDED_PLACEMARKER_OPEN}{line}{ADDED_PLACEMARKER_CLOSED}' for line in same_slicer(after, blo, bhi)]
            else:
                yield same_slicer(after, blo, bhi)

def render_diff(
    previous_version_file_contents: str,
    newest_version_file_contents: str,
    include_equal: bool = False,
    include_removed: bool = True,
    include_added: bool = True,
    include_replaced: bool = True,
    include_change_type_prefix: bool = True,
    patch_format: bool = False,
    word_diff: bool = True,
    context_lines: int = 0,
    case_insensitive: bool = False,
    ignore_junk: bool = False,
    tokenizer: str = 'words_and_html'
) -> str:
    """
    Render the difference between two file contents.

    Args:
        previous_version_file_contents (str): Original file contents
        newest_version_file_contents (str): Modified file contents
        include_equal (bool): Include unchanged parts
        include_removed (bool): Include removed parts
        include_added (bool): Include added parts
        include_replaced (bool): Include replaced parts
        include_change_type_prefix (bool): Add prefixes to indicate change types
        patch_format (bool): Use patch format for output
        word_diff (bool): Use word-level diffing for replaced lines (controls inline rendering)
        context_lines (int): Number of unchanged lines to show around changes (like grep -C)
        case_insensitive (bool): Perform case-insensitive comparison. By default test_json_diff/process.py is case-sensitive, so this follows the same logic
        ignore_junk (bool): Ignore whitespace-only changes
        tokenizer (str): Name of tokenizer to use from TOKENIZERS registry (default: 'words_and_html')

    Returns:
        str: Rendered difference
    """
    newest_lines = [line.rstrip() for line in newest_version_file_contents.splitlines()]
    previous_lines = [line.rstrip() for line in previous_version_file_contents.splitlines()] if previous_version_file_contents else []
    now = time.time()
    logger.debug(
        f"diff options: "
        f"include_equal={include_equal}, "
        f"include_removed={include_removed}, "
        f"include_added={include_added}, "
        f"include_replaced={include_replaced}, "
        f"include_change_type_prefix={include_change_type_prefix}, "
        f"patch_format={patch_format}, "
        f"word_diff={word_diff}, "
        f"context_lines={context_lines}, "
        f"case_insensitive={case_insensitive}, "
        f"ignore_junk={ignore_junk}, "
        f"tokenizer={tokenizer}"
    )
    if patch_format:
        patch = difflib.unified_diff(previous_lines, newest_lines)
        return "\n".join(patch)

    rendered_diff = customSequenceMatcher(
        before=previous_lines,
        after=newest_lines,
        include_equal=include_equal,
        include_removed=include_removed,
        include_added=include_added,
        include_replaced=include_replaced,
        include_change_type_prefix=include_change_type_prefix,
        word_diff=word_diff,
        context_lines=context_lines,
        case_insensitive=case_insensitive,
        ignore_junk=ignore_junk,
        tokenizer=tokenizer
    )

    def flatten(lst: List[Union[str, List[str]]]) -> str:
        result = []
        for x in lst:
            if isinstance(x, list):
                result.extend(x)
            else:
                result.append(x)
        return "\n".join(result)

    logger.debug(f"Diff generated in {time.time() - now:.2f}s")

    return flatten(rendered_diff)


# Export main public API
__all__ = [
    'render_diff',
    'customSequenceMatcher',
    'render_inline_word_diff',
    'render_nested_line_diff',
    'TOKENIZERS',
    'REMOVED_STYLE',
    'ADDED_STYLE',
    'REMOVED_INNER_STYLE',
    'ADDED_INNER_STYLE',
]
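# --- Editor's note: a sketch of the grep -C style context option documented above
# (argument values are illustrative):
#
# render_diff(before_text, after_text, context_lines=2)
# # emits each change plus up to 2 unchanged lines before and after it,
# # much like `diff -U2` or `grep -C2`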
@@ -1,23 +0,0 @@
"""
Tokenizers for diff operations.

This module provides various tokenization strategies for use with the diff system.
New tokenizers can be easily added by:
1. Creating a new module in this directory
2. Importing and registering it in the TOKENIZERS dictionary below
"""

from .natural_text import tokenize_words
from .words_and_html import tokenize_words_and_html

# Tokenizer registry - maps tokenizer names to functions
TOKENIZERS = {
    'words': tokenize_words,
    'words_and_html': tokenize_words_and_html,
}

__all__ = [
    'tokenize_words',
    'tokenize_words_and_html',
    'TOKENIZERS',
]
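# --- Editor's note: a sketch of extending the (now removed) registry described
# above; the module and function names are hypothetical:
#
# from .sentences import tokenize_sentences
# TOKENIZERS['sentences'] = tokenize_sentences
# # callers would then select it by name: render_diff(a, b, tokenizer='sentences')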
@@ -1,44 +0,0 @@
"""
Simple word tokenizer using whitespace boundaries.

This is a simpler tokenizer that treats all whitespace as token boundaries
without special handling for HTML tags or other markup.
"""

from typing import List


def tokenize_words(text: str) -> List[str]:
    """
    Split text into words using simple whitespace boundaries.

    This is a simpler tokenizer that treats all whitespace as token boundaries
    without special handling for HTML tags.

    Args:
        text: Input text to tokenize

    Returns:
        List of tokens (words and whitespace)

    Examples:
        >>> tokenize_words("Hello world")
        ['Hello', ' ', 'world']
        >>> tokenize_words("one  two")
        ['one', ' ', ' ', 'two']
    """
    tokens = []
    current = ''

    for char in text:
        if char.isspace():
            if current:
                tokens.append(current)
                current = ''
            tokens.append(char)
        else:
            current += char

    if current:
        tokens.append(current)
    return tokens
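# --- Editor's note: an equivalent regex one-liner (a sketch; assumed to match the
# character loop above, since re.split with a capturing group keeps the separators):
#
# import re
#
# def tokenize_words_re(text):
#     return [t for t in re.split(r'(\s)', text) if t != '']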
@@ -1,61 +0,0 @@
"""
Tokenizer that preserves HTML tags as atomic units while splitting on whitespace.

This tokenizer is specifically designed for HTML content where:
- HTML tags should remain intact (e.g., '<p>', '<a href="...">')
- Whitespace tokens are preserved for accurate diff reconstruction
- Words are split on whitespace boundaries
"""

from typing import List


def tokenize_words_and_html(text: str) -> List[str]:
    """
    Split text into words and boundaries (spaces, HTML tags).

    This tokenizer preserves HTML tags as atomic units while splitting on whitespace.
    Useful for content that contains HTML markup.

    Args:
        text: Input text to tokenize

    Returns:
        List of tokens (words, spaces, HTML tags)

    Examples:
        >>> tokenize_words_and_html("<p>Hello world</p>")
        ['<p>', 'Hello', ' ', 'world', '</p>']
        >>> tokenize_words_and_html("<a href='test.com'>link</a>")
        ['<a href=\\'test.com\\'>', 'link', '</a>']
    """
    tokens = []
    current = ''
    in_tag = False

    for char in text:
        if char == '<':
            # Start of HTML tag
            if current:
                tokens.append(current)
                current = ''
            current = '<'
            in_tag = True
        elif char == '>' and in_tag:
            # End of HTML tag
            current += '>'
            tokens.append(current)
            current = ''
            in_tag = False
        elif char.isspace() and not in_tag:
            # Space outside of tag
            if current:
                tokens.append(current)
                current = ''
            tokens.append(char)
        else:
            current += char

    if current:
        tokens.append(current)
    return tokens
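# --- Editor's note: one more worked example of the state machine above (assumed):
#
# tokenize_words_and_html('<a href="x y">hi</a> there')
# # ['<a href="x y">', 'hi', '</a>', ' ', 'there']
# # whitespace inside a tag does not split it because in_tag is True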
@@ -1,43 +0,0 @@
"""
Favicon utilities for changedetection.io
Handles favicon MIME type detection with caching
"""

from functools import lru_cache


@lru_cache(maxsize=1000)
def get_favicon_mime_type(filepath):
    """
    Detect MIME type of favicon by reading file content using puremagic.
    Results are cached to avoid repeatedly reading the same files.

    Args:
        filepath: Full path to the favicon file

    Returns:
        MIME type string (e.g., 'image/png')
    """
    mime = None

    try:
        import puremagic
        with open(filepath, 'rb') as f:
            content_bytes = f.read(200)  # Read first 200 bytes

        detections = puremagic.magic_string(content_bytes)
        if detections:
            mime = detections[0].mime_type
    except Exception:
        pass

    # Fallback to mimetypes if puremagic fails
    if not mime:
        import mimetypes
        mime, _ = mimetypes.guess_type(filepath)

    # Final fallback based on extension
    if not mime:
        mime = 'image/x-icon' if filepath.endswith('.ico') else 'image/png'

    return mime
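# --- Editor's note: minimal usage sketch (path is hypothetical); thanks to
# lru_cache, repeated calls for the same path skip the file read:
#
# mime = get_favicon_mime_type('/datastore/some-uuid/favicon.png')  # -> 'image/png'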
@@ -4,18 +4,16 @@ import flask_login
import locale
import os
import queue
import re
import sys
import threading
import time
import timeago
from blinker import signal
from pathlib import Path

from changedetectionio.strtobool import strtobool
from threading import Event
from changedetectionio.queue_handlers import RecheckPriorityQueue, NotificationQueue
from changedetectionio import worker_pool
from changedetectionio.custom_queue import SignalPriorityQueue, AsyncSignalPriorityQueue, NotificationQueue
from changedetectionio import worker_handler

from flask import (
    Flask,
@@ -25,9 +23,10 @@ from flask import (
    render_template,
    request,
    send_from_directory,
    session,
    url_for,
)
from flask_compress import Compress as FlaskCompress
from flask_login import current_user
from flask_restful import abort, Api
from flask_cors import CORS

@@ -35,18 +34,13 @@ from flask_cors import CORS
# Make this a global singleton to avoid multiple signal objects
watch_check_update = signal('watch_check_update', doc='Signal sent when a watch check is completed')
from flask_wtf import CSRFProtect
from flask_babel import Babel, gettext, get_locale
from loguru import logger

from changedetectionio import __version__
from changedetectionio import queuedWatchMetaData
from changedetectionio.api import Watch, WatchHistory, WatchSingleHistory, WatchHistoryDiff, CreateWatch, Import, SystemInfo, Tag, Tags, Notifications, WatchFavicon, Spec
from changedetectionio.api import Watch, WatchHistory, WatchSingleHistory, CreateWatch, Import, SystemInfo, Tag, Tags, Notifications, WatchFavicon
from changedetectionio.api.Search import Search
from .time_handler import is_within_schedule
from changedetectionio.languages import get_available_languages, get_language_codes, get_flag_for_locale, get_timeago_locale
from changedetectionio.favicon_utils import get_favicon_mime_type

IN_PYTEST = "pytest" in sys.modules or "PYTEST_CURRENT_TEST" in os.environ

datastore = None

@@ -54,10 +48,10 @@ datastore = None
ticker_thread = None
extra_stylesheets = []

# Use bulletproof janus-based queues for sync/async reliability
update_q = RecheckPriorityQueue()
# Use async queue by default, keep sync for backward compatibility
update_q = AsyncSignalPriorityQueue() if worker_handler.USE_ASYNC_WORKERS else SignalPriorityQueue()
notification_q = NotificationQueue()
MAX_QUEUE_SIZE = 5000
MAX_QUEUE_SIZE = 2000

app = Flask(__name__,
            static_url_path="",
@@ -69,45 +63,9 @@ socketio_server = None

# Enable CORS, especially useful for the Chrome extension to operate from anywhere
CORS(app)
from werkzeug.routing import BaseConverter, ValidationError
from uuid import UUID

class StrictUUIDConverter(BaseConverter):
    # Special sentinel values allowed in addition to strict UUIDs
    _ALLOWED_SENTINELS = frozenset({'first'})

    def to_python(self, value: str) -> str:
        if value in self._ALLOWED_SENTINELS:
            return value
        try:
            u = UUID(value)
        except ValueError as e:
            raise ValidationError() from e
        # Reject non-standard formats (braces, URNs, no-hyphens)
        if str(u) != value.lower():
            raise ValidationError()
        return str(u)

    def to_url(self, value) -> str:
        return str(value)

# app setup (once)
app.url_map.converters["uuid_str"] = StrictUUIDConverter

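# --- Editor's note: a sketch of how the uuid_str converter is then used in a
# route (the endpoint name is hypothetical):
#
# @app.route('/watch/<uuid_str:uuid>')
# def watch_overview(uuid):
#     # uuid is a normalised lowercase UUID string, or the sentinel 'first'
#     ...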
# Flask-Compress handles HTTP compression, Socket.IO compression disabled to prevent memory leak.
# There's also a bug between flask compress and socketio that causes some kind of slow memory leak
# It's better to use compression on your reverse proxy (nginx etc) instead.
if strtobool(os.getenv("FLASK_ENABLE_COMPRESSION")):
    from flask_compress import Compress as FlaskCompress
    app.config['COMPRESS_MIN_SIZE'] = 2096
    app.config['COMPRESS_MIMETYPES'] = ['text/html', 'text/css', 'text/javascript', 'application/json', 'application/javascript', 'image/svg+xml']
    # Use gzip only - smaller memory footprint than zstd/brotli (4-8KB vs 200-500KB contexts)
    app.config['COMPRESS_ALGORITHM'] = ['gzip']
    compress = FlaskCompress()
    compress.init_app(app)

app.config['TEMPLATES_AUTO_RELOAD'] = False

# Super handy for compressing large BrowserSteps responses and others
FlaskCompress(app)

# Stop browser caching of assets
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
@@ -118,43 +76,11 @@ app.config['NEW_VERSION_AVAILABLE'] = False
if os.getenv('FLASK_SERVER_NAME'):
    app.config['SERVER_NAME'] = os.getenv('FLASK_SERVER_NAME')

# Babel/i18n configuration
app.config['BABEL_TRANSLATION_DIRECTORIES'] = str(Path(__file__).parent / 'translations')
app.config['BABEL_DEFAULT_LOCALE'] = 'en_GB'

# Session configuration
# NOTE: Flask session (for locale, etc.) is separate from Flask-Login's remember-me cookie
# - Flask session stores data like session['locale'] in a signed cookie
# - Flask-Login's remember=True creates a separate authentication cookie
# - Setting PERMANENT_SESSION_LIFETIME controls how long the Flask session cookie lasts
from datetime import timedelta
app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(days=3650)  # ~10 years (effectively unlimited)

#app.config["EXPLAIN_TEMPLATE_LOADING"] = True

# Disables caching of the templates
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.jinja_env.add_extension('jinja2.ext.loopcontrols')

# Configure Jinja2 to search for templates in plugin directories
def _configure_plugin_templates():
    """Configure Jinja2 loader to include plugin template directories."""
    from jinja2 import ChoiceLoader, FileSystemLoader
    from changedetectionio.pluggy_interface import get_plugin_template_paths

    # Get plugin template paths
    plugin_template_paths = get_plugin_template_paths()

    if plugin_template_paths:
        # Create a ChoiceLoader that searches app templates first, then plugin templates
        loaders = [app.jinja_loader]  # Keep the default app loader first
        for path in plugin_template_paths:
            loaders.append(FileSystemLoader(path))

        app.jinja_loader = ChoiceLoader(loaders)
        logger.info(f"Configured Jinja2 to search {len(plugin_template_paths)} plugin template directories")

# Configure plugin templates (called after plugins are loaded)
_configure_plugin_templates()
csrf = CSRFProtect()
csrf.init_app(app)
notification_debug_log=[]
@@ -175,12 +101,12 @@ def init_app_secret(datastore_path):
    path = os.path.join(datastore_path, "secret.txt")

    try:
        with open(path, "r", encoding='utf-8') as f:
        with open(path, "r") as f:
            secret = f.read()

    except FileNotFoundError:
        import secrets
        with open(path, "w", encoding='utf-8') as f:
        with open(path, "w") as f:
            secret = secrets.token_hex(32)
            f.write(secret)

@@ -207,27 +133,18 @@ def get_socketio_path():
    # Socket.IO will be available at {prefix}/socket.io/
    return prefix

@app.template_global('is_safe_valid_url')
def _is_safe_valid_url(test_url):
    from .validate_url import is_safe_valid_url
    return is_safe_valid_url(test_url)


@app.template_filter('format_number_locale')
def _jinja2_filter_format_number_locale(value: float) -> str:
    "Formats a number such as 4000.10 using the locale default, e.g. 4,000.10"
    # Format the number with two decimal places (locale format string will return 6 decimal)
    formatted_value = locale.format_string("%.2f", value, grouping=True)
    return formatted_value

@app.template_filter('regex_search')
def _jinja2_filter_regex_search(value, pattern):
    import re
    return re.search(pattern, str(value)) is not None
    return formatted_value

@app.template_global('is_checking_now')
def _watch_is_checking_now(watch_obj, format="%Y-%m-%d %H:%M:%S"):
    return worker_pool.is_watch_running(watch_obj['uuid'])
    return worker_handler.is_watch_running(watch_obj['uuid'])

@app.template_global('get_watch_queue_position')
def _get_watch_queue_position(watch_obj):
@@ -238,13 +155,13 @@ def _get_watch_queue_position(watch_obj):
@app.template_global('get_current_worker_count')
def _get_current_worker_count():
    """Get the current number of operational workers"""
    return worker_pool.get_worker_count()
    return worker_handler.get_worker_count()

@app.template_global('get_worker_status_info')
def _get_worker_status_info():
    """Get detailed worker status information for display"""
    status = worker_pool.get_worker_status()
    running_uuids = worker_pool.get_running_uuids()
    status = worker_handler.get_worker_status()
    running_uuids = worker_handler.get_running_uuids()

    return {
        'count': status['worker_count'],
@@ -261,26 +178,16 @@ def _get_worker_status_info():
def _jinja2_filter_datetime(watch_obj, format="%Y-%m-%d %H:%M:%S"):

    if watch_obj['last_checked'] == 0:
        return gettext('Not yet')
        return 'Not yet'

    locale = get_timeago_locale(str(get_locale()))
    try:
        return timeago.format(int(watch_obj['last_checked']), time.time(), locale)
    except:
        # Fallback to English if locale not supported by timeago
        return timeago.format(int(watch_obj['last_checked']), time.time(), 'en')
    return timeago.format(int(watch_obj['last_checked']), time.time())

@app.template_filter('format_timestamp_timeago')
def _jinja2_filter_datetimestamp(timestamp, format="%Y-%m-%d %H:%M:%S"):
    if not timestamp:
        return gettext('Not yet')
        return 'Not yet'

    locale = get_timeago_locale(str(get_locale()))
    try:
        return timeago.format(int(timestamp), time.time(), locale)
    except:
        # Fallback to English if locale not supported by timeago
        return timeago.format(int(timestamp), time.time(), 'en')
    return timeago.format(int(timestamp), time.time())


@app.template_filter('pagination_slice')
@@ -294,120 +201,10 @@ def _jinja2_filter_pagination_slice(arr, skip):
@app.template_filter('format_seconds_ago')
def _jinja2_filter_seconds_precise(timestamp):
    if timestamp == False:
        return gettext('Not yet')
        return 'Not yet'

    return format(int(time.time()-timestamp), ',d')

@app.template_filter('format_duration')
def _jinja2_filter_format_duration(seconds):
    """Format a duration in seconds into human readable string like '5 days, 3 hours, 30 minutes'"""
    from datetime import timedelta

    if not seconds or seconds < 0:
        return gettext('0 seconds')

    td = timedelta(seconds=int(seconds))

    # Calculate components
    years = td.days // 365
    remaining_days = td.days % 365
    months = remaining_days // 30
    remaining_days = remaining_days % 30
    weeks = remaining_days // 7
    days = remaining_days % 7

    hours = td.seconds // 3600
    minutes = (td.seconds % 3600) // 60
    secs = td.seconds % 60

    # Build parts list
    parts = []
    if years > 0:
        parts.append(f"{years} {gettext('year') if years == 1 else gettext('years')}")
    if months > 0:
        parts.append(f"{months} {gettext('month') if months == 1 else gettext('months')}")
    if weeks > 0:
        parts.append(f"{weeks} {gettext('week') if weeks == 1 else gettext('weeks')}")
    if days > 0:
        parts.append(f"{days} {gettext('day') if days == 1 else gettext('days')}")
    if hours > 0:
        parts.append(f"{hours} {gettext('hour') if hours == 1 else gettext('hours')}")
    if minutes > 0:
        parts.append(f"{minutes} {gettext('minute') if minutes == 1 else gettext('minutes')}")
    if secs > 0 or not parts:
        parts.append(f"{secs} {gettext('second') if secs == 1 else gettext('seconds')}")

    return ", ".join(parts)
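# --- Editor's note: a worked example of the filter above (input chosen to hit
# several units): 93784 = 1*86400 + 2*3600 + 3*60 + 4, so
# _jinja2_filter_format_duration(93784) returns "1 day, 2 hours, 3 minutes, 4 seconds".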
@app.template_filter('fetcher_status_icons')
def _jinja2_filter_fetcher_status_icons(fetcher_name):
    """Get status icon HTML for a given fetcher.

    This filter checks both built-in fetchers and plugin fetchers for status icons.

    Args:
        fetcher_name: The fetcher name (e.g., 'html_webdriver', 'html_js_zyte')

    Returns:
        str: HTML string containing status icon elements
    """
    from changedetectionio import content_fetchers
    from changedetectionio.pluggy_interface import collect_fetcher_status_icons
    from markupsafe import Markup
    from flask import url_for

    icon_data = None

    # First check if it's a plugin fetcher (plugins have priority)
    plugin_icon_data = collect_fetcher_status_icons(fetcher_name)
    if plugin_icon_data:
        icon_data = plugin_icon_data
    # Check if it's a built-in fetcher
    elif hasattr(content_fetchers, fetcher_name):
        fetcher_class = getattr(content_fetchers, fetcher_name)
        if hasattr(fetcher_class, 'get_status_icon_data'):
            icon_data = fetcher_class.get_status_icon_data()

    # Build HTML from icon data
    if icon_data and isinstance(icon_data, dict):
        # Use 'group' from icon_data if specified, otherwise default to 'images'
        group = icon_data.get('group', 'images')

        # Try to use url_for, but fall back to manual URL building if endpoint not registered yet
        try:
            icon_url = url_for('static_content', group=group, filename=icon_data['filename'])
        except:
            # Fallback: build URL manually respecting APPLICATION_ROOT
            from flask import request
            app_root = request.script_root if hasattr(request, 'script_root') else ''
            icon_url = f"{app_root}/static/{group}/{icon_data['filename']}"

        style_attr = f' style="{icon_data["style"]}"' if icon_data.get('style') else ''
        html = f'<img class="status-icon" src="{icon_url}" alt="{icon_data["alt"]}" title="{icon_data["title"]}"{style_attr}>'
        return Markup(html)

    return ''

_RE_SANITIZE_TAG = re.compile(r'[^a-zA-Z0-9]')

@app.template_filter('sanitize_tag_class')
def _jinja2_filter_sanitize_tag_class(tag_title):
    """Sanitize a tag title to create a valid CSS class name.
    Removes all non-alphanumeric characters and converts to lowercase.

    Args:
        tag_title: The tag title string

    Returns:
        str: A sanitized string suitable for use as a CSS class name
    """
    # Remove all non-alphanumeric characters and convert to lowercase
    sanitized = _RE_SANITIZE_TAG.sub('', tag_title).lower()
    # Ensure it starts with a letter (CSS requirement)
    if sanitized and not sanitized[0].isalpha():
        sanitized = 'tag' + sanitized
    return sanitized if sanitized else 'tag'

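# Illustrative sketch (not part of the diff): how the sanitizer above behaves.
# _RE_SANITIZE_TAG strips everything outside [a-zA-Z0-9]; the filter then lowercases
# and prefixes 'tag' when the result would start with a digit (CSS class names must
# begin with a letter).
import re
assert re.sub(r'[^a-zA-Z0-9]', '', "My Tag #2").lower() == "mytag2"
# "2024 Offers" -> "2024offers" -> would start with a digit, so the filter returns "tag2024offers"
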
# Import login_optionally_required from auth_decorator
from changedetectionio.auth_decorator import login_optionally_required

@@ -462,61 +259,17 @@ def changedetection_app(config=None, datastore_o=None):
    global datastore, socketio_server
    datastore = datastore_o

    # Set datastore reference in notification queue for all_muted checking
    notification_q.set_datastore(datastore)

    # Import and create a wrapper for is_safe_url that has access to app
    from changedetectionio.is_safe_url import is_safe_url as _is_safe_url

    def is_safe_url(target):
        """Wrapper for is_safe_url that passes the app instance"""
        return _is_safe_url(target, app)

    # so far just for read-only via tests, but this will be moved eventually to be the main source
    # (instead of the global var)
    app.config['DATASTORE'] = datastore_o

    # Store batch mode flag to skip background threads when running in batch mode
    app.config['batch_mode'] = config.get('batch_mode', False) if config else False


    # Store the signal in the app config to ensure it's accessible everywhere
    app.config['watch_check_update_SIGNAL'] = watch_check_update

    login_manager = flask_login.LoginManager(app)
    login_manager.login_view = 'login'
    app.secret_key = init_app_secret(config['datastore_path'])

    # Initialize Flask-Babel for i18n support
    available_languages = get_available_languages()
    language_codes = get_language_codes()

    _locale_aliases = {
        'zh-TW': 'zh_Hant_TW',  # Traditional Chinese: browser sends zh-TW, we use zh_Hant_TW
        'zh_TW': 'zh_Hant_TW',  # Also handle underscore variant
    }
    _locale_match_list = language_codes + list(_locale_aliases.keys())

    def get_locale():
        # 1. Try to get locale from session (user explicitly selected)
        if 'locale' in session:
            return session['locale']

        # 2. Fall back to Accept-Language header
        browser_locale = request.accept_languages.best_match(_locale_match_list)
        # 3. Map browser locale to our internal locale if needed
        return _locale_aliases.get(browser_locale, browser_locale)

    # Initialize Babel with locale selector
    babel = Babel(app, locale_selector=get_locale)

    # Make i18n functions available to templates
    app.jinja_env.globals.update(
        _=gettext,
        get_locale=get_locale,
        get_flag_for_locale=get_flag_for_locale,
        available_languages=available_languages
    )


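# Illustrative sketch (not part of the diff): the alias table above translates what
# Accept-Language negotiation returns into the internal Babel locale name.
_aliases_demo = {'zh-TW': 'zh_Hant_TW', 'zh_TW': 'zh_Hant_TW'}
assert _aliases_demo.get('zh-TW', 'zh-TW') == 'zh_Hant_TW'  # browser variant is mapped
assert _aliases_demo.get('de', 'de') == 'de'                # unknown codes pass through unchanged
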
    # Set up a request hook to check authentication for all routes
    @app.before_request
    def check_authentication():
@@ -527,12 +280,6 @@ def changedetection_app(config=None, datastore_o=None):
        if request.endpoint and request.endpoint == 'static_content' and request.view_args:
            # Handled by static_content handler
            return None
        # Permitted - static flag icons need to load on login page
        elif request.endpoint and request.endpoint == 'static_flags':
            return None
        # Permitted - language selection should work on login page
        elif request.endpoint and request.endpoint == 'set_language':
            return None
        # Permitted
        elif request.endpoint and 'login' in request.endpoint:
            return None
@@ -555,23 +302,20 @@ def changedetection_app(config=None, datastore_o=None):
            return login_manager.unauthorized()


    watch_api.add_resource(WatchHistoryDiff,
                           '/api/v1/watch/<uuid_str:uuid>/difference/<string:from_timestamp>/<string:to_timestamp>',
                           resource_class_kwargs={'datastore': datastore})
    watch_api.add_resource(WatchSingleHistory,
                           '/api/v1/watch/<uuid_str:uuid>/history/<string:timestamp>',
                           '/api/v1/watch/<string:uuid>/history/<string:timestamp>',
                           resource_class_kwargs={'datastore': datastore, 'update_q': update_q})
    watch_api.add_resource(WatchFavicon,
                           '/api/v1/watch/<uuid_str:uuid>/favicon',
                           '/api/v1/watch/<string:uuid>/favicon',
                           resource_class_kwargs={'datastore': datastore})
    watch_api.add_resource(WatchHistory,
                           '/api/v1/watch/<uuid_str:uuid>/history',
                           '/api/v1/watch/<string:uuid>/history',
                           resource_class_kwargs={'datastore': datastore})

    watch_api.add_resource(CreateWatch, '/api/v1/watch',
                           resource_class_kwargs={'datastore': datastore, 'update_q': update_q})

    watch_api.add_resource(Watch, '/api/v1/watch/<uuid_str:uuid>',
    watch_api.add_resource(Watch, '/api/v1/watch/<string:uuid>',
                           resource_class_kwargs={'datastore': datastore, 'update_q': update_q})

    watch_api.add_resource(SystemInfo, '/api/v1/systeminfo',
@@ -584,8 +328,8 @@ def changedetection_app(config=None, datastore_o=None):
    watch_api.add_resource(Tags, '/api/v1/tags',
                           resource_class_kwargs={'datastore': datastore})

    watch_api.add_resource(Tag, '/api/v1/tag', '/api/v1/tag/<uuid_str:uuid>',
                           resource_class_kwargs={'datastore': datastore, 'update_q': update_q})
    watch_api.add_resource(Tag, '/api/v1/tag', '/api/v1/tag/<string:uuid>',
                           resource_class_kwargs={'datastore': datastore})

    watch_api.add_resource(Search, '/api/v1/search',
                           resource_class_kwargs={'datastore': datastore})
@@ -593,8 +337,6 @@ def changedetection_app(config=None, datastore_o=None):
    watch_api.add_resource(Notifications, '/api/v1/notifications',
                           resource_class_kwargs={'datastore': datastore})

    watch_api.add_resource(Spec, '/api/v1/full-spec')

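# Illustrative client sketch (not part of the diff): both URL converter spellings
# above (<uuid_str:uuid> on one side, <string:uuid> on the other) describe the same
# endpoints. The base URL and the 'x-api-key' header value are assumptions here.
import requests

BASE = "http://localhost:5000"
HEADERS = {"x-api-key": "YOUR-API-KEY"}  # hypothetical key

watches = requests.get(f"{BASE}/api/v1/watch", headers=HEADERS).json()
for uuid in watches:
    history = requests.get(f"{BASE}/api/v1/watch/{uuid}/history", headers=HEADERS).json()
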
    @login_manager.user_loader
    def user_loader(email):
        user = User()
@@ -603,76 +345,25 @@ def changedetection_app(config=None, datastore_o=None):

    @login_manager.unauthorized_handler
    def unauthorized_handler():
        # Pass the current request path so users are redirected back after login
        return redirect(url_for('login', redirect=request.path))
        flash("You must be logged in, please log in.", 'error')
        return redirect(url_for('login', next=url_for('watchlist.index')))

    @app.route('/logout')
    def logout():
        flask_login.logout_user()

        # Check if there's a redirect parameter to return to after re-login
        redirect_url = request.args.get('redirect')

        # If redirect is provided and safe, pass it to login page
        if redirect_url and is_safe_url(redirect_url):
            return redirect(url_for('login', redirect=redirect_url))

        # Otherwise just go to watchlist
        return redirect(url_for('watchlist.index'))

    @app.route('/set-language/<locale>')
    def set_language(locale):
        """Set the user's preferred language in the session"""
        if not request.cookies:
            logger.error("Cannot set language without session cookie")
            flash("Cannot set language without session cookie", 'error')
            return redirect(url_for('watchlist.index'))

        # Validate the locale against available languages
        if locale in language_codes:
            # Make session permanent so language preference persists across browser sessions
            # NOTE: This is the Flask session cookie (separate from Flask-Login's remember-me auth cookie)
            session.permanent = True
            session['locale'] = locale

            # CRITICAL: Flask-Babel caches the locale in the request context (ctx.babel_locale)
            # We must refresh to clear this cache so the new locale takes effect immediately
            # This is especially important for tests where multiple requests happen rapidly
            from flask_babel import refresh
            refresh()
        else:
            logger.error(f"Invalid locale {locale}, available: {language_codes}")

        # Check if there's a redirect parameter to return to the same page
        redirect_url = request.args.get('redirect')

        # If redirect is provided and safe, use it
        if redirect_url and is_safe_url(redirect_url):
            return redirect(redirect_url)

        # Otherwise redirect to watchlist
        return redirect(url_for('watchlist.index'))

    # https://github.com/pallets/flask/blob/93dd1709d05a1cf0e886df6223377bdab3b077fb/examples/tutorial/flaskr/__init__.py#L39
    # You can divide up the stuff like this
    @app.route('/login', methods=['GET', 'POST'])
    def login():
        # Extract and validate the redirect parameter
        redirect_url = request.args.get('redirect') or request.form.get('redirect')

        # Validate the redirect URL - default to watchlist if invalid
        if redirect_url and is_safe_url(redirect_url):
            validated_redirect = redirect_url
        else:
            validated_redirect = url_for('watchlist.index')

        if request.method == 'GET':
            if flask_login.current_user.is_authenticated:
                # Already logged in - redirect immediately to the target
                flash(gettext("Already logged in"))
                return redirect(validated_redirect)
            flash(gettext("You must be logged in, please log in."), 'error')
            output = render_template("login.html", redirect_url=validated_redirect)
                flash("Already logged in")
                return redirect(url_for("watchlist.index"))

            output = render_template("login.html")
            return output

        user = User()
@@ -682,13 +373,23 @@ def changedetection_app(config=None, datastore_o=None):

        if (user.check_password(password)):
            flask_login.login_user(user, remember=True)
            # Redirect to the validated URL after successful login
            return redirect(validated_redirect)

            # For now there's nothing else interesting here other than the index/list page
            # It's more reliable and safe to ignore the 'next' redirect
            # When we used...
            # next = request.args.get('next')
            # return redirect(next or url_for('watchlist.index'))
            # We would sometimes get login loop errors on sites hosted in sub-paths

            # note for the future:
            #   if not is_safe_url(next):
            #       return flask.abort(400)
            return redirect(url_for('watchlist.index'))

        else:
            flash(gettext('Incorrect password'), 'error')
            flash('Incorrect password', 'error')

        return redirect(url_for('login', redirect=redirect_url if redirect_url else None))
        return redirect(url_for('login'))

    @app.before_request
    def before_request_handle_cookie_x_settings():
@@ -698,52 +399,12 @@ def changedetection_app(config=None, datastore_o=None):
            app.config['SESSION_COOKIE_PATH'] = request.headers['X-Forwarded-Prefix']
        return None

@app.route("/static/flags/<path:flag_path>", methods=['GET'])
|
||||
def static_flags(flag_path):
|
||||
"""Handle flag icon files with subdirectories"""
|
||||
from flask import make_response
|
||||
import re
|
||||
|
||||
# flag_path comes in as "1x1/de.svg" or "4x3/de.svg"
|
||||
if re.match(r'^(1x1|4x3)/[a-z0-9-]+\.svg$', flag_path.lower()):
|
||||
# Reconstruct the path safely with additional validation
|
||||
parts = flag_path.lower().split('/')
|
||||
if len(parts) != 2:
|
||||
abort(404)
|
||||
|
||||
subdir = parts[0]
|
||||
svg_file = parts[1]
|
||||
|
||||
# Extra validation: ensure subdir is exactly 1x1 or 4x3
|
||||
if subdir not in ['1x1', '4x3']:
|
||||
abort(404)
|
||||
|
||||
# Extra validation: ensure svg_file only contains safe characters
|
||||
if not re.match(r'^[a-z0-9-]+\.svg$', svg_file):
|
||||
abort(404)
|
||||
|
||||
try:
|
||||
response = make_response(send_from_directory(f"static/flags/{subdir}", svg_file))
|
||||
response.headers['Content-type'] = 'image/svg+xml'
|
||||
response.headers['Cache-Control'] = 'max-age=86400, public' # Cache for 24 hours
|
||||
return response
|
||||
except FileNotFoundError:
|
||||
abort(404)
|
||||
else:
|
||||
abort(404)
|
||||
|
||||
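# Illustrative sketch (not part of the diff): what the allow-list regex above accepts.
import re
_flag_ok = re.compile(r'^(1x1|4x3)/[a-z0-9-]+\.svg$')
assert _flag_ok.match("4x3/de.svg")                # served with image/svg+xml
assert not _flag_ok.match("4x3/../../etc/passwd")  # traversal attempts get a 404
assert not _flag_ok.match("4x3/de.png")            # only .svg files are allowed
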
@app.route("/static/<string:group>/<string:filename>", methods=['GET'])
|
||||
def static_content(group, filename):
|
||||
from flask import make_response
|
||||
import re
|
||||
|
||||
# Strict sanitization: only allow a-z, 0-9, and underscore (blocks .. and other traversal)
|
||||
group = re.sub(r'[^a-z0-9_-]+', '', group.lower())
|
||||
filename = filename
|
||||
|
||||
# Additional safety: reject if sanitization resulted in empty strings
|
||||
if not group or not filename:
|
||||
abort(404)
|
||||
group = re.sub(r'[^\w.-]+', '', group.lower())
|
||||
filename = re.sub(r'[^\w.-]+', '', filename.lower())
|
||||
|
||||
if group == 'screenshot':
|
||||
# Could be sensitive, follow password requirements
|
||||
@@ -777,11 +438,18 @@ def changedetection_app(config=None, datastore_o=None):

            favicon_filename = watch.get_favicon_filename()
            if favicon_filename:
                # Use cached MIME type detection
                filepath = os.path.join(watch.data_dir, favicon_filename)
                mime = get_favicon_mime_type(filepath)
                try:
                    import magic
                    mime = magic.from_file(
                        os.path.join(watch.watch_data_dir, favicon_filename),
                        mime=True
                    )
                except ImportError:
                    # Fallback, no python-magic
                    import mimetypes
                    mime, encoding = mimetypes.guess_type(favicon_filename)

                response = make_response(send_from_directory(watch.data_dir, favicon_filename))
                response = make_response(send_from_directory(watch.watch_data_dir, favicon_filename))
                response.headers['Content-type'] = mime
                response.headers['Cache-Control'] = 'max-age=300, must-revalidate'  # Cache for 5 minutes, then revalidate
                return response
@@ -815,31 +483,6 @@ def changedetection_app(config=None, datastore_o=None):
            except FileNotFoundError:
                abort(404)

        # Handle plugin group specially
        if group == 'plugin':
            # Serve files from plugin static directories
            from changedetectionio.pluggy_interface import plugin_manager
            import os as os_check

            for plugin_name, plugin_obj in plugin_manager.list_name_plugin():
                if hasattr(plugin_obj, 'plugin_static_path'):
                    try:
                        static_path = plugin_obj.plugin_static_path()
                        if static_path and os_check.path.isdir(static_path):
                            # Check if file exists in plugin's static directory
                            plugin_file_path = os_check.path.join(static_path, filename)
                            if os_check.path.isfile(plugin_file_path):
                                # Found the file in a plugin
                                response = make_response(send_from_directory(static_path, filename))
                                response.headers['Cache-Control'] = 'max-age=3600, public'  # Cache for 1 hour
                                return response
                    except Exception as e:
                        logger.debug(f"Error checking plugin {plugin_name} for static file: {e}")
                        pass

            # File not found in any plugin
            abort(404)

        # These files should be in our subdirectory
        try:
            return send_from_directory(f"static/{group}", path=filename)
@@ -876,15 +519,13 @@ def changedetection_app(config=None, datastore_o=None):

    # watchlist UI buttons etc
    import changedetectionio.blueprint.ui as ui
    app.register_blueprint(ui.construct_blueprint(datastore, update_q, worker_pool, queuedWatchMetaData, watch_check_update))
    app.register_blueprint(ui.construct_blueprint(datastore, update_q, worker_handler, queuedWatchMetaData, watch_check_update))

    import changedetectionio.blueprint.watchlist as watchlist
    app.register_blueprint(watchlist.construct_blueprint(datastore=datastore, update_q=update_q, queuedWatchMetaData=queuedWatchMetaData), url_prefix='')

    # Initialize Socket.IO server conditionally based on settings
    socket_io_enabled = datastore.data['settings']['application'].get('ui', {}).get('socket_io_enabled', True)
    if socket_io_enabled and app.config.get('batch_mode'):
        socket_io_enabled = False
    socket_io_enabled = datastore.data['settings']['application']['ui'].get('socket_io_enabled', True)
    if socket_io_enabled:
        from changedetectionio.realtime.socket_server import init_socketio
        global socketio_server
@@ -913,10 +554,10 @@ def changedetection_app(config=None, datastore_o=None):
        expected_workers = int(os.getenv("FETCH_WORKERS", datastore.data['settings']['requests']['workers']))

        # Get basic status
        status = worker_pool.get_worker_status()
        status = worker_handler.get_worker_status()

        # Perform health check
        health_result = worker_pool.check_worker_health(
        health_result = worker_handler.check_worker_health(
            expected_count=expected_workers,
            update_q=update_q,
            notification_q=notification_q,
@@ -980,31 +621,16 @@ def changedetection_app(config=None, datastore_o=None):
    # Can be overridden by ENV or use the default settings
    n_workers = int(os.getenv("FETCH_WORKERS", datastore.data['settings']['requests']['workers']))
    logger.info(f"Starting {n_workers} workers during app initialization")
    worker_pool.start_workers(n_workers, update_q, notification_q, app, datastore)
    worker_handler.start_workers(n_workers, update_q, notification_q, app, datastore)

    # Skip background threads in batch mode (just process queue and exit)
    batch_mode = app.config.get('batch_mode', False)
    if not batch_mode:
        # @todo handle ctrl break
        ticker_thread = threading.Thread(target=ticker_thread_check_time_launch_checks, daemon=True, name="TickerThread-ScheduleChecker").start()
    # @todo handle ctrl break
    ticker_thread = threading.Thread(target=ticker_thread_check_time_launch_checks).start()
    threading.Thread(target=notification_runner).start()

        # Start configurable number of notification workers (default 1)
        notification_workers = int(os.getenv("NOTIFICATION_WORKERS", "1"))
        for i in range(notification_workers):
            threading.Thread(
                target=notification_runner,
                args=(i,),
                daemon=True,
                name=f"NotificationRunner-{i}"
            ).start()
        logger.info(f"Started {notification_workers} notification worker(s)")

        in_pytest = "pytest" in sys.modules or "PYTEST_CURRENT_TEST" in os.environ
        # Check for new release version, but not when running in test/build or pytest
        if not os.getenv("GITHUB_REF", False) and not strtobool(os.getenv('DISABLE_VERSION_CHECK', 'no')) and not in_pytest:
            threading.Thread(target=check_for_new_version, daemon=True, name="VersionChecker").start()
    else:
        logger.info("Batch mode: Skipping ticker thread, notification runner, and version checker")
    in_pytest = "pytest" in sys.modules or "PYTEST_CURRENT_TEST" in os.environ
    # Check for new release version, but not when running in test/build or pytest
    if not os.getenv("GITHUB_REF", False) and not strtobool(os.getenv('DISABLE_VERSION_CHECK', 'no')) and not in_pytest:
        threading.Thread(target=check_for_new_version).start()

    # Return the Flask app - the Socket.IO will be attached to it but initialized separately
    # This avoids circular dependencies
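# Illustrative sketch (not part of the diff): the worker counts above are tuned via
# environment variables; the values shown are examples only.
import os
os.environ.setdefault("FETCH_WORKERS", "10")        # watch-check worker count
os.environ.setdefault("NOTIFICATION_WORKERS", "2")  # notification_runner threads (new side only)
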
@@ -1017,16 +643,15 @@ def check_for_new_version():
    import urllib3
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

    session = requests.Session()
    session.verify = False

    while not app.config.exit.is_set():
        try:
            r = session.post("https://changedetection.io/check-ver.php",
            r = requests.post("https://changedetection.io/check-ver.php",
                              data={'version': __version__,
                                    'app_guid': datastore.data['app_guid'],
                                    'watch_count': len(datastore.data['watching'])
                                    })
                                    },

                              verify=False)
        except:
            pass

@@ -1040,17 +665,17 @@ def check_for_new_version():
        app.config.exit.wait(86400)


def notification_runner(worker_id=0):
def notification_runner():
    global notification_debug_log
    from datetime import datetime
    import json
    with app.app_context():
        while not app.config.exit.is_set():
            try:
                # Multiple workers can run concurrently (configurable via NOTIFICATION_WORKERS)
                # At the moment only one thread runs (single runner)
                n_object = notification_q.get(block=False)
            except queue.Empty:
                app.config.exit.wait(1)
                time.sleep(1)

            else:

@@ -1073,7 +698,7 @@ def notification_runner(worker_id=0):
                    sent_obj = process_notification(n_object, datastore)

                except Exception as e:
                    logger.error(f"Notification worker {worker_id} - Watch URL: {n_object['watch_url']} Error {str(e)}")
                    logger.error(f"Watch URL: {n_object['watch_url']} Error {str(e)}")

                    # UUID won't be present when we submit a 'test' from the global settings
                    if 'uuid' in n_object:
@@ -1087,7 +712,7 @@ def notification_runner(worker_id=0):
                app.config['watch_check_update_SIGNAL'].send(app_context=app, watch_uuid=n_object.get('uuid'))

                # Process notifications
                notification_debug_log += ["{} - SENDING - {}".format(now.strftime("%c"), json.dumps(sent_obj))]
                notification_debug_log += ["{} - SENDING - {}".format(now.strftime("%Y/%m/%d %H:%M:%S,000"), json.dumps(sent_obj))]
                # Trim the log length
                notification_debug_log = notification_debug_log[-100:]

@@ -1103,10 +728,6 @@ def ticker_thread_check_time_launch_checks():
    logger.debug(f"System env MINIMUM_SECONDS_RECHECK_TIME {recheck_time_minimum_seconds}")

    # Workers are now started during app initialization, not here
    WAIT_TIME_BETWEEN_LOOP = 1.0 if not IN_PYTEST else 0.01
    if IN_PYTEST:
        # The time between loops should be less than the first .sleep/wait in def wait_for_all_checks() of tests/util.py
        logger.warning(f"Looks like we're in PYTEST! Setting time between searching for items to add to the queue to {WAIT_TIME_BETWEEN_LOOP}s")

    while not app.config.exit.is_set():

@@ -1114,7 +735,7 @@ def ticker_thread_check_time_launch_checks():
        now = time.time()
        if now - last_health_check > 60:
            expected_workers = int(os.getenv("FETCH_WORKERS", datastore.data['settings']['requests']['workers']))
            health_result = worker_pool.check_worker_health(
            health_result = worker_handler.check_worker_health(
                expected_count=expected_workers,
                update_q=update_q,
                notification_q=notification_q,
@@ -1124,19 +745,11 @@ def ticker_thread_check_time_launch_checks():

            if health_result['status'] != 'healthy':
                logger.warning(f"Worker health check: {health_result['message']}")


            last_health_check = now

        # Check if all checks are paused
        if datastore.data['settings']['application'].get('all_paused', False):
            app.config.exit.wait(1)
            continue

        # Get a list of watches by UUID that are currently fetching data
        running_uuids = worker_pool.get_running_uuids()

        # Build set of queued UUIDs once for O(1) lookup instead of O(n) per watch
        queued_uuids = {q_item.item['uuid'] for q_item in update_q.queue}
        running_uuids = worker_handler.get_running_uuids()

        # Re #232 - Deepcopy the data in case it changes while we're iterating through it all
        watch_uuid_list = []
@@ -1154,17 +767,16 @@ def ticker_thread_check_time_launch_checks():
            else:
                break

        # Re #438 - Don't place more watches in the queue to be checked if the queue is already large
        while update_q.qsize() >= 2000:
            logger.warning(f"Recheck watches queue size limit reached ({MAX_QUEUE_SIZE}), skipping adding more items")
            time.sleep(3)


        recheck_time_system_seconds = int(datastore.threshold_seconds)

        # Check for watches outside of the time threshold to put in the thread queue.
        for watch_index, uuid in enumerate(watch_uuid_list):
            # Re #438 - Check queue size every 100 watches for CPU efficiency (not every watch)
            if watch_index % 100 == 0:
                current_queue_size = update_q.qsize()
                if current_queue_size >= MAX_QUEUE_SIZE:
                    logger.debug(f"Queue size limit reached ({current_queue_size}/{MAX_QUEUE_SIZE}), stopping scheduler this iteration.")
                    break

        for uuid in watch_uuid_list:
            now = time.time()
            watch = datastore.data['watching'].get(uuid)
            if not watch:
@@ -1177,19 +789,15 @@ def ticker_thread_check_time_launch_checks():

            # @todo - Maybe make this a hook?
            # Time schedule limit - Decide between watch or global settings
            scheduler_source = None
            if watch.get('time_between_check_use_default'):
                time_schedule_limit = datastore.data['settings']['requests'].get('time_schedule_limit', {})
                scheduler_source = 'system/global settings'

                logger.trace(f"{uuid} Time scheduler - Using system/global settings")
            else:
                time_schedule_limit = watch.get('time_schedule_limit')
                scheduler_source = 'watch'

            tz_name = datastore.data['settings']['application'].get('scheduler_timezone_default', os.getenv('TZ', 'UTC').strip())
                logger.trace(f"{uuid} Time scheduler - Using watch settings (not global settings)")
            tz_name = datastore.data['settings']['application'].get('timezone', 'UTC')

            if time_schedule_limit and time_schedule_limit.get('enabled'):
                logger.trace(f"{uuid} Time scheduler - Using scheduler settings from {scheduler_source}")
                try:
                    result = is_within_schedule(time_schedule_limit=time_schedule_limit,
                                                default_tz=tz_name
@@ -1201,7 +809,6 @@ def ticker_thread_check_time_launch_checks():
                    logger.error(
                        f"{uuid} - Recheck scheduler, error handling timezone, check skipped - TZ name '{tz_name}' - {str(e)}")
                    return False

            # If they supplied an individual entry minutes to threshold.
            threshold = recheck_time_system_seconds if watch.get('time_between_check_use_default') else watch.threshold_seconds()

@@ -1214,7 +821,7 @@ def ticker_thread_check_time_launch_checks():
            seconds_since_last_recheck = now - watch['last_checked']

            if seconds_since_last_recheck >= (threshold + watch.jitter_seconds) and seconds_since_last_recheck >= recheck_time_minimum_seconds:
                if not uuid in running_uuids and uuid not in queued_uuids:
                if not uuid in running_uuids and uuid not in [q_uuid.item['uuid'] for q_uuid in update_q.queue]:

                    # Proxies can be set to have a limit on seconds between which they can be called
                    watch_proxy = datastore.get_preferred_proxy_for_watch(uuid=uuid)
@@ -1237,24 +844,21 @@ def ticker_thread_check_time_launch_checks():

                    # Use Epoch time as priority, so we get a "sorted" PriorityQueue, but we can still push a priority 1 into it.
                    priority = int(time.time())
                    logger.debug(
                        f"> Queued watch UUID {uuid} "
                        f"last checked at {watch['last_checked']} "
                        f"queued at {now:0.2f} priority {priority} "
                        f"jitter {watch.jitter_seconds:0.2f}s, "
                        f"{now - watch['last_checked']:0.2f}s since last checked")

                    # Into the queue with you
                    queued_successfully = worker_pool.queue_item_async_safe(update_q,
                                                                            queuedWatchMetaData.PrioritizedItem(priority=priority,
                                                                                                                item={'uuid': uuid})
                                                                            )
                    if queued_successfully:
                        logger.debug(
                            f"> Queued watch UUID {uuid} "
                            f"last checked at {watch['last_checked']} "
                            f"queued at {now:0.2f} priority {priority} "
                            f"jitter {watch.jitter_seconds:0.2f}s, "
                            f"{now - watch['last_checked']:0.2f}s since last checked")
                    else:
                        logger.critical(f"CRITICAL: Failed to queue watch UUID {uuid} in ticker thread!")

                    worker_handler.queue_item_async_safe(update_q, queuedWatchMetaData.PrioritizedItem(priority=priority, item={'uuid': uuid}))

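# Illustrative sketch (not part of the diff): why epoch-second priorities keep the
# queue roughly time-ordered while an explicit priority 1 (e.g. a manual recheck)
# still jumps ahead of everything scheduled.
import queue, time
_q = queue.PriorityQueue()
_q.put((int(time.time()), "scheduled-watch"))
_q.put((1, "manual-recheck"))
assert _q.get()[1] == "manual-recheck"  # lowest priority value is served first
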
                    # Reset for next time
                    watch.jitter_seconds = 0

        # Wait before checking the list again - saves CPU
        time.sleep(1)

        # Should be low so we can break this out in testing
        app.config.exit.wait(WAIT_TIME_BETWEEN_LOOP)
        app.config.exit.wait(1)

@@ -2,19 +2,15 @@ import os
import re
from loguru import logger
from wtforms.widgets.core import TimeInput
from flask_babel import lazy_gettext as _l, gettext

from changedetectionio.blueprint.rss import RSS_FORMAT_TYPES, RSS_TEMPLATE_TYPE_OPTIONS, RSS_TEMPLATE_HTML_DEFAULT
from changedetectionio.blueprint.rss import RSS_FORMAT_TYPES
from changedetectionio.conditions.form import ConditionFormRow
from changedetectionio.notification_service import NotificationContextData
from changedetectionio.strtobool import strtobool
from changedetectionio import processors

from wtforms import (
    BooleanField,
    Form,
    Field,
    FloatField,
    IntegerField,
    RadioField,
    SelectField,
@@ -27,15 +23,15 @@ from wtforms import (
)
from flask_wtf.file import FileField, FileAllowed
from wtforms.fields import FieldList
from wtforms.utils import unset_value

from wtforms.validators import ValidationError

from changedetectionio.widgets import TernaryNoneBooleanField
from validators.url import url as url_validator


# default
# each select <option data-enabled="enabled-0-0"
from changedetectionio.browser_steps.browser_steps import browser_step_ui_config
from changedetectionio.blueprint.browser_steps.browser_steps import browser_step_ui_config

from changedetectionio import html_tools, content_fetchers

@@ -58,8 +54,18 @@ valid_method = {

default_method = 'GET'
allow_simplehost = not strtobool(os.getenv('BLOCK_SIMPLEHOSTS', 'False'))
REQUIRE_ATLEAST_ONE_TIME_PART_MESSAGE_DEFAULT=_l('At least one time interval (weeks, days, hours, minutes, or seconds) must be specified.')
REQUIRE_ATLEAST_ONE_TIME_PART_WHEN_NOT_GLOBAL_DEFAULT=_l('At least one time interval (weeks, days, hours, minutes, or seconds) must be specified when not using global settings.')

LLM_example_texts = ['Tell me simply "Price, In stock"',
                     'Give me a list of all products for sale in this text',
                     'Tell me simply "Yes" "No" or "Maybe" if you think the weather outlook is good for a 4-day small camping trip',
                     'Look at this restaurant menu and only give me list of meals you think are good for type 2 diabetics, if nothing is found just say "nothing"',
                     ]

LLM_send_type_choices = [('text', 'Plain text after filters'),
                         ('above_fold_text', 'Text above the fold'),
                         ('Screenshot', 'Screenshot / Selection'),
                         ('HTML', 'HTML Source')
                         ]

class StringListField(StringField):
    widget = widgets.TextArea()
@@ -159,7 +165,7 @@ class TimeStringField(Field):
        time_str = valuelist[0]
        # Simple validation for HH:MM format
        if not time_str or len(time_str.split(":")) != 2:
            raise ValidationError(_l("Invalid time format. Use HH:MM."))
            raise ValidationError("Invalid time format. Use HH:MM.")
        self.data = time_str


@@ -175,15 +181,15 @@ class validateTimeZoneName(object):
        from zoneinfo import available_timezones
        python_timezones = available_timezones()
        if field.data and field.data not in python_timezones:
            raise ValidationError(_l("Not a valid timezone name"))
            raise ValidationError("Not a valid timezone name")

class ScheduleLimitDaySubForm(Form):
    enabled = BooleanField(_l("not set"), default=True)
    start_time = TimeStringField(_l("Start At"), default="00:00", validators=[validators.Optional()])
    duration = FormField(TimeDurationForm, label=_l("Run duration"))
    enabled = BooleanField("not set", default=True)
    start_time = TimeStringField("Start At", default="00:00", validators=[validators.Optional()])
    duration = FormField(TimeDurationForm, label="Run duration")

class ScheduleLimitForm(Form):
    enabled = BooleanField(_l("Use time scheduler"), default=False)
    enabled = BooleanField("Use time scheduler", default=False)
    # Because the label for="" doesn't line up/work with the actual checkbox
    monday = FormField(ScheduleLimitDaySubForm, label="")
    tuesday = FormField(ScheduleLimitDaySubForm, label="")
@@ -193,7 +199,7 @@ class ScheduleLimitForm(Form):
    saturday = FormField(ScheduleLimitDaySubForm, label="")
    sunday = FormField(ScheduleLimitDaySubForm, label="")

    timezone = StringField(_l("Optional timezone to run in"),
    timezone = StringField("Optional timezone to run in",
                           render_kw={"list": "timezones"},
                           validators=[validateTimeZoneName()]
                           )
@@ -207,169 +213,23 @@ class ScheduleLimitForm(Form):
        **kwargs,
    ):
        super().__init__(formdata, obj, prefix, data, meta, **kwargs)
        self.monday.form.enabled.label.text = _l("Monday")
        self.tuesday.form.enabled.label.text = _l("Tuesday")
        self.wednesday.form.enabled.label.text = _l("Wednesday")
        self.thursday.form.enabled.label.text = _l("Thursday")
        self.friday.form.enabled.label.text = _l("Friday")
        self.saturday.form.enabled.label.text = _l("Saturday")
        self.sunday.form.enabled.label.text = _l("Sunday")


def validate_time_between_check_has_values(form):
    """
    Custom validation function for TimeBetweenCheckForm.
    Returns True if at least one time interval field has a value > 0.
    """
    res = any([
        form.weeks.data and int(form.weeks.data) > 0,
        form.days.data and int(form.days.data) > 0,
        form.hours.data and int(form.hours.data) > 0,
        form.minutes.data and int(form.minutes.data) > 0,
        form.seconds.data and int(form.seconds.data) > 0
    ])

    return res


class RequiredTimeInterval(object):
    """
    WTForms validator that ensures at least one time interval field has a value > 0.
    Use this with FormField(TimeBetweenCheckForm, validators=[RequiredTimeInterval()]).
    """
    def __init__(self, message=None):
        self.message = message or _l('At least one time interval (weeks, days, hours, minutes, or seconds) must be specified.')

    def __call__(self, form, field):
        if not validate_time_between_check_has_values(field.form):
            raise ValidationError(self.message)
        self.monday.form.enabled.label.text = "Monday"
        self.tuesday.form.enabled.label.text = "Tuesday"
        self.wednesday.form.enabled.label.text = "Wednesday"
        self.thursday.form.enabled.label.text = "Thursday"
        self.friday.form.enabled.label.text = "Friday"
        self.saturday.form.enabled.label.text = "Saturday"
        self.sunday.form.enabled.label.text = "Sunday"


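# Illustrative sketch (not part of the diff): the "at least one interval > 0" rule
# that validate_time_between_check_has_values() and RequiredTimeInterval enforce,
# shown in isolation with plain data.
_intervals = {'weeks': None, 'days': 0, 'hours': 1, 'minutes': None, 'seconds': None}
assert any(v and int(v) > 0 for v in _intervals.values())  # hours=1 satisfies the rule
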
class TimeBetweenCheckForm(Form):
    weeks = IntegerField(_l('Weeks'), validators=[validators.Optional(), validators.NumberRange(min=0, message=_l("Should contain zero or more seconds"))])
    days = IntegerField(_l('Days'), validators=[validators.Optional(), validators.NumberRange(min=0, message=_l("Should contain zero or more seconds"))])
    hours = IntegerField(_l('Hours'), validators=[validators.Optional(), validators.NumberRange(min=0, message=_l("Should contain zero or more seconds"))])
    minutes = IntegerField(_l('Minutes'), validators=[validators.Optional(), validators.NumberRange(min=0, message=_l("Should contain zero or more seconds"))])
    seconds = IntegerField(_l('Seconds'), validators=[validators.Optional(), validators.NumberRange(min=0, message=_l("Should contain zero or more seconds"))])
    weeks = IntegerField('Weeks', validators=[validators.Optional(), validators.NumberRange(min=0, message="Should contain zero or more seconds")])
    days = IntegerField('Days', validators=[validators.Optional(), validators.NumberRange(min=0, message="Should contain zero or more seconds")])
    hours = IntegerField('Hours', validators=[validators.Optional(), validators.NumberRange(min=0, message="Should contain zero or more seconds")])
    minutes = IntegerField('Minutes', validators=[validators.Optional(), validators.NumberRange(min=0, message="Should contain zero or more seconds")])
    seconds = IntegerField('Seconds', validators=[validators.Optional(), validators.NumberRange(min=0, message="Should contain zero or more seconds")])
    # @todo add total seconds minimum validator = minimum_seconds_recheck_time

    def __init__(self, formdata=None, obj=None, prefix="", data=None, meta=None, **kwargs):
        super().__init__(formdata, obj, prefix, data, meta, **kwargs)
        self.require_at_least_one = kwargs.get('require_at_least_one', False)
        self.require_at_least_one_message = kwargs.get('require_at_least_one_message', REQUIRE_ATLEAST_ONE_TIME_PART_MESSAGE_DEFAULT)

    def validate(self, **kwargs):
        """Custom validation that can optionally require at least one time interval."""
        # Run normal field validation first
        if not super().validate(**kwargs):
            return False

        # Apply optional "at least one" validation
        if self.require_at_least_one:
            if not validate_time_between_check_has_values(self):
                # Add error to the form's general errors (not field-specific)
                if not hasattr(self, '_formdata_errors'):
                    self._formdata_errors = []
                self._formdata_errors.append(self.require_at_least_one_message)
                return False

        return True


class EnhancedFormField(FormField):
    """
    An enhanced FormField that supports conditional validation with top-level error messages.
    Adds a 'top_errors' property for validation errors at the FormField level.
    """

    def __init__(self, form_class, label=None, validators=None, separator="-",
                 conditional_field=None, conditional_message=None, conditional_test_function=None, **kwargs):
        """
        Initialize EnhancedFormField with optional conditional validation.

        :param conditional_field: Name of the field this FormField depends on (e.g. 'time_between_check_use_default')
        :param conditional_message: Error message to show when validation fails
        :param conditional_test_function: Custom function to test if FormField has valid values.
                                          Should take self.form as parameter and return True if valid.
        """
        super().__init__(form_class, label, validators, separator, **kwargs)
        self.top_errors = []
        self.conditional_field = conditional_field
        self.conditional_message = conditional_message or "At least one field must have a value when not using defaults."
        self.conditional_test_function = conditional_test_function

    def validate(self, form, extra_validators=()):
        """
        Custom validation that supports conditional logic and stores top-level errors.
        """
        self.top_errors = []

        # First run the normal FormField validation
        base_valid = super().validate(form, extra_validators)

        # Apply conditional validation if configured
        if self.conditional_field and hasattr(form, self.conditional_field):
            conditional_field_obj = getattr(form, self.conditional_field)

            # If the conditional field is False/unchecked, check if this FormField has any values
            if not conditional_field_obj.data:
                # Use custom test function if provided, otherwise use generic fallback
                if self.conditional_test_function:
                    has_any_value = self.conditional_test_function(self.form)
                else:
                    # Generic fallback - check if any field has truthy data
                    has_any_value = any(field.data for field in self.form if hasattr(field, 'data') and field.data)

                if not has_any_value:
                    self.top_errors.append(self.conditional_message)
                    base_valid = False

        return base_valid


class RequiredFormField(FormField):
    """
    A FormField that passes require_at_least_one=True to TimeBetweenCheckForm.
    Use this when you want the sub-form to always require at least one value.
    """

    def __init__(self, form_class, label=None, validators=None, separator="-", **kwargs):
        super().__init__(form_class, label, validators, separator, **kwargs)

    def process(self, formdata, data=unset_value, extra_filters=None):
        if extra_filters:
            raise TypeError(
                "FormField cannot take filters, as the encapsulated"
                " data is not mutable."
            )

        if data is unset_value:
            try:
                data = self.default()
            except TypeError:
                data = self.default
            self._obj = data

        self.object_data = data

        prefix = self.name + self.separator
        # Pass require_at_least_one=True to the sub-form
        if isinstance(data, dict):
            self.form = self.form_class(formdata=formdata, prefix=prefix, require_at_least_one=True, **data)
        else:
            self.form = self.form_class(formdata=formdata, obj=data, prefix=prefix, require_at_least_one=True)

    @property
    def errors(self):
        """Include sub-form validation errors"""
        form_errors = self.form.errors
        # Add any general form errors to a special 'form' key
        if hasattr(self.form, '_formdata_errors') and self.form._formdata_errors:
            form_errors = dict(form_errors)  # Make a copy
            form_errors['form'] = self.form._formdata_errors
        return form_errors


# Separated by key:value
class StringDictKeyValue(StringField):
    widget = widgets.TextArea()
@@ -470,16 +330,11 @@ class ValidateAppRiseServers(object):
        import apprise
        from .notification.apprise_plugin.assets import apprise_asset
        from .notification.apprise_plugin.custom_handlers import apprise_http_custom_handler  # noqa: F401
        from changedetectionio.jinja2_custom import render as jinja_render

        apobj = apprise.Apprise(asset=apprise_asset)

        for server_url in field.data:
            generic_notification_context_data = NotificationContextData()
            # Make sure something is at least in all those regular token fields
            generic_notification_context_data.set_random_for_validation()

            url = jinja_render(template_str=server_url.strip(), **generic_notification_context_data).strip()
            url = server_url.strip()
            if url.startswith("#"):
                continue

@@ -492,8 +347,10 @@ class ValidateJinja2Template(object):
    Validates that a {token} is from a valid set
    """
    def __call__(self, form, field):
        from changedetectionio.jinja2_custom import create_jinja_env
        from changedetectionio import notification

        from jinja2 import BaseLoader, TemplateSyntaxError, UndefinedError
        from jinja2.sandbox import ImmutableSandboxedEnvironment
        from jinja2.meta import find_undeclared_variables
        import jinja2.exceptions

@@ -501,13 +358,9 @@ class ValidateJinja2Template(object):
        joined_data = ' '.join(map(str, field.data)) if isinstance(field.data, list) else f"{field.data}"

        try:
            # Use the shared helper to create a properly configured environment
            jinja2_env = create_jinja_env(loader=BaseLoader)

            # Add notification tokens for validation
            static_token_placeholders = NotificationContextData()
            static_token_placeholders.set_random_for_validation()
            jinja2_env.globals.update(static_token_placeholders)
            jinja2_env = ImmutableSandboxedEnvironment(loader=BaseLoader)
            jinja2_env.globals.update(notification.valid_tokens)
            # Extra validation tokens provided on the form_class(... extra_tokens={}) setup
            if hasattr(field, 'extra_notification_tokens'):
                jinja2_env.globals.update(field.extra_notification_tokens)

@@ -519,7 +372,6 @@ class ValidateJinja2Template(object):
        except jinja2.exceptions.SecurityError as e:
            raise ValidationError(f"This is not a valid Jinja2 template: {e}") from e

        # Check for undeclared variables
        ast = jinja2_env.parse(joined_data)
        undefined = ", ".join(find_undeclared_variables(ast))
        if undefined:
@@ -542,23 +394,19 @@ class validateURL(object):


def validate_url(test_url):
    from changedetectionio.validate_url import is_safe_valid_url
    if not is_safe_valid_url(test_url):
        # If hosts that only contain alphanumerics are allowed ("localhost" for example)
        try:
            url_validator(test_url, simple_host=allow_simplehost)
        except validators.ValidationError:
            # @todo check for xss
            message = f"'{test_url}' is not a valid URL."
        # This should be wtforms.validators.
        raise ValidationError('Watch protocol is not permitted or invalid URL format')


class ValidateSinglePythonRegexString(object):
    def __init__(self, message=None):
        self.message = message

    def __call__(self, form, field):
        try:
            re.compile(field.data)
        except re.error:
            message = field.gettext('RegEx \'%s\' is not a valid regular expression.')
            raise ValidationError(message % (field.data))
            raise ValidationError(message)

    from .model.Watch import is_safe_url
    if not is_safe_url(test_url):
        # This should be wtforms.validators.
        raise ValidationError('Watch protocol is not permitted by SAFE_PROTOCOL_REGEX or incorrect URL format')

class ValidateListRegex(object):
    """
@@ -578,7 +426,6 @@ class ValidateListRegex(object):
            message = field.gettext('RegEx \'%s\' is not a valid regular expression.')
            raise ValidationError(message % (line))


class ValidateCSSJSONXPATHInput(object):
    """
    Filter validation
@@ -608,12 +455,13 @@ class ValidateCSSJSONXPATHInput(object):
                raise ValidationError("XPath not permitted in this field!")
            from lxml import etree, html
            import elementpath
            from changedetectionio.html_tools import SafeXPath3Parser
            # xpath 2.0-3.1
            from elementpath.xpath3 import XPath3Parser
            tree = html.fromstring("<html></html>")
            line = line.replace('xpath:', '')

            try:
                elementpath.select(tree, line.strip(), parser=SafeXPath3Parser)
                elementpath.select(tree, line.strip(), parser=XPath3Parser)
            except elementpath.ElementPathError as e:
                message = field.gettext('\'%s\' is not a valid XPath expression. (%s)')
                raise ValidationError(message % (line, str(e)))
@@ -677,62 +525,25 @@ class ValidateCSSJSONXPATHInput(object):
            except:
                raise ValidationError("A system-error occurred when validating your jq expression")

class ValidateSimpleURL:
    """Validate that the value can be parsed by urllib.parse.urlparse() and has a scheme/netloc."""
    def __init__(self, message=None):
        self.message = message or "Invalid URL."

    def __call__(self, form, field):
        data = (field.data or "").strip()
        if not data:
            return  # empty is OK - pair with validators.Optional()
        from urllib.parse import urlparse

        parsed = urlparse(data)
        if not parsed.scheme or not parsed.netloc:
            raise ValidationError(self.message)

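# Illustrative sketch (not part of the diff): the urlparse() check ValidateSimpleURL
# performs above, and why a bare host fails it.
from urllib.parse import urlparse
_parsed = urlparse("https://example.com/x")
assert _parsed.scheme and _parsed.netloc      # accepted
assert not urlparse("example.com/x").netloc   # no scheme -> empty netloc -> rejected
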
class ValidateStartsWithRegex(object):
    def __init__(self, regex, *, flags=0, message=None, allow_empty=True, split_lines=True):
        # compile with given flags (we'll pass re.IGNORECASE below)
        self.pattern = re.compile(regex, flags) if isinstance(regex, str) else regex
        self.message = message
        self.allow_empty = allow_empty
        self.split_lines = split_lines

    def __call__(self, form, field):
        data = field.data
        if not data:
            return

        # normalize into list of lines
        if isinstance(data, str) and self.split_lines:
            lines = data.splitlines()
        elif isinstance(data, (list, tuple)):
            lines = data
        else:
            lines = [data]

        for line in lines:
            stripped = line.strip()
            if not stripped:
                if self.allow_empty:
                    continue
                raise ValidationError(self.message or _l("Empty value not allowed."))
            if not self.pattern.match(stripped):
                raise ValidationError(self.message or _l("Invalid value."))

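# Illustrative usage sketch (not part of the diff): per the comment above, a caller
# would pass re.IGNORECASE; the pattern and message here are hypothetical examples.
# ValidateStartsWithRegex(r'https?://', flags=re.IGNORECASE,
#                         message='Each line must start with http:// or https://')
import re
assert re.compile(r'https?://', re.IGNORECASE).match("HTTPS://example.com")
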
class quickWatchForm(Form):
    url = fields.URLField(_l('URL'), validators=[validateURL()])
    tags = StringTagUUID(_l('Group tag'), validators=[validators.Optional()])
    watch_submit_button = SubmitField(_l('Watch'), render_kw={"class": "pure-button pure-button-primary"})
    processor = RadioField(_l('Processor'), choices=lambda: processors.available_processors(), default=processors.get_default_processor)
    edit_and_watch_submit_button = SubmitField(_l('Edit > Watch'), render_kw={"class": "pure-button pure-button-primary"})
    from . import processors
    import random

    url = fields.URLField('URL', validators=[validateURL()])
    tags = StringTagUUID('Group tag', [validators.Optional()])
    watch_submit_button = SubmitField('Watch', render_kw={"class": "pure-button pure-button-primary"})
    processor = RadioField(u'Processor', choices=processors.available_processors(), default="text_json_diff")
    LLM_prompt = TextAreaField(u'AI Prompt', [validators.Optional()], render_kw={"placeholder": f'Example, "{random.choice(LLM_example_texts)}"'})
    LLM_send_type = RadioField(u'LLM Send', choices=LLM_send_type_choices, default="text")

    edit_and_watch_submit_button = SubmitField('Edit > Watch', render_kw={"class": "pure-button pure-button-primary"})



# Common to a single watch and the global settings
class commonSettingsForm(Form):
    from . import processors
    import random

    def __init__(self, formdata=None, obj=None, prefix="", data=None, meta=None, **kwargs):
        super().__init__(formdata, obj, prefix, data, meta, **kwargs)
@@ -740,99 +551,88 @@ class commonSettingsForm(Form):
        self.notification_title.extra_notification_tokens = kwargs.get('extra_notification_tokens', {})
        self.notification_urls.extra_notification_tokens = kwargs.get('extra_notification_tokens', {})

    fetch_backend = RadioField(_l('Fetch Method'), choices=content_fetchers.available_fetchers(), validators=[ValidateContentFetcherIsReady()])
    notification_body = TextAreaField(_l('Notification Body'), default='{{ watch_url }} had a change.', validators=[validators.Optional(), ValidateJinja2Template()])
    notification_format = SelectField(_l('Notification format'), choices=list(valid_notification_formats.items()))
    notification_title = StringField(_l('Notification Title'), default='ChangeDetection.io Notification - {{ watch_url }}', validators=[validators.Optional(), ValidateJinja2Template()])
    notification_urls = StringListField(_l('Notification URL List'), validators=[validators.Optional(), ValidateAppRiseServers(), ValidateJinja2Template()])
    processor = RadioField(label=_l("Processor - What do you want to achieve?"), choices=lambda: processors.available_processors(), default=processors.get_default_processor)
    scheduler_timezone_default = StringField(_l("Default timezone for watch check scheduler"), render_kw={"list": "timezones"}, validators=[validateTimeZoneName()])
    webdriver_delay = IntegerField(_l('Wait seconds before extracting text'), validators=[validators.Optional(), validators.NumberRange(min=1, message=_l("Should contain one or more seconds"))])

    # Not true anymore but keep the validate_ hook for future use, we convert color tags
    # def validate_notification_urls(self, field):
    #     """Validate that HTML Color format is not used with Telegram"""
    #     if self.notification_format.data == 'HTML Color' and field.data:
    #         for url in field.data:
    #             if url and ('tgram://' in url or 'discord://' in url or 'discord.com/api/webhooks' in url):
    #                 raise ValidationError('HTML Color format is not supported by Telegram and Discord. Please choose another Notification Format (Plain Text, HTML, or Markdown to HTML).')
    extract_title_as_title = BooleanField('Extract <title> from document and use as watch title', default=False)
    fetch_backend = RadioField(u'Fetch Method', choices=content_fetchers.available_fetchers(), validators=[ValidateContentFetcherIsReady()])
    notification_body = TextAreaField('Notification Body', default='{{ watch_url }} had a change.', validators=[validators.Optional(), ValidateJinja2Template()])
    notification_format = SelectField('Notification format', choices=valid_notification_formats.keys())
    notification_title = StringField('Notification Title', default='ChangeDetection.io Notification - {{ watch_url }}', validators=[validators.Optional(), ValidateJinja2Template()])
    notification_urls = StringListField('Notification URL List', validators=[validators.Optional(), ValidateAppRiseServers(), ValidateJinja2Template()])
    processor = RadioField(label=u"Processor - What do you want to achieve?", choices=processors.available_processors(), default="text_json_diff")
    timezone = StringField("Timezone for watch schedule", render_kw={"list": "timezones"}, validators=[validateTimeZoneName()])
    webdriver_delay = IntegerField('Wait seconds before extracting text', validators=[validators.Optional(), validators.NumberRange(min=1, message="Should contain one or more seconds")])

    LLM_prompt = TextAreaField(u'AI Prompt', [validators.Optional()], render_kw={"placeholder": f'Example, "{random.choice(LLM_example_texts)}"'})
    LLM_send_type = RadioField(u'LLM Send', choices=LLM_send_type_choices, default="text")

class importForm(Form):
    processor = RadioField(_l('Processor'), choices=lambda: processors.available_processors(), default=processors.get_default_processor)
    urls = TextAreaField(_l('URLs'))
    xlsx_file = FileField(_l('Upload .xlsx file'), validators=[FileAllowed(['xlsx'], _l('Must be .xlsx file!'))])
    file_mapping = SelectField(_l('File mapping'), [validators.DataRequired()], choices={('wachete', 'Wachete mapping'), ('custom', 'Custom mapping')})
    from . import processors
    processor = RadioField(u'Processor', choices=processors.available_processors(), default="text_json_diff")
    urls = TextAreaField('URLs')
    xlsx_file = FileField('Upload .xlsx file', validators=[FileAllowed(['xlsx'], 'Must be .xlsx file!')])
    file_mapping = SelectField('File mapping', [validators.DataRequired()], choices={('wachete', 'Wachete mapping'), ('custom', 'Custom mapping')})

class SingleBrowserStep(Form):

    operation = SelectField(_l('Operation'), [validators.Optional()], choices=browser_step_ui_config.keys())
    operation = SelectField('Operation', [validators.Optional()], choices=browser_step_ui_config.keys())

    # maybe better to set some <script>var..
    selector = StringField(_l('Selector'), [validators.Optional()], render_kw={"placeholder": "CSS or xPath selector"})
    optional_value = StringField(_l('value'), [validators.Optional()], render_kw={"placeholder": "Value"})
    selector = StringField('Selector', [validators.Optional()], render_kw={"placeholder": "CSS or xPath selector"})
    optional_value = StringField('value', [validators.Optional()], render_kw={"placeholder": "Value"})
    # @todo move to JS? ajax fetch new field?
    # remove_button = SubmitField(_l('-'), render_kw={"type": "button", "class": "pure-button pure-button-primary", 'title': 'Remove'})
    # add_button = SubmitField(_l('+'), render_kw={"type": "button", "class": "pure-button pure-button-primary", 'title': 'Add new step after'})
    # remove_button = SubmitField('-', render_kw={"type": "button", "class": "pure-button pure-button-primary", 'title': 'Remove'})
    # add_button = SubmitField('+', render_kw={"type": "button", "class": "pure-button pure-button-primary", 'title': 'Add new step after'})

class processor_text_json_diff_form(commonSettingsForm):

url = fields.URLField('Web Page URL', validators=[validateURL()])
tags = StringTagUUID('Group Tag', [validators.Optional()], default='')
url = fields.URLField('URL', validators=[validateURL()])
tags = StringTagUUID('Group tag', [validators.Optional()], default='')

time_between_check = EnhancedFormField(
TimeBetweenCheckForm,
label=_l('Time Between Check'),
conditional_field='time_between_check_use_default',
conditional_message=REQUIRE_ATLEAST_ONE_TIME_PART_WHEN_NOT_GLOBAL_DEFAULT,
conditional_test_function=validate_time_between_check_has_values
)
time_between_check = FormField(TimeBetweenCheckForm)

time_schedule_limit = FormField(ScheduleLimitForm)

time_between_check_use_default = BooleanField(_l('Use global settings for time between check and scheduler.'), default=False)
time_between_check_use_default = BooleanField('Use global settings for time between check', default=False)

include_filters = StringListField(_l('CSS/JSONPath/JQ/XPath Filters'), [ValidateCSSJSONXPATHInput()], default='')
include_filters = StringListField('CSS/JSONPath/JQ/XPath Filters', [ValidateCSSJSONXPATHInput()], default='')

subtractive_selectors = StringListField(_l('Remove elements'), [ValidateCSSJSONXPATHInput(allow_json=False)])
subtractive_selectors = StringListField('Remove elements', [ValidateCSSJSONXPATHInput(allow_json=False)])

extract_text = StringListField(_l('Extract text'), [ValidateListRegex()])
extract_text = StringListField('Extract text', [ValidateListRegex()])

title = StringField(_l('Title'), default='')
title = StringField('Title', default='')

ignore_text = StringListField(_l('Ignore lines containing'), [ValidateListRegex()])
ignore_text = StringListField('Ignore lines containing', [ValidateListRegex()])
headers = StringDictKeyValue('Request headers')
body = TextAreaField(_l('Request body'), [validators.Optional()])
method = SelectField(_l('Request method'), choices=valid_method, default=default_method)
ignore_status_codes = BooleanField(_l('Ignore status codes (process non-2xx status codes as normal)'), default=False)
check_unique_lines = BooleanField(_l('Only trigger when unique lines appear in all history'), default=False)
remove_duplicate_lines = BooleanField(_l('Remove duplicate lines of text'), default=False)
sort_text_alphabetically = BooleanField(_l('Sort text alphabetically'), default=False)
strip_ignored_lines = TernaryNoneBooleanField(_l('Strip ignored lines'), default=None)
trim_text_whitespace = BooleanField(_l('Trim whitespace before and after text'), default=False)
body = TextAreaField('Request body', [validators.Optional()])
method = SelectField('Request method', choices=valid_method, default=default_method)
ignore_status_codes = BooleanField('Ignore status codes (process non-2xx status codes as normal)', default=False)
check_unique_lines = BooleanField('Only trigger when unique lines appear in all history', default=False)
remove_duplicate_lines = BooleanField('Remove duplicate lines of text', default=False)
sort_text_alphabetically = BooleanField('Sort text alphabetically', default=False)
trim_text_whitespace = BooleanField('Trim whitespace before and after text', default=False)

filter_text_added = BooleanField(_l('Added lines'), default=True)
filter_text_replaced = BooleanField(_l('Replaced/changed lines'), default=True)
filter_text_removed = BooleanField(_l('Removed lines'), default=True)
filter_text_added = BooleanField('Added lines', default=True)
filter_text_replaced = BooleanField('Replaced/changed lines', default=True)
filter_text_removed = BooleanField('Removed lines', default=True)

trigger_text = StringListField(_l('Keyword triggers - Trigger/wait for text'), [validators.Optional(), ValidateListRegex()])
browser_steps = FieldList(FormField(SingleBrowserStep), min_entries=10)
text_should_not_be_present = StringListField(_l('Block change-detection while text matches'), [validators.Optional(), ValidateListRegex()])
webdriver_js_execute_code = TextAreaField(_l('Execute JavaScript before change detection'), render_kw={"rows": "5"}, validators=[validators.Optional()])
trigger_text = StringListField('Keyword triggers - Trigger/wait for text', [validators.Optional(), ValidateListRegex()])
if os.getenv("PLAYWRIGHT_DRIVER_URL"):
browser_steps = FieldList(FormField(SingleBrowserStep), min_entries=10)
text_should_not_be_present = StringListField('Block change-detection while text matches', [validators.Optional(), ValidateListRegex()])
webdriver_js_execute_code = TextAreaField('Execute JavaScript before change detection', render_kw={"rows": "5"}, validators=[validators.Optional()])

save_button = SubmitField(_l('Save'), render_kw={"class": "pure-button pure-button-primary"})
save_button = SubmitField('Save', render_kw={"class": "pure-button button-small pure-button-primary"})

proxy = RadioField(_l('Proxy'))
# filter_failure_notification_send @todo make ternary
filter_failure_notification_send = BooleanField(_l('Send a notification when the filter can no longer be found on the page'), default=False)
notification_muted = TernaryNoneBooleanField(_l('Notifications'), default=None, yes_text=_l("Muted"), no_text=_l("On"))
notification_screenshot = BooleanField(_l('Attach screenshot to notification (where possible)'), default=False)
proxy = RadioField('Proxy')
filter_failure_notification_send = BooleanField(
'Send a notification when the filter can no longer be found on the page', default=False)

conditions_match_logic = RadioField(_l('Match'), choices=[('ALL', _l('Match all of the following')),('ANY', _l('Match any of the following'))], default='ALL')
notification_muted = BooleanField('Notifications Muted / Off', default=False)
notification_screenshot = BooleanField('Attach screenshot to notification (where possible)', default=False)

conditions_match_logic = RadioField(u'Match', choices=[('ALL', 'Match all of the following'),('ANY', 'Match any of the following')], default='ALL')
conditions = FieldList(FormField(ConditionFormRow), min_entries=1)  # Add rule logic here
use_page_title_in_list = TernaryNoneBooleanField(_l('Use page <title> in list'), default=None)

history_snapshot_max_length = IntegerField(_l('Number of history items per watch to keep'), render_kw={"style": "width: 5em;"}, validators=[validators.Optional(), validators.NumberRange(min=2)])

def extra_tab_content(self):
return None
@@ -844,12 +644,12 @@ class processor_text_json_diff_form(commonSettingsForm):
if not super().validate():
return False

from changedetectionio.jinja2_custom import render as jinja_render
from changedetectionio.safe_jinja import render as jinja_render
result = True

# Fail form validation when a body is set for a GET
if self.method.data == 'GET' and self.body.data:
self.body.errors.append(gettext('Body must be empty when Request Method is set to GET'))
self.body.errors.append('Body must be empty when Request Method is set to GET')
result = False

# Attempt to validate jinja2 templates in the URL
@@ -858,11 +658,11 @@ class processor_text_json_diff_form(commonSettingsForm):
except ModuleNotFoundError as e:
# in case jinja2_time or others are missing
logger.error(e)
self.url.errors.append(gettext('Invalid template syntax configuration: %(error)s') % {'error': e})
self.url.errors.append(f'Invalid template syntax configuration: {e}')
result = False
except Exception as e:
logger.error(e)
self.url.errors.append(gettext('Invalid template syntax: %(error)s') % {'error': e})
self.url.errors.append(f'Invalid template syntax: {e}')
result = False

# Attempt to validate jinja2 templates in the body
@@ -872,11 +672,11 @@ class processor_text_json_diff_form(commonSettingsForm):
except ModuleNotFoundError as e:
# in case jinja2_time or others are missing
logger.error(e)
self.body.errors.append(gettext('Invalid template syntax configuration: %(error)s') % {'error': e})
self.body.errors.append(f'Invalid template syntax configuration: {e}')
result = False
except Exception as e:
logger.error(e)
self.body.errors.append(gettext('Invalid template syntax: %(error)s') % {'error': e})
self.body.errors.append(f'Invalid template syntax: {e}')
result = False

# Attempt to validate jinja2 templates in the headers
@@ -887,11 +687,11 @@ class processor_text_json_diff_form(commonSettingsForm):
except ModuleNotFoundError as e:
# in case jinja2_time or others are missing
logger.error(e)
self.headers.errors.append(gettext('Invalid template syntax configuration: %(error)s') % {'error': e})
self.headers.errors.append(f'Invalid template syntax configuration: {e}')
result = False
except Exception as e:
logger.error(e)
self.headers.errors.append(gettext('Invalid template syntax in \"%(header)s\" header: %(error)s') % {'header': header, 'error': e})
self.headers.errors.append(f'Invalid template syntax in "{header}" header: {e}')
result = False

return result
@@ -907,134 +707,117 @@ class processor_text_json_diff_form(commonSettingsForm):
):
super().__init__(formdata, obj, prefix, data, meta, **kwargs)
if kwargs and kwargs.get('default_system_settings'):
default_tz = kwargs.get('default_system_settings').get('application', {}).get('scheduler_timezone_default')
default_tz = kwargs.get('default_system_settings').get('application', {}).get('timezone')
if default_tz:
self.time_schedule_limit.form.timezone.render_kw['placeholder'] = default_tz


class SingleExtraProxy(Form):

# maybe better to set some <script>var..
proxy_name = StringField(_l('Name'), [validators.Optional()], render_kw={"placeholder": "Name"})
proxy_url = StringField(_l('Proxy URL'), [
validators.Optional(),
ValidateStartsWithRegex(
regex=r'^(https?|socks5)://',  # ✅ main pattern
flags=re.IGNORECASE,  # ✅ makes it case-insensitive
message=_l('Proxy URLs must start with http://, https:// or socks5://'),
),
ValidateSimpleURL()
], render_kw={"placeholder": "socks5:// or regular proxy http://user:pass@...:3128", "size":50})
proxy_name = StringField('Name', [validators.Optional()], render_kw={"placeholder": "Name"})
proxy_url = StringField('Proxy URL', [validators.Optional()], render_kw={"placeholder": "socks5:// or regular proxy http://user:pass@...:3128", "size":50})
# @todo do the validation here instead

class SingleExtraBrowser(Form):
browser_name = StringField(_l('Name'), [validators.Optional()], render_kw={"placeholder": "Name"})
browser_connection_url = StringField(_l('Browser connection URL'), [
validators.Optional(),
ValidateStartsWithRegex(
regex=r'^(wss?|ws)://',
flags=re.IGNORECASE,
message=_l('Browser URLs must start with wss:// or ws://')
),
ValidateSimpleURL()
], render_kw={"placeholder": "wss://brightdata... wss://oxylabs etc", "size":50})
browser_name = StringField('Name', [validators.Optional()], render_kw={"placeholder": "Name"})
browser_connection_url = StringField('Browser connection URL', [validators.Optional()], render_kw={"placeholder": "wss://brightdata... wss://oxylabs etc", "size":50})
# @todo do the validation here instead

class DefaultUAInputForm(Form):
html_requests = StringField(_l('Plaintext requests'), validators=[validators.Optional()], render_kw={"placeholder": "<default>"})
html_requests = StringField('Plaintext requests', validators=[validators.Optional()], render_kw={"placeholder": "<default>"})
if os.getenv("PLAYWRIGHT_DRIVER_URL") or os.getenv("WEBDRIVER_URL"):
html_webdriver = StringField(_l('Chrome requests'), validators=[validators.Optional()], render_kw={"placeholder": "<default>"})
html_webdriver = StringField('Chrome requests', validators=[validators.Optional()], render_kw={"placeholder": "<default>"})

# datastore.data['settings']['requests']..
class globalSettingsRequestForm(Form):
time_between_check = RequiredFormField(TimeBetweenCheckForm, label=_l('Time Between Check'))
time_between_check = FormField(TimeBetweenCheckForm)
time_schedule_limit = FormField(ScheduleLimitForm)
proxy = RadioField(_l('Default proxy'))
jitter_seconds = IntegerField(_l('Random jitter seconds ± check'),
proxy = RadioField('Proxy')
jitter_seconds = IntegerField('Random jitter seconds ± check',
render_kw={"style": "width: 5em;"},
validators=[validators.NumberRange(min=0, message=_l("Should contain zero or more seconds"))])
validators=[validators.NumberRange(min=0, message="Should contain zero or more seconds")])

workers = IntegerField(_l('Number of fetch workers'),
workers = IntegerField('Number of fetch workers',
render_kw={"style": "width: 5em;"},
validators=[validators.NumberRange(min=1, max=50,
message=_l("Should be between 1 and 50"))])

timeout = IntegerField(_l('Requests timeout in seconds'),
render_kw={"style": "width: 5em;"},
validators=[validators.NumberRange(min=1, max=999,
message=_l("Should be between 1 and 999"))])

message="Should be between 1 and 50")])

extra_proxies = FieldList(FormField(SingleExtraProxy), min_entries=5)
extra_browsers = FieldList(FormField(SingleExtraBrowser), min_entries=5)

default_ua = FormField(DefaultUAInputForm, label=_l("Default User-Agent overrides"))
default_ua = FormField(DefaultUAInputForm, label="Default User-Agent overrides")

def validate_extra_proxies(self, extra_validators=None):
for e in self.data['extra_proxies']:
if e.get('proxy_name') or e.get('proxy_url'):
if not e.get('proxy_name','').strip() or not e.get('proxy_url','').strip():
self.extra_proxies.errors.append(gettext('Both a name and a Proxy URL are required.'))
self.extra_proxies.errors.append('Both a name and a Proxy URL are required.')
return False

class globalSettingsApplicationUIForm(Form):
open_diff_in_new_tab = BooleanField(_l("Open 'History' page in a new tab"), default=True, validators=[validators.Optional()])
socket_io_enabled = BooleanField(_l('Realtime UI Updates Enabled'), default=True, validators=[validators.Optional()])
favicons_enabled = BooleanField(_l('Favicons Enabled'), default=True, validators=[validators.Optional()])
use_page_title_in_list = BooleanField(_l('Use page <title> in watch overview list'))  #BooleanField=True
open_diff_in_new_tab = BooleanField("Open 'History' page in a new tab", default=True, validators=[validators.Optional()])
socket_io_enabled = BooleanField('Realtime UI Updates Enabled', default=True, validators=[validators.Optional()])
favicons_enabled = BooleanField('Favicons Enabled', default=True, validators=[validators.Optional()])

class globalSettingsApplicationAIKeysForm(Form):

openai = StringField('OpenAI Key',
validators=[validators.Optional()],
render_kw={"placeholder": 'xxxxxxxxx'}
)
gemini = StringField('Google Gemini Key',
validators=[validators.Optional()],
render_kw={"placeholder": 'ooooooooo'}
)

class globalSettingsApplicationAIForm(Form):

#@todo use only configured types?
LLM_backend = RadioField(u'LLM Backend',
choices=[('openai', 'Open AI'), ('gemini', 'Gemini')],
default="text")

# So that we can pass this to our LLM/__init__.py as a keys dict
API_keys = FormField(globalSettingsApplicationAIKeysForm)


# datastore.data['settings']['application']..
class globalSettingsApplicationForm(commonSettingsForm):

api_access_token_enabled = BooleanField(_l('API access token security check enabled'), default=True, validators=[validators.Optional()])
base_url = StringField(_l('Notification base URL override'),
api_access_token_enabled = BooleanField('API access token security check enabled', default=True, validators=[validators.Optional()])
base_url = StringField('Notification base URL override',
validators=[validators.Optional()],
render_kw={"placeholder": os.getenv('BASE_URL', 'Not set')}
)
empty_pages_are_a_change = BooleanField(_l('Treat empty pages as a change?'), default=False)
fetch_backend = RadioField(_l('Fetch Method'), default="html_requests", choices=content_fetchers.available_fetchers(), validators=[ValidateContentFetcherIsReady()])
global_ignore_text = StringListField(_l('Ignore Text'), [ValidateListRegex()])
global_subtractive_selectors = StringListField(_l('Remove elements'), [ValidateCSSJSONXPATHInput(allow_json=False)])
ignore_whitespace = BooleanField(_l('Ignore whitespace'))

# Screenshot comparison settings
min_change_percentage = FloatField(
'Screenshot: Minimum Change Percentage',
validators=[
validators.Optional(),
validators.NumberRange(min=0.0, max=100.0, message=_l('Must be between 0 and 100'))
],
default=0.1,
render_kw={"placeholder": "0.1", "style": "width: 8em;"}
)

password = SaltyPasswordField(_l('Password'))
pager_size = IntegerField(_l('Pager size'),
empty_pages_are_a_change = BooleanField('Treat empty pages as a change?', default=False)
fetch_backend = RadioField('Fetch Method', default="html_requests", choices=content_fetchers.available_fetchers(), validators=[ValidateContentFetcherIsReady()])
global_ignore_text = StringListField('Ignore Text', [ValidateListRegex()])
global_subtractive_selectors = StringListField('Remove elements', [ValidateCSSJSONXPATHInput(allow_json=False)])
ignore_whitespace = BooleanField('Ignore whitespace')
password = SaltyPasswordField()
pager_size = IntegerField('Pager size',
render_kw={"style": "width: 5em;"},
validators=[validators.NumberRange(min=0,
message=_l("Should be at least zero (disabled)"))])
message="Should be at least zero (disabled)")])

rss_content_format = SelectField(_l('RSS Content format'), choices=list(RSS_FORMAT_TYPES.items()))
rss_template_type = SelectField(_l('RSS <description> body built from'), choices=list(RSS_TEMPLATE_TYPE_OPTIONS.items()))
rss_template_override = TextAreaField(_l('RSS "System default" template override'), render_kw={"rows": "5", "placeholder": RSS_TEMPLATE_HTML_DEFAULT}, validators=[validators.Optional(), ValidateJinja2Template()])
rss_content_format = SelectField('RSS Content format', choices=RSS_FORMAT_TYPES)

removepassword_button = SubmitField(_l('Remove password'), render_kw={"class": "pure-button pure-button-primary"})
render_anchor_tag_content = BooleanField(_l('Render anchor tag content'), default=False)
shared_diff_access = BooleanField(_l('Allow anonymous access to watch history page when password is enabled'), default=False, validators=[validators.Optional()])
strip_ignored_lines = BooleanField(_l('Strip ignored lines'))
rss_hide_muted_watches = BooleanField(_l('Hide muted watches from RSS feed'), default=True,
removepassword_button = SubmitField('Remove password', render_kw={"class": "pure-button pure-button-primary"})
render_anchor_tag_content = BooleanField('Render anchor tag content', default=False)
shared_diff_access = BooleanField('Allow access to view diff page when password is enabled', default=False, validators=[validators.Optional()])
rss_hide_muted_watches = BooleanField('Hide muted watches from RSS feed', default=True,
validators=[validators.Optional()])

rss_reader_mode = BooleanField(_l('Enable RSS reader mode '), default=False, validators=[validators.Optional()])
rss_diff_length = IntegerField(label=_l('Number of changes to show in watch RSS feed'),
render_kw={"style": "width: 5em;"},
validators=[validators.NumberRange(min=0, message=_l("Should contain zero or more attempts"))])

filter_failure_notification_threshold_attempts = IntegerField(_l('Number of times the filter can be missing before sending a notification'),
filter_failure_notification_threshold_attempts = IntegerField('Number of times the filter can be missing before sending a notification',
render_kw={"style": "width: 5em;"},
validators=[validators.NumberRange(min=0,
message=_l("Should contain zero or more attempts"))])

history_snapshot_max_length = IntegerField(_l('Number of history items per watch to keep'), render_kw={"style": "width: 5em;"}, validators=[validators.Optional(), validators.NumberRange(min=2)])
message="Should contain zero or more attempts")])
ui = FormField(globalSettingsApplicationUIForm)

ai = FormField(globalSettingsApplicationAIForm)


class globalSettingsForm(Form):
# Define these as FormFields/"sub forms", this way it matches the JSON storage
@@ -1048,9 +831,9 @@ class globalSettingsForm(Form):

requests = FormField(globalSettingsRequestForm)
application = FormField(globalSettingsApplicationForm)
save_button = SubmitField(_l('Save'), render_kw={"class": "pure-button pure-button-primary"})
save_button = SubmitField('Save', render_kw={"class": "pure-button button-small pure-button-primary"})


class extractDataForm(Form):
extract_regex = StringField(_l('RegEx to extract'), validators=[validators.DataRequired(), ValidateSinglePythonRegexString()])
extract_submit_button = SubmitField(_l('Extract as CSV'), render_kw={"class": "pure-button pure-button-primary"})
extract_regex = StringField('RegEx to extract', validators=[validators.Length(min=1, message="Needs a RegEx")])
extract_submit_button = SubmitField('Extract as CSV', render_kw={"class": "pure-button pure-button-primary"})

@@ -1,8 +1,6 @@
from functools import lru_cache

from loguru import logger
from lxml import etree
from typing import List
import html
import json
import re

@@ -11,10 +9,6 @@ TEXT_FILTER_LIST_LINE_SUFFIX = "<br>"
TRANSLATE_WHITESPACE_TABLE = str.maketrans('', '', '\r\n\t ')
PERL_STYLE_REGEX = r'^/(.*?)/([a-z]*)?$'

TITLE_RE = re.compile(r"<title[^>]*>(.*?)</title>", re.I | re.S)
META_CS = re.compile(r'<meta[^>]+charset=["\']?\s*([a-z0-9_\-:+.]+)', re.I)
META_CT = re.compile(r'<meta[^>]+http-equiv=["\']?content-type["\']?[^>]*content=["\'][^>]*charset=([a-z0-9_\-:+.]+)', re.I)
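# Illustrative sketch (editor's addition, not part of this diff): what these
# patterns are meant to capture. Both are compiled with re.I, so markup case does not matter.
#   META_CS.search('<meta charset="windows-1251">').group(1)
#   # -> 'windows-1251'
#   META_CT.search('<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-2">').group(1)
#   # -> 'iso-8859-2'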

# 'price' , 'lowPrice', 'highPrice' are usually under here
# All of those may or may not appear on different websites - I didn't find a way to do case-insensitive searching here
LD_JSON_PRODUCT_OFFER_SELECTORS = ["json:$..offers", "json:$..Offers"]
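# Illustrative sketch (editor's addition): against typical schema.org product markup such as
#   {"@type": "Product", "offers": {"@type": "Offer", "price": "23.99"}}
# the selector 'json:$..offers' descends to the offers object, which is where
# price / lowPrice / highPrice normally live.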
@@ -24,55 +18,8 @@ class JSONNotFound(ValueError):
ValueError.__init__(self, msg)


_DEFAULT_UNSAFE_XPATH3_FUNCTIONS = [
'unparsed-text',
'unparsed-text-lines',
'unparsed-text-available',
'doc',
'doc-available',
'environment-variable',
'available-environment-variables',
]


def _build_safe_xpath3_parser():
"""Return an XPath3Parser subclass with filesystem/environment access functions removed.

XPath 3.0 includes functions that can read arbitrary files or environment variables:
- unparsed-text / unparsed-text-lines / unparsed-text-available (file read)
- doc / doc-available (XML fetch from URI)
- environment-variable / available-environment-variables (env var leakage)

Subclassing gives us an independent symbol_table copy (not shared with the parent class),
so removing entries here does not affect XPath3Parser itself.

Override the blocked list via the XPATH_BLOCKED_FUNCTIONS environment variable
(comma-separated, e.g. "unparsed-text,doc,environment-variable").
"""
import os
from elementpath.xpath3 import XPath3Parser

class SafeXPath3Parser(XPath3Parser):
pass

env_override = os.getenv('XPATH_BLOCKED_FUNCTIONS')
if env_override is not None:
blocked = [f.strip() for f in env_override.split(',') if f.strip()]
else:
blocked = _DEFAULT_UNSAFE_XPATH3_FUNCTIONS

for _fn in blocked:
SafeXPath3Parser.symbol_table.pop(_fn, None)

return SafeXPath3Parser


# Module-level singleton — built once, reused everywhere.
SafeXPath3Parser = _build_safe_xpath3_parser()
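
# Illustrative sketch (editor's addition, not part of this diff): how the hardened
# parser is expected to behave; only the public elementpath API is assumed here.
#   import elementpath
#   from lxml import etree
#   tree = etree.HTML("<p>hi</p>")
#   elementpath.select(tree, '//p/text()', parser=SafeXPath3Parser)
#   # -> ['hi']  (normal queries pass through unchanged)
#   elementpath.select(tree, "unparsed-text('/etc/passwd')", parser=SafeXPath3Parser)
#   # -> raises an elementpath error, since 'unparsed-text' was popped from the
#   #    symbol table and is no longer a known function name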

# Doesn't look like Python supports forward-slash auto-enclosure in re.findall
# So convert it to inline flag "(?i)foobar" type configuration
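# Illustrative sketch (editor's addition; the function body is truncated in this hunk):
#   perl_style_slash_enclosed_regex_to_options('/foobar/i')
#   # -> expected to yield something like '(?i)foobar', so re.findall('(?i)foobar', text)
#   #    applies the case-insensitive flag without Perl-style /.../ enclosure support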
@lru_cache(maxsize=100)
def perl_style_slash_enclosed_regex_to_options(regex):

res = re.search(PERL_STYLE_REGEX, regex, re.IGNORECASE)
@@ -105,17 +52,13 @@ def include_filters(include_filters, html_content, append_pretty_line_formatting

return html_block

def subtractive_css_selector(css_selector, content):
def subtractive_css_selector(css_selector, html_content):
from bs4 import BeautifulSoup
soup = BeautifulSoup(content, "html.parser")
soup = BeautifulSoup(html_content, "html.parser")

# So that the elements don't shift their index, build a list of elements here which will be pointers to their place in the DOM
elements_to_remove = soup.select(css_selector)

if not elements_to_remove:
# Better to return the original than rebuild with BeautifulSoup
return content

# Then, remove them in a separate loop
for item in elements_to_remove:
item.decompose()
@@ -123,7 +66,6 @@ def subtractive_css_selector(css_selector, content):
return str(soup)

def subtractive_xpath_selector(selectors: List[str], html_content: str) -> str:
from lxml import etree
# Parse the HTML content using lxml
html_tree = etree.HTML(html_content)

@@ -135,10 +77,6 @@ def subtractive_xpath_selector(selectors: List[str], html_content: str) -> str:
# Collect elements for each selector
elements_to_remove.extend(html_tree.xpath(selector))

# If no elements were found, return the original HTML content
if not elements_to_remove:
return html_content

# Then, remove them in a separate loop
for element in elements_to_remove:
if element.getparent() is not None:  # Ensure the element has a parent before removing
@@ -156,7 +94,7 @@ def element_removal(selectors: List[str], html_content):
xpath_selectors = []

for selector in selectors:
if selector.strip().startswith(('xpath:', 'xpath1:', '//')):
if selector.startswith(('xpath:', 'xpath1:', '//')):
# Handle XPath selectors separately
xpath_selector = selector.removeprefix('xpath:').removeprefix('xpath1:')
xpath_selectors.append(xpath_selector)
@@ -219,132 +157,75 @@ def elementpath_tostring(obj):
return str(obj)

# Return str UTF-8 of matched rules
def xpath_filter(xpath_filter, html_content, append_pretty_line_formatting=False, is_xml=False):
"""

:param xpath_filter:
:param html_content:
:param append_pretty_line_formatting:
:param is_xml: set to true if is XML or is RSS (RSS is XML)
:return:
"""
def xpath_filter(xpath_filter, html_content, append_pretty_line_formatting=False, is_rss=False):
from lxml import etree, html
import elementpath
# xpath 2.0-3.1
from elementpath.xpath3 import XPath3Parser

parser = etree.HTMLParser()
tree = None
try:
if is_xml:
# So that we can keep CDATA for cdata_in_document_to_text() to process
parser = etree.XMLParser(strip_cdata=False)
# For XML/RSS content, use etree.fromstring to properly handle XML declarations
tree = etree.fromstring(html_content.encode('utf-8') if isinstance(html_content, str) else html_content, parser=parser)
if is_rss:
# So that we can keep CDATA for cdata_in_document_to_text() to process
parser = etree.XMLParser(strip_cdata=False)

tree = html.fromstring(bytes(html_content, encoding='utf-8'), parser=parser)
html_block = ""

r = elementpath.select(tree, xpath_filter.strip(), namespaces={'re': 'http://exslt.org/regular-expressions'}, parser=XPath3Parser)
#@note: //title/text() won't work where <title>CDATA..

if type(r) != list:
r = [r]

for element in r:
# When there's more than 1 match, then add the suffix to separate each line
# And where the matched result doesn't include something that will cause Inscriptis to add a newline
# (This way each 'match' reliably has a new-line in the diff)
# Divs are converted to 4 whitespaces by inscriptis
if append_pretty_line_formatting and len(html_block) and (not hasattr( element, 'tag' ) or not element.tag in (['br', 'hr', 'div', 'p'])):
html_block += TEXT_FILTER_LIST_LINE_SUFFIX

if type(element) == str:
html_block += element
elif issubclass(type(element), etree._Element) or issubclass(type(element), etree._ElementTree):
html_block += etree.tostring(element, pretty_print=True).decode('utf-8')
else:
tree = html.fromstring(html_content, parser=parser)
html_block = ""
html_block += elementpath_tostring(element)

# Build namespace map for XPath queries
namespaces = {'re': 'http://exslt.org/regular-expressions'}

# Handle default namespace in documents (common in RSS/Atom feeds, but can occur in any XML)
# XPath spec: unprefixed element names have no namespace, not the default namespace
# Solution: Register the default namespace with empty string prefix in elementpath
# This is primarily for RSS/Atom feeds but works for any XML with default namespace
if hasattr(tree, 'nsmap') and tree.nsmap and None in tree.nsmap:
# Register the default namespace with empty string prefix for elementpath
# This allows //title to match elements in the default namespace
namespaces[''] = tree.nsmap[None]
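# Illustrative sketch (editor's addition): for an Atom feed such as
#   <feed xmlns="http://www.w3.org/2005/Atom"><title>Demo</title></feed>
# tree.nsmap is {None: 'http://www.w3.org/2005/Atom'}, so after this step
# namespaces also maps '' to that URI and the unprefixed query
# '//title/text()' can resolve to 'Demo' through elementpath.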

r = elementpath.select(tree, xpath_filter.strip(), namespaces=namespaces, parser=SafeXPath3Parser)
#@note: //title/text() now works with default namespaces (fixed by registering '' prefix)
#@note: //title/text() won't work where <title>CDATA.. (use cdata_in_document_to_text first)

if type(r) != list:
r = [r]

for element in r:
# When there's more than 1 match, then add the suffix to separate each line
# And where the matched result doesn't include something that will cause Inscriptis to add a newline
# (This way each 'match' reliably has a new-line in the diff)
# Divs are converted to 4 whitespaces by inscriptis
if append_pretty_line_formatting and len(html_block) and (not hasattr( element, 'tag' ) or not element.tag in (['br', 'hr', 'div', 'p'])):
html_block += TEXT_FILTER_LIST_LINE_SUFFIX

if type(element) == str:
html_block += element
elif issubclass(type(element), etree._Element) or issubclass(type(element), etree._ElementTree):
# Use 'xml' method for RSS/XML content, 'html' for HTML content
# parser will be XMLParser if we detected XML content
method = 'xml' if (is_xml or isinstance(parser, etree.XMLParser)) else 'html'
html_block += etree.tostring(element, pretty_print=True, method=method, encoding='unicode')
else:
html_block += elementpath_tostring(element)

# Drop element references before the finally block so tree.clear() can release
# the libxml2 document immediately (elements pin the C-level doc via refcount).
del r
return html_block
finally:
# Explicitly clear the tree to free memory
# lxml trees can hold significant memory, especially with large documents
if tree is not None:
tree.clear()
return html_block

# Return str UTF-8 of matched rules
# 'xpath1:'
def xpath1_filter(xpath_filter, html_content, append_pretty_line_formatting=False, is_xml=False):
def xpath1_filter(xpath_filter, html_content, append_pretty_line_formatting=False, is_rss=False):
from lxml import etree, html

parser = None
tree = None
try:
if is_xml:
# So that we can keep CDATA for cdata_in_document_to_text() to process
parser = etree.XMLParser(strip_cdata=False)
# For XML/RSS content, use etree.fromstring to properly handle XML declarations
tree = etree.fromstring(html_content.encode('utf-8') if isinstance(html_content, str) else html_content, parser=parser)
if is_rss:
# So that we can keep CDATA for cdata_in_document_to_text() to process
parser = etree.XMLParser(strip_cdata=False)

tree = html.fromstring(bytes(html_content, encoding='utf-8'), parser=parser)
html_block = ""

r = tree.xpath(xpath_filter.strip(), namespaces={'re': 'http://exslt.org/regular-expressions'})
#@note: //title/text() won't work where <title>CDATA..

for element in r:
# When there's more than 1 match, then add the suffix to separate each line
# And where the matched result doesn't include something that will cause Inscriptis to add a newline
# (This way each 'match' reliably has a new-line in the diff)
# Divs are converted to 4 whitespaces by inscriptis
if append_pretty_line_formatting and len(html_block) and (not hasattr(element, 'tag') or not element.tag in (['br', 'hr', 'div', 'p'])):
html_block += TEXT_FILTER_LIST_LINE_SUFFIX

# Some kind of text, UTF-8 or other
if isinstance(element, (str, bytes)):
html_block += element
else:
tree = html.fromstring(html_content, parser=parser)
html_block = ""
# Return the HTML which will get parsed as text
html_block += etree.tostring(element, pretty_print=True).decode('utf-8')

# Build namespace map for XPath queries
namespaces = {'re': 'http://exslt.org/regular-expressions'}

# NOTE: lxml's native xpath() does NOT support empty string prefix for default namespace
# For documents with default namespace (RSS/Atom feeds), users must use:
# - local-name(): //*[local-name()='title']/text()
# - Or use xpath_filter (not xpath1_filter) which supports default namespaces
# XPath spec: unprefixed element names have no namespace, not the default namespace

r = tree.xpath(xpath_filter.strip(), namespaces=namespaces)
#@note: xpath1 (lxml) does NOT automatically handle default namespaces
#@note: Use //*[local-name()='element'] or switch to xpath_filter for default namespace support
#@note: //title/text() won't work where <title>CDATA.. (use cdata_in_document_to_text first)
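# Illustrative sketch (editor's addition): for <feed xmlns="http://www.w3.org/2005/Atom">
# documents, tree.xpath('//title/text()') returns [] here, while the workaround
#   tree.xpath("//*[local-name()='title']/text()")
# still matches, because local-name() compares element names without their namespace.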

for element in r:
# When there's more than 1 match, then add the suffix to separate each line
# And where the matched result doesn't include something that will cause Inscriptis to add a newline
# (This way each 'match' reliably has a new-line in the diff)
# Divs are converted to 4 whitespaces by inscriptis
if append_pretty_line_formatting and len(html_block) and (not hasattr(element, 'tag') or not element.tag in (['br', 'hr', 'div', 'p'])):
html_block += TEXT_FILTER_LIST_LINE_SUFFIX

# Some kind of text, UTF-8 or other
if isinstance(element, (str, bytes)):
html_block += element
else:
# Return the HTML/XML which will get parsed as text
# Use 'xml' method for RSS/XML content, 'html' for HTML content
# parser will be XMLParser if we detected XML content
method = 'xml' if (is_xml or isinstance(parser, etree.XMLParser)) else 'html'
html_block += etree.tostring(element, pretty_print=True, method=method, encoding='unicode')

return html_block
finally:
# Explicitly clear the tree to free memory
# lxml trees can hold significant memory, especially with large documents
if tree is not None:
tree.clear()
return html_block

# Extract/find element
def extract_element(find='title', html_content=''):
@@ -408,104 +289,70 @@ def _get_stripped_text_from_json_match(match):

return stripped_text_from_html

def extract_json_blob_from_html(content, ensure_is_ldjson_info_type, json_filter):
from bs4 import BeautifulSoup
stripped_text_from_html = ''

# Foreach <script json></script> blob.. just return the first that matches json_filter
# As a last resort, try to parse the whole <body>
soup = BeautifulSoup(content, 'html.parser')

if ensure_is_ldjson_info_type:
bs_result = soup.find_all('script', {"type": "application/ld+json"})
else:
bs_result = soup.find_all('script')
bs_result += soup.find_all('body')

bs_jsons = []

for result in bs_result:
# result.text is how bs4 magically strips JSON from the body
content_start = result.text.lstrip("\ufeff").strip()[:100] if result.text else ''
# Skip empty tags, and things that don't even look like JSON
if not result.text or not (content_start[0] == '{' or content_start[0] == '['):
continue
try:
json_data = json.loads(result.text)
bs_jsons.append(json_data)
except json.JSONDecodeError:
# Skip objects which cannot be parsed
continue

if not bs_jsons:
raise JSONNotFound("No parsable JSON found in this document")

for json_data in bs_jsons:
stripped_text_from_html = _parse_json(json_data, json_filter)

if ensure_is_ldjson_info_type:
# Could sometimes be list, string or something else random
if isinstance(json_data, dict):
# If it has LD JSON 'key' @type, and @type is 'product', and something was found for the search
# (Some sites have multiple of the same ld+json @type='product', but some have the review part, some have the 'price' part)
# @type could also be a list although non-standard ("@type": ["Product", "SubType"],)
# LD_JSON auto-extract also requires some content PLUS the ldjson to be present
# 1833 - could be either str or dict, should not be anything else

t = json_data.get('@type')
if t and stripped_text_from_html:

if isinstance(t, str) and t.lower() == ensure_is_ldjson_info_type.lower():
break
# The non-standard part, some have a list
elif isinstance(t, list):
if ensure_is_ldjson_info_type.lower() in [x.lower().strip() for x in t]:
break

elif stripped_text_from_html:
break

return stripped_text_from_html

# content - json
# json_filter - ie json:$..price
# ensure_is_ldjson_info_type - str "product", optional, "@type == product" (I don't know how to do that as a json selector)
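# Illustrative usage sketch (editor's addition; the exact output shape depends on
# _parse_json, which is outside this hunk):
#   extract_json_as_string('{"offers": {"price": "23.99"}}', 'json:$..price')
#   # -> the matched price value, rendered as a string
#   extract_json_as_string('<html>...<script type="application/ld+json">...</script>...</html>',
#                          'json:$..price', ensure_is_ldjson_info_type='product')
#   # -> falls through to the ld+json <script> scan handled below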
def extract_json_as_string(content, json_filter, ensure_is_ldjson_info_type=None):
from bs4 import BeautifulSoup

stripped_text_from_html = False
# https://github.com/dgtlmoon/changedetection.io/pull/2041#issuecomment-1848397161w
# Try to parse/filter out the JSON, if we get some parser error, then maybe it's embedded within HTML tags
try:
# .lstrip("\ufeff") strips the ByteOrderMark from UTF-8 and still lets the UTF work
stripped_text_from_html = _parse_json(json.loads(content.lstrip("\ufeff") ), json_filter)
except json.JSONDecodeError as e:
logger.warning(str(e))

# Looks like clean JSON, don't bother extracting from HTML
# Foreach <script json></script> blob.. just return the first that matches json_filter
# As a last resort, try to parse the whole <body>
soup = BeautifulSoup(content, 'html.parser')

content_start = content.lstrip("\ufeff").strip()[:100]
if ensure_is_ldjson_info_type:
bs_result = soup.find_all('script', {"type": "application/ld+json"})
else:
bs_result = soup.find_all('script')
bs_result += soup.find_all('body')

if content_start[0] == '{' or content_start[0] == '[':
try:
# .lstrip("\ufeff") strips the ByteOrderMark from UTF-8 and still lets the UTF work
stripped_text_from_html = _parse_json(json.loads(content.lstrip("\ufeff")), json_filter)
except json.JSONDecodeError as e:
logger.warning(f"Error processing JSON {content[:20]}...{str(e)})")
else:
# Check for JSONP wrapper: someCallback({...}) or some.namespace({...})
# Server may claim application/json but actually return JSONP
jsonp_match = re.match(r'^\w[\w.]*\s*\((.+)\)\s*;?\s*$', content.lstrip("\ufeff").strip(), re.DOTALL)
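# Illustrative sketch (editor's addition): this pattern accepts callback wrappers such as
#   jQuery.cb123({"price": 9.99});
# where '\w[\w.]*' matches the (possibly dotted) callback name and group(1)
# captures the inner '{"price": 9.99}' that json.loads() receives below.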
if jsonp_match:
bs_jsons = []
for result in bs_result:
# Skip empty tags, and things that don't even look like JSON
if not result.text or '{' not in result.text:
continue
try:
inner = jsonp_match.group(1).strip()
logger.warning(f"Content looks like JSONP, attempting to extract inner JSON for filter '{json_filter}'")
stripped_text_from_html = _parse_json(json.loads(inner), json_filter)
except json.JSONDecodeError as e:
logger.warning(f"Error processing JSONP inner content {content[:20]}...{str(e)})")
json_data = json.loads(result.text)
bs_jsons.append(json_data)
except json.JSONDecodeError:
# Skip objects which cannot be parsed
continue

if not stripped_text_from_html:
# Probably something else, go fish inside for it
try:
stripped_text_from_html = extract_json_blob_from_html(content=content,
ensure_is_ldjson_info_type=ensure_is_ldjson_info_type,
json_filter=json_filter)
except json.JSONDecodeError as e:
logger.warning(f"Error processing JSON while extracting JSON from HTML blob {content[:20]}...{str(e)})")
if not bs_jsons:
raise JSONNotFound("No parsable JSON found in this document")

for json_data in bs_jsons:
stripped_text_from_html = _parse_json(json_data, json_filter)

if ensure_is_ldjson_info_type:
# Could sometimes be list, string or something else random
if isinstance(json_data, dict):
# If it has LD JSON 'key' @type, and @type is 'product', and something was found for the search
# (Some sites have multiple of the same ld+json @type='product', but some have the review part, some have the 'price' part)
# @type could also be a list although non-standard ("@type": ["Product", "SubType"],)
# LD_JSON auto-extract also requires some content PLUS the ldjson to be present
# 1833 - could be either str or dict, should not be anything else

t = json_data.get('@type')
if t and stripped_text_from_html:

if isinstance(t, str) and t.lower() == ensure_is_ldjson_info_type.lower():
break
# The non-standard part, some have a list
elif isinstance(t, list):
if ensure_is_ldjson_info_type.lower() in [x.lower().strip() for x in t]:
break

elif stripped_text_from_html:
break

if not stripped_text_from_html:
# Re 265 - Just return an empty string when filter not found
@@ -524,13 +371,7 @@ def strip_ignore_text(content, wordlist, mode="content"):
ignore_regex_multiline = []
ignored_lines = []

if not content:
return ''

for k in wordlist:
# Skip empty strings to avoid matching everything
if not k or not k.strip():
continue
# Is it a regex?
res = re.search(PERL_STYLE_REGEX, k, re.IGNORECASE)
if res:
@@ -599,18 +440,6 @@ def cdata_in_document_to_text(html_content: str, render_anchor_tag_content=False


def html_to_text(html_content: str, render_anchor_tag_content=False, is_rss=False, timeout=10) -> str:
"""
Convert HTML content to plain text using inscriptis.

Thread-Safety: This function uses inscriptis.get_text() which internally calls
lxml.html.fromstring() with the default parser. Testing with 50 concurrent threads
confirms this approach is thread-safe and produces deterministic output.

Alternative Approach Rejected: An explicit HTMLParser instance (thread-local or fresh)
would also be thread-safe, but was found to break change detection logic in subtle ways
(test_check_basic_change_detection_functionality). The default parser provides correct
and reliable behavior.
"""
from inscriptis import get_text
from inscriptis.model.config import ParserConfig

@@ -621,33 +450,10 @@ def html_to_text(html_content: str, render_anchor_tag_content=False, is_rss=Fals
)
else:
parser_config = None

if is_rss:
html_content = re.sub(r'<title([\s>])', r'<h1\1', html_content)
html_content = re.sub(r'</title>', r'</h1>', html_content)
else:
# Use BS4 html.parser to strip bloat — SPAs often dump 10MB+ of CSS/JS into <head>,
# causing inscriptis to silently give up. Regex-based stripping is unsafe because tags
# can appear inside JSON data attributes with JS-escaped closing tags (e.g. <\/script>),
# causing the regex to scan past the intended close and eat real page content.
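# Illustrative sketch (editor's addition, hypothetical markup): given
#   <div data-tpl='{"html":"<script>x<\/script>"}'>Real content</div><script>app()</script>
# a regex like r'<script.*?</script>' starts matching at the '<script>' inside the
# attribute value and consumes everything up to the real closing tag, eating
# 'Real content' with it; html.parser tokenizes attributes properly and avoids this.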
from bs4 import BeautifulSoup
soup = BeautifulSoup(html_content, 'html.parser')
# Strip tags that inscriptis cannot render as meaningful text and which can be very large.
# svg/math: produce path-data/MathML garbage; canvas/iframe/template: no inscriptis handlers.
# video/audio/picture are kept — they may contain meaningful fallback text or captions.
for tag in soup.find_all(['head', 'script', 'style', 'noscript', 'svg',
'math', 'canvas', 'iframe', 'template']):
tag.decompose()

# SPAs often use <body style="display:none"> to hide content until JS loads.
# inscriptis respects CSS display rules, so strip hiding styles from the body tag.
body_tag = soup.find('body')
if body_tag and body_tag.get('style'):
style = body_tag['style']
if re.search(r'\b(?:display\s*:\s*none|visibility\s*:\s*hidden)\b', style, re.IGNORECASE):
logger.debug(f"html_to_text: Removing hiding styles from body tag (found: '{style}')")
del body_tag['style']

html_content = str(soup)

text_content = get_text(html_content, config=parser_config)
return text_content
@@ -704,43 +510,3 @@ def get_triggered_text(content, trigger_text):
i += 1

return triggered_text


def extract_title(data: bytes | str, sniff_bytes: int = 2048, scan_chars: int = 8192) -> str | None:
try:
# Only decode/process the prefix we need for title extraction
match data:
case bytes() if data.startswith((b"\xff\xfe", b"\xfe\xff")):
prefix = data[:scan_chars * 2].decode("utf-16", errors="replace")
case bytes() if data.startswith((b"\xff\xfe\x00\x00", b"\x00\x00\xfe\xff")):
prefix = data[:scan_chars * 4].decode("utf-32", errors="replace")
case bytes():
try:
prefix = data[:scan_chars].decode("utf-8")
except UnicodeDecodeError:
try:
head = data[:sniff_bytes].decode("ascii", errors="ignore")
if m := (META_CS.search(head) or META_CT.search(head)):
enc = m.group(1).lower()
else:
enc = "cp1252"
prefix = data[:scan_chars * 2].decode(enc, errors="replace")
except Exception as e:
logger.error(f"Title extraction encoding detection failed: {e}")
return None
case str():
prefix = data[:scan_chars] if len(data) > scan_chars else data
case _:
logger.error(f"Title extraction received unsupported data type: {type(data)}")
return None

# Search only in the prefix
if m := TITLE_RE.search(prefix):
title = html.unescape(" ".join(m.group(1).split())).strip()
# Some safe limit
return title[:2000]
return None

except Exception as e:
logger.error(f"Title extraction failed: {e}")
return None
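
# Illustrative usage sketch (editor's addition):
#   extract_title("<html><head><title>Hello &amp;\n   goodbye</title></head>")
#   # -> 'Hello & goodbye'  (whitespace collapsed, entities unescaped)
#   extract_title(b"\xff\xfe" + "<title>hi</title>".encode("utf-16-le"))
#   # -> 'hi'  (BOM-sniffed and decoded as UTF-16 before the regex scan)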
|
||||
@@ -1,113 +0,0 @@
|
||||
"""
|
||||
URL redirect validation module for preventing open redirect vulnerabilities.
|
||||
|
||||
This module provides functionality to safely validate redirect URLs, ensuring they:
|
||||
1. Point to internal routes only (no external redirects)
|
||||
2. Are properly normalized (preventing browser parsing differences)
|
||||
3. Match registered Flask routes (no fake/non-existent pages)
|
||||
4. Are fully logged for security monitoring
|
||||
|
||||
References:
|
||||
- https://flask-login.readthedocs.io/ (safe redirect patterns)
|
||||
- https://blog.miguelgrinberg.com/post/the-flask-mega-tutorial-part-v-user-logins
|
||||
- https://www.pythonkitchen.com/how-prevent-open-redirect-vulnerab-flask/
|
||||
"""
|
||||
|
||||
from urllib.parse import urlparse, urljoin
|
||||
from flask import request
|
||||
from loguru import logger
|
||||
|
||||
|
||||
def is_safe_url(target, app):
|
||||
"""
|
||||
Validate that a redirect URL is safe to prevent open redirect vulnerabilities.
|
||||
|
||||
This follows Flask/Werkzeug best practices by ensuring the redirect URL:
|
||||
1. Is a relative path starting with exactly one '/'
|
||||
2. Does not start with '//' (double-slash attack)
|
||||
3. Has no external protocol handlers
|
||||
4. Points to a valid registered route in the application
|
||||
5. Is properly normalized to prevent browser parsing differences
|
||||
|
||||
Args:
|
||||
target: The URL to validate (e.g., '/settings', '/login#top')
|
||||
app: The Flask application instance (needed for route validation)
|
||||
|
||||
Returns:
|
||||
bool: True if the URL is safe for redirection, False otherwise
|
||||
|
||||
Examples:
|
||||
>>> is_safe_url('/settings', app)
|
||||
True
|
||||
>>> is_safe_url('//evil.com', app)
|
||||
False
|
||||
>>> is_safe_url('/settings#general', app)
|
||||
True
|
||||
>>> is_safe_url('/fake-page', app)
|
||||
False
|
||||
"""
|
||||
if not target:
|
||||
return False
|
||||
|
||||
# Normalize the URL to prevent browser parsing differences
|
||||
# Strip whitespace and replace backslashes (which some browsers interpret as forward slashes)
|
||||
target = target.strip()
|
||||
target = target.replace('\\', '/')
|
||||
|
||||
# First, check if it starts with // or more (double-slash attack)
|
||||
if target.startswith('//'):
|
||||
logger.warning(f"Blocked redirect attempt with double-slash: {target}")
|
||||
return False
|
||||
|
||||
# Parse the URL to check for scheme and netloc
|
||||
parsed = urlparse(target)
|
||||
|
||||
# Block any URL with a scheme (http://, https://, javascript:, etc.)
|
||||
if parsed.scheme:
|
||||
logger.warning(f"Blocked redirect attempt with scheme: {target}")
|
||||
return False
|
||||
|
||||
# Block any URL with a network location (netloc)
|
||||
# This catches patterns like //evil.com, user@host, etc.
|
||||
if parsed.netloc:
|
||||
logger.warning(f"Blocked redirect attempt with netloc: {target}")
|
||||
return False
|
||||
|
||||
# At this point, we have a relative URL with no scheme or netloc
|
||||
# Use urljoin to resolve it and verify it points to the same host
|
||||
ref_url = urlparse(request.host_url)
|
||||
test_url = urlparse(urljoin(request.host_url, target))
|
||||
|
||||
# Check: ensure the resolved URL has the same netloc as current host
|
||||
if not (test_url.scheme in ('http', 'https') and ref_url.netloc == test_url.netloc):
|
||||
logger.warning(f"Blocked redirect attempt with mismatched netloc: {target}")
|
||||
return False
|
||||
|
||||
# Additional validation: Check if the URL matches a registered route
|
||||
# This prevents redirects to non-existent pages or unintended endpoints
|
||||
try:
|
||||
# Get the path without query string and fragment
|
||||
# Fragments (like #general) are automatically stripped by urlparse
|
||||
path = parsed.path
|
||||
|
||||
# Create a URL adapter bound to the server name
|
||||
adapter = app.url_map.bind(ref_url.netloc)
|
||||
|
||||
# Try to match the path to a registered route
|
||||
# This will raise NotFound if the route doesn't exist
|
||||
endpoint, values = adapter.match(path, return_rule=False)
|
||||
|
||||
# Block redirects to static file endpoints - these are catch-all routes
|
||||
# that would match arbitrary paths, potentially allowing unintended redirects
|
||||
if endpoint in ('static_content', 'static', 'static_flags'):
|
||||
logger.warning(f"Blocked redirect to static endpoint: {target}")
|
||||
return False
|
||||
|
||||
# Successfully matched a valid route
|
||||
logger.debug(f"Validated safe redirect to endpoint '{endpoint}': {target}")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
# Route doesn't exist or can't be matched
|
||||
logger.warning(f"Blocked redirect to non-existent route: {target} (error: {e})")
|
||||
return False
|
||||
@@ -1,22 +0,0 @@
|
||||
"""
|
||||
Jinja2 custom extensions and safe rendering utilities.
|
||||
"""
|
||||
from .extensions.TimeExtension import TimeExtension
|
||||
from .safe_jinja import (
|
||||
render,
|
||||
render_fully_escaped,
|
||||
create_jinja_env,
|
||||
JINJA2_MAX_RETURN_PAYLOAD_SIZE,
|
||||
DEFAULT_JINJA2_EXTENSIONS,
|
||||
)
|
||||
from .plugins.regex import regex_replace
|
||||
|
||||
__all__ = [
|
||||
'TimeExtension',
|
||||
'render',
|
||||
'render_fully_escaped',
|
||||
'create_jinja_env',
|
||||
'JINJA2_MAX_RETURN_PAYLOAD_SIZE',
|
||||
'DEFAULT_JINJA2_EXTENSIONS',
|
||||
'regex_replace',
|
||||
]
|
||||
@@ -1,221 +0,0 @@
"""
Jinja2 TimeExtension - Custom date/time handling for templates.

This extension provides the {% now %} tag for Jinja2 templates, offering timezone-aware
date/time formatting with support for time offsets.

Why This Extension Exists:
    The Arrow library has a now() function (arrow.now()), but Jinja2 templates cannot
    directly call Python functions - they need extensions or filters to expose functionality.

    This TimeExtension serves as a Jinja2-to-Arrow bridge that:

    1. Makes Arrow accessible in templates - Jinja2 requires registering functions/tags
       through extensions. You cannot use arrow.now() directly in a template.

    2. Provides template-friendly syntax - Instead of complex Python code, you get clean tags:
       {% now 'UTC' %}
       {% now 'UTC' + 'hours=2' %}
       {% now 'Europe/London', '%Y-%m-%d' %}

    3. Adds convenience features on top of Arrow:
       - Default timezone from environment variable (TZ) or config
       - Default datetime format configuration
       - Offset syntax parsing: 'hours=2,minutes=30' -> shift(hours=2, minutes=30)
       - Empty string timezone support to use configured defaults

    4. Maintains security - Works within Jinja2's sandboxed environment so users
       cannot access arbitrary Python code or objects.

    Essentially, this is a Jinja2 wrapper around arrow.now() and arrow.shift() that
    provides user-friendly template syntax while maintaining security.

Basic Usage:
    {% now 'UTC' %}
    # Output: Wed, 09 Dec 2015 23:33:01

Custom Format:
    {% now 'UTC', '%Y-%m-%d %H:%M:%S' %}
    # Output: 2015-12-09 23:33:01

Timezone Support:
    {% now 'America/New_York' %}
    {% now 'Europe/London' %}
    {% now '' %}  # Uses default timezone from environment.default_timezone

Time Offsets (Addition):
    {% now 'UTC' + 'hours=2' %}
    {% now 'UTC' + 'hours=2,minutes=30' %}
    {% now 'UTC' + 'days=1,hours=2,minutes=15,seconds=10' %}

Time Offsets (Subtraction):
    {% now 'UTC' - 'minutes=11' %}
    {% now 'UTC' - 'days=2,minutes=33,seconds=1' %}

Time Offsets with Custom Format:
    {% now 'UTC' + 'hours=2', '%Y-%m-%d %H:%M:%S' %}
    # Output: 2015-12-10 01:33:01

Weekday Support (for finding next/previous weekday):
    {% now 'UTC' + 'weekday=0' %}  # Next Monday (0=Monday, 6=Sunday)
    {% now 'UTC' + 'weekday=4' %}  # Next Friday

Configuration:
    - Default timezone: Set via TZ environment variable or override environment.default_timezone
    - Default format: '%a, %d %b %Y %H:%M:%S' (can be overridden via environment.datetime_format)

Environment Customization:
    from changedetectionio.jinja2_custom import create_jinja_env

    jinja2_env = create_jinja_env()
    jinja2_env.default_timezone = 'America/New_York'  # Override default timezone
    jinja2_env.datetime_format = '%Y-%m-%d %H:%M'     # Override default format

Supported Offset Parameters:
    - years, months, weeks, days
    - hours, minutes, seconds, microseconds
    - weekday (0=Monday through 6=Sunday, must be integer)

Note:
    This extension uses the Arrow library for timezone-aware datetime handling.
    All timezone names should be valid IANA timezone identifiers (e.g., 'America/New_York').
"""
import arrow

from jinja2 import nodes
from jinja2.ext import Extension
import os


class TimeExtension(Extension):
    """
    Jinja2 Extension providing the {% now %} tag for timezone-aware date/time rendering.

    This extension adds two attributes to the Jinja2 environment:
    - datetime_format: Default strftime format string (default: '%a, %d %b %Y %H:%M:%S')
    - default_timezone: Default timezone for rendering (default: TZ env var or 'UTC')

    Both can be overridden after environment creation by setting the attributes directly.
    """

    tags = {'now'}

    def __init__(self, environment):
        """Jinja2 Extension constructor."""
        super().__init__(environment)

        environment.extend(
            datetime_format='%a, %d %b %Y %H:%M:%S',
            default_timezone=os.getenv('TZ', 'UTC').strip()
        )

    def _datetime(self, timezone, operator, offset, datetime_format):
        """
        Get the current datetime with a time offset applied.

        Args:
            timezone: IANA timezone identifier (e.g., 'UTC', 'America/New_York') or empty string for default
            operator: '+' for addition or '-' for subtraction
            offset: Comma-separated offset parameters (e.g., 'hours=2,minutes=30')
            datetime_format: strftime format string or None to use environment default

        Returns:
            Formatted datetime string with offset applied

        Example:
            _datetime('UTC', '+', 'hours=2,minutes=30', '%Y-%m-%d %H:%M:%S')
            # Returns current time + 2.5 hours
        """
        # Use the default timezone if none specified
        if not timezone or timezone == '':
            timezone = self.environment.default_timezone

        d = arrow.now(timezone)

        # Parse shift params from the offset string and include the operator
        shift_params = {}
        for param in offset.split(','):
            interval, value = param.split('=')
            shift_params[interval.strip()] = float(operator + value.strip())

        # Fix: the weekday parameter cannot be a float
        if 'weekday' in shift_params:
            shift_params['weekday'] = int(shift_params['weekday'])

        d = d.shift(**shift_params)

        if datetime_format is None:
            datetime_format = self.environment.datetime_format
        return d.strftime(datetime_format)

    def _now(self, timezone, datetime_format):
        """
        Get the current datetime without any offset.

        Args:
            timezone: IANA timezone identifier (e.g., 'UTC', 'America/New_York') or empty string for default
            datetime_format: strftime format string or None to use environment default

        Returns:
            Formatted datetime string for the current time

        Example:
            _now('America/New_York', '%Y-%m-%d %H:%M:%S')
            # Returns current time in the New York timezone
        """
        # Use the default timezone if none specified
        if not timezone or timezone == '':
            timezone = self.environment.default_timezone

        if datetime_format is None:
            datetime_format = self.environment.datetime_format
        return arrow.now(timezone).strftime(datetime_format)

    def parse(self, parser):
        """
        Parse the {% now %} tag and generate appropriate AST nodes.

        This method is called by Jinja2 when it encounters a {% now %} tag.
        It parses the tag syntax and determines whether to call _now() or _datetime()
        based on whether offset operations (+ or -) are present.

        Supported syntax:
            {% now 'timezone' %}                      -> calls _now()
            {% now 'timezone', 'format' %}            -> calls _now()
            {% now 'timezone' + 'offset' %}           -> calls _datetime()
            {% now 'timezone' + 'offset', 'format' %} -> calls _datetime()
            {% now 'timezone' - 'offset', 'format' %} -> calls _datetime()

        Args:
            parser: Jinja2 parser instance

        Returns:
            nodes.Output: AST output node containing the formatted datetime string
        """
        lineno = next(parser.stream).lineno

        node = parser.parse_expression()

        if parser.stream.skip_if('comma'):
            datetime_format = parser.parse_expression()
        else:
            datetime_format = nodes.Const(None)

        if isinstance(node, nodes.Add):
            call_method = self.call_method(
                '_datetime',
                [node.left, nodes.Const('+'), node.right, datetime_format],
                lineno=lineno,
            )
        elif isinstance(node, nodes.Sub):
            call_method = self.call_method(
                '_datetime',
                [node.left, nodes.Const('-'), node.right, datetime_format],
                lineno=lineno,
            )
        else:
            call_method = self.call_method(
                '_now',
                [node, datetime_format],
                lineno=lineno,
            )
        return nodes.Output([call_method], lineno=lineno)
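A short sketch of the {% now %} tag in action, using the create_jinja_env() helper this package exports (defined later in this diff); the printed values depend on the clock, so the comments are illustrative only:

# Sketch: rendering the {% now %} tag through the package's sandboxed environment.
from changedetectionio.jinja2_custom import create_jinja_env

env = create_jinja_env()
env.datetime_format = '%Y-%m-%d %H:%M'   # override the default output format

print(env.from_string("{% now 'UTC' %}").render())                       # e.g. 2025-01-01 12:00
print(env.from_string("{% now 'UTC' + 'hours=2', '%H:%M' %}").render())  # two hours ahead
print(env.from_string("{% now '' %}").render())                          # falls back to TZ / default_timezone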
@@ -1,6 +0,0 @@
"""
Jinja2 custom filter plugins for changedetection.io
"""
from .regex import regex_replace

__all__ = ['regex_replace']
@@ -1,98 +0,0 @@
"""
Regex filter plugin for Jinja2 templates.

Provides the regex_replace filter for pattern-based string replacements in templates.
"""
import re
import signal
from loguru import logger


def regex_replace(value: str, pattern: str, replacement: str = '', count: int = 0) -> str:
    """
    Replace occurrences of a regex pattern in a string.

    Security: Protected against ReDoS (Regular Expression Denial of Service) attacks:
    - Limits input value size to prevent excessive processing
    - Uses a timeout mechanism to prevent runaway regex operations
    - Validates pattern complexity to prevent catastrophic backtracking

    Args:
        value: The input string to perform replacements on
        pattern: The regex pattern to search for
        replacement: The replacement string (default: '')
        count: Maximum number of replacements (0 = replace all, default: 0)

    Returns:
        String with replacements applied, or the original value on error

    Example:
        {{ "hello world" | regex_replace("world", "universe") }}
        {{ diff | regex_replace("<td>([^<]+)</td><td>([^<]+)</td>", "Label1: \\1\\nLabel2: \\2") }}

    Security limits:
        - Maximum input size: 10MB
        - Maximum pattern length: 500 characters
        - Operation timeout: 10 seconds
        - Dangerous nested quantifier patterns are rejected
    """
    # Security limits
    MAX_INPUT_SIZE = 1024 * 1024 * 10  # 10MB max input size
    MAX_PATTERN_LENGTH = 500           # Maximum regex pattern length
    REGEX_TIMEOUT_SECONDS = 10         # Maximum time for a regex operation

    # Validate input sizes
    value_str = str(value)
    if len(value_str) > MAX_INPUT_SIZE:
        logger.warning(f"regex_replace: Input too large ({len(value_str)} bytes), truncating")
        value_str = value_str[:MAX_INPUT_SIZE]

    if len(pattern) > MAX_PATTERN_LENGTH:
        logger.warning(f"regex_replace: Pattern too long ({len(pattern)} chars), rejecting")
        return value_str

    # Check for potentially dangerous patterns (basic checks)
    # Nested quantifiers like (a+)+ can cause catastrophic backtracking
    dangerous_patterns = [
        r'\([^)]*\+[^)]*\)\+',  # (x+)+
        r'\([^)]*\*[^)]*\)\+',  # (x*)+
        r'\([^)]*\+[^)]*\)\*',  # (x+)*
        r'\([^)]*\*[^)]*\)\*',  # (x*)*
    ]

    for dangerous in dangerous_patterns:
        if re.search(dangerous, pattern):
            logger.warning(f"regex_replace: Potentially dangerous pattern detected: {pattern}")
            return value_str

    def timeout_handler(signum, frame):
        raise TimeoutError("Regex operation timed out")

    try:
        # Set up a timeout for the regex operation (Unix-like systems only)
        # This prevents ReDoS attacks
        old_handler = None
        if hasattr(signal, 'SIGALRM'):
            old_handler = signal.signal(signal.SIGALRM, timeout_handler)
            signal.alarm(REGEX_TIMEOUT_SECONDS)

        try:
            result = re.sub(pattern, replacement, value_str, count=count)
        finally:
            # Cancel the alarm
            if hasattr(signal, 'SIGALRM'):
                signal.alarm(0)
                if old_handler is not None:
                    signal.signal(signal.SIGALRM, old_handler)

        return result

    except TimeoutError:
        logger.error(f"regex_replace: Regex operation timed out - possible ReDoS attack. Pattern: {pattern}")
        return value_str
    except re.error as e:
        logger.warning(f"regex_replace: Invalid regex pattern: {e}")
        return value_str
    except Exception as e:
        logger.error(f"regex_replace: Unexpected error: {e}")
        return value_str
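A minimal sketch of this filter registered and exercised directly, assuming the package layout shown in this diff (changedetectionio.jinja2_custom.plugins.regex):

# Sketch: registering and exercising the regex_replace filter above.
import jinja2.sandbox
from changedetectionio.jinja2_custom.plugins.regex import regex_replace

env = jinja2.sandbox.ImmutableSandboxedEnvironment()
env.filters['regex_replace'] = regex_replace

out = env.from_string('{{ "hello world" | regex_replace("world", "universe") }}').render()
assert out == 'hello universe'

# A catastrophic-backtracking pattern like (a+)+ is rejected up front,
# so the input comes back unchanged instead of hanging the worker.
assert regex_replace('aaaa!', '(a+)+$', 'X') == 'aaaa!'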
@@ -1,64 +0,0 @@
"""
Safe Jinja2 render with max payload sizes

See https://jinja.palletsprojects.com/en/3.1.x/sandbox/#security-considerations
"""

import jinja2.sandbox
import typing as t
import os
from .extensions.TimeExtension import TimeExtension
from .plugins import regex_replace

JINJA2_MAX_RETURN_PAYLOAD_SIZE = 1024 * int(os.getenv("JINJA2_MAX_RETURN_PAYLOAD_SIZE_KB", 1024 * 10))

# Default extensions - can be overridden in create_jinja_env()
DEFAULT_JINJA2_EXTENSIONS = [TimeExtension]


def create_jinja_env(extensions=None, **kwargs) -> jinja2.sandbox.ImmutableSandboxedEnvironment:
    """
    Create a sandboxed Jinja2 environment with our custom extensions and default timezone.

    Args:
        extensions: List of extension classes to use (defaults to DEFAULT_JINJA2_EXTENSIONS)
        **kwargs: Additional arguments to pass to ImmutableSandboxedEnvironment

    Returns:
        Configured Jinja2 environment
    """
    if extensions is None:
        extensions = DEFAULT_JINJA2_EXTENSIONS

    jinja2_env = jinja2.sandbox.ImmutableSandboxedEnvironment(
        extensions=extensions,
        **kwargs
    )

    # Get the default timezone from the environment variable
    default_timezone = os.getenv('TZ', 'UTC').strip()
    jinja2_env.default_timezone = default_timezone

    # Register custom filters
    jinja2_env.filters['regex_replace'] = regex_replace

    return jinja2_env


# This is used for notifications etc, so actually it's OK to send custom HTML such as <a href> etc,
# but it should limit what data is available.
# (Which also limits available functions that could be called)
def render(template_str, **args: t.Any) -> str:
    jinja2_env = create_jinja_env()
    output = jinja2_env.from_string(template_str).render(args)
    return output[:JINJA2_MAX_RETURN_PAYLOAD_SIZE]


def render_fully_escaped(content):
    """
    Escape HTML content safely.

    MEMORY LEAK FIX: Use markupsafe.escape() directly instead of creating
    Jinja2 environments (was causing 1M+ compilations per page load).
    Simpler, faster, and no concerns about environment state.
    """
    from markupsafe import escape
    return str(escape(content))
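A sketch of the safe render() entry point in use, per the module above; the output cap depends on the JINJA2_MAX_RETURN_PAYLOAD_SIZE_KB environment variable, so the comments assume the default:

# Sketch: the safe render() entry point in use.
from changedetectionio.jinja2_custom import render, JINJA2_MAX_RETURN_PAYLOAD_SIZE

# Variables arrive as keyword arguments; the custom tags/filters are available.
print(render("Price: {{ price }} at {% now 'UTC', '%Y-%m-%d' %}", price='9.99'))

# Output is sliced to JINJA2_MAX_RETURN_PAYLOAD_SIZE (~10MB with the default
# settings), so a template cannot return an unbounded payload.
out = render("{{ 'x' * (1024 * 1024 * 11) }}")      # asks for 11MB...
assert len(out) <= JINJA2_MAX_RETURN_PAYLOAD_SIZE   # ...but is capped at the limit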
@@ -1,112 +0,0 @@
"""
Language configuration for i18n support
Automatically discovers available languages from the translations directory
"""
import os
from pathlib import Path


def get_timeago_locale(flask_locale):
    """
    Convert Flask-Babel locale codes to timeago library locale codes.

    The Python timeago library (https://github.com/hustcc/timeago) supports 48 locales
    but uses different naming conventions than Flask-Babel. This function maps between them.

    Notable differences:
    - Chinese: Flask uses 'zh', timeago uses 'zh_CN'
    - Portuguese: Flask uses 'pt', timeago uses 'pt_PT' or 'pt_BR'
    - Swedish: Flask uses 'sv', timeago uses 'sv_SE'
    - Norwegian: Flask uses 'no', timeago uses 'nb_NO' or 'nn_NO'
    - Hindi: Flask uses 'hi', timeago uses 'in_HI'
    - Czech: Flask uses 'cs', but timeago doesn't support Czech - fall back to English

    Args:
        flask_locale (str): Flask-Babel locale code (e.g., 'cs', 'zh', 'pt')

    Returns:
        str: timeago library locale code (e.g., 'en', 'zh_CN', 'pt_PT')
    """
    locale_map = {
        'zh': 'zh_CN',          # Chinese Simplified
        # The timeago library simply hasn't been updated to the more modern
        # BCP 47 / RFC 5646 locale naming convention.
        'zh_TW': 'zh_TW',       # Chinese Traditional (timeago uses zh_TW)
        'zh_Hant_TW': 'zh_TW',  # Flask-Babel normalizes zh_TW to zh_Hant_TW, map back to timeago's zh_TW
        'pt': 'pt_PT',          # Portuguese (Portugal)
        'sv': 'sv_SE',          # Swedish
        'no': 'nb_NO',          # Norwegian Bokmål
        'hi': 'in_HI',          # Hindi
        'cs': 'en',             # Czech not supported by timeago, fall back to English
        'uk': 'uk',             # Ukrainian
        'en_GB': 'en',          # British English - timeago uses 'en'
        'en_US': 'en',          # American English - timeago uses 'en'
    }
    return locale_map.get(flask_locale, flask_locale)


# Language metadata: flag icon CSS class and native name
# Using the flag-icons library: https://flagicons.lipis.dev/
LANGUAGE_DATA = {
    'en_GB': {'flag': 'fi fi-gb fis', 'name': 'English (UK)'},
    'en_US': {'flag': 'fi fi-us fis', 'name': 'English (US)'},
    'de': {'flag': 'fi fi-de fis', 'name': 'Deutsch'},
    'fr': {'flag': 'fi fi-fr fis', 'name': 'Français'},
    'ko': {'flag': 'fi fi-kr fis', 'name': '한국어'},
    'cs': {'flag': 'fi fi-cz fis', 'name': 'Čeština'},
    'es': {'flag': 'fi fi-es fis', 'name': 'Español'},
    'pt': {'flag': 'fi fi-pt fis', 'name': 'Português'},
    'it': {'flag': 'fi fi-it fis', 'name': 'Italiano'},
    'ja': {'flag': 'fi fi-jp fis', 'name': '日本語'},
    'zh': {'flag': 'fi fi-cn fis', 'name': '中文 (简体)'},
    'zh_Hant_TW': {'flag': 'fi fi-tw fis', 'name': '繁體中文'},
    'ru': {'flag': 'fi fi-ru fis', 'name': 'Русский'},
    'pl': {'flag': 'fi fi-pl fis', 'name': 'Polski'},
    'nl': {'flag': 'fi fi-nl fis', 'name': 'Nederlands'},
    'sv': {'flag': 'fi fi-se fis', 'name': 'Svenska'},
    'da': {'flag': 'fi fi-dk fis', 'name': 'Dansk'},
    'no': {'flag': 'fi fi-no fis', 'name': 'Norsk'},
    'fi': {'flag': 'fi fi-fi fis', 'name': 'Suomi'},
    'tr': {'flag': 'fi fi-tr fis', 'name': 'Türkçe'},
    'ar': {'flag': 'fi fi-sa fis', 'name': 'العربية'},
    'hi': {'flag': 'fi fi-in fis', 'name': 'हिन्दी'},
    'uk': {'flag': 'fi fi-ua fis', 'name': 'Українська'},
}


def get_available_languages():
    """
    Discover available languages by scanning the translations directory
    Returns a dict of available languages with their metadata
    """
    translations_dir = Path(__file__).parent / 'translations'

    available = {}

    # Scan for translation directories
    if translations_dir.exists():
        for lang_dir in translations_dir.iterdir():
            if lang_dir.is_dir() and lang_dir.name in LANGUAGE_DATA:
                # Check if messages.po exists
                po_file = lang_dir / 'LC_MESSAGES' / 'messages.po'
                if po_file.exists():
                    available[lang_dir.name] = LANGUAGE_DATA[lang_dir.name]

    # If no English variants were found, fall back to adding en_GB as the default
    if 'en_GB' not in available and 'en_US' not in available:
        available['en_GB'] = LANGUAGE_DATA['en_GB']

    return available


def get_language_codes():
    """Get the list of available language codes"""
    return list(get_available_languages().keys())


def get_flag_for_locale(locale):
    """Get the flag icon CSS class for a locale, or a globe emoji if unknown"""
    return LANGUAGE_DATA.get(locale, {}).get('flag', '🌐')


def get_name_for_locale(locale):
    """Get the native name for a locale"""
    return LANGUAGE_DATA.get(locale, {}).get('name', locale.upper())
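A sketch of the locale mapping feeding the timeago library; the module path changedetectionio.languages is assumed (the file path is not shown in this hunk), and timeago.format(date, now, locale) follows the upstream README of github.com/hustcc/timeago:

# Sketch: converting Flask-Babel locales before calling timeago.
from datetime import datetime, timedelta
import timeago

from changedetectionio.languages import get_timeago_locale  # module path assumed

last_checked = datetime.now() - timedelta(minutes=3)
for flask_locale in ('en_GB', 'zh', 'cs', 'de'):
    ta_locale = get_timeago_locale(flask_locale)  # e.g. 'zh' -> 'zh_CN', 'cs' -> 'en'
    print(flask_locale, '->', ta_locale, ':', timeago.format(last_checked, datetime.now(), ta_locale))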
@@ -1,8 +1,6 @@
from os import getenv
from copy import deepcopy

from changedetectionio.blueprint.rss import RSS_FORMAT_TYPES, RSS_CONTENT_FORMAT_DEFAULT
from changedetectionio.model.Tags import TagsDict
from changedetectionio.blueprint.rss import RSS_FORMAT_TYPES

from changedetectionio.notification import (
    default_notification_body,

@@ -30,7 +28,7 @@ class model(dict):
            'proxy': None,  # Preferred proxy connection
            'time_between_check': {'weeks': None, 'days': None, 'hours': 3, 'minutes': None, 'seconds': None},
            'timeout': int(getenv("DEFAULT_SETTINGS_REQUESTS_TIMEOUT", "45")),  # Default 45 seconds
            'workers': int(getenv("DEFAULT_SETTINGS_REQUESTS_WORKERS", "5")),  # Number of threads, lower is better for slow connections
            'workers': int(getenv("DEFAULT_SETTINGS_REQUESTS_WORKERS", "10")),  # Number of threads, lower is better for slow connections
            'default_ua': {
                'html_requests': getenv("DEFAULT_SETTINGS_HEADERS_USERAGENT", DEFAULT_SETTINGS_HEADERS_USERAGENT),
                'html_webdriver': None,

@@ -38,19 +36,15 @@
        },
        'application': {
            # Custom notification content
            'all_paused': False,
            'all_muted': False,
            'api_access_token_enabled': True,
            'base_url': None,
            'empty_pages_are_a_change': False,
            'extract_title_as_title': False,
            'fetch_backend': getenv("DEFAULT_FETCH_BACKEND", "html_requests"),
            'filter_failure_notification_threshold_attempts': _FILTER_FAILURE_THRESHOLD_ATTEMPTS_DEFAULT,
            'global_ignore_text': [],  # List of text to ignore when calculating the comparison checksum
            'global_subtractive_selectors': [],
            'history_snapshot_max_length': None,
            'ignore_whitespace': True,
            'ignore_status_codes': False,  #@todo implement, as ternary.
            'ssim_threshold': '0.96',  # Default SSIM threshold for screenshot comparison
            'notification_body': default_notification_body,
            'notification_format': default_notification_format,
            'notification_title': default_notification_title,

@@ -59,43 +53,34 @@
            'password': False,
            'render_anchor_tag_content': False,
            'rss_access_token': None,
            'rss_content_format': RSS_CONTENT_FORMAT_DEFAULT,
            'rss_template_type': 'system_default',
            'rss_template_override': None,
            'rss_diff_length': 5,
            'rss_content_format': RSS_FORMAT_TYPES[0][0],
            'rss_hide_muted_watches': True,
            'rss_reader_mode': False,
            'scheduler_timezone_default': None,  # Default IANA timezone name
            'schema_version': 0,
            'shared_diff_access': False,
            'strip_ignored_lines': False,
            'tags': None,  # Initialized in __init__ with real datastore_path
            'webdriver_delay': None,  # Extra delay in seconds before extracting text
            'tags': {},  #@todo use Tag.model initialisers
            'timezone': None,  # Default IANA timezone name
            'ui': {
                'use_page_title_in_list': True,
                'open_diff_in_new_tab': True,
                'socket_io_enabled': True,
                'favicons_enabled': True
            },
            'ai': {
                'openai_key': None,
                'gemini_key': None
            }
        }
    }
}

    def __init__(self, *arg, datastore_path=None, **kw):
    def __init__(self, *arg, **kw):
        super(model, self).__init__(*arg, **kw)
        # Capture any tags data passed in before base_config overwrites the structure
        existing_tags = self.get('settings', {}).get('application', {}).get('tags') or {}
        # CRITICAL: deepcopy to avoid sharing mutable objects between instances
        self.update(deepcopy(self.base_config))
        # TagsDict requires the real datastore_path at runtime (cannot be set at class-definition time)
        if datastore_path is None:
            raise ValueError("App.model() requires 'datastore_path' keyword argument")
        self['settings']['application']['tags'] = TagsDict(existing_tags, datastore_path=datastore_path)
        self.update(self.base_config)


def parse_headers_from_text_file(filepath):
    headers = {}
    with open(filepath, 'r', encoding='utf-8') as f:
    with open(filepath, 'r') as f:
        for l in f.readlines():
            l = l.strip()
            if not l.startswith('#') and ':' in l:
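One side of this diff gives model.__init__ a required datastore_path and wraps the tags dict in a TagsDict bound to that path. A sketch of instantiation under that variant (the import path changedetectionio.model.App is assumed from the imports above; the other side of the diff uses the plain __init__ without datastore_path):

# Sketch of the datastore_path-aware variant of App.model shown in this hunk.
import tempfile

from changedetectionio.model.App import model as AppModel  # path assumed

datastore_path = tempfile.mkdtemp()
app_config = AppModel(datastore_path=datastore_path)

# 'tags' is now a TagsDict bound to the datastore, not a plain dict,
# so deleting a tag can also remove its on-disk tag.json directory.
tags = app_config['settings']['application']['tags']
print(type(tags).__name__)  # TagsDict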
@@ -1,48 +1,10 @@
"""
Tag/Group domain model for organizing and overriding watch settings.

ARCHITECTURE NOTE: Configuration Override Hierarchy
===================================================

Tags can override Watch settings when overrides_watch=True.
The current implementation requires manual checking in processors:

    for tag_uuid in watch.get('tags'):
        tag = datastore['settings']['application']['tags'][tag_uuid]
        if tag.get('overrides_watch'):
            restock_settings = tag.get('restock_settings', {})
            break

With Pydantic, this would be automatic via chain resolution:
    Watch -> Tag (first with overrides_watch) -> Global

See: Watch.py model docstring for the full Pydantic architecture explanation
See: processors/restock_diff/processor.py:184-192 for the current manual implementation
"""

from changedetectionio.model import watch_base
from changedetectionio.model.persistence import EntityPersistenceMixin


class model(EntityPersistenceMixin, watch_base):
    """
    Tag domain model - groups watches and can override their settings.

    Tags inherit from watch_base to reuse all the same fields as Watch.
    When overrides_watch=True, tag settings take precedence over watch settings
    for all watches in this tag/group.

    Fields:
        overrides_watch (bool): If True, this tag's settings override watch settings
        title (str): Display name for this tag/group
        uuid (str): Unique identifier
        ... (all fields from watch_base can be set as tag-level overrides)

    Resolution order when overrides_watch=True:
        Watch.field -> Tag.field (if overrides_watch) -> Global.field
    """
class model(watch_base):

    def __init__(self, *arg, **kw):
        # The parent class (watch_base) handles __datastore and __datastore_path
        super(model, self).__init__(*arg, **kw)

        self['overrides_watch'] = kw.get('default', {}).get('overrides_watch')

@@ -50,7 +12,3 @@ class model(EntityPersistenceMixin, watch_base):
        if kw.get('default'):
            self.update(kw['default'])
            del kw['default']

        # _save_to_disk() method provided by EntityPersistenceMixin
        # commit() and _get_commit_data() methods inherited from watch_base
        # Tag uses default _get_commit_data() (includes all keys)
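A self-contained illustration of the manual override resolution the docstring above describes; field names like restock_settings follow the docstring, and the data is made up for demonstration:

# Sketch: the first tag with overrides_watch=True wins over the watch's own settings.
watch = {'tags': ['uuid-1', 'uuid-2'], 'restock_settings': {'price_change_min': 5}}
tags = {
    'uuid-1': {'overrides_watch': False, 'restock_settings': {'price_change_min': 1}},
    'uuid-2': {'overrides_watch': True,  'restock_settings': {'price_change_min': 10}},
}

restock_settings = watch.get('restock_settings', {})
for tag_uuid in watch.get('tags'):
    tag = tags[tag_uuid]
    if tag.get('overrides_watch'):
        restock_settings = tag.get('restock_settings', {})
        break

print(restock_settings)  # {'price_change_min': 10}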
@@ -1,39 +0,0 @@
import os
import shutil
from pathlib import Path
from loguru import logger

_SENTINEL = object()


class TagsDict(dict):
    """Dict subclass that removes the corresponding tag.json file when a tag is deleted."""

    def __init__(self, *args, datastore_path: str | os.PathLike, **kwargs) -> None:
        self._datastore_path = Path(datastore_path)
        super().__init__(*args, **kwargs)

    def __delitem__(self, key: str) -> None:
        super().__delitem__(key)
        tag_dir = self._datastore_path / key
        tag_json_file = tag_dir / "tag.json"
        if not os.path.exists(tag_json_file):
            logger.critical(f"Aborting deletion of directory '{tag_dir}' because '{tag_json_file}' does not exist.")
            return
        try:
            shutil.rmtree(tag_dir)
            logger.info(f"Deleted tag directory for tag {key!r}")
        except FileNotFoundError:
            pass
        except OSError as e:
            logger.error(f"Failed to delete tag directory for tag {key!r}: {e}")

    def pop(self, key: str, default=_SENTINEL):
        """Remove and return a tag, deleting its tag.json file. Raises KeyError if missing and no default is given."""
        if key in self:
            value = self[key]
            del self[key]
            return value
        if default is _SENTINEL:
            raise KeyError(key)
        return default
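A sketch of the on-disk side effect: deleting a key from TagsDict also removes that tag's directory, but only if its tag.json exists as a safety check. Paths here are temporary and the import path is assumed from this diff:

# Sketch: TagsDict removing a tag's on-disk directory when the key is deleted.
import tempfile
from pathlib import Path

from changedetectionio.model.Tags import TagsDict

datastore = Path(tempfile.mkdtemp())
(datastore / 'uuid-1').mkdir()
(datastore / 'uuid-1' / 'tag.json').write_text('{}')

tags = TagsDict({'uuid-1': {'title': 'Groceries'}}, datastore_path=datastore)
removed = tags.pop('uuid-1')            # returns the tag dict...
print((datastore / 'uuid-1').exists())  # ...and the directory is gone: False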
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff