Compare commits


1 Commit

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| dgtlmoon | 5742cf7a0e | Extra validation | 2023-09-23 14:31:01 +02:00 |

74 changed files with 820 additions and 2262 deletions

View File

@@ -2,7 +2,7 @@
 # Test that we can still build on Alpine (musl modified libc https://musl.libc.org/)
 # Some packages wont install via pypi because they dont have a wheel available under this architecture.
-FROM ghcr.io/linuxserver/baseimage-alpine:3.18
+FROM ghcr.io/linuxserver/baseimage-alpine:3.16
 ENV PYTHONUNBUFFERED=1
 COPY requirements.txt /requirements.txt
@@ -26,6 +26,6 @@ RUN \
     py3-pip && \
     echo "**** pip3 install test of changedetection.io ****" && \
     pip3 install -U pip wheel setuptools && \
-    pip3 install -U --no-cache-dir --find-links https://wheel-index.linuxserver.io/alpine-3.18/ -r /requirements.txt && \
+    pip3 install -U --no-cache-dir --find-links https://wheel-index.linuxserver.io/alpine-3.16/ -r /requirements.txt && \
     apk del --purge \
     build-dependencies

View File

@@ -30,11 +30,11 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
       # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@v2
+        uses: github/codeql-action/init@v1
         with:
           languages: ${{ matrix.language }}
           # If you wish to specify custom queries, you can do so here or in a config file.
@@ -45,7 +45,7 @@ jobs:
       # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
       # If this step fails, then you should remove it and run the build manually (see below)
       - name: Autobuild
-        uses: github/codeql-action/autobuild@v2
+        uses: github/codeql-action/autobuild@v1
       # Command-line programs to run using the OS shell.
       # 📚 https://git.io/JvXDl
@@ -59,4 +59,4 @@ jobs:
       #   make release
       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@v2
+        uses: github/codeql-action/analyze@v1

View File

@@ -39,11 +39,11 @@ jobs:
     # Or if we are in a tagged release scenario.
     if: ${{ github.event.workflow_run.conclusion == 'success' }} || ${{ github.event.release.tag_name }} != ''
     steps:
-      - uses: actions/checkout@v4
-      - name: Set up Python 3.11
-        uses: actions/setup-python@v4
+      - uses: actions/checkout@v2
+      - name: Set up Python 3.9
+        uses: actions/setup-python@v2
         with:
-          python-version: 3.11
+          python-version: 3.9
      - name: Install dependencies
        run: |
@@ -58,27 +58,27 @@ jobs:
          echo ${{ github.ref }} > changedetectionio/tag.txt
      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v3
+        uses: docker/setup-qemu-action@v1
        with:
          image: tonistiigi/binfmt:latest
          platforms: all
      - name: Login to GitHub Container Registry
-        uses: docker/login-action@v3
+        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Login to Docker Hub Container Registry
-        uses: docker/login-action@v3
+        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKER_HUB_USERNAME }}
          password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
      - name: Set up Docker Buildx
        id: buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@v1
        with:
          install: true
          version: latest
@@ -88,7 +88,7 @@ jobs:
      - name: Build and push :dev
        id: docker_build
        if: ${{ github.ref }} == "refs/heads/master"
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v2
        with:
          context: ./
          file: ./Dockerfile
@@ -96,9 +96,8 @@ jobs:
          tags: |
            ${{ secrets.DOCKER_HUB_USERNAME }}/changedetection.io:dev,ghcr.io/${{ github.repository }}:dev
          platforms: linux/amd64,linux/arm64,linux/arm/v6,linux/arm/v7,linux/arm/v8
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
+          cache-from: type=local,src=/tmp/.buildx-cache
+          cache-to: type=local,dest=/tmp/.buildx-cache
          # Looks like this was disabled
          # provenance: false
@@ -106,7 +105,7 @@ jobs:
      - name: Build and push :tag
        id: docker_build_tag_release
        if: github.event_name == 'release' && startsWith(github.event.release.tag_name, '0.')
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v2
        with:
          context: ./
          file: ./Dockerfile
@@ -117,11 +116,18 @@ jobs:
            ${{ secrets.DOCKER_HUB_USERNAME }}/changedetection.io:latest
            ghcr.io/dgtlmoon/changedetection.io:latest
          platforms: linux/amd64,linux/arm64,linux/arm/v6,linux/arm/v7,linux/arm/v8
-          cache-from: type=gha
-          cache-to: type=gha,mode=max
+          cache-from: type=local,src=/tmp/.buildx-cache
+          cache-to: type=local,dest=/tmp/.buildx-cache
          # Looks like this was disabled
          # provenance: false
      - name: Image digest
        run: echo step SHA ${{ steps.vars.outputs.sha_short }} tag ${{steps.vars.outputs.tag}} branch ${{steps.vars.outputs.branch}} digest ${{ steps.docker_build.outputs.digest }}
+      - name: Cache Docker layers
+        uses: actions/cache@v2
+        with:
+          path: /tmp/.buildx-cache
+          key: ${{ runner.os }}-buildx-${{ github.sha }}
+          restore-keys: |
+            ${{ runner.os }}-buildx-

View File

@@ -24,22 +24,22 @@ jobs:
   test-container-build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
-      - name: Set up Python 3.11
-        uses: actions/setup-python@v4
+      - uses: actions/checkout@v2
+      - name: Set up Python 3.9
+        uses: actions/setup-python@v2
        with:
-          python-version: 3.11
+          python-version: 3.9
      # Just test that the build works, some libraries won't compile on ARM/rPi etc
      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v3
+        uses: docker/setup-qemu-action@v1
        with:
          image: tonistiigi/binfmt:latest
          platforms: all
      - name: Set up Docker Buildx
        id: buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@v1
        with:
          install: true
          version: latest
@@ -49,7 +49,7 @@ jobs:
      # Check we can still build under alpine/musl
      - name: Test that the docker containers can build (musl via alpine check)
        id: docker_build_musl
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v2
        with:
          context: ./
          file: ./.github/test/Dockerfile-alpine
@@ -57,7 +57,7 @@ jobs:
      - name: Test that the docker containers can build
        id: docker_build
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v2
        # https://github.com/docker/build-push-action#customizing
        with:
          context: ./

View File

@@ -7,13 +7,13 @@ jobs:
   test-application:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v2
      # Mainly just for link/flake8
-      - name: Set up Python 3.11
-        uses: actions/setup-python@v4
+      - name: Set up Python 3.10
+        uses: actions/setup-python@v2
        with:
-          python-version: '3.11'
+          python-version: '3.10'
      - name: Lint with flake8
        run: |
@@ -29,8 +29,8 @@ jobs:
          docker network create changedet-network
          # Selenium+browserless
-          docker run --network changedet-network -d --hostname selenium -p 4444:4444 --rm --shm-size="2g" selenium/standalone-chrome:4
-          docker run --network changedet-network -d --hostname browserless -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" -e "DEFAULT_LAUNCH_ARGS=[\"--window-size=1920,1080\"]" --rm -p 3000:3000 --shm-size="2g" browserless/chrome:1.60-chrome-stable
+          docker run --network changedet-network -d --hostname selenium -p 4444:4444 --rm --shm-size="2g" selenium/standalone-chrome-debug:3.141.59
+          docker run --network changedet-network -d --hostname browserless -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" -e "DEFAULT_LAUNCH_ARGS=[\"--window-size=1920,1080\"]" --rm -p 3000:3000 --shm-size="2g" browserless/chrome:1.53-chrome-stable
      - name: Build changedetection.io container for testing
        run: |
@@ -83,7 +83,6 @@ jobs:
        run: |
          cd changedetectionio
          ./run_proxy_tests.sh
-          # And again with PLAYWRIGHT_DRIVER_URL=..
          cd ..
      - name: Test changedetection.io container starts+runs basically without error
@@ -99,4 +98,4 @@ jobs:
          #export WEBDRIVER_URL=http://localhost:4444/wd/hub
          #pytest tests/fetchers/test_content.py
          #pytest tests/test_errorhandling.py

View File

@@ -11,12 +11,12 @@ jobs:
   test-pip-build-basics:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
-      - name: Set up Python 3.11
-        uses: actions/setup-python@v4
+      - uses: actions/checkout@v2
+      - name: Set up Python 3.9
+        uses: actions/setup-python@v2
        with:
-          python-version: 3.11
+          python-version: 3.9
      - name: Test that the basic pip built package runs without error

View File

@@ -1,5 +1,5 @@
 # pip dependencies install stage
-FROM python:3.11-slim-bookworm as builder
+FROM python:3.10-slim-bullseye as builder

 # See `cryptography` pin comment in requirements.txt
 ARG CRYPTOGRAPHY_DONT_BUILD_RUST=1
@@ -25,13 +25,14 @@ RUN pip install --target=/dependencies -r /requirements.txt
 # Playwright is an alternative to Selenium
 # Excluded this package from requirements.txt to prevent arm/v6 and arm/v7 builds from failing
 # https://github.com/dgtlmoon/changedetection.io/pull/1067 also musl/alpine (not supported)
-RUN pip install --target=/dependencies playwright~=1.39 \
+RUN pip install --target=/dependencies playwright~=1.27.1 \
     || echo "WARN: Failed to install Playwright. The application can still run, but the Playwright option will be disabled."

 # Final image stage
-FROM python:3.11-slim-bookworm
+FROM python:3.10-slim-bullseye

 RUN apt-get update && apt-get install -y --no-install-recommends \
+    libssl1.1 \
     libxslt1.1 \
     # For pdftohtml
     poppler-utils \
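
Note: the `|| echo "WARN: ..."` fallback above makes the Playwright install best-effort so arm/v6, arm/v7 and musl images still build. A minimal sketch of the matching runtime pattern, assuming the application probes for the package on import (the names `PLAYWRIGHT_AVAILABLE` and `fetch_with_playwright` are illustrative, not from this codebase):

```python
# Sketch: treat Playwright as an optional dependency and degrade gracefully.
try:
    from playwright.sync_api import sync_playwright  # may be absent on arm/v6, arm/v7, musl
    PLAYWRIGHT_AVAILABLE = True
except ImportError:
    PLAYWRIGHT_AVAILABLE = False

def fetch_with_playwright(url: str) -> str:
    # Refuse clearly when the optional feature is unavailable
    if not PLAYWRIGHT_AVAILABLE:
        raise RuntimeError("Playwright option is disabled; pip install playwright to enable it")
    with sync_playwright() as p:
        browser = p.chromium.launch()
        page = browser.new_page()
        page.goto(url)
        content = page.content()
        browser.close()
        return content
```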

View File

@@ -16,4 +16,3 @@ global-exclude venv
 global-exclude test-datastore
 global-exclude changedetection.io*dist-info
-global-exclude changedetectionio/tests/proxy_socks5/test-datastore

View File

@@ -2,44 +2,19 @@
 Live your data-life pro-actively, track website content changes and receive notifications via Discord, Email, Slack, Telegram and 70+ more

-[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot.png" style="max-width:100%;" alt="Self-hosted web page change monitoring, list of websites with changes" title="Self-hosted web page change monitoring, list of websites with changes" />](https://changedetection.io)
+[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot.png" style="max-width:100%;" alt="Self-hosted web page change monitoring" title="Self-hosted web page change monitoring" />](https://changedetection.io)

 [**Don't have time? Let us host it for you! try our extremely affordable subscription use our proxies and support!**](https://changedetection.io)

-### Target specific parts of the webpage using the Visual Selector tool.
-Available when connected to a <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Playwright-content-fetcher">playwright content fetcher</a> (included as part of our subscription service)
-[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/visualselector-anim.gif" style="max-width:100%;" alt="Select parts and elements of a web page to monitor for changes" title="Select parts and elements of a web page to monitor for changes" />](https://changedetection.io?src=pip)
-
-### Easily see what changed, examine by word, line, or individual character.
-[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot-diff.png" style="max-width:100%;" alt="Self-hosted web page change monitoring context difference " title="Self-hosted web page change monitoring context difference " />](https://changedetection.io?src=pip)
-
-### Perform interactive browser steps
-Fill in text boxes, click buttons and more, setup your changedetection scenario.
-Using the **Browser Steps** configuration, add basic steps before performing change detection, such as logging into websites, adding a product to a cart, accept cookie logins, entering dates and refining searches.
-[<img src="docs/browsersteps-anim.gif" style="max-width:100%;" alt="Website change detection with interactive browser steps, detect changes behind login and password, search queries and more" title="Website change detection with interactive browser steps, detect changes behind login and password, search queries and more" />](https://changedetection.io?src=pip)
-
-After **Browser Steps** have been run, then visit the **Visual Selector** tab to refine the content you're interested in.
-Requires Playwright to be enabled.
-
-### Example use cases
+#### Example use cases

 - Products and services have a change in pricing
 - _Out of stock notification_ and _Back In stock notification_
-- Monitor and track PDF file changes, know when a PDF file has text changes.
 - Governmental department updates (changes are often only on their websites)
 - New software releases, security advisories when you're not on their mailing list.
 - Festivals with changes
-- Discogs restock alerts and monitoring
 - Realestate listing changes
 - Know when your favourite whiskey is on sale, or other special deals are announced before anyone else
 - COVID related news from government websites
@@ -52,34 +27,18 @@ Requires Playwright to be enabled.
 - Create RSS feeds based on changes in web content
 - Monitor HTML source code for unexpected changes, strengthen your PCI compliance
 - You have a very sensitive list of URLs to watch and you do _not_ want to use the paid alternatives. (Remember, _you_ are the product)
-- Get notified when certain keywords appear in Twitter search results
-- Proactively search for jobs, get notified when companies update their careers page, search job portals for keywords.
-- Get alerts when new job positions are open on Bamboo HR and other job platforms
-- Website defacement monitoring
-- Pokémon Card Restock Tracker / Pokémon TCG Tracker
-- RegTech - stay ahead of regulatory changes, regulatory compliance

 _Need an actual Chrome runner with Javascript support? We support fetching via WebDriver and Playwright!</a>_

 #### Key Features

 - Lots of trigger filters, such as "Trigger on text", "Remove text by selector", "Ignore text", "Extract text", also using regular-expressions!
-- Target elements with xPath(1.0) and CSS Selectors, Easily monitor complex JSON with JSONPath or jq
+- Target elements with xPath and CSS Selectors, Easily monitor complex JSON with JSONPath or jq
 - Switch between fast non-JS and Chrome JS based "fetchers"
-- Track changes in PDF files (Monitor text changed in the PDF, Also monitor PDF filesize and checksums)
 - Easily specify how often a site should be checked
 - Execute JS before extracting text (Good for logging in, see examples in the UI!)
 - Override Request Headers, Specify `POST` or `GET` and other methods
 - Use the "Visual Selector" to help target specific elements
-- Configurable [proxy per watch](https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration)
-- Send a screenshot with the notification when a change is detected in the web page
-
-We [recommend and use Bright Data](https://brightdata.grsm.io/n0r16zf7eivq) global proxy services, Bright Data will match any first deposit up to $100 using our signup link.
-
-[Oxylabs](https://oxylabs.go2cloud.org/SH2d) is also an excellent proxy provider and well worth using, they offer Residental, ISP, Rotating and many other proxy types to suit your project.
-
-Please :star: star :star: this project and help it grow! https://github.com/dgtlmoon/changedetection.io/

 ```bash

View File

@@ -5,7 +5,7 @@
 _Live your data-life pro-actively._

-[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot.png" style="max-width:100%;" alt="Self-hosted web site page change monitoring" title="Self-hosted web site page change monitoring" />](https://changedetection.io?src=github)
+[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot.png" style="max-width:100%;" alt="Self-hosted web page change monitoring" title="Self-hosted web page change monitoring" />](https://changedetection.io?src=github)

 [![Release Version][release-shield]][release-link] [![Docker Pulls][docker-pulls]][docker-link] [![License][license-shield]](LICENSE.md)
@@ -22,7 +22,7 @@ _Live your data-life pro-actively._
 Available when connected to a <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Playwright-content-fetcher">playwright content fetcher</a> (included as part of our subscription service)

-[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/visualselector-anim.gif" style="max-width:100%;" alt="Select parts and elements of a web page to monitor for changes" title="Select parts and elements of a web page to monitor for changes" />](https://changedetection.io?src=github)
+[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/visualselector-anim.gif" style="max-width:100%;" alt="Self-hosted web page change monitoring context difference " title="Self-hosted web page change monitoring context difference " />](https://changedetection.io?src=github)

 ### Easily see what changed, examine by word, line, or individual character.
@@ -35,7 +35,7 @@ Fill in text boxes, click buttons and more, setup your changedetection scenario.
 Using the **Browser Steps** configuration, add basic steps before performing change detection, such as logging into websites, adding a product to a cart, accept cookie logins, entering dates and refining searches.

-[<img src="docs/browsersteps-anim.gif" style="max-width:100%;" alt="Website change detection with interactive browser steps, detect changes behind login and password, search queries and more" title="Website change detection with interactive browser steps, detect changes behind login and password, search queries and more" />](https://changedetection.io?src=github)
+[<img src="docs/browsersteps-anim.gif" style="max-width:100%;" alt="Self-hosted web page change monitoring context difference " title="Website change detection with interactive browser steps, login, cookies etc" />](https://changedetection.io?src=github)

 After **Browser Steps** have been run, then visit the **Visual Selector** tab to refine the content you're interested in.
 Requires Playwright to be enabled.
@@ -226,19 +226,12 @@ The application also supports notifying you that it can follow this information
 ## Proxy Configuration

-See the wiki https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration , we also support using [Bright Data proxy services where possible](https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration#brightdata-proxy-support) and [Oxylabs](https://oxylabs.go2cloud.org/SH2d) proxy services.
+See the wiki https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration , we also support using [Bright Data proxy services where possible]( https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration#brightdata-proxy-support)

 ## Raspberry Pi support?

 Raspberry Pi and linux/arm/v6 linux/arm/v7 arm64 devices are supported! See the wiki for [details](https://github.com/dgtlmoon/changedetection.io/wiki/Fetching-pages-with-WebDriver)

-## Import support
-
-Easily [import your list of websites to watch for changes in Excel .xslx file format](https://changedetection.io/tutorial/how-import-your-website-change-detection-lists-excel), or paste in lists of website URLs as plaintext.
-
-Excel import is recommended - that way you can better organise tags/groups of websites and other features.
-
 ## API Support

 Supports managing the website watch list [via our API](https://changedetection.io/docs/api_v1/index.html)

View File

@@ -38,7 +38,7 @@ from flask_paginate import Pagination, get_page_parameter
 from changedetectionio import html_tools
 from changedetectionio.api import api_v1

-__version__ = '0.45.7.3'
+__version__ = '0.45.2'

 from changedetectionio.store import BASE_URL_NOT_SET_TEXT
@@ -105,10 +105,6 @@ def get_darkmode_state():
     css_dark_mode = request.cookies.get('css_dark_mode', 'false')
     return 'true' if css_dark_mode and strtobool(css_dark_mode) else 'false'

-@app.template_global()
-def get_css_version():
-    return __version__
-
 # We use the whole watch object from the store/JSON so we can see if there's some related status in terms of a thread
 # running or something similar.
 @app.template_filter('format_last_checked_time')
@@ -190,6 +186,7 @@ class User(flask_login.UserMixin):
     pass

+
 def login_optionally_required(func):
     @wraps(func)
     def decorated_view(*args, **kwargs):
@@ -202,6 +199,7 @@ def login_optionally_required(func):
         # Permitted
         elif request.endpoint == 'diff_history_page' and datastore.data['settings']['application'].get('shared_diff_access'):
             return func(*args, **kwargs)
+
         elif request.method in flask_login.config.EXEMPT_METHODS:
             return func(*args, **kwargs)
         elif app.config.get('LOGIN_DISABLED'):
@@ -420,18 +418,11 @@ def changedetection_app(config=None, datastore_o=None):
         # Sort by last_changed and add the uuid which is usually the key..
         sorted_watches = []
-        with_errors = request.args.get('with_errors') == "1"
-        errored_count = 0
         search_q = request.args.get('q').strip().lower() if request.args.get('q') else False
         for uuid, watch in datastore.data['watching'].items():
-            if with_errors and not watch.get('last_error'):
-                continue
             if limit_tag and not limit_tag in watch['tags']:
                 continue
-            if watch.get('last_error'):
-                errored_count += 1

             if search_q:
                 if (watch.get('title') and search_q in watch.get('title').lower()) or search_q in watch.get('url', '').lower():
                     sorted_watches.append(watch)
@@ -453,7 +444,6 @@ def changedetection_app(config=None, datastore_o=None):
                                  active_tag=limit_tag,
                                  app_rss_token=datastore.data['settings']['application']['rss_access_token'],
                                  datastore=datastore,
-                                 errored_count=errored_count,
                                  form=form,
                                  guid=datastore.data['app_guid'],
                                  has_proxies=datastore.proxy_list,
@@ -634,6 +624,7 @@ def changedetection_app(config=None, datastore_o=None):
             if request.args.get('unpause_on_save'):
                 extra_update_obj['paused'] = False
+
             # Re #110, if they submit the same as the default value, set it to None, so we continue to follow the default
             # Assume we use the default value, unless something relevant is different, then use the form value
             # values could be None, 0 etc.
@@ -719,11 +710,11 @@ def changedetection_app(config=None, datastore_o=None):
         # Only works reliably with Playwright
         visualselector_enabled = os.getenv('PLAYWRIGHT_DRIVER_URL', False) and is_html_webdriver

         output = render_template("edit.html",
                                  available_processors=processors.available_processors(),
                                  browser_steps_config=browser_step_ui_config,
                                  emailprefix=os.getenv('NOTIFICATION_MAIL_BUTTON_PREFIX', False),
-                                 extra_title=f" - Edit - {watch.label}",
                                  form=form,
                                  has_default_notification_urls=True if len(datastore.data['settings']['application']['notification_urls']) else False,
                                  has_empty_checktime=using_default_check_time,
@@ -819,16 +810,6 @@ def changedetection_app(config=None, datastore_o=None):
         return output

-    @app.route("/settings/reset-api-key", methods=['GET'])
-    @login_optionally_required
-    def settings_reset_api_key():
-        import secrets
-        secret = secrets.token_hex(16)
-        datastore.data['settings']['application']['api_access_token'] = secret
-        datastore.needs_write_urgent = True
-        flash("API Key was regenerated.")
-        return redirect(url_for('settings_page')+'#api')
-
     @app.route("/import", methods=['GET', "POST"])
     @login_optionally_required
     def import_page():
@@ -836,7 +817,6 @@ def changedetection_app(config=None, datastore_o=None):
         from . import forms

         if request.method == 'POST':
-
             from .importer import import_url_list, import_distill_io_json

             # URL List import
@@ -860,32 +840,11 @@ def changedetection_app(config=None, datastore_o=None):
                 for uuid in d_importer.new_uuids:
                     update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid, 'skip_when_checksum_same': True}))

-            # XLSX importer
-            if request.files and request.files.get('xlsx_file'):
-                file = request.files['xlsx_file']
-                from .importer import import_xlsx_wachete, import_xlsx_custom
-
-                if request.values.get('file_mapping') == 'wachete':
-                    w_importer = import_xlsx_wachete()
-                    w_importer.run(data=file, flash=flash, datastore=datastore)
-                else:
-                    w_importer = import_xlsx_custom()
-                    # Building mapping of col # to col # type
-                    map = {}
-                    for i in range(10):
-                        c = request.values.get(f"custom_xlsx[col_{i}]")
-                        v = request.values.get(f"custom_xlsx[col_type_{i}]")
-                        if c and v:
-                            map[int(c)] = v
-                    w_importer.import_profile = map
-                    w_importer.run(data=file, flash=flash, datastore=datastore)
-
-                for uuid in w_importer.new_uuids:
-                    update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid, 'skip_when_checksum_same': True}))
-
-        form = forms.importForm(formdata=request.form if request.method == 'POST' else None,
-                                # data=default,
-                                )
         # Could be some remaining, or we could be on GET
+        form = forms.importForm(formdata=request.form if request.method == 'POST' else None)
         output = render_template("import.html",
                                  form=form,
                                  import_url_list_remaining="\n".join(remaining_urls),
@@ -899,10 +858,7 @@ def changedetection_app(config=None, datastore_o=None):
     def mark_all_viewed():

         # Save the current newest history as the most recently viewed
-        with_errors = request.args.get('with_errors') == "1"
         for watch_uuid, watch in datastore.data['watching'].items():
-            if with_errors and not watch.get('last_error'):
-                continue
             datastore.set_last_viewed(watch_uuid, int(time.time()))

         return redirect(url_for('index'))
@@ -958,29 +914,21 @@ def changedetection_app(config=None, datastore_o=None):
         # Read as binary and force decode as UTF-8
         # Windows may fail decode in python if we just use 'r' mode (chardet decode exception)
-        from_version = request.args.get('from_version')
-        from_version_index = -2  # second newest
-        if from_version and from_version in dates:
-            from_version_index = dates.index(from_version)
-        else:
-            from_version = dates[from_version_index]
-
-        try:
-            from_version_file_contents = watch.get_history_snapshot(dates[from_version_index])
-        except Exception as e:
-            from_version_file_contents = "Unable to read to-version at index{}.\n".format(dates[from_version_index])
-
-        to_version = request.args.get('to_version')
-        to_version_index = -1
-        if to_version and to_version in dates:
-            to_version_index = dates.index(to_version)
-        else:
-            to_version = dates[to_version_index]
-
-        try:
-            to_version_file_contents = watch.get_history_snapshot(dates[to_version_index])
-        except Exception as e:
-            to_version_file_contents = "Unable to read to-version at index{}.\n".format(dates[to_version_index])
+        try:
+            newest_version_file_contents = watch.get_history_snapshot(dates[-1])
+        except Exception as e:
+            newest_version_file_contents = "Unable to read {}.\n".format(dates[-1])
+
+        previous_version = request.args.get('previous_version')
+        previous_timestamp = dates[-2]
+        if previous_version:
+            previous_timestamp = previous_version
+
+        try:
+            previous_version_file_contents = watch.get_history_snapshot(previous_timestamp)
+        except Exception as e:
+            previous_version_file_contents = "Unable to read {}.\n".format(previous_timestamp)

         screenshot_url = watch.get_screenshot()
@@ -996,24 +944,22 @@ def changedetection_app(config=None, datastore_o=None):
         output = render_template("diff.html",
                                  current_diff_url=watch['url'],
-                                 from_version=str(from_version),
-                                 to_version=str(to_version),
+                                 current_previous_version=str(previous_version),
                                  extra_stylesheets=extra_stylesheets,
-                                 extra_title=f" - Diff - {watch.label}",
+                                 extra_title=" - Diff - {}".format(watch['title'] if watch['title'] else watch['url']),
                                  extract_form=extract_form,
                                  is_html_webdriver=is_html_webdriver,
                                  last_error=watch['last_error'],
                                  last_error_screenshot=watch.get_error_snapshot(),
                                  last_error_text=watch.get_error_text(),
                                  left_sticky=True,
-                                 newest=to_version_file_contents,
+                                 newest=newest_version_file_contents,
                                  newest_version_timestamp=dates[-1],
                                  password_enabled_and_share_is_off=password_enabled_and_share_is_off,
-                                 from_version_file_contents=from_version_file_contents,
-                                 to_version_file_contents=to_version_file_contents,
+                                 previous=previous_version_file_contents,
                                  screenshot=screenshot_url,
                                  uuid=uuid,
-                                 versions=dates,  # All except current/last
+                                 versions=dates[:-1],  # All except current/last
                                  watch_a=watch
                                  )
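
Note: the removed (left-hand) code in the two hunks above generalises the diff view from a fixed newest/previous pair to arbitrary `from_version`/`to_version` query parameters, defaulting to the second-newest and newest snapshots. A standalone sketch of that selection logic, assuming `dates` is the ordered list of snapshot timestamps, oldest first (`pick_versions` is an illustrative name):

```python
# Sketch of the from/to version selection shown in the left-hand side above.
def pick_versions(dates, from_version=None, to_version=None):
    # Fall back to the second-newest and newest snapshots when the
    # requested versions are missing or unknown.
    from_index = dates.index(from_version) if from_version in dates else -2  # second newest
    to_index = dates.index(to_version) if to_version in dates else -1        # newest
    return dates[from_index], dates[to_index]

# With three snapshots the default comparison is second-newest -> newest
assert pick_versions(["100", "200", "300"]) == ("200", "300")
assert pick_versions(["100", "200", "300"], from_version="100") == ("100", "300")
```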
@@ -1222,7 +1168,8 @@ def changedetection_app(config=None, datastore_o=None):
         # These files should be in our subdirectory
         try:
             # set nocache, set content-type
-            response = make_response(send_from_directory(os.path.join(datastore_o.datastore_path, filename), "elements.json"))
+            watch_dir = datastore_o.datastore_path + "/" + filename
+            response = make_response(send_from_directory(filename="elements.json", directory=watch_dir, path=watch_dir + "/elements.json"))
             response.headers['Content-type'] = 'application/json'
             response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
             response.headers['Pragma'] = 'no-cache'
@@ -1310,8 +1257,6 @@ def changedetection_app(config=None, datastore_o=None):
         # Forced recheck will skip the 'skip if content is the same' rule (, 'reprocess_existing_data': True})))
         tag = request.args.get('tag')
         uuid = request.args.get('uuid')
-        with_errors = request.args.get('with_errors') == "1"

         i = 0
         running_uuids = []
@@ -1327,8 +1272,6 @@ def changedetection_app(config=None, datastore_o=None):
             # Items that have this current tag
             for watch_uuid, watch in datastore.data['watching'].items():
                 if tag in watch.get('tags', {}):
-                    if with_errors and not watch.get('last_error'):
-                        continue
                     if watch_uuid not in running_uuids and not datastore.data['watching'][watch_uuid]['paused']:
                         update_q.put(
                             queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': watch_uuid, 'skip_when_checksum_same': False})
@@ -1339,11 +1282,8 @@ def changedetection_app(config=None, datastore_o=None):
             # No tag, no uuid, add everything.
             for watch_uuid, watch in datastore.data['watching'].items():
                 if watch_uuid not in running_uuids and not datastore.data['watching'][watch_uuid]['paused']:
-                    if with_errors and not watch.get('last_error'):
-                        continue
                     update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': watch_uuid, 'skip_when_checksum_same': False}))
                     i += 1

         flash("{} watches queued for rechecking.".format(i))
         return redirect(url_for('index', tag=tag))
@@ -1491,7 +1431,6 @@ def changedetection_app(config=None, datastore_o=None):
         return redirect(url_for('index'))

     @app.route("/highlight_submit_ignore_url", methods=['POST'])
-    @login_optionally_required
     def highlight_submit_ignore_url():
         import re
         mode = request.form.get('mode')
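
Note: the `/settings/reset-api-key` route removed earlier in this file is a compact pattern for rotating an API token with the standard library. A minimal sketch of the same idea outside Flask:

```python
import secrets

# Sketch: regenerate an API access token, as the removed route above does.
def regenerate_api_key(application_settings: dict) -> str:
    new_key = secrets.token_hex(16)  # 16 random bytes -> 32 hex characters
    application_settings['api_access_token'] = new_key
    return new_key

settings = {'api_access_token': 'old-token'}
print(regenerate_api_key(settings))  # e.g. 'f3a91c...'
```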

View File

@@ -23,10 +23,8 @@
 from distutils.util import strtobool
 from flask import Blueprint, request, make_response
-import logging
 import os
-import re
+import logging
 from changedetectionio.store import ChangeDetectionStore
 from changedetectionio import login_optionally_required
@@ -46,7 +44,7 @@ def construct_blueprint(datastore: ChangeDetectionStore):
         # We keep the playwright session open for many minutes
-        keepalive_seconds = int(os.getenv('BROWSERSTEPS_MINUTES_KEEPALIVE', 10)) * 60
+        seconds_keepalive = int(os.getenv('BROWSERSTEPS_MINUTES_KEEPALIVE', 10)) * 60

         browsersteps_start_session = {'start_time': time.time()}
@@ -58,18 +56,16 @@ def construct_blueprint(datastore: ChangeDetectionStore):
         # Start the Playwright context, which is actually a nodejs sub-process and communicates over STDIN/STDOUT pipes
         io_interface_context = io_interface_context.start()

-        keepalive_ms = ((keepalive_seconds + 3) * 1000)
-        base_url = os.getenv('PLAYWRIGHT_DRIVER_URL', '')
-        a = "?" if not '?' in base_url else '&'
-        base_url += a + f"timeout={keepalive_ms}"
-
+        # keep it alive for 10 seconds more than we advertise, sometimes it helps to keep it shutting down cleanly
+        keepalive = "&timeout={}".format(((seconds_keepalive + 3) * 1000))
         try:
-            browsersteps_start_session['browser'] = io_interface_context.chromium.connect_over_cdp(base_url)
+            browsersteps_start_session['browser'] = io_interface_context.chromium.connect_over_cdp(
+                os.getenv('PLAYWRIGHT_DRIVER_URL', '') + keepalive)
         except Exception as e:
             if 'ECONNREFUSED' in str(e):
                 return make_response('Unable to start the Playwright Browser session, is it running?', 401)
             else:
-                # Other errors, bad URL syntax, bad reply etc
                 return make_response(str(e), 401)

         proxy_id = datastore.get_preferred_proxy_for_watch(uuid=watch_uuid)
@@ -122,31 +118,6 @@ def construct_blueprint(datastore: ChangeDetectionStore):
         print("Starting connection with playwright - done")
         return {'browsersteps_session_id': browsersteps_session_id}

-    @login_optionally_required
-    @browser_steps_blueprint.route("/browsersteps_image", methods=['GET'])
-    def browser_steps_fetch_screenshot_image():
-        from flask import (
-            make_response,
-            request,
-            send_from_directory,
-        )
-
-        uuid = request.args.get('uuid')
-        step_n = int(request.args.get('step_n'))
-        watch = datastore.data['watching'].get(uuid)
-        filename = f"step_before-{step_n}.jpeg" if request.args.get('type', '') == 'before' else f"step_{step_n}.jpeg"
-
-        if step_n and watch and os.path.isfile(os.path.join(watch.watch_data_dir, filename)):
-            response = make_response(send_from_directory(directory=watch.watch_data_dir, path=filename))
-            response.headers['Content-type'] = 'image/jpeg'
-            response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
-            response.headers['Pragma'] = 'no-cache'
-            response.headers['Expires'] = 0
-            return response
-        else:
-            return make_response('Unable to fetch image, is the URL correct? does the watch exist? does the step_type-n.jpeg exist?', 401)
-
     # A request for an action was received
     @login_optionally_required
     @browser_steps_blueprint.route("/browsersteps_update", methods=['POST'])
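
Note: the left-hand side of the keepalive hunk above chooses `?` or `&` depending on whether `PLAYWRIGHT_DRIVER_URL` already carries a query string, while the right-hand side always appends `&timeout=...`, which assumes one is present. A standalone sketch of the safer construction (`with_timeout` is an illustrative name):

```python
# Sketch: append a timeout parameter whether or not the URL already has a query string.
def with_timeout(base_url: str, keepalive_seconds: int) -> str:
    keepalive_ms = (keepalive_seconds + 3) * 1000  # a few seconds longer than advertised
    sep = '&' if '?' in base_url else '?'
    return f"{base_url}{sep}timeout={keepalive_ms}"

assert with_timeout("ws://playwright-chrome:3000", 600) == "ws://playwright-chrome:3000?timeout=603000"
assert with_timeout("ws://playwright-chrome:3000?stealth=1", 600) == "ws://playwright-chrome:3000?stealth=1&timeout=603000"
```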

View File

@@ -77,13 +77,13 @@ class steppable_browser_interface():
     def action_goto_url(self, selector=None, value=None):
         # self.page.set_viewport_size({"width": 1280, "height": 5000})
         now = time.time()
-        response = self.page.goto(value, timeout=0, wait_until='load')
-        # Should be the same as the puppeteer_fetch.js methods, means, load with no timeout set (skip timeout)
-        #and also wait for seconds ?
-        #await page.waitForTimeout(1000);
-        #await page.waitForTimeout(extra_wait_ms);
+        response = self.page.goto(value, timeout=0, wait_until='commit')
+        # Wait_until = commit
+        # - `'commit'` - consider operation to be finished when network response is received and the document started loading.
+        # Better to not use any smarts from Playwright and just wait an arbitrary number of seconds
+        # This seemed to solve nearly all 'TimeoutErrors'
         print("Time to goto URL ", time.time() - now)
-        return response

     def action_click_element_containing_text(self, selector=None, value=''):
         if not len(value.strip()):
@@ -99,8 +99,7 @@ class steppable_browser_interface():
         self.page.fill(selector, value, timeout=10 * 1000)

     def action_execute_js(self, selector, value):
-        response = self.page.evaluate(value)
-        return response
+        self.page.evaluate(value)

     def action_click_element(self, selector, value):
         print("Clicking element")
@@ -139,13 +138,13 @@ class steppable_browser_interface():
     def action_wait_for_text(self, selector, value):
         import json
         v = json.dumps(value)
-        self.page.wait_for_function(f'document.querySelector("body").innerText.includes({v});', timeout=30000)
+        self.page.wait_for_function(f'document.querySelector("body").innerText.includes({v});', timeout=90000)

     def action_wait_for_text_in_element(self, selector, value):
         import json
         s = json.dumps(selector)
         v = json.dumps(value)
-        self.page.wait_for_function(f'document.querySelector({s}).innerText.includes({v});', timeout=30000)
+        self.page.wait_for_function(f'document.querySelector({s}).innerText.includes({v});', timeout=90000)

     # @todo - in the future make some popout interface to capture what needs to be set
     # https://playwright.dev/python/docs/api/class-keyboard
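
Note: both sides of the `action_wait_for_text*` hunks pass the user-supplied selector and text through `json.dumps` before splicing them into the JavaScript predicate, so quotes and backslashes cannot break the expression; only the 30/90 second timeout differs between the two sides. A minimal sketch of the escaping pattern:

```python
import json

# Sketch: json.dumps emits a valid JS string literal, making interpolation safe.
def wait_for_text_expression(selector: str, value: str) -> str:
    s = json.dumps(selector)
    v = json.dumps(value)
    return f'document.querySelector({s}).innerText.includes({v});'

# A selector and text containing quotes are escaped rather than breaking the JS
print(wait_for_text_expression('a[title="x"]', "it's here"))
```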

View File

@@ -40,8 +40,8 @@ def construct_blueprint(datastore: ChangeDetectionStore):
             contents = ''
             now = time.time()
             try:
-                update_handler = text_json_diff.perform_site_check(datastore=datastore, watch_uuid=uuid)
-                update_handler.call_browser()
+                update_handler = text_json_diff.perform_site_check(datastore=datastore)
+                changed_detected, update_obj, contents = update_handler.run(uuid, preferred_proxy=preferred_proxy, skip_when_checksum_same=False)
             # title, size is len contents not len xfer
             except content_fetcher.Non200ErrorCodeReceived as e:
                 if e.status_code == 404:
@@ -57,11 +57,9 @@ def construct_blueprint(datastore: ChangeDetectionStore):
                     status.update({'status': 'ERROR OTHER', 'length': len(contents), 'text': f"Got empty reply with code {e.status_code} - Access denied"})
                 else:
                     status.update({'status': 'ERROR OTHER', 'length': len(contents) if contents else 0, 'text': f"Empty reply with code {e.status_code}, needs chrome?"})
-            except content_fetcher.ReplyWithContentButNoText as e:
-                txt = f"Got reply but with no content - Status code {e.status_code} - It's possible that the filters were found, but contained no usable text (or contained only an image)."
-                status.update({'status': 'ERROR', 'text': txt})
             except Exception as e:
-                status.update({'status': 'ERROR OTHER', 'length': len(contents) if contents else 0, 'text': 'Error: '+type(e).__name__+str(e)})
+                status.update({'status': 'ERROR OTHER', 'length': len(contents) if contents else 0, 'text': 'Error: '+str(e)})
             else:
                 status.update({'status': 'OK', 'length': len(contents), 'text': ''})
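
Note: the left-hand side of the last hunk prefixes the reported error with the exception class name via `type(e).__name__`, which distinguishes, say, a timeout from a decode failure in the one-line proxy-check status. A small sketch of the difference:

```python
# Sketch: including the exception class name makes a one-line status more diagnostic.
def status_text(e: Exception) -> str:
    # Matches the diff: class name and message are concatenated without a space
    return 'Error: ' + type(e).__name__ + str(e)

try:
    int("not a number")
except Exception as e:
    print(status_text(e))  # Error: ValueErrorinvalid literal for int() with base 10: 'not a number'
```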

View File

@@ -1,15 +1,12 @@
from abc import abstractmethod
from distutils.util import strtobool
from urllib.parse import urlparse
import chardet
import hashlib import hashlib
from abc import abstractmethod
import chardet
import json import json
import logging import logging
import os import os
import requests import requests
import sys import sys
import time import time
import urllib.parse
visualselector_xpath_selectors = 'div,span,form,table,tbody,tr,td,a,p,ul,li,h1,h2,h3,h4, header, footer, section, article, aside, details, main, nav, section, summary' visualselector_xpath_selectors = 'div,span,form,table,tbody,tr,td,a,p,ul,li,h1,h2,h3,h4, header, footer, section, article, aside, details, main, nav, section, summary'
@@ -80,13 +77,11 @@ class ScreenshotUnavailable(Exception):
class ReplyWithContentButNoText(Exception): class ReplyWithContentButNoText(Exception):
def __init__(self, status_code, url, screenshot=None, has_filters=False, html_content=''): def __init__(self, status_code, url, screenshot=None):
# Set this so we can use it in other parts of the app # Set this so we can use it in other parts of the app
self.status_code = status_code self.status_code = status_code
self.url = url self.url = url
self.screenshot = screenshot self.screenshot = screenshot
self.has_filters = has_filters
self.html_content = html_content
return return
@@ -159,16 +154,6 @@ class Fetcher():
""" """
return {k.lower(): v for k, v in self.headers.items()} return {k.lower(): v for k, v in self.headers.items()}
def browser_steps_get_valid_steps(self):
if self.browser_steps is not None and len(self.browser_steps):
valid_steps = filter(
lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'),
self.browser_steps)
return valid_steps
return None
def iterate_browser_steps(self): def iterate_browser_steps(self):
from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface
from playwright._impl._api_types import TimeoutError from playwright._impl._api_types import TimeoutError
@@ -180,7 +165,10 @@ class Fetcher():
if self.browser_steps is not None and len(self.browser_steps): if self.browser_steps is not None and len(self.browser_steps):
interface = steppable_browser_interface() interface = steppable_browser_interface()
interface.page = self.page interface.page = self.page
valid_steps = self.browser_steps_get_valid_steps()
valid_steps = filter(
lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'),
self.browser_steps)
for step in valid_steps: for step in valid_steps:
step_n += 1 step_n += 1
@@ -276,6 +264,7 @@ class base_html_playwright(Fetcher):
if self.proxy: if self.proxy:
# Playwright needs separate username and password values # Playwright needs separate username and password values
from urllib.parse import urlparse
parsed = urlparse(self.proxy.get('server')) parsed = urlparse(self.proxy.get('server'))
if parsed.username: if parsed.username:
self.proxy['username'] = parsed.username self.proxy['username'] = parsed.username
@@ -330,11 +319,13 @@ class base_html_playwright(Fetcher):
# Append proxy connect string # Append proxy connect string
if self.proxy: if self.proxy:
import urllib.parse
# Remove username/password if it exists in the URL or you will receive "ERR_NO_SUPPORTED_PROXIES" error # Remove username/password if it exists in the URL or you will receive "ERR_NO_SUPPORTED_PROXIES" error
# Actual authentication handled by Puppeteer/node # Actual authentication handled by Puppeteer/node
o = urlparse(self.proxy.get('server')) o = urlparse(self.proxy.get('server'))
proxy_url = urllib.parse.quote(o._replace(netloc="{}:{}".format(o.hostname, o.port)).geturl()) proxy_url = urllib.parse.quote(o._replace(netloc="{}:{}".format(o.hostname, o.port)).geturl())
browserless_function_url = f"{browserless_function_url}&--proxy-server={proxy_url}" browserless_function_url = f"{browserless_function_url}&--proxy-server={proxy_url}&dumpio=true"
try: try:
amp = '&' if '?' in browserless_function_url else '?' amp = '&' if '?' in browserless_function_url else '?'
@@ -352,9 +343,9 @@ class base_html_playwright(Fetcher):
'req_headers': request_headers, 'req_headers': request_headers,
'screenshot_quality': int(os.getenv("PLAYWRIGHT_SCREENSHOT_QUALITY", 72)), 'screenshot_quality': int(os.getenv("PLAYWRIGHT_SCREENSHOT_QUALITY", 72)),
'url': url, 'url': url,
'user_agent': {k.lower(): v for k, v in request_headers.items()}.get('user-agent', None), 'user_agent': request_headers.get('User-Agent', 'Mozilla/5.0'),
'proxy_username': self.proxy.get('username', '') if self.proxy else False, 'proxy_username': self.proxy.get('username','') if self.proxy else False,
'proxy_password': self.proxy.get('password', '') if self.proxy and self.proxy.get('username') else False, 'proxy_password': self.proxy.get('password', '') if self.proxy else False,
'no_cache_list': [ 'no_cache_list': [
'twitter', 'twitter',
'.pdf' '.pdf'
@@ -419,8 +410,12 @@ class base_html_playwright(Fetcher):
is_binary=False): is_binary=False):
# For now, USE_EXPERIMENTAL_PUPPETEER_FETCH is not supported by watches with BrowserSteps (for now!) # For now, USE_EXPERIMENTAL_PUPPETEER_FETCH is not supported by watches with BrowserSteps (for now!)
if not self.browser_steps and os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH'): has_browser_steps = self.browser_steps and list(filter(
if strtobool(os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH')): lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'),
self.browser_steps))
if not has_browser_steps:
if os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH'):
# Temporary backup solution until we rewrite the playwright code # Temporary backup solution until we rewrite the playwright code
return self.run_fetch_browserless_puppeteer( return self.run_fetch_browserless_puppeteer(
url, url,
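For reference, a small sketch of the step-filtering test used above (the sample step dicts are made up); only rows with a real operation count as browser steps:

steps = [
    {'operation': 'Goto site'},     # implicit first step, never counts
    {'operation': 'Choose one'},    # unconfigured row, never counts
    {'operation': 'Click element'}, # a real step
]
meaningful = list(filter(
    lambda s: s['operation'] and len(s['operation']) and s['operation'] not in ('Choose one', 'Goto site'),
    steps))
print(bool(meaningful))  # True - only 'Click element' survives the filter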
@@ -437,7 +432,6 @@ class base_html_playwright(Fetcher):
self.delete_browser_steps_screenshots() self.delete_browser_steps_screenshots()
response = None response = None
with sync_playwright() as p: with sync_playwright() as p:
browser_type = getattr(p, self.browser_type) browser_type = getattr(p, self.browser_type)
@@ -446,13 +440,10 @@ class base_html_playwright(Fetcher):
# 60,000 connection timeout only # 60,000 connection timeout only
browser = browser_type.connect_over_cdp(self.command_executor, timeout=60000) browser = browser_type.connect_over_cdp(self.command_executor, timeout=60000)
# SOCKS5 with authentication is not supported (yet)
# https://github.com/microsoft/playwright/issues/10567
# Set user agent to prevent Cloudflare from blocking the browser # Set user agent to prevent Cloudflare from blocking the browser
# Use the default one configured in the App.py model that's passed from fetch_site_status.py # Use the default one configured in the App.py model that's passed from fetch_site_status.py
context = browser.new_context( context = browser.new_context(
user_agent={k.lower(): v for k, v in request_headers.items()}.get('user-agent', None), user_agent=request_headers.get('User-Agent', 'Mozilla/5.0'),
proxy=self.proxy, proxy=self.proxy,
# This is needed to enable JavaScript execution on GitHub and others # This is needed to enable JavaScript execution on GitHub and others
bypass_csp=True, bypass_csp=True,
@@ -466,26 +457,40 @@ class base_html_playwright(Fetcher):
if len(request_headers): if len(request_headers):
context.set_extra_http_headers(request_headers) context.set_extra_http_headers(request_headers)
# Listen for all console events and handle errors self.page.set_default_navigation_timeout(90000)
self.page.on("console", lambda msg: print(f"Playwright console: Watch URL: {url} {msg.type}: {msg.text} {msg.args}")) self.page.set_default_timeout(90000)
# Re-use as much code from browser steps as possible so its the same # Listen for all console events and handle errors
from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface self.page.on("console", lambda msg: print(f"Playwright console: Watch URL: {url} {msg.type}: {msg.text} {msg.args}"))
browsersteps_interface = steppable_browser_interface()
browsersteps_interface.page = self.page
response = browsersteps_interface.action_goto_url(value=url) # Goto page
self.headers = response.all_headers() try:
# Wait_until = commit
# - `'commit'` - consider operation to be finished when network response is received and the document started loading.
# Better to not use any smarts from Playwright and just wait an arbitrary number of seconds
# This seemed to solve nearly all 'TimeoutErrors'
response = self.page.goto(url, wait_until='commit')
except playwright._impl._api_types.Error as e:
# Retry once - https://github.com/browserless/chrome/issues/2485
# Sometimes errors related to invalid cert's and other can be random
print("Content Fetcher > retrying request got error - ", str(e))
time.sleep(1)
response = self.page.goto(url, wait_until='commit')
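The retry-once pattern described in the comments, reduced to a sketch (goto_with_retry is a hypothetical helper; page is a Playwright sync-API Page):

import time

def goto_with_retry(page, url):
    try:
        # 'commit' resolves as soon as the response starts arriving
        return page.goto(url, wait_until='commit')
    except Exception as e:
        # Cert problems and similar browserless errors are often one-off; retry once
        print("Content Fetcher > retrying request got error -", str(e))
        time.sleep(1)
        return page.goto(url, wait_until='commit')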
if response is None: except Exception as e:
print("Content Fetcher > Other exception when page.goto", str(e))
context.close() context.close()
browser.close() browser.close()
print("Content Fetcher > Response object was none") raise PageUnloadable(url=url, status_code=None, message=str(e))
raise EmptyReply(url=url, status_code=None)
# Execute any browser steps
try: try:
extra_wait = int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay
self.page.wait_for_timeout(extra_wait * 1000)
if self.webdriver_js_execute_code is not None and len(self.webdriver_js_execute_code): if self.webdriver_js_execute_code is not None and len(self.webdriver_js_execute_code):
browsersteps_interface.action_execute_js(value=self.webdriver_js_execute_code, selector=None) self.page.evaluate(self.webdriver_js_execute_code)
except playwright._impl._api_types.TimeoutError as e: except playwright._impl._api_types.TimeoutError as e:
context.close() context.close()
browser.close() browser.close()
@@ -497,26 +502,28 @@ class base_html_playwright(Fetcher):
browser.close() browser.close()
raise PageUnloadable(url=url, status_code=None, message=str(e)) raise PageUnloadable(url=url, status_code=None, message=str(e))
if response is None:
context.close()
browser.close()
print("Content Fetcher > Response object was none")
raise EmptyReply(url=url, status_code=None)
# Run Browser Steps here
self.iterate_browser_steps()
extra_wait = int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay extra_wait = int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay
self.page.wait_for_timeout(extra_wait * 1000) time.sleep(extra_wait)
self.content = self.page.content()
self.status_code = response.status self.status_code = response.status
if self.status_code != 200 and not ignore_status_codes:
raise Non200ErrorCodeReceived(url=url, status_code=self.status_code)
if len(self.page.content().strip()) == 0: if len(self.page.content().strip()) == 0:
context.close() context.close()
browser.close() browser.close()
print("Content Fetcher > Content was empty") print("Content Fetcher > Content was empty")
raise EmptyReply(url=url, status_code=response.status) raise EmptyReply(url=url, status_code=response.status)
# Run Browser Steps here self.status_code = response.status
if self.browser_steps_get_valid_steps(): self.headers = response.all_headers()
self.iterate_browser_steps()
self.page.wait_for_timeout(extra_wait * 1000)
# So we can find an element on the page where its selector was entered manually (maybe not xPath etc) # So we can find an element on the page where its selector was entered manually (maybe not xPath etc)
if current_include_filters is not None: if current_include_filters is not None:
@@ -528,7 +535,6 @@ class base_html_playwright(Fetcher):
"async () => {" + self.xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors) + "}") "async () => {" + self.xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors) + "}")
self.instock_data = self.page.evaluate("async () => {" + self.instock_data_js + "}") self.instock_data = self.page.evaluate("async () => {" + self.instock_data_js + "}")
self.content = self.page.content()
# Bug 3 in Playwright screenshot handling # Bug 3 in Playwright screenshot handling
# Some bug where it gives the wrong screenshot size, but making a request with the clip set first seems to solve it # Some bug where it gives the wrong screenshot size, but making a request with the clip set first seems to solve it
# JPEG is better here because the screenshots can be very very large # JPEG is better here because the screenshots can be very very large
@@ -543,7 +549,7 @@ class base_html_playwright(Fetcher):
except Exception as e: except Exception as e:
context.close() context.close()
browser.close() browser.close()
raise ScreenshotUnavailable(url=url, status_code=response.status_code) raise ScreenshotUnavailable(url=url, status_code=None)
context.close() context.close()
browser.close() browser.close()
@@ -602,17 +608,15 @@ class base_html_webdriver(Fetcher):
is_binary=False): is_binary=False):
from selenium import webdriver from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import WebDriverException from selenium.common.exceptions import WebDriverException
# request_body, request_method unused for now, until some magic in the future happens. # request_body, request_method unused for now, until some magic in the future happens.
options = ChromeOptions() # check env for WEBDRIVER_URL
if self.proxy:
options.proxy = self.proxy
self.driver = webdriver.Remote( self.driver = webdriver.Remote(
command_executor=self.command_executor, command_executor=self.command_executor,
options=options) desired_capabilities=DesiredCapabilities.CHROME,
proxy=self.proxy)
try: try:
self.driver.get(url) self.driver.get(url)
@@ -644,11 +648,11 @@ class base_html_webdriver(Fetcher):
# Does the connection to the webdriver work? run a test connection. # Does the connection to the webdriver work? run a test connection.
def is_ready(self): def is_ready(self):
from selenium import webdriver from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
self.driver = webdriver.Remote( self.driver = webdriver.Remote(
command_executor=self.command_executor, command_executor=self.command_executor,
options=ChromeOptions()) desired_capabilities=DesiredCapabilities.CHROME)
# driver.quit() seems to cause better exceptions # driver.quit() seems to cause better exceptions
self.quit() self.quit()
@@ -667,7 +671,6 @@ class html_requests(Fetcher):
fetcher_description = "Basic fast Plaintext/HTTP Client" fetcher_description = "Basic fast Plaintext/HTTP Client"
def __init__(self, proxy_override=None): def __init__(self, proxy_override=None):
super().__init__()
self.proxy_override = proxy_override self.proxy_override = proxy_override
def run(self, def run(self,
@@ -681,17 +684,13 @@ class html_requests(Fetcher):
is_binary=False): is_binary=False):
# Make requests use a more modern-looking user-agent # Make requests use a more modern-looking user-agent
if not {k.lower(): v for k, v in request_headers.items()}.get('user-agent', None): if not 'User-Agent' in request_headers:
request_headers['User-Agent'] = os.getenv("DEFAULT_SETTINGS_HEADERS_USERAGENT", request_headers['User-Agent'] = os.getenv("DEFAULT_SETTINGS_HEADERS_USERAGENT",
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36') 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36')
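The dict comprehension above is just a case-insensitive header lookup; a sketch with made-up headers:

request_headers = {'User-Agent': 'my-agent/1.0', 'Accept': '*/*'}
ua = {k.lower(): v for k, v in request_headers.items()}.get('user-agent')
print(ua)  # my-agent/1.0 - found regardless of the original key's casing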
proxies = {} proxies = {}
# Allows override the proxy on a per-request basis # Allows override the proxy on a per-request basis
# https://requests.readthedocs.io/en/latest/user/advanced/#socks
# Should also work with `socks5://user:pass@host:port` type syntax.
if self.proxy_override: if self.proxy_override:
proxies = {'http': self.proxy_override, 'https': self.proxy_override, 'ftp': self.proxy_override} proxies = {'http': self.proxy_override, 'https': self.proxy_override, 'ftp': self.proxy_override}
else: else:

View File

@@ -15,20 +15,14 @@ from wtforms import (
validators, validators,
widgets widgets
) )
from flask_wtf.file import FileField, FileAllowed
from wtforms.fields import FieldList from wtforms.fields import FieldList
from wtforms.validators import ValidationError from wtforms.validators import ValidationError
from validators.url import url as url_validator
# default # default
# each select <option data-enabled="enabled-0-0" # each select <option data-enabled="enabled-0-0"
from changedetectionio.blueprint.browser_steps.browser_steps import browser_step_ui_config from changedetectionio.blueprint.browser_steps.browser_steps import browser_step_ui_config
from changedetectionio import content_fetcher, html_tools from changedetectionio import content_fetcher
from changedetectionio.notification import ( from changedetectionio.notification import (
valid_notification_formats, valid_notification_formats,
) )
@@ -46,7 +40,7 @@ valid_method = {
} }
default_method = 'GET' default_method = 'GET'
allow_simplehost = not strtobool(os.getenv('BLOCK_SIMPLEHOSTS', 'False'))
class StringListField(StringField): class StringListField(StringField):
widget = widgets.TextArea() widget = widgets.TextArea()
@@ -266,23 +260,19 @@ class validateURL(object):
self.message = message self.message = message
def __call__(self, form, field): def __call__(self, form, field):
# This should raise a ValidationError() or not import validators
validate_url(field.data) # If hosts that only contain alphanumerics are allowed ("localhost" for example)
allow_simplehost = not strtobool(os.getenv('BLOCK_SIMPLEHOSTS', 'False'))
try:
validators.url(field.data.strip(), simple_host=allow_simplehost)
except validators.ValidationFailure:
message = field.gettext('\'%s\' is not a valid URL.' % (field.data.strip()))
raise ValidationError(message)
def validate_url(test_url): from .model.Watch import is_safe_url
# If hosts that only contain alphanumerics are allowed ("localhost" for example) if not is_safe_url(field.data):
try: raise ValidationError('Watch protocol is not permitted by SAFE_PROTOCOL_REGEX')
url_validator(test_url, simple_host=allow_simplehost)
except validators.ValidationError:
#@todo check for xss
message = f"'{test_url}' is not a valid URL."
# This should be wtforms.validators.
raise ValidationError(message)
from .model.Watch import is_safe_url
if not is_safe_url(test_url):
# This should be wtforms.validators.
raise ValidationError('Watch protocol is not permitted by SAFE_PROTOCOL_REGEX or incorrect URL format')
class ValidateListRegex(object): class ValidateListRegex(object):
""" """
@@ -294,10 +284,11 @@ class ValidateListRegex(object):
def __call__(self, form, field): def __call__(self, form, field):
for line in field.data: for line in field.data:
if re.search(html_tools.PERL_STYLE_REGEX, line, re.IGNORECASE): if line[0] == '/' and line[-1] == '/':
# Because internally we dont wrap in /
line = line.strip('/')
try: try:
regex = html_tools.perl_style_slash_enclosed_regex_to_options(line) re.compile(line)
re.compile(regex)
except re.error: except re.error:
message = field.gettext('RegEx \'%s\' is not a valid regular expression.') message = field.gettext('RegEx \'%s\' is not a valid regular expression.')
raise ValidationError(message % (line)) raise ValidationError(message % (line))
@@ -407,9 +398,6 @@ class importForm(Form):
from . import processors from . import processors
processor = RadioField(u'Processor', choices=processors.available_processors(), default="text_json_diff") processor = RadioField(u'Processor', choices=processors.available_processors(), default="text_json_diff")
urls = TextAreaField('URLs') urls = TextAreaField('URLs')
xlsx_file = FileField('Upload .xlsx file', validators=[FileAllowed(['xlsx'], 'Must be .xlsx file!')])
file_mapping = SelectField('File mapping', [validators.DataRequired()], choices={('wachete', 'Wachete mapping'), ('custom','Custom mapping')})
class SingleBrowserStep(Form): class SingleBrowserStep(Form):
@@ -493,7 +481,7 @@ class SingleExtraProxy(Form):
# maybe better to set some <script>var.. # maybe better to set some <script>var..
proxy_name = StringField('Name', [validators.Optional()], render_kw={"placeholder": "Name"}) proxy_name = StringField('Name', [validators.Optional()], render_kw={"placeholder": "Name"})
proxy_url = StringField('Proxy URL', [validators.Optional()], render_kw={"placeholder": "socks5:// or regular proxy http://user:pass@...:3128", "size":50}) proxy_url = StringField('Proxy URL', [validators.Optional()], render_kw={"placeholder": "http://user:pass@...:3128", "size":50})
# @todo do the validation here instead # @todo do the validation here instead
# datastore.data['settings']['requests'].. # datastore.data['settings']['requests']..

View File

@@ -1,44 +1,23 @@
from bs4 import BeautifulSoup from bs4 import BeautifulSoup
from inscriptis import get_text from inscriptis import get_text
from inscriptis.model.config import ParserConfig
from jsonpath_ng.ext import parse from jsonpath_ng.ext import parse
from typing import List from typing import List
from inscriptis.css_profiles import CSS_PROFILES, HtmlElement
from inscriptis.html_properties import Display
from inscriptis.model.config import ParserConfig
from xml.sax.saxutils import escape as xml_escape
import json import json
import re import re
# HTML added to be sure each result matching a filter (.example) gets converted to a new line by Inscriptis # HTML added to be sure each result matching a filter (.example) gets converted to a new line by Inscriptis
TEXT_FILTER_LIST_LINE_SUFFIX = "<br>" TEXT_FILTER_LIST_LINE_SUFFIX = "<br>"
PERL_STYLE_REGEX = r'^/(.*?)/([a-z]*)?$'
# 'price' , 'lowPrice', 'highPrice' are usually under here # 'price' , 'lowPrice', 'highPrice' are usually under here
# All of those may or may not appear on different websites - I didn't find a way to do case-insensitive searching here # all of those may or may not appear on different websites
LD_JSON_PRODUCT_OFFER_SELECTORS = ["json:$..offers", "json:$..Offers"] LD_JSON_PRODUCT_OFFER_SELECTOR = "json:$..offers"
class JSONNotFound(ValueError): class JSONNotFound(ValueError):
def __init__(self, msg): def __init__(self, msg):
ValueError.__init__(self, msg) ValueError.__init__(self, msg)
# Doesn't look like Python supports forward-slash auto-enclosure in re.findall
# So convert it to inline flag "(?i)foobar" type configuration
def perl_style_slash_enclosed_regex_to_options(regex):
res = re.search(PERL_STYLE_REGEX, regex, re.IGNORECASE)
if res:
flags = res.group(2) if res.group(2) else 'i'
regex = f"(?{flags}){res.group(1)}"
else:
# Fall back to just ignorecase as an option
regex = f"(?i){regex}"
return regex
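Usage sketch for the conversion above (the example patterns are illustrative, and assume the function is in scope):

import re

print(perl_style_slash_enclosed_regex_to_options(r'/foo\d+/i'))  # (?i)foo\d+
print(perl_style_slash_enclosed_regex_to_options('plain text'))  # (?i)plain text
# The inline-flag form compiles directly:
assert re.search(perl_style_slash_enclosed_regex_to_options('/HELLO/i'), 'say hello')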
# Given a CSS Rule, and a blob of HTML, return the blob of HTML that matches # Given a CSS Rule, and a blob of HTML, return the blob of HTML that matches
def include_filters(include_filters, html_content, append_pretty_line_formatting=False): def include_filters(include_filters, html_content, append_pretty_line_formatting=False):
soup = BeautifulSoup(html_content, "html.parser") soup = BeautifulSoup(html_content, "html.parser")
@@ -71,15 +50,10 @@ def element_removal(selectors: List[str], html_content):
# Return str UTF-8 of matched rules # Return str UTF-8 of matched rules
def xpath_filter(xpath_filter, html_content, append_pretty_line_formatting=False, is_rss=False): def xpath_filter(xpath_filter, html_content, append_pretty_line_formatting=False):
from lxml import etree, html from lxml import etree, html
parser = None tree = html.fromstring(bytes(html_content, encoding='utf-8'))
if is_rss:
# So that we can keep CDATA for cdata_in_document_to_text() to process
parser = etree.XMLParser(strip_cdata=False)
tree = html.fromstring(bytes(html_content, encoding='utf-8'), parser=parser)
html_block = "" html_block = ""
r = tree.xpath(xpath_filter.strip(), namespaces={'re': 'http://exslt.org/regular-expressions'}) r = tree.xpath(xpath_filter.strip(), namespaces={'re': 'http://exslt.org/regular-expressions'})
@@ -102,6 +76,7 @@ def xpath_filter(xpath_filter, html_content, append_pretty_line_formatting=False
return html_block return html_block
# Extract/find element # Extract/find element
def extract_element(find='title', html_content=''): def extract_element(find='title', html_content=''):
@@ -169,6 +144,7 @@ def extract_json_as_string(content, json_filter, ensure_is_ldjson_info_type=None
# Foreach <script json></script> blob.. just return the first that matches json_filter # Foreach <script json></script> blob.. just return the first that matches json_filter
# As a last resort, try to parse the whole <body> # As a last resort, try to parse the whole <body>
s = []
soup = BeautifulSoup(content, 'html.parser') soup = BeautifulSoup(content, 'html.parser')
if ensure_is_ldjson_info_type: if ensure_is_ldjson_info_type:
@@ -194,24 +170,13 @@ def extract_json_as_string(content, json_filter, ensure_is_ldjson_info_type=None
for json_data in bs_jsons: for json_data in bs_jsons:
stripped_text_from_html = _parse_json(json_data, json_filter) stripped_text_from_html = _parse_json(json_data, json_filter)
if ensure_is_ldjson_info_type: if ensure_is_ldjson_info_type:
# Could sometimes be list, string or something else random # Could sometimes be list, string or something else random
if isinstance(json_data, dict): if isinstance(json_data, dict):
# If it has LD JSON 'key' @type, and @type is 'product', and something was found for the search # If it has LD JSON 'key' @type, and @type is 'product', and something was found for the search
# (Some sites have multiple of the same ld+json @type='product', but some have the review part, some have the 'price' part) # (Some sites have multiple of the same ld+json @type='product', but some have the review part, some have the 'price' part)
# @type could also be a list (Product, SubType) if json_data.get('@type', False) and json_data.get('@type','').lower() == ensure_is_ldjson_info_type.lower() and stripped_text_from_html:
# LD_JSON auto-extract also requires some content PLUS the ldjson to be present break
# 1833 - could be either str or dict, should not be anything else
if json_data.get('@type') and stripped_text_from_html:
try:
if json_data.get('@type') == str or json_data.get('@type') == dict:
types = [json_data.get('@type')] if isinstance(json_data.get('@type'), str) else json_data.get('@type')
if ensure_is_ldjson_info_type.lower() in [x.lower().strip() for x in types]:
break
except:
continue
elif stripped_text_from_html: elif stripped_text_from_html:
break break
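A sketch of the @type normalisation the new branch performs (sample values are made up): a bare string is wrapped into a single-item list so both shapes compare the same way:

for t in ('Product', ['Product', 'SubType']):
    types = [t] if isinstance(t, str) else t
    print('product' in [x.lower().strip() for x in types])  # True, then True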
@@ -230,14 +195,23 @@ def strip_ignore_text(content, wordlist, mode="content"):
output = [] output = []
ignore_text = [] ignore_text = []
ignore_regex = [] ignore_regex = []
ignored_line_numbers = [] ignored_line_numbers = []
for k in wordlist: for k in wordlist:
# Is it a regex? # Is it a regex?
res = re.search(PERL_STYLE_REGEX, k, re.IGNORECASE) x = re.search('^\/(.*)\/(.*)', k.strip())
if res: if x:
ignore_regex.append(re.compile(perl_style_slash_enclosed_regex_to_options(k))) # Starts with / but doesn't look like a regex
p = x.group(1)
try:
# @Todo Python regex options can go before the regex str, but not really many of the options apply on a per-line basis
ignore_regex.append(re.compile(rf"{p}", re.IGNORECASE))
except Exception as e:
# Badly formed regex, treat as text
ignore_text.append(k.strip())
else: else:
# Had a / but doesn't work as regex
ignore_text.append(k.strip()) ignore_text.append(k.strip())
for line in content.splitlines(): for line in content.splitlines():
@@ -267,15 +241,8 @@ def strip_ignore_text(content, wordlist, mode="content"):
return "\n".encode('utf8').join(output) return "\n".encode('utf8').join(output)
def cdata_in_document_to_text(html_content: str, render_anchor_tag_content=False) -> str:
pattern = '<!\[CDATA\[(\s*(?:.(?<!\]\]>)\s*)*)\]\]>'
def repl(m):
text = m.group(1)
return xml_escape(html_to_text(html_content=text)).strip()
return re.sub(pattern, repl, html_content) def html_to_text(html_content: str, render_anchor_tag_content=False) -> str:
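A sketch of what cdata_in_document_to_text() matches, using a made-up RSS fragment; the tempered pattern stops at the first ]]> so the inner markup is captured intact:

import re

sample = "<description><![CDATA[<b>Hello</b> world]]></description>"
pattern = r'<!\[CDATA\[(\s*(?:.(?<!\]\]>)\s*)*)\]\]>'
m = re.search(pattern, sample)
print(m.group(1))  # <b>Hello</b> world - recovered from inside the CDATA block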
def html_to_text(html_content: str, render_anchor_tag_content=False, is_rss=False) -> str:
"""Converts html string to a string with just the text. If ignoring """Converts html string to a string with just the text. If ignoring
rendering anchor tag content is enabled, anchor tag content is also rendering anchor tag content is enabled, anchor tag content is also
included in the text included in the text
@@ -291,21 +258,16 @@ def html_to_text(html_content: str, render_anchor_tag_content=False, is_rss=Fals
# if anchor tag content flag is set to True define a config for # if anchor tag content flag is set to True define a config for
# extracting this content # extracting this content
if render_anchor_tag_content: if render_anchor_tag_content:
parser_config = ParserConfig( parser_config = ParserConfig(
annotation_rules={"a": ["hyperlink"]}, annotation_rules={"a": ["hyperlink"]}, display_links=True
display_links=True
) )
# otherwise set config to None/default
# otherwise set config to None
else: else:
parser_config = None parser_config = None
# RSS Mode - Inscriptis will treat `title` as something else. # get text and annotations via inscriptis
# Make it a regular block-display element (//item/title)
# This is a bit of a hack - the real way is to use XSLT to convert it to HTML #1874
if is_rss:
html_content = re.sub(r'<title([\s>])', r'<h1\1', html_content)
html_content = re.sub(r'</title>', r'</h1>', html_content)
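A sketch of that title rewrite on a made-up RSS snippet; <title> becomes <h1>, which Inscriptis renders as a block element:

import re

rss = "<item><title>New release v1.2</title></item>"
rss = re.sub(r'<title([\s>])', r'<h1\1', rss)
rss = re.sub(r'</title>', r'</h1>', rss)
print(rss)  # <item><h1>New release v1.2</h1></item>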
text_content = get_text(html_content, config=parser_config) text_content = get_text(html_content, config=parser_config)
return text_content return text_content
@@ -313,18 +275,9 @@ def html_to_text(html_content: str, render_anchor_tag_content=False, is_rss=Fals
# Does LD+JSON exist with a @type=='product' and a .price set anywhere? # Does LD+JSON exist with a @type=='product' and a .price set anywhere?
def has_ldjson_product_info(content): def has_ldjson_product_info(content):
pricing_data = ''
try: try:
if not 'application/ld+json' in content: pricing_data = extract_json_as_string(content=content, json_filter=LD_JSON_PRODUCT_OFFER_SELECTOR, ensure_is_ldjson_info_type="product")
return False except JSONNotFound as e:
for filter in LD_JSON_PRODUCT_OFFER_SELECTORS:
pricing_data += extract_json_as_string(content=content,
json_filter=filter,
ensure_is_ldjson_info_type="product")
except Exception as e:
# Totally fine # Totally fine
return False return False
x=bool(pricing_data) x=bool(pricing_data)

View File

@@ -1,9 +1,6 @@
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
import time import time
import validators import validators
from wtforms import ValidationError
from changedetectionio.forms import validate_url
class Importer(): class Importer():
@@ -15,7 +12,6 @@ class Importer():
self.new_uuids = [] self.new_uuids = []
self.good = 0 self.good = 0
self.remaining_data = [] self.remaining_data = []
self.import_profile = None
@abstractmethod @abstractmethod
def run(self, def run(self,
@@ -136,167 +132,3 @@ class import_distill_io_json(Importer):
good += 1 good += 1
flash("{} Imported from Distill.io in {:.2f}s, {} Skipped.".format(len(self.new_uuids), time.time() - now, len(self.remaining_data))) flash("{} Imported from Distill.io in {:.2f}s, {} Skipped.".format(len(self.new_uuids), time.time() - now, len(self.remaining_data)))
class import_xlsx_wachete(Importer):
def run(self,
data,
flash,
datastore,
):
good = 0
now = time.time()
self.new_uuids = []
from openpyxl import load_workbook
try:
wb = load_workbook(data)
except Exception as e:
# @todo correct except
flash("Unable to read export XLSX file, something wrong with the file?", 'error')
return
row_id = 2
for row in wb.active.iter_rows(min_row=row_id):
try:
extras = {}
data = {}
for cell in row:
if not cell.value:
continue
column_title = wb.active.cell(row=1, column=cell.column).value.strip().lower()
data[column_title] = cell.value
# Forced switch to webdriver/playwright/etc
dynamic_wachet = str(data.get('dynamic wachet', '')).strip().lower() # Convert bool to str to cover all cases
# LibreOffice and others can have it as =FALSE() =TRUE(), or bool(true)
if 'true' in dynamic_wachet or dynamic_wachet == '1':
extras['fetch_backend'] = 'html_webdriver'
elif 'false' in dynamic_wachet or dynamic_wachet == '0':
extras['fetch_backend'] = 'html_requests'
if data.get('xpath'):
# @todo split by || ?
extras['include_filters'] = [data.get('xpath')]
if data.get('name'):
extras['title'] = data.get('name').strip()
if data.get('interval (min)'):
minutes = int(data.get('interval (min)'))
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
weeks, days = divmod(days, 7)
extras['time_between_check'] = {'weeks': weeks, 'days': days, 'hours': hours, 'minutes': minutes, 'seconds': 0}
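The divmod chain above decomposes a minute count into the time_between_check shape; a quick sketch with an assumed cell value:

minutes = 10000  # hypothetical 'interval (min)' cell
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
weeks, days = divmod(days, 7)
print({'weeks': weeks, 'days': days, 'hours': hours, 'minutes': minutes, 'seconds': 0})
# {'weeks': 0, 'days': 6, 'hours': 22, 'minutes': 40, 'seconds': 0}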
# At minimum a URL is required.
if data.get('url'):
try:
validate_url(data.get('url'))
except ValidationError as e:
print(">> import URL error", data.get('url'), str(e))
flash(f"Error processing row number {row_id}, URL value was incorrect, row was skipped.", 'error')
# Don't bother processing anything else on this row
continue
new_uuid = datastore.add_watch(url=data['url'].strip(),
extras=extras,
tag=data.get('folder'),
write_to_disk_now=False)
if new_uuid:
# Straight into the queue.
self.new_uuids.append(new_uuid)
good += 1
except Exception as e:
print(e)
flash(f"Error processing row number {row_id}, check all cell data types are correct, row was skipped.", 'error')
else:
row_id += 1
flash(
"{} imported from Wachete .xlsx in {:.2f}s".format(len(self.new_uuids), time.time() - now))
class import_xlsx_custom(Importer):
def run(self,
data,
flash,
datastore,
):
good = 0
now = time.time()
self.new_uuids = []
from openpyxl import load_workbook
try:
wb = load_workbook(data)
except Exception as e:
# @todo correct except
flash("Unable to read export XLSX file, something wrong with the file?", 'error')
return
# @todo check at least 2 rows, same in other method
from .forms import validate_url
row_i = 1
try:
for row in wb.active.iter_rows():
url = None
tags = None
extras = {}
for cell in row:
if not self.import_profile.get(cell.col_idx):
continue
if not cell.value:
continue
cell_map = self.import_profile.get(cell.col_idx)
cell_val = str(cell.value).strip() # could be bool
if cell_map == 'url':
url = cell.value.strip()
try:
validate_url(url)
except ValidationError as e:
print(">> Import URL error", url, str(e))
flash(f"Error processing row number {row_i}, URL value was incorrect, row was skipped.", 'error')
# Don't bother processing anything else on this row
url = None
break
elif cell_map == 'tag':
tags = cell.value.strip()
elif cell_map == 'include_filters':
# @todo validate?
extras['include_filters'] = [cell.value.strip()]
elif cell_map == 'interval_minutes':
hours, minutes = divmod(int(cell_val), 60)
days, hours = divmod(hours, 24)
weeks, days = divmod(days, 7)
extras['time_between_check'] = {'weeks': weeks, 'days': days, 'hours': hours, 'minutes': minutes, 'seconds': 0}
else:
extras[cell_map] = cell_val
# At minimum a URL is required.
if url:
new_uuid = datastore.add_watch(url=url,
extras=extras,
tag=tags,
write_to_disk_now=False)
if new_uuid:
# Straight into the queue.
self.new_uuids.append(new_uuid)
good += 1
except Exception as e:
print(e)
flash(f"Error processing row number {row_i}, check all cell data types are correct, row was skipped.", 'error')
else:
row_i += 1
flash(
"{} imported from custom .xlsx in {:.2f}s".format(len(self.new_uuids), time.time() - now))

View File

@@ -4,7 +4,6 @@ import os
import re import re
import time import time
import uuid import uuid
from pathlib import Path
# Allowable protocols, protects against javascript: etc # Allowable protocols, protects against javascript: etc
# file:// is further checked by ALLOW_FILE_URI # file:// is further checked by ALLOW_FILE_URI
@@ -19,8 +18,6 @@ from changedetectionio.notification import (
base_config = { base_config = {
'body': None, 'body': None,
'browser_steps': [],
'browser_steps_last_error_step': None,
'check_unique_lines': False, # On change-detected, compare against all history if its something new 'check_unique_lines': False, # On change-detected, compare against all history if its something new
'check_count': 0, 'check_count': 0,
'date_created': None, 'date_created': None,
@@ -28,7 +25,6 @@ base_config = {
'extract_text': [], # Extract text by regex after filters 'extract_text': [], # Extract text by regex after filters
'extract_title_as_title': False, 'extract_title_as_title': False,
'fetch_backend': 'system', # plaintext, playwright etc 'fetch_backend': 'system', # plaintext, playwright etc
'fetch_time': 0.0,
'processor': 'text_json_diff', # could be restock_diff or others from .processors 'processor': 'text_json_diff', # could be restock_diff or others from .processors
'filter_failure_notification_send': strtobool(os.getenv('FILTER_FAILURE_NOTIFICATION_SEND_DEFAULT', 'True')), 'filter_failure_notification_send': strtobool(os.getenv('FILTER_FAILURE_NOTIFICATION_SEND_DEFAULT', 'True')),
'filter_text_added': True, 'filter_text_added': True,
@@ -146,14 +142,8 @@ class model(dict):
flash(message, 'error') flash(message, 'error')
return '' return ''
if ready_url.startswith('source:'):
ready_url=ready_url.replace('source:', '')
return ready_url return ready_url
@property
def is_source_type_url(self):
return self.get('url', '').startswith('source:')
@property @property
def get_fetch_backend(self): def get_fetch_backend(self):
""" """
@@ -177,7 +167,9 @@ class model(dict):
@property @property
def label(self): def label(self):
# Used for sorting # Used for sorting
return self.get('title') if self.get('title') else self.get('url') if self['title']:
return self['title']
return self['url']
@property @property
def last_changed(self): def last_changed(self):
@@ -241,14 +233,6 @@ class model(dict):
fname = os.path.join(self.watch_data_dir, "history.txt") fname = os.path.join(self.watch_data_dir, "history.txt")
return os.path.isfile(fname) return os.path.isfile(fname)
@property
def has_browser_steps(self):
has_browser_steps = self.get('browser_steps') and list(filter(
lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'),
self.get('browser_steps')))
return has_browser_steps
# Returns the newest key, but if there's only 1 record, then it's counted as not being new, so return 0. # Returns the newest key, but if there's only 1 record, then it's counted as not being new, so return 0.
@property @property
def newest_history_key(self): def newest_history_key(self):
@@ -507,13 +491,3 @@ class model(dict):
filepath = os.path.join(self.watch_data_dir, 'last-fetched.br') filepath = os.path.join(self.watch_data_dir, 'last-fetched.br')
with open(filepath, 'wb') as f: with open(filepath, 'wb') as f:
f.write(brotli.compress(contents, mode=brotli.MODE_TEXT)) f.write(brotli.compress(contents, mode=brotli.MODE_TEXT))
@property
def get_browsersteps_available_screenshots(self):
"For knowing which screenshots are available to show the user in BrowserSteps UI"
available = []
for f in Path(self.watch_data_dir).glob('step_before-*.jpeg'):
step_n=re.search(r'step_before-(\d+)', f.name)
if step_n:
available.append(step_n.group(1))
return available
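A sketch of the step-number extraction with made-up filenames; only step_before-N.jpeg screenshots yield a step number:

import re

for fname in ('step_before-1.jpeg', 'step_before-12.jpeg', 'thumbnail.jpeg'):
    m = re.search(r'step_before-(\d+)', fname)
    if m:
        print(m.group(1))  # prints 1, then 12; thumbnail.jpeg is skipped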

View File

@@ -1,108 +1,15 @@
from abc import abstractmethod from abc import abstractmethod
import os
import hashlib import hashlib
import re
from changedetectionio import content_fetcher
from copy import deepcopy
from distutils.util import strtobool
class difference_detection_processor(): class difference_detection_processor():
datastore = None
fetcher = None
screenshot = None
xpath_data = None
browser_steps = None
def __init__(self, *args, datastore, watch_uuid, **kwargs): def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs) super().__init__(*args, **kwargs)
self.datastore = datastore
self.watch = deepcopy(self.datastore.data['watching'].get(watch_uuid))
def call_browser(self):
# Protect against file:// access
if re.search(r'^file://', self.watch.get('url', '').strip(), re.IGNORECASE):
if not strtobool(os.getenv('ALLOW_FILE_URI', 'false')):
raise Exception(
"file:// type access is denied for security reasons."
)
url = self.watch.link
# Requests, playwright, other browser via wss:// etc, fetch_extra_something
prefer_fetch_backend = self.watch.get('fetch_backend', 'system')
# Proxy ID "key"
preferred_proxy_id = self.datastore.get_preferred_proxy_for_watch(uuid=self.watch.get('uuid'))
# Pluggable content self.fetcher
if not prefer_fetch_backend or prefer_fetch_backend == 'system':
prefer_fetch_backend = self.datastore.data['settings']['application'].get('fetch_backend')
# Grab the right kind of 'fetcher', (playwright, requests, etc)
if hasattr(content_fetcher, prefer_fetch_backend):
fetcher_obj = getattr(content_fetcher, prefer_fetch_backend)
else:
# If the klass doesn't exist, just use a default
fetcher_obj = getattr(content_fetcher, "html_requests")
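The hasattr/getattr pair above is a simple pluggable-class lookup; a self-contained sketch with a stand-in module:

class fetchers_stub:  # stand-in for the content_fetcher module
    class html_requests: pass
    class html_webdriver: pass

prefer = 'html_webdriver'
if hasattr(fetchers_stub, prefer):
    klass = getattr(fetchers_stub, prefer)
else:
    klass = fetchers_stub.html_requests  # unknown names fall back to the default
print(klass.__name__)  # html_webdriver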
proxy_url = None
if preferred_proxy_id:
proxy_url = self.datastore.proxy_list.get(preferred_proxy_id).get('url')
print(f"Using proxy Key: {preferred_proxy_id} as Proxy URL {proxy_url}")
# Now call the fetcher (playwright/requests/etc) with arguments that only a fetcher would need.
self.fetcher = fetcher_obj(proxy_override=proxy_url,
#browser_url_extra/configurable browser url=...
)
if self.watch.has_browser_steps:
self.fetcher.browser_steps = self.watch.get('browser_steps', [])
self.fetcher.browser_steps_screenshot_path = os.path.join(self.datastore.datastore_path, self.watch.get('uuid'))
# Tweak the base config with the per-watch ones
request_headers = self.watch.get('headers', [])
request_headers.update(self.datastore.get_all_base_headers())
request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=self.watch.get('uuid')))
# https://github.com/psf/requests/issues/4525
# Requests doesn't yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot
# do this by accident.
if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']:
request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '')
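A sketch of the 'br' scrub with an assumed header value; the substring replace leaves the rest of the encoding list intact:

headers = {'Accept-Encoding': 'gzip, deflate, br'}
if 'Accept-Encoding' in headers and 'br' in headers['Accept-Encoding']:
    headers['Accept-Encoding'] = headers['Accept-Encoding'].replace(', br', '')
print(headers['Accept-Encoding'])  # gzip, deflate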
timeout = self.datastore.data['settings']['requests'].get('timeout')
request_body = self.watch.get('body')
request_method = self.watch.get('method')
ignore_status_codes = self.watch.get('ignore_status_codes', False)
# Configurable per-watch or global extra delay before extracting text (for webDriver types)
system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None)
if self.watch.get('webdriver_delay'):
self.fetcher.render_extract_delay = self.watch.get('webdriver_delay')
elif system_webdriver_delay is not None:
self.fetcher.render_extract_delay = system_webdriver_delay
if self.watch.get('webdriver_js_execute_code') is not None and self.watch.get('webdriver_js_execute_code').strip():
self.fetcher.webdriver_js_execute_code = self.watch.get('webdriver_js_execute_code')
# Requests for PDFs, images etc. should be passed the is_binary flag
is_binary = self.watch.is_pdf
# And here we go! call the right browser with browser-specific settings
self.fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, self.watch.get('include_filters'),
is_binary=is_binary)
#@todo .quit here could go on close object, so we can run JS if change-detected
self.fetcher.quit()
# After init, call run_changedetection() which will do the actual change-detection
@abstractmethod @abstractmethod
def run_changedetection(self, uuid, skip_when_checksum_same=True): def run(self, uuid, skip_when_checksum_same=True, preferred_proxy=None):
update_obj = {'last_notification_error': False, 'last_error': False} update_obj = {'last_notification_error': False, 'last_error': False}
some_data = 'xxxxx' some_data = 'xxxxx'
update_obj["previous_md5"] = hashlib.md5(some_data.encode('utf-8')).hexdigest() update_obj["previous_md5"] = hashlib.md5(some_data.encode('utf-8')).hexdigest()

View File

@@ -1,7 +1,10 @@
import hashlib import hashlib
import os
import re
import urllib3 import urllib3
from . import difference_detection_processor from . import difference_detection_processor
from changedetectionio import content_fetcher
from copy import deepcopy from copy import deepcopy
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
@@ -19,7 +22,11 @@ class perform_site_check(difference_detection_processor):
screenshot = None screenshot = None
xpath_data = None xpath_data = None
def run_changedetection(self, uuid, skip_when_checksum_same=True): def __init__(self, *args, datastore, **kwargs):
super().__init__(*args, **kwargs)
self.datastore = datastore
def run(self, uuid, skip_when_checksum_same=True):
# DeepCopy so we can be sure we don't accidentally change anything by reference # DeepCopy so we can be sure we don't accidentally change anything by reference
watch = deepcopy(self.datastore.data['watching'].get(uuid)) watch = deepcopy(self.datastore.data['watching'].get(uuid))
@@ -27,24 +34,84 @@ class perform_site_check(difference_detection_processor):
if not watch: if not watch:
raise Exception("Watch no longer exists.") raise Exception("Watch no longer exists.")
# Protect against file:// access
if re.search(r'^file', watch.get('url', ''), re.IGNORECASE) and not os.getenv('ALLOW_FILE_URI', False):
raise Exception(
"file:// type access is denied for security reasons."
)
# Unset any existing notification error # Unset any existing notification error
update_obj = {'last_notification_error': False, 'last_error': False} update_obj = {'last_notification_error': False, 'last_error': False}
self.screenshot = self.fetcher.screenshot request_headers = watch.get('headers', [])
self.xpath_data = self.fetcher.xpath_data request_headers.update(self.datastore.get_all_base_headers())
request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=uuid))
# https://github.com/psf/requests/issues/4525
# Requests doesn't yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot
# do this by accident.
if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']:
request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '')
timeout = self.datastore.data['settings']['requests'].get('timeout')
url = watch.link
request_body = self.datastore.data['watching'][uuid].get('body')
request_method = self.datastore.data['watching'][uuid].get('method')
ignore_status_codes = self.datastore.data['watching'][uuid].get('ignore_status_codes', False)
# Pluggable content fetcher
prefer_backend = watch.get_fetch_backend
if not prefer_backend or prefer_backend == 'system':
prefer_backend = self.datastore.data['settings']['application']['fetch_backend']
if hasattr(content_fetcher, prefer_backend):
klass = getattr(content_fetcher, prefer_backend)
else:
# If the klass doesn't exist, just use a default
klass = getattr(content_fetcher, "html_requests")
proxy_id = self.datastore.get_preferred_proxy_for_watch(uuid=uuid)
proxy_url = None
if proxy_id:
proxy_url = self.datastore.proxy_list.get(proxy_id).get('url')
print("UUID {} Using proxy {}".format(uuid, proxy_url))
fetcher = klass(proxy_override=proxy_url)
# Configurable per-watch or global extra delay before extracting text (for webDriver types)
system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None)
if watch['webdriver_delay'] is not None:
fetcher.render_extract_delay = watch.get('webdriver_delay')
elif system_webdriver_delay is not None:
fetcher.render_extract_delay = system_webdriver_delay
# Could be removed if requests/plaintext could also return some info?
if prefer_backend != 'html_webdriver':
raise Exception("Re-stock detection requires Chrome or compatible webdriver/playwright fetcher to work")
if watch.get('webdriver_js_execute_code') is not None and watch.get('webdriver_js_execute_code').strip():
fetcher.webdriver_js_execute_code = watch.get('webdriver_js_execute_code')
fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, watch.get('include_filters'))
fetcher.quit()
self.screenshot = fetcher.screenshot
self.xpath_data = fetcher.xpath_data
# Track the content type # Track the content type
update_obj['content_type'] = self.fetcher.headers.get('Content-Type', '') update_obj['content_type'] = fetcher.headers.get('Content-Type', '')
update_obj["last_check_status"] = self.fetcher.get_last_status_code() update_obj["last_check_status"] = fetcher.get_last_status_code()
# Main detection method # Main detection method
fetched_md5 = None fetched_md5 = None
if self.fetcher.instock_data: if fetcher.instock_data:
fetched_md5 = hashlib.md5(self.fetcher.instock_data.encode('utf-8')).hexdigest() fetched_md5 = hashlib.md5(fetcher.instock_data.encode('utf-8')).hexdigest()
# 'Possibly in stock' comes from stock-not-in-stock.js when no string found above the fold. # 'Possibly in stock' comes from stock-not-in-stock.js when no string found above the fold.
update_obj["in_stock"] = True if self.fetcher.instock_data == 'Possibly in stock' else False update_obj["in_stock"] = True if fetcher.instock_data == 'Possibly in stock' else False
else: else:
raise UnableToExtractRestockData(status_code=self.fetcher.status_code) raise UnableToExtractRestockData(status_code=fetcher.status_code)
# The main thing that all this at the moment comes down to :) # The main thing that all this at the moment comes down to :)
changed_detected = False changed_detected = False
@@ -61,4 +128,4 @@ class perform_site_check(difference_detection_processor):
# Always record the new checksum # Always record the new checksum
update_obj["previous_md5"] = fetched_md5 update_obj["previous_md5"] = fetched_md5
return changed_detected, update_obj, self.fetcher.instock_data.encode('utf-8') return changed_detected, update_obj, fetcher.instock_data.encode('utf-8')

View File

@@ -1,4 +1,4 @@
# HTML to TEXT/JSON DIFFERENCE self.fetcher # HTML to TEXT/JSON DIFFERENCE FETCHER
import hashlib import hashlib
import json import json
@@ -11,19 +11,17 @@ from changedetectionio import content_fetcher, html_tools
from changedetectionio.blueprint.price_data_follower import PRICE_DATA_TRACK_ACCEPT, PRICE_DATA_TRACK_REJECT from changedetectionio.blueprint.price_data_follower import PRICE_DATA_TRACK_ACCEPT, PRICE_DATA_TRACK_REJECT
from copy import deepcopy from copy import deepcopy
from . import difference_detection_processor from . import difference_detection_processor
from ..html_tools import PERL_STYLE_REGEX, cdata_in_document_to_text
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
name = 'Webpage Text/HTML, JSON and PDF changes'
name = 'Webpage Text/HTML, JSON and PDF changes'
description = 'Detects all text changes where possible' description = 'Detects all text changes where possible'
json_filter_prefixes = ['json:', 'jq:']
class FilterNotFoundInResponse(ValueError): class FilterNotFoundInResponse(ValueError):
def __init__(self, msg): def __init__(self, msg):
ValueError.__init__(self, msg) ValueError.__init__(self, msg)
class PDFToHTMLToolNotFound(ValueError): class PDFToHTMLToolNotFound(ValueError):
def __init__(self, msg): def __init__(self, msg):
ValueError.__init__(self, msg) ValueError.__init__(self, msg)
@@ -32,10 +30,28 @@ class PDFToHTMLToolNotFound(ValueError):
# Some common stuff here that can be moved to a base class # Some common stuff here that can be moved to a base class
# (set_proxy_from_list) # (set_proxy_from_list)
class perform_site_check(difference_detection_processor): class perform_site_check(difference_detection_processor):
screenshot = None
xpath_data = None
def run_changedetection(self, uuid, skip_when_checksum_same=True): def __init__(self, *args, datastore, **kwargs):
super().__init__(*args, **kwargs)
self.datastore = datastore
# Doesn't look like Python supports forward-slash auto-enclosure in re.findall
# So convert it to inline flag "foobar(?i)" type configuration
def forward_slash_enclosed_regex_to_options(self, regex):
res = re.search(r'^/(.*?)/(\w+)$', regex, re.IGNORECASE)
if res:
regex = res.group(1)
regex += '(?{})'.format(res.group(2))
else:
regex += '(?{})'.format('i')
return regex
def run(self, uuid, skip_when_checksum_same=True, preferred_proxy=None):
changed_detected = False changed_detected = False
html_content = ""
screenshot = False # as bytes screenshot = False # as bytes
stripped_text_from_html = "" stripped_text_from_html = ""
@@ -44,25 +60,100 @@ class perform_site_check(difference_detection_processor):
if not watch: if not watch:
raise Exception("Watch no longer exists.") raise Exception("Watch no longer exists.")
# Protect against file:// access
if re.search(r'^file', watch.get('url', ''), re.IGNORECASE) and not os.getenv('ALLOW_FILE_URI', False):
raise Exception(
"file:// type access is denied for security reasons."
)
# Unset any existing notification error # Unset any existing notification error
update_obj = {'last_notification_error': False, 'last_error': False} update_obj = {'last_notification_error': False, 'last_error': False}
# Tweak the base config with the per-watch ones
request_headers = watch.get('headers', [])
request_headers.update(self.datastore.get_all_base_headers())
request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=uuid))
# https://github.com/psf/requests/issues/4525
# Requests doesn't yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot
# do this by accident.
if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']:
request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '')
timeout = self.datastore.data['settings']['requests'].get('timeout')
url = watch.link url = watch.link
self.screenshot = self.fetcher.screenshot request_body = self.datastore.data['watching'][uuid].get('body')
self.xpath_data = self.fetcher.xpath_data request_method = self.datastore.data['watching'][uuid].get('method')
ignore_status_codes = self.datastore.data['watching'][uuid].get('ignore_status_codes', False)
# source: support
is_source = False
if url.startswith('source:'):
url = url.replace('source:', '')
is_source = True
# Pluggable content fetcher
prefer_backend = watch.get_fetch_backend
if not prefer_backend or prefer_backend == 'system':
prefer_backend = self.datastore.data['settings']['application']['fetch_backend']
if hasattr(content_fetcher, prefer_backend):
klass = getattr(content_fetcher, prefer_backend)
else:
# If the klass doesn't exist, just use a default
klass = getattr(content_fetcher, "html_requests")
if preferred_proxy:
proxy_id = preferred_proxy
else:
proxy_id = self.datastore.get_preferred_proxy_for_watch(uuid=uuid)
proxy_url = None
if proxy_id:
proxy_url = self.datastore.proxy_list.get(proxy_id).get('url')
print("UUID {} Using proxy {}".format(uuid, proxy_url))
fetcher = klass(proxy_override=proxy_url)
# Configurable per-watch or global extra delay before extracting text (for webDriver types)
system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None)
if watch['webdriver_delay'] is not None:
fetcher.render_extract_delay = watch.get('webdriver_delay')
elif system_webdriver_delay is not None:
fetcher.render_extract_delay = system_webdriver_delay
# Possible conflict
if prefer_backend == 'html_webdriver':
fetcher.browser_steps = watch.get('browser_steps', None)
fetcher.browser_steps_screenshot_path = os.path.join(self.datastore.datastore_path, uuid)
if watch.get('webdriver_js_execute_code') is not None and watch.get('webdriver_js_execute_code').strip():
fetcher.webdriver_js_execute_code = watch.get('webdriver_js_execute_code')
# requests for PDFs, images etc. should be passed the is_binary flag
is_binary = watch.is_pdf
fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, watch.get('include_filters'), is_binary=is_binary)
fetcher.quit()
self.screenshot = fetcher.screenshot
self.xpath_data = fetcher.xpath_data
# Track the content type # Track the content type
update_obj['content_type'] = self.fetcher.get_all_headers().get('content-type', '').lower() update_obj['content_type'] = fetcher.get_all_headers().get('content-type', '').lower()
# Watches added automatically in the queue manager will skip if it's the same checksum as the previous run # Watches added automatically in the queue manager will skip if it's the same checksum as the previous run
# Saves a lot of CPU # Saves a lot of CPU
update_obj['previous_md5_before_filters'] = hashlib.md5(self.fetcher.content.encode('utf-8')).hexdigest() update_obj['previous_md5_before_filters'] = hashlib.md5(fetcher.content.encode('utf-8')).hexdigest()
if skip_when_checksum_same: if skip_when_checksum_same:
if update_obj['previous_md5_before_filters'] == watch.get('previous_md5_before_filters'): if update_obj['previous_md5_before_filters'] == watch.get('previous_md5_before_filters'):
raise content_fetcher.checksumFromPreviousCheckWasTheSame() raise content_fetcher.checksumFromPreviousCheckWasTheSame()
# Fetching complete, now filters # Fetching complete, now filters
# @todo move to class / maybe inside of fetcher abstract base?
# @note: I feel like the following should be in a more obvious chain system # @note: I feel like the following should be in a more obvious chain system
# - Check filter text # - Check filter text
@@ -71,24 +162,15 @@ class perform_site_check(difference_detection_processor):
# https://stackoverflow.com/questions/41817578/basic-method-chaining ? # https://stackoverflow.com/questions/41817578/basic-method-chaining ?
# return content().textfilter().jsonextract().checksumcompare() ? # return content().textfilter().jsonextract().checksumcompare() ?
is_json = 'application/json' in self.fetcher.get_all_headers().get('content-type', '').lower() is_json = 'application/json' in fetcher.get_all_headers().get('content-type', '').lower()
is_html = not is_json is_html = not is_json
is_rss = False
ctype_header = self.fetcher.get_all_headers().get('content-type', '').lower()
# Go into RSS preprocess for converting CDATA/comment to usable text
if any(substring in ctype_header for substring in ['application/xml', 'application/rss', 'text/xml']):
if '<rss' in self.fetcher.content[:100].lower():
self.fetcher.content = cdata_in_document_to_text(html_content=self.fetcher.content)
is_rss = True
# source: support, basically treat it as plaintext # source: support, basically treat it as plaintext
if watch.is_source_type_url: if is_source:
is_html = False is_html = False
is_json = False is_json = False
inline_pdf = self.fetcher.get_all_headers().get('content-disposition', '') and '%PDF-1' in self.fetcher.content[:10] if watch.is_pdf or 'application/pdf' in fetcher.get_all_headers().get('content-type', '').lower():
if watch.is_pdf or 'application/pdf' in self.fetcher.get_all_headers().get('content-type', '').lower() or inline_pdf:
from shutil import which from shutil import which
tool = os.getenv("PDF_TO_HTML_TOOL", "pdftohtml") tool = os.getenv("PDF_TO_HTML_TOOL", "pdftohtml")
if not which(tool): if not which(tool):
@@ -99,18 +181,18 @@ class perform_site_check(difference_detection_processor):
[tool, '-stdout', '-', '-s', 'out.pdf', '-i'], [tool, '-stdout', '-', '-s', 'out.pdf', '-i'],
stdout=subprocess.PIPE, stdout=subprocess.PIPE,
stdin=subprocess.PIPE) stdin=subprocess.PIPE)
proc.stdin.write(self.fetcher.raw_content) proc.stdin.write(fetcher.raw_content)
proc.stdin.close() proc.stdin.close()
self.fetcher.content = proc.stdout.read().decode('utf-8') fetcher.content = proc.stdout.read().decode('utf-8')
proc.wait(timeout=60) proc.wait(timeout=60)
# Add a little metadata so we know if the file changes (like if an image changes, but the text is the same # Add a little metadata so we know if the file changes (like if an image changes, but the text is the same
# @todo may cause problems with non-UTF8? # @todo may cause problems with non-UTF8?
metadata = "<p>Added by changedetection.io: Document checksum - {} Filesize - {} bytes</p>".format( metadata = "<p>Added by changedetection.io: Document checksum - {} Filesize - {} bytes</p>".format(
hashlib.md5(self.fetcher.raw_content).hexdigest().upper(), hashlib.md5(fetcher.raw_content).hexdigest().upper(),
len(self.fetcher.content)) len(fetcher.content))
self.fetcher.content = self.fetcher.content.replace('</body>', metadata + '</body>') fetcher.content = fetcher.content.replace('</body>', metadata + '</body>')
# Better would be if Watch.model could access the global data also # Better would be if Watch.model could access the global data also
# and then use getattr https://docs.python.org/3/reference/datamodel.html#object.__getitem__ # and then use getattr https://docs.python.org/3/reference/datamodel.html#object.__getitem__
@@ -125,7 +207,7 @@ class perform_site_check(difference_detection_processor):
# Inject a virtual LD+JSON price tracker rule # Inject a virtual LD+JSON price tracker rule
if watch.get('track_ldjson_price_data', '') == PRICE_DATA_TRACK_ACCEPT: if watch.get('track_ldjson_price_data', '') == PRICE_DATA_TRACK_ACCEPT:
include_filters_rule += html_tools.LD_JSON_PRODUCT_OFFER_SELECTORS include_filters_rule.append(html_tools.LD_JSON_PRODUCT_OFFER_SELECTOR)
has_filter_rule = len(include_filters_rule) and len(include_filters_rule[0].strip()) has_filter_rule = len(include_filters_rule) and len(include_filters_rule[0].strip())
has_subtractive_selectors = len(subtractive_selectors) and len(subtractive_selectors[0].strip()) has_subtractive_selectors = len(subtractive_selectors) and len(subtractive_selectors[0].strip())
@@ -137,30 +219,33 @@ class perform_site_check(difference_detection_processor):
if is_json: if is_json:
# Sort the JSON so we don't get false alerts when the content is just re-ordered # Sort the JSON so we don't get false alerts when the content is just re-ordered
try: try:
self.fetcher.content = json.dumps(json.loads(self.fetcher.content), sort_keys=True) fetcher.content = json.dumps(json.loads(fetcher.content), sort_keys=True)
except Exception as e: except Exception as e:
# Might have just been a snippet, or otherwise bad JSON, continue # Might have just been a snippet, or otherwise bad JSON, continue
pass pass
if has_filter_rule: if has_filter_rule:
json_filter_prefixes = ['json:', 'jq:']
for filter in include_filters_rule: for filter in include_filters_rule:
if any(prefix in filter for prefix in json_filter_prefixes): if any(prefix in filter for prefix in json_filter_prefixes):
stripped_text_from_html += html_tools.extract_json_as_string(content=self.fetcher.content, json_filter=filter) stripped_text_from_html += html_tools.extract_json_as_string(content=fetcher.content, json_filter=filter)
is_html = False is_html = False
if is_html or watch.is_source_type_url:
if is_html or is_source:
# CSS Filter, extract the HTML that matches and feed that into the existing inscriptis::get_text # CSS Filter, extract the HTML that matches and feed that into the existing inscriptis::get_text
self.fetcher.content = html_tools.workarounds_for_obfuscations(self.fetcher.content) fetcher.content = html_tools.workarounds_for_obfuscations(fetcher.content)
html_content = self.fetcher.content html_content = fetcher.content
# If not JSON, and if it's not text/plain.. # If not JSON, and if it's not text/plain..
if 'text/plain' in self.fetcher.get_all_headers().get('content-type', '').lower(): if 'text/plain' in fetcher.get_all_headers().get('content-type', '').lower():
# Don't run get_text or xpath/css filters on plaintext # Don't run get_text or xpath/css filters on plaintext
stripped_text_from_html = html_content stripped_text_from_html = html_content
else: else:
# Does it have some ld+json price data? used for easier monitoring # Does it have some ld+json price data? used for easier monitoring
update_obj['has_ldjson_price_data'] = html_tools.has_ldjson_product_info(self.fetcher.content) update_obj['has_ldjson_price_data'] = html_tools.has_ldjson_product_info(fetcher.content)
# Then we assume HTML # Then we assume HTML
if has_filter_rule: if has_filter_rule:
@@ -170,14 +255,13 @@ class perform_site_check(difference_detection_processor):
# For HTML/XML we offer xpath as an option, just start a regular xPath "/.." # For HTML/XML we offer xpath as an option, just start a regular xPath "/.."
if filter_rule[0] == '/' or filter_rule.startswith('xpath:'): if filter_rule[0] == '/' or filter_rule.startswith('xpath:'):
html_content += html_tools.xpath_filter(xpath_filter=filter_rule.replace('xpath:', ''), html_content += html_tools.xpath_filter(xpath_filter=filter_rule.replace('xpath:', ''),
html_content=self.fetcher.content, html_content=fetcher.content,
append_pretty_line_formatting=not watch.is_source_type_url, append_pretty_line_formatting=not is_source)
is_rss=is_rss)
else: else:
# CSS Filter, extract the HTML that matches and feed that into the existing inscriptis::get_text # CSS Filter, extract the HTML that matches and feed that into the existing inscriptis::get_text
html_content += html_tools.include_filters(include_filters=filter_rule, html_content += html_tools.include_filters(include_filters=filter_rule,
html_content=self.fetcher.content, html_content=fetcher.content,
append_pretty_line_formatting=not watch.is_source_type_url) append_pretty_line_formatting=not is_source)
if not html_content.strip(): if not html_content.strip():
raise FilterNotFoundInResponse(include_filters_rule) raise FilterNotFoundInResponse(include_filters_rule)
@@ -185,21 +269,21 @@ class perform_site_check(difference_detection_processor):
if has_subtractive_selectors: if has_subtractive_selectors:
html_content = html_tools.element_removal(subtractive_selectors, html_content) html_content = html_tools.element_removal(subtractive_selectors, html_content)
if watch.is_source_type_url: if is_source:
stripped_text_from_html = html_content stripped_text_from_html = html_content
else: else:
# extract text # extract text
do_anchor = self.datastore.data["settings"]["application"].get("render_anchor_tag_content", False) do_anchor = self.datastore.data["settings"]["application"].get("render_anchor_tag_content", False)
stripped_text_from_html = \ stripped_text_from_html = \
html_tools.html_to_text( html_tools.html_to_text(
html_content=html_content, html_content,
render_anchor_tag_content=do_anchor, render_anchor_tag_content=do_anchor
is_rss=is_rss # #1874 activate the <title workaround hack
) )
# Re #340 - return the content before the 'ignore text' was applied # Re #340 - return the content before the 'ignore text' was applied
text_content_before_ignored_filter = stripped_text_from_html.encode('utf-8') text_content_before_ignored_filter = stripped_text_from_html.encode('utf-8')
# @todo whitespace coming from missing rtrim()? # @todo whitespace coming from missing rtrim()?
# stripped_text_from_html could be based on their preferences, replace the processed text with only that which they want to know about. # stripped_text_from_html could be based on their preferences, replace the processed text with only that which they want to know about.
# Rewrite's the processing text based on only what diff result they want to see # Rewrite's the processing text based on only what diff result they want to see
@@ -209,13 +293,13 @@ class perform_site_check(difference_detection_processor):
# needs to not include (added) etc or it may get used twice # needs to not include (added) etc or it may get used twice
# Replace the processed text with the preferred result # Replace the processed text with the preferred result
rendered_diff = diff.render_diff(previous_version_file_contents=watch.get_last_fetched_before_filters(), rendered_diff = diff.render_diff(previous_version_file_contents=watch.get_last_fetched_before_filters(),
newest_version_file_contents=stripped_text_from_html, newest_version_file_contents=stripped_text_from_html,
include_equal=False, # not the same lines include_equal=False, # not the same lines
include_added=watch.get('filter_text_added', True), include_added=watch.get('filter_text_added', True),
include_removed=watch.get('filter_text_removed', True), include_removed=watch.get('filter_text_removed', True),
include_replaced=watch.get('filter_text_replaced', True), include_replaced=watch.get('filter_text_replaced', True),
line_feed_sep="\n", line_feed_sep="\n",
include_change_type_prefix=False) include_change_type_prefix=False)
watch.save_last_fetched_before_filters(text_content_before_ignored_filter) watch.save_last_fetched_before_filters(text_content_before_ignored_filter)
@@ -230,17 +314,12 @@ class perform_site_check(difference_detection_processor):
# Treat pages with no renderable text content as a change? No by default # Treat pages with no renderable text content as a change? No by default
empty_pages_are_a_change = self.datastore.data['settings']['application'].get('empty_pages_are_a_change', False) empty_pages_are_a_change = self.datastore.data['settings']['application'].get('empty_pages_are_a_change', False)
if not is_json and not empty_pages_are_a_change and len(stripped_text_from_html.strip()) == 0: if not is_json and not empty_pages_are_a_change and len(stripped_text_from_html.strip()) == 0:
raise content_fetcher.ReplyWithContentButNoText(url=url, raise content_fetcher.ReplyWithContentButNoText(url=url, status_code=fetcher.get_last_status_code(), screenshot=screenshot)
status_code=self.fetcher.get_last_status_code(),
screenshot=screenshot,
has_filters=has_filter_rule,
html_content=html_content
)
# We rely on the actual text in the html output.. many sites have random script vars etc, # We rely on the actual text in the html output.. many sites have random script vars etc,
# in the future we'll implement other mechanisms. # in the future we'll implement other mechanisms.
update_obj["last_check_status"] = self.fetcher.get_last_status_code() update_obj["last_check_status"] = fetcher.get_last_status_code()
# If there's text to skip # If there's text to skip
# @todo we could abstract out the get_text() to handle this cleaner # @todo we could abstract out the get_text() to handle this cleaner
@@ -256,25 +335,16 @@ class perform_site_check(difference_detection_processor):
regex_matched_output = [] regex_matched_output = []
for s_re in extract_text: for s_re in extract_text:
# incase they specified something in '/.../x' # incase they specified something in '/.../x'
if re.search(PERL_STYLE_REGEX, s_re, re.IGNORECASE): regex = self.forward_slash_enclosed_regex_to_options(s_re)
regex = html_tools.perl_style_slash_enclosed_regex_to_options(s_re) result = re.findall(regex.encode('utf-8'), stripped_text_from_html)
result = re.findall(regex.encode('utf-8'), stripped_text_from_html)
for l in result: for l in result:
if type(l) is tuple: if type(l) is tuple:
# @todo - some formatter option default (between groups) # @todo - some formatter option default (between groups)
regex_matched_output += list(l) + [b'\n'] regex_matched_output += list(l) + [b'\n']
else: else:
# @todo - some formatter option default (between each ungrouped result) # @todo - some formatter option default (between each ungrouped result)
regex_matched_output += [l] + [b'\n'] regex_matched_output += [l] + [b'\n']
else:
# Doesnt look like regex, just hunt for plaintext and return that which matches
# `stripped_text_from_html` will be bytes, so we must encode s_re also to bytes
r = re.compile(re.escape(s_re.encode('utf-8')), re.IGNORECASE)
res = r.findall(stripped_text_from_html)
if res:
for match in res:
regex_matched_output += [match] + [b'\n']
# Now we will only show what the regex matched # Now we will only show what the regex matched
stripped_text_from_html = b'' stripped_text_from_html = b''
@@ -328,7 +398,7 @@ class perform_site_check(difference_detection_processor):
if is_html: if is_html:
if self.datastore.data['settings']['application'].get('extract_title_as_title') or watch['extract_title_as_title']: if self.datastore.data['settings']['application'].get('extract_title_as_title') or watch['extract_title_as_title']:
if not watch['title'] or not len(watch['title']): if not watch['title'] or not len(watch['title']):
update_obj['title'] = html_tools.extract_element(find='title', html_content=self.fetcher.content) update_obj['title'] = html_tools.extract_element(find='title', html_content=fetcher.content)
if changed_detected: if changed_detected:
if watch.get('check_unique_lines', False): if watch.get('check_unique_lines', False):
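The extract_text hunk above replaces the old assume-it-is-a-regex behaviour with a two-way branch: entries wrapped in /.../flags are treated as regular expressions, anything else becomes a case-insensitive plaintext hunt. A rough standalone sketch of that dual behaviour; the PERL_STYLE_REGEX pattern and the converter body below are assumptions for illustration, only the control flow mirrors the - side:

import re

PERL_STYLE_REGEX = r'^/(.*?)/([a-z]*)$'  # assumed shape of the detector pattern

def perl_style_slash_enclosed_regex_to_options(s_re):
    # '/reports.+?2022/i' -> '(?i)reports.+?2022' (simplified conversion)
    m = re.search(PERL_STYLE_REGEX, s_re, re.IGNORECASE)
    body, flags = m.group(1), m.group(2)
    return "(?{}){}".format(flags, body) if flags else body

def extract_matches(extract_text, stripped_text_from_html):
    # stripped_text_from_html is bytes, so patterns must be encoded to bytes too
    regex_matched_output = []
    for s_re in extract_text:
        if re.search(PERL_STYLE_REGEX, s_re, re.IGNORECASE):
            regex = perl_style_slash_enclosed_regex_to_options(s_re)
            for l in re.findall(regex.encode('utf-8'), stripped_text_from_html):
                # findall returns tuples when the pattern contains groups
                regex_matched_output += (list(l) if isinstance(l, tuple) else [l]) + [b'\n']
        else:
            # Doesn't look like a regex: plain case-insensitive keyword hunt
            r = re.compile(re.escape(s_re.encode('utf-8')), re.IGNORECASE)
            for match in r.findall(stripped_text_from_html):
                regex_matched_output += [match] + [b'\n']
    return regex_matched_output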

View File

@@ -18,19 +18,12 @@ module.exports = async ({page, context}) => {
     await page.setBypassCSP(true)
     await page.setExtraHTTPHeaders(req_headers);
-    if (user_agent) {
-        await page.setUserAgent(user_agent);
-    }
+    await page.setUserAgent(user_agent);
     // https://ourcodeworld.com/articles/read/1106/how-to-solve-puppeteer-timeouterror-navigation-timeout-of-30000-ms-exceeded
     await page.setDefaultNavigationTimeout(0);
     if (proxy_username) {
-        // Setting Proxy-Authentication header is deprecated, and doing so can trigger header change errors from Puppeteer
-        // https://github.com/puppeteer/puppeteer/issues/676 ?
-        // https://help.brightdata.com/hc/en-us/articles/12632549957649-Proxy-Manager-How-to-Guides#h_01HAKWR4Q0AFS8RZTNYWRDFJC2
-        // https://cri.dev/posts/2020-03-30-How-to-solve-Puppeteer-Chrome-Error-ERR_INVALID_ARGUMENT/
         await page.authenticate({
             username: proxy_username,
             password: proxy_password

View File

@@ -5,19 +5,14 @@ function isItemInStock() {
         'agotado',
         'artikel zurzeit vergriffen',
         'as soon as stock is available',
-        'ausverkauft', // sold out
         'available for back order',
-        'back-order or out of stock',
         'backordered',
-        'benachrichtigt mich', // notify me
         'brak na stanie',
         'brak w magazynie',
         'coming soon',
         'currently have any tickets for this',
         'currently unavailable',
-        'dostępne wkrótce',
         'en rupture de stock',
-        'ist derzeit nicht auf lager',
         'item is no longer available',
         'message if back in stock',
         'nachricht bei',
@@ -42,7 +37,6 @@ function isItemInStock() {
         'unavailable tickets',
         'we do not currently have an estimate of when this product will be back in stock.',
         'zur zeit nicht an lager',
-        '已售完',
     ];
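The phrase list above feeds a simple case-insensitive containment scan over the rendered page text; expressed in Python, the check is roughly this (function and variable names are illustrative):

def is_item_out_of_stock(page_text, phrases):
    # True when any known out-of-stock phrase appears anywhere in the page text
    haystack = page_text.lower()
    return any(phrase in haystack for phrase in phrases)

print(is_item_out_of_stock("Sorry - Currently Unavailable", ['currently unavailable', 'coming soon']))  # True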

View File

@@ -10,40 +10,6 @@ set -x
 docker run --network changedet-network -d --name squid-one --hostname squid-one --rm -v `pwd`/tests/proxy_list/squid.conf:/etc/squid/conf.d/debian.conf ubuntu/squid:4.13-21.10_edge
 docker run --network changedet-network -d --name squid-two --hostname squid-two --rm -v `pwd`/tests/proxy_list/squid.conf:/etc/squid/conf.d/debian.conf ubuntu/squid:4.13-21.10_edge
-# SOCKS5 related - start simple Socks5 proxy server
-# SOCKSTEST=xyz should show in the logs of this service to confirm it fetched
-docker run --network changedet-network -d --hostname socks5proxy --name socks5proxy -p 1080:1080 -e PROXY_USER=proxy_user123 -e PROXY_PASSWORD=proxy_pass123 serjs/go-socks5-proxy
-docker run --network changedet-network -d --hostname socks5proxy-noauth -p 1081:1080 --name socks5proxy-noauth serjs/go-socks5-proxy
-echo "---------------------------------- SOCKS5 -------------------"
-# SOCKS5 related - test from proxies.json
-docker run --network changedet-network \
-    -v `pwd`/tests/proxy_socks5/proxies.json-example:/app/changedetectionio/test-datastore/proxies.json \
-    --rm \
-    -e "SOCKSTEST=proxiesjson" \
-    test-changedetectionio \
-    bash -c 'cd changedetectionio && pytest tests/proxy_socks5/test_socks5_proxy_sources.py'
-# SOCKS5 related - by manually entering in UI
-docker run --network changedet-network \
-    --rm \
-    -e "SOCKSTEST=manual" \
-    test-changedetectionio \
-    bash -c 'cd changedetectionio && pytest tests/proxy_socks5/test_socks5_proxy.py'
-# SOCKS5 related - test from proxies.json via playwright - NOTE- PLAYWRIGHT DOESNT SUPPORT AUTHENTICATING PROXY
-docker run --network changedet-network \
-    -e "SOCKSTEST=manual-playwright" \
-    -v `pwd`/tests/proxy_socks5/proxies.json-example-noauth:/app/changedetectionio/test-datastore/proxies.json \
-    -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" \
-    --rm \
-    test-changedetectionio \
-    bash -c 'cd changedetectionio && pytest tests/proxy_socks5/test_socks5_proxy_sources.py'
-echo "socks5 server logs"
-docker logs socks5proxy
-echo "----------------------------------"
 # Used for configuring a custom proxy URL via the UI
 docker run --network changedet-network -d \
     --name squid-custom \
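The removed block exercised authenticated SOCKS5 proxies against the plain-requests fetcher. For comparison, a direct fetch through the same serjs/go-socks5-proxy container with the requests library would look roughly like this (needs the requests[socks] extra installed; host and credentials mirror the docker run line above):

import requests

proxies = {
    'http': 'socks5://proxy_user123:proxy_pass123@socks5proxy:1080',
    'https': 'socks5://proxy_user123:proxy_pass123@socks5proxy:1080',
}

# Any URL works here; the SOCKSTEST marker in the proxy logs confirms the route was used
response = requests.get('https://changedetection.io', proxies=proxies, timeout=30)
print(response.status_code)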

View File

@@ -208,7 +208,7 @@ $(document).ready(function () {
                 console.log(x);
                 if (x && first_available.length) {
                     // @todo will it let you click shit that has a layer ontop? probably not.
-                    if (x['tagtype'] === 'text' || x['tagtype'] === 'number' || x['tagtype'] === 'email' || x['tagName'] === 'textarea' || x['tagtype'] === 'password' || x['tagtype'] === 'search') {
+                    if (x['tagtype'] === 'text' || x['tagtype'] === 'email' || x['tagName'] === 'textarea' || x['tagtype'] === 'password' || x['tagtype'] === 'search') {
                         $('select', first_available).val('Enter text in field').change();
                         $('input[type=text]', first_available).first().val(x['xpath']);
                         $('input[placeholder="Value"]', first_available).addClass('ok').click().focus();
@@ -321,14 +321,8 @@ $(document).ready(function () {
             var s = '<div class="control">' + '<a data-step-index=' + i + ' class="pure-button button-secondary button-green button-xsmall apply" >Apply</a>&nbsp;';
             if (i > 0) {
                 // The first step never gets these (Goto-site)
-                s += `<a data-step-index="${i}" class="pure-button button-secondary button-xsmall clear" >Clear</a>&nbsp;` +
-                    `<a data-step-index="${i}" class="pure-button button-secondary button-red button-xsmall remove" >Remove</a>`;
-                // if a screenshot is available
-                if (browser_steps_available_screenshots.includes(i.toString())) {
-                    var d = (browser_steps_last_error_step === i+1) ? 'before' : 'after';
-                    s += `&nbsp;<a data-step-index="${i}" class="pure-button button-secondary button-xsmall show-screenshot" title="Show screenshot from last run" data-type="${d}">Pic</a>&nbsp;`;
-                }
+                s += '<a data-step-index=' + i + ' class="pure-button button-secondary button-xsmall clear" >Clear</a>&nbsp;' +
+                    '<a data-step-index=' + i + ' class="pure-button button-secondary button-red button-xsmall remove" >Remove</a>';
             }
             s += '</div>';
             $(this).append(s)
@@ -443,24 +437,6 @@ $(document).ready(function () {
     });
-    $('ul#browser_steps li .control .show-screenshot').click(function (element) {
-        var step_n = $(event.currentTarget).data('step-index');
-        w = window.open(this.href, "_blank", "width=640,height=480");
-        const t = $(event.currentTarget).data('type');
-        const url = browser_steps_fetch_screenshot_image_url + `&step_n=${step_n}&type=${t}`;
-        w.document.body.innerHTML = `<!DOCTYPE html>
-        <html lang="en">
-        <body>
-        <img src="${url}" style="width: 100%" alt="Browser Step at step ${step_n} from last run." title="Browser Step at step ${step_n} from last run."/>
-        </body>
-        </html>`;
-        w.document.title = `Browser Step at step ${step_n} from last run.`;
-    });
-    if (browser_steps_last_error_step) {
-        $("ul#browser_steps>li:nth-child("+browser_steps_last_error_step+")").addClass("browser-step-with-error");
-    }
     $("ul#browser_steps select").change(function () {
         set_greyed_state();

View File

@@ -1,120 +1,110 @@
-$(document).ready(function () {
-    var a = document.getElementById("a");
-    var b = document.getElementById("b");
-    var result = document.getElementById("result");
-    var inputs;
-    $('#jump-next-diff').click(function () {
-        var element = inputs[inputs.current];
-        var headerOffset = 80;
-        var elementPosition = element.getBoundingClientRect().top;
-        var offsetPosition = elementPosition - headerOffset + window.scrollY;
-        window.scrollTo({
-            top: offsetPosition,
-            behavior: "smooth",
-        });
-        inputs.current++;
-        if (inputs.current >= inputs.length) {
-            inputs.current = 0;
-        }
-    });
-    function changed() {
-        // https://github.com/kpdecker/jsdiff/issues/389
-        // I would love to use `{ignoreWhitespace: true}` here but it breaks the formatting
-        options = {
-            ignoreWhitespace: document.getElementById("ignoreWhitespace").checked,
-        };
-        var diff = Diff[window.diffType](a.textContent, b.textContent, options);
-        var fragment = document.createDocumentFragment();
-        for (var i = 0; i < diff.length; i++) {
-            if (diff[i].added && diff[i + 1] && diff[i + 1].removed) {
-                var swap = diff[i];
-                diff[i] = diff[i + 1];
-                diff[i + 1] = swap;
-            }
-            var node;
-            if (diff[i].removed) {
-                node = document.createElement("del");
-                node.classList.add("change");
-                const wrapper = node.appendChild(document.createElement("span"));
-                wrapper.appendChild(document.createTextNode(diff[i].value));
-            } else if (diff[i].added) {
-                node = document.createElement("ins");
-                node.classList.add("change");
-                const wrapper = node.appendChild(document.createElement("span"));
-                wrapper.appendChild(document.createTextNode(diff[i].value));
-            } else {
-                node = document.createTextNode(diff[i].value);
-            }
-            fragment.appendChild(node);
-        }
-        result.textContent = "";
-        result.appendChild(fragment);
-        // For nice mouse-over hover/title information
-        const removed_current_option = $('#diff-version option:selected')
-        if (removed_current_option) {
-            $('del').each(function () {
-                $(this).prop('title', 'Removed '+removed_current_option[0].label);
-            });
-        }
-        const inserted_current_option = $('#current-version option:selected')
-        if (removed_current_option) {
-            $('ins').each(function () {
-                $(this).prop('title', 'Inserted '+inserted_current_option[0].label);
-            });
-        }
-        // Set the list of possible differences to jump to
-        inputs = document.querySelectorAll('#diff-ui .change')
-        // Set the "current" diff pointer
-        inputs.current = 0;
-        // Goto diff
-        $('#jump-next-diff').click();
-    }
-    $('.needs-localtime').each(function () {
-        for (var option of this.options) {
-            var dateObject = new Date(option.value * 1000);
-            option.label = dateObject.toLocaleString(undefined, {dateStyle: "full", timeStyle: "medium"});
-        }
-    })
-    onDiffTypeChange(
-        document.querySelector('#settings [name="diff_type"]:checked'),
-    );
-    changed();
-    a.onpaste = a.onchange = b.onpaste = b.onchange = changed;
-    if ("oninput" in a) {
-        a.oninput = b.oninput = changed;
-    } else {
-        a.onkeyup = b.onkeyup = changed;
-    }
-    function onDiffTypeChange(radio) {
-        window.diffType = radio.value;
-        // Not necessary
-        // document.title = "Diff " + radio.value.slice(4);
-    }
-    var radio = document.getElementsByName("diff_type");
-    for (var i = 0; i < radio.length; i++) {
-        radio[i].onchange = function (e) {
-            onDiffTypeChange(e.target);
-            changed();
-        };
-    }
-    document.getElementById("ignoreWhitespace").onchange = function (e) {
-        changed();
-    };
-});
+var a = document.getElementById("a");
+var b = document.getElementById("b");
+var result = document.getElementById("result");
+
+function changed() {
+    // https://github.com/kpdecker/jsdiff/issues/389
+    // I would love to use `{ignoreWhitespace: true}` here but it breaks the formatting
+    options = {
+        ignoreWhitespace: document.getElementById("ignoreWhitespace").checked,
+    };
+    var diff = Diff[window.diffType](a.textContent, b.textContent, options);
+    var fragment = document.createDocumentFragment();
+    for (var i = 0; i < diff.length; i++) {
+        if (diff[i].added && diff[i + 1] && diff[i + 1].removed) {
+            var swap = diff[i];
+            diff[i] = diff[i + 1];
+            diff[i + 1] = swap;
+        }
+        var node;
+        if (diff[i].removed) {
+            node = document.createElement("del");
+            node.classList.add("change");
+            const wrapper = node.appendChild(document.createElement("span"));
+            wrapper.appendChild(document.createTextNode(diff[i].value));
+        } else if (diff[i].added) {
+            node = document.createElement("ins");
+            node.classList.add("change");
+            const wrapper = node.appendChild(document.createElement("span"));
+            wrapper.appendChild(document.createTextNode(diff[i].value));
+        } else {
+            node = document.createTextNode(diff[i].value);
+        }
+        fragment.appendChild(node);
+    }
+    result.textContent = "";
+    result.appendChild(fragment);
+    // Jump at start
+    inputs.current = 0;
+    next_diff();
+}
+
+window.onload = function () {
+    /* Convert what is options from UTC time.time() to local browser time */
+    var diffList = document.getElementById("diff-version");
+    if (typeof diffList != "undefined" && diffList != null) {
+        for (var option of diffList.options) {
+            var dateObject = new Date(option.value * 1000);
+            option.label = dateObject.toLocaleString();
+        }
+    }
+    /* Set current version date as local time in the browser also */
+    var current_v = document.getElementById("current-v-date");
+    var dateObject = new Date(newest_version_timestamp * 1000);
+    current_v.innerHTML = dateObject.toLocaleString();
+    onDiffTypeChange(
+        document.querySelector('#settings [name="diff_type"]:checked'),
+    );
+    changed();
+};
+
+a.onpaste = a.onchange = b.onpaste = b.onchange = changed;
+
+if ("oninput" in a) {
+    a.oninput = b.oninput = changed;
+} else {
+    a.onkeyup = b.onkeyup = changed;
+}
+
+function onDiffTypeChange(radio) {
+    window.diffType = radio.value;
+    // Not necessary
+    // document.title = "Diff " + radio.value.slice(4);
+}
+
+var radio = document.getElementsByName("diff_type");
+for (var i = 0; i < radio.length; i++) {
+    radio[i].onchange = function (e) {
+        onDiffTypeChange(e.target);
+        changed();
+    };
+}
+
+document.getElementById("ignoreWhitespace").onchange = function (e) {
+    changed();
+};
+
+var inputs = document.getElementsByClassName("change");
+inputs.current = 0;
+
+function next_diff() {
+    var element = inputs[inputs.current];
+    var headerOffset = 80;
+    var elementPosition = element.getBoundingClientRect().top;
+    var offsetPosition = elementPosition - headerOffset + window.scrollY;
+    window.scrollTo({
+        top: offsetPosition,
+        behavior: "smooth",
+    });
+    inputs.current++;
+    if (inputs.current >= inputs.length) {
+        inputs.current = 0;
+    }
+}
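One detail worth noting in both versions of changed(): adjacent added/removed fragments from jsdiff are swapped so deletions always render before insertions. The same normalisation, sketched in Python over jsdiff-shaped fragment dicts (the fragment format is an assumption for illustration):

def normalise_fragments(diff):
    # diff: list of dicts like {'value': ..., 'added': bool, 'removed': bool}
    # Swap added/removed pairs so the <del> node is emitted before the <ins> node
    for i in range(len(diff) - 1):
        if diff[i].get('added') and diff[i + 1].get('removed'):
            diff[i], diff[i + 1] = diff[i + 1], diff[i]
    return diff

fragments = [{'value': 'new', 'added': True}, {'value': 'old', 'removed': True}]
print(normalise_fragments(fragments))  # the removed fragment now comes first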

View File

@@ -3,50 +3,45 @@
  * Toggles theme between light and dark mode.
  */
 $(document).ready(function () {
     const button = document.getElementById("toggle-light-mode");
     button.onclick = () => {
         const htmlElement = document.getElementsByTagName("html");
         const isDarkMode = htmlElement[0].dataset.darkmode === "true";
         htmlElement[0].dataset.darkmode = !isDarkMode;
         setCookieValue(!isDarkMode);
     };
     const setCookieValue = (value) => {
         document.cookie = `css_dark_mode=${value};max-age=31536000;path=/`
     }
     // Search input box behaviour
     const toggle_search = document.getElementById("toggle-search");
     const search_q = document.getElementById("search-q");
-    if(search_q) {
-        window.addEventListener('keydown', function (e) {
-            if (e.altKey == true && e.keyCode == 83) {
-                search_q.classList.toggle('expanded');
-                search_q.focus();
-            }
-        });
-        search_q.onkeydown = (e) => {
-            var key = e.keyCode || e.which;
-            if (key === 13) {
-                document.searchForm.submit();
-            }
-        };
-        toggle_search.onclick = () => {
-            // Could be that they want to search something once text is in there
-            if (search_q.value.length) {
-                document.searchForm.submit();
-            } else {
-                // If not..
-                search_q.classList.toggle('expanded');
-                search_q.focus();
-            }
-        };
-    }
-    $('#heart-us').click(function () {
-        $("#overlay").toggleClass('visible');
-        heartpath.style.fill = document.getElementById("overlay").classList.contains("visible") ? '#ff0000' : 'var(--color-background)';
-    });
+    window.addEventListener('keydown', function (e) {
+        if (e.altKey == true && e.keyCode == 83)
+            search_q.classList.toggle('expanded');
+        search_q.focus();
+    });
+    search_q.onkeydown = (e) => {
+        var key = e.keyCode || e.which;
+        if (key === 13) {
+            document.searchForm.submit();
+        }
+    };
+    toggle_search.onclick = () => {
+        // Could be that they want to search something once text is in there
+        if (search_q.value.length) {
+            document.searchForm.submit();
+        } else {
+            // If not..
+            search_q.classList.toggle('expanded');
+            search_q.focus();
+        }
+    };
 });

View File

@@ -4,14 +4,6 @@ $(function () {
         $(this).closest('.unviewed').removeClass('unviewed');
     });
-    $('td[data-timestamp]').each(function () {
-        $(this).prop('title', new Intl.DateTimeFormat(undefined,
-            {
-                dateStyle: 'full',
-                timeStyle: 'long'
-            }).format($(this).data('timestamp') * 1000));
-    })
     $("#checkbox-assign-tag").click(function (e) {
         $('#op_extradata').val(prompt("Enter a tag name"));
     });

View File

@@ -187,10 +187,6 @@ ins {
   padding: 0.5em; }
 #settings ins {
   padding: 0.5em; }
-#settings option:checked {
-  font-weight: bold; }
-#settings [type=radio], #settings [type=checkbox] {
-  vertical-align: middle; }
 .source {
   position: absolute;

View File

@@ -77,13 +77,6 @@ ins {
   ins {
     padding: 0.5em;
   }
-  option:checked {
-    font-weight: bold;
-  }
-  [type=radio],[type=checkbox] {
-    vertical-align: middle;
-  }
 }
 .source {

View File

@@ -6,10 +6,6 @@
   }
   li {
-    &.browser-step-with-error {
-      background-color: #ffd6d6;
-      border-radius: 4px;
-    }
     &:not(:first-child) {
       &:hover {
         opacity: 1.0;
@@ -48,7 +44,7 @@
 #browser-steps .flex-wrapper {
   display: flex;
   flex-flow: row;
-  height: 70vh;
+  height: 600px; /*@todo make this dynamic */
 }
 /* this is duplicate :( */

View File

@@ -1,6 +1,6 @@
 #toggle-light-mode {
-  /* width: 3rem;*/
+  width: 3rem;
   /* default */
   .icon-dark {
     display: none;

View File

@@ -1,38 +0,0 @@
-#overlay {
-  opacity: 0.95;
-  position: fixed;
-  width: 350px;
-  max-width: 100%;
-  height: 100%;
-  top: 0;
-  right: -350px;
-  background-color: var(--color-table-stripe);
-  z-index: 2;
-  transform: translateX(0);
-  transition: transform .5s ease;
-  &.visible {
-    transform: translateX(-100%);
-  }
-  .content {
-    font-size: 0.875rem;
-    padding: 1rem;
-    margin-top: 5rem;
-    max-width: 400px;
-    color: var(--color-watch-table-row-text);
-  }
-}
-#heartpath {
-  &:hover {
-    fill: #ff0000 !important;
-    transition: all ease 0.3s !important;
-  }
-  transition: all ease 0.3s !important;
-}

View File

@@ -1,25 +0,0 @@
-.pure-menu-link {
-  padding: 0.5rem 1em;
-  line-height: 1.2rem;
-}
-.pure-menu-item {
-  svg {
-    height: 1.2rem;
-  }
-  * {
-    vertical-align: middle;
-  }
-  .github-link {
-    height: 1.8rem;
-    display: block;
-    svg {
-      height: 100%;
-    }
-  }
-  .bi-heart {
-    &:hover {
-      cursor: pointer;
-    }
-  }
-}

View File

@@ -1,28 +0,0 @@
-#selector-wrapper {
-  height: 100%;
-  max-height: 70vh;
-  overflow-y: scroll;
-  position: relative;
-  //width: 100%;
-  >img {
-    position: absolute;
-    z-index: 4;
-    max-width: 100%;
-  }
-  >canvas {
-    position: relative;
-    z-index: 5;
-    max-width: 100%;
-    &:hover {
-      cursor: pointer;
-    }
-  }
-}
-#selector-current-xpath {
-  font-size: 80%;
-}

View File

@@ -9,13 +9,10 @@
 @import "parts/_spinners";
 @import "parts/_variables";
 @import "parts/_darkmode";
-@import "parts/_menu";
-@import "parts/_love";
 body {
   color: var(--color-text);
   background: var(--color-background-page);
-  font-family: Helvetica Neue, Helvetica, Lucida Grande, Arial, Ubuntu, Cantarell, Fira Sans, sans-serif;
 }
 .visually-hidden {
@@ -58,6 +55,11 @@ a.github-link {
   }
 }
+#toggle-search {
+  width: 2rem;
+}
 #search-q {
   opacity: 0;
   -webkit-transition: all .9s ease;
@@ -469,11 +471,7 @@ footer {
   padding: 10px;
   &#left-sticky {
-    left: 0;
-    position: fixed;
-    border-top-right-radius: 5px;
-    border-bottom-right-radius: 5px;
-    box-shadow: 1px 1px 4px var(--color-shadow-jump);
+    left: 0px;
   }
   &#right-sticky {
@@ -941,7 +939,32 @@ ul {
   }
 }
-@import "parts/_visualselector";
+#selector-wrapper {
+  height: 100%;
+  overflow-y: scroll;
+  position: relative;
+  //width: 100%;
+  >img {
+    position: absolute;
+    z-index: 4;
+    max-width: 100%;
+  }
+  >canvas {
+    position: relative;
+    z-index: 5;
+    max-width: 100%;
+    &:hover {
+      cursor: pointer;
+    }
+  }
+}
+#selector-current-xpath {
+  font-size: 80%;
+}
 #webdriver-override-options {
   input[type="number"] {
@@ -1080,4 +1103,3 @@ ul {
   border-radius: 3px;
   white-space: nowrap;
 }

View File

@@ -26,9 +26,6 @@
 #browser_steps li {
   list-style: decimal;
   padding: 5px; }
-  #browser_steps li.browser-step-with-error {
-    background-color: #ffd6d6;
-    border-radius: 4px; }
   #browser_steps li:not(:first-child):hover {
     opacity: 1.0; }
   #browser_steps li .control {
@@ -53,7 +50,8 @@
 #browser-steps .flex-wrapper {
   display: flex;
   flex-flow: row;
-  height: 70vh; }
+  height: 600px;
+  /*@todo make this dynamic */ }
 /* this is duplicate :( */
 #browsersteps-selector-wrapper {
@@ -331,7 +329,7 @@ html[data-darkmode="true"] {
     color: var(--color-watch-table-error); }
 #toggle-light-mode {
-  /* width: 3rem;*/
+  width: 3rem;
   /* default */ }
   #toggle-light-mode .icon-dark {
     display: none; }
@@ -342,56 +340,9 @@ html[data-darkmode="true"] #toggle-light-mode .icon-light {
 html[data-darkmode="true"] #toggle-light-mode .icon-dark {
   display: block; }
-.pure-menu-link {
-  padding: 0.5rem 1em;
-  line-height: 1.2rem; }
-.pure-menu-item svg {
-  height: 1.2rem; }
-.pure-menu-item * {
-  vertical-align: middle; }
-.pure-menu-item .github-link {
-  height: 1.8rem;
-  display: block; }
-  .pure-menu-item .github-link svg {
-    height: 100%; }
-.pure-menu-item .bi-heart:hover {
-  cursor: pointer; }
-#overlay {
-  opacity: 0.95;
-  position: fixed;
-  width: 350px;
-  max-width: 100%;
-  height: 100%;
-  top: 0;
-  right: -350px;
-  background-color: var(--color-table-stripe);
-  z-index: 2;
-  transform: translateX(0);
-  transition: transform .5s ease; }
-  #overlay.visible {
-    transform: translateX(-100%); }
-  #overlay .content {
-    font-size: 0.875rem;
-    padding: 1rem;
-    margin-top: 5rem;
-    max-width: 400px;
-    color: var(--color-watch-table-row-text); }
-#heartpath {
-  transition: all ease 0.3s !important; }
-  #heartpath:hover {
-    fill: #ff0000 !important;
-    transition: all ease 0.3s !important; }
 body {
   color: var(--color-text);
-  background: var(--color-background-page);
-  font-family: Helvetica Neue, Helvetica, Lucida Grande, Arial, Ubuntu, Cantarell, Fira Sans, sans-serif; }
+  background: var(--color-background-page); }
 .visually-hidden {
   clip: rect(0 0 0 0);
@@ -423,6 +374,9 @@ a.github-link {
 a.github-link:hover {
   color: var(--color-icon-github-hover); }
+#toggle-search {
+  width: 2rem; }
 #search-q {
   opacity: 0;
   -webkit-transition: all .9s ease;
@@ -714,11 +668,7 @@ footer {
   background: var(--color-background);
   padding: 10px; }
 .sticky-tab#left-sticky {
-  left: 0;
-  position: fixed;
-  border-top-right-radius: 5px;
-  border-bottom-right-radius: 5px;
-  box-shadow: 1px 1px 4px var(--color-shadow-jump); }
+  left: 0px; }
 .sticky-tab#right-sticky {
   right: 0px; }
 .sticky-tab#hosted-sticky {
@@ -1027,7 +977,6 @@ ul {
 #selector-wrapper {
   height: 100%;
-  max-height: 70vh;
   overflow-y: scroll;
   position: relative; }
   #selector-wrapper > img {

View File

@@ -42,7 +42,6 @@ class ChangeDetectionStore:
         self.__data = App.model()
         self.datastore_path = datastore_path
         self.json_store_path = "{}/url-watches.json".format(self.datastore_path)
-        print(">>> Datastore path is ", self.json_store_path)
         self.needs_write = False
         self.start_time = time.time()
         self.stop_thread = False
@@ -96,14 +95,6 @@ class ChangeDetectionStore:
                     self.add_watch(url='https://changedetection.io/CHANGELOG.txt',
                                    tag='changedetection.io',
                                    extras={'fetch_backend': 'html_requests'})
-                updates_available = self.get_updates_available()
-                self.__data['settings']['application']['schema_version'] = updates_available.pop()
-            else:
-                # Bump the update version by running updates
-                self.run_updates()
             self.__data['version_tag'] = version_tag
         # Just to test that proxies.json if it exists, doesnt throw a parsing error on startup
@@ -133,6 +124,9 @@ class ChangeDetectionStore:
             secret = secrets.token_hex(16)
             self.__data['settings']['application']['api_access_token'] = secret
+        # Bump the update version by running updates
+        self.run_updates()
         self.needs_write = True
         # Finally start the thread that will manage periodic data saves to JSON
@@ -244,16 +238,12 @@ class ChangeDetectionStore:
         import pathlib
         self.__data['watching'][uuid].update({
-            'browser_steps_last_error_step' : None,
-            'check_count': 0,
-            'fetch_time' : 0.0,
-            'has_ldjson_price_data': None,
             'last_checked': 0,
+            'has_ldjson_price_data': None,
             'last_error': False,
             'last_notification_error': False,
             'last_viewed': 0,
             'previous_md5': False,
-            'previous_md5_before_filters': False,
             'track_ldjson_price_data': None,
         })
@@ -360,8 +350,6 @@ class ChangeDetectionStore:
         if write_to_disk_now:
             self.sync_to_json()
-        print("added ", url)
         return new_uuid
     def visualselector_data_is_ready(self, watch_uuid):
@@ -636,8 +624,14 @@ class ChangeDetectionStore:
     def tag_exists_by_name(self, tag_name):
         return any(v.get('title', '').lower() == tag_name.lower() for k, v in self.__data['settings']['application']['tags'].items())
-    def get_updates_available(self):
+    # Run all updates
+    # IMPORTANT - Each update could be run even when they have a new install and the schema is correct
+    # So therefor - each `update_n` should be very careful about checking if it needs to actually run
+    # Probably we should bump the current update schema version with each tag release version?
+    def run_updates(self):
         import inspect
+        import shutil
         updates_available = []
         for i, o in inspect.getmembers(self, predicate=inspect.ismethod):
             m = re.search(r'update_(\d+)$', i)
@@ -645,15 +639,6 @@ class ChangeDetectionStore:
                 updates_available.append(int(m.group(1)))
         updates_available.sort()
-        return updates_available
-    # Run all updates
-    # IMPORTANT - Each update could be run even when they have a new install and the schema is correct
-    # So therefor - each `update_n` should be very careful about checking if it needs to actually run
-    # Probably we should bump the current update schema version with each tag release version?
-    def run_updates(self):
-        import shutil
-        updates_available = self.get_updates_available()
         for update_n in updates_available:
             if update_n > self.__data['settings']['application']['schema_version']:
                 print ("Applying update_{}".format((update_n)))

View File

@@ -101,7 +101,7 @@
             </tr>
             <tr>
                 <td><code>{{ '{{current_snapshot}}' }}</code></td>
-                <td>The current snapshot text contents value, useful when combined with JSON or CSS filters
+                <td>The current snapshot value, useful when combined with JSON or CSS filters
                 </td>
             </tr>
             <tr>

View File

@@ -8,10 +8,10 @@
         <title>Change Detection{{extra_title}}</title>
         <link rel="alternate" type="application/rss+xml" title="Changedetection.io » Feed{% if active_tag %}- {{active_tag}}{% endif %}" href="{{ url_for('rss', tag=active_tag , token=app_rss_token)}}" >
         <link rel="stylesheet" href="{{url_for('static_content', group='styles', filename='pure-min.css')}}" >
-        <link rel="stylesheet" href="{{url_for('static_content', group='styles', filename='styles.css')}}?v={{ get_css_version() }}" >
+        <link rel="stylesheet" href="{{url_for('static_content', group='styles', filename='styles.css')}}" >
         {% if extra_stylesheets %}
             {% for m in extra_stylesheets %}
-                <link rel="stylesheet" href="{{ m }}?ver={{ get_css_version() }}" >
+                <link rel="stylesheet" href="{{ m }}?ver=1000" >
             {% endfor %}
         {% endif %}
@@ -85,7 +85,6 @@
                     <a href="{{url_for('logout')}}" class="pure-menu-link">LOG OUT</a>
                 </li>
                 {% endif %}
-                {% if current_user.is_authenticated or not has_password %}
                 <li class="pure-menu-item pure-form" id="search-menu-item">
                     <!-- We use GET here so it offers people a chance to set bookmarks etc -->
                     <form name="searchForm" action="" method="GET">
@@ -96,7 +95,6 @@
                         </button>
                     </form>
                 </li>
-                {% endif %}
                 <li class="pure-menu-item">
                     <button class="toggle-button" id ="toggle-light-mode" type="button" title="Toggle Light/Dark Mode">
                         <span class="visually-hidden">Toggle light/dark mode</span>
@@ -108,20 +106,6 @@
                         </span>
                     </button>
                 </li>
-                <li class="pure-menu-item" id="heart-us">
-                    <svg
-                            fill="#ff0000"
-                            class="bi bi-heart"
-                            preserveAspectRatio="xMidYMid meet"
-                            viewBox="0 0 16.9 16.1"
-                            id="svg-heart"
-                            xmlns="http://www.w3.org/2000/svg"
-                            xmlns:svg="http://www.w3.org/2000/svg">
-                        <path id="heartpath" d="M 5.338316,0.50302766 C 0.71136983,0.50647126 -3.9576371,7.2707777 8.5004254,15.503028 23.833425,5.3700277 13.220206,-2.5384409 8.6762066,1.6475589 c -0.060791,0.054322 -0.11943,0.1110064 -0.1757812,0.1699219 -0.057,-0.059 -0.1157813,-0.116875 -0.1757812,-0.171875 C 7.4724566,0.86129334 6.4060729,0.50223298 5.338316,0.50302766 Z"
-                              style="fill:var(--color-background);fill-opacity:1;stroke:#ff0000;stroke-opacity:1" />
-                    </svg>
-                </li>
                 <li class="pure-menu-item">
                     <a class="github-link" href="https://github.com/dgtlmoon/changedetection.io">
                         {% include "svgs/github.svg" %}
@@ -137,52 +121,14 @@
     {% endif %}
     {% if left_sticky %}
         <div class="sticky-tab" id="left-sticky">
-            <a href="{{url_for('preview_page', uuid=uuid)}}">Show current snapshot</a><br>
-            Visualise <strong>triggers</strong> and <strong>ignored text</strong>
+            <a href="{{url_for('preview_page', uuid=uuid)}}">Show current snapshot</a>
         </div>
     {% endif %}
     {% if right_sticky %}
         <div class="sticky-tab" id="right-sticky">{{ right_sticky }}</div>
     {% endif %}
     <section class="content">
-        <div id="overlay">
-            <div class="content">
-                <strong>changedetection.io needs your support!</strong><br>
-                <p>
-                    You can help us by supporting changedetection.io on these platforms;
-                </p>
-                <p>
-                    <ul>
-                        <li>
-                            <a href="https://alternativeto.net/software/changedetection-io/about/">Rate us at
-                                AlternativeTo.net</a>
-                        </li>
-                        <li>
-                            <a href="https://github.com/dgtlmoon/changedetection.io">Star us on GitHub</a>
-                        </li>
-                        <li>
-                            <a href="https://twitter.com/change_det_io">Follow us at Twitter/X</a>
-                        </li>
-                        <li>
-                            <a href="https://www.linkedin.com/company/changedetection-io">Check us out on LinkedIn</a>
-                        </li>
-                        <li>
-                            And tell your friends and colleagues :)
-                        </li>
-                    </ul>
-                </p>
-                <p>
-                    The more popular changedetection.io is, the more time we can dedicate to adding amazing features!
-                </p>
-                <p>
-                    Many thanks :)<br>
-                </p>
-                <p>
-                    <i>changedetection.io team</i>
-                </p>
-            </div>
-        </div>
         <header>
             {% block header %}{% endblock %}
         </header>
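The ?v={{ get_css_version() }} query string on the - side is a cache-busting hook: when the value changes between releases, browsers refetch styles.css instead of serving a stale cached copy. A plausible server-side counterpart registers a template global; the function body below is an assumption, only the template usage appears in the diff:

from flask import Flask

app = Flask(__name__)

@app.template_global('get_css_version')
def get_css_version():
    # Any value that changes per release works; the app version tag is the obvious choice
    return "0.45.1"  # illustrative value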

View File

@@ -13,31 +13,10 @@
     <script src="{{url_for('static_content', group='js', filename='diff-overview.js')}}" defer></script>
     <div id="settings">
-        <h1>Differences</h1>
         <form class="pure-form " action="" method="GET">
             <fieldset>
-                {% if versions|length >= 1 %}
-                    <strong>Compare</strong>
-                    <del class="change"><span>from</span></del>
-                    <select id="diff-version" name="from_version" class="needs-localtime">
-                        {% for version in versions|reverse %}
-                            <option value="{{ version }}" {% if version== from_version %} selected="" {% endif %}>
-                                {{ version }}
-                            </option>
-                        {% endfor %}
-                    </select>
-                    <ins class="change"><span>to</span></ins>
-                    <select id="current-version" name="to_version" class="needs-localtime">
-                        {% for version in versions|reverse %}
-                            <option value="{{ version }}" {% if version== to_version %} selected="" {% endif %}>
-                                {{ version }}
-                            </option>
-                        {% endfor %}
-                    </select>
-                    <button type="submit" class="pure-button pure-button-primary">Go</button>
-                {% endif %}
-            </fieldset>
-            <fieldset>
-                <strong>Style</strong>
                 <label for="diffWords" class="pure-checkbox">
                     <input type="radio" name="diff_type" id="diffWords" value="diffWords"> Words</label>
                 <label for="diffLines" class="pure-checkbox">
@@ -47,20 +26,32 @@
                     <input type="radio" name="diff_type" id="diffChars" value="diffChars"> Chars</label>
                 <!-- @todo - when mimetype is JSON, select this by default? -->
                 <label for="diffJson" class="pure-checkbox">
-                    <input type="radio" name="diff_type" id="diffJson" value="diffJson"> JSON</label>
-                <span>
-                    <!-- https://github.com/kpdecker/jsdiff/issues/389 ? -->
-                    <label for="ignoreWhitespace" class="pure-checkbox" id="label-diff-ignorewhitespace">
-                        <input type="checkbox" id="ignoreWhitespace" name="ignoreWhitespace"> Ignore Whitespace</label>
-                </span>
+                    <input type="radio" name="diff_type" id="diffJson" value="diffJson" > JSON</label>
+                {% if versions|length >= 1 %}
+                    <label for="diff-version">Compare newest (<span id="current-v-date"></span>) with</label>
+                    <select id="diff-version" name="previous_version">
+                        {% for version in versions|reverse %}
+                            <option value="{{version}}" {% if version== current_previous_version %} selected="" {% endif %}>
+                                {{version}}
+                            </option>
+                        {% endfor %}
+                    </select>
+                    <button type="submit" class="pure-button pure-button-primary">Go</button>
+                {% endif %}
             </fieldset>
         </form>
+        <del>Removed text</del>
+        <ins>Inserted Text</ins>
+        <span>
+            <!-- https://github.com/kpdecker/jsdiff/issues/389 ? -->
+            <label for="ignoreWhitespace" class="pure-checkbox" id="label-diff-ignorewhitespace">
+                <input type="checkbox" id="ignoreWhitespace" name="ignoreWhitespace" > Ignore Whitespace</label>
+        </span>
     </div>
     <div id="diff-jump">
-        <a id="jump-next-diff" title="Jump to next difference">Jump</a>
+        <a onclick="next_diff();">Jump</a>
     </div>
     <script src="{{url_for('static_content', group='js', filename='tabs.js')}}" defer></script>
@@ -88,6 +79,8 @@
     </div>
     <div class="tab-pane-inner" id="text">
+        <div class="tip">Pro-tip: Use <strong>show current snapshot</strong> tab to visualise what will be ignored, highlight text to add to ignore filters</div>
         {% if password_enabled_and_share_is_off %}
             <div class="tip">Pro-tip: You can enable <strong>"share access when password is enabled"</strong> from settings</div>
         {% endif %}
@@ -98,8 +91,8 @@
             <tbody>
             <tr>
                 <!-- just proof of concept copied straight from github.com/kpdecker/jsdiff -->
-                <td id="a" style="display: none;">{{from_version_file_contents}}</td>
-                <td id="b" style="display: none;">{{to_version_file_contents}}</td>
+                <td id="a" style="display: none;">{{previous}}</td>
+                <td id="b" style="display: none;">{{newest}}</td>
                 <td id="diff-col">
                     <span id="result" class="highlightable-filter"></span>
                 </td>
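The reworked form submits from_version and to_version as GET parameters, which keeps comparison URLs bookmarkable. A minimal handler sketch for consuming them; the route, defaults, and version list are illustrative, not taken from the repo:

from flask import Flask, request

app = Flask(__name__)

@app.route("/diff/<string:uuid>")
def diff_history_page(uuid):
    versions = ["1695200000", "1695300000", "1695400000"]  # stand-in snapshot keys
    # Fall back to oldest -> newest when params are missing or unknown
    from_version = request.args.get('from_version')
    to_version = request.args.get('to_version')
    if from_version not in versions:
        from_version = versions[0]
    if to_version not in versions:
        to_version = versions[-1]
    return "comparing {} -> {}".format(from_version, to_version)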

View File

@@ -4,10 +4,8 @@
 {% from '_common_fields.jinja' import render_common_settings_form %}
 <script src="{{url_for('static_content', group='js', filename='tabs.js')}}" defer></script>
 <script>
-    const browser_steps_available_screenshots=JSON.parse('{{ watch.get_browsersteps_available_screenshots|tojson }}');
     const browser_steps_config=JSON.parse('{{ browser_steps_config|tojson }}');
-    const browser_steps_fetch_screenshot_image_url="{{url_for('browser_steps.browser_steps_fetch_screenshot_image', uuid=uuid)}}";
-    const browser_steps_last_error_step={{ watch.browser_steps_last_error_step|tojson }};
     const browser_steps_start_url="{{url_for('browser_steps.browsersteps_start_session', uuid=uuid)}}";
     const browser_steps_sync_url="{{url_for('browser_steps.browsersteps_ui_update', uuid=uuid)}}";
     {% if emailprefix %}
@@ -51,7 +49,6 @@
             <li class="tab"><a href="#restock">Restock Detection</a></li>
             {% endif %}
             <li class="tab"><a href="#notifications">Notifications</a></li>
-            <li class="tab"><a href="#stats">Stats</a></li>
         </ul>
     </div>
@@ -112,7 +109,7 @@
                     <span class="pure-form-message-inline">
                         <p>Use the <strong>Basic</strong> method (default) where your watched site doesn't need Javascript to render.</p>
                         <p>The <strong>Chrome/Javascript</strong> method requires a network connection to a running WebDriver+Chrome server, set by the ENV var 'WEBDRIVER_URL'. </p>
-                        Tip: <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration#brightdata-proxy-support">Connect using Bright Data and Oxylabs Proxies, find out more here.</a>
+                        Tip: <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration#brightdata-proxy-support">Connect using BrightData Proxies, find out more here.</a>
                     </span>
                 </div>
                 {% if form.proxy %}
@@ -381,16 +378,15 @@ Unavailable") }}
                         {{ render_field(form.extract_text, rows=5, placeholder="\d+ online") }}
                         <span class="pure-form-message-inline">
                             <ul>
-                                <li>Extracts text in the final output (line by line) after other filters using regular expressions or string match;
+                                <li>Extracts text in the final output (line by line) after other filters using regular expressions;
                                     <ul>
                                         <li>Regular expression &dash; example <code>/reports.+?2022/i</code></li>
-                                        <li>Don't forget to consider the white-space at the start of a line <code>/.+?reports.+?2022/i</code></li>
                                         <li>Use <code>//(?aiLmsux))</code> type flags (more <a href="https://docs.python.org/3/library/re.html#index-15">information here</a>)<br></li>
                                         <li>Keyword example &dash; example <code>Out of stock</code></li>
                                         <li>Use groups to extract just that text &dash; example <code>/reports.+?(\d+)/i</code> returns a list of years only</li>
                                     </ul>
                                 </li>
-                                <li>One line per regular-expression/string match</li>
+                                <li>One line per regular-expression/ string match</li>
                             </ul>
                         </span>
                     </div>
@@ -444,35 +440,7 @@ Unavailable") }}
                 </fieldset>
             </div>
             {% endif %}
-            <div class="tab-pane-inner" id="stats">
-                <div class="pure-control-group">
-                    <style>
-                        #stats-table tr > td:first-child {
-                            font-weight: bold;
-                        }
-                    </style>
-                    <table class="pure-table" id="stats-table">
-                        <tbody>
-                        <tr>
-                            <td>Check count</td>
-                            <td>{{ "{:,}".format( watch.check_count) }}</td>
-                        </tr>
-                        <tr>
-                            <td>Consecutive filter failures</td>
-                            <td>{{ "{:,}".format( watch.consecutive_filter_failures) }}</td>
-                        </tr>
-                        <tr>
-                            <td>History length</td>
-                            <td>{{ "{:,}".format(watch.history|length) }}</td>
-                        </tr>
-                        <tr>
-                            <td>Last fetch time</td>
-                            <td>{{ watch.fetch_time }}s</td>
-                        </tr>
-                        </tbody>
-                    </table>
-                </div>
-            </div>
             <div id="actions">
                 <div class="pure-control-group">
                     {{ render_button(form.save_button) }}

View File

@@ -8,12 +8,11 @@
<ul> <ul>
<li class="tab" id=""><a href="#url-list">URL List</a></li> <li class="tab" id=""><a href="#url-list">URL List</a></li>
<li class="tab"><a href="#distill-io">Distill.io</a></li> <li class="tab"><a href="#distill-io">Distill.io</a></li>
<li class="tab"><a href="#xlsx">.XLSX &amp; Wachete</a></li>
</ul> </ul>
</div> </div>
<div class="box-wrap inner"> <div class="box-wrap inner">
<form class="pure-form" action="{{url_for('import_page')}}" method="POST" enctype="multipart/form-data"> <form class="pure-form pure-form-aligned" action="{{url_for('import_page')}}" method="POST">
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}"> <input type="hidden" name="csrf_token" value="{{ csrf_token() }}">
<div class="tab-pane-inner" id="url-list"> <div class="tab-pane-inner" id="url-list">
<legend> <legend>
@@ -80,42 +79,6 @@
" rows="25">{{ original_distill_json }}</textarea> " rows="25">{{ original_distill_json }}</textarea>
</div> </div>
<div class="tab-pane-inner" id="xlsx">
<fieldset>
<div class="pure-control-group">
{{ render_field(form.xlsx_file, class="processor") }}
</div>
<div class="pure-control-group">
{{ render_field(form.file_mapping, class="processor") }}
</div>
</fieldset>
<div class="pure-control-group">
<span class="pure-form-message-inline">
Table of custom column and data types mapping for the <strong>Custom mapping</strong> File mapping type.
</span>
<table style="border: 1px solid #aaa; padding: 0.5rem; border-radius: 4px;">
<tr>
<td><strong>Column #</strong></td>
{% for n in range(4) %}
<td><input type="number" name="custom_xlsx[col_{{n}}]" style="width: 4rem;" min="1"></td>
{% endfor %}
</tr>
<tr>
<td><strong>Type</strong></td>
{% for n in range(4) %}
<td><select name="custom_xlsx[col_type_{{n}}]">
<option value="" style="color: #aaa"> -- none --</option>
<option value="url">URL</option>
<option value="title">Title</option>
<option value="include_filter">CSS/xPath filter</option>
<option value="tag">Group / Tag name(s)</option>
<option value="interval_minutes">Recheck time (minutes)</option>
</select></td>
{% endfor %}
</tr>
</table>
</div>
</div>
<button type="submit" class="pure-button pure-input-1-2 pure-button-primary">Import</button> <button type="submit" class="pure-button pure-input-1-2 pure-button-primary">Import</button>
</form> </form>
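For context on the .XLSX import tab removed in this compare: each "Column #" / "Type" pair in the form maps a 1-based spreadsheet column to a watch field. A rough sketch of how such a mapping could be applied with openpyxl (hypothetical reader code under assumed column numbers taken from the related test, not the importer's actual implementation):

    from openpyxl import load_workbook

    # Hypothetical mapping mirroring the form: 1-based column number -> watch field
    mapping = {1: 'title', 3: 'url', 5: 'include_filters', 4: 'interval_minutes'}

    wb = load_workbook('spreadsheet.xlsx', read_only=True)
    watches = []
    for n, row in enumerate(wb.active.iter_rows(values_only=True), start=1):
        watch = {field: row[col - 1]
                 for col, field in mapping.items() if col <= len(row) and row[col - 1]}
        if watch.get('url'):
            watches.append(watch)
        else:
            print(f"Error processing row number {n}")  # e.g. a header row with no usable URL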

View File

@@ -109,7 +109,7 @@
<p>The <strong>Chrome/Javascript</strong> method requires a network connection to a running WebDriver+Chrome server, set by the ENV var 'WEBDRIVER_URL'. </p> <p>The <strong>Chrome/Javascript</strong> method requires a network connection to a running WebDriver+Chrome server, set by the ENV var 'WEBDRIVER_URL'. </p>
</span> </span>
<br> <br>
Tip: <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration#brightdata-proxy-support">Connect using Bright Data and Oxylabs Proxies, find out more here.</a> Tip: <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration#brightdata-proxy-support">Connect using BrightData Proxies, find out more here.</a>
</div> </div>
<fieldset class="pure-group" id="webdriver-override-options"> <fieldset class="pure-group" id="webdriver-override-options">
<div class="pure-form-message-inline"> <div class="pure-form-message-inline">
@@ -178,9 +178,6 @@ nav
<span style="display:none;" id="api-key-copy" >copy</span> <span style="display:none;" id="api-key-copy" >copy</span>
</div> </div>
</div> </div>
<div class="pure-control-group">
<a href="{{url_for('settings_reset_api_key')}}" class="pure-button button-small button-cancel">Regenerate API key</a>
</div>
</div> </div>
<div class="tab-pane-inner" id="proxies"> <div class="tab-pane-inner" id="proxies">
<div id="recommended-proxy"> <div id="recommended-proxy">
@@ -232,8 +229,7 @@ nav
<div class="pure-control-group"> <div class="pure-control-group">
{{ render_field(form.requests.form.extra_proxies) }} {{ render_field(form.requests.form.extra_proxies) }}
<span class="pure-form-message-inline">"Name" will be used for selecting the proxy in the Watch Edit settings</span><br> <span class="pure-form-message-inline">"Name" will be used for selecting the proxy in the Watch Edit settings</span>
<span class="pure-form-message-inline">SOCKS5 proxies with authentication are only supported with 'plain requests' fetcher, for other fetchers you should whitelist the IP access instead</span>
</div> </div>
</div> </div>
<div id="actions"> <div id="actions">

View File

@@ -1,6 +1,3 @@
<svg class="octicon octicon-mark-github v-align-middle" viewbox="0 0 16 16" version="1.1" aria-hidden="true"> <svg class="octicon octicon-mark-github v-align-middle" height="32" viewbox="0 0 16 16" version="1.1" width="32" aria-hidden="true">
<path <path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0016 8c0-4.42-3.58-8-8-8z"></path>
fill-rule="evenodd"
d="M 8,0 C 3.58,0 0,3.58 0,8 c 0,3.54 2.29,6.53 5.47,7.59 0.4,0.07 0.55,-0.17 0.55,-0.38 0,-0.19 -0.01,-0.82 -0.01,-1.49 C 4,14.09 3.48,13.23 3.32,12.78 3.23,12.55 2.84,11.84 2.5,11.65 2.22,11.5 1.82,11.13 2.49,11.12 3.12,11.11 3.57,11.7 3.72,11.94 4.44,13.15 5.59,12.81 6.05,12.6 6.12,12.08 6.33,11.73 6.56,11.53 4.78,11.33 2.92,10.64 2.92,7.58 2.92,6.71 3.23,5.99 3.74,5.43 3.66,5.23 3.38,4.41 3.82,3.31 c 0,0 0.67,-0.21 2.2,0.82 0.64,-0.18 1.32,-0.27 2,-0.27 0.68,0 1.36,0.09 2,0.27 1.53,-1.04 2.2,-0.82 2.2,-0.82 0.44,1.1 0.16,1.92 0.08,2.12 0.51,0.56 0.82,1.27 0.82,2.15 0,3.07 -1.87,3.75 -3.65,3.95 0.29,0.25 0.54,0.73 0.54,1.48 0,1.07 -0.01,1.93 -0.01,2.2 0,0.21 0.15,0.46 0.55,0.38 A 8.013,8.013 0 0 0 16,8 C 16,3.58 12.42,0 8,0 Z"
id="path2" />
</svg> </svg>

Before: 917 B | After: 749 B

View File

@@ -119,9 +119,6 @@
<a href="{{ url_for('settings_page', uuid=watch.uuid) }}#proxies">Try adding external proxies/locations</a> <a href="{{ url_for('settings_page', uuid=watch.uuid) }}#proxies">Try adding external proxies/locations</a>
{% endif %} {% endif %}
{% if 'empty result or contain only an image' in watch.last_error %}
<a href="https://github.com/dgtlmoon/changedetection.io/wiki/Detecting-changes-in-images">more help here</a>.
{% endif %}
</div> </div>
{% endif %} {% endif %}
{% if watch.last_notification_error is defined and watch.last_notification_error != False %} {% if watch.last_notification_error is defined and watch.last_notification_error != False %}
@@ -154,8 +151,8 @@
{% endfor %} {% endfor %}
</td> </td>
<td class="last-checked" data-timestamp="{{ watch.last_checked }}">{{watch|format_last_checked_time|safe}}</td> <td class="last-checked">{{watch|format_last_checked_time|safe}}</td>
<td class="last-changed" data-timestamp="{{ watch.last_changed }}">{% if watch.history_n >=2 and watch.last_changed >0 %} <td class="last-changed">{% if watch.history_n >=2 and watch.last_changed >0 %}
{{watch.last_changed|format_timestamp_timeago}} {{watch.last_changed|format_timestamp_timeago}}
{% else %} {% else %}
Not yet Not yet
@@ -178,18 +175,13 @@
</tbody> </tbody>
</table> </table>
<ul id="post-list-buttons"> <ul id="post-list-buttons">
{% if errored_count %}
<li>
<a href="{{url_for('index', with_errors=1, tag=request.args.get('tag')) }}" class="pure-button button-tag button-error ">With errors ({{ errored_count }})</a>
</li>
{% endif %}
{% if has_unviewed %} {% if has_unviewed %}
<li> <li>
<a href="{{url_for('mark_all_viewed',with_errors=request.args.get('with_errors',0)) }}" class="pure-button button-tag ">Mark all viewed</a> <a href="{{url_for('mark_all_viewed', tag=request.args.get('tag')) }}" class="pure-button button-tag ">Mark all viewed</a>
</li> </li>
{% endif %} {% endif %}
<li> <li>
<a href="{{ url_for('form_watch_checknow', tag=active_tag, with_errors=request.args.get('with_errors',0)) }}" class="pure-button button-tag ">Recheck <a href="{{ url_for('form_watch_checknow', tag=active_tag) }}" class="pure-button button-tag ">Recheck
all {% if active_tag%} in "{{tags[active_tag].title}}"{%endif%}</a> all {% if active_tag%} in "{{tags[active_tag].title}}"{%endif%}</a>
</li> </li>
<li> <li>

View File

@@ -28,6 +28,8 @@ def test_fetch_webdriver_content(client, live_server):
) )
assert b"1 Imported" in res.data assert b"1 Imported" in res.data
time.sleep(3)
wait_for_all_checks(client) wait_for_all_checks(client)
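A recurring difference in this compare is between fixed time.sleep() calls and the wait_for_all_checks(client) helper, which polls until the check queue has drained rather than guessing a duration. A plausible simplified form of that helper (the real one lives in changedetectionio/tests/util.py; this version assumes a Flask test client used within the live-server app context):

    import time
    from flask import url_for

    def wait_for_all_checks(client, timeout=60):
        # Poll the watch overview until no watch reports 'Checking now'
        start = time.time()
        while time.time() - start < timeout:
            res = client.get(url_for("index"))
            if b'Checking now' not in res.data:
                return
            time.sleep(0.3)
        raise TimeoutError(f"Watches still being checked after {timeout}s")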

View File

@@ -2,11 +2,12 @@
import time import time
from flask import url_for from flask import url_for
from ..util import live_server_setup, wait_for_all_checks from ..util import live_server_setup
def test_preferred_proxy(client, live_server): def test_preferred_proxy(client, live_server):
time.sleep(1)
live_server_setup(live_server) live_server_setup(live_server)
time.sleep(1)
url = "http://chosen.changedetection.io" url = "http://chosen.changedetection.io"
res = client.post( res = client.post(
@@ -19,7 +20,7 @@ def test_preferred_proxy(client, live_server):
assert b"1 Imported" in res.data assert b"1 Imported" in res.data
wait_for_all_checks(client) time.sleep(2)
res = client.post( res = client.post(
url_for("edit_page", uuid="first"), url_for("edit_page", uuid="first"),
data={ data={
@@ -33,5 +34,5 @@ def test_preferred_proxy(client, live_server):
follow_redirects=True follow_redirects=True
) )
assert b"Updated watch." in res.data assert b"Updated watch." in res.data
wait_for_all_checks(client) time.sleep(2)
# Now the request should appear in the second-squid logs # Now the request should appear in the second-squid logs

View File

@@ -1,6 +0,0 @@
{
"socks5proxy": {
"label": "socks5proxy",
"url": "socks5://proxy_user123:proxy_pass123@socks5proxy:1080"
}
}

View File

@@ -1,6 +0,0 @@
{
"socks5proxy": {
"label": "socks5proxy",
"url": "socks5://socks5proxy-noauth:1080"
}
}

View File

@@ -1,63 +0,0 @@
#!/usr/bin/python3
import os
import time
from flask import url_for
from changedetectionio.tests.util import live_server_setup, wait_for_all_checks
def test_socks5(client, live_server):
live_server_setup(live_server)
# Setup a proxy
res = client.post(
url_for("settings_page"),
data={
"requests-time_between_check-minutes": 180,
"application-ignore_whitespace": "y",
"application-fetch_backend": "html_requests",
# set in .github/workflows/test-only.yml
"requests-extra_proxies-0-proxy_url": "socks5://proxy_user123:proxy_pass123@socks5proxy:1080",
"requests-extra_proxies-0-proxy_name": "socks5proxy",
},
follow_redirects=True
)
assert b"Settings updated." in res.data
test_url = "https://changedetection.io/CHANGELOG.txt?socks-test-tag=" + os.getenv('SOCKSTEST', '')
res = client.post(
url_for("form_quick_watch_add"),
data={"url": test_url, "tags": '', 'edit_and_watch_submit_button': 'Edit > Watch'},
follow_redirects=True
)
assert b"Watch added in Paused state, saving will unpause" in res.data
res = client.get(
url_for("edit_page", uuid="first", unpause_on_save=1),
)
# check the proxy is offered as expected
assert b'ui-0socks5proxy' in res.data
res = client.post(
url_for("edit_page", uuid="first", unpause_on_save=1),
data={
"include_filters": "",
"fetch_backend": 'html_webdriver' if os.getenv('PLAYWRIGHT_DRIVER_URL') else 'html_requests',
"headers": "",
"proxy": "ui-0socks5proxy",
"tags": "",
"url": test_url,
},
follow_redirects=True
)
assert b"unpaused" in res.data
wait_for_all_checks(client)
res = client.get(
url_for("preview_page", uuid="first"),
follow_redirects=True
)
# Should see the proper string
assert "+0200:".encode('utf-8') in res.data

View File

@@ -1,52 +0,0 @@
#!/usr/bin/python3
import os
import time
from flask import url_for
from changedetectionio.tests.util import live_server_setup, wait_for_all_checks
# should be proxies.json mounted from run_proxy_tests.sh already
# -v `pwd`/tests/proxy_socks5/proxies.json-example:/app/changedetectionio/test-datastore/proxies.json
def test_socks5_from_proxiesjson_file(client, live_server):
live_server_setup(live_server)
test_url = "https://changedetection.io/CHANGELOG.txt?socks-test-tag=" + os.getenv('SOCKSTEST', '')
res = client.get(url_for("settings_page"))
assert b'name="requests-proxy" type="radio" value="socks5proxy"' in res.data
res = client.post(
url_for("form_quick_watch_add"),
data={"url": test_url, "tags": '', 'edit_and_watch_submit_button': 'Edit > Watch'},
follow_redirects=True
)
assert b"Watch added in Paused state, saving will unpause" in res.data
res = client.get(
url_for("edit_page", uuid="first", unpause_on_save=1),
)
# check the proxy is offered as expected
assert b'name="proxy" type="radio" value="socks5proxy"' in res.data
res = client.post(
url_for("edit_page", uuid="first", unpause_on_save=1),
data={
"include_filters": "",
"fetch_backend": 'html_webdriver' if os.getenv('PLAYWRIGHT_DRIVER_URL') else 'html_requests',
"headers": "",
"proxy": "socks5proxy",
"tags": "",
"url": test_url,
},
follow_redirects=True
)
assert b"unpaused" in res.data
wait_for_all_checks(client)
res = client.get(
url_for("preview_page", uuid="first"),
follow_redirects=True
)
# Should see the proper string
assert "+0200:".encode('utf-8') in res.data

View File

@@ -1,4 +1,4 @@
from .util import live_server_setup, extract_UUID_from_client, wait_for_all_checks from . util import live_server_setup, extract_UUID_from_client
from flask import url_for from flask import url_for
import time import time
@@ -19,16 +19,10 @@ def test_check_access_control(app, client, live_server):
) )
assert b"1 Imported" in res.data assert b"1 Imported" in res.data
time.sleep(3) time.sleep(2)
# causes a 'Popped wrong request context.' error when client. is accessed? res = client.get(url_for("form_watch_checknow"), follow_redirects=True)
#wait_for_all_checks(client)
res = c.get(url_for("form_watch_checknow"), follow_redirects=True)
assert b'1 watches queued for rechecking.' in res.data assert b'1 watches queued for rechecking.' in res.data
time.sleep(3) time.sleep(2)
# causes a 'Popped wrong request context.' error when client. is accessed?
#wait_for_all_checks(client)
# Enable password check and diff page access bypass # Enable password check and diff page access bypass
res = c.post( res = c.post(
@@ -48,7 +42,7 @@ def test_check_access_control(app, client, live_server):
assert b"Login" in res.data assert b"Login" in res.data
# The diff page should return something valid when logged out # The diff page should return something valid when logged out
res = c.get(url_for("diff_history_page", uuid="first")) res = client.get(url_for("diff_history_page", uuid="first"))
assert b'Random content' in res.data assert b'Random content' in res.data
# Check wrong password does not let us in # Check wrong password does not let us in
@@ -89,8 +83,6 @@ def test_check_access_control(app, client, live_server):
res = c.get(url_for("logout"), res = c.get(url_for("logout"),
follow_redirects=True) follow_redirects=True)
assert b"Login" in res.data
res = c.get(url_for("settings_page"), res = c.get(url_for("settings_page"),
follow_redirects=True) follow_redirects=True)
@@ -168,5 +160,5 @@ def test_check_access_control(app, client, live_server):
assert b"Login" in res.data assert b"Login" in res.data
# The diff page should return something valid when logged out # The diff page should return something valid when logged out
res = c.get(url_for("diff_history_page", uuid="first")) res = client.get(url_for("diff_history_page", uuid="first"))
assert b'Random content' not in res.data assert b'Random content' not in res.data

View File

@@ -2,8 +2,7 @@
import time import time
from flask import url_for from flask import url_for
from .util import live_server_setup, extract_UUID_from_client, extract_api_key_from_UI, wait_for_all_checks from .util import live_server_setup, extract_UUID_from_client, extract_api_key_from_UI
def set_response_with_ldjson(): def set_response_with_ldjson():
test_return_data = """<html> test_return_data = """<html>
@@ -28,7 +27,7 @@ def set_response_with_ldjson():
"description":"You dont need it", "description":"You dont need it",
"mpn":"111111", "mpn":"111111",
"sku":"22222", "sku":"22222",
"Offers":{ "offers":{
"@type":"AggregateOffer", "@type":"AggregateOffer",
"lowPrice":8097000, "lowPrice":8097000,
"highPrice":8099900, "highPrice":8099900,
@@ -76,11 +75,12 @@ def set_response_without_ldjson():
f.write(test_return_data) f.write(test_return_data)
return None return None
def test_setup(client, live_server):
live_server_setup(live_server)
# actually only really used by the distill.io importer, but could be handy too # actually only really used by the distill.io importer, but could be handy too
def test_check_ldjson_price_autodetect(client, live_server): def test_check_ldjson_price_autodetect(client, live_server):
live_server_setup(live_server)
# Give the endpoint time to spin up
time.sleep(1)
set_response_with_ldjson() set_response_with_ldjson()
@@ -92,7 +92,7 @@ def test_check_ldjson_price_autodetect(client, live_server):
follow_redirects=True follow_redirects=True
) )
assert b"1 Imported" in res.data assert b"1 Imported" in res.data
wait_for_all_checks(client) time.sleep(3)
# Should get a notice that it's available # Should get a notice that it's available
res = client.get(url_for("index")) res = client.get(url_for("index"))
@@ -102,11 +102,11 @@ def test_check_ldjson_price_autodetect(client, live_server):
uuid = extract_UUID_from_client(client) uuid = extract_UUID_from_client(client)
client.get(url_for('price_data_follower.accept', uuid=uuid, follow_redirects=True)) client.get(url_for('price_data_follower.accept', uuid=uuid, follow_redirects=True))
wait_for_all_checks(client) time.sleep(2)
# Trigger a check # Trigger a check
client.get(url_for("form_watch_checknow"), follow_redirects=True) client.get(url_for("form_watch_checknow"), follow_redirects=True)
wait_for_all_checks(client) time.sleep(2)
# Offer should be gone # Offer should be gone
res = client.get(url_for("index")) res = client.get(url_for("index"))
assert b'Embedded price data' not in res.data assert b'Embedded price data' not in res.data
@@ -138,97 +138,9 @@ def test_check_ldjson_price_autodetect(client, live_server):
follow_redirects=True follow_redirects=True
) )
assert b"1 Imported" in res.data assert b"1 Imported" in res.data
wait_for_all_checks(client) time.sleep(3)
res = client.get(url_for("index")) res = client.get(url_for("index"))
assert b'ldjson-price-track-offer' not in res.data assert b'ldjson-price-track-offer' not in res.data
########################################################################################## ##########################################################################################
client.get(url_for("form_delete", uuid="all"), follow_redirects=True) client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
def _test_runner_check_bad_format_ignored(live_server, client, has_ldjson_price_data):
test_url = url_for('test_endpoint', _external=True)
res = client.post(
url_for("import_page"),
data={"urls": test_url},
follow_redirects=True
)
assert b"1 Imported" in res.data
wait_for_all_checks(client)
for k,v in client.application.config.get('DATASTORE').data['watching'].items():
assert v.get('last_error') == False
assert v.get('has_ldjson_price_data') == has_ldjson_price_data
##########################################################################################
client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
def test_bad_ldjson_is_correctly_ignored(client, live_server):
#live_server_setup(live_server)
test_return_data = """
<html>
<head>
<script type="application/ld+json">
{
"@context": "http://schema.org",
"@type": ["Product", "SubType"],
"name": "My test product",
"description": "",
"offers": {
"note" : "You can see the case-insensitive OffERS key, it should work",
"@type": "Offer",
"offeredBy": {
"@type": "Organization",
"name":"Person",
"telephone":"+1 999 999 999"
},
"price": "1",
"priceCurrency": "EUR",
"url": "/some/url"
}
}
</script>
</head>
<body>
<div class="yes">Some extra stuff</div>
</body></html>
"""
with open("test-datastore/endpoint-content.txt", "w") as f:
f.write(test_return_data)
_test_runner_check_bad_format_ignored(live_server=live_server, client=client, has_ldjson_price_data=True)
test_return_data = """
<html>
<head>
<script type="application/ld+json">
{
"@context": "http://schema.org",
"@type": ["Product", "SubType"],
"name": "My test product",
"description": "",
"BrokenOffers": {
"@type": "Offer",
"offeredBy": {
"@type": "Organization",
"name":"Person",
"telephone":"+1 999 999 999"
},
"price": "1",
"priceCurrency": "EUR",
"url": "/some/url"
}
}
</script>
</head>
<body>
<div class="yes">Some extra stuff</div>
</body></html>
"""
with open("test-datastore/endpoint-content.txt", "w") as f:
f.write(test_return_data)
_test_runner_check_bad_format_ignored(live_server=live_server, client=client, has_ldjson_price_data=False)
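The two fixtures above differ only in the key holding the offer data ('offers', matched case-insensitively, versus 'BrokenOffers'), which is what flips has_ldjson_price_data. A rough standalone sketch of that detection (assumed logic for illustration, not the app's implementation):

    import json
    import re

    def has_ldjson_price_data(page_html):
        # True when any <script type="application/ld+json"> block is a Product with offers
        for blob in re.findall(r'<script type="application/ld\+json">(.*?)</script>',
                               page_html, re.S | re.I):
            try:
                data = json.loads(blob)
            except json.JSONDecodeError:
                continue
            is_product = 'product' in str(data.get('@type', '')).lower()
            has_offers = any(k.lower() == 'offers' for k in data)  # 'Offers' also counts
            if is_product and has_offers:
                return True
        return False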

View File

@@ -89,7 +89,7 @@ def test_check_basic_change_detection_functionality(client, live_server):
# Following the 'diff' link, it should no longer display as 'unviewed' even after we recheck it a few times # Following the 'diff' link, it should no longer display as 'unviewed' even after we recheck it a few times
res = client.get(url_for("diff_history_page", uuid="first")) res = client.get(url_for("diff_history_page", uuid="first"))
assert b'selected=""' in res.data, "Confirm diff history page loaded" assert b'Compare newest' in res.data
# Check the [preview] pulls the right one # Check the [preview] pulls the right one
res = client.get( res = client.get(

View File

@@ -2,7 +2,7 @@
import time import time
from flask import url_for from flask import url_for
from .util import live_server_setup, wait_for_all_checks from . util import live_server_setup
from ..html_tools import * from ..html_tools import *
@@ -176,77 +176,3 @@ def test_check_multiple_filters(client, live_server):
assert b"Blob A" in res.data # CSS was ok assert b"Blob A" in res.data # CSS was ok
assert b"Blob B" in res.data # xPath was ok assert b"Blob B" in res.data # xPath was ok
assert b"Blob C" not in res.data # Should not be included assert b"Blob C" not in res.data # Should not be included
# The filter exists, but did not contain anything useful
# Mainly used when the filter contains just an IMG; this can happen when someone selects an image in the visual-selector
# Tests fetcher can throw a "ReplyWithContentButNoText" exception after applying filter and extracting text
def test_filter_is_empty_help_suggestion(client, live_server):
#live_server_setup(live_server)
include_filters = "#blob-a"
with open("test-datastore/endpoint-content.txt", "w") as f:
f.write("""<html><body>
<div id="blob-a">
<img src="something.jpg">
</div>
</body>
</html>
""")
# Add our URL to the import page
test_url = url_for('test_endpoint', _external=True)
res = client.post(
url_for("import_page"),
data={"urls": test_url},
follow_redirects=True
)
assert b"1 Imported" in res.data
wait_for_all_checks(client)
# Goto the edit page, add our ignore text
# Add our URL to the import page
res = client.post(
url_for("edit_page", uuid="first"),
data={"include_filters": include_filters,
"url": test_url,
"tags": "",
"headers": "",
'fetch_backend': "html_requests"},
follow_redirects=True
)
assert b"Updated watch." in res.data
wait_for_all_checks(client)
res = client.get(
url_for("index"),
follow_redirects=True
)
assert b'empty result or contain only an image' in res.data
### Just an empty selector, no image
with open("test-datastore/endpoint-content.txt", "w") as f:
f.write("""<html><body>
<div id="blob-a">
<!-- doo doo -->
</div>
</body>
</html>
""")
res = client.get(url_for("form_watch_checknow"), follow_redirects=True)
wait_for_all_checks(client)
res = client.get(
url_for("index"),
follow_redirects=True
)
assert b'empty result or contain only an image' not in res.data
assert b'but contained no usable text' in res.data
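This test exercises the same heuristic seen in the worker hunk at the end of this compare: when a filter matches but yields no text, the error message notes whether the match contained only an image. A rough standalone equivalent using lxml (illustrative, not the app's html_tools call):

    from lxml import html

    def filters_gave_only_image(fragment):
        # True when the filtered fragment contains an <img> but no usable text
        tree = html.fromstring(fragment)
        return bool(tree.xpath('//img')) and not tree.text_content().strip()

    print(filters_gave_only_image('<div id="blob-a"><img src="something.jpg"></div>'))  # True
    print(filters_gave_only_image('<div id="blob-a"><!-- doo doo --></div>'))           # False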

View File

@@ -24,7 +24,7 @@ def test_check_extract_text_from_diff(client, live_server):
) )
assert b"1 Imported" in res.data assert b"1 Imported" in res.data
wait_for_all_checks(client) time.sleep(1)
# Load in 5 different numbers/changes # Load in 5 different numbers/changes
last_date="" last_date=""

View File

@@ -2,7 +2,7 @@
import time import time
from flask import url_for from flask import url_for
from .util import live_server_setup, wait_for_all_checks from .util import live_server_setup
from ..html_tools import * from ..html_tools import *
@@ -55,8 +55,6 @@ def set_multiline_response():
</p> </p>
<div>aaand something lines</div> <div>aaand something lines</div>
<br>
<div>and this should be</div>
</body> </body>
</html> </html>
""" """
@@ -68,10 +66,11 @@ def set_multiline_response():
def test_setup(client, live_server): def test_setup(client, live_server):
live_server_setup(live_server) live_server_setup(live_server)
def test_check_filter_multiline(client, live_server): def test_check_filter_multiline(client, live_server):
#live_server_setup(live_server)
set_multiline_response() set_multiline_response()
# Add our URL to the import page # Add our URL to the import page
@@ -83,15 +82,14 @@ def test_check_filter_multiline(client, live_server):
) )
assert b"1 Imported" in res.data assert b"1 Imported" in res.data
wait_for_all_checks(client) time.sleep(3)
# Goto the edit page, add our ignore text # Goto the edit page, add our ignore text
# Add our URL to the import page # Add our URL to the import page
res = client.post( res = client.post(
url_for("edit_page", uuid="first"), url_for("edit_page", uuid="first"),
data={"include_filters": '', data={"include_filters": '',
# Test a regex and a plaintext 'extract_text': '/something.+?6 billion.+?lines/si',
'extract_text': '/something.+?6 billion.+?lines/si\r\nand this should be',
"url": test_url, "url": test_url,
"tags": "", "tags": "",
"headers": "", "headers": "",
@@ -101,19 +99,13 @@ def test_check_filter_multiline(client, live_server):
) )
assert b"Updated watch." in res.data assert b"Updated watch." in res.data
wait_for_all_checks(client) time.sleep(3)
res = client.get(url_for("index"))
# Issue 1828
assert b'not at the start of the expression' not in res.data
res = client.get( res = client.get(
url_for("preview_page", uuid="first"), url_for("preview_page", uuid="first"),
follow_redirects=True follow_redirects=True
) )
# Plain text that doesn't look like a regex should also match
assert b'and this should be' in res.data
assert b'<div class="">Something' in res.data assert b'<div class="">Something' in res.data
assert b'<div class="">across 6 billion multiple' in res.data assert b'<div class="">across 6 billion multiple' in res.data
@@ -123,11 +115,14 @@ def test_check_filter_multiline(client, live_server):
assert b'aaand something lines' not in res.data assert b'aaand something lines' not in res.data
def test_check_filter_and_regex_extract(client, live_server): def test_check_filter_and_regex_extract(client, live_server):
sleep_time_for_fetch_thread = 3
include_filters = ".changetext" include_filters = ".changetext"
set_original_response() set_original_response()
# Give the endpoint time to spin up
time.sleep(1)
# Add our URL to the import page # Add our URL to the import page
test_url = url_for('test_endpoint', _external=True) test_url = url_for('test_endpoint', _external=True)
res = client.post( res = client.post(
@@ -137,15 +132,19 @@ def test_check_filter_and_regex_extract(client, live_server):
) )
assert b"1 Imported" in res.data assert b"1 Imported" in res.data
time.sleep(1)
# Trigger a check
client.get(url_for("form_watch_checknow"), follow_redirects=True)
# Give the thread time to pick it up # Give the thread time to pick it up
wait_for_all_checks(client) time.sleep(sleep_time_for_fetch_thread)
# Goto the edit page, add our ignore text # Goto the edit page, add our ignore text
# Add our URL to the import page # Add our URL to the import page
res = client.post( res = client.post(
url_for("edit_page", uuid="first"), url_for("edit_page", uuid="first"),
data={"include_filters": include_filters, data={"include_filters": include_filters,
'extract_text': '/\d+ online/\r\n/\d+ guests/\r\n/somecase insensitive \d+/i\r\n/somecase insensitive (345\d)/i\r\n/issue1828.+?2022/i', 'extract_text': '\d+ online\r\n\d+ guests\r\n/somecase insensitive \d+/i\r\n/somecase insensitive (345\d)/i',
"url": test_url, "url": test_url,
"tags": "", "tags": "",
"headers": "", "headers": "",
@@ -156,13 +155,8 @@ def test_check_filter_and_regex_extract(client, live_server):
assert b"Updated watch." in res.data assert b"Updated watch." in res.data
# Give the thread time to pick it up # Give the thread time to pick it up
wait_for_all_checks(client) time.sleep(sleep_time_for_fetch_thread)
res = client.get(url_for("index"))
#issue 1828
assert b'not at the start of the expression' not in res.data
# Make a change # Make a change
set_modified_response() set_modified_response()
@@ -170,7 +164,7 @@ def test_check_filter_and_regex_extract(client, live_server):
# Trigger a check # Trigger a check
client.get(url_for("form_watch_checknow"), follow_redirects=True) client.get(url_for("form_watch_checknow"), follow_redirects=True)
# Give the thread time to pick it up # Give the thread time to pick it up
wait_for_all_checks(client) time.sleep(sleep_time_for_fetch_thread)
# It should have 'unviewed' still # It should have 'unviewed' still
# Because it should be looking at only that 'sametext' id # Because it should be looking at only that 'sametext' id
@@ -202,35 +196,3 @@ def test_check_filter_and_regex_extract(client, live_server):
# Should not be here # Should not be here
assert b'Some text that did change' not in res.data assert b'Some text that did change' not in res.data
def test_regex_error_handling(client, live_server):
#live_server_setup(live_server)
# Add our URL to the import page
test_url = url_for('test_endpoint', _external=True)
res = client.post(
url_for("import_page"),
data={"urls": test_url},
follow_redirects=True
)
assert b"1 Imported" in res.data
### test regex error handling
res = client.post(
url_for("edit_page", uuid="first"),
data={"extract_text": '/something bad\d{3/XYZ',
"url": test_url,
"fetch_backend": "html_requests"},
follow_redirects=True
)
with open('/tmp/fuck.html', 'wb') as f:
f.write(res.data)
assert b'is not a valid regular expression.' in res.data
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data

View File

@@ -33,6 +33,8 @@ def test_strip_regex_text_func():
"/not" "/not"
] ]
fetcher = fetch_site_status.perform_site_check(datastore=False)
stripped_content = html_tools.strip_ignore_text(test_content, ignore_lines) stripped_content = html_tools.strip_ignore_text(test_content, ignore_lines)
assert b"but 1 lines" in stripped_content assert b"but 1 lines" in stripped_content

View File

@@ -24,6 +24,7 @@ def test_strip_text_func():
ignore_lines = ["sometimes"] ignore_lines = ["sometimes"]
fetcher = fetch_site_status.perform_site_check(datastore=False)
stripped_content = html_tools.strip_ignore_text(test_content, ignore_lines) stripped_content = html_tools.strip_ignore_text(test_content, ignore_lines)
assert b"sometimes" not in stripped_content assert b"sometimes" not in stripped_content

View File

@@ -1,19 +1,16 @@
#!/usr/bin/python3 #!/usr/bin/python3
import io
import os
import time import time
from flask import url_for from flask import url_for
from .util import live_server_setup, wait_for_all_checks from .util import live_server_setup
def test_setup(client, live_server): def test_setup(client, live_server):
live_server_setup(live_server) live_server_setup(live_server)
def test_import(client, live_server): def test_import(client, live_server):
# Give the endpoint time to spin up # Give the endpoint time to spin up
wait_for_all_checks(client) time.sleep(1)
res = client.post( res = client.post(
url_for("import_page"), url_for("import_page"),
@@ -122,97 +119,3 @@ def test_import_distillio(client, live_server):
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
# Clear flask alerts # Clear flask alerts
res = client.get(url_for("index")) res = client.get(url_for("index"))
def test_import_custom_xlsx(client, live_server):
"""Test can upload a excel spreadsheet and the watches are created correctly"""
#live_server_setup(live_server)
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'import/spreadsheet.xlsx')
with open(filename, 'rb') as f:
data= {
'file_mapping': 'custom',
'custom_xlsx[col_0]': '1',
'custom_xlsx[col_1]': '3',
'custom_xlsx[col_2]': '5',
'custom_xlsx[col_3]': '4',
'custom_xlsx[col_type_0]': 'title',
'custom_xlsx[col_type_1]': 'url',
'custom_xlsx[col_type_2]': 'include_filters',
'custom_xlsx[col_type_3]': 'interval_minutes',
'xlsx_file': (io.BytesIO(f.read()), 'spreadsheet.xlsx')
}
res = client.post(
url_for("import_page"),
data=data,
follow_redirects=True,
)
assert b'4 imported from custom .xlsx' in res.data
# Because this row was actually just a header with no usable URL, we should get an error
assert b'Error processing row number 1' in res.data
res = client.get(
url_for("index")
)
assert b'Somesite results ABC' in res.data
assert b'City news results' in res.data
# Just find one to check over
for uuid, watch in live_server.app.config['DATASTORE'].data['watching'].items():
if watch.get('title') == 'Somesite results ABC':
filters = watch.get('include_filters')
assert filters[0] == '/html[1]/body[1]/div[4]/div[1]/div[1]/div[1]||//*[@id=\'content\']/div[3]/div[1]/div[1]||//*[@id=\'content\']/div[1]'
assert watch.get('time_between_check') == {'weeks': 0, 'days': 1, 'hours': 6, 'minutes': 24, 'seconds': 0}
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data
def test_import_watchete_xlsx(client, live_server):
"""Test can upload a excel spreadsheet and the watches are created correctly"""
#live_server_setup(live_server)
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'import/spreadsheet.xlsx')
with open(filename, 'rb') as f:
data= {
'file_mapping': 'wachete',
'xlsx_file': (io.BytesIO(f.read()), 'spreadsheet.xlsx')
}
res = client.post(
url_for("import_page"),
data=data,
follow_redirects=True,
)
assert b'4 imported from Wachete .xlsx' in res.data
res = client.get(
url_for("index")
)
assert b'Somesite results ABC' in res.data
assert b'City news results' in res.data
# Just find one to check over
for uuid, watch in live_server.app.config['DATASTORE'].data['watching'].items():
if watch.get('title') == 'Somesite results ABC':
filters = watch.get('include_filters')
assert filters[0] == '/html[1]/body[1]/div[4]/div[1]/div[1]/div[1]||//*[@id=\'content\']/div[3]/div[1]/div[1]||//*[@id=\'content\']/div[1]'
assert watch.get('time_between_check') == {'weeks': 0, 'days': 1, 'hours': 6, 'minutes': 24, 'seconds': 0}
assert watch.get('fetch_backend') == 'html_requests' # Has inactive 'dynamic wachet'
if watch.get('title') == 'JS website':
assert watch.get('fetch_backend') == 'html_webdriver' # Has active 'dynamic wachet'
if watch.get('title') == 'system default website':
assert watch.get('fetch_backend') == 'system' # uses default if blank
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data

View File

@@ -80,11 +80,8 @@ def test_headers_in_request(client, live_server):
# Should be only one with headers set # Should be only one with headers set
assert watches_with_headers==1 assert watches_with_headers==1
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data
def test_body_in_request(client, live_server): def test_body_in_request(client, live_server):
# Add our URL to the import page # Add our URL to the import page
test_url = url_for('test_body', _external=True) test_url = url_for('test_body', _external=True)
if os.getenv('PLAYWRIGHT_DRIVER_URL'): if os.getenv('PLAYWRIGHT_DRIVER_URL'):
@@ -173,8 +170,7 @@ def test_body_in_request(client, live_server):
follow_redirects=True follow_redirects=True
) )
assert b"Body must be empty when Request Method is set to GET" in res.data assert b"Body must be empty when Request Method is set to GET" in res.data
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data
def test_method_in_request(client, live_server): def test_method_in_request(client, live_server):
# Add our URL to the import page # Add our URL to the import page

View File

@@ -2,61 +2,12 @@
import time import time
from flask import url_for from flask import url_for
from .util import set_original_response, set_modified_response, live_server_setup, wait_for_all_checks, extract_rss_token_from_UI, \ from .util import set_original_response, set_modified_response, live_server_setup, wait_for_all_checks, extract_rss_token_from_UI
extract_UUID_from_client
def set_original_cdata_xml():
test_return_data = """<rss xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:media="http://search.yahoo.com/mrss/" xmlns:atom="http://www.w3.org/2005/Atom" version="2.0">
<channel>
<title>Gizi</title>
<link>https://test.com</link>
<atom:link href="https://testsite.com" rel="self" type="application/rss+xml"/>
<description>
<![CDATA[ The Future Could Be Here ]]>
</description>
<language>en</language>
<item>
<title>
<![CDATA[ <img src="https://testsite.com/hacked.jpg"> Hackers can access your computer ]]>
</title>
<link>https://testsite.com/news/12341234234</link>
<description>
<![CDATA[ <img class="type:primaryImage" src="https://testsite.com/701c981da04869e.jpg"/><p>The days of Terminator and The Matrix could be closer. But be positive.</p><p><a href="https://testsite.com">Read more link...</a></p> ]]>
</description>
<category>cybernetics</category>
<category>rand corporation</category>
<pubDate>Tue, 17 Oct 2023 15:10:00 GMT</pubDate>
<guid isPermaLink="false">1850933241</guid>
<dc:creator>
<![CDATA[ Mr Hacker News ]]>
</dc:creator>
<media:thumbnail url="https://testsite.com/thumbnail-c224e10d81488e818701c981da04869e.jpg"/>
</item>
<item>
<title> Some other title </title>
<link>https://testsite.com/news/12341234236</link>
<description>
Some other description
</description>
</item>
</channel>
</rss>
"""
with open("test-datastore/endpoint-content.txt", "w") as f:
f.write(test_return_data)
def test_setup(client, live_server):
live_server_setup(live_server)
def test_rss_and_token(client, live_server): def test_rss_and_token(client, live_server):
# live_server_setup(live_server)
set_original_response() set_original_response()
rss_token = extract_rss_token_from_UI(client) live_server_setup(live_server)
# Add our URL to the import page # Add our URL to the import page
res = client.post( res = client.post(
@@ -66,11 +17,11 @@ def test_rss_and_token(client, live_server):
) )
assert b"1 Imported" in res.data assert b"1 Imported" in res.data
rss_token = extract_rss_token_from_UI(client)
wait_for_all_checks(client) time.sleep(2)
set_modified_response()
client.get(url_for("form_watch_checknow"), follow_redirects=True) client.get(url_for("form_watch_checknow"), follow_redirects=True)
wait_for_all_checks(client) time.sleep(2)
# Add our URL to the import page # Add our URL to the import page
res = client.get( res = client.get(
@@ -86,80 +37,3 @@ def test_rss_and_token(client, live_server):
) )
assert b"Access denied, bad token" not in res.data assert b"Access denied, bad token" not in res.data
assert b"Random content" in res.data assert b"Random content" in res.data
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
def test_basic_cdata_rss_markup(client, live_server):
#live_server_setup(live_server)
set_original_cdata_xml()
test_url = url_for('test_endpoint', content_type="application/xml", _external=True)
# Add our URL to the import page
res = client.post(
url_for("import_page"),
data={"urls": test_url},
follow_redirects=True
)
assert b"1 Imported" in res.data
wait_for_all_checks(client)
res = client.get(
url_for("preview_page", uuid="first"),
follow_redirects=True
)
assert b'CDATA' not in res.data
assert b'<![' not in res.data
assert b'Hackers can access your computer' in res.data
assert b'The days of Terminator' in res.data
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
def test_rss_xpath_filtering(client, live_server):
#live_server_setup(live_server)
set_original_cdata_xml()
test_url = url_for('test_endpoint', content_type="application/xml", _external=True)
res = client.post(
url_for("form_quick_watch_add"),
data={"url": test_url, "tags": '', 'edit_and_watch_submit_button': 'Edit > Watch'},
follow_redirects=True
)
assert b"Watch added in Paused state, saving will unpause" in res.data
uuid = extract_UUID_from_client(client)
res = client.post(
url_for("edit_page", uuid=uuid, unpause_on_save=1),
data={
"include_filters": "//item/title",
"fetch_backend": "html_requests",
"headers": "",
"proxy": "no-proxy",
"tags": "",
"url": test_url,
},
follow_redirects=True
)
assert b"unpaused" in res.data
wait_for_all_checks(client)
res = client.get(
url_for("preview_page", uuid="first"),
follow_redirects=True
)
assert b'CDATA' not in res.data
assert b'<![' not in res.data
# #1874 All but the first <title was getting selected
# Convert any HTML with just a top level <title> to <h1> to be sure title renders
assert b'Hackers can access your computer' in res.data # Should ONLY be selected by the xpath
assert b'Some other title' in res.data # Should ONLY be selected by the xpath
assert b'The days of Terminator' not in res.data # Should NOT be selected by the xpath
assert b'Some other description' not in res.data # Should NOT be selected by the xpath
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)

View File

@@ -1,5 +1,5 @@
from flask import url_for from flask import url_for
from .util import set_original_response, set_modified_response, live_server_setup, wait_for_all_checks from . util import set_original_response, set_modified_response, live_server_setup
import time import time
@@ -12,7 +12,6 @@ def test_bad_access(client, live_server):
) )
assert b"1 Imported" in res.data assert b"1 Imported" in res.data
wait_for_all_checks(client)
# Attempt to add a body with a GET method # Attempt to add a body with a GET method
res = client.post( res = client.post(
@@ -60,7 +59,7 @@ def test_bad_access(client, live_server):
data={"url": 'file:///tasty/disk/drive', "tags": ''}, data={"url": 'file:///tasty/disk/drive', "tags": ''},
follow_redirects=True follow_redirects=True
) )
wait_for_all_checks(client) time.sleep(1)
res = client.get(url_for("index")) res = client.get(url_for("index"))
assert b'file:// type access is denied for security reasons.' in res.data assert b'file:// type access is denied for security reasons.' in res.data

View File

@@ -2,7 +2,7 @@
import time import time
from flask import url_for from flask import url_for
from .util import live_server_setup, wait_for_all_checks from . util import live_server_setup
def set_original_ignore_response(): def set_original_ignore_response():
@@ -26,8 +26,13 @@ def test_trigger_regex_functionality(client, live_server):
live_server_setup(live_server) live_server_setup(live_server)
sleep_time_for_fetch_thread = 3
set_original_ignore_response() set_original_ignore_response()
# Give the endpoint time to spin up
time.sleep(1)
# Add our URL to the import page # Add our URL to the import page
test_url = url_for('test_endpoint', _external=True) test_url = url_for('test_endpoint', _external=True)
res = client.post( res = client.post(
@@ -38,7 +43,7 @@ def test_trigger_regex_functionality(client, live_server):
assert b"1 Imported" in res.data assert b"1 Imported" in res.data
# Give the thread time to pick it up # Give the thread time to pick it up
wait_for_all_checks(client) time.sleep(sleep_time_for_fetch_thread)
# It should report nothing found (just a new one shouldnt have anything) # It should report nothing found (just a new one shouldnt have anything)
res = client.get(url_for("index")) res = client.get(url_for("index"))
@@ -52,7 +57,7 @@ def test_trigger_regex_functionality(client, live_server):
"fetch_backend": "html_requests"}, "fetch_backend": "html_requests"},
follow_redirects=True follow_redirects=True
) )
wait_for_all_checks(client) time.sleep(sleep_time_for_fetch_thread)
# so that we set the state to 'unviewed' after all the edits # so that we set the state to 'unviewed' after all the edits
client.get(url_for("diff_history_page", uuid="first")) client.get(url_for("diff_history_page", uuid="first"))
@@ -60,7 +65,7 @@ def test_trigger_regex_functionality(client, live_server):
f.write("some new noise") f.write("some new noise")
client.get(url_for("form_watch_checknow"), follow_redirects=True) client.get(url_for("form_watch_checknow"), follow_redirects=True)
wait_for_all_checks(client) time.sleep(sleep_time_for_fetch_thread)
# It should report nothing found (nothing should match the regex) # It should report nothing found (nothing should match the regex)
res = client.get(url_for("index")) res = client.get(url_for("index"))
@@ -70,7 +75,7 @@ def test_trigger_regex_functionality(client, live_server):
f.write("regex test123<br>\nsomething 123") f.write("regex test123<br>\nsomething 123")
client.get(url_for("form_watch_checknow"), follow_redirects=True) client.get(url_for("form_watch_checknow"), follow_redirects=True)
wait_for_all_checks(client) time.sleep(sleep_time_for_fetch_thread)
res = client.get(url_for("index")) res = client.get(url_for("index"))
assert b'unviewed' in res.data assert b'unviewed' in res.data

View File

@@ -2,7 +2,7 @@
import time import time
from flask import url_for from flask import url_for
from .util import live_server_setup, wait_for_all_checks from . util import live_server_setup
from ..html_tools import * from ..html_tools import *
@@ -86,14 +86,14 @@ def test_check_xpath_filter_utf8(client, live_server):
follow_redirects=True follow_redirects=True
) )
assert b"1 Imported" in res.data assert b"1 Imported" in res.data
wait_for_all_checks(client) time.sleep(1)
res = client.post( res = client.post(
url_for("edit_page", uuid="first"), url_for("edit_page", uuid="first"),
data={"include_filters": filter, "url": test_url, "tags": "", "headers": "", 'fetch_backend': "html_requests"}, data={"include_filters": filter, "url": test_url, "tags": "", "headers": "", 'fetch_backend': "html_requests"},
follow_redirects=True follow_redirects=True
) )
assert b"Updated watch." in res.data assert b"Updated watch." in res.data
wait_for_all_checks(client) time.sleep(3)
res = client.get(url_for("index")) res = client.get(url_for("index"))
assert b'Unicode strings with encoding declaration are not supported.' not in res.data assert b'Unicode strings with encoding declaration are not supported.' not in res.data
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
@@ -140,14 +140,14 @@ def test_check_xpath_text_function_utf8(client, live_server):
follow_redirects=True follow_redirects=True
) )
assert b"1 Imported" in res.data assert b"1 Imported" in res.data
wait_for_all_checks(client) time.sleep(1)
res = client.post( res = client.post(
url_for("edit_page", uuid="first"), url_for("edit_page", uuid="first"),
data={"include_filters": filter, "url": test_url, "tags": "", "headers": "", 'fetch_backend': "html_requests"}, data={"include_filters": filter, "url": test_url, "tags": "", "headers": "", 'fetch_backend': "html_requests"},
follow_redirects=True follow_redirects=True
) )
assert b"Updated watch." in res.data assert b"Updated watch." in res.data
wait_for_all_checks(client) time.sleep(3)
res = client.get(url_for("index")) res = client.get(url_for("index"))
assert b'Unicode strings with encoding declaration are not supported.' not in res.data assert b'Unicode strings with encoding declaration are not supported.' not in res.data
@@ -164,6 +164,7 @@ def test_check_xpath_text_function_utf8(client, live_server):
assert b'Deleted' in res.data assert b'Deleted' in res.data
def test_check_markup_xpath_filter_restriction(client, live_server): def test_check_markup_xpath_filter_restriction(client, live_server):
sleep_time_for_fetch_thread = 3
xpath_filter = "//*[contains(@class, 'sametext')]" xpath_filter = "//*[contains(@class, 'sametext')]"
@@ -182,7 +183,7 @@ def test_check_markup_xpath_filter_restriction(client, live_server):
assert b"1 Imported" in res.data assert b"1 Imported" in res.data
# Give the thread time to pick it up # Give the thread time to pick it up
wait_for_all_checks(client) time.sleep(sleep_time_for_fetch_thread)
# Goto the edit page, add our ignore text # Goto the edit page, add our ignore text
# Add our URL to the import page # Add our URL to the import page
@@ -194,7 +195,7 @@ def test_check_markup_xpath_filter_restriction(client, live_server):
assert b"Updated watch." in res.data assert b"Updated watch." in res.data
# Give the thread time to pick it up # Give the thread time to pick it up
wait_for_all_checks(client) time.sleep(sleep_time_for_fetch_thread)
# view it/reset state back to viewed # view it/reset state back to viewed
client.get(url_for("diff_history_page", uuid="first"), follow_redirects=True) client.get(url_for("diff_history_page", uuid="first"), follow_redirects=True)
@@ -205,7 +206,7 @@ def test_check_markup_xpath_filter_restriction(client, live_server):
# Trigger a check # Trigger a check
client.get(url_for("form_watch_checknow"), follow_redirects=True) client.get(url_for("form_watch_checknow"), follow_redirects=True)
# Give the thread time to pick it up # Give the thread time to pick it up
wait_for_all_checks(client) time.sleep(sleep_time_for_fetch_thread)
res = client.get(url_for("index")) res = client.get(url_for("index"))
assert b'unviewed' not in res.data assert b'unviewed' not in res.data
@@ -215,6 +216,9 @@ def test_check_markup_xpath_filter_restriction(client, live_server):
def test_xpath_validation(client, live_server): def test_xpath_validation(client, live_server):
# Give the endpoint time to spin up
time.sleep(1)
# Add our URL to the import page # Add our URL to the import page
test_url = url_for('test_endpoint', _external=True) test_url = url_for('test_endpoint', _external=True)
res = client.post( res = client.post(
@@ -223,7 +227,7 @@ def test_xpath_validation(client, live_server):
follow_redirects=True follow_redirects=True
) )
assert b"1 Imported" in res.data assert b"1 Imported" in res.data
wait_for_all_checks(client) time.sleep(2)
res = client.post( res = client.post(
url_for("edit_page", uuid="first"), url_for("edit_page", uuid="first"),
@@ -240,8 +244,11 @@ def test_check_with_prefix_include_filters(client, live_server):
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data assert b'Deleted' in res.data
# Give the endpoint time to spin up
time.sleep(1)
set_original_response() set_original_response()
wait_for_all_checks(client)
# Add our URL to the import page # Add our URL to the import page
test_url = url_for('test_endpoint', _external=True) test_url = url_for('test_endpoint', _external=True)
res = client.post( res = client.post(
@@ -250,7 +257,7 @@ def test_check_with_prefix_include_filters(client, live_server):
follow_redirects=True follow_redirects=True
) )
assert b"1 Imported" in res.data assert b"1 Imported" in res.data
wait_for_all_checks(client) time.sleep(3)
res = client.post( res = client.post(
url_for("edit_page", uuid="first"), url_for("edit_page", uuid="first"),
@@ -259,7 +266,7 @@ def test_check_with_prefix_include_filters(client, live_server):
) )
assert b"Updated watch." in res.data assert b"Updated watch." in res.data
wait_for_all_checks(client) time.sleep(3)
res = client.get( res = client.get(
url_for("preview_page", uuid="first"), url_for("preview_page", uuid="first"),
@@ -270,46 +277,3 @@ def test_check_with_prefix_include_filters(client, live_server):
assert b"Some text that will change" not in res.data #not in selector assert b"Some text that will change" not in res.data #not in selector
client.get(url_for("form_delete", uuid="all"), follow_redirects=True) client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
def test_various_rules(client, live_server):
# Just check these don't error
#live_server_setup(live_server)
with open("test-datastore/endpoint-content.txt", "w") as f:
f.write("""<html>
<body>
Some initial text<br>
<p>Which is across multiple lines</p>
<br>
So let's see what happens. <br>
<div class="sametext">Some text thats the same</div>
<div class="changetext">Some text that will change</div>
<a href=''>some linky </a>
<a href=''>another some linky </a>
<!-- related to https://github.com/dgtlmoon/changedetection.io/pull/1774 -->
<input type="email" id="email" />
</body>
</html>
""")
test_url = url_for('test_endpoint', _external=True)
res = client.post(
url_for("import_page"),
data={"urls": test_url},
follow_redirects=True
)
assert b"1 Imported" in res.data
wait_for_all_checks(client)
for r in ['//div', '//a', 'xpath://div', 'xpath://a']:
res = client.post(
url_for("edit_page", uuid="first"),
data={"include_filters": r,
"url": test_url,
"tags": "",
"headers": "",
'fetch_backend': "html_requests"},
follow_redirects=True
)
wait_for_all_checks(client)
assert b"Updated watch." in res.data
res = client.get(url_for("index"))
assert b'fetch-error' not in res.data, f"Should not see errors after '{r}' filter"

View File

@@ -1,19 +1,18 @@
#!/usr/bin/python3 #!/usr/bin/python3
import time import time
import os
from flask import url_for from flask import url_for
from ..util import live_server_setup, wait_for_all_checks, extract_UUID_from_client from ..util import live_server_setup, wait_for_all_checks, extract_UUID_from_client
def test_setup(client, live_server):
live_server_setup(live_server)
# Add a site in paused mode, add an invalid filter, we should still have visual selector data ready # Add a site in paused mode, add an invalid filter, we should still have visual selector data ready
def test_visual_selector_content_ready(client, live_server): def test_visual_selector_content_ready(client, live_server):
import os import os
import json import json
assert os.getenv('PLAYWRIGHT_DRIVER_URL'), "Needs PLAYWRIGHT_DRIVER_URL set for this test" assert os.getenv('PLAYWRIGHT_DRIVER_URL'), "Needs PLAYWRIGHT_DRIVER_URL set for this test"
time.sleep(1)
live_server_setup(live_server)
# Add our URL to the import page, because the docker container (playwright/selenium) wont be able to connect to our usual test url # Add our URL to the import page, because the docker container (playwright/selenium) wont be able to connect to our usual test url
test_url = "https://changedetection.io/ci-test/test-runjs.html" test_url = "https://changedetection.io/ci-test/test-runjs.html"
@@ -54,13 +53,6 @@ def test_visual_selector_content_ready(client, live_server):
with open(os.path.join('test-datastore', uuid, 'elements.json'), 'r') as f: with open(os.path.join('test-datastore', uuid, 'elements.json'), 'r') as f:
json.load(f) json.load(f)
# Attempt to fetch it via the web hook that the browser would use
res = client.get(url_for('static_content', group='visual_selector_data', filename=uuid))
json.loads(res.data)
assert res.mimetype == 'application/json'
assert res.status_code == 200
# Some options should be enabled # Some options should be enabled
# @todo - in the future, the visibility should be toggled by JS from the request type setting # @todo - in the future, the visibility should be toggled by JS from the request type setting
res = client.get( res = client.get(
@@ -68,75 +60,4 @@ def test_visual_selector_content_ready(client, live_server):
follow_redirects=True follow_redirects=True
) )
assert b'notification_screenshot' in res.data assert b'notification_screenshot' in res.data
client.get(
url_for("form_delete", uuid="all"),
follow_redirects=True
)
def test_basic_browserstep(client, live_server):
assert os.getenv('PLAYWRIGHT_DRIVER_URL'), "Needs PLAYWRIGHT_DRIVER_URL set for this test"
#live_server_setup(live_server)
# Add our URL to the import page, because the docker container (playwright/selenium) wont be able to connect to our usual test url
test_url = "https://changedetection.io/ci-test/test-runjs.html"
res = client.post(
url_for("form_quick_watch_add"),
data={"url": test_url, "tags": '', 'edit_and_watch_submit_button': 'Edit > Watch'},
follow_redirects=True
)
assert b"Watch added in Paused state, saving will unpause" in res.data
res = client.post(
url_for("edit_page", uuid="first", unpause_on_save=1),
data={
"url": test_url,
"tags": "",
"headers": "",
'fetch_backend': "html_webdriver",
'browser_steps-0-operation': 'Goto site',
'browser_steps-1-operation': 'Click element',
'browser_steps-1-selector': 'button[name=test-button]',
'browser_steps-1-optional_value': ''
},
follow_redirects=True
)
assert b"unpaused" in res.data
wait_for_all_checks(client)
uuid = extract_UUID_from_client(client)
# Check HTML conversion was detected and worked
res = client.get(
url_for("preview_page", uuid=uuid),
follow_redirects=True
)
assert b"This text should be removed" not in res.data
assert b"I smell JavaScript because the button was pressed" in res.data
# now test for 404 errors
res = client.post(
url_for("edit_page", uuid=uuid, unpause_on_save=1),
data={
"url": "https://changedetection.io/404",
"tags": "",
"headers": "",
'fetch_backend': "html_webdriver",
'browser_steps-0-operation': 'Goto site',
'browser_steps-1-operation': 'Click element',
'browser_steps-1-selector': 'button[name=test-button]',
'browser_steps-1-optional_value': ''
},
follow_redirects=True
)
assert b"unpaused" in res.data
wait_for_all_checks(client)
res = client.get(url_for("index"))
assert b'Error - 404' in res.data
client.get(
url_for("form_delete", uuid="all"),
follow_redirects=True
)

View File

@@ -3,7 +3,7 @@ import threading
import queue import queue
import time import time
from changedetectionio import content_fetcher, html_tools from changedetectionio import content_fetcher
from .processors.text_json_diff import FilterNotFoundInResponse from .processors.text_json_diff import FilterNotFoundInResponse
from .processors.restock_diff import UnableToExtractRestockData from .processors.restock_diff import UnableToExtractRestockData
@@ -209,7 +209,6 @@ class update_worker(threading.Thread):
from .processors import text_json_diff, restock_diff from .processors import text_json_diff, restock_diff
while not self.app.config.exit.is_set(): while not self.app.config.exit.is_set():
update_handler = None
try: try:
queued_item_data = self.q.get(block=False) queued_item_data = self.q.get(block=False)
@@ -230,36 +229,16 @@ class update_worker(threading.Thread):
now = time.time() now = time.time()
try: try:
# Processor is what we are using for detecting the "Change" processor = self.datastore.data['watching'][uuid].get('processor','text_json_diff')
processor = self.datastore.data['watching'][uuid].get('processor', 'text_json_diff')
# if system...
# Abort processing when the content was the same as the last fetch
skip_when_same_checksum = queued_item_data.item.get('skip_when_checksum_same')
# @todo some way to switch by name # @todo some way to switch by name
# Init a new 'difference_detection_processor'
if processor == 'restock_diff': if processor == 'restock_diff':
update_handler = restock_diff.perform_site_check(datastore=self.datastore, update_handler = restock_diff.perform_site_check(datastore=self.datastore)
watch_uuid=uuid
)
else: else:
# Used as a default and also by some tests # Used as a default and also by some tests
update_handler = text_json_diff.perform_site_check(datastore=self.datastore, update_handler = text_json_diff.perform_site_check(datastore=self.datastore)
watch_uuid=uuid
)
# Clear last errors (move to preflight func?)
self.datastore.data['watching'][uuid]['browser_steps_last_error_step'] = None
update_handler.call_browser()
changed_detected, update_obj, contents = update_handler.run_changedetection(uuid,
skip_when_checksum_same=skip_when_same_checksum,
)
changed_detected, update_obj, contents = update_handler.run(uuid, skip_when_checksum_same=queued_item_data.item.get('skip_when_checksum_same'))
# Re #342 # Re #342
# In Python 3, all strings are sequences of Unicode characters. There is a bytes type that holds raw bytes. # In Python 3, all strings are sequences of Unicode characters. There is a bytes type that holds raw bytes.
# We then convert/.decode('utf-8') for the notification etc # We then convert/.decode('utf-8') for the notification etc
@@ -272,20 +251,7 @@ class update_worker(threading.Thread):
# Totally fine, it's by choice - just continue on, nothing more to care about # Totally fine, it's by choice - just continue on, nothing more to care about
# Page had elements/content but no renderable text # Page had elements/content but no renderable text
# Backend (not filters) gave zero output # Backend (not filters) gave zero output
extra_help = "" self.datastore.update_watch(uuid=uuid, update_obj={'last_error': "Got HTML content but no text found (With {} reply code).".format(e.status_code)})
if e.has_filters:
# Maybe it contains an image? offer a more helpful link
has_img = html_tools.include_filters(include_filters='img',
html_content=e.html_content)
if has_img:
extra_help = ", it's possible that the filters you have give an empty result or contain only an image."
else:
extra_help = ", it's possible that the filters were found, but contained no usable text."
self.datastore.update_watch(uuid=uuid, update_obj={
'last_error': f"Got HTML content but no text found (With {e.status_code} reply code){extra_help}"
})
if e.screenshot: if e.screenshot:
self.datastore.save_screenshot(watch_uuid=uuid, screenshot=e.screenshot) self.datastore.save_screenshot(watch_uuid=uuid, screenshot=e.screenshot)
process_changedetection_results = False process_changedetection_results = False
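The new branch above distinguishes "the filter matched only an image" from "the filter matched but contained no usable text". A quick illustration of why an image-only match yields HTML content but empty text output, using BeautifulSoup (already a dependency via beautifulsoup4); the selector and markup here are made up for the example:

from bs4 import BeautifulSoup

html = '<div id="price"><img src="price.png" alt=""></div>'
soup = BeautifulSoup(html, 'html.parser')
fragment = soup.select_one('#price')
print(fragment.get_text(strip=True))      # '' -> HTML content, but no text
print(fragment.find('img') is not None)   # True -> the "contains an image" case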
@@ -345,13 +311,8 @@ class update_worker(threading.Thread):
if not self.datastore.data['watching'].get(uuid): if not self.datastore.data['watching'].get(uuid):
continue continue
error_step = e.step_n + 1 err_text = "Warning, browser step at position {} could not run, target not found, check the watch, add a delay if necessary.".format(e.step_n+1)
err_text = f"Warning, browser step at position {error_step} could not run, target not found, check the watch, add a delay if necessary, view Browser Steps to see screenshot at that step" self.datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text})
self.datastore.update_watch(uuid=uuid,
update_obj={'last_error': err_text,
'browser_steps_last_error_step': error_step
}
)
if self.datastore.data['watching'][uuid].get('filter_failure_notification_send', False): if self.datastore.data['watching'][uuid].get('filter_failure_notification_send', False):
@@ -410,9 +371,6 @@ class update_worker(threading.Thread):
self.datastore.update_watch(uuid=uuid, update_obj={'last_error': str(e)}) self.datastore.update_watch(uuid=uuid, update_obj={'last_error': str(e)})
# Other serious error # Other serious error
process_changedetection_results = False process_changedetection_results = False
# import traceback
# print(traceback.format_exc())
else: else:
# Crash protection, the watch entry could have been removed by this point (during a slow chrome fetch etc) # Crash protection, the watch entry could have been removed by this point (during a slow chrome fetch etc)
if not self.datastore.data['watching'].get(uuid): if not self.datastore.data['watching'].get(uuid):
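The hunks above show the dispatch pattern the worker uses: poll the queue without blocking, look up the watch's 'processor' name, and fall back to text_json_diff as the default. A minimal sketch of that pattern follows; the class and dict names are simplified stand-ins, not the real module layout:

import queue
import time

def worker_loop(q, processors, exit_flag):
    while not exit_flag.is_set():
        try:
            item = q.get(block=False)
        except queue.Empty:
            # Nothing queued, idle briefly before polling again
            time.sleep(1)
            continue
        # Fall back to the default text/JSON diff processor, as the diff does
        handler_cls = processors.get(item.get('processor'), processors['text_json_diff'])
        handler = handler_cls()
        handler.run(item['uuid'])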

View File

@@ -66,12 +66,25 @@ services:
# browser-chrome: # browser-chrome:
# condition: service_started # condition: service_started
# browser-chrome:
# hostname: browser-chrome
# image: selenium/standalone-chrome-debug:3.141.59
# environment:
# - VNC_NO_PASSWORD=1
# - SCREEN_WIDTH=1920
# - SCREEN_HEIGHT=1080
# - SCREEN_DEPTH=24
# volumes:
# # Workaround to avoid the browser crashing inside a docker container
# # See https://github.com/SeleniumHQ/docker-selenium#quick-start
# - /dev/shm:/dev/shm
# restart: unless-stopped
# Used for fetching pages via Playwright+Chrome where you need Javascript support. # Used for fetching pages via Playwright+Chrome where you need Javascript support.
# Note: Playwright/browserless not supported on ARM type devices (rPi etc)
# RECOMMENDED FOR FETCHING PAGES WITH CHROME
# playwright-chrome: # playwright-chrome:
# hostname: playwright-chrome # hostname: playwright-chrome
# image: browserless/chrome:1.60-chrome-stable # image: browserless/chrome
# restart: unless-stopped # restart: unless-stopped
# environment: # environment:
# - SCREEN_WIDTH=1920 # - SCREEN_WIDTH=1920
@@ -88,23 +101,6 @@ services:
# Ignore HTTPS errors, like for self-signed certs # Ignore HTTPS errors, like for self-signed certs
# - DEFAULT_IGNORE_HTTPS_ERRORS=true # - DEFAULT_IGNORE_HTTPS_ERRORS=true
# #
# Used for fetching pages via Playwright+Chrome where you need Javascript support.
# Note: works well but is deprecated, doesn't fetch full-page screenshots and has other issues
# browser-chrome:
# hostname: browser-chrome
# image: selenium/standalone-chrome:4
# environment:
# - VNC_NO_PASSWORD=1
# - SCREEN_WIDTH=1920
# - SCREEN_HEIGHT=1080
# - SCREEN_DEPTH=24
# volumes:
# # Workaround to avoid the browser crashing inside a docker container
# # See https://github.com/SeleniumHQ/docker-selenium#quick-start
# - /dev/shm:/dev/shm
# restart: unless-stopped
volumes: volumes:
changedetection-data: changedetection-data:

View File

@@ -1,13 +1,12 @@
eventlet>=0.33.3 # related to dnspython fixes eventlet>=0.31.0
feedgen~=0.9 feedgen~=0.9
flask-compress flask-compress
# 0.6.3 included compatibility fix for werkzeug 3.x (2.x had deprecation of url handlers) flask-login~=0.5
flask-login>=0.6.3
flask-paginate flask-paginate
flask_expects_json~=1.7 flask_expects_json~=1.7
flask_restful flask_restful
flask_wtf~=1.2 flask_wtf
flask~=2.3 flask~=2.0
inscriptis~=2.2 inscriptis~=2.2
pytz pytz
timeago~=1.0 timeago~=1.0
@@ -17,7 +16,7 @@ validators~=0.21
# Set these versions together to avoid a RequestsDependencyWarning # Set these versions together to avoid a RequestsDependencyWarning
# >= 2.26 also adds Brotli support if brotli is installed # >= 2.26 also adds Brotli support if brotli is installed
brotli~=1.0 brotli~=1.0
requests[socks] requests[socks] ~=2.28
urllib3>1.26 urllib3>1.26
chardet>2.3.0 chardet>2.3.0
@@ -25,12 +24,16 @@ chardet>2.3.0
wtforms~=3.0 wtforms~=3.0
jsonpath-ng~=1.5.3 jsonpath-ng~=1.5.3
dnspython~=2.4 # related to eventlet fixes
# dnspython 2.3.0 is not compatible with eventlet
# * https://github.com/eventlet/eventlet/issues/781
# * https://datastax-oss.atlassian.net/browse/PYTHON-1320
dnspython<2.3.0
# jq not available on Windows so must be installed manually # jq not available on Windows so must be installed manually
# Notification library # Notification library
apprise~=1.6.0 apprise~=1.5.0
# apprise mqtt https://github.com/dgtlmoon/changedetection.io/issues/315 # apprise mqtt https://github.com/dgtlmoon/changedetection.io/issues/315
paho-mqtt paho-mqtt
@@ -46,18 +49,21 @@ beautifulsoup4
# XPath filtering, lxml is required by bs4 anyway, but put it here to be safe. # XPath filtering, lxml is required by bs4 anyway, but put it here to be safe.
lxml lxml
selenium~=4.14.0 # 3.141 was missing socksVersion, 3.150 was not in pypi, so we try 4.1.0
selenium~=4.1.0
werkzeug~=3.0 # https://stackoverflow.com/questions/71652965/importerror-cannot-import-name-safe-str-cmp-from-werkzeug-security/71653849#71653849
# ImportError: cannot import name 'safe_str_cmp' from 'werkzeug.security'
# need to revisit flask login versions
werkzeug~=2.0.0
# Templating, so far just in the URLs but in the future can be for the notifications also # Templating, so far just in the URLs but in the future can be for the notifications also
jinja2~=3.1 jinja2~=3.1
jinja2-time jinja2-time
openpyxl
# https://peps.python.org/pep-0508/#environment-markers # https://peps.python.org/pep-0508/#environment-markers
# https://github.com/dgtlmoon/changedetection.io/pull/1009 # https://github.com/dgtlmoon/changedetection.io/pull/1009
jq~=1.3; python_version >= "3.8" and sys_platform == "darwin" jq~=1.3 ;python_version >= "3.8" and sys_platform == "linux"
jq~=1.3; python_version >= "3.8" and sys_platform == "linux"
# Any current modern version, required so far for screenshot PNG->JPEG conversion but will be used more in the future # Any current modern version, required so far for screenshot PNG->JPEG conversion but will be used more in the future
pillow pillow
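The jq lines above rely on PEP 508 environment markers, so pip only installs the package when the interpreter version and platform match. A small sketch of evaluating such a marker programmatically with the packaging library (not a dependency of this project, purely an illustration):

from packaging.markers import Marker

marker = Marker('python_version >= "3.8" and sys_platform == "linux"')
# True only on Linux with Python 3.8+, mirroring when pip would install jq
print(marker.evaluate())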

View File

@@ -1 +1 @@
python-3.11.5 python-3.9.15

View File

@@ -41,7 +41,7 @@ setup(
include_package_data=True, include_package_data=True,
install_requires=install_requires, install_requires=install_requires,
license="Apache License 2.0", license="Apache License 2.0",
python_requires=">= 3.7", python_requires=">= 3.6",
classifiers=['Intended Audience :: Customer Service', classifiers=['Intended Audience :: Customer Service',
'Intended Audience :: Developers', 'Intended Audience :: Developers',
'Intended Audience :: Education', 'Intended Audience :: Education',