mirror of https://github.com/dgtlmoon/changedetection.io.git
synced 2025-10-31 06:37:41 +00:00

Compare commits: filters-co...upgrade-pl (87 commits)
| SHA1 |
|---|
| 0b086df3e0 |
| aa88c7d002 |
| 4ae466386f |
| 8fd57280b7 |
| 0285d00f13 |
| f7f98945a2 |
| 5e2049c538 |
| 26931e0167 |
| 5229094e44 |
| 5a306aa78c |
| c8dcc072c8 |
| 7c97a5a403 |
| 7dd967be8e |
| 3607d15185 |
| 3382b4cb3f |
| 5f030d3668 |
| 06975d6d8f |
| f58e5b7f19 |
| e50eff8e35 |
| 07a853ce59 |
| 80f8d23309 |
| 9f41d15908 |
| 89797dfe02 |
| c905652780 |
| 99246d3e6d |
| f9f69bf0dd |
| 68efb25e9b |
| 70606ab05d |
| d3c8386874 |
| 47103d7f3d |
| 03c671bfff |
| e209d9fba0 |
| 3b43da35ec |
| a0665e1f18 |
| 9ffe7e0eaf |
| 3e5671a3a2 |
| cd1aca9ee3 |
| 6a589e14f3 |
| dbb76f3618 |
| 4ae27af511 |
| e1860549dc |
| 9765d56a23 |
| 349111eb35 |
| 71e50569a0 |
| c372942295 |
| 0aef5483d9 |
| c266c64b94 |
| 32e5498a9d |
| 0ba7928d58 |
| 1709e8f936 |
| b16d65741c |
| 1cadcc6d15 |
| b58d521d19 |
| 52225f2ad8 |
| 7220afab0a |
| 1c0fe4c23e |
| 4f6b0eb8a5 |
| f707c914b6 |
| 9cb636e638 |
| 1d5fe51157 |
| c0b49d3be9 |
| c4dc85525f |
| 26159840c8 |
| 522e9786c6 |
| 9ce86a2835 |
| f9f6300a70 |
| 7734b22a19 |
| da421fe110 |
| 3e2b55a46f |
| 7ace259d70 |
| aa6ad7bf47 |
| 40dd29dbc6 |
| 7debccca73 |
| 59578803bf |
| a5db3a0b99 |
| 49a5337ac4 |
| ceac8c21e4 |
| a7132b1cfc |
| 2b948c15c1 |
| 34f2d30968 |
| 700729a332 |
| b6060ac90c |
| 5cccccb0b6 |
| c52eb512e8 |
| 7282df9c08 |
| e30b17b8bc |
| 1e88136325 |

**.github/test/Dockerfile-alpine** (vendored, 4 changes)

```diff
@@ -2,7 +2,7 @@
 # Test that we can still build on Alpine (musl modified libc https://musl.libc.org/)
 # Some packages wont install via pypi because they dont have a wheel available under this architecture.
 
-FROM ghcr.io/linuxserver/baseimage-alpine:3.16
+FROM ghcr.io/linuxserver/baseimage-alpine:3.18
 ENV PYTHONUNBUFFERED=1
 
 COPY requirements.txt /requirements.txt
@@ -26,6 +26,6 @@ RUN \
     py3-pip && \
   echo "**** pip3 install test of changedetection.io ****" && \
   pip3 install -U pip wheel setuptools && \
-  pip3 install -U --no-cache-dir --find-links https://wheel-index.linuxserver.io/alpine-3.16/ -r /requirements.txt && \
+  pip3 install -U --no-cache-dir --find-links https://wheel-index.linuxserver.io/alpine-3.18/ -r /requirements.txt && \
   apk del --purge \
     build-dependencies
```

**.github/workflows/codeql-analysis.yml** (vendored, 8 changes)

```diff
@@ -30,11 +30,11 @@ jobs:
 
     steps:
     - name: Checkout repository
-      uses: actions/checkout@v2
+      uses: actions/checkout@v4
 
     # Initializes the CodeQL tools for scanning.
     - name: Initialize CodeQL
-      uses: github/codeql-action/init@v1
+      uses: github/codeql-action/init@v2
      with:
        languages: ${{ matrix.language }}
        # If you wish to specify custom queries, you can do so here or in a config file.
@@ -45,7 +45,7 @@ jobs:
    # Autobuild attempts to build any compiled languages  (C/C++, C#, or Java).
    # If this step fails, then you should remove it and run the build manually (see below)
    - name: Autobuild
-      uses: github/codeql-action/autobuild@v1
+      uses: github/codeql-action/autobuild@v2
 
    # ℹ️ Command-line programs to run using the OS shell.
    # 📚 https://git.io/JvXDl
@@ -59,4 +59,4 @@ jobs:
    #   make release
 
    - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@v1
+      uses: github/codeql-action/analyze@v2
```

**.github/workflows/containers.yml** (vendored, 36 changes)

```diff
@@ -39,11 +39,11 @@ jobs:
     # Or if we are in a tagged release scenario.
     if: ${{ github.event.workflow_run.conclusion == 'success' }} || ${{ github.event.release.tag_name }} != ''
     steps:
-      - uses: actions/checkout@v2
-      - name: Set up Python 3.9
-        uses: actions/setup-python@v2
+      - uses: actions/checkout@v4
+      - name: Set up Python 3.11
+        uses: actions/setup-python@v4
        with:
-          python-version: 3.9
+          python-version: 3.11
 
      - name: Install dependencies
        run: |
@@ -58,27 +58,27 @@ jobs:
          echo ${{ github.ref }} > changedetectionio/tag.txt
 
      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@v3
        with:
          image: tonistiigi/binfmt:latest
          platforms: all
 
      - name: Login to GitHub Container Registry
-        uses: docker/login-action@v1
+        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
 
      - name: Login to Docker Hub Container Registry
-        uses: docker/login-action@v1
+        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKER_HUB_USERNAME }}
          password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
 
      - name: Set up Docker Buildx
        id: buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v3
        with:
          install: true
          version: latest
@@ -88,7 +88,7 @@ jobs:
      - name: Build and push :dev
        id: docker_build
        if: ${{ github.ref }} == "refs/heads/master"
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v5
        with:
          context: ./
          file: ./Dockerfile
@@ -96,8 +96,9 @@ jobs:
          tags: |
            ${{ secrets.DOCKER_HUB_USERNAME }}/changedetection.io:dev,ghcr.io/${{ github.repository }}:dev
          platforms: linux/amd64,linux/arm64,linux/arm/v6,linux/arm/v7,linux/arm/v8
-          cache-from: type=local,src=/tmp/.buildx-cache
-          cache-to: type=local,dest=/tmp/.buildx-cache
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
 
 # Looks like this was disabled
 #          provenance: false
 
@@ -105,7 +106,7 @@ jobs:
      - name: Build and push :tag
        id: docker_build_tag_release
        if: github.event_name == 'release' && startsWith(github.event.release.tag_name, '0.')
-        uses: docker/build-push-action@v2
+        uses: docker/build-push-action@v5
        with:
          context: ./
          file: ./Dockerfile
@@ -116,18 +117,11 @@ jobs:
            ${{ secrets.DOCKER_HUB_USERNAME }}/changedetection.io:latest
            ghcr.io/dgtlmoon/changedetection.io:latest
          platforms: linux/amd64,linux/arm64,linux/arm/v6,linux/arm/v7,linux/arm/v8
-          cache-from: type=local,src=/tmp/.buildx-cache
-          cache-to: type=local,dest=/tmp/.buildx-cache
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
 # Looks like this was disabled
 #          provenance: false
 
      - name: Image digest
        run: echo step SHA ${{ steps.vars.outputs.sha_short }} tag ${{steps.vars.outputs.tag}} branch ${{steps.vars.outputs.branch}} digest ${{ steps.docker_build.outputs.digest }}
 
-      - name: Cache Docker layers
-        uses: actions/cache@v2
-        with:
-          path: /tmp/.buildx-cache
-          key: ${{ runner.os }}-buildx-${{ github.sha }}
-          restore-keys: |
-            ${{ runner.os }}-buildx-
```

**.github/workflows/test-container-build.yml** (vendored, 16 changes)

```diff
@@ -24,22 +24,22 @@ jobs:
   test-container-build:
     runs-on: ubuntu-latest
     steps:
-        - uses: actions/checkout@v2
-        - name: Set up Python 3.9
-          uses: actions/setup-python@v2
+        - uses: actions/checkout@v4
+        - name: Set up Python 3.11
+          uses: actions/setup-python@v4
          with:
-            python-version: 3.9
+            python-version: 3.11
 
        # Just test that the build works, some libraries won't compile on ARM/rPi etc
        - name: Set up QEMU
-          uses: docker/setup-qemu-action@v1
+          uses: docker/setup-qemu-action@v3
          with:
            image: tonistiigi/binfmt:latest
            platforms: all
 
        - name: Set up Docker Buildx
          id: buildx
-          uses: docker/setup-buildx-action@v1
+          uses: docker/setup-buildx-action@v3
          with:
            install: true
            version: latest
@@ -49,7 +49,7 @@ jobs:
        # Check we can still build under alpine/musl
        - name: Test that the docker containers can build (musl via alpine check)
          id: docker_build_musl
-          uses: docker/build-push-action@v2
+          uses: docker/build-push-action@v5
          with:
            context: ./
            file: ./.github/test/Dockerfile-alpine
@@ -57,7 +57,7 @@ jobs:
 
        - name: Test that the docker containers can build
          id: docker_build
-          uses: docker/build-push-action@v2
+          uses: docker/build-push-action@v5
          # https://github.com/docker/build-push-action#customizing
          with:
            context: ./
```

**.github/workflows/test-only.yml** (vendored, 25 changes)

```diff
@@ -7,13 +7,13 @@ jobs:
   test-application:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
 
      # Mainly just for link/flake8
-      - name: Set up Python 3.10
-        uses: actions/setup-python@v2
+      - name: Set up Python 3.11
+        uses: actions/setup-python@v4
        with:
-          python-version: '3.10'
+          python-version: '3.11'
 
      - name: Lint with flake8
        run: |
@@ -29,8 +29,11 @@ jobs:
          docker network create changedet-network
 
          # Selenium+browserless
-          docker run --network changedet-network -d --hostname selenium  -p 4444:4444 --rm --shm-size="2g"  selenium/standalone-chrome-debug:3.141.59
-          docker run --network changedet-network -d --hostname browserless -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" -e "DEFAULT_LAUNCH_ARGS=[\"--window-size=1920,1080\"]" --rm  -p 3000:3000  --shm-size="2g"  browserless/chrome:1.53-chrome-stable
+          docker run --network changedet-network -d --hostname selenium  -p 4444:4444 --rm --shm-size="2g"  selenium/standalone-chrome:4
+          docker run --network changedet-network -d --name browserless --hostname browserless -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" -e "DEFAULT_LAUNCH_ARGS=[\"--window-size=1920,1080\"]" --rm  -p 3000:3000  --shm-size="2g"  browserless/chrome:1.60-chrome-stable
+
+          # For accessing custom browser tests
+          docker run --network changedet-network -d --name browserless-custom-url --hostname browserless-custom-url -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" -e "DEFAULT_LAUNCH_ARGS=[\"--window-size=1920,1080\"]" --rm --shm-size="2g"  browserless/chrome:1.60-chrome-stable
 
      - name: Build changedetection.io container for testing
        run: |
@@ -48,6 +51,7 @@ jobs:
        run: |
          # Unit tests
          docker run test-changedetectionio  bash -c 'python3 -m unittest changedetectionio.tests.unit.test_notification_diff'
+          docker run test-changedetectionio  bash -c 'python3 -m unittest changedetectionio.tests.unit.test_watch_model'
 
          # All tests
          docker run --network changedet-network  test-changedetectionio  bash -c 'cd changedetectionio && ./run_basic_tests.sh'
@@ -83,6 +87,13 @@ jobs:
        run: |
          cd changedetectionio
          ./run_proxy_tests.sh
+          # And again with PLAYWRIGHT_DRIVER_URL=..
          cd ..
+
+      - name: Test custom browser URL
+        run: |
+          cd changedetectionio
+          ./run_custom_browser_url_tests.sh
+          cd ..
 
      - name: Test changedetection.io container starts+runs basically without error
@@ -98,4 +109,4 @@ jobs:
 
 #export WEBDRIVER_URL=http://localhost:4444/wd/hub
 #pytest tests/fetchers/test_content.py
-#pytest tests/test_errorhandling.py
+#pytest tests/test_errorhandling.py
```
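
For orientation, a minimal, hedged sketch of how a test could drive the `selenium/standalone-chrome:4` container this workflow starts. The endpoint matches the commented `WEBDRIVER_URL` hint at the end of the workflow; the page visited is only an illustration, not taken from the repository's test suite:

```python
# Hedged sketch: connect to the standalone Chrome container started above.
# http://localhost:4444/wd/hub mirrors the commented WEBDRIVER_URL hint.
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

driver = webdriver.Remote(command_executor="http://localhost:4444/wd/hub",
                          options=Options())
try:
    driver.get("https://example.com")   # illustrative target page
    print(driver.title)
finally:
    driver.quit()
```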

**.github/workflows/test-pip-build.yml** (vendored, 8 changes)

```diff
@@ -11,12 +11,12 @@ jobs:
   test-pip-build-basics:
     runs-on: ubuntu-latest
     steps:
-        - uses: actions/checkout@v2
+        - uses: actions/checkout@v4
 
-        - name: Set up Python 3.9
-          uses: actions/setup-python@v2
+        - name: Set up Python 3.11
+          uses: actions/setup-python@v4
          with:
-            python-version: 3.9
+            python-version: 3.11
 
 
        - name: Test that the basic pip built package runs without error
```

**Dockerfile**

```diff
@@ -1,5 +1,5 @@
 # pip dependencies install stage
-FROM python:3.10-slim-bullseye as builder
+FROM python:3.11-slim-bookworm as builder
 
 # See `cryptography` pin comment in requirements.txt
 ARG CRYPTOGRAPHY_DONT_BUILD_RUST=1
@@ -25,14 +25,13 @@ RUN pip install --target=/dependencies -r /requirements.txt
 # Playwright is an alternative to Selenium
 # Excluded this package from requirements.txt to prevent arm/v6 and arm/v7 builds from failing
 # https://github.com/dgtlmoon/changedetection.io/pull/1067 also musl/alpine (not supported)
-RUN pip install --target=/dependencies playwright~=1.27.1 \
+RUN pip install --target=/dependencies playwright~=1.40 \
     || echo "WARN: Failed to install Playwright. The application can still run, but the Playwright option will be disabled."
 
 # Final image stage
-FROM python:3.10-slim-bullseye
+FROM python:3.11-slim-bookworm
 
 RUN apt-get update && apt-get install -y --no-install-recommends \
-    libssl1.1 \
     libxslt1.1 \
     # For pdftohtml
     poppler-utils \
```
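
The `|| echo "WARN: ..."` fallback makes Playwright a best-effort install, so the application must treat it as optional at runtime. A minimal sketch of that defensive-import pattern (the variable name is illustrative, not the application's actual code):

```python
# Sketch: probe for the optional Playwright dependency that the Dockerfile
# installs on a best-effort basis; degrade gracefully when it is absent.
try:
    from playwright.sync_api import sync_playwright  # noqa: F401
    PLAYWRIGHT_AVAILABLE = True
except ImportError:
    PLAYWRIGHT_AVAILABLE = False

print("Playwright option enabled:", PLAYWRIGHT_AVAILABLE)
```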

**MANIFEST.in**

```diff
@@ -16,3 +16,4 @@ global-exclude venv
 
 global-exclude test-datastore
 global-exclude changedetection.io*dist-info
+global-exclude changedetectionio/tests/proxy_socks5/test-datastore
```

**README-pypi.md**

```diff
@@ -2,19 +2,44 @@
 
 Live your data-life pro-actively, track website content changes and receive notifications via Discord, Email, Slack, Telegram and 70+ more
 
-[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot.png" style="max-width:100%;" alt="Self-hosted web page change monitoring"  title="Self-hosted web page change monitoring"  />](https://changedetection.io)
+[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot.png" style="max-width:100%;" alt="Self-hosted web page change monitoring, list of websites with changes"  title="Self-hosted web page change monitoring, list of websites with changes"  />](https://changedetection.io)
 
 
 [**Don't have time? Let us host it for you! try our extremely affordable subscription use our proxies and support!**](https://changedetection.io) 
 
 
-#### Example use cases
+### Target specific parts of the webpage using the Visual Selector tool.
+
+Available when connected to a <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Playwright-content-fetcher">playwright content fetcher</a> (included as part of our subscription service)
+
+[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/visualselector-anim.gif" style="max-width:100%;" alt="Select parts and elements of a web page to monitor for changes"  title="Select parts and elements of a web page to monitor for changes" />](https://changedetection.io?src=pip)
+
+### Easily see what changed, examine by word, line, or individual character.
+
+[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot-diff.png" style="max-width:100%;" alt="Self-hosted web page change monitoring context difference "  title="Self-hosted web page change monitoring context difference " />](https://changedetection.io?src=pip)
+
+
+### Perform interactive browser steps
+
+Fill in text boxes, click buttons and more, setup your changedetection scenario. 
+
+Using the **Browser Steps** configuration, add basic steps before performing change detection, such as logging into websites, adding a product to a cart, accept cookie logins, entering dates and refining searches.
+
+[<img src="docs/browsersteps-anim.gif" style="max-width:100%;" alt="Website change detection with interactive browser steps, detect changes behind login and password, search queries and more"  title="Website change detection with interactive browser steps, detect changes behind login and password, search queries and more" />](https://changedetection.io?src=pip)
+
+After **Browser Steps** have been run, then visit the **Visual Selector** tab to refine the content you're interested in.
+Requires Playwright to be enabled.
+
+
+### Example use cases
 
 - Products and services have a change in pricing
 - _Out of stock notification_ and _Back In stock notification_
+- Monitor and track PDF file changes, know when a PDF file has text changes.
 - Governmental department updates (changes are often only on their websites)
 - New software releases, security advisories when you're not on their mailing list.
 - Festivals with changes
 - Discogs restock alerts and monitoring
 - Realestate listing changes
 - Know when your favourite whiskey is on sale, or other special deals are announced before anyone else
 - COVID related news from government websites
@@ -27,18 +52,34 @@ Live your data-life pro-actively, track website content changes and receive noti
 - Create RSS feeds based on changes in web content
 - Monitor HTML source code for unexpected changes, strengthen your PCI compliance
 - You have a very sensitive list of URLs to watch and you do _not_ want to use the paid alternatives. (Remember, _you_ are the product)
+- Get notified when certain keywords appear in Twitter search results
+- Proactively search for jobs, get notified when companies update their careers page, search job portals for keywords.
+- Get alerts when new job positions are open on Bamboo HR and other job platforms
+- Website defacement monitoring
+- Pokémon Card Restock Tracker / Pokémon TCG Tracker
+- RegTech - stay ahead of regulatory changes, regulatory compliance
 
 _Need an actual Chrome runner with Javascript support? We support fetching via WebDriver and Playwright!</a>_
 
 #### Key Features
 
 - Lots of trigger filters, such as "Trigger on text", "Remove text by selector", "Ignore text", "Extract text", also using regular-expressions!
-- Target elements with xPath and CSS Selectors, Easily monitor complex JSON with JSONPath or jq
+- Target elements with xPath(1.0) and CSS Selectors, Easily monitor complex JSON with JSONPath or jq
 - Switch between fast non-JS and Chrome JS based "fetchers"
+- Track changes in PDF files (Monitor text changed in the PDF, Also monitor PDF filesize and checksums)
 - Easily specify how often a site should be checked
 - Execute JS before extracting text (Good for logging in, see examples in the UI!)
 - Override Request Headers, Specify `POST` or `GET` and other methods
 - Use the "Visual Selector" to help target specific elements
+- Configurable [proxy per watch](https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration)
+- Send a screenshot with the notification when a change is detected in the web page
+
+We [recommend and use Bright Data](https://brightdata.grsm.io/n0r16zf7eivq) global proxy services, Bright Data will match any first deposit up to $100 using our signup link.
+
+[Oxylabs](https://oxylabs.go2cloud.org/SH2d) is also an excellent proxy provider and well worth using, they offer Residental, ISP, Rotating and many other proxy types to suit your project. 
+
+Please :star: star :star: this project and help it grow! https://github.com/dgtlmoon/changedetection.io/
+
 
 
 ```bash
```

**README.md** (19 changes)

```diff
@@ -5,7 +5,7 @@
 _Live your data-life pro-actively._ 
 
 
-[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot.png" style="max-width:100%;" alt="Self-hosted web page change monitoring"  title="Self-hosted web page change monitoring"  />](https://changedetection.io?src=github)
+[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/screenshot.png" style="max-width:100%;" alt="Self-hosted web site page change monitoring"  title="Self-hosted web site page change monitoring"  />](https://changedetection.io?src=github)
 
 [![Release Version][release-shield]][release-link] [![Docker Pulls][docker-pulls]][docker-link] [![License][license-shield]](LICENSE.md)
 
@@ -22,7 +22,7 @@ _Live your data-life pro-actively._
 
 Available when connected to a <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Playwright-content-fetcher">playwright content fetcher</a> (included as part of our subscription service)
 
-[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/visualselector-anim.gif" style="max-width:100%;" alt="Self-hosted web page change monitoring context difference "  title="Self-hosted web page change monitoring context difference " />](https://changedetection.io?src=github)
+[<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/visualselector-anim.gif" style="max-width:100%;" alt="Select parts and elements of a web page to monitor for changes"  title="Select parts and elements of a web page to monitor for changes" />](https://changedetection.io?src=github)
 
 ### Easily see what changed, examine by word, line, or individual character.
 
@@ -35,7 +35,7 @@ Fill in text boxes, click buttons and more, setup your changedetection scenario.
 
 Using the **Browser Steps** configuration, add basic steps before performing change detection, such as logging into websites, adding a product to a cart, accept cookie logins, entering dates and refining searches.
 
-[<img src="docs/browsersteps-anim.gif" style="max-width:100%;" alt="Self-hosted web page change monitoring context difference "  title="Website change detection with interactive browser steps, login, cookies etc" />](https://changedetection.io?src=github)
+[<img src="docs/browsersteps-anim.gif" style="max-width:100%;" alt="Website change detection with interactive browser steps, detect changes behind login and password, search queries and more"  title="Website change detection with interactive browser steps, detect changes behind login and password, search queries and more" />](https://changedetection.io?src=github)
 
 After **Browser Steps** have been run, then visit the **Visual Selector** tab to refine the content you're interested in.
 Requires Playwright to be enabled.
@@ -226,12 +226,19 @@ The application also supports notifying you that it can follow this information
 
 ## Proxy Configuration
 
-See the wiki https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration , we also support using [Bright Data proxy services where possible]( https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration#brightdata-proxy-support)
+See the wiki https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration , we also support using [Bright Data proxy services where possible](https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration#brightdata-proxy-support) and [Oxylabs](https://oxylabs.go2cloud.org/SH2d) proxy services.
 
 ## Raspberry Pi support?
 
 Raspberry Pi and linux/arm/v6 linux/arm/v7 arm64 devices are supported! See the wiki for [details](https://github.com/dgtlmoon/changedetection.io/wiki/Fetching-pages-with-WebDriver)
 
+## Import support
+
+Easily [import your list of websites to watch for changes in Excel .xslx file format](https://changedetection.io/tutorial/how-import-your-website-change-detection-lists-excel), or paste in lists of website URLs as plaintext. 
+
+Excel import is recommended - that way you can better organise tags/groups of websites and other features.
+
+
 ## API Support
 
 Supports managing the website watch list [via our API](https://changedetection.io/docs/api_v1/index.html)
```
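
As a hedged illustration of the API the README section above links to (endpoint path and `x-api-key` header as described in the v1 API docs; host, port, and key here are placeholders), listing the watches might look like:

```python
import requests

# Hypothetical values: default local port, placeholder API key.
resp = requests.get("http://localhost:5000/api/v1/watch",
                    headers={"x-api-key": "YOUR_API_KEY"})
print(resp.json())  # watch UUIDs mapped to watch metadata
```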
```diff
@@ -261,3 +268,7 @@ I offer commercial support, this software is depended on by network security, ae
 [license-shield]: https://img.shields.io/github/license/dgtlmoon/changedetection.io.svg?style=for-the-badge
 [release-link]: https://github.com/dgtlmoon/changedetection.io/releases
 [docker-link]: https://hub.docker.com/r/dgtlmoon/changedetection.io
+
+## Third-party licenses
+
+changedetectionio.html_tools.elementpath_tostring: Copyright (c), 2018-2021, SISSA (Scuola Internazionale Superiore di Studi Avanzati), Licensed under [MIT license](https://github.com/sissaschool/elementpath/blob/master/LICENSE)
```

**changedetectionio/__init__.py**

```diff
@@ -38,7 +38,7 @@ from flask_paginate import Pagination, get_page_parameter
 from changedetectionio import html_tools
 from changedetectionio.api import api_v1
 
-__version__ = '0.45.2'
+__version__ = '0.45.7.3'
 
 from changedetectionio.store import BASE_URL_NOT_SET_TEXT
 
@@ -105,6 +105,10 @@ def get_darkmode_state():
     css_dark_mode = request.cookies.get('css_dark_mode', 'false')
     return 'true' if css_dark_mode and strtobool(css_dark_mode) else 'false'
 
+@app.template_global()
+def get_css_version():
+    return __version__
+
 # We use the whole watch object from the store/JSON so we can see if there's some related status in terms of a thread
 # running or something similar.
 @app.template_filter('format_last_checked_time')
```
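
The new `get_css_version()` template global simply exposes the application version to Jinja; the likely intent is cache-busting static assets across upgrades. A self-contained sketch of the pattern (the `<link>` usage in the comment is illustrative):

```python
# Minimal sketch of the template-global pattern added above.
from flask import Flask

app = Flask(__name__)
__version__ = '0.45.7.3'

@app.template_global()
def get_css_version():
    # A template can append this, e.g. styles.css?v={{ get_css_version() }},
    # so browsers re-fetch stylesheets after each release.
    return __version__
```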
```diff
@@ -186,7 +190,6 @@ class User(flask_login.UserMixin):
 
     pass
 
-
 def login_optionally_required(func):
     @wraps(func)
     def decorated_view(*args, **kwargs):
@@ -199,7 +202,6 @@ def login_optionally_required(func):
         # Permitted
         elif request.endpoint == 'diff_history_page' and datastore.data['settings']['application'].get('shared_diff_access'):
             return func(*args, **kwargs)
-
         elif request.method in flask_login.config.EXEMPT_METHODS:
             return func(*args, **kwargs)
         elif app.config.get('LOGIN_DISABLED'):
@@ -418,11 +420,18 @@ def changedetection_app(config=None, datastore_o=None):
 
         # Sort by last_changed and add the uuid which is usually the key..
         sorted_watches = []
+        with_errors = request.args.get('with_errors') == "1"
+        errored_count = 0
         search_q = request.args.get('q').strip().lower() if request.args.get('q') else False
         for uuid, watch in datastore.data['watching'].items():
+            if with_errors and not watch.get('last_error'):
+                continue
+
             if limit_tag and not limit_tag in watch['tags']:
                     continue
 
+            if watch.get('last_error'):
+                errored_count += 1
+
             if search_q:
                 if (watch.get('title') and search_q in watch.get('title').lower()) or search_q in watch.get('url', '').lower():
                     sorted_watches.append(watch)
```
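
The `?with_errors=1` handling above skips watches with no recorded `last_error` and tallies the rest for the UI; the same filter pattern in isolation (sample data is made up):

```python
# Sketch of the error filter: keep only watches whose last fetch failed.
watching = {
    "uuid-1": {"last_error": False},
    "uuid-2": {"last_error": "Connection refused"},
}
errored = {u: w for u, w in watching.items() if w.get("last_error")}
print(len(errored), list(errored))  # 1 ['uuid-2']
```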
```diff
@@ -444,6 +453,7 @@ def changedetection_app(config=None, datastore_o=None):
                                  active_tag=limit_tag,
                                  app_rss_token=datastore.data['settings']['application']['rss_access_token'],
                                  datastore=datastore,
+                                 errored_count=errored_count,
                                  form=form,
                                  guid=datastore.data['app_guid'],
                                  has_proxies=datastore.proxy_list,
@@ -604,6 +614,8 @@ def changedetection_app(config=None, datastore_o=None):
         # For the form widget tag uuid lookup
         form.tags.datastore = datastore # in _value
 
+        for p in datastore.extra_browsers:
+            form.fetch_backend.choices.append(p)
+
         form.fetch_backend.choices.append(("system", 'System settings default'))
 
@@ -624,7 +636,6 @@ def changedetection_app(config=None, datastore_o=None):
 
             if request.args.get('unpause_on_save'):
                 extra_update_obj['paused'] = False
-
             # Re #110, if they submit the same as the default value, set it to None, so we continue to follow the default
             # Assume we use the default value, unless something relevant is different, then use the form value
             # values could be None, 0 etc.
@@ -705,16 +716,16 @@ def changedetection_app(config=None, datastore_o=None):
             system_uses_webdriver = datastore.data['settings']['application']['fetch_backend'] == 'html_webdriver'
 
             is_html_webdriver = False
-            if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver':
+            if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver' or watch.get('fetch_backend', '').startswith('extra_browser_'):
                 is_html_webdriver = True
 
             # Only works reliably with Playwright
             visualselector_enabled = os.getenv('PLAYWRIGHT_DRIVER_URL', False) and is_html_webdriver
 
             output = render_template("edit.html",
                                      available_processors=processors.available_processors(),
                                      browser_steps_config=browser_step_ui_config,
                                      emailprefix=os.getenv('NOTIFICATION_MAIL_BUTTON_PREFIX', False),
+                                     extra_title=f" - Edit - {watch.label}",
                                      form=form,
                                      has_default_notification_urls=True if len(datastore.data['settings']['application']['notification_urls']) else False,
                                      has_empty_checktime=using_default_check_time,
@@ -810,6 +821,16 @@ def changedetection_app(config=None, datastore_o=None):
 
         return output
 
+    @app.route("/settings/reset-api-key", methods=['GET'])
+    @login_optionally_required
+    def settings_reset_api_key():
+        import secrets
+        secret = secrets.token_hex(16)
+        datastore.data['settings']['application']['api_access_token'] = secret
+        datastore.needs_write_urgent = True
+        flash("API Key was regenerated.")
+        return redirect(url_for('settings_page')+'#api')
+
     @app.route("/import", methods=['GET', "POST"])
     @login_optionally_required
     def import_page():
```
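
The regenerated key comes from `secrets.token_hex(16)`, the standard-library CSPRNG helper; 16 random bytes render as a 32-character hex string:

```python
import secrets

# 16 random bytes -> 32 hex characters, as used for the new API key above.
token = secrets.token_hex(16)
print(token, len(token))  # e.g. '9f8c...' 32
```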
```diff
@@ -817,6 +838,7 @@ def changedetection_app(config=None, datastore_o=None):
         from . import forms
 
         if request.method == 'POST':
+
             from .importer import import_url_list, import_distill_io_json
 
             # URL List import
@@ -840,11 +862,32 @@ def changedetection_app(config=None, datastore_o=None):
                 for uuid in d_importer.new_uuids:
                     update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid, 'skip_when_checksum_same': True}))
 
+            # XLSX importer
+            if request.files and request.files.get('xlsx_file'):
+                file = request.files['xlsx_file']
+                from .importer import import_xlsx_wachete, import_xlsx_custom
+
+                if request.values.get('file_mapping') == 'wachete':
+                    w_importer = import_xlsx_wachete()
+                    w_importer.run(data=file, flash=flash, datastore=datastore)
+                else:
+                    w_importer = import_xlsx_custom()
+                    # Building mapping of col # to col # type
+                    map = {}
+                    for i in range(10):
+                        c = request.values.get(f"custom_xlsx[col_{i}]")
+                        v = request.values.get(f"custom_xlsx[col_type_{i}]")
+                        if c and v:
+                            map[int(c)] = v
+
+                    w_importer.import_profile = map
+                    w_importer.run(data=file, flash=flash, datastore=datastore)
+
+                for uuid in w_importer.new_uuids:
+                    update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid, 'skip_when_checksum_same': True}))
 
-        form = forms.importForm(formdata=request.form if request.method == 'POST' else None,
-#                               data=default,
-                               )
+        # Could be some remaining, or we could be on GET
+        form = forms.importForm(formdata=request.form if request.method == 'POST' else None)
         output = render_template("import.html",
                                  form=form,
                                  import_url_list_remaining="\n".join(remaining_urls),
```
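
The custom XLSX path pairs form fields like `custom_xlsx[col_2]` (a spreadsheet column number) with `custom_xlsx[col_type_2]` (what that column holds), producing a column-to-type map; the same loop in isolation:

```python
# Sketch of the column-mapping loop from the importer above.
def build_column_map(values, max_cols=10):
    col_map = {}
    for i in range(max_cols):
        c = values.get(f"custom_xlsx[col_{i}]")
        v = values.get(f"custom_xlsx[col_type_{i}]")
        if c and v:
            col_map[int(c)] = v
    return col_map

print(build_column_map({"custom_xlsx[col_0]": "1",
                        "custom_xlsx[col_type_0]": "url"}))  # {1: 'url'}
```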
```diff
@@ -858,7 +901,10 @@ def changedetection_app(config=None, datastore_o=None):
     def mark_all_viewed():
 
         # Save the current newest history as the most recently viewed
+        with_errors = request.args.get('with_errors') == "1"
         for watch_uuid, watch in datastore.data['watching'].items():
+            if with_errors and not watch.get('last_error'):
+                continue
             datastore.set_last_viewed(watch_uuid, int(time.time()))
 
         return redirect(url_for('index'))
@@ -914,28 +960,36 @@ def changedetection_app(config=None, datastore_o=None):
 
         # Read as binary and force decode as UTF-8
         # Windows may fail decode in python if we just use 'r' mode (chardet decode exception)
-        try:
-            newest_version_file_contents = watch.get_history_snapshot(dates[-1])
-        except Exception as e:
-            newest_version_file_contents = "Unable to read {}.\n".format(dates[-1])
-
-        previous_version = request.args.get('previous_version')
-        previous_timestamp = dates[-2]
-        if previous_version:
-            previous_timestamp = previous_version
+        from_version = request.args.get('from_version')
+        from_version_index = -2  # second newest
+        if from_version and from_version in dates:
+            from_version_index = dates.index(from_version)
+        else:
+            from_version = dates[from_version_index]
 
         try:
-            previous_version_file_contents = watch.get_history_snapshot(previous_timestamp)
+            from_version_file_contents = watch.get_history_snapshot(dates[from_version_index])
         except Exception as e:
-            previous_version_file_contents = "Unable to read {}.\n".format(previous_timestamp)
+            from_version_file_contents = f"Unable to read to-version at index {dates[from_version_index]}.\n"
+
+        to_version = request.args.get('to_version')
+        to_version_index = -1
+        if to_version and to_version in dates:
+            to_version_index = dates.index(to_version)
+        else:
+            to_version = dates[to_version_index]
+
+        try:
+            to_version_file_contents = watch.get_history_snapshot(dates[to_version_index])
+        except Exception as e:
+            to_version_file_contents = "Unable to read to-version at index{}.\n".format(dates[to_version_index])
 
         screenshot_url = watch.get_screenshot()
 
         system_uses_webdriver = datastore.data['settings']['application']['fetch_backend'] == 'html_webdriver'
 
         is_html_webdriver = False
-        if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver':
+        if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver' or watch.get('fetch_backend', '').startswith('extra_browser_'):
             is_html_webdriver = True
 
         password_enabled_and_share_is_off = False
@@ -944,22 +998,24 @@ def changedetection_app(config=None, datastore_o=None):
 
         output = render_template("diff.html",
                                  current_diff_url=watch['url'],
-                                 current_previous_version=str(previous_version),
+                                 from_version=str(from_version),
+                                 to_version=str(to_version),
                                  extra_stylesheets=extra_stylesheets,
-                                 extra_title=" - Diff - {}".format(watch['title'] if watch['title'] else watch['url']),
+                                 extra_title=f" - Diff - {watch.label}",
                                  extract_form=extract_form,
                                  is_html_webdriver=is_html_webdriver,
                                  last_error=watch['last_error'],
                                  last_error_screenshot=watch.get_error_snapshot(),
                                  last_error_text=watch.get_error_text(),
                                  left_sticky=True,
-                                 newest=newest_version_file_contents,
+                                 newest=to_version_file_contents,
                                  newest_version_timestamp=dates[-1],
                                  password_enabled_and_share_is_off=password_enabled_and_share_is_off,
-                                 previous=previous_version_file_contents,
+                                 from_version_file_contents=from_version_file_contents,
+                                 to_version_file_contents=to_version_file_contents,
                                  screenshot=screenshot_url,
                                  uuid=uuid,
-                                 versions=dates[:-1], # All except current/last
+                                 versions=dates, # All except current/last
                                  watch_a=watch
                                  )
 
```
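
The diff view now resolves `from_version`/`to_version` query parameters against the list of snapshot timestamps, falling back to the second-newest and newest entries; the resolution logic, distilled:

```python
# Sketch of the from/to version resolution used by the diff page above.
def resolve_versions(dates, from_version=None, to_version=None):
    from_i = dates.index(from_version) if from_version in dates else -2
    to_i = dates.index(to_version) if to_version in dates else -1
    return dates[from_i], dates[to_i]

dates = ["1700000000", "1700000100", "1700000200"]
print(resolve_versions(dates))                             # ('1700000100', '1700000200')
print(resolve_versions(dates, from_version="1700000000"))  # ('1700000000', '1700000200')
```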
```diff
@@ -987,7 +1043,7 @@ def changedetection_app(config=None, datastore_o=None):
 
 
         is_html_webdriver = False
-        if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver':
+        if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver' or watch.get('fetch_backend', '').startswith('extra_browser_'):
             is_html_webdriver = True
 
         # Never requested successfully, but we detected a fetch error
@@ -1168,8 +1224,7 @@ def changedetection_app(config=None, datastore_o=None):
             # These files should be in our subdirectory
             try:
                 # set nocache, set content-type
-                watch_dir = datastore_o.datastore_path + "/" + filename
-                response = make_response(send_from_directory(filename="elements.json", directory=watch_dir, path=watch_dir + "/elements.json"))
+                response = make_response(send_from_directory(os.path.join(datastore_o.datastore_path, filename), "elements.json"))
                 response.headers['Content-type'] = 'application/json'
                 response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
                 response.headers['Pragma'] = 'no-cache'
@@ -1257,6 +1312,8 @@ def changedetection_app(config=None, datastore_o=None):
         # Forced recheck will skip the 'skip if content is the same' rule (, 'reprocess_existing_data': True})))
         tag = request.args.get('tag')
         uuid = request.args.get('uuid')
+        with_errors = request.args.get('with_errors') == "1"
+
         i = 0
 
         running_uuids = []
@@ -1272,6 +1329,8 @@ def changedetection_app(config=None, datastore_o=None):
             # Items that have this current tag
             for watch_uuid, watch in datastore.data['watching'].items():
                 if tag in watch.get('tags', {}):
+                    if with_errors and not watch.get('last_error'):
+                        continue
                     if watch_uuid not in running_uuids and not datastore.data['watching'][watch_uuid]['paused']:
                         update_q.put(
                             queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': watch_uuid, 'skip_when_checksum_same': False})
@@ -1282,8 +1341,11 @@ def changedetection_app(config=None, datastore_o=None):
             # No tag, no uuid, add everything.
             for watch_uuid, watch in datastore.data['watching'].items():
                 if watch_uuid not in running_uuids and not datastore.data['watching'][watch_uuid]['paused']:
+                    if with_errors and not watch.get('last_error'):
+                        continue
                     update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': watch_uuid, 'skip_when_checksum_same': False}))
                     i += 1
+
         flash("{} watches queued for rechecking.".format(i))
         return redirect(url_for('index', tag=tag))
 
@@ -1431,6 +1493,7 @@ def changedetection_app(config=None, datastore_o=None):
         return redirect(url_for('index'))
 
     @app.route("/highlight_submit_ignore_url", methods=['POST'])
+    @login_optionally_required
     def highlight_submit_ignore_url():
         import re
         mode = request.form.get('mode')
```

**changedetectionio/blueprint/browser_steps/__init__.py**

```diff
@@ -23,8 +23,10 @@
 
 from distutils.util import strtobool
 from flask import Blueprint, request, make_response
-import os
+import logging
+import os
+import re
 
 from changedetectionio.store import ChangeDetectionStore
 from changedetectionio import login_optionally_required
 
@@ -44,7 +46,7 @@ def construct_blueprint(datastore: ChangeDetectionStore):
 
 
         # We keep the playwright session open for many minutes
-        seconds_keepalive = int(os.getenv('BROWSERSTEPS_MINUTES_KEEPALIVE', 10)) * 60
+        keepalive_seconds = int(os.getenv('BROWSERSTEPS_MINUTES_KEEPALIVE', 10)) * 60
 
         browsersteps_start_session = {'start_time': time.time()}
 
@@ -56,16 +58,18 @@ def construct_blueprint(datastore: ChangeDetectionStore):
             # Start the Playwright context, which is actually a nodejs sub-process and communicates over STDIN/STDOUT pipes
             io_interface_context = io_interface_context.start()
 
+        keepalive_ms = ((keepalive_seconds + 3) * 1000)
+        base_url = os.getenv('PLAYWRIGHT_DRIVER_URL', '')
+        a = "?" if not '?' in base_url else '&'
+        base_url += a + f"timeout={keepalive_ms}"
+
         # keep it alive for 10 seconds more than we advertise, sometimes it helps to keep it shutting down cleanly
-        keepalive = "&timeout={}".format(((seconds_keepalive + 3) * 1000))
         try:
-            browsersteps_start_session['browser'] = io_interface_context.chromium.connect_over_cdp(
-                os.getenv('PLAYWRIGHT_DRIVER_URL', '') + keepalive)
+            browsersteps_start_session['browser'] = io_interface_context.chromium.connect_over_cdp(base_url)
         except Exception as e:
             if 'ECONNREFUSED' in str(e):
                 return make_response('Unable to start the Playwright Browser session, is it running?', 401)
             else:
                 # Other errors, bad URL syntax, bad reply etc
                 return make_response(str(e), 401)
 
         proxy_id = datastore.get_preferred_proxy_for_watch(uuid=watch_uuid)
```
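
The rewritten keepalive handling appends a `timeout=<ms>` query parameter with `?` or `&` depending on whether `PLAYWRIGHT_DRIVER_URL` already carries a query string; in isolation (the fallback driver URL here is an assumed example):

```python
import os

# Sketch of the keepalive URL construction above.
keepalive_seconds = int(os.getenv('BROWSERSTEPS_MINUTES_KEEPALIVE', 10)) * 60
keepalive_ms = (keepalive_seconds + 3) * 1000
base_url = os.getenv('PLAYWRIGHT_DRIVER_URL', 'ws://playwright-chrome:3000')  # assumed default
sep = '&' if '?' in base_url else '?'
print(base_url + sep + f"timeout={keepalive_ms}")  # e.g. ws://playwright-chrome:3000?timeout=603000
```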
```diff
@@ -118,12 +122,37 @@ def construct_blueprint(datastore: ChangeDetectionStore):
         print("Starting connection with playwright - done")
         return {'browsersteps_session_id': browsersteps_session_id}
 
+    @login_optionally_required
+    @browser_steps_blueprint.route("/browsersteps_image", methods=['GET'])
+    def browser_steps_fetch_screenshot_image():
+        from flask import (
+            make_response,
+            request,
+            send_from_directory,
+        )
+        uuid = request.args.get('uuid')
+        step_n = int(request.args.get('step_n'))
+
+        watch = datastore.data['watching'].get(uuid)
+        filename = f"step_before-{step_n}.jpeg" if request.args.get('type', '') == 'before' else f"step_{step_n}.jpeg"
+
+        if step_n and watch and os.path.isfile(os.path.join(watch.watch_data_dir, filename)):
+            response = make_response(send_from_directory(directory=watch.watch_data_dir, path=filename))
+            response.headers['Content-type'] = 'image/jpeg'
+            response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
+            response.headers['Pragma'] = 'no-cache'
+            response.headers['Expires'] = 0
+            return response
+
+        else:
+            return make_response('Unable to fetch image, is the URL correct? does the watch exist? does the step_type-n.jpeg exist?', 401)
+
     # A request for an action was received
     @login_optionally_required
     @browser_steps_blueprint.route("/browsersteps_update", methods=['POST'])
     def browsersteps_ui_update():
         import base64
-        import playwright._impl._api_types
+        import playwright._impl._errors
         global browsersteps_sessions
         from changedetectionio.blueprint.browser_steps import browser_steps
 
```

**changedetectionio/blueprint/browser_steps/browser_steps.py**

```diff
@@ -77,13 +77,13 @@ class steppable_browser_interface():
     def action_goto_url(self, selector=None, value=None):
         # self.page.set_viewport_size({"width": 1280, "height": 5000})
         now = time.time()
-        response = self.page.goto(value, timeout=0, wait_until='commit')
-
-        # Wait_until = commit
-        # - `'commit'` - consider operation to be finished when network response is received and the document started loading.
+        # Better to not use any smarts from Playwright and just wait an arbitrary number of seconds
+        # This seemed to solve nearly all 'TimeoutErrors'
+        response = self.page.goto(value, timeout=0, wait_until='load')
+        # Should be the same as the puppeteer_fetch.js methods, means, load with no timeout set (skip timeout)
+        #and also wait for seconds ?
+        #await page.waitForTimeout(1000);
+        #await page.waitForTimeout(extra_wait_ms);
         print("Time to goto URL ", time.time() - now)
         return response
 
     def action_click_element_containing_text(self, selector=None, value=''):
         if not len(value.strip()):
@@ -99,7 +99,8 @@ class steppable_browser_interface():
         self.page.fill(selector, value, timeout=10 * 1000)
 
     def action_execute_js(self, selector, value):
-        self.page.evaluate(value)
+        response = self.page.evaluate(value)
+        return response
 
     def action_click_element(self, selector, value):
         print("Clicking element")
```
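
Returning `page.evaluate()`'s result means a JavaScript browser step can now hand a value back to its caller; a standalone Playwright sketch of that behaviour (assumes a local Chromium install for Playwright):

```python
# Sketch: page.evaluate() returns the JS expression's value to Python,
# which action_execute_js() now propagates.
from playwright.sync_api import sync_playwright

with sync_playwright() as p:
    browser = p.chromium.launch()
    page = browser.new_page()
    page.goto("https://example.com")
    print(page.evaluate("document.title"))  # "Example Domain"
    browser.close()
```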
| @@ -109,7 +110,7 @@ class steppable_browser_interface(): | ||||
|         self.page.click(selector=selector, timeout=30 * 1000, delay=randint(200, 500)) | ||||
|  | ||||
|     def action_click_element_if_exists(self, selector, value): | ||||
|         import playwright._impl._api_types as _api_types | ||||
|         import playwright._impl._errors as _api_types | ||||
|         print("Clicking element if exists") | ||||
|         if not len(selector.strip()): | ||||
|             return | ||||
| @@ -138,13 +139,13 @@ class steppable_browser_interface(): | ||||
|     def action_wait_for_text(self, selector, value): | ||||
|         import json | ||||
|         v = json.dumps(value) | ||||
|         self.page.wait_for_function(f'document.querySelector("body").innerText.includes({v});', timeout=90000) | ||||
|         self.page.wait_for_function(f'document.querySelector("body").innerText.includes({v});', timeout=30000) | ||||
|  | ||||
|     def action_wait_for_text_in_element(self, selector, value): | ||||
|         import json | ||||
|         s = json.dumps(selector) | ||||
|         v = json.dumps(value) | ||||
|         self.page.wait_for_function(f'document.querySelector({s}).innerText.includes({v});', timeout=90000) | ||||
|         self.page.wait_for_function(f'document.querySelector({s}).innerText.includes({v});', timeout=30000) | ||||
|  | ||||
|     # @todo - in the future make some popout interface to capture what needs to be set | ||||
|     # https://playwright.dev/python/docs/api/class-keyboard | ||||
|   | ||||
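| A hedged sketch of the wait-for-text pattern above: json.dumps() emits a valid JavaScript string literal (quotes and backslashes escaped), so the needle can be embedded safely in the predicate passed to page.wait_for_function, here with the shorter 30s timeout. The helper name is illustrative: | ||||
|  | ||||
| import json | ||||
|  | ||||
| def wait_for_body_text(page, needle, timeout_ms=30000): | ||||
|     v = json.dumps(needle)  # safe JS string literal | ||||
|     page.wait_for_function( | ||||
|         f'document.querySelector("body").innerText.includes({v});', | ||||
|         timeout=timeout_ms) | ||||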
| @@ -40,8 +40,8 @@ def construct_blueprint(datastore: ChangeDetectionStore): | ||||
|         contents = '' | ||||
|         now = time.time() | ||||
|         try: | ||||
|             update_handler = text_json_diff.perform_site_check(datastore=datastore) | ||||
|             changed_detected, update_obj, contents = update_handler.run(uuid, preferred_proxy=preferred_proxy, skip_when_checksum_same=False) | ||||
|             update_handler = text_json_diff.perform_site_check(datastore=datastore, watch_uuid=uuid) | ||||
|             update_handler.call_browser() | ||||
|         # title, size is len contents not len xfer | ||||
|         except content_fetcher.Non200ErrorCodeReceived as e: | ||||
|             if e.status_code == 404: | ||||
| @@ -57,9 +57,11 @@ def construct_blueprint(datastore: ChangeDetectionStore): | ||||
|                 status.update({'status': 'ERROR OTHER', 'length': len(contents), 'text': f"Got empty reply with code {e.status_code} - Access denied"}) | ||||
|             else: | ||||
|                 status.update({'status': 'ERROR OTHER', 'length': len(contents) if contents else 0, 'text': f"Empty reply with code {e.status_code}, needs chrome?"}) | ||||
|  | ||||
|         except content_fetcher.ReplyWithContentButNoText as e: | ||||
|             txt = f"Got reply but with no content - Status code {e.status_code} - It's possible that the filters were found, but contained no usable text (or contained only an image)." | ||||
|             status.update({'status': 'ERROR', 'text': txt}) | ||||
|         except Exception as e: | ||||
|             status.update({'status': 'ERROR OTHER', 'length': len(contents) if contents else 0, 'text': 'Error: '+str(e)}) | ||||
|             status.update({'status': 'ERROR OTHER', 'length': len(contents) if contents else 0, 'text': 'Error: ' + type(e).__name__ + ' - ' + str(e)}) | ||||
|         else: | ||||
|             status.update({'status': 'OK', 'length': len(contents), 'text': ''}) | ||||
|  | ||||
|   | ||||
| @@ -69,11 +69,12 @@ xpath://body/div/span[contains(@class, 'example-class')]", | ||||
|                                 {% endif %} | ||||
|                             </ul> | ||||
|                         </li> | ||||
|                         <li>XPath - Limit text to this XPath rule, simply start with a forward-slash, | ||||
|                         <li>XPath - Limit text to this XPath rule; simply start with a forward-slash. To force XPath explicitly, or when the rule starts with an XPath function, prefix with <code>xpath:</code> | ||||
|                             <ul> | ||||
|                                 <li>Example:  <code>//*[contains(@class, 'sametext')]</code> or <code>xpath://*[contains(@class, 'sametext')]</code>, <a | ||||
|                                 <li>Example:  <code>//*[contains(@class, 'sametext')]</code> or <code>xpath:count(//*[contains(@class, 'sametext')])</code>, <a | ||||
|                                 href="http://xpather.com/" target="new">test your XPath here</a></li> | ||||
|                                 <li>Example: Get all titles from an RSS feed <code>//title/text()</code></li> | ||||
|                                 <li>To use XPath 1.0: prefix with <code>xpath1:</code></li> | ||||
|                             </ul> | ||||
|                             </li> | ||||
|                     </ul> | ||||
|   | ||||
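| The help text above implies a simple prefix convention for filter rules. A hypothetical dispatcher (names are illustrative, not the project's actual routing code) might classify rules like so: | ||||
|  | ||||
| def classify_filter_rule(rule: str) -> str: | ||||
|     rule = rule.strip() | ||||
|     if rule.startswith('xpath1:'): | ||||
|         return 'xpath1'   # forced XPath 1.0 (lxml) | ||||
|     if rule.startswith('xpath:') or rule.startswith('/'): | ||||
|         return 'xpath'    # XPath 2.0-3.1 via elementpath | ||||
|     return 'css'          # otherwise treat as a CSS selector | ||||
|  | ||||
| assert classify_filter_rule('xpath:count(//p)') == 'xpath' | ||||
| assert classify_filter_rule('//title/text()') == 'xpath' | ||||
| assert classify_filter_rule('xpath1://div') == 'xpath1' | ||||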
| @@ -1,12 +1,15 @@ | ||||
| import hashlib | ||||
| from abc import abstractmethod | ||||
| from distutils.util import strtobool | ||||
| from urllib.parse import urlparse | ||||
| import chardet | ||||
| import hashlib | ||||
| import json | ||||
| import logging | ||||
| import os | ||||
| import requests | ||||
| import sys | ||||
| import time | ||||
| import urllib.parse | ||||
|  | ||||
| visualselector_xpath_selectors = 'div,span,form,table,tbody,tr,td,a,p,ul,li,h1,h2,h3,h4, header, footer, section, article, aside, details, main, nav, summary' | ||||
|  | ||||
| @@ -77,11 +80,13 @@ class ScreenshotUnavailable(Exception): | ||||
|  | ||||
|  | ||||
| class ReplyWithContentButNoText(Exception): | ||||
|     def __init__(self, status_code, url, screenshot=None): | ||||
|     def __init__(self, status_code, url, screenshot=None, has_filters=False, html_content=''): | ||||
|         # Set this so we can use it in other parts of the app | ||||
|         self.status_code = status_code | ||||
|         self.url = url | ||||
|         self.screenshot = screenshot | ||||
|         self.has_filters = has_filters | ||||
|         self.html_content = html_content | ||||
|         return | ||||
|  | ||||
|  | ||||
| @@ -91,6 +96,7 @@ class Fetcher(): | ||||
|     content = None | ||||
|     error = None | ||||
|     fetcher_description = "No description" | ||||
|     browser_connection_url = None | ||||
|     headers = {} | ||||
|     status_code = None | ||||
|     webdriver_js_execute_code = None | ||||
| @@ -154,9 +160,19 @@ class Fetcher(): | ||||
|         """ | ||||
|         return {k.lower(): v for k, v in self.headers.items()} | ||||
|  | ||||
|     def browser_steps_get_valid_steps(self): | ||||
|         if self.browser_steps is not None and len(self.browser_steps): | ||||
|             valid_steps = filter( | ||||
|                 lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'), | ||||
|                 self.browser_steps) | ||||
|  | ||||
|             return valid_steps | ||||
|  | ||||
|         return None | ||||
|  | ||||
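| A rough illustration, with made-up step data, of what browser_steps_get_valid_steps() keeps and discards: unconfigured placeholder rows and the implicit first 'Goto site' step are filtered out. | ||||
|  | ||||
| steps = [ | ||||
|     {'operation': 'Goto site'},    # implicit first step, skipped | ||||
|     {'operation': 'Choose one'},   # unconfigured placeholder, skipped | ||||
|     {'operation': 'Click element', 'selector': 'button#accept'}, | ||||
|     {'operation': ''},             # empty, skipped | ||||
| ] | ||||
| valid = [s for s in steps | ||||
|          if s['operation'] and s['operation'] not in ('Choose one', 'Goto site')] | ||||
| # valid == [{'operation': 'Click element', 'selector': 'button#accept'}] | ||||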
|     def iterate_browser_steps(self): | ||||
|         from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface | ||||
|         from playwright._impl._api_types import TimeoutError | ||||
|         from playwright._impl._errors import TimeoutError | ||||
|         from jinja2 import Environment | ||||
|         jinja2_env = Environment(extensions=['jinja2_time.TimeExtension']) | ||||
|  | ||||
| @@ -165,10 +181,7 @@ class Fetcher(): | ||||
|         if self.browser_steps is not None and len(self.browser_steps): | ||||
|             interface = steppable_browser_interface() | ||||
|             interface.page = self.page | ||||
|  | ||||
|             valid_steps = filter( | ||||
|                 lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'), | ||||
|                 self.browser_steps) | ||||
|             valid_steps = self.browser_steps_get_valid_steps() | ||||
|  | ||||
|             for step in valid_steps: | ||||
|                 step_n += 1 | ||||
| @@ -239,14 +252,16 @@ class base_html_playwright(Fetcher): | ||||
|  | ||||
|     proxy = None | ||||
|  | ||||
|     def __init__(self, proxy_override=None): | ||||
|     def __init__(self, proxy_override=None, browser_connection_url=None): | ||||
|         super().__init__() | ||||
|         # .strip('"') is going to save someone a lot of time when they accidentally wrap the env value | ||||
|  | ||||
|         self.browser_type = os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').strip('"') | ||||
|         self.command_executor = os.getenv( | ||||
|             "PLAYWRIGHT_DRIVER_URL", | ||||
|             'ws://playwright-chrome:3000' | ||||
|         ).strip('"') | ||||
|  | ||||
|         # .strip('"') is going to save someone a lot of time when they accidentally wrap the env value | ||||
|         if not browser_connection_url: | ||||
|             self.browser_connection_url = os.getenv("PLAYWRIGHT_DRIVER_URL", 'ws://playwright-chrome:3000').strip('"') | ||||
|         else: | ||||
|             self.browser_connection_url = browser_connection_url | ||||
|  | ||||
|         # If any proxy settings are enabled, then we should setup the proxy object | ||||
|         proxy_args = {} | ||||
| @@ -264,7 +279,6 @@ class base_html_playwright(Fetcher): | ||||
|  | ||||
|         if self.proxy: | ||||
|             # Playwright needs separate username and password values | ||||
|             from urllib.parse import urlparse | ||||
|             parsed = urlparse(self.proxy.get('server')) | ||||
|             if parsed.username: | ||||
|                 self.proxy['username'] = parsed.username | ||||
| @@ -319,13 +333,11 @@ class base_html_playwright(Fetcher): | ||||
|  | ||||
|         # Append proxy connect string | ||||
|         if self.proxy: | ||||
|             import urllib.parse | ||||
|             # Remove username/password if it exists in the URL or you will receive "ERR_NO_SUPPORTED_PROXIES" error | ||||
|             # Actual authentication handled by Puppeteer/node | ||||
|             o = urlparse(self.proxy.get('server')) | ||||
|             proxy_url = urllib.parse.quote(o._replace(netloc="{}:{}".format(o.hostname, o.port)).geturl()) | ||||
|             browserless_function_url = f"{browserless_function_url}&--proxy-server={proxy_url}&dumpio=true" | ||||
|  | ||||
|             browserless_function_url = f"{browserless_function_url}&--proxy-server={proxy_url}" | ||||
|  | ||||
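| A small sketch of the credential-stripping trick above (the proxy host and credentials are made up): urlparse()._replace(netloc=...) rebuilds the proxy URL with only host:port, because embedding user:pass in --proxy-server triggers the ERR_NO_SUPPORTED_PROXIES error. | ||||
|  | ||||
| import urllib.parse | ||||
| from urllib.parse import urlparse | ||||
|  | ||||
| o = urlparse('http://user:secret@proxy.example.com:3128') | ||||
| bare = o._replace(netloc=f"{o.hostname}:{o.port}").geturl() | ||||
| # bare == 'http://proxy.example.com:3128' - auth is handled by Puppeteer/node | ||||
| proxy_url = urllib.parse.quote(bare)  # URL-encoded for the query string | ||||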
|         try: | ||||
|             amp = '&' if '?' in browserless_function_url else '?' | ||||
| @@ -345,7 +357,7 @@ class base_html_playwright(Fetcher): | ||||
|                         'url': url, | ||||
|                         'user_agent': {k.lower(): v for k, v in request_headers.items()}.get('user-agent', None), | ||||
|                         'proxy_username': self.proxy.get('username', '') if self.proxy else False, | ||||
|                         'proxy_password': self.proxy.get('password', '') if self.proxy else False, | ||||
|                         'proxy_password': self.proxy.get('password', '') if self.proxy and self.proxy.get('username') else False, | ||||
|                         'no_cache_list': [ | ||||
|                             'twitter', | ||||
|                             '.pdf' | ||||
| @@ -410,12 +422,8 @@ class base_html_playwright(Fetcher): | ||||
|             is_binary=False): | ||||
|  | ||||
|         # For now, USE_EXPERIMENTAL_PUPPETEER_FETCH is not supported by watches with BrowserSteps (for now!) | ||||
|         has_browser_steps = self.browser_steps and list(filter( | ||||
|                 lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'), | ||||
|                 self.browser_steps)) | ||||
|  | ||||
|         if not has_browser_steps: | ||||
|             if os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH'): | ||||
|         if not self.browser_steps and os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH'): | ||||
|             if strtobool(os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH')): | ||||
|                 # Temporary backup solution until we rewrite the playwright code | ||||
|                 return self.run_fetch_browserless_puppeteer( | ||||
|                     url, | ||||
| @@ -428,17 +436,21 @@ class base_html_playwright(Fetcher): | ||||
|                     is_binary) | ||||
|  | ||||
|         from playwright.sync_api import sync_playwright | ||||
|         import playwright._impl._api_types | ||||
|         import playwright._impl._errors | ||||
|  | ||||
|         self.delete_browser_steps_screenshots() | ||||
|         response = None | ||||
|  | ||||
|         with sync_playwright() as p: | ||||
|             browser_type = getattr(p, self.browser_type) | ||||
|  | ||||
|             # Seemed to cause a connection Exception even though I can see it connect | ||||
|             # self.browser = browser_type.connect(self.command_executor, timeout=timeout*1000) | ||||
|             # 60,000 connection timeout only | ||||
|             browser = browser_type.connect_over_cdp(self.command_executor, timeout=60000) | ||||
|             browser = browser_type.connect_over_cdp(self.browser_connection_url, timeout=60000) | ||||
|  | ||||
|             # SOCKS5 with authentication is not supported (yet) | ||||
|             # https://github.com/microsoft/playwright/issues/10567 | ||||
|  | ||||
|             # Set user agent to prevent Cloudflare from blocking the browser | ||||
|             # Use the default one configured in the App.py model that's passed from fetch_site_status.py | ||||
| @@ -457,41 +469,27 @@ class base_html_playwright(Fetcher): | ||||
|             if len(request_headers): | ||||
|                 context.set_extra_http_headers(request_headers) | ||||
|  | ||||
|                 self.page.set_default_navigation_timeout(90000) | ||||
|                 self.page.set_default_timeout(90000) | ||||
|             # Listen for all console events and handle errors | ||||
|             self.page.on("console", lambda msg: print(f"Playwright console: Watch URL: {url} {msg.type}: {msg.text} {msg.args}")) | ||||
|  | ||||
|                 # Listen for all console events and handle errors | ||||
|                 self.page.on("console", lambda msg: print(f"Playwright console: Watch URL: {url} {msg.type}: {msg.text} {msg.args}")) | ||||
|             # Re-use as much code from browser steps as possible so it's the same behaviour | ||||
|             from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface | ||||
|             browsersteps_interface = steppable_browser_interface() | ||||
|             browsersteps_interface.page = self.page | ||||
|  | ||||
|             # Goto page | ||||
|             try: | ||||
|                 # Wait_until = commit | ||||
|                 # - `'commit'` - consider operation to be finished when network response is received and the document started loading. | ||||
|                 # Better to not use any smarts from Playwright and just wait an arbitrary number of seconds | ||||
|                 # This seemed to solve nearly all 'TimeoutErrors' | ||||
|                 response = self.page.goto(url, wait_until='commit') | ||||
|             except playwright._impl._api_types.Error as e: | ||||
|                 # Retry once - https://github.com/browserless/chrome/issues/2485 | ||||
|                 # Sometimes errors related to invalid certs and others can be random | ||||
|                 print("Content Fetcher > retrying request got error - ", str(e)) | ||||
|                 time.sleep(1) | ||||
|                 response = self.page.goto(url, wait_until='commit') | ||||
|             response = browsersteps_interface.action_goto_url(value=url) | ||||
|             self.headers = response.all_headers() | ||||
|  | ||||
|             except Exception as e: | ||||
|                 print("Content Fetcher > Other exception when page.goto", str(e)) | ||||
|             if response is None: | ||||
|                 context.close() | ||||
|                 browser.close() | ||||
|                 raise PageUnloadable(url=url, status_code=None, message=str(e)) | ||||
|                 print("Content Fetcher > Response object was none") | ||||
|                 raise EmptyReply(url=url, status_code=None) | ||||
|  | ||||
|             # Execute any browser steps | ||||
|             try: | ||||
|                 extra_wait = int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay | ||||
|                 self.page.wait_for_timeout(extra_wait * 1000) | ||||
|  | ||||
|                 if self.webdriver_js_execute_code is not None and len(self.webdriver_js_execute_code): | ||||
|                     self.page.evaluate(self.webdriver_js_execute_code) | ||||
|  | ||||
|             except playwright._impl._api_types.TimeoutError as e: | ||||
|                     browsersteps_interface.action_execute_js(value=self.webdriver_js_execute_code, selector=None) | ||||
|             except playwright._impl._errors.TimeoutError as e: | ||||
|                 context.close() | ||||
|                 browser.close() | ||||
|                 # This can be ok, we will try to grab what we could retrieve | ||||
| @@ -502,28 +500,30 @@ class base_html_playwright(Fetcher): | ||||
|                 browser.close() | ||||
|                 raise PageUnloadable(url=url, status_code=None, message=str(e)) | ||||
|  | ||||
|             if response is None: | ||||
|                 context.close() | ||||
|                 browser.close() | ||||
|                 print("Content Fetcher > Response object was none") | ||||
|                 raise EmptyReply(url=url, status_code=None) | ||||
|  | ||||
|             # Run Browser Steps here | ||||
|             self.iterate_browser_steps() | ||||
|  | ||||
|             extra_wait = int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay | ||||
|             time.sleep(extra_wait) | ||||
|             self.page.wait_for_timeout(extra_wait * 1000) | ||||
|  | ||||
|  | ||||
|             self.content = self.page.content() | ||||
|             self.status_code = response.status | ||||
|  | ||||
|             if self.status_code != 200 and not ignore_status_codes: | ||||
|  | ||||
|                 screenshot=self.page.screenshot(type='jpeg', full_page=True, | ||||
|                                      quality=int(os.getenv("PLAYWRIGHT_SCREENSHOT_QUALITY", 72))) | ||||
|  | ||||
|                 raise Non200ErrorCodeReceived(url=url, status_code=self.status_code, screenshot=screenshot) | ||||
|  | ||||
|             if len(self.page.content().strip()) == 0: | ||||
|                 context.close() | ||||
|                 browser.close() | ||||
|                 print("Content Fetcher > Content was empty") | ||||
|                 raise EmptyReply(url=url, status_code=response.status) | ||||
|  | ||||
|             self.status_code = response.status | ||||
|             self.headers = response.all_headers() | ||||
|             # Run Browser Steps here | ||||
|             if self.browser_steps_get_valid_steps(): | ||||
|                 self.iterate_browser_steps() | ||||
|                  | ||||
|             self.page.wait_for_timeout(extra_wait * 1000) | ||||
|  | ||||
|             # So we can find an element on the page where its selector was entered manually (maybe not xPath etc) | ||||
|             if current_include_filters is not None: | ||||
| @@ -535,6 +535,7 @@ class base_html_playwright(Fetcher): | ||||
|                 "async () => {" + self.xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors) + "}") | ||||
|             self.instock_data = self.page.evaluate("async () => {" + self.instock_data_js + "}") | ||||
|  | ||||
|             self.content = self.page.content() | ||||
|             # Bug 3 in Playwright screenshot handling | ||||
|             # Some bug where it gives the wrong screenshot size, but making a request with the clip set first seems to solve it | ||||
|             # JPEG is better here because the screenshots can be very very large | ||||
| @@ -549,7 +550,7 @@ class base_html_playwright(Fetcher): | ||||
|             except Exception as e: | ||||
|                 context.close() | ||||
|                 browser.close() | ||||
|                 raise ScreenshotUnavailable(url=url, status_code=None) | ||||
|                 raise ScreenshotUnavailable(url=url, status_code=response.status) | ||||
|  | ||||
|             context.close() | ||||
|             browser.close() | ||||
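| A hedged sketch of the screenshot workaround referenced above ("Bug 3"): take a clipped screenshot first to force the renderer to settle, then take the real full-page JPEG. The clip dimensions, helper name and default quality are assumptions. | ||||
|  | ||||
| import os | ||||
|  | ||||
| def robust_full_page_screenshot(page): | ||||
|     quality = int(os.getenv("PLAYWRIGHT_SCREENSHOT_QUALITY", 72)) | ||||
|     # Priming request with an explicit clip seems to avoid wrong-size output | ||||
|     page.screenshot(type='jpeg', clip={'x': 1.0, 'y': 1.0, 'width': 1280, 'height': 1024}) | ||||
|     # JPEG keeps very large full-page captures manageable | ||||
|     return page.screenshot(type='jpeg', full_page=True, quality=quality) | ||||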
| @@ -561,8 +562,6 @@ class base_html_webdriver(Fetcher): | ||||
|     else: | ||||
|         fetcher_description = "WebDriver Chrome/Javascript" | ||||
|  | ||||
|     command_executor = '' | ||||
|  | ||||
|     # Configs for Proxy setup | ||||
|     # In the ENV vars, is prefixed with "webdriver_", so it is for example "webdriver_sslProxy" | ||||
|     selenium_proxy_settings_mappings = ['proxyType', 'ftpProxy', 'httpProxy', 'noProxy', | ||||
| @@ -570,12 +569,15 @@ class base_html_webdriver(Fetcher): | ||||
|                                         'socksProxy', 'socksVersion', 'socksUsername', 'socksPassword'] | ||||
|     proxy = None | ||||
|  | ||||
|     def __init__(self, proxy_override=None): | ||||
|     def __init__(self, proxy_override=None, browser_connection_url=None): | ||||
|         super().__init__() | ||||
|         from selenium.webdriver.common.proxy import Proxy as SeleniumProxy | ||||
|  | ||||
|         # .strip('"') is going to save someone a lot of time when they accidentally wrap the env value | ||||
|         self.command_executor = os.getenv("WEBDRIVER_URL", 'http://browser-chrome:4444/wd/hub').strip('"') | ||||
|         if not browser_connection_url: | ||||
|             self.browser_connection_url = os.getenv("WEBDRIVER_URL", 'http://browser-chrome:4444/wd/hub').strip('"') | ||||
|         else: | ||||
|             self.browser_connection_url = browser_connection_url | ||||
|  | ||||
|         # If any proxy settings are enabled, then we should setup the proxy object | ||||
|         proxy_args = {} | ||||
| @@ -608,15 +610,17 @@ class base_html_webdriver(Fetcher): | ||||
|             is_binary=False): | ||||
|  | ||||
|         from selenium import webdriver | ||||
|         from selenium.webdriver.common.desired_capabilities import DesiredCapabilities | ||||
|         from selenium.webdriver.chrome.options import Options as ChromeOptions | ||||
|         from selenium.common.exceptions import WebDriverException | ||||
|         # request_body, request_method unused for now, until some magic in the future happens. | ||||
|  | ||||
|         # check env for WEBDRIVER_URL | ||||
|         options = ChromeOptions() | ||||
|         if self.proxy: | ||||
|             options.proxy = self.proxy | ||||
|  | ||||
|         self.driver = webdriver.Remote( | ||||
|             command_executor=self.command_executor, | ||||
|             desired_capabilities=DesiredCapabilities.CHROME, | ||||
|             proxy=self.proxy) | ||||
|             command_executor=self.browser_connection_url, | ||||
|             options=options) | ||||
|  | ||||
|         try: | ||||
|             self.driver.get(url) | ||||
| @@ -648,11 +652,11 @@ class base_html_webdriver(Fetcher): | ||||
|     # Does the connection to the webdriver work? run a test connection. | ||||
|     def is_ready(self): | ||||
|         from selenium import webdriver | ||||
|         from selenium.webdriver.common.desired_capabilities import DesiredCapabilities | ||||
|         from selenium.webdriver.chrome.options import Options as ChromeOptions | ||||
|  | ||||
|         self.driver = webdriver.Remote( | ||||
|             command_executor=self.command_executor, | ||||
|             desired_capabilities=DesiredCapabilities.CHROME) | ||||
|             options=ChromeOptions()) | ||||
|  | ||||
|         # driver.quit() seems to cause better exceptions | ||||
|         self.quit() | ||||
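| The two hunks above track Selenium 4's API: desired_capabilities was removed from webdriver.Remote, and capabilities (including the proxy) now travel inside an Options object. A minimal sketch, assuming a reachable Selenium Grid endpoint: | ||||
|  | ||||
| from selenium import webdriver | ||||
| from selenium.webdriver.chrome.options import Options as ChromeOptions | ||||
|  | ||||
| options = ChromeOptions() | ||||
| # options.proxy = selenium_proxy_object  # only when a proxy is configured | ||||
| driver = webdriver.Remote( | ||||
|     command_executor='http://browser-chrome:4444/wd/hub', | ||||
|     options=options) | ||||
| driver.get('https://example.com') | ||||
| driver.quit() | ||||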
| @@ -670,8 +674,10 @@ class base_html_webdriver(Fetcher): | ||||
| class html_requests(Fetcher): | ||||
|     fetcher_description = "Basic fast Plaintext/HTTP Client" | ||||
|  | ||||
|     def __init__(self, proxy_override=None): | ||||
|     def __init__(self, proxy_override=None, browser_connection_url=None): | ||||
|         super().__init__() | ||||
|         self.proxy_override = proxy_override | ||||
|         # browser_connection_url is None because it's always 'launched locally' | ||||
|  | ||||
|     def run(self, | ||||
|             url, | ||||
| @@ -691,6 +697,10 @@ class html_requests(Fetcher): | ||||
|         proxies = {} | ||||
|  | ||||
|         # Allows override the proxy on a per-request basis | ||||
|  | ||||
|         # https://requests.readthedocs.io/en/latest/user/advanced/#socks | ||||
|         # Should also work with `socks5://user:pass@host:port` type syntax. | ||||
|  | ||||
|         if self.proxy_override: | ||||
|             proxies = {'http': self.proxy_override, 'https': self.proxy_override, 'ftp': self.proxy_override} | ||||
|         else: | ||||
|   | ||||
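| As the new comment notes, requests accepts SOCKS URLs in the same proxies mapping as plain HTTP proxies, provided the socks extra (requests[socks] / PySocks) is installed. A short sketch with a made-up proxy: | ||||
|  | ||||
| import requests | ||||
|  | ||||
| proxy = 'socks5://user:pass@127.0.0.1:1080'   # or e.g. 'http://user:pass@host:3128' | ||||
| proxies = {'http': proxy, 'https': proxy} | ||||
| r = requests.get('https://example.com', proxies=proxies, timeout=30) | ||||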
| @@ -15,14 +15,20 @@ from wtforms import ( | ||||
|     validators, | ||||
|     widgets | ||||
| ) | ||||
| from flask_wtf.file import FileField, FileAllowed | ||||
| from wtforms.fields import FieldList | ||||
|  | ||||
| from wtforms.validators import ValidationError | ||||
|  | ||||
| from validators.url import url as url_validator | ||||
|  | ||||
|  | ||||
| # default | ||||
| # each select <option data-enabled="enabled-0-0" | ||||
| from changedetectionio.blueprint.browser_steps.browser_steps import browser_step_ui_config | ||||
|  | ||||
| from changedetectionio import content_fetcher | ||||
| from changedetectionio import content_fetcher, html_tools | ||||
|  | ||||
| from changedetectionio.notification import ( | ||||
|     valid_notification_formats, | ||||
| ) | ||||
| @@ -40,7 +46,7 @@ valid_method = { | ||||
| } | ||||
|  | ||||
| default_method = 'GET' | ||||
|  | ||||
| allow_simplehost = not strtobool(os.getenv('BLOCK_SIMPLEHOSTS', 'False')) | ||||
|  | ||||
| class StringListField(StringField): | ||||
|     widget = widgets.TextArea() | ||||
| @@ -162,7 +168,9 @@ class ValidateContentFetcherIsReady(object): | ||||
|     def __call__(self, form, field): | ||||
|         import urllib3.exceptions | ||||
|         from changedetectionio import content_fetcher | ||||
|         return | ||||
|  | ||||
|         # AttributeError: module 'changedetectionio.content_fetcher' has no attribute 'extra_browser_unlocked<>ASDF213r123r' | ||||
|         # Better would be a radiohandler that keeps a reference to each class | ||||
|         if field.data is not None and field.data != 'system': | ||||
|             klass = getattr(content_fetcher, field.data) | ||||
| @@ -260,19 +268,23 @@ class validateURL(object): | ||||
|         self.message = message | ||||
|  | ||||
|     def __call__(self, form, field): | ||||
|         import validators | ||||
|         # If hosts that only contain alphanumerics are allowed ("localhost" for example) | ||||
|         allow_simplehost = not strtobool(os.getenv('BLOCK_SIMPLEHOSTS', 'False')) | ||||
|         try: | ||||
|             validators.url(field.data.strip(), simple_host=allow_simplehost) | ||||
|         except validators.ValidationFailure: | ||||
|             message = field.gettext('\'%s\' is not a valid URL.' % (field.data.strip())) | ||||
|             raise ValidationError(message) | ||||
|         # This raises a ValidationError() when the URL is invalid | ||||
|         validate_url(field.data) | ||||
|  | ||||
|         from .model.Watch import is_safe_url | ||||
|         if not is_safe_url(field.data): | ||||
|             raise ValidationError('Watch protocol is not permitted by SAFE_PROTOCOL_REGEX') | ||||
| def validate_url(test_url): | ||||
|     # If hosts that only contain alphanumerics are allowed ("localhost" for example) | ||||
|     try: | ||||
|         url_validator(test_url, simple_host=allow_simplehost) | ||||
|     except validators.ValidationError: | ||||
|         #@todo check for xss | ||||
|         message = f"'{test_url}' is not a valid URL." | ||||
|         # This should raise wtforms.validators.ValidationError | ||||
|         raise ValidationError(message) | ||||
|  | ||||
|     from .model.Watch import is_safe_url | ||||
|     if not is_safe_url(test_url): | ||||
|         # This should raise wtforms.validators.ValidationError | ||||
|         raise ValidationError('Watch protocol is not permitted by SAFE_PROTOCOL_REGEX, or the URL format is incorrect') | ||||
|  | ||||
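| Pulling the check out into a module-level validate_url() lets non-form callers (such as the .xlsx importers below) reuse it. A usage sketch with a hypothetical caller: | ||||
|  | ||||
| from wtforms import ValidationError | ||||
|  | ||||
| def try_add(url): | ||||
|     try: | ||||
|         validate_url(url)  # raises ValidationError on bad input | ||||
|     except ValidationError as e: | ||||
|         print('rejected:', url, str(e)) | ||||
|         return False | ||||
|     return True | ||||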
| class ValidateListRegex(object): | ||||
|     """ | ||||
| @@ -284,11 +296,10 @@ class ValidateListRegex(object): | ||||
|     def __call__(self, form, field): | ||||
|  | ||||
|         for line in field.data: | ||||
|             if line[0] == '/' and line[-1] == '/': | ||||
|                 # Because internally we dont wrap in / | ||||
|                 line = line.strip('/') | ||||
|             if re.search(html_tools.PERL_STYLE_REGEX, line, re.IGNORECASE): | ||||
|                 try: | ||||
|                     re.compile(line) | ||||
|                     regex = html_tools.perl_style_slash_enclosed_regex_to_options(line) | ||||
|                     re.compile(regex) | ||||
|                 except re.error: | ||||
|                     message = field.gettext('RegEx \'%s\' is not a valid regular expression.') | ||||
|                     raise ValidationError(message % (line)) | ||||
| @@ -317,11 +328,30 @@ class ValidateCSSJSONXPATHInput(object): | ||||
|                 return | ||||
|  | ||||
|             # Does it look like XPath? | ||||
|             if line.strip()[0] == '/': | ||||
|             if line.strip()[0] == '/' or line.strip().startswith('xpath:'): | ||||
|                 if not self.allow_xpath: | ||||
|                     raise ValidationError("XPath not permitted in this field!") | ||||
|                 from lxml import etree, html | ||||
|                 import elementpath | ||||
|                 # xpath 2.0-3.1 | ||||
|                 from elementpath.xpath3 import XPath3Parser | ||||
|                 tree = html.fromstring("<html></html>") | ||||
|                 line = line.replace('xpath:', '') | ||||
|  | ||||
|                 try: | ||||
|                     elementpath.select(tree, line.strip(), parser=XPath3Parser) | ||||
|                 except elementpath.ElementPathError as e: | ||||
|                     message = field.gettext('\'%s\' is not a valid XPath expression. (%s)') | ||||
|                     raise ValidationError(message % (line, str(e))) | ||||
|                 except Exception: | ||||
|                     raise ValidationError("A system error occurred when validating your XPath expression") | ||||
|  | ||||
|             if line.strip().startswith('xpath1:'): | ||||
|                 if not self.allow_xpath: | ||||
|                     raise ValidationError("XPath not permitted in this field!") | ||||
|                 from lxml import etree, html | ||||
|                 tree = html.fromstring("<html></html>") | ||||
|                 line = re.sub(r'^xpath1:', '', line) | ||||
|  | ||||
|                 try: | ||||
|                     tree.xpath(line.strip()) | ||||
| @@ -398,6 +428,9 @@ class importForm(Form): | ||||
|     from . import processors | ||||
|     processor = RadioField(u'Processor', choices=processors.available_processors(), default="text_json_diff") | ||||
|     urls = TextAreaField('URLs') | ||||
|     xlsx_file = FileField('Upload .xlsx file', validators=[FileAllowed(['xlsx'], 'Must be .xlsx file!')]) | ||||
|     file_mapping = SelectField('File mapping', [validators.DataRequired()], choices=[('wachete', 'Wachete mapping'), ('custom', 'Custom mapping')]) | ||||
|  | ||||
|  | ||||
| class SingleBrowserStep(Form): | ||||
|  | ||||
| @@ -481,9 +514,15 @@ class SingleExtraProxy(Form): | ||||
|  | ||||
|     # maybe better to set some <script>var.. | ||||
|     proxy_name = StringField('Name', [validators.Optional()], render_kw={"placeholder": "Name"}) | ||||
|     proxy_url = StringField('Proxy URL', [validators.Optional()], render_kw={"placeholder": "http://user:pass@...:3128", "size":50}) | ||||
|     proxy_url = StringField('Proxy URL', [validators.Optional()], render_kw={"placeholder": "socks5:// or regular proxy http://user:pass@...:3128", "size":50}) | ||||
|     # @todo do the validation here instead | ||||
|  | ||||
| class SingleExtraBrowser(Form): | ||||
|     browser_name = StringField('Name', [validators.Optional()], render_kw={"placeholder": "Name"}) | ||||
|     browser_connection_url = StringField('Browser connection URL', [validators.Optional()], render_kw={"placeholder": "wss://brightdata... wss://oxylabs etc", "size":50}) | ||||
|     # @todo do the validation here instead | ||||
|  | ||||
|  | ||||
| # datastore.data['settings']['requests'].. | ||||
| class globalSettingsRequestForm(Form): | ||||
|     time_between_check = FormField(TimeBetweenCheckForm) | ||||
| @@ -492,6 +531,7 @@ class globalSettingsRequestForm(Form): | ||||
|                                   render_kw={"style": "width: 5em;"}, | ||||
|                                   validators=[validators.NumberRange(min=0, message="Should contain zero or more seconds")]) | ||||
|     extra_proxies = FieldList(FormField(SingleExtraProxy), min_entries=5) | ||||
|     extra_browsers = FieldList(FormField(SingleExtraBrowser), min_entries=5) | ||||
|  | ||||
|     def validate_extra_proxies(self, extra_validators=None): | ||||
|         for e in self.data['extra_proxies']: | ||||
|   | ||||
| @@ -1,23 +1,44 @@ | ||||
|  | ||||
| from bs4 import BeautifulSoup | ||||
| from inscriptis import get_text | ||||
| from inscriptis.model.config import ParserConfig | ||||
| from jsonpath_ng.ext import parse | ||||
| from typing import List | ||||
| from inscriptis.css_profiles import CSS_PROFILES, HtmlElement | ||||
| from inscriptis.html_properties import Display | ||||
| from inscriptis.model.config import ParserConfig | ||||
| from xml.sax.saxutils import escape as xml_escape | ||||
| import json | ||||
| import re | ||||
|  | ||||
|  | ||||
| # HTML added to be sure each result matching a filter (.example) gets converted to a new line by Inscriptis | ||||
| TEXT_FILTER_LIST_LINE_SUFFIX = "<br>" | ||||
|  | ||||
| PERL_STYLE_REGEX = r'^/(.*?)/([a-z]*)?$' | ||||
| # 'price' , 'lowPrice', 'highPrice' are usually under here | ||||
| # all of those may or may not appear on different websites | ||||
| LD_JSON_PRODUCT_OFFER_SELECTOR = "json:$..offers" | ||||
| # All of those may or may not appear on different websites - I didn't find a way to do case-insensitive searching here | ||||
| LD_JSON_PRODUCT_OFFER_SELECTORS = ["json:$..offers", "json:$..Offers"] | ||||
|  | ||||
| class JSONNotFound(ValueError): | ||||
|     def __init__(self, msg): | ||||
|         ValueError.__init__(self, msg) | ||||
|          | ||||
|  | ||||
|  | ||||
| # Doesn't look like Python supports forward-slash auto-enclosure in re.findall, | ||||
| # so convert it to the inline-flag "(?i)foobar" type of configuration | ||||
| def perl_style_slash_enclosed_regex_to_options(regex): | ||||
|  | ||||
|     res = re.search(PERL_STYLE_REGEX, regex, re.IGNORECASE) | ||||
|  | ||||
|     if res: | ||||
|         flags = res.group(2) if res.group(2) else 'i' | ||||
|         regex = f"(?{flags}){res.group(1)}" | ||||
|     else: | ||||
|         # Fall back to just ignorecase as an option | ||||
|         regex = f"(?i){regex}" | ||||
|  | ||||
|     return regex | ||||
|  | ||||
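| Concretely, the conversion maps a /pattern/flags literal onto Python's inline-flag syntax, defaulting to case-insensitive when no flags (or no slashes) are given: | ||||
|  | ||||
| assert perl_style_slash_enclosed_regex_to_options('/some.*text/i') == '(?i)some.*text' | ||||
| assert perl_style_slash_enclosed_regex_to_options('/some.*text/s') == '(?s)some.*text' | ||||
| # Not slash-enclosed: falls back to ignore-case only | ||||
| assert perl_style_slash_enclosed_regex_to_options('some.*text') == '(?i)some.*text' | ||||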
| # Given a CSS Rule, and a blob of HTML, return the blob of HTML that matches | ||||
| def include_filters(include_filters, html_content, append_pretty_line_formatting=False): | ||||
|     soup = BeautifulSoup(html_content, "html.parser") | ||||
| @@ -48,12 +69,96 @@ def element_removal(selectors: List[str], html_content): | ||||
|     selector = ",".join(selectors) | ||||
|     return subtractive_css_selector(selector, html_content) | ||||
|  | ||||
| def elementpath_tostring(obj): | ||||
|     """ | ||||
|     Convert elementpath.select results to string type | ||||
|     # The MIT License (MIT), Copyright (c), 2018-2021, SISSA (Scuola Internazionale Superiore di Studi Avanzati) | ||||
|     # https://github.com/sissaschool/elementpath/blob/dfcc2fd3d6011b16e02bf30459a7924f547b47d0/elementpath/xpath_tokens.py#L1038 | ||||
|     """ | ||||
|  | ||||
|     import elementpath | ||||
|     from decimal import Decimal | ||||
|     import math | ||||
|  | ||||
|     if obj is None: | ||||
|         return '' | ||||
|     # https://elementpath.readthedocs.io/en/latest/xpath_api.html#elementpath.select | ||||
|     elif isinstance(obj, elementpath.XPathNode): | ||||
|         return obj.string_value | ||||
|     elif isinstance(obj, bool): | ||||
|         return 'true' if obj else 'false' | ||||
|     elif isinstance(obj, Decimal): | ||||
|         value = format(obj, 'f') | ||||
|         if '.' in value: | ||||
|             return value.rstrip('0').rstrip('.') | ||||
|         return value | ||||
|  | ||||
|     elif isinstance(obj, float): | ||||
|         if math.isnan(obj): | ||||
|             return 'NaN' | ||||
|         elif math.isinf(obj): | ||||
|             return str(obj).upper() | ||||
|  | ||||
|         value = str(obj) | ||||
|         if '.' in value: | ||||
|             value = value.rstrip('0').rstrip('.') | ||||
|         if '+' in value: | ||||
|             value = value.replace('+', '') | ||||
|         if 'e' in value: | ||||
|             return value.upper() | ||||
|         return value | ||||
|  | ||||
|     return str(obj) | ||||
|  | ||||
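| Sample conversions this helper performs on non-node XPath results: | ||||
|  | ||||
| from decimal import Decimal | ||||
|  | ||||
| assert elementpath_tostring(None) == '' | ||||
| assert elementpath_tostring(True) == 'true' | ||||
| assert elementpath_tostring(Decimal('3.1400')) == '3.14'  # trailing zeros trimmed | ||||
| assert elementpath_tostring(float('nan')) == 'NaN' | ||||
| assert elementpath_tostring(42) == '42'  # anything else falls through to str() | ||||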
| # Return str Utf-8 of matched rules | ||||
| def xpath_filter(xpath_filter, html_content, append_pretty_line_formatting=False): | ||||
| def xpath_filter(xpath_filter, html_content, append_pretty_line_formatting=False, is_rss=False): | ||||
|     from lxml import etree, html | ||||
|     import elementpath | ||||
|     # xpath 2.0-3.1 | ||||
|     from elementpath.xpath3 import XPath3Parser | ||||
|  | ||||
|     parser = etree.HTMLParser() | ||||
|     if is_rss: | ||||
|         # So that we can keep CDATA for cdata_in_document_to_text() to process | ||||
|         parser = etree.XMLParser(strip_cdata=False) | ||||
|  | ||||
|     tree = html.fromstring(bytes(html_content, encoding='utf-8'), parser=parser) | ||||
|     html_block = "" | ||||
|  | ||||
|     r = elementpath.select(tree, xpath_filter.strip(), namespaces={'re': 'http://exslt.org/regular-expressions'}, parser=XPath3Parser) | ||||
|     #@note: //title/text() won't work where <title>CDATA.. | ||||
|  | ||||
|     if type(r) != list: | ||||
|         r = [r] | ||||
|  | ||||
|     for element in r: | ||||
|         # When there's more than 1 match, then add the suffix to separate each line | ||||
|         # And where the matched result doesn't include something that will cause Inscriptis to add a newline | ||||
|         # (This way each 'match' reliably has a new-line in the diff) | ||||
|         # Divs are converted to 4 whitespaces by inscriptis | ||||
|         if append_pretty_line_formatting and len(html_block) and (not hasattr(element, 'tag') or element.tag not in ['br', 'hr', 'div', 'p']): | ||||
|             html_block += TEXT_FILTER_LIST_LINE_SUFFIX | ||||
|  | ||||
|         if type(element) == str: | ||||
|             html_block += element | ||||
|         elif issubclass(type(element), etree._Element) or issubclass(type(element), etree._ElementTree): | ||||
|             html_block += etree.tostring(element, pretty_print=True).decode('utf-8') | ||||
|         else: | ||||
|             html_block += elementpath_tostring(element) | ||||
|  | ||||
|     return html_block | ||||
|  | ||||
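| Because the XPath 3 parser can return scalars as well as nodes, the new filter handles results like count() directly. A usage sketch (output shown approximately): | ||||
|  | ||||
| doc = "<html><body><p>one</p><p>two</p></body></html>" | ||||
| # Node results are serialised back to HTML | ||||
| print(xpath_filter('//p', doc))         # "<p>one</p>\n<p>two</p>\n" | ||||
| # Scalar results go through elementpath_tostring() | ||||
| print(xpath_filter('count(//p)', doc))  # "2" | ||||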
| # Return str Utf-8 of matched rules | ||||
| # 'xpath1:' | ||||
| def xpath1_filter(xpath_filter, html_content, append_pretty_line_formatting=False, is_rss=False): | ||||
|     from lxml import etree, html | ||||
|  | ||||
|     tree = html.fromstring(bytes(html_content, encoding='utf-8')) | ||||
|     parser = None | ||||
|     if is_rss: | ||||
|         # So that we can keep CDATA for cdata_in_document_to_text() to process | ||||
|         parser = etree.XMLParser(strip_cdata=False) | ||||
|  | ||||
|     tree = html.fromstring(bytes(html_content, encoding='utf-8'), parser=parser) | ||||
|     html_block = "" | ||||
|  | ||||
|     r = tree.xpath(xpath_filter.strip(), namespaces={'re': 'http://exslt.org/regular-expressions'}) | ||||
| @@ -76,7 +181,6 @@ def xpath_filter(xpath_filter, html_content, append_pretty_line_formatting=False | ||||
|  | ||||
|     return html_block | ||||
|  | ||||
|  | ||||
| # Extract/find element | ||||
| def extract_element(find='title', html_content=''): | ||||
|  | ||||
| @@ -144,7 +248,6 @@ def extract_json_as_string(content, json_filter, ensure_is_ldjson_info_type=None | ||||
|  | ||||
|         # Foreach <script json></script> blob.. just return the first that matches json_filter | ||||
|         # As a last resort, try to parse the whole <body> | ||||
|         s = [] | ||||
|         soup = BeautifulSoup(content, 'html.parser') | ||||
|  | ||||
|         if ensure_is_ldjson_info_type: | ||||
| @@ -170,13 +273,24 @@ def extract_json_as_string(content, json_filter, ensure_is_ldjson_info_type=None | ||||
|          | ||||
|         for json_data in bs_jsons: | ||||
|             stripped_text_from_html = _parse_json(json_data, json_filter) | ||||
|  | ||||
|             if ensure_is_ldjson_info_type: | ||||
|                 # Could sometimes be list, string or something else random | ||||
|                 if isinstance(json_data, dict): | ||||
|                     # If it has LD JSON 'key' @type, and @type is 'product', and something was found for the search | ||||
|                     # (Some sites have multiple of the same ld+json @type='product', but some have the review part, some have the 'price' part) | ||||
|                     if json_data.get('@type', False) and json_data.get('@type','').lower() == ensure_is_ldjson_info_type.lower() and stripped_text_from_html: | ||||
|                         break | ||||
|                     # @type could also be a list (Product, SubType) | ||||
|                     # LD_JSON auto-extract also requires some content PLUS the ldjson to be present | ||||
|                     # 1833 - could be either str or list, should not be anything else | ||||
|                     if json_data.get('@type') and stripped_text_from_html: | ||||
|                         try: | ||||
|                             if isinstance(json_data.get('@type'), (str, list)): | ||||
|                                 types = [json_data.get('@type')] if isinstance(json_data.get('@type'), str) else json_data.get('@type') | ||||
|                                 if ensure_is_ldjson_info_type.lower() in [x.lower().strip() for x in types]: | ||||
|                                     break | ||||
|                         except: | ||||
|                             continue | ||||
|  | ||||
|             elif stripped_text_from_html: | ||||
|                 break | ||||
|  | ||||
| @@ -195,23 +309,14 @@ def strip_ignore_text(content, wordlist, mode="content"): | ||||
|     output = [] | ||||
|     ignore_text = [] | ||||
|     ignore_regex = [] | ||||
|  | ||||
|     ignored_line_numbers = [] | ||||
|  | ||||
|     for k in wordlist: | ||||
|         # Is it a regex? | ||||
|         x = re.search('^\/(.*)\/(.*)', k.strip()) | ||||
|         if x: | ||||
|             # Starts with / but doesn't look like a regex | ||||
|             p = x.group(1) | ||||
|             try: | ||||
|                 # @Todo python regex options can go before the regex str, but not really many of the options apply on a per-line basis | ||||
|                 ignore_regex.append(re.compile(rf"{p}", re.IGNORECASE)) | ||||
|             except Exception as e: | ||||
|                 # Badly formed regex, treat as text | ||||
|                 ignore_text.append(k.strip()) | ||||
|         res = re.search(PERL_STYLE_REGEX, k, re.IGNORECASE) | ||||
|         if res: | ||||
|             ignore_regex.append(re.compile(perl_style_slash_enclosed_regex_to_options(k))) | ||||
|         else: | ||||
|             # Had a / but doesn't work as regex | ||||
|             ignore_text.append(k.strip()) | ||||
|  | ||||
|     for line in content.splitlines(): | ||||
| @@ -241,8 +346,15 @@ def strip_ignore_text(content, wordlist, mode="content"): | ||||
|  | ||||
|     return "\n".encode('utf8').join(output) | ||||
|  | ||||
| def cdata_in_document_to_text(html_content: str, render_anchor_tag_content=False) -> str: | ||||
|     pattern = r'<!\[CDATA\[(\s*(?:.(?<!\]\]>)\s*)*)\]\]>' | ||||
|     def repl(m): | ||||
|         text = m.group(1) | ||||
|         return xml_escape(html_to_text(html_content=text)).strip() | ||||
|  | ||||
| def html_to_text(html_content: str, render_anchor_tag_content=False) -> str: | ||||
|     return re.sub(pattern, repl, html_content) | ||||
|  | ||||
| def html_to_text(html_content: str, render_anchor_tag_content=False, is_rss=False) -> str: | ||||
|     """Converts html string to a string with just the text. If ignoring | ||||
|     rendering anchor tag content is enable, anchor tag content are also | ||||
|     included in the text | ||||
| @@ -258,16 +370,21 @@ def html_to_text(html_content: str, render_anchor_tag_content=False) -> str: | ||||
|     #  if anchor tag content flag is set to True define a config for | ||||
|     #  extracting this content | ||||
|     if render_anchor_tag_content: | ||||
|  | ||||
|         parser_config = ParserConfig( | ||||
|             annotation_rules={"a": ["hyperlink"]}, display_links=True | ||||
|             annotation_rules={"a": ["hyperlink"]}, | ||||
|             display_links=True | ||||
|         ) | ||||
|  | ||||
|     # otherwise set config to None | ||||
|     # otherwise set config to None/default | ||||
|     else: | ||||
|         parser_config = None | ||||
|  | ||||
|     # get text and annotations via inscriptis | ||||
|     # RSS Mode - Inscriptis will treat `title` as something else. | ||||
|     # Treat it as a regular block-display element (//item/title) | ||||
|     # This is a bit of a hack - the real way is to use XSLT to convert it to HTML #1874 | ||||
|     if is_rss: | ||||
|         html_content = re.sub(r'<title([\s>])', r'<h1\1', html_content) | ||||
|         html_content = re.sub(r'</title>', r'</h1>', html_content) | ||||
|  | ||||
|     text_content = get_text(html_content, config=parser_config) | ||||
|  | ||||
|     return text_content | ||||
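| The <title> to <h1> rewrite keeps the opening tag's attributes and whitespace via the backreference, for example: | ||||
|  | ||||
| import re | ||||
|  | ||||
| doc = '<item><title lang="en">Hello</title></item>' | ||||
| doc = re.sub(r'<title([\s>])', r'<h1\1', doc) | ||||
| doc = re.sub(r'</title>', r'</h1>', doc) | ||||
| # doc == '<item><h1 lang="en">Hello</h1></item>' | ||||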
| @@ -275,9 +392,18 @@ def html_to_text(html_content: str, render_anchor_tag_content=False) -> str: | ||||
|  | ||||
| # Does LD+JSON exist with a @type=='product' and a .price set anywhere? | ||||
| def has_ldjson_product_info(content): | ||||
|     pricing_data = '' | ||||
|  | ||||
|     try: | ||||
|         pricing_data = extract_json_as_string(content=content, json_filter=LD_JSON_PRODUCT_OFFER_SELECTOR, ensure_is_ldjson_info_type="product") | ||||
|     except JSONNotFound as e: | ||||
|         if not 'application/ld+json' in content: | ||||
|             return False | ||||
|  | ||||
|         for selector in LD_JSON_PRODUCT_OFFER_SELECTORS: | ||||
|             pricing_data += extract_json_as_string(content=content, | ||||
|                                                    json_filter=selector, | ||||
|                                                    ensure_is_ldjson_info_type="product") | ||||
|  | ||||
|     except Exception as e: | ||||
|         # Totally fine | ||||
|         return False | ||||
|     x = bool(pricing_data) | ||||
|   | ||||
| @@ -1,6 +1,9 @@ | ||||
| from abc import ABC, abstractmethod | ||||
| import time | ||||
| import validators | ||||
| from wtforms import ValidationError | ||||
|  | ||||
| from changedetectionio.forms import validate_url | ||||
|  | ||||
|  | ||||
| class Importer(): | ||||
| @@ -12,6 +15,7 @@ class Importer(): | ||||
|         self.new_uuids = [] | ||||
|         self.good = 0 | ||||
|         self.remaining_data = [] | ||||
|         self.import_profile = None | ||||
|  | ||||
|     @abstractmethod | ||||
|     def run(self, | ||||
| @@ -132,3 +136,167 @@ class import_distill_io_json(Importer): | ||||
|                     good += 1 | ||||
|  | ||||
|         flash("{} Imported from Distill.io in {:.2f}s, {} Skipped.".format(len(self.new_uuids), time.time() - now, len(self.remaining_data))) | ||||
|  | ||||
|  | ||||
| class import_xlsx_wachete(Importer): | ||||
|  | ||||
|     def run(self, | ||||
|             data, | ||||
|             flash, | ||||
|             datastore, | ||||
|             ): | ||||
|  | ||||
|         good = 0 | ||||
|         now = time.time() | ||||
|         self.new_uuids = [] | ||||
|  | ||||
|         from openpyxl import load_workbook | ||||
|  | ||||
|         try: | ||||
|             wb = load_workbook(data) | ||||
|         except Exception as e: | ||||
|             # @todo correct except | ||||
|             flash("Unable to read the exported XLSX file, is something wrong with the file?", 'error') | ||||
|             return | ||||
|  | ||||
|         row_id = 2 | ||||
|         for row in wb.active.iter_rows(min_row=row_id): | ||||
|             try: | ||||
|                 extras = {} | ||||
|                 data = {} | ||||
|                 for cell in row: | ||||
|                     if not cell.value: | ||||
|                         continue | ||||
|                     column_title = wb.active.cell(row=1, column=cell.column).value.strip().lower() | ||||
|                     data[column_title] = cell.value | ||||
|  | ||||
|                 # Forced switch to webdriver/playwright/etc | ||||
|                 dynamic_wachet = str(data.get('dynamic wachet', '')).strip().lower()  # Convert bool to str to cover all cases | ||||
|                 # libreoffice and others can have it as =FALSE() =TRUE(), or bool(true) | ||||
|                 if 'true' in dynamic_wachet or dynamic_wachet == '1': | ||||
|                     extras['fetch_backend'] = 'html_webdriver' | ||||
|                 elif 'false' in dynamic_wachet or dynamic_wachet == '0': | ||||
|                     extras['fetch_backend'] = 'html_requests' | ||||
|  | ||||
|                 if data.get('xpath'): | ||||
|                     # @todo split by || ? | ||||
|                     extras['include_filters'] = [data.get('xpath')] | ||||
|                 if data.get('name'): | ||||
|                     extras['title'] = data.get('name').strip() | ||||
|                 if data.get('interval (min)'): | ||||
|                     minutes = int(data.get('interval (min)')) | ||||
|                     hours, minutes = divmod(minutes, 60) | ||||
|                     days, hours = divmod(hours, 24) | ||||
|                     weeks, days = divmod(days, 7) | ||||
|                     extras['time_between_check'] = {'weeks': weeks, 'days': days, 'hours': hours, 'minutes': minutes, 'seconds': 0} | ||||
|  | ||||
|                 # At minimum a URL is required. | ||||
|                 if data.get('url'): | ||||
|                     try: | ||||
|                         validate_url(data.get('url')) | ||||
|                     except ValidationError as e: | ||||
|                         print(">> import URL error", data.get('url'), str(e)) | ||||
|                         flash(f"Error processing row number {row_id}, URL value was incorrect, row was skipped.", 'error') | ||||
|                         # Don't bother processing anything else on this row | ||||
|                         continue | ||||
|  | ||||
|                     new_uuid = datastore.add_watch(url=data['url'].strip(), | ||||
|                                                    extras=extras, | ||||
|                                                    tag=data.get('folder'), | ||||
|                                                    write_to_disk_now=False) | ||||
|                     if new_uuid: | ||||
|                         # Straight into the queue. | ||||
|                         self.new_uuids.append(new_uuid) | ||||
|                         good += 1 | ||||
|             except Exception as e: | ||||
|                 print(e) | ||||
|                 flash(f"Error processing row number {row_id}, check all cell data types are correct, row was skipped.", 'error') | ||||
|             else: | ||||
|                 row_id += 1 | ||||
|  | ||||
|         flash( | ||||
|             "{} imported from Wachete .xlsx in {:.2f}s".format(len(self.new_uuids), time.time() - now)) | ||||
|  | ||||
|  | ||||
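| The interval conversion above is just repeated divmod; for example, a Wachete interval of 10100 minutes becomes 1 week, 0 days, 0 hours and 20 minutes: | ||||
|  | ||||
| minutes = 10100                        # raw 'interval (min)' cell value | ||||
| hours, minutes = divmod(minutes, 60)   # 168 h, 20 min | ||||
| days, hours = divmod(hours, 24)        # 7 d, 0 h | ||||
| weeks, days = divmod(days, 7)          # 1 w, 0 d | ||||
| assert (weeks, days, hours, minutes) == (1, 0, 0, 20) | ||||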
| class import_xlsx_custom(Importer): | ||||
|  | ||||
|     def run(self, | ||||
|             data, | ||||
|             flash, | ||||
|             datastore, | ||||
|             ): | ||||
|  | ||||
|         good = 0 | ||||
|         now = time.time() | ||||
|         self.new_uuids = [] | ||||
|  | ||||
|         from openpyxl import load_workbook | ||||
|  | ||||
|         try: | ||||
|             wb = load_workbook(data) | ||||
|         except Exception as e: | ||||
|             # @todo correct except | ||||
|             flash("Unable to read the exported XLSX file, is something wrong with the file?", 'error') | ||||
|             return | ||||
|  | ||||
|         # @todo check at least 2 rows, same in other method | ||||
|         from .forms import validate_url | ||||
|         row_i = 1 | ||||
|  | ||||
|         try: | ||||
|             for row in wb.active.iter_rows(): | ||||
|                 url = None | ||||
|                 tags = None | ||||
|                 extras = {} | ||||
|  | ||||
|                 for cell in row: | ||||
|                     if not self.import_profile.get(cell.col_idx): | ||||
|                         continue | ||||
|                     if not cell.value: | ||||
|                         continue | ||||
|  | ||||
|                     cell_map = self.import_profile.get(cell.col_idx) | ||||
|  | ||||
|                     cell_val = str(cell.value).strip()  # could be bool | ||||
|  | ||||
|                     if cell_map == 'url': | ||||
|                         url = cell.value.strip() | ||||
|                         try: | ||||
|                             validate_url(url) | ||||
|                         except ValidationError as e: | ||||
|                             print(">> Import URL error", url, str(e)) | ||||
|                             flash(f"Error processing row number {row_i}, URL value was incorrect, row was skipped.", 'error') | ||||
|                             # Don't bother processing anything else on this row | ||||
|                             url = None | ||||
|                             break | ||||
|                     elif cell_map == 'tag': | ||||
|                         tags = cell.value.strip() | ||||
|                     elif cell_map == 'include_filters': | ||||
|                         # @todo validate? | ||||
|                         extras['include_filters'] = [cell.value.strip()] | ||||
|                     elif cell_map == 'interval_minutes': | ||||
|                         hours, minutes = divmod(int(cell_val), 60) | ||||
|                         days, hours = divmod(hours, 24) | ||||
|                         weeks, days = divmod(days, 7) | ||||
|                         extras['time_between_check'] = {'weeks': weeks, 'days': days, 'hours': hours, 'minutes': minutes, 'seconds': 0} | ||||
|                     else: | ||||
|                         extras[cell_map] = cell_val | ||||
|  | ||||
|                 # At minimum a URL is required. | ||||
|                 if url: | ||||
|                     new_uuid = datastore.add_watch(url=url, | ||||
|                                                    extras=extras, | ||||
|                                                    tag=tags, | ||||
|                                                    write_to_disk_now=False) | ||||
|                     if new_uuid: | ||||
|                         # Straight into the queue. | ||||
|                         self.new_uuids.append(new_uuid) | ||||
|                         good += 1 | ||||
|         except Exception as e: | ||||
|             print(e) | ||||
|             flash(f"Error processing row number {row_i}, check all cell data types are correct, row was skipped.", 'error') | ||||
|         else: | ||||
|             row_i += 1 | ||||
|  | ||||
|         flash( | ||||
|             "{} imported from custom .xlsx in {:.2f}s".format(len(self.new_uuids), time.time() - now)) | ||||
|   | ||||
| @@ -16,6 +16,7 @@ class model(dict): | ||||
|                 }, | ||||
|                 'requests': { | ||||
|                     'extra_proxies': [], # Configurable extra proxies via the UI | ||||
|                     'extra_browsers': [],  # Configurable extra browsers via the UI | ||||
|                     'jitter_seconds': 0, | ||||
|                     'proxy': None, # Preferred proxy connection | ||||
|                     'time_between_check': {'weeks': None, 'days': None, 'hours': 3, 'minutes': None, 'seconds': None}, | ||||
|   | ||||
| @@ -4,6 +4,7 @@ import os | ||||
| import re | ||||
| import time | ||||
| import uuid | ||||
| from pathlib import Path | ||||
|  | ||||
| # Allowable protocols, protects against javascript: etc | ||||
| # file:// is further checked by ALLOW_FILE_URI | ||||
| @@ -18,6 +19,8 @@ from changedetectionio.notification import ( | ||||
|  | ||||
| base_config = { | ||||
|     'body': None, | ||||
|     'browser_steps': [], | ||||
|     'browser_steps_last_error_step': None, | ||||
|     'check_unique_lines': False,  # On change-detected, compare against all history if it's something new | ||||
|     'check_count': 0, | ||||
|     'date_created': None, | ||||
| @@ -25,6 +28,7 @@ base_config = { | ||||
|     'extract_text': [],  # Extract text by regex after filters | ||||
|     'extract_title_as_title': False, | ||||
|     'fetch_backend': 'system', # plaintext, playwright etc | ||||
|     'fetch_time': 0.0, | ||||
|     'processor': 'text_json_diff', # could be restock_diff or others from .processors | ||||
|     'filter_failure_notification_send': strtobool(os.getenv('FILTER_FAILURE_NOTIFICATION_SEND_DEFAULT', 'True')), | ||||
|     'filter_text_added': True, | ||||
| @@ -142,8 +146,14 @@ class model(dict): | ||||
|                 flash(message, 'error') | ||||
|                 return '' | ||||
|  | ||||
|         if ready_url.startswith('source:'): | ||||
|             ready_url=ready_url.replace('source:', '') | ||||
|         return ready_url | ||||
|  | ||||
|     @property | ||||
|     def is_source_type_url(self): | ||||
|         return self.get('url', '').startswith('source:') | ||||
|  | ||||
|     @property | ||||
|     def get_fetch_backend(self): | ||||
|         """ | ||||
| @@ -167,9 +177,7 @@ class model(dict): | ||||
|     @property | ||||
|     def label(self): | ||||
|         # Used for sorting | ||||
|         if self['title']: | ||||
|             return self['title'] | ||||
|         return self['url'] | ||||
|         return self.get('title') if self.get('title') else self.get('url') | ||||
|  | ||||
|     @property | ||||
|     def last_changed(self): | ||||
| @@ -233,6 +241,14 @@ class model(dict): | ||||
|         fname = os.path.join(self.watch_data_dir, "history.txt") | ||||
|         return os.path.isfile(fname) | ||||
|  | ||||
|     @property | ||||
|     def has_browser_steps(self): | ||||
|         has_browser_steps = self.get('browser_steps') and list(filter( | ||||
|                 lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'), | ||||
|                 self.get('browser_steps'))) | ||||
|  | ||||
|         return has_browser_steps | ||||
|  | ||||
|     # Returns the newest key, but if there's only 1 record, then it's counted as not being new, so return 0. | ||||
|     @property | ||||
|     def newest_history_key(self): | ||||
| @@ -246,6 +262,38 @@ class model(dict): | ||||
|         bump = self.history | ||||
|         return self.__newest_history_key | ||||
|  | ||||
|     # Given an arbitrary timestamp, find the closest next snapshot key | ||||
|     # For example, if last_viewed = 1000 it should return the next key at 1001 | ||||
|     # | ||||
|     # Used for the [diff] button so it can preset a smarter from_version | ||||
|     @property | ||||
|     def get_next_snapshot_key_to_last_viewed(self): | ||||
|  | ||||
|         """Unfortunately, for now the timestamp is stored as a string key""" | ||||
|         keys = list(self.history.keys()) | ||||
|         if not keys: | ||||
|             return None | ||||
|  | ||||
|         last_viewed = int(self.get('last_viewed')) | ||||
|         prev_k = keys[0] | ||||
|         sorted_keys = sorted(keys, key=lambda x: int(x)) | ||||
|         sorted_keys.reverse() | ||||
|  | ||||
|         # When the 'last viewed' timestamp is newer than the newest snapshot, return the second newest | ||||
|         if last_viewed > int(sorted_keys[0]): | ||||
|             return sorted_keys[1] | ||||
|  | ||||
|         for k in sorted_keys: | ||||
|             if int(k) < last_viewed: | ||||
|                 if prev_k == sorted_keys[0]: | ||||
|                     # Return the second newest so we don't recommend comparing the same version against itself | ||||
|                     return sorted_keys[1] | ||||
|  | ||||
|                 return prev_k | ||||
|             prev_k = k | ||||
|  | ||||
|         return keys[0] | ||||
|  | ||||
|     def get_history_snapshot(self, timestamp): | ||||
|         import brotli | ||||
|         filepath = self.history[timestamp] | ||||
| @@ -491,3 +539,13 @@ class model(dict): | ||||
|         filepath = os.path.join(self.watch_data_dir, 'last-fetched.br') | ||||
|         with open(filepath, 'wb') as f: | ||||
|             f.write(brotli.compress(contents, mode=brotli.MODE_TEXT)) | ||||
|  | ||||
|     @property | ||||
|     def get_browsersteps_available_screenshots(self): | ||||
|         "For knowing which screenshots are available to show the user in BrowserSteps UI" | ||||
|         available = [] | ||||
|         for f in Path(self.watch_data_dir).glob('step_before-*.jpeg'): | ||||
|             step_n = re.search(r'step_before-(\d+)', f.name) | ||||
|             if step_n: | ||||
|                 available.append(step_n.group(1)) | ||||
|         return available | ||||
|   | ||||
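The `get_next_snapshot_key_to_last_viewed` search above is easier to follow in isolation. A minimal sketch with plain data (history keys are timestamps stored as strings; like the original, the "newer than everything" branch assumes at least two snapshots exist):

    def next_snapshot_key(keys, last_viewed):
        if not keys:
            return None
        sorted_keys = sorted(keys, key=int, reverse=True)
        # 'last viewed' is newer than the newest snapshot: suggest the second newest
        if last_viewed > int(sorted_keys[0]):
            return sorted_keys[1]
        prev_k = sorted_keys[0]
        for k in sorted_keys:
            if int(k) < last_viewed:
                # Don't recommend comparing the newest version against itself
                return sorted_keys[1] if prev_k == sorted_keys[0] else prev_k
            prev_k = k
        return keys[0]

    # Snapshots at 900, 1001 and 1500, last viewed at 1000 -> '1001'
    print(next_snapshot_key(['900', '1001', '1500'], 1000))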
| @@ -1,15 +1,122 @@ | ||||
| from abc import abstractmethod | ||||
| import os | ||||
| import hashlib | ||||
|  | ||||
| import re | ||||
| from changedetectionio import content_fetcher | ||||
| from copy import deepcopy | ||||
| from distutils.util import strtobool | ||||
|  | ||||
| class difference_detection_processor(): | ||||
|  | ||||
|     browser_steps = None | ||||
|     datastore = None | ||||
|     fetcher = None | ||||
|     screenshot = None | ||||
|     watch = None | ||||
|     xpath_data = None | ||||
|  | ||||
|     def __init__(self, *args, **kwargs): | ||||
|     def __init__(self, *args, datastore, watch_uuid, **kwargs): | ||||
|         super().__init__(*args, **kwargs) | ||||
|         self.datastore = datastore | ||||
|         self.watch = deepcopy(self.datastore.data['watching'].get(watch_uuid)) | ||||
|  | ||||
|     def call_browser(self): | ||||
|  | ||||
|         # Protect against file:// access | ||||
|         if re.search(r'^file://', self.watch.get('url', '').strip(), re.IGNORECASE): | ||||
|             if not strtobool(os.getenv('ALLOW_FILE_URI', 'false')): | ||||
|                 raise Exception( | ||||
|                     "file:// type access is denied for security reasons." | ||||
|                 ) | ||||
|  | ||||
|         url = self.watch.link | ||||
|  | ||||
|         # Requests, playwright, other browser via wss:// etc, fetch_extra_something | ||||
|         prefer_fetch_backend = self.watch.get('fetch_backend', 'system') | ||||
|  | ||||
|         # Proxy ID "key" | ||||
|         preferred_proxy_id = self.datastore.get_preferred_proxy_for_watch(uuid=self.watch.get('uuid')) | ||||
|  | ||||
|         # Pluggable content self.fetcher | ||||
|         if not prefer_fetch_backend or prefer_fetch_backend == 'system': | ||||
|             prefer_fetch_backend = self.datastore.data['settings']['application'].get('fetch_backend') | ||||
|  | ||||
|         # In the case that the preferred fetcher was a browser config with custom connection URL.. | ||||
|         # @todo - on save watch, if it's extra_browser_ then it should be obvious it will use playwright (like it is for requests now..) | ||||
|         browser_connection_url = None | ||||
|         if prefer_fetch_backend.startswith('extra_browser_'): | ||||
|             (t, key) = prefer_fetch_backend.split('extra_browser_') | ||||
|             connection = list( | ||||
|                 filter(lambda s: (s['browser_name'] == key), self.datastore.data['settings']['requests'].get('extra_browsers', []))) | ||||
|             if connection: | ||||
|                 prefer_fetch_backend = 'base_html_playwright' | ||||
|                 browser_connection_url = connection[0].get('browser_connection_url') | ||||
|  | ||||
|  | ||||
|         # Grab the right kind of 'fetcher', (playwright, requests, etc) | ||||
|         if hasattr(content_fetcher, prefer_fetch_backend): | ||||
|             fetcher_obj = getattr(content_fetcher, prefer_fetch_backend) | ||||
|         else: | ||||
|             # If the klass doesn't exist, just use a default | ||||
|             fetcher_obj = getattr(content_fetcher, "html_requests") | ||||
|  | ||||
|  | ||||
|         proxy_url = None | ||||
|         if preferred_proxy_id: | ||||
|             proxy_url = self.datastore.proxy_list.get(preferred_proxy_id).get('url') | ||||
|             print(f"Using proxy Key: {preferred_proxy_id} as Proxy URL {proxy_url}") | ||||
|  | ||||
|         # Now call the fetcher (playwright/requests/etc) with arguments that only a fetcher would need. | ||||
|         # When browser_connection_url is None, the method should default to working out the best defaults (OS env vars etc) | ||||
|         self.fetcher = fetcher_obj(proxy_override=proxy_url, | ||||
|                                    browser_connection_url=browser_connection_url | ||||
|                                    ) | ||||
|  | ||||
|         if self.watch.has_browser_steps: | ||||
|             self.fetcher.browser_steps = self.watch.get('browser_steps', []) | ||||
|             self.fetcher.browser_steps_screenshot_path = os.path.join(self.datastore.datastore_path, self.watch.get('uuid')) | ||||
|  | ||||
|         # Tweak the base config with the per-watch ones | ||||
|         request_headers = self.watch.get('headers', []) | ||||
|         request_headers.update(self.datastore.get_all_base_headers()) | ||||
|         request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=self.watch.get('uuid'))) | ||||
|  | ||||
|         # https://github.com/psf/requests/issues/4525 | ||||
|         # Requests doesn't yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot | ||||
|         # do this by accident. | ||||
|         if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']: | ||||
|             request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '') | ||||
|  | ||||
|         timeout = self.datastore.data['settings']['requests'].get('timeout') | ||||
|  | ||||
|         request_body = self.watch.get('body') | ||||
|         request_method = self.watch.get('method') | ||||
|         ignore_status_codes = self.watch.get('ignore_status_codes', False) | ||||
|  | ||||
|         # Configurable per-watch or global extra delay before extracting text (for webDriver types) | ||||
|         system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None) | ||||
|         if self.watch.get('webdriver_delay'): | ||||
|             self.fetcher.render_extract_delay = self.watch.get('webdriver_delay') | ||||
|         elif system_webdriver_delay is not None: | ||||
|             self.fetcher.render_extract_delay = system_webdriver_delay | ||||
|  | ||||
|         if self.watch.get('webdriver_js_execute_code') is not None and self.watch.get('webdriver_js_execute_code').strip(): | ||||
|             self.fetcher.webdriver_js_execute_code = self.watch.get('webdriver_js_execute_code') | ||||
|  | ||||
|         # Requests for PDFs, images etc should be passed the is_binary flag | ||||
|         is_binary = self.watch.is_pdf | ||||
|  | ||||
|         # And here we go! call the right browser with browser-specific settings | ||||
|         self.fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, self.watch.get('include_filters'), | ||||
|                     is_binary=is_binary) | ||||
|  | ||||
|         # @todo .quit could be moved to the object's close() so we can run JS if a change is detected | ||||
|         self.fetcher.quit() | ||||
|  | ||||
|         # After init, call run_changedetection() which will do the actual change-detection | ||||
|  | ||||
|     @abstractmethod | ||||
|     def run(self, uuid, skip_when_checksum_same=True, preferred_proxy=None): | ||||
|     def run_changedetection(self, uuid, skip_when_checksum_same=True): | ||||
|         update_obj = {'last_notification_error': False, 'last_error': False} | ||||
|         some_data = 'xxxxx' | ||||
|         update_obj["previous_md5"] = hashlib.md5(some_data.encode('utf-8')).hexdigest() | ||||
|   | ||||
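Taken together, the refactor means a concrete processor is now driven in two phases: the shared call_browser() does the fetch, then the processor-specific run_changedetection() does the comparison. A hedged sketch of that calling sequence, assuming a `datastore` object and a watch `uuid` as in the surrounding code (error handling omitted):

    from changedetectionio.processors import text_json_diff

    # Construct against the shared base class signature shown above
    processor = text_json_diff.perform_site_check(datastore=datastore, watch_uuid=uuid)
    processor.call_browser()  # fetch via requests/playwright/custom browser endpoint
    changed_detected, update_obj, contents = processor.run_changedetection(uuid=uuid)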
| @@ -1,10 +1,7 @@ | ||||
|  | ||||
| import hashlib | ||||
| import os | ||||
| import re | ||||
| import urllib3 | ||||
| from . import difference_detection_processor | ||||
| from changedetectionio import content_fetcher | ||||
| from copy import deepcopy | ||||
|  | ||||
| urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) | ||||
| @@ -22,11 +19,7 @@ class perform_site_check(difference_detection_processor): | ||||
|     screenshot = None | ||||
|     xpath_data = None | ||||
|  | ||||
|     def __init__(self, *args, datastore, **kwargs): | ||||
|         super().__init__(*args, **kwargs) | ||||
|         self.datastore = datastore | ||||
|  | ||||
|     def run(self, uuid, skip_when_checksum_same=True): | ||||
|     def run_changedetection(self, uuid, skip_when_checksum_same=True): | ||||
|  | ||||
|         # DeepCopy so we can be sure we don't accidentally change anything by reference | ||||
|         watch = deepcopy(self.datastore.data['watching'].get(uuid)) | ||||
| @@ -34,84 +27,24 @@ class perform_site_check(difference_detection_processor): | ||||
|         if not watch: | ||||
|             raise Exception("Watch no longer exists.") | ||||
|  | ||||
|         # Protect against file:// access | ||||
|         if re.search(r'^file', watch.get('url', ''), re.IGNORECASE) and not os.getenv('ALLOW_FILE_URI', False): | ||||
|             raise Exception( | ||||
|                 "file:// type access is denied for security reasons." | ||||
|             ) | ||||
|  | ||||
|         # Unset any existing notification error | ||||
|         update_obj = {'last_notification_error': False, 'last_error': False} | ||||
|  | ||||
|         request_headers = watch.get('headers', []) | ||||
|         request_headers.update(self.datastore.get_all_base_headers()) | ||||
|         request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=uuid)) | ||||
|  | ||||
|         # https://github.com/psf/requests/issues/4525 | ||||
|         # Requests doesn't yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot | ||||
|         # do this by accident. | ||||
|         if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']: | ||||
|             request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '') | ||||
|  | ||||
|         timeout = self.datastore.data['settings']['requests'].get('timeout') | ||||
|  | ||||
|         url = watch.link | ||||
|  | ||||
|         request_body = self.datastore.data['watching'][uuid].get('body') | ||||
|         request_method = self.datastore.data['watching'][uuid].get('method') | ||||
|         ignore_status_codes = self.datastore.data['watching'][uuid].get('ignore_status_codes', False) | ||||
|  | ||||
|         # Pluggable content fetcher | ||||
|         prefer_backend = watch.get_fetch_backend | ||||
|         if not prefer_backend or prefer_backend == 'system': | ||||
|             prefer_backend = self.datastore.data['settings']['application']['fetch_backend'] | ||||
|  | ||||
|         if hasattr(content_fetcher, prefer_backend): | ||||
|             klass = getattr(content_fetcher, prefer_backend) | ||||
|         else: | ||||
|             # If the klass doesn't exist, just use a default | ||||
|             klass = getattr(content_fetcher, "html_requests") | ||||
|  | ||||
|         proxy_id = self.datastore.get_preferred_proxy_for_watch(uuid=uuid) | ||||
|         proxy_url = None | ||||
|         if proxy_id: | ||||
|             proxy_url = self.datastore.proxy_list.get(proxy_id).get('url') | ||||
|             print("UUID {} Using proxy {}".format(uuid, proxy_url)) | ||||
|  | ||||
|         fetcher = klass(proxy_override=proxy_url) | ||||
|  | ||||
|         # Configurable per-watch or global extra delay before extracting text (for webDriver types) | ||||
|         system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None) | ||||
|         if watch['webdriver_delay'] is not None: | ||||
|             fetcher.render_extract_delay = watch.get('webdriver_delay') | ||||
|         elif system_webdriver_delay is not None: | ||||
|             fetcher.render_extract_delay = system_webdriver_delay | ||||
|  | ||||
|         # Could be removed if requests/plaintext could also return some info? | ||||
|         if prefer_backend != 'html_webdriver': | ||||
|             raise Exception("Re-stock detection requires Chrome or compatible webdriver/playwright fetcher to work") | ||||
|  | ||||
|         if watch.get('webdriver_js_execute_code') is not None and watch.get('webdriver_js_execute_code').strip(): | ||||
|             fetcher.webdriver_js_execute_code = watch.get('webdriver_js_execute_code') | ||||
|  | ||||
|         fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, watch.get('include_filters')) | ||||
|         fetcher.quit() | ||||
|  | ||||
|         self.screenshot = fetcher.screenshot | ||||
|         self.xpath_data = fetcher.xpath_data | ||||
|         self.screenshot = self.fetcher.screenshot | ||||
|         self.xpath_data = self.fetcher.xpath_data | ||||
|  | ||||
|         # Track the content type | ||||
|         update_obj['content_type'] = fetcher.headers.get('Content-Type', '') | ||||
|         update_obj["last_check_status"] = fetcher.get_last_status_code() | ||||
|         update_obj['content_type'] = self.fetcher.headers.get('Content-Type', '') | ||||
|         update_obj["last_check_status"] = self.fetcher.get_last_status_code() | ||||
|  | ||||
|         # Main detection method | ||||
|         fetched_md5 = None | ||||
|         if fetcher.instock_data: | ||||
|             fetched_md5 = hashlib.md5(fetcher.instock_data.encode('utf-8')).hexdigest() | ||||
|         if self.fetcher.instock_data: | ||||
|             fetched_md5 = hashlib.md5(self.fetcher.instock_data.encode('utf-8')).hexdigest() | ||||
|             # 'Possibly in stock' comes from stock-not-in-stock.js when no string found above the fold. | ||||
|             update_obj["in_stock"] = True if fetcher.instock_data == 'Possibly in stock' else False | ||||
|             update_obj["in_stock"] = True if self.fetcher.instock_data == 'Possibly in stock' else False | ||||
|         else: | ||||
|             raise UnableToExtractRestockData(status_code=fetcher.status_code) | ||||
|             raise UnableToExtractRestockData(status_code=self.fetcher.status_code) | ||||
|  | ||||
|         # The main thing that all this at the moment comes down to :) | ||||
|         changed_detected = False | ||||
| @@ -128,4 +61,4 @@ class perform_site_check(difference_detection_processor): | ||||
|         # Always record the new checksum | ||||
|         update_obj["previous_md5"] = fetched_md5 | ||||
|  | ||||
|         return changed_detected, update_obj, fetcher.instock_data.encode('utf-8') | ||||
|         return changed_detected, update_obj, self.fetcher.instock_data.encode('utf-8') | ||||
|   | ||||
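The restock decision above boils down to hashing the text scraped by stock-not-in-stock.js and comparing it with the previously stored checksum (the actual comparison sits in the elided middle of the hunk). A minimal sketch of what that comparison comes down to, under that assumption; the stored value here is invented, while the 'Possibly in stock' sentinel is the one used above:

    import hashlib

    instock_data = 'Possibly in stock'
    fetched_md5 = hashlib.md5(instock_data.encode('utf-8')).hexdigest()

    previous_md5 = 'd41d8cd98f00b204e9800998ecf8427e'  # hypothetical value recorded on the last check
    changed_detected = fetched_md5 != previous_md5
    in_stock = instock_data == 'Possibly in stock'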
| @@ -1,4 +1,4 @@ | ||||
| # HTML to TEXT/JSON DIFFERENCE FETCHER | ||||
| # HTML to TEXT/JSON DIFFERENCE fetcher | ||||
|  | ||||
| import hashlib | ||||
| import json | ||||
| @@ -11,17 +11,19 @@ from changedetectionio import content_fetcher, html_tools | ||||
| from changedetectionio.blueprint.price_data_follower import PRICE_DATA_TRACK_ACCEPT, PRICE_DATA_TRACK_REJECT | ||||
| from copy import deepcopy | ||||
| from . import difference_detection_processor | ||||
| from ..html_tools import PERL_STYLE_REGEX, cdata_in_document_to_text | ||||
|  | ||||
| urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) | ||||
|  | ||||
|  | ||||
| name =  'Webpage Text/HTML, JSON and PDF changes' | ||||
| name = 'Webpage Text/HTML, JSON and PDF changes' | ||||
| description = 'Detects all text changes where possible' | ||||
| json_filter_prefixes = ['json:', 'jq:'] | ||||
|  | ||||
| class FilterNotFoundInResponse(ValueError): | ||||
|     def __init__(self, msg): | ||||
|         ValueError.__init__(self, msg) | ||||
|  | ||||
|  | ||||
| class PDFToHTMLToolNotFound(ValueError): | ||||
|     def __init__(self, msg): | ||||
|         ValueError.__init__(self, msg) | ||||
| @@ -30,28 +32,10 @@ class PDFToHTMLToolNotFound(ValueError): | ||||
| # Some common stuff here that can be moved to a base class | ||||
| # (set_proxy_from_list) | ||||
| class perform_site_check(difference_detection_processor): | ||||
|     screenshot = None | ||||
|     xpath_data = None | ||||
|  | ||||
|     def __init__(self, *args, datastore, **kwargs): | ||||
|         super().__init__(*args, **kwargs) | ||||
|         self.datastore = datastore | ||||
|  | ||||
|     # Doesn't look like python supports forward slash auto enclosure in re.findall | ||||
|     # So convert it to inline flag "foobar(?i)" type configuration | ||||
|     def forward_slash_enclosed_regex_to_options(self, regex): | ||||
|         res = re.search(r'^/(.*?)/(\w+)$', regex, re.IGNORECASE) | ||||
|  | ||||
|         if res: | ||||
|             regex = res.group(1) | ||||
|             regex += '(?{})'.format(res.group(2)) | ||||
|         else: | ||||
|             regex += '(?{})'.format('i') | ||||
|  | ||||
|         return regex | ||||
|  | ||||
|     def run(self, uuid, skip_when_checksum_same=True, preferred_proxy=None): | ||||
|     def run_changedetection(self, uuid, skip_when_checksum_same=True): | ||||
|         changed_detected = False | ||||
|         html_content = "" | ||||
|         screenshot = False  # as bytes | ||||
|         stripped_text_from_html = "" | ||||
|  | ||||
| @@ -60,100 +44,25 @@ class perform_site_check(difference_detection_processor): | ||||
|         if not watch: | ||||
|             raise Exception("Watch no longer exists.") | ||||
|  | ||||
|         # Protect against file:// access | ||||
|         if re.search(r'^file', watch.get('url', ''), re.IGNORECASE) and not os.getenv('ALLOW_FILE_URI', False): | ||||
|             raise Exception( | ||||
|                 "file:// type access is denied for security reasons." | ||||
|             ) | ||||
|  | ||||
|         # Unset any existing notification error | ||||
|         update_obj = {'last_notification_error': False, 'last_error': False} | ||||
|  | ||||
|         # Tweak the base config with the per-watch ones | ||||
|         request_headers = watch.get('headers', []) | ||||
|         request_headers.update(self.datastore.get_all_base_headers()) | ||||
|         request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=uuid)) | ||||
|  | ||||
|         # https://github.com/psf/requests/issues/4525 | ||||
|         # Requests doesn't yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot | ||||
|         # do this by accident. | ||||
|         if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']: | ||||
|             request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '') | ||||
|  | ||||
|         timeout = self.datastore.data['settings']['requests'].get('timeout') | ||||
|  | ||||
|         url = watch.link | ||||
|  | ||||
|         request_body = self.datastore.data['watching'][uuid].get('body') | ||||
|         request_method = self.datastore.data['watching'][uuid].get('method') | ||||
|         ignore_status_codes = self.datastore.data['watching'][uuid].get('ignore_status_codes', False) | ||||
|  | ||||
|         # source: support | ||||
|         is_source = False | ||||
|         if url.startswith('source:'): | ||||
|             url = url.replace('source:', '') | ||||
|             is_source = True | ||||
|  | ||||
|         # Pluggable content fetcher | ||||
|         prefer_backend = watch.get_fetch_backend | ||||
|         if not prefer_backend or prefer_backend == 'system': | ||||
|             prefer_backend = self.datastore.data['settings']['application']['fetch_backend'] | ||||
|  | ||||
|         if hasattr(content_fetcher, prefer_backend): | ||||
|             klass = getattr(content_fetcher, prefer_backend) | ||||
|         else: | ||||
|             # If the klass doesnt exist, just use a default | ||||
|             # If the klass doesn't exist, just use a default | ||||
|  | ||||
|         if preferred_proxy: | ||||
|             proxy_id = preferred_proxy | ||||
|         else: | ||||
|             proxy_id = self.datastore.get_preferred_proxy_for_watch(uuid=uuid) | ||||
|  | ||||
|         proxy_url = None | ||||
|         if proxy_id: | ||||
|             proxy_url = self.datastore.proxy_list.get(proxy_id).get('url') | ||||
|             print("UUID {} Using proxy {}".format(uuid, proxy_url)) | ||||
|  | ||||
|         fetcher = klass(proxy_override=proxy_url) | ||||
|  | ||||
|         # Configurable per-watch or global extra delay before extracting text (for webDriver types) | ||||
|         system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None) | ||||
|         if watch['webdriver_delay'] is not None: | ||||
|             fetcher.render_extract_delay = watch.get('webdriver_delay') | ||||
|         elif system_webdriver_delay is not None: | ||||
|             fetcher.render_extract_delay = system_webdriver_delay | ||||
|  | ||||
|         # Possible conflict | ||||
|         if prefer_backend == 'html_webdriver': | ||||
|             fetcher.browser_steps = watch.get('browser_steps', None) | ||||
|             fetcher.browser_steps_screenshot_path = os.path.join(self.datastore.datastore_path, uuid) | ||||
|  | ||||
|         if watch.get('webdriver_js_execute_code') is not None and watch.get('webdriver_js_execute_code').strip(): | ||||
|             fetcher.webdriver_js_execute_code = watch.get('webdriver_js_execute_code') | ||||
|  | ||||
|         # requests for PDFs, images etc should be passed the is_binary flag | ||||
|         is_binary = watch.is_pdf | ||||
|  | ||||
|         fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, watch.get('include_filters'), is_binary=is_binary) | ||||
|         fetcher.quit() | ||||
|  | ||||
|         self.screenshot = fetcher.screenshot | ||||
|         self.xpath_data = fetcher.xpath_data | ||||
|         self.screenshot = self.fetcher.screenshot | ||||
|         self.xpath_data = self.fetcher.xpath_data | ||||
|  | ||||
|         # Track the content type | ||||
|         update_obj['content_type'] = fetcher.get_all_headers().get('content-type', '').lower() | ||||
|         update_obj['content_type'] = self.fetcher.get_all_headers().get('content-type', '').lower() | ||||
|  | ||||
|         # Watches added automatically in the queue manager will skip if it's the same checksum as the previous run | ||||
|         # Saves a lot of CPU | ||||
|         update_obj['previous_md5_before_filters'] = hashlib.md5(fetcher.content.encode('utf-8')).hexdigest() | ||||
|         update_obj['previous_md5_before_filters'] = hashlib.md5(self.fetcher.content.encode('utf-8')).hexdigest() | ||||
|         if skip_when_checksum_same: | ||||
|             if update_obj['previous_md5_before_filters'] == watch.get('previous_md5_before_filters'): | ||||
|                 raise content_fetcher.checksumFromPreviousCheckWasTheSame() | ||||
|  | ||||
|  | ||||
|         # Fetching complete, now filters | ||||
|         # @todo move to class / maybe inside of fetcher abstract base? | ||||
|  | ||||
|         # @note: I feel like the following should be in a more obvious chain system | ||||
|         #  - Check filter text | ||||
| @@ -162,15 +71,24 @@ class perform_site_check(difference_detection_processor): | ||||
|         # https://stackoverflow.com/questions/41817578/basic-method-chaining ? | ||||
|         # return content().textfilter().jsonextract().checksumcompare() ? | ||||
|  | ||||
|         is_json = 'application/json' in fetcher.get_all_headers().get('content-type', '').lower() | ||||
|         is_json = 'application/json' in self.fetcher.get_all_headers().get('content-type', '').lower() | ||||
|         is_html = not is_json | ||||
|         is_rss = False | ||||
|  | ||||
|         ctype_header = self.fetcher.get_all_headers().get('content-type', '').lower() | ||||
|         # Go into RSS preprocessing to convert CDATA/comments into usable text | ||||
|         if any(substring in ctype_header for substring in ['application/xml', 'application/rss', 'text/xml']): | ||||
|             if '<rss' in self.fetcher.content[:100].lower(): | ||||
|                 self.fetcher.content = cdata_in_document_to_text(html_content=self.fetcher.content) | ||||
|                 is_rss = True | ||||
|  | ||||
|         # source: support, basically treat it as plaintext | ||||
|         if is_source: | ||||
|         if watch.is_source_type_url: | ||||
|             is_html = False | ||||
|             is_json = False | ||||
|  | ||||
|         if watch.is_pdf or 'application/pdf' in fetcher.get_all_headers().get('content-type', '').lower(): | ||||
|         inline_pdf = self.fetcher.get_all_headers().get('content-disposition', '') and '%PDF-1' in self.fetcher.content[:10] | ||||
|         if watch.is_pdf or 'application/pdf' in self.fetcher.get_all_headers().get('content-type', '').lower() or inline_pdf: | ||||
|             from shutil import which | ||||
|             tool = os.getenv("PDF_TO_HTML_TOOL", "pdftohtml") | ||||
|             if not which(tool): | ||||
| @@ -181,18 +99,18 @@ class perform_site_check(difference_detection_processor): | ||||
|                 [tool, '-stdout', '-', '-s', 'out.pdf', '-i'], | ||||
|                 stdout=subprocess.PIPE, | ||||
|                 stdin=subprocess.PIPE) | ||||
|             proc.stdin.write(fetcher.raw_content) | ||||
|             proc.stdin.write(self.fetcher.raw_content) | ||||
|             proc.stdin.close() | ||||
|             fetcher.content = proc.stdout.read().decode('utf-8') | ||||
|             self.fetcher.content = proc.stdout.read().decode('utf-8') | ||||
|             proc.wait(timeout=60) | ||||
|  | ||||
|             # Add a little metadata so we know if the file changes (like if an image changes, but the text is the same) | ||||
|             # @todo may cause problems with non-UTF8? | ||||
|             metadata = "<p>Added by changedetection.io: Document checksum - {} Filesize - {} bytes</p>".format( | ||||
|                 hashlib.md5(fetcher.raw_content).hexdigest().upper(), | ||||
|                 len(fetcher.content)) | ||||
|                 hashlib.md5(self.fetcher.raw_content).hexdigest().upper(), | ||||
|                 len(self.fetcher.content)) | ||||
|  | ||||
|             fetcher.content = fetcher.content.replace('</body>', metadata + '</body>') | ||||
|             self.fetcher.content = self.fetcher.content.replace('</body>', metadata + '</body>') | ||||
|  | ||||
|         # Better would be if Watch.model could access the global data also | ||||
|         # and then use getattr https://docs.python.org/3/reference/datamodel.html#object.__getitem__ | ||||
| @@ -207,7 +125,7 @@ class perform_site_check(difference_detection_processor): | ||||
|  | ||||
|         # Inject a virtual LD+JSON price tracker rule | ||||
|         if watch.get('track_ldjson_price_data', '') == PRICE_DATA_TRACK_ACCEPT: | ||||
|             include_filters_rule.append(html_tools.LD_JSON_PRODUCT_OFFER_SELECTOR) | ||||
|             include_filters_rule += html_tools.LD_JSON_PRODUCT_OFFER_SELECTORS | ||||
|  | ||||
|         has_filter_rule = len(include_filters_rule) and len(include_filters_rule[0].strip()) | ||||
|         has_subtractive_selectors = len(subtractive_selectors) and len(subtractive_selectors[0].strip()) | ||||
| @@ -219,33 +137,30 @@ class perform_site_check(difference_detection_processor): | ||||
|         if is_json: | ||||
|             # Sort the JSON so we don't get false alerts when the content is just re-ordered | ||||
|             try: | ||||
|                 fetcher.content = json.dumps(json.loads(fetcher.content), sort_keys=True) | ||||
|                 self.fetcher.content = json.dumps(json.loads(self.fetcher.content), sort_keys=True) | ||||
|             except Exception as e: | ||||
|                 # Might have just been a snippet, or otherwise bad JSON, continue | ||||
|                 pass | ||||
|  | ||||
|         if has_filter_rule: | ||||
|             json_filter_prefixes = ['json:', 'jq:'] | ||||
|             for filter in include_filters_rule: | ||||
|                 if any(prefix in filter for prefix in json_filter_prefixes): | ||||
|                     stripped_text_from_html += html_tools.extract_json_as_string(content=fetcher.content, json_filter=filter) | ||||
|                     stripped_text_from_html += html_tools.extract_json_as_string(content=self.fetcher.content, json_filter=filter) | ||||
|                     is_html = False | ||||
|  | ||||
|  | ||||
|  | ||||
|         if is_html or is_source: | ||||
|         if is_html or watch.is_source_type_url: | ||||
|  | ||||
|             # CSS Filter, extract the HTML that matches and feed that into the existing inscriptis::get_text | ||||
|             fetcher.content = html_tools.workarounds_for_obfuscations(fetcher.content) | ||||
|             html_content = fetcher.content | ||||
|             self.fetcher.content = html_tools.workarounds_for_obfuscations(self.fetcher.content) | ||||
|             html_content = self.fetcher.content | ||||
|  | ||||
|             # If not JSON, and if it's not text/plain.. | ||||
|             if 'text/plain' in fetcher.get_all_headers().get('content-type', '').lower(): | ||||
|             if 'text/plain' in self.fetcher.get_all_headers().get('content-type', '').lower(): | ||||
|                 # Don't run get_text or xpath/css filters on plaintext | ||||
|                 stripped_text_from_html = html_content | ||||
|             else: | ||||
|                 # Does it have some ld+json price data? used for easier monitoring | ||||
|                 update_obj['has_ldjson_price_data'] = html_tools.has_ldjson_product_info(fetcher.content) | ||||
|                 update_obj['has_ldjson_price_data'] = html_tools.has_ldjson_product_info(self.fetcher.content) | ||||
|  | ||||
|                 # Then we assume HTML | ||||
|                 if has_filter_rule: | ||||
| @@ -255,13 +170,19 @@ class perform_site_check(difference_detection_processor): | ||||
|                         # For HTML/XML we offer XPath as an option, just start with a regular XPath "/.." | ||||
|                         if filter_rule[0] == '/' or filter_rule.startswith('xpath:'): | ||||
|                             html_content += html_tools.xpath_filter(xpath_filter=filter_rule.replace('xpath:', ''), | ||||
|                                                                     html_content=fetcher.content, | ||||
|                                                                     append_pretty_line_formatting=not is_source) | ||||
|                                                                     html_content=self.fetcher.content, | ||||
|                                                                     append_pretty_line_formatting=not watch.is_source_type_url, | ||||
|                                                                     is_rss=is_rss) | ||||
|                         elif filter_rule.startswith('xpath1:'): | ||||
|                             html_content += html_tools.xpath1_filter(xpath_filter=filter_rule.replace('xpath1:', ''), | ||||
|                                                                     html_content=self.fetcher.content, | ||||
|                                                                     append_pretty_line_formatting=not watch.is_source_type_url, | ||||
|                                                                     is_rss=is_rss) | ||||
|                         else: | ||||
|                             # CSS Filter, extract the HTML that matches and feed that into the existing inscriptis::get_text | ||||
|                             html_content += html_tools.include_filters(include_filters=filter_rule, | ||||
|                                                                        html_content=fetcher.content, | ||||
|                                                                        append_pretty_line_formatting=not is_source) | ||||
|                                                                        html_content=self.fetcher.content, | ||||
|                                                                        append_pretty_line_formatting=not watch.is_source_type_url) | ||||
|  | ||||
|                     if not html_content.strip(): | ||||
|                         raise FilterNotFoundInResponse(include_filters_rule) | ||||
| @@ -269,21 +190,21 @@ class perform_site_check(difference_detection_processor): | ||||
|                 if has_subtractive_selectors: | ||||
|                     html_content = html_tools.element_removal(subtractive_selectors, html_content) | ||||
|  | ||||
|                 if is_source: | ||||
|                 if watch.is_source_type_url: | ||||
|                     stripped_text_from_html = html_content | ||||
|                 else: | ||||
|                     # extract text | ||||
|                     do_anchor = self.datastore.data["settings"]["application"].get("render_anchor_tag_content", False) | ||||
|                     stripped_text_from_html = \ | ||||
|                         html_tools.html_to_text( | ||||
|                             html_content, | ||||
|                             render_anchor_tag_content=do_anchor | ||||
|                             html_content=html_content, | ||||
|                             render_anchor_tag_content=do_anchor, | ||||
|                             is_rss=is_rss # #1874 activate the <title workaround hack | ||||
|                         ) | ||||
|  | ||||
|         # Re #340 - return the content before the 'ignore text' was applied | ||||
|         text_content_before_ignored_filter = stripped_text_from_html.encode('utf-8') | ||||
|  | ||||
|  | ||||
|         # @todo whitespace coming from missing rtrim()? | ||||
|         # stripped_text_from_html could be based on their preferences, replace the processed text with only that which they want to know about. | ||||
|         # Rewrite's the processing text based on only what diff result they want to see | ||||
| @@ -293,13 +214,13 @@ class perform_site_check(difference_detection_processor): | ||||
|             # needs to not include (added) etc or it may get used twice | ||||
|             # Replace the processed text with the preferred result | ||||
|             rendered_diff = diff.render_diff(previous_version_file_contents=watch.get_last_fetched_before_filters(), | ||||
|                                                        newest_version_file_contents=stripped_text_from_html, | ||||
|                                                        include_equal=False,  # not the same lines | ||||
|                                                        include_added=watch.get('filter_text_added', True), | ||||
|                                                        include_removed=watch.get('filter_text_removed', True), | ||||
|                                                        include_replaced=watch.get('filter_text_replaced', True), | ||||
|                                                        line_feed_sep="\n", | ||||
|                                                        include_change_type_prefix=False) | ||||
|                                              newest_version_file_contents=stripped_text_from_html, | ||||
|                                              include_equal=False,  # not the same lines | ||||
|                                              include_added=watch.get('filter_text_added', True), | ||||
|                                              include_removed=watch.get('filter_text_removed', True), | ||||
|                                              include_replaced=watch.get('filter_text_replaced', True), | ||||
|                                              line_feed_sep="\n", | ||||
|                                              include_change_type_prefix=False) | ||||
|  | ||||
|             watch.save_last_fetched_before_filters(text_content_before_ignored_filter) | ||||
|  | ||||
| @@ -314,12 +235,17 @@ class perform_site_check(difference_detection_processor): | ||||
|         # Treat pages with no renderable text content as a change? No by default | ||||
|         empty_pages_are_a_change = self.datastore.data['settings']['application'].get('empty_pages_are_a_change', False) | ||||
|         if not is_json and not empty_pages_are_a_change and len(stripped_text_from_html.strip()) == 0: | ||||
|             raise content_fetcher.ReplyWithContentButNoText(url=url, status_code=fetcher.get_last_status_code(), screenshot=screenshot) | ||||
|             raise content_fetcher.ReplyWithContentButNoText(url=url, | ||||
|                                                             status_code=self.fetcher.get_last_status_code(), | ||||
|                                                             screenshot=screenshot, | ||||
|                                                             has_filters=has_filter_rule, | ||||
|                                                             html_content=html_content | ||||
|                                                             ) | ||||
|  | ||||
|         # We rely on the actual text in the html output.. many sites have random script vars etc, | ||||
|         # in the future we'll implement other mechanisms. | ||||
|  | ||||
|         update_obj["last_check_status"] = fetcher.get_last_status_code() | ||||
|         update_obj["last_check_status"] = self.fetcher.get_last_status_code() | ||||
|  | ||||
|         # If there's text to skip | ||||
|         # @todo we could abstract out the get_text() to handle this cleaner | ||||
| @@ -335,16 +261,25 @@ class perform_site_check(difference_detection_processor): | ||||
|             regex_matched_output = [] | ||||
|             for s_re in extract_text: | ||||
|                 # in case they specified something in '/.../x' | ||||
|                 regex = self.forward_slash_enclosed_regex_to_options(s_re) | ||||
|                 result = re.findall(regex.encode('utf-8'), stripped_text_from_html) | ||||
|                 if re.search(PERL_STYLE_REGEX, s_re, re.IGNORECASE): | ||||
|                     regex = html_tools.perl_style_slash_enclosed_regex_to_options(s_re) | ||||
|                     result = re.findall(regex.encode('utf-8'), stripped_text_from_html) | ||||
|  | ||||
|                 for l in result: | ||||
|                     if type(l) is tuple: | ||||
|                         # @todo - some formatter option default (between groups) | ||||
|                         regex_matched_output += list(l) + [b'\n'] | ||||
|                     else: | ||||
|                         # @todo - some formatter option default (between each ungrouped result) | ||||
|                         regex_matched_output += [l] + [b'\n'] | ||||
|                     for l in result: | ||||
|                         if type(l) is tuple: | ||||
|                             # @todo - some formatter option default (between groups) | ||||
|                             regex_matched_output += list(l) + [b'\n'] | ||||
|                         else: | ||||
|                             # @todo - some formatter option default (between each ungrouped result) | ||||
|                             regex_matched_output += [l] + [b'\n'] | ||||
|                 else: | ||||
|                     # Doesn't look like regex, just hunt for plaintext and return that which matches | ||||
|                     # `stripped_text_from_html` will be bytes, so we must encode s_re also to bytes | ||||
|                     r = re.compile(re.escape(s_re.encode('utf-8')), re.IGNORECASE) | ||||
|                     res = r.findall(stripped_text_from_html) | ||||
|                     if res: | ||||
|                         for match in res: | ||||
|                             regex_matched_output += [match] + [b'\n'] | ||||
|  | ||||
|             # Now we will only show what the regex matched | ||||
|             stripped_text_from_html = b'' | ||||
| @@ -398,7 +333,7 @@ class perform_site_check(difference_detection_processor): | ||||
|         if is_html: | ||||
|             if self.datastore.data['settings']['application'].get('extract_title_as_title') or watch['extract_title_as_title']: | ||||
|                 if not watch['title'] or not len(watch['title']): | ||||
|                     update_obj['title'] = html_tools.extract_element(find='title', html_content=fetcher.content) | ||||
|                     update_obj['title'] = html_tools.extract_element(find='title', html_content=self.fetcher.content) | ||||
|  | ||||
|         if changed_detected: | ||||
|             if watch.get('check_unique_lines', False): | ||||
|   | ||||
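The extract_text change above now routes '/pattern/flags' style entries through html_tools.perl_style_slash_enclosed_regex_to_options and treats anything else as a plaintext needle. A hedged sketch of the slash-enclosed conversion, mirroring the removed local helper (the function name here is ours):

    import re

    def slash_enclosed_to_inline_flags(expression):
        res = re.search(r'^/(.*?)/(\w+)$', expression, re.IGNORECASE)
        if res:
            # '/sold out/i' -> 'sold out(?i)'
            return res.group(1) + '(?{})'.format(res.group(2))
        # Default to case-insensitive, as the removed helper did
        return expression + '(?i)'

    print(slash_enclosed_to_inline_flags('/sold out/i'))  # sold out(?i)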
| @@ -18,6 +18,7 @@ module.exports = async ({page, context}) => { | ||||
|  | ||||
|     await page.setBypassCSP(true) | ||||
|     await page.setExtraHTTPHeaders(req_headers); | ||||
|  | ||||
|     if (user_agent) { | ||||
|         await page.setUserAgent(user_agent); | ||||
|     } | ||||
| @@ -26,6 +27,10 @@ module.exports = async ({page, context}) => { | ||||
|     await page.setDefaultNavigationTimeout(0); | ||||
|  | ||||
|     if (proxy_username) { | ||||
|         // Setting Proxy-Authentication header is deprecated, and doing so can trigger header change errors from Puppeteer | ||||
|         // https://github.com/puppeteer/puppeteer/issues/676 ? | ||||
|         // https://help.brightdata.com/hc/en-us/articles/12632549957649-Proxy-Manager-How-to-Guides#h_01HAKWR4Q0AFS8RZTNYWRDFJC2 | ||||
|         // https://cri.dev/posts/2020-03-30-How-to-solve-Puppeteer-Chrome-Error-ERR_INVALID_ARGUMENT/ | ||||
|         await page.authenticate({ | ||||
|             username: proxy_username, | ||||
|             password: proxy_password | ||||
|   | ||||
| @@ -5,14 +5,19 @@ function isItemInStock() { | ||||
|     'agotado', | ||||
|     'artikel zurzeit vergriffen', | ||||
|     'as soon as stock is available', | ||||
|     'ausverkauft', // sold out | ||||
|     'available for back order', | ||||
|     'back-order or out of stock', | ||||
|     'backordered', | ||||
|     'benachrichtigt mich', // notify me | ||||
|     'brak na stanie', | ||||
|     'brak w magazynie', | ||||
|     'coming soon', | ||||
|     'currently have any tickets for this', | ||||
|     'currently unavailable', | ||||
|     'dostępne wkrótce', | ||||
|     'en rupture de stock', | ||||
|     'ist derzeit nicht auf lager', | ||||
|     'item is no longer available', | ||||
|     'message if back in stock', | ||||
|     'nachricht bei', | ||||
| @@ -37,6 +42,7 @@ function isItemInStock() { | ||||
|     'unavailable tickets', | ||||
|     'we do not currently have an estimate of when this product will be back in stock.', | ||||
|     'zur zeit nicht an lager', | ||||
|     '已售完', | ||||
|   ]; | ||||
|  | ||||
|  | ||||
|   | ||||
| @@ -170,9 +170,12 @@ if (include_filters.length) { | ||||
|  | ||||
|         try { | ||||
|             // is it xpath? | ||||
|             if (f.startsWith('/') || f.startsWith('xpath:')) { | ||||
|                 q = document.evaluate(f.replace('xpath:', ''), document, null, XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue; | ||||
|             if (f.startsWith('/') || f.startsWith('xpath')) { | ||||
|                 var qry_f = f.replace(/xpath(:|\d:)/, '') | ||||
|                 console.log("[xpath] Scanning for included filter " + qry_f) | ||||
|                 q = document.evaluate(qry_f, document, null, XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue; | ||||
|             } else { | ||||
|                 console.log("[css] Scanning for included filter " + f) | ||||
|                 q = document.querySelector(f); | ||||
|             } | ||||
|         } catch (e) { | ||||
| @@ -182,8 +185,18 @@ if (include_filters.length) { | ||||
|         } | ||||
|  | ||||
|         if (q) { | ||||
|             // Try to resolve //something/text() back to its /something so we can at least get the bounding box | ||||
|             try { | ||||
|                 if (typeof q.nodeName == 'string' && q.nodeName === '#text') { | ||||
|                     q = q.parentElement | ||||
|                 } | ||||
|             } catch (e) { | ||||
|                 console.log(e) | ||||
|                 console.log("xpath_element_scraper: #text resolver") | ||||
|             } | ||||
|  | ||||
|             // #1231 - In the case an XPath attribute filter is applied, we will have to traverse up and find the element. | ||||
|             if (q.hasOwnProperty('getBoundingClientRect')) { | ||||
|             if (typeof q.getBoundingClientRect == 'function') { | ||||
|                 bbox = q.getBoundingClientRect(); | ||||
|                 console.log("xpath_element_scraper: Got filter element, scroll from top was " + scroll_y) | ||||
|             } else { | ||||
| @@ -192,7 +205,8 @@ if (include_filters.length) { | ||||
|                     bbox = q.ownerElement.getBoundingClientRect(); | ||||
|                     console.log("xpath_element_scraper: Got filter by ownerElement element, scroll from top was " + scroll_y) | ||||
|                 } catch (e) { | ||||
|                     console.log("xpath_element_scraper: error looking up ownerElement") | ||||
|                     console.log(e) | ||||
|                     console.log("xpath_element_scraper: error looking up q.ownerElement") | ||||
|                 } | ||||
|             } | ||||
|         } | ||||
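The filter-prefix change above means both 'xpath:' and 'xpath1:' selectors reach document.evaluate with the prefix stripped by the regex /xpath(:|\d:)/. The same stripping, checked in Python for illustration (count=1 mirrors the non-global JS replace):

    import re

    for f in ('xpath://div[@id="price"]', 'xpath1://div[@id="price"]', '//div[@id="price"]'):
        print(re.sub(r'xpath(:|\d:)', '', f, count=1))
    # all three print: //div[@id="price"]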
|   | ||||
44 changedetectionio/run_custom_browser_url_tests.sh (new Executable file)
| @@ -0,0 +1,44 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # Run some tests and check whether the 'custom-browser-search-string=1' connect string appeared in the correct containers | ||||
|  | ||||
| # enable debug | ||||
| set -x | ||||
|  | ||||
| # An extra browser is configured, but we never chose to use it, so it should NOT show in the logs | ||||
| docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio  bash -c 'cd changedetectionio;pytest tests/custom_browser_url/test_custom_browser_url.py::test_request_not_via_custom_browser_url' | ||||
| docker logs browserless-custom-url &>log.txt | ||||
| grep 'custom-browser-search-string=1' log.txt | ||||
| if [ $? -ne 1 ] | ||||
| then | ||||
|   echo "Saw a request in 'browserless-custom-url' container with 'custom-browser-search-string=1' when I should not" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| docker logs browserless &>log.txt | ||||
| grep 'custom-browser-search-string=1' log.txt | ||||
| if [ $? -ne 1 ] | ||||
| then | ||||
|   echo "Saw a request in 'browserless' container with 'custom-browser-search-string=1' when I should not" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| # Special connect string should appear in the custom-url container, but not in the 'default' one | ||||
| docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio  bash -c 'cd changedetectionio;pytest tests/custom_browser_url/test_custom_browser_url.py::test_request_via_custom_browser_url' | ||||
| docker logs browserless-custom-url &>log.txt | ||||
| grep 'custom-browser-search-string=1' log.txt | ||||
| if [ $? -ne 0 ] | ||||
| then | ||||
|   echo "Did not see request in 'browserless-custom-url' container with 'custom-browser-search-string=1' when I should" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| docker logs browserless &>log.txt | ||||
| grep 'custom-browser-search-string=1' log.txt | ||||
| if [ $? -ne 1 ] | ||||
| then | ||||
|   echo "Saw a request in 'browserless' container with 'custom-browser-search-string=1' when I should not" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
|  | ||||
| @@ -10,6 +10,40 @@ set -x | ||||
| docker run --network changedet-network -d --name squid-one --hostname squid-one --rm -v `pwd`/tests/proxy_list/squid.conf:/etc/squid/conf.d/debian.conf ubuntu/squid:4.13-21.10_edge | ||||
| docker run --network changedet-network -d --name squid-two --hostname squid-two --rm -v `pwd`/tests/proxy_list/squid.conf:/etc/squid/conf.d/debian.conf ubuntu/squid:4.13-21.10_edge | ||||
|  | ||||
| # SOCKS5 related - start simple Socks5 proxy server | ||||
| # SOCKSTEST=xyz should show in the logs of this service to confirm it fetched | ||||
| docker run --network changedet-network -d --hostname socks5proxy --name socks5proxy -p 1080:1080 -e PROXY_USER=proxy_user123 -e PROXY_PASSWORD=proxy_pass123 serjs/go-socks5-proxy | ||||
| docker run --network changedet-network -d --hostname socks5proxy-noauth -p 1081:1080 --name socks5proxy-noauth  serjs/go-socks5-proxy | ||||
|  | ||||
| echo "---------------------------------- SOCKS5 -------------------" | ||||
| # SOCKS5 related - test from proxies.json | ||||
| docker run --network changedet-network \ | ||||
|   -v `pwd`/tests/proxy_socks5/proxies.json-example:/app/changedetectionio/test-datastore/proxies.json \ | ||||
|   --rm \ | ||||
|   -e "SOCKSTEST=proxiesjson" \ | ||||
|   test-changedetectionio \ | ||||
|   bash -c 'cd changedetectionio && pytest tests/proxy_socks5/test_socks5_proxy_sources.py' | ||||
|  | ||||
| # SOCKS5 related - by manually entering in UI | ||||
| docker run --network changedet-network \ | ||||
|   --rm \ | ||||
|   -e "SOCKSTEST=manual" \ | ||||
|   test-changedetectionio \ | ||||
|   bash -c 'cd changedetectionio && pytest tests/proxy_socks5/test_socks5_proxy.py' | ||||
|  | ||||
| # SOCKS5 related - test from proxies.json via playwright - NOTE - PLAYWRIGHT DOESN'T SUPPORT AUTHENTICATING PROXY | ||||
| docker run --network changedet-network \ | ||||
|   -e "SOCKSTEST=manual-playwright" \ | ||||
|   -v `pwd`/tests/proxy_socks5/proxies.json-example-noauth:/app/changedetectionio/test-datastore/proxies.json \ | ||||
|   -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" \ | ||||
|   --rm \ | ||||
|   test-changedetectionio \ | ||||
|   bash -c 'cd changedetectionio && pytest tests/proxy_socks5/test_socks5_proxy_sources.py' | ||||
|  | ||||
| echo "socks5 server logs" | ||||
| docker logs socks5proxy | ||||
| echo "----------------------------------" | ||||
|  | ||||
| # Used for configuring a custom proxy URL via the UI | ||||
| docker run --network changedet-network -d \ | ||||
|   --name squid-custom \ | ||||
|   | ||||
| @@ -208,7 +208,7 @@ $(document).ready(function () { | ||||
|             console.log(x); | ||||
|             if (x && first_available.length) { | ||||
|                 // @todo will it let you click elements that have a layer on top? probably not. | ||||
|                 if (x['tagtype'] === 'text' || x['tagtype'] === 'email' || x['tagName'] === 'textarea' || x['tagtype'] === 'password' || x['tagtype'] === 'search') { | ||||
|                 if (x['tagtype'] === 'text' || x['tagtype'] === 'number' || x['tagtype'] === 'email' || x['tagName'] === 'textarea' || x['tagtype'] === 'password' || x['tagtype'] === 'search') { | ||||
|                     $('select', first_available).val('Enter text in field').change(); | ||||
|                     $('input[type=text]', first_available).first().val(x['xpath']); | ||||
|                     $('input[placeholder="Value"]', first_available).addClass('ok').click().focus(); | ||||
| @@ -321,8 +321,14 @@ $(document).ready(function () { | ||||
|             var s = '<div class="control">' + '<a data-step-index=' + i + ' class="pure-button button-secondary button-green button-xsmall apply" >Apply</a> '; | ||||
|             if (i > 0) { | ||||
|                 // The first step never gets these (Goto-site) | ||||
|                 s += '<a data-step-index=' + i + ' class="pure-button button-secondary button-xsmall clear" >Clear</a> ' + | ||||
|                     '<a data-step-index=' + i + ' class="pure-button button-secondary button-red button-xsmall remove" >Remove</a>'; | ||||
|                 s += `<a data-step-index="${i}" class="pure-button button-secondary button-xsmall clear" >Clear</a> ` + | ||||
|                     `<a data-step-index="${i}" class="pure-button button-secondary button-red button-xsmall remove" >Remove</a>`; | ||||
|  | ||||
|                 // if a screenshot is available | ||||
|                 if (browser_steps_available_screenshots.includes(i.toString())) { | ||||
|                     var d = (browser_steps_last_error_step === i+1) ? 'before' : 'after'; | ||||
|                     s += ` <a data-step-index="${i}" class="pure-button button-secondary button-xsmall show-screenshot" title="Show screenshot from last run" data-type="${d}">Pic</a> `; | ||||
|                 } | ||||
|             } | ||||
|             s += '</div>'; | ||||
|             $(this).append(s) | ||||
| @@ -437,6 +443,24 @@ $(document).ready(function () { | ||||
|  | ||||
|     }); | ||||
|  | ||||
|     $('ul#browser_steps li .control .show-screenshot').click(function (event) { | ||||
|         var step_n = $(event.currentTarget).data('step-index'); | ||||
|         const w = window.open(this.href, "_blank", "width=640,height=480"); | ||||
|         const t = $(event.currentTarget).data('type'); | ||||
|  | ||||
|         const url = browser_steps_fetch_screenshot_image_url + `&step_n=${step_n}&type=${t}`; | ||||
|         w.document.body.innerHTML = `<!DOCTYPE html> | ||||
|             <html lang="en"> | ||||
|                 <body> | ||||
|                     <img src="${url}" style="width: 100%" alt="Browser Step at step ${step_n} from last run." title="Browser Step at step ${step_n} from last run."/> | ||||
|                 </body> | ||||
|         </html>`; | ||||
|         w.document.title = `Browser Step at step ${step_n} from last run.`; | ||||
|     }); | ||||
|  | ||||
|     if (browser_steps_last_error_step) { | ||||
|         $("ul#browser_steps>li:nth-child("+browser_steps_last_error_step+")").addClass("browser-step-with-error"); | ||||
|     } | ||||
|  | ||||
|     $("ul#browser_steps select").change(function () { | ||||
|         set_greyed_state(); | ||||
|   | ||||
| @@ -1,110 +1,120 @@ | ||||
| var a = document.getElementById("a"); | ||||
| var b = document.getElementById("b"); | ||||
| var result = document.getElementById("result"); | ||||
| $(document).ready(function () { | ||||
|     var a = document.getElementById("a"); | ||||
|     var b = document.getElementById("b"); | ||||
|     var result = document.getElementById("result"); | ||||
|     var inputs; | ||||
|  | ||||
| function changed() { | ||||
|   // https://github.com/kpdecker/jsdiff/issues/389 | ||||
|   // I would love to use `{ignoreWhitespace: true}` here but it breaks the formatting | ||||
|   options = { | ||||
|     ignoreWhitespace: document.getElementById("ignoreWhitespace").checked, | ||||
|   }; | ||||
|     $('#jump-next-diff').click(function () { | ||||
|  | ||||
|   var diff = Diff[window.diffType](a.textContent, b.textContent, options); | ||||
|   var fragment = document.createDocumentFragment(); | ||||
|   for (var i = 0; i < diff.length; i++) { | ||||
|     if (diff[i].added && diff[i + 1] && diff[i + 1].removed) { | ||||
|       var swap = diff[i]; | ||||
|       diff[i] = diff[i + 1]; | ||||
|       diff[i + 1] = swap; | ||||
|         var element = inputs[inputs.current]; | ||||
|         var headerOffset = 80; | ||||
|         var elementPosition = element.getBoundingClientRect().top; | ||||
|         var offsetPosition = elementPosition - headerOffset + window.scrollY; | ||||
|  | ||||
|         window.scrollTo({ | ||||
|             top: offsetPosition, | ||||
|             behavior: "smooth", | ||||
|         }); | ||||
|  | ||||
|         inputs.current++; | ||||
|         if (inputs.current >= inputs.length) { | ||||
|             inputs.current = 0; | ||||
|         } | ||||
|     }); | ||||
|  | ||||
|     function changed() { | ||||
|         // https://github.com/kpdecker/jsdiff/issues/389 | ||||
|         // I would love to use `{ignoreWhitespace: true}` here but it breaks the formatting | ||||
|         const options = { | ||||
|             ignoreWhitespace: document.getElementById("ignoreWhitespace").checked, | ||||
|         }; | ||||
|  | ||||
|         var diff = Diff[window.diffType](a.textContent, b.textContent, options); | ||||
|         var fragment = document.createDocumentFragment(); | ||||
|         for (var i = 0; i < diff.length; i++) { | ||||
|             if (diff[i].added && diff[i + 1] && diff[i + 1].removed) { | ||||
|                 var swap = diff[i]; | ||||
|                 diff[i] = diff[i + 1]; | ||||
|                 diff[i + 1] = swap; | ||||
|             } | ||||
|  | ||||
|             var node; | ||||
|             if (diff[i].removed) { | ||||
|                 node = document.createElement("del"); | ||||
|                 node.classList.add("change"); | ||||
|                 const wrapper = node.appendChild(document.createElement("span")); | ||||
|                 wrapper.appendChild(document.createTextNode(diff[i].value)); | ||||
|             } else if (diff[i].added) { | ||||
|                 node = document.createElement("ins"); | ||||
|                 node.classList.add("change"); | ||||
|                 const wrapper = node.appendChild(document.createElement("span")); | ||||
|                 wrapper.appendChild(document.createTextNode(diff[i].value)); | ||||
|             } else { | ||||
|                 node = document.createTextNode(diff[i].value); | ||||
|             } | ||||
|             fragment.appendChild(node); | ||||
|         } | ||||
|  | ||||
|         result.textContent = ""; | ||||
|         result.appendChild(fragment); | ||||
|  | ||||
|         // For nice mouse-over hover/title information | ||||
|         const removed_current_option = $('#diff-version option:selected') | ||||
|         if (removed_current_option) { | ||||
|             $('del').each(function () { | ||||
|                 $(this).prop('title', 'Removed '+removed_current_option[0].label); | ||||
|             }); | ||||
|         } | ||||
|         const inserted_current_option = $('#current-version option:selected') | ||||
|         if (inserted_current_option) { | ||||
|             $('ins').each(function () { | ||||
|                 $(this).prop('title', 'Inserted '+inserted_current_option[0].label); | ||||
|             }); | ||||
|         } | ||||
|         // Set the list of possible differences to jump to | ||||
|         inputs = document.querySelectorAll('#diff-ui .change') | ||||
|         // Set the "current" diff pointer | ||||
|         inputs.current = 0; | ||||
|         // Goto diff | ||||
|         $('#jump-next-diff').click(); | ||||
|     } | ||||
|  | ||||
|     var node; | ||||
|     if (diff[i].removed) { | ||||
|       node = document.createElement("del"); | ||||
|       node.classList.add("change"); | ||||
|       const wrapper = node.appendChild(document.createElement("span")); | ||||
|       wrapper.appendChild(document.createTextNode(diff[i].value)); | ||||
|     } else if (diff[i].added) { | ||||
|       node = document.createElement("ins"); | ||||
|       node.classList.add("change"); | ||||
|       const wrapper = node.appendChild(document.createElement("span")); | ||||
|       wrapper.appendChild(document.createTextNode(diff[i].value)); | ||||
|     } else { | ||||
|       node = document.createTextNode(diff[i].value); | ||||
|     } | ||||
|     fragment.appendChild(node); | ||||
|   } | ||||
|  | ||||
|   result.textContent = ""; | ||||
|   result.appendChild(fragment); | ||||
|  | ||||
|   // Jump at start | ||||
|   inputs.current = 0; | ||||
|   next_diff(); | ||||
| } | ||||
|  | ||||
| window.onload = function () { | ||||
|   /* Convert what is options from UTC time.time() to local browser time */ | ||||
|   var diffList = document.getElementById("diff-version"); | ||||
|   if (typeof diffList != "undefined" && diffList != null) { | ||||
|     for (var option of diffList.options) { | ||||
|       var dateObject = new Date(option.value * 1000); | ||||
|       option.label = dateObject.toLocaleString(); | ||||
|     } | ||||
|   } | ||||
|  | ||||
|   /* Set current version date as local time in the browser also */ | ||||
|   var current_v = document.getElementById("current-v-date"); | ||||
|   var dateObject = new Date(newest_version_timestamp * 1000); | ||||
|   current_v.innerHTML = dateObject.toLocaleString(); | ||||
|   onDiffTypeChange( | ||||
|     document.querySelector('#settings [name="diff_type"]:checked'), | ||||
|   ); | ||||
|   changed(); | ||||
| }; | ||||
|  | ||||
| a.onpaste = a.onchange = b.onpaste = b.onchange = changed; | ||||
|  | ||||
| if ("oninput" in a) { | ||||
|   a.oninput = b.oninput = changed; | ||||
| } else { | ||||
|   a.onkeyup = b.onkeyup = changed; | ||||
| } | ||||
|  | ||||
| function onDiffTypeChange(radio) { | ||||
|   window.diffType = radio.value; | ||||
|   // Not necessary | ||||
|   //	document.title = "Diff " + radio.value.slice(4); | ||||
| } | ||||
|  | ||||
| var radio = document.getElementsByName("diff_type"); | ||||
| for (var i = 0; i < radio.length; i++) { | ||||
|   radio[i].onchange = function (e) { | ||||
|     onDiffTypeChange(e.target); | ||||
|     $('.needs-localtime').each(function () { | ||||
|         for (var option of this.options) { | ||||
|             var dateObject = new Date(option.value * 1000); | ||||
|             option.label = dateObject.toLocaleString(undefined, {dateStyle: "full", timeStyle: "medium"}); | ||||
|         } | ||||
|     }) | ||||
|     onDiffTypeChange( | ||||
|         document.querySelector('#settings [name="diff_type"]:checked'), | ||||
|     ); | ||||
|     changed(); | ||||
|   }; | ||||
| } | ||||
|  | ||||
| document.getElementById("ignoreWhitespace").onchange = function (e) { | ||||
|   changed(); | ||||
| }; | ||||
|     a.onpaste = a.onchange = b.onpaste = b.onchange = changed; | ||||
|  | ||||
| var inputs = document.getElementsByClassName("change"); | ||||
| inputs.current = 0; | ||||
|     if ("oninput" in a) { | ||||
|         a.oninput = b.oninput = changed; | ||||
|     } else { | ||||
|         a.onkeyup = b.onkeyup = changed; | ||||
|     } | ||||
|  | ||||
| function next_diff() { | ||||
|   var element = inputs[inputs.current]; | ||||
|   var headerOffset = 80; | ||||
|   var elementPosition = element.getBoundingClientRect().top; | ||||
|   var offsetPosition = elementPosition - headerOffset + window.scrollY; | ||||
|     function onDiffTypeChange(radio) { | ||||
|         window.diffType = radio.value; | ||||
|         // Not necessary | ||||
|         //	document.title = "Diff " + radio.value.slice(4); | ||||
|     } | ||||
|  | ||||
|   window.scrollTo({ | ||||
|     top: offsetPosition, | ||||
|     behavior: "smooth", | ||||
|   }); | ||||
|     var radio = document.getElementsByName("diff_type"); | ||||
|     for (var i = 0; i < radio.length; i++) { | ||||
|         radio[i].onchange = function (e) { | ||||
|             onDiffTypeChange(e.target); | ||||
|             changed(); | ||||
|         }; | ||||
|     } | ||||
|  | ||||
|     document.getElementById("ignoreWhitespace").onchange = function (e) { | ||||
|         changed(); | ||||
|     }; | ||||
|  | ||||
| }); | ||||
|  | ||||
|   inputs.current++; | ||||
|   if (inputs.current >= inputs.length) { | ||||
|     inputs.current = 0; | ||||
|   } | ||||
| } | ||||
|   | ||||
| @@ -3,45 +3,50 @@ | ||||
|  * Toggles theme between light and dark mode. | ||||
|  */ | ||||
| $(document).ready(function () { | ||||
|   const button = document.getElementById("toggle-light-mode"); | ||||
|     const button = document.getElementById("toggle-light-mode"); | ||||
|  | ||||
|   button.onclick = () => { | ||||
|     const htmlElement = document.getElementsByTagName("html"); | ||||
|     const isDarkMode = htmlElement[0].dataset.darkmode === "true"; | ||||
|     htmlElement[0].dataset.darkmode = !isDarkMode; | ||||
|     setCookieValue(!isDarkMode); | ||||
|   }; | ||||
|     button.onclick = () => { | ||||
|         const htmlElement = document.getElementsByTagName("html"); | ||||
|         const isDarkMode = htmlElement[0].dataset.darkmode === "true"; | ||||
|         htmlElement[0].dataset.darkmode = !isDarkMode; | ||||
|         setCookieValue(!isDarkMode); | ||||
|     }; | ||||
|  | ||||
|   const setCookieValue = (value) => { | ||||
|     document.cookie = `css_dark_mode=${value};max-age=31536000;path=/` | ||||
|   } | ||||
|     const setCookieValue = (value) => { | ||||
|         document.cookie = `css_dark_mode=${value};max-age=31536000;path=/` | ||||
|     } | ||||
|  | ||||
|   // Search input box behaviour | ||||
|     // Search input box behaviour | ||||
|     const toggle_search = document.getElementById("toggle-search"); | ||||
|   const search_q = document.getElementById("search-q"); | ||||
|   window.addEventListener('keydown', function (e) { | ||||
|     const search_q = document.getElementById("search-q"); | ||||
|     if(search_q) { | ||||
|       window.addEventListener('keydown', function (e) { | ||||
|         if (e.altKey == true && e.keyCode == 83) { | ||||
|           search_q.classList.toggle('expanded'); | ||||
|           search_q.focus(); | ||||
|         } | ||||
|       }); | ||||
|  | ||||
|     if (e.altKey == true && e.keyCode == 83) | ||||
|       search_q.classList.toggle('expanded'); | ||||
|       search_q.focus(); | ||||
|   }); | ||||
|  | ||||
|  | ||||
|   search_q.onkeydown = (e) => { | ||||
|     var key = e.keyCode || e.which; | ||||
|     if (key === 13) { | ||||
|       document.searchForm.submit(); | ||||
|       search_q.onkeydown = (e) => { | ||||
|         var key = e.keyCode || e.which; | ||||
|         if (key === 13) { | ||||
|           document.searchForm.submit(); | ||||
|         } | ||||
|       }; | ||||
|       toggle_search.onclick = () => { | ||||
|         // Could be that they want to search something once text is in there | ||||
|         if (search_q.value.length) { | ||||
|           document.searchForm.submit(); | ||||
|         } else { | ||||
|           // If not.. | ||||
|           search_q.classList.toggle('expanded'); | ||||
|           search_q.focus(); | ||||
|         } | ||||
|       }; | ||||
|     } | ||||
|   }; | ||||
|   toggle_search.onclick = () => { | ||||
|     // Could be that they want to search something once text is in there | ||||
|     if (search_q.value.length) { | ||||
|       document.searchForm.submit(); | ||||
|     } else { | ||||
|       // If not.. | ||||
|       search_q.classList.toggle('expanded'); | ||||
|       search_q.focus(); | ||||
|     } | ||||
|   }; | ||||
|  | ||||
|     $('#heart-us').click(function () { | ||||
|         $("#overlay").toggleClass('visible'); | ||||
|         heartpath.style.fill = document.getElementById("overlay").classList.contains("visible") ? '#ff0000' : 'var(--color-background)'; | ||||
|     }); | ||||
| }); | ||||
|   | ||||
| @@ -149,7 +149,7 @@ $(document).ready(function () { | ||||
|             // @todo In the future paint all that match | ||||
|             for (const c of current_default_xpath) { | ||||
|                 for (var i = selector_data['size_pos'].length; i !== 0; i--) { | ||||
|                     if (selector_data['size_pos'][i - 1].xpath === c) { | ||||
|                     if (selector_data['size_pos'][i - 1].xpath.trim() === c.trim()) { | ||||
|                         console.log("highlighting " + c); | ||||
|                         current_selected_i = i - 1; | ||||
|                         highlight_current_selected_i(); | ||||
|   | ||||
| @@ -4,6 +4,14 @@ $(function () { | ||||
|         $(this).closest('.unviewed').removeClass('unviewed'); | ||||
|     }); | ||||
|  | ||||
|     $('td[data-timestamp]').each(function () { | ||||
|         $(this).prop('title', new Intl.DateTimeFormat(undefined, | ||||
|             { | ||||
|                 dateStyle: 'full', | ||||
|                 timeStyle: 'long' | ||||
|             }).format($(this).data('timestamp') * 1000)); | ||||
|     }) | ||||
|  | ||||
|     $("#checkbox-assign-tag").click(function (e) { | ||||
|         $('#op_extradata').val(prompt("Enter a tag name")); | ||||
|     }); | ||||
|   | ||||
| @@ -187,6 +187,10 @@ ins { | ||||
|     padding: 0.5em; } | ||||
|   #settings ins { | ||||
|     padding: 0.5em; } | ||||
|   #settings option:checked { | ||||
|     font-weight: bold; } | ||||
|   #settings [type=radio], #settings [type=checkbox] { | ||||
|     vertical-align: middle; } | ||||
|  | ||||
| .source { | ||||
|   position: absolute; | ||||
|   | ||||
| @@ -77,6 +77,13 @@ ins { | ||||
|   ins { | ||||
|     padding: 0.5em; | ||||
|   } | ||||
|  | ||||
|   option:checked { | ||||
|     font-weight: bold; | ||||
|   } | ||||
|   [type=radio],[type=checkbox] { | ||||
|     vertical-align: middle; | ||||
|   } | ||||
| } | ||||
|  | ||||
| .source { | ||||
|   | ||||
| @@ -6,6 +6,10 @@ | ||||
|   } | ||||
|  | ||||
|   li { | ||||
|     &.browser-step-with-error { | ||||
|       background-color: #ffd6d6; | ||||
|       border-radius: 4px; | ||||
|     } | ||||
|     &:not(:first-child) { | ||||
|       &:hover { | ||||
|         opacity: 1.0; | ||||
| @@ -44,7 +48,7 @@ | ||||
| #browser-steps .flex-wrapper { | ||||
|   display: flex; | ||||
|   flex-flow: row; | ||||
|   height: 600px; /*@todo make this dynamic */ | ||||
|   height: 70vh; | ||||
| } | ||||
|  | ||||
| /*  this is duplicate :( */ | ||||
|   | ||||
| @@ -1,6 +1,6 @@ | ||||
|  | ||||
| #toggle-light-mode { | ||||
|   width: 3rem; | ||||
| /*  width: 3rem;*/ | ||||
|   /* default */ | ||||
|   .icon-dark { | ||||
|     display: none; | ||||
|   | ||||
| @@ -0,0 +1,24 @@ | ||||
| ul#requests-extra_browsers { | ||||
|   list-style: none; | ||||
|   /* tidy up the table to look more "inline" */ | ||||
|   li { | ||||
|     > label { | ||||
|       display: none; | ||||
|     } | ||||
|  | ||||
|   } | ||||
|  | ||||
|   /* each proxy entry is a `table` */ | ||||
|   table { | ||||
|     tr { | ||||
|       display: inline; | ||||
|     } | ||||
|   } | ||||
| } | ||||
|  | ||||
| #extra-browsers-setting { | ||||
|   border: 1px solid var(--color-grey-800); | ||||
|   border-radius: 4px; | ||||
|   margin: 1em; | ||||
|   padding: 1em; | ||||
| } | ||||
| @@ -60,3 +60,10 @@ body.proxy-check-active { | ||||
|  | ||||
|   padding-bottom: 1em; | ||||
| } | ||||
|  | ||||
| #extra-proxies-setting { | ||||
|   border: 1px solid var(--color-grey-800); | ||||
|   border-radius: 4px; | ||||
|   margin: 1em; | ||||
|   padding: 1em; | ||||
| } | ||||
|   | ||||
changedetectionio/static/styles/scss/parts/_love.scss (new file, +38 lines)
							| @@ -0,0 +1,38 @@ | ||||
| #overlay { | ||||
|  | ||||
|   opacity: 0.95; | ||||
|   position: fixed; | ||||
|  | ||||
|   width: 350px; | ||||
|   max-width: 100%; | ||||
|   height: 100%; | ||||
|   top: 0; | ||||
|   right: -350px; | ||||
|   background-color: var(--color-table-stripe); | ||||
|   z-index: 2; | ||||
|  | ||||
|   transform: translateX(0); | ||||
|   transition: transform .5s ease; | ||||
|  | ||||
|  | ||||
|   &.visible { | ||||
|     transform: translateX(-100%); | ||||
|  | ||||
|   } | ||||
|  | ||||
|   .content { | ||||
|     font-size: 0.875rem; | ||||
|     padding: 1rem; | ||||
|     margin-top: 5rem; | ||||
|     max-width: 400px; | ||||
|     color: var(--color-watch-table-row-text); | ||||
|   } | ||||
| } | ||||
|  | ||||
| #heartpath { | ||||
|   &:hover { | ||||
|     fill: #ff0000 !important; | ||||
|     transition: all ease 0.3s !important; | ||||
|   } | ||||
|   transition: all ease 0.3s !important; | ||||
| } | ||||
changedetectionio/static/styles/scss/parts/_menu.scss (new file, +25 lines)
							| @@ -0,0 +1,25 @@ | ||||
| .pure-menu-link { | ||||
|   padding: 0.5rem 1em; | ||||
|   line-height: 1.2rem; | ||||
| } | ||||
|  | ||||
| .pure-menu-item { | ||||
|   svg { | ||||
|     height: 1.2rem; | ||||
|   } | ||||
|   * { | ||||
|     vertical-align: middle; | ||||
|   } | ||||
|   .github-link { | ||||
|     height: 1.8rem; | ||||
|     display: block; | ||||
|     svg { | ||||
|       height: 100%; | ||||
|     } | ||||
|   } | ||||
|   .bi-heart { | ||||
|     &:hover { | ||||
|       cursor: pointer; | ||||
|     } | ||||
|   } | ||||
| } | ||||
| @@ -0,0 +1,28 @@ | ||||
|  | ||||
| #selector-wrapper { | ||||
|   height: 100%; | ||||
|   max-height: 70vh; | ||||
|   overflow-y: scroll; | ||||
|   position: relative; | ||||
|  | ||||
|   //width: 100%; | ||||
|   >img { | ||||
|     position: absolute; | ||||
|     z-index: 4; | ||||
|     max-width: 100%; | ||||
|   } | ||||
|  | ||||
|   >canvas { | ||||
|     position: relative; | ||||
|     z-index: 5; | ||||
|     max-width: 100%; | ||||
|  | ||||
|     &:hover { | ||||
|       cursor: pointer; | ||||
|     } | ||||
|   } | ||||
| } | ||||
|  | ||||
| #selector-current-xpath { | ||||
|   font-size: 80%; | ||||
| } | ||||
| @@ -5,14 +5,18 @@ | ||||
| @import "parts/_arrows"; | ||||
| @import "parts/_browser-steps"; | ||||
| @import "parts/_extra_proxies"; | ||||
| @import "parts/_extra_browsers"; | ||||
| @import "parts/_pagination"; | ||||
| @import "parts/_spinners"; | ||||
| @import "parts/_variables"; | ||||
| @import "parts/_darkmode"; | ||||
| @import "parts/_menu"; | ||||
| @import "parts/_love"; | ||||
|  | ||||
| body { | ||||
|   color: var(--color-text); | ||||
|   background: var(--color-background-page); | ||||
|   font-family: Helvetica Neue, Helvetica, Lucida Grande, Arial, Ubuntu, Cantarell, Fira Sans, sans-serif; | ||||
| } | ||||
|  | ||||
| .visually-hidden { | ||||
| @@ -55,11 +59,6 @@ a.github-link { | ||||
|   } | ||||
| } | ||||
|  | ||||
|  | ||||
| #toggle-search { | ||||
|   width: 2rem; | ||||
| } | ||||
|  | ||||
| #search-q { | ||||
|   opacity: 0; | ||||
|   -webkit-transition: all .9s ease; | ||||
| @@ -471,7 +470,11 @@ footer { | ||||
|   padding: 10px; | ||||
|  | ||||
|   &#left-sticky { | ||||
|     left: 0px; | ||||
|     left: 0; | ||||
|     position: fixed; | ||||
|     border-top-right-radius: 5px; | ||||
|     border-bottom-right-radius: 5px; | ||||
|     box-shadow: 1px 1px 4px var(--color-shadow-jump); | ||||
|   } | ||||
|  | ||||
|   &#right-sticky { | ||||
| @@ -939,32 +942,7 @@ ul { | ||||
|   } | ||||
| } | ||||
|  | ||||
| #selector-wrapper { | ||||
|   height: 100%; | ||||
|   overflow-y: scroll; | ||||
|   position: relative; | ||||
|  | ||||
|   //width: 100%; | ||||
|   >img { | ||||
|     position: absolute; | ||||
|     z-index: 4; | ||||
|     max-width: 100%; | ||||
|   } | ||||
|  | ||||
|   >canvas { | ||||
|     position: relative; | ||||
|     z-index: 5; | ||||
|     max-width: 100%; | ||||
|  | ||||
|     &:hover { | ||||
|       cursor: pointer; | ||||
|     } | ||||
|   } | ||||
| } | ||||
|  | ||||
| #selector-current-xpath { | ||||
|   font-size: 80%; | ||||
| } | ||||
| @import "parts/_visualselector"; | ||||
|  | ||||
| #webdriver-override-options { | ||||
|   input[type="number"] { | ||||
| @@ -1103,3 +1081,4 @@ ul { | ||||
|   border-radius: 3px; | ||||
|   white-space: nowrap; | ||||
| } | ||||
|  | ||||
|   | ||||
| @@ -26,6 +26,9 @@ | ||||
|   #browser_steps li { | ||||
|     list-style: decimal; | ||||
|     padding: 5px; } | ||||
|     #browser_steps li.browser-step-with-error { | ||||
|       background-color: #ffd6d6; | ||||
|       border-radius: 4px; } | ||||
|     #browser_steps li:not(:first-child):hover { | ||||
|       opacity: 1.0; } | ||||
|     #browser_steps li .control { | ||||
| @@ -50,8 +53,7 @@ | ||||
| #browser-steps .flex-wrapper { | ||||
|   display: flex; | ||||
|   flex-flow: row; | ||||
|   height: 600px; | ||||
|   /*@todo make this dynamic */ } | ||||
|   height: 70vh; } | ||||
|  | ||||
| /*  this is duplicate :( */ | ||||
| #browsersteps-selector-wrapper { | ||||
| @@ -126,6 +128,27 @@ body.proxy-check-active #request .proxy-timing { | ||||
|     border-radius: 4px; | ||||
|     padding: 1em; } | ||||
|  | ||||
| #extra-proxies-setting { | ||||
|   border: 1px solid var(--color-grey-800); | ||||
|   border-radius: 4px; | ||||
|   margin: 1em; | ||||
|   padding: 1em; } | ||||
|  | ||||
| ul#requests-extra_browsers { | ||||
|   list-style: none; | ||||
|   /* tidy up the table to look more "inline" */ | ||||
|   /* each proxy entry is a `table` */ } | ||||
|   ul#requests-extra_browsers li > label { | ||||
|     display: none; } | ||||
|   ul#requests-extra_browsers table tr { | ||||
|     display: inline; } | ||||
|  | ||||
| #extra-browsers-setting { | ||||
|   border: 1px solid var(--color-grey-800); | ||||
|   border-radius: 4px; | ||||
|   margin: 1em; | ||||
|   padding: 1em; } | ||||
|  | ||||
| .pagination-page-info { | ||||
|   color: #fff; | ||||
|   font-size: 0.85rem; | ||||
| @@ -329,7 +352,7 @@ html[data-darkmode="true"] { | ||||
|       color: var(--color-watch-table-error); } | ||||
|  | ||||
| #toggle-light-mode { | ||||
|   width: 3rem; | ||||
|   /*  width: 3rem;*/ | ||||
|   /* default */ } | ||||
|   #toggle-light-mode .icon-dark { | ||||
|     display: none; } | ||||
| @@ -340,9 +363,56 @@ html[data-darkmode="true"] #toggle-light-mode .icon-light { | ||||
| html[data-darkmode="true"] #toggle-light-mode .icon-dark { | ||||
|   display: block; } | ||||
|  | ||||
| .pure-menu-link { | ||||
|   padding: 0.5rem 1em; | ||||
|   line-height: 1.2rem; } | ||||
|  | ||||
| .pure-menu-item svg { | ||||
|   height: 1.2rem; } | ||||
|  | ||||
| .pure-menu-item * { | ||||
|   vertical-align: middle; } | ||||
|  | ||||
| .pure-menu-item .github-link { | ||||
|   height: 1.8rem; | ||||
|   display: block; } | ||||
|   .pure-menu-item .github-link svg { | ||||
|     height: 100%; } | ||||
|  | ||||
| .pure-menu-item .bi-heart:hover { | ||||
|   cursor: pointer; } | ||||
|  | ||||
| #overlay { | ||||
|   opacity: 0.95; | ||||
|   position: fixed; | ||||
|   width: 350px; | ||||
|   max-width: 100%; | ||||
|   height: 100%; | ||||
|   top: 0; | ||||
|   right: -350px; | ||||
|   background-color: var(--color-table-stripe); | ||||
|   z-index: 2; | ||||
|   transform: translateX(0); | ||||
|   transition: transform .5s ease; } | ||||
|   #overlay.visible { | ||||
|     transform: translateX(-100%); } | ||||
|   #overlay .content { | ||||
|     font-size: 0.875rem; | ||||
|     padding: 1rem; | ||||
|     margin-top: 5rem; | ||||
|     max-width: 400px; | ||||
|     color: var(--color-watch-table-row-text); } | ||||
|  | ||||
| #heartpath { | ||||
|   transition: all ease 0.3s !important; } | ||||
|   #heartpath:hover { | ||||
|     fill: #ff0000 !important; | ||||
|     transition: all ease 0.3s !important; } | ||||
|  | ||||
| body { | ||||
|   color: var(--color-text); | ||||
|   background: var(--color-background-page); } | ||||
|   background: var(--color-background-page); | ||||
|   font-family: Helvetica Neue, Helvetica, Lucida Grande, Arial, Ubuntu, Cantarell, Fira Sans, sans-serif; } | ||||
|  | ||||
| .visually-hidden { | ||||
|   clip: rect(0 0 0 0); | ||||
| @@ -374,9 +444,6 @@ a.github-link { | ||||
|   a.github-link:hover { | ||||
|     color: var(--color-icon-github-hover); } | ||||
|  | ||||
| #toggle-search { | ||||
|   width: 2rem; } | ||||
|  | ||||
| #search-q { | ||||
|   opacity: 0; | ||||
|   -webkit-transition: all .9s ease; | ||||
| @@ -668,7 +735,11 @@ footer { | ||||
|   background: var(--color-background); | ||||
|   padding: 10px; } | ||||
|   .sticky-tab#left-sticky { | ||||
|     left: 0px; } | ||||
|     left: 0; | ||||
|     position: fixed; | ||||
|     border-top-right-radius: 5px; | ||||
|     border-bottom-right-radius: 5px; | ||||
|     box-shadow: 1px 1px 4px var(--color-shadow-jump); } | ||||
|   .sticky-tab#right-sticky { | ||||
|     right: 0px; } | ||||
|   .sticky-tab#hosted-sticky { | ||||
| @@ -977,6 +1048,7 @@ ul { | ||||
|  | ||||
| #selector-wrapper { | ||||
|   height: 100%; | ||||
|   max-height: 70vh; | ||||
|   overflow-y: scroll; | ||||
|   position: relative; } | ||||
|   #selector-wrapper > img { | ||||
|   | ||||
| @@ -42,6 +42,7 @@ class ChangeDetectionStore: | ||||
|         self.__data = App.model() | ||||
|         self.datastore_path = datastore_path | ||||
|         self.json_store_path = "{}/url-watches.json".format(self.datastore_path) | ||||
|         print(">>> Datastore path is ", self.json_store_path) | ||||
|         self.needs_write = False | ||||
|         self.start_time = time.time() | ||||
|         self.stop_thread = False | ||||
| @@ -95,6 +96,14 @@ class ChangeDetectionStore: | ||||
|                 self.add_watch(url='https://changedetection.io/CHANGELOG.txt', | ||||
|                                tag='changedetection.io', | ||||
|                                extras={'fetch_backend': 'html_requests'}) | ||||
|  | ||||
|             updates_available = self.get_updates_available() | ||||
|             self.__data['settings']['application']['schema_version'] = updates_available.pop() | ||||
|  | ||||
|         else: | ||||
|             # Bump the update version by running updates | ||||
|             self.run_updates() | ||||
|  | ||||
|         self.__data['version_tag'] = version_tag | ||||
|  | ||||
|         # Just to test that proxies.json, if it exists, doesn't throw a parsing error on startup | ||||
| @@ -124,9 +133,6 @@ class ChangeDetectionStore: | ||||
|             secret = secrets.token_hex(16) | ||||
|             self.__data['settings']['application']['api_access_token'] = secret | ||||
|  | ||||
|         # Bump the update version by running updates | ||||
|         self.run_updates() | ||||
|  | ||||
|         self.needs_write = True | ||||
|  | ||||
|         # Finally start the thread that will manage periodic data saves to JSON | ||||
| @@ -238,12 +244,16 @@ class ChangeDetectionStore: | ||||
|         import pathlib | ||||
|  | ||||
|         self.__data['watching'][uuid].update({ | ||||
|                 'last_checked': 0, | ||||
|                 'browser_steps_last_error_step' : None, | ||||
|                 'check_count': 0, | ||||
|                 'fetch_time' : 0.0, | ||||
|                 'has_ldjson_price_data': None, | ||||
|                 'last_checked': 0, | ||||
|                 'last_error': False, | ||||
|                 'last_notification_error': False, | ||||
|                 'last_viewed': 0, | ||||
|                 'previous_md5': False, | ||||
|                 'previous_md5_before_filters': False, | ||||
|                 'track_ldjson_price_data': None, | ||||
|             }) | ||||
|  | ||||
| @@ -350,6 +360,8 @@ class ChangeDetectionStore: | ||||
|         if write_to_disk_now: | ||||
|             self.sync_to_json() | ||||
|  | ||||
|         print("added ", url) | ||||
|  | ||||
|         return new_uuid | ||||
|  | ||||
|     def visualselector_data_is_ready(self, watch_uuid): | ||||
| @@ -621,17 +633,23 @@ class ChangeDetectionStore: | ||||
|  | ||||
|         return {} | ||||
|  | ||||
|     @property | ||||
|     def extra_browsers(self): | ||||
|         res = [] | ||||
|         p = list(filter( | ||||
|             lambda s: (s.get('browser_name') and s.get('browser_connection_url')), | ||||
|             self.__data['settings']['requests'].get('extra_browsers', []))) | ||||
|         if p: | ||||
|             for i in p: | ||||
|                 res.append(("extra_browser_"+i['browser_name'], i['browser_name'])) | ||||
|  | ||||
|         return res | ||||
|  | ||||
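A quick illustration of what the new `extra_browsers` property yields: entries missing a name or a connection URL are filtered out, and each survivor becomes a `(value, label)` pair suitable for a form select. The sample settings data below is hypothetical:

```python
# Sketch of the extra_browsers filtering above, with made-up settings data.
settings = {
    "requests": {
        "extra_browsers": [
            {"browser_name": "stealth-chrome",
             "browser_connection_url": "ws://browserless-stealth:3000"},
            # Dropped: no browser_name
            {"browser_name": "", "browser_connection_url": "ws://ignored:3000"},
        ]
    }
}

res = [("extra_browser_" + b["browser_name"], b["browser_name"])
       for b in settings["requests"].get("extra_browsers", [])
       if b.get("browser_name") and b.get("browser_connection_url")]

print(res)  # -> [('extra_browser_stealth-chrome', 'stealth-chrome')]
```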
|     def tag_exists_by_name(self, tag_name): | ||||
|         return any(v.get('title', '').lower() == tag_name.lower() for k, v in self.__data['settings']['application']['tags'].items()) | ||||
|  | ||||
|     # Run all updates | ||||
|     # IMPORTANT - Each update could be run even when they have a new install and the schema is correct | ||||
|     #             So therefore - each `update_n` should be very careful about checking if it needs to actually run | ||||
|     #             Probably we should bump the current update schema version with each tag release version? | ||||
|     def run_updates(self): | ||||
|     def get_updates_available(self): | ||||
|         import inspect | ||||
|         import shutil | ||||
|  | ||||
|         updates_available = [] | ||||
|         for i, o in inspect.getmembers(self, predicate=inspect.ismethod): | ||||
|             m = re.search(r'update_(\d+)$', i) | ||||
| @@ -639,6 +657,15 @@ class ChangeDetectionStore: | ||||
|                 updates_available.append(int(m.group(1))) | ||||
|         updates_available.sort() | ||||
|  | ||||
|         return updates_available | ||||
|  | ||||
|     # Run all updates | ||||
|     # IMPORTANT - Each update could be run even when they have a new install and the schema is correct | ||||
|     #             So therefore - each `update_n` should be very careful about checking if it needs to actually run | ||||
|     #             Probably we should bump the current update schema version with each tag release version? | ||||
|     def run_updates(self): | ||||
|         import shutil | ||||
|         updates_available = self.get_updates_available() | ||||
|         for update_n in updates_available: | ||||
|             if update_n > self.__data['settings']['application']['schema_version']: | ||||
|                 print ("Applying update_{}".format((update_n))) | ||||
| @@ -820,4 +847,14 @@ class ChangeDetectionStore: | ||||
|             if not watch.get('date_created'): | ||||
|                 self.data['watching'][uuid]['date_created'] = i | ||||
|             i+=1 | ||||
|         return | ||||
|         return | ||||
|  | ||||
|     # #1774 - protect xpath1 against migration | ||||
|     def update_14(self): | ||||
|         for awatch in self.__data["watching"]: | ||||
|             if self.__data["watching"][awatch]['include_filters']: | ||||
|                 for num, selector in enumerate(self.__data["watching"][awatch]['include_filters']): | ||||
|                     if selector.startswith('/'): | ||||
|                         self.__data["watching"][awatch]['include_filters'][num] = 'xpath1:' + selector | ||||
|                     if selector.startswith('xpath:'): | ||||
|                         self.__data["watching"][awatch]['include_filters'][num] = selector.replace('xpath:', 'xpath1:', 1) | ||||
|   | ||||
| @@ -8,10 +8,10 @@ | ||||
|     <title>Change Detection{{extra_title}}</title> | ||||
|     <link rel="alternate" type="application/rss+xml" title="Changedetection.io » Feed{% if active_tag %}- {{active_tag}}{% endif %}" href="{{ url_for('rss', tag=active_tag , token=app_rss_token)}}" > | ||||
|     <link rel="stylesheet" href="{{url_for('static_content', group='styles', filename='pure-min.css')}}" > | ||||
|     <link rel="stylesheet" href="{{url_for('static_content', group='styles', filename='styles.css')}}" > | ||||
|     <link rel="stylesheet" href="{{url_for('static_content', group='styles', filename='styles.css')}}?v={{ get_css_version() }}" > | ||||
|     {% if extra_stylesheets %} | ||||
|       {% for m in extra_stylesheets %} | ||||
|         <link rel="stylesheet" href="{{ m }}?ver=1000" > | ||||
|         <link rel="stylesheet" href="{{ m }}?ver={{ get_css_version() }}" > | ||||
|       {% endfor %} | ||||
|     {% endif %} | ||||
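`get_css_version()` above exists purely for cache-busting: the template only requires a string that changes whenever the stylesheet does. One plausible implementation — hypothetical, not necessarily how the project defines it — is a short content hash:

```python
# Hypothetical sketch of a get_css_version() cache-busting helper
import hashlib

def get_css_version(path="changedetectionio/static/styles/styles.css"):
    # Short, stable fingerprint of the stylesheet contents;
    # changes only when the file itself changes.
    with open(path, "rb") as f:
        return hashlib.md5(f.read()).hexdigest()[:8]
```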
|  | ||||
| @@ -85,6 +85,7 @@ | ||||
|               <a href="{{url_for('logout')}}" class="pure-menu-link">LOG OUT</a> | ||||
|             </li> | ||||
|           {% endif %} | ||||
|           {% if current_user.is_authenticated or not has_password %} | ||||
|           <li class="pure-menu-item pure-form" id="search-menu-item"> | ||||
|             <!-- We use GET here so it offers people a chance to set bookmarks etc --> | ||||
|             <form name="searchForm" action="" method="GET"> | ||||
| @@ -95,6 +96,7 @@ | ||||
|               </button> | ||||
|             </form> | ||||
|           </li> | ||||
|           {% endif %} | ||||
|           <li class="pure-menu-item"> | ||||
|             <button class="toggle-button" id ="toggle-light-mode" type="button" title="Toggle Light/Dark Mode"> | ||||
|               <span class="visually-hidden">Toggle light/dark mode</span> | ||||
| @@ -106,6 +108,20 @@ | ||||
|               </span> | ||||
|             </button> | ||||
|           </li> | ||||
|           <li class="pure-menu-item" id="heart-us"> | ||||
|                 <svg | ||||
|                    fill="#ff0000" | ||||
|                    class="bi bi-heart" | ||||
|                    preserveAspectRatio="xMidYMid meet" | ||||
|                    viewBox="0 0 16.9 16.1" | ||||
|                    id="svg-heart" | ||||
|                    xmlns="http://www.w3.org/2000/svg" | ||||
|                    xmlns:svg="http://www.w3.org/2000/svg"> | ||||
|                   <path id="heartpath" d="M 5.338316,0.50302766 C 0.71136983,0.50647126 -3.9576371,7.2707777 8.5004254,15.503028 23.833425,5.3700277 13.220206,-2.5384409 8.6762066,1.6475589 c -0.060791,0.054322 -0.11943,0.1110064 -0.1757812,0.1699219 -0.057,-0.059 -0.1157813,-0.116875 -0.1757812,-0.171875 C 7.4724566,0.86129334 6.4060729,0.50223298 5.338316,0.50302766 Z" | ||||
|                      style="fill:var(--color-background);fill-opacity:1;stroke:#ff0000;stroke-opacity:1" /> | ||||
|                 </svg> | ||||
|  | ||||
|           </li> | ||||
|           <li class="pure-menu-item"> | ||||
|             <a class="github-link" href="https://github.com/dgtlmoon/changedetection.io"> | ||||
|               {% include "svgs/github.svg" %} | ||||
| @@ -121,14 +137,52 @@ | ||||
|     {% endif %} | ||||
|     {% if left_sticky %} | ||||
|       <div class="sticky-tab" id="left-sticky"> | ||||
|         <a href="{{url_for('preview_page', uuid=uuid)}}">Show current snapshot</a> | ||||
|         <a href="{{url_for('preview_page', uuid=uuid)}}">Show current snapshot</a><br> | ||||
|           Visualise <strong>triggers</strong> and <strong>ignored text</strong> | ||||
|       </div> | ||||
|     {% endif %} | ||||
|     {% if right_sticky %} | ||||
|       <div class="sticky-tab" id="right-sticky">{{ right_sticky }}</div> | ||||
|     {% endif %} | ||||
|     <section class="content"> | ||||
|       <header> | ||||
|         <div id="overlay"> | ||||
|             <div class="content"> | ||||
|                 <strong>changedetection.io needs your support!</strong><br> | ||||
|                 <p> | ||||
|                     You can help us by supporting changedetection.io on these platforms: | ||||
|                 </p> | ||||
|                 <p> | ||||
|                 <ul> | ||||
|                     <li> | ||||
|                         <a href="https://alternativeto.net/software/changedetection-io/about/">Rate us at | ||||
|                         AlternativeTo.net</a> | ||||
|                     </li> | ||||
|                 <li> | ||||
|                     <a href="https://github.com/dgtlmoon/changedetection.io">Star us on GitHub</a> | ||||
|                 </li> | ||||
|                 <li> | ||||
|                     <a href="https://twitter.com/change_det_io">Follow us at Twitter/X</a> | ||||
|                 </li> | ||||
|                 <li> | ||||
|                     <a href="https://www.linkedin.com/company/changedetection-io">Check us out on LinkedIn</a> | ||||
|                 </li> | ||||
|                 <li> | ||||
|                     And tell your friends and colleagues :) | ||||
|                 </li> | ||||
|                 </ul> | ||||
|                 </p> | ||||
|                 <p> | ||||
|                     The more popular changedetection.io is, the more time we can dedicate to adding amazing features! | ||||
|                 </p> | ||||
|                 <p> | ||||
|                     Many thanks :)<br> | ||||
|                 </p> | ||||
|                 <p> | ||||
|                     <i>changedetection.io team</i> | ||||
|                 </p> | ||||
|             </div> | ||||
|         </div> | ||||
|         <header> | ||||
|         {% block header %}{% endblock %} | ||||
|       </header> | ||||
|  | ||||
|   | ||||
| @@ -13,10 +13,31 @@ | ||||
| <script src="{{url_for('static_content', group='js', filename='diff-overview.js')}}" defer></script> | ||||
|  | ||||
| <div id="settings"> | ||||
|     <h1>Differences</h1> | ||||
|     <form class="pure-form " action="" method="GET"> | ||||
|         <fieldset> | ||||
|  | ||||
|             {% if versions|length >= 1 %} | ||||
|                 <strong>Compare</strong> | ||||
|                 <del class="change"><span>from</span></del> | ||||
|                 <select id="diff-version" name="from_version" class="needs-localtime"> | ||||
|                     {% for version in versions|reverse %} | ||||
|                         <option value="{{ version }}" {% if version== from_version %} selected="" {% endif %}> | ||||
|                             {{ version }} | ||||
|                         </option> | ||||
|                     {% endfor %} | ||||
|                 </select> | ||||
|                 <ins class="change"><span>to</span></ins> | ||||
|                 <select id="current-version" name="to_version" class="needs-localtime"> | ||||
|                     {% for version in versions|reverse %} | ||||
|                         <option value="{{ version }}" {% if version== to_version %} selected="" {% endif %}> | ||||
|                             {{ version }} | ||||
|                         </option> | ||||
|                     {% endfor %} | ||||
|                 </select> | ||||
|                 <button type="submit" class="pure-button pure-button-primary">Go</button> | ||||
|             {% endif %} | ||||
|         </fieldset> | ||||
|         <fieldset> | ||||
|             <strong>Style</strong> | ||||
|             <label for="diffWords" class="pure-checkbox"> | ||||
|                 <input type="radio" name="diff_type" id="diffWords" value="diffWords"> Words</label> | ||||
|             <label for="diffLines" class="pure-checkbox"> | ||||
| @@ -26,32 +47,20 @@ | ||||
|                 <input type="radio" name="diff_type" id="diffChars" value="diffChars"> Chars</label> | ||||
|             <!-- @todo - when mimetype is JSON, select this by default? --> | ||||
|             <label for="diffJson" class="pure-checkbox"> | ||||
|                 <input type="radio" name="diff_type" id="diffJson" value="diffJson" > JSON</label> | ||||
|                 <input type="radio" name="diff_type" id="diffJson" value="diffJson"> JSON</label> | ||||
|  | ||||
|             {% if versions|length >= 1 %} | ||||
|             <label for="diff-version">Compare newest (<span id="current-v-date"></span>) with</label> | ||||
|             <select id="diff-version" name="previous_version"> | ||||
|                 {% for version in versions|reverse %} | ||||
|                 <option value="{{version}}" {% if version== current_previous_version %} selected="" {% endif %}> | ||||
|                     {{version}} | ||||
|                 </option> | ||||
|                 {% endfor %} | ||||
|             </select> | ||||
|             <button type="submit" class="pure-button pure-button-primary">Go</button> | ||||
|             {% endif %} | ||||
|         </fieldset> | ||||
|     </form> | ||||
|     <del>Removed text</del> | ||||
|     <ins>Inserted Text</ins> | ||||
|     <span> | ||||
|             <span> | ||||
|         <!-- https://github.com/kpdecker/jsdiff/issues/389 ? --> | ||||
|         <label for="ignoreWhitespace" class="pure-checkbox" id="label-diff-ignorewhitespace"> | ||||
|             <input type="checkbox" id="ignoreWhitespace" name="ignoreWhitespace" > Ignore Whitespace</label> | ||||
|             <input type="checkbox" id="ignoreWhitespace" name="ignoreWhitespace"> Ignore Whitespace</label> | ||||
|     </span> | ||||
|         </fieldset> | ||||
|     </form> | ||||
|  | ||||
| </div> | ||||
|  | ||||
| <div id="diff-jump"> | ||||
|     <a onclick="next_diff();">Jump</a> | ||||
|     <a id="jump-next-diff" title="Jump to next difference">Jump</a> | ||||
| </div> | ||||
|  | ||||
| <script src="{{url_for('static_content', group='js', filename='tabs.js')}}" defer></script> | ||||
| @@ -79,8 +88,6 @@ | ||||
|     </div> | ||||
|  | ||||
|      <div class="tab-pane-inner" id="text"> | ||||
|          <div class="tip">Pro-tip: Use <strong>show current snapshot</strong> tab to visualise what will be ignored, highlight text to add to ignore filters</div> | ||||
|  | ||||
|          {% if password_enabled_and_share_is_off %} | ||||
|            <div class="tip">Pro-tip: You can enable <strong>"share access when password is enabled"</strong> from settings</div> | ||||
|          {% endif %} | ||||
| @@ -91,8 +98,8 @@ | ||||
|              <tbody> | ||||
|              <tr> | ||||
|                  <!-- just proof of concept copied straight from github.com/kpdecker/jsdiff --> | ||||
|                  <td id="a" style="display: none;">{{previous}}</td> | ||||
|                  <td id="b" style="display: none;">{{newest}}</td> | ||||
|                  <td id="a" style="display: none;">{{from_version_file_contents}}</td> | ||||
|                  <td id="b" style="display: none;">{{to_version_file_contents}}</td> | ||||
|                  <td id="diff-col"> | ||||
|                      <span id="result" class="highlightable-filter"></span> | ||||
|                  </td> | ||||
|   | ||||
| @@ -4,8 +4,10 @@ | ||||
| {% from '_common_fields.jinja' import render_common_settings_form %} | ||||
| <script src="{{url_for('static_content', group='js', filename='tabs.js')}}" defer></script> | ||||
| <script> | ||||
|  | ||||
|     const browser_steps_available_screenshots=JSON.parse('{{ watch.get_browsersteps_available_screenshots|tojson }}'); | ||||
|     const browser_steps_config=JSON.parse('{{ browser_steps_config|tojson }}'); | ||||
|     const browser_steps_fetch_screenshot_image_url="{{url_for('browser_steps.browser_steps_fetch_screenshot_image', uuid=uuid)}}"; | ||||
|     const browser_steps_last_error_step={{ watch.browser_steps_last_error_step|tojson }}; | ||||
|     const browser_steps_start_url="{{url_for('browser_steps.browsersteps_start_session', uuid=uuid)}}"; | ||||
|     const browser_steps_sync_url="{{url_for('browser_steps.browsersteps_ui_update', uuid=uuid)}}"; | ||||
| {% if emailprefix %} | ||||
| @@ -49,6 +51,7 @@ | ||||
|             <li class="tab"><a href="#restock">Restock Detection</a></li> | ||||
|             {% endif %} | ||||
|             <li class="tab"><a href="#notifications">Notifications</a></li> | ||||
|             <li class="tab"><a href="#stats">Stats</a></li> | ||||
|         </ul> | ||||
|     </div> | ||||
|  | ||||
| @@ -109,7 +112,7 @@ | ||||
|                         <span class="pure-form-message-inline"> | ||||
|                             <p>Use the <strong>Basic</strong> method (default) where your watched site doesn't need Javascript to render.</p> | ||||
|                             <p>The <strong>Chrome/Javascript</strong> method requires a network connection to a running WebDriver+Chrome server, set by the ENV var 'WEBDRIVER_URL'. </p> | ||||
|                             Tip: <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration#brightdata-proxy-support">Connect using BrightData Proxies, find out more here.</a> | ||||
|                             Tip: <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration#brightdata-proxy-support">Connect using Bright Data and Oxylabs Proxies, find out more here.</a> | ||||
|                         </span> | ||||
|                     </div> | ||||
|                 {% if form.proxy %} | ||||
| @@ -287,11 +290,12 @@ xpath://body/div/span[contains(@class, 'example-class')]", | ||||
|                                 {% endif %} | ||||
|                             </ul> | ||||
|                         </li> | ||||
|                         <li>XPath - Limit text to this XPath rule, simply start with a forward-slash, | ||||
|                         <li>XPath - Limit text to this XPath rule, simply start with a forward-slash. To use XPath explicitly, or when the rule starts with an XPath function, prefix with <code>xpath:</code> | ||||
|                             <ul> | ||||
|                                 <li>Example:  <code>//*[contains(@class, 'sametext')]</code> or <code>xpath://*[contains(@class, 'sametext')]</code>, <a | ||||
|                                 <li>Example:  <code>//*[contains(@class, 'sametext')]</code> or <code>xpath:count(//*[contains(@class, 'sametext')])</code>, <a | ||||
|                                 href="http://xpather.com/" target="new">test your XPath here</a></li> | ||||
|                                 <li>Example: Get all titles from an RSS feed <code>//title/text()</code></li> | ||||
|                                 <li>To use XPath1.0: Prefix with <code>xpath1:</code></li> | ||||
|                             </ul> | ||||
|                             </li> | ||||
|                     </ul> | ||||
| @@ -378,15 +382,16 @@ Unavailable") }} | ||||
|                         {{ render_field(form.extract_text, rows=5, placeholder="\d+ online") }} | ||||
|                         <span class="pure-form-message-inline"> | ||||
|                     <ul> | ||||
|                         <li>Extracts text in the final output (line by line) after other filters using regular expressions; | ||||
|                         <li>Extracts text in the final output (line by line) after other filters using regular expressions or string match; | ||||
|                             <ul> | ||||
|                                 <li>Regular expression ‐ example <code>/reports.+?2022/i</code></li> | ||||
|                                 <li>Don't forget to consider the white-space at the start of a line <code>/.+?reports.+?2022/i</code></li> | ||||
|                                 <li>Use <code>(?aiLmsux)</code> type inline flags in the pattern (more <a href="https://docs.python.org/3/library/re.html#index-15">information here</a>)<br></li> | ||||
|                                 <li>Keyword match ‐ example <code>Out of stock</code></li> | ||||
|                                 <li>Use groups to extract just that text ‐ example <code>/reports.+?(\d+)/i</code> returns a list of years only</li> | ||||
|                             </ul> | ||||
|                         </li> | ||||
|                         <li>One line per regular-expression/ string match</li> | ||||
|                         <li>One line per regular-expression/string match</li> | ||||
|                     </ul> | ||||
|                         </span> | ||||
|                     </div> | ||||
| @@ -440,7 +445,35 @@ Unavailable") }} | ||||
|                 </fieldset> | ||||
|             </div> | ||||
|             {% endif %} | ||||
|  | ||||
|             <div class="tab-pane-inner" id="stats"> | ||||
|                 <div class="pure-control-group"> | ||||
|                     <style> | ||||
|                     #stats-table tr > td:first-child { | ||||
|                         font-weight: bold; | ||||
|                     } | ||||
|                     </style> | ||||
|                     <table class="pure-table" id="stats-table"> | ||||
|                         <tbody> | ||||
|                         <tr> | ||||
|                             <td>Check count</td> | ||||
|                             <td>{{ "{:,}".format( watch.check_count) }}</td> | ||||
|                         </tr> | ||||
|                         <tr> | ||||
|                             <td>Consecutive filter failures</td> | ||||
|                             <td>{{ "{:,}".format( watch.consecutive_filter_failures) }}</td> | ||||
|                         </tr> | ||||
|                         <tr> | ||||
|                             <td>History length</td> | ||||
|                             <td>{{ "{:,}".format(watch.history|length) }}</td> | ||||
|                         </tr> | ||||
|                         <tr> | ||||
|                             <td>Last fetch time</td> | ||||
|                             <td>{{ watch.fetch_time }}s</td> | ||||
|                         </tr> | ||||
|                         </tbody> | ||||
|                     </table> | ||||
|                 </div> | ||||
|             </div> | ||||
|             <div id="actions"> | ||||
|                 <div class="pure-control-group"> | ||||
|                     {{ render_button(form.save_button) }} | ||||
|   | ||||
| @@ -8,11 +8,12 @@ | ||||
|         <ul> | ||||
|             <li class="tab" id=""><a href="#url-list">URL List</a></li> | ||||
|             <li class="tab"><a href="#distill-io">Distill.io</a></li> | ||||
|             <li class="tab"><a href="#xlsx">.XLSX & Wachete</a></li> | ||||
|         </ul> | ||||
|     </div> | ||||
|  | ||||
|     <div class="box-wrap inner"> | ||||
|         <form class="pure-form pure-form-aligned" action="{{url_for('import_page')}}" method="POST"> | ||||
|         <form class="pure-form" action="{{url_for('import_page')}}" method="POST" enctype="multipart/form-data"> | ||||
|             <input type="hidden" name="csrf_token" value="{{ csrf_token() }}"> | ||||
|             <div class="tab-pane-inner" id="url-list"> | ||||
|                     <legend> | ||||
| @@ -79,6 +80,42 @@ | ||||
| " rows="25">{{ original_distill_json }}</textarea> | ||||
|  | ||||
|             </div> | ||||
|             <div class="tab-pane-inner" id="xlsx"> | ||||
|             <fieldset> | ||||
|                 <div class="pure-control-group"> | ||||
|                 {{ render_field(form.xlsx_file, class="processor") }} | ||||
|                 </div> | ||||
|                 <div class="pure-control-group"> | ||||
|                     {{ render_field(form.file_mapping, class="processor") }} | ||||
|                 </div> | ||||
|             </fieldset> | ||||
|                 <div class="pure-control-group"> | ||||
|                 <span class="pure-form-message-inline"> | ||||
|                     Table mapping spreadsheet columns to data types, used with the <strong>Custom mapping</strong> file mapping type. | ||||
|                 </span> | ||||
|                     <table style="border: 1px solid #aaa; padding: 0.5rem; border-radius: 4px;"> | ||||
|                         <tr> | ||||
|                             <td><strong>Column #</strong></td> | ||||
|                             {% for n in range(4) %} | ||||
|                                 <td><input type="number" name="custom_xlsx[col_{{n}}]" style="width: 4rem;" min="1"></td> | ||||
|                             {%  endfor %} | ||||
|                         </tr> | ||||
|                         <tr> | ||||
|                             <td><strong>Type</strong></td> | ||||
|                             {% for n in range(4) %} | ||||
|                                 <td><select name="custom_xlsx[col_type_{{n}}]"> | ||||
|                                     <option value="" style="color: #aaa"> -- none --</option> | ||||
|                                     <option value="url">URL</option> | ||||
|                                     <option value="title">Title</option> | ||||
|                                     <option value="include_filter">CSS/xPath filter</option> | ||||
|                                     <option value="tag">Group / Tag name(s)</option> | ||||
|                                     <option value="interval_minutes">Recheck time (minutes)</option> | ||||
|                                 </select></td> | ||||
|                             {%  endfor %} | ||||
|                         </tr> | ||||
|                     </table> | ||||
|                 </div> | ||||
|             </div> | ||||
|             <button type="submit" class="pure-button pure-input-1-2 pure-button-primary">Import</button> | ||||
|         </form> | ||||
|  | ||||
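On the server side, a custom mapping like the one in the table above could be consumed roughly as follows. This is a hedged sketch, assuming `openpyxl` for the .xlsx parsing; the helper name is hypothetical, and column numbers are 1-based to match the form:

```python
# Hypothetical sketch of consuming the custom_xlsx column->type mapping
from openpyxl import load_workbook

def rows_to_watches(xlsx_path, mapping):
    """Yield one watch dict per spreadsheet row, via a 1-based column->type mapping."""
    # mapping example (mirrors the form fields): {1: 'url', 2: 'title', 3: 'tag'}
    wb = load_workbook(xlsx_path, read_only=True)
    for sheet in wb:
        for row in sheet.iter_rows():
            watch = {}
            for col_n, kind in mapping.items():
                if col_n <= len(row) and row[col_n - 1].value is not None:
                    watch[kind] = row[col_n - 1].value
            if watch.get('url'):  # skip rows without a usable URL
                yield watch

# Usage:
# for watch in rows_to_watches("watches.xlsx", {1: 'url', 2: 'title'}):
#     print(watch)
```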
|   | ||||
| @@ -109,7 +109,7 @@ | ||||
|                         <p>The <strong>Chrome/Javascript</strong> method requires a network connection to a running WebDriver+Chrome server, set by the ENV var 'WEBDRIVER_URL'. </p> | ||||
|                     </span> | ||||
|                     <br> | ||||
|                     Tip: <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration#brightdata-proxy-support">Connect using BrightData Proxies, find out more here.</a> | ||||
|                     Tip: <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration#brightdata-proxy-support">Connect using Bright Data and Oxylabs Proxies, find out more here.</a> | ||||
|                 </div> | ||||
|                 <fieldset class="pure-group" id="webdriver-override-options"> | ||||
|                     <div class="pure-form-message-inline"> | ||||
| @@ -178,6 +178,9 @@ nav | ||||
|                         <span style="display:none;" id="api-key-copy" >copy</span> | ||||
|                     </div> | ||||
|                 </div> | ||||
|                 <div class="pure-control-group"> | ||||
|                     <a href="{{url_for('settings_reset_api_key')}}" class="pure-button button-small button-cancel">Regenerate API key</a> | ||||
|                 </div> | ||||
|             </div> | ||||
|             <div class="tab-pane-inner" id="proxies"> | ||||
|                 <div id="recommended-proxy"> | ||||
| @@ -227,9 +230,14 @@ nav | ||||
|                 </p> | ||||
|                <p><strong>Tip</strong>: "Residential" and "Mobile" proxy types can be more successful than "Data Center" for blocked websites.</p> | ||||
|  | ||||
|                 <div class="pure-control-group"> | ||||
|                 <div class="pure-control-group" id="extra-proxies-setting"> | ||||
|                 {{ render_field(form.requests.form.extra_proxies) }} | ||||
|                 <span class="pure-form-message-inline">"Name" will be used for selecting the proxy in the Watch Edit settings</span> | ||||
|                 <span class="pure-form-message-inline">"Name" will be used for selecting the proxy in the Watch Edit settings</span><br> | ||||
|                 <span class="pure-form-message-inline">SOCKS5 proxies with authentication are only supported with 'plain requests' fetcher, for other fetchers you should whitelist the IP access instead</span> | ||||
|                 </div> | ||||
|                 <div class="pure-control-group" id="extra-browsers-setting"> | ||||
|                     <span class="pure-form-message-inline"><i>Extra Browsers</i> allow changedetection.io to communicate with a different web-browser.</span><br> | ||||
|                   {{ render_field(form.requests.form.extra_browsers) }} | ||||
|                 </div> | ||||
|             </div> | ||||
|             <div id="actions"> | ||||
|   | ||||
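The message above restricts authenticated SOCKS5 to the 'plain requests' fetcher. With the requests library (plus the PySocks extra) that amounts to passing the credentialled socks5:// URL straight through as the proxies mapping; a hedged sketch, where the host and credentials are placeholders:

    import requests

    # Sketch only: with 'pip install requests[socks]' the credentials can sit
    # directly in the proxy URL, which is why the plain requests fetcher can
    # handle authenticated SOCKS5. Host and credentials are placeholders.
    proxy_url = 'socks5://proxy_user123:proxy_pass123@socks5proxy:1080'
    proxies = {'http': proxy_url, 'https': proxy_url}
    r = requests.get('https://changedetection.io/CHANGELOG.txt', proxies=proxies, timeout=30)
    print(r.status_code)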
| @@ -1,3 +1,6 @@ | ||||
| <svg class="octicon octicon-mark-github v-align-middle" height="32" viewbox="0 0 16 16" version="1.1" width="32" aria-hidden="true"> | ||||
|   <path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0016 8c0-4.42-3.58-8-8-8z"></path> | ||||
| <svg class="octicon octicon-mark-github v-align-middle"  viewbox="0 0 16 16" version="1.1" aria-hidden="true"> | ||||
|     <path | ||||
|      fill-rule="evenodd" | ||||
|      d="M 8,0 C 3.58,0 0,3.58 0,8 c 0,3.54 2.29,6.53 5.47,7.59 0.4,0.07 0.55,-0.17 0.55,-0.38 0,-0.19 -0.01,-0.82 -0.01,-1.49 C 4,14.09 3.48,13.23 3.32,12.78 3.23,12.55 2.84,11.84 2.5,11.65 2.22,11.5 1.82,11.13 2.49,11.12 3.12,11.11 3.57,11.7 3.72,11.94 4.44,13.15 5.59,12.81 6.05,12.6 6.12,12.08 6.33,11.73 6.56,11.53 4.78,11.33 2.92,10.64 2.92,7.58 2.92,6.71 3.23,5.99 3.74,5.43 3.66,5.23 3.38,4.41 3.82,3.31 c 0,0 0.67,-0.21 2.2,0.82 0.64,-0.18 1.32,-0.27 2,-0.27 0.68,0 1.36,0.09 2,0.27 1.53,-1.04 2.2,-0.82 2.2,-0.82 0.44,1.1 0.16,1.92 0.08,2.12 0.51,0.56 0.82,1.27 0.82,2.15 0,3.07 -1.87,3.75 -3.65,3.95 0.29,0.25 0.54,0.73 0.54,1.48 0,1.07 -0.01,1.93 -0.01,2.2 0,0.21 0.15,0.46 0.55,0.38 A 8.013,8.013 0 0 0 16,8 C 16,3.58 12.42,0 8,0 Z" | ||||
|      id="path2" /> | ||||
| </svg> | ||||
|   | ||||
| Before Size: 749 B | After Size: 917 B | 
| @@ -82,12 +82,15 @@ | ||||
|             </tr> | ||||
|             {% endif %} | ||||
|             {% for watch in (watches|sort(attribute=sort_attribute, reverse=sort_order == 'asc'))|pagination_slice(skip=pagination.skip) %} | ||||
|  | ||||
|                 {% set is_unviewed =  watch.newest_history_key| int > watch.last_viewed and watch.history_n>=2 %} | ||||
|  | ||||
|             <tr id="{{ watch.uuid }}" | ||||
|                 class="{{ loop.cycle('pure-table-odd', 'pure-table-even') }} processor-{{ watch['processor'] }} | ||||
|                 {% if watch.last_error is defined and watch.last_error != False %}error{% endif %} | ||||
|                 {% if watch.last_notification_error is defined and watch.last_notification_error != False %}error{% endif %} | ||||
|                 {% if watch.paused is defined and watch.paused != False %}paused{% endif %} | ||||
|                 {% if watch.newest_history_key| int > watch.last_viewed and watch.history_n>=2 %}unviewed{% endif %} | ||||
|                 {% if is_unviewed %}unviewed{% endif %} | ||||
|                 {% if watch.uuid in queued_uuids %}queued{% endif %}"> | ||||
|                 <td class="inline checkbox-uuid" ><input name="uuids"  type="checkbox" value="{{ watch.uuid}} " > <span>{{ loop.index+pagination.skip }}</span></td> | ||||
|                 <td class="inline watch-controls"> | ||||
| @@ -104,8 +107,9 @@ | ||||
|  | ||||
|                     {% if watch.get_fetch_backend == "html_webdriver" | ||||
|                          or (  watch.get_fetch_backend == "system" and system_default_fetcher == 'html_webdriver'  ) | ||||
|                          or "extra_browser_" in watch.get_fetch_backend | ||||
|                     %} | ||||
|                     <img class="status-icon" src="{{url_for('static_content', group='images', filename='Google-Chrome-icon.png')}}" title="Using a chrome browser" > | ||||
|                     <img class="status-icon" src="{{url_for('static_content', group='images', filename='Google-Chrome-icon.png')}}" title="Using a Chrome browser" > | ||||
|                     {% endif %} | ||||
|  | ||||
|                     {%if watch.is_pdf  %}<img class="status-icon" src="{{url_for('static_content', group='images', filename='pdf-icon.svg')}}" title="Converting PDF to text" >{% endif %} | ||||
| @@ -119,6 +123,9 @@ | ||||
|                             <a href="{{ url_for('settings_page', uuid=watch.uuid) }}#proxies">Try adding external proxies/locations</a> | ||||
|                          | ||||
|                         {% endif %} | ||||
|                         {% if 'empty result or contain only an image' in watch.last_error %} | ||||
|                             <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Detecting-changes-in-images">more help here</a>. | ||||
|                         {% endif %} | ||||
|                     </div> | ||||
|                     {% endif %} | ||||
|                     {% if watch.last_notification_error is defined and watch.last_notification_error != False %} | ||||
| @@ -151,8 +158,8 @@ | ||||
|                     {% endfor %} | ||||
|  | ||||
|                 </td> | ||||
|                 <td class="last-checked">{{watch|format_last_checked_time|safe}}</td> | ||||
|                 <td class="last-changed">{% if watch.history_n >=2 and watch.last_changed >0 %} | ||||
|                 <td class="last-checked" data-timestamp="{{ watch.last_checked }}">{{watch|format_last_checked_time|safe}}</td> | ||||
|                 <td class="last-changed" data-timestamp="{{ watch.last_changed }}">{% if watch.history_n >=2 and watch.last_changed >0 %} | ||||
|                     {{watch.last_changed|format_timestamp_timeago}} | ||||
|                     {% else %} | ||||
|                     Not yet | ||||
| @@ -163,7 +170,13 @@ | ||||
|                        class="recheck pure-button pure-button-primary">{% if watch.uuid in queued_uuids %}Queued{% else %}Recheck{% endif %}</a> | ||||
|                     <a href="{{ url_for('edit_page', uuid=watch.uuid)}}" class="pure-button pure-button-primary">Edit</a> | ||||
|                     {% if watch.history_n >= 2 %} | ||||
|                     <a href="{{ url_for('diff_history_page', uuid=watch.uuid) }}" target="{{watch.uuid}}" class="pure-button pure-button-primary diff-link">Diff</a> | ||||
|  | ||||
|                         {%  if is_unviewed %} | ||||
|                            <a href="{{ url_for('diff_history_page', uuid=watch.uuid, from_version=watch.get_next_snapshot_key_to_last_viewed) }}" target="{{watch.uuid}}" class="pure-button pure-button-primary diff-link">Diff</a> | ||||
|                         {% else %} | ||||
|                            <a href="{{ url_for('diff_history_page', uuid=watch.uuid)}}" target="{{watch.uuid}}" class="pure-button pure-button-primary diff-link">Diff</a> | ||||
|                         {% endif %} | ||||
|  | ||||
|                     {% else %} | ||||
|                         {% if watch.history_n == 1 or (watch.history_n ==0 and watch.error_text_ctime )%} | ||||
|                             <a href="{{ url_for('preview_page', uuid=watch.uuid)}}" target="{{watch.uuid}}" class="pure-button pure-button-primary">Preview</a> | ||||
| @@ -175,13 +188,18 @@ | ||||
|             </tbody> | ||||
|         </table> | ||||
|         <ul id="post-list-buttons"> | ||||
|             {% if errored_count %} | ||||
|             <li> | ||||
|                 <a href="{{url_for('index', with_errors=1, tag=request.args.get('tag')) }}" class="pure-button button-tag button-error ">With errors ({{ errored_count }})</a> | ||||
|             </li> | ||||
|             {% endif %} | ||||
|             {% if has_unviewed %} | ||||
|             <li> | ||||
|                 <a href="{{url_for('mark_all_viewed', tag=request.args.get('tag')) }}" class="pure-button button-tag ">Mark all viewed</a> | ||||
|                 <a href="{{url_for('mark_all_viewed',with_errors=request.args.get('with_errors',0)) }}" class="pure-button button-tag ">Mark all viewed</a> | ||||
|             </li> | ||||
|             {% endif %} | ||||
|             <li> | ||||
|                <a href="{{ url_for('form_watch_checknow', tag=active_tag) }}" class="pure-button button-tag ">Recheck | ||||
|                <a href="{{ url_for('form_watch_checknow', tag=active_tag, with_errors=request.args.get('with_errors',0)) }}" class="pure-button button-tag ">Recheck | ||||
|                 all {% if active_tag%} in "{{tags[active_tag].title}}"{%endif%}</a> | ||||
|             </li> | ||||
|             <li> | ||||
|   | ||||
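The Diff button now distinguishes unviewed watches: it passes watch.get_next_snapshot_key_to_last_viewed as from_version, so the comparison starts from the last snapshot the user actually saw rather than simply the previous one. A hypothetical sketch of one plausible reading of that lookup (the real property lives on the Watch model), assuming history keys are snapshot timestamps:

    # Hypothetical sketch of the idea behind get_next_snapshot_key_to_last_viewed:
    # diff from the newest snapshot at or before last_viewed, falling back to
    # the oldest snapshot if the user has never viewed this watch.
    def from_version_for_diff(history_keys, last_viewed):
        older = [int(k) for k in history_keys if int(k) <= int(last_viewed)]
        return str(max(older)) if older else min(history_keys, key=int)

    assert from_version_for_diff(['100', '200', '300'], 150) == '100'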
changedetectionio/tests/custom_browser_url/__init__.py — 1 line, new file
							| @@ -0,0 +1 @@ | ||||
| # placeholder | ||||
| @@ -0,0 +1,89 @@ | ||||
| #!/usr/bin/python3 | ||||
| import os | ||||
|  | ||||
| from flask import url_for | ||||
| from ..util import live_server_setup, wait_for_all_checks | ||||
|  | ||||
| def do_test(client, live_server, make_test_use_extra_browser=False): | ||||
|  | ||||
|     # Grep for this string in the logs? | ||||
|     test_url = "https://changedetection.io/ci-test.html" | ||||
|     custom_browser_name = 'custom browser URL' | ||||
|  | ||||
|     # PLAYWRIGHT_DRIVER_URL needs to be set, to something like 'ws://127.0.0.1:3000?stealth=1&--disable-web-security=true' | ||||
|     assert os.getenv('PLAYWRIGHT_DRIVER_URL'), "Needs PLAYWRIGHT_DRIVER_URL set for this test" | ||||
|  | ||||
|     ##################### | ||||
|     res = client.post( | ||||
|         url_for("settings_page"), | ||||
|         data={"application-empty_pages_are_a_change": "", | ||||
|               "requests-time_between_check-minutes": 180, | ||||
|               'application-fetch_backend': "html_webdriver", | ||||
|               # browserless-custom-url is set up in .github/workflows/test-only.yml | ||||
|               # the test script run_custom_browser_url_test.sh will look for 'custom-browser-search-string' in the container logs | ||||
|               'requests-extra_browsers-0-browser_connection_url': 'ws://browserless-custom-url:3000?stealth=1&--disable-web-security=true&custom-browser-search-string=1', | ||||
|               'requests-extra_browsers-0-browser_name': custom_browser_name | ||||
|               }, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"Settings updated." in res.data | ||||
|  | ||||
|     # Add our URL to the import page | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
|         data={"urls": test_url}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"1 Imported" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     if make_test_use_extra_browser: | ||||
|  | ||||
|         # So the name should appear in the edit page under "Request" > "Fetch Method" | ||||
|         res = client.get( | ||||
|             url_for("edit_page", uuid="first"), | ||||
|             follow_redirects=True | ||||
|         ) | ||||
|         assert b'custom browser URL' in res.data | ||||
|  | ||||
|         res = client.post( | ||||
|             url_for("edit_page", uuid="first"), | ||||
|             data={ | ||||
|                   "url": test_url, | ||||
|                   "tags": "", | ||||
|                   "headers": "", | ||||
|                   'fetch_backend': f"extra_browser_{custom_browser_name}", | ||||
|                   'webdriver_js_execute_code': '' | ||||
|             }, | ||||
|             follow_redirects=True | ||||
|         ) | ||||
|  | ||||
|         assert b"Updated watch." in res.data | ||||
|         wait_for_all_checks(client) | ||||
|  | ||||
|     # Force recheck | ||||
|     res = client.get(url_for("form_watch_checknow"), follow_redirects=True) | ||||
|     assert b'1 watches queued for rechecking.' in res.data | ||||
|  | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("preview_page", uuid="first"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b'cool it works' in res.data | ||||
|  | ||||
|  | ||||
| # Requires playwright to be installed | ||||
| def test_request_via_custom_browser_url(client, live_server): | ||||
|     live_server_setup(live_server) | ||||
|     # We do this so we can grep the logs of the custom container and see if the request actually went through that container | ||||
|     do_test(client, live_server, make_test_use_extra_browser=True) | ||||
|  | ||||
|  | ||||
| def test_request_not_via_custom_browser_url(client, live_server): | ||||
|     live_server_setup(live_server) | ||||
|     # We do this so we can grep the logs of the custom container and see if the request actually went through that container | ||||
|     do_test(client, live_server, make_test_use_extra_browser=False) | ||||
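For the extra-browser path the watch's fetch backend becomes extra_browser_<name>, and the stored browser_connection_url is used as the remote browser endpoint. A hedged sketch of what that connection amounts to, assuming Playwright's connect_over_cdp against a browserless-style ws:// URL (values copied from the test above, not production config):

    from playwright.sync_api import sync_playwright

    # Sketch only: hand the configured ws:// connection string to Playwright as
    # a remote CDP endpoint; browserless-style servers accept the extra query
    # parameters seen in the test above.
    ws_url = 'ws://browserless-custom-url:3000?stealth=1&--disable-web-security=true&custom-browser-search-string=1'
    with sync_playwright() as p:
        browser = p.chromium.connect_over_cdp(ws_url)
        page = browser.new_page()
        page.goto('https://changedetection.io/ci-test.html')
        print(page.content()[:120])
        browser.close()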
| @@ -28,8 +28,6 @@ def test_fetch_webdriver_content(client, live_server): | ||||
|     ) | ||||
|  | ||||
|     assert b"1 Imported" in res.data | ||||
|     time.sleep(3) | ||||
|  | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|  | ||||
|   | ||||
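This hunk is part of a broader cleanup that replaces fixed time.sleep() calls with wait_for_all_checks(client), which polls until the check queue is idle instead of guessing a delay. The real helper lives in changedetectionio/tests/util.py; a sketch of the polling idea, with the marker string assumed:

    import time
    from flask import url_for

    # Assumed shape of wait_for_all_checks (see tests/util.py for the real
    # helper): poll the index until no watch reports being checked, rather
    # than sleeping for a fixed, flaky amount of time.
    def wait_for_all_checks(client, timeout=60):
        deadline = time.time() + timeout
        while time.time() < deadline:
            res = client.get(url_for("index"))
            if b'Checking now' not in res.data:
                return
            time.sleep(0.5)
        raise TimeoutError(f"Watches still being checked after {timeout}s")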
changedetectionio/tests/import/spreadsheet.xlsx — BIN, new file (binary file not shown)
							| @@ -2,12 +2,11 @@ | ||||
|  | ||||
| import time | ||||
| from flask import url_for | ||||
| from ..util import live_server_setup | ||||
| from ..util import live_server_setup, wait_for_all_checks | ||||
|  | ||||
|  | ||||
| def test_preferred_proxy(client, live_server): | ||||
|     time.sleep(1) | ||||
|     live_server_setup(live_server) | ||||
|     time.sleep(1) | ||||
|     url = "http://chosen.changedetection.io" | ||||
|  | ||||
|     res = client.post( | ||||
| @@ -20,7 +19,7 @@ def test_preferred_proxy(client, live_server): | ||||
|  | ||||
|     assert b"1 Imported" in res.data | ||||
|  | ||||
|     time.sleep(2) | ||||
|     wait_for_all_checks(client) | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first"), | ||||
|         data={ | ||||
| @@ -34,5 +33,5 @@ def test_preferred_proxy(client, live_server): | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"Updated watch." in res.data | ||||
|     time.sleep(2) | ||||
|     wait_for_all_checks(client) | ||||
|     # Now the request should appear in the second-squid logs | ||||
|   | ||||
| @@ -0,0 +1,6 @@ | ||||
| { | ||||
|   "socks5proxy": { | ||||
|     "label": "socks5proxy", | ||||
|     "url": "socks5://proxy_user123:proxy_pass123@socks5proxy:1080" | ||||
|   } | ||||
| } | ||||
| @@ -0,0 +1,6 @@ | ||||
| { | ||||
|   "socks5proxy": { | ||||
|     "label": "socks5proxy", | ||||
|     "url": "socks5://socks5proxy-noauth:1080" | ||||
|   } | ||||
| } | ||||
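Both proxies.json variants above share one shape: a key per proxy carrying a human-readable label and a url. A minimal sketch of consuming the file into a requests-style proxies mapping; the path matches the -v mount used by run_proxy_tests.sh:

    import json

    # Minimal sketch: load the proxies.json shown above and turn one entry into
    # a requests-style proxies dict. The path matches the run_proxy_tests.sh mount.
    with open('/app/changedetectionio/test-datastore/proxies.json') as f:
        extra_proxies = json.load(f)

    entry = extra_proxies['socks5proxy']
    proxies = {'http': entry['url'], 'https': entry['url']}
    print(entry['label'], proxies)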
changedetectionio/tests/proxy_socks5/test_socks5_proxy.py — 63 lines, new file
							| @@ -0,0 +1,63 @@ | ||||
| #!/usr/bin/python3 | ||||
| import os | ||||
| import time | ||||
| from flask import url_for | ||||
| from changedetectionio.tests.util import live_server_setup, wait_for_all_checks | ||||
|  | ||||
|  | ||||
| def test_socks5(client, live_server): | ||||
|     live_server_setup(live_server) | ||||
|  | ||||
|     # Setup a proxy | ||||
|     res = client.post( | ||||
|         url_for("settings_page"), | ||||
|         data={ | ||||
|             "requests-time_between_check-minutes": 180, | ||||
|             "application-ignore_whitespace": "y", | ||||
|             "application-fetch_backend": "html_requests", | ||||
|             # set in .github/workflows/test-only.yml | ||||
|             "requests-extra_proxies-0-proxy_url": "socks5://proxy_user123:proxy_pass123@socks5proxy:1080", | ||||
|             "requests-extra_proxies-0-proxy_name": "socks5proxy", | ||||
|         }, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"Settings updated." in res.data | ||||
|  | ||||
|     test_url = "https://changedetection.io/CHANGELOG.txt?socks-test-tag=" + os.getenv('SOCKSTEST', '') | ||||
|  | ||||
|     res = client.post( | ||||
|         url_for("form_quick_watch_add"), | ||||
|         data={"url": test_url, "tags": '', 'edit_and_watch_submit_button': 'Edit > Watch'}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"Watch added in Paused state, saving will unpause" in res.data | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("edit_page", uuid="first", unpause_on_save=1), | ||||
|     ) | ||||
|     # check the proxy is offered as expected | ||||
|     assert b'ui-0socks5proxy' in res.data | ||||
|  | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first", unpause_on_save=1), | ||||
|         data={ | ||||
|             "include_filters": "", | ||||
|             "fetch_backend": 'html_webdriver' if os.getenv('PLAYWRIGHT_DRIVER_URL') else 'html_requests', | ||||
|             "headers": "", | ||||
|             "proxy": "ui-0socks5proxy", | ||||
|             "tags": "", | ||||
|             "url": test_url, | ||||
|         }, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"unpaused" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("preview_page", uuid="first"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     # Should see the proper string | ||||
|     assert "+0200:".encode('utf-8') in res.data | ||||
| @@ -0,0 +1,52 @@ | ||||
| #!/usr/bin/python3 | ||||
| import os | ||||
| import time | ||||
| from flask import url_for | ||||
| from changedetectionio.tests.util import live_server_setup, wait_for_all_checks | ||||
|  | ||||
|  | ||||
| # proxies.json should already be mounted by run_proxy_tests.sh: | ||||
| # -v `pwd`/tests/proxy_socks5/proxies.json-example:/app/changedetectionio/test-datastore/proxies.json | ||||
| def test_socks5_from_proxiesjson_file(client, live_server): | ||||
|     live_server_setup(live_server) | ||||
|  | ||||
|     test_url = "https://changedetection.io/CHANGELOG.txt?socks-test-tag=" + os.getenv('SOCKSTEST', '') | ||||
|  | ||||
|     res = client.get(url_for("settings_page")) | ||||
|     assert b'name="requests-proxy" type="radio" value="socks5proxy"' in res.data | ||||
|  | ||||
|     res = client.post( | ||||
|         url_for("form_quick_watch_add"), | ||||
|         data={"url": test_url, "tags": '', 'edit_and_watch_submit_button': 'Edit > Watch'}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"Watch added in Paused state, saving will unpause" in res.data | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("edit_page", uuid="first", unpause_on_save=1), | ||||
|     ) | ||||
|     # check the proxy is offered as expected | ||||
|     assert b'name="proxy" type="radio" value="socks5proxy"' in res.data | ||||
|  | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first", unpause_on_save=1), | ||||
|         data={ | ||||
|             "include_filters": "", | ||||
|             "fetch_backend": 'html_webdriver' if os.getenv('PLAYWRIGHT_DRIVER_URL') else 'html_requests', | ||||
|             "headers": "", | ||||
|             "proxy": "socks5proxy", | ||||
|             "tags": "", | ||||
|             "url": test_url, | ||||
|         }, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"unpaused" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("preview_page", uuid="first"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     # Should see the proper string | ||||
|     assert "+0200:".encode('utf-8') in res.data | ||||
changedetectionio/tests/test2.pdf — BIN, new file (binary file not shown)
							| @@ -1,4 +1,4 @@ | ||||
| from . util import live_server_setup, extract_UUID_from_client | ||||
| from .util import live_server_setup, extract_UUID_from_client, wait_for_all_checks | ||||
| from flask import url_for | ||||
| import time | ||||
|  | ||||
| @@ -19,10 +19,16 @@ def test_check_access_control(app, client, live_server): | ||||
|         ) | ||||
|  | ||||
|         assert b"1 Imported" in res.data | ||||
|         time.sleep(2) | ||||
|         res = client.get(url_for("form_watch_checknow"), follow_redirects=True) | ||||
|         time.sleep(3) | ||||
|         # using wait_for_all_checks(client) here causes a 'Popped wrong request context.' error | ||||
|         #wait_for_all_checks(client) | ||||
|  | ||||
|         res = c.get(url_for("form_watch_checknow"), follow_redirects=True) | ||||
|         assert b'1 watches queued for rechecking.' in res.data | ||||
|         time.sleep(2) | ||||
|         time.sleep(3) | ||||
|         # using wait_for_all_checks(client) here causes a 'Popped wrong request context.' error | ||||
|         #wait_for_all_checks(client) | ||||
|  | ||||
|  | ||||
|         # Enable password check and diff page access bypass | ||||
|         res = c.post( | ||||
| @@ -42,7 +48,7 @@ def test_check_access_control(app, client, live_server): | ||||
|         assert b"Login" in res.data | ||||
|  | ||||
|         # The diff page should return something valid when logged out | ||||
|         res = client.get(url_for("diff_history_page", uuid="first")) | ||||
|         res = c.get(url_for("diff_history_page", uuid="first")) | ||||
|         assert b'Random content' in res.data | ||||
|  | ||||
|         # Check wrong password does not let us in | ||||
| @@ -83,6 +89,8 @@ def test_check_access_control(app, client, live_server): | ||||
|         res = c.get(url_for("logout"), | ||||
|             follow_redirects=True) | ||||
|  | ||||
|         assert b"Login" in res.data | ||||
|  | ||||
|         res = c.get(url_for("settings_page"), | ||||
|             follow_redirects=True) | ||||
|  | ||||
| @@ -160,5 +168,5 @@ def test_check_access_control(app, client, live_server): | ||||
|         assert b"Login" in res.data | ||||
|  | ||||
|         # The diff page should return something valid when logged out | ||||
|         res = client.get(url_for("diff_history_page", uuid="first")) | ||||
|         res = c.get(url_for("diff_history_page", uuid="first")) | ||||
|         assert b'Random content' not in res.data | ||||
|   | ||||
| @@ -2,7 +2,8 @@ | ||||
|  | ||||
| import time | ||||
| from flask import url_for | ||||
| from .util import live_server_setup, extract_UUID_from_client, extract_api_key_from_UI | ||||
| from .util import live_server_setup, extract_UUID_from_client, extract_api_key_from_UI, wait_for_all_checks | ||||
|  | ||||
|  | ||||
| def set_response_with_ldjson(): | ||||
|     test_return_data = """<html> | ||||
| @@ -27,7 +28,7 @@ def set_response_with_ldjson(): | ||||
|            "description":"You dont need it", | ||||
|            "mpn":"111111", | ||||
|            "sku":"22222", | ||||
|            "offers":{ | ||||
|            "Offers":{ | ||||
|               "@type":"AggregateOffer", | ||||
|               "lowPrice":8097000, | ||||
|               "highPrice":8099900, | ||||
| @@ -75,12 +76,11 @@ def set_response_without_ldjson(): | ||||
|         f.write(test_return_data) | ||||
|     return None | ||||
|  | ||||
| # actually only really used by the distill.io importer, but could be handy too | ||||
| def test_check_ldjson_price_autodetect(client, live_server): | ||||
| def test_setup(client, live_server): | ||||
|     live_server_setup(live_server) | ||||
|  | ||||
|     # Give the endpoint time to spin up | ||||
|     time.sleep(1) | ||||
| # actually only really used by the distill.io importer, but could be handy too | ||||
| def test_check_ldjson_price_autodetect(client, live_server): | ||||
|  | ||||
|     set_response_with_ldjson() | ||||
|  | ||||
| @@ -92,7 +92,7 @@ def test_check_ldjson_price_autodetect(client, live_server): | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|     time.sleep(3) | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     # Should get a notice that it's available | ||||
|     res = client.get(url_for("index")) | ||||
| @@ -102,11 +102,11 @@ def test_check_ldjson_price_autodetect(client, live_server): | ||||
|     uuid = extract_UUID_from_client(client) | ||||
|  | ||||
|     client.get(url_for('price_data_follower.accept', uuid=uuid, follow_redirects=True)) | ||||
|     time.sleep(2) | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     # Trigger a check | ||||
|     client.get(url_for("form_watch_checknow"), follow_redirects=True) | ||||
|     time.sleep(2) | ||||
|     wait_for_all_checks(client) | ||||
|     # Offer should be gone | ||||
|     res = client.get(url_for("index")) | ||||
|     assert b'Embedded price data' not in res.data | ||||
| @@ -138,9 +138,97 @@ def test_check_ldjson_price_autodetect(client, live_server): | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|     time.sleep(3) | ||||
|     wait_for_all_checks(client) | ||||
|     res = client.get(url_for("index")) | ||||
|     assert b'ldjson-price-track-offer' not in res.data | ||||
|      | ||||
|     ########################################################################################## | ||||
|     client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|  | ||||
|  | ||||
| def _test_runner_check_bad_format_ignored(live_server, client, has_ldjson_price_data): | ||||
|  | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
|         data={"urls": test_url}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     for k,v in client.application.config.get('DATASTORE').data['watching'].items(): | ||||
|         assert v.get('last_error') == False | ||||
|         assert v.get('has_ldjson_price_data') == has_ldjson_price_data | ||||
|  | ||||
|  | ||||
|     ########################################################################################## | ||||
|     client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|  | ||||
|  | ||||
| def test_bad_ldjson_is_correctly_ignored(client, live_server): | ||||
|     #live_server_setup(live_server) | ||||
|     test_return_data = """ | ||||
|             <html> | ||||
|             <head> | ||||
|                 <script type="application/ld+json"> | ||||
|                     { | ||||
|                         "@context": "http://schema.org", | ||||
|                         "@type": ["Product", "SubType"], | ||||
|                         "name": "My test product", | ||||
|                         "description": "", | ||||
|                         "offers": { | ||||
|                             "note" : "You can see the case-insensitive OffERS key, it should work", | ||||
|                             "@type": "Offer", | ||||
|                             "offeredBy": { | ||||
|                                 "@type": "Organization", | ||||
|                                 "name":"Person", | ||||
|                                 "telephone":"+1 999 999 999" | ||||
|                             }, | ||||
|                             "price": "1", | ||||
|                             "priceCurrency": "EUR", | ||||
|                             "url": "/some/url" | ||||
|                         } | ||||
|                     } | ||||
|                 </script> | ||||
|             </head> | ||||
|             <body> | ||||
|             <div class="yes">Some extra stuff</div> | ||||
|             </body></html> | ||||
|      """ | ||||
|     with open("test-datastore/endpoint-content.txt", "w") as f: | ||||
|         f.write(test_return_data) | ||||
|  | ||||
|     _test_runner_check_bad_format_ignored(live_server=live_server, client=client, has_ldjson_price_data=True) | ||||
|     test_return_data = """ | ||||
|             <html> | ||||
|             <head> | ||||
|                 <script type="application/ld+json"> | ||||
|                     { | ||||
|                         "@context": "http://schema.org", | ||||
|                         "@type": ["Product", "SubType"], | ||||
|                         "name": "My test product", | ||||
|                         "description": "", | ||||
|                         "BrokenOffers": { | ||||
|                             "@type": "Offer", | ||||
|                             "offeredBy": { | ||||
|                                 "@type": "Organization", | ||||
|                                 "name":"Person", | ||||
|                                 "telephone":"+1 999 999 999" | ||||
|                             }, | ||||
|                             "price": "1", | ||||
|                             "priceCurrency": "EUR", | ||||
|                             "url": "/some/url" | ||||
|                         } | ||||
|                     } | ||||
|                 </script> | ||||
|             </head> | ||||
|             <body> | ||||
|             <div class="yes">Some extra stuff</div> | ||||
|             </body></html> | ||||
|      """ | ||||
|     with open("test-datastore/endpoint-content.txt", "w") as f: | ||||
|         f.write(test_return_data) | ||||
|  | ||||
|     _test_runner_check_bad_format_ignored(live_server=live_server, client=client, has_ldjson_price_data=False) | ||||
|  | ||||
|   | ||||
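The first fixture capitalises the key as Offers (its embedded note says the lookup should be case-insensitive), while the second renames it to BrokenOffers, which must not match. A sketch of a check consistent with both fixtures; the function name is hypothetical:

    # Hypothetical check consistent with the fixtures above: match the 'offers'
    # key case-insensitively, but only on exact (lowercased) equality, so
    # 'Offers' and 'oFFers' pass while 'BrokenOffers' does not.
    def has_ldjson_offers(ldjson: dict) -> bool:
        return any(k.lower() == 'offers' for k in ldjson)

    assert has_ldjson_offers({'Offers': {'@type': 'Offer'}})
    assert not has_ldjson_offers({'BrokenOffers': {'@type': 'Offer'}})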
| @@ -89,7 +89,7 @@ def test_check_basic_change_detection_functionality(client, live_server): | ||||
|  | ||||
|     # Following the 'diff' link, it should no longer display as 'unviewed' even after we recheck it a few times | ||||
|     res = client.get(url_for("diff_history_page", uuid="first")) | ||||
|     assert b'Compare newest' in res.data | ||||
|     assert b'selected=""' in res.data, "Confirm diff history page loaded" | ||||
|  | ||||
|     # Check the [preview] pulls the right one | ||||
|     res = client.get( | ||||
|   | ||||
| @@ -2,7 +2,7 @@ | ||||
|  | ||||
| import time | ||||
| from flask import url_for | ||||
| from . util import live_server_setup | ||||
| from .util import live_server_setup, wait_for_all_checks | ||||
|  | ||||
| from ..html_tools import * | ||||
|  | ||||
| @@ -176,3 +176,77 @@ def test_check_multiple_filters(client, live_server): | ||||
|     assert b"Blob A" in res.data # CSS was ok | ||||
|     assert b"Blob B" in res.data # xPath was ok | ||||
|     assert b"Blob C" not in res.data # Should not be included | ||||
|  | ||||
| # The filter exists, but did not contain anything useful | ||||
| # Mainly used when the filter contains just an IMG, this can happen when someone selects an image in the visual-selector | ||||
| # Tests that the fetcher can throw a "ReplyWithContentButNoText" exception after applying the filter and extracting text | ||||
| def test_filter_is_empty_help_suggestion(client, live_server): | ||||
|     #live_server_setup(live_server) | ||||
|  | ||||
|     include_filters = "#blob-a" | ||||
|  | ||||
|     with open("test-datastore/endpoint-content.txt", "w") as f: | ||||
|         f.write("""<html><body> | ||||
|          <div id="blob-a"> | ||||
|            <img src="something.jpg"> | ||||
|          </div> | ||||
|          </body> | ||||
|          </html> | ||||
|         """) | ||||
|  | ||||
|  | ||||
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
|         data={"urls": test_url}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     # Goto the edit page, add our ignore text | ||||
|     # Add our URL to the import page | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first"), | ||||
|         data={"include_filters": include_filters, | ||||
|               "url": test_url, | ||||
|               "tags": "", | ||||
|               "headers": "", | ||||
|               'fetch_backend': "html_requests"}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"Updated watch." in res.data | ||||
|  | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("index"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b'empty result or contain only an image' in res.data | ||||
|  | ||||
|  | ||||
|     ### Just an empty selector, no image | ||||
|  | ||||
|     with open("test-datastore/endpoint-content.txt", "w") as f: | ||||
|         f.write("""<html><body> | ||||
|          <div id="blob-a"> | ||||
|            <!-- doo doo --> | ||||
|          </div> | ||||
|          </body> | ||||
|          </html> | ||||
|         """) | ||||
|  | ||||
|     res = client.get(url_for("form_watch_checknow"), follow_redirects=True) | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("index"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b'empty result or contain only an image' not in res.data | ||||
|     assert b'but contained no usable text' in res.data | ||||
|   | ||||
| @@ -24,7 +24,7 @@ def test_check_extract_text_from_diff(client, live_server): | ||||
|     ) | ||||
|  | ||||
|     assert b"1 Imported" in res.data | ||||
|     time.sleep(1) | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     # Load in 5 different numbers/changes | ||||
|     last_date="" | ||||
|   | ||||
| @@ -2,7 +2,7 @@ | ||||
|  | ||||
| import time | ||||
| from flask import url_for | ||||
| from .util import live_server_setup | ||||
| from .util import live_server_setup, wait_for_all_checks | ||||
|  | ||||
| from ..html_tools import * | ||||
|  | ||||
| @@ -55,6 +55,8 @@ def set_multiline_response(): | ||||
|      </p> | ||||
|       | ||||
|      <div>aaand something lines</div> | ||||
|      <br> | ||||
|      <div>and this should be</div> | ||||
|      </body> | ||||
|      </html> | ||||
|     """ | ||||
| @@ -66,11 +68,10 @@ def set_multiline_response(): | ||||
|  | ||||
|  | ||||
| def test_setup(client, live_server): | ||||
|  | ||||
|     live_server_setup(live_server) | ||||
|  | ||||
| def test_check_filter_multiline(client, live_server): | ||||
|  | ||||
|     #live_server_setup(live_server) | ||||
|     set_multiline_response() | ||||
|  | ||||
|     # Add our URL to the import page | ||||
| @@ -82,14 +83,15 @@ def test_check_filter_multiline(client, live_server): | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|  | ||||
|     time.sleep(3) | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     # Goto the edit page, add our ignore text | ||||
|     # Add our URL to the import page | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first"), | ||||
|         data={"include_filters": '', | ||||
|               'extract_text': '/something.+?6 billion.+?lines/si', | ||||
|               # Test a regex and a plaintext | ||||
|               'extract_text': '/something.+?6 billion.+?lines/si\r\nand this should be', | ||||
|               "url": test_url, | ||||
|               "tags": "", | ||||
|               "headers": "", | ||||
| @@ -99,13 +101,19 @@ def test_check_filter_multiline(client, live_server): | ||||
|     ) | ||||
|  | ||||
|     assert b"Updated watch." in res.data | ||||
|     time.sleep(3) | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.get(url_for("index")) | ||||
|  | ||||
|     # Issue 1828 | ||||
|     assert b'not at the start of the expression' not in res.data | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("preview_page", uuid="first"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     # Plaintext that doesn't look like a regex should also match | ||||
|     assert b'and this should be' in res.data | ||||
|  | ||||
|     assert b'<div class="">Something' in res.data | ||||
|     assert b'<div class="">across 6 billion multiple' in res.data | ||||
| @@ -115,14 +123,11 @@ def test_check_filter_multiline(client, live_server): | ||||
|     assert b'aaand something lines' not in res.data | ||||
|  | ||||
| def test_check_filter_and_regex_extract(client, live_server): | ||||
|     sleep_time_for_fetch_thread = 3 | ||||
|      | ||||
|     include_filters = ".changetext" | ||||
|  | ||||
|     set_original_response() | ||||
|  | ||||
|     # Give the endpoint time to spin up | ||||
|     time.sleep(1) | ||||
|  | ||||
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
| @@ -132,19 +137,15 @@ def test_check_filter_and_regex_extract(client, live_server): | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|  | ||||
|     time.sleep(1) | ||||
|     # Trigger a check | ||||
|     client.get(url_for("form_watch_checknow"), follow_redirects=True) | ||||
|  | ||||
|     # Give the thread time to pick it up | ||||
|     time.sleep(sleep_time_for_fetch_thread) | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     # Goto the edit page, add our ignore text | ||||
|     # Add our URL to the import page | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first"), | ||||
|         data={"include_filters": include_filters, | ||||
|               'extract_text': '\d+ online\r\n\d+ guests\r\n/somecase insensitive \d+/i\r\n/somecase insensitive (345\d)/i', | ||||
|               'extract_text': '/\d+ online/\r\n/\d+ guests/\r\n/somecase insensitive \d+/i\r\n/somecase insensitive (345\d)/i\r\n/issue1828.+?2022/i', | ||||
|               "url": test_url, | ||||
|               "tags": "", | ||||
|               "headers": "", | ||||
| @@ -155,8 +156,13 @@ def test_check_filter_and_regex_extract(client, live_server): | ||||
|  | ||||
|     assert b"Updated watch." in res.data | ||||
|  | ||||
|  | ||||
|     # Give the thread time to pick it up | ||||
|     time.sleep(sleep_time_for_fetch_thread) | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.get(url_for("index")) | ||||
|     #issue 1828 | ||||
|     assert b'not at the start of the expression' not in res.data | ||||
|  | ||||
|     #  Make a change | ||||
|     set_modified_response() | ||||
| @@ -164,7 +170,7 @@ def test_check_filter_and_regex_extract(client, live_server): | ||||
|     # Trigger a check | ||||
|     client.get(url_for("form_watch_checknow"), follow_redirects=True) | ||||
|     # Give the thread time to pick it up | ||||
|     time.sleep(sleep_time_for_fetch_thread) | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     # It should have 'unviewed' still | ||||
|     # Because it should be looking at only that 'sametext' id | ||||
| @@ -196,3 +202,32 @@ def test_check_filter_and_regex_extract(client, live_server): | ||||
|  | ||||
|     # Should not be here | ||||
|     assert b'Some text that did change' not in res.data | ||||
|  | ||||
|  | ||||
|  | ||||
| def test_regex_error_handling(client, live_server): | ||||
|  | ||||
|     #live_server_setup(live_server) | ||||
|  | ||||
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
|         data={"urls": test_url}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|  | ||||
|     ### test regex error handling | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first"), | ||||
|         data={"extract_text": '/something bad\d{3/XYZ', | ||||
|               "url": test_url, | ||||
|               "fetch_backend": "html_requests"}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b'is not a valid regular expression.' in res.data | ||||
|  | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|     assert b'Deleted' in res.data | ||||
|   | ||||
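The updated extract_text value mixes /pattern/flags lines with a bare plaintext line, so each line has to be classified: delimited regexes (optionally flagged, e.g. /…/i) compile as-is, anything else is searched for literally. A sketch of that per-line dispatch; the helper is hypothetical, not the shipped parser:

    import re

    # Hypothetical per-line dispatch matching the behaviour the tests exercise:
    # '/pattern/flags' lines become regexes, other lines match as literal text.
    def compile_extract_line(line):
        m = re.fullmatch(r'/(.+)/(\w*)', line.strip(), re.DOTALL)
        if m:
            flags = 0
            if 'i' in m.group(2):
                flags |= re.IGNORECASE
            if 's' in m.group(2):
                flags |= re.DOTALL
            return re.compile(m.group(1), flags)
        return re.compile(re.escape(line.strip()))

    assert compile_extract_line(r'/somecase insensitive \d+/i').search('SomeCase Insensitive 99')
    assert compile_extract_line('and this should be').search('...and this should be kept')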
| @@ -33,8 +33,6 @@ def test_strip_regex_text_func(): | ||||
|         "/not" | ||||
|     ] | ||||
|  | ||||
|  | ||||
|     fetcher = fetch_site_status.perform_site_check(datastore=False) | ||||
|     stripped_content = html_tools.strip_ignore_text(test_content, ignore_lines) | ||||
|  | ||||
|     assert b"but 1 lines" in stripped_content | ||||
|   | ||||
| @@ -24,7 +24,6 @@ def test_strip_text_func(): | ||||
|  | ||||
|     ignore_lines = ["sometimes"] | ||||
|  | ||||
|     fetcher = fetch_site_status.perform_site_check(datastore=False) | ||||
|     stripped_content = html_tools.strip_ignore_text(test_content, ignore_lines) | ||||
|  | ||||
|     assert b"sometimes" not in stripped_content | ||||
|   | ||||
| @@ -1,16 +1,19 @@ | ||||
| #!/usr/bin/python3 | ||||
|  | ||||
| import io | ||||
| import os | ||||
| import time | ||||
|  | ||||
| from flask import url_for | ||||
|  | ||||
| from .util import live_server_setup | ||||
| from .util import live_server_setup, wait_for_all_checks | ||||
|  | ||||
|  | ||||
| def test_setup(client, live_server): | ||||
|     live_server_setup(live_server) | ||||
|  | ||||
| def test_import(client, live_server): | ||||
|     # Give the endpoint time to spin up | ||||
|     time.sleep(1) | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
| @@ -119,3 +122,97 @@ def test_import_distillio(client, live_server): | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|     # Clear flask alerts | ||||
|     res = client.get(url_for("index")) | ||||
|  | ||||
| def test_import_custom_xlsx(client, live_server): | ||||
|     """Test that we can upload an Excel spreadsheet and the watches are created correctly""" | ||||
|  | ||||
|     #live_server_setup(live_server) | ||||
|  | ||||
|     dirname = os.path.dirname(__file__) | ||||
|     filename = os.path.join(dirname, 'import/spreadsheet.xlsx') | ||||
|     with open(filename, 'rb') as f: | ||||
|  | ||||
|         data= { | ||||
|             'file_mapping': 'custom', | ||||
|             'custom_xlsx[col_0]': '1', | ||||
|             'custom_xlsx[col_1]': '3', | ||||
|             'custom_xlsx[col_2]': '5', | ||||
|             'custom_xlsx[col_3]': '4', | ||||
|             'custom_xlsx[col_type_0]': 'title', | ||||
|             'custom_xlsx[col_type_1]': 'url', | ||||
|             'custom_xlsx[col_type_2]': 'include_filters', | ||||
|             'custom_xlsx[col_type_3]': 'interval_minutes', | ||||
|             'xlsx_file': (io.BytesIO(f.read()), 'spreadsheet.xlsx') | ||||
|         } | ||||
|  | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
|         data=data, | ||||
|         follow_redirects=True, | ||||
|     ) | ||||
|  | ||||
|     assert b'4 imported from custom .xlsx' in res.data | ||||
|     # Because this row was actually just a header with no usable URL, we should get an error | ||||
|     assert b'Error processing row number 1' in res.data | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("index") | ||||
|     ) | ||||
|  | ||||
|     assert b'Somesite results ABC' in res.data | ||||
|     assert b'City news results' in res.data | ||||
|  | ||||
|     # Just find one to check over | ||||
|     for uuid, watch in live_server.app.config['DATASTORE'].data['watching'].items(): | ||||
|         if watch.get('title') == 'Somesite results ABC': | ||||
|             filters = watch.get('include_filters') | ||||
|             assert filters[0] == '/html[1]/body[1]/div[4]/div[1]/div[1]/div[1]||//*[@id=\'content\']/div[3]/div[1]/div[1]||//*[@id=\'content\']/div[1]' | ||||
|             assert watch.get('time_between_check') == {'weeks': 0, 'days': 1, 'hours': 6, 'minutes': 24, 'seconds': 0} | ||||
|  | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|     assert b'Deleted' in res.data | ||||
|  | ||||
| def test_import_watchete_xlsx(client, live_server): | ||||
|     """Test that we can upload an Excel spreadsheet and the watches are created correctly""" | ||||
|  | ||||
|     #live_server_setup(live_server) | ||||
|     dirname = os.path.dirname(__file__) | ||||
|     filename = os.path.join(dirname, 'import/spreadsheet.xlsx') | ||||
|     with open(filename, 'rb') as f: | ||||
|  | ||||
|         data= { | ||||
|             'file_mapping': 'wachete', | ||||
|             'xlsx_file': (io.BytesIO(f.read()), 'spreadsheet.xlsx') | ||||
|         } | ||||
|  | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
|         data=data, | ||||
|         follow_redirects=True, | ||||
|     ) | ||||
|  | ||||
|     assert b'4 imported from Wachete .xlsx' in res.data | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("index") | ||||
|     ) | ||||
|  | ||||
|     assert b'Somesite results ABC' in res.data | ||||
|     assert b'City news results' in res.data | ||||
|  | ||||
|     # Just find one to check over | ||||
|     for uuid, watch in live_server.app.config['DATASTORE'].data['watching'].items(): | ||||
|         if watch.get('title') == 'Somesite results ABC': | ||||
|             filters = watch.get('include_filters') | ||||
|             assert filters[0] == '/html[1]/body[1]/div[4]/div[1]/div[1]/div[1]||//*[@id=\'content\']/div[3]/div[1]/div[1]||//*[@id=\'content\']/div[1]' | ||||
|             assert watch.get('time_between_check') == {'weeks': 0, 'days': 1, 'hours': 6, 'minutes': 24, 'seconds': 0} | ||||
|             assert watch.get('fetch_backend') == 'html_requests' # Has inactive 'dynamic wachet' | ||||
|  | ||||
|         if watch.get('title') == 'JS website': | ||||
|             assert watch.get('fetch_backend') == 'html_webdriver' # Has active 'dynamic wachet' | ||||
|  | ||||
|         if watch.get('title') == 'system default website': | ||||
|             assert watch.get('fetch_backend') == 'system' # uses default if blank | ||||
|  | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|     assert b'Deleted' in res.data | ||||
|   | ||||
| @@ -2,9 +2,8 @@ | ||||
|  | ||||
| import time | ||||
| from flask import url_for | ||||
| from .util import set_original_response, set_modified_response, live_server_setup | ||||
| from .util import set_original_response, set_modified_response, live_server_setup, wait_for_all_checks | ||||
|  | ||||
| sleep_time_for_fetch_thread = 3 | ||||
|  | ||||
| # `subtractive_selectors` should still work in `source:` type requests | ||||
| def test_fetch_pdf(client, live_server): | ||||
| @@ -22,7 +21,9 @@ def test_fetch_pdf(client, live_server): | ||||
|  | ||||
|     assert b"1 Imported" in res.data | ||||
|  | ||||
|     time.sleep(sleep_time_for_fetch_thread) | ||||
|  | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("preview_page", uuid="first"), | ||||
|         follow_redirects=True | ||||
| @@ -33,8 +34,42 @@ def test_fetch_pdf(client, live_server): | ||||
|  | ||||
|     # So we know if the file changes in other ways | ||||
|     import hashlib | ||||
|     md5 = hashlib.md5(open("test-datastore/endpoint-test.pdf", 'rb').read()).hexdigest().upper() | ||||
|     original_md5 = hashlib.md5(open("test-datastore/endpoint-test.pdf", 'rb').read()).hexdigest().upper() | ||||
|     # We should have one | ||||
|     assert len(md5) >0 | ||||
|     assert len(original_md5) >0 | ||||
|     # And it's going to be in the document | ||||
|     assert b'Document checksum - '+bytes(str(md5).encode('utf-8')) in res.data | ||||
|     assert b'Document checksum - '+bytes(str(original_md5).encode('utf-8')) in res.data | ||||
|  | ||||
|  | ||||
|     shutil.copy("tests/test2.pdf", "test-datastore/endpoint-test.pdf") | ||||
|     changed_md5 = hashlib.md5(open("test-datastore/endpoint-test.pdf", 'rb').read()).hexdigest().upper() | ||||
|     res = client.get(url_for("form_watch_checknow"), follow_redirects=True) | ||||
|     assert b'1 watches queued for rechecking.' in res.data | ||||
|  | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     # Now something should be ready, indicated by having a 'unviewed' class | ||||
|     res = client.get(url_for("index")) | ||||
|     assert b'unviewed' in res.data | ||||
|  | ||||
|     # The original checksum should not be here anymore (cdio adds it to the bottom of the text) | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("preview_page", uuid="first"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert original_md5.encode('utf-8') not in res.data | ||||
|     assert changed_md5.encode('utf-8') in res.data | ||||
|  | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("diff_history_page", uuid="first"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert original_md5.encode('utf-8') in res.data | ||||
|     assert changed_md5.encode('utf-8') in res.data | ||||
|  | ||||
|     assert b'here is a change' in res.data | ||||
|      | ||||
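The assertions above rely on changedetection.io appending a 'Document checksum - <MD5>' line to the text extracted from a PDF, so any byte-level change in the file registers as a text change even when the visible words are identical. The checksum step in minimal, hedged form:

    import hashlib

    # Minimal sketch of the checksum trailer the assertions above depend on:
    # hash the raw PDF bytes and append the digest to the extracted text.
    with open('test-datastore/endpoint-test.pdf', 'rb') as f:
        md5 = hashlib.md5(f.read()).hexdigest().upper()

    extracted_text = '...text extracted from the PDF...'  # placeholder
    extracted_text += f'\nDocument checksum - {md5}'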
| @@ -80,8 +80,11 @@ def test_headers_in_request(client, live_server): | ||||
|  | ||||
|     # Should be only one with headers set | ||||
|     assert watches_with_headers==1 | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|     assert b'Deleted' in res.data | ||||
|  | ||||
| def test_body_in_request(client, live_server): | ||||
|  | ||||
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_body', _external=True) | ||||
|     if os.getenv('PLAYWRIGHT_DRIVER_URL'): | ||||
| @@ -170,7 +173,8 @@ def test_body_in_request(client, live_server): | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"Body must be empty when Request Method is set to GET" in res.data | ||||
|  | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|     assert b'Deleted' in res.data | ||||
|  | ||||
| def test_method_in_request(client, live_server): | ||||
|     # Add our URL to the import page | ||||
|   | ||||
| @@ -2,12 +2,61 @@ | ||||
|  | ||||
| import time | ||||
| from flask import url_for | ||||
| from .util import set_original_response, set_modified_response, live_server_setup, wait_for_all_checks, extract_rss_token_from_UI | ||||
| from .util import set_original_response, set_modified_response, live_server_setup, wait_for_all_checks, extract_rss_token_from_UI, \ | ||||
|     extract_UUID_from_client | ||||
|  | ||||
|  | ||||
| def set_original_cdata_xml(): | ||||
|     test_return_data = """<rss xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:media="http://search.yahoo.com/mrss/" xmlns:atom="http://www.w3.org/2005/Atom" version="2.0"> | ||||
|     <channel> | ||||
|     <title>Gizi</title> | ||||
|     <link>https://test.com</link> | ||||
|     <atom:link href="https://testsite.com" rel="self" type="application/rss+xml"/> | ||||
|     <description> | ||||
|     <![CDATA[ The Future Could Be Here ]]> | ||||
|     </description> | ||||
|     <language>en</language> | ||||
|     <item> | ||||
|     <title> | ||||
|     <![CDATA[ <img src="https://testsite.com/hacked.jpg"> Hackers can access your computer ]]> | ||||
|     </title> | ||||
|     <link>https://testsite.com/news/12341234234</link> | ||||
|     <description> | ||||
|     <![CDATA[ <img class="type:primaryImage" src="https://testsite.com/701c981da04869e.jpg"/><p>The days of Terminator and The Matrix could be closer. But be positive.</p><p><a href="https://testsite.com">Read more link...</a></p> ]]> | ||||
|     </description> | ||||
|     <category>cybernetics</category> | ||||
|     <category>rand corporation</category> | ||||
|     <pubDate>Tue, 17 Oct 2023 15:10:00 GMT</pubDate> | ||||
|     <guid isPermaLink="false">1850933241</guid> | ||||
|     <dc:creator> | ||||
|     <![CDATA[ Mr Hacker News ]]> | ||||
|     </dc:creator> | ||||
|     <media:thumbnail url="https://testsite.com/thumbnail-c224e10d81488e818701c981da04869e.jpg"/> | ||||
|     </item> | ||||
|  | ||||
|     <item> | ||||
|         <title>    Some other title    </title> | ||||
|         <link>https://testsite.com/news/12341234236</link> | ||||
|         <description> | ||||
|         Some other description | ||||
|         </description> | ||||
|     </item>     | ||||
|     </channel> | ||||
|     </rss> | ||||
|             """ | ||||
|  | ||||
|     with open("test-datastore/endpoint-content.txt", "w") as f: | ||||
|         f.write(test_return_data) | ||||
|  | ||||
|  | ||||
| def test_setup(client, live_server): | ||||
|     live_server_setup(live_server) | ||||
|  | ||||
| def test_rss_and_token(client, live_server): | ||||
|     #    live_server_setup(live_server) | ||||
|  | ||||
|     set_original_response() | ||||
|     live_server_setup(live_server) | ||||
|     rss_token = extract_rss_token_from_UI(client) | ||||
|  | ||||
|     # Add our URL to the import page | ||||
|     res = client.post( | ||||
| @@ -17,11 +66,11 @@ def test_rss_and_token(client, live_server): | ||||
|     ) | ||||
|  | ||||
|     assert b"1 Imported" in res.data | ||||
|     rss_token = extract_rss_token_from_UI(client) | ||||
|  | ||||
|     time.sleep(2) | ||||
|     wait_for_all_checks(client) | ||||
|     set_modified_response() | ||||
|     client.get(url_for("form_watch_checknow"), follow_redirects=True) | ||||
|     time.sleep(2) | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     # Add our URL to the import page | ||||
|     res = client.get( | ||||
| @@ -37,3 +86,80 @@ def test_rss_and_token(client, live_server): | ||||
|     ) | ||||
|     assert b"Access denied, bad token" not in res.data | ||||
|     assert b"Random content" in res.data | ||||
|  | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|  | ||||
| def test_basic_cdata_rss_markup(client, live_server): | ||||
|     #live_server_setup(live_server) | ||||
|  | ||||
|     set_original_cdata_xml() | ||||
|  | ||||
|     test_url = url_for('test_endpoint', content_type="application/xml", _external=True) | ||||
|  | ||||
|     # Add our URL to the import page | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
|         data={"urls": test_url}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"1 Imported" in res.data | ||||
|  | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("preview_page", uuid="first"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b'CDATA' not in res.data | ||||
|     assert b'<![' not in res.data | ||||
|     assert b'Hackers can access your computer' in res.data | ||||
|     assert b'The days of Terminator' in res.data | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|  | ||||
| def test_rss_xpath_filtering(client, live_server): | ||||
|     #live_server_setup(live_server) | ||||
|  | ||||
|     set_original_cdata_xml() | ||||
|  | ||||
|     test_url = url_for('test_endpoint', content_type="application/xml", _external=True) | ||||
|  | ||||
|     res = client.post( | ||||
|         url_for("form_quick_watch_add"), | ||||
|         data={"url": test_url, "tags": '', 'edit_and_watch_submit_button': 'Edit > Watch'}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"Watch added in Paused state, saving will unpause" in res.data | ||||
|  | ||||
|     uuid = extract_UUID_from_client(client) | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid=uuid, unpause_on_save=1), | ||||
|         data={ | ||||
|                 "include_filters": "//item/title", | ||||
|                 "fetch_backend": "html_requests", | ||||
|                 "headers": "", | ||||
|                 "proxy": "no-proxy", | ||||
|                 "tags": "", | ||||
|                 "url": test_url, | ||||
|               }, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"unpaused" in res.data | ||||
|  | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("preview_page", uuid="first"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b'CDATA' not in res.data | ||||
|     assert b'<![' not in res.data | ||||
|     # #1874  All but the first <title> was getting selected | ||||
|     # Convert any HTML with just a top level <title> to <h1> to be sure title renders | ||||
|  | ||||
|     assert b'Hackers can access your computer' in res.data # Should ONLY be selected by the xpath | ||||
|     assert b'Some other title' in res.data  # Should ONLY be selected by the xpath | ||||
|     assert b'The days of Terminator' not in res.data # Should NOT be selected by the xpath | ||||
|     assert b'Some other description' not in res.data  # Should NOT be selected by the xpath | ||||
|  | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|   | ||||
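The include filter //item/title exercised above selects every <item> title in the feed, which is exactly what issue #1874 covers (only the first title was being kept). A minimal standalone illustration with lxml, using a throwaway feed snippet:

from lxml import etree

feed = b"<rss><channel><item><title>A</title></item><item><title>B</title></item></channel></rss>"
titles = etree.fromstring(feed).xpath('//item/title')
assert [t.text for t in titles] == ['A', 'B']  # both titles are selected, not just the first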
| @@ -1,5 +1,5 @@ | ||||
| from flask import url_for | ||||
| from . util import set_original_response, set_modified_response, live_server_setup | ||||
| from .util import set_original_response, set_modified_response, live_server_setup, wait_for_all_checks | ||||
| import time | ||||
|  | ||||
|  | ||||
| @@ -12,6 +12,7 @@ def test_bad_access(client, live_server): | ||||
|     ) | ||||
|  | ||||
|     assert b"1 Imported" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     # Attempt to add a body with a GET method | ||||
|     res = client.post( | ||||
| @@ -59,7 +60,7 @@ def test_bad_access(client, live_server): | ||||
|         data={"url": 'file:///tasty/disk/drive', "tags": ''}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     time.sleep(1) | ||||
|     wait_for_all_checks(client) | ||||
|     res = client.get(url_for("index")) | ||||
|  | ||||
|     assert b'file:// type access is denied for security reasons.' in res.data | ||||
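The file:// denial asserted above comes from a scheme check on the watch URL before fetching. A hedged sketch of that kind of check (the function name and the ALLOW_FILE_URI flag are written here as assumptions, not a quote of the project's code):

import os
from urllib.parse import urlparse

def is_safe_url(url):
    # deny file:// access unless it has been explicitly allowed via an env flag
    scheme = urlparse(url).scheme.lower()
    if scheme == 'file':
        return os.getenv('ALLOW_FILE_URI', 'false').lower() == 'true'
    return scheme in ('http', 'https')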
| @@ -2,7 +2,7 @@ | ||||
|  | ||||
| import time | ||||
| from flask import url_for | ||||
| from . util import live_server_setup | ||||
| from .util import live_server_setup, wait_for_all_checks | ||||
|  | ||||
|  | ||||
| def set_original_ignore_response(): | ||||
| @@ -26,13 +26,8 @@ def test_trigger_regex_functionality(client, live_server): | ||||
|  | ||||
|     live_server_setup(live_server) | ||||
|  | ||||
|     sleep_time_for_fetch_thread = 3 | ||||
|  | ||||
|     set_original_ignore_response() | ||||
|  | ||||
|     # Give the endpoint time to spin up | ||||
|     time.sleep(1) | ||||
|  | ||||
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
| @@ -43,7 +38,7 @@ def test_trigger_regex_functionality(client, live_server): | ||||
|     assert b"1 Imported" in res.data | ||||
|  | ||||
|     # Give the thread time to pick it up | ||||
|     time.sleep(sleep_time_for_fetch_thread) | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     # It should report nothing found (just a new one shouldn't have anything) | ||||
|     res = client.get(url_for("index")) | ||||
| @@ -57,7 +52,7 @@ def test_trigger_regex_functionality(client, live_server): | ||||
|               "fetch_backend": "html_requests"}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     time.sleep(sleep_time_for_fetch_thread) | ||||
|     wait_for_all_checks(client) | ||||
|     # so that we set the state to 'unviewed' after all the edits | ||||
|     client.get(url_for("diff_history_page", uuid="first")) | ||||
|  | ||||
| @@ -65,7 +60,7 @@ def test_trigger_regex_functionality(client, live_server): | ||||
|         f.write("some new noise") | ||||
|  | ||||
|     client.get(url_for("form_watch_checknow"), follow_redirects=True) | ||||
|     time.sleep(sleep_time_for_fetch_thread) | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     # It should report nothing found (nothing should match the regex) | ||||
|     res = client.get(url_for("index")) | ||||
| @@ -75,7 +70,7 @@ def test_trigger_regex_functionality(client, live_server): | ||||
|         f.write("regex test123<br>\nsomething 123") | ||||
|  | ||||
|     client.get(url_for("form_watch_checknow"), follow_redirects=True) | ||||
|     time.sleep(sleep_time_for_fetch_thread) | ||||
|     wait_for_all_checks(client) | ||||
|     res = client.get(url_for("index")) | ||||
|     assert b'unviewed' in res.data | ||||
|  | ||||
|   | ||||
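The pattern in this hunk, replacing fixed time.sleep() pauses with wait_for_all_checks(client), trades a guessed duration for polling until the outstanding checks have finished, which is both faster and less flaky. A generic sketch of the underlying pattern (wait_until is a hypothetical helper, not the util module's actual code):

import time

def wait_until(predicate, timeout=20, interval=0.5):
    # poll until predicate() is truthy; fail loudly instead of flaking silently
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return
        time.sleep(interval)
    raise TimeoutError("condition was not met within the timeout")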
| @@ -2,13 +2,15 @@ | ||||
|  | ||||
| import time | ||||
| from flask import url_for | ||||
| from . util import live_server_setup | ||||
| from .util import live_server_setup, wait_for_all_checks | ||||
|  | ||||
| from ..html_tools import * | ||||
|  | ||||
|  | ||||
| def test_setup(live_server): | ||||
|     live_server_setup(live_server) | ||||
|  | ||||
|  | ||||
| def set_original_response(): | ||||
|     test_return_data = """<html> | ||||
|        <body> | ||||
| @@ -26,6 +28,7 @@ def set_original_response(): | ||||
|         f.write(test_return_data) | ||||
|     return None | ||||
|  | ||||
|  | ||||
| def set_modified_response(): | ||||
|     test_return_data = """<html> | ||||
|        <body> | ||||
| @@ -44,11 +47,12 @@ def set_modified_response(): | ||||
|  | ||||
|     return None | ||||
|  | ||||
|  | ||||
| # Handle utf-8 charset replies https://github.com/dgtlmoon/changedetection.io/pull/613 | ||||
| def test_check_xpath_filter_utf8(client, live_server): | ||||
|     filter='//item/*[self::description]' | ||||
|     filter = '//item/*[self::description]' | ||||
|  | ||||
|     d='''<?xml version="1.0" encoding="UTF-8"?> | ||||
|     d = '''<?xml version="1.0" encoding="UTF-8"?> | ||||
| <rss xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd" xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0"> | ||||
| 	<channel> | ||||
| 		<title>rpilocator.com</title> | ||||
| @@ -86,14 +90,14 @@ def test_check_xpath_filter_utf8(client, live_server): | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|     time.sleep(1) | ||||
|     wait_for_all_checks(client) | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first"), | ||||
|         data={"include_filters": filter, "url": test_url, "tags": "", "headers": "", 'fetch_backend': "html_requests"}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"Updated watch." in res.data | ||||
|     time.sleep(3) | ||||
|     wait_for_all_checks(client) | ||||
|     res = client.get(url_for("index")) | ||||
|     assert b'Unicode strings with encoding declaration are not supported.' not in res.data | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
| @@ -102,9 +106,9 @@ def test_check_xpath_filter_utf8(client, live_server): | ||||
|  | ||||
| # Handle utf-8 charset replies https://github.com/dgtlmoon/changedetection.io/pull/613 | ||||
| def test_check_xpath_text_function_utf8(client, live_server): | ||||
|     filter='//item/title/text()' | ||||
|     filter = '//item/title/text()' | ||||
|  | ||||
|     d='''<?xml version="1.0" encoding="UTF-8"?> | ||||
|     d = '''<?xml version="1.0" encoding="UTF-8"?> | ||||
| <rss xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd" xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0"> | ||||
| 	<channel> | ||||
| 		<title>rpilocator.com</title> | ||||
| @@ -140,14 +144,14 @@ def test_check_xpath_text_function_utf8(client, live_server): | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|     time.sleep(1) | ||||
|     wait_for_all_checks(client) | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first"), | ||||
|         data={"include_filters": filter, "url": test_url, "tags": "", "headers": "", 'fetch_backend': "html_requests"}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"Updated watch." in res.data | ||||
|     time.sleep(3) | ||||
|     wait_for_all_checks(client) | ||||
|     res = client.get(url_for("index")) | ||||
|     assert b'Unicode strings with encoding declaration are not supported.' not in res.data | ||||
|  | ||||
| @@ -163,16 +167,12 @@ def test_check_xpath_text_function_utf8(client, live_server): | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|     assert b'Deleted' in res.data | ||||
|  | ||||
| def test_check_markup_xpath_filter_restriction(client, live_server): | ||||
|     sleep_time_for_fetch_thread = 3 | ||||
|  | ||||
| def test_check_markup_xpath_filter_restriction(client, live_server): | ||||
|     xpath_filter = "//*[contains(@class, 'sametext')]" | ||||
|  | ||||
|     set_original_response() | ||||
|  | ||||
|     # Give the endpoint time to spin up | ||||
|     time.sleep(1) | ||||
|  | ||||
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
| @@ -183,7 +183,7 @@ def test_check_markup_xpath_filter_restriction(client, live_server): | ||||
|     assert b"1 Imported" in res.data | ||||
|  | ||||
|     # Give the thread time to pick it up | ||||
|     time.sleep(sleep_time_for_fetch_thread) | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     # Goto the edit page, add our ignore text | ||||
|     # Add our URL to the import page | ||||
| @@ -195,7 +195,7 @@ def test_check_markup_xpath_filter_restriction(client, live_server): | ||||
|     assert b"Updated watch." in res.data | ||||
|  | ||||
|     # Give the thread time to pick it up | ||||
|     time.sleep(sleep_time_for_fetch_thread) | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     # view it/reset state back to viewed | ||||
|     client.get(url_for("diff_history_page", uuid="first"), follow_redirects=True) | ||||
| @@ -206,7 +206,7 @@ def test_check_markup_xpath_filter_restriction(client, live_server): | ||||
|     # Trigger a check | ||||
|     client.get(url_for("form_watch_checknow"), follow_redirects=True) | ||||
|     # Give the thread time to pick it up | ||||
|     time.sleep(sleep_time_for_fetch_thread) | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.get(url_for("index")) | ||||
|     assert b'unviewed' not in res.data | ||||
| @@ -215,10 +215,6 @@ def test_check_markup_xpath_filter_restriction(client, live_server): | ||||
|  | ||||
|  | ||||
| def test_xpath_validation(client, live_server): | ||||
|  | ||||
|     # Give the endpoint time to spin up | ||||
|     time.sleep(1) | ||||
|  | ||||
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
| @@ -227,7 +223,7 @@ def test_xpath_validation(client, live_server): | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|     time.sleep(2) | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first"), | ||||
| @@ -239,14 +235,172 @@ def test_xpath_validation(client, live_server): | ||||
|     assert b'Deleted' in res.data | ||||
|  | ||||
|  | ||||
| def test_xpath23_prefix_validation(client, live_server): | ||||
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
|         data={"urls": test_url}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first"), | ||||
|         data={"include_filters": "xpath:/something horrible", "url": test_url, "tags": "", "headers": "", 'fetch_backend': "html_requests"}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"is not a valid XPath expression" in res.data | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|     assert b'Deleted' in res.data | ||||
|  | ||||
|  | ||||
| def test_xpath1_validation(client, live_server): | ||||
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
|         data={"urls": test_url}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first"), | ||||
|         data={"include_filters": "xpath1:/something horrible", "url": test_url, "tags": "", "headers": "", 'fetch_backend': "html_requests"}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"is not a valid XPath expression" in res.data | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|     assert b'Deleted' in res.data | ||||
|  | ||||
|  | ||||
| # actually only really used by the distill.io importer, but could be handy too | ||||
| def test_check_with_prefix_include_filters(client, live_server): | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|     assert b'Deleted' in res.data | ||||
|  | ||||
|     # Give the endpoint time to spin up | ||||
|     time.sleep(1) | ||||
|     set_original_response() | ||||
|     wait_for_all_checks(client) | ||||
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
|         data={"urls": test_url}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first"), | ||||
|         data={"include_filters": "xpath://*[contains(@class, 'sametext')]", "url": test_url, "tags": "", "headers": "", | ||||
|               'fetch_backend': "html_requests"}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"Updated watch." in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("preview_page", uuid="first"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"Some text thats the same" in res.data  # in selector | ||||
|     assert b"Some text that will change" not in res.data  # not in selector | ||||
|  | ||||
|     client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|  | ||||
|  | ||||
| def test_various_rules(client, live_server): | ||||
|     # Just check these don't error | ||||
|     # live_server_setup(live_server) | ||||
|     with open("test-datastore/endpoint-content.txt", "w") as f: | ||||
|         f.write("""<html> | ||||
|        <body> | ||||
|      Some initial text<br> | ||||
|      <p>Which is across multiple lines</p> | ||||
|      <br> | ||||
|      So let's see what happens.  <br> | ||||
|      <div class="sametext">Some text thats the same</div> | ||||
|      <div class="changetext">Some text that will change</div> | ||||
|      <a href=''>some linky </a> | ||||
|      <a href=''>another some linky </a> | ||||
|      <!-- related to https://github.com/dgtlmoon/changedetection.io/pull/1774 --> | ||||
|      <input   type="email"   id="email" />      | ||||
|      </body> | ||||
|      </html> | ||||
|     """) | ||||
|  | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
|         data={"urls": test_url}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     for r in ['//div', '//a', 'xpath://div', 'xpath://a']: | ||||
|         res = client.post( | ||||
|             url_for("edit_page", uuid="first"), | ||||
|             data={"include_filters": r, | ||||
|                   "url": test_url, | ||||
|                   "tags": "", | ||||
|                   "headers": "", | ||||
|                   'fetch_backend': "html_requests"}, | ||||
|             follow_redirects=True | ||||
|         ) | ||||
|         wait_for_all_checks(client) | ||||
|         assert b"Updated watch." in res.data | ||||
|         res = client.get(url_for("index")) | ||||
|         assert b'fetch-error' not in res.data, f"Should not see errors after '{r}' filter" | ||||
|  | ||||
|     res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|     assert b'Deleted' in res.data | ||||
|  | ||||
|  | ||||
| def test_xpath_20(client, live_server): | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
|         data={"urls": test_url}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     set_original_response() | ||||
|  | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first"), | ||||
|         data={"include_filters": "//*[contains(@class, 'sametext')]|//*[contains(@class, 'changetext')]", | ||||
|               "url": test_url, | ||||
|               "tags": "", | ||||
|               "headers": "", | ||||
|               'fetch_backend': "html_requests"}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"Updated watch." in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("preview_page", uuid="first"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"Some text thats the same" in res.data  # in selector | ||||
|     assert b"Some text that will change" in res.data  # in selector | ||||
|  | ||||
|     client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|  | ||||
|  | ||||
| def test_xpath_20_function_count(client, live_server): | ||||
|     set_original_response() | ||||
|  | ||||
|     # Add our URL to the import page | ||||
| @@ -257,23 +411,100 @@ def test_check_with_prefix_include_filters(client, live_server): | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|     time.sleep(3) | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first"), | ||||
|         data={"include_filters":  "xpath://*[contains(@class, 'sametext')]", "url": test_url, "tags": "", "headers": "", 'fetch_backend': "html_requests"}, | ||||
|         data={"include_filters": "xpath:count(//div) * 123456789987654321", | ||||
|               "url": test_url, | ||||
|               "tags": "", | ||||
|               "headers": "", | ||||
|               'fetch_backend': "html_requests"}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"Updated watch." in res.data | ||||
|     time.sleep(3) | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("preview_page", uuid="first"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"Some text thats the same" in res.data #in selector | ||||
|     assert b"Some text that will change" not in res.data #not in selector | ||||
|     assert b"246913579975308642" in res.data  # in selector | ||||
|  | ||||
|     client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|  | ||||
|  | ||||
| def test_xpath_20_function_count2(client, live_server): | ||||
|     set_original_response() | ||||
|  | ||||
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
|         data={"urls": test_url}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first"), | ||||
|         data={"include_filters": "/html/body/count(div) * 123456789987654321", | ||||
|               "url": test_url, | ||||
|               "tags": "", | ||||
|               "headers": "", | ||||
|               'fetch_backend': "html_requests"}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"Updated watch." in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("preview_page", uuid="first"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"246913579975308642" in res.data  # in selector | ||||
|  | ||||
|     client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|  | ||||
|  | ||||
| def test_xpath_20_function_string_join_matches(client, live_server): | ||||
|     set_original_response() | ||||
|  | ||||
|     # Add our URL to the import page | ||||
|     test_url = url_for('test_endpoint', _external=True) | ||||
|     res = client.post( | ||||
|         url_for("import_page"), | ||||
|         data={"urls": test_url}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"1 Imported" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first"), | ||||
|         data={ | ||||
|             "include_filters": "xpath:string-join(//*[contains(@class, 'sametext')]|//*[matches(@class, 'changetext')], 'specialconjunction')", | ||||
|             "url": test_url, | ||||
|             "tags": "", | ||||
|             "headers": "", | ||||
|             'fetch_backend': "html_requests"}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"Updated watch." in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.get( | ||||
|         url_for("preview_page", uuid="first"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
|     assert b"Some text thats the samespecialconjunctionSome text that will change" in res.data  # in selector | ||||
|  | ||||
|     client.get(url_for("form_delete", uuid="all"), follow_redirects=True) | ||||
|  | ||||
|   | ||||
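The xpath: and xpath1: prefixes validated above let a filter choose between an XPath 2.0-capable engine (elementpath) and lxml's native XPath 1.0. A hedged sketch of such a prefix dispatch, with assumed names rather than the project's real function:

import elementpath
from lxml import etree

def apply_include_filter(expr, html):
    tree = etree.fromstring(html)
    if expr.startswith('xpath1:'):
        # lxml's built-in engine, XPath 1.0 only
        return tree.xpath(expr[len('xpath1:'):])
    if expr.startswith('xpath:'):
        expr = expr[len('xpath:'):]
    # elementpath's default parser understands XPath 2.0 (if/for/let, idiv, ...)
    return elementpath.select(tree, expr)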
							
								
								
									
changedetectionio/tests/test_xpath_selector_unit.py (new file, 203 lines)
							| @@ -0,0 +1,203 @@ | ||||
| import sys | ||||
| import os | ||||
| import pytest | ||||
| sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) | ||||
| import html_tools | ||||
|  | ||||
| # Test generation guide: | ||||
| # 1. Do not include encoding in the XML declaration if the test object is a str type. | ||||
| # 2. Always paraphrase tests. | ||||
|  | ||||
| hotels = """ | ||||
| <hotel> | ||||
|   <branch location="California"> | ||||
|     <staff> | ||||
|       <given_name>Christopher</given_name> | ||||
|       <surname>Anderson</surname> | ||||
|       <age>25</age> | ||||
|     </staff> | ||||
|     <staff> | ||||
|       <given_name>Christopher</given_name> | ||||
|       <surname>Carter</surname> | ||||
|       <age>30</age> | ||||
|     </staff> | ||||
|   </branch> | ||||
|   <branch location="Las Vegas"> | ||||
|     <staff> | ||||
|       <given_name>Lisa</given_name> | ||||
|       <surname>Walker</surname> | ||||
|       <age>60</age> | ||||
|     </staff> | ||||
|     <staff> | ||||
|       <given_name>Jessica</given_name> | ||||
|       <surname>Walker</surname> | ||||
|       <age>32</age> | ||||
|     </staff> | ||||
|     <staff> | ||||
|       <given_name>Jennifer</given_name> | ||||
|       <surname>Roberts</surname> | ||||
|       <age>50</age> | ||||
|     </staff> | ||||
|   </branch> | ||||
| </hotel>""" | ||||
|  | ||||
| @pytest.mark.parametrize("html_content", [hotels]) | ||||
| @pytest.mark.parametrize("xpath, answer", [('(//staff/given_name, //staff/age)', '25'), | ||||
|                           ("xs:date('2023-10-10')", '2023-10-10'), | ||||
|                           ("if (/hotel/branch[@location = 'California']/staff[1]/age = 25) then 'is 25' else 'is not 25'", 'is 25'), | ||||
|                           ("if (//hotel/branch[@location = 'California']/staff[1]/age = 25) then 'is 25' else 'is not 25'", 'is 25'), | ||||
|                           ("if (count(/hotel/branch/staff) = 5) then true() else false()", 'true'), | ||||
|                           ("if (count(//hotel/branch/staff) = 5) then true() else false()", 'true'), | ||||
|                           ("for $i in /hotel/branch/staff return if ($i/age >= 40) then upper-case($i/surname) else lower-case($i/surname)", 'anderson'), | ||||
|                           ("given_name  =  'Christopher' and age  =  40", 'false'), | ||||
|                           ("//given_name  =  'Christopher' and //age  =  40", 'false'), | ||||
|                           #("(staff/given_name, staff/age)", 'Lisa'), | ||||
|                           ("(//staff/given_name, //staff/age)", 'Lisa'), | ||||
|                           #("hotel/branch[@location = 'California']/staff/age union hotel/branch[@location = 'Las Vegas']/staff/age", ''), | ||||
|                           ("(//hotel/branch[@location = 'California']/staff/age union //hotel/branch[@location = 'Las Vegas']/staff/age)", '60'), | ||||
|                           ("(200 to 210)", "205"), | ||||
|                           ("(//hotel/branch[@location = 'California']/staff/age union //hotel/branch[@location = 'Las Vegas']/staff/age)", "50"), | ||||
|                           ("(1, 9, 9, 5)", "5"), | ||||
|                           ("(3, (), (14, 15), 92, 653)", "653"), | ||||
|                           ("for $i in /hotel/branch/staff return $i/given_name", "Christopher"), | ||||
|                           ("for $i in //hotel/branch/staff return $i/given_name", "Christopher"), | ||||
|                           ("distinct-values(for $i in /hotel/branch/staff return $i/given_name)", "Jessica"), | ||||
|                           ("distinct-values(for $i in //hotel/branch/staff return $i/given_name)", "Jessica"), | ||||
|                           ("for $i in (7 to  15) return $i*10", "130"), | ||||
|                           ("some $i in /hotel/branch/staff satisfies $i/age < 20", "false"), | ||||
|                           ("some $i in //hotel/branch/staff satisfies $i/age < 20", "false"), | ||||
|                           ("every $i in /hotel/branch/staff satisfies $i/age > 20", "true"), | ||||
|                           ("every $i in //hotel/branch/staff satisfies $i/age > 20 ", "true"), | ||||
|                           ("let $x := branch[@location = 'California'], $y := branch[@location = 'Las Vegas'] return (avg($x/staff/age), avg($y/staff/age))", "27.5"), | ||||
|                           ("let $x := //branch[@location = 'California'], $y := //branch[@location = 'Las Vegas'] return (avg($x/staff/age), avg($y/staff/age))", "27.5"), | ||||
|                           ("let $nu := 1, $de := 1000 return  'probability = ' || $nu div $de * 100 || '%'", "0.1%"), | ||||
|                           ("let $nu := 2, $probability := function ($argument) { 'probability = ' ||  $nu div $argument  * 100 || '%'}, $de := 5 return $probability($de)", "40%"), | ||||
|                           ("'XPATH2.0-3.1 dissemination' instance of xs:string ", "true"), | ||||
|                           ("'new stackoverflow question incoming' instance of xs:integer ", "false"), | ||||
|                           ("'50000' cast as xs:integer", "50000"), | ||||
|                           ("//branch[@location = 'California']/staff[1]/surname eq 'Anderson'", "true"), | ||||
|                           ("fn:false()", "false")]) | ||||
| def test_hotels(html_content, xpath, answer): | ||||
|     html_content = html_tools.xpath_filter(xpath, html_content, append_pretty_line_formatting=True) | ||||
|     assert type(html_content) == str | ||||
|     assert answer in html_content | ||||
|  | ||||
|  | ||||
|  | ||||
| branches_to_visit = """<?xml version="1.0" ?> | ||||
|   <branches_to_visit> | ||||
|      <manager name="Godot" room_no="501"> | ||||
|          <branch>Area 51</branch> | ||||
|          <branch>A place with no name</branch> | ||||
|          <branch>Stalsk12</branch> | ||||
|      </manager> | ||||
|       <manager name="Freya" room_no="305"> | ||||
|          <branch>Stalsk12</branch> | ||||
|          <branch>Barcelona</branch> | ||||
|          <branch>Paris</branch> | ||||
|      </manager> | ||||
|  </branches_to_visit>""" | ||||
| @pytest.mark.parametrize("html_content", [branches_to_visit]) | ||||
| @pytest.mark.parametrize("xpath, answer", [ | ||||
|     ("manager[@name = 'Godot']/branch union manager[@name = 'Freya']/branch", "Area 51"), | ||||
|     ("//manager[@name = 'Godot']/branch union //manager[@name = 'Freya']/branch", "Stalsk12"), | ||||
|     ("manager[@name = 'Godot']/branch | manager[@name = 'Freya']/branch", "Stalsk12"), | ||||
|     ("//manager[@name = 'Godot']/branch | //manager[@name = 'Freya']/branch", "Stalsk12"), | ||||
|     ("manager/branch intersect manager[@name = 'Godot']/branch", "A place with no name"), | ||||
|     ("//manager/branch intersect //manager[@name = 'Godot']/branch", "A place with no name"), | ||||
|     ("manager[@name = 'Godot']/branch intersect manager[@name = 'Freya']/branch", ""), | ||||
|     ("manager/branch except manager[@name = 'Godot']/branch", "Barcelona"), | ||||
|     ("manager[@name = 'Godot']/branch[1]  eq 'Area 51'", "true"), | ||||
|     ("//manager[@name = 'Godot']/branch[1]  eq 'Area 51'", "true"), | ||||
|     ("manager[@name = 'Godot']/branch[1]  eq 'Seoul'", "false"), | ||||
|     ("//manager[@name = 'Godot']/branch[1]  eq 'Seoul'", "false"), | ||||
|     ("manager[@name = 'Godot']/branch[2] eq manager[@name = 'Freya']/branch[2]", "false"), | ||||
|     ("//manager[@name = 'Godot']/branch[2] eq //manager[@name = 'Freya']/branch[2]", "false"), | ||||
|     ("manager[1]/@room_no lt manager[2]/@room_no", "false"), | ||||
|     ("//manager[1]/@room_no lt //manager[2]/@room_no", "false"), | ||||
|     ("manager[1]/@room_no gt manager[2]/@room_no", "true"), | ||||
|     ("//manager[1]/@room_no gt //manager[2]/@room_no", "true"), | ||||
|     ("manager[@name = 'Godot']/branch[1]  = 'Area 51'", "true"), | ||||
|     ("//manager[@name = 'Godot']/branch[1]  = 'Area 51'", "true"), | ||||
|     ("manager[@name = 'Godot']/branch[1]  = 'Seoul'", "false"), | ||||
|     ("//manager[@name = 'Godot']/branch[1]  = 'Seoul'", "false"), | ||||
|     ("manager[@name = 'Godot']/branch  = 'Area 51'", "true"), | ||||
|     ("//manager[@name = 'Godot']/branch  = 'Area 51'", "true"), | ||||
|     ("manager[@name = 'Godot']/branch  = 'Barcelona'", "false"), | ||||
|     ("//manager[@name = 'Godot']/branch  = 'Barcelona'", "false"), | ||||
|     ("manager[1]/@room_no > manager[2]/@room_no", "true"), | ||||
|     ("//manager[1]/@room_no > //manager[2]/@room_no", "true"), | ||||
|     ("manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is manager[1]/branch[1]", "false"), | ||||
|     ("//manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is //manager[1]/branch[1]", "false"), | ||||
|     ("manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is manager[1]/branch[3]", "true"), | ||||
|     ("//manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is //manager[1]/branch[3]", "true"), | ||||
|     ("manager[@name = 'Godot']/branch[ . = 'Stalsk12'] <<  manager[1]/branch[1]", "false"), | ||||
|     ("//manager[@name = 'Godot']/branch[ . = 'Stalsk12'] <<  //manager[1]/branch[1]", "false"), | ||||
|     ("manager[@name = 'Godot']/branch[ . = 'Stalsk12']  >>  manager[1]/branch[1]", "true"), | ||||
|     ("//manager[@name = 'Godot']/branch[ . = 'Stalsk12'] >>  //manager[1]/branch[1]", "true"), | ||||
|     ("manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is manager[@name = 'Freya']/branch[ . = 'Stalsk12']", "false"), | ||||
|     ("//manager[@name = 'Godot']/branch[ . = 'Stalsk12'] is //manager[@name = 'Freya']/branch[ . = 'Stalsk12']", "false"), | ||||
|     ("manager[1]/@name || manager[2]/@name", "GodotFreya"), | ||||
|     ("//manager[1]/@name || //manager[2]/@name", "GodotFreya"), | ||||
|                           ]) | ||||
| def test_branches_to_visit(html_content, xpath, answer): | ||||
|     html_content = html_tools.xpath_filter(xpath, html_content, append_pretty_line_formatting=True) | ||||
|     assert type(html_content) == str | ||||
|     assert answer in html_content | ||||
|  | ||||
| trips = """ | ||||
| <trips> | ||||
|    <trip reservation_number="10"> | ||||
|        <depart>2023-10-06</depart> | ||||
|        <arrive>2023-10-10</arrive> | ||||
|        <traveler name="Christopher Anderson"> | ||||
|            <duration>4</duration> | ||||
|            <price>2000.00</price> | ||||
|        </traveler> | ||||
|    </trip> | ||||
|    <trip reservation_number="12"> | ||||
|        <depart>2023-10-06</depart> | ||||
|        <arrive>2023-10-12</arrive> | ||||
|        <traveler name="Frank Carter"> | ||||
|            <duration>6</duration> | ||||
|            <price>3500.34</price> | ||||
|        </traveler> | ||||
|    </trip> | ||||
| </trips>""" | ||||
| @pytest.mark.parametrize("html_content", [trips]) | ||||
| @pytest.mark.parametrize("xpath, answer", [ | ||||
|     ("1 + 9 * 9 + 5 div 5", "83"), | ||||
|     ("(1 + 9 * 9 + 5) div 6", "14.5"), | ||||
|     ("23 idiv 3", "7"), | ||||
|     ("23 div 3", "7.66666666"), | ||||
|     ("for $i in ./trip return $i/traveler/duration * $i/traveler/price", "21002.04"), | ||||
|     ("for $i in ./trip return $i/traveler/duration ", "4"), | ||||
|     ("for $i in .//trip return $i/traveler/duration * $i/traveler/price", "21002.04"), | ||||
|     ("sum(for $i in ./trip return $i/traveler/duration * $i/traveler/price)", "29002.04"), | ||||
|     ("sum(for $i in .//trip return $i/traveler/duration * $i/traveler/price)", "29002.04"), | ||||
|     #("trip[1]/depart - trip[1]/arrive", "fail_to_get_answer"), | ||||
|     #("//trip[1]/depart - //trip[1]/arrive", "fail_to_get_answer"), | ||||
|     #("trip[1]/depart + trip[1]/arrive", "fail_to_get_answer"), | ||||
|     #("xs:date(trip[1]/depart) + xs:date(trip[1]/arrive)", "fail_to_get_answer"), | ||||
|     ("(//trip[1]/arrive cast as xs:date) - (//trip[1]/depart cast as xs:date)", "P4D"), | ||||
|     ("(//trip[1]/depart cast as xs:date) - (//trip[1]/arrive cast as xs:date)", "-P4D"), | ||||
|     ("(//trip[1]/depart cast as xs:date) + xs:dayTimeDuration('P3D')", "2023-10-09"), | ||||
|     ("(//trip[1]/depart cast as xs:date) - xs:dayTimeDuration('P3D')", "2023-10-03"), | ||||
|     ("(456, 623) instance of xs:integer", "false"), | ||||
|     ("(456, 623) instance of xs:integer*", "true"), | ||||
|     ("/trips/trip instance of element()", "false"), | ||||
|     ("/trips/trip instance of element()*", "true"), | ||||
|     ("/trips/trip[1]/arrive instance of xs:date", "false"), | ||||
|     ("date(/trips/trip[1]/arrive) instance of xs:date", "true"), | ||||
|     ("'8' cast as xs:integer", "8"), | ||||
|     ("'11.1E3' cast as xs:double", "11100"), | ||||
|     ("6.5 cast as xs:integer", "6"), | ||||
|     #("/trips/trip[1]/arrive cast as xs:dateTime", "fail_to_get_answer"), | ||||
|     ("/trips/trip[1]/arrive cast as xs:date", "2023-10-10"), | ||||
|     ("('2023-10-12') cast as xs:date", "2023-10-12"), | ||||
|     ("for $i in //trip return concat($i/depart, '  ', $i/arrive)", "2023-10-06  2023-10-10"), | ||||
|                           ]) | ||||
| def test_trips(html_content, xpath, answer): | ||||
|     html_content = html_tools.xpath_filter(xpath, html_content, append_pretty_line_formatting=True) | ||||
|     assert type(html_content) == str | ||||
|     assert answer in html_content | ||||
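Two of the parametrized expressions above, evaluated directly, show how elementpath hands back plain Python values for literal XPath 2.0 expressions (a sketch; the exact return shapes are library behaviour worth verifying):

import xml.etree.ElementTree as ET
import elementpath

root = ET.fromstring("<r/>")  # any element works as a context for literal expressions
print(elementpath.select(root, "23 idiv 3"))     # expected: 7 (integer division)
print(elementpath.select(root, "(1, 9, 9, 5)"))  # expected: [1, 9, 9, 5] (sequence -> list)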
							
								
								
									
changedetectionio/tests/unit/test_watch_model.py (new file, 54 lines)
							| @@ -0,0 +1,54 @@ | ||||
| #!/usr/bin/python3 | ||||
|  | ||||
| # run from dir above changedetectionio/ dir | ||||
| # python3 -m unittest changedetectionio.tests.unit.test_notification_diff | ||||
|  | ||||
| import unittest | ||||
| import os | ||||
|  | ||||
| from changedetectionio.model import Watch | ||||
|  | ||||
| # mostly | ||||
| class TestDiffBuilder(unittest.TestCase): | ||||
|  | ||||
|     def test_watch_get_suggested_from_diff_timestamp(self): | ||||
|         import uuid as uuid_builder | ||||
|         watch = Watch.model(datastore_path='/tmp', default={}) | ||||
|         watch.ensure_data_dir_exists() | ||||
|  | ||||
|         watch['last_viewed'] = 110 | ||||
|  | ||||
|         watch.save_history_text(contents=b"hello world", timestamp=100, snapshot_id=str(uuid_builder.uuid4())) | ||||
|         watch.save_history_text(contents=b"hello world", timestamp=105, snapshot_id=str(uuid_builder.uuid4())) | ||||
|         watch.save_history_text(contents=b"hello world", timestamp=109, snapshot_id=str(uuid_builder.uuid4())) | ||||
|         watch.save_history_text(contents=b"hello world", timestamp=112, snapshot_id=str(uuid_builder.uuid4())) | ||||
|         watch.save_history_text(contents=b"hello world", timestamp=115, snapshot_id=str(uuid_builder.uuid4())) | ||||
|         watch.save_history_text(contents=b"hello world", timestamp=117, snapshot_id=str(uuid_builder.uuid4())) | ||||
|  | ||||
|         p = watch.get_next_snapshot_key_to_last_viewed | ||||
|         assert p == "112", "Correct last-viewed timestamp was detected" | ||||
|  | ||||
|         # When there is only one step of difference from the end of the list, it should return second-last change | ||||
|         watch['last_viewed'] = 116 | ||||
|         p = watch.get_next_snapshot_key_to_last_viewed | ||||
|         assert p == "115", "Correct 'second last' last-viewed timestamp was detected when using the last timestamp" | ||||
|  | ||||
|         watch['last_viewed'] = 99 | ||||
|         p = watch.get_next_snapshot_key_to_last_viewed | ||||
|         assert p == "100" | ||||
|  | ||||
|         watch['last_viewed'] = 200 | ||||
|         p = watch.get_next_snapshot_key_to_last_viewed | ||||
|         assert p == "115", "When the 'last viewed' timestamp is greater than the newest snapshot, return second last " | ||||
|  | ||||
|         watch['last_viewed'] = 109 | ||||
|         p = watch.get_next_snapshot_key_to_last_viewed | ||||
|         assert p == "109", "Correct when its the same time" | ||||
|  | ||||
|         # new empty one | ||||
|         watch = Watch.model(datastore_path='/tmp', default={}) | ||||
|         p = watch.get_next_snapshot_key_to_last_viewed | ||||
|         assert p == None, "None when no history available" | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|     unittest.main() | ||||
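The rule these assertions pin down can be paraphrased as: return the oldest snapshot taken at or after last_viewed, unless that snapshot is the newest one, in which case fall back to the second newest. A standalone sketch of that rule (not the Watch model's actual code):

def next_snapshot_key(history_keys, last_viewed):
    # history_keys: ascending int timestamps of the saved snapshots
    if not history_keys:
        return None
    candidates = [k for k in history_keys if k >= last_viewed]
    if candidates and candidates[0] != history_keys[-1]:
        return str(candidates[0])
    # nothing newer, or only the newest snapshot qualifies: show the second-last change
    return str(history_keys[-2] if len(history_keys) > 1 else history_keys[-1])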
| @@ -1,18 +1,19 @@ | ||||
| #!/usr/bin/python3 | ||||
|  | ||||
| import time | ||||
| import os | ||||
| from flask import url_for | ||||
| from ..util import live_server_setup, wait_for_all_checks, extract_UUID_from_client | ||||
|  | ||||
| def test_setup(client, live_server): | ||||
|     live_server_setup(live_server) | ||||
|  | ||||
| # Add a site in paused mode, add an invalid filter, we should still have visual selector data ready | ||||
| def test_visual_selector_content_ready(client, live_server): | ||||
|     import os | ||||
|     import json | ||||
|  | ||||
|     assert os.getenv('PLAYWRIGHT_DRIVER_URL'), "Needs PLAYWRIGHT_DRIVER_URL set for this test" | ||||
|     time.sleep(1) | ||||
|     live_server_setup(live_server) | ||||
|  | ||||
|  | ||||
|     # Add our URL to the import page, because the docker container (playwright/selenium) won't be able to connect to our usual test url | ||||
|     test_url = "https://changedetection.io/ci-test/test-runjs.html" | ||||
| @@ -53,6 +54,13 @@ def test_visual_selector_content_ready(client, live_server): | ||||
|     with open(os.path.join('test-datastore', uuid, 'elements.json'), 'r') as f: | ||||
|         json.load(f) | ||||
|  | ||||
|     # Attempt to fetch it via the web hook that the browser would use | ||||
|     res = client.get(url_for('static_content', group='visual_selector_data', filename=uuid)) | ||||
|     json.loads(res.data) | ||||
|     assert res.mimetype == 'application/json' | ||||
|     assert res.status_code == 200 | ||||
|  | ||||
|  | ||||
|     # Some options should be enabled | ||||
|     # @todo - in the future, the visibility should be toggled by JS from the request type setting | ||||
|     res = client.get( | ||||
| @@ -60,4 +68,75 @@ def test_visual_selector_content_ready(client, live_server): | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b'notification_screenshot' in res.data | ||||
|     client.get( | ||||
|         url_for("form_delete", uuid="all"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|  | ||||
| def test_basic_browserstep(client, live_server): | ||||
|  | ||||
|     assert os.getenv('PLAYWRIGHT_DRIVER_URL'), "Needs PLAYWRIGHT_DRIVER_URL set for this test" | ||||
|     #live_server_setup(live_server) | ||||
|  | ||||
|     # Add our URL to the import page, because the docker container (playwright/selenium) won't be able to connect to our usual test url | ||||
|     test_url = "https://changedetection.io/ci-test/test-runjs.html" | ||||
|  | ||||
|     res = client.post( | ||||
|         url_for("form_quick_watch_add"), | ||||
|         data={"url": test_url, "tags": '', 'edit_and_watch_submit_button': 'Edit > Watch'}, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"Watch added in Paused state, saving will unpause" in res.data | ||||
|  | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid="first", unpause_on_save=1), | ||||
|         data={ | ||||
|               "url": test_url, | ||||
|               "tags": "", | ||||
|               "headers": "", | ||||
|               'fetch_backend': "html_webdriver", | ||||
|               'browser_steps-0-operation': 'Goto site', | ||||
|               'browser_steps-1-operation': 'Click element', | ||||
|               'browser_steps-1-selector': 'button[name=test-button]', | ||||
|               'browser_steps-1-optional_value': '' | ||||
|         }, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"unpaused" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     uuid = extract_UUID_from_client(client) | ||||
|  | ||||
|     # Check HTML conversion was detected and worked | ||||
|     res = client.get( | ||||
|         url_for("preview_page", uuid=uuid), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"This text should be removed" not in res.data | ||||
|     assert b"I smell JavaScript because the button was pressed" in res.data | ||||
|  | ||||
|     # now test for 404 errors | ||||
|     res = client.post( | ||||
|         url_for("edit_page", uuid=uuid, unpause_on_save=1), | ||||
|         data={ | ||||
|               "url": "https://changedetection.io/404", | ||||
|               "tags": "", | ||||
|               "headers": "", | ||||
|               'fetch_backend': "html_webdriver", | ||||
|               'browser_steps-0-operation': 'Goto site', | ||||
|               'browser_steps-1-operation': 'Click element', | ||||
|               'browser_steps-1-selector': 'button[name=test-button]', | ||||
|               'browser_steps-1-optional_value': '' | ||||
|         }, | ||||
|         follow_redirects=True | ||||
|     ) | ||||
|     assert b"unpaused" in res.data | ||||
|     wait_for_all_checks(client) | ||||
|  | ||||
|     res = client.get(url_for("index")) | ||||
|     assert b'Error - 404' in res.data | ||||
|  | ||||
|     client.get( | ||||
|         url_for("form_delete", uuid="all"), | ||||
|         follow_redirects=True | ||||
|     ) | ||||
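The browser_steps-N-* form fields posted above encode an ordered list of operations. A hypothetical helper that builds those fields from a list of dicts, matching the field names this test uses:

def steps_to_form_fields(steps):
    # flatten an ordered list of steps into the browser_steps-N-* fields the edit page expects
    fields = {}
    for n, step in enumerate(steps):
        fields[f'browser_steps-{n}-operation'] = step.get('operation', '')
        fields[f'browser_steps-{n}-selector'] = step.get('selector', '')
        fields[f'browser_steps-{n}-optional_value'] = step.get('optional_value', '')
    return fields

form = steps_to_form_fields([
    {'operation': 'Goto site'},
    {'operation': 'Click element', 'selector': 'button[name=test-button]'},
])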
| @@ -3,7 +3,7 @@ import threading | ||||
| import queue | ||||
| import time | ||||
|  | ||||
| from changedetectionio import content_fetcher | ||||
| from changedetectionio import content_fetcher, html_tools | ||||
| from .processors.text_json_diff import FilterNotFoundInResponse | ||||
| from .processors.restock_diff import UnableToExtractRestockData | ||||
|  | ||||
| @@ -209,6 +209,7 @@ class update_worker(threading.Thread): | ||||
|         from .processors import text_json_diff, restock_diff | ||||
|  | ||||
|         while not self.app.config.exit.is_set(): | ||||
|             update_handler = None | ||||
|  | ||||
|             try: | ||||
|                 queued_item_data = self.q.get(block=False) | ||||
| @@ -229,16 +230,36 @@ class update_worker(threading.Thread): | ||||
|                     now = time.time() | ||||
|  | ||||
|                     try: | ||||
|                         processor = self.datastore.data['watching'][uuid].get('processor','text_json_diff') | ||||
|                         # Processor is what we are using for detecting the "Change" | ||||
|                         processor = self.datastore.data['watching'][uuid].get('processor', 'text_json_diff') | ||||
|                         # if system... | ||||
|  | ||||
|                         # Abort processing when the content was the same as the last fetch | ||||
|                         skip_when_same_checksum = queued_item_data.item.get('skip_when_checksum_same') | ||||
|  | ||||
|  | ||||
|                         # @todo some way to switch by name | ||||
|                         # Init a new 'difference_detection_processor' | ||||
|  | ||||
|                         if processor == 'restock_diff': | ||||
|                             update_handler = restock_diff.perform_site_check(datastore=self.datastore) | ||||
|                             update_handler = restock_diff.perform_site_check(datastore=self.datastore, | ||||
|                                                                              watch_uuid=uuid | ||||
|                                                                              ) | ||||
|                         else: | ||||
|                             # Used as a default and also by some tests | ||||
|                             update_handler = text_json_diff.perform_site_check(datastore=self.datastore) | ||||
|                             update_handler = text_json_diff.perform_site_check(datastore=self.datastore, | ||||
|                                                                                watch_uuid=uuid | ||||
|                                                                                ) | ||||
|  | ||||
|                         # Clear last errors (move to preflight func?) | ||||
|                         self.datastore.data['watching'][uuid]['browser_steps_last_error_step'] = None | ||||
|  | ||||
|                         update_handler.call_browser() | ||||
|  | ||||
|                         changed_detected, update_obj, contents = update_handler.run_changedetection(uuid, | ||||
|                                                                                     skip_when_checksum_same=skip_when_same_checksum, | ||||
|                                                                                     ) | ||||
|  | ||||
|                         changed_detected, update_obj, contents = update_handler.run(uuid, skip_when_checksum_same=queued_item_data.item.get('skip_when_checksum_same')) | ||||
|                         # Re #342 | ||||
|                         # In Python 3, all strings are sequences of Unicode characters. There is a bytes type that holds raw bytes. | ||||
|                         # We then convert/.decode('utf-8') for the notification etc | ||||
| @@ -251,7 +272,20 @@ class update_worker(threading.Thread): | ||||
|                         # Totally fine, it's by choice - just continue on, nothing more to care about | ||||
|                         # Page had elements/content but no renderable text | ||||
|                         # Backend (not filters) gave zero output | ||||
|                         self.datastore.update_watch(uuid=uuid, update_obj={'last_error': "Got HTML content but no text found (With {} reply code).".format(e.status_code)}) | ||||
|                         extra_help = "" | ||||
|                         if e.has_filters: | ||||
|                             # Maybe it contains an image? offer a more helpful link | ||||
|                             has_img = html_tools.include_filters(include_filters='img', | ||||
|                                                                  html_content=e.html_content) | ||||
|                             if has_img: | ||||
|                                 extra_help = ", it's possible that the filters you have give an empty result or contain only an image." | ||||
|                             else: | ||||
|                                 extra_help = ", it's possible that the filters were found, but contained no usable text." | ||||
|  | ||||
|                         self.datastore.update_watch(uuid=uuid, update_obj={ | ||||
|                             'last_error': f"Got HTML content but no text found (With {e.status_code} reply code){extra_help}" | ||||
|                         }) | ||||
|  | ||||
|                         if e.screenshot: | ||||
|                             self.datastore.save_screenshot(watch_uuid=uuid, screenshot=e.screenshot) | ||||
|                         process_changedetection_results = False | ||||
| @@ -311,8 +345,13 @@ class update_worker(threading.Thread): | ||||
|                         if not self.datastore.data['watching'].get(uuid): | ||||
|                             continue | ||||
|  | ||||
|                         err_text = "Warning, browser step at position {} could not run, target not found, check the watch, add a delay if necessary.".format(e.step_n+1) | ||||
|                         self.datastore.update_watch(uuid=uuid, update_obj={'last_error': err_text}) | ||||
|                         error_step = e.step_n + 1 | ||||
|                         err_text = f"Warning, browser step at position {error_step} could not run, target not found, check the watch, add a delay if necessary, view Browser Steps to see screenshot at that step" | ||||
|                         self.datastore.update_watch(uuid=uuid, | ||||
|                                                     update_obj={'last_error': err_text, | ||||
|                                                                 'browser_steps_last_error_step': error_step | ||||
|                                                                 } | ||||
|                                                     ) | ||||
|  | ||||
|  | ||||
|                         if self.datastore.data['watching'][uuid].get('filter_failure_notification_send', False): | ||||
| @@ -371,6 +410,9 @@ class update_worker(threading.Thread): | ||||
|                         self.datastore.update_watch(uuid=uuid, update_obj={'last_error': str(e)}) | ||||
|                         # Other serious error | ||||
|                         process_changedetection_results = False | ||||
| #                        import traceback | ||||
| #                        print(traceback.format_exc()) | ||||
|  | ||||
|                     else: | ||||
|                         # Crash protection, the watch entry could have been removed by this point (during a slow chrome fetch etc) | ||||
|                         if not self.datastore.data['watching'].get(uuid): | ||||
|   | ||||
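The "# @todo some way to switch by name" comment above hints at replacing the if/else processor selection with a dynamic lookup. One hedged way to do that, with the module path assumed from the imports in this diff:

import importlib

def make_update_handler(processor_name, datastore, watch_uuid):
    # e.g. processor_name is 'text_json_diff' or 'restock_diff'
    module = importlib.import_module(f"changedetectionio.processors.{processor_name}")
    return module.perform_site_check(datastore=datastore, watch_uuid=watch_uuid)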
| @@ -66,25 +66,12 @@ services: | ||||
| #        browser-chrome: | ||||
| #            condition: service_started | ||||
|  | ||||
| #    browser-chrome: | ||||
| #        hostname: browser-chrome | ||||
| #        image: selenium/standalone-chrome-debug:3.141.59 | ||||
| #        environment: | ||||
| #            - VNC_NO_PASSWORD=1 | ||||
| #            - SCREEN_WIDTH=1920 | ||||
| #            - SCREEN_HEIGHT=1080 | ||||
| #            - SCREEN_DEPTH=24 | ||||
| #        volumes: | ||||
| #            # Workaround to avoid the browser crashing inside a docker container | ||||
| #            # See https://github.com/SeleniumHQ/docker-selenium#quick-start | ||||
| #            - /dev/shm:/dev/shm | ||||
| #        restart: unless-stopped | ||||
|  | ||||
|      # Used for fetching pages via Playwright+Chrome where you need Javascript support. | ||||
|  | ||||
|      # Note: Playwright/browserless is not supported on ARM-type devices (Raspberry Pi etc.) | ||||
|      # RECOMMENDED FOR FETCHING PAGES WITH CHROME | ||||
| #    playwright-chrome: | ||||
| #        hostname: playwright-chrome | ||||
| #        image: browserless/chrome | ||||
| #        image: browserless/chrome:1.60-chrome-stable | ||||
| #        restart: unless-stopped | ||||
| #        environment: | ||||
| #            - SCREEN_WIDTH=1920 | ||||
| @@ -101,6 +88,23 @@ services: | ||||
| #             Ignore HTTPS errors, like for self-signed certs | ||||
| #            - DEFAULT_IGNORE_HTTPS_ERRORS=true | ||||
| # | ||||
|  | ||||
|      # Used for fetching pages via Selenium+Chrome where you need Javascript support. | ||||
|      # Note: works well but is deprecated; it doesn't fetch full-page screenshots and has other issues | ||||
| #    browser-chrome: | ||||
| #        hostname: browser-chrome | ||||
| #        image: selenium/standalone-chrome:4 | ||||
| #        environment: | ||||
| #            - VNC_NO_PASSWORD=1 | ||||
| #            - SCREEN_WIDTH=1920 | ||||
| #            - SCREEN_HEIGHT=1080 | ||||
| #            - SCREEN_DEPTH=24 | ||||
| #        volumes: | ||||
| #            # Workaround to avoid the browser crashing inside a docker container | ||||
| #            # See https://github.com/SeleniumHQ/docker-selenium#quick-start | ||||
| #            - /dev/shm:/dev/shm | ||||
| #        restart: unless-stopped | ||||
|  | ||||
| volumes: | ||||
|   changedetection-data: | ||||
|  | ||||
|   | ||||
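For context on the `selenium/standalone-chrome:4` image pinned above: a client reaches the container over the standard WebDriver endpoint on port 4444. A minimal Selenium 4 sketch, assuming the `browser-chrome` hostname from the compose file (changedetection.io itself is normally pointed at this URL via configuration rather than code):

    # Sketch: connect to the standalone Chrome container as a remote driver.
    from selenium import webdriver

    options = webdriver.ChromeOptions()
    driver = webdriver.Remote(
        command_executor="http://browser-chrome:4444/wd/hub",
        options=options,
    )
    try:
        driver.get("https://example.com")
        print(driver.title)
    finally:
        driver.quit()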
| @@ -1,12 +1,13 @@ | ||||
| eventlet>=0.31.0 | ||||
| eventlet>=0.33.3 # related to dnspython fixes | ||||
| feedgen~=0.9 | ||||
| flask-compress | ||||
| flask-login~=0.5 | ||||
| # 0.6.3 included compatibility fix for werkzeug 3.x (2.x had deprecation of url handlers) | ||||
| flask-login>=0.6.3 | ||||
| flask-paginate | ||||
| flask_expects_json~=1.7 | ||||
| flask_restful | ||||
| flask_wtf | ||||
| flask~=2.0 | ||||
| flask_wtf~=1.2 | ||||
| flask~=2.3 | ||||
| inscriptis~=2.2 | ||||
| pytz | ||||
| timeago~=1.0 | ||||
| @@ -16,7 +17,7 @@ validators~=0.21 | ||||
| # Set these versions together to avoid a RequestsDependencyWarning | ||||
| # >= 2.26 also adds Brotli support if brotli is installed | ||||
| brotli~=1.0 | ||||
| requests[socks] ~=2.28 | ||||
| requests[socks] | ||||
|  | ||||
| urllib3>1.26 | ||||
| chardet>2.3.0 | ||||
| @@ -24,16 +25,12 @@ chardet>2.3.0 | ||||
| wtforms~=3.0 | ||||
| jsonpath-ng~=1.5.3 | ||||
|  | ||||
|  | ||||
| # dnspython 2.3.0 is not compatible with eventlet | ||||
| # * https://github.com/eventlet/eventlet/issues/781 | ||||
| # * https://datastax-oss.atlassian.net/browse/PYTHON-1320 | ||||
| dnspython<2.3.0 | ||||
| dnspython~=2.4 # related to eventlet fixes | ||||
|  | ||||
| # jq not available on Windows so must be installed manually | ||||
|  | ||||
| # Notification library | ||||
| apprise~=1.5.0 | ||||
| apprise~=1.6.0 | ||||
|  | ||||
| # apprise mqtt https://github.com/dgtlmoon/changedetection.io/issues/315 | ||||
| paho-mqtt | ||||
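The eventlet and dnspython bumps in this file move together: older eventlet was incompatible with dnspython 2.3+, which eventlet>=0.33.3 resolves, allowing the pin to advance to dnspython~=2.4. A quick sanity check that the installed pair matches the intent (a sketch, not part of the project; standard library only):

    # Verify the eventlet/dnspython pairing noted in requirements.txt.
    from importlib.metadata import version

    ev, dns = version("eventlet"), version("dnspython")
    print(f"eventlet {ev}, dnspython {dns}")
    assert tuple(map(int, dns.split(".")[:2])) >= (2, 4), \
        "dnspython < 2.4 hits the eventlet incompatibility noted above"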
| @@ -49,21 +46,21 @@ beautifulsoup4 | ||||
| # XPath filtering, lxml is required by bs4 anyway, but put it here to be safe. | ||||
| lxml | ||||
|  | ||||
| # 3.141 was missing socksVersion, 3.150 was not in pypi, so we try 4.1.0 | ||||
| selenium~=4.1.0 | ||||
| # XPath 2.0-3.1 support | ||||
| elementpath | ||||
|  | ||||
| # https://stackoverflow.com/questions/71652965/importerror-cannot-import-name-safe-str-cmp-from-werkzeug-security/71653849#71653849 | ||||
| # ImportError: cannot import name 'safe_str_cmp' from 'werkzeug.security' | ||||
| # need to revisit flask login versions | ||||
| werkzeug~=2.0.0 | ||||
| selenium~=4.14.0 | ||||
|  | ||||
| werkzeug~=3.0 | ||||
|  | ||||
| # Templating, so far just in the URLs but in the future can be for the notifications also | ||||
| jinja2~=3.1 | ||||
| jinja2-time | ||||
|  | ||||
| openpyxl | ||||
| # https://peps.python.org/pep-0508/#environment-markers | ||||
| # https://github.com/dgtlmoon/changedetection.io/pull/1009 | ||||
| jq~=1.3 ;python_version >= "3.8" and sys_platform == "linux" | ||||
| jq~=1.3; python_version >= "3.8" and sys_platform == "darwin" | ||||
| jq~=1.3; python_version >= "3.8" and sys_platform == "linux" | ||||
|  | ||||
| # Any current modern version, required so far for screenshot PNG->JPEG conversion but will be used more in the future | ||||
| pillow | ||||
|   | ||||
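The jq lines rely on PEP 508 environment markers, so the dependency is only installed on Linux and macOS with Python 3.8+. How pip evaluates such a marker can be reproduced with the `packaging` library (a sketch; `packaging` ships alongside pip in most environments):

    # Evaluate a PEP 508 environment marker against the running interpreter.
    from packaging.markers import Marker

    marker = Marker('python_version >= "3.8" and sys_platform == "linux"')
    print(marker.evaluate())  # True on Linux with Python 3.8+, False elsewhere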
| @@ -1 +1 @@ | ||||
| python-3.9.15 | ||||
| python-3.11.5 | ||||
2	setup.py
							| @@ -41,7 +41,7 @@ setup( | ||||
|     include_package_data=True, | ||||
|     install_requires=install_requires, | ||||
|     license="Apache License 2.0", | ||||
|     python_requires=">= 3.6", | ||||
|     python_requires=">= 3.7", | ||||
|     classifiers=['Intended Audience :: Customer Service', | ||||
|                  'Intended Audience :: Developers', | ||||
|                  'Intended Audience :: Education', | ||||
|   | ||||
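The `python_requires` bump means pip will refuse to install the package on interpreters older than 3.7. A sketch of how that specifier is checked, again with the `packaging` library (illustrative only):

    # Check the running interpreter against the setup.py python_requires gate.
    import platform
    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet(">=3.7")
    print(platform.python_version() in spec)  # True on any supported Python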