Compare commits: 0.45.3...abstracted (465 commits)

.dockerignore (31 lines changed)
@@ -1,18 +1,31 @@
-.git
-.github
-changedetectionio/processors/__pycache__
-changedetectionio/api/__pycache__
-changedetectionio/model/__pycache__
-changedetectionio/blueprint/price_data_follower/__pycache__
-changedetectionio/blueprint/tags/__pycache__
-changedetectionio/blueprint/__pycache__
-changedetectionio/blueprint/browser_steps/__pycache__
-changedetectionio/fetchers/__pycache__
-changedetectionio/tests/visualselector/__pycache__
-changedetectionio/tests/restock/__pycache__
-changedetectionio/tests/__pycache__
-changedetectionio/tests/fetchers/__pycache__
-changedetectionio/tests/unit/__pycache__
-changedetectionio/tests/proxy_list/__pycache__
-changedetectionio/__pycache__
+# Git
+.git/
+.gitignore
+
+# GitHub
+.github/
+
+# Byte-compiled / optimized / DLL files
+**/__pycache__
+**/*.py[cod]
+
+# Caches
+.mypy_cache/
+.pytest_cache/
+.ruff_cache/
+
+# Distribution / packaging
+build/
+dist/
+*.egg-info*
+
+# Virtual environment
+.env
+.venv/
+venv/
+
+# IntelliJ IDEA
+.idea/
+
+# Visual Studio
+.vscode/
.github/ISSUE_TEMPLATE/bug_report.md (vendored; 4 lines changed)
@@ -27,6 +27,10 @@ A clear and concise description of what the bug is.
 **Version**
 *Exact version* in the top right area: 0....

+**How did you install?**
+
+Docker, Pip, from source directly etc
+
 **To Reproduce**

 Steps to reproduce the behavior:
.github/dependabot.yml (vendored; new file, 14 lines)
version: 2
updates:
  - package-ecosystem: github-actions
    directory: /
    schedule:
      interval: "weekly"
    "caronc/apprise":
      versioning-strategy: "increase"
    schedule:
      interval: "daily"
    groups:
      all:
        patterns:
          - "*"
.github/test/Dockerfile-alpine (vendored; 23 lines changed)
@@ -2,30 +2,33 @@
 # Test that we can still build on Alpine (musl modified libc https://musl.libc.org/)
 # Some packages wont install via pypi because they dont have a wheel available under this architecture.

-FROM ghcr.io/linuxserver/baseimage-alpine:3.18
+FROM ghcr.io/linuxserver/baseimage-alpine:3.21
+ENV PYTHONUNBUFFERED=1

 COPY requirements.txt /requirements.txt

 RUN \
   apk add --update --no-cache --virtual=build-dependencies \
     build-base \
     cargo \
     g++ \
     gcc \
     git \
     jpeg-dev \
     libc-dev \
     libffi-dev \
     libxslt-dev \
     make \
     openssl-dev \
     py3-wheel \
     python3-dev \
     zip \
     zlib-dev && \
   apk add --update --no-cache \
     libjpeg \
     libxslt \
-    python3 \
-    py3-pip && \
+    nodejs \
+    poppler-utils \
+    python3 && \
   echo "**** pip3 install test of changedetection.io ****" && \
-  pip3 install -U pip wheel setuptools && \
-  pip3 install -U --no-cache-dir --find-links https://wheel-index.linuxserver.io/alpine-3.18/ -r /requirements.txt && \
+  python3 -m venv /lsiopy && \
+  pip install -U pip wheel setuptools && \
+  pip install -U --no-cache-dir --find-links https://wheel-index.linuxserver.io/alpine-3.21/ -r /requirements.txt && \
   apk del --purge \
     build-dependencies
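The same musl/Alpine compatibility check can be reproduced locally before pushing. A minimal sketch, assuming it is run from the repository root (the image tag here is arbitrary):

```bash
# Build the Alpine/musl test image the same way the CI check does;
# --progress=plain keeps the pip install output visible for debugging.
docker build --progress=plain \
  -f .github/test/Dockerfile-alpine \
  -t changedetectionio-alpine-test .
```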
.github/workflows/codeql-analysis.yml (vendored; 6 lines changed)
@@ -34,7 +34,7 @@ jobs:

     # Initializes the CodeQL tools for scanning.
     - name: Initialize CodeQL
-      uses: github/codeql-action/init@v2
+      uses: github/codeql-action/init@v3
       with:
         languages: ${{ matrix.language }}
         # If you wish to specify custom queries, you can do so here or in a config file.
@@ -45,7 +45,7 @@ jobs:
     # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
     # If this step fails, then you should remove it and run the build manually (see below)
     - name: Autobuild
-      uses: github/codeql-action/autobuild@v2
+      uses: github/codeql-action/autobuild@v3

     # ℹ️ Command-line programs to run using the OS shell.
     # 📚 https://git.io/JvXDl
@@ -59,4 +59,4 @@ jobs:
     #   make release

     - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@v2
+      uses: github/codeql-action/analyze@v3
.github/workflows/containers.yml (vendored; 45 lines changed)
@@ -41,7 +41,7 @@ jobs:
     steps:
       - uses: actions/checkout@v4
       - name: Set up Python 3.11
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
          python-version: 3.11

@@ -88,46 +88,49 @@ jobs:
       - name: Build and push :dev
         id: docker_build
         if: ${{ github.ref }} == "refs/heads/master"
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: ./
           file: ./Dockerfile
           push: true
           tags: |
             ${{ secrets.DOCKER_HUB_USERNAME }}/changedetection.io:dev,ghcr.io/${{ github.repository }}:dev
-          platforms: linux/amd64,linux/arm64,linux/arm/v6,linux/arm/v7,linux/arm/v8
-          cache-from: type=local,src=/tmp/.buildx-cache
-          cache-to: type=local,dest=/tmp/.buildx-cache
+          platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v8,linux/arm64/v8
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
           # Looks like this was disabled
           # provenance: false

       # A new tagged release is required, which builds :tag and :latest
+      - name: Docker meta :tag
+        if: github.event_name == 'release' && startsWith(github.event.release.tag_name, '0.')
+        uses: docker/metadata-action@v5
+        id: meta
+        with:
+          images: |
+            ${{ secrets.DOCKER_HUB_USERNAME }}/changedetection.io
+            ghcr.io/dgtlmoon/changedetection.io
+          tags: |
+            type=semver,pattern={{version}}
+            type=semver,pattern={{major}}.{{minor}}
+            type=semver,pattern={{major}}

       - name: Build and push :tag
         id: docker_build_tag_release
         if: github.event_name == 'release' && startsWith(github.event.release.tag_name, '0.')
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: ./
           file: ./Dockerfile
           push: true
-          tags: |
-            ${{ secrets.DOCKER_HUB_USERNAME }}/changedetection.io:${{ github.event.release.tag_name }}
-            ghcr.io/dgtlmoon/changedetection.io:${{ github.event.release.tag_name }}
-            ${{ secrets.DOCKER_HUB_USERNAME }}/changedetection.io:latest
-            ghcr.io/dgtlmoon/changedetection.io:latest
-          platforms: linux/amd64,linux/arm64,linux/arm/v6,linux/arm/v7,linux/arm/v8
-          cache-from: type=local,src=/tmp/.buildx-cache
-          cache-to: type=local,dest=/tmp/.buildx-cache
+          tags: ${{ steps.meta.outputs.tags }}
+          platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v8,linux/arm64/v8
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
           # Looks like this was disabled
           # provenance: false

       - name: Image digest
         run: echo step SHA ${{ steps.vars.outputs.sha_short }} tag ${{steps.vars.outputs.tag}} branch ${{steps.vars.outputs.branch}} digest ${{ steps.docker_build.outputs.digest }}

-      - name: Cache Docker layers
-        uses: actions/cache@v3
-        with:
-          path: /tmp/.buildx-cache
-          key: ${{ runner.os }}-buildx-${{ github.sha }}
-          restore-keys: |
-            ${{ runner.os }}-buildx-
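The new `docker/metadata-action` step replaces the hand-written `:tag`/`:latest` list: the three semver patterns expand one release tag into progressively coarser tags, so users can pin an exact version or track a whole release line. For an illustrative release tag of 0.45.3, all of these pulls would resolve:

```bash
# type=semver,pattern={{version}}         -> 0.45.3
# type=semver,pattern={{major}}.{{minor}} -> 0.45
# type=semver,pattern={{major}}           -> 0
docker pull ghcr.io/dgtlmoon/changedetection.io:0.45.3
docker pull ghcr.io/dgtlmoon/changedetection.io:0.45
docker pull ghcr.io/dgtlmoon/changedetection.io:0
```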
.github/workflows/pypi-release.yml (vendored; new file, 80 lines)
name: Publish Python 🐍distribution 📦 to PyPI and TestPyPI

on: push
jobs:
  build:
    name: Build distribution 📦
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      - name: Install pypa/build
        run: >-
          python3 -m
          pip install
          build
          --user
      - name: Build a binary wheel and a source tarball
        run: python3 -m build
      - name: Store the distribution packages
        uses: actions/upload-artifact@v4
        with:
          name: python-package-distributions
          path: dist/


  test-pypi-package:
    name: Test the built 📦 package works basically.
    runs-on: ubuntu-latest
    needs:
      - build
    steps:
      - name: Download all the dists
        uses: actions/download-artifact@v4
        with:
          name: python-package-distributions
          path: dist/
      - name: Set up Python 3.11
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
      - name: Test that the basic pip built package runs without error
        run: |
          set -ex
          ls -alR

          # Find and install the first .whl file
          find dist -type f -name "*.whl" -exec pip3 install {} \; -quit
          changedetection.io -d /tmp -p 10000 &

          sleep 3
          curl --retry-connrefused --retry 6 http://127.0.0.1:10000/static/styles/pure-min.css >/dev/null
          curl --retry-connrefused --retry 6 http://127.0.0.1:10000/ >/dev/null
          killall changedetection.io


  publish-to-pypi:
    name: >-
      Publish Python 🐍 distribution 📦 to PyPI
    if: startsWith(github.ref, 'refs/tags/')  # only publish to PyPI on tag pushes
    needs:
      - test-pypi-package
    runs-on: ubuntu-latest
    environment:
      name: release
      url: https://pypi.org/p/changedetection.io
    permissions:
      id-token: write  # IMPORTANT: mandatory for trusted publishing

    steps:
      - name: Download all the dists
        uses: actions/download-artifact@v4
        with:
          name: python-package-distributions
          path: dist/
      - name: Publish distribution 📦 to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
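The `test-pypi-package` job is easy to reproduce locally before tagging a release. A rough local equivalent, assuming a recent Python and that port 10000 is free:

```bash
# Roughly what the test-pypi-package job above does, run locally:
python3 -m pip install --user build     # PEP 517 build frontend
python3 -m build                        # writes dist/*.whl and dist/*.tar.gz
pip3 install dist/*.whl
changedetection.io -d /tmp -p 10000 &   # throwaway datastore in /tmp
sleep 3
curl --retry-connrefused --retry 6 http://127.0.0.1:10000/ >/dev/null
killall changedetection.io
```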
.github/workflows/test-container-build.yml (vendored; 10 lines changed)
@@ -11,12 +11,14 @@ on:
       - requirements.txt
       - Dockerfile
       - .github/workflows/*
+      - .github/test/Dockerfile*

   pull_request:
     paths:
       - requirements.txt
       - Dockerfile
       - .github/workflows/*
+      - .github/test/Dockerfile*

 # Changes to requirements.txt packages and Dockerfile may or may not always be compatible with arm etc, so worth testing
 # @todo: some kind of path filter for requirements.txt and Dockerfile
@@ -26,7 +28,7 @@ jobs:
     steps:
       - uses: actions/checkout@v4
       - name: Set up Python 3.11
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: 3.11

@@ -49,7 +51,7 @@ jobs:
       # Check we can still build under alpine/musl
       - name: Test that the docker containers can build (musl via alpine check)
         id: docker_build_musl
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: ./
           file: ./.github/test/Dockerfile-alpine
@@ -57,12 +59,12 @@ jobs:

       - name: Test that the docker containers can build
         id: docker_build
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         # https://github.com/docker/build-push-action#customizing
         with:
           context: ./
           file: ./Dockerfile
-          platforms: linux/amd64,linux/arm64,linux/arm/v6,linux/arm/v7,linux/arm/v8
+          platforms: linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v8,linux/arm64/v8
           cache-from: type=local,src=/tmp/.buildx-cache
           cache-to: type=local,dest=/tmp/.buildx-cache
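The `platforms:` list relies on buildx with QEMU emulation for the non-native architectures. A sketch of trying the same cross-build locally; registering the binfmt handlers via the `tonistiigi/binfmt` image is a common approach and an assumption here, not something this workflow itself does:

```bash
# One-time setup: register QEMU binfmt handlers and create a buildx builder
docker run --privileged --rm tonistiigi/binfmt --install all
docker buildx create --use

# Cross-build (no push) a subset of the platform list the workflow exercises
docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 .
```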
.github/workflows/test-only.yml (vendored; 105 lines changed)
@@ -4,17 +4,10 @@ name: ChangeDetection.io App Test
 on: [push, pull_request]

 jobs:
-  test-application:
+  lint-code:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4

-      # Mainly just for link/flake8
-      - name: Set up Python 3.11
-        uses: actions/setup-python@v4
-        with:
-          python-version: '3.11'
-
       - name: Lint with flake8
         run: |
           pip3 install flake8
@@ -23,79 +16,31 @@ jobs:
           # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
           flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics

-      - name: Spin up ancillary testable services
-        run: |
-
-          docker network create changedet-network
+  test-application-3-10:
+    needs: lint-code
+    uses: ./.github/workflows/test-stack-reusable-workflow.yml
+    with:
+      python-version: '3.10'

-          # Selenium+browserless
-          docker run --network changedet-network -d --hostname selenium -p 4444:4444 --rm --shm-size="2g" selenium/standalone-chrome-debug:3.141.59
-          docker run --network changedet-network -d --hostname browserless -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" -e "DEFAULT_LAUNCH_ARGS=[\"--window-size=1920,1080\"]" --rm -p 3000:3000 --shm-size="2g" browserless/chrome:1.53-chrome-stable
-
-      - name: Build changedetection.io container for testing
-        run: |
-          # Build a changedetection.io container and start testing inside
-          docker build . -t test-changedetectionio
-          # Debug info
-          docker run test-changedetectionio bash -c 'pip list'
+  test-application-3-11:
+    needs: lint-code
+    uses: ./.github/workflows/test-stack-reusable-workflow.yml
+    with:
+      python-version: '3.11'
+      skip-pypuppeteer: true

-      - name: Spin up ancillary SMTP+Echo message test server
-        run: |
-          # Debug SMTP server/echo message back server
-          docker run --network changedet-network -d -p 11025:11025 -p 11080:11080 --hostname mailserver test-changedetectionio bash -c 'python changedetectionio/tests/smtp/smtp-test-server.py'
+  test-application-3-12:
+    needs: lint-code
+    uses: ./.github/workflows/test-stack-reusable-workflow.yml
+    with:
+      python-version: '3.12'
+      skip-pypuppeteer: true

-      - name: Test built container with pytest
-        run: |
-          # Unit tests
-          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_notification_diff'
-
-          # All tests
-          docker run --network changedet-network test-changedetectionio bash -c 'cd changedetectionio && ./run_basic_tests.sh'
-
-      - name: Test built container selenium+browserless/playwright
-        run: |
-
-          # Selenium fetch
-          docker run --rm -e "WEBDRIVER_URL=http://selenium:4444/wd/hub" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/fetchers/test_content.py && pytest tests/test_errorhandling.py'
-
-          # Playwright/Browserless fetch
-          docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/fetchers/test_content.py && pytest tests/test_errorhandling.py && pytest tests/visualselector/test_fetch_data.py'
-
-          # Settings headers playwright tests - Call back in from Browserless, check headers
-          docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000?dumpio=true" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'
-          docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "WEBDRIVER_URL=http://selenium:4444/wd/hub" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'
-          docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "USE_EXPERIMENTAL_PUPPETEER_FETCH=yes" -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000?dumpio=true" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'
-
-          # restock detection via playwright - added name=changedet here so that playwright/browserless can connect to it
-          docker run --rm --name "changedet" -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-port=5004 --live-server-host=0.0.0.0 tests/restock/test_restock.py'
-
-      - name: Test SMTP notification mime types
-        run: |
-          # SMTP content types - needs the 'Debug SMTP server/echo message back server' container from above
-          docker run --rm --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/smtp/test_notification_smtp.py'
-
-      - name: Test with puppeteer fetcher and disk cache
-        run: |
-          docker run --rm -e "PUPPETEER_DISK_CACHE=/tmp/data/" -e "USE_EXPERIMENTAL_PUPPETEER_FETCH=yes" -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/fetchers/test_content.py && pytest tests/test_errorhandling.py && pytest tests/visualselector/test_fetch_data.py'
-          # Browserless would have had -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" added above
-
-      - name: Test proxy interaction
-        run: |
-          cd changedetectionio
-          ./run_proxy_tests.sh
-          cd ..
-
-      - name: Test changedetection.io container starts+runs basically without error
-        run: |
-          docker run -p 5556:5000 -d test-changedetectionio
-          sleep 3
-          # Should return 0 (no error) when grep finds it
-          curl -s http://localhost:5556 |grep -q checkbox-uuid
-
-          # and IPv6
-          curl -s -g -6 "http://[::1]:5556"|grep -q checkbox-uuid
-
-
-          #export WEBDRIVER_URL=http://localhost:4444/wd/hub
-          #pytest tests/fetchers/test_content.py
-          #pytest tests/test_errorhandling.py
+  test-application-3-13:
+    needs: lint-code
+    uses: ./.github/workflows/test-stack-reusable-workflow.yml
+    with:
+      python-version: '3.13'
+      skip-pypuppeteer: true
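Since `lint-code` now gates every test job, it is worth running the same checks before pushing. A local sketch: the second command matches the workflow verbatim, while the first, fatal pass uses the stock GitHub-template select list as an assumption, since its exact flags are not visible in this capture:

```bash
pip3 install flake8
# Fatal pass: syntax errors and undefined names (assumed flag set)
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
```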
.github/workflows/test-pip-build.yml (vendored; deleted, 36 lines)
name: ChangeDetection.io PIP package test

# Triggers the workflow on push or pull request events

# This line doesnt work, even tho it is the documented one
on: [push, pull_request]

# Changes to requirements.txt packages and Dockerfile may or may not always be compatible with arm etc, so worth testing
# @todo: some kind of path filter for requirements.txt and Dockerfile
jobs:
  test-pip-build-basics:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python 3.11
        uses: actions/setup-python@v4
        with:
          python-version: 3.11


      - name: Test that the basic pip built package runs without error
        run: |
          set -e
          mkdir dist
          pip3 install wheel
          python3 setup.py bdist_wheel
          pip3 install -r requirements.txt
          rm ./changedetection.py
          rm -rf changedetectio

          pip3 install dist/changedetection.io*.whl
          changedetection.io -d /tmp -p 10000 &
          sleep 3
          curl http://127.0.0.1:10000/static/styles/pure-min.css >/dev/null
          killall -9 changedetection.io
.github/workflows/test-stack-reusable-workflow.yml (vendored; new file, 241 lines)
name: ChangeDetection.io App Test

on:
  workflow_call:
    inputs:
      python-version:
        description: 'Python version to use'
        required: true
        type: string
        default: '3.10'
      skip-pypuppeteer:
        description: 'Skip PyPuppeteer (not supported in 3.11/3.12)'
        required: false
        type: boolean
        default: false

jobs:
  test-application:
    runs-on: ubuntu-latest
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v4

      # Mainly just for link/flake8
      - name: Set up Python ${{ env.PYTHON_VERSION }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.PYTHON_VERSION }}

      - name: Build changedetection.io container for testing under Python ${{ env.PYTHON_VERSION }}
        run: |
          echo "---- Building for Python ${{ env.PYTHON_VERSION }} -----"
          # Build a changedetection.io container and start testing inside
          docker build --build-arg PYTHON_VERSION=${{ env.PYTHON_VERSION }} --build-arg LOGGER_LEVEL=TRACE -t test-changedetectionio .
          # Debug info
          docker run test-changedetectionio bash -c 'pip list'

      - name: We should be Python ${{ env.PYTHON_VERSION }} ...
        run: |
          docker run test-changedetectionio bash -c 'python3 --version'

      - name: Spin up ancillary testable services
        run: |

          docker network create changedet-network

          # Selenium
          docker run --network changedet-network -d --hostname selenium -p 4444:4444 --rm --shm-size="2g" selenium/standalone-chrome:4

          # SocketPuppetBrowser + Extra for custom browser test
          docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser --hostname sockpuppetbrowser --rm -p 3000:3000 dgtlmoon/sockpuppetbrowser:latest
          docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser-custom-url --hostname sockpuppetbrowser-custom-url -p 3001:3000 --rm dgtlmoon/sockpuppetbrowser:latest

      - name: Spin up ancillary SMTP+Echo message test server
        run: |
          # Debug SMTP server/echo message back server
          docker run --network changedet-network -d -p 11025:11025 -p 11080:11080 --hostname mailserver test-changedetectionio bash -c 'pip3 install aiosmtpd && python changedetectionio/tests/smtp/smtp-test-server.py'
          docker ps

      - name: Show docker container state and other debug info
        run: |
          set -x
          echo "Running processes in docker..."
          docker ps

      - name: Run Unit Tests
        run: |
          # Unit tests
          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_notification_diff'
          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_watch_model'
          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_jinja2_security'
          docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_semver'

      - name: Test built container with Pytest (generally as requests/plaintext fetching)
        run: |
          # All tests
          echo "run test with pytest"
          # The default pytest logger_level is TRACE
          # To change logger_level for pytest(test/conftest.py),
          # append the docker option. e.g. '-e LOGGER_LEVEL=DEBUG'
          docker run --name test-cdio-basic-tests --network changedet-network test-changedetectionio bash -c 'cd changedetectionio && ./run_basic_tests.sh'

      # PLAYWRIGHT/NODE-> CDP
      - name: Playwright and SocketPuppetBrowser - Specific tests in built container
        run: |
          # Playwright via Sockpuppetbrowser fetch
          # tests/visualselector/test_fetch_data.py will do browser steps
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_content.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_errorhandling.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/visualselector/test_fetch_data.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_custom_js_before_content.py'

      - name: Playwright and SocketPuppetBrowser - Headers and requests
        run: |
          # Settings headers playwright tests - Call back in from Sockpuppetbrowser, check headers
          docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000?dumpio=true" --network changedet-network test-changedetectionio bash -c 'find .; cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py; pwd;find .'

      - name: Playwright and SocketPuppetBrowser - Restock detection
        run: |
          # restock detection via playwright - added name=changedet here so that playwright and sockpuppetbrowser can connect to it
          docker run --rm --name "changedet" -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-port=5004 --live-server-host=0.0.0.0 tests/restock/test_restock.py'

      # STRAIGHT TO CDP
      - name: Pyppeteer and SocketPuppetBrowser - Specific tests in built container
        if: ${{ inputs.skip-pypuppeteer == false }}
        run: |
          # Playwright via Sockpuppetbrowser fetch
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_content.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_errorhandling.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/visualselector/test_fetch_data.py'
          docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_custom_js_before_content.py'

      - name: Pyppeteer and SocketPuppetBrowser - Headers and requests checks
        if: ${{ inputs.skip-pypuppeteer == false }}
        run: |
          # Settings headers playwright tests - Call back in from Sockpuppetbrowser, check headers
          docker run --name "changedet" --hostname changedet --rm -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000?dumpio=true" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'

      - name: Pyppeteer and SocketPuppetBrowser - Restock detection
        if: ${{ inputs.skip-pypuppeteer == false }}
        run: |
          # restock detection via playwright - added name=changedet here so that playwright and sockpuppetbrowser can connect to it
          docker run --rm --name "changedet" -e "FLASK_SERVER_NAME=changedet" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-port=5004 --live-server-host=0.0.0.0 tests/restock/test_restock.py'

      # SELENIUM
      - name: Specific tests in built container for Selenium
        run: |
          # Selenium fetch
          docker run --rm -e "WEBDRIVER_URL=http://selenium:4444/wd/hub" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/fetchers/test_content.py && pytest tests/test_errorhandling.py'

      - name: Specific tests in built container for headers and requests checks with Selenium
        run: |
          docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "WEBDRIVER_URL=http://selenium:4444/wd/hub" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'

      # OTHER STUFF
      - name: Test SMTP notification mime types
        run: |
          # SMTP content types - needs the 'Debug SMTP server/echo message back server' container from above
          # "mailserver" hostname defined above
          docker run --rm --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/smtp/test_notification_smtp.py'

      # @todo Add a test via playwright/puppeteer
      # squid with auth is tested in run_proxy_tests.sh -> tests/proxy_list/test_select_custom_proxy.py
      - name: Test proxy squid style interaction
        run: |
          cd changedetectionio
          ./run_proxy_tests.sh
          cd ..

      - name: Test proxy SOCKS5 style interaction
        run: |
          cd changedetectionio
          ./run_socks_proxy_tests.sh
          cd ..

      - name: Test custom browser URL
        run: |
          cd changedetectionio
          ./run_custom_browser_url_tests.sh
          cd ..

      - name: Test changedetection.io container starts+runs basically without error
        run: |
          docker run --name test-changedetectionio -p 5556:5000 -d test-changedetectionio
          sleep 3
          # Should return 0 (no error) when grep finds it
          curl --retry-connrefused --retry 6 -s http://localhost:5556 |grep -q checkbox-uuid

          # and IPv6
          curl --retry-connrefused --retry 6 -s -g -6 "http://[::1]:5556"|grep -q checkbox-uuid

          # Check whether TRACE log is enabled.
          # Also, check whether TRACE is came from STDERR
          docker logs test-changedetectionio 2>&1 1>/dev/null | grep 'TRACE log is enabled' || exit 1
          # Check whether DEBUG is came from STDOUT
          docker logs test-changedetectionio 2>/dev/null | grep 'DEBUG' || exit 1

          docker kill test-changedetectionio

      - name: Test changedetection.io SIGTERM and SIGINT signal shutdown
        run: |

          echo SIGINT Shutdown request test
          docker run --name sig-test -d test-changedetectionio
          sleep 3
          echo ">>> Sending SIGINT to sig-test container"
          docker kill --signal=SIGINT sig-test
          sleep 3
          # invert the check (it should be not 0/not running)
          docker ps
          # check signal catch(STDERR) log. Because of
          # changedetectionio/__init__.py: logger.add(sys.stderr, level=logger_level)
          docker logs sig-test 2>&1 | grep 'Shutdown: Got Signal - SIGINT' || exit 1
          test -z "`docker ps|grep sig-test`"
          if [ $? -ne 0 ]
          then
            echo "Looks like container was running when it shouldnt be"
            docker ps
            exit 1
          fi

          # @todo - scan the container log to see the right "graceful shutdown" text exists
          docker rm sig-test

          echo SIGTERM Shutdown request test
          docker run --name sig-test -d test-changedetectionio
          sleep 3
          echo ">>> Sending SIGTERM to sig-test container"
          docker kill --signal=SIGTERM sig-test
          sleep 3
          # invert the check (it should be not 0/not running)
          docker ps
          # check signal catch(STDERR) log. Because of
          # changedetectionio/__init__.py: logger.add(sys.stderr, level=logger_level)
          docker logs sig-test 2>&1 | grep 'Shutdown: Got Signal - SIGTERM' || exit 1
          test -z "`docker ps|grep sig-test`"
          if [ $? -ne 0 ]
          then
            echo "Looks like container was running when it shouldnt be"
            docker ps
            exit 1
          fi

          # @todo - scan the container log to see the right "graceful shutdown" text exists
          docker rm sig-test

      - name: Dump container log
        if: always()
        run: |
          mkdir output-logs
          docker logs test-cdio-basic-tests > output-logs/test-cdio-basic-tests-stdout-${{ env.PYTHON_VERSION }}.txt
          docker logs test-cdio-basic-tests 2> output-logs/test-cdio-basic-tests-stderr-${{ env.PYTHON_VERSION }}.txt

      - name: Store everything including test-datastore
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: test-cdio-basic-tests-output-py${{ env.PYTHON_VERSION }}
          path: .
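The SIGINT/SIGTERM step above distils to a handy local smoke test for graceful shutdown, assuming the `test-changedetectionio` image has already been built:

```bash
docker run --name sig-test -d test-changedetectionio
sleep 3
docker kill --signal=SIGTERM sig-test
sleep 3
# The shutdown handler logs to STDERR (loguru adds sys.stderr as a sink)
docker logs sig-test 2>&1 | grep 'Shutdown: Got Signal - SIGTERM'
docker rm sig-test
```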
.gitignore (vendored; 39 lines changed)
@@ -1,14 +1,29 @@
-__pycache__
-.idea
-*.pyc
-datastore/url-watches.json
-datastore/*
-__pycache__
-.pytest_cache
-build
-dist
-venv
-test-datastore/*
-test-datastore
+# Byte-compiled / optimized / DLL files
+**/__pycache__
+**/*.py[cod]
+
+# Caches
+.mypy_cache/
+.pytest_cache/
+.ruff_cache/
+
+# Distribution / packaging
+build/
+dist/
+*.egg-info*
+
+# Virtual environment
+.env
+.venv/
+venv/
+
+# IDEs
+.idea
+.vscode/settings.json
+
+# Datastore files
+datastore/
+test-datastore/
+
+# Memory consumption log
+test-memory.log
COMMERCIAL_LICENCE.md (new file, 54 lines)
# Generally

In any commercial activity involving 'Hosting' (as defined herein), whether in part or in full, this license must be executed and adhered to.

# Commercial License Agreement

This Commercial License Agreement ("Agreement") is entered into by and between Web Technologies s.r.o. here-in ("Licensor") and (your company or personal name) _____________ ("Licensee"). This Agreement sets forth the terms and conditions under which Licensor provides its software ("Software") and services to Licensee for the purpose of reselling the software either in part or full, as part of any commercial activity where the activity involves a third party.

### Definition of Hosting

For the purposes of this Agreement, "hosting" means making the functionality of the Program or modified version available to third parties as a service. This includes, without limitation:
- Enabling third parties to interact with the functionality of the Program or modified version remotely through a computer network.
- Offering a service the value of which entirely or primarily derives from the value of the Program or modified version.
- Offering a service that accomplishes for users the primary purpose of the Program or modified version.

## 1. Grant of License
Subject to the terms and conditions of this Agreement, Licensor grants Licensee a non-exclusive, non-transferable license to install, use, and resell the Software. Licensee may:
- Resell the Software as part of a service offering or as a standalone product.
- Host the Software on a server and provide it as a hosted service (e.g., Software as a Service - SaaS).
- Integrate the Software into a larger product or service that is then sold or provided for commercial purposes, where the software is used either in part or full.

## 2. License Fees
Licensee agrees to pay Licensor the license fees specified in the ordering document. License fees are due and payable as specified in the ordering document. The fees may include initial licensing costs and recurring fees based on the number of end users, instances of the Software resold, or revenue generated from the resale activities.

## 3. Resale Conditions
Licensee must comply with the following conditions when reselling the Software, whether the software is resold in part or full:
- Provide end users with access to the source code under the same open-source license conditions as provided by Licensor.
- Clearly state in all marketing and sales materials that the Software is provided under a commercial license from Licensor, and provide a link back to https://changedetection.io.
- Ensure end users are aware of and agree to the terms of the commercial license prior to resale.
- Do not sublicense or transfer the Software to third parties except as part of an authorized resale activity.

## 4. Hosting and Provision of Services
Licensee may host the Software (either in part or full) on its servers and provide it as a hosted service to end users. The following conditions apply:
- Licensee must ensure that all hosted versions of the Software comply with the terms of this Agreement.
- Licensee must provide Licensor with regular reports detailing the number of end users and instances of the hosted service.
- Any modifications to the Software made by Licensee for hosting purposes must be made available to end users under the same open-source license conditions, unless agreed otherwise.

## 5. Services
Licensor will provide support and maintenance services as described in the support policy referenced in the ordering document should such an agreement be signed by all parties. Additional fees may apply for support services provided to end users resold by Licensee.

## 6. Reporting and Audits
Licensee agrees to provide Licensor with regular reports detailing the number of instances, end users, and revenue generated from the resale of the Software. Licensor reserves the right to audit Licensee’s records to ensure compliance with this Agreement.

## 7. Term and Termination
This Agreement shall commence on the effective date and continue for the period set forth in the ordering document unless terminated earlier in accordance with this Agreement. Either party may terminate this Agreement if the other party breaches any material term and fails to cure such breach within thirty (30) days after receipt of written notice.

## 8. Limitation of Liability and Disclaimer of Warranty
Executing this commercial license does not waive the Limitation of Liability or Disclaimer of Warranty as stated in the open-source LICENSE provided with the Software. The Software is provided "as is," without warranty of any kind, express or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose, and noninfringement. In no event shall the authors or copyright holders be liable for any claim, damages, or other liability, whether in an action of contract, tort, or otherwise, arising from, out of, or in connection with the Software or the use or other dealings in the Software.

## 9. Governing Law
This Agreement shall be governed by and construed in accordance with the laws of the Czech Republic.

## Contact Information
For commercial licensing inquiries, please contact contact@changedetection.io and dgtlmoon@gmail.com.
CONTRIBUTING.md (2 lines changed)
@@ -2,7 +2,7 @@ Contributing is always welcome!

 I am no professional flask developer, if you know a better way that something can be done, please let me know!

-Otherwise, it's always best to PR into the `dev` branch.
+Otherwise, it's always best to PR into the `master` branch.

 Please be sure that all new functionality has a matching test!
Dockerfile (34 lines changed)
@@ -1,5 +1,11 @@
 # pip dependencies install stage
-FROM python:3.11-slim-bullseye as builder
+
+# @NOTE! I would love to move to 3.11 but it breaks the async handler in changedetectionio/content_fetchers/puppeteer.py
+# If you know how to fix it, please do! and test it for both 3.10 and 3.11
+
+ARG PYTHON_VERSION=3.11
+
+FROM python:${PYTHON_VERSION}-slim-bookworm AS builder

 # See `cryptography` pin comment in requirements.txt
 ARG CRYPTOGRAPHY_DONT_BUILD_RUST=1
@@ -20,20 +26,23 @@ WORKDIR /install

 COPY requirements.txt /requirements.txt

-RUN pip install --target=/dependencies -r /requirements.txt
+# --extra-index-url https://www.piwheels.org/simple is for cryptography module to be prebuilt (or rustc etc needs to be installed)
+RUN pip install --extra-index-url https://www.piwheels.org/simple --target=/dependencies -r /requirements.txt

 # Playwright is an alternative to Selenium
 # Excluded this package from requirements.txt to prevent arm/v6 and arm/v7 builds from failing
 # https://github.com/dgtlmoon/changedetection.io/pull/1067 also musl/alpine (not supported)
-RUN pip install --target=/dependencies playwright~=1.27.1 \
+RUN pip install --target=/dependencies playwright~=1.48.0 \
     || echo "WARN: Failed to install Playwright. The application can still run, but the Playwright option will be disabled."

 # Final image stage
-FROM python:3.11-slim-bullseye
+FROM python:${PYTHON_VERSION}-slim-bookworm
 LABEL org.opencontainers.image.source="https://github.com/dgtlmoon/changedetection.io"

 RUN apt-get update && apt-get install -y --no-install-recommends \
     libssl1.1 \
     libxslt1.1 \
     # For presenting price amounts correctly in the restock/price detection overview
     locales \
     # For pdftohtml
     poppler-utils \
     zlib1g \
@@ -54,12 +63,17 @@ ENV PYTHONPATH=/usr/local

 EXPOSE 5000

-# The actual flask app
+# The actual flask app module
 COPY changedetectionio /app/changedetectionio

-# The eventlet server wrapper
+# Starting wrapper
 COPY changedetection.py /app/changedetection.py

-WORKDIR /app
-CMD ["python", "./changedetection.py", "-d", "/datastore"]
+WORKDIR /app
+# Github Action test purpose(test-only.yml).
+# On production, it is effectively LOGGER_LEVEL=''.
+ARG LOGGER_LEVEL=''
+ENV LOGGER_LEVEL "$LOGGER_LEVEL"
+
+CMD [ "python", "./changedetection.py" , "-d", "/datastore"]
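With the new `ARG PYTHON_VERSION` and `ARG LOGGER_LEVEL`, the interpreter and log level are now chosen at build time, which is exactly how the reusable test workflow builds its Python matrix. For example (3.12 here is illustrative):

```bash
docker build \
  --build-arg PYTHON_VERSION=3.12 \
  --build-arg LOGGER_LEVEL=TRACE \
  -t changedetection.io-py312 .
```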
MANIFEST.in (5 lines added; resulting file shown)
@@ -1,18 +1,23 @@
recursive-include changedetectionio/api *
recursive-include changedetectionio/apprise_plugin *
recursive-include changedetectionio/blueprint *
recursive-include changedetectionio/content_fetchers *
recursive-include changedetectionio/model *
recursive-include changedetectionio/processors *
recursive-include changedetectionio/res *
recursive-include changedetectionio/static *
recursive-include changedetectionio/storage *
recursive-include changedetectionio/templates *
recursive-include changedetectionio/tests *
prune changedetectionio/static/package-lock.json
prune changedetectionio/static/styles/node_modules
prune changedetectionio/static/styles/package-lock.json
include changedetection.py
include requirements.txt
include README-pip.md
global-exclude *.pyc
global-exclude node_modules
global-exclude venv

global-exclude test-datastore
global-exclude changedetection.io*dist-info
global-exclude changedetectionio/tests/proxy_socks5/test-datastore
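A quick way to confirm the prune/global-exclude rules actually keep `node_modules` and test datastores out of the published package; a sketch that assumes the `build` package is installed:

```bash
python3 -m build --sdist
# Expect no output from the grep if the excludes are working
tar -tzf dist/*.tar.gz | grep -E 'node_modules|test-datastore' || echo "sdist looks clean"
```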
71
README.md
@@ -11,12 +11,13 @@ _Live your data-life pro-actively._
[**Don't have time? Let us host it for you! try our $8.99/month subscription - use our proxies and support!**](https://changedetection.io) , _half the price of other website change monitoring services!_
[**Get started with website page change monitoring straight away. Don't have time? Try our $8.99/month subscription, use our proxies and support!**](https://changedetection.io) , _half the price of other website change monitoring services!_

- Chrome browser included.
- Nothing to install, access via browser login after signup.
- Super fast setup, no registration needed.
- Get started watching and receiving website change notifications straight away.

- See our [tutorials and how-to page for more inspiration](https://changedetection.io/tutorials)

### Target specific parts of the webpage using the Visual Selector tool.

@@ -40,6 +41,20 @@ Using the **Browser Steps** configuration, add basic steps before performing cha
After **Browser Steps** have been run, then visit the **Visual Selector** tab to refine the content you're interested in.
Requires Playwright to be enabled.

### Awesome restock and price change notifications

Enable the _"Re-stock & Price detection for single product pages"_ option to activate the best way to monitor product pricing. This will extract any meta-data in the HTML page and give you many options to follow the pricing of the product.

Easily organise and monitor prices for products from the dashboard, get alerts and notifications when the price of a product changes or comes back in stock again!

[<img src="docs/restock-overview.png" style="max-width:100%;" alt="Easily keep an eye on product price changes directly from the UI" title="Easily keep an eye on product price changes directly from the UI" />](https://changedetection.io?src=github)

Set price change notification parameters: upper and lower price, price change percentage and more.
Always know when a product for sale drops in price.

[<img src="docs/restock-settings.png" style="max-width:100%;" alt="Set upper lower and percentage price change notification values" title="Set upper lower and percentage price change notification values" />](https://changedetection.io?src=github)

### Example use cases

@@ -90,6 +105,23 @@ We [recommend and use Bright Data](https://brightdata.grsm.io/n0r16zf7eivq) glob

Please :star: star :star: this project and help it grow! https://github.com/dgtlmoon/changedetection.io/

### Schedule web page watches in any timezone, limit by day of week and time.

Easily set a re-check schedule; for example, you could limit the web page change detection to only operate during business hours.
Or perhaps based on a foreign timezone (for example, you want to check for the latest news-headlines in a foreign country at 09:00 AM).

<img src="./docs/scheduler.png" style="max-width:80%;" alt="How to monitor web page changes according to a schedule" title="How to monitor web page changes according to a schedule" />

Includes quick short-cut buttons to set up a schedule for **business hours only**, or **weekends**.

### We have a Chrome extension!

Easily add the current web page to your changedetection.io tool; simply install the extension and click "Sync" to connect it to your existing changedetection.io install.

[<img src="./docs/chrome-extension-screenshot.png" style="max-width:80%;" alt="Chrome Extension to easily add the current web-page to detect a change." title="Chrome Extension to easily add the current web-page to detect a change." />](https://chromewebstore.google.com/detail/changedetectionio-website/kefcfmgmlhmankjmnbijimhofdjekbop)

[Go to the Chrome Webstore to download the extension.](https://chromewebstore.google.com/detail/changedetectionio-website/kefcfmgmlhmankjmnbijimhofdjekbop) ( Or check out the [GitHub repo](https://github.com/dgtlmoon/changedetection.io-browser-extension) )

## Installation

### Docker
@@ -97,7 +129,7 @@ Please :star: star :star: this project and help it grow! https://github.com/dgtl
With Docker compose, just clone this repository and...

```bash
$ docker-compose up -d
$ docker compose up -d
```

Docker standalone
@@ -136,10 +168,10 @@ docker rm $(docker ps -a -f name=changedetection.io -q)
docker run -d --restart always -p "127.0.0.1:5000:5000" -v datastore-volume:/datastore --name changedetection.io dgtlmoon/changedetection.io
```

### docker-compose
### docker compose

```bash
docker-compose pull && docker-compose up -d
docker compose pull && docker compose up -d
```

See the wiki for more information https://github.com/dgtlmoon/changedetection.io/wiki
@@ -232,6 +264,13 @@ See the wiki https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configura

Raspberry Pi and linux/arm/v6, linux/arm/v7 and arm64 devices are supported! See the wiki for [details](https://github.com/dgtlmoon/changedetection.io/wiki/Fetching-pages-with-WebDriver)

## Import support

Easily [import your list of websites to watch for changes in Excel .xlsx file format](https://changedetection.io/tutorial/how-import-your-website-change-detection-lists-excel), or paste in lists of website URLs as plaintext.

Excel import is recommended - that way you can better organise tags/groups of websites and other features.

## API Support

Supports managing the website watch list [via our API](https://changedetection.io/docs/api_v1/index.html)
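
As a rough sketch of API usage from Python (assuming a local instance on port 5000 and the API key from your Settings page; the endpoint and `x-api-key` header match the apidoc examples in this repository):

```python
import requests

# Hypothetical values - replace with your own instance URL and API key
BASE_URL = "http://localhost:5000/api/v1"
API_KEY = "813031b16330fe25e3780cf0325daa45"

# Returns a concise dict of watches keyed by UUID
watches = requests.get(f"{BASE_URL}/watch", headers={"x-api-key": API_KEY}).json()
for uuid, info in watches.items():
    print(uuid, info["url"], info["last_changed"])
```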
@@ -241,13 +280,7 @@ Supports managing the website watch list [via our API](https://changedetection.i
Do you use changedetection.io to make money? Does it save you time or money? Does it make your life easier? Less stressful? Remember, we write this software when we should be doing actual paid work, we have to buy food and pay rent just like you.

Firstly, consider taking out a [change detection monthly subscription - unlimited checks and watches](https://changedetection.io?src=github) , even if you don't use it, you still get the warm fuzzy feeling of helping out the project. (And who knows, you might just use it!)

Or directly donate an amount via [PayPal](https://www.paypal.com/donate/?hosted_button_id=7CP6HR9ZCNDYJ)

Or BTC `1PLFN327GyUarpJd7nVe7Reqg9qHx5frNn`

<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/btc-support.png" style="max-width:50%;" alt="Support us!" />
Consider taking out an officially supported [website change detection subscription](https://changedetection.io?src=github) , even if you don't use it, you still get the warm fuzzy feeling of helping out the project. (And who knows, you might just use it!)

## Commercial Support

@@ -261,3 +294,17 @@ I offer commercial support, this software is depended on by network security, ae
[license-shield]: https://img.shields.io/github/license/dgtlmoon/changedetection.io.svg?style=for-the-badge
[release-link]: https://github.com/dgtlmoon/changedetection.io/releases
[docker-link]: https://hub.docker.com/r/dgtlmoon/changedetection.io

## Commercial Licencing

If you are reselling this software either in part or full as part of any commercial arrangement, you must abide by our COMMERCIAL_LICENCE.md found in our code repository; please contact dgtlmoon@gmail.com and contact@changedetection.io.

## Third-party licenses

changedetectionio.html_tools.elementpath_tostring: Copyright (c), 2018-2021, SISSA (Scuola Internazionale Superiore di Studi Avanzati), Licensed under [MIT license](https://github.com/sissaschool/elementpath/blob/master/LICENSE)

## Contributors

Recognition of fantastic contributors to the project

- Constantin Hong https://github.com/Constantin1489

21
app.json
@@ -1,21 +0,0 @@
{
    "name": "ChangeDetection.io",
    "description": "The best and simplest self-hosted open source website change detection monitoring and notification service.",
    "keywords": [
        "changedetection",
        "website monitoring"
    ],
    "repository": "https://github.com/dgtlmoon/changedetection.io",
    "success_url": "/",
    "scripts": {
    },
    "env": {
    },
    "formation": {
        "web": {
            "quantity": 1,
            "size": "free"
        }
    },
    "image": "heroku/python"
}
@@ -1,44 +1,6 @@
#!/usr/bin/python3
#!/usr/bin/env python3

# Entry-point for running from the CLI when not installed via Pip, Pip will handle the console_scripts entry_points from setup.py
# It's recommended to use `pip3 install changedetection.io` and start with `changedetection.py` instead, it will be linked to your global path.
# or Docker.
# Read more https://github.com/dgtlmoon/changedetection.io/wiki
# Only exists for direct CLI usage

from changedetectionio import changedetection
import multiprocessing
import sys
import os

def sigchld_handler(_signo, _stack_frame):
    import sys
    print('Shutdown: Got SIGCHLD')
    # https://stackoverflow.com/questions/40453496/python-multiprocessing-capturing-signals-to-restart-child-processes-or-shut-do
    pid, status = os.waitpid(-1, os.WNOHANG | os.WUNTRACED | os.WCONTINUED)

    print('Sub-process: pid %d status %d' % (pid, status))
    if status != 0:
        sys.exit(1)

    raise SystemExit

if __name__ == '__main__':

    #signal.signal(signal.SIGCHLD, sigchld_handler)

    # The only way I could find to get Flask to shut down is to wrap it and then rely on the subsystem issuing SIGTERM/SIGKILL
    parse_process = multiprocessing.Process(target=changedetection.main)
    parse_process.daemon = True
    parse_process.start()
    import time

    try:
        while True:
            time.sleep(1)
            if not parse_process.is_alive():
                # Process died/crashed for some reason, exit with error set
                sys.exit(1)

    except KeyboardInterrupt:
        #parse_process.terminate() not needed, because this process will issue it to the sub-process anyway
        print("Exited - CTRL+C")
import changedetectionio
changedetectionio.main()

@@ -112,6 +112,35 @@ def build_watch_json_schema(d):
    schema['properties']['time_between_check'] = build_time_between_check_json_schema()

    schema['properties']['browser_steps'] = {
        "anyOf": [
            {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "operation": {
                            "type": ["string", "null"],
                            "maxLength": 5000  # Allows null and any string up to 5000 chars (including "")
                        },
                        "selector": {
                            "type": ["string", "null"],
                            "maxLength": 5000
                        },
                        "optional_value": {
                            "type": ["string", "null"],
                            "maxLength": 5000
                        }
                    },
                    "required": ["operation", "selector", "optional_value"],
                    "additionalProperties": False  # No extra keys allowed
                }
            },
            {"type": "null"},  # Allows null for `browser_steps`
            {"type": "array", "maxItems": 0}  # Allows empty array []
        ]
    }

    # headers ?
    return schema

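A minimal sketch of what this sub-schema accepts (assuming the `jsonschema` package, which `flask_expects_json` builds on; the schema dict below just restates the `browser_steps` fragment above):

```python
from jsonschema import validate

browser_steps_schema = {
    "anyOf": [
        {
            "type": "array",
            "items": {
                "type": "object",
                "properties": {
                    "operation": {"type": ["string", "null"], "maxLength": 5000},
                    "selector": {"type": ["string", "null"], "maxLength": 5000},
                    "optional_value": {"type": ["string", "null"], "maxLength": 5000},
                },
                "required": ["operation", "selector", "optional_value"],
                "additionalProperties": False,
            },
        },
        {"type": "null"},
        {"type": "array", "maxItems": 0},
    ]
}

# Valid: every step carries all three keys, unused ones as empty strings or null
validate([{"operation": "Click element", "selector": "button#submit", "optional_value": ""}],
         browser_steps_schema)
validate(None, browser_steps_schema)  # null is allowed
validate([], browser_steps_schema)    # an empty array is allowed
```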
@@ -1,5 +1,5 @@
import os
from distutils.util import strtobool
from changedetectionio.strtobool import strtobool

from flask_expects_json import expects_json
from changedetectionio import queuedWatchMetaData
@@ -12,9 +12,10 @@ import copy
# See docs/README.md for rebuilding the docs/apidoc information

from . import api_schema
from ..model import watch_base

# Build a JSON Schema at least partially based on our Watch model
from changedetectionio.model.Watch import base_config as watch_base_config
watch_base_config = watch_base()
schema = api_schema.build_watch_json_schema(watch_base_config)

schema_create_watch = copy.deepcopy(schema)
@@ -30,7 +31,7 @@ class Watch(Resource):
        self.update_q = kwargs['update_q']

    # Get information about a single watch, excluding the history list (can be large)
    # curl http://localhost:4000/api/v1/watch/<string:uuid>
    # curl http://localhost:5000/api/v1/watch/<string:uuid>
    # @todo - version2 - ?muted and ?paused should be able to be called together, return the watch struct not "OK"
    # ?recheck=true
    @auth.check_token
@@ -39,9 +40,9 @@ class Watch(Resource):
        @api {get} /api/v1/watch/:uuid Single watch - get data, recheck, pause, mute.
        @apiDescription Retrieve watch information and set muted/paused status
        @apiExample {curl} Example usage:
            curl http://localhost:4000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -H"x-api-key:813031b16330fe25e3780cf0325daa45"
            curl "http://localhost:4000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091?muted=unmuted" -H"x-api-key:813031b16330fe25e3780cf0325daa45"
            curl "http://localhost:4000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091?paused=unpaused" -H"x-api-key:813031b16330fe25e3780cf0325daa45"
            curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -H"x-api-key:813031b16330fe25e3780cf0325daa45"
            curl "http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091?muted=unmuted" -H"x-api-key:813031b16330fe25e3780cf0325daa45"
            curl "http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091?paused=unpaused" -H"x-api-key:813031b16330fe25e3780cf0325daa45"
        @apiName Watch
        @apiGroup Watch
        @apiParam {uuid} uuid Watch unique ID.
@@ -57,7 +58,7 @@ class Watch(Resource):
            abort(404, message='No watch exists with the UUID of {}'.format(uuid))

        if request.args.get('recheck'):
            self.update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid, 'skip_when_checksum_same': True}))
            self.update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))
            return "OK", 200
        if request.args.get('paused', '') == 'paused':
            self.datastore.data['watching'].get(uuid).pause()
@@ -75,8 +76,9 @@ class Watch(Resource):
        # Return without history, get that via another API call
        # Properties are not returned as a JSON, so add the required props manually
        watch['history_n'] = watch.history_n
        # attr .last_changed will check for the last written text snapshot on change
        watch['last_changed'] = watch.last_changed
        watch['viewed'] = watch.viewed
        return watch

    @auth.check_token
@@ -84,7 +86,7 @@ class Watch(Resource):
        """
        @api {delete} /api/v1/watch/:uuid Delete a watch and related history
        @apiExample {curl} Example usage:
            curl http://localhost:4000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -X DELETE -H"x-api-key:813031b16330fe25e3780cf0325daa45"
            curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -X DELETE -H"x-api-key:813031b16330fe25e3780cf0325daa45"
        @apiParam {uuid} uuid Watch unique ID.
        @apiName Delete
        @apiGroup Watch
@@ -103,7 +105,7 @@ class Watch(Resource):
        @api {put} /api/v1/watch/:uuid Update watch information
        @apiExample {curl} Example usage:
            Update (PUT)
            curl http://localhost:4000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -X PUT -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json" -d '{"url": "https://my-nice.com" , "tag": "new list"}'
            curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -X PUT -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json" -d '{"url": "https://my-nice.com" , "tag": "new list"}'

        @apiDescription Updates an existing watch using JSON, accepts the same structure as returned in <a href="#api-Watch-Watch">get single watch information</a>
        @apiParam {uuid} uuid Watch unique ID.
@@ -132,13 +134,14 @@ class WatchHistory(Resource):
        self.datastore = kwargs['datastore']

    # Get a list of available history for a watch by UUID
    # curl http://localhost:4000/api/v1/watch/<string:uuid>/history
    # curl http://localhost:5000/api/v1/watch/<string:uuid>/history
    @auth.check_token
    def get(self, uuid):
        """
        @api {get} /api/v1/watch/<string:uuid>/history Get a list of all historical snapshots available for a watch
        @apiDescription Requires `uuid`, returns list
        @apiExample {curl} Example usage:
            curl http://localhost:4000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091/history -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json"
            curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091/history -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json"
            {
                "1676649279": "/tmp/data/6a4b7d5c-fee4-4616-9f43-4ac97046b595/cb7e9be8258368262246910e6a2a4c30.txt",
                "1677092785": "/tmp/data/6a4b7d5c-fee4-4616-9f43-4ac97046b595/e20db368d6fc633e34f559ff67bb4044.txt",
@@ -166,26 +169,36 @@ class WatchSingleHistory(Resource):
        @api {get} /api/v1/watch/<string:uuid>/history/<int:timestamp> Get single snapshot from watch
        @apiDescription Requires watch `uuid` and `timestamp`. `timestamp` of "`latest`" for latest available snapshot, or <a href="#api-Watch_History-Get_list_of_available_stored_snapshots_for_watch">use the list returned here</a>
        @apiExample {curl} Example usage:
            curl http://localhost:4000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091/history/1677092977 -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json"
            curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091/history/1677092977 -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json"
        @apiName Get single snapshot content
        @apiGroup Watch History
        @apiParam {String} [html] Optional Set to =1 to return the last HTML (only stores last 2 snapshots, use `latest` as timestamp)
        @apiSuccess (200) {String} OK
        @apiSuccess (404) {String} ERR Not found
        """
        watch = self.datastore.data['watching'].get(uuid)
        if not watch:
            abort(404, message='No watch exists with the UUID of {}'.format(uuid))
            abort(404, message=f"No watch exists with the UUID of {uuid}")

        if not len(watch.history):
            abort(404, message='Watch found but no history exists for the UUID {}'.format(uuid))
            abort(404, message=f"Watch found but no history exists for the UUID {uuid}")

        if timestamp == 'latest':
            timestamp = list(watch.history.keys())[-1]

        content = watch.get_history_snapshot(timestamp)
        if request.args.get('html'):
            content = watch.get_fetched_html(timestamp)
            if content:
                response = make_response(content, 200)
                response.mimetype = "text/html"
            else:
                response = make_response("No content found", 404)
                response.mimetype = "text/plain"
        else:
            content = watch.get_history_snapshot(timestamp)
            response = make_response(content, 200)
            response.mimetype = "text/plain"

        response = make_response(content, 200)
        response.mimetype = "text/plain"
        return response

@@ -202,7 +215,7 @@ class CreateWatch(Resource):
        @api {post} /api/v1/watch Create a single watch
        @apiDescription Requires at least `url` set, can accept the same structure as <a href="#api-Watch-Watch">get single watch information</a> to create.
        @apiExample {curl} Example usage:
            curl http://localhost:4000/api/v1/watch -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json" -d '{"url": "https://my-nice.com" , "tag": "nice list"}'
            curl http://localhost:5000/api/v1/watch -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json" -d '{"url": "https://my-nice.com" , "tag": "nice list"}'
        @apiName Create
        @apiGroup Watch
        @apiSuccess (200) {String} OK Was created
@@ -234,7 +247,7 @@ class CreateWatch(Resource):

        new_uuid = self.datastore.add_watch(url=url, extras=extras, tag=tags)
        if new_uuid:
            self.update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': new_uuid, 'skip_when_checksum_same': True}))
            self.update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': new_uuid}))
            return {'uuid': new_uuid}, 201
        else:
            return "Invalid or unsupported URL", 400
@@ -245,7 +258,7 @@ class CreateWatch(Resource):
        @api {get} /api/v1/watch List watches
        @apiDescription Return concise list of available watches and some very basic info
        @apiExample {curl} Example usage:
            curl http://localhost:4000/api/v1/watch -H"x-api-key:813031b16330fe25e3780cf0325daa45"
            curl http://localhost:5000/api/v1/watch -H"x-api-key:813031b16330fe25e3780cf0325daa45"
            {
                "6a4b7d5c-fee4-4616-9f43-4ac97046b595": {
                    "last_changed": 1677103794,
@@ -280,19 +293,77 @@ class CreateWatch(Resource):
            if tag_limit and not any(v.get('title').lower() == tag_limit for k, v in tags.items()):
                continue

            list[uuid] = {'url': watch['url'],
                          'title': watch['title'],
                          'last_checked': watch['last_checked'],
                          'last_changed': watch.last_changed,
                          'last_error': watch['last_error']}
            list[uuid] = {
                'last_changed': watch.last_changed,
                'last_checked': watch['last_checked'],
                'last_error': watch['last_error'],
                'title': watch['title'],
                'url': watch['url'],
                'viewed': watch.viewed
            }

        if request.args.get('recheck_all'):
            for uuid in self.datastore.data['watching'].keys():
                self.update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid, 'skip_when_checksum_same': True}))
                self.update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))
            return {'status': "OK"}, 200

        return list, 200

class Import(Resource):
    def __init__(self, **kwargs):
        # datastore is a black box dependency
        self.datastore = kwargs['datastore']

    @auth.check_token
    def post(self):
        """
        @api {post} /api/v1/import Import a list of watched URLs
        @apiDescription Accepts a line-feed separated list of URLs to import, additionally with ?tag_uuids=(tag id), ?tag=(name), ?proxy={key}, ?dedupe=true (default true) one URL per line.
        @apiExample {curl} Example usage:
            curl http://localhost:5000/api/v1/import --data-binary @list-of-sites.txt -H"x-api-key:8a111a21bc2f8f1dd9b9353bbd46049a"
        @apiName Import
        @apiGroup Watch
        @apiSuccess (200) {List} OK List of watch UUIDs added
        @apiSuccess (500) {String} ERR Some other error
        """

        extras = {}

        if request.args.get('proxy'):
            plist = self.datastore.proxy_list
            if not request.args.get('proxy') in plist:
                return "Invalid proxy choice, currently supported proxies are '{}'".format(', '.join(plist)), 400
            else:
                extras['proxy'] = request.args.get('proxy')

        dedupe = strtobool(request.args.get('dedupe', 'true'))

        tags = request.args.get('tag')
        tag_uuids = request.args.get('tag_uuids')

        if tag_uuids:
            tag_uuids = tag_uuids.split(',')

        urls = request.get_data().decode('utf8').splitlines()
        added = []
        allow_simplehost = not strtobool(os.getenv('BLOCK_SIMPLEHOSTS', 'False'))
        for url in urls:
            url = url.strip()
            if not len(url):
                continue

            # If hosts that only contain alphanumerics are allowed ("localhost" for example)
            if not validators.url(url, simple_host=allow_simplehost):
                return f"Invalid or unsupported URL - {url}", 400

            if dedupe and self.datastore.url_exists(url):
                continue

            new_uuid = self.datastore.add_watch(url=url, extras=extras, tag=tags, tag_uuids=tag_uuids)
            added.append(new_uuid)

        return added

class SystemInfo(Resource):
    def __init__(self, **kwargs):
        # datastore is a black box dependency
@@ -305,7 +376,7 @@ class SystemInfo(Resource):
        @api {get} /api/v1/systeminfo Return system info
        @apiDescription Return some info about the current system state
        @apiExample {curl} Example usage:
            curl http://localhost:4000/api/v1/systeminfo -H"x-api-key:813031b16330fe25e3780cf0325daa45"
            curl http://localhost:5000/api/v1/systeminfo -H"x-api-key:813031b16330fe25e3780cf0325daa45"
            HTTP/1.0 200
            {
                'queue_size': 10 ,

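A minimal sketch of driving the new Import endpoint from Python (assuming a local instance on port 5000 and a valid API key; the body and query parameters mirror the apidoc above):

```python
import requests

# Hypothetical values for illustration
api_key = "8a111a21bc2f8f1dd9b9353bbd46049a"
urls = "https://example.com/page-1\nhttps://example.com/page-2"

# One URL per line in the body; ?tag and ?dedupe behave as documented above
r = requests.post(
    "http://localhost:5000/api/v1/import",
    params={"tag": "nice list", "dedupe": "true"},
    data=urls.encode("utf8"),
    headers={"x-api-key": api_key},
)
print(r.json())  # list of newly added watch UUIDs
```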
@@ -1,3 +1,4 @@
from changedetectionio import apprise_plugin
import apprise

# Create our AppriseAsset and populate it with some of our new values:
98
changedetectionio/apprise_plugin/__init__.py
Normal file
@@ -0,0 +1,98 @@
# include the decorator
from apprise.decorators import notify
from loguru import logger
from requests.structures import CaseInsensitiveDict


@notify(on="delete")
@notify(on="deletes")
@notify(on="get")
@notify(on="gets")
@notify(on="post")
@notify(on="posts")
@notify(on="put")
@notify(on="puts")
def apprise_custom_api_call_wrapper(body, title, notify_type, *args, **kwargs):
    import requests
    import json
    import re

    from urllib.parse import unquote_plus
    from apprise.utils.parse import parse_url as apprise_parse_url

    url = kwargs['meta'].get('url')
    schema = kwargs['meta'].get('schema').lower().strip()

    # Choose POST, GET etc from requests
    method = re.sub(rf's$', '', schema)
    requests_method = getattr(requests, method)

    params = CaseInsensitiveDict({})  # Added to requests
    auth = None
    has_error = False

    # Convert /foobar?+some-header=hello to a proper header dictionary
    results = apprise_parse_url(url)

    # Add our headers that the user can potentially over-ride if they wish
    # to our returned result set, and tidy entries by unquoting them
    headers = CaseInsensitiveDict({unquote_plus(x): unquote_plus(y)
                                   for x, y in results['qsd+'].items()})

    # https://github.com/caronc/apprise/wiki/Notify_Custom_JSON#get-parameter-manipulation
    # In Apprise, it relies on prefixing each request arg with "-", because it uses say &method=update as a flag for Apprise,
    # but here we are making straight requests, so we need to convert this against Apprise's logic
    for k, v in results['qsd'].items():
        if not k.strip('+-') in results['qsd+'].keys():
            params[unquote_plus(k)] = unquote_plus(v)

    # Determine Authentication
    auth = ''
    if results.get('user') and results.get('password'):
        auth = (unquote_plus(results.get('user')), unquote_plus(results.get('password')))
    elif results.get('user'):
        auth = (unquote_plus(results.get('user')))

    # If it smells like it could be JSON and no content-type was already set, offer a default content type.
    if body and '{' in body[:100] and not headers.get('Content-Type'):
        json_header = 'application/json; charset=utf-8'
        try:
            # Try if it's JSON
            json.loads(body)
            headers['Content-Type'] = json_header
        except ValueError as e:
            logger.warning(f"Could not automatically add '{json_header}' header to the notification because the document failed to parse as JSON: {e}")
            pass

    # POSTS -> HTTPS etc
    if schema.lower().endswith('s'):
        url = re.sub(rf'^{schema}', 'https', results.get('url'))
    else:
        url = re.sub(rf'^{schema}', 'http', results.get('url'))

    status_str = ''
    try:
        r = requests_method(url,
                            auth=auth,
                            data=body.encode('utf-8') if type(body) is str else body,
                            headers=headers,
                            params=params
                            )

        if not (200 <= r.status_code < 300):
            status_str = f"Error sending '{method.upper()}' request to {url} - Status: {r.status_code}: '{r.reason}'"
            logger.error(status_str)
            has_error = True
        else:
            logger.info(f"Sent '{method.upper()}' request to {url}")
            has_error = False

    except requests.RequestException as e:
        status_str = f"Error sending '{method.upper()}' request to {url} - {str(e)}"
        logger.error(status_str)
        has_error = True

    if has_error:
        raise TypeError(status_str)

    return True
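A short sketch of how this wrapper is used (the URL below is a hypothetical example): a `posts://` notification URL becomes an HTTPS POST, `+`-prefixed query arguments become request headers, and the remaining arguments are passed as query parameters:

```python
import apprise
from changedetectionio import apprise_plugin  # importing registers the post/get/put/delete handlers

a = apprise.Apprise()
# POSTs to https://api.example.com/webhook with an x-api-key header and a token=abc123 query param
a.add('posts://api.example.com/webhook?+x-api-key=secret&token=abc123')
a.notify(title='Price changed', body='{"watch_url": "https://shop.example.com/item"}')
```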
164
changedetectionio/blueprint/backups/__init__.py
Normal file
@@ -0,0 +1,164 @@
import datetime
import glob
import threading

from flask import Blueprint, render_template, send_from_directory, flash, url_for, redirect, abort
import os

from changedetectionio.store import ChangeDetectionStore
from changedetectionio.flask_app import login_optionally_required
from loguru import logger

BACKUP_FILENAME_FORMAT = "changedetection-backup-{}.zip"


def create_backup(datastore_path, watches: dict):
    logger.debug("Creating backup...")
    import zipfile
    from pathlib import Path

    # create a ZipFile object
    timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    backupname = BACKUP_FILENAME_FORMAT.format(timestamp)
    backup_filepath = os.path.join(datastore_path, backupname)

    with zipfile.ZipFile(backup_filepath.replace('.zip', '.tmp'), "w",
                         compression=zipfile.ZIP_DEFLATED,
                         compresslevel=8) as zipObj:

        # Add the index
        zipObj.write(os.path.join(datastore_path, "url-watches.json"), arcname="url-watches.json")

        # Add the flask app secret
        zipObj.write(os.path.join(datastore_path, "secret.txt"), arcname="secret.txt")

        # Add any data in the watch data directory.
        for uuid, w in watches.items():
            for f in Path(w.watch_data_dir).glob('*'):
                zipObj.write(f,
                             # Use the full path to access the file, but make the file 'relative' in the Zip.
                             arcname=os.path.join(f.parts[-2], f.parts[-1]),
                             compress_type=zipfile.ZIP_DEFLATED,
                             compresslevel=8)

        # Create a list file with just the URLs, so it's easier to port somewhere else in the future
        list_file = "url-list.txt"
        with open(os.path.join(datastore_path, list_file), "w") as f:
            for uuid in watches:
                url = watches[uuid]["url"]
                f.write("{}\r\n".format(url))
        list_with_tags_file = "url-list-with-tags.txt"
        with open(
                os.path.join(datastore_path, list_with_tags_file), "w"
        ) as f:
            for uuid in watches:
                url = watches[uuid].get('url')
                tag = watches[uuid].get('tags', {})
                f.write("{} {}\r\n".format(url, tag))

        # Add it to the Zip
        zipObj.write(
            os.path.join(datastore_path, list_file),
            arcname=list_file,
            compress_type=zipfile.ZIP_DEFLATED,
            compresslevel=8,
        )
        zipObj.write(
            os.path.join(datastore_path, list_with_tags_file),
            arcname=list_with_tags_file,
            compress_type=zipfile.ZIP_DEFLATED,
            compresslevel=8,
        )

    # Now it's done, rename it so it finally shows up and it's clear the write completed.
    os.rename(backup_filepath.replace('.zip', '.tmp'), backup_filepath.replace('.tmp', '.zip'))


def construct_blueprint(datastore: ChangeDetectionStore):
    backups_blueprint = Blueprint('backups', __name__, template_folder="templates")
    backup_threads = []

    @login_optionally_required
    @backups_blueprint.route("/request-backup", methods=['GET'])
    def request_backup():
        if any(thread.is_alive() for thread in backup_threads):
            flash("A backup is already running, check back in a few minutes", "error")
            return redirect(url_for('backups.index'))

        if len(find_backups()) > int(os.getenv("MAX_NUMBER_BACKUPS", 100)):
            flash("Maximum number of backups reached, please remove some", "error")
            return redirect(url_for('backups.index'))

        # Be sure we're written fresh
        datastore.sync_to_json()
        zip_thread = threading.Thread(target=create_backup, args=(datastore.datastore_path, datastore.data.get("watching")))
        zip_thread.start()
        backup_threads.append(zip_thread)
        flash("Backup building in background, check back in a few minutes.")

        return redirect(url_for('backups.index'))

    def find_backups():
        backup_filepath = os.path.join(datastore.datastore_path, BACKUP_FILENAME_FORMAT.format("*"))
        backups = glob.glob(backup_filepath)
        backup_info = []

        for backup in backups:
            size = os.path.getsize(backup) / (1024 * 1024)
            creation_time = os.path.getctime(backup)
            backup_info.append({
                'filename': os.path.basename(backup),
                'filesize': f"{size:.2f}",
                'creation_time': creation_time
            })

        backup_info.sort(key=lambda x: x['creation_time'], reverse=True)

        return backup_info

    @login_optionally_required
    @backups_blueprint.route("/download/<string:filename>", methods=['GET'])
    def download_backup(filename):
        import re
        filename = filename.strip()
        backup_filename_regex = BACKUP_FILENAME_FORMAT.format(r"\d+")

        full_path = os.path.join(os.path.abspath(datastore.datastore_path), filename)
        if not full_path.startswith(os.path.abspath(datastore.datastore_path)):
            abort(404)

        if filename == 'latest':
            backups = find_backups()
            filename = backups[0]['filename']

        if not re.match(r"^" + backup_filename_regex + "$", filename):
            abort(400)  # Bad Request if the filename doesn't match the pattern

        logger.debug(f"Backup download request for '{full_path}'")
        return send_from_directory(os.path.abspath(datastore.datastore_path), filename, as_attachment=True)

    @login_optionally_required
    @backups_blueprint.route("/", methods=['GET'])
    def index():
        backups = find_backups()
        output = render_template("overview.html",
                                 available_backups=backups,
                                 backup_running=any(thread.is_alive() for thread in backup_threads)
                                 )

        return output

    @login_optionally_required
    @backups_blueprint.route("/remove-backups", methods=['GET'])
    def remove_backups():

        backup_filepath = os.path.join(datastore.datastore_path, BACKUP_FILENAME_FORMAT.format("*"))
        backups = glob.glob(backup_filepath)
        for backup in backups:
            os.unlink(backup)

        flash("Backups were deleted.")

        return redirect(url_for('backups.index'))

    return backups_blueprint
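A quick sketch of fetching the newest backup over HTTP (assuming the blueprint is mounted under `/backups`, which is not shown in this diff, and that password protection is disabled or a session cookie is supplied):

```python
import requests

# 'latest' is resolved server-side to the newest changedetection-backup-*.zip
r = requests.get("http://localhost:5000/backups/download/latest", timeout=60)
with open("changedetection-backup.zip", "wb") as f:
    f.write(r.content)
```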
36
changedetectionio/blueprint/backups/templates/overview.html
Normal file
@@ -0,0 +1,36 @@
{% extends 'base.html' %}
{% block content %}
{% from '_helpers.html' import render_simple_field, render_field %}
<div class="edit-form">
    <div class="box-wrap inner">
        <h4>Backups</h4>
        {% if backup_running %}
        <p>
            <strong>A backup is running!</strong>
        </p>
        {% endif %}
        <p>
            Here you can request and download a new backup. When a backup is completed you will see it listed below.
        </p>
        <br>
        {% if available_backups %}
        <ul>
            {% for backup in available_backups %}
            <li><a href="{{ url_for('backups.download_backup', filename=backup["filename"]) }}">{{ backup["filename"] }}</a> {{ backup["filesize"] }} MB</li>
            {% endfor %}
        </ul>
        {% else %}
        <p>
            <strong>No backups found.</strong>
        </p>
        {% endif %}

        <a class="pure-button pure-button-primary" href="{{ url_for('backups.request_backup') }}">Create backup</a>
        {% if available_backups %}
        <a class="pure-button button-small button-error " href="{{ url_for('backups.remove_backups') }}">Remove backups</a>
        {% endif %}
    </div>
</div>

{% endblock %}
7
changedetectionio/blueprint/browser_steps/TODO.txt
Normal file
@@ -0,0 +1,7 @@
- This needs an abstraction to directly handle the puppeteer connection methods
- Then remove the playwright stuff
- Remove hack redirect at line 65 changedetectionio/processors/__init__.py

The screenshots are base64 encoded/decoded which is very CPU intensive for large screenshots (in playwright) but not
in the direct puppeteer connection (they are binary end to end)
@@ -4,33 +4,28 @@
# Why?
# `browsersteps_playwright_browser_interface.chromium.connect_over_cdp()` will only run once without async()
# - this flask app is not async()
# - browserless has a single timeout/keepalive which applies to the session made at .connect_over_cdp()
# - A single timeout/keepalive which applies to the session made at .connect_over_cdp()
#
# So it means that we must unfortunately for now just keep a single timer since .connect_over_cdp() was run
# and know when that reaches timeout/keepalive :( when that time is up, restart the connection and tell the user
# that their time is up, insert another coin. (reload)
#
# Bigger picture
# - It's horrible that we have this click+wait deal, some nice socket.io solution using something similar
# to what the browserless debug UI already gives us would be smarter..
#
# OR
# - Some API call that should be hacked into browserless or playwright that we can "/api/bump-keepalive/{session_id}/60"
# So we can tell it that we need more time (run this on each action)
#
# OR
# - use multiprocessing to bump this over to its own process and add some transport layer (queue/pipes)

from distutils.util import strtobool
from changedetectionio.strtobool import strtobool
from flask import Blueprint, request, make_response
import os
import logging

from changedetectionio.store import ChangeDetectionStore
from changedetectionio import login_optionally_required
from changedetectionio.flask_app import login_optionally_required
from loguru import logger

browsersteps_sessions = {}
io_interface_context = None

import json
import base64
import hashlib
from flask import Response

def construct_blueprint(datastore: ChangeDetectionStore):
    browser_steps_blueprint = Blueprint('browser_steps', __name__, template_folder="templates")
@@ -44,7 +39,7 @@ def construct_blueprint(datastore: ChangeDetectionStore):

        # We keep the playwright session open for many minutes
        seconds_keepalive = int(os.getenv('BROWSERSTEPS_MINUTES_KEEPALIVE', 10)) * 60
        keepalive_seconds = int(os.getenv('BROWSERSTEPS_MINUTES_KEEPALIVE', 10)) * 60

        browsersteps_start_session = {'start_time': time.time()}

@@ -56,16 +51,18 @@ def construct_blueprint(datastore: ChangeDetectionStore):
        # Start the Playwright context, which is actually a nodejs sub-process and communicates over STDIN/STDOUT pipes
        io_interface_context = io_interface_context.start()

        keepalive_ms = ((keepalive_seconds + 3) * 1000)
        base_url = os.getenv('PLAYWRIGHT_DRIVER_URL', '').strip('"')
        a = "?" if not '?' in base_url else '&'
        base_url += a + f"timeout={keepalive_ms}"

        # keep it alive for a few seconds more than we advertise, sometimes it helps to keep it shutting down cleanly
        keepalive = "&timeout={}".format(((seconds_keepalive + 3) * 1000))
        try:
            browsersteps_start_session['browser'] = io_interface_context.chromium.connect_over_cdp(
                os.getenv('PLAYWRIGHT_DRIVER_URL', '') + keepalive)
            browsersteps_start_session['browser'] = io_interface_context.chromium.connect_over_cdp(base_url)
        except Exception as e:
            if 'ECONNREFUSED' in str(e):
                return make_response('Unable to start the Playwright Browser session, is it running?', 401)
            else:
                # Other errors, bad URL syntax, bad reply etc
                return make_response(str(e), 401)

        proxy_id = datastore.get_preferred_proxy_for_watch(uuid=watch_uuid)
@@ -85,12 +82,15 @@ def construct_blueprint(datastore: ChangeDetectionStore):
            if parsed.password:
                proxy['password'] = parsed.password

            print("Browser Steps: UUID {} selected proxy {}".format(watch_uuid, proxy_url))
            logger.debug(f"Browser Steps: UUID {watch_uuid} selected proxy {proxy_url}")

        # Tell Playwright to connect to Chrome and set up a new session via our stepper interface
        browsersteps_start_session['browserstepper'] = browser_steps.browsersteps_live_ui(
            playwright_browser=browsersteps_start_session['browser'],
            proxy=proxy)
            proxy=proxy,
            start_url=datastore.data['watching'][watch_uuid].link,
            headers=datastore.data['watching'][watch_uuid].get('headers')
        )

        # For test
        #browsersteps_start_session['browserstepper'].action_goto_url(value="http://example.com?time="+str(time.time()))
@@ -112,18 +112,43 @@ def construct_blueprint(datastore: ChangeDetectionStore):
        if not watch_uuid:
            return make_response('No Watch UUID specified', 500)

        print("Starting connection with playwright")
        logging.debug("browser_steps.py connecting")
        logger.debug("Starting connection with playwright")
        logger.debug("browser_steps.py connecting")
        browsersteps_sessions[browsersteps_session_id] = start_browsersteps_session(watch_uuid)
        print("Starting connection with playwright - done")
        logger.debug("Starting connection with playwright - done")
        return {'browsersteps_session_id': browsersteps_session_id}

    @login_optionally_required
    @browser_steps_blueprint.route("/browsersteps_image", methods=['GET'])
    def browser_steps_fetch_screenshot_image():
        from flask import (
            make_response,
            request,
            send_from_directory,
        )
        uuid = request.args.get('uuid')
        step_n = int(request.args.get('step_n'))

        watch = datastore.data['watching'].get(uuid)
        filename = f"step_before-{step_n}.jpeg" if request.args.get('type', '') == 'before' else f"step_{step_n}.jpeg"

        if step_n and watch and os.path.isfile(os.path.join(watch.watch_data_dir, filename)):
            response = make_response(send_from_directory(directory=watch.watch_data_dir, path=filename))
            response.headers['Content-type'] = 'image/jpeg'
            response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
            response.headers['Pragma'] = 'no-cache'
            response.headers['Expires'] = 0
            return response

        else:
            return make_response('Unable to fetch image, is the URL correct? does the watch exist? does the step_type-n.jpeg exist?', 401)

    # A request for an action was received
    @login_optionally_required
    @browser_steps_blueprint.route("/browsersteps_update", methods=['POST'])
    def browsersteps_ui_update():
        import base64
        import playwright._impl._api_types
        import playwright._impl._errors
        global browsersteps_sessions
        from changedetectionio.blueprint.browser_steps import browser_steps

@@ -138,21 +163,15 @@ def construct_blueprint(datastore: ChangeDetectionStore):
        if not browsersteps_sessions.get(browsersteps_session_id):
            return make_response('No session exists under that ID', 500)

        is_last_step = False
        # Actions - step/apply/etc, do the thing and return state
        if request.method == 'POST':
            # @todo - should always be an existing session
            step_operation = request.form.get('operation')
            step_selector = request.form.get('selector')
            step_optional_value = request.form.get('optional_value')
            step_n = int(request.form.get('step_n'))
            is_last_step = strtobool(request.form.get('is_last_step'))

            if step_operation == 'Goto site':
                step_operation = 'goto_url'
                step_optional_value = datastore.data['watching'][uuid].get('url')
                step_selector = None

            # @todo try.. accept.. nice errors not popups..
            try:

@@ -161,18 +180,10 @@ def construct_blueprint(datastore: ChangeDetectionStore):
                    optional_value=step_optional_value)

            except Exception as e:
                print("Exception when calling step operation", step_operation, str(e))
                logger.error(f"Exception when calling step operation {step_operation} {str(e)}")
                # Try to find something of value to give back to the user
                return make_response(str(e).splitlines()[0], 401)

            # Get visual selector ready/update its data (also use the current filter info from the page?)
            # When the last 'apply' button was pressed
            # @todo this adds overhead because the xpath selection is happening twice
            u = browsersteps_sessions[browsersteps_session_id]['browserstepper'].page.url
            if is_last_step and u:
                (screenshot, xpath_data) = browsersteps_sessions[browsersteps_session_id]['browserstepper'].request_visualselector_data()
                datastore.save_screenshot(watch_uuid=uuid, screenshot=screenshot)
                datastore.save_xpath_data(watch_uuid=uuid, data=xpath_data)

        # if not this_session.page:
        #     cleanup_playwright_session()
@@ -180,31 +191,35 @@ def construct_blueprint(datastore: ChangeDetectionStore):

        # Screenshots and other info only needed on requesting a step (POST)
        try:
            state = browsersteps_sessions[browsersteps_session_id]['browserstepper'].get_current_state()
            (screenshot, xpath_data) = browsersteps_sessions[browsersteps_session_id]['browserstepper'].get_current_state()
            if is_last_step:
                watch = datastore.data['watching'].get(uuid)
                u = browsersteps_sessions[browsersteps_session_id]['browserstepper'].page.url
                if watch and u:
                    watch.save_screenshot(screenshot=screenshot)
                    watch.save_xpath_data(data=xpath_data)

        except playwright._impl._api_types.Error as e:
            return make_response("Browser session ran out of time :( Please reload this page."+str(e), 401)
        except Exception as e:
            return make_response("Error fetching screenshot and element data - " + str(e), 401)

        # Use send_file() which is way faster than read/write loop on bytes
        import json
        from tempfile import mkstemp
        from flask import send_file
        tmp_fd, tmp_file = mkstemp(text=True, suffix=".json", prefix="changedetectionio-")
        # SEND THIS BACK TO THE BROWSER

        output = json.dumps({'screenshot': "data:image/jpeg;base64,{}".format(
            base64.b64encode(state[0]).decode('ascii')),
            'xpath_data': state[1],
            'session_age_start': browsersteps_sessions[browsersteps_session_id]['browserstepper'].age_start,
            'browser_time_remaining': round(remaining)
        })
        output = {
            "screenshot": f"data:image/jpeg;base64,{base64.b64encode(screenshot).decode('ascii')}",
            "xpath_data": xpath_data,
            "session_age_start": browsersteps_sessions[browsersteps_session_id]['browserstepper'].age_start,
            "browser_time_remaining": round(remaining)
        }
        json_data = json.dumps(output)

        with os.fdopen(tmp_fd, 'w') as f:
            f.write(output)
        # Generate an ETag (hash of the response body)
        etag_hash = hashlib.md5(json_data.encode('utf-8')).hexdigest()

        response = make_response(send_file(path_or_file=tmp_file,
                                           mimetype='application/json; charset=UTF-8',
                                           etag=True))
        # No longer needed
        os.unlink(tmp_file)
        # Create the response with ETag
        response = Response(json_data, mimetype="application/json; charset=UTF-8")
        response.set_etag(etag_hash)

        return response

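For reference, a heavily hedged sketch of consuming the JSON this endpoint returns (the route prefix, the `uuid`/`browsersteps_session_id` query parameters and their values are assumptions for illustration; the payload keys match the `output` dict above):

```python
import base64
import requests

# Hypothetical example - the UI normally calls this endpoint itself
resp = requests.post(
    "http://localhost:5000/browser-steps/browsersteps_update",
    params={"uuid": "cc0cfffa-f449-477b-83ea-0caafd1dc091", "browsersteps_session_id": "abc123"},
    data={"operation": "Goto site", "selector": "", "optional_value": "",
          "step_n": 0, "is_last_step": "false"},
).json()

# 'screenshot' is a data URI; strip the prefix and decode the JPEG bytes
jpeg_bytes = base64.b64decode(resp["screenshot"].split(",", 1)[1])
print(resp["browser_time_remaining"], "seconds of session left")
```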
@@ -1,9 +1,14 @@
|
||||
#!/usr/bin/python3
|
||||
|
||||
import os
|
||||
import time
|
||||
import re
|
||||
from random import randint
|
||||
from loguru import logger
|
||||
|
||||
from changedetectionio.content_fetchers.helpers import capture_stitched_together_full_page, SCREENSHOT_SIZE_STITCH_THRESHOLD
|
||||
from changedetectionio.content_fetchers.base import manage_user_agent
|
||||
from changedetectionio.safe_jinja import render as jinja_render
|
||||
|
||||
|
||||
|
||||
# Two flags, tell the JS which of the "Selector" or "Value" field should be enabled in the front end
|
||||
# 0- off, 1- on
|
||||
@@ -21,11 +26,13 @@ browser_step_ui_config = {'Choose one': '0 0',
|
||||
'Click element if exists': '1 0',
|
||||
'Click element': '1 0',
|
||||
'Click element containing text': '0 1',
|
||||
'Click element containing text if exists': '0 1',
|
||||
'Enter text in field': '1 1',
|
||||
'Execute JS': '0 1',
|
||||
# 'Extract text and use as filter': '1 0',
|
||||
'Goto site': '0 0',
|
||||
'Goto URL': '0 1',
|
||||
'Make all child elements visible': '1 0',
|
||||
'Press Enter': '0 0',
|
||||
'Select by label': '1 1',
|
||||
'Scroll down': '0 0',
|
||||
@@ -33,6 +40,7 @@ browser_step_ui_config = {'Choose one': '0 0',
|
||||
'Wait for seconds': '0 1',
|
||||
'Wait for text': '0 1',
|
||||
'Wait for text in element': '1 1',
|
||||
'Remove elements': '1 0',
|
||||
# 'Press Page Down': '0 0',
|
||||
# 'Press Page Up': '0 0',
|
||||
# weird bug, come back to it later
|
||||
@@ -45,6 +53,12 @@ browser_step_ui_config = {'Choose one': '0 0',
|
||||
# ONLY Works in Playwright because we need the fullscreen screenshot
|
||||
class steppable_browser_interface():
|
||||
page = None
|
||||
start_url = None
|
||||
|
||||
action_timeout = 10 * 1000
|
||||
|
||||
def __init__(self, start_url):
|
||||
self.start_url = start_url
|
||||
|
||||
# Convert and perform "Click Button" for example
|
||||
def call_action(self, action_name, selector=None, optional_value=None):
|
||||
@@ -53,7 +67,7 @@ class steppable_browser_interface():
|
||||
if call_action_name == 'choose_one':
|
||||
return
|
||||
|
||||
print("> action calling", call_action_name)
|
||||
logger.debug(f"> Action calling '{call_action_name}'")
|
||||
# https://playwright.dev/python/docs/selectors#xpath-selectors
|
||||
if selector and selector.startswith('/') and not selector.startswith('//'):
|
||||
selector = "xpath=" + selector
|
||||
@@ -61,60 +75,75 @@ class steppable_browser_interface():
|
||||
action_handler = getattr(self, "action_" + call_action_name)
|
||||
|
||||
# Support for Jinja2 variables in the value and selector
|
||||
from jinja2 import Environment
|
||||
jinja2_env = Environment(extensions=['jinja2_time.TimeExtension'])
|
||||
|
||||
if selector and ('{%' in selector or '{{' in selector):
|
||||
selector = str(jinja2_env.from_string(selector).render())
|
||||
selector = jinja_render(template_str=selector)
|
||||
|
||||
if optional_value and ('{%' in optional_value or '{{' in optional_value):
|
||||
optional_value = str(jinja2_env.from_string(optional_value).render())
|
||||
optional_value = jinja_render(template_str=optional_value)
|
||||
|
||||
action_handler(selector, optional_value)
|
||||
self.page.wait_for_timeout(1.5 * 1000)
|
||||
print("Call action done in", time.time() - now)
|
||||
logger.debug(f"Call action done in {time.time()-now:.2f}s")
|
||||
|
||||
def action_goto_url(self, selector=None, value=None):
|
||||
# self.page.set_viewport_size({"width": 1280, "height": 5000})
|
||||
now = time.time()
|
||||
response = self.page.goto(value, timeout=0, wait_until='commit')
|
||||
response = self.page.goto(value, timeout=0, wait_until='load')
|
||||
# Should be the same as the puppeteer_fetch.js methods, means, load with no timeout set (skip timeout)
|
||||
#and also wait for seconds ?
|
||||
#await page.waitForTimeout(1000);
|
||||
#await page.waitForTimeout(extra_wait_ms);
|
||||
logger.debug(f"Time to goto URL {time.time()-now:.2f}s")
|
||||
return response
|
||||
|
||||
# Wait_until = commit
|
||||
# - `'commit'` - consider operation to be finished when network response is received and the document started loading.
|
||||
# Better to not use any smarts from Playwright and just wait an arbitrary number of seconds
|
||||
# This seemed to solve nearly all 'TimeoutErrors'
|
||||
print("Time to goto URL ", time.time() - now)
|
||||
# Incase they request to go back to the start
|
||||
def action_goto_site(self, selector=None, value=None):
|
||||
return self.action_goto_url(value=self.start_url)
|
||||
|
||||
def action_click_element_containing_text(self, selector=None, value=''):
|
||||
logger.debug("Clicking element containing text")
|
||||
if not len(value.strip()):
|
||||
return
|
||||
elem = self.page.get_by_text(value)
|
||||
if elem.count():
|
||||
elem.first.click(delay=randint(200, 500), timeout=3000)
|
||||
elem.first.click(delay=randint(200, 500), timeout=self.action_timeout)
|
||||
|
||||
def action_click_element_containing_text_if_exists(self, selector=None, value=''):
|
||||
logger.debug("Clicking element containing text if exists")
|
||||
if not len(value.strip()):
|
||||
return
|
||||
elem = self.page.get_by_text(value)
|
||||
logger.debug(f"Clicking element containing text - {elem.count()} elements found")
|
||||
if elem.count():
|
||||
elem.first.click(delay=randint(200, 500), timeout=self.action_timeout)
|
||||
else:
|
||||
return
|
||||
|
||||
def action_enter_text_in_field(self, selector, value):
|
||||
if not len(selector.strip()):
|
||||
return
|
||||
|
||||
self.page.fill(selector, value, timeout=10 * 1000)
|
||||
self.page.fill(selector, value, timeout=self.action_timeout)
|
||||
|
||||
def action_execute_js(self, selector, value):
|
||||
self.page.evaluate(value)
|
||||
response = self.page.evaluate(value)
|
||||
return response
|
||||
|
||||
def action_click_element(self, selector, value):
|
||||
print("Clicking element")
|
||||
logger.debug("Clicking element")
|
||||
if not len(selector.strip()):
|
||||
return
|
||||
|
||||
self.page.click(selector=selector, timeout=30 * 1000, delay=randint(200, 500))
|
||||
self.page.click(selector=selector, timeout=self.action_timeout + 20 * 1000, delay=randint(200, 500))
|
||||
|
||||
def action_click_element_if_exists(self, selector, value):
|
||||
import playwright._impl._api_types as _api_types
|
||||
print("Clicking element if exists")
|
||||
import playwright._impl._errors as _api_types
|
||||
logger.debug("Clicking element if exists")
|
||||
if not len(selector.strip()):
|
||||
return
|
||||
try:
|
||||
self.page.click(selector, timeout=10 * 1000, delay=randint(200, 500))
|
||||
self.page.click(selector, timeout=self.action_timeout, delay=randint(200, 500))
|
||||
except _api_types.TimeoutError as e:
|
||||
return
|
||||
except _api_types.Error as e:
|
||||
@@ -122,6 +151,9 @@ class steppable_browser_interface():
|
||||
return
|
||||
|
||||
def action_click_x_y(self, selector, value):
|
||||
if not re.match(r'^\s?\d+\s?,\s?\d+\s?$', value):
|
||||
raise Exception("'Click X,Y' step should be in the format of '100 , 90'")
|
||||
|
||||
x, y = value.strip().split(',')
|
||||
x = int(float(x.strip()))
|
||||
y = int(float(y.strip()))
|
||||
@@ -138,13 +170,13 @@ class steppable_browser_interface():
|
||||
def action_wait_for_text(self, selector, value):
|
||||
import json
|
||||
v = json.dumps(value)
|
||||
self.page.wait_for_function(f'document.querySelector("body").innerText.includes({v});', timeout=90000)
|
||||
self.page.wait_for_function(f'document.querySelector("body").innerText.includes({v});', timeout=30000)
|
||||
|
||||
def action_wait_for_text_in_element(self, selector, value):
|
||||
import json
|
||||
s = json.dumps(selector)
|
||||
v = json.dumps(value)
|
||||
self.page.wait_for_function(f'document.querySelector({s}).innerText.includes({v});', timeout=90000)
|
||||
self.page.wait_for_function(f'document.querySelector({s}).innerText.includes({v});', timeout=30000)
|
||||
|
||||
# @todo - in the future make some popout interface to capture what needs to be set
|
||||
# https://playwright.dev/python/docs/api/class-keyboard
|
||||
@@ -158,13 +190,31 @@ class steppable_browser_interface():
|
||||
self.page.keyboard.press("PageDown", delay=randint(200, 500))
|
||||
|
||||
def action_check_checkbox(self, selector, value):
|
||||
self.page.locator(selector).check(timeout=1000)
|
||||
self.page.locator(selector).check(timeout=self.action_timeout)
|
||||
|
||||
def action_uncheck_checkbox(self, selector, value):
|
||||
self.page.locator(selector, timeout=1000).uncheck(timeout=1000)
|
||||
self.page.locator(selector).uncheck(timeout=self.action_timeout)
|
||||
|
||||
def action_remove_elements(self, selector, value):
|
||||
"""Removes all elements matching the given selector from the DOM."""
|
||||
self.page.locator(selector).evaluate_all("els => els.forEach(el => el.remove())")
|
||||
|
||||
# Responsible for maintaining a live 'context' with browserless
|
||||
def action_make_all_child_elements_visible(self, selector, value):
|
||||
"""Recursively makes all child elements inside the given selector fully visible."""
|
||||
self.page.locator(selector).locator("*").evaluate_all("""
|
||||
els => els.forEach(el => {
|
||||
el.style.display = 'block'; // Forces it to be displayed
|
||||
el.style.visibility = 'visible'; // Ensures it's not hidden
|
||||
el.style.opacity = '1'; // Fully opaque
|
||||
el.style.position = 'relative'; // Avoids 'absolute' hiding
|
||||
el.style.height = 'auto'; // Expands collapsed elements
|
||||
el.style.width = 'auto'; // Ensures full visibility
|
||||
el.removeAttribute('hidden'); // Removes hidden attribute
|
||||
el.classList.remove('hidden', 'd-none'); // Removes common CSS hidden classes
|
||||
})
|
||||
""")
|
||||
|
||||
# Responsible for maintaining a live 'context' with the chrome CDP
# @todo - how long do contexts live for anyway?
class browsersteps_live_ui(steppable_browser_interface):
context = None
@@ -173,6 +223,7 @@ class browsersteps_live_ui(steppable_browser_interface):
stale = False
# bump and kill this if idle after X sec
age_start = 0
headers = {}

# use a special driver, maybe locally etc
command_executor = os.getenv(
@@ -187,9 +238,11 @@ class browsersteps_live_ui(steppable_browser_interface):

browser_type = os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').strip('"')

def __init__(self, playwright_browser, proxy=None):
def __init__(self, playwright_browser, proxy=None, headers=None, start_url=None):
self.headers = headers or {}
self.age_start = time.time()
self.playwright_browser = playwright_browser
self.start_url = start_url
if self.context is None:
self.connect(proxy=proxy)

@@ -201,16 +254,17 @@ class browsersteps_live_ui(steppable_browser_interface):

# @todo handle multiple contexts, bind a unique id from the browser on each req?
self.context = self.playwright_browser.new_context(
# @todo
# user_agent=request_headers['User-Agent'] if request_headers.get('User-Agent') else 'Mozilla/5.0',
# proxy=self.proxy,
# This is needed to enable JavaScript execution on GitHub and others
bypass_csp=True,
# Should never be needed
accept_downloads=False,
proxy=proxy
accept_downloads=False, # Should never be needed
bypass_csp=True, # This is needed to enable JavaScript execution on GitHub and others
extra_http_headers=self.headers,
ignore_https_errors=True,
proxy=proxy,
service_workers=os.getenv('PLAYWRIGHT_SERVICE_WORKERS', 'allow'),
# Should be `allow` or `block` - sites like YouTube can transmit large amounts of data via Service Workers
user_agent=manage_user_agent(headers=self.headers),
)


self.page = self.context.new_page()

# self.page.set_default_navigation_timeout(keep_open)
@@ -223,11 +277,12 @@ class browsersteps_live_ui(steppable_browser_interface):
# Listen for all console events and handle errors
self.page.on("console", lambda msg: print(f"Browser steps console - {msg.type}: {msg.text} {msg.args}"))

print("Time to browser setup", time.time() - now)
logger.debug(f"Time to browser setup {time.time()-now:.2f}s")
self.page.wait_for_timeout(1 * 1000)


def mark_as_closed(self):
print("Page closed, cleaning up..")
logger.debug("Page closed, cleaning up..")

@property
def has_expired(self):
@@ -237,46 +292,36 @@ class browsersteps_live_ui(steppable_browser_interface):

def get_current_state(self):
"""Return the screenshot and interactive elements mapping, generally always called after action_()"""
from pkg_resources import resource_string
xpath_element_js = resource_string(__name__, "../../res/xpath_element_scraper.js").decode('utf-8')
import importlib.resources
xpath_element_js = importlib.resources.files("changedetectionio.content_fetchers.res").joinpath('xpath_element_scraper.js').read_text()

now = time.time()
self.page.wait_for_timeout(1 * 1000)

# The actual screenshot
screenshot = self.page.screenshot(type='jpeg', full_page=True, quality=40)

full_height = self.page.evaluate("document.documentElement.scrollHeight")

if full_height >= SCREENSHOT_SIZE_STITCH_THRESHOLD:
logger.warning(f"Page full Height: {full_height}px longer than {SCREENSHOT_SIZE_STITCH_THRESHOLD}px, using 'stitched screenshot method'.")
screenshot = capture_stitched_together_full_page(self.page)
else:
screenshot = self.page.screenshot(type='jpeg', full_page=True, quality=40)

logger.debug(f"Time to get screenshot from browser {time.time() - now:.2f}s")

now = time.time()
self.page.evaluate("var include_filters=''")
# Go find the interactive elements
# @todo in the future, something smarter that can scan for elements with .click/focus etc event handlers?
elements = 'a,button,input,select,textarea,i,th,td,p,li,h1,h2,h3,h4,div,span'
xpath_element_js = xpath_element_js.replace('%ELEMENTS%', elements)

xpath_data = self.page.evaluate("async () => {" + xpath_element_js + "}")
# So the JS will find the smallest one first
xpath_data['size_pos'] = sorted(xpath_data['size_pos'], key=lambda k: k['width'] * k['height'], reverse=True)
print("Time to complete get_current_state of browser", time.time() - now)
# except
logger.debug(f"Time to scrape xpath element data in browser {time.time()-now:.2f}s")

# playwright._impl._api_types.Error: Browser closed.
# @todo show some countdown timer?
return (screenshot, xpath_data)

def request_visualselector_data(self):
"""
Does the same as the playwright operation in content_fetcher does
This is used to just bump the VisualSelector data so it's ready to go if they click on the tab
@todo refactor and remove duplicate code, add include_filters
:param xpath_data:
:param screenshot:
:param current_include_filters:
:return:
"""

self.page.evaluate("var include_filters=''")
from pkg_resources import resource_string
# The code that scrapes elements and makes a list of elements/size/position to click on in the VisualSelector
xpath_element_js = resource_string(__name__, "../../res/xpath_element_scraper.js").decode('utf-8')
from changedetectionio.content_fetcher import visualselector_xpath_selectors
xpath_element_js = xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors)
xpath_data = self.page.evaluate("async () => {" + xpath_element_js + "}")
screenshot = self.page.screenshot(type='jpeg', full_page=True, quality=int(os.getenv("PLAYWRIGHT_SCREENSHOT_QUALITY", 72)))

return (screenshot, xpath_data)

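A minimal sketch of the pkg_resources to importlib.resources migration visible in get_current_state() above: pkg_resources (from setuptools) is deprecated and slow to import, while importlib.resources.files() has been in the standard library since Python 3.9 and resolves package data the same way. The package and resource names follow the diff.

import importlib.resources

# Old (deprecated):
#   from pkg_resources import resource_string
#   js = resource_string(__name__, "../../res/xpath_element_scraper.js").decode('utf-8')

# New:
js = (importlib.resources.files("changedetectionio.content_fetchers.res")
      .joinpath("xpath_element_scraper.js")
      .read_text())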
@@ -1,5 +1,4 @@
from playwright.sync_api import PlaywrightContextManager
import asyncio

# So playwright wants to run as a context manager, but we do something horrible and hacky
# we are holding the session open for as long as possible, then shutting it down, and opening a new one

@@ -1,14 +1,14 @@
import importlib
from concurrent.futures import ThreadPoolExecutor

from changedetectionio.processors.text_json_diff.processor import FilterNotFoundInResponse
from changedetectionio.store import ChangeDetectionStore

from functools import wraps

from flask import Blueprint
from flask_login import login_required

from changedetectionio.processors import text_json_diff
from changedetectionio.store import ChangeDetectionStore


STATUS_CHECKING = 0
STATUS_FAILED = 1
STATUS_OK = 2
@@ -32,32 +32,36 @@ def construct_blueprint(datastore: ChangeDetectionStore):
@threadpool
def long_task(uuid, preferred_proxy):
import time
from changedetectionio import content_fetcher
from changedetectionio.content_fetchers import exceptions as content_fetcher_exceptions
from changedetectionio.safe_jinja import render as jinja_render

status = {'status': '', 'length': 0, 'text': ''}
from jinja2 import Environment, BaseLoader

contents = ''
now = time.time()
try:
update_handler = text_json_diff.perform_site_check(datastore=datastore)
changed_detected, update_obj, contents = update_handler.run(uuid, preferred_proxy=preferred_proxy, skip_when_checksum_same=False)
processor_module = importlib.import_module("changedetectionio.processors.text_json_diff.processor")
update_handler = processor_module.perform_site_check(datastore=datastore,
watch_uuid=uuid
)

update_handler.call_browser(preferred_proxy_id=preferred_proxy)
# title, size is len contents not len xfer
except content_fetcher.Non200ErrorCodeReceived as e:
except content_fetcher_exceptions.Non200ErrorCodeReceived as e:
if e.status_code == 404:
status.update({'status': 'OK', 'length': len(contents), 'text': f"OK but 404 (page not found)"})
elif e.status_code == 403 or e.status_code == 401:
status.update({'status': 'ERROR', 'length': len(contents), 'text': f"{e.status_code} - Access denied"})
else:
status.update({'status': 'ERROR', 'length': len(contents), 'text': f"Status code: {e.status_code}"})
except text_json_diff.FilterNotFoundInResponse:
except FilterNotFoundInResponse:
status.update({'status': 'OK', 'length': len(contents), 'text': f"OK but CSS/xPath filter not found (page changed layout?)"})
except content_fetcher.EmptyReply as e:
except content_fetcher_exceptions.EmptyReply as e:
if e.status_code == 403 or e.status_code == 401:
status.update({'status': 'ERROR OTHER', 'length': len(contents), 'text': f"Got empty reply with code {e.status_code} - Access denied"})
else:
status.update({'status': 'ERROR OTHER', 'length': len(contents) if contents else 0, 'text': f"Empty reply with code {e.status_code}, needs chrome?"})
except content_fetcher.ReplyWithContentButNoText as e:
except content_fetcher_exceptions.ReplyWithContentButNoText as e:
txt = f"Got reply but with no content - Status code {e.status_code} - It's possible that the filters were found, but contained no usable text (or contained only an image)."
status.update({'status': 'ERROR', 'text': txt})
except Exception as e:
@@ -66,7 +70,9 @@ def construct_blueprint(datastore: ChangeDetectionStore):
status.update({'status': 'OK', 'length': len(contents), 'text': ''})

if status.get('text'):
status['text'] = Environment(loader=BaseLoader()).from_string('{{text|e}}').render({'text': status['text']})
# parse 'text' as text for safety
v = {'text': status['text']}
status['text'] = jinja_render(template_str='{{text|e}}', **v)

status['time'] = "{:.2f}s".format(time.time() - now)


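A minimal sketch of what the jinja_render swap above buys, assuming changedetectionio.safe_jinja wraps a sandboxed environment roughly like this: a plain jinja2.Environment evaluates whatever expressions appear in a template string, while the sandbox refuses the attribute-access tricks used for server-side template injection, and the |e filter HTML-escapes the output.

from jinja2.sandbox import ImmutableSandboxedEnvironment

def render(template_str, **args) -> str:
    # Sandboxed: attribute/method access that could reach dangerous internals is refused
    return ImmutableSandboxedEnvironment().from_string(template_str).render(args)

print(render('{{text|e}}', text='<b>403 - Access denied</b>'))
# -> &lt;b&gt;403 - Access denied&lt;/b&gt;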
@@ -1,5 +1,5 @@

from distutils.util import strtobool
from changedetectionio.strtobool import strtobool
from flask import Blueprint, flash, redirect, url_for
from flask_login import login_required
from changedetectionio.store import ChangeDetectionStore
@@ -17,9 +17,10 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q: PriorityQueue
@price_data_follower_blueprint.route("/<string:uuid>/accept", methods=['GET'])
def accept(uuid):
datastore.data['watching'][uuid]['track_ldjson_price_data'] = PRICE_DATA_TRACK_ACCEPT
update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid, 'skip_when_checksum_same': False}))
return redirect(url_for("form_watch_checknow", uuid=uuid))

datastore.data['watching'][uuid]['processor'] = 'restock_diff'
datastore.data['watching'][uuid].clear_watch()
update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid}))
return redirect(url_for("index"))

@login_required
@price_data_follower_blueprint.route("/<string:uuid>/reject", methods=['GET'])

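A minimal sketch of why the strtobool import moves off distutils above: distutils was deprecated by PEP 632 and removed in Python 3.12, so projects vendor the same small helper. This mirrors the classic distutils.util.strtobool behaviour (assumed to match changedetectionio.strtobool).

def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in ('y', 'yes', 't', 'true', 'on', '1'):
        return 1
    if val in ('n', 'no', 'f', 'false', 'off', '0'):
        return 0
    raise ValueError(f"invalid truth value {val!r}")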
@@ -1,6 +1,8 @@
from flask import Blueprint, request, make_response, render_template, flash, url_for, redirect
from flask import Blueprint, request, render_template, flash, url_for, redirect


from changedetectionio.store import ChangeDetectionStore
from changedetectionio import login_optionally_required
from changedetectionio.flask_app import login_optionally_required


def construct_blueprint(datastore: ChangeDetectionStore):
@@ -11,9 +13,17 @@ def construct_blueprint(datastore: ChangeDetectionStore):
def tags_overview_page():
from .form import SingleTag
add_form = SingleTag(request.form)

sorted_tags = sorted(datastore.data['settings']['application'].get('tags').items(), key=lambda x: x[1]['title'])

from collections import Counter

tag_count = Counter(tag for watch in datastore.data['watching'].values() if watch.get('tags') for tag in watch['tags'])

output = render_template("groups-overview.html",
available_tags=sorted_tags,
form=add_form,
available_tags=datastore.data['settings']['application'].get('tags', {}),
tag_count=tag_count
)

return output
@@ -89,22 +99,57 @@ def construct_blueprint(datastore: ChangeDetectionStore):
@tags_blueprint.route("/edit/<string:uuid>", methods=['GET'])
@login_optionally_required
def form_tag_edit(uuid):
from changedetectionio import forms

from changedetectionio.blueprint.tags.form import group_restock_settings_form
if uuid == 'first':
uuid = list(datastore.data['settings']['application']['tags'].keys()).pop()

default = datastore.data['settings']['application']['tags'].get(uuid)

form = forms.watchForm(formdata=request.form if request.method == 'POST' else None,
data=default,
)
form.datastore=datastore # needed?
form = group_restock_settings_form(
formdata=request.form if request.method == 'POST' else None,
data=default,
extra_notification_tokens=datastore.get_unique_notification_tokens_available(),
default_system_settings = datastore.data['settings'],
)

template_args = {
'data': default,
'form': form,
'watch': default,
'extra_notification_token_placeholder_info': datastore.get_unique_notification_token_placeholders_available(),
}

included_content = {}
if form.extra_form_content():
# So that the extra panels can access _helpers.html etc, we set the environment to load from templates/
# And then render the code from the module
from jinja2 import Environment, FileSystemLoader
import importlib.resources
templates_dir = str(importlib.resources.files("changedetectionio").joinpath('templates'))
env = Environment(loader=FileSystemLoader(templates_dir))
template_str = """{% from '_helpers.html' import render_field, render_checkbox_field, render_button %}
<script>
$(document).ready(function () {
toggleOpacity('#overrides_watch', '#restock-fieldset-price-group', true);
});
</script>
<fieldset>
<div class="pure-control-group">
<fieldset class="pure-group">
{{ render_checkbox_field(form.overrides_watch) }}
<span class="pure-form-message-inline">Used for watches in "Restock & Price detection" mode</span>
</fieldset>
</fieldset>
"""
template_str += form.extra_form_content()
template = env.from_string(template_str)
included_content = template.render(**template_args)

output = render_template("edit-tag.html",
data=default,
form=form,
settings_application=datastore.data['settings']['application'],
extra_tab_content=form.extra_tab_content() if form.extra_tab_content() else None,
extra_form_content=included_content,
**template_args
)

return output
@@ -113,14 +158,15 @@ def construct_blueprint(datastore: ChangeDetectionStore):
@tags_blueprint.route("/edit/<string:uuid>", methods=['POST'])
@login_optionally_required
def form_tag_edit_submit(uuid):
from changedetectionio import forms
from changedetectionio.blueprint.tags.form import group_restock_settings_form
if uuid == 'first':
uuid = list(datastore.data['settings']['application']['tags'].keys()).pop()

default = datastore.data['settings']['application']['tags'].get(uuid)

form = forms.watchForm(formdata=request.form if request.method == 'POST' else None,
form = group_restock_settings_form(formdata=request.form if request.method == 'POST' else None,
data=default,
extra_notification_tokens=datastore.get_unique_notification_tokens_available()
)
# @todo subclass form so validation works
#if not form.validate():
@@ -129,6 +175,7 @@ def construct_blueprint(datastore: ChangeDetectionStore):
# return redirect(url_for('tags.form_tag_edit_submit', uuid=uuid))

datastore.data['settings']['application']['tags'][uuid].update(form.data)
datastore.data['settings']['application']['tags'][uuid]['processor'] = 'restock_diff'
datastore.needs_write_urgent = True
flash("Updated")


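A minimal sketch of the collections.Counter expression used in tags_overview_page() above: one pass over all watches yields a tag-uuid to count mapping, so the overview template can show a per-tag watch count without rescanning the watch list per row. The sample data is illustrative only.

from collections import Counter

watching = {
    'w1': {'tags': ['uuid-news', 'uuid-prices']},
    'w2': {'tags': ['uuid-news']},
    'w3': {},  # watches without tags are skipped by the `if watch.get('tags')` guard
}
tag_count = Counter(tag for watch in watching.values() if watch.get('tags') for tag in watch['tags'])
print(tag_count['uuid-news'])    # 2
print(tag_count['uuid-unused'])  # 0 - Counter returns 0 for missing keys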
@@ -1,16 +1,15 @@
from wtforms import (
BooleanField,
Form,
IntegerField,
RadioField,
SelectField,
StringField,
SubmitField,
TextAreaField,
validators,
)
from wtforms.fields.simple import BooleanField

from changedetectionio.processors.restock_diff.forms import processor_settings_form as restock_settings_form

class group_restock_settings_form(restock_settings_form):
overrides_watch = BooleanField('Activate for individual watches in this tag/group?', default=False)

class SingleTag(Form):


@@ -1,9 +1,9 @@
{% extends 'base.html' %}
{% block content %}
{% from '_helpers.jinja' import render_field, render_checkbox_field, render_button %}
{% from '_common_fields.jinja' import render_common_settings_form %}
{% from '_helpers.html' import render_field, render_checkbox_field, render_button %}
{% from '_common_fields.html' import render_common_settings_form %}
<script>
const notification_base_url="{{url_for('ajax_callback_send_notification_test')}}";
const notification_base_url="{{url_for('ajax_callback_send_notification_test', mode="group-settings")}}";
</script>

<script src="{{url_for('static_content', group='js', filename='tabs.js')}}" defer></script>
@@ -17,7 +17,6 @@
</script>

<script src="{{url_for('static_content', group='js', filename='watch-settings.js')}}" defer></script>
<!--<script src="{{url_for('static_content', group='js', filename='limit.js')}}" defer></script>-->
<script src="{{url_for('static_content', group='js', filename='notifications.js')}}" defer></script>

<div class="edit-form monospaced-textarea">
@@ -26,6 +25,9 @@
<ul>
<li class="tab" id=""><a href="#general">General</a></li>
<li class="tab"><a href="#filters-and-triggers">Filters & Triggers</a></li>
{% if extra_tab_content %}
<li class="tab"><a href="#extras_tab">{{ extra_tab_content }}</a></li>
{% endif %}
<li class="tab"><a href="#notifications">Notifications</a></li>
</ul>
</div>
@@ -55,25 +57,26 @@ xpath://body/div/span[contains(@class, 'example-class')]",
{% if '/text()' in field %}
<span class="pure-form-message-inline"><strong>Note!: //text() function does not work where the <element> contains <![CDATA[]]></strong></span><br>
{% endif %}
<span class="pure-form-message-inline">One rule per line, <i>any</i> rule that matches will be used.<br>

<ul>
<span class="pure-form-message-inline">One CSS, xPath, JSON Path/JQ selector per line, <i>any</i> rule that matches will be used.<br>
<div data-target="#advanced-help-selectors" class="toggle-show pure-button button-tag button-xsmall">Show advanced help and tips</div>
<ul id="advanced-help-selectors">
<li>CSS - Limit text to this CSS rule, only text matching this CSS rule is included.</li>
<li>JSON - Limit text to this JSON rule, using either <a href="https://pypi.org/project/jsonpath-ng/" target="new">JSONPath</a> or <a href="https://stedolan.github.io/jq/" target="new">jq</a> (if installed).
<ul>
<li>JSONPath: Prefix with <code>json:</code>, use <code>json:$</code> to force re-formatting if required, <a href="https://jsonpath.com/" target="new">test your JSONPath here</a>.</li>
{% if jq_support %}
<li>jq: Prefix with <code>jq:</code> and <a href="https://jqplay.org/" target="new">test your jq here</a>. Using <a href="https://stedolan.github.io/jq/" target="new">jq</a> allows for complex filtering and processing of JSON data with built-in functions, regex, filtering, and more. See examples and documentation <a href="https://stedolan.github.io/jq/manual/" target="new">here</a>.</li>
<li>jq: Prefix with <code>jq:</code> and <a href="https://jqplay.org/" target="new">test your jq here</a>. Using <a href="https://stedolan.github.io/jq/" target="new">jq</a> allows for complex filtering and processing of JSON data with built-in functions, regex, filtering, and more. See examples and documentation <a href="https://stedolan.github.io/jq/manual/" target="new">here</a>. Prefixing with <code>jqraw:</code> outputs the results as text instead of a JSON list.</li>
{% else %}
<li>jq support not installed</li>
{% endif %}
</ul>
</li>
<li>XPath - Limit text to this XPath rule, simply start with a forward-slash,
<li>XPath - Limit text to this XPath rule, simply start with a forward-slash. To specify XPath explicitly, or when the rule starts with an XPath function, prefix with <code>xpath:</code>
<ul>
<li>Example: <code>//*[contains(@class, 'sametext')]</code> or <code>xpath://*[contains(@class, 'sametext')]</code>, <a
<li>Example: <code>//*[contains(@class, 'sametext')]</code> or <code>xpath:count(//*[contains(@class, 'sametext')])</code>, <a
href="http://xpather.com/" target="new">test your XPath here</a></li>
<li>Example: Get all titles from an RSS feed <code>//title/text()</code></li>
<li>To use XPath1.0: Prefix with <code>xpath1:</code></li>
</ul>
</li>
</ul>
@@ -85,17 +88,25 @@ xpath://body/div/span[contains(@class, 'example-class')]",
{{ render_field(form.subtractive_selectors, rows=5, placeholder="header
footer
nav
.stockticker") }}
.stockticker
//*[contains(text(), 'Advertisement')]") }}
<span class="pure-form-message-inline">
<ul>
<li> Remove HTML element(s) by CSS selector before text conversion. </li>
<li> Add multiple elements or CSS selectors per line to ignore multiple parts of the HTML. </li>
<li> Remove HTML element(s) by CSS and XPath selectors before text conversion. </li>
<li> Don't paste HTML here, use only CSS and XPath selectors </li>
<li> Add multiple elements, CSS or XPath selectors per line to ignore multiple parts of the HTML. </li>
</ul>
</span>
</fieldset>

</div>

{# rendered sub Template #}
{% if extra_form_content %}
<div class="tab-pane-inner" id="extras_tab">
{{ extra_form_content|safe }}
</div>
{% endif %}
<div class="tab-pane-inner" id="notifications">
<fieldset>
<div class="pure-control-group inline-radio">
@@ -118,7 +129,7 @@ nav
{% endif %}
<a href="#notifications" id="notification-setting-reset-to-default" class="pure-button button-xsmall" style="right: 20px; top: 20px; position: absolute; background-color: #5f42dd; border-radius: 4px; font-size: 70%; color: #fff">Use system defaults</a>

{{ render_common_settings_form(form, emailprefix, settings_application) }}
{{ render_common_settings_form(form, emailprefix, settings_application, extra_notification_token_placeholder_info) }}
</div>
</fieldset>
</div>

@@ -1,6 +1,6 @@
{% extends 'base.html' %}
{% block content %}
{% from '_helpers.jinja' import render_simple_field, render_field %}
{% from '_helpers.html' import render_simple_field, render_field %}
<script src="{{url_for('static_content', group='js', filename='jquery-3.6.0.min.js')}}"></script>

<div class="box">
@@ -27,6 +27,7 @@
<thead>
<tr>
<th></th>
<th># Watches</th>
<th>Tag / Label name</th>
<th></th>
</tr>
@@ -40,12 +41,13 @@
<td colspan="3">No website organisational tags/groups configured</td>
</tr>
{% endif %}
{% for uuid, tag in available_tags.items() %}
{% for uuid, tag in available_tags %}
<tr id="{{ uuid }}" class="{{ loop.cycle('pure-table-odd', 'pure-table-even') }}">
<td class="watch-controls">
<a class="link-mute state-{{'on' if tag.notification_muted else 'off'}}" href="{{url_for('tags.mute', uuid=tag.uuid)}}"><img src="{{url_for('static_content', group='images', filename='bell-off.svg')}}" alt="Mute notifications" title="Mute notifications" class="icon icon-mute" ></a>
</td>
<td class="title-col inline">{{tag.title}}</td>
<td>{{ "{:,}".format(tag_count[uuid]) if uuid in tag_count else 0 }}</td>
<td class="title-col inline"> <a href="{{url_for('index', tag=uuid) }}">{{ tag.title }}</a></td>
<td>
<a class="pure-button pure-button-primary" href="{{ url_for('tags.form_tag_edit', uuid=uuid) }}">Edit</a>
<a class="pure-button pure-button-primary" href="{{ url_for('tags.delete', uuid=uuid) }}" title="Deletes and removes tag">Delete</a>

@@ -1,153 +0,0 @@
#!/usr/bin/python3

# Launch as an eventlet.wsgi server instance.

from distutils.util import strtobool
from json.decoder import JSONDecodeError

import eventlet
import eventlet.wsgi
import getopt
import os
import signal
import socket
import sys

from . import store, changedetection_app, content_fetcher
from . import __version__

# Only global so we can access it in the signal handler
app = None
datastore = None

def sigterm_handler(_signo, _stack_frame):
global app
global datastore
# app.config.exit.set()
print('Shutdown: Got SIGTERM, DB saved to disk')
datastore.sync_to_json()
# raise SystemExit

def main():
global datastore
global app

datastore_path = None
do_cleanup = False
host = ''
ipv6_enabled = False
port = os.environ.get('PORT') or 5000
ssl_mode = False

# On Windows, create and use a default path.
if os.name == 'nt':
datastore_path = os.path.expandvars(r'%APPDATA%\changedetection.io')
os.makedirs(datastore_path, exist_ok=True)
else:
# Must be absolute so that send_from_directory doesn't try to make it relative to backend/
datastore_path = os.path.join(os.getcwd(), "../datastore")

try:
opts, args = getopt.getopt(sys.argv[1:], "6Ccsd:h:p:", "port")
except getopt.GetoptError:
print('backend.py -s SSL enable -h [host] -p [port] -d [datastore path]')
sys.exit(2)

create_datastore_dir = False

for opt, arg in opts:
if opt == '-s':
ssl_mode = True

if opt == '-h':
host = arg

if opt == '-p':
port = int(arg)

if opt == '-d':
datastore_path = arg

if opt == '-6':
print ("Enabling IPv6 listen support")
ipv6_enabled = True

# Cleanup (remove text files that aren't in the index)
if opt == '-c':
do_cleanup = True

# Create the datadir if it doesn't exist
if opt == '-C':
create_datastore_dir = True

# isn't there some @thingy to attach to each route to tell it, that this route needs a datastore
app_config = {'datastore_path': datastore_path}

if not os.path.isdir(app_config['datastore_path']):
if create_datastore_dir:
os.mkdir(app_config['datastore_path'])
else:
print(
"ERROR: Directory path for the datastore '{}' does not exist, cannot start, please make sure the directory exists or specify a directory with the -d option.\n"
"Or use the -C parameter to create the directory.".format(app_config['datastore_path']), file=sys.stderr)
sys.exit(2)

try:
datastore = store.ChangeDetectionStore(datastore_path=app_config['datastore_path'], version_tag=__version__)
except JSONDecodeError as e:
# Don't start if the JSON DB looks corrupt
print ("ERROR: JSON DB or Proxy List JSON at '{}' appears to be corrupt, aborting".format(app_config['datastore_path']))
print(str(e))
return

app = changedetection_app(app_config, datastore)

signal.signal(signal.SIGTERM, sigterm_handler)

# Go into cleanup mode
if do_cleanup:
datastore.remove_unused_snapshots()

app.config['datastore_path'] = datastore_path


@app.context_processor
def inject_version():
return dict(right_sticky="v{}".format(datastore.data['version_tag']),
new_version_available=app.config['NEW_VERSION_AVAILABLE'],
has_password=datastore.data['settings']['application']['password'] != False
)

# Monitored websites will not receive a Referer header when a user clicks on an outgoing link.
# @Note: Incompatible with password login (and maybe other features) for now, submit a PR!
@app.after_request
def hide_referrer(response):
if strtobool(os.getenv("HIDE_REFERER", 'false')):
response.headers["Referrer-Policy"] = "no-referrer"

return response

# Proxy sub-directory support
# Set environment var USE_X_SETTINGS=1 on this script
# And then in your proxy_pass settings
#
# proxy_set_header Host "localhost";
# proxy_set_header X-Forwarded-Prefix /app;

if os.getenv('USE_X_SETTINGS'):
print ("USE_X_SETTINGS is ENABLED\n")
from werkzeug.middleware.proxy_fix import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app, x_prefix=1, x_host=1)

s_type = socket.AF_INET6 if ipv6_enabled else socket.AF_INET

if ssl_mode:
# @todo finalise SSL config, but this should get you in the right direction if you need it.
eventlet.wsgi.server(eventlet.wrap_ssl(eventlet.listen((host, port), s_type),
certfile='cert.pem',
keyfile='privkey.pem',
server_side=True), app)

else:
eventlet.wsgi.server(eventlet.listen((host, int(port)), s_type), app)

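A minimal sketch of how the getopt option string in the removed launcher maps to its flags: in "6Ccsd:h:p:", letters without a trailing colon (-6, -C, -c, -s) are booleans, while those followed by a colon (-d, -h, -p) each consume an argument. The argv below is illustrative only.

import getopt

argv = ['-C', '-d', '/datastore', '-p', '5001']
opts, args = getopt.getopt(argv, "6Ccsd:h:p:")
print(opts)  # [('-C', ''), ('-d', '/datastore'), ('-p', '5001')]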
@@ -1,749 +0,0 @@
import hashlib
from abc import abstractmethod
import chardet
import json
import logging
import os
import requests
import sys
import time

visualselector_xpath_selectors = 'div,span,form,table,tbody,tr,td,a,p,ul,li,h1,h2,h3,h4, header, footer, section, article, aside, details, main, nav, section, summary'


class Non200ErrorCodeReceived(Exception):
def __init__(self, status_code, url, screenshot=None, xpath_data=None, page_html=None):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
self.screenshot = screenshot
self.xpath_data = xpath_data
self.page_text = None

if page_html:
from changedetectionio import html_tools
self.page_text = html_tools.html_to_text(page_html)
return


class checksumFromPreviousCheckWasTheSame(Exception):
def __init__(self):
return


class JSActionExceptions(Exception):
def __init__(self, status_code, url, screenshot, message=''):
self.status_code = status_code
self.url = url
self.screenshot = screenshot
self.message = message
return


class BrowserStepsStepTimout(Exception):
def __init__(self, step_n):
self.step_n = step_n
return


class PageUnloadable(Exception):
def __init__(self, status_code, url, message, screenshot=False):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
self.screenshot = screenshot
self.message = message
return


class EmptyReply(Exception):
def __init__(self, status_code, url, screenshot=None):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
self.screenshot = screenshot
return


class ScreenshotUnavailable(Exception):
def __init__(self, status_code, url, page_html=None):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
if page_html:
from html_tools import html_to_text
self.page_text = html_to_text(page_html)
return


class ReplyWithContentButNoText(Exception):
def __init__(self, status_code, url, screenshot=None, has_filters=False, html_content=''):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
self.screenshot = screenshot
self.has_filters = has_filters
self.html_content = html_content
return


class Fetcher():
browser_steps = None
browser_steps_screenshot_path = None
content = None
error = None
fetcher_description = "No description"
headers = {}
status_code = None
webdriver_js_execute_code = None
xpath_data = None
xpath_element_js = ""
instock_data = None
instock_data_js = ""

# Will be needed in the future by the VisualSelector, always get this where possible.
screenshot = False
system_http_proxy = os.getenv('HTTP_PROXY')
system_https_proxy = os.getenv('HTTPS_PROXY')

# Time ONTOP of the system defined env minimum time
render_extract_delay = 0

def __init__(self):
from pkg_resources import resource_string
# The code that scrapes elements and makes a list of elements/size/position to click on in the VisualSelector
self.xpath_element_js = resource_string(__name__, "res/xpath_element_scraper.js").decode('utf-8')
self.instock_data_js = resource_string(__name__, "res/stock-not-in-stock.js").decode('utf-8')

@abstractmethod
def get_error(self):
return self.error

@abstractmethod
def run(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False):
# Should set self.error, self.status_code and self.content
pass

@abstractmethod
def quit(self):
return

@abstractmethod
def get_last_status_code(self):
return self.status_code

@abstractmethod
def screenshot_step(self, step_n):
return None

@abstractmethod
# Return true/false if this checker is ready to run, in the case it needs to do some special config check etc
def is_ready(self):
return True

def get_all_headers(self):
"""
Get all headers but ensure all keys are lowercase
:return:
"""
return {k.lower(): v for k, v in self.headers.items()}

def iterate_browser_steps(self):
from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface
from playwright._impl._api_types import TimeoutError
from jinja2 import Environment
jinja2_env = Environment(extensions=['jinja2_time.TimeExtension'])

step_n = 0

if self.browser_steps is not None and len(self.browser_steps):
interface = steppable_browser_interface()
interface.page = self.page

valid_steps = filter(
lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'),
self.browser_steps)

for step in valid_steps:
step_n += 1
print(">> Iterating check - browser Step n {} - {}...".format(step_n, step['operation']))
self.screenshot_step("before-" + str(step_n))
self.save_step_html("before-" + str(step_n))
try:
optional_value = step['optional_value']
selector = step['selector']
# Support for jinja2 template in step values, with date module added
if '{%' in step['optional_value'] or '{{' in step['optional_value']:
optional_value = str(jinja2_env.from_string(step['optional_value']).render())
if '{%' in step['selector'] or '{{' in step['selector']:
selector = str(jinja2_env.from_string(step['selector']).render())

getattr(interface, "call_action")(action_name=step['operation'],
selector=selector,
optional_value=optional_value)
self.screenshot_step(step_n)
self.save_step_html(step_n)
except TimeoutError as e:
print(str(e))
# Stop processing here
raise BrowserStepsStepTimout(step_n=step_n)

# It's always good to reset these
def delete_browser_steps_screenshots(self):
import glob
if self.browser_steps_screenshot_path is not None:
dest = os.path.join(self.browser_steps_screenshot_path, 'step_*.jpeg')
files = glob.glob(dest)
for f in files:
if os.path.isfile(f):
os.unlink(f)


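A minimal sketch of the jinja2_time.TimeExtension support in iterate_browser_steps() above: a step's selector or value may contain {% now %} tags, which is handy for typing "today's date" into forms during browser steps. The format string below is illustrative.

from jinja2 import Environment

jinja2_env = Environment(extensions=['jinja2_time.TimeExtension'])
optional_value = "{% now 'utc', '%Y-%m-%d' %}"
print(jinja2_env.from_string(optional_value).render())  # e.g. 2024-01-31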
# Maybe for the future, each fetcher provides its own diff output, could be used for text, image
# the current one would return javascript output (as we use JS to generate the diff)
#
def available_fetchers():
# See the if statement at the bottom of this file for how we switch between playwright and webdriver
import inspect
p = []
for name, obj in inspect.getmembers(sys.modules[__name__], inspect.isclass):
if inspect.isclass(obj):
# @todo html_ is maybe better as fetcher_ or something
# In this case, make sure to edit the default one in store.py and fetch_site_status.py
if name.startswith('html_'):
t = tuple([name, obj.fetcher_description])
p.append(t)

return p


class base_html_playwright(Fetcher):
fetcher_description = "Playwright {}/Javascript".format(
os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').capitalize()
)
if os.getenv("PLAYWRIGHT_DRIVER_URL"):
fetcher_description += " via '{}'".format(os.getenv("PLAYWRIGHT_DRIVER_URL"))

browser_type = ''
command_executor = ''

# Configs for Proxy setup
# In the ENV vars, is prefixed with "playwright_proxy_", so it is for example "playwright_proxy_server"
playwright_proxy_settings_mappings = ['bypass', 'server', 'username', 'password']

proxy = None

def __init__(self, proxy_override=None):
super().__init__()
# .strip('"') is going to save someone a lot of time when they accidentally wrap the env value
self.browser_type = os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').strip('"')
self.command_executor = os.getenv(
"PLAYWRIGHT_DRIVER_URL",
'ws://playwright-chrome:3000'
).strip('"')

# If any proxy settings are enabled, then we should set up the proxy object
proxy_args = {}
for k in self.playwright_proxy_settings_mappings:
v = os.getenv('playwright_proxy_' + k, False)
if v:
proxy_args[k] = v.strip('"')

if proxy_args:
self.proxy = proxy_args

# allow per-watch proxy selection override
if proxy_override:
self.proxy = {'server': proxy_override}

if self.proxy:
# Playwright needs separate username and password values
from urllib.parse import urlparse
parsed = urlparse(self.proxy.get('server'))
if parsed.username:
self.proxy['username'] = parsed.username
self.proxy['password'] = parsed.password

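A minimal sketch of the credential split at the end of base_html_playwright.__init__ above: Playwright expects proxy credentials as separate 'username'/'password' keys rather than embedded in the server URL, so urlparse pulls them apart. The proxy URL below is illustrative only.

from urllib.parse import urlparse

proxy = {'server': 'http://user:secret@proxy.example:3128'}
parsed = urlparse(proxy.get('server'))
if parsed.username:
    proxy['username'] = parsed.username
    proxy['password'] = parsed.password
print(proxy['username'], proxy['password'])  # user secret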
def screenshot_step(self, step_n=''):
screenshot = self.page.screenshot(type='jpeg', full_page=True, quality=85)

if self.browser_steps_screenshot_path is not None:
destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.jpeg'.format(step_n))
logging.debug("Saving step screenshot to {}".format(destination))
with open(destination, 'wb') as f:
f.write(screenshot)

def save_step_html(self, step_n):
content = self.page.content()
destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.html'.format(step_n))
logging.debug("Saving step HTML to {}".format(destination))
with open(destination, 'w') as f:
f.write(content)

def run_fetch_browserless_puppeteer(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False):

from pkg_resources import resource_string

extra_wait_ms = (int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay) * 1000

self.xpath_element_js = self.xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors)
code = resource_string(__name__, "res/puppeteer_fetch.js").decode('utf-8')
# In the future, inject this as a proper JS package
code = code.replace('%xpath_scrape_code%', self.xpath_element_js)
code = code.replace('%instock_scrape_code%', self.instock_data_js)

from requests.exceptions import ConnectTimeout, ReadTimeout
wait_browserless_seconds = 240

browserless_function_url = os.getenv('BROWSERLESS_FUNCTION_URL')
from urllib.parse import urlparse
if not browserless_function_url:
# Convert/try to guess from PLAYWRIGHT_DRIVER_URL
o = urlparse(os.getenv('PLAYWRIGHT_DRIVER_URL'))
browserless_function_url = o._replace(scheme="http")._replace(path="function").geturl()


# Append proxy connect string
if self.proxy:
import urllib.parse
# Remove username/password if it exists in the URL or you will receive "ERR_NO_SUPPORTED_PROXIES" error
# Actual authentication handled by Puppeteer/node
o = urlparse(self.proxy.get('server'))
proxy_url = urllib.parse.quote(o._replace(netloc="{}:{}".format(o.hostname, o.port)).geturl())
browserless_function_url = f"{browserless_function_url}&--proxy-server={proxy_url}&dumpio=true"


try:
amp = '&' if '?' in browserless_function_url else '?'
response = requests.request(
method="POST",
json={
"code": code,
"context": {
# Very primitive disk cache - USE WITH EXTREME CAUTION
# Run browserless container with -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]"
'disk_cache_dir': os.getenv("PUPPETEER_DISK_CACHE", False), # or path to disk cache ending in /, ie /tmp/cache/
'execute_js': self.webdriver_js_execute_code,
'extra_wait_ms': extra_wait_ms,
'include_filters': current_include_filters,
'req_headers': request_headers,
'screenshot_quality': int(os.getenv("PLAYWRIGHT_SCREENSHOT_QUALITY", 72)),
'url': url,
'user_agent': {k.lower(): v for k, v in request_headers.items()}.get('user-agent', None),
'proxy_username': self.proxy.get('username', '') if self.proxy else False,
'proxy_password': self.proxy.get('password', '') if self.proxy else False,
'no_cache_list': [
'twitter',
'.pdf'
],
# Could use https://github.com/easylist/easylist here, or install a plugin
'block_url_list': [
'adnxs.com',
'analytics.twitter.com',
'doubleclick.net',
'google-analytics.com',
'googletagmanager',
'trustpilot.com'
]
}
},
# @todo /function needs adding ws:// to http:// rebuild this
url=browserless_function_url+f"{amp}--disable-features=AudioServiceOutOfProcess&dumpio=true&--disable-remote-fonts",
timeout=wait_browserless_seconds)

except ReadTimeout:
raise PageUnloadable(url=url, status_code=None, message=f"No response from browserless in {wait_browserless_seconds}s")
except ConnectTimeout:
raise PageUnloadable(url=url, status_code=None, message=f"Timed out connecting to browserless, retrying..")
else:
# 200 Here means that the communication to browserless worked only, not the page state
if response.status_code == 200:
import base64

x = response.json()
if not x.get('screenshot'):
# https://github.com/puppeteer/puppeteer/blob/v1.0.0/docs/troubleshooting.md#tips
# https://github.com/puppeteer/puppeteer/issues/1834
# https://github.com/puppeteer/puppeteer/issues/1834#issuecomment-381047051
# Check your memory is shared and big enough
raise ScreenshotUnavailable(url=url, status_code=None)

if not x.get('content', '').strip():
raise EmptyReply(url=url, status_code=None)

if x.get('status_code', 200) != 200 and not ignore_status_codes:
raise Non200ErrorCodeReceived(url=url, status_code=x.get('status_code', 200), page_html=x['content'])

self.content = x.get('content')
self.headers = x.get('headers')
self.instock_data = x.get('instock_data')
self.screenshot = base64.b64decode(x.get('screenshot'))
self.status_code = x.get('status_code')
self.xpath_data = x.get('xpath_data')

else:
# Some other error from browserless
raise PageUnloadable(url=url, status_code=None, message=response.content.decode('utf-8'))

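A minimal sketch of the URL guessing in run_fetch_browserless_puppeteer() above: when BROWSERLESS_FUNCTION_URL is unset, the ws:// Playwright driver URL is rewritten into browserless's HTTP /function endpoint via urlparse's _replace(). The driver URL below is illustrative only.

from urllib.parse import urlparse

driver_url = 'ws://playwright-chrome:3000?token=x'
o = urlparse(driver_url)
function_url = o._replace(scheme="http")._replace(path="function").geturl()
print(function_url)  # http://playwright-chrome:3000/function?token=x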
def run(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False):

# For now, USE_EXPERIMENTAL_PUPPETEER_FETCH is not supported by watches with BrowserSteps (for now!)
has_browser_steps = self.browser_steps and list(filter(
lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'),
self.browser_steps))

if not has_browser_steps:
if os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH'):
# Temporary backup solution until we rewrite the playwright code
return self.run_fetch_browserless_puppeteer(
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes,
current_include_filters,
is_binary)

from playwright.sync_api import sync_playwright
import playwright._impl._api_types

self.delete_browser_steps_screenshots()
response = None
with sync_playwright() as p:
browser_type = getattr(p, self.browser_type)

# Seemed to cause a connection Exception even though I can see it connect
# self.browser = browser_type.connect(self.command_executor, timeout=timeout*1000)
# 60,000 connection timeout only
browser = browser_type.connect_over_cdp(self.command_executor, timeout=60000)

# Set user agent to prevent Cloudflare from blocking the browser
# Use the default one configured in the App.py model that's passed from fetch_site_status.py
context = browser.new_context(
user_agent={k.lower(): v for k, v in request_headers.items()}.get('user-agent', None),
proxy=self.proxy,
# This is needed to enable JavaScript execution on GitHub and others
bypass_csp=True,
# Should be `allow` or `block` - sites like YouTube can transmit large amounts of data via Service Workers
service_workers=os.getenv('PLAYWRIGHT_SERVICE_WORKERS', 'allow'),
# Should never be needed
accept_downloads=False
)

self.page = context.new_page()
if len(request_headers):
context.set_extra_http_headers(request_headers)

self.page.set_default_navigation_timeout(90000)
self.page.set_default_timeout(90000)

# Listen for all console events and handle errors
self.page.on("console", lambda msg: print(f"Playwright console: Watch URL: {url} {msg.type}: {msg.text} {msg.args}"))

# Goto page
try:
# Wait_until = commit
# - `'commit'` - consider operation to be finished when network response is received and the document started loading.
# Better to not use any smarts from Playwright and just wait an arbitrary number of seconds
# This seemed to solve nearly all 'TimeoutErrors'
response = self.page.goto(url, wait_until='commit')
except playwright._impl._api_types.Error as e:
# Retry once - https://github.com/browserless/chrome/issues/2485
# Sometimes errors related to invalid certs and others can be random
print("Content Fetcher > retrying request got error - ", str(e))
time.sleep(1)
response = self.page.goto(url, wait_until='commit')

except Exception as e:
print("Content Fetcher > Other exception when page.goto", str(e))
context.close()
browser.close()
raise PageUnloadable(url=url, status_code=None, message=str(e))

# Execute any browser steps
try:
extra_wait = int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay
self.page.wait_for_timeout(extra_wait * 1000)

if self.webdriver_js_execute_code is not None and len(self.webdriver_js_execute_code):
self.page.evaluate(self.webdriver_js_execute_code)

except playwright._impl._api_types.TimeoutError as e:
context.close()
browser.close()
# This can be ok, we will try to grab what we could retrieve
pass
except Exception as e:
print("Content Fetcher > Other exception when executing custom JS code", str(e))
context.close()
browser.close()
raise PageUnloadable(url=url, status_code=None, message=str(e))

if response is None:
context.close()
browser.close()
print("Content Fetcher > Response object was none")
raise EmptyReply(url=url, status_code=None)

# Run Browser Steps here
self.iterate_browser_steps()

extra_wait = int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay
time.sleep(extra_wait)

self.content = self.page.content()
self.status_code = response.status
if len(self.page.content().strip()) == 0:
context.close()
browser.close()
print("Content Fetcher > Content was empty")
raise EmptyReply(url=url, status_code=response.status)

self.status_code = response.status
self.headers = response.all_headers()

# So we can find an element on the page where its selector was entered manually (maybe not xPath etc)
if current_include_filters is not None:
self.page.evaluate("var include_filters={}".format(json.dumps(current_include_filters)))
else:
self.page.evaluate("var include_filters=''")

self.xpath_data = self.page.evaluate(
"async () => {" + self.xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors) + "}")
self.instock_data = self.page.evaluate("async () => {" + self.instock_data_js + "}")

# Bug 3 in Playwright screenshot handling
# Some bug where it gives the wrong screenshot size, but making a request with the clip set first seems to solve it
# JPEG is better here because the screenshots can be very very large

# Screenshots also travel via the ws:// (websocket) meaning that the binary data is base64 encoded
# which will significantly increase the IO size between the server and client, it's recommended to use the lowest
# acceptable screenshot quality here
try:
# The actual screenshot
self.screenshot = self.page.screenshot(type='jpeg', full_page=True,
quality=int(os.getenv("PLAYWRIGHT_SCREENSHOT_QUALITY", 72)))
except Exception as e:
context.close()
browser.close()
raise ScreenshotUnavailable(url=url, status_code=None)

context.close()
browser.close()


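A minimal sketch of the "retry once" pattern wrapped around page.goto() above: transient certificate or connection errors from the remote Chrome are often gone on a second attempt, so a single retry avoids failing the whole check. This generalises the except clause; the diff catches playwright's Error specifically.

import time

def goto_with_retry(page, url):
    try:
        return page.goto(url, wait_until='commit')
    except Exception as e:
        print("Content Fetcher > retrying request got error - ", str(e))
        time.sleep(1)
        return page.goto(url, wait_until='commit')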
class base_html_webdriver(Fetcher):
if os.getenv("WEBDRIVER_URL"):
fetcher_description = "WebDriver Chrome/Javascript via '{}'".format(os.getenv("WEBDRIVER_URL"))
else:
fetcher_description = "WebDriver Chrome/Javascript"

command_executor = ''

# Configs for Proxy setup
# In the ENV vars, is prefixed with "webdriver_", so it is for example "webdriver_sslProxy"
selenium_proxy_settings_mappings = ['proxyType', 'ftpProxy', 'httpProxy', 'noProxy',
'proxyAutoconfigUrl', 'sslProxy', 'autodetect',
'socksProxy', 'socksVersion', 'socksUsername', 'socksPassword']
proxy = None

def __init__(self, proxy_override=None):
super().__init__()
from selenium.webdriver.common.proxy import Proxy as SeleniumProxy

# .strip('"') is going to save someone a lot of time when they accidentally wrap the env value
self.command_executor = os.getenv("WEBDRIVER_URL", 'http://browser-chrome:4444/wd/hub').strip('"')

# If any proxy settings are enabled, then we should set up the proxy object
proxy_args = {}
for k in self.selenium_proxy_settings_mappings:
v = os.getenv('webdriver_' + k, False)
if v:
proxy_args[k] = v.strip('"')

# Map back standard HTTP_ and HTTPS_PROXY to webDriver httpProxy/sslProxy
if not proxy_args.get('webdriver_httpProxy') and self.system_http_proxy:
proxy_args['httpProxy'] = self.system_http_proxy
if not proxy_args.get('webdriver_sslProxy') and self.system_https_proxy:
proxy_args['httpsProxy'] = self.system_https_proxy

# Allows overriding the proxy on a per-request basis
if proxy_override is not None:
proxy_args['httpProxy'] = proxy_override

if proxy_args:
self.proxy = SeleniumProxy(raw=proxy_args)

def run(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False):

from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import WebDriverException
# request_body, request_method unused for now, until some magic in the future happens.

# check env for WEBDRIVER_URL
self.driver = webdriver.Remote(
command_executor=self.command_executor,
desired_capabilities=DesiredCapabilities.CHROME,
proxy=self.proxy)

try:
self.driver.get(url)
except WebDriverException as e:
# Be sure we close the session window
self.quit()
raise

self.driver.set_window_size(1280, 1024)
self.driver.implicitly_wait(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)))

if self.webdriver_js_execute_code is not None:
self.driver.execute_script(self.webdriver_js_execute_code)
# Selenium doesn't automatically wait for actions as well as Playwright does, so wait again
self.driver.implicitly_wait(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)))

# @todo - how to check this? is it possible?
self.status_code = 200
# @todo somehow we should try to get this working for WebDriver
# raise EmptyReply(url=url, status_code=r.status_code)

# @todo - dom wait loaded?
time.sleep(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay)
self.content = self.driver.page_source
self.headers = {}

self.screenshot = self.driver.get_screenshot_as_png()

# Does the connection to the webdriver work? run a test connection.
def is_ready(self):
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities

self.driver = webdriver.Remote(
command_executor=self.command_executor,
desired_capabilities=DesiredCapabilities.CHROME)

# driver.quit() seems to cause better exceptions
self.quit()
return True

def quit(self):
if self.driver:
try:
self.driver.quit()
except Exception as e:
print("Content Fetcher > Exception in chrome shutdown/quit" + str(e))


# "html_requests" is listed as the default fetcher in store.py!
|
||||
class html_requests(Fetcher):
|
||||
fetcher_description = "Basic fast Plaintext/HTTP Client"
|
||||
|
||||
def __init__(self, proxy_override=None):
|
||||
self.proxy_override = proxy_override
|
||||
|
||||
def run(self,
|
||||
url,
|
||||
timeout,
|
||||
request_headers,
|
||||
request_body,
|
||||
request_method,
|
||||
ignore_status_codes=False,
|
||||
current_include_filters=None,
|
||||
is_binary=False):
|
||||
|
||||
# Make requests use a more modern looking user-agent
|
||||
if not {k.lower(): v for k, v in request_headers.items()}.get('user-agent', None):
|
||||
request_headers['User-Agent'] = os.getenv("DEFAULT_SETTINGS_HEADERS_USERAGENT",
|
||||
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36')
|
||||
|
||||
proxies = {}
|
||||
|
||||
# Allows override the proxy on a per-request basis
|
||||
if self.proxy_override:
|
||||
proxies = {'http': self.proxy_override, 'https': self.proxy_override, 'ftp': self.proxy_override}
|
||||
else:
|
||||
if self.system_http_proxy:
|
||||
proxies['http'] = self.system_http_proxy
|
||||
if self.system_https_proxy:
|
||||
proxies['https'] = self.system_https_proxy
|
||||
|
||||
r = requests.request(method=request_method,
|
||||
data=request_body,
|
||||
url=url,
|
||||
headers=request_headers,
|
||||
timeout=timeout,
|
||||
proxies=proxies,
|
||||
verify=False)
|
||||
|
||||
# If the response did not tell us what encoding format to expect, Then use chardet to override what `requests` thinks.
|
||||
# For example - some sites don't tell us it's utf-8, but return utf-8 content
|
||||
# This seems to not occur when using webdriver/selenium, it seems to detect the text encoding more reliably.
|
||||
# https://github.com/psf/requests/issues/1604 good info about requests encoding detection
|
||||
if not is_binary:
|
||||
# Don't run this for PDF (and requests identified as binary) takes a _long_ time
|
||||
if not r.headers.get('content-type') or not 'charset=' in r.headers.get('content-type'):
|
||||
encoding = chardet.detect(r.content)['encoding']
|
||||
if encoding:
|
||||
r.encoding = encoding
|
||||
|
||||
if not r.content or not len(r.content):
|
||||
raise EmptyReply(url=url, status_code=r.status_code)
|
||||
|
||||
# @todo test this
|
||||
# @todo maybe you really want to test zero-byte return pages?
|
||||
if r.status_code != 200 and not ignore_status_codes:
|
||||
# maybe check with content works?
|
||||
raise Non200ErrorCodeReceived(url=url, status_code=r.status_code, page_html=r.text)
|
||||
|
||||
self.status_code = r.status_code
|
||||
if is_binary:
|
||||
# Binary files just return their checksum until we add something smarter
|
||||
self.content = hashlib.md5(r.content).hexdigest()
|
||||
else:
|
||||
self.content = r.text
|
||||
|
||||
self.headers = r.headers
|
||||
self.raw_content = r.content
|
||||
|
||||
|
||||
# Decide which is the 'real' HTML webdriver, this is more a system-wide config
# rather than site-specific.
use_playwright_as_chrome_fetcher = os.getenv('PLAYWRIGHT_DRIVER_URL', False)
if use_playwright_as_chrome_fetcher:
    html_webdriver = base_html_playwright
else:
    html_webdriver = base_html_webdriver

changedetectionio/content_fetchers/__init__.py (new file, 45 lines)
@@ -0,0 +1,45 @@
import sys
from changedetectionio.strtobool import strtobool
from loguru import logger
from changedetectionio.content_fetchers.exceptions import BrowserStepsStepException
import os

# Visual Selector scraper - 'Button' is there because some sites have <button>OUT OF STOCK</button>.
visualselector_xpath_selectors = 'div,span,form,table,tbody,tr,td,a,p,ul,li,h1,h2,h3,h4,header,footer,section,article,aside,details,main,nav,section,summary,button'


# available_fetchers() will scan this implementation looking for anything starting with html_
# this information is used in the form selections
from changedetectionio.content_fetchers.requests import fetcher as html_requests


def available_fetchers():
    # See the if statement at the bottom of this file for how we switch between playwright and webdriver
    import inspect
    p = []
    for name, obj in inspect.getmembers(sys.modules[__name__], inspect.isclass):
        if inspect.isclass(obj):
            # @todo html_ is maybe better as fetcher_ or something
            # In this case, make sure to edit the default one in store.py and fetch_site_status.py
            if name.startswith('html_'):
                t = tuple([name, obj.fetcher_description])
                p.append(t)

    return p
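
# Hedged usage sketch (not part of the diff): how the (name, description) tuples from
# available_fetchers() could populate a form <select>; the import path mirrors this module.
#
#   from changedetectionio.content_fetchers import available_fetchers
#   for name, description in available_fetchers():
#       print(f"<option value='{name}'>{description}</option>")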


# Decide which is the 'real' HTML webdriver, this is more a system-wide config
# rather than site-specific.
use_playwright_as_chrome_fetcher = os.getenv('PLAYWRIGHT_DRIVER_URL', False)
if use_playwright_as_chrome_fetcher:
    # @note - For now, browser steps always uses playwright
    if not strtobool(os.getenv('FAST_PUPPETEER_CHROME_FETCHER', 'False')):
        logger.debug('Using Playwright library as fetcher')
        from .playwright import fetcher as html_webdriver
    else:
        logger.debug('Using direct Python Puppeteer library as fetcher')
        from .puppeteer import fetcher as html_webdriver

else:
    logger.debug("Falling back to selenium as fetcher")
    from .webdriver_selenium import fetcher as html_webdriver
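
# Illustrative behaviour of the selection above (endpoint values are examples, not from the diff):
#   PLAYWRIGHT_DRIVER_URL=ws://playwright-chrome:3000 with FAST_PUPPETEER_CHROME_FETCHER unset
#       -> the Playwright fetcher is used
#   PLAYWRIGHT_DRIVER_URL=ws://playwright-chrome:3000 with FAST_PUPPETEER_CHROME_FETCHER=True
#       -> the direct Python Puppeteer fetcher is used
#   PLAYWRIGHT_DRIVER_URL unset -> falls back to the Selenium fetcher
# Note these env vars must be set before this module is first imported.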

changedetectionio/content_fetchers/base.py (new file, 179 lines)
@@ -0,0 +1,179 @@
import os
from abc import abstractmethod
from loguru import logger

from changedetectionio.content_fetchers import BrowserStepsStepException


def manage_user_agent(headers, current_ua=''):
    """
    Basic setting of user-agent

    NOTE!!!!!! The service that does the actual Chrome fetching should handle any anti-robot techniques
    THERE ARE MANY WAYS THAT IT CAN BE DETECTED AS A ROBOT!!
    This does not take care of
    - Scraping of 'navigator' (platform, productSub, vendor, oscpu etc etc) browser object (navigator.appVersion) etc
    - TCP/IP fingerprint JA3 etc
    - Graphic rendering fingerprinting
    - Your IP being obviously in a pool of bad actors
    - Too many requests
    - Scraping of Sec-CH-UA browser replies (thanks google!!)
    - Scraping of ServiceWorker, new window calls etc

    See https://filipvitas.medium.com/how-to-set-user-agent-header-with-puppeteer-js-and-not-fail-28c7a02165da
    Puppeteer requests https://github.com/dgtlmoon/pyppeteerstealth

    :param headers: the custom request headers, which may contain a 'User-Agent'
    :param current_ua: the user-agent the browser currently reports
    :return:
    """
    # Ask it what the user agent is - if it's obviously ChromeHeadless, switch it to the default
    ua_in_custom_headers = headers.get('User-Agent')
    if ua_in_custom_headers:
        return ua_in_custom_headers

    if not ua_in_custom_headers and current_ua:
        current_ua = current_ua.replace('HeadlessChrome', 'Chrome')
        return current_ua

    return None
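
# Behaviour sketch derived from the logic above (values are illustrative):
#   manage_user_agent({'User-Agent': 'my-bot/1.0'})  -> 'my-bot/1.0' (a custom header always wins)
#   manage_user_agent({}, current_ua='Mozilla/5.0 ... HeadlessChrome/117.0.0.0 Safari/537.36')
#       -> the same string with 'HeadlessChrome' rewritten to 'Chrome'
#   manage_user_agent({})  -> None (leave the browser default untouched)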


class Fetcher():
    browser_connection_is_custom = None
    browser_connection_url = None
    browser_steps = None
    browser_steps_screenshot_path = None
    content = None
    error = None
    fetcher_description = "No description"
    headers = {}
    instock_data = None
    instock_data_js = ""
    status_code = None
    webdriver_js_execute_code = None
    xpath_data = None
    xpath_element_js = ""

    # Will be needed in the future by the VisualSelector, always get this where possible.
    screenshot = False
    system_http_proxy = os.getenv('HTTP_PROXY')
    system_https_proxy = os.getenv('HTTPS_PROXY')

    # Time ON TOP of the system-defined env minimum time
    render_extract_delay = 0

    def __init__(self):
        import importlib.resources
        self.xpath_element_js = importlib.resources.files("changedetectionio.content_fetchers.res").joinpath('xpath_element_scraper.js').read_text(encoding='utf-8')
        self.instock_data_js = importlib.resources.files("changedetectionio.content_fetchers.res").joinpath('stock-not-in-stock.js').read_text(encoding='utf-8')

    @abstractmethod
    def get_error(self):
        return self.error

    @abstractmethod
    def run(self,
            url,
            timeout,
            request_headers,
            request_body,
            request_method,
            ignore_status_codes=False,
            current_include_filters=None,
            is_binary=False,
            empty_pages_are_a_change=False):
        # Should set self.error, self.status_code and self.content
        pass

    @abstractmethod
    def quit(self):
        return

    @abstractmethod
    def get_last_status_code(self):
        return self.status_code

    @abstractmethod
    def screenshot_step(self, step_n):
        if self.browser_steps_screenshot_path and not os.path.isdir(self.browser_steps_screenshot_path):
            logger.debug(f"> Creating data dir {self.browser_steps_screenshot_path}")
            os.mkdir(self.browser_steps_screenshot_path)
        return None

    # Return true/false if this checker is ready to run, in case it needs to do some special config check etc
    @abstractmethod
    def is_ready(self):
        return True

    def get_all_headers(self):
        """
        Get all headers but ensure all keys are lowercase
        :return:
        """
        return {k.lower(): v for k, v in self.headers.items()}

    def browser_steps_get_valid_steps(self):
        if self.browser_steps is not None and len(self.browser_steps):
            valid_steps = list(filter(
                lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one'),
                self.browser_steps))

            # Just in case they selected 'Goto site' by accident with older JS
            if valid_steps and valid_steps[0]['operation'] == 'Goto site':
                del(valid_steps[0])

            return valid_steps

        return None
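
    # Shape sketch of self.browser_steps as consumed above - the field names come from this
    # method, but the example operation names and values are illustrative only:
    #   [{'operation': 'Click element', 'selector': 'button#accept', 'optional_value': ''},
    #    {'operation': 'Enter text in field', 'selector': 'input[name="q"]', 'optional_value': '{{ now }}'}]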

    def iterate_browser_steps(self, start_url=None):
        from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface
        from playwright._impl._errors import TimeoutError, Error
        from changedetectionio.safe_jinja import render as jinja_render
        step_n = 0

        if self.browser_steps is not None and len(self.browser_steps):
            interface = steppable_browser_interface(start_url=start_url)
            interface.page = self.page
            valid_steps = self.browser_steps_get_valid_steps()

            for step in valid_steps:
                step_n += 1
                logger.debug(f">> Iterating check - browser Step n {step_n} - {step['operation']}...")
                self.screenshot_step("before-" + str(step_n))
                self.save_step_html("before-" + str(step_n))
                try:
                    optional_value = step['optional_value']
                    selector = step['selector']
                    # Support for jinja2 templates in step values, with the date module added
                    if '{%' in step['optional_value'] or '{{' in step['optional_value']:
                        optional_value = jinja_render(template_str=step['optional_value'])
                    if '{%' in step['selector'] or '{{' in step['selector']:
                        selector = jinja_render(template_str=step['selector'])

                    getattr(interface, "call_action")(action_name=step['operation'],
                                                      selector=selector,
                                                      optional_value=optional_value)
                    self.screenshot_step(step_n)
                    self.save_step_html(step_n)
                except (Error, TimeoutError) as e:
                    logger.debug(str(e))
                    # Stop processing here
                    raise BrowserStepsStepException(step_n=step_n, original_e=e)

    # It's always good to reset these
    def delete_browser_steps_screenshots(self):
        import glob
        if self.browser_steps_screenshot_path is not None:
            dest = os.path.join(self.browser_steps_screenshot_path, 'step_*.jpeg')
            files = glob.glob(dest)
            for f in files:
                if os.path.isfile(f):
                    os.unlink(f)

    def save_step_html(self, step_n):
        if self.browser_steps_screenshot_path and not os.path.isdir(self.browser_steps_screenshot_path):
            logger.debug(f"> Creating data dir {self.browser_steps_screenshot_path}")
            os.mkdir(self.browser_steps_screenshot_path)
        pass

changedetectionio/content_fetchers/exceptions/__init__.py (new file, 97 lines)
@@ -0,0 +1,97 @@
from loguru import logger


class Non200ErrorCodeReceived(Exception):
    def __init__(self, status_code, url, screenshot=None, xpath_data=None, page_html=None):
        # Set this so we can use it in other parts of the app
        self.status_code = status_code
        self.url = url
        self.screenshot = screenshot
        self.xpath_data = xpath_data
        self.page_text = None

        if page_html:
            from changedetectionio import html_tools
            self.page_text = html_tools.html_to_text(page_html)
        return


class checksumFromPreviousCheckWasTheSame(Exception):
    def __init__(self):
        return


class JSActionExceptions(Exception):
    def __init__(self, status_code, url, screenshot, message=''):
        self.status_code = status_code
        self.url = url
        self.screenshot = screenshot
        self.message = message
        return


class BrowserConnectError(Exception):
    msg = ''

    def __init__(self, msg):
        self.msg = msg
        logger.error(f"Browser connection error {msg}")
        return


class BrowserFetchTimedOut(Exception):
    msg = ''

    def __init__(self, msg):
        self.msg = msg
        logger.error(f"Browser processing took too long - {msg}")
        return


class BrowserStepsStepException(Exception):
    def __init__(self, step_n, original_e):
        self.step_n = step_n
        self.original_e = original_e
        logger.debug(f"Browser Steps exception at step {self.step_n} {str(original_e)}")
        return


# @todo - make a base Exception class that announces via logger()
class PageUnloadable(Exception):
    def __init__(self, status_code=None, url='', message='', screenshot=False):
        # Set this so we can use it in other parts of the app
        self.status_code = status_code
        self.url = url
        self.screenshot = screenshot
        self.message = message
        return


class BrowserStepsInUnsupportedFetcher(Exception):
    def __init__(self, url):
        self.url = url
        return


class EmptyReply(Exception):
    def __init__(self, status_code, url, screenshot=None):
        # Set this so we can use it in other parts of the app
        self.status_code = status_code
        self.url = url
        self.screenshot = screenshot
        return


class ScreenshotUnavailable(Exception):
    def __init__(self, status_code, url, page_html=None):
        # Set this so we can use it in other parts of the app
        self.status_code = status_code
        self.url = url
        if page_html:
            from changedetectionio.html_tools import html_to_text
            self.page_text = html_to_text(page_html)
        return


class ReplyWithContentButNoText(Exception):
    def __init__(self, status_code, url, screenshot=None, has_filters=False, html_content='', xpath_data=None):
        # Set this so we can use it in other parts of the app
        self.status_code = status_code
        self.url = url
        self.screenshot = screenshot
        self.has_filters = has_filters
        self.html_content = html_content
        self.xpath_data = xpath_data
        return
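

# Hedged usage sketch: how a caller might consume these exception attributes - the names
# below come from this file, the surrounding fetcher/worker code is illustrative only:
#
#   try:
#       some_fetcher.run(url=url, timeout=20, request_headers={}, request_body=None, request_method='GET')
#   except Non200ErrorCodeReceived as e:
#       print(e.status_code, e.page_text)  # page_text was extracted via html_to_text()
#   except EmptyReply as e:
#       print(f"Empty response from {e.url}")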

changedetectionio/content_fetchers/helpers.py (new file, 104 lines)
@@ -0,0 +1,104 @@

# Pages with a vertical height longer than this will use the 'stitch together' method.

# - Many GPUs have a max texture size of 16384x16384px (or lower on older devices).
# - If a page is taller than ~8000–10000px, it risks exceeding GPU memory limits.
# - This is especially important on headless Chromium, where Playwright may fail to allocate a massive full-page buffer.

# The size at which we will switch to the stitching method
SCREENSHOT_SIZE_STITCH_THRESHOLD = 8000
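
# With the default threshold above, the total capture is capped at
# SCREENSHOT_SIZE_STITCH_THRESHOLD * 4 = 32000px (see MAX_TOTAL_HEIGHT in
# capture_stitched_together_full_page() below), captured in 4000px viewport chunks.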

from loguru import logger


def capture_stitched_together_full_page(page):
    import io
    import os
    import time
    from PIL import Image, ImageDraw, ImageFont

    MAX_TOTAL_HEIGHT = SCREENSHOT_SIZE_STITCH_THRESHOLD * 4  # Maximum total height for the final image (when in stitch mode)
    MAX_CHUNK_HEIGHT = 4000  # Height per screenshot chunk
    WARNING_TEXT_HEIGHT = 20  # Height of the warning text overlay

    # Save the original viewport size
    original_viewport = page.viewport_size
    now = time.time()

    try:
        viewport = page.viewport_size
        page_height = page.evaluate("document.documentElement.scrollHeight")

        # Limit the total capture height
        capture_height = min(page_height, MAX_TOTAL_HEIGHT)

        images = []
        total_captured_height = 0

        for offset in range(0, capture_height, MAX_CHUNK_HEIGHT):
            # Ensure we do not exceed the total height limit
            chunk_height = min(MAX_CHUNK_HEIGHT, MAX_TOTAL_HEIGHT - total_captured_height)

            # Adjust viewport size for this chunk
            page.set_viewport_size({"width": viewport["width"], "height": chunk_height})

            # Scroll to the correct position
            page.evaluate(f"window.scrollTo(0, {offset})")

            # Capture screenshot chunk
            screenshot_bytes = page.screenshot(type='jpeg', quality=int(os.getenv("SCREENSHOT_QUALITY", 30)))
            images.append(Image.open(io.BytesIO(screenshot_bytes)))

            total_captured_height += chunk_height

            # Stop if we reached the maximum total height
            if total_captured_height >= MAX_TOTAL_HEIGHT:
                break

        # Create the final stitched image
        stitched_image = Image.new('RGB', (viewport["width"], total_captured_height))
        y_offset = 0

        # Stitch the screenshot chunks together
        for img in images:
            stitched_image.paste(img, (0, y_offset))
            y_offset += img.height

        logger.debug(f"Screenshot stitched together in {time.time()-now:.2f}s")

        # Overlay warning text if the screenshot was trimmed
        if page_height > MAX_TOTAL_HEIGHT:
            draw = ImageDraw.Draw(stitched_image)
            warning_text = f"WARNING: Screenshot was {page_height}px but trimmed to {MAX_TOTAL_HEIGHT}px because it was too long"

            # Load font (default system font if Arial is unavailable)
            try:
                font = ImageFont.truetype("arial.ttf", WARNING_TEXT_HEIGHT)  # Arial (Windows/Mac)
            except IOError:
                font = ImageFont.load_default()  # Default font if Arial not found

            # Get text bounding box (correct method for newer Pillow versions)
            text_bbox = draw.textbbox((0, 0), warning_text, font=font)
            text_width = text_bbox[2] - text_bbox[0]  # Calculate text width
            text_height = text_bbox[3] - text_bbox[1]  # Calculate text height

            # Define background rectangle (top of the image)
            draw.rectangle([(0, 0), (viewport["width"], WARNING_TEXT_HEIGHT)], fill="white")

            # Center text horizontally within the warning area
            text_x = (viewport["width"] - text_width) // 2
            text_y = (WARNING_TEXT_HEIGHT - text_height) // 2

            # Draw the warning text in red
            draw.text((text_x, text_y), warning_text, fill="red", font=font)

        # Save or return the final image
        output = io.BytesIO()
        stitched_image.save(output, format="JPEG", quality=int(os.getenv("SCREENSHOT_QUALITY", 30)))
        screenshot = output.getvalue()

    finally:
        # Restore the original viewport size
        page.set_viewport_size(original_viewport)

    return screenshot
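

# Hedged usage sketch - inside a Playwright sync context, mirroring how playwright.py calls this:
#
#   full_height = page.evaluate("document.documentElement.scrollHeight")
#   if full_height >= SCREENSHOT_SIZE_STITCH_THRESHOLD:
#       screenshot_bytes = capture_stitched_together_full_page(page)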

changedetectionio/content_fetchers/playwright.py (new file, 221 lines)
@@ -0,0 +1,221 @@
import json
import os
from urllib.parse import urlparse

from loguru import logger

from changedetectionio.content_fetchers.helpers import capture_stitched_together_full_page, SCREENSHOT_SIZE_STITCH_THRESHOLD
from changedetectionio.content_fetchers.base import Fetcher, manage_user_agent
from changedetectionio.content_fetchers.exceptions import PageUnloadable, Non200ErrorCodeReceived, EmptyReply, ScreenshotUnavailable


class fetcher(Fetcher):
    fetcher_description = "Playwright {}/Javascript".format(
        os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').capitalize()
    )
    if os.getenv("PLAYWRIGHT_DRIVER_URL"):
        fetcher_description += " via '{}'".format(os.getenv("PLAYWRIGHT_DRIVER_URL"))

    browser_type = ''
    command_executor = ''

    # Configs for Proxy setup
    # In the ENV vars, these are prefixed with "playwright_proxy_", so for example "playwright_proxy_server"
    playwright_proxy_settings_mappings = ['bypass', 'server', 'username', 'password']

    proxy = None

    def __init__(self, proxy_override=None, custom_browser_connection_url=None):
        super().__init__()

        self.browser_type = os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').strip('"')

        if custom_browser_connection_url:
            self.browser_connection_is_custom = True
            self.browser_connection_url = custom_browser_connection_url
        else:
            # Fall back to fetching from the system config
            # .strip('"') is going to save someone a lot of time when they accidentally wrap the env value in quotes
            self.browser_connection_url = os.getenv("PLAYWRIGHT_DRIVER_URL", 'ws://playwright-chrome:3000').strip('"')

        # If any proxy settings are enabled, then we should set up the proxy object
        proxy_args = {}
        for k in self.playwright_proxy_settings_mappings:
            v = os.getenv('playwright_proxy_' + k, False)
            if v:
                proxy_args[k] = v.strip('"')

        if proxy_args:
            self.proxy = proxy_args

        # allow per-watch proxy selection override
        if proxy_override:
            self.proxy = {'server': proxy_override}

        if self.proxy:
            # Playwright needs separate username and password values
            parsed = urlparse(self.proxy.get('server'))
            if parsed.username:
                self.proxy['username'] = parsed.username
                self.proxy['password'] = parsed.password

    def screenshot_step(self, step_n=''):
        super().screenshot_step(step_n=step_n)
        screenshot = self.page.screenshot(type='jpeg', full_page=True, quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))

        if self.browser_steps_screenshot_path is not None:
            destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.jpeg'.format(step_n))
            logger.debug(f"Saving step screenshot to {destination}")
            with open(destination, 'wb') as f:
                f.write(screenshot)

    def save_step_html(self, step_n):
        super().save_step_html(step_n=step_n)
        content = self.page.content()
        destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.html'.format(step_n))
        logger.debug(f"Saving step HTML to {destination}")
        with open(destination, 'w') as f:
            f.write(content)

    def run(self,
            url,
            timeout,
            request_headers,
            request_body,
            request_method,
            ignore_status_codes=False,
            current_include_filters=None,
            is_binary=False,
            empty_pages_are_a_change=False):

        from playwright.sync_api import sync_playwright
        import playwright._impl._errors
        from changedetectionio.content_fetchers import visualselector_xpath_selectors
        import time
        self.delete_browser_steps_screenshots()
        response = None

        with sync_playwright() as p:
            browser_type = getattr(p, self.browser_type)

            # Seemed to cause a connection Exception even though I can see it connect
            # self.browser = browser_type.connect(self.command_executor, timeout=timeout*1000)
            # 60,000 connection timeout only
            browser = browser_type.connect_over_cdp(self.browser_connection_url, timeout=60000)

            # SOCKS5 with authentication is not supported (yet)
            # https://github.com/microsoft/playwright/issues/10567

            # Set the user agent to prevent Cloudflare from blocking the browser
            # Use the default one configured in the App.py model that's passed from fetch_site_status.py
            context = browser.new_context(
                accept_downloads=False,  # Should never be needed
                bypass_csp=True,  # This is needed to enable JavaScript execution on GitHub and others
                extra_http_headers=request_headers,
                ignore_https_errors=True,
                proxy=self.proxy,
                service_workers=os.getenv('PLAYWRIGHT_SERVICE_WORKERS', 'allow'),  # Should be `allow` or `block` - sites like YouTube can transmit large amounts of data via Service Workers
                user_agent=manage_user_agent(headers=request_headers),
            )

            self.page = context.new_page()

            # Listen for all console events and handle errors
            self.page.on("console", lambda msg: print(f"Playwright console: Watch URL: {url} {msg.type}: {msg.text} {msg.args}"))

            # Re-use as much code from browser steps as possible so it's the same
            from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface
            browsersteps_interface = steppable_browser_interface(start_url=url)
            browsersteps_interface.page = self.page

            response = browsersteps_interface.action_goto_url(value=url)

            if response is None:
                context.close()
                browser.close()
                logger.debug("Content Fetcher > Response object from the browser communication was none")
                raise EmptyReply(url=url, status_code=None)

            # (read the headers only after confirming we actually got a response)
            self.headers = response.all_headers()

            try:
                if self.webdriver_js_execute_code is not None and len(self.webdriver_js_execute_code):
                    browsersteps_interface.action_execute_js(value=self.webdriver_js_execute_code, selector=None)
            except playwright._impl._errors.TimeoutError as e:
                context.close()
                browser.close()
                # This can be ok, we will try to grab what we could retrieve
                pass
            except Exception as e:
                logger.debug(f"Content Fetcher > Other exception when executing custom JS code {str(e)}")
                context.close()
                browser.close()
                raise PageUnloadable(url=url, status_code=None, message=str(e))

            extra_wait = int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay
            self.page.wait_for_timeout(extra_wait * 1000)

            try:
                self.status_code = response.status
            except Exception as e:
                # https://github.com/dgtlmoon/changedetection.io/discussions/2122#discussioncomment-8241962
                logger.critical(f"Response from the browser/Playwright did not have a status_code! Response follows.")
                logger.critical(response)
                context.close()
                browser.close()
                raise PageUnloadable(url=url, status_code=None, message=str(e))

            if self.status_code != 200 and not ignore_status_codes:
                screenshot = self.page.screenshot(type='jpeg', full_page=True,
                                                  quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))

                raise Non200ErrorCodeReceived(url=url, status_code=self.status_code, screenshot=screenshot)

            if not empty_pages_are_a_change and len(self.page.content().strip()) == 0:
                logger.debug("Content Fetcher > Content was empty, empty_pages_are_a_change = False")
                context.close()
                browser.close()
                raise EmptyReply(url=url, status_code=response.status)

            # Run Browser Steps here
            if self.browser_steps_get_valid_steps():
                self.iterate_browser_steps(start_url=url)

            self.page.wait_for_timeout(extra_wait * 1000)

            now = time.time()
            # So we can find an element on the page where its selector was entered manually (maybe not xPath etc)
            if current_include_filters is not None:
                self.page.evaluate("var include_filters={}".format(json.dumps(current_include_filters)))
            else:
                self.page.evaluate("var include_filters=''")

            self.xpath_data = self.page.evaluate(
                "async () => {" + self.xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors) + "}")
            self.instock_data = self.page.evaluate("async () => {" + self.instock_data_js + "}")

            self.content = self.page.content()
            logger.debug(f"Time to scrape xpath element data in browser {time.time() - now:.2f}s")

            # Bug 3 in Playwright screenshot handling
            # Some bug where it gives the wrong screenshot size, but making a request with the clip set first seems to solve it
            # JPEG is better here because the screenshots can be very very large

            # Screenshots also travel via the ws:// (websocket), meaning that the binary data is base64 encoded,
            # which will significantly increase the IO size between the server and client; it's recommended to use the lowest
            # acceptable screenshot quality here
            try:
                # The actual screenshot - this is always base64 and needs decoding! horrible! huge CPU usage
                full_height = self.page.evaluate("document.documentElement.scrollHeight")

                if full_height >= SCREENSHOT_SIZE_STITCH_THRESHOLD:
                    logger.warning(
                        f"Page full height: {full_height}px is longer than {SCREENSHOT_SIZE_STITCH_THRESHOLD}px, using 'stitched screenshot method'.")
                    self.screenshot = capture_stitched_together_full_page(self.page)
                else:
                    self.screenshot = self.page.screenshot(type='jpeg', full_page=True, quality=int(os.getenv("SCREENSHOT_QUALITY", 30)))

            except Exception as e:
                # It's likely the screenshot was too long/big and something crashed
                raise ScreenshotUnavailable(url=url, status_code=self.status_code)
            finally:
                context.close()
                browser.close()

changedetectionio/content_fetchers/puppeteer.py (new file, 272 lines)
@@ -0,0 +1,272 @@
import asyncio
import json
import os
import websockets.exceptions
from urllib.parse import urlparse

from loguru import logger

from changedetectionio.content_fetchers.base import Fetcher, manage_user_agent
from changedetectionio.content_fetchers.exceptions import PageUnloadable, Non200ErrorCodeReceived, EmptyReply, BrowserFetchTimedOut, BrowserConnectError


class fetcher(Fetcher):
    fetcher_description = "Puppeteer/direct {}/Javascript".format(
        os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').capitalize()
    )
    if os.getenv("PLAYWRIGHT_DRIVER_URL"):
        fetcher_description += " via '{}'".format(os.getenv("PLAYWRIGHT_DRIVER_URL"))

    browser_type = ''
    command_executor = ''

    proxy = None

    def __init__(self, proxy_override=None, custom_browser_connection_url=None):
        super().__init__()

        if custom_browser_connection_url:
            self.browser_connection_is_custom = True
            self.browser_connection_url = custom_browser_connection_url
        else:
            # Fall back to fetching from the system config
            # .strip('"') is going to save someone a lot of time when they accidentally wrap the env value in quotes
            self.browser_connection_url = os.getenv("PLAYWRIGHT_DRIVER_URL", 'ws://playwright-chrome:3000').strip('"')

        # allow per-watch proxy selection override
        # @todo check global too?
        if proxy_override:
            # Playwright needs separate username and password values
            parsed = urlparse(proxy_override)
            if parsed:
                self.proxy = {'username': parsed.username, 'password': parsed.password}
                # Add the proxy server chrome start option; the username and password never get added here
                # (they always go in via `await self.page.authenticate(self.proxy)`)

                # @todo filter some injection attack?
                # check scheme when no scheme
                proxy_url = parsed.scheme + "://" if parsed.scheme else 'http://'
                r = "?" if '?' not in self.browser_connection_url else '&'
                port = ":" + str(parsed.port) if parsed.port else ''
                q = "?" + parsed.query if parsed.query else ''
                proxy_url += f"{parsed.hostname}{port}{parsed.path}{q}"
                self.browser_connection_url += f"{r}--proxy-server={proxy_url}"
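                # Illustrative composition (values are examples only):
                #   proxy_override = 'http://user:pass@proxy.example:3128'
                #   'ws://playwright-chrome:3000' then becomes
                #   'ws://playwright-chrome:3000?--proxy-server=http://proxy.example:3128'
                # while the username/password travel separately via page.authenticate() in fetch_page()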

    # def screenshot_step(self, step_n=''):
    #     screenshot = self.page.screenshot(type='jpeg', full_page=True, quality=85)
    #
    #     if self.browser_steps_screenshot_path is not None:
    #         destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.jpeg'.format(step_n))
    #         logger.debug(f"Saving step screenshot to {destination}")
    #         with open(destination, 'wb') as f:
    #             f.write(screenshot)
    #
    # def save_step_html(self, step_n):
    #     content = self.page.content()
    #     destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.html'.format(step_n))
    #     logger.debug(f"Saving step HTML to {destination}")
    #     with open(destination, 'w') as f:
    #         f.write(content)

    async def fetch_page(self,
                         url,
                         timeout,
                         request_headers,
                         request_body,
                         request_method,
                         ignore_status_codes,
                         current_include_filters,
                         is_binary,
                         empty_pages_are_a_change
                         ):

        from changedetectionio.content_fetchers import visualselector_xpath_selectors
        self.delete_browser_steps_screenshots()
        extra_wait = int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay

        from pyppeteer import Pyppeteer
        pyppeteer_instance = Pyppeteer()

        # Connect directly using the specified browser_ws_endpoint
        # @todo timeout
        try:
            browser = await pyppeteer_instance.connect(browserWSEndpoint=self.browser_connection_url,
                                                       ignoreHTTPSErrors=True
                                                       )
        except websockets.exceptions.InvalidStatusCode as e:
            raise BrowserConnectError(msg=f"Error while trying to connect the browser, Code {e.status_code} (check your access, whitelist IP, password etc)")
        except websockets.exceptions.InvalidURI:
            raise BrowserConnectError(msg=f"Error connecting to the browser, check your browser connection address (should be ws:// or wss://)")
        except Exception as e:
            raise BrowserConnectError(msg=f"Error connecting to the browser {str(e)}")

        # Better is to launch chrome with the URL as arg
        # non-headless - newPage() will launch an extra tab/window, .browser should already contain 1 page/tab
        # headless - ask for a new page
        # (use the first existing page rather than its count - `len(pages)` here would assign an int, not a page)
        self.page = (pages := await browser.pages) and pages[0] or await browser.newPage()

        try:
            from pyppeteerstealth import inject_evasions_into_page
        except ImportError:
            logger.debug("pyppeteerstealth module not available, skipping")
            pass
        else:
            # I tried hooking events via self.page.on(Events.Page.DOMContentLoaded, inject_evasions_requiring_obj_to_page)
            # But I could never get it to fire reliably, so we just inject it straight after
            await inject_evasions_into_page(self.page)

        # This user agent is similar to what was used when tweaking the evasions in inject_evasions_into_page(..)
        user_agent = None
        if request_headers and request_headers.get('User-Agent'):
            # request_headers should now be a CaseInsensitiveDict
            # Remove it so it's not sent again with headers after
            user_agent = request_headers.pop('User-Agent').strip()
            await self.page.setUserAgent(user_agent)

        if not user_agent:
            # Attempt to strip 'HeadlessChrome' etc
            await self.page.setUserAgent(manage_user_agent(headers=request_headers, current_ua=await self.page.evaluate('navigator.userAgent')))

        await self.page.setBypassCSP(True)
        if request_headers:
            await self.page.setExtraHTTPHeaders(request_headers)

        # SOCKS5 with authentication is not supported (yet)
        # https://github.com/microsoft/playwright/issues/10567
        self.page.setDefaultNavigationTimeout(0)
        await self.page.setCacheEnabled(True)
        if self.proxy and self.proxy.get('username'):
            # Setting the Proxy-Authentication header is deprecated, and doing so can trigger header change errors from Puppeteer
            # https://github.com/puppeteer/puppeteer/issues/676 ?
            # https://help.brightdata.com/hc/en-us/articles/12632549957649-Proxy-Manager-How-to-Guides#h_01HAKWR4Q0AFS8RZTNYWRDFJC2
            # https://cri.dev/posts/2020-03-30-How-to-solve-Puppeteer-Chrome-Error-ERR_INVALID_ARGUMENT/
            await self.page.authenticate(self.proxy)

        # Re-use as much code from browser steps as possible so it's the same
        # from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface

        # not yet used here, we fall back to playwright when browsersteps is required
        # browsersteps_interface = steppable_browser_interface()
        # browsersteps_interface.page = self.page

        response = await self.page.goto(url, waitUntil="load")

        if response is None:
            await self.page.close()
            await browser.close()
            logger.warning("Content Fetcher > Response object was none (as in, the response from the browser was empty, not just the content)")
            raise EmptyReply(url=url, status_code=None)

        self.headers = response.headers

        try:
            if self.webdriver_js_execute_code is not None and len(self.webdriver_js_execute_code):
                await self.page.evaluate(self.webdriver_js_execute_code)
        except Exception as e:
            logger.warning("Got exception when running evaluate on custom JS code")
            logger.error(str(e))
            await self.page.close()
            await browser.close()
            # This can be ok, we will try to grab what we could retrieve
            raise PageUnloadable(url=url, status_code=None, message=str(e))

        try:
            self.status_code = response.status
        except Exception as e:
            # https://github.com/dgtlmoon/changedetection.io/discussions/2122#discussioncomment-8241962
            logger.critical(f"Response from the browser/Puppeteer did not have a status_code! Response follows.")
            logger.critical(response)
            await self.page.close()
            await browser.close()
            raise PageUnloadable(url=url, status_code=None, message=str(e))

        if self.status_code != 200 and not ignore_status_codes:
            screenshot = await self.page.screenshot(type_='jpeg',
                                                    fullPage=True,
                                                    quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))

            raise Non200ErrorCodeReceived(url=url, status_code=self.status_code, screenshot=screenshot)
        content = await self.page.content

        if not empty_pages_are_a_change and len(content.strip()) == 0:
            logger.error("Content Fetcher > Content was empty (empty_pages_are_a_change is False), closing browsers")
            await self.page.close()
            await browser.close()
            raise EmptyReply(url=url, status_code=response.status)

        # Run Browser Steps here
        # @todo not yet supported, we switch to playwright in this case
        # if self.browser_steps_get_valid_steps():
        #     self.iterate_browser_steps()

        await asyncio.sleep(1 + extra_wait)

        # So we can find an element on the page where its selector was entered manually (maybe not xPath etc)
        # Setup the xPath/VisualSelector scraper
        if current_include_filters is not None:
            js = json.dumps(current_include_filters)
            await self.page.evaluate(f"var include_filters={js}")
        else:
            await self.page.evaluate("var include_filters=''")

        self.xpath_data = await self.page.evaluate(
            "async () => {" + self.xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors) + "}")
        self.instock_data = await self.page.evaluate("async () => {" + self.instock_data_js + "}")

        self.content = await self.page.content
        # Bug 3 in Playwright screenshot handling
        # Some bug where it gives the wrong screenshot size, but making a request with the clip set first seems to solve it
        # JPEG is better here because the screenshots can be very very large

        # Screenshots also travel via the ws:// (websocket), meaning that the binary data is base64 encoded,
        # which will significantly increase the IO size between the server and client; it's recommended to use the lowest
        # acceptable screenshot quality here
        try:
            self.screenshot = await self.page.screenshot(type_='jpeg',
                                                         fullPage=True,
                                                         quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))
        except Exception as e:
            logger.error("Error fetching screenshot")
            # May fail on very large pages with 'WARNING: tile memory limits exceeded, some content may not draw'
            # @todo after text extract, we can place some overlay text with red background to say 'cropped'
            logger.error('ERROR: content-fetcher page was maybe too large for a screenshot, reverting to viewport-only screenshot')
            try:
                self.screenshot = await self.page.screenshot(type_='jpeg',
                                                             fullPage=False,
                                                             quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))
            except Exception as e:
                logger.error('ERROR: Failed to get viewport-only reduced screenshot :(')
                pass
        finally:
            # It's good to log here in the case that the browser crashes on shutting down but we still get the data we need
            logger.success(f"Fetching '{url}' complete, closing page")
            await self.page.close()
            logger.success(f"Fetching '{url}' complete, closing browser")
            await browser.close()
            logger.success(f"Fetching '{url}' complete, exiting puppeteer fetch.")

    async def main(self, **kwargs):
        await self.fetch_page(**kwargs)

    def run(self, url, timeout, request_headers, request_body, request_method, ignore_status_codes=False,
            current_include_filters=None, is_binary=False, empty_pages_are_a_change=False):

        # @todo make update_worker async, which could run any of these content_fetchers within memory and time constraints
        max_time = int(os.getenv('PUPPETEER_MAX_PROCESSING_TIMEOUT_SECONDS', 180))  # env values are strings, wait_for() needs a number

        # This will work in 3.10 but not >= 3.11 because 3.11 wants tasks only
        try:
            asyncio.run(asyncio.wait_for(self.main(
                url=url,
                timeout=timeout,
                request_headers=request_headers,
                request_body=request_body,
                request_method=request_method,
                ignore_status_codes=ignore_status_codes,
                current_include_filters=current_include_filters,
                is_binary=is_binary,
                empty_pages_are_a_change=empty_pages_are_a_change
            ), timeout=max_time))
        except asyncio.TimeoutError:
            raise BrowserFetchTimedOut(msg=f"Browser connected but was unable to process the page in {max_time} seconds.")

changedetectionio/content_fetchers/requests.py (new file, 98 lines)
@@ -0,0 +1,98 @@
from loguru import logger
import hashlib
import os
from changedetectionio import strtobool
from changedetectionio.content_fetchers.exceptions import BrowserStepsInUnsupportedFetcher, EmptyReply, Non200ErrorCodeReceived
from changedetectionio.content_fetchers.base import Fetcher


# "html_requests" is listed as the default fetcher in store.py!
class fetcher(Fetcher):
    fetcher_description = "Basic fast Plaintext/HTTP Client"

    def __init__(self, proxy_override=None, custom_browser_connection_url=None):
        super().__init__()
        self.proxy_override = proxy_override
        # browser_connection_url is None because it's always 'launched locally'

    def run(self,
            url,
            timeout,
            request_headers,
            request_body,
            request_method,
            ignore_status_codes=False,
            current_include_filters=None,
            is_binary=False,
            empty_pages_are_a_change=False):

        import chardet
        import requests

        if self.browser_steps_get_valid_steps():
            raise BrowserStepsInUnsupportedFetcher(url=url)

        proxies = {}

        # Allows overriding the proxy on a per-request basis

        # https://requests.readthedocs.io/en/latest/user/advanced/#socks
        # Should also work with `socks5://user:pass@host:port` type syntax.

        if self.proxy_override:
            proxies = {'http': self.proxy_override, 'https': self.proxy_override, 'ftp': self.proxy_override}
        else:
            if self.system_http_proxy:
                proxies['http'] = self.system_http_proxy
            if self.system_https_proxy:
                proxies['https'] = self.system_https_proxy

        session = requests.Session()

        if strtobool(os.getenv('ALLOW_FILE_URI', 'false')) and url.startswith('file://'):
            from requests_file import FileAdapter
            session.mount('file://', FileAdapter())

        r = session.request(method=request_method,
                            data=request_body.encode('utf-8') if type(request_body) is str else request_body,
                            url=url,
                            headers=request_headers,
                            timeout=timeout,
                            proxies=proxies,
                            verify=False)

        # If the response did not tell us what encoding format to expect, then use chardet to override what `requests` thinks.
        # For example - some sites don't tell us it's utf-8, but return utf-8 content.
        # This seems not to occur when using webdriver/selenium, which detects the text encoding more reliably.
        # https://github.com/psf/requests/issues/1604 has good info about requests encoding detection.
        if not is_binary:
            # Don't run this for PDF (and requests identified as binary) - it takes a _long_ time
            if not r.headers.get('content-type') or 'charset=' not in r.headers.get('content-type'):
                encoding = chardet.detect(r.content)['encoding']
                if encoding:
                    r.encoding = encoding

        self.headers = r.headers

        if not r.content or not len(r.content):
            logger.debug(f"Requests returned empty content for '{url}'")
            if not empty_pages_are_a_change:
                raise EmptyReply(url=url, status_code=r.status_code)
            else:
                logger.debug(f"URL {url} gave zero byte content reply with Status Code {r.status_code}, but empty_pages_are_a_change = True")

        # @todo test this
        # @todo maybe you really want to test zero-byte return pages?
        if r.status_code != 200 and not ignore_status_codes:
            # maybe check with content works?
            raise Non200ErrorCodeReceived(url=url, status_code=r.status_code, page_html=r.text)

        self.status_code = r.status_code
        if is_binary:
            # Binary files just return their checksum until we add something smarter
            self.content = hashlib.md5(r.content).hexdigest()
        else:
            self.content = r.text

        self.raw_content = r.content
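

# Hedged usage sketch (URL and values illustrative; the run() signature is as defined above):
#
#   f = fetcher()
#   f.run(url='https://example.com', timeout=20, request_headers={}, request_body=None,
#         request_method='GET')
#   print(f.status_code, f.headers.get('content-type'), len(f.content))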

changedetectionio/content_fetchers/res/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
# resources for browser injection/scraping

@@ -18,6 +18,7 @@ module.exports = async ({page, context}) => {

    await page.setBypassCSP(true)
    await page.setExtraHTTPHeaders(req_headers);

    if (user_agent) {
        await page.setUserAgent(user_agent);
    }
@@ -26,6 +27,10 @@ module.exports = async ({page, context}) => {
    await page.setDefaultNavigationTimeout(0);

    if (proxy_username) {
+       // Setting the Proxy-Authentication header is deprecated, and doing so can trigger header change errors from Puppeteer
+       // https://github.com/puppeteer/puppeteer/issues/676 ?
+       // https://help.brightdata.com/hc/en-us/articles/12632549957649-Proxy-Manager-How-to-Guides#h_01HAKWR4Q0AFS8RZTNYWRDFJC2
+       // https://cri.dev/posts/2020-03-30-How-to-solve-Puppeteer-Chrome-Error-ERR_INVALID_ARGUMENT/
        await page.authenticate({
            username: proxy_username,
            password: proxy_password
@@ -141,7 +146,7 @@ module.exports = async ({page, context}) => {
    var xpath_data;
    var instock_data;
    try {
-       // Not sure the best way here, in the future this should be a new package added to npm then run in browserless
+       // Not sure the best way here, in the future this should be a new package added to npm then run in evaluatedCode
        // (Once the old playwright is removed)
        xpath_data = await page.evaluate((include_filters) => {%xpath_scrape_code%}, include_filters);
        instock_data = await page.evaluate(() => {%instock_scrape_code%});

changedetectionio/content_fetchers/res/stock-not-in-stock.js (new file, 223 lines)
@@ -0,0 +1,223 @@
// Restock Detector
// (c) Leigh Morresi dgtlmoon@gmail.com
//
// Assumes the product is in stock to begin with, unless the following appears above the fold:
// - outOfStockTexts appears above the fold (out of stock)
// - negateOutOfStockRegex (really is in stock)

function isItemInStock() {
    // @todo Pass these in so the same list can be used in non-JS fetchers
    const outOfStockTexts = [
        ' أخبرني عندما يتوفر',
        '0 in stock',
        'actuellement indisponible',
        'agotado',
        'article épuisé',
        'artikel zurzeit vergriffen',
        'as soon as stock is available',
        'ausverkauft', // sold out
        'available for back order',
        'awaiting stock',
        'back in stock soon',
        'back-order or out of stock',
        'backordered',
        'benachrichtigt mich', // notify me
        'brak na stanie',
        'brak w magazynie',
        'coming soon',
        'currently have any tickets for this',
        'currently unavailable',
        'dieser artikel ist bald wieder verfügbar',
        'dostępne wkrótce',
        'en rupture',
        'en rupture de stock',
        'épuisé',
        'esgotado',
        'indisponible',
        'indisponível',
        'isn\'t in stock right now',
        'isnt in stock right now',
        'isn’t in stock right now',
        'item is no longer available',
        'let me know when it\'s available',
        'mail me when available',
        'message if back in stock',
        'mevcut değil',
        'nachricht bei',
        'nicht auf lager',
        'nicht lagernd',
        'nicht lieferbar',
        'nicht verfügbar',
        'nicht vorrätig',
        'nicht zur verfügung',
        'nie znaleziono produktów',
        'niet beschikbaar',
        'niet leverbaar',
        'niet op voorraad',
        'no disponible',
        'non disponibile',
        'non disponible',
        'no longer in stock',
        'no tickets available',
        'not available',
        'not currently available',
        'not in stock',
        'notify me when available',
        'notify me',
        'notify when available',
        'não disponível',
        'não estamos a aceitar encomendas',
        'out of stock',
        'out-of-stock',
        'plus disponible',
        'prodotto esaurito',
        'produkt niedostępny',
        'rupture',
        'sold out',
        'sold-out',
        'stokta yok',
        'temporarily out of stock',
        'temporarily unavailable',
        'there were no search results for',
        'this item is currently unavailable',
        'tickets unavailable',
        'tijdelijk uitverkocht',
        'tükendi',
        'unavailable nearby',
        'unavailable tickets',
        'vergriffen',
        'vorbestellen',
        'vorbestellung ist bald möglich',
        'we don\'t currently have any',
        'we couldn\'t find any products that match',
        'we do not currently have an estimate of when this product will be back in stock.',
        'we don\'t know when or if this item will be back in stock.',
        'we were not able to find a match',
        'when this arrives in stock',
        'zur zeit nicht an lager',
        '品切れ',
        '已售',
        '已售完',
        '품절'
    ];


    const vh = Math.max(document.documentElement.clientHeight || 0, window.innerHeight || 0);

    function getElementBaseText(element) {
        // .textContent can include text from children which may give the wrong results
        // scan only immediate TEXT_NODEs, which will be a child of the element
        var text = "";
        for (var i = 0; i < element.childNodes.length; ++i)
            if (element.childNodes[i].nodeType === Node.TEXT_NODE)
                text += element.childNodes[i].textContent;
        return text.toLowerCase().trim();
    }

    const negateOutOfStockRegex = new RegExp('^([0-9] in stock|add to cart|in stock)', 'ig');
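
    // e.g. the regex above matches, case-insensitively and only at the start of the element text:
    //   "2 in stock", "In stock", "Add to cart" - any of these flips the verdict back to
    //   'Possibly in stock', unless the text also contains '(0 products)' (checked below)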
|
||||
|
||||
// The out-of-stock or in-stock-text is generally always above-the-fold
|
||||
// and often below-the-fold is a list of related products that may or may not contain trigger text
|
||||
// so it's good to filter to just the 'above the fold' elements
|
||||
// and it should be atleast 100px from the top to ignore items in the toolbar, sometimes menu items like "Coming soon" exist
|
||||
|
||||
|
||||
// @todo - if it's SVG or IMG, go into image diff mode
|
||||
// %ELEMENTS% replaced at injection time because different interfaces use it with different settings
|
||||
|
||||
console.log("Scanning %ELEMENTS%");
|
||||
|
||||
function collectVisibleElements(parent, visibleElements) {
|
||||
if (!parent) return; // Base case: if parent is null or undefined, return
|
||||
|
||||
// Add the parent itself to the visible elements array if it's of the specified types
|
||||
visibleElements.push(parent);
|
||||
|
||||
// Iterate over the parent's children
|
||||
const children = parent.children;
|
||||
for (let i = 0; i < children.length; i++) {
|
||||
const child = children[i];
|
||||
if (
|
||||
child.nodeType === Node.ELEMENT_NODE &&
|
||||
window.getComputedStyle(child).display !== 'none' &&
|
||||
window.getComputedStyle(child).visibility !== 'hidden' &&
|
||||
child.offsetWidth >= 0 &&
|
||||
child.offsetHeight >= 0 &&
|
||||
window.getComputedStyle(child).contentVisibility !== 'hidden'
|
||||
) {
|
||||
// If the child is an element and is visible, recursively collect visible elements
|
||||
collectVisibleElements(child, visibleElements);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const elementsToScan = [];
|
||||
collectVisibleElements(document.body, elementsToScan);
|
||||
|
||||
var elementText = "";
|
||||
|
||||
// REGEXS THAT REALLY MEAN IT'S IN STOCK
|
||||
for (let i = elementsToScan.length - 1; i >= 0; i--) {
|
||||
const element = elementsToScan[i];
|
||||
|
||||
// outside the 'fold' or some weird text in the heading area
|
||||
// .getBoundingClientRect() was causing a crash in chrome 119, can only be run on contentVisibility != hidden
|
||||
if (element.getBoundingClientRect().top + window.scrollY >= vh || element.getBoundingClientRect().top + window.scrollY <= 100) {
|
||||
continue
|
||||
}
|
||||
|
||||
elementText = "";
|
||||
try {
|
||||
if (element.tagName.toLowerCase() === "input") {
|
||||
elementText = element.value.toLowerCase().trim();
|
||||
} else {
|
||||
elementText = getElementBaseText(element);
|
||||
}
|
||||
} catch (e) {
|
||||
console.warn('stock-not-in-stock.js scraper - handling element for gettext failed', e);
|
||||
}
|
||||
|
||||
if (elementText.length) {
|
||||
// try which ones could mean its in stock
|
||||
if (negateOutOfStockRegex.test(elementText) && !elementText.includes('(0 products)')) {
|
||||
console.log(`Negating/overriding 'Out of Stock' back to "Possibly in stock" found "${elementText}"`)
|
||||
return 'Possibly in stock';
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// OTHER STUFF THAT COULD BE THAT IT'S OUT OF STOCK
|
||||
for (let i = elementsToScan.length - 1; i >= 0; i--) {
|
||||
const element = elementsToScan[i];
|
||||
// outside the 'fold' or some weird text in the heading area
|
||||
// .getBoundingClientRect() was causing a crash in chrome 119, can only be run on contentVisibility != hidden
|
||||
// Note: theres also an automated test that places the 'out of stock' text fairly low down
|
||||
if (element.getBoundingClientRect().top + window.scrollY >= vh + 250 || element.getBoundingClientRect().top + window.scrollY <= 100) {
|
||||
continue
|
||||
}
|
||||
elementText = "";
|
||||
if (element.tagName.toLowerCase() === "input") {
|
||||
elementText = element.value.toLowerCase().trim();
|
||||
} else {
|
||||
elementText = getElementBaseText(element);
|
||||
}
|
||||
|
||||
if (elementText.length) {
|
||||
// and these mean its out of stock
|
||||
for (const outOfStockText of outOfStockTexts) {
|
||||
if (elementText.includes(outOfStockText)) {
|
||||
console.log(`Selected 'Out of Stock' - found text "${outOfStockText}" - "${elementText}" - offset top ${element.getBoundingClientRect().top}, page height is ${vh}`)
|
||||
return outOfStockText; // item is out of stock
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
console.log(`Returning 'Possibly in stock' - cant' find any useful matching text`)
|
||||
return 'Possibly in stock'; // possibly in stock, cant decide otherwise.
|
||||
}
|
||||
|
||||
// returns the element text that makes it think it's out of stock
|
||||
return isItemInStock().trim()
|
||||
|
||||
|
||||
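How a fetcher might run this script - a minimal sketch only, assuming a Playwright-based fetcher (the actual plumbing lives elsewhere in the content_fetchers package and is not shown in this diff). The `%ELEMENTS%` placeholder is substituted before evaluation, and the script is wrapped in a function so its top-level `return` is legal:

# Sketch: file path and element list are illustrative values, not the fetcher's real settings.
from playwright.sync_api import sync_playwright

with open('changedetectionio/content_fetchers/res/stock-not-in-stock.js') as f:
    js = f.read().replace('%ELEMENTS%', 'div,p,span,button,input')

with sync_playwright() as p:
    browser = p.chromium.launch()
    page = browser.new_page()
    page.goto('https://example.com/product')
    # Wrap in a function so the script's top-level `return` is valid in evaluate()
    stock_state = page.evaluate("() => {" + js + "}")
    print(stock_state)  # e.g. 'sold out', 'tükendi', or 'Possibly in stock'
    browser.close()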
285
changedetectionio/content_fetchers/res/xpath_element_scraper.js
Normal file
@@ -0,0 +1,285 @@
// Copyright (C) 2021 Leigh Morresi (dgtlmoon@gmail.com)
// All rights reserved.

// @file Scrape the page looking for elements of concern (%ELEMENTS%)
// http://matatk.agrip.org.uk/tests/position-and-width/
// https://stackoverflow.com/questions/26813480/when-is-element-getboundingclientrect-guaranteed-to-be-updated-accurate
//
// Some pages like https://www.londonstockexchange.com/stock/NCCL/ncondezi-energy-limited/analysis
// will automatically force a scroll somewhere, so include the position offset.
// Let's hope the position doesn't change while we iterate the bboxes, but this is better than nothing.
var scroll_y = 0;
try {
    scroll_y = +document.documentElement.scrollTop || document.body.scrollTop
} catch (e) {
    console.log(e);
}

// Include the getXpath script directly, easier than fetching
function getxpath(e) {
    var n = e;
    if (n && n.id) return '//*[@id="' + n.id + '"]';
    for (var o = []; n && Node.ELEMENT_NODE === n.nodeType;) {
        for (var i = 0, r = !1, d = n.previousSibling; d;) d.nodeType !== Node.DOCUMENT_TYPE_NODE && d.nodeName === n.nodeName && i++, d = d.previousSibling;
        for (d = n.nextSibling; d;) {
            if (d.nodeName === n.nodeName) {
                r = !0;
                break
            }
            d = d.nextSibling
        }
        o.push((n.prefix ? n.prefix + ":" : "") + n.localName + (i || r ? "[" + (i + 1) + "]" : "")), n = n.parentNode
    }
    return o.length ? "/" + o.reverse().join("/") : ""
}

const findUpTag = (el) => {
    let r = el
    chained_css = [];
    depth = 0;

    // Strategy 1: If it's an input with a name, and there's only one, prefer that
    if (el.name !== undefined && el.name.length) {
        var proposed = el.tagName + "[name=\"" + CSS.escape(el.name) + "\"]";
        var proposed_element = window.document.querySelectorAll(proposed);
        if (proposed_element.length) {
            if (proposed_element.length === 1) {
                return proposed;
            } else {
                // Some sites change the ID but name= stays the same, we can hit it if we know the index
                // Find all the elements that match and work out the input[n]
                var n = Array.from(proposed_element).indexOf(el);
                // Return a Playwright-style selector, e.g. input[name=zipcode] >> nth=n
                return proposed + " >> nth=" + n;
            }
        }
    }

    // Strategy 2: Keep going up until we hit an ID tag, imagine it's like #list-widget div h4
    while (r.parentNode) {
        if (depth === 5) {
            break;
        }
        if ('' !== r.id) {
            chained_css.unshift("#" + CSS.escape(r.id));
            final_selector = chained_css.join(' > ');
            // Be sure there's only one, some sites have multiples of the same ID tag :-(
            if (window.document.querySelectorAll(final_selector).length === 1) {
                return final_selector;
            }
            return null;
        } else {
            chained_css.unshift(r.tagName.toLowerCase());
        }
        r = r.parentNode;
        depth += 1;
    }
    return null;
}
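// Quick illustration of the two strategies (hypothetical markup, not from any real page):
// with <div id="price-box"><span>9.99</span></div> in the DOM, Strategy 2 walks up from
// the <span>, finds the #price-box ID and returns "#price-box > span"; an
// <input name="zipcode"> that appears twice on the page resolves via Strategy 1 to the
// Playwright-style selector 'INPUT[name="zipcode"] >> nth=1' (tagName is uppercase).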

// @todo - if it's SVG or IMG, go into image diff mode
// %ELEMENTS% is replaced at injection time because different interfaces use it with different settings

var size_pos = [];
// after page fetch, inject this JS
// build a map of all elements and their positions (maybe only those that include text?)
var bbox;
console.log("Scanning %ELEMENTS%");

function collectVisibleElements(parent, visibleElements) {
    if (!parent) return; // Base case: if parent is null or undefined, return

    // Add the parent itself to the visible elements array if it's of the specified types
    const tagName = parent.tagName.toLowerCase();
    if ("%ELEMENTS%".split(',').includes(tagName)) {
        visibleElements.push(parent);
    }

    // Iterate over the parent's children
    const children = parent.children;
    for (let i = 0; i < children.length; i++) {
        const child = children[i];
        const computedStyle = window.getComputedStyle(child);

        if (
            child.nodeType === Node.ELEMENT_NODE &&
            computedStyle.display !== 'none' &&
            computedStyle.visibility !== 'hidden' &&
            child.offsetWidth >= 0 &&
            child.offsetHeight >= 0 &&
            computedStyle.contentVisibility !== 'hidden'
        ) {
            // If the child is an element and is visible, recursively collect visible elements
            collectVisibleElements(child, visibleElements);
        }
    }
}

// Create an array to hold the visible elements
const visibleElementsArray = [];

// Call collectVisibleElements with the starting parent element
collectVisibleElements(document.body, visibleElementsArray);

visibleElementsArray.forEach(function (element) {

    bbox = element.getBoundingClientRect();

    // Skip really small ones, and where width or height == 0
    if (bbox['width'] * bbox['height'] < 10) {
        return
    }

    // Don't include elements that are offset from canvas
    if (bbox['top'] + scroll_y < 0 || bbox['left'] < 0) {
        return
    }

    // @todo the getXpath kind of sucks, it doesn't know when there is, for example, just one ID sometimes;
    // it should not traverse when we know we can anchor off just an ID one level up etc..
    // maybe: get the current class or id, keep traversing up looking for only class or id until there is just one match

    // 1st primitive - if it has a class, try joining it all and selecting; if there's only one.. well that's us.
    xpath_result = false;
    try {
        var d = findUpTag(element);
        if (d) {
            xpath_result = d;
        }
    } catch (e) {
        console.log(e);
    }
    // You could swap it and default to getXpath and then try the smarter one
    // default back to the less intelligent one
    if (!xpath_result) {
        try {
            // I've seen on FB and eBay that this doesn't work
            // ReferenceError: getXPath is not defined at eval (eval at evaluate (:152:29), <anonymous>:67:20) at UtilityScript.evaluate (<anonymous>:159:18) at UtilityScript.<anonymous> (<anonymous>:1:44)
            xpath_result = getxpath(element);
        } catch (e) {
            console.log(e);
            return
        }
    }

    let label = "not-interesting" // A placeholder, the actual labels for training are done by hand for now

    let text = element.textContent.trim().slice(0, 30).trim();
    while (/\n{2,}|\t{2,}/.test(text)) {
        text = text.replace(/\n{2,}/g, '\n').replace(/\t{2,}/g, '\t')
    }

    // Try to identify any possible currency amounts, "Sale: 4000" or "Sale now 3000 Kc", can help with the training.
    const hasDigitCurrency = (/\d/.test(text.slice(0, 6)) || /\d/.test(text.slice(-6))) && /([€£$¥₩₹]|USD|AUD|EUR|Kč|kr|SEK|,–)/.test(text);
    const computedStyle = window.getComputedStyle(element);

    size_pos.push({
        xpath: xpath_result,
        width: Math.round(bbox['width']),
        height: Math.round(bbox['height']),
        left: Math.floor(bbox['left']),
        top: Math.floor(bbox['top']) + scroll_y,
        // tagName used by Browser Steps
        tagName: (element.tagName) ? element.tagName.toLowerCase() : '',
        // tagtype used by Browser Steps
        tagtype: (element.tagName.toLowerCase() === 'input' && element.type) ? element.type.toLowerCase() : '',
        isClickable: computedStyle.cursor === "pointer",
        // Used by the keras trainer
        fontSize: computedStyle.getPropertyValue('font-size'),
        fontWeight: computedStyle.getPropertyValue('font-weight'),
        hasDigitCurrency: hasDigitCurrency,
        label: label,
    });

});

// Inject the current one set in the include_filters, which may be a CSS rule
// used for displaying the current one in VisualSelector, where it's not one we generated.
if (include_filters.length) {
    let results;
    // For each filter, go and find it on the page and add it to the results so we can visualise it again
    for (const f of include_filters) {
        bbox = false;
        q = false;

        if (!f.length) {
            console.log("xpath_element_scraper: Empty filter, skipping");
            continue;
        }

        try {
            // is it xpath?
            if (f.startsWith('/') || f.startsWith('xpath')) {
                var qry_f = f.replace(/xpath(:|\d:)/, '')
                console.log("[xpath] Scanning for included filter " + qry_f)
                let xpathResult = document.evaluate(qry_f, document, null, XPathResult.ORDERED_NODE_SNAPSHOT_TYPE, null);
                results = [];
                for (let i = 0; i < xpathResult.snapshotLength; i++) {
                    results.push(xpathResult.snapshotItem(i));
                }
            } else {
                console.log("[css] Scanning for included filter " + f);
                results = document.querySelectorAll(f);
            }
        } catch (e) {
            // Maybe catch DOMException and alert?
            console.log("xpath_element_scraper: Exception selecting element from filter " + f);
            console.log(e);
        }

        if (results != null && results.length) {

            // Iterate over the results
            results.forEach(node => {
                // Try to resolve //something/text() back to its /something so we can at least get the bounding box
                try {
                    if (typeof node.nodeName == 'string' && node.nodeName === '#text') {
                        node = node.parentElement
                    }
                } catch (e) {
                    console.log(e)
                    console.log("xpath_element_scraper: #text resolver")
                }

                // #1231 - In the case an XPath attribute filter is applied, we will have to traverse up and find the element.
                if (typeof node.getBoundingClientRect == 'function') {
                    bbox = node.getBoundingClientRect();
                    console.log("xpath_element_scraper: Got filter element, scroll from top was " + scroll_y)
                } else {
                    try {
                        // Try and see if we can find its ownerElement
                        bbox = node.ownerElement.getBoundingClientRect();
                        console.log("xpath_element_scraper: Got filter by ownerElement element, scroll from top was " + scroll_y)
                    } catch (e) {
                        console.log(e)
                        console.log("xpath_element_scraper: error looking up q.ownerElement")
                    }
                }

                if (bbox && bbox['width'] > 0 && bbox['height'] > 0) {
                    size_pos.push({
                        xpath: f,
                        width: parseInt(bbox['width']),
                        height: parseInt(bbox['height']),
                        left: parseInt(bbox['left']),
                        top: parseInt(bbox['top']) + scroll_y,
                        highlight_as_custom_filter: true
                    });
                }
            });
        }
    }
}

// Sort the elements so we find the smallest one first, in other words, we find the smallest one matching in that area
// so that we don't select the wrapping element by mistake and be unable to select what we want
size_pos.sort((a, b) => (a.width * a.height > b.width * b.height) ? 1 : -1)

// Window.width required for proper scaling in the frontend
return {'size_pos': size_pos, 'browser_width': window.innerWidth};
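On the Python side, the returned map is what the VisualSelector and Browser Steps UIs draw from. A consumption sketch under stated assumptions: `page` is a Playwright page, `xpath_scraper_js` holds the file above, and an `include_filters` binding is prepended the way the fetcher would do it (exact plumbing not shown in this diff):

# Sketch: evaluate the scraper and pick out a few of the fields documented above.
js = "const include_filters = [];\n" + xpath_scraper_js.replace('%ELEMENTS%', 'div,a,span,input')
result = page.evaluate("() => {" + js + "}")

scaling = display_width / result['browser_width']   # display_width is a hypothetical UI value
clickable = [e for e in result['size_pos'] if e['isClickable']]
custom = [e for e in result['size_pos'] if e.get('highlight_as_custom_filter')]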
120
changedetectionio/content_fetchers/webdriver_selenium.py
Normal file
@@ -0,0 +1,120 @@
import os
import time

from loguru import logger
from changedetectionio.content_fetchers.base import Fetcher

class fetcher(Fetcher):
    if os.getenv("WEBDRIVER_URL"):
        fetcher_description = "WebDriver Chrome/Javascript via '{}'".format(os.getenv("WEBDRIVER_URL"))
    else:
        fetcher_description = "WebDriver Chrome/Javascript"

    # Configs for Proxy setup
    # In the ENV vars, each is prefixed with "webdriver_", so it is for example "webdriver_sslProxy"
    selenium_proxy_settings_mappings = ['proxyType', 'ftpProxy', 'httpProxy', 'noProxy',
                                        'proxyAutoconfigUrl', 'sslProxy', 'autodetect',
                                        'socksProxy', 'socksVersion', 'socksUsername', 'socksPassword']
    proxy = None

    def __init__(self, proxy_override=None, custom_browser_connection_url=None):
        super().__init__()
        from selenium.webdriver.common.proxy import Proxy as SeleniumProxy

        # .strip('"') is going to save someone a lot of time when they accidentally wrap the env value
        if not custom_browser_connection_url:
            self.browser_connection_url = os.getenv("WEBDRIVER_URL", 'http://browser-chrome:4444/wd/hub').strip('"')
        else:
            self.browser_connection_is_custom = True
            self.browser_connection_url = custom_browser_connection_url

        # If any proxy settings are enabled, then we should set up the proxy object
        proxy_args = {}
        for k in self.selenium_proxy_settings_mappings:
            v = os.getenv('webdriver_' + k, False)
            if v:
                proxy_args[k] = v.strip('"')

        # Map the standard HTTP_PROXY and HTTPS_PROXY back to WebDriver's httpProxy/sslProxy
        # (note: keys in proxy_args are stored without the "webdriver_" prefix)
        if not proxy_args.get('httpProxy') and self.system_http_proxy:
            proxy_args['httpProxy'] = self.system_http_proxy
        if not proxy_args.get('sslProxy') and self.system_https_proxy:
            proxy_args['sslProxy'] = self.system_https_proxy

        # Allows overriding the proxy on a per-request basis
        if proxy_override is not None:
            proxy_args['httpProxy'] = proxy_override

        if proxy_args:
            self.proxy = SeleniumProxy(raw=proxy_args)

    def run(self,
            url,
            timeout,
            request_headers,
            request_body,
            request_method,
            ignore_status_codes=False,
            current_include_filters=None,
            is_binary=False,
            empty_pages_are_a_change=False):

        from selenium import webdriver
        from selenium.webdriver.chrome.options import Options as ChromeOptions
        from selenium.common.exceptions import WebDriverException
        # request_body, request_method unused for now, until some magic in the future happens.

        options = ChromeOptions()
        if self.proxy:
            options.proxy = self.proxy

        self.driver = webdriver.Remote(
            command_executor=self.browser_connection_url,
            options=options)

        try:
            self.driver.get(url)
        except WebDriverException as e:
            # Be sure we close the session window
            self.quit()
            raise

        self.driver.set_window_size(1280, 1024)
        self.driver.implicitly_wait(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)))

        if self.webdriver_js_execute_code is not None:
            self.driver.execute_script(self.webdriver_js_execute_code)
            # Selenium doesn't wait for actions as well as Playwright does, so wait again
            self.driver.implicitly_wait(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)))

        # @todo - how to check this? is it possible?
        self.status_code = 200
        # @todo somehow we should try to get this working for WebDriver
        # raise EmptyReply(url=url, status_code=r.status_code)

        # @todo - dom wait loaded?
        time.sleep(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay)
        self.content = self.driver.page_source
        self.headers = {}

        self.screenshot = self.driver.get_screenshot_as_png()

    # Does the connection to the webdriver work? Run a test connection.
    def is_ready(self):
        from selenium import webdriver
        from selenium.webdriver.chrome.options import Options as ChromeOptions

        self.driver = webdriver.Remote(
            command_executor=self.browser_connection_url,
            options=ChromeOptions())

        # driver.quit() seems to cause better exceptions
        self.quit()
        return True

    def quit(self):
        if self.driver:
            try:
                self.driver.quit()
            except Exception as e:
                logger.debug(f"Content Fetcher > Exception in chrome shutdown/quit {str(e)}")
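A minimal driving sketch for the class above (hedged: it assumes a Selenium hub reachable via WEBDRIVER_URL and only the attributes shown in this file; the real call sites live elsewhere in the app):

from changedetectionio.content_fetchers.webdriver_selenium import fetcher

f = fetcher()  # or fetcher(proxy_override='http://user:pass@proxy:3128')
f.run(url="https://example.com", timeout=30, request_headers={},
      request_body=None, request_method="GET")
html = f.content          # rendered page source after the settle delays
png_bytes = f.screenshot  # PNG screenshot for the UI
f.quit()                  # always release the remote browser session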
@@ -1,62 +1,113 @@
# used for the notifications, the front-end is using a JS library

import difflib
from typing import List, Iterator, Union

REMOVED_STYLE = "background-color: #fadad7; color: #b30000;"
ADDED_STYLE = "background-color: #eaf2c2; color: #406619;"

def same_slicer(lst: List[str], start: int, end: int) -> List[str]:
    """Return a slice of the list, or a single element if start == end."""
    return lst[start:end] if start != end else [lst[start]]

def customSequenceMatcher(
    before: List[str],
    after: List[str],
    include_equal: bool = False,
    include_removed: bool = True,
    include_added: bool = True,
    include_replaced: bool = True,
    include_change_type_prefix: bool = True,
    html_colour: bool = False
) -> Iterator[List[str]]:
    """
    Compare two sequences and yield differences based on specified parameters.

    Args:
        before (List[str]): Original sequence
        after (List[str]): Modified sequence
        include_equal (bool): Include unchanged parts
        include_removed (bool): Include removed parts
        include_added (bool): Include added parts
        include_replaced (bool): Include replaced parts
        include_change_type_prefix (bool): Add prefixes to indicate change types
        html_colour (bool): Use HTML background colors for differences

    Yields:
        List[str]: Differences between sequences
    """
    cruncher = difflib.SequenceMatcher(isjunk=lambda x: x in " \t", a=before, b=after)


def same_slicer(l, a, b):
    if a == b:
        return [l[a]]
    else:
        return l[a:b]

# like .compare but a little different output
def customSequenceMatcher(before, after, include_equal=False, include_removed=True, include_added=True, include_replaced=True, include_change_type_prefix=True):
    cruncher = difflib.SequenceMatcher(isjunk=lambda x: x in " \\t", a=before, b=after)

    # @todo Line-by-line mode instead of bunched, including `after` that is not in `before` (maybe unset?)
    for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
        if include_equal and tag == 'equal':
            g = before[alo:ahi]
            yield g
            yield before[alo:ahi]
        elif include_removed and tag == 'delete':
            row_prefix = "(removed) " if include_change_type_prefix else ''
            g = [row_prefix + i for i in same_slicer(before, alo, ahi)]
            yield g
            if html_colour:
                yield [f'<span style="{REMOVED_STYLE}">{line}</span>' for line in same_slicer(before, alo, ahi)]
            else:
                yield [f"(removed) {line}" for line in same_slicer(before, alo, ahi)] if include_change_type_prefix else same_slicer(before, alo, ahi)
        elif include_replaced and tag == 'replace':
            row_prefix = "(changed) " if include_change_type_prefix else ''
            g = [row_prefix + i for i in same_slicer(before, alo, ahi)]
            row_prefix = "(into) " if include_change_type_prefix else ''
            g += [row_prefix + i for i in same_slicer(after, blo, bhi)]
            yield g
            if html_colour:
                yield [f'<span style="{REMOVED_STYLE}">{line}</span>' for line in same_slicer(before, alo, ahi)] + \
                      [f'<span style="{ADDED_STYLE}">{line}</span>' for line in same_slicer(after, blo, bhi)]
            else:
                yield [f"(changed) {line}" for line in same_slicer(before, alo, ahi)] + \
                      [f"(into) {line}" for line in same_slicer(after, blo, bhi)] if include_change_type_prefix else same_slicer(before, alo, ahi) + same_slicer(after, blo, bhi)
        elif include_added and tag == 'insert':
            row_prefix = "(added) " if include_change_type_prefix else ''
            g = [row_prefix + i for i in same_slicer(after, blo, bhi)]
            yield g
            if html_colour:
                yield [f'<span style="{ADDED_STYLE}">{line}</span>' for line in same_slicer(after, blo, bhi)]
            else:
                yield [f"(added) {line}" for line in same_slicer(after, blo, bhi)] if include_change_type_prefix else same_slicer(after, blo, bhi)

# only_differences - only return info about the differences, no context
# line_feed_sep could be "<br>" or "<li>" or "\n" etc
def render_diff(previous_version_file_contents, newest_version_file_contents, include_equal=False, include_removed=True, include_added=True, include_replaced=True, line_feed_sep="\n", include_change_type_prefix=True, patch_format=False):
def render_diff(
        previous_version_file_contents: str,
        newest_version_file_contents: str,
        include_equal: bool = False,
        include_removed: bool = True,
        include_added: bool = True,
        include_replaced: bool = True,
        line_feed_sep: str = "\n",
        include_change_type_prefix: bool = True,
        patch_format: bool = False,
        html_colour: bool = False
) -> str:
    """
    Render the difference between two file contents.

    newest_version_file_contents = [line.rstrip() for line in newest_version_file_contents.splitlines()]
    Args:
        previous_version_file_contents (str): Original file contents
        newest_version_file_contents (str): Modified file contents
        include_equal (bool): Include unchanged parts
        include_removed (bool): Include removed parts
        include_added (bool): Include added parts
        include_replaced (bool): Include replaced parts
        line_feed_sep (str): Separator for lines in output
        include_change_type_prefix (bool): Add prefixes to indicate change types
        patch_format (bool): Use patch format for output
        html_colour (bool): Use HTML background colors for differences

    if previous_version_file_contents:
        previous_version_file_contents = [line.rstrip() for line in previous_version_file_contents.splitlines()]
    else:
        previous_version_file_contents = ""
    Returns:
        str: Rendered difference
    """
    newest_lines = [line.rstrip() for line in newest_version_file_contents.splitlines()]
    previous_lines = [line.rstrip() for line in previous_version_file_contents.splitlines()] if previous_version_file_contents else []

    if patch_format:
        patch = difflib.unified_diff(previous_version_file_contents, newest_version_file_contents)
        patch = difflib.unified_diff(previous_lines, newest_lines)
        return line_feed_sep.join(patch)

    rendered_diff = customSequenceMatcher(before=previous_version_file_contents,
                                          after=newest_version_file_contents,
                                          include_equal=include_equal,
                                          include_removed=include_removed,
                                          include_added=include_added,
                                          include_replaced=include_replaced,
                                          include_change_type_prefix=include_change_type_prefix)
    rendered_diff = customSequenceMatcher(
        before=previous_lines,
        after=newest_lines,
        include_equal=include_equal,
        include_removed=include_removed,
        include_added=include_added,
        include_replaced=include_replaced,
        include_change_type_prefix=include_change_type_prefix,
        html_colour=html_colour
    )

    # Recursively join lists
    f = lambda L: line_feed_sep.join([f(x) if type(x) is list else x for x in L])
    p = f(rendered_diff)
    return p
    def flatten(lst: List[Union[str, List[str]]]) -> str:
        return line_feed_sep.join(flatten(x) if isinstance(x, list) else x for x in lst)

    return flatten(rendered_diff)
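Grounded in the new signature above: the html_colour path turns a replaced line into a removed/added pair of coloured <span>s instead of the "(changed)"/"(into)" text prefixes. A short usage example (output shape approximate):

html = render_diff(
    previous_version_file_contents="price: 10\nin stock",
    newest_version_file_contents="price: 12\nin stock",
    include_equal=False,
    html_colour=True,
    line_feed_sep="<br>",
)
# Roughly: '<span style="background-color: #fadad7; ...">price: 10</span><br>'
#          '<span style="background-color: #eaf2c2; ...">price: 12</span>'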
1907
changedetectionio/flask_app.py
Normal file
@@ -1,10 +1,14 @@
import os
import re
from distutils.util import strtobool
from loguru import logger
from wtforms.widgets.core import TimeInput

from changedetectionio.strtobool import strtobool

from wtforms import (
    BooleanField,
    Form,
    Field,
    IntegerField,
    RadioField,
    SelectField,
@@ -15,14 +19,20 @@ from wtforms import (
    validators,
    widgets
)
from flask_wtf.file import FileField, FileAllowed
from wtforms.fields import FieldList

from wtforms.validators import ValidationError

from validators.url import url as url_validator


# default
# each select <option data-enabled="enabled-0-0"
from changedetectionio.blueprint.browser_steps.browser_steps import browser_step_ui_config

from changedetectionio import content_fetcher
from changedetectionio import html_tools, content_fetchers

from changedetectionio.notification import (
    valid_notification_formats,
)
@@ -37,10 +47,11 @@ valid_method = {
    'PUT',
    'PATCH',
    'DELETE',
    'OPTIONS',
}

default_method = 'GET'

allow_simplehost = not strtobool(os.getenv('BLOCK_SIMPLEHOSTS', 'False'))

class StringListField(StringField):
    widget = widgets.TextArea()
@@ -116,6 +127,87 @@ class StringTagUUID(StringField):

        return 'error'

class TimeDurationForm(Form):
    hours = SelectField(choices=[(f"{i}", f"{i}") for i in range(0, 25)], default="24", validators=[validators.Optional()])
    minutes = SelectField(choices=[(f"{i}", f"{i}") for i in range(0, 60)], default="00", validators=[validators.Optional()])

class TimeStringField(Field):
    """
    A WTForms field for time inputs (HH:MM) that stores the value as a string.
    """
    widget = TimeInput()  # Use the built-in time input widget

    def _value(self):
        """
        Returns the value for rendering in the form.
        """
        return self.data if self.data is not None else ""

    def process_formdata(self, valuelist):
        """
        Processes the raw input from the form and stores it as a string.
        """
        if valuelist:
            time_str = valuelist[0]
            # Simple validation for HH:MM format
            if not time_str or len(time_str.split(":")) != 2:
                raise ValidationError("Invalid time format. Use HH:MM.")
            self.data = time_str
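A quick check of the HH:MM handling above (the Form subclass here is made up for the demo; only TimeStringField is from the code):

from wtforms import Form

class DemoForm(Form):
    start_time = TimeStringField("Start At", default="00:00")

form = DemoForm()
form.start_time.process_formdata(["09:30"])  # stored as the plain string "09:30"
form.start_time.process_formdata(["0930"])   # raises ValidationError("Invalid time format. Use HH:MM.")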


class validateTimeZoneName(object):
    """
    Flask wtform validators won't work with basic auth
    """

    def __init__(self, message=None):
        self.message = message

    def __call__(self, form, field):
        from zoneinfo import available_timezones
        python_timezones = available_timezones()
        if field.data and field.data not in python_timezones:
            raise ValidationError("Not a valid timezone name")

class ScheduleLimitDaySubForm(Form):
    enabled = BooleanField("not set", default=True)
    start_time = TimeStringField("Start At", default="00:00", validators=[validators.Optional()])
    duration = FormField(TimeDurationForm, label="Run duration")

class ScheduleLimitForm(Form):
    enabled = BooleanField("Use time scheduler", default=False)
    # Because the label for="" doesn't line up/work with the actual checkbox
    monday = FormField(ScheduleLimitDaySubForm, label="")
    tuesday = FormField(ScheduleLimitDaySubForm, label="")
    wednesday = FormField(ScheduleLimitDaySubForm, label="")
    thursday = FormField(ScheduleLimitDaySubForm, label="")
    friday = FormField(ScheduleLimitDaySubForm, label="")
    saturday = FormField(ScheduleLimitDaySubForm, label="")
    sunday = FormField(ScheduleLimitDaySubForm, label="")

    timezone = StringField("Optional timezone to run in",
                           render_kw={"list": "timezones"},
                           validators=[validateTimeZoneName()]
                           )
    def __init__(
            self,
            formdata=None,
            obj=None,
            prefix="",
            data=None,
            meta=None,
            **kwargs,
    ):
        super().__init__(formdata, obj, prefix, data, meta, **kwargs)
        self.monday.form.enabled.label.text = "Monday"
        self.tuesday.form.enabled.label.text = "Tuesday"
        self.wednesday.form.enabled.label.text = "Wednesday"
        self.thursday.form.enabled.label.text = "Thursday"
        self.friday.form.enabled.label.text = "Friday"
        self.saturday.form.enabled.label.text = "Saturday"
        self.sunday.form.enabled.label.text = "Sunday"


class TimeBetweenCheckForm(Form):
    weeks = IntegerField('Weeks', validators=[validators.Optional(), validators.NumberRange(min=0, message="Should contain zero or more seconds")])
    days = IntegerField('Days', validators=[validators.Optional(), validators.NumberRange(min=0, message="Should contain zero or more seconds")])
@@ -160,31 +252,31 @@ class ValidateContentFetcherIsReady(object):
        self.message = message

    def __call__(self, form, field):
        import urllib3.exceptions
        from changedetectionio import content_fetcher
        return

        # AttributeError: module 'changedetectionio.content_fetcher' has no attribute 'extra_browser_unlocked<>ASDF213r123r'
        # Better would be a radiohandler that keeps a reference to each class
        if field.data is not None and field.data != 'system':
            klass = getattr(content_fetcher, field.data)
            some_object = klass()
            try:
                ready = some_object.is_ready()

            except urllib3.exceptions.MaxRetryError as e:
                driver_url = some_object.command_executor
                message = field.gettext('Content fetcher \'%s\' did not respond.' % (field.data))
                message += '<br>' + field.gettext(
                    'Be sure that the selenium/webdriver runner is running and accessible via network from this container/host.')
                message += '<br>' + field.gettext('Did you follow the instructions in the wiki?')
                message += '<br><br>' + field.gettext('WebDriver Host: %s' % (driver_url))
                message += '<br><a href="https://github.com/dgtlmoon/changedetection.io/wiki/Fetching-pages-with-WebDriver">Go here for more information</a>'
                message += '<br>'+field.gettext('Content fetcher did not respond properly, unable to use it.\n %s' % (str(e)))

                raise ValidationError(message)

            except Exception as e:
                message = field.gettext('Content fetcher \'%s\' did not respond properly, unable to use it.\n %s')
                raise ValidationError(message % (field.data, e))
        # if field.data is not None and field.data != 'system':
        #     klass = getattr(content_fetcher, field.data)
        #     some_object = klass()
        #     try:
        #         ready = some_object.is_ready()
        #
        #     except urllib3.exceptions.MaxRetryError as e:
        #         driver_url = some_object.command_executor
        #         message = field.gettext('Content fetcher \'%s\' did not respond.' % (field.data))
        #         message += '<br>' + field.gettext(
        #             'Be sure that the selenium/webdriver runner is running and accessible via network from this container/host.')
        #         message += '<br>' + field.gettext('Did you follow the instructions in the wiki?')
        #         message += '<br><br>' + field.gettext('WebDriver Host: %s' % (driver_url))
        #         message += '<br><a href="https://github.com/dgtlmoon/changedetection.io/wiki/Fetching-pages-with-WebDriver">Go here for more information</a>'
        #         message += '<br>'+field.gettext('Content fetcher did not respond properly, unable to use it.\n %s' % (str(e)))
        #
        #         raise ValidationError(message)
        #
        #     except Exception as e:
        #         message = field.gettext('Content fetcher \'%s\' did not respond properly, unable to use it.\n %s')
        #         raise ValidationError(message % (field.data, e))


class ValidateNotificationBodyAndTitleWhenURLisSet(object):
@@ -213,37 +305,48 @@ class ValidateAppRiseServers(object):
    def __call__(self, form, field):
        import apprise
        apobj = apprise.Apprise()

        # so that the custom endpoints are registered
        from changedetectionio.apprise_plugin import apprise_custom_api_call_wrapper
        for server_url in field.data:
            if not apobj.add(server_url):
                message = field.gettext('\'%s\' is not a valid AppRise URL.' % (server_url))
            url = server_url.strip()
            if url.startswith("#"):
                continue

            if not apobj.add(url):
                message = field.gettext('\'%s\' is not a valid AppRise URL.' % (url))
                raise ValidationError(message)

class ValidateJinja2Template(object):
    """
    Validates that a {token} is from a valid set
    """
    def __init__(self, message=None):
        self.message = message

    def __call__(self, form, field):
        from changedetectionio import notification

        from jinja2 import Environment, BaseLoader, TemplateSyntaxError, UndefinedError
        from jinja2 import BaseLoader, TemplateSyntaxError, UndefinedError
        from jinja2.sandbox import ImmutableSandboxedEnvironment
        from jinja2.meta import find_undeclared_variables
        import jinja2.exceptions

        # Might be a list of text, or might be just text (like from the apprise url list)
        joined_data = ' '.join(map(str, field.data)) if isinstance(field.data, list) else f"{field.data}"

        try:
            jinja2_env = Environment(loader=BaseLoader)
            jinja2_env = ImmutableSandboxedEnvironment(loader=BaseLoader)
            jinja2_env.globals.update(notification.valid_tokens)
            # Extra validation tokens provided on the form_class(... extra_tokens={}) setup
            if hasattr(field, 'extra_notification_tokens'):
                jinja2_env.globals.update(field.extra_notification_tokens)

            rendered = jinja2_env.from_string(field.data).render()
            jinja2_env.from_string(joined_data).render()
        except TemplateSyntaxError as e:
            raise ValidationError(f"This is not a valid Jinja2 template: {e}") from e
        except UndefinedError as e:
            raise ValidationError(f"A variable or function is not defined: {e}") from e
        except jinja2.exceptions.SecurityError as e:
            raise ValidationError(f"This is not a valid Jinja2 template: {e}") from e

        ast = jinja2_env.parse(field.data)
        ast = jinja2_env.parse(joined_data)
        undefined = ", ".join(find_undeclared_variables(ast))
        if undefined:
            raise ValidationError(
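Why the swap from a plain Environment to ImmutableSandboxedEnvironment matters, shown in isolation (the token name is invented for the demo; this is standard jinja2 sandbox behaviour):

from jinja2 import BaseLoader
from jinja2.exceptions import SecurityError
from jinja2.sandbox import ImmutableSandboxedEnvironment

env = ImmutableSandboxedEnvironment(loader=BaseLoader)
env.from_string("{{ watch_url }} had a change.").render(watch_url="https://example.com")
# -> 'https://example.com had a change.'

try:
    # Attribute probing that a plain Environment would happily evaluate
    env.from_string("{{ watch_url.__class__ }}").render(watch_url="x")
except SecurityError as e:
    print("blocked:", e)  # underscore attributes are rejected by the sandbox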
@@ -260,19 +363,24 @@ class validateURL(object):
        self.message = message

    def __call__(self, form, field):
        import validators
        # If hosts that only contain alphanumerics are allowed ("localhost" for example)
        allow_simplehost = not strtobool(os.getenv('BLOCK_SIMPLEHOSTS', 'False'))
        try:
            validators.url(field.data.strip(), simple_host=allow_simplehost)
        except validators.ValidationFailure:
            message = field.gettext('\'%s\' is not a valid URL.' % (field.data.strip()))
            raise ValidationError(message)
        # This should raise a ValidationError() or not
        validate_url(field.data)

        from .model.Watch import is_safe_url
        if not is_safe_url(field.data):
            raise ValidationError('Watch protocol is not permitted by SAFE_PROTOCOL_REGEX')

def validate_url(test_url):
    # If hosts that only contain alphanumerics are allowed ("localhost" for example)
    try:
        url_validator(test_url, simple_host=allow_simplehost)
    except validators.ValidationError:
        # @todo check for xss
        message = f"'{test_url}' is not a valid URL."
        # This should be wtforms.validators.
        raise ValidationError(message)

    from .model.Watch import is_safe_url
    if not is_safe_url(test_url):
        # This should be wtforms.validators.
        raise ValidationError('Watch protocol is not permitted by SAFE_PROTOCOL_REGEX or incorrect URL format')

class ValidateListRegex(object):
    """
@@ -284,11 +392,10 @@ class ValidateListRegex(object):
    def __call__(self, form, field):

        for line in field.data:
            if line[0] == '/' and line[-1] == '/':
                # Because internally we don't wrap in /
                line = line.strip('/')
            if re.search(html_tools.PERL_STYLE_REGEX, line, re.IGNORECASE):
                try:
                    re.compile(line)
                    regex = html_tools.perl_style_slash_enclosed_regex_to_options(line)
                    re.compile(regex)
                except re.error:
                    message = field.gettext('RegEx \'%s\' is not a valid regular expression.')
                    raise ValidationError(message % (line))
@@ -317,11 +424,30 @@ class ValidateCSSJSONXPATHInput(object):
                return

            # Does it look like XPath?
            if line.strip()[0] == '/':
            if line.strip()[0] == '/' or line.strip().startswith('xpath:'):
                if not self.allow_xpath:
                    raise ValidationError("XPath not permitted in this field!")
                from lxml import etree, html
                import elementpath
                # xpath 2.0-3.1
                from elementpath.xpath3 import XPath3Parser
                tree = html.fromstring("<html></html>")
                line = line.replace('xpath:', '')

                try:
                    elementpath.select(tree, line.strip(), parser=XPath3Parser)
                except elementpath.ElementPathError as e:
                    message = field.gettext('\'%s\' is not a valid XPath expression. (%s)')
                    raise ValidationError(message % (line, str(e)))
                except:
                    raise ValidationError("A system-error occurred when validating your XPath expression")

            if line.strip().startswith('xpath1:'):
                if not self.allow_xpath:
                    raise ValidationError("XPath not permitted in this field!")
                from lxml import etree, html
                tree = html.fromstring("<html></html>")
                line = re.sub(r'^xpath1:', '', line)

                try:
                    tree.xpath(line.strip())
@@ -385,19 +511,31 @@ class quickWatchForm(Form):

# Common to a single watch and the global settings
class commonSettingsForm(Form):
    from . import processors

    notification_urls = StringListField('Notification URL List', validators=[validators.Optional(), ValidateAppRiseServers()])
    notification_title = StringField('Notification Title', default='ChangeDetection.io Notification - {{ watch_url }}', validators=[validators.Optional(), ValidateJinja2Template()])
    def __init__(self, formdata=None, obj=None, prefix="", data=None, meta=None, **kwargs):
        super().__init__(formdata, obj, prefix, data, meta, **kwargs)
        self.notification_body.extra_notification_tokens = kwargs.get('extra_notification_tokens', {})
        self.notification_title.extra_notification_tokens = kwargs.get('extra_notification_tokens', {})
        self.notification_urls.extra_notification_tokens = kwargs.get('extra_notification_tokens', {})

    extract_title_as_title = BooleanField('Extract <title> from document and use as watch title', default=False)
    fetch_backend = RadioField(u'Fetch Method', choices=content_fetchers.available_fetchers(), validators=[ValidateContentFetcherIsReady()])
    notification_body = TextAreaField('Notification Body', default='{{ watch_url }} had a change.', validators=[validators.Optional(), ValidateJinja2Template()])
    notification_format = SelectField('Notification format', choices=valid_notification_formats.keys())
    fetch_backend = RadioField(u'Fetch Method', choices=content_fetcher.available_fetchers(), validators=[ValidateContentFetcherIsReady()])
    extract_title_as_title = BooleanField('Extract <title> from document and use as watch title', default=False)
    webdriver_delay = IntegerField('Wait seconds before extracting text', validators=[validators.Optional(), validators.NumberRange(min=1,
                                                                                                                                    message="Should contain one or more seconds")])
    notification_title = StringField('Notification Title', default='ChangeDetection.io Notification - {{ watch_url }}', validators=[validators.Optional(), ValidateJinja2Template()])
    notification_urls = StringListField('Notification URL List', validators=[validators.Optional(), ValidateAppRiseServers(), ValidateJinja2Template()])
    processor = RadioField(label=u"Processor - What do you want to achieve?", choices=processors.available_processors(), default="text_json_diff")
    timezone = StringField("Timezone for watch schedule", render_kw={"list": "timezones"}, validators=[validateTimeZoneName()])
    webdriver_delay = IntegerField('Wait seconds before extracting text', validators=[validators.Optional(), validators.NumberRange(min=1, message="Should contain one or more seconds")])


class importForm(Form):
    from . import processors
    processor = RadioField(u'Processor', choices=processors.available_processors(), default="text_json_diff")
    urls = TextAreaField('URLs')
    xlsx_file = FileField('Upload .xlsx file', validators=[FileAllowed(['xlsx'], 'Must be .xlsx file!')])
    file_mapping = SelectField('File mapping', [validators.DataRequired()], choices={('wachete', 'Wachete mapping'), ('custom', 'Custom mapping')})

class SingleBrowserStep(Form):

@@ -410,42 +548,46 @@ class SingleBrowserStep(Form):
    # remove_button = SubmitField('-', render_kw={"type": "button", "class": "pure-button pure-button-primary", 'title': 'Remove'})
    # add_button = SubmitField('+', render_kw={"type": "button", "class": "pure-button pure-button-primary", 'title': 'Add new step after'})

class watchForm(commonSettingsForm):
class processor_text_json_diff_form(commonSettingsForm):

    url = fields.URLField('URL', validators=[validateURL()])
    tags = StringTagUUID('Group tag', [validators.Optional()], default='')

    time_between_check = FormField(TimeBetweenCheckForm)

    time_schedule_limit = FormField(ScheduleLimitForm)

    time_between_check_use_default = BooleanField('Use global settings for time between check', default=False)

    include_filters = StringListField('CSS/JSONPath/JQ/XPath Filters', [ValidateCSSJSONXPATHInput()], default='')

    subtractive_selectors = StringListField('Remove elements', [ValidateCSSJSONXPATHInput(allow_xpath=False, allow_json=False)])
    subtractive_selectors = StringListField('Remove elements', [ValidateCSSJSONXPATHInput(allow_json=False)])

    extract_text = StringListField('Extract text', [ValidateListRegex()])

    title = StringField('Title', default='')

    ignore_text = StringListField('Ignore text', [ValidateListRegex()])
    ignore_text = StringListField('Ignore lines containing', [ValidateListRegex()])
    headers = StringDictKeyValue('Request headers')
    body = TextAreaField('Request body', [validators.Optional()])
    method = SelectField('Request method', choices=valid_method, default=default_method)
    ignore_status_codes = BooleanField('Ignore status codes (process non-2xx status codes as normal)', default=False)
    check_unique_lines = BooleanField('Only trigger when unique lines appear', default=False)
    check_unique_lines = BooleanField('Only trigger when unique lines appear in all history', default=False)
    remove_duplicate_lines = BooleanField('Remove duplicate lines of text', default=False)
    sort_text_alphabetically = BooleanField('Sort text alphabetically', default=False)
    trim_text_whitespace = BooleanField('Trim whitespace before and after text', default=False)

    filter_text_added = BooleanField('Added lines', default=True)
    filter_text_replaced = BooleanField('Replaced/changed lines', default=True)
    filter_text_removed = BooleanField('Removed lines', default=True)

    # @todo this class could be moved to its own text_json_diff_watchForm and this goes to restock_diff_Watchform perhaps
    in_stock_only = BooleanField('Only trigger when product goes BACK to in-stock', default=True)

    trigger_text = StringListField('Trigger/wait for text', [validators.Optional(), ValidateListRegex()])
    if os.getenv("PLAYWRIGHT_DRIVER_URL"):
        browser_steps = FieldList(FormField(SingleBrowserStep), min_entries=10)
    text_should_not_be_present = StringListField('Block change-detection while text matches', [validators.Optional(), ValidateListRegex()])
    webdriver_js_execute_code = TextAreaField('Execute JavaScript before change detection', render_kw={"rows": "5"}, validators=[validators.Optional()])

    save_button = SubmitField('Save', render_kw={"class": "pure-button pure-button-primary"})
    save_button = SubmitField('Save', render_kw={"class": "pure-button button-small pure-button-primary"})

    proxy = RadioField('Proxy')
    filter_failure_notification_send = BooleanField(
@@ -454,10 +596,17 @@ class watchForm(commonSettingsForm):
    notification_muted = BooleanField('Notifications Muted / Off', default=False)
    notification_screenshot = BooleanField('Attach screenshot to notification (where possible)', default=False)

    def extra_tab_content(self):
        return None

    def extra_form_content(self):
        return None

    def validate(self, **kwargs):
        if not super().validate():
            return False

        from changedetectionio.safe_jinja import render as jinja_render
        result = True

        # Fail form validation when a body is set for a GET
@@ -466,32 +615,95 @@ class watchForm(commonSettingsForm):
            result = False

        # Attempt to validate jinja2 templates in the URL
        from jinja2 import Environment
        # Jinja2 available in URLs along with https://pypi.org/project/jinja2-time/
        jinja2_env = Environment(extensions=['jinja2_time.TimeExtension'])
        try:
            ready_url = str(jinja2_env.from_string(self.url.data).render())
        except Exception as e:
            self.url.errors.append('Invalid template syntax')
            jinja_render(template_str=self.url.data)
        except ModuleNotFoundError as e:
            # in case jinja2_time or others is missing
            logger.error(e)
            self.url.errors.append(f'Invalid template syntax configuration: {e}')
            result = False
        except Exception as e:
            logger.error(e)
            self.url.errors.append(f'Invalid template syntax: {e}')
            result = False

        # Attempt to validate jinja2 templates in the body
        if self.body.data and self.body.data.strip():
            try:
                jinja_render(template_str=self.body.data)
            except ModuleNotFoundError as e:
                # in case jinja2_time or others is missing
                logger.error(e)
                self.body.errors.append(f'Invalid template syntax configuration: {e}')
                result = False
            except Exception as e:
                logger.error(e)
                self.body.errors.append(f'Invalid template syntax: {e}')
                result = False

        # Attempt to validate jinja2 templates in the headers
        if len(self.headers.data) > 0:
            try:
                for header, value in self.headers.data.items():
                    jinja_render(template_str=value)
            except ModuleNotFoundError as e:
                # in case jinja2_time or others is missing
                logger.error(e)
                self.headers.errors.append(f'Invalid template syntax configuration: {e}')
                result = False
            except Exception as e:
                logger.error(e)
                self.headers.errors.append(f'Invalid template syntax in "{header}" header: {e}')
                result = False

        return result

    def __init__(
            self,
            formdata=None,
            obj=None,
            prefix="",
            data=None,
            meta=None,
            **kwargs,
    ):
        super().__init__(formdata, obj, prefix, data, meta, **kwargs)
        if kwargs and kwargs.get('default_system_settings'):
            default_tz = kwargs.get('default_system_settings').get('application', {}).get('timezone')
            if default_tz:
                self.time_schedule_limit.form.timezone.render_kw['placeholder'] = default_tz



class SingleExtraProxy(Form):

    # maybe better to set some <script>var..
    proxy_name = StringField('Name', [validators.Optional()], render_kw={"placeholder": "Name"})
    proxy_url = StringField('Proxy URL', [validators.Optional()], render_kw={"placeholder": "http://user:pass@...:3128", "size":50})
    proxy_url = StringField('Proxy URL', [validators.Optional()], render_kw={"placeholder": "socks5:// or regular proxy http://user:pass@...:3128", "size":50})
    # @todo do the validation here instead

class SingleExtraBrowser(Form):
    browser_name = StringField('Name', [validators.Optional()], render_kw={"placeholder": "Name"})
    browser_connection_url = StringField('Browser connection URL', [validators.Optional()], render_kw={"placeholder": "wss://brightdata... wss://oxylabs etc", "size":50})
    # @todo do the validation here instead

class DefaultUAInputForm(Form):
    html_requests = StringField('Plaintext requests', validators=[validators.Optional()], render_kw={"placeholder": "<default>"})
    if os.getenv("PLAYWRIGHT_DRIVER_URL") or os.getenv("WEBDRIVER_URL"):
        html_webdriver = StringField('Chrome requests', validators=[validators.Optional()], render_kw={"placeholder": "<default>"})

# datastore.data['settings']['requests']..
class globalSettingsRequestForm(Form):
    time_between_check = FormField(TimeBetweenCheckForm)
    time_schedule_limit = FormField(ScheduleLimitForm)
    proxy = RadioField('Proxy')
    jitter_seconds = IntegerField('Random jitter seconds ± check',
                                  render_kw={"style": "width: 5em;"},
                                  validators=[validators.NumberRange(min=0, message="Should contain zero or more seconds")])
    extra_proxies = FieldList(FormField(SingleExtraProxy), min_entries=5)
    extra_browsers = FieldList(FormField(SingleExtraBrowser), min_entries=5)

    default_ua = FormField(DefaultUAInputForm, label="Default User-Agent overrides")

    def validate_extra_proxies(self, extra_validators=None):
        for e in self.data['extra_proxies']:
@@ -510,9 +722,9 @@ class globalSettingsApplicationForm(commonSettingsForm):
        render_kw={"placeholder": os.getenv('BASE_URL', 'Not set')}
    )
    empty_pages_are_a_change = BooleanField('Treat empty pages as a change?', default=False)
    fetch_backend = RadioField('Fetch Method', default="html_requests", choices=content_fetcher.available_fetchers(), validators=[ValidateContentFetcherIsReady()])
    fetch_backend = RadioField('Fetch Method', default="html_requests", choices=content_fetchers.available_fetchers(), validators=[ValidateContentFetcherIsReady()])
    global_ignore_text = StringListField('Ignore Text', [ValidateListRegex()])
    global_subtractive_selectors = StringListField('Remove elements', [ValidateCSSJSONXPATHInput(allow_xpath=False, allow_json=False)])
    global_subtractive_selectors = StringListField('Remove elements', [ValidateCSSJSONXPATHInput(allow_json=False)])
    ignore_whitespace = BooleanField('Ignore whitespace')
    password = SaltyPasswordField()
    pager_size = IntegerField('Pager size',
@@ -522,6 +734,8 @@ class globalSettingsApplicationForm(commonSettingsForm):
    removepassword_button = SubmitField('Remove password', render_kw={"class": "pure-button pure-button-primary"})
    render_anchor_tag_content = BooleanField('Render anchor tag content', default=False)
    shared_diff_access = BooleanField('Allow access to view diff page when password is enabled', default=False, validators=[validators.Optional()])
    rss_hide_muted_watches = BooleanField('Hide muted watches from RSS feed', default=True,
                                          validators=[validators.Optional()])
    filter_failure_notification_threshold_attempts = IntegerField('Number of times the filter can be missing before sending a notification',
                                                                  render_kw={"style": "width: 5em;"},
                                                                  validators=[validators.NumberRange(min=0,
@@ -532,10 +746,15 @@ class globalSettingsForm(Form):
    # Define these as FormFields/"sub forms", this way it matches the JSON storage
    # datastore.data['settings']['application']..
    # datastore.data['settings']['requests']..
    def __init__(self, formdata=None, obj=None, prefix="", data=None, meta=None, **kwargs):
        super().__init__(formdata, obj, prefix, data, meta, **kwargs)
        self.application.notification_body.extra_notification_tokens = kwargs.get('extra_notification_tokens', {})
        self.application.notification_title.extra_notification_tokens = kwargs.get('extra_notification_tokens', {})
        self.application.notification_urls.extra_notification_tokens = kwargs.get('extra_notification_tokens', {})

    requests = FormField(globalSettingsRequestForm)
    application = FormField(globalSettingsApplicationForm)
    save_button = SubmitField('Save', render_kw={"class": "pure-button pure-button-primary"})
    save_button = SubmitField('Save', render_kw={"class": "pure-button button-small pure-button-primary"})


class extractDataForm(Form):
@@ -1,17 +1,14 @@

from bs4 import BeautifulSoup
from inscriptis import get_text
from inscriptis.model.config import ParserConfig
from jsonpath_ng.ext import parse
from loguru import logger
from lxml import etree
from typing import List
import json
import re


# HTML added to be sure each result matching a filter (.example) gets converted to a new line by Inscriptis
TEXT_FILTER_LIST_LINE_SUFFIX = "<br>"

TRANSLATE_WHITESPACE_TABLE = str.maketrans('', '', '\r\n\t ')
PERL_STYLE_REGEX = r'^/(.*?)/([a-z]*)?$'

# 'price', 'lowPrice', 'highPrice' are usually under here
# All of those may or may not appear on different websites - I didn't find a way to do case-insensitive searching here
LD_JSON_PRODUCT_OFFER_SELECTORS = ["json:$..offers", "json:$..Offers"]
@@ -38,6 +35,7 @@ def perl_style_slash_enclosed_regex_to_options(regex):

# Given a CSS Rule, and a blob of HTML, return the blob of HTML that matches
def include_filters(include_filters, html_content, append_pretty_line_formatting=False):
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(html_content, "html.parser")
    html_block = ""
    r = soup.select(include_filters, separator="")
@@ -55,23 +53,158 @@ def include_filters(include_filters, html_content, append_pretty_line_formatting
    return html_block

def subtractive_css_selector(css_selector, html_content):
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(html_content, "html.parser")
    for item in soup.select(css_selector):

    # So that the elements don't shift their index, build a list of elements here which will be pointers to their place in the DOM
    elements_to_remove = soup.select(css_selector)

    # Then, remove them in a separate loop
    for item in elements_to_remove:
        item.decompose()

    return str(soup)

def subtractive_xpath_selector(selectors: List[str], html_content: str) -> str:
    # Parse the HTML content using lxml
    html_tree = etree.HTML(html_content)

    # First, collect all elements to remove
    elements_to_remove = []

    # Iterate over the list of XPath selectors
    for selector in selectors:
        # Collect elements for each selector
        elements_to_remove.extend(html_tree.xpath(selector))

    # Then, remove them in a separate loop
    for element in elements_to_remove:
        if element.getparent() is not None:  # Ensure the element has a parent before removing
            element.getparent().remove(element)

    # Convert the modified HTML tree back to a string
    modified_html = etree.tostring(html_tree, method="html").decode("utf-8")
    return modified_html


def element_removal(selectors: List[str], html_content):
    """Joins individual filters into one css filter."""
    selector = ",".join(selectors)
    return subtractive_css_selector(selector, html_content)
    """Removes elements that match a list of CSS or XPath selectors."""
    modified_html = html_content
    css_selectors = []
    xpath_selectors = []

    for selector in selectors:
        if selector.startswith(('xpath:', 'xpath1:', '//')):
            # Handle XPath selectors separately
            xpath_selector = selector.removeprefix('xpath:').removeprefix('xpath1:')
            xpath_selectors.append(xpath_selector)
        else:
            # Collect CSS selectors as one "hit", see comment in subtractive_css_selector
            css_selectors.append(selector.strip().strip(","))

    if xpath_selectors:
        modified_html = subtractive_xpath_selector(xpath_selectors, modified_html)

    if css_selectors:
        # Remove duplicates, then combine all CSS selectors into one string, separated by commas
        # This stops the element index shifting
        unique_selectors = list(set(css_selectors))  # Ensure uniqueness
        combined_css_selector = " , ".join(unique_selectors)
        modified_html = subtractive_css_selector(combined_css_selector, modified_html)

    return modified_html
|
||||
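The rewritten element_removal splits mixed selector lists into one combined CSS pass and one XPath pass, collecting nodes before deleting so indexes never shift mid-iteration. A rough usage sketch, assuming bs4 and lxml are installed:

# Mixed CSS and XPath removal in one call
html = '<div><p class="ad">buy now</p><p>keep me</p><span id="x">gone</span></div>'
selectors = ['.ad', 'xpath://span[@id="x"]']

cleaned = element_removal(selectors, html)
# The '.ad' paragraph is removed by the combined CSS pass,
# the span by the XPath pass; 'keep me' survives.
assert 'keep me' in cleaned and 'buy now' not in cleaned and 'gone' not in cleaned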
 def elementpath_tostring(obj):
     """
     change elementpath.select results to string type
     # The MIT License (MIT), Copyright (c), 2018-2021, SISSA (Scuola Internazionale Superiore di Studi Avanzati)
     # https://github.com/sissaschool/elementpath/blob/dfcc2fd3d6011b16e02bf30459a7924f547b47d0/elementpath/xpath_tokens.py#L1038
     """

     import elementpath
     from decimal import Decimal
     import math

     if obj is None:
         return ''
     # https://elementpath.readthedocs.io/en/latest/xpath_api.html#elementpath.select
     elif isinstance(obj, elementpath.XPathNode):
         return obj.string_value
     elif isinstance(obj, bool):
         return 'true' if obj else 'false'
     elif isinstance(obj, Decimal):
         value = format(obj, 'f')
         if '.' in value:
             return value.rstrip('0').rstrip('.')
         return value

     elif isinstance(obj, float):
         if math.isnan(obj):
             return 'NaN'
         elif math.isinf(obj):
             return str(obj).upper()

         value = str(obj)
         if '.' in value:
             value = value.rstrip('0').rstrip('.')
         if '+' in value:
             value = value.replace('+', '')
         if 'e' in value:
             return value.upper()
         return value

     return str(obj)

 # Return str Utf-8 of matched rules
-def xpath_filter(xpath_filter, html_content, append_pretty_line_formatting=False):
+def xpath_filter(xpath_filter, html_content, append_pretty_line_formatting=False, is_rss=False):
     from lxml import etree, html
+    import elementpath
+    # xpath 2.0-3.1
+    from elementpath.xpath3 import XPath3Parser
+
+    parser = etree.HTMLParser()
+    if is_rss:
+        # So that we can keep CDATA for cdata_in_document_to_text() to process
+        parser = etree.XMLParser(strip_cdata=False)

-    tree = html.fromstring(bytes(html_content, encoding='utf-8'))
+    tree = html.fromstring(bytes(html_content, encoding='utf-8'), parser=parser)
     html_block = ""

-    r = tree.xpath(xpath_filter.strip(), namespaces={'re': 'http://exslt.org/regular-expressions'})
+    r = elementpath.select(tree, xpath_filter.strip(), namespaces={'re': 'http://exslt.org/regular-expressions'}, parser=XPath3Parser)
+    #@note: //title/text() wont work where <title>CDATA..
+
+    if type(r) != list:
+        r = [r]

     for element in r:
         # When there's more than 1 match, then add the suffix to separate each line
         # And where the matched result doesn't include something that will cause Inscriptis to add a newline
         # (This way each 'match' reliably has a new-line in the diff)
         # Divs are converted to 4 whitespaces by inscriptis
         if append_pretty_line_formatting and len(html_block) and (not hasattr( element, 'tag' ) or not element.tag in (['br', 'hr', 'div', 'p'])):
             html_block += TEXT_FILTER_LIST_LINE_SUFFIX

+        if type(element) == str:
+            html_block += element
+        elif issubclass(type(element), etree._Element) or issubclass(type(element), etree._ElementTree):
+            html_block += etree.tostring(element, pretty_print=True).decode('utf-8')
+        else:
+            html_block += elementpath_tostring(element)

     return html_block

 # Return str Utf-8 of matched rules
+# 'xpath1:'
+def xpath1_filter(xpath_filter, html_content, append_pretty_line_formatting=False, is_rss=False):
     from lxml import etree, html

-    tree = html.fromstring(bytes(html_content, encoding='utf-8'))
+    parser = None
+    if is_rss:
+        # So that we can keep CDATA for cdata_in_document_to_text() to process
+        parser = etree.XMLParser(strip_cdata=False)
+
+    tree = html.fromstring(bytes(html_content, encoding='utf-8'), parser=parser)
     html_block = ""

     r = tree.xpath(xpath_filter.strip(), namespaces={'re': 'http://exslt.org/regular-expressions'})
@@ -82,21 +215,21 @@ def xpath_filter(xpath_filter, html_content, append_pretty_line_formatting=False
     # And where the matched result doesn't include something that will cause Inscriptis to add a newline
     # (This way each 'match' reliably has a new-line in the diff)
     # Divs are converted to 4 whitespaces by inscriptis
-    if append_pretty_line_formatting and len(html_block) and (not hasattr( element, 'tag' ) or not element.tag in (['br', 'hr', 'div', 'p'])):
+    if append_pretty_line_formatting and len(html_block) and (not hasattr(element, 'tag') or not element.tag in (['br', 'hr', 'div', 'p'])):
         html_block += TEXT_FILTER_LIST_LINE_SUFFIX

-    if type(element) == etree._ElementStringResult:
-        html_block += str(element)
-    elif type(element) == etree._ElementUnicodeResult:
-        html_block += str(element)
+    # Some kind of text, UTF-8 or other
+    if isinstance(element, (str, bytes)):
+        html_block += element
     else:
         # Return the HTML which will get parsed as text
         html_block += etree.tostring(element, pretty_print=True).decode('utf-8')

     return html_block

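`xpath_filter` now routes everything through elementpath's XPath 3 parser, so expressions that return atomic values work alongside node sets, while `xpath1_filter` keeps the faster native lxml 1.0 engine for the `xpath1:` prefix. A quick sketch of the difference, under the function definitions shown above:

html = '<html><body><p>Price: 10</p><p>Price: 20</p></body></html>'

# XPath 2.0+ syntax (ends-with) only works through the elementpath route
r = xpath_filter('//p[ends-with(text(), "20")]/text()', html)

# Plain node selection goes through the native lxml XPath 1.0 engine
r1 = xpath1_filter('//p[1]/text()', html)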
 # Extract/find element
 def extract_element(find='title', html_content=''):
+    from bs4 import BeautifulSoup

     #Re #106, be sure to handle when its not found
     element_text = None
@@ -110,12 +243,14 @@ def extract_element(find='title', html_content=''):

 #
 def _parse_json(json_data, json_filter):
-    if 'json:' in json_filter:
+    from jsonpath_ng.ext import parse
+
+    if json_filter.startswith("json:"):
         jsonpath_expression = parse(json_filter.replace('json:', ''))
         match = jsonpath_expression.find(json_data)
         return _get_stripped_text_from_json_match(match)

-    if 'jq:' in json_filter:
+    if json_filter.startswith("jq:") or json_filter.startswith("jqraw:"):

         try:
             import jq
@@ -123,10 +258,15 @@ def _parse_json(json_data, json_filter):
             # `jq` requires full compilation in windows and so isn't generally available
             raise Exception("jq not support not found")

-        jq_expression = jq.compile(json_filter.replace('jq:', ''))
-        match = jq_expression.input(json_data).all()
+        if json_filter.startswith("jq:"):
+            jq_expression = jq.compile(json_filter.removeprefix("jq:"))
+            match = jq_expression.input(json_data).all()
+            return _get_stripped_text_from_json_match(match)

-        return _get_stripped_text_from_json_match(match)
+        if json_filter.startswith("jqraw:"):
+            jq_expression = jq.compile(json_filter.removeprefix("jqraw:"))
+            match = jq_expression.input(json_data).all()
+            return '\n'.join(str(item) for item in match)

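The `jq:` prefix keeps the old behavior (results go through `_get_stripped_text_from_json_match`), while the new `jqraw:` returns each result's `str()` joined by newlines, leaving quoting and structure untouched. A sketch, assuming the optional `jq` package is installed:

import jq, json

data = json.loads('{"items": [{"name": "a"}, {"name": "b"}]}')

# jqraw:-style handling: str() of each match, one per line
matches = jq.compile('.items[].name').input(data).all()
print('\n'.join(str(item) for item in matches))   # prints "a" then "b"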
 def _get_stripped_text_from_json_match(match):
     s = []
@@ -153,12 +293,16 @@ def _get_stripped_text_from_json_match(match):
 # json_filter - ie json:$..price
 # ensure_is_ldjson_info_type - str "product", optional, "@type == product" (I dont know how to do that as a json selector)
 def extract_json_as_string(content, json_filter, ensure_is_ldjson_info_type=None):
+    from bs4 import BeautifulSoup
+
     stripped_text_from_html = False
+    # https://github.com/dgtlmoon/changedetection.io/pull/2041#issuecomment-1848397161w
+    # Try to parse/filter out the JSON, if we get some parser error, then maybe it's embedded within HTML tags
     try:
-        stripped_text_from_html = _parse_json(json.loads(content), json_filter)
-    except json.JSONDecodeError:
+        # .lstrip("\ufeff") strings ByteOrderMark from UTF8 and still lets the UTF work
+        stripped_text_from_html = _parse_json(json.loads(content.lstrip("\ufeff") ), json_filter)
+    except json.JSONDecodeError as e:
+        logger.warning(str(e))

         # Foreach <script json></script> blob.. just return the first that matches json_filter
         # As a last resort, try to parse the whole <body>
@@ -193,17 +337,19 @@ def extract_json_as_string(content, json_filter, ensure_is_ldjson_info_type=None
             if isinstance(json_data, dict):
                 # If it has LD JSON 'key' @type, and @type is 'product', and something was found for the search
                 # (Some sites have multiple of the same ld+json @type='product', but some have the review part, some have the 'price' part)
-                # @type could also be a list (Product, SubType)
+                # @type could also be a list although non-standard ("@type": ["Product", "SubType"],)
                 # LD_JSON auto-extract also requires some content PLUS the ldjson to be present
                 # 1833 - could be either str or dict, should not be anything else
-                if json_data.get('@type') and stripped_text_from_html:
-                    try:
-                        if json_data.get('@type') == str or json_data.get('@type') == dict:
-                            types = [json_data.get('@type')] if isinstance(json_data.get('@type'), str) else json_data.get('@type')
-                            if ensure_is_ldjson_info_type.lower() in [x.lower().strip() for x in types]:
-                                break
-                    except:
-                        continue
+                t = json_data.get('@type')
+                if t and stripped_text_from_html:
+                    if isinstance(t, str) and t.lower() == ensure_is_ldjson_info_type.lower():
+                        break
+                    # The non-standard part, some have a list
+                    elif isinstance(t, list):
+                        if ensure_is_ldjson_info_type.lower() in [x.lower().strip() for x in t]:
+                            break

             elif stripped_text_from_html:
                 break

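The rewritten check treats `@type` uniformly: compare a plain string case-insensitively, or membership-test when a site ships the non-standard list form. The core of it reduces to:

def type_matches(t, wanted):
    # t is ld+json '@type': either "Product" or, non-standard, ["Product", "SubType"]
    if isinstance(t, str):
        return t.lower() == wanted.lower()
    if isinstance(t, list):
        return wanted.lower() in [x.lower().strip() for x in t]
    return False

assert type_matches("Product", "product")
assert type_matches(["Product", "SubType"], "product")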
@@ -218,6 +364,7 @@ def extract_json_as_string(content, json_filter, ensure_is_ldjson_info_type=None
 # - "line numbers" return a list of line numbers that match (int list)
 #
 # wordlist - list of regex's (str) or words (str)
+# Preserves all linefeeds and other whitespacing, its not the job of this to remove that
 def strip_ignore_text(content, wordlist, mode="content"):
     i = 0
     output = []
@@ -233,35 +380,44 @@ def strip_ignore_text(content, wordlist, mode="content"):
         else:
             ignore_text.append(k.strip())

-    for line in content.splitlines():
+    for line in content.splitlines(keepends=True):
         i += 1
         # Always ignore blank lines in this mode. (when this function gets called)
+        got_match = False
         if len(line.strip()):
             for l in ignore_text:
                 if l.lower() in line.lower():
                     got_match = True

             if not got_match:
                 for r in ignore_regex:
                     if r.search(line):
                         got_match = True

-            if not got_match:
-                # Not ignored
-                output.append(line.encode('utf8'))
-            else:
-                ignored_line_numbers.append(i)
+        if not got_match:
+            # Not ignored, and should preserve "keepends"
+            output.append(line)
+        else:
+            ignored_line_numbers.append(i)

     # Used for finding out what to highlight
     if mode == "line numbers":
         return ignored_line_numbers

-    return "\n".encode('utf8').join(output)
+    return ''.join(output)

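Switching to `splitlines(keepends=True)` plus `''.join(output)` means the function no longer re-synthesizes line endings, so `\r\n` documents round-trip byte-for-byte and the comparison checksum upstream stays stable. For example:

content = "keep me\r\nIGNORE this line\r\nkeep too\r\n"
out = strip_ignore_text(content, ['ignore'])
# Original CRLF endings are preserved on the surviving lines
assert out == "keep me\r\nkeep too\r\n"
# "line numbers" mode reports which lines were ignored (1-indexed)
assert strip_ignore_text(content, ['ignore'], mode="line numbers") == [2]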
+def cdata_in_document_to_text(html_content: str, render_anchor_tag_content=False) -> str:
+    from xml.sax.saxutils import escape as xml_escape
+    pattern = '<!\[CDATA\[(\s*(?:.(?<!\]\]>)\s*)*)\]\]>'
+    def repl(m):
+        text = m.group(1)
+        return xml_escape(html_to_text(html_content=text)).strip()
+
+    return re.sub(pattern, repl, html_content)

-def html_to_text(html_content: str, render_anchor_tag_content=False) -> str:
+def html_to_text(html_content: str, render_anchor_tag_content=False, is_rss=False) -> str:
+    from inscriptis import get_text
+    from inscriptis.model.config import ParserConfig
+
     """Converts html string to a string with just the text. If ignoring
     rendering anchor tag content is enable, anchor tag content are also
     included in the text
@@ -277,16 +433,21 @@ def html_to_text(html_content: str, render_anchor_tag_content=False) -> str:
     # if anchor tag content flag is set to True define a config for
     # extracting this content
     if render_anchor_tag_content:
         parser_config = ParserConfig(
-            annotation_rules={"a": ["hyperlink"]}, display_links=True
+            annotation_rules={"a": ["hyperlink"]},
+            display_links=True
         )

-    # otherwise set config to None
+    # otherwise set config to None/default
     else:
         parser_config = None

-    # get text and annotations via inscriptis
+    # RSS Mode - Inscriptis will treat `title` as something else.
+    # Make it as a regular block display element (//item/title)
+    # This is a bit of a hack - the real way it to use XSLT to convert it to HTML #1874
+    if is_rss:
+        html_content = re.sub(r'<title([\s>])', r'<h1\1', html_content)
+        html_content = re.sub(r'</title>', r'</h1>', html_content)

     text_content = get_text(html_content, config=parser_config)

     return text_content
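The `is_rss` flag works around Inscriptis treating `<title>` as document metadata rather than visible text: each `//item/title` is rewritten to an `<h1>` block element before conversion. The substitution on its own:

import re

rss_fragment = '<item><title>New release 0.46</title></item>'
html_fragment = re.sub(r'<title([\s>])', r'<h1\1', rss_fragment)
html_fragment = re.sub(r'</title>', r'</h1>', html_fragment)
assert html_fragment == '<item><h1>New release 0.46</h1></item>'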
@@ -294,22 +455,23 @@ def html_to_text(html_content: str, render_anchor_tag_content=False) -> str:

 # Does LD+JSON exist with a @type=='product' and a .price set anywhere?
 def has_ldjson_product_info(content):
-    pricing_data = ''
-
     try:
-        if not 'application/ld+json' in content:
-            return False
-
-        for filter in LD_JSON_PRODUCT_OFFER_SELECTORS:
-            pricing_data += extract_json_as_string(content=content,
-                                                   json_filter=filter,
-                                                   ensure_is_ldjson_info_type="product")
+        lc = content.lower()
+        if 'application/ld+json' in lc and lc.count('"price"') == 1 and '"pricecurrency"' in lc:
+            return True
+
+        # On some pages this is really terribly expensive when they dont really need it
+        # (For example you never want price monitoring, but this runs on every watch to suggest it)
+        # for filter in LD_JSON_PRODUCT_OFFER_SELECTORS:
+        #     pricing_data += extract_json_as_string(content=content,
+        #                                            json_filter=filter,
+        #                                            ensure_is_ldjson_info_type="product")
     except Exception as e:
-        # Totally fine
+        # OK too
         return False
-    x=bool(pricing_data)
-    return x

+    return False


 def workarounds_for_obfuscations(content):

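has_ldjson_product_info (above) swaps the expensive JSONPath scan for a cheap lowercase substring test, since it runs on every watch only to suggest price tracking; the commented-out selector loop stays behind as a reference. The heuristic in isolation:

def looks_like_product_page(content: str) -> bool:
    # Cheap pre-check: ld+json present, exactly one "price" key, and a currency key
    lc = content.lower()
    return 'application/ld+json' in lc and lc.count('"price"') == 1 and '"pricecurrency"' in lc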
@@ -1,6 +1,10 @@
 from abc import ABC, abstractmethod
 import time
+import validators
+from wtforms import ValidationError
+from loguru import logger
+
+from changedetectionio.forms import validate_url


 class Importer():
@@ -12,6 +16,7 @@ class Importer():
         self.new_uuids = []
         self.good = 0
         self.remaining_data = []
+        self.import_profile = None

     @abstractmethod
     def run(self,
@@ -52,7 +57,7 @@ class import_url_list(Importer):

             # Flask wtform validators wont work with basic auth, use validators package
             # Up to 5000 per batch so we dont flood the server
-            # @todo validators.url failed on local hostnames (such as referring to ourself when using browserless)
+            # @todo validators.url will fail when you add your own IP etc
             if len(url) and 'http' in url.lower() and good < 5000:
                 extras = None
                 if processor:
@@ -132,3 +137,167 @@ class import_distill_io_json(Importer):
             good += 1

         flash("{} Imported from Distill.io in {:.2f}s, {} Skipped.".format(len(self.new_uuids), time.time() - now, len(self.remaining_data)))

+
+class import_xlsx_wachete(Importer):
+
+    def run(self,
+            data,
+            flash,
+            datastore,
+            ):
+
+        good = 0
+        now = time.time()
+        self.new_uuids = []
+
+        from openpyxl import load_workbook
+
+        try:
+            wb = load_workbook(data)
+        except Exception as e:
+            # @todo correct except
+            flash("Unable to read export XLSX file, something wrong with the file?", 'error')
+            return
+
+        row_id = 2
+        for row in wb.active.iter_rows(min_row=row_id):
+            try:
+                extras = {}
+                data = {}
+                for cell in row:
+                    if not cell.value:
+                        continue
+                    column_title = wb.active.cell(row=1, column=cell.column).value.strip().lower()
+                    data[column_title] = cell.value
+
+                # Forced switch to webdriver/playwright/etc
+                dynamic_wachet = str(data.get('dynamic wachet', '')).strip().lower()  # Convert bool to str to cover all cases
+                # libreoffice and others can have it as =FALSE() =TRUE(), or bool(true)
+                if 'true' in dynamic_wachet or dynamic_wachet == '1':
+                    extras['fetch_backend'] = 'html_webdriver'
+                elif 'false' in dynamic_wachet or dynamic_wachet == '0':
+                    extras['fetch_backend'] = 'html_requests'
+
+                if data.get('xpath'):
+                    # @todo split by || ?
+                    extras['include_filters'] = [data.get('xpath')]
+                if data.get('name'):
+                    extras['title'] = data.get('name').strip()
+                if data.get('interval (min)'):
+                    minutes = int(data.get('interval (min)'))
+                    hours, minutes = divmod(minutes, 60)
+                    days, hours = divmod(hours, 24)
+                    weeks, days = divmod(days, 7)
+                    extras['time_between_check'] = {'weeks': weeks, 'days': days, 'hours': hours, 'minutes': minutes, 'seconds': 0}
+
+                # At minimum a URL is required.
+                if data.get('url'):
+                    try:
+                        validate_url(data.get('url'))
+                    except ValidationError as e:
+                        logger.error(f">> Import URL error {data.get('url')} {str(e)}")
+                        flash(f"Error processing row number {row_id}, URL value was incorrect, row was skipped.", 'error')
+                        # Don't bother processing anything else on this row
+                        continue
+
+                    new_uuid = datastore.add_watch(url=data['url'].strip(),
+                                                   extras=extras,
+                                                   tag=data.get('folder'),
+                                                   write_to_disk_now=False)
+                    if new_uuid:
+                        # Straight into the queue.
+                        self.new_uuids.append(new_uuid)
+                        good += 1
+            except Exception as e:
+                logger.error(e)
+                flash(f"Error processing row number {row_id}, check all cell data types are correct, row was skipped.", 'error')
+            else:
+                row_id += 1
+
+        flash(
+            "{} imported from Wachete .xlsx in {:.2f}s".format(len(self.new_uuids), time.time() - now))
+
+
+class import_xlsx_custom(Importer):
+
+    def run(self,
+            data,
+            flash,
+            datastore,
+            ):
+
+        good = 0
+        now = time.time()
+        self.new_uuids = []
+
+        from openpyxl import load_workbook
+
+        try:
+            wb = load_workbook(data)
+        except Exception as e:
+            # @todo correct except
+            flash("Unable to read export XLSX file, something wrong with the file?", 'error')
+            return
+
+        # @todo cehck atleast 2 rows, same in other method
+        from .forms import validate_url
+        row_i = 1
+
+        try:
+            for row in wb.active.iter_rows():
+                url = None
+                tags = None
+                extras = {}
+
+                for cell in row:
+                    if not self.import_profile.get(cell.col_idx):
+                        continue
+                    if not cell.value:
+                        continue
+
+                    cell_map = self.import_profile.get(cell.col_idx)
+
+                    cell_val = str(cell.value).strip()  # could be bool
+
+                    if cell_map == 'url':
+                        url = cell.value.strip()
+                        try:
+                            validate_url(url)
+                        except ValidationError as e:
+                            logger.error(f">> Import URL error {url} {str(e)}")
+                            flash(f"Error processing row number {row_i}, URL value was incorrect, row was skipped.", 'error')
+                            # Don't bother processing anything else on this row
+                            url = None
+                            break
+                    elif cell_map == 'tag':
+                        tags = cell.value.strip()
+                    elif cell_map == 'include_filters':
+                        # @todo validate?
+                        extras['include_filters'] = [cell.value.strip()]
+                    elif cell_map == 'interval_minutes':
+                        hours, minutes = divmod(int(cell_val), 60)
+                        days, hours = divmod(hours, 24)
+                        weeks, days = divmod(days, 7)
+                        extras['time_between_check'] = {'weeks': weeks, 'days': days, 'hours': hours, 'minutes': minutes, 'seconds': 0}
+                    else:
+                        extras[cell_map] = cell_val
+
+                # At minimum a URL is required.
+                if url:
+                    new_uuid = datastore.add_watch(url=url,
+                                                   extras=extras,
+                                                   tag=tags,
+                                                   write_to_disk_now=False)
+                    if new_uuid:
+                        # Straight into the queue.
+                        self.new_uuids.append(new_uuid)
+                        good += 1
+        except Exception as e:
+            logger.error(e)
+            flash(f"Error processing row number {row_i}, check all cell data types are correct, row was skipped.", 'error')
+        else:
+            row_i += 1
+
+        flash(
+            "{} imported from custom .xlsx in {:.2f}s".format(len(self.new_uuids), time.time() - now))

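Both importers normalize a raw minute count into the nested `time_between_check` structure with chained `divmod`. Worked through for 10,100 minutes:

minutes = 10100
hours, minutes = divmod(minutes, 60)   # 168 hours, 20 minutes
days, hours = divmod(hours, 24)        # 7 days, 0 hours
weeks, days = divmod(days, 7)          # 1 week, 0 days
assert (weeks, days, hours, minutes) == (1, 0, 0, 20)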
@@ -5,7 +5,9 @@ from changedetectionio.notification import (
     default_notification_title,
 )

+# Equal to or greater than this number of FilterNotFoundInResponse exceptions will trigger a filter-not-found notification
 _FILTER_FAILURE_THRESHOLD_ATTEMPTS_DEFAULT = 6
+DEFAULT_SETTINGS_HEADERS_USERAGENT='Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36'

 class model(dict):
     base_config = {
@@ -16,11 +18,16 @@ class model(dict):
         },
         'requests': {
             'extra_proxies': [], # Configurable extra proxies via the UI
+            'extra_browsers': [], # Configurable extra proxies via the UI
             'jitter_seconds': 0,
             'proxy': None, # Preferred proxy connection
             'time_between_check': {'weeks': None, 'days': None, 'hours': 3, 'minutes': None, 'seconds': None},
             'timeout': int(getenv("DEFAULT_SETTINGS_REQUESTS_TIMEOUT", "45")), # Default 45 seconds
             'workers': int(getenv("DEFAULT_SETTINGS_REQUESTS_WORKERS", "10")), # Number of threads, lower is better for slow connections
+            'default_ua': {
+                'html_requests': getenv("DEFAULT_SETTINGS_HEADERS_USERAGENT", DEFAULT_SETTINGS_HEADERS_USERAGENT),
+                'html_webdriver': None,
+            }
         },
         'application': {
             # Custom notification content
@@ -40,10 +47,13 @@ class model(dict):
             'pager_size': 50,
             'password': False,
             'render_anchor_tag_content': False,
             'rss_access_token': None,
             'rss_hide_muted_watches': True,
             'schema_version' : 0,
             'shared_diff_access': False,
             'webdriver_delay': None , # Extra delay in seconds before extracting text
-            'tags': {} #@todo use Tag.model initialisers
+            'tags': {}, #@todo use Tag.model initialisers
+            'timezone': None, # Default IANA timezone name
         }
     }
 }
@@ -59,7 +69,7 @@ def parse_headers_from_text_file(filepath):
     for l in f.readlines():
         l = l.strip()
         if not l.startswith('#') and ':' in l:
-            (k, v) = l.split(':')
+            (k, v) = l.split(':', 1)  # Split only on the first colon
             headers[k.strip()] = v.strip()

     return headers
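`split(':', 1)` fixes header values that themselves contain colons; with the old `split(':')`, a value holding two or more colons raised a too-many-values unpacking error:

line = "Authorization: Bearer abc:def:ghi"
k, v = line.split(':', 1)
assert k == "Authorization" and v.strip() == "Bearer abc:def:ghi"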
@@ -1,19 +1,14 @@
-from .Watch import base_config
 import uuid

-class model(dict):
+from changedetectionio.model import watch_base
+
+
+class model(watch_base):

     def __init__(self, *arg, **kw):
-        super(model, self).__init__(*arg, **kw)
-
-        self.update(base_config)
-
-        self['uuid'] = str(uuid.uuid4())
+        self['overrides_watch'] = kw.get('default', {}).get('overrides_watch')

         if kw.get('default'):
             self.update(kw['default'])
             del kw['default']

+        # Goes at the end so we update the default object with the initialiser
+        super(model, self).__init__(*arg, **kw)

@@ -1,72 +1,20 @@
-from distutils.util import strtobool
-import logging
+from changedetectionio.strtobool import strtobool
+from changedetectionio.safe_jinja import render as jinja_render
+from . import watch_base
 import os
 import re
 import time
 import uuid
 from pathlib import Path
 from loguru import logger

+from ..html_tools import TRANSLATE_WHITESPACE_TABLE

 # Allowable protocols, protects against javascript: etc
 # file:// is further checked by ALLOW_FILE_URI
 SAFE_PROTOCOL_REGEX='^(http|https|ftp|file):'

-minimum_seconds_recheck_time = int(os.getenv('MINIMUM_SECONDS_RECHECK_TIME', 60))
+minimum_seconds_recheck_time = int(os.getenv('MINIMUM_SECONDS_RECHECK_TIME', 3))
 mtable = {'seconds': 1, 'minutes': 60, 'hours': 3600, 'days': 86400, 'weeks': 86400 * 7}

-from changedetectionio.notification import (
-    default_notification_format_for_watch
-)
-
-base_config = {
-    'body': None,
-    'check_unique_lines': False, # On change-detected, compare against all history if its something new
-    'check_count': 0,
-    'date_created': None,
-    'consecutive_filter_failures': 0, # Every time the CSS/xPath filter cannot be located, reset when all is fine.
-    'extract_text': [], # Extract text by regex after filters
-    'extract_title_as_title': False,
-    'fetch_backend': 'system', # plaintext, playwright etc
-    'processor': 'text_json_diff', # could be restock_diff or others from .processors
-    'filter_failure_notification_send': strtobool(os.getenv('FILTER_FAILURE_NOTIFICATION_SEND_DEFAULT', 'True')),
-    'filter_text_added': True,
-    'filter_text_replaced': True,
-    'filter_text_removed': True,
-    'has_ldjson_price_data': None,
-    'track_ldjson_price_data': None,
-    'headers': {}, # Extra headers to send
-    'ignore_text': [], # List of text to ignore when calculating the comparison checksum
-    'in_stock_only' : True, # Only trigger change on going to instock from out-of-stock
-    'include_filters': [],
-    'last_checked': 0,
-    'last_error': False,
-    'last_viewed': 0, # history key value of the last viewed via the [diff] link
-    'method': 'GET',
-    # Custom notification content
-    'notification_body': None,
-    'notification_format': default_notification_format_for_watch,
-    'notification_muted': False,
-    'notification_title': None,
-    'notification_screenshot': False, # Include the latest screenshot if available and supported by the apprise URL
-    'notification_urls': [], # List of URLs to add to the notification Queue (Usually AppRise)
-    'paused': False,
-    'previous_md5': False,
-    'previous_md5_before_filters': False, # Used for skipping changedetection entirely
-    'proxy': None, # Preferred proxy connection
-    'subtractive_selectors': [],
-    'tag': '', # Old system of text name for a tag, to be removed
-    'tags': [], # list of UUIDs to App.Tags
-    'text_should_not_be_present': [], # Text that should not present
-    # Re #110, so then if this is set to None, we know to use the default value instead
-    # Requires setting to None on submit if it's the same as the default
-    # Should be all None by default, so we use the system default in this case.
-    'time_between_check': {'weeks': None, 'days': None, 'hours': None, 'minutes': None, 'seconds': None},
-    'title': None,
-    'trigger_text': [], # List of text or regex to wait for until a change is detected
-    'url': '',
-    'uuid': str(uuid.uuid4()),
-    'webdriver_delay': None,
-    'webdriver_js_execute_code': None, # Run before change-detection
-}


 def is_safe_url(test_url):
     # See https://github.com/dgtlmoon/changedetection.io/issues/1358
@@ -83,40 +31,38 @@ def is_safe_url(test_url):

     return True

-class model(dict):
+
+class model(watch_base):
     __newest_history_key = None
     __history_n = 0
     jitter_seconds = 0

     def __init__(self, *arg, **kw):
-        self.update(base_config)
-        self.__datastore_path = kw['datastore_path']
-
-        self['uuid'] = str(uuid.uuid4())
-
-        del kw['datastore_path']
-
+        self.__datastore_path = kw.get('datastore_path')
+        if kw.get('datastore_path'):
+            del kw['datastore_path']
+        super(model, self).__init__(*arg, **kw)
         if kw.get('default'):
             self.update(kw['default'])
             del kw['default']

+        if self.get('default'):
+            del self['default']
+
         # Be sure the cached timestamp is ready
         bump = self.history

-        # Goes at the end so we update the default object with the initialiser
-        super(model, self).__init__(*arg, **kw)
-
     @property
     def viewed(self):
-        if int(self['last_viewed']) >= int(self.newest_history_key) :
+        # Don't return viewed when last_viewed is 0 and newest_key is 0
+        if int(self['last_viewed']) and int(self['last_viewed']) >= int(self.newest_history_key) :
             return True

         return False

     def ensure_data_dir_exists(self):
         if not os.path.isdir(self.watch_data_dir):
-            print ("> Creating data dir {}".format(self.watch_data_dir))
+            logger.debug(f"> Creating data dir {self.watch_data_dir}")
             os.mkdir(self.watch_data_dir)

     @property
@@ -128,12 +74,11 @@ class model(dict):

         ready_url = url
         if '{%' in url or '{{' in url:
-            from jinja2 import Environment
-            # Jinja2 available in URLs along with https://pypi.org/project/jinja2-time/
-            jinja2_env = Environment(extensions=['jinja2_time.TimeExtension'])
             try:
-                ready_url = str(jinja2_env.from_string(url).render())
+                ready_url = jinja_render(template_str=url)
             except Exception as e:
                 logger.critical(f"Invalid URL template for: '{url}' - {str(e)}")
                 from flask import (
                     flash, Markup, url_for
                 )
@@ -142,8 +87,45 @@ class model(dict):
                 flash(message, 'error')
                 return ''

+        if ready_url.startswith('source:'):
+            ready_url=ready_url.replace('source:', '')
+
         # Also double check it after any Jinja2 formatting just incase
         if not is_safe_url(ready_url):
             return 'DISABLED'
         return ready_url

+    def clear_watch(self):
+        import pathlib
+
+        # JSON Data, Screenshots, Textfiles (history index and snapshots), HTML in the future etc
+        for item in pathlib.Path(str(self.watch_data_dir)).rglob("*.*"):
+            os.unlink(item)
+
+        # Force the attr to recalculate
+        bump = self.history
+
+        # Do this last because it will trigger a recheck due to last_checked being zero
+        self.update({
+            'browser_steps_last_error_step': None,
+            'check_count': 0,
+            'fetch_time': 0.0,
+            'has_ldjson_price_data': None,
+            'last_checked': 0,
+            'last_error': False,
+            'last_notification_error': False,
+            'last_viewed': 0,
+            'previous_md5': False,
+            'previous_md5_before_filters': False,
+            'remote_server_reply': None,
+            'track_ldjson_price_data': None
+        })
+        return
+
+    @property
+    def is_source_type_url(self):
+        return self.get('url', '').startswith('source:')
+
     @property
     def get_fetch_backend(self):
         """
@@ -167,9 +149,7 @@ class model(dict):
     @property
     def label(self):
         # Used for sorting
-        if self['title']:
-            return self['title']
-        return self['url']
+        return self.get('title') if self.get('title') else self.get('url')

     @property
     def last_changed(self):
@@ -198,10 +178,14 @@ class model(dict):
         """
         tmp_history = {}

+        # In the case we are only using the watch for processing without history
+        if not self.watch_data_dir:
+            return []
+
         # Read the history file as a dict
         fname = os.path.join(self.watch_data_dir, "history.txt")
         if os.path.isfile(fname):
-            logging.debug("Reading history index " + str(time.time()))
+            logger.debug(f"Reading watch history index for {self.get('uuid')}")
             with open(fname, "r") as f:
                 for i in f.readlines():
                     if ',' in i:
@@ -223,6 +207,8 @@ class model(dict):

         if len(tmp_history):
             self.__newest_history_key = list(tmp_history.keys())[-1]
+        else:
+            self.__newest_history_key = None

         self.__history_n = len(tmp_history)

@@ -233,6 +219,21 @@ class model(dict):
         fname = os.path.join(self.watch_data_dir, "history.txt")
         return os.path.isfile(fname)

+    @property
+    def has_browser_steps(self):
+        has_browser_steps = self.get('browser_steps') and list(filter(
+            lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'),
+            self.get('browser_steps')))
+
+        return has_browser_steps
+
+    @property
+    def has_restock_info(self):
+        if self.get('restock') and self['restock'].get('in_stock') != None:
+            return True
+
+        return False
+
     # Returns the newest key, but if theres only 1 record, then it's counted as not being new, so return 0.
     @property
     def newest_history_key(self):
@@ -246,6 +247,33 @@ class model(dict):
         bump = self.history
         return self.__newest_history_key

+    # Given an arbitrary timestamp, find the best history key for the [diff] button so it can preset a smarter from_version
+    @property
+    def get_from_version_based_on_last_viewed(self):
+        """Unfortunately for now timestamp is stored as string key"""
+        keys = list(self.history.keys())
+        if not keys:
+            return None
+        if len(keys) == 1:
+            return keys[0]
+
+        last_viewed = int(self.get('last_viewed'))
+        sorted_keys = sorted(keys, key=lambda x: int(x))
+        sorted_keys.reverse()
+
+        # When the 'last viewed' timestamp is greater than or equal the newest snapshot, return second newest
+        if last_viewed >= int(sorted_keys[0]):
+            return sorted_keys[1]
+
+        # When the 'last viewed' timestamp is between snapshots, return the older snapshot
+        for newer, older in list(zip(sorted_keys[0:], sorted_keys[1:])):
+            if last_viewed < int(newer) and last_viewed >= int(older):
+                return older
+
+        # When the 'last viewed' timestamp is less than the oldest snapshot, return oldest
+        return sorted_keys[-1]

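get_from_version_based_on_last_viewed scans newest-to-oldest and returns the snapshot the user most plausibly saw last. A condensed standalone version of the same selection logic:

def pick_from_version(keys, last_viewed):
    # keys are epoch-second strings, as stored in history.txt
    if not keys:
        return None
    if len(keys) == 1:
        return keys[0]
    newest_first = sorted(keys, key=int, reverse=True)
    if last_viewed >= int(newest_first[0]):
        return newest_first[1]            # already saw the newest: diff from second newest
    for newer, older in zip(newest_first, newest_first[1:]):
        if int(older) <= last_viewed < int(newer):
            return older                  # viewed somewhere between these two snapshots
    return newest_first[-1]               # older than everything: diff from the oldest

assert pick_from_version(['100', '200', '300'], 250) == '200'
assert pick_from_version(['100', '200', '300'], 300) == '200'
assert pick_from_version(['100', '200', '300'], 50) == '100'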
     def get_history_snapshot(self, timestamp):
         import brotli
         filepath = self.history[timestamp]
@@ -271,37 +299,17 @@ class model(dict):
     # Save some text file to the appropriate path and bump the history
     # result_obj from fetch_site_status.run()
     def save_history_text(self, contents, timestamp, snapshot_id):
-        import brotli
+        from changedetectionio.store import ChangeDetectionStore
+        from changedetectionio.storage.filesystem_storage import FileSystemStorage

         self.ensure_data_dir_exists()
         logger.trace(f"{self.get('uuid')} - Updating history.txt with timestamp {timestamp}")

-        # Small hack so that we sleep just enough to allow 1 second between history snapshots
-        # this is because history.txt indexes/keys snapshots by epoch seconds and we dont want dupe keys
-        if self.__newest_history_key and int(timestamp) == int(self.__newest_history_key):
-            time.sleep(timestamp - self.__newest_history_key)
-
-        threshold = int(os.getenv('SNAPSHOT_BROTLI_COMPRESSION_THRESHOLD', 1024))
-        skip_brotli = strtobool(os.getenv('DISABLE_BROTLI_TEXT_SNAPSHOT', 'False'))
-
-        if not skip_brotli and len(contents) > threshold:
-            snapshot_fname = f"{snapshot_id}.txt.br"
-            dest = os.path.join(self.watch_data_dir, snapshot_fname)
-            if not os.path.exists(dest):
-                with open(dest, 'wb') as f:
-                    f.write(brotli.compress(contents, mode=brotli.MODE_TEXT))
-        else:
-            snapshot_fname = f"{snapshot_id}.txt"
-            dest = os.path.join(self.watch_data_dir, snapshot_fname)
-            if not os.path.exists(dest):
-                with open(dest, 'wb') as f:
-                    f.write(contents)
-
-        # Append to index
-        # @todo check last char was \n
-        index_fname = os.path.join(self.watch_data_dir, "history.txt")
-        with open(index_fname, 'a') as f:
-            f.write("{},{}\n".format(timestamp, snapshot_fname))
-            f.close()
+        # Get storage from singleton store or create a filesystem storage as default
+        store = ChangeDetectionStore.instance if hasattr(ChangeDetectionStore, 'instance') else None
+        storage = store.storage if store and hasattr(store, 'storage') else FileSystemStorage(self.__datastore_path)
+
+        # Use the storage backend to save the history text
+        snapshot_fname = storage.save_history_text(self.get('uuid'), contents, timestamp, snapshot_id)

         self.__newest_history_key = timestamp
         self.__history_n += 1
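The size-threshold compression the old inline code performed (brotli only above SNAPSHOT_BROTLI_COMPRESSION_THRESHOLD bytes) now lives behind the storage backend, whose API is only visible here as `storage.save_history_text(...)`. A sketch of the decision a filesystem backend would presumably keep making, based on the removed lines above:

import brotli, os

def snapshot_bytes_and_name(contents: bytes, snapshot_id: str):
    # Below the threshold, brotli overhead isn't worth it; store plain text
    threshold = int(os.getenv('SNAPSHOT_BROTLI_COMPRESSION_THRESHOLD', 1024))
    if len(contents) > threshold:
        # MODE_TEXT tunes the encoder for UTF-8 text snapshots
        return brotli.compress(contents, mode=brotli.MODE_TEXT), f"{snapshot_id}.txt.br"
    return contents, f"{snapshot_id}.txt"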
@@ -325,14 +333,32 @@ class model(dict):
         return seconds

     # Iterate over all history texts and see if something new exists
-    def lines_contain_something_unique_compared_to_history(self, lines: list):
-        local_lines = set([l.decode('utf-8').strip().lower() for l in lines])
+    # Always applying .strip() to start/end but optionally replace any other whitespace
+    def lines_contain_something_unique_compared_to_history(self, lines: list, ignore_whitespace=False):
+        local_lines = set([])
+        if lines:
+            if ignore_whitespace:
+                if isinstance(lines[0], str): # Can be either str or bytes depending on what was on the disk
+                    local_lines = set([l.translate(TRANSLATE_WHITESPACE_TABLE).lower() for l in lines])
+                else:
+                    local_lines = set([l.decode('utf-8').translate(TRANSLATE_WHITESPACE_TABLE).lower() for l in lines])
+            else:
+                if isinstance(lines[0], str): # Can be either str or bytes depending on what was on the disk
+                    local_lines = set([l.strip().lower() for l in lines])
+                else:
+                    local_lines = set([l.decode('utf-8').strip().lower() for l in lines])

         # Compare each lines (set) against each history text file (set) looking for something new..
         existing_history = set({})
         for k, v in self.history.items():
             content = self.get_history_snapshot(k)
-            alist = set([line.strip().lower() for line in content.splitlines()])
+
+            if ignore_whitespace:
+                alist = set([line.translate(TRANSLATE_WHITESPACE_TABLE).lower() for line in content.splitlines()])
+            else:
+                alist = set([line.strip().lower() for line in content.splitlines()])
+
             existing_history = existing_history.union(alist)

         # Check that everything in local_lines(new stuff) already exists in existing_history - it should
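With `ignore_whitespace` enabled, both the incoming lines and every history snapshot are normalized through `TRANSLATE_WHITESPACE_TABLE` (which deletes `\r`, `\n`, tabs, and spaces) before the set comparison. For example:

TRANSLATE_WHITESPACE_TABLE = str.maketrans('', '', '\r\n\t ')

a = "Price:  100 "
b = "price:100"
assert a.translate(TRANSLATE_WHITESPACE_TABLE).lower() == b.translate(TRANSLATE_WHITESPACE_TABLE).lower()
# So a merely re-spaced line is NOT counted as "something unique" compared to history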
@@ -376,8 +402,8 @@ class model(dict):
     @property
     def watch_data_dir(self):
         # The base dir of the watch data
-        return os.path.join(self.__datastore_path, self['uuid'])
+        return os.path.join(self.__datastore_path, self['uuid']) if self.__datastore_path else None

     def get_error_text(self):
         """Return the text saved from a previous request that resulted in a non-200 error"""
         fname = os.path.join(self.watch_data_dir, "last-error.txt")
@@ -412,6 +438,17 @@ class model(dict):
     def toggle_mute(self):
         self['notification_muted'] ^= True

+    def extra_notification_token_values(self):
+        # Used for providing extra tokens
+        # return {'widget': 555}
+        return {}
+
+    def extra_notification_token_placeholder_info(self):
+        # Used for providing extra tokens
+        # return [('widget', "Get widget amounts")]
+        return []
+
     def extract_regex_from_all_history(self, regex):
         import csv
         import re
@@ -470,8 +507,43 @@ class model(dict):
         # None is set
         return False

     def save_error_text(self, contents):
         self.ensure_data_dir_exists()
         target_path = os.path.join(self.watch_data_dir, "last-error.txt")
         with open(target_path, 'w', encoding='utf-8') as f:
             f.write(contents)

-    def get_last_fetched_before_filters(self):
+    def save_xpath_data(self, data, as_error=False):
+        import json
+        import zlib
+
+        if as_error:
+            target_path = os.path.join(str(self.watch_data_dir), "elements-error.deflate")
+        else:
+            target_path = os.path.join(str(self.watch_data_dir), "elements.deflate")
+
+        self.ensure_data_dir_exists()
+
+        with open(target_path, 'wb') as f:
+            f.write(zlib.compress(json.dumps(data).encode()))
+            f.close()
+
+    # Save as PNG, PNG is larger but better for doing visual diff in the future
+    def save_screenshot(self, screenshot: bytes, as_error=False):
+
+        if as_error:
+            target_path = os.path.join(self.watch_data_dir, "last-error-screenshot.png")
+        else:
+            target_path = os.path.join(self.watch_data_dir, "last-screenshot.png")
+
+        self.ensure_data_dir_exists()
+
+        with open(target_path, 'wb') as f:
+            f.write(screenshot)
+            f.close()
+
+    def get_last_fetched_text_before_filters(self):
         import brotli
         filepath = os.path.join(self.watch_data_dir, 'last-fetched.br')

@@ -486,8 +558,62 @@ class model(dict):
         with open(filepath, 'rb') as f:
             return(brotli.decompress(f.read()).decode('utf-8'))

-    def save_last_fetched_before_filters(self, contents):
+    def save_last_text_fetched_before_filters(self, contents):
         import brotli
         filepath = os.path.join(self.watch_data_dir, 'last-fetched.br')
         with open(filepath, 'wb') as f:
             f.write(brotli.compress(contents, mode=brotli.MODE_TEXT))

+    def save_last_fetched_html(self, timestamp, contents):
+        import brotli
+
+        self.ensure_data_dir_exists()
+        snapshot_fname = f"{timestamp}.html.br"
+        filepath = os.path.join(self.watch_data_dir, snapshot_fname)
+
+        with open(filepath, 'wb') as f:
+            contents = contents.encode('utf-8') if isinstance(contents, str) else contents
+            try:
+                f.write(brotli.compress(contents))
+            except Exception as e:
+                logger.warning(f"{self.get('uuid')} - Unable to compress snapshot, saving as raw data to {filepath}")
+                logger.warning(e)
+                f.write(contents)
+
+        self._prune_last_fetched_html_snapshots()
+
+    def get_fetched_html(self, timestamp):
+        import brotli
+
+        snapshot_fname = f"{timestamp}.html.br"
+        filepath = os.path.join(self.watch_data_dir, snapshot_fname)
+        if os.path.isfile(filepath):
+            with open(filepath, 'rb') as f:
+                return (brotli.decompress(f.read()).decode('utf-8'))
+
+        return False
+
+    def _prune_last_fetched_html_snapshots(self):
+
+        dates = list(self.history.keys())
+        dates.reverse()
+
+        for index, timestamp in enumerate(dates):
+            snapshot_fname = f"{timestamp}.html.br"
+            filepath = os.path.join(self.watch_data_dir, snapshot_fname)
+
+            # Keep only the first 2
+            if index > 1 and os.path.isfile(filepath):
+                os.remove(filepath)

     @property
     def get_browsersteps_available_screenshots(self):
         "For knowing which screenshots are available to show the user in BrowserSteps UI"
         available = []
         for f in Path(self.watch_data_dir).glob('step_before-*.jpeg'):
             step_n=re.search(r'step_before-(\d+)', f.name)
             if step_n:
                 available.append(step_n.group(1))
         return available

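_prune_last_fetched_html_snapshots (above) keeps raw HTML only for the two most recent history entries, since full-page `.html.br` files are much larger than the text snapshots. The retention rule in isolation:

def keep_snapshot(history_keys_newest_first, timestamp):
    # Only the two newest timestamps retain their .html.br file
    return timestamp in history_keys_newest_first[:2]

assert keep_snapshot(['300', '200', '100'], '300')
assert not keep_snapshot(['300', '200', '100'], '100')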
@@ -0,0 +1,135 @@
+import os
+import uuid
+
+from changedetectionio import strtobool
+from changedetectionio.notification import default_notification_format_for_watch
+
+class watch_base(dict):
+
+    def __init__(self, *arg, **kw):
+        self.update({
+            # Custom notification content
+            # Re #110, so then if this is set to None, we know to use the default value instead
+            # Requires setting to None on submit if it's the same as the default
+            # Should be all None by default, so we use the system default in this case.
+            'body': None,
+            'browser_steps': [],
+            'browser_steps_last_error_step': None,
+            'check_count': 0,
+            'check_unique_lines': False, # On change-detected, compare against all history if its something new
+            'consecutive_filter_failures': 0, # Every time the CSS/xPath filter cannot be located, reset when all is fine.
+            'content-type': None,
+            'date_created': None,
+            'extract_text': [], # Extract text by regex after filters
+            'extract_title_as_title': False,
+            'fetch_backend': 'system', # plaintext, playwright etc
+            'fetch_time': 0.0,
+            'filter_failure_notification_send': strtobool(os.getenv('FILTER_FAILURE_NOTIFICATION_SEND_DEFAULT', 'True')),
+            'filter_text_added': True,
+            'filter_text_removed': True,
+            'filter_text_replaced': True,
+            'follow_price_changes': True,
+            'has_ldjson_price_data': None,
+            'headers': {}, # Extra headers to send
+            'ignore_text': [], # List of text to ignore when calculating the comparison checksum
+            'in_stock_only': True, # Only trigger change on going to instock from out-of-stock
+            'include_filters': [],
+            'last_checked': 0,
+            'last_error': False,
+            'last_viewed': 0, # history key value of the last viewed via the [diff] link
+            'method': 'GET',
+            'notification_alert_count': 0,
+            'notification_body': None,
+            'notification_format': default_notification_format_for_watch,
+            'notification_muted': False,
+            'notification_screenshot': False, # Include the latest screenshot if available and supported by the apprise URL
+            'notification_title': None,
+            'notification_urls': [], # List of URLs to add to the notification Queue (Usually AppRise)
+            'paused': False,
+            'previous_md5': False,
+            'previous_md5_before_filters': False, # Used for skipping changedetection entirely
+            'processor': 'text_json_diff', # could be restock_diff or others from .processors
+            'price_change_threshold_percent': None,
+            'proxy': None, # Preferred proxy connection
+            'remote_server_reply': None, # From 'server' reply header
+            'sort_text_alphabetically': False,
+            'subtractive_selectors': [],
+            'tag': '', # Old system of text name for a tag, to be removed
+            'tags': [], # list of UUIDs to App.Tags
+            'text_should_not_be_present': [], # Text that should not present
+            'time_between_check': {'weeks': None, 'days': None, 'hours': None, 'minutes': None, 'seconds': None},
+            'time_between_check_use_default': True,
+            "time_schedule_limit": {
+                "enabled": False,
+                "monday":    {"enabled": True, "start_time": "00:00", "duration": {"hours": "24", "minutes": "00"}},
+                "tuesday":   {"enabled": True, "start_time": "00:00", "duration": {"hours": "24", "minutes": "00"}},
+                "wednesday": {"enabled": True, "start_time": "00:00", "duration": {"hours": "24", "minutes": "00"}},
+                "thursday":  {"enabled": True, "start_time": "00:00", "duration": {"hours": "24", "minutes": "00"}},
+                "friday":    {"enabled": True, "start_time": "00:00", "duration": {"hours": "24", "minutes": "00"}},
+                "saturday":  {"enabled": True, "start_time": "00:00", "duration": {"hours": "24", "minutes": "00"}},
+                "sunday":    {"enabled": True, "start_time": "00:00", "duration": {"hours": "24", "minutes": "00"}},
+            },
+            'title': None,
+            'track_ldjson_price_data': None,
+            'trim_text_whitespace': False,
+            'remove_duplicate_lines': False,
+            'trigger_text': [], # List of text or regex to wait for until a change is detected
+            'url': '',
+            'uuid': str(uuid.uuid4()),
+            'webdriver_delay': None,
+            'webdriver_js_execute_code': None, # Run before change-detection
+        })
+
+        super(watch_base, self).__init__(*arg, **kw)
+
+        if self.get('default'):
+            del self['default']
@@ -1,7 +1,9 @@
-import apprise
-from jinja2 import Environment, BaseLoader
+import time
 from apprise import NotifyFormat
 import json
+import apprise
+from loguru import logger


 valid_tokens = {
     'base_url': '',
@@ -21,7 +23,7 @@ valid_tokens = {
 }

 default_notification_format_for_watch = 'System default'
-default_notification_format = 'Text'
+default_notification_format = 'HTML Color'
 default_notification_body = '{{watch_url}} had a change.\n---\n{{diff}}\n---\n'
 default_notification_title = 'ChangeDetection.io Notification - {{watch_url}}'

@@ -29,66 +31,24 @@ valid_notification_formats = {
     'Text': NotifyFormat.TEXT,
     'Markdown': NotifyFormat.MARKDOWN,
     'HTML': NotifyFormat.HTML,
+    'HTML Color': 'htmlcolor',
     # Used only for editing a watch (not for global)
     default_notification_format_for_watch: default_notification_format_for_watch
 }

-# include the decorator
-from apprise.decorators import notify
-
-@notify(on="delete")
-@notify(on="deletes")
-@notify(on="get")
-@notify(on="gets")
-@notify(on="post")
-@notify(on="posts")
-@notify(on="put")
-@notify(on="puts")
-def apprise_custom_api_call_wrapper(body, title, notify_type, *args, **kwargs):
-    import requests
-    url = kwargs['meta'].get('url')
-
-    if url.startswith('post'):
-        r = requests.post
-    elif url.startswith('get'):
-        r = requests.get
-    elif url.startswith('put'):
-        r = requests.put
-    elif url.startswith('delete'):
-        r = requests.delete
-
-    url = url.replace('post://', 'http://')
-    url = url.replace('posts://', 'https://')
-    url = url.replace('put://', 'http://')
-    url = url.replace('puts://', 'https://')
-    url = url.replace('get://', 'http://')
-    url = url.replace('gets://', 'https://')
-    url = url.replace('delete://', 'http://')
-    url = url.replace('deletes://', 'https://')
-
-    # Try to auto-guess if it's JSON
-    headers = {}
-    try:
-        json.loads(body)
-        headers = {'Content-Type': 'application/json; charset=utf-8'}
-    except ValueError as e:
-        pass
-
-    r(url, headers=headers, data=body)

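The removed wrapper (now registered via changedetectionio.apprise_plugin, per the import inside process_notification below) maps pseudo-schemes like `post://` and `posts://` onto plain HTTP verbs. The same dispatch can be written more compactly with a lookup table; a sketch, not the plugin's actual code:

import requests

VERB_SCHEMES = {'get': requests.get, 'post': requests.post,
                'put': requests.put, 'delete': requests.delete}

def call_custom_url(url: str, body: str):
    verb = next(v for v in VERB_SCHEMES if url.startswith(v))
    # 'posts://' becomes https://, 'post://' becomes http://, and so on
    url = url.replace(f'{verb}s://', 'https://', 1).replace(f'{verb}://', 'http://', 1)
    VERB_SCHEMES[verb](url, data=body)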
def process_notification(n_object, datastore):
|
||||
# so that the custom endpoints are registered
|
||||
from changedetectionio.apprise_plugin import apprise_custom_api_call_wrapper
|
||||
|
||||
from .safe_jinja import render as jinja_render
|
||||
now = time.time()
|
||||
if n_object.get('notification_timestamp'):
|
||||
logger.trace(f"Time since queued {now-n_object['notification_timestamp']:.3f}s")
|
||||
# Insert variables into the notification content
|
||||
notification_parameters = create_notification_parameters(n_object, datastore)
|
||||
|
||||
# Get the notification body from datastore
|
||||
jinja2_env = Environment(loader=BaseLoader)
|
||||
n_body = jinja2_env.from_string(n_object.get('notification_body', default_notification_body)).render(**notification_parameters)
|
||||
n_title = jinja2_env.from_string(n_object.get('notification_title', default_notification_title)).render(**notification_parameters)
|
||||
n_format = valid_notification_formats.get(
|
||||
n_object.get('notification_format', default_notification_format),
|
||||
valid_notification_formats[default_notification_format],
|
||||
@@ -99,103 +59,129 @@ def process_notification(n_object, datastore):
    # Initially text or whatever
    n_format = datastore.data['settings']['application'].get('notification_format', valid_notification_formats[default_notification_format])

    logger.trace(f"Complete notification body including Jinja and placeholders calculated in {time.time() - now:.3f}s")

    # https://github.com/caronc/apprise/wiki/Development_LogCapture
    # Anything higher than or equal to WARNING (which covers things like Connection errors)
    # raise it as an exception
    apobjs = []
    sent_objs = []

    sent_objs = []
    from .apprise_asset import asset
    for url in n_object['notification_urls']:
        url = jinja2_env.from_string(url).render(**notification_parameters)
        apobj = apprise.Apprise(debug=True, asset=asset)
        url = url.strip()
        if len(url):
            print(">> Process Notification: AppRise notifying {}".format(url))
            with apprise.LogCapture(level=apprise.logging.DEBUG) as logs:
                # Re 323 - Limit discord length to their 2000 char limit total or it won't send.
                # Because different notifications may require different pre-processing, run each sequentially :(
                # 2000 bytes minus -
                #     200 bytes for the overhead of the _entire_ json payload, 200 bytes for {tts, wait, content} etc headers
                #     Length of URL - in case they specify a longer custom avatar_url

                # So if no avatar_url is specified, add one so it can be correctly calculated into the total payload
                k = '?' if not '?' in url else '&'
                if not 'avatar_url' in url \
                        and not url.startswith('mail') \
                        and not url.startswith('post') \
                        and not url.startswith('get') \
                        and not url.startswith('delete') \
                        and not url.startswith('put'):
                    url += k + 'avatar_url=https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/changedetectionio/static/images/avatar-256x256.png'
                if 'as_async' in n_object:
                    asset.async_mode = n_object.get('as_async')

                if url.startswith('tgram://'):
                    # Telegram only supports a limited subset of HTML, remove the '<br>' we place in.
                    # re https://github.com/dgtlmoon/changedetection.io/issues/555
                    # @todo re-use an existing library we have already imported to strip all non-allowed tags
                    n_body = n_body.replace('<br>', '\n')
                    n_body = n_body.replace('</br>', '\n')
                    # real limit is 4096, but minus some for extra metadata
                    payload_max_size = 3600
                    body_limit = max(0, payload_max_size - len(n_title))
                    n_title = n_title[0:payload_max_size]
                    n_body = n_body[0:body_limit]
                    apobj = apprise.Apprise(debug=True, asset=asset)

                elif url.startswith('discord://') or url.startswith('https://discordapp.com/api/webhooks') or url.startswith('https://discord.com/api'):
                    # real limit is 2000, but minus some for extra metadata
                    payload_max_size = 1700
                    body_limit = max(0, payload_max_size - len(n_title))
                    n_title = n_title[0:payload_max_size]
                    n_body = n_body[0:body_limit]
    if not n_object.get('notification_urls'):
        return None

                elif url.startswith('mailto'):
                    # Apprise will default to HTML, so we need to override it
                    # So that what's generated in n_body is in line with what is going to be sent.
                    # https://github.com/caronc/apprise/issues/633#issuecomment-1191449321
                    if not 'format=' in url and (n_format == 'Text' or n_format == 'Markdown'):
                        prefix = '?' if not '?' in url else '&'
                        # Apprise format is lowercase text https://github.com/caronc/apprise/issues/633
                        n_format = n_format.lower()
                        url = "{}{}format={}".format(url, prefix, n_format)
                    # If n_format == HTML, then apprise email should default to text/html and we should be sending HTML only
    with apprise.LogCapture(level=apprise.logging.DEBUG) as logs:
        for url in n_object['notification_urls']:

            apobj.add(url)
    # Get the notification body from datastore
    n_body = jinja_render(template_str=n_object.get('notification_body', ''), **notification_parameters)
    if n_object.get('notification_format', '').startswith('HTML'):
        n_body = n_body.replace("\n", '<br>')

            apobj.notify(
                title=n_title,
                body=n_body,
                body_format=n_format,
                # False is not an option for AppRise, must be type None
                attach=n_object.get('screenshot', None)
            )
    n_title = jinja_render(template_str=n_object.get('notification_title', ''), **notification_parameters)

            apobj.clear()
        url = url.strip()
        if url.startswith('#'):
            logger.trace(f"Skipping commented out notification URL - {url}")
            continue

            # In case it needs to exist in memory for a while after to process(?)
            apobjs.append(apobj)
        if not url:
            logger.warning(f"Process Notification: skipping empty notification URL.")
            continue

            # Returns empty string if nothing found, multi-line string otherwise
            log_value = logs.getvalue()
            if log_value and ('WARNING' in log_value or 'ERROR' in log_value):
                raise Exception(log_value)
        logger.info(f">> Process Notification: AppRise notifying {url}")
        url = jinja_render(template_str=url, **notification_parameters)

            sent_objs.append({'title': n_title,
                              'body': n_body,
                              'url': url,
                              'body_format': n_format})
        # Re 323 - Limit discord length to their 2000 char limit total or it won't send.
        # Because different notifications may require different pre-processing, run each sequentially :(
        # 2000 bytes minus -
        #     200 bytes for the overhead of the _entire_ json payload, 200 bytes for {tts, wait, content} etc headers
        #     Length of URL - in case they specify a longer custom avatar_url

        # So if no avatar_url is specified, add one so it can be correctly calculated into the total payload
        k = '?' if not '?' in url else '&'
        if not 'avatar_url' in url \
                and not url.startswith('mail') \
                and not url.startswith('post') \
                and not url.startswith('get') \
                and not url.startswith('delete') \
                and not url.startswith('put'):
            url += k + 'avatar_url=https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/changedetectionio/static/images/avatar-256x256.png'

        if url.startswith('tgram://'):
            # Telegram only supports a limited subset of HTML, remove the '<br>' we place in.
            # re https://github.com/dgtlmoon/changedetection.io/issues/555
            # @todo re-use an existing library we have already imported to strip all non-allowed tags
            n_body = n_body.replace('<br>', '\n')
            n_body = n_body.replace('</br>', '\n')
            # real limit is 4096, but minus some for extra metadata
            payload_max_size = 3600
            body_limit = max(0, payload_max_size - len(n_title))
            n_title = n_title[0:payload_max_size]
            n_body = n_body[0:body_limit]

        elif url.startswith('discord://') or url.startswith('https://discordapp.com/api/webhooks') or url.startswith(
                'https://discord.com/api'):
            # real limit is 2000, but minus some for extra metadata
            payload_max_size = 1700
            body_limit = max(0, payload_max_size - len(n_title))
            n_title = n_title[0:payload_max_size]
            n_body = n_body[0:body_limit]

        elif url.startswith('mailto'):
            # Apprise will default to HTML, so we need to override it
            # So that what's generated in n_body is in line with what is going to be sent.
            # https://github.com/caronc/apprise/issues/633#issuecomment-1191449321
            if not 'format=' in url and (n_format == 'Text' or n_format == 'Markdown'):
                prefix = '?' if not '?' in url else '&'
                # Apprise format is lowercase text https://github.com/caronc/apprise/issues/633
                n_format = n_format.lower()
                url = f"{url}{prefix}format={n_format}"
            # If n_format == HTML, then apprise email should default to text/html and we should be sending HTML only

        apobj.add(url)

        sent_objs.append({'title': n_title,
                          'body': n_body,
                          'url': url,
                          'body_format': n_format})

    # Blast off the notifications that are set in .add()
    apobj.notify(
        title=n_title,
        body=n_body,
        body_format=n_format,
        # False is not an option for AppRise, must be type None
        attach=n_object.get('screenshot', None)
    )

    # Returns empty string if nothing found, multi-line string otherwise
    log_value = logs.getvalue()

    if log_value and ('WARNING' in log_value or 'ERROR' in log_value):
        logger.critical(log_value)
        raise Exception(log_value)

    # Return what was sent for better logging - after the for loop
    return sent_objs

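A quick worked example (hypothetical lengths) of the Discord budget above: with a 100-character title, the body is clipped so title plus body stay within the 1700-character allowance:

payload_max_size = 1700                                # Discord budget after JSON overhead
n_title = 'T' * 100
body_limit = max(0, payload_max_size - len(n_title))   # 1600
assert body_limit == 1600                              # the body keeps at most 1600 chars
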
# Notification title + body content parameters get created here.
# ( Where we prepare the tokens in the notification to be replaced with actual values )
def create_notification_parameters(n_object, datastore):
    from copy import deepcopy

    # in the case we send a test notification from the main settings, there is no UUID.
    uuid = n_object['uuid'] if 'uuid' in n_object else ''

    if uuid != '':
    if uuid:
        watch_title = datastore.data['watching'][uuid].get('title', '')
        tag_list = []
        tags = datastore.get_all_tags_for_watch(uuid)
@@ -223,19 +209,18 @@ def create_notification_parameters(n_object, datastore):
    tokens.update(
        {
            'base_url': base_url,
            'current_snapshot': n_object['current_snapshot'] if 'current_snapshot' in n_object else '',
            'diff': n_object.get('diff', ''),  # Null default in the case we use a test
            'diff_added': n_object.get('diff_added', ''),  # Null default in the case we use a test
            'diff_full': n_object.get('diff_full', ''),  # Null default in the case we use a test
            'diff_patch': n_object.get('diff_patch', ''),  # Null default in the case we use a test
            'diff_removed': n_object.get('diff_removed', ''),  # Null default in the case we use a test
            'diff_url': diff_url,
            'preview_url': preview_url,
            'triggered_text': n_object.get('triggered_text', ''),
            'watch_tag': watch_tag if watch_tag is not None else '',
            'watch_title': watch_title if watch_title is not None else '',
            'watch_url': watch_url,
            'watch_uuid': uuid,
        })

    # n_object will contain diff, diff_added etc etc
    tokens.update(n_object)

    if uuid:
        tokens.update(datastore.data['watching'].get(uuid).extra_notification_token_values())

    return tokens

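To make the token plumbing concrete, a small sketch (the template text and URL are hypothetical) of a notification body that these parameters would fill in:

n_object = {
    'notification_body': 'Change at {{watch_url}} ({{watch_title}}):\n{{diff}}',
    'notification_urls': ['posts://example.com/webhook'],
}
# create_notification_parameters() prepares the tokens, then jinja_render()
# substitutes {{watch_url}}, {{watch_title}} and {{diff}} with the values prepared above
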
@@ -8,4 +8,8 @@ The concept here is to be able to switch between different domain specific problems to solve
Some suggestions for the future

- `graphical`
- `restock_and_price` - extract price AND stock text

## Todo

- Make each processor return an extra list of sub-processors (so you could configure a single processor in different ways)
- move restock_diff to its own pip/github repo
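A minimal sketch (module and names assumed, not taken from this diff) of what a pluggable processor looks like under this layout, a `processor.py` inside its own sub-package, following the run_changedetection() contract shown below:

# changedetectionio/processors/word_count/processor.py  (hypothetical sub-package)
import hashlib
from changedetectionio.processors import difference_detection_processor

name = 'Word count change detection'          # shown in the UI processor list
description = 'Detects changes in the number of words on a page'

class perform_site_check(difference_detection_processor):

    def run_changedetection(self, watch):
        update_obj = {'last_notification_error': False, 'last_error': False}
        # self.fetcher is populated by call_browser() in the base class
        snapshot = str(len(self.fetcher.content.split()))
        fetched_md5 = hashlib.md5(snapshot.encode('utf-8')).hexdigest()
        changed_detected = watch.get('previous_md5') != fetched_md5
        update_obj['previous_md5'] = fetched_md5
        return changed_detected, update_obj, snapshot.encode('utf-8')
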
@@ -1,15 +1,170 @@
from abc import abstractmethod
from changedetectionio.content_fetchers.base import Fetcher
from changedetectionio.strtobool import strtobool
from copy import deepcopy
from loguru import logger
import hashlib

import importlib
import inspect
import os
import pkgutil
import re

class difference_detection_processor():

    browser_steps = None
    datastore = None
    fetcher = None
    screenshot = None
    watch = None
    xpath_data = None
    preferred_proxy = None

    def __init__(self, *args, **kwargs):
    def __init__(self, *args, datastore, watch_uuid, **kwargs):
        super().__init__(*args, **kwargs)
        self.datastore = datastore
        self.watch = deepcopy(self.datastore.data['watching'].get(watch_uuid))
        # Generic fetcher that should be extended (requests, playwright etc)
        self.fetcher = Fetcher()

    def call_browser(self, preferred_proxy_id=None):

        from requests.structures import CaseInsensitiveDict

        url = self.watch.link

        # Protect against file:, file:/, file:// access, check the real "link" without any meta "source:" etc prepended.
        if re.search(r'^file:', url.strip(), re.IGNORECASE):
            if not strtobool(os.getenv('ALLOW_FILE_URI', 'false')):
                raise Exception(
                    "file:// type access is denied for security reasons."
                )

        # Requests, playwright, other browser via wss:// etc, fetch_extra_something
        prefer_fetch_backend = self.watch.get('fetch_backend', 'system')

        # Proxy ID "key"
        preferred_proxy_id = preferred_proxy_id if preferred_proxy_id else self.datastore.get_preferred_proxy_for_watch(uuid=self.watch.get('uuid'))

        # Pluggable content self.fetcher
        if not prefer_fetch_backend or prefer_fetch_backend == 'system':
            prefer_fetch_backend = self.datastore.data['settings']['application'].get('fetch_backend')

        # In the case that the preferred fetcher was a browser config with a custom connection URL..
        # @todo - on save watch, if it's extra_browser_ then it should be obvious it will use playwright (like if it's requests now..)
        custom_browser_connection_url = None
        if prefer_fetch_backend.startswith('extra_browser_'):
            (t, key) = prefer_fetch_backend.split('extra_browser_')
            connection = list(
                filter(lambda s: (s['browser_name'] == key), self.datastore.data['settings']['requests'].get('extra_browsers', [])))
            if connection:
                prefer_fetch_backend = 'html_webdriver'
                custom_browser_connection_url = connection[0].get('browser_connection_url')

        # PDF should be html_requests because playwright will serve it up (so far) in an embedded page
        # @todo https://github.com/dgtlmoon/changedetection.io/issues/2019
        # @todo needs a test or a fix
        if self.watch.is_pdf:
            prefer_fetch_backend = "html_requests"

        # Grab the right kind of 'fetcher', (playwright, requests, etc)
        from changedetectionio import content_fetchers
        if hasattr(content_fetchers, prefer_fetch_backend):
            # @todo TEMPORARY HACK - SWITCH BACK TO PLAYWRIGHT FOR BROWSERSTEPS
            if prefer_fetch_backend == 'html_webdriver' and self.watch.has_browser_steps:
                # This is never supported in selenium anyway
                logger.warning("Using playwright fetcher override for possible puppeteer request in browsersteps, because puppeteer:browser steps is incomplete.")
                from changedetectionio.content_fetchers.playwright import fetcher as playwright_fetcher
                fetcher_obj = playwright_fetcher
            else:
                fetcher_obj = getattr(content_fetchers, prefer_fetch_backend)
        else:
            # What it referenced doesn't exist, just use a default
            fetcher_obj = getattr(content_fetchers, "html_requests")

        proxy_url = None
        if preferred_proxy_id:
            # Custom browser endpoints should NOT have a proxy added
            if not prefer_fetch_backend.startswith('extra_browser_'):
                proxy_url = self.datastore.proxy_list.get(preferred_proxy_id).get('url')
                logger.debug(f"Selected proxy key '{preferred_proxy_id}' as proxy URL '{proxy_url}' for {url}")
            else:
                logger.debug(f"Skipping adding proxy data when custom Browser endpoint is specified.")

        # Now call the fetcher (playwright/requests/etc) with arguments that only a fetcher would need.
        # When browser_connection_url is None, the method should default to working out the best defaults (os env vars etc)
        self.fetcher = fetcher_obj(proxy_override=proxy_url,
                                   custom_browser_connection_url=custom_browser_connection_url
                                   )

        if self.watch.has_browser_steps:
            self.fetcher.browser_steps = self.watch.get('browser_steps', [])
            self.fetcher.browser_steps_screenshot_path = os.path.join(self.datastore.datastore_path, self.watch.get('uuid'))

        # Tweak the base config with the per-watch ones
        from changedetectionio.safe_jinja import render as jinja_render
        request_headers = CaseInsensitiveDict()

        ua = self.datastore.data['settings']['requests'].get('default_ua')
        if ua and ua.get(prefer_fetch_backend):
            request_headers.update({'User-Agent': ua.get(prefer_fetch_backend)})

        request_headers.update(self.watch.get('headers', {}))
        request_headers.update(self.datastore.get_all_base_headers())
        request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=self.watch.get('uuid')))

        # https://github.com/psf/requests/issues/4525
        # Requests doesn't yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot
        # do this by accident.
        if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']:
            request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '')

        for header_name in request_headers:
            request_headers.update({header_name: jinja_render(template_str=request_headers.get(header_name))})

        timeout = self.datastore.data['settings']['requests'].get('timeout')

        request_body = self.watch.get('body')
        if request_body:
            request_body = jinja_render(template_str=self.watch.get('body'))

        request_method = self.watch.get('method')
        ignore_status_codes = self.watch.get('ignore_status_codes', False)

        # Configurable per-watch or global extra delay before extracting text (for webDriver types)
        system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None)
        if self.watch.get('webdriver_delay'):
            self.fetcher.render_extract_delay = self.watch.get('webdriver_delay')
        elif system_webdriver_delay is not None:
            self.fetcher.render_extract_delay = system_webdriver_delay

        if self.watch.get('webdriver_js_execute_code') is not None and self.watch.get('webdriver_js_execute_code').strip():
            self.fetcher.webdriver_js_execute_code = self.watch.get('webdriver_js_execute_code')

        # Requests for PDFs, images etc should be passed the is_binary flag
        is_binary = self.watch.is_pdf

        # And here we go! call the right browser with browser-specific settings
        empty_pages_are_a_change = self.datastore.data['settings']['application'].get('empty_pages_are_a_change', False)

        self.fetcher.run(url=url,
                         timeout=timeout,
                         request_headers=request_headers,
                         request_body=request_body,
                         request_method=request_method,
                         ignore_status_codes=ignore_status_codes,
                         current_include_filters=self.watch.get('include_filters'),
                         is_binary=is_binary,
                         empty_pages_are_a_change=empty_pages_are_a_change
                         )

        # @todo .quit here could go on close object, so we can run JS if change-detected
        self.fetcher.quit()

        # After init, call run_changedetection() which will do the actual change-detection

    @abstractmethod
    def run(self, uuid, skip_when_checksum_same=True, preferred_proxy=None):
    def run_changedetection(self, watch):
        update_obj = {'last_notification_error': False, 'last_error': False}
        some_data = 'xxxxx'
        update_obj["previous_md5"] = hashlib.md5(some_data.encode('utf-8')).hexdigest()
@@ -17,8 +172,83 @@ class difference_detection_processor():
        return changed_detected, update_obj, ''.encode('utf-8')

def find_sub_packages(package_name):
    """
    Find all sub-packages within the given package.

    :param package_name: The name of the base package to scan for sub-packages.
    :return: A list of sub-package names.
    """
    package = importlib.import_module(package_name)
    return [name for _, name, is_pkg in pkgutil.iter_modules(package.__path__) if is_pkg]


def find_processors():
    """
    Find all subclasses of difference_detection_processor within this package.

    :return: A list of (module, sub-package name) tuples.
    """
    package_name = "changedetectionio.processors"  # Name of the current package/module

    processors = []
    sub_packages = find_sub_packages(package_name)

    for sub_package in sub_packages:
        module_name = f"{package_name}.{sub_package}.processor"
        try:
            module = importlib.import_module(module_name)

            # Iterate through all classes in the module
            for name, obj in inspect.getmembers(module, inspect.isclass):
                if issubclass(obj, difference_detection_processor) and obj is not difference_detection_processor:
                    processors.append((module, sub_package))
        except (ModuleNotFoundError, ImportError) as e:
            logger.warning(f"Failed to import module {module_name}: {e} (find_processors())")

    return processors

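Assuming the stock sub-packages import cleanly, a call would yield something along these lines (output illustrative only):

for module, sub_package in find_processors():
    print(sub_package, getattr(module, 'name', '(no name)'))
# restock_diff Re-stock & Price detection for single product pages
# text_json_diff Webpage Text/HTML, JSON and PDF changes
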
def get_parent_module(module):
    module_name = module.__name__
    if '.' not in module_name:
        return None  # Top-level module has no parent
    parent_module_name = module_name.rsplit('.', 1)[0]
    try:
        return importlib.import_module(parent_module_name)
    except Exception as e:
        pass

    return False


def get_custom_watch_obj_for_processor(processor_name):
    from changedetectionio.model import Watch
    watch_class = Watch.model
    processor_classes = find_processors()
    custom_watch_obj = next((tpl for tpl in processor_classes if tpl[1] == processor_name), None)
    if custom_watch_obj:
        # Parent of .processor.py COULD have its own Watch implementation
        parent_module = get_parent_module(custom_watch_obj[0])
        if hasattr(parent_module, 'Watch'):
            watch_class = parent_module.Watch

    return watch_class


def available_processors():
    from . import restock_diff, text_json_diff
    x = [('text_json_diff', text_json_diff.name), ('restock_diff', restock_diff.name)]
    # @todo Make this smarter with introspection of sorts.
    return x
    """
    Get a list of processors by name and description for the UI elements
    :return: A list :)
    """

    processor_classes = find_processors()

    available = []
    for package, processor_class in processor_classes:
        available.append((processor_class, package.name))

    return available

10  changedetectionio/processors/exceptions.py  Normal file
@@ -0,0 +1,10 @@
class ProcessorException(Exception):
    def __init__(self, message=None, status_code=None, url=None, screenshot=None, has_filters=False, html_content='', xpath_data=None):
        self.message = message
        self.status_code = status_code
        self.url = url
        self.screenshot = screenshot
        self.has_filters = has_filters
        self.html_content = html_content
        self.xpath_data = xpath_data
        return
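A minimal sketch of how a processor surfaces a failure with page context attached (the URL and message here are hypothetical; compare the real raises in restock_diff/processor.py further down):

try:
    raise ProcessorException(message="Unable to extract restock data",
                             url="https://example.com/product",
                             status_code=200)
except ProcessorException as e:
    print(e.message, e.status_code, e.url)
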
@@ -1,131 +0,0 @@

import hashlib
import os
import re
import urllib3
from . import difference_detection_processor
from changedetectionio import content_fetcher
from copy import deepcopy

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

name = 'Re-stock detection for single product pages'
description = 'Detects if the product goes back to in-stock'

class UnableToExtractRestockData(Exception):
    def __init__(self, status_code):
        # Set this so we can use it in other parts of the app
        self.status_code = status_code
        return

class perform_site_check(difference_detection_processor):
    screenshot = None
    xpath_data = None

    def __init__(self, *args, datastore, **kwargs):
        super().__init__(*args, **kwargs)
        self.datastore = datastore

    def run(self, uuid, skip_when_checksum_same=True):

        # DeepCopy so we can be sure we don't accidentally change anything by reference
        watch = deepcopy(self.datastore.data['watching'].get(uuid))

        if not watch:
            raise Exception("Watch no longer exists.")

        # Protect against file:// access
        if re.search(r'^file', watch.get('url', ''), re.IGNORECASE) and not os.getenv('ALLOW_FILE_URI', False):
            raise Exception(
                "file:// type access is denied for security reasons."
            )

        # Unset any existing notification error
        update_obj = {'last_notification_error': False, 'last_error': False}

        request_headers = watch.get('headers', [])
        request_headers.update(self.datastore.get_all_base_headers())
        request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=uuid))

        # https://github.com/psf/requests/issues/4525
        # Requests doesn't yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot
        # do this by accident.
        if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']:
            request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '')

        timeout = self.datastore.data['settings']['requests'].get('timeout')

        url = watch.link

        request_body = self.datastore.data['watching'][uuid].get('body')
        request_method = self.datastore.data['watching'][uuid].get('method')
        ignore_status_codes = self.datastore.data['watching'][uuid].get('ignore_status_codes', False)

        # Pluggable content fetcher
        prefer_backend = watch.get_fetch_backend
        if not prefer_backend or prefer_backend == 'system':
            prefer_backend = self.datastore.data['settings']['application']['fetch_backend']

        if hasattr(content_fetcher, prefer_backend):
            klass = getattr(content_fetcher, prefer_backend)
        else:
            # If the klass doesn't exist, just use a default
            klass = getattr(content_fetcher, "html_requests")

        proxy_id = self.datastore.get_preferred_proxy_for_watch(uuid=uuid)
        proxy_url = None
        if proxy_id:
            proxy_url = self.datastore.proxy_list.get(proxy_id).get('url')
            print("UUID {} Using proxy {}".format(uuid, proxy_url))

        fetcher = klass(proxy_override=proxy_url)

        # Configurable per-watch or global extra delay before extracting text (for webDriver types)
        system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None)
        if watch['webdriver_delay'] is not None:
            fetcher.render_extract_delay = watch.get('webdriver_delay')
        elif system_webdriver_delay is not None:
            fetcher.render_extract_delay = system_webdriver_delay

        # Could be removed if requests/plaintext could also return some info?
        if prefer_backend != 'html_webdriver':
            raise Exception("Re-stock detection requires Chrome or compatible webdriver/playwright fetcher to work")

        if watch.get('webdriver_js_execute_code') is not None and watch.get('webdriver_js_execute_code').strip():
            fetcher.webdriver_js_execute_code = watch.get('webdriver_js_execute_code')

        fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, watch.get('include_filters'))
        fetcher.quit()

        self.screenshot = fetcher.screenshot
        self.xpath_data = fetcher.xpath_data

        # Track the content type
        update_obj['content_type'] = fetcher.headers.get('Content-Type', '')
        update_obj["last_check_status"] = fetcher.get_last_status_code()

        # Main detection method
        fetched_md5 = None
        if fetcher.instock_data:
            fetched_md5 = hashlib.md5(fetcher.instock_data.encode('utf-8')).hexdigest()
            # 'Possibly in stock' comes from stock-not-in-stock.js when no string found above the fold.
            update_obj["in_stock"] = True if fetcher.instock_data == 'Possibly in stock' else False
        else:
            raise UnableToExtractRestockData(status_code=fetcher.status_code)

        # The main thing that all this at the moment comes down to :)
        changed_detected = False

        if watch.get('previous_md5') and watch.get('previous_md5') != fetched_md5:
            # Yes if we only care about it going to instock, AND we are in stock
            if watch.get('in_stock_only') and update_obj["in_stock"]:
                changed_detected = True

            if not watch.get('in_stock_only'):
                # All cases
                changed_detected = True

        # Always record the new checksum
        update_obj["previous_md5"] = fetched_md5

        return changed_detected, update_obj, fetcher.instock_data.encode('utf-8')
84  changedetectionio/processors/restock_diff/__init__.py  Normal file
@@ -0,0 +1,84 @@

from babel.numbers import parse_decimal
from changedetectionio.model.Watch import model as BaseWatch
from typing import Union
import re

class Restock(dict):

    def parse_currency(self, raw_value: str) -> Union[float, None]:
        # Clean and standardize the value (ie 1,400.00 should be 1400.00), even better would be to store the whole thing as an integer.
        standardized_value = raw_value

        if ',' in standardized_value and '.' in standardized_value:
            # Identify the correct decimal separator
            if standardized_value.rfind('.') > standardized_value.rfind(','):
                standardized_value = standardized_value.replace(',', '')
            else:
                standardized_value = standardized_value.replace('.', '').replace(',', '.')
        else:
            standardized_value = standardized_value.replace(',', '.')

        # Remove any non-numeric characters except for the decimal point
        standardized_value = re.sub(r'[^\d.-]', '', standardized_value)

        if standardized_value:
            # Convert to float
            return float(parse_decimal(standardized_value, locale='en'))

        return None

    def __init__(self, *args, **kwargs):
        # Define default values
        default_values = {
            'in_stock': None,
            'price': None,
            'currency': None,
            'original_price': None
        }

        # Initialize the dictionary with default values
        super().__init__(default_values)

        # Update with any provided positional arguments (dictionaries)
        if args:
            if len(args) == 1 and isinstance(args[0], dict):
                self.update(args[0])
            else:
                raise ValueError("Only one positional argument of type 'dict' is allowed")

    def __setitem__(self, key, value):
        # Custom logic to handle setting price and original_price
        if key == 'price' or key == 'original_price':
            if isinstance(value, str):
                value = self.parse_currency(raw_value=value)

        super().__setitem__(key, value)

class Watch(BaseWatch):
    def __init__(self, *arg, **kw):
        super().__init__(*arg, **kw)
        self['restock'] = Restock(kw['default']['restock']) if kw.get('default') and kw['default'].get('restock') else Restock()

        self['restock_settings'] = kw['default']['restock_settings'] if kw.get('default', {}).get('restock_settings') else {
            'follow_price_changes': True,
            'in_stock_processing': 'in_stock_only'
        }  # @todo update

    def clear_watch(self):
        super().clear_watch()
        self.update({'restock': Restock()})

    def extra_notification_token_values(self):
        values = super().extra_notification_token_values()
        values['restock'] = self.get('restock', {})
        return values

    def extra_notification_token_placeholder_info(self):
        values = super().extra_notification_token_placeholder_info()

        values.append(('restock.price', "Price detected"))
        values.append(('restock.original_price', "Original price at first check"))

        return values

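A quick illustration (hypothetical values) of the currency normalisation that __setitem__ applies via parse_currency(), both European and US separators collapse to the same float:

r = Restock()
r['price'] = '1.399,00'            # European separators -> 1399.0
assert r['price'] == 1399.0
r['original_price'] = '$1,399.00'  # US separators -> 1399.0
assert r['original_price'] == 1399.0
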
81  changedetectionio/processors/restock_diff/forms.py  Normal file
@@ -0,0 +1,81 @@
from wtforms import (
    BooleanField,
    validators,
    FloatField
)
from wtforms.fields.choices import RadioField
from wtforms.fields.form import FormField
from wtforms.form import Form

from changedetectionio.forms import processor_text_json_diff_form


class RestockSettingsForm(Form):
    in_stock_processing = RadioField(label='Re-stock detection', choices=[
        ('in_stock_only', "In Stock only (Out Of Stock -> In Stock only)"),
        ('all_changes', "Any availability changes"),
        ('off', "Off, don't follow availability/restock"),
    ], default="in_stock_only")

    price_change_min = FloatField('Below price to trigger notification', [validators.Optional()],
                                  render_kw={"placeholder": "No limit", "size": "10"})
    price_change_max = FloatField('Above price to trigger notification', [validators.Optional()],
                                  render_kw={"placeholder": "No limit", "size": "10"})
    price_change_threshold_percent = FloatField('Threshold in % for price changes since the original price', validators=[

        validators.Optional(),
        validators.NumberRange(min=0, max=100, message="Should be between 0 and 100"),
    ], render_kw={"placeholder": "0%", "size": "5"})

    follow_price_changes = BooleanField('Follow price changes', default=True)

class processor_settings_form(processor_text_json_diff_form):
    restock_settings = FormField(RestockSettingsForm)

    def extra_tab_content(self):
        return 'Restock & Price Detection'

    def extra_form_content(self):
        output = ""

        if getattr(self, 'watch', None) and getattr(self, 'datastore'):
            for tag_uuid in self.watch.get('tags'):
                tag = self.datastore.data['settings']['application']['tags'].get(tag_uuid, {})
                if tag.get('overrides_watch'):
                    # @todo - Quick and dirty, can't access 'url_for' here because it's out of scope somehow
                    output = f"""<p><strong>Note! A Group tag overrides the restock and price detection here.</strong></p><style>#restock-fieldset-price-group {{ opacity: 0.6; }}</style>"""

        output += """
        {% from '_helpers.html' import render_field, render_checkbox_field, render_button %}
        <script>
            $(document).ready(function () {
                toggleOpacity('#restock_settings-follow_price_changes', '.price-change-minmax', true);
            });
        </script>

        <fieldset id="restock-fieldset-price-group">
            <div class="pure-control-group">
                <fieldset class="pure-group inline-radio">
                    {{ render_field(form.restock_settings.in_stock_processing) }}
                </fieldset>
                <fieldset class="pure-group">
                    {{ render_checkbox_field(form.restock_settings.follow_price_changes) }}
                    <span class="pure-form-message-inline">Changes in price should trigger a notification</span>
                </fieldset>
                <fieldset class="pure-group price-change-minmax">
                    {{ render_field(form.restock_settings.price_change_min, placeholder=watch.get('restock', {}).get('price')) }}
                    <span class="pure-form-message-inline">Minimum amount, trigger a change/notification when the price drops <i>below</i> this value.</span>
                </fieldset>
                <fieldset class="pure-group price-change-minmax">
                    {{ render_field(form.restock_settings.price_change_max, placeholder=watch.get('restock', {}).get('price')) }}
                    <span class="pure-form-message-inline">Maximum amount, trigger a change/notification when the price rises <i>above</i> this value.</span>
                </fieldset>
                <fieldset class="pure-group price-change-minmax">
                    {{ render_field(form.restock_settings.price_change_threshold_percent) }}
                    <span class="pure-form-message-inline">Price must change more than this % to trigger a change since the first check.</span><br>
                    <span class="pure-form-message-inline">For example, if the product is $1,000 USD originally, <strong>2%</strong> would mean it has to change more than $20 since the first check.</span><br>
                </fieldset>
            </div>
        </fieldset>
        """
        return output
314  changedetectionio/processors/restock_diff/processor.py  Normal file
@@ -0,0 +1,314 @@
from .. import difference_detection_processor
from ..exceptions import ProcessorException
from . import Restock
from loguru import logger

import urllib3
import time

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
name = 'Re-stock & Price detection for single product pages'
description = 'Detects if the product goes back to in-stock'

class UnableToExtractRestockData(Exception):
    def __init__(self, status_code):
        # Set this so we can use it in other parts of the app
        self.status_code = status_code
        return

class MoreThanOnePriceFound(Exception):
    def __init__(self):
        return

def _search_prop_by_value(matches, value):
    for properties in matches:
        for prop in properties:
            if value in prop[0]:
                return prop[1]  # Return the desired value and exit the function

def _deduplicate_prices(data):
    import re

    '''
    Some price data has multiple entries, OR it has a single entry with ['$159', '159', 159, "$ 159"] or just "159"
    Get all the values, clean them, add them to a set and then return the unique values
    '''
    unique_data = set()

    # Collect the normalised price of each datum where it was not seen before
    for datum in data:

        if isinstance(datum.value, list):
            # Process each item in the list
            normalized_value = set([float(re.sub(r'[^\d.]', '', str(item))) for item in datum.value if str(item).strip()])
            unique_data.update(normalized_value)
        else:
            # Process single value
            v = float(re.sub(r'[^\d.]', '', str(datum.value)))
            unique_data.add(v)

    return list(unique_data)

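A rough illustration of the dedup behaviour, using a tiny stand-in for the jsonpath-ng match objects (which carry a .value attribute):

from collections import namedtuple
Datum = namedtuple('Datum', ['value'])  # stand-in for a jsonpath_ng match
prices = [Datum('$159'), Datum(['159', '$ 159']), Datum(159)]
assert _deduplicate_prices(prices) == [159.0]  # all variants collapse to one float
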
# should return Restock()
# add casting?
def get_itemprop_availability(html_content) -> Restock:
    """
    Kind of funny/cool way to find price/availability in one of many different possibilities.
    Use 'extruct' to find any possible RDFa/microdata/json-ld data, make a JSON string from the output then search it.
    """
    from jsonpath_ng import parse

    import re
    now = time.time()
    import extruct
    logger.trace(f"Imported extruct module in {time.time() - now:.3f}s")

    now = time.time()

    # Extruct is very slow, I'm wondering if some ML is going to be faster (800ms on my i7), 'rdfa' seems to be the heaviest.
    syntaxes = ['dublincore', 'json-ld', 'microdata', 'microformat', 'opengraph']
    try:
        data = extruct.extract(html_content, syntaxes=syntaxes)
    except Exception as e:
        logger.warning(f"Unable to extract data, document parsing with extruct failed with {type(e).__name__} - {str(e)}")
        return Restock()

    logger.trace(f"Extruct basic extract of all metadata done in {time.time() - now:.3f}s")

    # First phase, dead simple scanning of anything that looks useful
    value = Restock()
    if data:
        logger.debug(f"Using jsonpath to find price/availability/etc")
        price_parse = parse('$..(price|Price)')
        pricecurrency_parse = parse('$..(pricecurrency|currency|priceCurrency )')
        availability_parse = parse('$..(availability|Availability)')

        price_result = _deduplicate_prices(price_parse.find(data))
        if price_result:
            # Right now, we just support single product items, maybe we will store the whole actual metadata separately in the future and
            # parse that for the UI?
            if len(price_result) > 1:
                # See if all prices are different, in the case that one product has many embedded data types with the same price
                # One might have $121.95 and another 121.95 etc
                logger.warning(f"More than one price found {price_result}, throwing exception, can't use this plugin.")
                raise MoreThanOnePriceFound()

            value['price'] = price_result[0]

        pricecurrency_result = pricecurrency_parse.find(data)
        if pricecurrency_result:
            value['currency'] = pricecurrency_result[0].value

        availability_result = availability_parse.find(data)
        if availability_result:
            value['availability'] = availability_result[0].value

        if value.get('availability'):
            value['availability'] = re.sub(r'(?i)^(https|http)://schema.org/', '',
                                           value.get('availability').strip(' "\'').lower()) if value.get('availability') else None

        # Second, go dig through OpenGraph, which is something that jsonpath_ng can't do because of the tuples and double-dots (:)
        if not value.get('price') or not value.get('availability'):
            logger.debug(f"Alternatively digging through OpenGraph properties for restock/price info..")
            jsonpath_expr = parse('$..properties')

            for match in jsonpath_expr.find(data):
                if not value.get('price'):
                    value['price'] = _search_prop_by_value([match.value], "price:amount")
                if not value.get('availability'):
                    value['availability'] = _search_prop_by_value([match.value], "product:availability")
                if not value.get('currency'):
                    value['currency'] = _search_prop_by_value([match.value], "price:currency")
        logger.trace(f"Processed with Extruct in {time.time()-now:.3f}s")

    return value

def is_between(number, lower=None, upper=None):
    """
    Check if a number is between two values.

    Parameters:
        number (float): The number to check.
        lower (float or None): The lower bound (inclusive). If None, no lower bound.
        upper (float or None): The upper bound (inclusive). If None, no upper bound.

    Returns:
        bool: True if the number is between the lower and upper bounds, False otherwise.
    """
    return (lower is None or lower <= number) and (upper is None or number <= upper)

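Open-ended bounds behave as expected, for example:

assert is_between(50, lower=10, upper=None)     # only a lower bound set
assert not is_between(5, lower=10, upper=None)
assert is_between(5)                            # no bounds at all -> always True
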
class perform_site_check(difference_detection_processor):
    screenshot = None
    xpath_data = None

    def run_changedetection(self, watch):
        import hashlib

        if not watch:
            raise Exception("Watch no longer exists.")

        # Unset any existing notification error
        update_obj = {'last_notification_error': False, 'last_error': False, 'restock': Restock()}

        self.screenshot = self.fetcher.screenshot
        self.xpath_data = self.fetcher.xpath_data

        # Track the content type
        update_obj['content_type'] = self.fetcher.headers.get('Content-Type', '')
        update_obj["last_check_status"] = self.fetcher.get_last_status_code()

        # Only try to process restock information (like scraping for keywords) if the page was actually rendered correctly.
        # Otherwise it will assume "in stock" because nothing suggesting the opposite was found
        from ...html_tools import html_to_text
        text = html_to_text(self.fetcher.content)
        logger.debug(f"Length of text after conversion: {len(text)}")
        if not len(text):
            from ...content_fetchers.exceptions import ReplyWithContentButNoText
            raise ReplyWithContentButNoText(url=watch.link,
                                            status_code=self.fetcher.get_last_status_code(),
                                            screenshot=self.fetcher.screenshot,
                                            html_content=self.fetcher.content,
                                            xpath_data=self.fetcher.xpath_data
                                            )

        # Which restock settings to compare against?
        restock_settings = watch.get('restock_settings', {})

        # See if any tags have 'activate for individual watches in this tag/group?' enabled and use the first we find
        for tag_uuid in watch.get('tags'):
            tag = self.datastore.data['settings']['application']['tags'].get(tag_uuid, {})
            if tag.get('overrides_watch'):
                restock_settings = tag.get('restock_settings', {})
                logger.info(f"Watch {watch.get('uuid')} - Tag '{tag.get('title')}' selected for restock settings override")
                break

        itemprop_availability = {}
        try:
            itemprop_availability = get_itemprop_availability(self.fetcher.content)
        except MoreThanOnePriceFound as e:
            # Add the real data
            raise ProcessorException(message="Cannot run, more than one price detected, this plugin is only for product pages with ONE product, try the content-change detection mode.",
                                     url=watch.get('url'),
                                     status_code=self.fetcher.get_last_status_code(),
                                     screenshot=self.fetcher.screenshot,
                                     xpath_data=self.fetcher.xpath_data
                                     )

        # Something valid in get_itemprop_availability() by scraping metadata?
        if itemprop_availability.get('price') or itemprop_availability.get('availability'):
            # Store for other usage
            update_obj['restock'] = itemprop_availability

            if itemprop_availability.get('availability'):
                # @todo: Configurable?
                if any(substring.lower() in itemprop_availability['availability'].lower() for substring in [
                    'instock',
                    'instoreonly',
                    'limitedavailability',
                    'onlineonly',
                    'presale']
                       ):
                    update_obj['restock']['in_stock'] = True
                else:
                    update_obj['restock']['in_stock'] = False

        # Main detection method
        fetched_md5 = None

        # store original price if not set
        if itemprop_availability and itemprop_availability.get('price') and not itemprop_availability.get('original_price'):
            itemprop_availability['original_price'] = itemprop_availability.get('price')
            update_obj['restock']["original_price"] = itemprop_availability.get('price')

        if not self.fetcher.instock_data and not itemprop_availability.get('availability') and not itemprop_availability.get('price'):
            raise ProcessorException(
                message=f"Unable to extract restock data for this page unfortunately. (Got code {self.fetcher.get_last_status_code()} from server), no embedded stock information was found and nothing interesting in the text, try using this watch with Chrome.",
                url=watch.get('url'),
                status_code=self.fetcher.get_last_status_code(),
                screenshot=self.fetcher.screenshot,
                xpath_data=self.fetcher.xpath_data
                )

        logger.debug(f"self.fetcher.instock_data is - '{self.fetcher.instock_data}' and itemprop_availability.get('availability') is {itemprop_availability.get('availability')}")
        # Nothing automatic in microdata found, revert to scraping the page
        if self.fetcher.instock_data and itemprop_availability.get('availability') is None:
            # 'Possibly in stock' comes from stock-not-in-stock.js when no string found above the fold.
            # Careful! this does not really come from chrome/js when the watch is set to plaintext
            update_obj['restock']["in_stock"] = True if self.fetcher.instock_data == 'Possibly in stock' else False
            logger.debug(f"Watch UUID {watch.get('uuid')} restock check returned instock_data - '{self.fetcher.instock_data}' from JS scraper.")

        # Very often websites will lie about the 'availability' in the metadata, so if the scraped version says it's NOT in stock, use that.
        if self.fetcher.instock_data and self.fetcher.instock_data != 'Possibly in stock':
            if update_obj['restock'].get('in_stock'):
                logger.warning(
                    f"Mismatch in the availability machine data - metadata said in-stock but the scraper says it's not in stock! itemprop was '{itemprop_availability}' and scraped from browser was '{self.fetcher.instock_data}', update obj was {update_obj['restock']}")
                logger.warning(f"Setting instock to FALSE, scraper found '{self.fetcher.instock_data}' in the body but metadata reported not-in-stock")
                update_obj['restock']["in_stock"] = False

        # What we store in the snapshot
        price = update_obj.get('restock').get('price') if update_obj.get('restock').get('price') else ""
        snapshot_content = f"In Stock: {update_obj.get('restock').get('in_stock')} - Price: {price}"

        # Main detection method
        fetched_md5 = hashlib.md5(snapshot_content.encode('utf-8')).hexdigest()

        # The main thing that all this at the moment comes down to :)
        changed_detected = False
        logger.debug(f"Watch UUID {watch.get('uuid')} restock check - Previous MD5: {watch.get('previous_md5')}, Fetched MD5 {fetched_md5}")

        # out of stock -> back in stock only?
        if watch.get('restock') and watch['restock'].get('in_stock') != update_obj['restock'].get('in_stock'):
            # Yes if we only care about it going to instock, AND we are in stock
            if restock_settings.get('in_stock_processing') == 'in_stock_only' and update_obj['restock']['in_stock']:
                changed_detected = True

            if restock_settings.get('in_stock_processing') == 'all_changes':
                # All cases
                changed_detected = True

        if restock_settings.get('follow_price_changes') and watch.get('restock') and update_obj.get('restock') and update_obj['restock'].get('price'):
            price = float(update_obj['restock'].get('price'))
            # Default to current price if no previous price found
            if watch['restock'].get('original_price'):
                previous_price = float(watch['restock'].get('original_price'))
                # It was different, but negate it further down
                if price != previous_price:
                    changed_detected = True

            # Minimum/maximum price limit
            if update_obj.get('restock') and update_obj['restock'].get('price'):
                logger.debug(
                    f"{watch.get('uuid')} - Change was detected, 'price_change_max' is '{restock_settings.get('price_change_max', '')}' 'price_change_min' is '{restock_settings.get('price_change_min', '')}', price from website is '{update_obj['restock'].get('price', '')}'.")
                if update_obj['restock'].get('price'):
                    min_limit = float(restock_settings.get('price_change_min')) if restock_settings.get('price_change_min') else None
                    max_limit = float(restock_settings.get('price_change_max')) if restock_settings.get('price_change_max') else None

                    price = float(update_obj['restock'].get('price'))
                    logger.debug(f"{watch.get('uuid')} after float conversion - Min limit: '{min_limit}' Max limit: '{max_limit}' Price: '{price}'")
                    if min_limit or max_limit:
                        if is_between(number=price, lower=min_limit, upper=max_limit):
                            # Price was between min/max limit, so there was nothing to do in any case
                            logger.trace(f"{watch.get('uuid')} {price} is between {min_limit} and {max_limit}, nothing to check, forcing changed_detected = False (was {changed_detected})")
                            changed_detected = False
                        else:
                            logger.trace(f"{watch.get('uuid')} {price} is not between {min_limit} and {max_limit}, continuing normal comparison")

                    # Price comparison by %
                    if watch['restock'].get('original_price') and changed_detected and restock_settings.get('price_change_threshold_percent'):
                        previous_price = float(watch['restock'].get('original_price'))
                        pc = float(restock_settings.get('price_change_threshold_percent'))
                        change = abs((price - previous_price) / previous_price * 100)
                        if change and change <= pc:
                            logger.debug(f"{watch.get('uuid')} Override change-detected to FALSE because % threshold ({pc}%) was {change:.3f}%")
                            changed_detected = False
                        else:
                            logger.debug(f"{watch.get('uuid')} Price change was {change:.3f}%, (threshold {pc}%)")

        # Always record the new checksum
        update_obj["previous_md5"] = fetched_md5

        return changed_detected, update_obj, snapshot_content.strip()
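A worked example (hypothetical numbers) of the percent-threshold suppression above: with an original price of 1000.00 and a 2% threshold, a move to 1015.00 is only a 1.5% change, so changed_detected is forced back to False:

previous_price, price, pc = 1000.00, 1015.00, 2.0
change = abs((price - previous_price) / previous_price * 100)  # ~1.5
assert change <= pc  # below the 2% threshold -> the change is suppressed
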
115  changedetectionio/processors/text_json_diff/__init__.py  Normal file
@@ -0,0 +1,115 @@

from loguru import logger


def _task(watch, update_handler):
    from changedetectionio.content_fetchers.exceptions import ReplyWithContentButNoText
    from changedetectionio.processors.text_json_diff.processor import FilterNotFoundInResponse

    text_after_filter = ''

    try:
        # The slow process (we run 2 of these in parallel)
        changed_detected, update_obj, text_after_filter = update_handler.run_changedetection(watch=watch)
    except FilterNotFoundInResponse as e:
        text_after_filter = f"Filter not found in HTML: {str(e)}"
    except ReplyWithContentButNoText as e:
        text_after_filter = "Filter found but no text (empty result)"
    except Exception as e:
        text_after_filter = f"Error: {str(e)}"

    if not text_after_filter.strip():
        text_after_filter = 'Empty content'

    # because run_changedetection always returns bytes due to saving the snapshots etc
    text_after_filter = text_after_filter.decode('utf-8') if isinstance(text_after_filter, bytes) else text_after_filter

    return text_after_filter

def prepare_filter_prevew(datastore, watch_uuid):
|
||||
'''Used by @app.route("/edit/<string:uuid>/preview-rendered", methods=['POST'])'''
|
||||
from changedetectionio import forms, html_tools
|
||||
from changedetectionio.model.Watch import model as watch_model
|
||||
from concurrent.futures import ProcessPoolExecutor
|
||||
from copy import deepcopy
|
||||
from flask import request, jsonify
|
||||
import brotli
|
||||
import importlib
|
||||
import os
|
||||
import time
|
||||
now = time.time()
|
||||
|
||||
text_after_filter = ''
|
||||
text_before_filter = ''
|
||||
trigger_line_numbers = []
|
||||
ignore_line_numbers = []
|
||||
|
||||
tmp_watch = deepcopy(datastore.data['watching'].get(watch_uuid))
|
||||
|
||||
if tmp_watch and tmp_watch.history and os.path.isdir(tmp_watch.watch_data_dir):
|
||||
# Splice in the temporary stuff from the form
|
||||
form = forms.processor_text_json_diff_form(formdata=request.form if request.method == 'POST' else None,
|
||||
data=request.form
|
||||
)
|
||||
|
||||
# Only update vars that came in via the AJAX post
|
||||
p = {k: v for k, v in form.data.items() if k in request.form.keys()}
|
||||
tmp_watch.update(p)
|
||||
blank_watch_no_filters = watch_model()
|
||||
blank_watch_no_filters['url'] = tmp_watch.get('url')
|
||||
|
||||
latest_filename = next(reversed(tmp_watch.history))
|
||||
html_fname = os.path.join(tmp_watch.watch_data_dir, f"{latest_filename}.html.br")
|
||||
with open(html_fname, 'rb') as f:
|
||||
decompressed_data = brotli.decompress(f.read()).decode('utf-8') if html_fname.endswith('.br') else f.read().decode('utf-8')
|
||||
|
||||
# Just like a normal change detection except provide a fake "watch" object and dont call .call_browser()
|
||||
processor_module = importlib.import_module("changedetectionio.processors.text_json_diff.processor")
|
||||
update_handler = processor_module.perform_site_check(datastore=datastore,
|
||||
watch_uuid=tmp_watch.get('uuid') # probably not needed anymore anyway?
|
||||
)
|
||||
# Use the last loaded HTML as the input
|
||||
update_handler.datastore = datastore
|
||||
update_handler.fetcher.content = str(decompressed_data) # str() because playwright/puppeteer/requests return string
|
||||
update_handler.fetcher.headers['content-type'] = tmp_watch.get('content-type')
|
||||
|
||||
# Process our watch with filters and the HTML from disk, and also a blank watch with no filters but also with the same HTML from disk
|
||||
# Do this as a parallel process because it could take some time
|
||||
with ProcessPoolExecutor(max_workers=2) as executor:
|
||||
future1 = executor.submit(_task, tmp_watch, update_handler)
|
||||
future2 = executor.submit(_task, blank_watch_no_filters, update_handler)
|
||||
|
||||
text_after_filter = future1.result()
|
||||
text_before_filter = future2.result()
|
||||
|
||||
try:
|
||||
trigger_line_numbers = html_tools.strip_ignore_text(content=text_after_filter,
|
||||
wordlist=tmp_watch['trigger_text'],
|
||||
mode='line numbers'
|
||||
)
|
||||
except Exception as e:
|
||||
text_before_filter = f"Error: {str(e)}"
|
||||
|
||||
try:
|
||||
text_to_ignore = tmp_watch.get('ignore_text', []) + datastore.data['settings']['application'].get('global_ignore_text', [])
|
||||
ignore_line_numbers = html_tools.strip_ignore_text(content=text_after_filter,
|
||||
wordlist=text_to_ignore,
|
||||
mode='line numbers'
|
||||
)
|
||||
except Exception as e:
|
||||
text_before_filter = f"Error: {str(e)}"
|
||||
|
||||
logger.trace(f"Parsed in {time.time() - now:.3f}s")
|
||||
|
||||
return jsonify(
|
||||
{
|
||||
'after_filter': text_after_filter,
|
||||
'before_filter': text_before_filter.decode('utf-8') if isinstance(text_before_filter, bytes) else text_before_filter,
|
||||
'duration': time.time() - now,
|
||||
'trigger_line_numbers': trigger_line_numbers,
|
||||
'ignore_line_numbers': ignore_line_numbers,
|
||||
}
|
||||
)
|
||||
|
||||
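Note: the `_task` callable submitted to the ProcessPoolExecutor above is defined elsewhere in the blueprint and is not part of this hunk. A minimal sketch of its likely shape, assuming `update_handler.run_changedetection(watch)` returns the (changed_detected, update_obj, text) tuple shown later in this diff:

def _task(watch, update_handler):
    # Sketch only -- the real helper lives outside this hunk
    text_after_filter = ''
    try:
        # run_changedetection() returns (changed_detected, update_obj, text);
        # the preview endpoint only needs the post-filter text
        changed_detected, update_obj, text_after_filter = update_handler.run_changedetection(watch=watch)
    except Exception as e:
        text_after_filter = f"Error: {str(e)}"
    return text_after_filter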
@@ -1,26 +1,28 @@
|
||||
# HTML to TEXT/JSON DIFFERENCE FETCHER
|
||||
# HTML to TEXT/JSON DIFFERENCE self.fetcher
|
||||
|
||||
import hashlib
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import urllib3
|
||||
|
||||
from changedetectionio import content_fetcher, html_tools
|
||||
from changedetectionio.processors import difference_detection_processor
|
||||
from changedetectionio.html_tools import PERL_STYLE_REGEX, cdata_in_document_to_text, TRANSLATE_WHITESPACE_TABLE
|
||||
from changedetectionio import html_tools, content_fetchers
|
||||
from changedetectionio.blueprint.price_data_follower import PRICE_DATA_TRACK_ACCEPT, PRICE_DATA_TRACK_REJECT
|
||||
from copy import deepcopy
|
||||
from . import difference_detection_processor
|
||||
from ..html_tools import PERL_STYLE_REGEX
|
||||
from loguru import logger
|
||||
|
||||
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
|
||||
|
||||
name = 'Webpage Text/HTML, JSON and PDF changes'
|
||||
description = 'Detects all text changes where possible'
|
||||
json_filter_prefixes = ['json:', 'jq:']
|
||||
|
||||
json_filter_prefixes = ['json:', 'jq:', 'jqraw:']
|
||||
|
||||
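These prefixes decide which include_filters go to the JSON extractor rather than the CSS/XPath path (see the has_filter_rule loop further down). A small illustrative dispatch; the rule string is a hypothetical example, and extract_json_as_string() is the same html_tools helper this file calls later:

from changedetectionio import html_tools

content = '{"offers": {"price": "19.99"}}'
rule = 'json:$.offers.price'  # hypothetical rule; 'jq:'/'jqraw:' also work when the jq package is installed
if any(prefix in rule for prefix in ('json:', 'jq:', 'jqraw:')):
    # Route the rule to the JSON extractor, just like the loop below does
    print(html_tools.extract_json_as_string(content=content, json_filter=rule))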
class FilterNotFoundInResponse(ValueError):
|
||||
def __init__(self, msg):
|
||||
def __init__(self, msg, screenshot=None, xpath_data=None):
|
||||
self.screenshot = screenshot
|
||||
self.xpath_data = xpath_data
|
||||
ValueError.__init__(self, msg)
|
||||
|
||||
|
||||
@@ -32,117 +34,32 @@ class PDFToHTMLToolNotFound(ValueError):
|
||||
# Some common stuff here that can be moved to a base class
|
||||
# (set_proxy_from_list)
|
||||
class perform_site_check(difference_detection_processor):
|
||||
screenshot = None
|
||||
xpath_data = None
|
||||
|
||||
def __init__(self, *args, datastore, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.datastore = datastore
|
||||
|
||||
def run(self, uuid, skip_when_checksum_same=True, preferred_proxy=None):
|
||||
def run_changedetection(self, watch):
|
||||
changed_detected = False
|
||||
html_content = ""
|
||||
screenshot = False # as bytes
|
||||
stripped_text_from_html = ""
|
||||
|
||||
# DeepCopy so we can be sure we don't accidentally change anything by reference
|
||||
watch = deepcopy(self.datastore.data['watching'].get(uuid))
|
||||
if not watch:
|
||||
raise Exception("Watch no longer exists.")
|
||||
|
||||
# Protect against file:// access
|
||||
if re.search(r'^file', watch.get('url', ''), re.IGNORECASE) and not os.getenv('ALLOW_FILE_URI', False):
|
||||
raise Exception(
|
||||
"file:// type access is denied for security reasons."
|
||||
)
|
||||
|
||||
# Unset any existing notification error
|
||||
update_obj = {'last_notification_error': False, 'last_error': False}
|
||||
|
||||
# Tweak the base config with the per-watch ones
|
||||
request_headers = watch.get('headers', [])
|
||||
request_headers.update(self.datastore.get_all_base_headers())
|
||||
request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=uuid))
|
||||
|
||||
# https://github.com/psf/requests/issues/4525
|
||||
# Requests doesn't yet support brotli encoding, so don't put 'br' here, be totally sure that the user cannot
|
||||
# do this by accident.
|
||||
if 'Accept-Encoding' in request_headers and "br" in request_headers['Accept-Encoding']:
|
||||
request_headers['Accept-Encoding'] = request_headers['Accept-Encoding'].replace(', br', '')
|
||||
|
||||
timeout = self.datastore.data['settings']['requests'].get('timeout')
|
||||
|
||||
url = watch.link
|
||||
|
||||
request_body = self.datastore.data['watching'][uuid].get('body')
|
||||
request_method = self.datastore.data['watching'][uuid].get('method')
|
||||
ignore_status_codes = self.datastore.data['watching'][uuid].get('ignore_status_codes', False)
|
||||
|
||||
# source: support
|
||||
is_source = False
|
||||
if url.startswith('source:'):
|
||||
url = url.replace('source:', '')
|
||||
is_source = True
|
||||
|
||||
# Pluggable content fetcher
|
||||
prefer_backend = watch.get_fetch_backend
|
||||
if not prefer_backend or prefer_backend == 'system':
|
||||
prefer_backend = self.datastore.data['settings']['application']['fetch_backend']
|
||||
|
||||
if hasattr(content_fetcher, prefer_backend):
|
||||
klass = getattr(content_fetcher, prefer_backend)
|
||||
else:
|
||||
# If the klass doesn't exist, just use a default
|
||||
klass = getattr(content_fetcher, "html_requests")
|
||||
|
||||
if preferred_proxy:
|
||||
proxy_id = preferred_proxy
|
||||
else:
|
||||
proxy_id = self.datastore.get_preferred_proxy_for_watch(uuid=uuid)
|
||||
|
||||
proxy_url = None
|
||||
if proxy_id:
|
||||
proxy_url = self.datastore.proxy_list.get(proxy_id).get('url')
|
||||
print("UUID {} Using proxy {}".format(uuid, proxy_url))
|
||||
|
||||
fetcher = klass(proxy_override=proxy_url)
|
||||
|
||||
# Configurable per-watch or global extra delay before extracting text (for webDriver types)
|
||||
system_webdriver_delay = self.datastore.data['settings']['application'].get('webdriver_delay', None)
|
||||
if watch['webdriver_delay'] is not None:
|
||||
fetcher.render_extract_delay = watch.get('webdriver_delay')
|
||||
elif system_webdriver_delay is not None:
|
||||
fetcher.render_extract_delay = system_webdriver_delay
|
||||
|
||||
# Possible conflict
|
||||
if prefer_backend == 'html_webdriver':
|
||||
fetcher.browser_steps = watch.get('browser_steps', None)
|
||||
fetcher.browser_steps_screenshot_path = os.path.join(self.datastore.datastore_path, uuid)
|
||||
|
||||
if watch.get('webdriver_js_execute_code') is not None and watch.get('webdriver_js_execute_code').strip():
|
||||
fetcher.webdriver_js_execute_code = watch.get('webdriver_js_execute_code')
|
||||
|
||||
# Requests for PDFs, images etc should be passed the is_binary flag
|
||||
is_binary = watch.is_pdf
|
||||
|
||||
fetcher.run(url, timeout, request_headers, request_body, request_method, ignore_status_codes, watch.get('include_filters'),
|
||||
is_binary=is_binary)
|
||||
fetcher.quit()
|
||||
|
||||
self.screenshot = fetcher.screenshot
|
||||
self.xpath_data = fetcher.xpath_data
|
||||
self.screenshot = self.fetcher.screenshot
|
||||
self.xpath_data = self.fetcher.xpath_data
|
||||
|
||||
# Track the content type
|
||||
update_obj['content_type'] = fetcher.get_all_headers().get('content-type', '').lower()
|
||||
update_obj['content_type'] = self.fetcher.get_all_headers().get('content-type', '').lower()
|
||||
|
||||
# Watches added automatically in the queue manager will skip if it's the same checksum as the previous run
|
||||
# Saves a lot of CPU
|
||||
update_obj['previous_md5_before_filters'] = hashlib.md5(fetcher.content.encode('utf-8')).hexdigest()
|
||||
if skip_when_checksum_same:
|
||||
if update_obj['previous_md5_before_filters'] == watch.get('previous_md5_before_filters'):
|
||||
raise content_fetcher.checksumFromPreviousCheckWasTheSame()
|
||||
update_obj['previous_md5_before_filters'] = hashlib.md5(self.fetcher.content.encode('utf-8')).hexdigest()
|
||||
|
||||
# Fetching complete, now filters
|
||||
# @todo move to class / maybe inside of fetcher abstract base?
|
||||
|
||||
# @note: I feel like the following should be in a more obvious chain system
|
||||
# - Check filter text
|
||||
@@ -151,15 +68,24 @@ class perform_site_check(difference_detection_processor):
|
||||
# https://stackoverflow.com/questions/41817578/basic-method-chaining ?
|
||||
# return content().textfilter().jsonextract().checksumcompare() ?
|
||||
|
||||
is_json = 'application/json' in fetcher.get_all_headers().get('content-type', '').lower()
|
||||
is_json = 'application/json' in self.fetcher.get_all_headers().get('content-type', '').lower()
|
||||
is_html = not is_json
|
||||
is_rss = False
|
||||
|
||||
ctype_header = self.fetcher.get_all_headers().get('content-type', '').lower()
|
||||
# Go into RSS preprocess for converting CDATA/comment to usable text
|
||||
if any(substring in ctype_header for substring in ['application/xml', 'application/rss', 'text/xml']):
|
||||
if '<rss' in self.fetcher.content[:100].lower():
|
||||
self.fetcher.content = cdata_in_document_to_text(html_content=self.fetcher.content)
|
||||
is_rss = True
|
||||
|
||||
# source: support, basically treat it as plaintext
|
||||
if is_source:
|
||||
if watch.is_source_type_url:
|
||||
is_html = False
|
||||
is_json = False
|
||||
|
||||
if watch.is_pdf or 'application/pdf' in fetcher.get_all_headers().get('content-type', '').lower():
|
||||
inline_pdf = self.fetcher.get_all_headers().get('content-disposition', '') and '%PDF-1' in self.fetcher.content[:10]
|
||||
if watch.is_pdf or 'application/pdf' in self.fetcher.get_all_headers().get('content-type', '').lower() or inline_pdf:
|
||||
from shutil import which
|
||||
tool = os.getenv("PDF_TO_HTML_TOOL", "pdftohtml")
|
||||
if not which(tool):
|
||||
@@ -170,26 +96,28 @@ class perform_site_check(difference_detection_processor):
|
||||
[tool, '-stdout', '-', '-s', 'out.pdf', '-i'],
|
||||
stdout=subprocess.PIPE,
|
||||
stdin=subprocess.PIPE)
|
||||
proc.stdin.write(fetcher.raw_content)
|
||||
proc.stdin.write(self.fetcher.raw_content)
|
||||
proc.stdin.close()
|
||||
fetcher.content = proc.stdout.read().decode('utf-8')
|
||||
self.fetcher.content = proc.stdout.read().decode('utf-8')
|
||||
proc.wait(timeout=60)
|
||||
|
||||
# Add a little metadata so we know if the file changes (like if an image changes, but the text is the same)
|
||||
# @todo may cause problems with non-UTF8?
|
||||
metadata = "<p>Added by changedetection.io: Document checksum - {} Filesize - {} bytes</p>".format(
|
||||
hashlib.md5(fetcher.raw_content).hexdigest().upper(),
|
||||
len(fetcher.content))
|
||||
hashlib.md5(self.fetcher.raw_content).hexdigest().upper(),
|
||||
len(self.fetcher.content))
|
||||
|
||||
fetcher.content = fetcher.content.replace('</body>', metadata + '</body>')
|
||||
self.fetcher.content = self.fetcher.content.replace('</body>', metadata + '</body>')
|
||||
|
||||
# Better would be if Watch.model could access the global data also
|
||||
# and then use getattr https://docs.python.org/3/reference/datamodel.html#object.__getitem__
|
||||
# https://realpython.com/inherit-python-dict/ instead of doing it procedurally
|
||||
include_filters_from_tags = self.datastore.get_tag_overrides_for_watch(uuid=uuid, attr='include_filters')
|
||||
include_filters_rule = [*watch.get('include_filters', []), *include_filters_from_tags]
|
||||
include_filters_from_tags = self.datastore.get_tag_overrides_for_watch(uuid=watch.get('uuid'), attr='include_filters')
|
||||
|
||||
subtractive_selectors = [*self.datastore.get_tag_overrides_for_watch(uuid=uuid, attr='subtractive_selectors'),
|
||||
# 1845 - remove duplicated filters in both group and watch include filter
|
||||
include_filters_rule = list(dict.fromkeys(watch.get('include_filters', []) + include_filters_from_tags))
|
||||
|
||||
subtractive_selectors = [*self.datastore.get_tag_overrides_for_watch(uuid=watch.get('uuid'), attr='subtractive_selectors'),
|
||||
*watch.get("subtractive_selectors", []),
|
||||
*self.datastore.data["settings"]["application"].get("global_subtractive_selectors", [])
|
||||
]
|
||||
@@ -208,7 +136,7 @@ class perform_site_check(difference_detection_processor):
|
||||
if is_json:
|
||||
# Sort the JSON so we don't get false alerts when the content is just re-ordered
|
||||
try:
|
||||
fetcher.content = json.dumps(json.loads(fetcher.content), sort_keys=True)
|
||||
self.fetcher.content = json.dumps(json.loads(self.fetcher.content), sort_keys=True)
|
||||
except Exception as e:
|
||||
# Might have just been a snippet, or otherwise bad JSON, continue
|
||||
pass
|
||||
@@ -216,22 +144,22 @@ class perform_site_check(difference_detection_processor):
|
||||
if has_filter_rule:
|
||||
for filter in include_filters_rule:
|
||||
if any(prefix in filter for prefix in json_filter_prefixes):
|
||||
stripped_text_from_html += html_tools.extract_json_as_string(content=fetcher.content, json_filter=filter)
|
||||
stripped_text_from_html += html_tools.extract_json_as_string(content=self.fetcher.content, json_filter=filter)
|
||||
is_html = False
|
||||
|
||||
if is_html or is_source:
|
||||
if is_html or watch.is_source_type_url:
|
||||
|
||||
# CSS Filter, extract the HTML that matches and feed that into the existing inscriptis::get_text
|
||||
fetcher.content = html_tools.workarounds_for_obfuscations(fetcher.content)
|
||||
html_content = fetcher.content
|
||||
self.fetcher.content = html_tools.workarounds_for_obfuscations(self.fetcher.content)
|
||||
html_content = self.fetcher.content
|
||||
|
||||
# If not JSON, and if it's not text/plain..
|
||||
if 'text/plain' in fetcher.get_all_headers().get('content-type', '').lower():
|
||||
if 'text/plain' in self.fetcher.get_all_headers().get('content-type', '').lower():
|
||||
# Don't run get_text or xpath/css filters on plaintext
|
||||
stripped_text_from_html = html_content
|
||||
else:
|
||||
# Does it have some ld+json price data? used for easier monitoring
|
||||
update_obj['has_ldjson_price_data'] = html_tools.has_ldjson_product_info(fetcher.content)
|
||||
update_obj['has_ldjson_price_data'] = html_tools.has_ldjson_product_info(self.fetcher.content)
|
||||
|
||||
# Then we assume HTML
|
||||
if has_filter_rule:
|
||||
@@ -241,43 +169,52 @@ class perform_site_check(difference_detection_processor):
|
||||
# For HTML/XML we offer xpath as an option, just start a regular xPath "/.."
|
||||
if filter_rule[0] == '/' or filter_rule.startswith('xpath:'):
|
||||
html_content += html_tools.xpath_filter(xpath_filter=filter_rule.replace('xpath:', ''),
|
||||
html_content=fetcher.content,
|
||||
append_pretty_line_formatting=not is_source)
|
||||
html_content=self.fetcher.content,
|
||||
append_pretty_line_formatting=not watch.is_source_type_url,
|
||||
is_rss=is_rss)
|
||||
|
||||
elif filter_rule.startswith('xpath1:'):
|
||||
html_content += html_tools.xpath1_filter(xpath_filter=filter_rule.replace('xpath1:', ''),
|
||||
html_content=self.fetcher.content,
|
||||
append_pretty_line_formatting=not watch.is_source_type_url,
|
||||
is_rss=is_rss)
|
||||
else:
|
||||
# CSS Filter, extract the HTML that matches and feed that into the existing inscriptis::get_text
|
||||
html_content += html_tools.include_filters(include_filters=filter_rule,
|
||||
html_content=fetcher.content,
|
||||
append_pretty_line_formatting=not is_source)
|
||||
html_content=self.fetcher.content,
|
||||
append_pretty_line_formatting=not watch.is_source_type_url)
|
||||
|
||||
if not html_content.strip():
|
||||
raise FilterNotFoundInResponse(include_filters_rule)
|
||||
raise FilterNotFoundInResponse(msg=include_filters_rule, screenshot=self.fetcher.screenshot, xpath_data=self.fetcher.xpath_data)
|
||||
|
||||
if has_subtractive_selectors:
|
||||
html_content = html_tools.element_removal(subtractive_selectors, html_content)
|
||||
|
||||
if is_source:
|
||||
if watch.is_source_type_url:
|
||||
stripped_text_from_html = html_content
|
||||
else:
|
||||
# extract text
|
||||
do_anchor = self.datastore.data["settings"]["application"].get("render_anchor_tag_content", False)
|
||||
stripped_text_from_html = \
|
||||
html_tools.html_to_text(
|
||||
html_content,
|
||||
render_anchor_tag_content=do_anchor
|
||||
)
|
||||
stripped_text_from_html = html_tools.html_to_text(html_content=html_content,
|
||||
render_anchor_tag_content=do_anchor,
|
||||
is_rss=is_rss) # 1874 activate the <title workaround hack
|
||||
|
||||
if watch.get('trim_text_whitespace'):
|
||||
stripped_text_from_html = '\n'.join(line.strip() for line in stripped_text_from_html.replace("\n\n", "\n").splitlines())
|
||||
|
||||
# Re #340 - return the content before the 'ignore text' was applied
|
||||
text_content_before_ignored_filter = stripped_text_from_html.encode('utf-8')
|
||||
# Also used to calculate/show what was removed
|
||||
text_content_before_ignored_filter = stripped_text_from_html
|
||||
|
||||
# @todo whitespace coming from missing rtrim()?
|
||||
# stripped_text_from_html could be based on their preferences, replace the processed text with only that which they want to know about.
|
||||
# Rewrites the processed text based on only the diff result they want to see
|
||||
|
||||
if watch.has_special_diff_filter_options_set() and len(watch.history.keys()):
|
||||
# Now the content comes from the diff-parser and not the returned HTTP traffic, so there could be some differences
|
||||
from .. import diff
|
||||
from changedetectionio import diff
|
||||
# needs to not include (added) etc or it may get used twice
|
||||
# Replace the processed text with the preferred result
|
||||
rendered_diff = diff.render_diff(previous_version_file_contents=watch.get_last_fetched_before_filters(),
|
||||
rendered_diff = diff.render_diff(previous_version_file_contents=watch.get_last_fetched_text_before_filters(),
|
||||
newest_version_file_contents=stripped_text_from_html,
|
||||
include_equal=False, # not the same lines
|
||||
include_added=watch.get('filter_text_added', True),
|
||||
@@ -286,12 +223,12 @@ class perform_site_check(difference_detection_processor):
|
||||
line_feed_sep="\n",
|
||||
include_change_type_prefix=False)
|
||||
|
||||
watch.save_last_fetched_before_filters(text_content_before_ignored_filter)
|
||||
watch.save_last_text_fetched_before_filters(text_content_before_ignored_filter.encode('utf-8'))
|
||||
|
||||
if not rendered_diff and stripped_text_from_html:
|
||||
# We had some content, but no differences were found
|
||||
# Store our new file as the MD5 so it will trigger in the future
|
||||
c = hashlib.md5(text_content_before_ignored_filter.translate(None, b'\r\n\t ')).hexdigest()
|
||||
c = hashlib.md5(stripped_text_from_html.translate(TRANSLATE_WHITESPACE_TABLE).encode('utf-8')).hexdigest()
|
||||
return False, {'previous_md5': c}, stripped_text_from_html.encode('utf-8')
|
||||
else:
|
||||
stripped_text_from_html = rendered_diff
|
||||
@@ -299,25 +236,18 @@ class perform_site_check(difference_detection_processor):
|
||||
# Treat pages with no renderable text content as a change? No by default
|
||||
empty_pages_are_a_change = self.datastore.data['settings']['application'].get('empty_pages_are_a_change', False)
|
||||
if not is_json and not empty_pages_are_a_change and len(stripped_text_from_html.strip()) == 0:
|
||||
raise content_fetcher.ReplyWithContentButNoText(url=url,
|
||||
status_code=fetcher.get_last_status_code(),
|
||||
screenshot=screenshot,
|
||||
raise content_fetchers.exceptions.ReplyWithContentButNoText(url=url,
|
||||
status_code=self.fetcher.get_last_status_code(),
|
||||
screenshot=self.fetcher.screenshot,
|
||||
has_filters=has_filter_rule,
|
||||
html_content=html_content
|
||||
html_content=html_content,
|
||||
xpath_data=self.fetcher.xpath_data
|
||||
)
|
||||
|
||||
# We rely on the actual text in the html output.. many sites have random script vars etc,
|
||||
# in the future we'll implement other mechanisms.
|
||||
|
||||
update_obj["last_check_status"] = fetcher.get_last_status_code()
|
||||
|
||||
# If there's text to skip
|
||||
# @todo we could abstract out the get_text() to handle this cleaner
|
||||
text_to_ignore = watch.get('ignore_text', []) + self.datastore.data['settings']['application'].get('global_ignore_text', [])
|
||||
if len(text_to_ignore):
|
||||
stripped_text_from_html = html_tools.strip_ignore_text(stripped_text_from_html, text_to_ignore)
|
||||
else:
|
||||
stripped_text_from_html = stripped_text_from_html.encode('utf8')
|
||||
update_obj["last_check_status"] = self.fetcher.get_last_status_code()
|
||||
|
||||
# 615 Extract text by regex
|
||||
extract_text = watch.get('extract_text', [])
|
||||
@@ -327,37 +257,53 @@ class perform_site_check(difference_detection_processor):
|
||||
# in case they specified something in '/.../x'
|
||||
if re.search(PERL_STYLE_REGEX, s_re, re.IGNORECASE):
|
||||
regex = html_tools.perl_style_slash_enclosed_regex_to_options(s_re)
|
||||
result = re.findall(regex.encode('utf-8'), stripped_text_from_html)
|
||||
result = re.findall(regex, stripped_text_from_html)
|
||||
|
||||
for l in result:
|
||||
if type(l) is tuple:
|
||||
# @todo - some formatter option default (between groups)
|
||||
regex_matched_output += list(l) + [b'\n']
|
||||
regex_matched_output += list(l) + ['\n']
|
||||
else:
|
||||
# @todo - some formatter option default (between each ungrouped result)
|
||||
regex_matched_output += [l] + [b'\n']
|
||||
regex_matched_output += [l] + ['\n']
|
||||
else:
|
||||
# Doesn't look like regex, just hunt for plaintext and return whatever matches
|
||||
# `stripped_text_from_html` will be bytes, so we must encode s_re also to bytes
|
||||
r = re.compile(re.escape(s_re.encode('utf-8')), re.IGNORECASE)
|
||||
r = re.compile(re.escape(s_re), re.IGNORECASE)
|
||||
res = r.findall(stripped_text_from_html)
|
||||
if res:
|
||||
for match in res:
|
||||
regex_matched_output += [match] + [b'\n']
|
||||
regex_matched_output += [match] + ['\n']
|
||||
|
||||
##########################################################
|
||||
stripped_text_from_html = ''
|
||||
|
||||
# Now we will only show what the regex matched
|
||||
stripped_text_from_html = b''
|
||||
text_content_before_ignored_filter = b''
|
||||
if regex_matched_output:
|
||||
# @todo some formatter for presentation?
|
||||
stripped_text_from_html = b''.join(regex_matched_output)
|
||||
text_content_before_ignored_filter = stripped_text_from_html
|
||||
stripped_text_from_html = ''.join(regex_matched_output)
|
||||
|
||||
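For reference, a standalone illustration of the perl-style branch above: perl_style_slash_enclosed_regex_to_options() (from html_tools, imported at the top of this file) converts a '/pattern/flags' rule into a plain Python regex with inline flags, which then runs against the now str-typed text. The rule and sample text here are assumptions:

import re
from changedetectionio.html_tools import PERL_STYLE_REGEX, perl_style_slash_enclosed_regex_to_options

s_re = '/price: ([0-9]+)/i'  # hypothetical user-supplied extract rule
if re.search(PERL_STYLE_REGEX, s_re, re.IGNORECASE):
    # Convert '/.../i' into an inline-flag Python regex, as the code above does
    regex = perl_style_slash_enclosed_regex_to_options(s_re)
    print(re.findall(regex, 'PRICE: 100\nprice: 200'))  # expected: ['100', '200']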
if watch.get('remove_duplicate_lines'):
|
||||
stripped_text_from_html = '\n'.join(dict.fromkeys(line for line in stripped_text_from_html.replace("\n\n", "\n").splitlines()))
|
||||
|
||||
|
||||
if watch.get('sort_text_alphabetically'):
|
||||
# Note: Because a <p>something</p> will add an extra line feed to signify the paragraph gap
|
||||
# we end up with 'Some text\n\n', sorting will add all those extra \n at the start, so we remove them here.
|
||||
stripped_text_from_html = stripped_text_from_html.replace("\n\n", "\n")
|
||||
stripped_text_from_html = '\n'.join(sorted(stripped_text_from_html.splitlines(), key=lambda x: x.lower()))
|
||||
|
||||
### CALCULATE MD5
|
||||
# If there's text to ignore
|
||||
text_to_ignore = watch.get('ignore_text', []) + self.datastore.data['settings']['application'].get('global_ignore_text', [])
|
||||
text_for_checksuming = stripped_text_from_html
|
||||
if text_to_ignore:
|
||||
text_for_checksuming = html_tools.strip_ignore_text(stripped_text_from_html, text_to_ignore)
|
||||
|
||||
# Re #133 - if we should strip whitespaces from triggering the change detected comparison
|
||||
if self.datastore.data['settings']['application'].get('ignore_whitespace', False):
|
||||
fetched_md5 = hashlib.md5(stripped_text_from_html.translate(None, b'\r\n\t ')).hexdigest()
|
||||
if text_for_checksuming and self.datastore.data['settings']['application'].get('ignore_whitespace', False):
|
||||
fetched_md5 = hashlib.md5(text_for_checksuming.translate(TRANSLATE_WHITESPACE_TABLE).encode('utf-8')).hexdigest()
|
||||
else:
|
||||
fetched_md5 = hashlib.md5(stripped_text_from_html).hexdigest()
|
||||
fetched_md5 = hashlib.md5(text_for_checksuming.encode('utf-8')).hexdigest()
|
||||
|
||||
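The ignore_whitespace branch above makes the checksum insensitive to whitespace-only changes. A self-contained illustration of the idea; the real TRANSLATE_WHITESPACE_TABLE is imported from html_tools at the top of this file, the one below is an assumed equivalent:

import hashlib

TRANSLATE_WHITESPACE_TABLE = str.maketrans('', '', '\r\n\t ')  # assumption: mirrors the html_tools table

def whitespace_insensitive_md5(text):
    # Strip all whitespace before hashing, so reflowed text hashes the same
    return hashlib.md5(text.translate(TRANSLATE_WHITESPACE_TABLE).encode('utf-8')).hexdigest()

assert whitespace_insensitive_md5("price: 100\n") == whitespace_insensitive_md5("price:100")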
############ Blocking rules, after checksum #################
|
||||
blocked = False
|
||||
@@ -385,35 +331,40 @@ class perform_site_check(difference_detection_processor):
|
||||
if result:
|
||||
blocked = True
|
||||
|
||||
# The main thing that all this at the moment comes down to :)
|
||||
if watch.get('previous_md5') != fetched_md5:
|
||||
changed_detected = True
|
||||
|
||||
# Looks like something changed, but did it match all the rules?
|
||||
if blocked:
|
||||
changed_detected = False
|
||||
else:
|
||||
# The main thing that all this at the moment comes down to :)
|
||||
if watch.get('previous_md5') != fetched_md5:
|
||||
changed_detected = True
|
||||
|
||||
# Extract title as title
|
||||
if is_html:
|
||||
if self.datastore.data['settings']['application'].get('extract_title_as_title') or watch['extract_title_as_title']:
|
||||
if not watch['title'] or not len(watch['title']):
|
||||
update_obj['title'] = html_tools.extract_element(find='title', html_content=fetcher.content)
|
||||
# Always record the new checksum
|
||||
update_obj["previous_md5"] = fetched_md5
|
||||
|
||||
# On the first run of a site, watch['previous_md5'] will be None, set it the current one.
|
||||
if not watch.get('previous_md5'):
|
||||
watch['previous_md5'] = fetched_md5
|
||||
|
||||
logger.debug(f"Watch UUID {watch.get('uuid')} content check - Previous MD5: {watch.get('previous_md5')}, Fetched MD5 {fetched_md5}")
|
||||
|
||||
if changed_detected:
|
||||
if watch.get('check_unique_lines', False):
|
||||
has_unique_lines = watch.lines_contain_something_unique_compared_to_history(lines=stripped_text_from_html.splitlines())
|
||||
ignore_whitespace = self.datastore.data['settings']['application'].get('ignore_whitespace')
|
||||
|
||||
has_unique_lines = watch.lines_contain_something_unique_compared_to_history(
|
||||
lines=stripped_text_from_html.splitlines(),
|
||||
ignore_whitespace=ignore_whitespace
|
||||
)
|
||||
|
||||
# One or more lines? unsure?
|
||||
if not has_unique_lines:
|
||||
logging.debug("check_unique_lines: UUID {} didnt have anything new setting change_detected=False".format(uuid))
|
||||
logger.debug(f"check_unique_lines: UUID {watch.get('uuid')} didnt have anything new setting change_detected=False")
|
||||
changed_detected = False
|
||||
else:
|
||||
logging.debug("check_unique_lines: UUID {} had unique content".format(uuid))
|
||||
logger.debug(f"check_unique_lines: UUID {watch.get('uuid')} had unique content")
|
||||
|
||||
# Always record the new checksum
|
||||
update_obj["previous_md5"] = fetched_md5
|
||||
|
||||
# On the first run of a site, watch['previous_md5'] will be None, set it the current one.
|
||||
if not watch.get('previous_md5'):
|
||||
watch['previous_md5'] = fetched_md5
|
||||
|
||||
return changed_detected, update_obj, text_content_before_ignored_filter
|
||||
# stripped_text_from_html - Everything after filters and NO 'ignored' content
|
||||
return changed_detected, update_obj, stripped_text_from_html
|
||||
@@ -1,108 +0,0 @@
|
||||
function isItemInStock() {
|
||||
// @todo Pass these in so the same list can be used in non-JS fetchers
|
||||
const outOfStockTexts = [
|
||||
'0 in stock',
|
||||
'agotado',
|
||||
'artikel zurzeit vergriffen',
|
||||
'as soon as stock is available',
|
||||
'ausverkauft', // sold out
|
||||
'available for back order',
|
||||
'back-order or out of stock',
|
||||
'backordered',
|
||||
'benachrichtigt mich', // notify me
|
||||
'brak na stanie',
|
||||
'brak w magazynie',
|
||||
'coming soon',
|
||||
'currently have any tickets for this',
|
||||
'currently unavailable',
|
||||
'dostępne wkrótce',
|
||||
'en rupture de stock',
|
||||
'ist derzeit nicht auf lager',
|
||||
'item is no longer available',
|
||||
'message if back in stock',
|
||||
'nachricht bei',
|
||||
'nicht auf lager',
|
||||
'nicht lieferbar',
|
||||
'nicht zur verfügung',
|
||||
'no disponible temporalmente',
|
||||
'no longer in stock',
|
||||
'no tickets available',
|
||||
'not available',
|
||||
'not currently available',
|
||||
'not in stock',
|
||||
'notify me when available',
|
||||
'não estamos a aceitar encomendas',
|
||||
'out of stock',
|
||||
'out-of-stock',
|
||||
'produkt niedostępny',
|
||||
'sold out',
|
||||
'temporarily out of stock',
|
||||
'temporarily unavailable',
|
||||
'tickets unavailable',
|
||||
'unavailable tickets',
|
||||
'we do not currently have an estimate of when this product will be back in stock.',
|
||||
'zur zeit nicht an lager',
|
||||
'已售完',
|
||||
];
|
||||
|
||||
|
||||
const negateOutOfStockRegexs = [
|
||||
'[0-9] in stock'
|
||||
]
|
||||
var negateOutOfStockRegexs_r = [];
|
||||
for (let i = 0; i < negateOutOfStockRegexs.length; i++) {
|
||||
negateOutOfStockRegexs_r.push(new RegExp(negateOutOfStockRegexs[i], 'g')); // index with [i], not [0], so every pattern gets compiled
|
||||
}
|
||||
|
||||
|
||||
const elementsWithZeroChildren = Array.from(document.getElementsByTagName('*')).filter(element => element.children.length === 0);
|
||||
|
||||
// REGEXS THAT REALLY MEAN IT'S IN STOCK
|
||||
for (let i = elementsWithZeroChildren.length - 1; i >= 0; i--) {
|
||||
const element = elementsWithZeroChildren[i];
|
||||
if (element.offsetWidth > 0 || element.offsetHeight > 0 || element.getClientRects().length > 0) {
|
||||
var elementText="";
|
||||
if (element.tagName.toLowerCase() === "input") {
|
||||
elementText = element.value.toLowerCase();
|
||||
} else {
|
||||
elementText = element.textContent.toLowerCase();
|
||||
}
|
||||
|
||||
if (elementText.length) {
|
||||
// try which ones could mean its in stock
|
||||
for (let i = 0; i < negateOutOfStockRegexs.length; i++) {
|
||||
if (negateOutOfStockRegexs_r[i].test(elementText)) {
|
||||
return 'Possibly in stock';
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// OTHER STUFF THAT COULD BE THAT IT'S OUT OF STOCK
|
||||
for (let i = elementsWithZeroChildren.length - 1; i >= 0; i--) {
|
||||
const element = elementsWithZeroChildren[i];
|
||||
if (element.offsetWidth > 0 || element.offsetHeight > 0 || element.getClientRects().length > 0) {
|
||||
var elementText="";
|
||||
if (element.tagName.toLowerCase() === "input") {
|
||||
elementText = element.value.toLowerCase();
|
||||
} else {
|
||||
elementText = element.textContent.toLowerCase();
|
||||
}
|
||||
|
||||
if (elementText.length) {
|
||||
// and these mean its out of stock
|
||||
for (const outOfStockText of outOfStockTexts) {
|
||||
if (elementText.includes(outOfStockText)) {
|
||||
return elementText; // item is out of stock
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 'Possibly in stock'; // possibly in stock, can't decide otherwise.
|
||||
}
|
||||
|
||||
// returns the element text that makes it think it's out of stock
|
||||
return isItemInStock();
|
||||
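The @todo at the top of this script asks for the same phrase list to be reusable by non-JS fetchers. A hedged Python sketch of that idea (not part of this diff), classifying already-extracted page text with the same two-pass logic; the phrase list is abbreviated:

import re

OUT_OF_STOCK_TEXTS = ['0 in stock', 'agotado', 'ausverkauft', 'out of stock', 'sold out']  # abbreviated

def is_item_in_stock(page_text):
    text = page_text.lower()
    # Pass 1: regexes that really mean it's in stock (same quirk as the JS: '0 in stock' also matches)
    if re.search(r'[0-9] in stock', text):
        return 'Possibly in stock'
    # Pass 2: phrases that mean it's out of stock
    for phrase in OUT_OF_STOCK_TEXTS:
        if phrase in text:
            return phrase  # item is out of stock
    return 'Possibly in stock'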
@@ -1,221 +0,0 @@
|
||||
// Copyright (C) 2021 Leigh Morresi (dgtlmoon@gmail.com)
|
||||
// All rights reserved.
|
||||
|
||||
// @file Scrape the page looking for elements of concern (%ELEMENTS%)
|
||||
// http://matatk.agrip.org.uk/tests/position-and-width/
|
||||
// https://stackoverflow.com/questions/26813480/when-is-element-getboundingclientrect-guaranteed-to-be-updated-accurate
|
||||
//
|
||||
// Some pages like https://www.londonstockexchange.com/stock/NCCL/ncondezi-energy-limited/analysis
|
||||
// will automatically force a scroll somewhere, so include the position offset
|
||||
// Let's hope the position doesn't change while we iterate the bboxes, but this is better than nothing
|
||||
var scroll_y = 0;
|
||||
try {
|
||||
scroll_y = +document.documentElement.scrollTop || document.body.scrollTop
|
||||
} catch (e) {
|
||||
console.log(e);
|
||||
}
|
||||
|
||||
|
||||
|
||||
// Include the getXpath script directly, easier than fetching
|
||||
function getxpath(e) {
|
||||
var n = e;
|
||||
if (n && n.id) return '//*[@id="' + n.id + '"]';
|
||||
for (var o = []; n && Node.ELEMENT_NODE === n.nodeType;) {
|
||||
for (var i = 0, r = !1, d = n.previousSibling; d;) d.nodeType !== Node.DOCUMENT_TYPE_NODE && d.nodeName === n.nodeName && i++, d = d.previousSibling;
|
||||
for (d = n.nextSibling; d;) {
|
||||
if (d.nodeName === n.nodeName) {
|
||||
r = !0;
|
||||
break
|
||||
}
|
||||
d = d.nextSibling
|
||||
}
|
||||
o.push((n.prefix ? n.prefix + ":" : "") + n.localName + (i || r ? "[" + (i + 1) + "]" : "")), n = n.parentNode
|
||||
}
|
||||
return o.length ? "/" + o.reverse().join("/") : ""
|
||||
}
|
||||
|
||||
const findUpTag = (el) => {
|
||||
let r = el
|
||||
chained_css = [];
|
||||
depth = 0;
|
||||
|
||||
// Strategy 1: If it's an input, with name, and there's only one, prefer that
|
||||
if (el.name !== undefined && el.name.length) {
|
||||
var proposed = el.tagName + "[name=" + el.name + "]";
|
||||
var proposed_element = window.document.querySelectorAll(proposed);
|
||||
if (proposed_element.length) {
|
||||
if (proposed_element.length === 1) {
|
||||
return proposed;
|
||||
} else {
|
||||
// Some sites change ID but name= stays the same, we can hit it if we know the index
|
||||
// Find all the elements that match and work out the input[n]
|
||||
var n = Array.from(proposed_element).indexOf(el);
|
||||
// Return a Playwright nth-match selector, e.g. input[name=zipcode] >> nth=n
|
||||
return proposed + " >> nth=" + n;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Strategy 2: Keep going up until we hit an ID tag, imagine it's like #list-widget div h4
|
||||
while (r.parentNode) {
|
||||
if (depth == 5) {
|
||||
break;
|
||||
}
|
||||
if ('' !== r.id) {
|
||||
chained_css.unshift("#" + CSS.escape(r.id));
|
||||
final_selector = chained_css.join(' > ');
|
||||
// Be sure there's only one, some sites have multiples of the same ID tag :-(
|
||||
if (window.document.querySelectorAll(final_selector).length == 1) {
|
||||
return final_selector;
|
||||
}
|
||||
return null;
|
||||
} else {
|
||||
chained_css.unshift(r.tagName.toLowerCase());
|
||||
}
|
||||
r = r.parentNode;
|
||||
depth += 1;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
|
||||
// @todo - if it's SVG or IMG, go into image diff mode
|
||||
// %ELEMENTS% replaced at injection time because different interfaces use it with different settings
|
||||
var elements = window.document.querySelectorAll("%ELEMENTS%");
|
||||
var size_pos = [];
|
||||
// after page fetch, inject this JS
|
||||
// build a map of all elements and their positions (maybe only those that contain text?)
|
||||
var bbox;
|
||||
for (var i = 0; i < elements.length; i++) {
|
||||
bbox = elements[i].getBoundingClientRect();
|
||||
|
||||
// Exclude items that are not interactable or visible
|
||||
if(elements[i].style.opacity === "0") {
|
||||
continue
|
||||
}
|
||||
if(elements[i].style.display === "none" || elements[i].style.pointerEvents === "none" ) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip really small ones, and where width or height ==0
|
||||
if (bbox['width'] * bbox['height'] < 100) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Don't include elements that are offset from canvas
|
||||
if (bbox['top']+scroll_y < 0 || bbox['left'] < 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// @todo the getXpath kind of sucks, it doesn't know when there is for example just one ID sometimes
|
||||
// it should not traverse when we know we can anchor off just an ID one level up etc..
|
||||
// maybe, get current class or id, keep traversing up looking for only class or id until there is just one match
|
||||
|
||||
// 1st primitive - if it has a class, try joining it all and selecting; if there's only one match.. well, that's us.
|
||||
xpath_result = false;
|
||||
|
||||
try {
|
||||
var d = findUpTag(elements[i]);
|
||||
if (d) {
|
||||
xpath_result = d;
|
||||
}
|
||||
} catch (e) {
|
||||
console.log(e);
|
||||
}
|
||||
|
||||
// You could swap it and default to getXpath and then try the smarter one
|
||||
// default back to the less intelligent one
|
||||
if (!xpath_result) {
|
||||
try {
|
||||
// I've seen on FB and eBay that this doesn't work
|
||||
// ReferenceError: getXPath is not defined at eval (eval at evaluate (:152:29), <anonymous>:67:20) at UtilityScript.evaluate (<anonymous>:159:18) at UtilityScript.<anonymous> (<anonymous>:1:44)
|
||||
xpath_result = getxpath(elements[i]);
|
||||
} catch (e) {
|
||||
console.log(e);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (window.getComputedStyle(elements[i]).visibility === "hidden") {
|
||||
continue;
|
||||
}
|
||||
|
||||
// @todo Possible to ONLY list where it's clickable to save JSON xfer size
|
||||
size_pos.push({
|
||||
xpath: xpath_result,
|
||||
width: Math.round(bbox['width']),
|
||||
height: Math.round(bbox['height']),
|
||||
left: Math.floor(bbox['left']),
|
||||
top: Math.floor(bbox['top'])+scroll_y,
|
||||
tagName: (elements[i].tagName) ? elements[i].tagName.toLowerCase() : '',
|
||||
tagtype: (elements[i].tagName == 'INPUT' && elements[i].type) ? elements[i].type.toLowerCase() : '',
|
||||
isClickable: (elements[i].onclick) || window.getComputedStyle(elements[i]).cursor == "pointer"
|
||||
});
|
||||
|
||||
}
|
||||
|
||||
// Inject the current one set in the include_filters, which may be a CSS rule
|
||||
// used for displaying the current one in VisualSelector, where it's not one we generated.
|
||||
if (include_filters.length) {
|
||||
// For each filter, go and find it on the page and add it to the results so we can visualise it again
|
||||
for (const f of include_filters) {
|
||||
bbox = false;
|
||||
q = false;
|
||||
|
||||
if (!f.length) {
|
||||
console.log("xpath_element_scraper: Empty filter, skipping");
|
||||
continue;
|
||||
}
|
||||
|
||||
try {
|
||||
// is it xpath?
|
||||
if (f.startsWith('/') || f.startsWith('xpath:')) {
|
||||
q = document.evaluate(f.replace('xpath:', ''), document, null, XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue;
|
||||
} else {
|
||||
q = document.querySelector(f);
|
||||
}
|
||||
} catch (e) {
|
||||
// Maybe catch DOMException and alert?
|
||||
console.log("xpath_element_scraper: Exception selecting element from filter "+f);
|
||||
console.log(e);
|
||||
}
|
||||
|
||||
if (q) {
|
||||
// #1231 - In the case an XPath attribute filter is applied, we will have to traverse up and find the element.
|
||||
if (q.hasOwnProperty('getBoundingClientRect')) {
|
||||
bbox = q.getBoundingClientRect();
|
||||
console.log("xpath_element_scraper: Got filter element, scroll from top was " + scroll_y)
|
||||
} else {
|
||||
try {
|
||||
// Try and see if we can find its ownerElement
|
||||
bbox = q.ownerElement.getBoundingClientRect();
|
||||
console.log("xpath_element_scraper: Got filter by ownerElement element, scroll from top was " + scroll_y)
|
||||
} catch (e) {
|
||||
console.log("xpath_element_scraper: error looking up ownerElement")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if(!q) {
|
||||
console.log("xpath_element_scraper: filter element " + f + " was not found");
|
||||
}
|
||||
|
||||
if (bbox && bbox['width'] > 0 && bbox['height'] > 0) {
|
||||
size_pos.push({
|
||||
xpath: f,
|
||||
width: parseInt(bbox['width']),
|
||||
height: parseInt(bbox['height']),
|
||||
left: parseInt(bbox['left']),
|
||||
top: parseInt(bbox['top'])+scroll_y
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Sort the elements so we find the smallest one first, in other words, we find the smallest one matching in that area
|
||||
// so that we don't select the wrapping element by mistake and be unable to select what we want
|
||||
size_pos.sort((a, b) => (a.width*a.height > b.width*b.height) ? 1 : -1)
|
||||
|
||||
// Window.width required for proper scaling in the frontend
|
||||
return {'size_pos': size_pos, 'browser_width': window.innerWidth};
|
||||
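A sketch of how the returned structure could be consumed server-side (this consumer is an assumption, not part of the diff): because size_pos is sorted smallest-area-first, the first bounding box containing a clicked point is the tightest matching element.

def element_at_point(size_pos, x, y):
    # size_pos arrives sorted smallest-area-first, so the first hit is the tightest match
    for el in size_pos:
        if el['left'] <= x <= el['left'] + el['width'] and el['top'] <= y <= el['top'] + el['height']:
            return el['xpath']
    return None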
@@ -35,4 +35,8 @@ pytest tests/test_access_control.py
|
||||
pytest tests/test_notification.py
|
||||
pytest tests/test_backend.py
|
||||
pytest tests/test_rss.py
|
||||
pytest tests/test_unique_lines.py
|
||||
|
||||
# Check file:// will pick up a file when enabled
|
||||
echo "Hello world" > /tmp/test-file.txt
|
||||
ALLOW_FILE_URI=yes pytest tests/test_security.py
|
||||
|
||||
changedetectionio/run_custom_browser_url_tests.sh (new executable file, 46 lines)
@@ -0,0 +1,46 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Run some tests and check whether the 'custom-browser-search-string=1' connect string appeared in the correct containers
|
||||
|
||||
# @todo do it again but with the puppeteer one
|
||||
|
||||
# enable debug
|
||||
set -x
|
||||
|
||||
# An extra browser is configured, but we never chose to use it, so it should NOT show in the logs
|
||||
docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/custom_browser_url/test_custom_browser_url.py::test_request_not_via_custom_browser_url'
|
||||
docker logs sockpuppetbrowser-custom-url &>log-custom.txt
|
||||
grep 'custom-browser-search-string=1' log-custom.txt
|
||||
if [ $? -ne 1 ]
|
||||
then
|
||||
echo "Saw a request in 'sockpuppetbrowser-custom-url' container with 'custom-browser-search-string=1' when I should not - log-custom.txt"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
docker logs sockpuppetbrowser &>log.txt
|
||||
grep 'custom-browser-search-string=1' log.txt
|
||||
if [ $? -ne 1 ]
|
||||
then
|
||||
echo "Saw a request in 'browser' container with 'custom-browser-search-string=1' when I should not"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Special connect string should appear in the custom-url container, but not in the 'default' one
|
||||
docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/custom_browser_url/test_custom_browser_url.py::test_request_via_custom_browser_url'
|
||||
docker logs sockpuppetbrowser-custom-url &>log-custom.txt
|
||||
grep 'custom-browser-search-string=1' log-custom.txt
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
echo "Did not see request in 'sockpuppetbrowser-custom-url' container with 'custom-browser-search-string=1' when I should - log-custom.txt"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
docker logs sockpuppetbrowser &>log.txt
|
||||
grep 'custom-browser-search-string=1' log.txt
|
||||
if [ $? -ne 1 ]
|
||||
then
|
||||
echo "Saw a request in 'browser' container with 'custom-browser-search-string=1' when I should not"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
@@ -10,7 +10,7 @@ set -x
|
||||
docker run --network changedet-network -d --name squid-one --hostname squid-one --rm -v `pwd`/tests/proxy_list/squid.conf:/etc/squid/conf.d/debian.conf ubuntu/squid:4.13-21.10_edge
|
||||
docker run --network changedet-network -d --name squid-two --hostname squid-two --rm -v `pwd`/tests/proxy_list/squid.conf:/etc/squid/conf.d/debian.conf ubuntu/squid:4.13-21.10_edge
|
||||
|
||||
# Used for configuring a custom proxy URL via the UI
|
||||
# Used for configuring a custom proxy URL via the UI - with username+password auth
|
||||
docker run --network changedet-network -d \
|
||||
--name squid-custom \
|
||||
--hostname squid-custom \
|
||||
@@ -26,15 +26,17 @@ docker run --network changedet-network \
|
||||
test-changedetectionio \
|
||||
bash -c 'cd changedetectionio && pytest tests/proxy_list/test_multiple_proxy.py'
|
||||
|
||||
|
||||
## Should be a request in the default "first" squid
|
||||
set +e
|
||||
echo "- Looking for chosen.changedetection.io request in squid-one - it should NOT be here"
|
||||
docker logs squid-one 2>/dev/null|grep chosen.changedetection.io
|
||||
if [ $? -ne 0 ]
|
||||
if [ $? -ne 1 ]
|
||||
then
|
||||
echo "Did not see a request to chosen.changedetection.io in the squid logs (while checking preferred proxy - squid one)"
|
||||
echo "Saw a request to chosen.changedetection.io in the squid logs (while checking preferred proxy - squid one) WHEN I SHOULD NOT"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
set -e
|
||||
echo "- Looking for chosen.changedetection.io request in squid-two"
|
||||
# And one in the 'second' squid (user selects this as preferred)
|
||||
docker logs squid-two 2>/dev/null|grep chosen.changedetection.io
|
||||
if [ $? -ne 0 ]
|
||||
@@ -43,7 +45,6 @@ then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
# Test the UI configurable proxies
|
||||
docker run --network changedet-network \
|
||||
test-changedetectionio \
|
||||
@@ -51,6 +52,7 @@ docker run --network changedet-network \
|
||||
|
||||
|
||||
# Should see a request for one.changedetection.io in there
|
||||
echo "- Looking for .changedetection.io request in squid-custom"
|
||||
docker logs squid-custom 2>/dev/null|grep "TCP_TUNNEL.200.*changedetection.io"
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
@@ -67,7 +69,7 @@ docker run --network changedet-network \
|
||||
set +e
|
||||
# Check request was never seen in any container
|
||||
for c in $(echo "squid-one squid-two squid-custom"); do
|
||||
echo Checking $c
|
||||
echo ....Checking $c
|
||||
docker logs $c &> $c.txt
|
||||
grep noproxy $c.txt
|
||||
if [ $? -ne 1 ]
|
||||
|
||||
changedetectionio/run_socks_proxy_tests.sh (new executable file, 49 lines)
@@ -0,0 +1,49 @@
|
||||
#!/bin/bash
|
||||
|
||||
# exit when any command fails
|
||||
set -e
|
||||
# enable debug
|
||||
set -x
|
||||
|
||||
|
||||
# SOCKS5 related - start simple Socks5 proxy server
|
||||
# SOCKSTEST=xyz should show in the logs of this service to confirm it fetched
|
||||
docker run --network changedet-network -d --hostname socks5proxy --rm --name socks5proxy -p 1080:1080 -e PROXY_USER=proxy_user123 -e PROXY_PASSWORD=proxy_pass123 serjs/go-socks5-proxy
|
||||
docker run --network changedet-network -d --hostname socks5proxy-noauth --rm -p 1081:1080 --name socks5proxy-noauth serjs/go-socks5-proxy
|
||||
|
||||
echo "---------------------------------- SOCKS5 -------------------"
|
||||
# SOCKS5 related - test from proxies.json
|
||||
docker run --network changedet-network \
|
||||
-v `pwd`/tests/proxy_socks5/proxies.json-example:/app/changedetectionio/test-datastore/proxies.json \
|
||||
--rm \
|
||||
-e "FLASK_SERVER_NAME=cdio" \
|
||||
--hostname cdio \
|
||||
-e "SOCKSTEST=proxiesjson" \
|
||||
test-changedetectionio \
|
||||
bash -c 'cd changedetectionio && pytest --live-server-host=0.0.0.0 --live-server-port=5004 -s tests/proxy_socks5/test_socks5_proxy_sources.py'
|
||||
|
||||
# SOCKS5 related - by manually entering in UI
|
||||
docker run --network changedet-network \
|
||||
--rm \
|
||||
-e "FLASK_SERVER_NAME=cdio" \
|
||||
--hostname cdio \
|
||||
-e "SOCKSTEST=manual" \
|
||||
test-changedetectionio \
|
||||
bash -c 'cd changedetectionio && pytest --live-server-host=0.0.0.0 --live-server-port=5004 -s tests/proxy_socks5/test_socks5_proxy.py'
|
||||
|
||||
# SOCKS5 related - test from proxies.json via playwright - NOTE: PLAYWRIGHT DOESN'T SUPPORT AUTHENTICATING PROXY
|
||||
docker run --network changedet-network \
|
||||
-e "SOCKSTEST=manual-playwright" \
|
||||
--hostname cdio \
|
||||
-e "FLASK_SERVER_NAME=cdio" \
|
||||
-v `pwd`/tests/proxy_socks5/proxies.json-example-noauth:/app/changedetectionio/test-datastore/proxies.json \
|
||||
-e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" \
|
||||
--rm \
|
||||
test-changedetectionio \
|
||||
bash -c 'cd changedetectionio && pytest --live-server-host=0.0.0.0 --live-server-port=5004 -s tests/proxy_socks5/test_socks5_proxy_sources.py'
|
||||
|
||||
echo "socks5 server logs"
|
||||
docker logs socks5proxy
|
||||
echo "----------------------------------"
|
||||
|
||||
docker kill socks5proxy socks5proxy-noauth
|
||||
changedetectionio/safe_jinja.py (new file, 18 lines)
@@ -0,0 +1,18 @@
|
||||
"""
|
||||
Safe Jinja2 render with max payload sizes
|
||||
|
||||
See https://jinja.palletsprojects.com/en/3.1.x/sandbox/#security-considerations
|
||||
"""
|
||||
|
||||
import jinja2.sandbox
|
||||
import typing as t
|
||||
import os
|
||||
|
||||
JINJA2_MAX_RETURN_PAYLOAD_SIZE = 1024 * int(os.getenv("JINJA2_MAX_RETURN_PAYLOAD_SIZE_KB", 1024 * 10))
|
||||
|
||||
|
||||
def render(template_str, **args: t.Any) -> str:
|
||||
jinja2_env = jinja2.sandbox.ImmutableSandboxedEnvironment(extensions=['jinja2_time.TimeExtension'])
|
||||
output = jinja2_env.from_string(template_str).render(args)
|
||||
return output[:JINJA2_MAX_RETURN_PAYLOAD_SIZE]
|
||||
|
||||
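Usage is a one-liner; the template text below is illustrative only. The jinja2_time extension loaded above provides the {% now %} tag, and anything past the size cap is silently truncated:

from changedetectionio import safe_jinja

# Hypothetical notification body, rendered inside the immutable sandbox
body = safe_jinja.render("Detected change at {% now 'utc', '%Y-%m-%d %H:%M' %} on {{watch_url}}",
                         watch_url="https://example.com")
print(body)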
@@ -1,4 +1,4 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<svg width="15" height="16.363636" viewBox="0 0 15 16.363636" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg">
|
||||
<svg width="15" height="16.363636" viewBox="0 0 15 16.363636" xmlns="http://www.w3.org/2000/svg" >
|
||||
<path d="m 14.318182,11.762045 v 1.1925 H 5.4102273 L 11.849318,7.1140909 C 12.234545,9.1561364 12.54,11.181818 14.318182,11.762045 Z m -6.7984093,4.601591 c 1.0759091,0 2.0256823,-0.955909 2.0256823,-2.045454 H 5.4545455 c 0,1.089545 0.9879545,2.045454 2.0652272,2.045454 z M 15,2.8622727 0.9177273,15.636136 0,14.627045 l 1.8443182,-1.6725 h -1.1625 v -1.1925 C 4.0070455,10.677273 2.1784091,4.5388636 5.3611364,2.6897727 5.8009091,2.4347727 6.0709091,1.9609091 6.0702273,1.4488636 v -0.00205 C 6.0702273,0.64772727 6.7104545,0 7.5,0 8.2895455,0 8.9297727,0.64772727 8.9297727,1.4468182 v 0.00205 C 8.9290909,1.9602319 9.199773,2.4354591 9.638864,2.6897773 10.364318,3.111141 10.827273,3.7568228 11.1525,4.5129591 L 14.085682,1.8531818 Z M 6.8181818,1.3636364 C 6.8181818,1.74 7.1236364,2.0454545 7.5,2.0454545 7.8763636,2.0454545 8.1818182,1.74 8.1818182,1.3636364 8.1818182,0.98795455 7.8763636,0.68181818 7.5,0.68181818 c -0.3763636,0 -0.6818182,0.30613637 -0.6818182,0.68181822 z" id="path2" style="fill:#f8321b;stroke-width:0.681818;fill-opacity:1"/>
|
||||
</svg>
|
||||
|
||||
|
(image updated: 1.2 KiB before, 1.2 KiB after)
@@ -10,7 +10,7 @@
|
||||
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:svg="http://www.w3.org/2000/svg">
|
||||
>
|
||||
<defs
|
||||
id="defs16" />
|
||||
<sodipodi:namedview
|
||||
|
||||
|
(image updated: 11 KiB before, 11 KiB after)
@@ -1,7 +1,7 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<svg
|
||||
version="1.1"
|
||||
id="Layer_1"
|
||||
id="copy"
|
||||
x="0px"
|
||||
y="0px"
|
||||
viewBox="0 0 115.77 122.88"
|
||||
@@ -12,7 +12,7 @@
|
||||
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:svg="http://www.w3.org/2000/svg"><defs
|
||||
><defs
|
||||
id="defs11" /><sodipodi:namedview
|
||||
id="namedview9"
|
||||
pagecolor="#ffffff"
|
||||
|
||||
|
(image updated: 2.5 KiB before, 2.5 KiB after)
@@ -6,11 +6,11 @@
|
||||
height="7.5005589"
|
||||
width="11.248507"
|
||||
version="1.1"
|
||||
id="Layer_1"
|
||||
id="email"
|
||||
viewBox="0 0 7.1975545 4.7993639"
|
||||
xml:space="preserve"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:svg="http://www.w3.org/2000/svg"><defs
|
||||
><defs
|
||||
id="defs19" />
|
||||
<g
|
||||
id="g14"
|
||||
|
||||
|
(image updated: 1.9 KiB before, 1.9 KiB after)
|
(image removed: was 22 KiB)
@@ -9,7 +9,7 @@
|
||||
id="svg5"
|
||||
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:svg="http://www.w3.org/2000/svg">
|
||||
>
|
||||
<defs
|
||||
id="defs2" />
|
||||
<g
|
||||
|
||||
|
(image updated: 2.4 KiB before, 2.4 KiB after)
@@ -10,7 +10,7 @@
|
||||
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:svg="http://www.w3.org/2000/svg">
|
||||
>
|
||||
<defs
|
||||
id="defs12" />
|
||||
<sodipodi:namedview
|
||||
|
||||
|
(image updated: 9.7 KiB before, 9.7 KiB after)
@@ -3,7 +3,6 @@
|
||||
xmlns:dc="http://purl.org/dc/elements/1.1/"
|
||||
xmlns:cc="http://creativecommons.org/ns#"
|
||||
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
|
||||
xmlns:svg="http://www.w3.org/2000/svg"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
version="1.1"
|
||||
id="Capa_1"
|
||||
|
||||
|
(image updated: 2.9 KiB before, 2.8 KiB after)
@@ -13,7 +13,6 @@
|
||||
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:svg="http://www.w3.org/2000/svg"
|
||||
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
|
||||
xmlns:cc="http://creativecommons.org/ns#"
|
||||
xmlns:dc="http://purl.org/dc/elements/1.1/"><sodipodi:namedview
|
||||
|
||||
|
(image updated: 3.5 KiB before, 3.4 KiB after)
changedetectionio/static/images/schedule.svg (new file, 225 lines)
@@ -0,0 +1,225 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<svg
|
||||
version="1.1"
|
||||
id="schedule"
|
||||
x="0px"
|
||||
y="0px"
|
||||
viewBox="0 0 661.20001 665.40002"
|
||||
xml:space="preserve"
|
||||
width="661.20001"
|
||||
height="665.40002"
|
||||
inkscape:version="1.1.2 (0a00cf5339, 2022-02-04)"
|
||||
sodipodi:docname="schedule.svg"
|
||||
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:svg="http://www.w3.org/2000/svg"><defs
|
||||
id="defs77" /><sodipodi:namedview
|
||||
id="namedview75"
|
||||
pagecolor="#ffffff"
|
||||
bordercolor="#666666"
|
||||
borderopacity="1.0"
|
||||
inkscape:pageshadow="2"
|
||||
inkscape:pageopacity="0.0"
|
||||
inkscape:pagecheckerboard="0"
|
[Image diff, remainder of the previous SVG file: Inkscape namedview settings (grid off, zeroed fit-margins, zoom and window geometry), an embedded stylesheet defining fills and strokes .st0–.st10, and the drawing itself — a grey rounded card (#dfdfdf) with a red header bar (#d62128) and two darker hanger pins, a 4×3 grid of white squares with one square recoloured green (#27c12b), and an orange-ringed clock overlay (#F7931E ring, white face, grey #999999 hands, #333333 tick marks, red #C1272D sweep line, blue #245F7F hub).]
After: 5.9 KiB
@@ -6,7 +6,7 @@
version="1.1"
id="svg6"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
>
<defs
id="defs10" />
<path
Before: 892 B · After: 854 B
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg width="18" height="19.92" viewBox="0 0 18 19.92" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg">
<svg width="18" height="19.92" viewBox="0 0 18 19.92" xmlns="http://www.w3.org/2000/svg" >
<path d="M -3,-2 H 21 V 22 H -3 Z" fill="none" id="path2"/>
<path d="m 15,14.08 c -0.76,0 -1.44,0.3 -1.96,0.77 L 5.91,10.7 C 5.96,10.47 6,10.24 6,10 6,9.76 5.96,9.53 5.91,9.3 L 12.96,5.19 C 13.5,5.69 14.21,6 15,6 16.66,6 18,4.66 18,3 18,1.34 16.66,0 15,0 c -1.66,0 -3,1.34 -3,3 0,0.24 0.04,0.47 0.09,0.7 L 5.04,7.81 C 4.5,7.31 3.79,7 3,7 1.34,7 0,8.34 0,10 c 0,1.66 1.34,3 3,3 0.79,0 1.5,-0.31 2.04,-0.81 l 7.12,4.16 c -0.05,0.21 -0.08,0.43 -0.08,0.65 0,1.61 1.31,2.92 2.92,2.92 1.61,0 2.92,-1.31 2.92,-2.92 0,-1.61 -1.31,-2.92 -2.92,-2.92 z" id="path4" style="fill:#0078e7;fill-opacity:1"/>
</svg>
Before: 787 B · After: 749 B
changedetectionio/static/images/steps.svg — new file (+44 lines)
@@ -0,0 +1,44 @@
[SVG source: a 19.966091 × 17.999964 "steps" icon (aria-hidden, class "css-1oqmxjn", id "svg4", sodipodi:docname "steps.svg"), Inkscape 1.1.2 editor metadata (sodipodi:namedview with zoom and window state, zeroed fit-margins, grid off), and a single path (id="path2", style="fill:#777777;fill-opacity:1") whose path data draws the icon.]
After: 3.7 KiB
@@ -1,16 +1,7 @@
$(document).ready(function () {

// duplicate
var csrftoken = $('input[name=csrf_token]').val();
$.ajaxSetup({
beforeSend: function (xhr, settings) {
if (!/^(GET|HEAD|OPTIONS|TRACE)$/i.test(settings.type) && !this.crossDomain) {
xhr.setRequestHeader("X-CSRFToken", csrftoken)
}
}
})
var browsersteps_session_id;
var browserless_seconds_remaining = 0;
var browser_interface_seconds_remaining = 0;
var apply_buttons_disabled = false;
var include_text_elements = $("#include_text_elements");
var xpath_data = false;
@@ -26,7 +17,8 @@ $(document).ready(function () {
set_scale();
});
// Should always be disabled
$('#browser_steps >li:first-child select').val('Goto site').attr('disabled', 'disabled');
$('#browser_steps-0-operation option[value="Goto site"]').prop("selected", "selected");
$('#browser_steps-0-operation').attr('disabled', 'disabled');

$('#browsersteps-click-start').click(function () {
$("#browsersteps-click-start").fadeOut();
@@ -49,7 +41,7 @@ $(document).ready(function () {
$('#browsersteps-img').removeAttr('src');
$("#browsersteps-click-start").show();
$("#browsersteps-selector-wrapper .spinner").hide();
browserless_seconds_remaining = 0;
browser_interface_seconds_remaining = 0;
browsersteps_session_id = false;
apply_buttons_disabled = false;
ctx.clearRect(0, 0, c.width, c.height);
@@ -61,12 +53,12 @@ $(document).ready(function () {
$('#browser_steps >li:first-child').css('opacity', '0.5');
}

// Show seconds remaining until playwright/browserless needs to restart the session
// Show seconds remaining until the browser interface needs to restart the session
// (See comment at the top of changedetectionio/blueprint/browser_steps/__init__.py )
setInterval(() => {
if (browserless_seconds_remaining >= 1) {
document.getElementById('browserless-seconds-remaining').innerText = browserless_seconds_remaining + " seconds remaining in session";
browserless_seconds_remaining -= 1;
if (browser_interface_seconds_remaining >= 1) {
document.getElementById('browser-seconds-remaining').innerText = browser_interface_seconds_remaining + " seconds remaining in session";
browser_interface_seconds_remaining -= 1;
}
}, "1000")

@@ -160,6 +152,12 @@ $(document).ready(function () {
e.offsetX > item.left * y_scale && e.offsetX < item.left * y_scale + item.width * y_scale

) {
// Ignore really large ones, because we are scraping 'div' also from xpath_element_scraper but
// that div or whatever could be some wrapper and would generally make you select the whole page
if (item.width > 800 && item.height > 400) {
return
}

// There could be many elements here, record them all and then we'll find out which is the most 'useful'
// (input, textarea, button, A etc)
if (item.width < xpath_data['browser_width']) {
@@ -223,7 +221,7 @@ $(document).ready(function () {
// If you switch to "Click X,y" after an element here is setup, it will give the last co-ords anyway
//if (x['isClickable'] || x['tagName'].startsWith('h') || x['tagName'] === 'a' || x['tagName'] === 'button' || x['tagtype'] === 'submit' || x['tagtype'] === 'checkbox' || x['tagtype'] === 'radio' || x['tagtype'] === 'li') {
$('select', first_available).val('Click element').change();
$('input[type=text]', first_available).first().val(x['xpath']);
$('input[type=text]', first_available).first().val(x['xpath']).focus();
found_something = true;
//}
}
@@ -261,7 +259,7 @@ $(document).ready(function () {
// This should trigger 'Goto site'
console.log("Got startup response, requesting Goto-Site (first) step fake click");
$('#browser_steps >li:first-child .apply').click();
browserless_seconds_remaining = 500;
browser_interface_seconds_remaining = 500;
set_first_gotosite_disabled();
}).fail(function (data) {
console.log(data);
@@ -307,7 +305,7 @@ $(document).ready(function () {

if ($(this).val() === 'Click X,Y' && last_click_xy['x'] > 0 && $(elem_value).val().length === 0) {
// @todo handle scale
$(elem_value).val(last_click_xy['x'] + ',' + last_click_xy['y']);
$(elem_value).val(last_click_xy['x'] + ',' + last_click_xy['y']).focus();
}
}).change();

@@ -321,8 +319,14 @@ $(document).ready(function () {
var s = '<div class="control">' + '<a data-step-index=' + i + ' class="pure-button button-secondary button-green button-xsmall apply" >Apply</a> ';
if (i > 0) {
// The first step never gets these (Goto-site)
s += '<a data-step-index=' + i + ' class="pure-button button-secondary button-xsmall clear" >Clear</a> ' +
'<a data-step-index=' + i + ' class="pure-button button-secondary button-red button-xsmall remove" >Remove</a>';
s += `<a data-step-index="${i}" class="pure-button button-secondary button-xsmall clear" >Clear</a> ` +
`<a data-step-index="${i}" class="pure-button button-secondary button-red button-xsmall remove" >Remove</a>`;

// if a screenshot is available
if (browser_steps_available_screenshots.includes(i.toString())) {
var d = (browser_steps_last_error_step === i+1) ? 'before' : 'after';
s += ` <a data-step-index="${i}" class="pure-button button-secondary button-xsmall show-screenshot" title="Show screenshot from last run" data-type="${d}">Pic</a> `;
}
}
s += '</div>';
$(this).append(s)
@@ -437,6 +441,24 @@ $(document).ready(function () {

});

$('ul#browser_steps li .control .show-screenshot').click(function (element) {
var step_n = $(event.currentTarget).data('step-index');
w = window.open(this.href, "_blank", "width=640,height=480");
const t = $(event.currentTarget).data('type');

const url = browser_steps_fetch_screenshot_image_url + `&step_n=${step_n}&type=${t}`;
w.document.body.innerHTML = `<!DOCTYPE html>
<html lang="en">
<body>
<img src="${url}" style="width: 100%" alt="Browser Step at step ${step_n} from last run." title="Browser Step at step ${step_n} from last run."/>
</body>
</html>`;
w.document.title = `Browser Step at step ${step_n} from last run.`;
});

if (browser_steps_last_error_step) {
$("ul#browser_steps>li:nth-child("+browser_steps_last_error_step+")").addClass("browser-step-with-error");
}

$("ul#browser_steps select").change(function () {
set_greyed_state();
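Reviewer note on the new "Pic" button above: the popup's image URL is assembled from a page-supplied global plus the step index and a before/after type flag. A minimal sketch of that composition — the base URL value here is an assumption, only the `&step_n=`/`&type=` suffix appears in this commit:

// Assumed template wiring; only the query-string suffix below is from the diff.
const browser_steps_fetch_screenshot_image_url = '/browser-steps/screenshot?dummy=1'; // hypothetical value
const step_n = 2, t = 'after'; // per-step values, as read by the click handler
const url = browser_steps_fetch_screenshot_image_url + `&step_n=${step_n}&type=${t}`;
console.log(url); // "/browser-steps/screenshot?dummy=1&step_n=2&type=after"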
changedetectionio/static/js/csrf.js — new file (+10 lines)
@@ -0,0 +1,10 @@
$(document).ready(function () {
    $.ajaxSetup({
        beforeSend: function (xhr, settings) {
            if (!/^(GET|HEAD|OPTIONS|TRACE)$/i.test(settings.type) && !this.crossDomain) {
                xhr.setRequestHeader("X-CSRFToken", csrftoken)
            }
        }
    })
});
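Note that this extracted csrf.js reads a `csrftoken` variable that is not declared in the hunk shown above, so it must already be in scope when the handler fires. A plausible wiring, matching the `input[name=csrf_token]` lookup used elsewhere in this diff — its exact placement in a template is an assumption:

// Assumed page wiring (not part of this hunk): define the global before csrf.js runs,
// reading the hidden csrf_token field rendered into the form.
var csrftoken = $('input[name=csrf_token]').val();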
@@ -1,12 +1,10 @@
$(document).ready(function () {
var csrftoken = $('input[name=csrf_token]').val();
$.ajaxSetup({
beforeSend: function (xhr, settings) {
if (!/^(GET|HEAD|OPTIONS|TRACE)$/i.test(settings.type) && !this.crossDomain) {
xhr.setRequestHeader("X-CSRFToken", csrftoken)
}
$('.needs-localtime').each(function () {
for (var option of this.options) {
var dateObject = new Date(option.value * 1000);
option.label = dateObject.toLocaleString(undefined, {dateStyle: "full", timeStyle: "medium"});
}
})
});

// Load it when the #screenshot tab is in use, so we dont give a slow experience when waiting for the text diff to load
window.addEventListener('hashchange', function (e) {
@@ -41,6 +39,12 @@ $(document).ready(function () {
$("#highlightSnippet").remove();
}

// Listen for Escape key press
window.addEventListener('keydown', function (e) {
if (e.key === 'Escape') {
clean();
}
}, false);

function dragTextHandler(event) {
console.log('mouseupped');
@@ -90,5 +94,10 @@ $(document).ready(function () {
}
}

$('#diff-form').on('submit', function (e) {
if ($('select[name=from_version]').val() === $('select[name=to_version]').val()) {
e.preventDefault();
alert('Error - You are trying to compare the same version.');
}
});
});
@@ -1,110 +1,115 @@
var a = document.getElementById("a");
var b = document.getElementById("b");
var result = document.getElementById("result");
$(document).ready(function () {
var a = document.getElementById("a");
var b = document.getElementById("b");
var result = document.getElementById("result");
var inputs;

function changed() {
// https://github.com/kpdecker/jsdiff/issues/389
// I would love to use `{ignoreWhitespace: true}` here but it breaks the formatting
options = {
ignoreWhitespace: document.getElementById("ignoreWhitespace").checked,
};
$('#jump-next-diff').click(function () {

var diff = Diff[window.diffType](a.textContent, b.textContent, options);
var fragment = document.createDocumentFragment();
for (var i = 0; i < diff.length; i++) {
if (diff[i].added && diff[i + 1] && diff[i + 1].removed) {
var swap = diff[i];
diff[i] = diff[i + 1];
diff[i + 1] = swap;
var element = inputs[inputs.current];
var headerOffset = 80;
var elementPosition = element.getBoundingClientRect().top;
var offsetPosition = elementPosition - headerOffset + window.scrollY;

window.scrollTo({
top: offsetPosition,
behavior: "smooth",
});

inputs.current++;
if (inputs.current >= inputs.length) {
inputs.current = 0;
}
});

function changed() {
// https://github.com/kpdecker/jsdiff/issues/389
// I would love to use `{ignoreWhitespace: true}` here but it breaks the formatting
options = {
ignoreWhitespace: document.getElementById("ignoreWhitespace").checked,
};

var diff = Diff[window.diffType](a.textContent, b.textContent, options);
var fragment = document.createDocumentFragment();
for (var i = 0; i < diff.length; i++) {
if (diff[i].added && diff[i + 1] && diff[i + 1].removed) {
var swap = diff[i];
diff[i] = diff[i + 1];
diff[i + 1] = swap;
}

var node;
if (diff[i].removed) {
node = document.createElement("del");
node.classList.add("change");
const wrapper = node.appendChild(document.createElement("span"));
wrapper.appendChild(document.createTextNode(diff[i].value));
} else if (diff[i].added) {
node = document.createElement("ins");
node.classList.add("change");
const wrapper = node.appendChild(document.createElement("span"));
wrapper.appendChild(document.createTextNode(diff[i].value));
} else {
node = document.createTextNode(diff[i].value);
}
fragment.appendChild(node);
}

result.textContent = "";
result.appendChild(fragment);

// For nice mouse-over hover/title information
const removed_current_option = $('#diff-version option:selected')
if (removed_current_option) {
$('del').each(function () {
$(this).prop('title', 'Removed '+removed_current_option[0].label);
});
}
const inserted_current_option = $('#current-version option:selected')
if (removed_current_option) {
$('ins').each(function () {
$(this).prop('title', 'Inserted '+inserted_current_option[0].label);
});
}
// Set the list of possible differences to jump to
inputs = document.querySelectorAll('#diff-ui .change')
// Set the "current" diff pointer
inputs.current = 0;
// Goto diff
$('#jump-next-diff').click();
}

var node;
if (diff[i].removed) {
node = document.createElement("del");
node.classList.add("change");
const wrapper = node.appendChild(document.createElement("span"));
wrapper.appendChild(document.createTextNode(diff[i].value));
} else if (diff[i].added) {
node = document.createElement("ins");
node.classList.add("change");
const wrapper = node.appendChild(document.createElement("span"));
wrapper.appendChild(document.createTextNode(diff[i].value));
} else {
node = document.createTextNode(diff[i].value);
}
fragment.appendChild(node);
}

result.textContent = "";
result.appendChild(fragment);

// Jump at start
inputs.current = 0;
next_diff();
}

window.onload = function () {
/* Convert what is options from UTC time.time() to local browser time */
var diffList = document.getElementById("diff-version");
if (typeof diffList != "undefined" && diffList != null) {
for (var option of diffList.options) {
var dateObject = new Date(option.value * 1000);
option.label = dateObject.toLocaleString();
}
}

/* Set current version date as local time in the browser also */
var current_v = document.getElementById("current-v-date");
var dateObject = new Date(newest_version_timestamp * 1000);
current_v.innerHTML = dateObject.toLocaleString();
onDiffTypeChange(
document.querySelector('#settings [name="diff_type"]:checked'),
);
changed();
};

a.onpaste = a.onchange = b.onpaste = b.onchange = changed;

if ("oninput" in a) {
a.oninput = b.oninput = changed;
} else {
a.onkeyup = b.onkeyup = changed;
}

function onDiffTypeChange(radio) {
window.diffType = radio.value;
// Not necessary
// document.title = "Diff " + radio.value.slice(4);
}

var radio = document.getElementsByName("diff_type");
for (var i = 0; i < radio.length; i++) {
radio[i].onchange = function (e) {
onDiffTypeChange(e.target);
onDiffTypeChange(
document.querySelector('#settings [name="diff_type"]:checked'),
);
changed();
};
}

document.getElementById("ignoreWhitespace").onchange = function (e) {
changed();
};
a.onpaste = a.onchange = b.onpaste = b.onchange = changed;

var inputs = document.getElementsByClassName("change");
inputs.current = 0;
if ("oninput" in a) {
a.oninput = b.oninput = changed;
} else {
a.onkeyup = b.onkeyup = changed;
}

function next_diff() {
var element = inputs[inputs.current];
var headerOffset = 80;
var elementPosition = element.getBoundingClientRect().top;
var offsetPosition = elementPosition - headerOffset + window.scrollY;
function onDiffTypeChange(radio) {
window.diffType = radio.value;
// Not necessary
// document.title = "Diff " + radio.value.slice(4);
}

window.scrollTo({
top: offsetPosition,
behavior: "smooth",
});
var radio = document.getElementsByName("diff_type");
for (var i = 0; i < radio.length; i++) {
radio[i].onchange = function (e) {
onDiffTypeChange(e.target);
changed();
};
}

document.getElementById("ignoreWhitespace").onchange = function (e) {
changed();
};

});

inputs.current++;
if (inputs.current >= inputs.length) {
inputs.current = 0;
}
}
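For readers tracing `Diff[window.diffType](...)` above: the method name comes straight from the checked `diff_type` radio, so the radio values must be jsdiff method names — the commented-out `radio.value.slice(4)` title update implies values shaped exactly like "diffChars". A minimal sketch of that dispatch using the standard jsdiff API (input strings invented):

// window.diffType holds a jsdiff method name, e.g. "diffChars", "diffWords" or "diffLines".
var parts = Diff.diffWords("quick brown fox", "quick red fox");
parts.forEach(function (part) {
    // each part carries .value plus .added / .removed flags, as consumed above
    console.log(part.added ? "+" : part.removed ? "-" : " ", part.value);
});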
@@ -1,19 +1,4 @@
$(document).ready(function () {
function toggle() {
if ($('input[name="application-fetch_backend"]:checked').val() != 'html_requests') {
$('#requests-override-options').hide();
$('#webdriver-override-options').show();
} else {
$('#requests-override-options').show();
$('#webdriver-override-options').hide();
}
}

$('input[name="application-fetch_backend"]').click(function (e) {
toggle();
});
toggle();

$("#api-key").hover(
function () {
$("#api-key-copy").html('copy').fadeIn();
@@ -33,9 +18,25 @@ $(document).ready(function () {

});

$("#notification-token-toggle").click(function (e) {
$(".toggle-show").click(function (e) {
e.preventDefault();
$('#notification-tokens-info').toggle();
let target = $(this).data('target');
$(target).toggle();
});

// Time zone config related
$(".local-time").each(function (e) {
$(this).text(new Date($(this).data("utc")).toLocaleString());
})

const timezoneInput = $('#application-timezone');
if(timezoneInput.length) {
const timezone = Intl.DateTimeFormat().resolvedOptions().timeZone;
if (!timezoneInput.val().trim()) {
timezoneInput.val(timezone);
timezoneInput.after('<div class="timezone-message">The timezone was set from your browser, <strong>be sure to press save!</strong></div>');
}
}

});
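The new `.toggle-show` handler generalises the removed one-off `#notification-token-toggle`: any trigger element carries a `data-target` selector and the handler toggles whatever it points at. Illustrative wiring (the IDs are invented; the markup must exist before the ready-handler binds):

// Illustrative markup the handler expects (IDs invented for the example):
//   <a href="#" class="toggle-show" data-target="#advanced-help">Show help</a>
//   <div id="advanced-help" style="display: none">…</div>
$('.toggle-show').first().trigger('click'); // shows #advanced-help
$('.toggle-show').first().trigger('click'); // hides it again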
@@ -1,56 +0,0 @@
/**
 * debounce
 * @param {integer} milliseconds This param indicates the number of milliseconds
 *     to wait after the last call before calling the original function.
 * @param {object} What "this" refers to in the returned function.
 * @return {function} This returns a function that when called will wait the
 *     indicated number of milliseconds after the last call before
 *     calling the original function.
 */
Function.prototype.debounce = function (milliseconds, context) {
    var baseFunction = this,
        timer = null,
        wait = milliseconds;

    return function () {
        var self = context || this,
            args = arguments;

        function complete() {
            baseFunction.apply(self, args);
            timer = null;
        }

        if (timer) {
            clearTimeout(timer);
        }

        timer = setTimeout(complete, wait);
    };
};

/**
 * throttle
 * @param {integer} milliseconds This param indicates the number of milliseconds
 *     to wait between calls before calling the original function.
 * @param {object} What "this" refers to in the returned function.
 * @return {function} This returns a function that when called will wait the
 *     indicated number of milliseconds between calls before
 *     calling the original function.
 */
Function.prototype.throttle = function (milliseconds, context) {
    var baseFunction = this,
        lastEventTimestamp = null,
        limit = milliseconds;

    return function () {
        var self = context || this,
            args = arguments,
            now = Date.now();

        if (!lastEventTimestamp || now - lastEventTimestamp >= limit) {
            lastEventTimestamp = now;
            baseFunction.apply(self, args);
        }
    };
};
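This prototype-extension helper is deleted here; as plugins.js later in this diff shows, the identical code moves inside a jQuery IIFE. Usage of the extension is unchanged either way — for reference (handler body invented):

// Wrap any function; the wrapper delays the call until 250 ms after the last invocation.
window.addEventListener('resize', function () {
    console.log('resize settled');
}.debounce(250));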
@@ -1,56 +1,52 @@
$(document).ready(function() {
$(document).ready(function () {

$('#add-email-helper').click(function (e) {
e.preventDefault();
email = prompt("Destination email");
if(email) {
var n = $(".notification-urls");
var p=email_notification_prefix;
$(n).val( $.trim( $(n).val() )+"\n"+email_notification_prefix+email );
}
});
$('#add-email-helper').click(function (e) {
e.preventDefault();
email = prompt("Destination email");
if (email) {
var n = $(".notification-urls");
var p = email_notification_prefix;
$(n).val($.trim($(n).val()) + "\n" + email_notification_prefix + email);
}
});

$('#send-test-notification').click(function (e) {
e.preventDefault();
$('#send-test-notification').click(function (e) {
e.preventDefault();

// this can be global
var csrftoken = $('input[name=csrf_token]').val();
$.ajaxSetup({
beforeSend: function(xhr, settings) {
if (!/^(GET|HEAD|OPTIONS|TRACE)$/i.test(settings.type) && !this.crossDomain) {
xhr.setRequestHeader("X-CSRFToken", csrftoken)
data = {
notification_body: $('#notification_body').val(),
notification_format: $('#notification_format').val(),
notification_title: $('#notification_title').val(),
notification_urls: $('.notification-urls').val(),
tags: $('#tags').val(),
window_url: window.location.href,
}

$('.notifications-wrapper .spinner').fadeIn();
$('#notification-test-log').show();
$.ajax({
type: "POST",
url: notification_base_url,
data: data,
statusCode: {
400: function (data) {
$("#notification-test-log>span").text(data.responseText);
},
}
}
})

data = {
window_url : window.location.href,
notification_urls : $('.notification-urls').val(),
}
for (key in data) {
if (!data[key].length) {
alert(key+" is empty, cannot send test.")
return;
}
}

$.ajax({
type: "POST",
url: notification_base_url,
data : data,
statusCode: {
400: function() {
// More than likely the CSRF token was lost when the server restarted
alert("There was a problem processing the request, please reload the page.");
}
}
}).done(function(data){
console.log(data);
alert('Sent');
}).fail(function(data){
console.log(data);
alert('There was an error communicating with the server.');
})
});
}).done(function (data) {
$("#notification-test-log>span").text(data);
}).fail(function (jqXHR, textStatus, errorThrown) {
// Handle connection refused or other errors
if (textStatus === "error" && errorThrown === "") {
console.error("Connection refused or server unreachable");
$("#notification-test-log>span").text("Error: Connection refused or server is unreachable.");
} else {
console.error("Error:", textStatus, errorThrown);
$("#notification-test-log>span").text("An error occurred: " + textStatus);
}
}).always(function () {
$('.notifications-wrapper .spinner').hide();
})
});
});
changedetectionio/static/js/plugins.js — new file (+196 lines)
@@ -0,0 +1,196 @@
(function ($) {
    /**
     * debounce
     * @param {integer} milliseconds This param indicates the number of milliseconds
     *     to wait after the last call before calling the original function.
     * @param {object} What "this" refers to in the returned function.
     * @return {function} This returns a function that when called will wait the
     *     indicated number of milliseconds after the last call before
     *     calling the original function.
     */
    Function.prototype.debounce = function (milliseconds, context) {
        var baseFunction = this,
            timer = null,
            wait = milliseconds;

        return function () {
            var self = context || this,
                args = arguments;

            function complete() {
                baseFunction.apply(self, args);
                timer = null;
            }

            if (timer) {
                clearTimeout(timer);
            }

            timer = setTimeout(complete, wait);
        };
    };

    /**
     * throttle
     * @param {integer} milliseconds This param indicates the number of milliseconds
     *     to wait between calls before calling the original function.
     * @param {object} What "this" refers to in the returned function.
     * @return {function} This returns a function that when called will wait the
     *     indicated number of milliseconds between calls before
     *     calling the original function.
     */
    Function.prototype.throttle = function (milliseconds, context) {
        var baseFunction = this,
            lastEventTimestamp = null,
            limit = milliseconds;

        return function () {
            var self = context || this,
                args = arguments,
                now = Date.now();

            if (!lastEventTimestamp || now - lastEventTimestamp >= limit) {
                lastEventTimestamp = now;
                baseFunction.apply(self, args);
            }
        };
    };

    $.fn.highlightLines = function (configurations) {
        return this.each(function () {
            const $pre = $(this);
            const textContent = $pre.text();
            const lines = textContent.split(/\r?\n/); // Handles both \n and \r\n line endings

            // Build a map of line numbers to styles
            const lineStyles = {};

            configurations.forEach(config => {
                const {color, lines: lineNumbers} = config;
                lineNumbers.forEach(lineNumber => {
                    lineStyles[lineNumber] = color;
                });
            });

            // Function to escape HTML characters
            function escapeHtml(text) {
                return text.replace(/[&<>"'`=\/]/g, function (s) {
                    return "&#" + s.charCodeAt(0) + ";";
                });
            }

            // Process each line
            const processedLines = lines.map((line, index) => {
                const lineNumber = index + 1; // Line numbers start at 1
                const escapedLine = escapeHtml(line);
                const color = lineStyles[lineNumber];

                if (color) {
                    // Wrap the line in a span with inline style
                    return `<span style="background-color: ${color}">${escapedLine}</span>`;
                } else {
                    return escapedLine;
                }
            });

            // Join the lines back together
            const newContent = processedLines.join('\n');

            // Set the new content as HTML
            $pre.html(newContent);
        });
    };

    $.fn.miniTabs = function (tabsConfig, options) {
        const settings = {
            tabClass: 'minitab',
            tabsContainerClass: 'minitabs',
            activeClass: 'active',
            ...(options || {})
        };

        return this.each(function () {
            const $wrapper = $(this);
            const $contents = $wrapper.find('div[id]').hide();
            const $tabsContainer = $('<div>', {class: settings.tabsContainerClass}).prependTo($wrapper);

            // Generate tabs
            Object.entries(tabsConfig).forEach(([tabTitle, contentSelector], index) => {
                const $content = $wrapper.find(contentSelector);
                if (index === 0) $content.show(); // Show first content by default

                $('<a>', {
                    class: `${settings.tabClass}${index === 0 ? ` ${settings.activeClass}` : ''}`,
                    text: tabTitle,
                    'data-target': contentSelector
                }).appendTo($tabsContainer);
            });

            // Tab click event
            $tabsContainer.on('click', `.${settings.tabClass}`, function (e) {
                e.preventDefault();
                const $tab = $(this);
                const target = $tab.data('target');

                // Update active tab
                $tabsContainer.find(`.${settings.tabClass}`).removeClass(settings.activeClass);
                $tab.addClass(settings.activeClass);

                // Show/hide content
                $contents.hide();
                $wrapper.find(target).show();
            });
        });
    };

    // Object to store ongoing requests by namespace
    const requests = {};

    $.abortiveSingularAjax = function (options) {
        const namespace = options.namespace || 'default';

        // Abort the current request in this namespace if it's still ongoing
        if (requests[namespace]) {
            requests[namespace].abort();
        }

        // Start a new AJAX request and store its reference in the correct namespace
        requests[namespace] = $.ajax(options);

        // Return the current request in case it's needed
        return requests[namespace];
    };
})(jQuery);

function toggleOpacity(checkboxSelector, fieldSelector, inverted) {
    const checkbox = document.querySelector(checkboxSelector);
    const fields = document.querySelectorAll(fieldSelector);

    function updateOpacity() {
        const opacityValue = !checkbox.checked ? (inverted ? 0.6 : 1) : (inverted ? 1 : 0.6);
        fields.forEach(field => {
            field.style.opacity = opacityValue;
        });
    }

    // Initial setup
    updateOpacity();
    checkbox.addEventListener('change', updateOpacity);
}

function toggleVisibility(checkboxSelector, fieldSelector, inverted) {
    const checkbox = document.querySelector(checkboxSelector);
    const fields = document.querySelectorAll(fieldSelector);

    function updateOpacity() {
        const opacityValue = !checkbox.checked ? (inverted ? 'none' : 'block') : (inverted ? 'block' : 'none');
        fields.forEach(field => {
            field.style.display = opacityValue;
        });
    }

    // Initial setup
    updateOpacity();
    checkbox.addEventListener('change', updateOpacity);
}
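Two of the new plugins in use — `$.abortiveSingularAjax` keys in-flight requests by namespace and aborts a stale request before issuing its replacement, and `highlightLines` paints whole lines of a `<pre>`. Hedged usage sketches (the URL and namespace are invented; the highlightLines call mirrors how preview.js below consumes it):

// Repeated polls in the same namespace cancel their predecessor (endpoint invented):
$.abortiveSingularAjax({
    url: '/some-status-endpoint',
    namespace: 'status-poll',
    success: function (data) { console.log(data); }
});

// Paint lines 2 and 5 of every <pre> red — the same call shape preview.js uses:
$('pre').highlightLines([{color: '#ee0000', lines: [2, 5]}]);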
changedetectionio/static/js/preview.js — new file (+63 lines)
@@ -0,0 +1,63 @@
function redirectToVersion(version) {
    var currentUrl = window.location.href.split('?')[0]; // Base URL without query parameters
    var anchor = '';

    // Check if there is an anchor
    if (currentUrl.indexOf('#') !== -1) {
        anchor = currentUrl.substring(currentUrl.indexOf('#'));
        currentUrl = currentUrl.substring(0, currentUrl.indexOf('#'));
    }

    window.location.href = currentUrl + '?version=' + version + anchor;
}

function setupDateWidget() {
    $(document).on('keydown', function (event) {
        var $selectElement = $('#preview-version');
        var $selectedOption = $selectElement.find('option:selected');

        if ($selectedOption.length) {
            if (event.key === 'ArrowLeft' && $selectedOption.prev().length) {
                redirectToVersion($selectedOption.prev().val());
            } else if (event.key === 'ArrowRight' && $selectedOption.next().length) {
                redirectToVersion($selectedOption.next().val());
            }
        }
    });

    $('#preview-version').on('change', function () {
        redirectToVersion($(this).val());
    });

    var $selectedOption = $('#preview-version option:selected');

    if ($selectedOption.length) {
        var $prevOption = $selectedOption.prev();
        var $nextOption = $selectedOption.next();

        if ($prevOption.length) {
            $('#btn-previous').attr('href', '?version=' + $prevOption.val());
        } else {
            $('#btn-previous').remove();
        }

        if ($nextOption.length) {
            $('#btn-next').attr('href', '?version=' + $nextOption.val());
        } else {
            $('#btn-next').remove();
        }
    }
}

$(document).ready(function () {
    if ($('#preview-version').length) {
        setupDateWidget();
    }

    $('#diff-col > pre').highlightLines([
        {
            'color': '#ee0000',
            'lines': triggered_line_numbers
        }
    ]);
});
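`triggered_line_numbers` is never declared in this file, so it has to arrive from the rendered page before preview.js runs. A plausible wiring — the values and template placement are assumptions, not part of the diff:

// Assumed template wiring — the server injects the matched line numbers as a global:
//   <script>const triggered_line_numbers = [3, 17, 18];</script>  // values invented
// preview.js then feeds them to the highlightLines plugin defined in plugins.js:
$('#diff-col > pre').highlightLines([{color: '#ee0000', lines: triggered_line_numbers}]);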
@@ -1,14 +1,14 @@
$(function () {
/* add container before each proxy location to show status */

var option_li = $('.fetch-backend-proxy li').filter(function() {
return $("input",this)[0].value.length >0;
});

//var option_li = $('.fetch-backend-proxy li');
var isActive = false;
$(option_li).prepend('<div class="proxy-status"></div>');
$(option_li).append('<div class="proxy-timing"></div><div class="proxy-check-details"></div>');

function setup_html_widget() {
var option_li = $('.fetch-backend-proxy li').filter(function () {
return $("input", this)[0].value.length > 0;
});
$(option_li).prepend('<div class="proxy-status"></div>');
$(option_li).append('<div class="proxy-timing"></div><div class="proxy-check-details"></div>');
}

function set_proxy_check_status(proxy_key, state) {
// select input by value name
@@ -59,8 +59,14 @@ $(function () {
}

$('#check-all-proxies').click(function (e) {

e.preventDefault()
$('body').addClass('proxy-check-active');

if (!$('body').hasClass('proxy-check-active')) {
setup_html_widget();
$('body').addClass('proxy-check-active');
}

$('.proxy-check-details').html('');
$('.proxy-status').html('<span class="spinner"></span>').fadeIn();
$('.proxy-timing').html('');
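The ordering in this last hunk is the point of the change: the status containers previously got injected at page load for every proxy row, while the new code builds them lazily and only once, guarded by the `proxy-check-active` body class checked before it is set. The same once-only guard in isolation (names generic, not from the commit):

// Generic once-only init guard, the shape used by the proxy-check fix above:
function ensureWidgetOnce() {
    if (!$('body').hasClass('widget-ready')) {
        // ...inject DOM scaffolding here, exactly once...
        $('body').addClass('widget-ready'); // mark done only after setup
    }
}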