Compare commits


179 Commits

Author SHA1 Message Date
dgtlmoon
5437144dff 0.45.26 2024-07-11 13:52:13 +02:00
dgtlmoon
ed38012c6e Code - Fixing deprecation warning (#2477) 2024-07-09 17:38:17 +02:00
dgtlmoon
f07ff9b55e UI - Visual Selector should still update when elements were not found (#2476) 2024-07-09 15:35:19 +02:00
Nectariferous
1c46914992 Code - Update/modernise diff.py (#2471) 2024-07-09 15:08:13 +02:00
dgtlmoon
e9c4037178 UI - Visual Selector - Multiple selections (refactor) (#2475) 2024-07-09 15:07:23 +02:00
dgtlmoon
1af342ef64 UI - Visual Selector now supports Shift+Click for multiple selections! 2024-07-05 20:43:26 +02:00
dgtlmoon
e09ee7da97 UI - Visual Selector - Show/visualise all/any matching filter elements from all filters in "CSS/JSONPath/JQ/XPath Filters" include filters (#2440) 2024-07-05 15:20:39 +02:00
dgtlmoon
09bc24ff34 UI - Visual Selector graphics should be centred 2024-07-05 14:33:36 +02:00
dgtlmoon
a1d04bb37f Snapshot count from history was not updated in watch after using [clear history] (#2459) 2024-07-05 11:09:31 +02:00
dgtlmoon
01f910f840 Fixing 'tags'' field from old installs (0.43.0+) could have wrong data-type causing crash 2024-07-04 15:23:06 +02:00
dgtlmoon
bed16009bb 0.45.25 2024-07-03 19:27:23 +02:00
dgtlmoon
faeed78ffb UI - Fixing preview/diff "ignore text" highlight button (refactor, didnt work in "preview" mode) (#2455) 2024-07-03 19:26:33 +02:00
dgtlmoon
5d9081ccb2 Restock detection - Updating detection texts 2024-07-03 18:45:36 +02:00
dgtlmoon
2cf1829073 UI - Mobile - Hiding empty columns 2024-07-03 17:13:31 +02:00
dgtlmoon
526551a205 UI - Mobile - Watch overview table - Sort/order buttons were not being shown correctly 2024-06-30 18:05:13 +02:00
dgtlmoon
ba139e7f3f Update docker-compose.yml - fix indentation re #2447 2024-06-28 23:08:37 +02:00
Max Michels
13e343f9da Restock detection - Added extra out-of-stock phrases for DE (#2442) 2024-06-26 11:03:00 +02:00
dgtlmoon
13be4623db Restock detection - updating texts 2024-06-25 13:23:43 +02:00
dgtlmoon
3b19e3d2bf UI - Fixing double punctuation in 'unpaused' message #2435 2024-06-24 09:15:48 +02:00
dependabot[bot]
ce42f8ea26 Build - Bump docker/build-push-action from 5 to 6 in the all group (#2436) 2024-06-24 08:50:02 +02:00
dgtlmoon
343e359b39 Now saving last two HTML snapshots for future reference, refactor, dont write screenshots and xpath to disk when no change detected (saves disk IO) (#2431) 2024-06-23 09:19:32 +02:00
Hritik Vijay
ffd160ce0e Filters - Implement jqraw: filter (use this to output nicer JSON format when selecting/filtering by JSON) (#2430) 2024-06-21 13:31:03 +02:00
dgtlmoon
d31fc860cc Build - fixing build warnings 2024-06-20 15:07:17 +02:00
dgtlmoon
90b357f457 Upgrade to Python 3.11 from 3.10, add faster prebuilt "wheels" for rPi devices, upgrade cryptography security library 2024-06-20 14:42:17 +02:00
dgtlmoon
cc147be76e Prefer pythons built in "importlib" over pkg_resources+setuptools (#2424) 2024-06-18 09:08:48 +02:00
dependabot[bot]
8ae5ed76ce Security/dependabot - Bump urllib3 from 1.26.18 to 1.26.19 (#2423) 2024-06-18 08:23:12 +02:00
dgtlmoon
a9ed113369 0.45.24 2024-06-17 13:27:11 +02:00
dgtlmoon
eacf920b9a Update eventlet ( Fixes SSL error on Python 3.12 ) (#2419) 2024-06-17 12:05:20 +02:00
dgtlmoon
c9af9b6374 Filter failure/not found notification threshold - Counter should be reset when editing a watch, clear watch errors on 'save' (#2413) 2024-06-17 11:42:41 +02:00
dependabot[bot]
5e65fb606b Bump dnspython from 2.3.0 to 2.6.1 (#2306) 2024-06-17 10:23:50 +02:00
dgtlmoon
434a1b242e Improve testing for Python 3.10, 3.11 and 3.12 2024-06-17 09:46:54 +02:00
dgtlmoon
bce02f9c82 UI - Add space around checkbox operation buttons so they work better in mobile #2393 2024-06-13 13:22:31 +02:00
dgtlmoon
76ffc3e891 RSS - Setting to hide muted watches in RSS feed (default ON) (#2411) 2024-06-13 11:52:12 +02:00
dgtlmoon
c6ee6687b5 Fetching/Requests - Fixing user agent header overrides per-watch of global settings (#2409) 2024-06-13 10:50:46 +02:00
dgtlmoon
de48892243 Code - improving unique key fix for history database handler (#2402)
* improving unique key fix

* also bump timestamp along 1 sec
2024-06-06 22:59:04 +02:00
dgtlmoon
6aded50aca UI - 'Mark all viewed' button should not show when all viewed (#2399) 2024-06-05 11:46:04 +02:00
dgtlmoon
b8e279a025 Fixing build test - Adding small delay (#2397) 2024-06-04 13:56:35 +02:00
dependabot[bot]
8041d00e75 Code - Bump eventlet from 0.33.3 to 0.35.2 (#2305) 2024-06-03 17:25:14 +02:00
dgtlmoon
6a0e14cfce UI - Mobile CSS/layout fix wrapping on empty list text #2393 2024-05-30 14:26:51 +02:00
dgtlmoon
be91c5425c UI - Preview single snapshot - Date and button fixes (#2389) 2024-05-27 12:51:01 +02:00
dgtlmoon
778680d517 Build - PIL/pillow package not used, probably shouldnt be installed/required (#2382) 2024-05-27 10:17:19 +02:00
dgtlmoon
7e8aa7e3ff 0.45.23 2024-05-22 00:01:50 +02:00
Alexander Sulfrian
d77f913aa0 RSS - Only insert feed header if app_rss_token is set (should be only shown in index/overview page) (#2381) 2024-05-21 23:47:35 +02:00
dgtlmoon
59cefe58e7 Fetcher - Using pyppeteerstealth with puppeteer fetcher (#2203) 2024-05-21 17:03:50 +02:00
dgtlmoon
cfc689e046 Fix overflowing text 2024-05-21 12:13:23 +02:00
Alexander Sulfrian
7b04b52e45 RSS and tags/groups - Fixes use active_tag_uuid, fixes broken RSS link in page html (#2379) 2024-05-20 15:49:12 +02:00
dgtlmoon
f49eb4567f Ability to set default User-Agent for either fetching types directly in the UI (#2375) 2024-05-20 15:11:15 +02:00
dgtlmoon
a8959be348 Testing - Fixing JSON test 2024-05-20 14:14:40 +02:00
dgtlmoon
05bf3c9a5c UI - Mobile - quick watch form element fixes 2024-05-20 13:43:59 +02:00
dgtlmoon
4293639f51 UI - CSS - Remove gradient border, it did not add much to the design #2377 2024-05-20 13:41:23 +02:00
dgtlmoon
f0ed4f64e8 RSS - Muted watches should not show in RSS feed (#2374 #2304) 2024-05-17 17:47:35 +02:00
dgtlmoon
add2c658b4 Notifications - Fixing truncated notifications when tgram:// or discord:// is used with other notification methods (#2372 #2299) 2024-05-17 09:20:26 +02:00
dgtlmoon
e27f66eb73 UI - Ability to preview/view single changes by timestamp using keyboard or select box(#1916) 2024-05-16 23:39:06 +02:00
dgtlmoon
e4504fee49 Browser Steps - Fixing "goto site" step #2330 #2337 (#2364) 2024-05-15 10:49:30 +02:00
dgtlmoon
5798581f18 Crash on older CPU - Setting LXML version to any version without the known modern-CPU-only CPU flags (#2365 #2328 ) 2024-05-15 10:17:51 +02:00
dgtlmoon
ef910b86ef Notifications - Update Apprise notification library to 1.8.0 (#2363 #2324) fixes mailto:// with IP as server endpoint 2024-05-14 14:01:13 +02:00
dgtlmoon
8d1fb96d18 UI - Refactor of the Recheck Time Settings, Added "Use default recheck time" checkbox and refactor/simplify system handling (#2362) 2024-05-14 13:51:03 +02:00
dgtlmoon
5df5d0fbe7 UI - Search should scan/search error messages (#2353) 2024-05-10 18:20:49 +02:00
dgtlmoon
815cba11ca UI - 'stats' tab should show what the server-type detected is ( #2348 ) 2024-05-07 15:23:42 +02:00
dgtlmoon
3aed4e5af9 Update README.md 2024-05-05 18:21:10 +02:00
dgtlmoon
3618c389c6 Notifications - Setting set minimum version for mqtt:// library notifications (#2334 / #2333) 2024-05-02 16:51:56 +02:00
dgtlmoon
d127214d8f 0.45.22 2024-05-02 12:09:45 +02:00
dgtlmoon
c0f000b1d1 Merge pull request from GHSA-pwgc-w4x9-gw67
* Auto-escape was not enabled GHSA-pwgc-w4x9-gw67

* Auto-escape was not enabled because the filenames were not something jinja2 enables it for.
2024-05-02 11:46:31 +02:00
dgtlmoon
ee5294740a 0.45.21 2024-04-25 22:29:38 +02:00
dgtlmoon
bd6eda696c Merge pull request from GHSA-4r7v-whpg-8rx3
* CVE-2024-32651 - Security fix - Server Side Template Injection in Jinja2 allows Remote Command Execution

* use ImmutableSandboxedEnvironment also in validation
2024-04-25 22:06:09 +02:00
dgtlmoon
1ba29655f5 UI - Wrap tag names in solid background to make it easier to read when theres multiple tags 2024-04-20 20:34:52 +02:00
dgtlmoon
830a0a3a82 UI - Error text on exception should contain the word Exception (#2322) 2024-04-19 08:54:25 +02:00
dgtlmoon
e110b3ee93 0.45.20 2024-04-18 11:55:46 +02:00
dgtlmoon
3ae9bfa6f9 Bug fix - further work on lxml filter extract (#2313 #2312 #2317) 2024-04-18 11:53:45 +02:00
dgtlmoon
6f3c3b7dfb 0.45.19 2024-04-17 20:01:35 +02:00
dgtlmoon
74707909f1 Bug fix for newer lxml module - module 'lxml.etree' has no attribute '_ElementStringResult' - reimplement _ElementStringResult (#2313 #2312) 2024-04-17 19:55:45 +02:00
dgtlmoon
d4dac23ba1 0.45.18 2024-04-16 18:50:14 +02:00
dgtlmoon
f9954f93f3 UI - Adding UI notice if watch has group options set (#2311 #2307) 2024-04-16 18:48:51 +02:00
dgtlmoon
1a43b112dc dependabot - automatically follow apprise 2024-04-15 11:17:50 +02:00
dgtlmoon
db59bf73e1 "Send Test Notification" - In "Group" settings form it should not fallback to the system wide notifications when sending a test if nothing is set. 2024-04-03 17:10:13 +02:00
dgtlmoon
8aac7bccbe "Send Test Notification" - Now provides better feedback and works with the actual values in system settings form 2024-04-03 16:52:42 +02:00
dgtlmoon
9449c59fbb Code - Getting ready for newer python versions - packing our own strtobool (#2291) 2024-04-03 16:17:15 +02:00
dgtlmoon
21f4ba2208 UI - BrowserSteps - Show step screenshot/pic should use absolute URL #2243 2024-04-03 16:15:33 +02:00
dgtlmoon
daef1cd036 UI - Remove unique check for URLs entered on the "quick watch add" form ( #2286 #2292 ) 2024-04-03 16:08:33 +02:00
dgtlmoon
56b365df40 UI - Improvements to tag/groups page, show number of watches under each group, link group name to list (#2290) 2024-04-03 16:01:24 +02:00
dgtlmoon
8e5bf91965 "Send Test Notification" button from watch form edit should respect global settings and tag/group settings ( #2289, #2263 ) 2024-04-03 15:18:21 +02:00
dgtlmoon
1ae59551be 0.45.17 2024-03-31 16:35:44 +02:00
dgtlmoon
a176468fb8 UI - Add helper note 2024-03-31 16:35:09 +02:00
dgtlmoon
8fac593201 UI Text - Adding helper text to VisualSelector to explain what the connection is with the CSS/xPath filters 2024-03-26 14:58:36 +01:00
Andrew
e3b8c0f5af Update contributing documentation for discontinuation of dev branch (#2272) 2024-03-22 18:39:43 +01:00
dgtlmoon
514fd7f91e Updating pyppeteer-ng (mainly newer pillow release) (#2247) 2024-03-18 14:00:05 +01:00
dgtlmoon
38c4768b92 Notifications - Updating apprise version, pinning mqtt:// to compatible version (#2242) 2024-03-10 21:05:23 +01:00
dgtlmoon
6555d99044 0.45.16 2024-03-08 21:07:08 +01:00
dgtlmoon
e719dbd19b Pip build - content fetchers package was missing 2024-03-08 21:06:22 +01:00
dgtlmoon
b28a8316cc 0.45.15 2024-03-08 19:00:37 +01:00
dgtlmoon
e609a2d048 Updating restock detection texts 2024-03-08 15:58:40 +01:00
dgtlmoon
994d34c776 Adding CORS module - Solves Chrome extension API connectivity (#2236) 2024-03-08 13:30:31 +01:00
dgtlmoon
de776800e9 UI - Overview list shortcut button - Ability to reset any previous errors 2024-03-06 19:16:13 +01:00
dgtlmoon
8b8ed58f20 Chrome Extension - Adding link and install information from the API page 2024-03-06 15:21:03 +01:00
dgtlmoon
79c6d765de Chrome Extension - Adding link in README.md to the webstore 2024-03-06 11:15:27 +01:00
dgtlmoon
c6db7fc90e Chrome Extension - Adding callout to UI 2024-03-06 11:06:30 +01:00
pedrogius
bc587efae2 Import - Fixed "Include filters" option (fixed typo on select) (#2232) 2024-03-05 10:45:32 +01:00
dgtlmoon
6ee6be1a5f Merge branch 'master' of github.com:dgtlmoon/changedetection.io 2024-02-28 17:11:01 +01:00
dgtlmoon
c83485094b Updating restock detection texts 2024-02-28 17:10:41 +01:00
dgtlmoon
387ce32e6f Restock detection - Improving test for restock IN STOCK -> OUT OF STOCK (#2219) 2024-02-28 10:05:52 +01:00
dgtlmoon
6b9a788d75 Puppeteer - remove debug hook 2024-02-26 18:15:23 +01:00
dgtlmoon
14e632bc19 Custom headers fix in Browser Steps and Playwright/Puppeteer fetchers ( #2197 ) 2024-02-26 18:02:45 +01:00
Constantin Hong
52c895b2e8 text_json_diff/fix: Keep an order of filter and remove duplicated filters. 2 (#2178) 2024-02-21 11:46:23 +01:00
dgtlmoon
a62043e086 Fetching - restock detecting and visual selector scraper - Fixes scraping of elements that are not visible 2024-02-21 11:21:43 +01:00
dgtlmoon
3d390b6ea4 BrowserSteps UI - Avoid selecting very large elements that are likely to be the page wrapper 2024-02-21 11:00:35 +01:00
dgtlmoon
301a40ca34 Fetching - Puppeteer - Adding more debug/diagnostic information 2024-02-21 10:55:18 +01:00
dgtlmoon
1c099cdba6 Update stock-not-in-stock.js 2024-02-21 10:28:59 +01:00
dgtlmoon
af747e6e3f UI - Sorted alphabetical tag list and list of tags in groups setting (#2205) 2024-02-21 10:03:09 +01:00
dgtlmoon
aefad0bdf6 Code - Remove whitespaces in visual selector elements config 2024-02-21 09:37:35 +01:00
dgtlmoon
904ef84f82 Build fix - Pinning package versions and Custom browser endpoints should not have a proxy set (#2204) 2024-02-20 22:11:17 +01:00
dgtlmoon
d2569ba715 Update stock-not-in-stock.js 2024-02-20 20:00:31 +01:00
dgtlmoon
ccb42bcb12 Fetching pages - Custom browser endpoints should not have default proxy info added 2024-02-12 19:05:10 +01:00
dgtlmoon
4163030805 Puppeteer - fixing wait times 2024-02-12 13:02:24 +01:00
dgtlmoon
140d375ad0 Puppeteer - more improvements to proxy and authentication 2024-02-12 12:54:11 +01:00
dgtlmoon
1a608d0ae6 Puppeteer - client fixes for proxy and caching (#2181) 2024-02-12 12:40:31 +01:00
dependabot[bot]
e6ed91cfe3 dependabot - Bump the all group with 1 update (artifact store) (#2180) 2024-02-12 12:40:03 +01:00
dgtlmoon
008272cd77 Puppeteer fetch - fixing exception names 2024-02-11 11:18:36 +01:00
dgtlmoon
823a0c99f4 Code - Split content fetcher code up (playwright, puppeteer and requests), fix puppeteer direct chrome support (#2169) 2024-02-11 00:09:12 +01:00
dgtlmoon
1f57d9d0b6 Alpine linux build - adding JPEG development headers to fix build errors 2024-02-10 20:57:04 +01:00
dgtlmoon
3287283065 Plawright content fetcher - Fixes for status codes and screenshot info (#2168) 2024-02-08 15:15:04 +01:00
dgtlmoon
c5a4e0aaa3 Fetching - Prefer to use SockPuppetBrowser (#2163) 2024-02-07 20:58:21 +01:00
dgtlmoon
5119efe4fb 0.45.14 2024-02-07 12:43:23 +01:00
dgtlmoon
78a2dceb81 Bug fix - fix missing default var (#2162/ #2118/ #2122 ) 2024-02-06 17:25:44 +01:00
dgtlmoon
72c7645f60 Fix - Pinning elementpath xPath filter library to 4.1.5 (#2164) 2024-02-06 14:49:10 +01:00
dgtlmoon
e09eb47fb7 Restock detection - Update stock-not-in-stock.js (NL) 2024-02-03 22:32:39 +01:00
dgtlmoon
616c0b3f65 New text filter - Sort text alphabetically filter (#2153) 2024-02-02 11:36:58 +01:00
dgtlmoon
c90b27823a Filtering - include_filters in group and watch settings should not duplicate (#2151 #1845) 2024-02-02 09:30:01 +01:00
dgtlmoon
3b16b19a94 Record notification count and show in [stats] tab (#2150) 2024-02-02 09:12:44 +01:00
Antonio Neri
4ee9fa79e1 Restock - Update stock-not-in-stock.js Italian translation (#2149)
Added `prodotto esaurito` - Italian for out of stock
2024-02-01 16:35:31 +01:00
dgtlmoon
4b49759113 UI - Show error/warning when trying to compare the same version 2024-02-01 10:36:43 +01:00
dgtlmoon
e9a9790cb0 Fetching - Make an obvious error when using BrowserSteps with the simple text fetcher (#2145) 2024-02-01 00:09:27 +01:00
dgtlmoon
593660e2f6 Fix for switching to price-data-follower mode (when page has JSON price data), only needs to be queued once. Re #1565 2024-01-31 22:39:24 +01:00
dgtlmoon
7d96b4ba83 Fetching - Always record server software reply headers (will be used in the future) (#2143) 2024-01-31 16:15:43 +01:00
dgtlmoon
fca40e4d5b Testing - General test workflow improvements (#2144) 2024-01-31 15:10:44 +01:00
dgtlmoon
66e2dfcead RSS - Include link to the watched URL in the feed (#2139 #2131 and #327) 2024-01-29 16:26:14 +01:00
dgtlmoon
bce7eb68fb Notifications - skip empty notification URLs from being processed (#2138) 2024-01-29 14:20:39 +01:00
dgtlmoon
93c0385119 UI - Filters & Triggers - Adding example for keyword matching in a line 2024-01-29 14:18:14 +01:00
dgtlmoon
e17f3be739 RSS - Adding performance stats 2024-01-29 13:05:11 +01:00
dgtlmoon
3a9f79b756 Notification - logging - adding performance information for processing time of notifications #327 2024-01-29 12:55:08 +01:00
dgtlmoon
1f5670253e UI - Adding icon to show which watch has Browser Steps enabled (#2137) 2024-01-29 12:36:53 +01:00
dgtlmoon
fe3cf5ffd2 Logging - Adding extra debug logging to change detection (#2136) 2024-01-29 11:21:21 +01:00
dgtlmoon
d31a45d49a Fetcher - Improve status_code logging (#2130 #2122) 2024-01-25 09:53:00 +01:00
Conner
19ee65361d Notifications - Bugfix: Notification format not being set correct (HTML emails being sent as plaintext and other problems) (#2129) 2024-01-24 20:45:45 +01:00
dgtlmoon
677082723c Restock tweaks - use a single regex, tidy up height detection (#2125) 2024-01-23 13:31:05 +01:00
dgtlmoon
96793890f8 Notification - Templates - Adding an example of how to use URL encoding with tokens 2024-01-22 12:20:50 +01:00
dgtlmoon
0439155127 Notification - Templates - Adding an example of how to use |tojson for JSON payloads 2024-01-22 11:06:55 +01:00
dependabot[bot]
29ca2521eb Build maintenance - dependabot - Bump the all build helpers (#2121) 2024-01-20 11:47:14 +01:00
Andrew Peabody
7d67ad057c Enable dependabot for github-actions (#2119) 2024-01-20 10:03:24 +01:00
dgtlmoon
2e88872b7e Update docker-compose.yml 2024-01-19 23:14:55 +01:00
dgtlmoon
b30b718373 0.45.13 2024-01-19 10:24:47 +01:00
dgtlmoon
402f1e47e7 Security update - Adding API token secure check for API endpoint /api/v1/watch/<uuid>/history @rozpuszczalny 2024-01-18 23:19:09 +01:00
dgtlmoon
9510345e01 Test - tidy up backup test (#2117) 2024-01-17 22:35:29 +01:00
dgtlmoon
36085d8cf4 Adding contributors section (#2116) 2024-01-17 18:47:24 +01:00
dgtlmoon
399cdf0fbf Logging loguru output tweaks (#2112) 2024-01-16 11:27:47 +01:00
Constantin Hong
4be0fafa93 Support Loguru as a logger (#2036) 2024-01-16 09:48:16 +01:00
dgtlmoon
51ce7ac66e Update stock-not-in-stock.js texts 2024-01-15 10:20:04 +01:00
dgtlmoon
c3d825f38c Test - Adding extra test for HTML output in emails ( #2103 ) 2024-01-14 17:47:54 +01:00
dgtlmoon
fc3c4b804d Update README.md 2024-01-14 13:40:54 +01:00
dgtlmoon
1749c07750 Restock detection - Check all elements for text to get stock status from, only consider elements inside the viewport, only consider elements more than 100px from the top (avoid menu) , trim any text returned (#2040) 2024-01-12 23:11:56 +01:00
dgtlmoon
65428655b8 Notifications - When any in a list of notifications fails, the others should still work (#2106) 2024-01-12 12:25:21 +01:00
dgtlmoon
8be0029260 Browser Steps - Fixing "'Response' object is not subscriptable" where quotes were used in connection URL - Quote wrapped URL for browserstep url was breaking the connection #1627 #1823 #2099 (#2100) 2024-01-11 10:12:00 +01:00
kiyell
3c727ca54b Added OPTIONS HTTP method (#2094) 2024-01-08 23:32:44 +01:00
dgtlmoon
4596532090 API Docs - Examples should use port 5000 (same as the docker-compose default installation and other documentation) 2024-01-08 14:30:48 +01:00
dgtlmoon
d0a88d54a1 0.45.12 2024-01-05 20:17:14 +01:00
dgtlmoon
21ab4b16a0 0.45.11 2024-01-05 20:16:18 +01:00
dgtlmoon
77133de1cf Notification fixes - error on mailto:// when no format was specified, fixing default body and title of notifications to respect global settings (#2085) 2024-01-05 20:15:13 +01:00
dgtlmoon
0d92be348a Update README.md 2024-01-05 19:12:58 +01:00
dgtlmoon
3ac0c9346c Removing heroku support as its no longer free 2024-01-05 18:27:36 +01:00
dgtlmoon
b6d8db4c67 PyPi package build fixes (#2084) 2024-01-05 18:16:07 +01:00
dgtlmoon
436a66d465 Adding PyPi pip package publisher script 2024-01-05 17:27:41 +01:00
dgtlmoon
764514e5eb 0.45.10 2024-01-05 14:51:35 +01:00
dgtlmoon
ad3ffb6ccb Update README.md - Remove deprecated docker-compose (now docker compose) 2024-01-05 11:41:52 +01:00
dgtlmoon
e051b29bf2 Browser Steps - General error handling improvements (#2083) 2024-01-05 11:29:48 +01:00
Christian Arnold
126852b778 Browser Steps - Fix for correct tokens/information in browser step failure notification (#2066) 2024-01-05 11:15:22 +01:00
dgtlmoon
d115b2c858 UI - [Send test notification] - Refactor to use all tokens like a real watch and Notification Body+Title from UI value (#2079) 2024-01-04 17:02:31 +01:00
dgtlmoon
2db04e4211 Notifications upgrade - Upgrade to Apprise 1.7.1 - Emojis support, Telegram topics support, Discord support for user and role @ping support. (#2075) 2024-01-03 11:16:09 +01:00
dgtlmoon
946a556fb6 Restock detection - "In stock" should be None/"Not yet checked" by default (#2069) 2024-01-01 17:10:27 +01:00
dgtlmoon
eda23678aa Restock detection - updating texts 2024-01-01 16:43:34 +01:00
dgtlmoon
273bd45ad7 Fetching - Custom browser on experimental/puppeteer fetcher - Don't switch to custom puppeteer mode if external browser URL is active (#2068) 2024-01-01 16:40:24 +01:00
122 changed files with 4513 additions and 2681 deletions

.github/dependabot.yml (new file, 14 lines)

@@ -0,0 +1,14 @@
version: 2
updates:
- package-ecosystem: github-actions
directory: /
schedule:
interval: "weekly"
"caronc/apprise":
versioning-strategy: "increase"
schedule:
interval: "daily"
groups:
all:
patterns:
- "*"

@@ -12,8 +12,10 @@ RUN \
     cargo \
     g++ \
     gcc \
+    jpeg-dev \
     libc-dev \
     libffi-dev \
+    libjpeg \
     libxslt-dev \
     make \
     openssl-dev \

@@ -34,7 +34,7 @@ jobs:
     # Initializes the CodeQL tools for scanning.
     - name: Initialize CodeQL
-      uses: github/codeql-action/init@v2
+      uses: github/codeql-action/init@v3
       with:
         languages: ${{ matrix.language }}
         # If you wish to specify custom queries, you can do so here or in a config file.
@@ -45,7 +45,7 @@ jobs:
     # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
     # If this step fails, then you should remove it and run the build manually (see below)
     - name: Autobuild
-      uses: github/codeql-action/autobuild@v2
+      uses: github/codeql-action/autobuild@v3
     # Command-line programs to run using the OS shell.
     # 📚 https://git.io/JvXDl
@@ -59,4 +59,4 @@ jobs:
     # make release
     - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@v2
+      uses: github/codeql-action/analyze@v3

@@ -41,7 +41,7 @@ jobs:
     steps:
       - uses: actions/checkout@v4
       - name: Set up Python 3.11
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: 3.11
@@ -88,7 +88,7 @@ jobs:
       - name: Build and push :dev
        id: docker_build
        if: ${{ github.ref }} == "refs/heads/master"
-       uses: docker/build-push-action@v5
+       uses: docker/build-push-action@v6
        with:
          context: ./
          file: ./Dockerfile
@@ -106,7 +106,7 @@ jobs:
       - name: Build and push :tag
        id: docker_build_tag_release
        if: github.event_name == 'release' && startsWith(github.event.release.tag_name, '0.')
-       uses: docker/build-push-action@v5
+       uses: docker/build-push-action@v6
        with:
          context: ./
          file: ./Dockerfile

.github/workflows/pypi-release.yml (new file, 77 lines)

@@ -0,0 +1,77 @@
name: Publish Python 🐍distribution 📦 to PyPI and TestPyPI
on: push
jobs:
build:
name: Build distribution 📦
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.11"
- name: Install pypa/build
run: >-
python3 -m
pip install
build
--user
- name: Build a binary wheel and a source tarball
run: python3 -m build
- name: Store the distribution packages
uses: actions/upload-artifact@v4
with:
name: python-package-distributions
path: dist/
test-pypi-package:
name: Test the built 📦 package works basically.
runs-on: ubuntu-latest
needs:
- build
steps:
- name: Download all the dists
uses: actions/download-artifact@v4
with:
name: python-package-distributions
path: dist/
- name: Set up Python 3.11
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Test that the basic pip built package runs without error
run: |
set -ex
sudo pip3 install --upgrade pip
pip3 install dist/changedetection.io*.whl
changedetection.io -d /tmp -p 10000 &
sleep 3
curl --retry-connrefused --retry 6 http://127.0.0.1:10000/static/styles/pure-min.css >/dev/null
curl --retry-connrefused --retry 6 http://127.0.0.1:10000/ >/dev/null
killall changedetection.io
publish-to-pypi:
name: >-
Publish Python 🐍 distribution 📦 to PyPI
if: startsWith(github.ref, 'refs/tags/') # only publish to PyPI on tag pushes
needs:
- test-pypi-package
runs-on: ubuntu-latest
environment:
name: release
url: https://pypi.org/p/changedetection.io
permissions:
id-token: write # IMPORTANT: mandatory for trusted publishing
steps:
- name: Download all the dists
uses: actions/download-artifact@v4
with:
name: python-package-distributions
path: dist/
- name: Publish distribution 📦 to PyPI
uses: pypa/gh-action-pypi-publish@release/v1

@@ -11,12 +11,14 @@ on:
       - requirements.txt
       - Dockerfile
       - .github/workflows/*
+      - .github/test/Dockerfile*
   pull_request:
     paths:
       - requirements.txt
       - Dockerfile
       - .github/workflows/*
+      - .github/test/Dockerfile*
 # Changes to requirements.txt packages and Dockerfile may or may not always be compatible with arm etc, so worth testing
 # @todo: some kind of path filter for requirements.txt and Dockerfile
@@ -26,7 +28,7 @@ jobs:
     steps:
       - uses: actions/checkout@v4
       - name: Set up Python 3.11
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
        with:
          python-version: 3.11
@@ -49,7 +51,7 @@ jobs:
     # Check we can still build under alpine/musl
     - name: Test that the docker containers can build (musl via alpine check)
       id: docker_build_musl
-      uses: docker/build-push-action@v5
+      uses: docker/build-push-action@v6
       with:
         context: ./
         file: ./.github/test/Dockerfile-alpine
@@ -57,7 +59,7 @@ jobs:
     - name: Test that the docker containers can build
       id: docker_build
-      uses: docker/build-push-action@v5
+      uses: docker/build-push-action@v6
       # https://github.com/docker/build-push-action#customizing
       with:
         context: ./

@@ -4,17 +4,10 @@ name: ChangeDetection.io App Test
 on: [push, pull_request]
 jobs:
-  test-application:
+  lint-code:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
-      # Mainly just for link/flake8
-      - name: Set up Python 3.11
-        uses: actions/setup-python@v4
-        with:
-          python-version: '3.11'
       - name: Lint with flake8
         run: |
           pip3 install flake8
@@ -23,134 +16,24 @@ jobs:
           # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
           flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
+  test-application-3-10:
+    needs: lint-code
+    uses: ./.github/workflows/test-stack-reusable-workflow.yml
+    with:
+      python-version: '3.10'
+  test-application-3-11:
+    needs: lint-code
+    uses: ./.github/workflows/test-stack-reusable-workflow.yml
+    with:
+      python-version: '3.11'
+      skip-pypuppeteer: true
+  test-application-3-12:
+    needs: lint-code
+    uses: ./.github/workflows/test-stack-reusable-workflow.yml
+    with:
+      python-version: '3.12'
+      skip-pypuppeteer: true
-      - name: Spin up ancillary testable services
-        run: |
-          docker network create changedet-network
-          # Selenium+browserless
-          docker run --network changedet-network -d --hostname selenium -p 4444:4444 --rm --shm-size="2g" selenium/standalone-chrome:4
-          docker run --network changedet-network -d --name browserless --hostname browserless -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" -e "DEFAULT_LAUNCH_ARGS=[\"--window-size=1920,1080\"]" --rm -p 3000:3000 --shm-size="2g" browserless/chrome:1.60-chrome-stable
-          # For accessing custom browser tests
-          docker run --network changedet-network -d --name browserless-custom-url --hostname browserless-custom-url -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" -e "DEFAULT_LAUNCH_ARGS=[\"--window-size=1920,1080\"]" --rm --shm-size="2g" browserless/chrome:1.60-chrome-stable
-      - name: Build changedetection.io container for testing
-        run: |
-          # Build a changedetection.io container and start testing inside
-          docker build . -t test-changedetectionio
-          # Debug info
-          docker run test-changedetectionio bash -c 'pip list'
-      - name: Spin up ancillary SMTP+Echo message test server
-        run: |
-          # Debug SMTP server/echo message back server
-          docker run --network changedet-network -d -p 11025:11025 -p 11080:11080 --hostname mailserver test-changedetectionio bash -c 'python changedetectionio/tests/smtp/smtp-test-server.py'
- name: Test built container with pytest
run: |
# Unit tests
docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_notification_diff'
docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_watch_model'
# All tests
docker run --network changedet-network test-changedetectionio bash -c 'cd changedetectionio && ./run_basic_tests.sh'
- name: Test built container selenium+browserless/playwright
run: |
# Selenium fetch
docker run --rm -e "WEBDRIVER_URL=http://selenium:4444/wd/hub" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/fetchers/test_content.py && pytest tests/test_errorhandling.py'
# Playwright/Browserless fetch
docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/fetchers/test_content.py && pytest tests/test_errorhandling.py && pytest tests/visualselector/test_fetch_data.py'
# Settings headers playwright tests - Call back in from Browserless, check headers
docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000?dumpio=true" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'
docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "WEBDRIVER_URL=http://selenium:4444/wd/hub" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'
docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "USE_EXPERIMENTAL_PUPPETEER_FETCH=yes" -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000?dumpio=true" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'
# restock detection via playwright - added name=changedet here so that playwright/browserless can connect to it
docker run --rm --name "changedet" -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-port=5004 --live-server-host=0.0.0.0 tests/restock/test_restock.py'
- name: Test SMTP notification mime types
run: |
# SMTP content types - needs the 'Debug SMTP server/echo message back server' container from above
docker run --rm --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/smtp/test_notification_smtp.py'
- name: Test with puppeteer fetcher and disk cache
run: |
docker run --rm -e "PUPPETEER_DISK_CACHE=/tmp/data/" -e "USE_EXPERIMENTAL_PUPPETEER_FETCH=yes" -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/fetchers/test_content.py && pytest tests/test_errorhandling.py && pytest tests/visualselector/test_fetch_data.py'
# Browserless would have had -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]" added above
- name: Test proxy interaction
run: |
cd changedetectionio
./run_proxy_tests.sh
# And again with PLAYWRIGHT_DRIVER_URL=..
cd ..
- name: Test custom browser URL
run: |
cd changedetectionio
./run_custom_browser_url_tests.sh
cd ..
- name: Test changedetection.io container starts+runs basically without error
run: |
docker run --name test-changedetectionio -p 5556:5000 -d test-changedetectionio
sleep 3
# Should return 0 (no error) when grep finds it
curl -s http://localhost:5556 |grep -q checkbox-uuid
# and IPv6
curl -s -g -6 "http://[::1]:5556"|grep -q checkbox-uuid
docker kill test-changedetectionio
- name: Test changedetection.io SIGTERM and SIGINT signal shutdown
run: |
echo SIGINT Shutdown request test
docker run --name sig-test -d test-changedetectionio
sleep 3
echo ">>> Sending SIGINT to sig-test container"
docker kill --signal=SIGINT sig-test
sleep 3
# invert the check (it should be not 0/not running)
docker ps
# check signal catch(STDOUT) log
docker logs sig-test | grep 'Shutdown: Got Signal - SIGINT' || exit 1
test -z "`docker ps|grep sig-test`"
if [ $? -ne 0 ]
then
echo "Looks like container was running when it shouldnt be"
docker ps
exit 1
fi
# @todo - scan the container log to see the right "graceful shutdown" text exists
docker rm sig-test
echo SIGTERM Shutdown request test
docker run --name sig-test -d test-changedetectionio
sleep 3
echo ">>> Sending SIGTERM to sig-test container"
docker kill --signal=SIGTERM sig-test
sleep 3
# invert the check (it should be not 0/not running)
docker ps
docker logs sig-test | grep 'Shutdown: Got Signal - SIGTERM' || exit 1
test -z "`docker ps|grep sig-test`"
if [ $? -ne 0 ]
then
echo "Looks like container was running when it shouldnt be"
docker ps
exit 1
fi
# @todo - scan the container log to see the right "graceful shutdown" text exists
docker rm sig-test
#export WEBDRIVER_URL=http://localhost:4444/wd/hub
#pytest tests/fetchers/test_content.py
#pytest tests/test_errorhandling.py

@@ -1,36 +0,0 @@
name: ChangeDetection.io PIP package test
# Triggers the workflow on push or pull request events
# This line doesnt work, even tho it is the documented one
on: [push, pull_request]
# Changes to requirements.txt packages and Dockerfile may or may not always be compatible with arm etc, so worth testing
# @todo: some kind of path filter for requirements.txt and Dockerfile
jobs:
test-pip-build-basics:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.11
uses: actions/setup-python@v4
with:
python-version: 3.11
- name: Test that the basic pip built package runs without error
run: |
set -e
mkdir dist
pip3 install wheel
python3 setup.py bdist_wheel
pip3 install -r requirements.txt
rm ./changedetection.py
rm -rf changedetectio
pip3 install dist/changedetection.io*.whl
changedetection.io -d /tmp -p 10000 &
sleep 3
curl http://127.0.0.1:10000/static/styles/pure-min.css >/dev/null
killall -9 changedetection.io

@@ -0,0 +1,239 @@
name: ChangeDetection.io App Test
on:
workflow_call:
inputs:
python-version:
description: 'Python version to use'
required: true
type: string
default: '3.10'
skip-pypuppeteer:
description: 'Skip PyPuppeteer (not supported in 3.11/3.12)'
required: false
type: boolean
default: false
jobs:
test-application:
runs-on: ubuntu-latest
env:
PYTHON_VERSION: ${{ inputs.python-version }}
steps:
- uses: actions/checkout@v4
# Mainly just for link/flake8
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Build changedetection.io container for testing under Python ${{ env.PYTHON_VERSION }}
run: |
echo "---- Building for Python ${{ env.PYTHON_VERSION }} -----"
# Build a changedetection.io container and start testing inside
docker build --build-arg PYTHON_VERSION=${{ env.PYTHON_VERSION }} --build-arg LOGGER_LEVEL=TRACE -t test-changedetectionio .
# Debug info
docker run test-changedetectionio bash -c 'pip list'
- name: We should be Python ${{ env.PYTHON_VERSION }} ...
run: |
docker run test-changedetectionio bash -c 'python3 --version'
- name: Spin up ancillary testable services
run: |
docker network create changedet-network
# Selenium
docker run --network changedet-network -d --hostname selenium -p 4444:4444 --rm --shm-size="2g" selenium/standalone-chrome:4
# SocketPuppetBrowser + Extra for custom browser test
docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser --hostname sockpuppetbrowser --rm -p 3000:3000 dgtlmoon/sockpuppetbrowser:latest
docker run --network changedet-network -d -e "LOG_LEVEL=TRACE" --cap-add=SYS_ADMIN --name sockpuppetbrowser-custom-url --hostname sockpuppetbrowser-custom-url -p 3001:3000 --rm dgtlmoon/sockpuppetbrowser:latest
- name: Spin up ancillary SMTP+Echo message test server
run: |
# Debug SMTP server/echo message back server
docker run --network changedet-network -d -p 11025:11025 -p 11080:11080 --hostname mailserver test-changedetectionio bash -c 'pip3 install aiosmtpd && python changedetectionio/tests/smtp/smtp-test-server.py'
docker ps
- name: Show docker container state and other debug info
run: |
set -x
echo "Running processes in docker..."
docker ps
- name: Test built container with Pytest (generally as requests/plaintext fetching)
run: |
# Unit tests
echo "run test with unittest"
docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_notification_diff'
docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_watch_model'
docker run test-changedetectionio bash -c 'python3 -m unittest changedetectionio.tests.unit.test_jinja2_security'
# All tests
echo "run test with pytest"
# The default pytest logger_level is TRACE
# To change logger_level for pytest(test/conftest.py),
# append the docker option. e.g. '-e LOGGER_LEVEL=DEBUG'
docker run --name test-cdio-basic-tests --network changedet-network test-changedetectionio bash -c 'cd changedetectionio && ./run_basic_tests.sh'
# PLAYWRIGHT/NODE-> CDP
- name: Playwright and SocketPuppetBrowser - Specific tests in built container
run: |
# Playwright via Sockpuppetbrowser fetch
# tests/visualselector/test_fetch_data.py will do browser steps
docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_content.py'
docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_errorhandling.py'
docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/visualselector/test_fetch_data.py'
docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_custom_js_before_content.py'
- name: Playwright and SocketPuppetBrowser - Headers and requests
run: |
# Settings headers playwright tests - Call back in from Sockpuppetbrowser, check headers
docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000?dumpio=true" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'
- name: Playwright and SocketPuppetBrowser - Restock detection
run: |
# restock detection via playwright - added name=changedet here so that playwright and sockpuppetbrowser can connect to it
docker run --rm --name "changedet" -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-port=5004 --live-server-host=0.0.0.0 tests/restock/test_restock.py'
# STRAIGHT TO CDP
- name: Pyppeteer and SocketPuppetBrowser - Specific tests in built container
if: ${{ inputs.skip-pypuppeteer == false }}
run: |
# Playwright via Sockpuppetbrowser fetch
docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_content.py'
docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_errorhandling.py'
docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/visualselector/test_fetch_data.py'
docker run --rm -e "FLASK_SERVER_NAME=cdio" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network --hostname=cdio test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/fetchers/test_custom_js_before_content.py'
- name: Pyppeteer and SocketPuppetBrowser - Headers and requests checks
if: ${{ inputs.skip-pypuppeteer == false }}
run: |
# Settings headers playwright tests - Call back in from Sockpuppetbrowser, check headers
docker run --name "changedet" --hostname changedet --rm -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "FLASK_SERVER_NAME=changedet" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000?dumpio=true" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'
- name: Pyppeteer and SocketPuppetBrowser - Restock detection
if: ${{ inputs.skip-pypuppeteer == false }}
run: |
# restock detection via playwright - added name=changedet here so that playwright and sockpuppetbrowser can connect to it
docker run --rm --name "changedet" -e "FLASK_SERVER_NAME=changedet" -e "FAST_PUPPETEER_CHROME_FETCHER=True" -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest --live-server-port=5004 --live-server-host=0.0.0.0 tests/restock/test_restock.py'
# SELENIUM
- name: Specific tests in built container for Selenium
run: |
# Selenium fetch
docker run --rm -e "WEBDRIVER_URL=http://selenium:4444/wd/hub" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/fetchers/test_content.py && pytest tests/test_errorhandling.py'
- name: Specific tests in built container for headers and requests checks with Selenium
run: |
docker run --name "changedet" --hostname changedet --rm -e "FLASK_SERVER_NAME=changedet" -e "WEBDRIVER_URL=http://selenium:4444/wd/hub" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio; pytest --live-server-host=0.0.0.0 --live-server-port=5004 tests/test_request.py'
# OTHER STUFF
- name: Test SMTP notification mime types
run: |
# SMTP content types - needs the 'Debug SMTP server/echo message back server' container from above
# "mailserver" hostname defined above
docker run --rm --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/smtp/test_notification_smtp.py'
# @todo Add a test via playwright/puppeteer
# squid with auth is tested in run_proxy_tests.sh -> tests/proxy_list/test_select_custom_proxy.py
- name: Test proxy squid style interaction
run: |
cd changedetectionio
./run_proxy_tests.sh
cd ..
- name: Test proxy SOCKS5 style interaction
run: |
cd changedetectionio
./run_socks_proxy_tests.sh
cd ..
- name: Test custom browser URL
run: |
cd changedetectionio
./run_custom_browser_url_tests.sh
cd ..
- name: Test changedetection.io container starts+runs basically without error
run: |
docker run --name test-changedetectionio -p 5556:5000 -d test-changedetectionio
sleep 3
# Should return 0 (no error) when grep finds it
curl --retry-connrefused --retry 6 -s http://localhost:5556 |grep -q checkbox-uuid
# and IPv6
curl --retry-connrefused --retry 6 -s -g -6 "http://[::1]:5556"|grep -q checkbox-uuid
# Check whether TRACE log is enabled.
# Also, check whether TRACE is came from STDERR
docker logs test-changedetectionio 2>&1 1>/dev/null | grep 'TRACE log is enabled' || exit 1
# Check whether DEBUG is came from STDOUT
docker logs test-changedetectionio 2>/dev/null | grep 'DEBUG' || exit 1
docker kill test-changedetectionio
- name: Test changedetection.io SIGTERM and SIGINT signal shutdown
run: |
echo SIGINT Shutdown request test
docker run --name sig-test -d test-changedetectionio
sleep 3
echo ">>> Sending SIGINT to sig-test container"
docker kill --signal=SIGINT sig-test
sleep 3
# invert the check (it should be not 0/not running)
docker ps
# check signal catch(STDERR) log. Because of
# changedetectionio/__init__.py: logger.add(sys.stderr, level=logger_level)
docker logs sig-test 2>&1 | grep 'Shutdown: Got Signal - SIGINT' || exit 1
test -z "`docker ps|grep sig-test`"
if [ $? -ne 0 ]
then
echo "Looks like container was running when it shouldnt be"
docker ps
exit 1
fi
# @todo - scan the container log to see the right "graceful shutdown" text exists
docker rm sig-test
echo SIGTERM Shutdown request test
docker run --name sig-test -d test-changedetectionio
sleep 3
echo ">>> Sending SIGTERM to sig-test container"
docker kill --signal=SIGTERM sig-test
sleep 3
# invert the check (it should be not 0/not running)
docker ps
# check signal catch(STDERR) log. Because of
# changedetectionio/__init__.py: logger.add(sys.stderr, level=logger_level)
docker logs sig-test 2>&1 | grep 'Shutdown: Got Signal - SIGTERM' || exit 1
test -z "`docker ps|grep sig-test`"
if [ $? -ne 0 ]
then
echo "Looks like container was running when it shouldnt be"
docker ps
exit 1
fi
# @todo - scan the container log to see the right "graceful shutdown" text exists
docker rm sig-test
- name: Dump container log
if: always()
run: |
mkdir output-logs
docker logs test-cdio-basic-tests > output-logs/test-cdio-basic-tests-stdout-${{ env.PYTHON_VERSION }}.txt
docker logs test-cdio-basic-tests 2> output-logs/test-cdio-basic-tests-stderr-${{ env.PYTHON_VERSION }}.txt
- name: Store container log
if: always()
uses: actions/upload-artifact@v4
with:
name: test-cdio-basic-tests-output-py${{ env.PYTHON_VERSION }}
path: output-logs

@@ -2,7 +2,7 @@ Contributing is always welcome!
 I am no professional flask developer, if you know a better way that something can be done, please let me know!
-Otherwise, it's always best to PR into the `dev` branch.
+Otherwise, it's always best to PR into the `master` branch.
 Please be sure that all new functionality has a matching test!

@@ -1,5 +1,11 @@
 # pip dependencies install stage
-FROM python:3.11-slim-bookworm as builder
+# @NOTE! I would love to move to 3.11 but it breaks the async handler in changedetectionio/content_fetchers/puppeteer.py
+# If you know how to fix it, please do! and test it for both 3.10 and 3.11
+ARG PYTHON_VERSION=3.11
+FROM python:${PYTHON_VERSION}-slim-bookworm AS builder
 # See `cryptography` pin comment in requirements.txt
 ARG CRYPTOGRAPHY_DONT_BUILD_RUST=1
@@ -20,16 +26,17 @@ WORKDIR /install
 COPY requirements.txt /requirements.txt
-RUN pip install --target=/dependencies -r /requirements.txt
+# --extra-index-url https://www.piwheels.org/simple is for cryptography module to be prebuilt (or rustc etc needs to be installed)
+RUN pip install --extra-index-url https://www.piwheels.org/simple --target=/dependencies -r /requirements.txt
 # Playwright is an alternative to Selenium
 # Excluded this package from requirements.txt to prevent arm/v6 and arm/v7 builds from failing
 # https://github.com/dgtlmoon/changedetection.io/pull/1067 also musl/alpine (not supported)
-RUN pip install --target=/dependencies playwright~=1.40 \
+RUN pip install --target=/dependencies playwright~=1.41.2 \
     || echo "WARN: Failed to install Playwright. The application can still run, but the Playwright option will be disabled."
 # Final image stage
-FROM python:3.11-slim-bookworm
+FROM python:${PYTHON_VERSION}-slim-bookworm
@@ -58,6 +65,11 @@ COPY changedetectionio /app/changedetectionio
 # Starting wrapper
 COPY changedetection.py /app/changedetection.py
+# Github Action test purpose(test-only.yml).
+# On production, it is effectively LOGGER_LEVEL=''.
+ARG LOGGER_LEVEL=''
+ENV LOGGER_LEVEL "$LOGGER_LEVEL"
 WORKDIR /app
 CMD ["python", "./changedetection.py", "-d", "/datastore"]

@@ -1,8 +1,8 @@
 recursive-include changedetectionio/api *
 recursive-include changedetectionio/blueprint *
+recursive-include changedetectionio/content_fetchers *
 recursive-include changedetectionio/model *
 recursive-include changedetectionio/processors *
-recursive-include changedetectionio/res *
 recursive-include changedetectionio/static *
 recursive-include changedetectionio/templates *
 recursive-include changedetectionio/tests *
@@ -10,6 +10,8 @@ prune changedetectionio/static/package-lock.json
 prune changedetectionio/static/styles/node_modules
 prune changedetectionio/static/styles/package-lock.json
 include changedetection.py
+include requirements.txt
+include README-pip.md
 global-exclude *.pyc
 global-exclude node_modules
 global-exclude venv

@@ -1 +0,0 @@
web: python3 ./changedetection.py -C -d ./datastore -p $PORT

@@ -17,7 +17,7 @@ _Live your data-life pro-actively._
- Nothing to install, access via browser login after signup. - Nothing to install, access via browser login after signup.
- Super fast, no registration needed setup. - Super fast, no registration needed setup.
- Get started watching and receiving website change notifications straight away. - Get started watching and receiving website change notifications straight away.
- See our [tutorials and how-to page for more inspiration](https://changedetection.io/tutorials)
### Target specific parts of the webpage using the Visual Selector tool. ### Target specific parts of the webpage using the Visual Selector tool.
@@ -91,6 +91,14 @@ We [recommend and use Bright Data](https://brightdata.grsm.io/n0r16zf7eivq) glob
Please :star: star :star: this project and help it grow! https://github.com/dgtlmoon/changedetection.io/ Please :star: star :star: this project and help it grow! https://github.com/dgtlmoon/changedetection.io/
### We have a Chrome extension!
Easily add the current web page to your changedetection.io tool, simply install the extension and click "Sync" to connect it to your existing changedetection.io install.
[<img src="./docs/chrome-extension-screenshot.png" style="max-width:80%;" alt="Chrome Extension to easily add the current web-page to detect a change." title="Chrome Extension to easily add the current web-page to detect a change." />](https://chromewebstore.google.com/detail/changedetectionio-website/kefcfmgmlhmankjmnbijimhofdjekbop)
[Goto the Chrome Webstore to download the extension.](https://chromewebstore.google.com/detail/changedetectionio-website/kefcfmgmlhmankjmnbijimhofdjekbop)
## Installation ## Installation
### Docker ### Docker
@@ -98,7 +106,7 @@ Please :star: star :star: this project and help it grow! https://github.com/dgtl
With Docker composer, just clone this repository and.. With Docker composer, just clone this repository and..
```bash ```bash
$ docker-compose up -d $ docker compose up -d
``` ```
Docker standalone Docker standalone
@@ -137,10 +145,10 @@ docker rm $(docker ps -a -f name=changedetection.io -q)
docker run -d --restart always -p "127.0.0.1:5000:5000" -v datastore-volume:/datastore --name changedetection.io dgtlmoon/changedetection.io docker run -d --restart always -p "127.0.0.1:5000:5000" -v datastore-volume:/datastore --name changedetection.io dgtlmoon/changedetection.io
``` ```
### docker-compose ### docker compose
```bash ```bash
docker-compose pull && docker-compose up -d docker compose pull && docker compose up -d
``` ```
See the wiki for more information https://github.com/dgtlmoon/changedetection.io/wiki See the wiki for more information https://github.com/dgtlmoon/changedetection.io/wiki
@@ -249,13 +257,7 @@ Supports managing the website watch list [via our API](https://changedetection.i
Do you use changedetection.io to make money? does it save you time or money? Does it make your life easier? less stressful? Remember, we write this software when we should be doing actual paid work, we have to buy food and pay rent just like you. Do you use changedetection.io to make money? does it save you time or money? Does it make your life easier? less stressful? Remember, we write this software when we should be doing actual paid work, we have to buy food and pay rent just like you.
Firstly, consider taking out a [change detection monthly subscription - unlimited checks and watches](https://changedetection.io?src=github) , even if you don't use it, you still get the warm fuzzy feeling of helping out the project. (And who knows, you might just use it!) Consider taking out an officially supported [website change detection subscription](https://changedetection.io?src=github) , even if you don't use it, you still get the warm fuzzy feeling of helping out the project. (And who knows, you might just use it!)
Or directly donate an amount PayPal [![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.com/donate/?hosted_button_id=7CP6HR9ZCNDYJ)
Or BTC `1PLFN327GyUarpJd7nVe7Reqg9qHx5frNn`
<img src="https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/docs/btc-support.png" style="max-width:50%;" alt="Support us!" />
## Commercial Support
@@ -273,3 +275,9 @@ I offer commercial support, this software is depended on by network security, ae
## Third-party licenses
changedetectionio.html_tools.elementpath_tostring: Copyright (c), 2018-2021, SISSA (Scuola Internazionale Superiore di Studi Avanzati), Licensed under [MIT license](https://github.com/sissaschool/elementpath/blob/master/LICENSE)
## Contributors
Recognition of fantastic contributors to the project
- Constantin Hong https://github.com/Constantin1489

View File

@@ -1,21 +0,0 @@
{
"name": "ChangeDetection.io",
"description": "The best and simplest self-hosted open source website change detection monitoring and notification service.",
"keywords": [
"changedetection",
"website monitoring"
],
"repository": "https://github.com/dgtlmoon/changedetection.io",
"success_url": "/",
"scripts": {
},
"env": {
},
"formation": {
"web": {
"quantity": 1,
"size": "free"
}
},
"image": "heroku/python"
}

View File

@@ -2,21 +2,22 @@
# Read more https://github.com/dgtlmoon/changedetection.io/wiki # Read more https://github.com/dgtlmoon/changedetection.io/wiki
__version__ = '0.45.9' __version__ = '0.45.26'
from distutils.util import strtobool from changedetectionio.strtobool import strtobool
from json.decoder import JSONDecodeError from json.decoder import JSONDecodeError
import os
os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
import eventlet import eventlet
import eventlet.wsgi import eventlet.wsgi
import getopt import getopt
import os
import signal import signal
import socket import socket
import sys import sys
from changedetectionio import store from changedetectionio import store
from changedetectionio.flask_app import changedetection_app from changedetectionio.flask_app import changedetection_app
from loguru import logger
# Only global so we can access it in the signal handler # Only global so we can access it in the signal handler
@@ -28,9 +29,9 @@ def sigshutdown_handler(_signo, _stack_frame):
global app global app
global datastore global datastore
name = signal.Signals(_signo).name name = signal.Signals(_signo).name
print(f'Shutdown: Got Signal - {name} ({_signo}), Saving DB to disk and calling shutdown') logger.critical(f'Shutdown: Got Signal - {name} ({_signo}), Saving DB to disk and calling shutdown')
datastore.sync_to_json() datastore.sync_to_json()
print(f'Sync JSON to disk complete.') logger.success('Sync JSON to disk complete.')
# This will throw a SystemExit exception, because eventlet.wsgi.server doesn't know how to deal with it. # This will throw a SystemExit exception, because eventlet.wsgi.server doesn't know how to deal with it.
# Solution: move to gevent or other server in the future (#2014) # Solution: move to gevent or other server in the future (#2014)
datastore.stop_thread = True datastore.stop_thread = True
@@ -57,13 +58,22 @@ def main():
datastore_path = os.path.join(os.getcwd(), "../datastore") datastore_path = os.path.join(os.getcwd(), "../datastore")
try: try:
opts, args = getopt.getopt(sys.argv[1:], "6Ccsd:h:p:", "port") opts, args = getopt.getopt(sys.argv[1:], "6Ccsd:h:p:l:", "port")
except getopt.GetoptError: except getopt.GetoptError:
print('backend.py -s SSL enable -h [host] -p [port] -d [datastore path]') print('backend.py -s SSL enable -h [host] -p [port] -d [datastore path] -l [debug level - TRACE, DEBUG(default), INFO, SUCCESS, WARNING, ERROR, CRITICAL]')
sys.exit(2) sys.exit(2)
create_datastore_dir = False create_datastore_dir = False
# Set a default logger level
logger_level = 'DEBUG'
# Set a logger level via shell env variable
# Used: Dockerfile for CICD
# To set logger level for pytest, see the app function in tests/conftest.py
if os.getenv("LOGGER_LEVEL"):
level = os.getenv("LOGGER_LEVEL")
logger_level = int(level) if level.isdigit() else level.upper()
for opt, arg in opts: for opt, arg in opts:
if opt == '-s': if opt == '-s':
ssl_mode = True ssl_mode = True
@@ -78,7 +88,7 @@ def main():
datastore_path = arg datastore_path = arg
if opt == '-6': if opt == '-6':
print ("Enabling IPv6 listen support") logger.success("Enabling IPv6 listen support")
ipv6_enabled = True ipv6_enabled = True
# Cleanup (remove text files that arent in the index) # Cleanup (remove text files that arent in the index)
@@ -89,6 +99,25 @@ def main():
if opt == '-C': if opt == '-C':
create_datastore_dir = True create_datastore_dir = True
if opt == '-l':
logger_level = int(arg) if arg.isdigit() else arg.upper()
# Without this, a logger will be duplicated
logger.remove()
try:
log_level_for_stdout = { 'DEBUG', 'SUCCESS' }
logger.configure(handlers=[
{"sink": sys.stdout, "level": logger_level,
"filter" : lambda record: record['level'].name in log_level_for_stdout},
{"sink": sys.stderr, "level": logger_level,
"filter": lambda record: record['level'].name not in log_level_for_stdout},
])
# Catch negative number or wrong log level name
except ValueError:
print("Available log level names: TRACE, DEBUG(default), INFO, SUCCESS,"
" WARNING, ERROR, CRITICAL")
sys.exit(2)
# isnt there some @thingy to attach to each route to tell it, that this route needs a datastore # isnt there some @thingy to attach to each route to tell it, that this route needs a datastore
app_config = {'datastore_path': datastore_path} app_config = {'datastore_path': datastore_path}
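For reference, a hypothetical way to pick a log level, either via the new `-l` flag or via the `LOGGER_LEVEL` environment variable handled above (the script name and Docker invocation below are illustrative, mirroring the run command earlier on this page; valid level names are TRACE, DEBUG (default), INFO, SUCCESS, WARNING, ERROR, CRITICAL):

```bash
# Via the new -l command line flag
python3 ./changedetection.py -d /datastore -l TRACE

# Via the LOGGER_LEVEL environment variable, e.g. when running the Docker image
docker run -d -e LOGGER_LEVEL=WARNING -p "127.0.0.1:5000:5000" \
  -v datastore-volume:/datastore --name changedetection.io dgtlmoon/changedetection.io
```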
@@ -96,17 +125,19 @@ def main():
if create_datastore_dir: if create_datastore_dir:
os.mkdir(app_config['datastore_path']) os.mkdir(app_config['datastore_path'])
else: else:
print( logger.critical(
"ERROR: Directory path for the datastore '{}' does not exist, cannot start, please make sure the directory exists or specify a directory with the -d option.\n" f"ERROR: Directory path for the datastore '{app_config['datastore_path']}'"
"Or use the -C parameter to create the directory.".format(app_config['datastore_path']), file=sys.stderr) f" does not exist, cannot start, please make sure the"
f" directory exists or specify a directory with the -d option.\n"
f"Or use the -C parameter to create the directory.")
sys.exit(2) sys.exit(2)
try: try:
datastore = store.ChangeDetectionStore(datastore_path=app_config['datastore_path'], version_tag=__version__) datastore = store.ChangeDetectionStore(datastore_path=app_config['datastore_path'], version_tag=__version__)
except JSONDecodeError as e: except JSONDecodeError as e:
# Dont' start if the JSON DB looks corrupt # Dont' start if the JSON DB looks corrupt
print ("ERROR: JSON DB or Proxy List JSON at '{}' appears to be corrupt, aborting".format(app_config['datastore_path'])) logger.critical(f"ERROR: JSON DB or Proxy List JSON at '{app_config['datastore_path']}' appears to be corrupt, aborting.")
print(str(e)) logger.critical(str(e))
return return
app = changedetection_app(app_config, datastore) app = changedetection_app(app_config, datastore)
@@ -144,8 +175,9 @@ def main():
# proxy_set_header Host "localhost"; # proxy_set_header Host "localhost";
# proxy_set_header X-Forwarded-Prefix /app; # proxy_set_header X-Forwarded-Prefix /app;
if os.getenv('USE_X_SETTINGS'): if os.getenv('USE_X_SETTINGS'):
print ("USE_X_SETTINGS is ENABLED\n") logger.info("USE_X_SETTINGS is ENABLED")
from werkzeug.middleware.proxy_fix import ProxyFix from werkzeug.middleware.proxy_fix import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app, x_prefix=1, x_host=1) app.wsgi_app = ProxyFix(app.wsgi_app, x_prefix=1, x_host=1)

View File

@@ -1,5 +1,5 @@
import os import os
from distutils.util import strtobool from changedetectionio.strtobool import strtobool
from flask_expects_json import expects_json from flask_expects_json import expects_json
from changedetectionio import queuedWatchMetaData from changedetectionio import queuedWatchMetaData
@@ -30,7 +30,7 @@ class Watch(Resource):
self.update_q = kwargs['update_q'] self.update_q = kwargs['update_q']
# Get information about a single watch, excluding the history list (can be large) # Get information about a single watch, excluding the history list (can be large)
# curl http://localhost:4000/api/v1/watch/<string:uuid> # curl http://localhost:5000/api/v1/watch/<string:uuid>
# @todo - version2 - ?muted and ?paused should be able to be called together, return the watch struct not "OK" # @todo - version2 - ?muted and ?paused should be able to be called together, return the watch struct not "OK"
# ?recheck=true # ?recheck=true
@auth.check_token @auth.check_token
@@ -39,9 +39,9 @@ class Watch(Resource):
@api {get} /api/v1/watch/:uuid Single watch - get data, recheck, pause, mute. @api {get} /api/v1/watch/:uuid Single watch - get data, recheck, pause, mute.
@apiDescription Retrieve watch information and set muted/paused status @apiDescription Retrieve watch information and set muted/paused status
@apiExample {curl} Example usage: @apiExample {curl} Example usage:
curl http://localhost:4000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -H"x-api-key:813031b16330fe25e3780cf0325daa45" curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -H"x-api-key:813031b16330fe25e3780cf0325daa45"
curl "http://localhost:4000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091?muted=unmuted" -H"x-api-key:813031b16330fe25e3780cf0325daa45" curl "http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091?muted=unmuted" -H"x-api-key:813031b16330fe25e3780cf0325daa45"
curl "http://localhost:4000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091?paused=unpaused" -H"x-api-key:813031b16330fe25e3780cf0325daa45" curl "http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091?paused=unpaused" -H"x-api-key:813031b16330fe25e3780cf0325daa45"
@apiName Watch @apiName Watch
@apiGroup Watch @apiGroup Watch
@apiParam {uuid} uuid Watch unique ID. @apiParam {uuid} uuid Watch unique ID.
@@ -84,7 +84,7 @@ class Watch(Resource):
""" """
@api {delete} /api/v1/watch/:uuid Delete a watch and related history @api {delete} /api/v1/watch/:uuid Delete a watch and related history
@apiExample {curl} Example usage: @apiExample {curl} Example usage:
curl http://localhost:4000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -X DELETE -H"x-api-key:813031b16330fe25e3780cf0325daa45" curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -X DELETE -H"x-api-key:813031b16330fe25e3780cf0325daa45"
@apiParam {uuid} uuid Watch unique ID. @apiParam {uuid} uuid Watch unique ID.
@apiName Delete @apiName Delete
@apiGroup Watch @apiGroup Watch
@@ -103,7 +103,7 @@ class Watch(Resource):
@api {put} /api/v1/watch/:uuid Update watch information @api {put} /api/v1/watch/:uuid Update watch information
@apiExample {curl} Example usage: @apiExample {curl} Example usage:
Update (PUT) Update (PUT)
curl http://localhost:4000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -X PUT -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json" -d '{"url": "https://my-nice.com" , "tag": "new list"}' curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091 -X PUT -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json" -d '{"url": "https://my-nice.com" , "tag": "new list"}'
@apiDescription Updates an existing watch using JSON, accepts the same structure as returned in <a href="#api-Watch-Watch">get single watch information</a> @apiDescription Updates an existing watch using JSON, accepts the same structure as returned in <a href="#api-Watch-Watch">get single watch information</a>
@apiParam {uuid} uuid Watch unique ID. @apiParam {uuid} uuid Watch unique ID.
@@ -132,13 +132,14 @@ class WatchHistory(Resource):
self.datastore = kwargs['datastore'] self.datastore = kwargs['datastore']
# Get a list of available history for a watch by UUID # Get a list of available history for a watch by UUID
# curl http://localhost:4000/api/v1/watch/<string:uuid>/history # curl http://localhost:5000/api/v1/watch/<string:uuid>/history
@auth.check_token
def get(self, uuid): def get(self, uuid):
""" """
@api {get} /api/v1/watch/<string:uuid>/history Get a list of all historical snapshots available for a watch @api {get} /api/v1/watch/<string:uuid>/history Get a list of all historical snapshots available for a watch
@apiDescription Requires `uuid`, returns list @apiDescription Requires `uuid`, returns list
@apiExample {curl} Example usage: @apiExample {curl} Example usage:
curl http://localhost:4000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091/history -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json" curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091/history -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json"
{ {
"1676649279": "/tmp/data/6a4b7d5c-fee4-4616-9f43-4ac97046b595/cb7e9be8258368262246910e6a2a4c30.txt", "1676649279": "/tmp/data/6a4b7d5c-fee4-4616-9f43-4ac97046b595/cb7e9be8258368262246910e6a2a4c30.txt",
"1677092785": "/tmp/data/6a4b7d5c-fee4-4616-9f43-4ac97046b595/e20db368d6fc633e34f559ff67bb4044.txt", "1677092785": "/tmp/data/6a4b7d5c-fee4-4616-9f43-4ac97046b595/e20db368d6fc633e34f559ff67bb4044.txt",
@@ -166,26 +167,36 @@ class WatchSingleHistory(Resource):
@api {get} /api/v1/watch/<string:uuid>/history/<int:timestamp> Get single snapshot from watch @api {get} /api/v1/watch/<string:uuid>/history/<int:timestamp> Get single snapshot from watch
@apiDescription Requires watch `uuid` and `timestamp`. `timestamp` of "`latest`" for latest available snapshot, or <a href="#api-Watch_History-Get_list_of_available_stored_snapshots_for_watch">use the list returned here</a> @apiDescription Requires watch `uuid` and `timestamp`. `timestamp` of "`latest`" for latest available snapshot, or <a href="#api-Watch_History-Get_list_of_available_stored_snapshots_for_watch">use the list returned here</a>
@apiExample {curl} Example usage: @apiExample {curl} Example usage:
curl http://localhost:4000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091/history/1677092977 -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json" curl http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091/history/1677092977 -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json"
@apiName Get single snapshot content @apiName Get single snapshot content
@apiGroup Watch History @apiGroup Watch History
@apiParam {String} [html] Optional Set to =1 to return the last HTML (only stores last 2 snapshots, use `latest` as timestamp)
@apiSuccess (200) {String} OK @apiSuccess (200) {String} OK
@apiSuccess (404) {String} ERR Not found @apiSuccess (404) {String} ERR Not found
""" """
watch = self.datastore.data['watching'].get(uuid) watch = self.datastore.data['watching'].get(uuid)
if not watch: if not watch:
abort(404, message='No watch exists with the UUID of {}'.format(uuid)) abort(404, message=f"No watch exists with the UUID of {uuid}")
if not len(watch.history): if not len(watch.history):
abort(404, message='Watch found but no history exists for the UUID {}'.format(uuid)) abort(404, message=f"Watch found but no history exists for the UUID {uuid}")
if timestamp == 'latest': if timestamp == 'latest':
timestamp = list(watch.history.keys())[-1] timestamp = list(watch.history.keys())[-1]
content = watch.get_history_snapshot(timestamp) if request.args.get('html'):
content = watch.get_fetched_html(timestamp)
if content:
response = make_response(content, 200)
response.mimetype = "text/html"
else:
response = make_response("No content found", 404)
response.mimetype = "text/plain"
else:
content = watch.get_history_snapshot(timestamp)
response = make_response(content, 200)
response.mimetype = "text/plain"
response = make_response(content, 200)
response.mimetype = "text/plain"
return response return response
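A usage sketch for the new `html` parameter, mirroring the existing curl examples (same placeholder UUID and API key as above); only the last two HTML snapshots are kept, so `latest` is the sensible timestamp here:

```bash
# Latest stored text snapshot (existing behaviour)
curl "http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091/history/latest" -H"x-api-key:813031b16330fe25e3780cf0325daa45"

# Last fetched HTML instead of the extracted text (new ?html=1 parameter)
curl "http://localhost:5000/api/v1/watch/cc0cfffa-f449-477b-83ea-0caafd1dc091/history/latest?html=1" -H"x-api-key:813031b16330fe25e3780cf0325daa45"
```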
@@ -202,7 +213,7 @@ class CreateWatch(Resource):
@api {post} /api/v1/watch Create a single watch @api {post} /api/v1/watch Create a single watch
@apiDescription Requires atleast `url` set, can accept the same structure as <a href="#api-Watch-Watch">get single watch information</a> to create. @apiDescription Requires atleast `url` set, can accept the same structure as <a href="#api-Watch-Watch">get single watch information</a> to create.
@apiExample {curl} Example usage: @apiExample {curl} Example usage:
curl http://localhost:4000/api/v1/watch -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json" -d '{"url": "https://my-nice.com" , "tag": "nice list"}' curl http://localhost:5000/api/v1/watch -H"x-api-key:813031b16330fe25e3780cf0325daa45" -H "Content-Type: application/json" -d '{"url": "https://my-nice.com" , "tag": "nice list"}'
@apiName Create @apiName Create
@apiGroup Watch @apiGroup Watch
@apiSuccess (200) {String} OK Was created @apiSuccess (200) {String} OK Was created
@@ -245,7 +256,7 @@ class CreateWatch(Resource):
@api {get} /api/v1/watch List watches @api {get} /api/v1/watch List watches
@apiDescription Return concise list of available watches and some very basic info @apiDescription Return concise list of available watches and some very basic info
@apiExample {curl} Example usage: @apiExample {curl} Example usage:
curl http://localhost:4000/api/v1/watch -H"x-api-key:813031b16330fe25e3780cf0325daa45" curl http://localhost:5000/api/v1/watch -H"x-api-key:813031b16330fe25e3780cf0325daa45"
{ {
"6a4b7d5c-fee4-4616-9f43-4ac97046b595": { "6a4b7d5c-fee4-4616-9f43-4ac97046b595": {
"last_changed": 1677103794, "last_changed": 1677103794,
@@ -363,7 +374,7 @@ class SystemInfo(Resource):
@api {get} /api/v1/systeminfo Return system info @api {get} /api/v1/systeminfo Return system info
@apiDescription Return some info about the current system state @apiDescription Return some info about the current system state
@apiExample {curl} Example usage: @apiExample {curl} Example usage:
curl http://localhost:4000/api/v1/systeminfo -H"x-api-key:813031b16330fe25e3780cf0325daa45" curl http://localhost:5000/api/v1/systeminfo -H"x-api-key:813031b16330fe25e3780cf0325daa45"
HTTP/1.0 200 HTTP/1.0 200
{ {
'queue_size': 10 , 'queue_size': 10 ,

View File

@@ -0,0 +1,7 @@
- This needs an abstraction to directly handle the puppeteer connection methods
- Then remove the playwright stuff
- Remove hack redirect at line 65 changedetectionio/processors/__init__.py
The screenshots are base64 encoded/decoded which is very CPU intensive for large screenshots (in playwright) but not
in the direct puppeteer connection (they are binary end to end)

View File

@@ -4,30 +4,21 @@
# Why? # Why?
# `browsersteps_playwright_browser_interface.chromium.connect_over_cdp()` will only run once without async() # `browsersteps_playwright_browser_interface.chromium.connect_over_cdp()` will only run once without async()
# - this flask app is not async() # - this flask app is not async()
# - browserless has a single timeout/keepalive which applies to the session made at .connect_over_cdp() # - A single timeout/keepalive which applies to the session made at .connect_over_cdp()
# #
# So it means that we must unfortunately for now just keep a single timer since .connect_over_cdp() was run # So it means that we must unfortunately for now just keep a single timer since .connect_over_cdp() was run
# and know when that reaches timeout/keepalive :( when that time is up, restart the connection and tell the user # and know when that reaches timeout/keepalive :( when that time is up, restart the connection and tell the user
# that their time is up, insert another coin. (reload) # that their time is up, insert another coin. (reload)
# #
# Bigger picture
# - It's horrible that we have this click+wait deal, some nice socket.io solution using something similar
# to what the browserless debug UI already gives us would be smarter..
# #
# OR
# - Some API call that should be hacked into browserless or playwright that we can "/api/bump-keepalive/{session_id}/60"
# So we can tell it that we need more time (run this on each action)
#
# OR
# - use multiprocessing to bump this over to its own process and add some transport layer (queue/pipes)
from distutils.util import strtobool from changedetectionio.strtobool import strtobool
from flask import Blueprint, request, make_response from flask import Blueprint, request, make_response
import logging
import os import os
from changedetectionio.store import ChangeDetectionStore from changedetectionio.store import ChangeDetectionStore
from changedetectionio.flask_app import login_optionally_required from changedetectionio.flask_app import login_optionally_required
from loguru import logger
browsersteps_sessions = {} browsersteps_sessions = {}
io_interface_context = None io_interface_context = None
@@ -58,7 +49,7 @@ def construct_blueprint(datastore: ChangeDetectionStore):
io_interface_context = io_interface_context.start() io_interface_context = io_interface_context.start()
keepalive_ms = ((keepalive_seconds + 3) * 1000) keepalive_ms = ((keepalive_seconds + 3) * 1000)
base_url = os.getenv('PLAYWRIGHT_DRIVER_URL', '') base_url = os.getenv('PLAYWRIGHT_DRIVER_URL', '').strip('"')
a = "?" if not '?' in base_url else '&' a = "?" if not '?' in base_url else '&'
base_url += a + f"timeout={keepalive_ms}" base_url += a + f"timeout={keepalive_ms}"
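As a note on the `.strip('"')` added above: it tolerates an environment value that was accidentally wrapped in literal quotes. A hypothetical shell illustration, using the default driver URL shown elsewhere on this page:

```bash
# Intended form - no embedded quotes in the value itself
export PLAYWRIGHT_DRIVER_URL=ws://playwright-chrome:3000

# Accidentally quoted value - the quotes become part of the string;
# the .strip('"') above now removes them instead of producing a broken URL
export PLAYWRIGHT_DRIVER_URL='"ws://playwright-chrome:3000"'
```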
@@ -88,12 +79,14 @@ def construct_blueprint(datastore: ChangeDetectionStore):
if parsed.password: if parsed.password:
proxy['password'] = parsed.password proxy['password'] = parsed.password
print("Browser Steps: UUID {} selected proxy {}".format(watch_uuid, proxy_url)) logger.debug(f"Browser Steps: UUID {watch_uuid} selected proxy {proxy_url}")
# Tell Playwright to connect to Chrome and setup a new session via our stepper interface # Tell Playwright to connect to Chrome and setup a new session via our stepper interface
browsersteps_start_session['browserstepper'] = browser_steps.browsersteps_live_ui( browsersteps_start_session['browserstepper'] = browser_steps.browsersteps_live_ui(
playwright_browser=browsersteps_start_session['browser'], playwright_browser=browsersteps_start_session['browser'],
proxy=proxy) proxy=proxy,
start_url=datastore.data['watching'][watch_uuid].get('url')
)
# For test # For test
#browsersteps_start_session['browserstepper'].action_goto_url(value="http://example.com?time="+str(time.time())) #browsersteps_start_session['browserstepper'].action_goto_url(value="http://example.com?time="+str(time.time()))
@@ -115,10 +108,10 @@ def construct_blueprint(datastore: ChangeDetectionStore):
if not watch_uuid: if not watch_uuid:
return make_response('No Watch UUID specified', 500) return make_response('No Watch UUID specified', 500)
print("Starting connection with playwright") logger.debug("Starting connection with playwright")
logging.debug("browser_steps.py connecting") logger.debug("browser_steps.py connecting")
browsersteps_sessions[browsersteps_session_id] = start_browsersteps_session(watch_uuid) browsersteps_sessions[browsersteps_session_id] = start_browsersteps_session(watch_uuid)
print("Starting connection with playwright - done") logger.debug("Starting connection with playwright - done")
return {'browsersteps_session_id': browsersteps_session_id} return {'browsersteps_session_id': browsersteps_session_id}
@login_optionally_required @login_optionally_required
@@ -176,11 +169,6 @@ def construct_blueprint(datastore: ChangeDetectionStore):
step_n = int(request.form.get('step_n')) step_n = int(request.form.get('step_n'))
is_last_step = strtobool(request.form.get('is_last_step')) is_last_step = strtobool(request.form.get('is_last_step'))
if step_operation == 'Goto site':
step_operation = 'goto_url'
step_optional_value = datastore.data['watching'][uuid].get('url')
step_selector = None
# @todo try.. accept.. nice errors not popups.. # @todo try.. accept.. nice errors not popups..
try: try:
@@ -189,7 +177,7 @@ def construct_blueprint(datastore: ChangeDetectionStore):
optional_value=step_optional_value) optional_value=step_optional_value)
except Exception as e: except Exception as e:
print("Exception when calling step operation", step_operation, str(e)) logger.error(f"Exception when calling step operation {step_operation} {str(e)}")
# Try to find something of value to give back to the user # Try to find something of value to give back to the user
return make_response(str(e).splitlines()[0], 401) return make_response(str(e).splitlines()[0], 401)
@@ -199,8 +187,10 @@ def construct_blueprint(datastore: ChangeDetectionStore):
u = browsersteps_sessions[browsersteps_session_id]['browserstepper'].page.url u = browsersteps_sessions[browsersteps_session_id]['browserstepper'].page.url
if is_last_step and u: if is_last_step and u:
(screenshot, xpath_data) = browsersteps_sessions[browsersteps_session_id]['browserstepper'].request_visualselector_data() (screenshot, xpath_data) = browsersteps_sessions[browsersteps_session_id]['browserstepper'].request_visualselector_data()
datastore.save_screenshot(watch_uuid=uuid, screenshot=screenshot) watch = datastore.data['watching'].get(uuid)
datastore.save_xpath_data(watch_uuid=uuid, data=xpath_data) if watch:
watch.save_screenshot(screenshot=screenshot)
watch.save_xpath_data(data=xpath_data)
# if not this_session.page: # if not this_session.page:
# cleanup_playwright_session() # cleanup_playwright_session()

View File

@@ -4,6 +4,10 @@ import os
import time import time
import re import re
from random import randint from random import randint
from loguru import logger
from changedetectionio.content_fetchers.base import manage_user_agent
from changedetectionio.safe_jinja import render as jinja_render
# Two flags, tell the JS which of the "Selector" or "Value" field should be enabled in the front end # Two flags, tell the JS which of the "Selector" or "Value" field should be enabled in the front end
# 0- off, 1- on # 0- off, 1- on
@@ -45,6 +49,10 @@ browser_step_ui_config = {'Choose one': '0 0',
# ONLY Works in Playwright because we need the fullscreen screenshot # ONLY Works in Playwright because we need the fullscreen screenshot
class steppable_browser_interface(): class steppable_browser_interface():
page = None page = None
start_url = None
def __init__(self, start_url):
self.start_url = start_url
# Convert and perform "Click Button" for example # Convert and perform "Click Button" for example
def call_action(self, action_name, selector=None, optional_value=None): def call_action(self, action_name, selector=None, optional_value=None):
@@ -53,7 +61,7 @@ class steppable_browser_interface():
if call_action_name == 'choose_one': if call_action_name == 'choose_one':
return return
print("> action calling", call_action_name) logger.debug(f"> Action calling '{call_action_name}'")
# https://playwright.dev/python/docs/selectors#xpath-selectors # https://playwright.dev/python/docs/selectors#xpath-selectors
if selector and selector.startswith('/') and not selector.startswith('//'): if selector and selector.startswith('/') and not selector.startswith('//'):
selector = "xpath=" + selector selector = "xpath=" + selector
@@ -61,18 +69,16 @@ class steppable_browser_interface():
action_handler = getattr(self, "action_" + call_action_name) action_handler = getattr(self, "action_" + call_action_name)
# Support for Jinja2 variables in the value and selector # Support for Jinja2 variables in the value and selector
from jinja2 import Environment
jinja2_env = Environment(extensions=['jinja2_time.TimeExtension'])
if selector and ('{%' in selector or '{{' in selector): if selector and ('{%' in selector or '{{' in selector):
selector = str(jinja2_env.from_string(selector).render()) selector = jinja_render(template_str=selector)
if optional_value and ('{%' in optional_value or '{{' in optional_value): if optional_value and ('{%' in optional_value or '{{' in optional_value):
optional_value = str(jinja2_env.from_string(optional_value).render()) optional_value = jinja_render(template_str=optional_value)
action_handler(selector, optional_value) action_handler(selector, optional_value)
self.page.wait_for_timeout(1.5 * 1000) self.page.wait_for_timeout(1.5 * 1000)
print("Call action done in", time.time() - now) logger.debug(f"Call action done in {time.time()-now:.2f}s")
def action_goto_url(self, selector=None, value=None): def action_goto_url(self, selector=None, value=None):
# self.page.set_viewport_size({"width": 1280, "height": 5000}) # self.page.set_viewport_size({"width": 1280, "height": 5000})
@@ -82,9 +88,13 @@ class steppable_browser_interface():
#and also wait for seconds ? #and also wait for seconds ?
#await page.waitForTimeout(1000); #await page.waitForTimeout(1000);
#await page.waitForTimeout(extra_wait_ms); #await page.waitForTimeout(extra_wait_ms);
print("Time to goto URL ", time.time() - now) logger.debug(f"Time to goto URL {time.time()-now:.2f}s")
return response return response
# Incase they request to go back to the start
def action_goto_site(self, selector=None, value=None):
return self.action_goto_url(value=self.start_url)
def action_click_element_containing_text(self, selector=None, value=''): def action_click_element_containing_text(self, selector=None, value=''):
if not len(value.strip()): if not len(value.strip()):
return return
@@ -103,7 +113,7 @@ class steppable_browser_interface():
return response return response
def action_click_element(self, selector, value): def action_click_element(self, selector, value):
print("Clicking element") logger.debug("Clicking element")
if not len(selector.strip()): if not len(selector.strip()):
return return
@@ -111,7 +121,7 @@ class steppable_browser_interface():
def action_click_element_if_exists(self, selector, value): def action_click_element_if_exists(self, selector, value):
import playwright._impl._errors as _api_types import playwright._impl._errors as _api_types
print("Clicking element if exists") logger.debug("Clicking element if exists")
if not len(selector.strip()): if not len(selector.strip()):
return return
try: try:
@@ -168,7 +178,7 @@ class steppable_browser_interface():
self.page.locator(selector, timeout=1000).uncheck(timeout=1000) self.page.locator(selector, timeout=1000).uncheck(timeout=1000)
# Responsible for maintaining a live 'context' with browserless # Responsible for maintaining a live 'context' with the chrome CDP
# @todo - how long do contexts live for anyway? # @todo - how long do contexts live for anyway?
class browsersteps_live_ui(steppable_browser_interface): class browsersteps_live_ui(steppable_browser_interface):
context = None context = None
@@ -177,6 +187,7 @@ class browsersteps_live_ui(steppable_browser_interface):
stale = False stale = False
# bump and kill this if idle after X sec # bump and kill this if idle after X sec
age_start = 0 age_start = 0
headers = {}
# use a special driver, maybe locally etc # use a special driver, maybe locally etc
command_executor = os.getenv( command_executor = os.getenv(
@@ -191,9 +202,11 @@ class browsersteps_live_ui(steppable_browser_interface):
browser_type = os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').strip('"') browser_type = os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').strip('"')
def __init__(self, playwright_browser, proxy=None): def __init__(self, playwright_browser, proxy=None, headers=None, start_url=None):
self.headers = headers or {}
self.age_start = time.time() self.age_start = time.time()
self.playwright_browser = playwright_browser self.playwright_browser = playwright_browser
self.start_url = start_url
if self.context is None: if self.context is None:
self.connect(proxy=proxy) self.connect(proxy=proxy)
@@ -205,16 +218,17 @@ class browsersteps_live_ui(steppable_browser_interface):
# @todo handle multiple contexts, bind a unique id from the browser on each req? # @todo handle multiple contexts, bind a unique id from the browser on each req?
self.context = self.playwright_browser.new_context( self.context = self.playwright_browser.new_context(
# @todo accept_downloads=False, # Should never be needed
# user_agent=request_headers['User-Agent'] if request_headers.get('User-Agent') else 'Mozilla/5.0', bypass_csp=True, # This is needed to enable JavaScript execution on GitHub and others
# proxy=self.proxy, extra_http_headers=self.headers,
# This is needed to enable JavaScript execution on GitHub and others ignore_https_errors=True,
bypass_csp=True, proxy=proxy,
# Should never be needed service_workers=os.getenv('PLAYWRIGHT_SERVICE_WORKERS', 'allow'),
accept_downloads=False, # Should be `allow` or `block` - sites like YouTube can transmit large amounts of data via Service Workers
proxy=proxy user_agent=manage_user_agent(headers=self.headers),
) )
self.page = self.context.new_page() self.page = self.context.new_page()
# self.page.set_default_navigation_timeout(keep_open) # self.page.set_default_navigation_timeout(keep_open)
@@ -227,11 +241,11 @@ class browsersteps_live_ui(steppable_browser_interface):
# Listen for all console events and handle errors # Listen for all console events and handle errors
self.page.on("console", lambda msg: print(f"Browser steps console - {msg.type}: {msg.text} {msg.args}")) self.page.on("console", lambda msg: print(f"Browser steps console - {msg.type}: {msg.text} {msg.args}"))
print("Time to browser setup", time.time() - now) logger.debug(f"Time to browser setup {time.time()-now:.2f}s")
self.page.wait_for_timeout(1 * 1000) self.page.wait_for_timeout(1 * 1000)
def mark_as_closed(self): def mark_as_closed(self):
print("Page closed, cleaning up..") logger.debug("Page closed, cleaning up..")
@property @property
def has_expired(self): def has_expired(self):
@@ -241,8 +255,9 @@ class browsersteps_live_ui(steppable_browser_interface):
def get_current_state(self): def get_current_state(self):
"""Return the screenshot and interactive elements mapping, generally always called after action_()""" """Return the screenshot and interactive elements mapping, generally always called after action_()"""
from pkg_resources import resource_string import importlib.resources
xpath_element_js = resource_string(__name__, "../../res/xpath_element_scraper.js").decode('utf-8') xpath_element_js = importlib.resources.files("changedetectionio.content_fetchers.res").joinpath('xpath_element_scraper.js').read_text()
now = time.time() now = time.time()
self.page.wait_for_timeout(1 * 1000) self.page.wait_for_timeout(1 * 1000)
@@ -257,7 +272,7 @@ class browsersteps_live_ui(steppable_browser_interface):
xpath_data = self.page.evaluate("async () => {" + xpath_element_js + "}") xpath_data = self.page.evaluate("async () => {" + xpath_element_js + "}")
# So the JS will find the smallest one first # So the JS will find the smallest one first
xpath_data['size_pos'] = sorted(xpath_data['size_pos'], key=lambda k: k['width'] * k['height'], reverse=True) xpath_data['size_pos'] = sorted(xpath_data['size_pos'], key=lambda k: k['width'] * k['height'], reverse=True)
print("Time to complete get_current_state of browser", time.time() - now) logger.debug(f"Time to complete get_current_state of browser {time.time()-now:.2f}s")
# except # except
# playwright._impl._api_types.Error: Browser closed. # playwright._impl._api_types.Error: Browser closed.
# @todo show some countdown timer? # @todo show some countdown timer?
@@ -273,14 +288,12 @@ class browsersteps_live_ui(steppable_browser_interface):
:param current_include_filters: :param current_include_filters:
:return: :return:
""" """
import importlib.resources
self.page.evaluate("var include_filters=''") self.page.evaluate("var include_filters=''")
from pkg_resources import resource_string xpath_element_js = importlib.resources.files("changedetectionio.content_fetchers.res").joinpath('xpath_element_scraper.js').read_text()
# The code that scrapes elements and makes a list of elements/size/position to click on in the VisualSelector from changedetectionio.content_fetchers import visualselector_xpath_selectors
xpath_element_js = resource_string(__name__, "../../res/xpath_element_scraper.js").decode('utf-8')
from changedetectionio.content_fetcher import visualselector_xpath_selectors
xpath_element_js = xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors) xpath_element_js = xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors)
xpath_data = self.page.evaluate("async () => {" + xpath_element_js + "}") xpath_data = self.page.evaluate("async () => {" + xpath_element_js + "}")
screenshot = self.page.screenshot(type='jpeg', full_page=True, quality=int(os.getenv("PLAYWRIGHT_SCREENSHOT_QUALITY", 72))) screenshot = self.page.screenshot(type='jpeg', full_page=True, quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))
return (screenshot, xpath_data) return (screenshot, xpath_data)

View File

@@ -1,5 +1,4 @@
from playwright.sync_api import PlaywrightContextManager from playwright.sync_api import PlaywrightContextManager
import asyncio
# So playwright wants to run as a context manager, but we do something horrible and hacky # So playwright wants to run as a context manager, but we do something horrible and hacky
# we are holding the session open for as long as possible, then shutting it down, and opening a new one # we are holding the session open for as long as possible, then shutting it down, and opening a new one

View File

@@ -1,14 +1,11 @@
from concurrent.futures import ThreadPoolExecutor from concurrent.futures import ThreadPoolExecutor
from changedetectionio.store import ChangeDetectionStore
from functools import wraps from functools import wraps
from flask import Blueprint from flask import Blueprint
from flask_login import login_required from flask_login import login_required
from changedetectionio.processors import text_json_diff
from changedetectionio.store import ChangeDetectionStore
STATUS_CHECKING = 0 STATUS_CHECKING = 0
STATUS_FAILED = 1 STATUS_FAILED = 1
STATUS_OK = 2 STATUS_OK = 2
@@ -32,10 +29,11 @@ def construct_blueprint(datastore: ChangeDetectionStore):
@threadpool @threadpool
def long_task(uuid, preferred_proxy): def long_task(uuid, preferred_proxy):
import time import time
from changedetectionio import content_fetcher from changedetectionio.content_fetchers import exceptions as content_fetcher_exceptions
from changedetectionio.processors import text_json_diff
from changedetectionio.safe_jinja import render as jinja_render
status = {'status': '', 'length': 0, 'text': ''} status = {'status': '', 'length': 0, 'text': ''}
from jinja2 import Environment, BaseLoader
contents = '' contents = ''
now = time.time() now = time.time()
@@ -43,7 +41,7 @@ def construct_blueprint(datastore: ChangeDetectionStore):
update_handler = text_json_diff.perform_site_check(datastore=datastore, watch_uuid=uuid) update_handler = text_json_diff.perform_site_check(datastore=datastore, watch_uuid=uuid)
update_handler.call_browser() update_handler.call_browser()
# title, size is len contents not len xfer # title, size is len contents not len xfer
except content_fetcher.Non200ErrorCodeReceived as e: except content_fetcher_exceptions.Non200ErrorCodeReceived as e:
if e.status_code == 404: if e.status_code == 404:
status.update({'status': 'OK', 'length': len(contents), 'text': f"OK but 404 (page not found)"}) status.update({'status': 'OK', 'length': len(contents), 'text': f"OK but 404 (page not found)"})
elif e.status_code == 403 or e.status_code == 401: elif e.status_code == 403 or e.status_code == 401:
@@ -52,12 +50,12 @@ def construct_blueprint(datastore: ChangeDetectionStore):
status.update({'status': 'ERROR', 'length': len(contents), 'text': f"Status code: {e.status_code}"}) status.update({'status': 'ERROR', 'length': len(contents), 'text': f"Status code: {e.status_code}"})
except text_json_diff.FilterNotFoundInResponse: except text_json_diff.FilterNotFoundInResponse:
status.update({'status': 'OK', 'length': len(contents), 'text': f"OK but CSS/xPath filter not found (page changed layout?)"}) status.update({'status': 'OK', 'length': len(contents), 'text': f"OK but CSS/xPath filter not found (page changed layout?)"})
except content_fetcher.EmptyReply as e: except content_fetcher_exceptions.EmptyReply as e:
if e.status_code == 403 or e.status_code == 401: if e.status_code == 403 or e.status_code == 401:
status.update({'status': 'ERROR OTHER', 'length': len(contents), 'text': f"Got empty reply with code {e.status_code} - Access denied"}) status.update({'status': 'ERROR OTHER', 'length': len(contents), 'text': f"Got empty reply with code {e.status_code} - Access denied"})
else: else:
status.update({'status': 'ERROR OTHER', 'length': len(contents) if contents else 0, 'text': f"Empty reply with code {e.status_code}, needs chrome?"}) status.update({'status': 'ERROR OTHER', 'length': len(contents) if contents else 0, 'text': f"Empty reply with code {e.status_code}, needs chrome?"})
except content_fetcher.ReplyWithContentButNoText as e: except content_fetcher_exceptions.ReplyWithContentButNoText as e:
txt = f"Got reply but with no content - Status code {e.status_code} - It's possible that the filters were found, but contained no usable text (or contained only an image)." txt = f"Got reply but with no content - Status code {e.status_code} - It's possible that the filters were found, but contained no usable text (or contained only an image)."
status.update({'status': 'ERROR', 'text': txt}) status.update({'status': 'ERROR', 'text': txt})
except Exception as e: except Exception as e:
@@ -66,7 +64,9 @@ def construct_blueprint(datastore: ChangeDetectionStore):
status.update({'status': 'OK', 'length': len(contents), 'text': ''}) status.update({'status': 'OK', 'length': len(contents), 'text': ''})
if status.get('text'): if status.get('text'):
status['text'] = Environment(loader=BaseLoader()).from_string('{{text|e}}').render({'text': status['text']}) # parse 'text' as text for safety
v = {'text': status['text']}
status['text'] = jinja_render(template_str='{{text|e}}', **v)
status['time'] = "{:.2f}s".format(time.time() - now) status['time'] = "{:.2f}s".format(time.time() - now)

View File

@@ -1,5 +1,5 @@
from distutils.util import strtobool from changedetectionio.strtobool import strtobool
from flask import Blueprint, flash, redirect, url_for from flask import Blueprint, flash, redirect, url_for
from flask_login import login_required from flask_login import login_required
from changedetectionio.store import ChangeDetectionStore from changedetectionio.store import ChangeDetectionStore
@@ -18,8 +18,7 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q: PriorityQueue
def accept(uuid): def accept(uuid):
datastore.data['watching'][uuid]['track_ldjson_price_data'] = PRICE_DATA_TRACK_ACCEPT datastore.data['watching'][uuid]['track_ldjson_price_data'] = PRICE_DATA_TRACK_ACCEPT
update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid, 'skip_when_checksum_same': False})) update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid, 'skip_when_checksum_same': False}))
return redirect(url_for("form_watch_checknow", uuid=uuid)) return redirect(url_for("index"))
@login_required @login_required
@price_data_follower_blueprint.route("/<string:uuid>/reject", methods=['GET']) @price_data_follower_blueprint.route("/<string:uuid>/reject", methods=['GET'])

View File

@@ -11,9 +11,16 @@ def construct_blueprint(datastore: ChangeDetectionStore):
def tags_overview_page(): def tags_overview_page():
from .form import SingleTag from .form import SingleTag
add_form = SingleTag(request.form) add_form = SingleTag(request.form)
sorted_tags = sorted(datastore.data['settings']['application'].get('tags').items(), key=lambda x: x[1]['title'])
from collections import Counter
tag_count = Counter(tag for watch in datastore.data['watching'].values() if watch.get('tags') for tag in watch['tags'])
output = render_template("groups-overview.html", output = render_template("groups-overview.html",
available_tags=sorted_tags,
form=add_form, form=add_form,
available_tags=datastore.data['settings']['application'].get('tags', {}), tag_count=tag_count
) )
return output return output

View File

@@ -1,9 +1,9 @@
{% extends 'base.html' %} {% extends 'base.html' %}
{% block content %} {% block content %}
{% from '_helpers.jinja' import render_field, render_checkbox_field, render_button %} {% from '_helpers.html' import render_field, render_checkbox_field, render_button %}
{% from '_common_fields.jinja' import render_common_settings_form %} {% from '_common_fields.html' import render_common_settings_form %}
<script> <script>
const notification_base_url="{{url_for('ajax_callback_send_notification_test')}}"; const notification_base_url="{{url_for('ajax_callback_send_notification_test', mode="group-settings")}}";
</script> </script>
<script src="{{url_for('static_content', group='js', filename='tabs.js')}}" defer></script> <script src="{{url_for('static_content', group='js', filename='tabs.js')}}" defer></script>
@@ -63,7 +63,7 @@ xpath://body/div/span[contains(@class, 'example-class')]",
<ul> <ul>
<li>JSONPath: Prefix with <code>json:</code>, use <code>json:$</code> to force re-formatting if required, <a href="https://jsonpath.com/" target="new">test your JSONPath here</a>.</li> <li>JSONPath: Prefix with <code>json:</code>, use <code>json:$</code> to force re-formatting if required, <a href="https://jsonpath.com/" target="new">test your JSONPath here</a>.</li>
{% if jq_support %} {% if jq_support %}
<li>jq: Prefix with <code>jq:</code> and <a href="https://jqplay.org/" target="new">test your jq here</a>. Using <a href="https://stedolan.github.io/jq/" target="new">jq</a> allows for complex filtering and processing of JSON data with built-in functions, regex, filtering, and more. See examples and documentation <a href="https://stedolan.github.io/jq/manual/" target="new">here</a>.</li> <li>jq: Prefix with <code>jq:</code> and <a href="https://jqplay.org/" target="new">test your jq here</a>. Using <a href="https://stedolan.github.io/jq/" target="new">jq</a> allows for complex filtering and processing of JSON data with built-in functions, regex, filtering, and more. See examples and documentation <a href="https://stedolan.github.io/jq/manual/" target="new">here</a>. Prefix <code>jqraw:</code> outputs the results as text instead of a JSON list.</li>
{% else %} {% else %}
<li>jq support not installed</li> <li>jq support not installed</li>
{% endif %} {% endif %}
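To make the difference concrete, here is a rough analogy using the standalone `jq` command line tool (illustrative only; inside changedetection.io the prefix in the filter field is all that changes): `jq:` keeps JSON-encoded values, while `jqraw:` emits plain text, much like jq's raw output mode:

```bash
echo '{"products": [{"name": "Widget", "price": 9.99}]}' | jq '.products[].name'
# "Widget"   <- JSON-encoded result, comparable to the jq: prefix

echo '{"products": [{"name": "Widget", "price": 9.99}]}' | jq -r '.products[].name'
# Widget     <- raw text, comparable to the jqraw: prefix
```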

View File

@@ -1,6 +1,6 @@
{% extends 'base.html' %} {% extends 'base.html' %}
{% block content %} {% block content %}
{% from '_helpers.jinja' import render_simple_field, render_field %} {% from '_helpers.html' import render_simple_field, render_field %}
<script src="{{url_for('static_content', group='js', filename='jquery-3.6.0.min.js')}}"></script> <script src="{{url_for('static_content', group='js', filename='jquery-3.6.0.min.js')}}"></script>
<div class="box"> <div class="box">
@@ -27,6 +27,7 @@
<thead> <thead>
<tr> <tr>
<th></th> <th></th>
<th># Watches</th>
<th>Tag / Label name</th> <th>Tag / Label name</th>
<th></th> <th></th>
</tr> </tr>
@@ -40,12 +41,13 @@
<td colspan="3">No website organisational tags/groups configured</td> <td colspan="3">No website organisational tags/groups configured</td>
</tr> </tr>
{% endif %} {% endif %}
{% for uuid, tag in available_tags.items() %} {% for uuid, tag in available_tags %}
<tr id="{{ uuid }}" class="{{ loop.cycle('pure-table-odd', 'pure-table-even') }}"> <tr id="{{ uuid }}" class="{{ loop.cycle('pure-table-odd', 'pure-table-even') }}">
<td class="watch-controls"> <td class="watch-controls">
<a class="link-mute state-{{'on' if tag.notification_muted else 'off'}}" href="{{url_for('tags.mute', uuid=tag.uuid)}}"><img src="{{url_for('static_content', group='images', filename='bell-off.svg')}}" alt="Mute notifications" title="Mute notifications" class="icon icon-mute" ></a> <a class="link-mute state-{{'on' if tag.notification_muted else 'off'}}" href="{{url_for('tags.mute', uuid=tag.uuid)}}"><img src="{{url_for('static_content', group='images', filename='bell-off.svg')}}" alt="Mute notifications" title="Mute notifications" class="icon icon-mute" ></a>
</td> </td>
<td class="title-col inline">{{tag.title}}</td> <td>{{ "{:,}".format(tag_count[uuid]) if uuid in tag_count else 0 }}</td>
<td class="title-col inline"> <a href="{{url_for('index', tag=uuid) }}">{{ tag.title }}</a></td>
<td> <td>
<a class="pure-button pure-button-primary" href="{{ url_for('tags.form_tag_edit', uuid=uuid) }}">Edit</a>&nbsp; <a class="pure-button pure-button-primary" href="{{ url_for('tags.form_tag_edit', uuid=uuid) }}">Edit</a>&nbsp;
<a class="pure-button pure-button-primary" href="{{ url_for('tags.delete', uuid=uuid) }}" title="Deletes and removes tag">Delete</a> <a class="pure-button pure-button-primary" href="{{ url_for('tags.delete', uuid=uuid) }}" title="Deletes and removes tag">Delete</a>

View File

@@ -1,757 +0,0 @@
from abc import abstractmethod
from distutils.util import strtobool
from urllib.parse import urlparse
import chardet
import hashlib
import json
import logging
import os
import requests
import sys
import time
import urllib.parse
visualselector_xpath_selectors = 'div,span,form,table,tbody,tr,td,a,p,ul,li,h1,h2,h3,h4, header, footer, section, article, aside, details, main, nav, section, summary'
class Non200ErrorCodeReceived(Exception):
def __init__(self, status_code, url, screenshot=None, xpath_data=None, page_html=None):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
self.screenshot = screenshot
self.xpath_data = xpath_data
self.page_text = None
if page_html:
from changedetectionio import html_tools
self.page_text = html_tools.html_to_text(page_html)
return
class checksumFromPreviousCheckWasTheSame(Exception):
def __init__(self):
return
class JSActionExceptions(Exception):
def __init__(self, status_code, url, screenshot, message=''):
self.status_code = status_code
self.url = url
self.screenshot = screenshot
self.message = message
return
class BrowserStepsStepTimout(Exception):
def __init__(self, step_n):
self.step_n = step_n
return
class PageUnloadable(Exception):
def __init__(self, status_code, url, message, screenshot=False):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
self.screenshot = screenshot
self.message = message
return
class EmptyReply(Exception):
def __init__(self, status_code, url, screenshot=None):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
self.screenshot = screenshot
return
class ScreenshotUnavailable(Exception):
def __init__(self, status_code, url, page_html=None):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
if page_html:
from html_tools import html_to_text
self.page_text = html_to_text(page_html)
return
class ReplyWithContentButNoText(Exception):
def __init__(self, status_code, url, screenshot=None, has_filters=False, html_content=''):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
self.screenshot = screenshot
self.has_filters = has_filters
self.html_content = html_content
return
class Fetcher():
browser_steps = None
browser_steps_screenshot_path = None
content = None
error = None
fetcher_description = "No description"
browser_connection_url = None
headers = {}
status_code = None
webdriver_js_execute_code = None
xpath_data = None
xpath_element_js = ""
instock_data = None
instock_data_js = ""
# Will be needed in the future by the VisualSelector, always get this where possible.
screenshot = False
system_http_proxy = os.getenv('HTTP_PROXY')
system_https_proxy = os.getenv('HTTPS_PROXY')
# Time ONTOP of the system defined env minimum time
render_extract_delay = 0
def __init__(self):
from pkg_resources import resource_string
# The code that scrapes elements and makes a list of elements/size/position to click on in the VisualSelector
self.xpath_element_js = resource_string(__name__, "res/xpath_element_scraper.js").decode('utf-8')
self.instock_data_js = resource_string(__name__, "res/stock-not-in-stock.js").decode('utf-8')
@abstractmethod
def get_error(self):
return self.error
@abstractmethod
def run(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False):
# Should set self.error, self.status_code and self.content
pass
@abstractmethod
def quit(self):
return
@abstractmethod
def get_last_status_code(self):
return self.status_code
@abstractmethod
def screenshot_step(self, step_n):
return None
@abstractmethod
# Return true/false if this checker is ready to run, in the case it needs todo some special config check etc
def is_ready(self):
return True
def get_all_headers(self):
"""
Get all headers but ensure all keys are lowercase
:return:
"""
return {k.lower(): v for k, v in self.headers.items()}
def browser_steps_get_valid_steps(self):
if self.browser_steps is not None and len(self.browser_steps):
valid_steps = filter(
lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'),
self.browser_steps)
return valid_steps
return None
def iterate_browser_steps(self):
from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface
from playwright._impl._errors import TimeoutError
from jinja2 import Environment
jinja2_env = Environment(extensions=['jinja2_time.TimeExtension'])
step_n = 0
if self.browser_steps is not None and len(self.browser_steps):
interface = steppable_browser_interface()
interface.page = self.page
valid_steps = self.browser_steps_get_valid_steps()
for step in valid_steps:
step_n += 1
print(">> Iterating check - browser Step n {} - {}...".format(step_n, step['operation']))
self.screenshot_step("before-" + str(step_n))
self.save_step_html("before-" + str(step_n))
try:
optional_value = step['optional_value']
selector = step['selector']
# Support for jinja2 template in step values, with date module added
if '{%' in step['optional_value'] or '{{' in step['optional_value']:
optional_value = str(jinja2_env.from_string(step['optional_value']).render())
if '{%' in step['selector'] or '{{' in step['selector']:
selector = str(jinja2_env.from_string(step['selector']).render())
getattr(interface, "call_action")(action_name=step['operation'],
selector=selector,
optional_value=optional_value)
self.screenshot_step(step_n)
self.save_step_html(step_n)
except TimeoutError as e:
print(str(e))
# Stop processing here
raise BrowserStepsStepTimout(step_n=step_n)
# It's always good to reset these
def delete_browser_steps_screenshots(self):
import glob
if self.browser_steps_screenshot_path is not None:
dest = os.path.join(self.browser_steps_screenshot_path, 'step_*.jpeg')
files = glob.glob(dest)
for f in files:
if os.path.isfile(f):
os.unlink(f)
# Maybe for the future, each fetcher provides its own diff output, could be used for text, image
# the current one would return javascript output (as we use JS to generate the diff)
#
def available_fetchers():
# See the if statement at the bottom of this file for how we switch between playwright and webdriver
import inspect
p = []
for name, obj in inspect.getmembers(sys.modules[__name__], inspect.isclass):
if inspect.isclass(obj):
# @todo html_ is maybe better as fetcher_ or something
# In this case, make sure to edit the default one in store.py and fetch_site_status.py
if name.startswith('html_'):
t = tuple([name, obj.fetcher_description])
p.append(t)
return p
class base_html_playwright(Fetcher):
fetcher_description = "Playwright {}/Javascript".format(
os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').capitalize()
)
if os.getenv("PLAYWRIGHT_DRIVER_URL"):
fetcher_description += " via '{}'".format(os.getenv("PLAYWRIGHT_DRIVER_URL"))
browser_type = ''
command_executor = ''
# Configs for Proxy setup
# In the ENV vars, is prefixed with "playwright_proxy_", so it is for example "playwright_proxy_server"
playwright_proxy_settings_mappings = ['bypass', 'server', 'username', 'password']
proxy = None
def __init__(self, proxy_override=None, browser_connection_url=None):
super().__init__()
self.browser_type = os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').strip('"')
# .strip('"') is going to save someone a lot of time when they accidently wrap the env value
if not browser_connection_url:
self.browser_connection_url = os.getenv("PLAYWRIGHT_DRIVER_URL", 'ws://playwright-chrome:3000').strip('"')
else:
self.browser_connection_url = browser_connection_url
# If any proxy settings are enabled, then we should setup the proxy object
proxy_args = {}
for k in self.playwright_proxy_settings_mappings:
v = os.getenv('playwright_proxy_' + k, False)
if v:
proxy_args[k] = v.strip('"')
if proxy_args:
self.proxy = proxy_args
# allow per-watch proxy selection override
if proxy_override:
self.proxy = {'server': proxy_override}
if self.proxy:
# Playwright needs separate username and password values
parsed = urlparse(self.proxy.get('server'))
if parsed.username:
self.proxy['username'] = parsed.username
self.proxy['password'] = parsed.password
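# Sketch of how the "playwright_proxy_*" environment variables read above are typically set
# before start-up; the host and credentials are made-up examples, the variable names follow
# playwright_proxy_settings_mappings.
import os

os.environ['playwright_proxy_server'] = 'http://squid.internal:3128'   # hypothetical proxy
os.environ['playwright_proxy_username'] = 'proxyuser'                  # optional
os.environ['playwright_proxy_password'] = 'secret'                     # optional
os.environ['playwright_proxy_bypass'] = 'localhost,127.0.0.1'          # optional bypass list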
def screenshot_step(self, step_n=''):
screenshot = self.page.screenshot(type='jpeg', full_page=True, quality=85)
if self.browser_steps_screenshot_path is not None:
destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.jpeg'.format(step_n))
logging.debug("Saving step screenshot to {}".format(destination))
with open(destination, 'wb') as f:
f.write(screenshot)
def save_step_html(self, step_n):
content = self.page.content()
destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.html'.format(step_n))
logging.debug("Saving step HTML to {}".format(destination))
with open(destination, 'w') as f:
f.write(content)
def run_fetch_browserless_puppeteer(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False):
from pkg_resources import resource_string
extra_wait_ms = (int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay) * 1000
self.xpath_element_js = self.xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors)
code = resource_string(__name__, "res/puppeteer_fetch.js").decode('utf-8')
# In the future, inject this as a proper JS package
code = code.replace('%xpath_scrape_code%', self.xpath_element_js)
code = code.replace('%instock_scrape_code%', self.instock_data_js)
from requests.exceptions import ConnectTimeout, ReadTimeout
wait_browserless_seconds = 240
browserless_function_url = os.getenv('BROWSERLESS_FUNCTION_URL')
from urllib.parse import urlparse
if not browserless_function_url:
# Convert/try to guess from PLAYWRIGHT_DRIVER_URL
o = urlparse(os.getenv('PLAYWRIGHT_DRIVER_URL'))
browserless_function_url = o._replace(scheme="http")._replace(path="function").geturl()
# Append proxy connect string
if self.proxy:
# Remove username/password if it exists in the URL or you will receive "ERR_NO_SUPPORTED_PROXIES" error
# Actual authentication handled by Puppeteer/node
o = urlparse(self.proxy.get('server'))
proxy_url = urllib.parse.quote(o._replace(netloc="{}:{}".format(o.hostname, o.port)).geturl())
browserless_function_url = f"{browserless_function_url}&--proxy-server={proxy_url}"
try:
amp = '&' if '?' in browserless_function_url else '?'
response = requests.request(
method="POST",
json={
"code": code,
"context": {
# Very primitive disk cache - USE WITH EXTREME CAUTION
# Run browserless container with -e "FUNCTION_BUILT_INS=[\"fs\",\"crypto\"]"
'disk_cache_dir': os.getenv("PUPPETEER_DISK_CACHE", False), # or path to disk cache ending in /, ie /tmp/cache/
'execute_js': self.webdriver_js_execute_code,
'extra_wait_ms': extra_wait_ms,
'include_filters': current_include_filters,
'req_headers': request_headers,
'screenshot_quality': int(os.getenv("PLAYWRIGHT_SCREENSHOT_QUALITY", 72)),
'url': url,
'user_agent': {k.lower(): v for k, v in request_headers.items()}.get('user-agent', None),
'proxy_username': self.proxy.get('username', '') if self.proxy else False,
'proxy_password': self.proxy.get('password', '') if self.proxy and self.proxy.get('username') else False,
'no_cache_list': [
'twitter',
'.pdf'
],
# Could use https://github.com/easylist/easylist here, or install a plugin
'block_url_list': [
'adnxs.com',
'analytics.twitter.com',
'doubleclick.net',
'google-analytics.com',
'googletagmanager',
'trustpilot.com'
]
}
},
# @todo /function needs adding ws:// to http:// rebuild this
url=browserless_function_url+f"{amp}--disable-features=AudioServiceOutOfProcess&dumpio=true&--disable-remote-fonts",
timeout=wait_browserless_seconds)
except ReadTimeout:
raise PageUnloadable(url=url, status_code=None, message=f"No response from browserless in {wait_browserless_seconds}s")
except ConnectTimeout:
raise PageUnloadable(url=url, status_code=None, message=f"Timed out connecting to browserless, retrying..")
else:
# 200 Here means that the communication to browserless worked only, not the page state
if response.status_code == 200:
import base64
x = response.json()
if not x.get('screenshot'):
# https://github.com/puppeteer/puppeteer/blob/v1.0.0/docs/troubleshooting.md#tips
# https://github.com/puppeteer/puppeteer/issues/1834
# https://github.com/puppeteer/puppeteer/issues/1834#issuecomment-381047051
# Check your memory is shared and big enough
raise ScreenshotUnavailable(url=url, status_code=None)
if not x.get('content', '').strip():
raise EmptyReply(url=url, status_code=None)
if x.get('status_code', 200) != 200 and not ignore_status_codes:
raise Non200ErrorCodeReceived(url=url, status_code=x.get('status_code', 200), page_html=x['content'])
self.content = x.get('content')
self.headers = x.get('headers')
self.instock_data = x.get('instock_data')
self.screenshot = base64.b64decode(x.get('screenshot'))
self.status_code = x.get('status_code')
self.xpath_data = x.get('xpath_data')
else:
# Some other error from browserless
raise PageUnloadable(url=url, status_code=None, message=response.content.decode('utf-8'))
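# Sketch of the BROWSERLESS_FUNCTION_URL fallback used above: when it is not set, the /function
# endpoint is guessed from PLAYWRIGHT_DRIVER_URL. The URL below is a made-up example.
from urllib.parse import urlparse

driver_url = 'ws://browserless:3000?token=example'   # hypothetical PLAYWRIGHT_DRIVER_URL
o = urlparse(driver_url)
print(o._replace(scheme="http")._replace(path="function").geturl())
# -> http://browserless:3000/function?token=example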
def run(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False):
# For now, USE_EXPERIMENTAL_PUPPETEER_FETCH is not supported by watches with BrowserSteps (for now!)
if not self.browser_steps and os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH'):
if strtobool(os.getenv('USE_EXPERIMENTAL_PUPPETEER_FETCH')):
# Temporary backup solution until we rewrite the playwright code
return self.run_fetch_browserless_puppeteer(
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes,
current_include_filters,
is_binary)
from playwright.sync_api import sync_playwright
import playwright._impl._errors
self.delete_browser_steps_screenshots()
response = None
with sync_playwright() as p:
browser_type = getattr(p, self.browser_type)
# Seemed to cause a connection Exception even though I can see it connect
# self.browser = browser_type.connect(self.command_executor, timeout=timeout*1000)
# 60,000 connection timeout only
browser = browser_type.connect_over_cdp(self.browser_connection_url, timeout=60000)
# SOCKS5 with authentication is not supported (yet)
# https://github.com/microsoft/playwright/issues/10567
# Set user agent to prevent Cloudflare from blocking the browser
# Use the default one configured in the App.py model that's passed from fetch_site_status.py
context = browser.new_context(
user_agent={k.lower(): v for k, v in request_headers.items()}.get('user-agent', None),
proxy=self.proxy,
# This is needed to enable JavaScript execution on GitHub and others
bypass_csp=True,
# Should be `allow` or `block` - sites like YouTube can transmit large amounts of data via Service Workers
service_workers=os.getenv('PLAYWRIGHT_SERVICE_WORKERS', 'allow'),
# Should never be needed
accept_downloads=False
)
self.page = context.new_page()
if len(request_headers):
context.set_extra_http_headers(request_headers)
# Listen for all console events and handle errors
self.page.on("console", lambda msg: print(f"Playwright console: Watch URL: {url} {msg.type}: {msg.text} {msg.args}"))
# Re-use as much code from browser steps as possible so it's the same
from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface
browsersteps_interface = steppable_browser_interface()
browsersteps_interface.page = self.page
response = browsersteps_interface.action_goto_url(value=url)
if response is None:
    context.close()
    browser.close()
    print("Content Fetcher > Response object was none")
    raise EmptyReply(url=url, status_code=None)
self.headers = response.all_headers()
try:
if self.webdriver_js_execute_code is not None and len(self.webdriver_js_execute_code):
browsersteps_interface.action_execute_js(value=self.webdriver_js_execute_code, selector=None)
except playwright._impl._errors.TimeoutError as e:
context.close()
browser.close()
# This can be ok, we will try to grab what we could retrieve
pass
except Exception as e:
print("Content Fetcher > Other exception when executing custom JS code", str(e))
context.close()
browser.close()
raise PageUnloadable(url=url, status_code=None, message=str(e))
extra_wait = int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay
self.page.wait_for_timeout(extra_wait * 1000)
self.status_code = response.status
if self.status_code != 200 and not ignore_status_codes:
screenshot=self.page.screenshot(type='jpeg', full_page=True,
quality=int(os.getenv("PLAYWRIGHT_SCREENSHOT_QUALITY", 72)))
raise Non200ErrorCodeReceived(url=url, status_code=self.status_code, screenshot=screenshot)
if len(self.page.content().strip()) == 0:
context.close()
browser.close()
print("Content Fetcher > Content was empty")
raise EmptyReply(url=url, status_code=response.status)
# Run Browser Steps here
if self.browser_steps_get_valid_steps():
self.iterate_browser_steps()
self.page.wait_for_timeout(extra_wait * 1000)
# So we can find an element on the page where its selector was entered manually (maybe not xPath etc)
if current_include_filters is not None:
self.page.evaluate("var include_filters={}".format(json.dumps(current_include_filters)))
else:
self.page.evaluate("var include_filters=''")
self.xpath_data = self.page.evaluate(
"async () => {" + self.xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors) + "}")
self.instock_data = self.page.evaluate("async () => {" + self.instock_data_js + "}")
self.content = self.page.content()
# Bug 3 in Playwright screenshot handling
# Some bug where it gives the wrong screenshot size, but making a request with the clip set first seems to solve it
# JPEG is better here because the screenshots can be very very large
# Screenshots also travel via the ws:// (websocket) meaning that the binary data is base64 encoded
# which will significantly increase the IO size between the server and client, it's recommended to use the lowest
# acceptable screenshot quality here
try:
# The actual screenshot
self.screenshot = self.page.screenshot(type='jpeg', full_page=True,
quality=int(os.getenv("PLAYWRIGHT_SCREENSHOT_QUALITY", 72)))
except Exception as e:
context.close()
browser.close()
raise ScreenshotUnavailable(url=url, status_code=response.status)
context.close()
browser.close()
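# Hypothetical usage sketch of the fetcher interface implemented above; the URL and arguments
# are examples only, and in the application this is normally driven from fetch_site_status.py.
f = base_html_playwright()
f.run(url='https://example.com',          # hypothetical watch URL
      timeout=30,
      request_headers={},
      request_body=None,
      request_method='GET')
print(f.status_code, len(f.content or ''))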
class base_html_webdriver(Fetcher):
if os.getenv("WEBDRIVER_URL"):
fetcher_description = "WebDriver Chrome/Javascript via '{}'".format(os.getenv("WEBDRIVER_URL"))
else:
fetcher_description = "WebDriver Chrome/Javascript"
# Configs for Proxy setup
# In the ENV vars, is prefixed with "webdriver_", so it is for example "webdriver_sslProxy"
selenium_proxy_settings_mappings = ['proxyType', 'ftpProxy', 'httpProxy', 'noProxy',
'proxyAutoconfigUrl', 'sslProxy', 'autodetect',
'socksProxy', 'socksVersion', 'socksUsername', 'socksPassword']
proxy = None
def __init__(self, proxy_override=None, browser_connection_url=None):
super().__init__()
from selenium.webdriver.common.proxy import Proxy as SeleniumProxy
# .strip('"') is going to save someone a lot of time when they accidently wrap the env value
if not browser_connection_url:
self.browser_connection_url = os.getenv("WEBDRIVER_URL", 'http://browser-chrome:4444/wd/hub').strip('"')
else:
self.browser_connection_url = browser_connection_url
# If any proxy settings are enabled, then we should setup the proxy object
proxy_args = {}
for k in self.selenium_proxy_settings_mappings:
v = os.getenv('webdriver_' + k, False)
if v:
proxy_args[k] = v.strip('"')
# Map back standard HTTP_ and HTTPS_PROXY to webDriver httpProxy/sslProxy
if not proxy_args.get('webdriver_httpProxy') and self.system_http_proxy:
proxy_args['httpProxy'] = self.system_http_proxy
if not proxy_args.get('webdriver_sslProxy') and self.system_https_proxy:
proxy_args['httpsProxy'] = self.system_https_proxy
# Allows override the proxy on a per-request basis
if proxy_override is not None:
proxy_args['httpProxy'] = proxy_override
if proxy_args:
self.proxy = SeleniumProxy(raw=proxy_args)
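# Sketch: the selenium proxy above is driven by "webdriver_*" environment variables that mirror
# selenium_proxy_settings_mappings; the values below are made-up examples.
import os

os.environ['webdriver_proxyType'] = 'MANUAL'
os.environ['webdriver_httpProxy'] = 'squid.internal:3128'   # hypothetical proxy host:port
os.environ['webdriver_sslProxy'] = 'squid.internal:3128'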
def run(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False):
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.common.exceptions import WebDriverException
# request_body, request_method unused for now, until some magic in the future happens.
options = ChromeOptions()
if self.proxy:
options.proxy = self.proxy
self.driver = webdriver.Remote(
command_executor=self.browser_connection_url,
options=options)
try:
self.driver.get(url)
except WebDriverException as e:
# Be sure we close the session window
self.quit()
raise
self.driver.set_window_size(1280, 1024)
self.driver.implicitly_wait(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)))
if self.webdriver_js_execute_code is not None:
self.driver.execute_script(self.webdriver_js_execute_code)
# Selenium doesn't automatically wait for actions as well as Playwright does, so wait again
self.driver.implicitly_wait(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)))
# @todo - how to check this? is it possible?
self.status_code = 200
# @todo somehow we should try to get this working for WebDriver
# raise EmptyReply(url=url, status_code=r.status_code)
# @todo - dom wait loaded?
time.sleep(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay)
self.content = self.driver.page_source
self.headers = {}
self.screenshot = self.driver.get_screenshot_as_png()
# Does the connection to the webdriver work? run a test connection.
def is_ready(self):
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions
self.driver = webdriver.Remote(
command_executor=self.command_executor,
options=ChromeOptions())
# driver.quit() seems to cause better exceptions
self.quit()
return True
def quit(self):
if self.driver:
try:
self.driver.quit()
except Exception as e:
print("Content Fetcher > Exception in chrome shutdown/quit" + str(e))
# "html_requests" is listed as the default fetcher in store.py!
class html_requests(Fetcher):
fetcher_description = "Basic fast Plaintext/HTTP Client"
def __init__(self, proxy_override=None, browser_connection_url=None):
super().__init__()
self.proxy_override = proxy_override
# browser_connection_url is None because it's always 'launched locally'
def run(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False):
# Make requests use a more modern looking user-agent
if not {k.lower(): v for k, v in request_headers.items()}.get('user-agent', None):
request_headers['User-Agent'] = os.getenv("DEFAULT_SETTINGS_HEADERS_USERAGENT",
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36')
proxies = {}
# Allows override the proxy on a per-request basis
# https://requests.readthedocs.io/en/latest/user/advanced/#socks
# Should also work with `socks5://user:pass@host:port` type syntax.
if self.proxy_override:
proxies = {'http': self.proxy_override, 'https': self.proxy_override, 'ftp': self.proxy_override}
else:
if self.system_http_proxy:
proxies['http'] = self.system_http_proxy
if self.system_https_proxy:
proxies['https'] = self.system_https_proxy
r = requests.request(method=request_method,
data=request_body,
url=url,
headers=request_headers,
timeout=timeout,
proxies=proxies,
verify=False)
# If the response did not tell us what encoding format to expect, then use chardet to override what `requests` thinks.
# For example - some sites don't tell us it's utf-8, but return utf-8 content
# This seems to not occur when using webdriver/selenium, it seems to detect the text encoding more reliably.
# https://github.com/psf/requests/issues/1604 good info about requests encoding detection
if not is_binary:
# Don't run this for PDFs (or requests identified as binary); it takes a _long_ time
if not r.headers.get('content-type') or not 'charset=' in r.headers.get('content-type'):
encoding = chardet.detect(r.content)['encoding']
if encoding:
r.encoding = encoding
if not r.content or not len(r.content):
raise EmptyReply(url=url, status_code=r.status_code)
# @todo test this
# @todo maybe you really want to test zero-byte return pages?
if r.status_code != 200 and not ignore_status_codes:
# maybe check with content works?
raise Non200ErrorCodeReceived(url=url, status_code=r.status_code, page_html=r.text)
self.status_code = r.status_code
if is_binary:
# Binary files just return their checksum until we add something smarter
self.content = hashlib.md5(r.content).hexdigest()
else:
self.content = r.text
self.headers = r.headers
self.raw_content = r.content
# Decide which is the 'real' HTML webdriver, this is more a system wide config
# rather than site-specific.
use_playwright_as_chrome_fetcher = os.getenv('PLAYWRIGHT_DRIVER_URL', False)
if use_playwright_as_chrome_fetcher:
html_webdriver = base_html_playwright
else:
html_webdriver = base_html_webdriver


@@ -0,0 +1,43 @@
import sys
from changedetectionio.strtobool import strtobool
from loguru import logger
from changedetectionio.content_fetchers.exceptions import BrowserStepsStepException
import os
visualselector_xpath_selectors = 'div,span,form,table,tbody,tr,td,a,p,ul,li,h1,h2,h3,h4,header,footer,section,article,aside,details,main,nav,section,summary'
# available_fetchers() will scan this implementation looking for anything starting with html_
# this information is used in the form selections
from changedetectionio.content_fetchers.requests import fetcher as html_requests
def available_fetchers():
# See the if statement at the bottom of this file for how we switch between playwright and webdriver
import inspect
p = []
for name, obj in inspect.getmembers(sys.modules[__name__], inspect.isclass):
if inspect.isclass(obj):
# @todo html_ is maybe better as fetcher_ or something
# In this case, make sure to edit the default one in store.py and fetch_site_status.py
if name.startswith('html_'):
t = tuple([name, obj.fetcher_description])
p.append(t)
return p
# Decide which is the 'real' HTML webdriver, this is more a system wide config
# rather than site-specific.
use_playwright_as_chrome_fetcher = os.getenv('PLAYWRIGHT_DRIVER_URL', False)
if use_playwright_as_chrome_fetcher:
# @note - For now, browser steps always uses playwright
if not strtobool(os.getenv('FAST_PUPPETEER_CHROME_FETCHER', 'False')):
logger.debug('Using Playwright library as fetcher')
from .playwright import fetcher as html_webdriver
else:
logger.debug('Using direct Python Puppeteer library as fetcher')
from .puppeteer import fetcher as html_webdriver
else:
logger.debug("Falling back to selenium as fetcher")
from .webdriver_selenium import fetcher as html_webdriver
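# Sketch of the selection logic above as a standalone check; the env values are examples only.
import os
from changedetectionio.strtobool import strtobool

use_playwright = bool(os.getenv('PLAYWRIGHT_DRIVER_URL', False))
use_puppeteer = use_playwright and strtobool(os.getenv('FAST_PUPPETEER_CHROME_FETCHER', 'False'))
print('puppeteer' if use_puppeteer else 'playwright' if use_playwright else 'selenium')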


@@ -0,0 +1,172 @@
import os
from abc import abstractmethod
from loguru import logger
from changedetectionio.content_fetchers import BrowserStepsStepException
def manage_user_agent(headers, current_ua=''):
"""
Basic setting of user-agent
NOTE!!!!!! The service that does the actual Chrome fetching should handle any anti-robot techniques
THERE ARE MANY WAYS THAT IT CAN BE DETECTED AS A ROBOT!!
This does not take care of
- Scraping of 'navigator' (platform, productSub, vendor, oscpu etc etc) browser object (navigator.appVersion) etc
- TCP/IP fingerprint JA3 etc
- Graphic rendering fingerprinting
- Your IP being obviously in a pool of bad actors
- Too many requests
- Scraping of Sec-CH-UA browser replies (thanks google!!)
- Scraping of ServiceWorker, new window calls etc
See https://filipvitas.medium.com/how-to-set-user-agent-header-with-puppeteer-js-and-not-fail-28c7a02165da
Puppeteer requests https://github.com/dgtlmoon/pyppeteerstealth
:param headers:
:param current_ua:
:return:
"""
# Ask it what the user agent is; if it's obviously ChromeHeadless, switch it to the default
ua_in_custom_headers = headers.get('User-Agent')
if ua_in_custom_headers:
return ua_in_custom_headers
if not ua_in_custom_headers and current_ua:
current_ua = current_ua.replace('HeadlessChrome', 'Chrome')
return current_ua
return None
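# Quick sketch of manage_user_agent() behaviour with made-up header/UA values:
print(manage_user_agent(headers={'User-Agent': 'MyBot/1.0'}))
# -> 'MyBot/1.0' (an explicit header always wins)
print(manage_user_agent(headers={}, current_ua='Mozilla/5.0 (X11; Linux x86_64) HeadlessChrome/120.0'))
# -> 'Mozilla/5.0 (X11; Linux x86_64) Chrome/120.0' (HeadlessChrome is swapped for Chrome)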
class Fetcher():
browser_connection_is_custom = None
browser_connection_url = None
browser_steps = None
browser_steps_screenshot_path = None
content = None
error = None
fetcher_description = "No description"
headers = {}
instock_data = None
instock_data_js = ""
status_code = None
webdriver_js_execute_code = None
xpath_data = None
xpath_element_js = ""
# Will be needed in the future by the VisualSelector, always get this where possible.
screenshot = False
system_http_proxy = os.getenv('HTTP_PROXY')
system_https_proxy = os.getenv('HTTPS_PROXY')
# Time ONTOP of the system defined env minimum time
render_extract_delay = 0
def __init__(self):
import importlib.resources
self.xpath_element_js = importlib.resources.files("changedetectionio.content_fetchers.res").joinpath('xpath_element_scraper.js').read_text()
self.instock_data_js = importlib.resources.files("changedetectionio.content_fetchers.res").joinpath('stock-not-in-stock.js').read_text()
@abstractmethod
def get_error(self):
return self.error
@abstractmethod
def run(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False):
# Should set self.error, self.status_code and self.content
pass
@abstractmethod
def quit(self):
return
@abstractmethod
def get_last_status_code(self):
return self.status_code
@abstractmethod
def screenshot_step(self, step_n):
return None
@abstractmethod
# Return True/False if this checker is ready to run, in the case it needs to do some special config check etc
def is_ready(self):
return True
def get_all_headers(self):
"""
Get all headers but ensure all keys are lowercase
:return:
"""
return {k.lower(): v for k, v in self.headers.items()}
def browser_steps_get_valid_steps(self):
if self.browser_steps is not None and len(self.browser_steps):
valid_steps = list(filter(
lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one'),
self.browser_steps))
# Just in case they selected 'Goto site' by accident with older JS
if valid_steps and valid_steps[0]['operation'] == 'Goto site':
del(valid_steps[0])
return valid_steps
return None
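# Sketch of which steps survive browser_steps_get_valid_steps(); the step data is hypothetical.
steps = [
    {'operation': 'Goto site',     'selector': '',        'optional_value': ''},  # dropped when it ends up as the first valid step
    {'operation': 'Choose one',    'selector': '',        'optional_value': ''},  # placeholder row, always dropped
    {'operation': 'Click element', 'selector': '#accept', 'optional_value': ''},  # kept
]
valid = [s for s in steps if s['operation'] and s['operation'] != 'Choose one']
if valid and valid[0]['operation'] == 'Goto site':
    del valid[0]
print([s['operation'] for s in valid])   # -> ['Click element']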
def iterate_browser_steps(self, start_url=None):
from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface
from playwright._impl._errors import TimeoutError, Error
from changedetectionio.safe_jinja import render as jinja_render
step_n = 0
if self.browser_steps is not None and len(self.browser_steps):
interface = steppable_browser_interface(start_url=start_url)
interface.page = self.page
valid_steps = self.browser_steps_get_valid_steps()
for step in valid_steps:
step_n += 1
logger.debug(f">> Iterating check - browser Step n {step_n} - {step['operation']}...")
self.screenshot_step("before-" + str(step_n))
self.save_step_html("before-" + str(step_n))
try:
optional_value = step['optional_value']
selector = step['selector']
# Support for jinja2 template in step values, with date module added
if '{%' in step['optional_value'] or '{{' in step['optional_value']:
optional_value = jinja_render(template_str=step['optional_value'])
if '{%' in step['selector'] or '{{' in step['selector']:
selector = jinja_render(template_str=step['selector'])
getattr(interface, "call_action")(action_name=step['operation'],
selector=selector,
optional_value=optional_value)
self.screenshot_step(step_n)
self.save_step_html(step_n)
except (Error, TimeoutError) as e:
logger.debug(str(e))
# Stop processing here
raise BrowserStepsStepException(step_n=step_n, original_e=e)
# It's always good to reset these
def delete_browser_steps_screenshots(self):
import glob
if self.browser_steps_screenshot_path is not None:
dest = os.path.join(self.browser_steps_screenshot_path, 'step_*.jpeg')
files = glob.glob(dest)
for f in files:
if os.path.isfile(f):
os.unlink(f)
def save_step_html(self, param):
pass


@@ -0,0 +1,98 @@
from loguru import logger
class Non200ErrorCodeReceived(Exception):
def __init__(self, status_code, url, screenshot=None, xpath_data=None, page_html=None):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
self.screenshot = screenshot
self.xpath_data = xpath_data
self.page_text = None
if page_html:
from changedetectionio import html_tools
self.page_text = html_tools.html_to_text(page_html)
return
class checksumFromPreviousCheckWasTheSame(Exception):
def __init__(self):
return
class JSActionExceptions(Exception):
def __init__(self, status_code, url, screenshot, message=''):
self.status_code = status_code
self.url = url
self.screenshot = screenshot
self.message = message
return
class BrowserConnectError(Exception):
msg = ''
def __init__(self, msg):
self.msg = msg
logger.error(f"Browser connection error {msg}")
return
class BrowserFetchTimedOut(Exception):
msg = ''
def __init__(self, msg):
self.msg = msg
logger.error(f"Browser processing took too long - {msg}")
return
class BrowserStepsStepException(Exception):
def __init__(self, step_n, original_e):
self.step_n = step_n
self.original_e = original_e
logger.debug(f"Browser Steps exception at step {self.step_n} {str(original_e)}")
return
# @todo - make base Exception class that announces via logger()
class PageUnloadable(Exception):
def __init__(self, status_code=None, url='', message='', screenshot=False):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
self.screenshot = screenshot
self.message = message
return
class BrowserStepsInUnsupportedFetcher(Exception):
def __init__(self, url):
self.url = url
return
class EmptyReply(Exception):
def __init__(self, status_code, url, screenshot=None):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
self.screenshot = screenshot
return
class ScreenshotUnavailable(Exception):
def __init__(self, status_code, url, page_html=None):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
if page_html:
from html_tools import html_to_text
self.page_text = html_to_text(page_html)
return
class ReplyWithContentButNoText(Exception):
def __init__(self, status_code, url, screenshot=None, has_filters=False, html_content='', xpath_data=None):
# Set this so we can use it in other parts of the app
self.status_code = status_code
self.url = url
self.screenshot = screenshot
self.has_filters = has_filters
self.html_content = html_content
self.xpath_data = xpath_data
return
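# Sketch of typical handling for the exceptions above; the raised error is a made-up example,
# the attribute names match the classes defined in this file.
try:
    raise EmptyReply(url='https://example.com', status_code=None)   # hypothetical failure
except Non200ErrorCodeReceived as e:
    print(f"Got HTTP {e.status_code} for {e.url}")
except EmptyReply as e:
    print(f"Empty reply from {e.url} (status={e.status_code})")
except PageUnloadable as e:
    print(f"Could not load {e.url}: {e.message}")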


@@ -0,0 +1,208 @@
import json
import os
from urllib.parse import urlparse
from loguru import logger
from changedetectionio.content_fetchers.base import Fetcher, manage_user_agent
from changedetectionio.content_fetchers.exceptions import PageUnloadable, Non200ErrorCodeReceived, EmptyReply, ScreenshotUnavailable
class fetcher(Fetcher):
fetcher_description = "Playwright {}/Javascript".format(
os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').capitalize()
)
if os.getenv("PLAYWRIGHT_DRIVER_URL"):
fetcher_description += " via '{}'".format(os.getenv("PLAYWRIGHT_DRIVER_URL"))
browser_type = ''
command_executor = ''
# Configs for Proxy setup
# In the ENV vars, is prefixed with "playwright_proxy_", so it is for example "playwright_proxy_server"
playwright_proxy_settings_mappings = ['bypass', 'server', 'username', 'password']
proxy = None
def __init__(self, proxy_override=None, custom_browser_connection_url=None):
super().__init__()
self.browser_type = os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').strip('"')
if custom_browser_connection_url:
self.browser_connection_is_custom = True
self.browser_connection_url = custom_browser_connection_url
else:
# Fallback to fetching from system
# .strip('"') is going to save someone a lot of time when they accidently wrap the env value
self.browser_connection_url = os.getenv("PLAYWRIGHT_DRIVER_URL", 'ws://playwright-chrome:3000').strip('"')
# If any proxy settings are enabled, then we should setup the proxy object
proxy_args = {}
for k in self.playwright_proxy_settings_mappings:
v = os.getenv('playwright_proxy_' + k, False)
if v:
proxy_args[k] = v.strip('"')
if proxy_args:
self.proxy = proxy_args
# allow per-watch proxy selection override
if proxy_override:
self.proxy = {'server': proxy_override}
if self.proxy:
# Playwright needs separate username and password values
parsed = urlparse(self.proxy.get('server'))
if parsed.username:
self.proxy['username'] = parsed.username
self.proxy['password'] = parsed.password
def screenshot_step(self, step_n=''):
screenshot = self.page.screenshot(type='jpeg', full_page=True, quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))
if self.browser_steps_screenshot_path is not None:
destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.jpeg'.format(step_n))
logger.debug(f"Saving step screenshot to {destination}")
with open(destination, 'wb') as f:
f.write(screenshot)
def save_step_html(self, step_n):
content = self.page.content()
destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.html'.format(step_n))
logger.debug(f"Saving step HTML to {destination}")
with open(destination, 'w') as f:
f.write(content)
def run(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False):
from playwright.sync_api import sync_playwright
import playwright._impl._errors
from changedetectionio.content_fetchers import visualselector_xpath_selectors
self.delete_browser_steps_screenshots()
response = None
with sync_playwright() as p:
browser_type = getattr(p, self.browser_type)
# Seemed to cause a connection Exception even though I can see it connect
# self.browser = browser_type.connect(self.command_executor, timeout=timeout*1000)
# 60,000 connection timeout only
browser = browser_type.connect_over_cdp(self.browser_connection_url, timeout=60000)
# SOCKS5 with authentication is not supported (yet)
# https://github.com/microsoft/playwright/issues/10567
# Set user agent to prevent Cloudflare from blocking the browser
# Use the default one configured in the App.py model that's passed from fetch_site_status.py
context = browser.new_context(
accept_downloads=False, # Should never be needed
bypass_csp=True, # This is needed to enable JavaScript execution on GitHub and others
extra_http_headers=request_headers,
ignore_https_errors=True,
proxy=self.proxy,
service_workers=os.getenv('PLAYWRIGHT_SERVICE_WORKERS', 'allow'), # Should be `allow` or `block` - sites like YouTube can transmit large amounts of data via Service Workers
user_agent=manage_user_agent(headers=request_headers),
)
self.page = context.new_page()
# Listen for all console events and handle errors
self.page.on("console", lambda msg: print(f"Playwright console: Watch URL: {url} {msg.type}: {msg.text} {msg.args}"))
# Re-use as much code from browser steps as possible so it's the same
from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface
browsersteps_interface = steppable_browser_interface(start_url=url)
browsersteps_interface.page = self.page
response = browsersteps_interface.action_goto_url(value=url)
if response is None:
    context.close()
    browser.close()
    logger.debug("Content Fetcher > Response object was none")
    raise EmptyReply(url=url, status_code=None)
self.headers = response.all_headers()
try:
if self.webdriver_js_execute_code is not None and len(self.webdriver_js_execute_code):
browsersteps_interface.action_execute_js(value=self.webdriver_js_execute_code, selector=None)
except playwright._impl._errors.TimeoutError as e:
context.close()
browser.close()
# This can be ok, we will try to grab what we could retrieve
pass
except Exception as e:
logger.debug(f"Content Fetcher > Other exception when executing custom JS code {str(e)}")
context.close()
browser.close()
raise PageUnloadable(url=url, status_code=None, message=str(e))
extra_wait = int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay
self.page.wait_for_timeout(extra_wait * 1000)
try:
self.status_code = response.status
except Exception as e:
# https://github.com/dgtlmoon/changedetection.io/discussions/2122#discussioncomment-8241962
logger.critical(f"Response from the browser/Playwright did not have a status_code! Response follows.")
logger.critical(response)
context.close()
browser.close()
raise PageUnloadable(url=url, status_code=None, message=str(e))
if self.status_code != 200 and not ignore_status_codes:
screenshot = self.page.screenshot(type='jpeg', full_page=True,
quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))
raise Non200ErrorCodeReceived(url=url, status_code=self.status_code, screenshot=screenshot)
if len(self.page.content().strip()) == 0:
context.close()
browser.close()
logger.debug("Content Fetcher > Content was empty")
raise EmptyReply(url=url, status_code=response.status)
# Run Browser Steps here
if self.browser_steps_get_valid_steps():
self.iterate_browser_steps(start_url=url)
self.page.wait_for_timeout(extra_wait * 1000)
# So we can find an element on the page where its selector was entered manually (maybe not xPath etc)
if current_include_filters is not None:
self.page.evaluate("var include_filters={}".format(json.dumps(current_include_filters)))
else:
self.page.evaluate("var include_filters=''")
self.xpath_data = self.page.evaluate(
"async () => {" + self.xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors) + "}")
self.instock_data = self.page.evaluate("async () => {" + self.instock_data_js + "}")
self.content = self.page.content()
# Bug 3 in Playwright screenshot handling
# Some bug where it gives the wrong screenshot size, but making a request with the clip set first seems to solve it
# JPEG is better here because the screenshots can be very very large
# Screenshots also travel via the ws:// (websocket) meaning that the binary data is base64 encoded
# which will significantly increase the IO size between the server and client, it's recommended to use the lowest
# acceptable screenshot quality here
try:
# The actual screenshot - this always base64 and needs decoding! horrible! huge CPU usage
self.screenshot = self.page.screenshot(type='jpeg',
full_page=True,
quality=int(os.getenv("SCREENSHOT_QUALITY", 72)),
)
except Exception as e:
# It's likely the screenshot was too long/big and something crashed
raise ScreenshotUnavailable(url=url, status_code=self.status_code)
finally:
context.close()
browser.close()


@@ -0,0 +1,269 @@
import asyncio
import json
import os
import websockets.exceptions
from urllib.parse import urlparse
from loguru import logger
from changedetectionio.content_fetchers.base import Fetcher, manage_user_agent
from changedetectionio.content_fetchers.exceptions import PageUnloadable, Non200ErrorCodeReceived, EmptyReply, BrowserFetchTimedOut, BrowserConnectError
class fetcher(Fetcher):
fetcher_description = "Puppeteer/direct {}/Javascript".format(
os.getenv("PLAYWRIGHT_BROWSER_TYPE", 'chromium').capitalize()
)
if os.getenv("PLAYWRIGHT_DRIVER_URL"):
fetcher_description += " via '{}'".format(os.getenv("PLAYWRIGHT_DRIVER_URL"))
browser_type = ''
command_executor = ''
proxy = None
def __init__(self, proxy_override=None, custom_browser_connection_url=None):
super().__init__()
if custom_browser_connection_url:
self.browser_connection_is_custom = True
self.browser_connection_url = custom_browser_connection_url
else:
# Fallback to fetching from system
# .strip('"') is going to save someone a lot of time when they accidently wrap the env value
self.browser_connection_url = os.getenv("PLAYWRIGHT_DRIVER_URL", 'ws://playwright-chrome:3000').strip('"')
# allow per-watch proxy selection override
# @todo check global too?
if proxy_override:
# Playwright needs separate username and password values
parsed = urlparse(proxy_override)
if parsed:
self.proxy = {'username': parsed.username, 'password': parsed.password}
# Add the proxy server chrome start option, the username and password never gets added here
# (It always goes in via await self.page.authenticate(self.proxy))
# @todo filter some injection attack?
# default to http:// when the override has no scheme
proxy_url = parsed.scheme + "://" if parsed.scheme else 'http://'
r = "?" if not '?' in self.browser_connection_url else '&'
port = ":"+str(parsed.port) if parsed.port else ''
q = "?"+parsed.query if parsed.query else ''
proxy_url += f"{parsed.hostname}{port}{parsed.path}{q}"
self.browser_connection_url += f"{r}--proxy-server={proxy_url}"
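# Sketch of what the proxy_override handling above produces; every value here is a made-up example.
from urllib.parse import urlparse

proxy_override = 'http://user:pass@squid.internal:3128'
parsed = urlparse(proxy_override)
proxy_url = (parsed.scheme + "://" if parsed.scheme else 'http://') + f"{parsed.hostname}:{parsed.port}"
print(f"ws://playwright-chrome:3000?--proxy-server={proxy_url}")
# -> ws://playwright-chrome:3000?--proxy-server=http://squid.internal:3128
# (the username/password are sent separately via page.authenticate(), never in this URL)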
# def screenshot_step(self, step_n=''):
# screenshot = self.page.screenshot(type='jpeg', full_page=True, quality=85)
#
# if self.browser_steps_screenshot_path is not None:
# destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.jpeg'.format(step_n))
# logger.debug(f"Saving step screenshot to {destination}")
# with open(destination, 'wb') as f:
# f.write(screenshot)
#
# def save_step_html(self, step_n):
# content = self.page.content()
# destination = os.path.join(self.browser_steps_screenshot_path, 'step_{}.html'.format(step_n))
# logger.debug(f"Saving step HTML to {destination}")
# with open(destination, 'w') as f:
# f.write(content)
async def fetch_page(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes,
current_include_filters,
is_binary
):
from changedetectionio.content_fetchers import visualselector_xpath_selectors
self.delete_browser_steps_screenshots()
extra_wait = int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay
from pyppeteer import Pyppeteer
pyppeteer_instance = Pyppeteer()
# Connect directly using the specified browser_ws_endpoint
# @todo timeout
try:
browser = await pyppeteer_instance.connect(browserWSEndpoint=self.browser_connection_url,
ignoreHTTPSErrors=True
)
except websockets.exceptions.InvalidStatusCode as e:
raise BrowserConnectError(msg=f"Error while trying to connect the browser, Code {e.status_code} (check your access, whitelist IP, password etc)")
except websockets.exceptions.InvalidURI:
raise BrowserConnectError(msg=f"Error connecting to the browser, check your browser connection address (should be ws:// or wss://")
except Exception as e:
raise BrowserConnectError(msg=f"Error connecting to the browser {str(e)}")
# Better is to launch chrome with the URL as arg
# non-headless - newPage() will launch an extra tab/window, .browser should already contain 1 page/tab
# headless - ask a new page
self.page = (pages := await browser.pages) and len(pages) or await browser.newPage()
try:
from pyppeteerstealth import inject_evasions_into_page
except ImportError:
logger.debug("pyppeteerstealth module not available, skipping")
pass
else:
# I tried hooking events via self.page.on(Events.Page.DOMContentLoaded, inject_evasions_requiring_obj_to_page)
# But I could never get it to fire reliably, so we just inject it straight after
await inject_evasions_into_page(self.page)
# This user agent is similar to what was used when tweaking the evasions in inject_evasions_into_page(..)
user_agent = None
if request_headers and request_headers.get('User-Agent'):
# request_headers should now be a CaseInsensitiveDict
# Remove it so it's not sent again with headers after
user_agent = request_headers.pop('User-Agent').strip()
await self.page.setUserAgent(user_agent)
if not user_agent:
# Attempt to strip 'HeadlessChrome' etc
await self.page.setUserAgent(manage_user_agent(headers=request_headers, current_ua=await self.page.evaluate('navigator.userAgent')))
await self.page.setBypassCSP(True)
if request_headers:
await self.page.setExtraHTTPHeaders(request_headers)
# SOCKS5 with authentication is not supported (yet)
# https://github.com/microsoft/playwright/issues/10567
self.page.setDefaultNavigationTimeout(0)
await self.page.setCacheEnabled(True)
if self.proxy and self.proxy.get('username'):
# Setting Proxy-Authentication header is deprecated, and doing so can trigger header change errors from Puppeteer
# https://github.com/puppeteer/puppeteer/issues/676 ?
# https://help.brightdata.com/hc/en-us/articles/12632549957649-Proxy-Manager-How-to-Guides#h_01HAKWR4Q0AFS8RZTNYWRDFJC2
# https://cri.dev/posts/2020-03-30-How-to-solve-Puppeteer-Chrome-Error-ERR_INVALID_ARGUMENT/
await self.page.authenticate(self.proxy)
# Re-use as much code from browser steps as possible so it's the same
# from changedetectionio.blueprint.browser_steps.browser_steps import steppable_browser_interface
# not yet used here, we fallback to playwright when browsersteps is required
# browsersteps_interface = steppable_browser_interface()
# browsersteps_interface.page = self.page
response = await self.page.goto(url, waitUntil="load")
if response is None:
await self.page.close()
await browser.close()
logger.warning("Content Fetcher > Response object was none")
raise EmptyReply(url=url, status_code=None)
self.headers = response.headers
try:
if self.webdriver_js_execute_code is not None and len(self.webdriver_js_execute_code):
await self.page.evaluate(self.webdriver_js_execute_code)
except Exception as e:
logger.warning("Got exception when running evaluate on custom JS code")
logger.error(str(e))
await self.page.close()
await browser.close()
# This can be ok, we will try to grab what we could retrieve
raise PageUnloadable(url=url, status_code=None, message=str(e))
try:
self.status_code = response.status
except Exception as e:
# https://github.com/dgtlmoon/changedetection.io/discussions/2122#discussioncomment-8241962
logger.critical(f"Response from the browser/Playwright did not have a status_code! Response follows.")
logger.critical(response)
await self.page.close()
await browser.close()
raise PageUnloadable(url=url, status_code=None, message=str(e))
if self.status_code != 200 and not ignore_status_codes:
screenshot = await self.page.screenshot(type_='jpeg',
fullPage=True,
quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))
raise Non200ErrorCodeReceived(url=url, status_code=self.status_code, screenshot=screenshot)
content = await self.page.content
if len(content.strip()) == 0:
await self.page.close()
await browser.close()
logger.error("Content Fetcher > Content was empty")
raise EmptyReply(url=url, status_code=response.status)
# Run Browser Steps here
# @todo not yet supported, we switch to playwright in this case
# if self.browser_steps_get_valid_steps():
# self.iterate_browser_steps()
await asyncio.sleep(1 + extra_wait)
# So we can find an element on the page where its selector was entered manually (maybe not xPath etc)
# Setup the xPath/VisualSelector scraper
if current_include_filters is not None:
js = json.dumps(current_include_filters)
await self.page.evaluate(f"var include_filters={js}")
else:
await self.page.evaluate(f"var include_filters=''")
self.xpath_data = await self.page.evaluate(
"async () => {" + self.xpath_element_js.replace('%ELEMENTS%', visualselector_xpath_selectors) + "}")
self.instock_data = await self.page.evaluate("async () => {" + self.instock_data_js + "}")
self.content = await self.page.content
# Bug 3 in Playwright screenshot handling
# Some bug where it gives the wrong screenshot size, but making a request with the clip set first seems to solve it
# JPEG is better here because the screenshots can be very very large
# Screenshots also travel via the ws:// (websocket) meaning that the binary data is base64 encoded
# which will significantly increase the IO size between the server and client, it's recommended to use the lowest
# acceptable screenshot quality here
try:
self.screenshot = await self.page.screenshot(type_='jpeg',
fullPage=True,
quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))
except Exception as e:
logger.error("Error fetching screenshot")
# May fail on very large pages with 'WARNING: tile memory limits exceeded, some content may not draw'
# @todo after text extract, we can place some overlay text with red background to say 'cropped'
logger.error('ERROR: content-fetcher page was maybe too large for a screenshot, reverting to viewport only screenshot')
try:
self.screenshot = await self.page.screenshot(type_='jpeg',
fullPage=False,
quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))
except Exception as e:
logger.error('ERROR: Failed to get viewport-only reduced screenshot :(')
pass
finally:
# It's good to log here in the case that the browser crashes on shutting down but we still get the data we need
logger.success(f"Fetching '{url}' complete, closing page")
await self.page.close()
logger.success(f"Fetching '{url}' complete, closing browser")
await browser.close()
logger.success(f"Fetching '{url}' complete, exiting puppeteer fetch.")
async def main(self, **kwargs):
await self.fetch_page(**kwargs)
def run(self, url, timeout, request_headers, request_body, request_method, ignore_status_codes=False,
current_include_filters=None, is_binary=False):
#@todo make update_worker async which could run any of these content_fetchers within memory and time constraints
max_time = int(os.getenv('PUPPETEER_MAX_PROCESSING_TIMEOUT_SECONDS', 180))
# This will work in 3.10 but not >= 3.11 because 3.11 wants tasks only
try:
asyncio.run(asyncio.wait_for(self.main(
url=url,
timeout=timeout,
request_headers=request_headers,
request_body=request_body,
request_method=request_method,
ignore_status_codes=ignore_status_codes,
current_include_filters=current_include_filters,
is_binary=is_binary
), timeout=max_time))
except asyncio.TimeoutError:
raise(BrowserFetchTimedOut(msg=f"Browser connected but was unable to process the page in {max_time} seconds."))
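# Sketch of the wall-clock guard pattern used above: the whole async fetch is wrapped in
# asyncio.wait_for() so a hung page cannot block the worker forever. Toy coroutine for illustration.
import asyncio

async def toy_fetch():
    await asyncio.sleep(0.1)      # stands in for fetch_page(...)
    return 'done'

print(asyncio.run(asyncio.wait_for(toy_fetch(), timeout=180)))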


@@ -0,0 +1,86 @@
import hashlib
import os
import chardet
import requests
from changedetectionio.content_fetchers.exceptions import BrowserStepsInUnsupportedFetcher, EmptyReply, Non200ErrorCodeReceived
from changedetectionio.content_fetchers.base import Fetcher
# "html_requests" is listed as the default fetcher in store.py!
class fetcher(Fetcher):
fetcher_description = "Basic fast Plaintext/HTTP Client"
def __init__(self, proxy_override=None, custom_browser_connection_url=None):
super().__init__()
self.proxy_override = proxy_override
# browser_connection_url is None because it's always 'launched locally'
def run(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False):
if self.browser_steps_get_valid_steps():
raise BrowserStepsInUnsupportedFetcher(url=url)
proxies = {}
# Allows override the proxy on a per-request basis
# https://requests.readthedocs.io/en/latest/user/advanced/#socks
# Should also work with `socks5://user:pass@host:port` type syntax.
if self.proxy_override:
proxies = {'http': self.proxy_override, 'https': self.proxy_override, 'ftp': self.proxy_override}
else:
if self.system_http_proxy:
proxies['http'] = self.system_http_proxy
if self.system_https_proxy:
proxies['https'] = self.system_https_proxy
r = requests.request(method=request_method,
data=request_body,
url=url,
headers=request_headers,
timeout=timeout,
proxies=proxies,
verify=False)
# If the response did not tell us what encoding format to expect, then use chardet to override what `requests` thinks.
# For example - some sites don't tell us it's utf-8, but return utf-8 content
# This seems to not occur when using webdriver/selenium, it seems to detect the text encoding more reliably.
# https://github.com/psf/requests/issues/1604 good info about requests encoding detection
if not is_binary:
# Don't run this for PDFs (or requests identified as binary); it takes a _long_ time
if not r.headers.get('content-type') or not 'charset=' in r.headers.get('content-type'):
encoding = chardet.detect(r.content)['encoding']
if encoding:
r.encoding = encoding
self.headers = r.headers
if not r.content or not len(r.content):
raise EmptyReply(url=url, status_code=r.status_code)
# @todo test this
# @todo maybe you really want to test zero-byte return pages?
if r.status_code != 200 and not ignore_status_codes:
# maybe check with content works?
raise Non200ErrorCodeReceived(url=url, status_code=r.status_code, page_html=r.text)
self.status_code = r.status_code
if is_binary:
# Binary files just return their checksum until we add something smarter
self.content = hashlib.md5(r.content).hexdigest()
else:
self.content = r.text
self.raw_content = r.content
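# Sketch of the is_binary branch above: binary bodies are reduced to an MD5 checksum so change
# detection only compares hashes. The bytes below are a made-up stand-in for a PDF body.
import hashlib

pdf_bytes = b'%PDF-1.7 example body'
print(hashlib.md5(pdf_bytes).hexdigest())   # this is what self.content holds when is_binary=True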


@@ -0,0 +1 @@
# resources for browser injection/scraping


@@ -146,7 +146,7 @@ module.exports = async ({page, context}) => {
    var xpath_data;
    var instock_data;
    try {
        // Not sure the best way here, in the future this should be a new package added to npm then run in evaluatedCode
        // (Once the old playwright is removed)
        xpath_data = await page.evaluate((include_filters) => {%xpath_scrape_code%}, include_filters);
        instock_data = await page.evaluate(() => {%instock_scrape_code%});


@@ -0,0 +1,204 @@
// Restock Detector
// (c) Leigh Morresi dgtlmoon@gmail.com
//
// Assumes the product is in stock to begin with, unless the following appears above the fold ;
// - outOfStockTexts appears above the fold (out of stock)
// - negateOutOfStockRegex (really is in stock)
function isItemInStock() {
// @todo Pass these in so the same list can be used in non-JS fetchers
const outOfStockTexts = [
' أخبرني عندما يتوفر',
'0 in stock',
'actuellement indisponible',
'agotado',
'article épuisé',
'artikel zurzeit vergriffen',
'as soon as stock is available',
'ausverkauft', // sold out
'available for back order',
'awaiting stock',
'back in stock soon',
'back-order or out of stock',
'backordered',
'benachrichtigt mich', // notify me
'brak na stanie',
'brak w magazynie',
'coming soon',
'currently have any tickets for this',
'currently unavailable',
'dieser artikel ist bald wieder verfügbar',
'dostępne wkrótce',
'en rupture de stock',
'isn\'t in stock right now',
'isnt in stock right now',
'item is no longer available',
'let me know when it\'s available',
'mail me when available',
'message if back in stock',
'nachricht bei',
'nicht auf lager',
'nicht lagernd',
'nicht lieferbar',
'nicht verfügbar',
'nicht vorrätig',
'nicht zur verfügung',
'nie znaleziono produktów',
'niet beschikbaar',
'niet leverbaar',
'niet op voorraad',
'no disponible temporalmente',
'no longer in stock',
'no tickets available',
'not available',
'not currently available',
'not in stock',
'notify me when available',
'notify me',
'notify when available',
'não estamos a aceitar encomendas',
'out of stock',
'out-of-stock',
'prodotto esaurito',
'produkt niedostępny',
'sold out',
'sold-out',
'temporarily out of stock',
'temporarily unavailable',
'there were no search results for',
'this item is currently unavailable',
'tickets unavailable',
'tijdelijk uitverkocht',
'unavailable nearby',
'unavailable tickets',
'vergriffen',
'vorbestellen',
'vorbestellung ist bald möglich',
'we couldn\'t find any products that match',
'we do not currently have an estimate of when this product will be back in stock.',
'we don\'t know when or if this item will be back in stock.',
'we were not able to find a match',
'when this arrives in stock',
'zur zeit nicht an lager',
'品切れ',
'已售',
'已售完',
'품절'
];
const vh = Math.max(document.documentElement.clientHeight || 0, window.innerHeight || 0);
function getElementBaseText(element) {
// .textContent can include text from children which may give the wrong results
// scan only immediate TEXT_NODEs, which will be a child of the element
var text = "";
for (var i = 0; i < element.childNodes.length; ++i)
if (element.childNodes[i].nodeType === Node.TEXT_NODE)
text += element.childNodes[i].textContent;
return text.toLowerCase().trim();
}
const negateOutOfStockRegex = new RegExp('^([0-9] in stock|add to cart|in stock)', 'ig');
// The out-of-stock or in-stock-text is generally always above-the-fold
// and often below-the-fold is a list of related products that may or may not contain trigger text
// so it's good to filter to just the 'above the fold' elements
// and it should be at least 100px from the top to ignore items in the toolbar, sometimes menu items like "Coming soon" exist
// @todo - if it's SVG or IMG, go into image diff mode
// %ELEMENTS% replaced at injection time because different interfaces use it with different settings
console.log("Scanning %ELEMENTS%");
function collectVisibleElements(parent, visibleElements) {
if (!parent) return; // Base case: if parent is null or undefined, return
// Add the parent itself to the visible elements array if it's of the specified types
visibleElements.push(parent);
// Iterate over the parent's children
const children = parent.children;
for (let i = 0; i < children.length; i++) {
const child = children[i];
if (
child.nodeType === Node.ELEMENT_NODE &&
window.getComputedStyle(child).display !== 'none' &&
window.getComputedStyle(child).visibility !== 'hidden' &&
child.offsetWidth >= 0 &&
child.offsetHeight >= 0 &&
window.getComputedStyle(child).contentVisibility !== 'hidden'
) {
// If the child is an element and is visible, recursively collect visible elements
collectVisibleElements(child, visibleElements);
}
}
}
const elementsToScan = [];
collectVisibleElements(document.body, elementsToScan);
var elementText = "";
// REGEXS THAT REALLY MEAN IT'S IN STOCK
for (let i = elementsToScan.length - 1; i >= 0; i--) {
const element = elementsToScan[i];
// outside the 'fold' or some weird text in the heading area
// .getBoundingClientRect() was causing a crash in chrome 119, can only be run on contentVisibility != hidden
if (element.getBoundingClientRect().top + window.scrollY >= vh || element.getBoundingClientRect().top + window.scrollY <= 100) {
continue
}
elementText = "";
if (element.tagName.toLowerCase() === "input") {
elementText = element.value.toLowerCase().trim();
} else {
elementText = getElementBaseText(element);
}
if (elementText.length) {
// try which ones could mean its in stock
if (negateOutOfStockRegex.test(elementText) && !elementText.includes('(0 products)')) {
console.log(`Negating/overriding 'Out of Stock' back to "Possibly in stock" found "${elementText}"`)
return 'Possibly in stock';
}
}
}
// OTHER STUFF THAT COULD BE THAT IT'S OUT OF STOCK
for (let i = elementsToScan.length - 1; i >= 0; i--) {
const element = elementsToScan[i];
// outside the 'fold' or some weird text in the heading area
// .getBoundingClientRect() was causing a crash in chrome 119, can only be run on contentVisibility != hidden
if (element.getBoundingClientRect().top + window.scrollY >= vh + 150 || element.getBoundingClientRect().top + window.scrollY <= 100) {
continue
}
elementText = "";
if (element.tagName.toLowerCase() === "input") {
elementText = element.value.toLowerCase().trim();
} else {
elementText = getElementBaseText(element);
}
if (elementText.length) {
// and these mean its out of stock
for (const outOfStockText of outOfStockTexts) {
if (elementText.includes(outOfStockText)) {
console.log(`Selected 'Out of Stock' - found text "${outOfStockText}" - "${elementText}"`)
return outOfStockText; // item is out of stock
}
}
}
}
console.log(`Returning 'Possibly in stock' - can't find any useful matching text`)
return 'Possibly in stock'; // possibly in stock, cant decide otherwise.
}
// returns the element text that makes it think it's out of stock
return isItemInStock().trim()


@@ -16,24 +16,23 @@ try {
}

// Include the getXpath script directly, easier than fetching
function getxpath(e) {
    var n = e;
    if (n && n.id) return '//*[@id="' + n.id + '"]';
    for (var o = []; n && Node.ELEMENT_NODE === n.nodeType;) {
        for (var i = 0, r = !1, d = n.previousSibling; d;) d.nodeType !== Node.DOCUMENT_TYPE_NODE && d.nodeName === n.nodeName && i++, d = d.previousSibling;
        for (d = n.nextSibling; d;) {
            if (d.nodeName === n.nodeName) {
                r = !0;
                break
            }
            d = d.nextSibling
        }
        o.push((n.prefix ? n.prefix + ":" : "") + n.localName + (i || r ? "[" + (i + 1) + "]" : "")), n = n.parentNode
    }
    return o.length ? "/" + o.reverse().join("/") : ""
}

const findUpTag = (el) => {
    let r = el
@@ -59,14 +58,14 @@ const findUpTag = (el) => {
// Strategy 2: Keep going up until we hit an ID tag, imagine it's like #list-widget div h4 // Strategy 2: Keep going up until we hit an ID tag, imagine it's like #list-widget div h4
while (r.parentNode) { while (r.parentNode) {
if (depth == 5) { if (depth === 5) {
break; break;
} }
if ('' !== r.id) { if ('' !== r.id) {
chained_css.unshift("#" + CSS.escape(r.id)); chained_css.unshift("#" + CSS.escape(r.id));
final_selector = chained_css.join(' > '); final_selector = chained_css.join(' > ');
// Be sure theres only one, some sites have multiples of the same ID tag :-( // Be sure theres only one, some sites have multiples of the same ID tag :-(
if (window.document.querySelectorAll(final_selector).length == 1) { if (window.document.querySelectorAll(final_selector).length === 1) {
return final_selector; return final_selector;
} }
return null; return null;
@@ -82,30 +81,60 @@ const findUpTag = (el) => {
// @todo - if it's SVG or IMG, go into image diff mode // @todo - if it's SVG or IMG, go into image diff mode
// %ELEMENTS% replaced at injection time because different interfaces use it with different settings // %ELEMENTS% replaced at injection time because different interfaces use it with different settings
var elements = window.document.querySelectorAll("%ELEMENTS%");
var size_pos = []; var size_pos = [];
// after page fetch, inject this JS // after page fetch, inject this JS
// build a map of all elements and their positions (maybe that only include text?) // build a map of all elements and their positions (maybe that only include text?)
var bbox; var bbox;
for (var i = 0; i < elements.length; i++) { console.log("Scanning %ELEMENTS%");
bbox = elements[i].getBoundingClientRect();
// Exclude items that are not interactable or visible function collectVisibleElements(parent, visibleElements) {
if(elements[i].style.opacity === "0") { if (!parent) return; // Base case: if parent is null or undefined, return
continue
// Add the parent itself to the visible elements array if it's of the specified types
const tagName = parent.tagName.toLowerCase();
if ("%ELEMENTS%".split(',').includes(tagName)) {
visibleElements.push(parent);
} }
if(elements[i].style.display === "none" || elements[i].style.pointerEvents === "none" ) {
continue // Iterate over the parent's children
const children = parent.children;
for (let i = 0; i < children.length; i++) {
const child = children[i];
if (
child.nodeType === Node.ELEMENT_NODE &&
window.getComputedStyle(child).display !== 'none' &&
window.getComputedStyle(child).visibility !== 'hidden' &&
child.offsetWidth >= 0 &&
child.offsetHeight >= 0 &&
window.getComputedStyle(child).contentVisibility !== 'hidden'
) {
// If the child is an element and is visible, recursively collect visible elements
collectVisibleElements(child, visibleElements);
}
} }
}
// Create an array to hold the visible elements
const visibleElementsArray = [];
// Call collectVisibleElements with the starting parent element
collectVisibleElements(document.body, visibleElementsArray);
visibleElementsArray.forEach(function (element) {
bbox = element.getBoundingClientRect();
// Skip really small ones, and where width or height ==0 // Skip really small ones, and where width or height ==0
if (bbox['width'] * bbox['height'] < 100) { if (bbox['width'] * bbox['height'] < 10) {
continue; return
} }
// Don't include elements that are offset from canvas // Don't include elements that are offset from canvas
if (bbox['top']+scroll_y < 0 || bbox['left'] < 0) { if (bbox['top'] + scroll_y < 0 || bbox['left'] < 0) {
continue; return
} }
// @todo the getXpath kind of sucks, it doesnt know when there is for example just one ID sometimes // @todo the getXpath kind of sucks, it doesnt know when there is for example just one ID sometimes
@@ -114,50 +143,46 @@ for (var i = 0; i < elements.length; i++) {
// 1st primitive - if it has class, try joining it all and select, if theres only one.. well thats us. // 1st primitive - if it has class, try joining it all and select, if theres only one.. well thats us.
xpath_result = false; xpath_result = false;
try { try {
var d = findUpTag(elements[i]); var d = findUpTag(element);
if (d) { if (d) {
xpath_result = d; xpath_result = d;
} }
} catch (e) { } catch (e) {
console.log(e); console.log(e);
} }
// You could swap it and default to getXpath and then try the smarter one // You could swap it and default to getXpath and then try the smarter one
// default back to the less intelligent one // default back to the less intelligent one
if (!xpath_result) { if (!xpath_result) {
try { try {
// I've seen on FB and eBay that this doesnt work // I've seen on FB and eBay that this doesnt work
// ReferenceError: getXPath is not defined at eval (eval at evaluate (:152:29), <anonymous>:67:20) at UtilityScript.evaluate (<anonymous>:159:18) at UtilityScript.<anonymous> (<anonymous>:1:44) // ReferenceError: getXPath is not defined at eval (eval at evaluate (:152:29), <anonymous>:67:20) at UtilityScript.evaluate (<anonymous>:159:18) at UtilityScript.<anonymous> (<anonymous>:1:44)
xpath_result = getxpath(elements[i]); xpath_result = getxpath(element);
} catch (e) { } catch (e) {
console.log(e); console.log(e);
continue; return
} }
} }
if (window.getComputedStyle(elements[i]).visibility === "hidden") {
continue;
}
// @todo Possible to ONLY list where it's clickable to save JSON xfer size
size_pos.push({ size_pos.push({
xpath: xpath_result, xpath: xpath_result,
width: Math.round(bbox['width']), width: Math.round(bbox['width']),
height: Math.round(bbox['height']), height: Math.round(bbox['height']),
left: Math.floor(bbox['left']), left: Math.floor(bbox['left']),
top: Math.floor(bbox['top'])+scroll_y, top: Math.floor(bbox['top']) + scroll_y,
tagName: (elements[i].tagName) ? elements[i].tagName.toLowerCase() : '', tagName: (element.tagName) ? element.tagName.toLowerCase() : '',
tagtype: (elements[i].tagName == 'INPUT' && elements[i].type) ? elements[i].type.toLowerCase() : '', tagtype: (element.tagName.toLowerCase() === 'input' && element.type) ? element.type.toLowerCase() : '',
isClickable: (elements[i].onclick) || window.getComputedStyle(elements[i]).cursor == "pointer" isClickable: window.getComputedStyle(element).cursor == "pointer"
}); });
} });
// Inject the current one set in the include_filters, which may be a CSS rule // Inject the current one set in the include_filters, which may be a CSS rule
// used for displaying the current one in VisualSelector, where its not one we generated. // used for displaying the current one in VisualSelector, where its not one we generated.
if (include_filters.length) { if (include_filters.length) {
let results;
// Foreach filter, go and find it on the page and add it to the results so we can visualise it again // Foreach filter, go and find it on the page and add it to the results so we can visualise it again
for (const f of include_filters) { for (const f of include_filters) {
bbox = false; bbox = false;
@@ -173,55 +198,61 @@ if (include_filters.length) {
if (f.startsWith('/') || f.startsWith('xpath')) { if (f.startsWith('/') || f.startsWith('xpath')) {
var qry_f = f.replace(/xpath(:|\d:)/, '') var qry_f = f.replace(/xpath(:|\d:)/, '')
console.log("[xpath] Scanning for included filter " + qry_f) console.log("[xpath] Scanning for included filter " + qry_f)
q = document.evaluate(qry_f, document, null, XPathResult.FIRST_ORDERED_NODE_TYPE, null).singleNodeValue; let xpathResult = document.evaluate(qry_f, document, null, XPathResult.ORDERED_NODE_SNAPSHOT_TYPE, null);
results = [];
for (let i = 0; i < xpathResult.snapshotLength; i++) {
results.push(xpathResult.snapshotItem(i));
}
} else { } else {
console.log("[css] Scanning for included filter " + f) console.log("[css] Scanning for included filter " + f)
q = document.querySelector(f); console.log("[css] Scanning for included filter " + f);
results = document.querySelectorAll(f);
} }
} catch (e) { } catch (e) {
// Maybe catch DOMException and alert? // Maybe catch DOMException and alert?
console.log("xpath_element_scraper: Exception selecting element from filter "+f); console.log("xpath_element_scraper: Exception selecting element from filter " + f);
console.log(e); console.log(e);
} }
if (q) { if (results.length) {
// Try to resolve //something/text() back to its /something so we can atleast get the bounding box
try {
if (typeof q.nodeName == 'string' && q.nodeName === '#text') {
q = q.parentElement
}
} catch (e) {
console.log(e)
console.log("xpath_element_scraper: #text resolver")
}
// #1231 - IN the case XPath attribute filter is applied, we will have to traverse up and find the element. // Iterate over the results
if (typeof q.getBoundingClientRect == 'function') { results.forEach(node => {
bbox = q.getBoundingClientRect(); // Try to resolve //something/text() back to its /something so we can atleast get the bounding box
console.log("xpath_element_scraper: Got filter element, scroll from top was " + scroll_y)
} else {
try { try {
// Try and see we can find its ownerElement if (typeof node.nodeName == 'string' && node.nodeName === '#text') {
bbox = q.ownerElement.getBoundingClientRect(); node = node.parentElement
console.log("xpath_element_scraper: Got filter by ownerElement element, scroll from top was " + scroll_y) }
} catch (e) { } catch (e) {
console.log(e) console.log(e)
console.log("xpath_element_scraper: error looking up q.ownerElement") console.log("xpath_element_scraper: #text resolver")
} }
}
}
if(!q) {
console.log("xpath_element_scraper: filter element " + f + " was not found");
}
if (bbox && bbox['width'] > 0 && bbox['height'] > 0) { // #1231 - IN the case XPath attribute filter is applied, we will have to traverse up and find the element.
size_pos.push({ if (typeof node.getBoundingClientRect == 'function') {
xpath: f, bbox = node.getBoundingClientRect();
width: parseInt(bbox['width']), console.log("xpath_element_scraper: Got filter element, scroll from top was " + scroll_y)
height: parseInt(bbox['height']), } else {
left: parseInt(bbox['left']), try {
top: parseInt(bbox['top'])+scroll_y // Try and see we can find its ownerElement
bbox = node.ownerElement.getBoundingClientRect();
console.log("xpath_element_scraper: Got filter by ownerElement element, scroll from top was " + scroll_y)
} catch (e) {
console.log(e)
console.log("xpath_element_scraper: error looking up q.ownerElement")
}
}
if (bbox && bbox['width'] > 0 && bbox['height'] > 0) {
size_pos.push({
xpath: f,
width: parseInt(bbox['width']),
height: parseInt(bbox['height']),
left: parseInt(bbox['left']),
top: parseInt(bbox['top']) + scroll_y,
highlight_as_custom_filter: true
});
}
}); });
} }
} }
@@ -229,7 +260,7 @@ if (include_filters.length) {
// Sort the elements so we find the smallest one first, in other words, we find the smallest one matching in that area // Sort the elements so we find the smallest one first, in other words, we find the smallest one matching in that area
// so that we dont select the wrapping element by mistake and be unable to select what we want // so that we dont select the wrapping element by mistake and be unable to select what we want
size_pos.sort((a, b) => (a.width*a.height > b.width*b.height) ? 1 : -1) size_pos.sort((a, b) => (a.width * a.height > b.width * b.height) ? 1 : -1)
// Window.width required for proper scaling in the frontend // Window.width required for proper scaling in the frontend
return {'size_pos': size_pos, 'browser_width': window.innerWidth}; return {'size_pos': size_pos, 'browser_width': window.innerWidth};
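
For reference, the structure the scraper hands back to the Python backend has roughly the following shape. This is a hedged example with invented values; the keys mirror the size_pos entries assembled above, and entries that came from the user's own include_filters carry the extra highlight_as_custom_filter flag.

# Illustrative payload only - the values are made up.
example_payload = {
    "browser_width": 1280,
    "size_pos": [
        {
            "xpath": '//*[@id="price"]/span[1]',
            "width": 120, "height": 24, "left": 40, "top": 310,
            "tagName": "span", "tagtype": "", "isClickable": False,
        },
        {
            # Produced from a user-supplied include filter, so it is flagged for highlighting
            "xpath": ".price-box .price",
            "width": 120, "height": 24, "left": 40, "top": 310,
            "highlight_as_custom_filter": True,
        },
    ],
}

# Smallest elements sort first so the frontend can pick the tightest match, as in the script above.
example_payload["size_pos"].sort(key=lambda e: e["width"] * e["height"])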


@@ -0,0 +1,119 @@
import os
import time
from loguru import logger
from changedetectionio.content_fetchers.base import Fetcher
class fetcher(Fetcher):
if os.getenv("WEBDRIVER_URL"):
fetcher_description = "WebDriver Chrome/Javascript via '{}'".format(os.getenv("WEBDRIVER_URL"))
else:
fetcher_description = "WebDriver Chrome/Javascript"
# Configs for Proxy setup
# In the ENV vars, is prefixed with "webdriver_", so it is for example "webdriver_sslProxy"
selenium_proxy_settings_mappings = ['proxyType', 'ftpProxy', 'httpProxy', 'noProxy',
'proxyAutoconfigUrl', 'sslProxy', 'autodetect',
'socksProxy', 'socksVersion', 'socksUsername', 'socksPassword']
proxy = None
def __init__(self, proxy_override=None, custom_browser_connection_url=None):
super().__init__()
from selenium.webdriver.common.proxy import Proxy as SeleniumProxy
# .strip('"') is going to save someone a lot of time when they accidentally wrap the env value
if not custom_browser_connection_url:
self.browser_connection_url = os.getenv("WEBDRIVER_URL", 'http://browser-chrome:4444/wd/hub').strip('"')
else:
self.browser_connection_is_custom = True
self.browser_connection_url = custom_browser_connection_url
# If any proxy settings are enabled, then we should setup the proxy object
proxy_args = {}
for k in self.selenium_proxy_settings_mappings:
v = os.getenv('webdriver_' + k, False)
if v:
proxy_args[k] = v.strip('"')
# Map back standard HTTP_ and HTTPS_PROXY to webDriver httpProxy/sslProxy
if not proxy_args.get('webdriver_httpProxy') and self.system_http_proxy:
proxy_args['httpProxy'] = self.system_http_proxy
if not proxy_args.get('webdriver_sslProxy') and self.system_https_proxy:
proxy_args['httpsProxy'] = self.system_https_proxy
# Allows override the proxy on a per-request basis
if proxy_override is not None:
proxy_args['httpProxy'] = proxy_override
if proxy_args:
self.proxy = SeleniumProxy(raw=proxy_args)
def run(self,
url,
timeout,
request_headers,
request_body,
request_method,
ignore_status_codes=False,
current_include_filters=None,
is_binary=False):
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.common.exceptions import WebDriverException
# request_body, request_method unused for now, until some magic in the future happens.
options = ChromeOptions()
if self.proxy:
options.proxy = self.proxy
self.driver = webdriver.Remote(
command_executor=self.browser_connection_url,
options=options)
try:
self.driver.get(url)
except WebDriverException as e:
# Be sure we close the session window
self.quit()
raise
self.driver.set_window_size(1280, 1024)
self.driver.implicitly_wait(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)))
if self.webdriver_js_execute_code is not None:
self.driver.execute_script(self.webdriver_js_execute_code)
# Selenium doesn't automatically wait for actions as well as Playwright does, so wait again
self.driver.implicitly_wait(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)))
# @todo - how to check this? is it possible?
self.status_code = 200
# @todo somehow we should try to get this working for WebDriver
# raise EmptyReply(url=url, status_code=r.status_code)
# @todo - dom wait loaded?
time.sleep(int(os.getenv("WEBDRIVER_DELAY_BEFORE_CONTENT_READY", 5)) + self.render_extract_delay)
self.content = self.driver.page_source
self.headers = {}
self.screenshot = self.driver.get_screenshot_as_png()
# Does the connection to the webdriver work? run a test connection.
def is_ready(self):
from selenium import webdriver
from selenium.webdriver.chrome.options import Options as ChromeOptions
self.driver = webdriver.Remote(
command_executor=self.command_executor,
options=ChromeOptions())
# driver.quit() seems to cause better exceptions
self.quit()
return True
def quit(self):
if self.driver:
try:
self.driver.quit()
except Exception as e:
logger.debug(f"Content Fetcher > Exception in chrome shutdown/quit {str(e)}")


@@ -1,62 +1,97 @@
# used for the notifications, the front-end is using a JS library
import difflib import difflib
from typing import List, Iterator, Union
def same_slicer(lst: List[str], start: int, end: int) -> List[str]:
"""Return a slice of the list, or a single element if start == end."""
return lst[start:end] if start != end else [lst[start]]
def same_slicer(l, a, b): def customSequenceMatcher(
if a == b: before: List[str],
return [l[a]] after: List[str],
else: include_equal: bool = False,
return l[a:b] include_removed: bool = True,
include_added: bool = True,
# like .compare but a little different output include_replaced: bool = True,
def customSequenceMatcher(before, after, include_equal=False, include_removed=True, include_added=True, include_replaced=True, include_change_type_prefix=True): include_change_type_prefix: bool = True
cruncher = difflib.SequenceMatcher(isjunk=lambda x: x in " \\t", a=before, b=after) ) -> Iterator[List[str]]:
"""
# @todo Line-by-line mode instead of bunched, including `after` that is not in `before` (maybe unset?) Compare two sequences and yield differences based on specified parameters.
Args:
before (List[str]): Original sequence
after (List[str]): Modified sequence
include_equal (bool): Include unchanged parts
include_removed (bool): Include removed parts
include_added (bool): Include added parts
include_replaced (bool): Include replaced parts
include_change_type_prefix (bool): Add prefixes to indicate change types
Yields:
List[str]: Differences between sequences
"""
cruncher = difflib.SequenceMatcher(isjunk=lambda x: x in " \t", a=before, b=after)
for tag, alo, ahi, blo, bhi in cruncher.get_opcodes(): for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
if include_equal and tag == 'equal': if include_equal and tag == 'equal':
g = before[alo:ahi] yield before[alo:ahi]
yield g
elif include_removed and tag == 'delete': elif include_removed and tag == 'delete':
row_prefix = "(removed) " if include_change_type_prefix else '' prefix = "(removed) " if include_change_type_prefix else ''
g = [ row_prefix + i for i in same_slicer(before, alo, ahi)] yield [f"{prefix}{line}" for line in same_slicer(before, alo, ahi)]
yield g
elif include_replaced and tag == 'replace': elif include_replaced and tag == 'replace':
row_prefix = "(changed) " if include_change_type_prefix else '' prefix_changed = "(changed) " if include_change_type_prefix else ''
g = [row_prefix + i for i in same_slicer(before, alo, ahi)] prefix_into = "(into) " if include_change_type_prefix else ''
row_prefix = "(into) " if include_change_type_prefix else '' yield [f"{prefix_changed}{line}" for line in same_slicer(before, alo, ahi)] + \
g += [row_prefix + i for i in same_slicer(after, blo, bhi)] [f"{prefix_into}{line}" for line in same_slicer(after, blo, bhi)]
yield g
elif include_added and tag == 'insert': elif include_added and tag == 'insert':
row_prefix = "(added) " if include_change_type_prefix else '' prefix = "(added) " if include_change_type_prefix else ''
g = [row_prefix + i for i in same_slicer(after, blo, bhi)] yield [f"{prefix}{line}" for line in same_slicer(after, blo, bhi)]
yield g
# only_differences - only return info about the differences, no context def render_diff(
# line_feed_sep could be "<br>" or "<li>" or "\n" etc previous_version_file_contents: str,
def render_diff(previous_version_file_contents, newest_version_file_contents, include_equal=False, include_removed=True, include_added=True, include_replaced=True, line_feed_sep="\n", include_change_type_prefix=True, patch_format=False): newest_version_file_contents: str,
include_equal: bool = False,
newest_version_file_contents = [line.rstrip() for line in newest_version_file_contents.splitlines()] include_removed: bool = True,
include_added: bool = True,
if previous_version_file_contents: include_replaced: bool = True,
previous_version_file_contents = [line.rstrip() for line in previous_version_file_contents.splitlines()] line_feed_sep: str = "\n",
else: include_change_type_prefix: bool = True,
previous_version_file_contents = "" patch_format: bool = False
) -> str:
"""
Render the difference between two file contents.
Args:
previous_version_file_contents (str): Original file contents
newest_version_file_contents (str): Modified file contents
include_equal (bool): Include unchanged parts
include_removed (bool): Include removed parts
include_added (bool): Include added parts
include_replaced (bool): Include replaced parts
line_feed_sep (str): Separator for lines in output
include_change_type_prefix (bool): Add prefixes to indicate change types
patch_format (bool): Use patch format for output
Returns:
str: Rendered difference
"""
newest_lines = [line.rstrip() for line in newest_version_file_contents.splitlines()]
previous_lines = [line.rstrip() for line in previous_version_file_contents.splitlines()] if previous_version_file_contents else []
if patch_format: if patch_format:
patch = difflib.unified_diff(previous_version_file_contents, newest_version_file_contents) patch = difflib.unified_diff(previous_lines, newest_lines)
return line_feed_sep.join(patch) return line_feed_sep.join(patch)
rendered_diff = customSequenceMatcher(before=previous_version_file_contents, rendered_diff = customSequenceMatcher(
after=newest_version_file_contents, before=previous_lines,
include_equal=include_equal, after=newest_lines,
include_removed=include_removed, include_equal=include_equal,
include_added=include_added, include_removed=include_removed,
include_replaced=include_replaced, include_added=include_added,
include_change_type_prefix=include_change_type_prefix) include_replaced=include_replaced,
include_change_type_prefix=include_change_type_prefix
)
# Recursively join lists def flatten(lst: List[Union[str, List[str]]]) -> str:
f = lambda L: line_feed_sep.join([f(x) if type(x) is list else x for x in L]) return line_feed_sep.join(flatten(x) if isinstance(x, list) else x for x in lst)
p= f(rendered_diff)
return p return flatten(rendered_diff)
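
Since render_diff() now carries a fully typed signature, a short usage example shows the prefixed output it produces with the default options (include_change_type_prefix=True, unchanged lines omitted):

from changedetectionio import diff

before = "price: 10\ncolour: red\n"
after = "price: 12\ncolour: red\nsize: XL\n"

print(diff.render_diff(before, after))
# (changed) price: 10
# (into) price: 12
# (added) size: XL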


@@ -1,25 +1,19 @@
#!/usr/bin/python3 #!/usr/bin/python3
from changedetectionio import queuedWatchMetaData
from copy import deepcopy
from distutils.util import strtobool
from feedgen.feed import FeedGenerator
from flask_compress import Compress as FlaskCompress
from flask_login import current_user
from flask_restful import abort, Api
from flask_wtf import CSRFProtect
from functools import wraps
from threading import Event
import datetime import datetime
import flask_login
import logging
import os import os
import pytz
import queue import queue
import threading import threading
import time import time
from .safe_jinja import render as jinja_render
from changedetectionio.strtobool import strtobool
from copy import deepcopy
from functools import wraps
from threading import Event
import flask_login
import pytz
import timeago import timeago
from feedgen.feed import FeedGenerator
from flask import ( from flask import (
Flask, Flask,
abort, abort,
@@ -32,10 +26,16 @@ from flask import (
session, session,
url_for, url_for,
) )
from flask_compress import Compress as FlaskCompress
from flask_login import current_user
from flask_paginate import Pagination, get_page_parameter from flask_paginate import Pagination, get_page_parameter
from flask_restful import abort, Api
from flask_cors import CORS
from flask_wtf import CSRFProtect
from loguru import logger
from changedetectionio import html_tools, __version__ from changedetectionio import html_tools, __version__
from changedetectionio import queuedWatchMetaData
from changedetectionio.api import api_v1 from changedetectionio.api import api_v1
datastore = None datastore = None
@@ -54,6 +54,9 @@ app = Flask(__name__,
static_folder="static", static_folder="static",
template_folder="templates") template_folder="templates")
# Enable CORS, especially useful for the Chrome extension to operate from anywhere
CORS(app)
# Super handy for compressing large BrowserSteps responses and others # Super handy for compressing large BrowserSteps responses and others
FlaskCompress(app) FlaskCompress(app)
@@ -121,10 +124,10 @@ def _jinja2_filter_datetime(watch_obj, format="%Y-%m-%d %H:%M:%S"):
@app.template_filter('format_timestamp_timeago') @app.template_filter('format_timestamp_timeago')
def _jinja2_filter_datetimestamp(timestamp, format="%Y-%m-%d %H:%M:%S"): def _jinja2_filter_datetimestamp(timestamp, format="%Y-%m-%d %H:%M:%S"):
if timestamp == False: if not timestamp:
return 'Not yet' return 'Not yet'
return timeago.format(timestamp, time.time()) return timeago.format(int(timestamp), time.time())
@app.template_filter('pagination_slice') @app.template_filter('pagination_slice')
@@ -210,6 +213,8 @@ def login_optionally_required(func):
return decorated_view return decorated_view
def changedetection_app(config=None, datastore_o=None): def changedetection_app(config=None, datastore_o=None):
logger.trace("TRACE log is enabled")
global datastore global datastore
datastore = datastore_o datastore = datastore_o
@@ -314,6 +319,7 @@ def changedetection_app(config=None, datastore_o=None):
@app.route("/rss", methods=['GET']) @app.route("/rss", methods=['GET'])
def rss(): def rss():
now = time.time()
# Always requires token set # Always requires token set
app_rss_token = datastore.data['settings']['application'].get('rss_access_token') app_rss_token = datastore.data['settings']['application'].get('rss_access_token')
rss_url_token = request.args.get('token') rss_url_token = request.args.get('token')
@@ -332,8 +338,11 @@ def changedetection_app(config=None, datastore_o=None):
# @todo needs a .itemsWithTag() or something - then we can use that in Jinja2 and throw this away # @todo needs a .itemsWithTag() or something - then we can use that in Jinja2 and throw this away
for uuid, watch in datastore.data['watching'].items(): for uuid, watch in datastore.data['watching'].items():
# @todo tag notification_muted skip also (improve Watch model)
if datastore.data['settings']['application'].get('rss_hide_muted_watches') and watch.get('notification_muted'):
continue
if limit_tag and not limit_tag in watch['tags']: if limit_tag and not limit_tag in watch['tags']:
continue continue
watch['uuid'] = uuid watch['uuid'] = uuid
sorted_watches.append(watch) sorted_watches.append(watch)
@@ -377,8 +386,12 @@ def changedetection_app(config=None, datastore_o=None):
include_equal=False, include_equal=False,
line_feed_sep="<br>") line_feed_sep="<br>")
fe.content(content="<html><body><h4>{}</h4>{}</body></html>".format(watch_title, html_diff), # @todo Make this configurable and also consider html-colored markup
type='CDATA') # @todo User could decide if <link> goes to the diff page, or to the watch link
rss_template = "<html><body>\n<h4><a href=\"{{watch_url}}\">{{watch_title}}</a></h4>\n<p>{{html_diff}}</p>\n</body></html>\n"
content = jinja_render(template_str=rss_template, watch_title=watch_title, html_diff=html_diff, watch_url=watch.link)
fe.content(content=content, type='CDATA')
fe.guid(guid, permalink=False) fe.guid(guid, permalink=False)
dt = datetime.datetime.fromtimestamp(int(watch.newest_history_key)) dt = datetime.datetime.fromtimestamp(int(watch.newest_history_key))
@@ -387,6 +400,7 @@ def changedetection_app(config=None, datastore_o=None):
response = make_response(fg.rss_str()) response = make_response(fg.rss_str())
response.headers.set('Content-Type', 'application/rss+xml;charset=utf-8') response.headers.set('Content-Type', 'application/rss+xml;charset=utf-8')
logger.trace(f"RSS generated in {time.time() - now:.3f}s")
return response return response
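
The RSS item body is now produced from a Jinja2 template via jinja_render() rather than str.format(). The internals of safe_jinja are not visible in this diff, so the sketch below uses a sandboxed Jinja2 environment as an assumption of the equivalent behaviour:

from jinja2.sandbox import ImmutableSandboxedEnvironment

rss_template = ("<html><body>\n"
                "<h4><a href=\"{{watch_url}}\">{{watch_title}}</a></h4>\n"
                "<p>{{html_diff}}</p>\n</body></html>\n")

# Assumption: a sandboxed environment, mirroring what a "safe" Jinja wrapper would do.
env = ImmutableSandboxedEnvironment()
content = env.from_string(rss_template).render(
    watch_url="https://example.com/product",
    watch_title="Example product",
    html_diff="(changed) price: 10<br>(into) price: 12",
)
print(content)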
@app.route("/", methods=['GET']) @app.route("/", methods=['GET'])
@@ -395,17 +409,21 @@ def changedetection_app(config=None, datastore_o=None):
global datastore global datastore
from changedetectionio import forms from changedetectionio import forms
limit_tag = request.args.get('tag', '').lower().strip() active_tag_req = request.args.get('tag', '').lower().strip()
active_tag_uuid = active_tag = None
# Be sure limit_tag is a uuid # Be sure limit_tag is a uuid
for uuid, tag in datastore.data['settings']['application'].get('tags', {}).items(): if active_tag_req:
if limit_tag == tag.get('title', '').lower().strip(): for uuid, tag in datastore.data['settings']['application'].get('tags', {}).items():
limit_tag = uuid if active_tag_req == tag.get('title', '').lower().strip() or active_tag_req == uuid:
active_tag = tag
active_tag_uuid = uuid
break
# Redirect for the old rss path which used the /?rss=true # Redirect for the old rss path which used the /?rss=true
if request.args.get('rss'): if request.args.get('rss'):
return redirect(url_for('rss', tag=limit_tag)) return redirect(url_for('rss', tag=active_tag_uuid))
op = request.args.get('op') op = request.args.get('op')
if op: if op:
@@ -416,7 +434,7 @@ def changedetection_app(config=None, datastore_o=None):
datastore.data['watching'][uuid].toggle_mute() datastore.data['watching'][uuid].toggle_mute()
datastore.needs_write = True datastore.needs_write = True
return redirect(url_for('index', tag = limit_tag)) return redirect(url_for('index', tag = active_tag_uuid))
# Sort by last_changed and add the uuid which is usually the key.. # Sort by last_changed and add the uuid which is usually the key..
sorted_watches = [] sorted_watches = []
@@ -427,7 +445,7 @@ def changedetection_app(config=None, datastore_o=None):
if with_errors and not watch.get('last_error'): if with_errors and not watch.get('last_error'):
continue continue
if limit_tag and not limit_tag in watch['tags']: if active_tag_uuid and not active_tag_uuid in watch['tags']:
continue continue
if watch.get('last_error'): if watch.get('last_error'):
errored_count += 1 errored_count += 1
@@ -435,6 +453,8 @@ def changedetection_app(config=None, datastore_o=None):
if search_q: if search_q:
if (watch.get('title') and search_q in watch.get('title').lower()) or search_q in watch.get('url', '').lower(): if (watch.get('title') and search_q in watch.get('title').lower()) or search_q in watch.get('url', '').lower():
sorted_watches.append(watch) sorted_watches.append(watch)
elif watch.get('last_error') and search_q in watch.get('last_error').lower():
sorted_watches.append(watch)
else: else:
sorted_watches.append(watch) sorted_watches.append(watch)
@@ -446,12 +466,13 @@ def changedetection_app(config=None, datastore_o=None):
total=total_count, total=total_count,
per_page=datastore.data['settings']['application'].get('pager_size', 50), css_framework="semantic") per_page=datastore.data['settings']['application'].get('pager_size', 50), css_framework="semantic")
sorted_tags = sorted(datastore.data['settings']['application'].get('tags').items(), key=lambda x: x[1]['title'])
output = render_template( output = render_template(
"watch-overview.html", "watch-overview.html",
# Don't link to hosting when we're on the hosting environment # Don't link to hosting when we're on the hosting environment
active_tag=limit_tag, active_tag=active_tag,
app_rss_token=datastore.data['settings']['application']['rss_access_token'], active_tag_uuid=active_tag_uuid,
app_rss_token=datastore.data['settings']['application'].get('rss_access_token'),
datastore=datastore, datastore=datastore,
errored_count=errored_count, errored_count=errored_count,
form=form, form=form,
@@ -465,7 +486,7 @@ def changedetection_app(config=None, datastore_o=None):
sort_attribute=request.args.get('sort') if request.args.get('sort') else request.cookies.get('sort'), sort_attribute=request.args.get('sort') if request.args.get('sort') else request.cookies.get('sort'),
sort_order=request.args.get('order') if request.args.get('order') else request.cookies.get('order'), sort_order=request.args.get('order') if request.args.get('order') else request.cookies.get('order'),
system_default_fetcher=datastore.data['settings']['application'].get('fetch_backend'), system_default_fetcher=datastore.data['settings']['application'].get('fetch_backend'),
tags=datastore.data['settings']['application'].get('tags'), tags=sorted_tags,
watches=sorted_watches watches=sorted_watches
) )
@@ -485,29 +506,52 @@ def changedetection_app(config=None, datastore_o=None):
# AJAX endpoint for sending a test # AJAX endpoint for sending a test
@app.route("/notification/send-test/<string:watch_uuid>", methods=['POST'])
@app.route("/notification/send-test", methods=['POST']) @app.route("/notification/send-test", methods=['POST'])
@app.route("/notification/send-test/", methods=['POST'])
@login_optionally_required @login_optionally_required
def ajax_callback_send_notification_test(): def ajax_callback_send_notification_test(watch_uuid=None):
# Watch_uuid could be unset in the case it's used in the tag editor or global settings
import apprise import apprise
from .apprise_asset import asset from .apprise_asset import asset
apobj = apprise.Apprise(asset=asset) apobj = apprise.Apprise(asset=asset)
watch = datastore.data['watching'].get(watch_uuid) if watch_uuid else None
# validate URLS notification_urls = request.form['notification_urls'].strip().splitlines()
if not len(request.form['notification_urls'].strip()):
return make_response({'error': 'No Notification URLs set'}, 400)
for server_url in request.form['notification_urls'].splitlines(): if not notification_urls:
if len(server_url.strip()): logger.debug("Test notification - Trying by group/tag in the edit form if available")
if not apobj.add(server_url): # On an edit page, we should also fire off to the tags if they have notifications
message = '{} is not a valid AppRise URL.'.format(server_url) if request.form.get('tags') and request.form['tags'].strip():
return make_response({'error': message}, 400) for k in request.form['tags'].split(','):
tag = datastore.tag_exists_by_name(k.strip())
notification_urls = tag.get('notifications_urls') if tag and tag.get('notifications_urls') else None
is_global_settings_form = request.args.get('mode', '') == 'global-settings'
is_group_settings_form = request.args.get('mode', '') == 'group-settings'
if not notification_urls and not is_global_settings_form and not is_group_settings_form:
# In the global settings, use only what is typed currently in the text box
logger.debug("Test notification - Trying by global system settings notifications")
if datastore.data['settings']['application'].get('notification_urls'):
notification_urls = datastore.data['settings']['application']['notification_urls']
if not notification_urls:
return 'No Notification URLs set/found'
for n_url in notification_urls:
if len(n_url.strip()):
if not apobj.add(n_url):
return f'Error - {n_url} is not a valid AppRise URL.'
try: try:
n_object = {'watch_url': request.form['window_url'], # use the same as when it is triggered, but then override it with the form test values
'notification_urls': request.form['notification_urls'].splitlines() n_object = {
} 'watch_url': request.form['window_url'],
'notification_urls': notification_urls
}
# Only use if present, if not set in n_object it should use the default system value # Only use if present, if not set in n_object it should use the default system value
if 'notification_format' in request.form and request.form['notification_format'].strip(): if 'notification_format' in request.form and request.form['notification_format'].strip():
@@ -519,11 +563,13 @@ def changedetection_app(config=None, datastore_o=None):
if 'notification_body' in request.form and request.form['notification_body'].strip(): if 'notification_body' in request.form and request.form['notification_body'].strip():
n_object['notification_body'] = request.form.get('notification_body', '').strip() n_object['notification_body'] = request.form.get('notification_body', '').strip()
notification_q.put(n_object) from . import update_worker
new_worker = update_worker.update_worker(update_q, notification_q, app, datastore)
new_worker.queue_notification_for_watch(notification_q=notification_q, n_object=n_object, watch=watch)
except Exception as e: except Exception as e:
return make_response({'error': str(e)}, 400) return make_response({'error': str(e)}, 400)
return 'OK' return 'OK - Sent test notifications'
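
The resolution order for the test-notification endpoint above is: URLs typed into the form, else URLs configured on any tag attached to the watch, else (outside the global/group settings forms) the system-wide notification URLs. A simplified sketch of that fallback chain, using a stubbed datastore and hypothetical key names purely for illustration:

def resolve_notification_urls(form, datastore, mode=""):
    # 1. Whatever is currently typed into the form wins.
    urls = [u for u in form.get("notification_urls", "").strip().splitlines() if u.strip()]
    if urls:
        return urls
    # 2. Fall back to notification URLs configured on the watch's tags.
    for tag_name in form.get("tags", "").split(","):
        tag = datastore["tags"].get(tag_name.strip())
        if tag and tag.get("notification_urls"):
            return tag["notification_urls"]
    # 3. Finally the system-wide settings, but not when editing those very forms.
    if mode not in ("global-settings", "group-settings"):
        return datastore["settings"].get("notification_urls", [])
    return []

datastore = {"tags": {"shoes": {"notification_urls": ["mailto://me@example.com"]}},
             "settings": {"notification_urls": []}}
print(resolve_notification_urls({"notification_urls": "", "tags": "shoes"}, datastore))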
@app.route("/clear_history/<string:uuid>", methods=['GET']) @app.route("/clear_history/<string:uuid>", methods=['GET'])
@@ -560,6 +606,12 @@ def changedetection_app(config=None, datastore_o=None):
output = render_template("clear_all_history.html") output = render_template("clear_all_history.html")
return output return output
def _watch_has_tag_options_set(watch):
"""This should be fixed better so that Tag is some proper Model, a tag is just a Watch also"""
for tag_uuid, tag in datastore.data['settings']['application'].get('tags', {}).items():
if tag_uuid in watch.get('tags', []) and (tag.get('include_filters') or tag.get('subtractive_selectors')):
return True
@app.route("/edit/<string:uuid>", methods=['GET', 'POST']) @app.route("/edit/<string:uuid>", methods=['GET', 'POST'])
@login_optionally_required @login_optionally_required
# https://stackoverflow.com/questions/42984453/wtforms-populate-form-with-data-if-data-exists # https://stackoverflow.com/questions/42984453/wtforms-populate-form-with-data-if-data-exists
@@ -570,7 +622,6 @@ def changedetection_app(config=None, datastore_o=None):
from .blueprint.browser_steps.browser_steps import browser_step_ui_config from .blueprint.browser_steps.browser_steps import browser_step_ui_config
from . import processors from . import processors
using_default_check_time = True
# More for testing, possible to return the first/only # More for testing, possible to return the first/only
if not datastore.data['watching'].keys(): if not datastore.data['watching'].keys():
flash("No watches to edit", "error") flash("No watches to edit", "error")
@@ -595,10 +646,6 @@ def changedetection_app(config=None, datastore_o=None):
# be sure we update with a copy instead of accidently editing the live object by reference # be sure we update with a copy instead of accidently editing the live object by reference
default = deepcopy(datastore.data['watching'][uuid]) default = deepcopy(datastore.data['watching'][uuid])
# Show system wide default if nothing configured
if all(value == 0 or value == None for value in datastore.data['watching'][uuid]['time_between_check'].values()):
default['time_between_check'] = deepcopy(datastore.data['settings']['requests']['time_between_check'])
# Defaults for proxy choice # Defaults for proxy choice
if datastore.proxy_list is not None: # When enabled if datastore.proxy_list is not None: # When enabled
# @todo # @todo
@@ -632,22 +679,15 @@ def changedetection_app(config=None, datastore_o=None):
if request.method == 'POST' and form.validate(): if request.method == 'POST' and form.validate():
extra_update_obj = {} extra_update_obj = {
'consecutive_filter_failures': 0,
'last_error' : False
}
if request.args.get('unpause_on_save'): if request.args.get('unpause_on_save'):
extra_update_obj['paused'] = False extra_update_obj['paused'] = False
# Re #110, if they submit the same as the default value, set it to None, so we continue to follow the default
# Assume we use the default value, unless something relevant is different, then use the form value
# values could be None, 0 etc.
# Set to None unless the next for: says that something is different
extra_update_obj['time_between_check'] = dict.fromkeys(form.time_between_check.data)
for k, v in form.time_between_check.data.items():
if v and v != datastore.data['settings']['requests']['time_between_check'][k]:
extra_update_obj['time_between_check'] = form.time_between_check.data
using_default_check_time = False
break
extra_update_obj['time_between_check'] = form.time_between_check.data
# Ignore text # Ignore text
form_ignore_text = form.ignore_text.data form_ignore_text = form.ignore_text.data
@@ -681,7 +721,7 @@ def changedetection_app(config=None, datastore_o=None):
datastore.data['watching'][uuid].update(extra_update_obj) datastore.data['watching'][uuid].update(extra_update_obj)
if request.args.get('unpause_on_save'): if request.args.get('unpause_on_save'):
flash("Updated watch - unpaused!.") flash("Updated watch - unpaused!")
else: else:
flash("Updated watch.") flash("Updated watch.")
@@ -728,13 +768,13 @@ def changedetection_app(config=None, datastore_o=None):
extra_title=f" - Edit - {watch.label}", extra_title=f" - Edit - {watch.label}",
form=form, form=form,
has_default_notification_urls=True if len(datastore.data['settings']['application']['notification_urls']) else False, has_default_notification_urls=True if len(datastore.data['settings']['application']['notification_urls']) else False,
has_empty_checktime=using_default_check_time,
has_extra_headers_file=len(datastore.get_all_headers_in_textfile_for_watch(uuid=uuid)) > 0, has_extra_headers_file=len(datastore.get_all_headers_in_textfile_for_watch(uuid=uuid)) > 0,
has_special_tag_options=_watch_has_tag_options_set(watch=watch),
is_html_webdriver=is_html_webdriver, is_html_webdriver=is_html_webdriver,
jq_support=jq_support, jq_support=jq_support,
playwright_enabled=os.getenv('PLAYWRIGHT_DRIVER_URL', False), playwright_enabled=os.getenv('PLAYWRIGHT_DRIVER_URL', False),
settings_application=datastore.data['settings']['application'], settings_application=datastore.data['settings']['application'],
using_global_webdriver_wait=default['webdriver_delay'] is None, using_global_webdriver_wait=not default['webdriver_delay'],
uuid=uuid, uuid=uuid,
visualselector_enabled=visualselector_enabled, visualselector_enabled=visualselector_enabled,
watch=watch watch=watch
@@ -745,7 +785,7 @@ def changedetection_app(config=None, datastore_o=None):
@app.route("/settings", methods=['GET', "POST"]) @app.route("/settings", methods=['GET', "POST"])
@login_optionally_required @login_optionally_required
def settings_page(): def settings_page():
from changedetectionio import content_fetcher, forms from changedetectionio import forms
default = deepcopy(datastore.data['settings']) default = deepcopy(datastore.data['settings'])
if datastore.proxy_list is not None: if datastore.proxy_list is not None:
@@ -813,11 +853,13 @@ def changedetection_app(config=None, datastore_o=None):
flash("An error occurred, please see below.", "error") flash("An error occurred, please see below.", "error")
output = render_template("settings.html", output = render_template("settings.html",
form=form,
hide_remove_pass=os.getenv("SALTED_PASS", False),
api_key=datastore.data['settings']['application'].get('api_access_token'), api_key=datastore.data['settings']['application'].get('api_access_token'),
emailprefix=os.getenv('NOTIFICATION_MAIL_BUTTON_PREFIX', False), emailprefix=os.getenv('NOTIFICATION_MAIL_BUTTON_PREFIX', False),
settings_application=datastore.data['settings']['application']) form=form,
hide_remove_pass=os.getenv("SALTED_PASS", False),
min_system_recheck_seconds=int(os.getenv('MINIMUM_SECONDS_RECHECK_TIME', 3)),
settings_application=datastore.data['settings']['application']
)
return output return output
@@ -1027,6 +1069,8 @@ def changedetection_app(config=None, datastore_o=None):
content = [] content = []
ignored_line_numbers = [] ignored_line_numbers = []
trigger_line_numbers = [] trigger_line_numbers = []
versions = []
timestamp = None
# More for testing, possible to return the first/only # More for testing, possible to return the first/only
if uuid == 'first': if uuid == 'first':
@@ -1046,57 +1090,53 @@ def changedetection_app(config=None, datastore_o=None):
if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver' or watch.get('fetch_backend', '').startswith('extra_browser_'): if (watch.get('fetch_backend') == 'system' and system_uses_webdriver) or watch.get('fetch_backend') == 'html_webdriver' or watch.get('fetch_backend', '').startswith('extra_browser_'):
is_html_webdriver = True is_html_webdriver = True
# Never requested successfully, but we detected a fetch error
if datastore.data['watching'][uuid].history_n == 0 and (watch.get_error_text() or watch.get_error_snapshot()): if datastore.data['watching'][uuid].history_n == 0 and (watch.get_error_text() or watch.get_error_snapshot()):
flash("Preview unavailable - No fetch/check completed or triggers not reached", "error") flash("Preview unavailable - No fetch/check completed or triggers not reached", "error")
output = render_template("preview.html", else:
content=content, # So prepare the latest preview or not
history_n=watch.history_n, preferred_version = request.args.get('version')
extra_stylesheets=extra_stylesheets, versions = list(watch.history.keys())
# current_diff_url=watch['url'], timestamp = versions[-1]
watch=watch, if preferred_version and preferred_version in versions:
uuid=uuid, timestamp = preferred_version
is_html_webdriver=is_html_webdriver,
last_error=watch['last_error'],
last_error_text=watch.get_error_text(),
last_error_screenshot=watch.get_error_snapshot())
return output
timestamp = list(watch.history.keys())[-1] try:
try: versions = list(watch.history.keys())
tmp = watch.get_history_snapshot(timestamp).splitlines() tmp = watch.get_history_snapshot(timestamp).splitlines()
# Get what needs to be highlighted # Get what needs to be highlighted
ignore_rules = watch.get('ignore_text', []) + datastore.data['settings']['application']['global_ignore_text'] ignore_rules = watch.get('ignore_text', []) + datastore.data['settings']['application']['global_ignore_text']
# .readlines will keep the \n, but we will parse it here again, in the future tidy this up # .readlines will keep the \n, but we will parse it here again, in the future tidy this up
ignored_line_numbers = html_tools.strip_ignore_text(content="\n".join(tmp), ignored_line_numbers = html_tools.strip_ignore_text(content="\n".join(tmp),
wordlist=ignore_rules, wordlist=ignore_rules,
mode='line numbers' mode='line numbers'
) )
trigger_line_numbers = html_tools.strip_ignore_text(content="\n".join(tmp), trigger_line_numbers = html_tools.strip_ignore_text(content="\n".join(tmp),
wordlist=watch['trigger_text'], wordlist=watch['trigger_text'],
mode='line numbers' mode='line numbers'
) )
# Prepare the classes and lines used in the template # Prepare the classes and lines used in the template
i=0 i=0
for l in tmp: for l in tmp:
classes=[] classes=[]
i+=1 i+=1
if i in ignored_line_numbers: if i in ignored_line_numbers:
classes.append('ignored') classes.append('ignored')
if i in trigger_line_numbers: if i in trigger_line_numbers:
classes.append('triggered') classes.append('triggered')
content.append({'line': l, 'classes': ' '.join(classes)}) content.append({'line': l, 'classes': ' '.join(classes)})
except Exception as e: except Exception as e:
content.append({'line': f"File doesnt exist or unable to read timestamp {timestamp}", 'classes': ''}) content.append({'line': f"File doesnt exist or unable to read timestamp {timestamp}", 'classes': ''})
output = render_template("preview.html", output = render_template("preview.html",
content=content, content=content,
current_version=timestamp,
history_n=watch.history_n, history_n=watch.history_n,
extra_stylesheets=extra_stylesheets, extra_stylesheets=extra_stylesheets,
extra_title=f" - Diff - {watch.label} @ {timestamp}",
ignored_line_numbers=ignored_line_numbers, ignored_line_numbers=ignored_line_numbers,
triggered_line_numbers=trigger_line_numbers, triggered_line_numbers=trigger_line_numbers,
current_diff_url=watch['url'], current_diff_url=watch['url'],
@@ -1106,7 +1146,10 @@ def changedetection_app(config=None, datastore_o=None):
is_html_webdriver=is_html_webdriver, is_html_webdriver=is_html_webdriver,
last_error=watch['last_error'], last_error=watch['last_error'],
last_error_text=watch.get_error_text(), last_error_text=watch.get_error_text(),
last_error_screenshot=watch.get_error_snapshot()) last_error_screenshot=watch.get_error_snapshot(),
versions=versions
)
return output return output
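
Version selection for the preview page is straightforward: default to the newest snapshot key, but honour a ?version= query argument when it names a known snapshot. A tiny sketch of that rule:

def pick_preview_version(history_keys, preferred=None):
    versions = list(history_keys)   # snapshot timestamps, oldest to newest
    timestamp = versions[-1]        # default: the latest snapshot
    if preferred and preferred in versions:
        timestamp = preferred       # honour ?version=<timestamp> when it exists
    return versions, timestamp

versions, ts = pick_preview_version(["1719800000", "1719900000"], preferred="1719800000")
print(ts)  # -> 1719800000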
@@ -1253,9 +1296,8 @@ def changedetection_app(config=None, datastore_o=None):
url = request.form.get('url').strip() url = request.form.get('url').strip()
if datastore.url_exists(url): if datastore.url_exists(url):
flash('The URL {} already exists'.format(url), "error") flash(f'Warning, URL {url} already exists', "notice")
return redirect(url_for('index'))
add_paused = request.form.get('edit_and_watch_submit_button') != None add_paused = request.form.get('edit_and_watch_submit_button') != None
processor = request.form.get('processor', 'text_json_diff') processor = request.form.get('processor', 'text_json_diff')
new_uuid = datastore.add_watch(url=url, tag=request.form.get('tags').strip(), extras={'paused': add_paused, 'processor': processor}) new_uuid = datastore.add_watch(url=url, tag=request.form.get('tags').strip(), extras={'paused': add_paused, 'processor': processor})
@@ -1405,6 +1447,13 @@ def changedetection_app(config=None, datastore_o=None):
update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid, 'skip_when_checksum_same': False})) update_q.put(queuedWatchMetaData.PrioritizedItem(priority=1, item={'uuid': uuid, 'skip_when_checksum_same': False}))
flash("{} watches queued for rechecking".format(len(uuids))) flash("{} watches queued for rechecking".format(len(uuids)))
elif (op == 'clear-errors'):
for uuid in uuids:
uuid = uuid.strip()
if datastore.data['watching'].get(uuid):
datastore.data['watching'][uuid]["last_error"] = False
flash(f"{len(uuids)} watches errors cleared")
elif (op == 'clear-history'): elif (op == 'clear-history'):
for uuid in uuids: for uuid in uuids:
uuid = uuid.strip() uuid = uuid.strip()
@@ -1484,7 +1533,7 @@ def changedetection_app(config=None, datastore_o=None):
except Exception as e: except Exception as e:
logging.error("Error sharing -{}".format(str(e))) logger.error(f"Error sharing -{str(e)}")
flash("Could not share, something went wrong while communicating with the share server - {}".format(str(e)), 'error') flash("Could not share, something went wrong while communicating with the share server - {}".format(str(e)), 'error')
# https://changedetection.io/share/VrMv05wpXyQa # https://changedetection.io/share/VrMv05wpXyQa
@@ -1584,11 +1633,20 @@ def notification_runner():
try: try:
from changedetectionio import notification from changedetectionio import notification
# Fallback to system config if not set
if not n_object.get('notification_body') and datastore.data['settings']['application'].get('notification_body'):
n_object['notification_body'] = datastore.data['settings']['application'].get('notification_body')
if not n_object.get('notification_title') and datastore.data['settings']['application'].get('notification_title'):
n_object['notification_title'] = datastore.data['settings']['application'].get('notification_title')
if not n_object.get('notification_format') and datastore.data['settings']['application'].get('notification_format'):
n_object['notification_format'] = datastore.data['settings']['application'].get('notification_format')
sent_obj = notification.process_notification(n_object, datastore) sent_obj = notification.process_notification(n_object, datastore)
except Exception as e: except Exception as e:
logging.error("Watch URL: {} Error {}".format(n_object['watch_url'], str(e))) logger.error(f"Watch URL: {n_object['watch_url']} Error {str(e)}")
# UUID wont be present when we submit a 'test' from the global settings # UUID wont be present when we submit a 'test' from the global settings
if 'uuid' in n_object: if 'uuid' in n_object:
@@ -1603,15 +1661,15 @@ def notification_runner():
# Trim the log length # Trim the log length
notification_debug_log = notification_debug_log[-100:] notification_debug_log = notification_debug_log[-100:]
# Thread runner to check every minute, look for new watches to feed into the Queue. # Threaded runner, look for new watches to feed into the Queue.
def ticker_thread_check_time_launch_checks(): def ticker_thread_check_time_launch_checks():
import random import random
from changedetectionio import update_worker from changedetectionio import update_worker
proxy_last_called_time = {} proxy_last_called_time = {}
recheck_time_minimum_seconds = int(os.getenv('MINIMUM_SECONDS_RECHECK_TIME', 20)) recheck_time_minimum_seconds = int(os.getenv('MINIMUM_SECONDS_RECHECK_TIME', 3))
print("System env MINIMUM_SECONDS_RECHECK_TIME", recheck_time_minimum_seconds) logger.debug(f"System env MINIMUM_SECONDS_RECHECK_TIME {recheck_time_minimum_seconds}")
# Spin up Workers that do the fetching # Spin up Workers that do the fetching
# Can be overriden by ENV or use the default settings # Can be overriden by ENV or use the default settings
@@ -1656,7 +1714,7 @@ def ticker_thread_check_time_launch_checks():
now = time.time() now = time.time()
watch = datastore.data['watching'].get(uuid) watch = datastore.data['watching'].get(uuid)
if not watch: if not watch:
logging.error("Watch: {} no longer present.".format(uuid)) logger.error(f"Watch: {uuid} no longer present.")
continue continue
# No need todo further processing if it's paused # No need todo further processing if it's paused
@@ -1664,9 +1722,7 @@ def ticker_thread_check_time_launch_checks():
continue continue
# If they supplied an individual entry minutes to threshold. # If they supplied an individual entry minutes to threshold.
threshold = recheck_time_system_seconds if watch.get('time_between_check_use_default') else watch.threshold_seconds()
watch_threshold_seconds = watch.threshold_seconds()
threshold = watch_threshold_seconds if watch_threshold_seconds > 0 else recheck_time_system_seconds
# #580 - Jitter plus/minus amount of time to make the check seem more random to the server # #580 - Jitter plus/minus amount of time to make the check seem more random to the server
jitter = datastore.data['settings']['requests'].get('jitter_seconds', 0) jitter = datastore.data['settings']['requests'].get('jitter_seconds', 0)
@@ -1689,10 +1745,10 @@ def ticker_thread_check_time_launch_checks():
time_since_proxy_used = int(time.time() - proxy_last_used_time) time_since_proxy_used = int(time.time() - proxy_last_used_time)
if time_since_proxy_used < proxy_list_reuse_time_minimum: if time_since_proxy_used < proxy_list_reuse_time_minimum:
# Not enough time difference reached, skip this watch # Not enough time difference reached, skip this watch
print("> Skipped UUID {} using proxy '{}', not enough time between proxy requests {}s/{}s".format(uuid, logger.debug(f"> Skipped UUID {uuid} "
watch_proxy, f"using proxy '{watch_proxy}', not "
time_since_proxy_used, f"enough time between proxy requests "
proxy_list_reuse_time_minimum)) f"{time_since_proxy_used}s/{proxy_list_reuse_time_minimum}s")
continue continue
else: else:
# Record the last used time # Record the last used time
@@ -1700,14 +1756,12 @@ def ticker_thread_check_time_launch_checks():
# Use Epoch time as priority, so we get a "sorted" PriorityQueue, but we can still push a priority 1 into it. # Use Epoch time as priority, so we get a "sorted" PriorityQueue, but we can still push a priority 1 into it.
priority = int(time.time()) priority = int(time.time())
print( logger.debug(
"> Queued watch UUID {} last checked at {} queued at {:0.2f} priority {} jitter {:0.2f}s, {:0.2f}s since last checked".format( f"> Queued watch UUID {uuid} "
uuid, f"last checked at {watch['last_checked']} "
watch['last_checked'], f"queued at {now:0.2f} priority {priority} "
now, f"jitter {watch.jitter_seconds:0.2f}s, "
priority, f"{now - watch['last_checked']:0.2f}s since last checked")
watch.jitter_seconds,
now - watch['last_checked']))
# Into the queue with you # Into the queue with you
update_q.put(queuedWatchMetaData.PrioritizedItem(priority=priority, item={'uuid': uuid, 'skip_when_checksum_same': True})) update_q.put(queuedWatchMetaData.PrioritizedItem(priority=priority, item={'uuid': uuid, 'skip_when_checksum_same': True}))
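
Pulling the scheduling pieces together: a watch is queued once the time since its last check exceeds its threshold (the system default when time_between_check_use_default is set, otherwise its own setting), nudged by a random jitter so checks do not hit the server on a fixed beat. The exact comparison is spread across hunks not fully shown here, so the sketch below is an approximation:

import random, time

def is_due(last_checked, threshold_seconds, jitter_seconds=0):
    # Apply jitter as a +/- random offset around the configured threshold.
    jitter = random.uniform(-jitter_seconds, jitter_seconds) if jitter_seconds else 0
    return (time.time() - last_checked) >= (threshold_seconds + jitter)

# Example: last checked 10 minutes ago, recheck every 5 minutes, 10s jitter.
print(is_due(time.time() - 600, 300, jitter_seconds=10))  # -> True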


@@ -1,6 +1,6 @@
import os
import re
-from distutils.util import strtobool
+from changedetectionio.strtobool import strtobool
from wtforms import (
    BooleanField,
@@ -27,7 +27,7 @@ from validators.url import url as url_validator
# each select <option data-enabled="enabled-0-0"
from changedetectionio.blueprint.browser_steps.browser_steps import browser_step_ui_config
-from changedetectionio import content_fetcher, html_tools
+from changedetectionio import html_tools, content_fetchers
from changedetectionio.notification import (
    valid_notification_formats,
@@ -43,6 +43,7 @@ valid_method = {
    'PUT',
    'PATCH',
    'DELETE',
+    'OPTIONS',
}
default_method = 'GET'
@@ -166,33 +167,31 @@ class ValidateContentFetcherIsReady(object):
        self.message = message
    def __call__(self, form, field):
-        import urllib3.exceptions
-        from changedetectionio import content_fetcher
        return
        # AttributeError: module 'changedetectionio.content_fetcher' has no attribute 'extra_browser_unlocked<>ASDF213r123r'
        # Better would be a radiohandler that keeps a reference to each class
-        if field.data is not None and field.data != 'system':
-            klass = getattr(content_fetcher, field.data)
-            some_object = klass()
-            try:
-                ready = some_object.is_ready()
-            except urllib3.exceptions.MaxRetryError as e:
-                driver_url = some_object.command_executor
-                message = field.gettext('Content fetcher \'%s\' did not respond.' % (field.data))
-                message += '<br>' + field.gettext(
-                    'Be sure that the selenium/webdriver runner is running and accessible via network from this container/host.')
-                message += '<br>' + field.gettext('Did you follow the instructions in the wiki?')
-                message += '<br><br>' + field.gettext('WebDriver Host: %s' % (driver_url))
-                message += '<br><a href="https://github.com/dgtlmoon/changedetection.io/wiki/Fetching-pages-with-WebDriver">Go here for more information</a>'
-                message += '<br>'+field.gettext('Content fetcher did not respond properly, unable to use it.\n %s' % (str(e)))
-                raise ValidationError(message)
-            except Exception as e:
-                message = field.gettext('Content fetcher \'%s\' did not respond properly, unable to use it.\n %s')
-                raise ValidationError(message % (field.data, e))
+        # if field.data is not None and field.data != 'system':
+        #     klass = getattr(content_fetcher, field.data)
+        #     some_object = klass()
+        #     try:
+        #         ready = some_object.is_ready()
+        #
+        #     except urllib3.exceptions.MaxRetryError as e:
+        #         driver_url = some_object.command_executor
+        #         message = field.gettext('Content fetcher \'%s\' did not respond.' % (field.data))
+        #         message += '<br>' + field.gettext(
+        #             'Be sure that the selenium/webdriver runner is running and accessible via network from this container/host.')
+        #         message += '<br>' + field.gettext('Did you follow the instructions in the wiki?')
+        #         message += '<br><br>' + field.gettext('WebDriver Host: %s' % (driver_url))
+        #         message += '<br><a href="https://github.com/dgtlmoon/changedetection.io/wiki/Fetching-pages-with-WebDriver">Go here for more information</a>'
+        #         message += '<br>'+field.gettext('Content fetcher did not respond properly, unable to use it.\n %s' % (str(e)))
+        #
+        #         raise ValidationError(message)
+        #
+        #     except Exception as e:
+        #         message = field.gettext('Content fetcher \'%s\' did not respond properly, unable to use it.\n %s')
+        #         raise ValidationError(message % (field.data, e))
class ValidateNotificationBodyAndTitleWhenURLisSet(object):
@@ -237,21 +236,26 @@ class ValidateJinja2Template(object):
    def __call__(self, form, field):
        from changedetectionio import notification
-        from jinja2 import Environment, BaseLoader, TemplateSyntaxError, UndefinedError
+        from jinja2 import BaseLoader, TemplateSyntaxError, UndefinedError
+        from jinja2.sandbox import ImmutableSandboxedEnvironment
        from jinja2.meta import find_undeclared_variables
+        import jinja2.exceptions
+        # Might be a list of text, or might be just text (like from the apprise url list)
+        joined_data = ' '.join(map(str, field.data)) if isinstance(field.data, list) else f"{field.data}"
        try:
-            jinja2_env = Environment(loader=BaseLoader)
+            jinja2_env = ImmutableSandboxedEnvironment(loader=BaseLoader)
            jinja2_env.globals.update(notification.valid_tokens)
-            rendered = jinja2_env.from_string(field.data).render()
+            jinja2_env.from_string(joined_data).render()
        except TemplateSyntaxError as e:
            raise ValidationError(f"This is not a valid Jinja2 template: {e}") from e
        except UndefinedError as e:
            raise ValidationError(f"A variable or function is not defined: {e}") from e
+        except jinja2.exceptions.SecurityError as e:
+            raise ValidationError(f"This is not a valid Jinja2 template: {e}") from e
-        ast = jinja2_env.parse(field.data)
+        ast = jinja2_env.parse(joined_data)
        undefined = ", ".join(find_undeclared_variables(ast))
        if undefined:
            raise ValidationError(
@@ -416,11 +420,11 @@ class quickWatchForm(Form):
# Common to a single watch and the global settings
class commonSettingsForm(Form):
-    notification_urls = StringListField('Notification URL List', validators=[validators.Optional(), ValidateAppRiseServers()])
+    notification_urls = StringListField('Notification URL List', validators=[validators.Optional(), ValidateAppRiseServers(), ValidateJinja2Template()])
    notification_title = StringField('Notification Title', default='ChangeDetection.io Notification - {{ watch_url }}', validators=[validators.Optional(), ValidateJinja2Template()])
    notification_body = TextAreaField('Notification Body', default='{{ watch_url }} had a change.', validators=[validators.Optional(), ValidateJinja2Template()])
    notification_format = SelectField('Notification format', choices=valid_notification_formats.keys())
-    fetch_backend = RadioField(u'Fetch Method', choices=content_fetcher.available_fetchers(), validators=[ValidateContentFetcherIsReady()])
+    fetch_backend = RadioField(u'Fetch Method', choices=content_fetchers.available_fetchers(), validators=[ValidateContentFetcherIsReady()])
    extract_title_as_title = BooleanField('Extract <title> from document and use as watch title', default=False)
    webdriver_delay = IntegerField('Wait seconds before extracting text', validators=[validators.Optional(), validators.NumberRange(min=1,
                                                                                                                                    message="Should contain one or more seconds")])
@@ -449,6 +453,7 @@ class watchForm(commonSettingsForm):
    tags = StringTagUUID('Group tag', [validators.Optional()], default='')
    time_between_check = FormField(TimeBetweenCheckForm)
+    time_between_check_use_default = BooleanField('Use global settings for time between check', default=False)
    include_filters = StringListField('CSS/JSONPath/JQ/XPath Filters', [ValidateCSSJSONXPATHInput()], default='')
@@ -464,6 +469,7 @@ class watchForm(commonSettingsForm):
    method = SelectField('Request method', choices=valid_method, default=default_method)
    ignore_status_codes = BooleanField('Ignore status codes (process non-2xx status codes as normal)', default=False)
    check_unique_lines = BooleanField('Only trigger when unique lines appear', default=False)
+    sort_text_alphabetically = BooleanField('Sort text alphabetically', default=False)
    filter_text_added = BooleanField('Added lines', default=True)
    filter_text_replaced = BooleanField('Replaced/changed lines', default=True)
@@ -499,11 +505,9 @@ class watchForm(commonSettingsForm):
            result = False
        # Attempt to validate jinja2 templates in the URL
-        from jinja2 import Environment
-        # Jinja2 available in URLs along with https://pypi.org/project/jinja2-time/
-        jinja2_env = Environment(extensions=['jinja2_time.TimeExtension'])
        try:
-            ready_url = str(jinja2_env.from_string(self.url.data).render())
+            from changedetectionio.safe_jinja import render as jinja_render
+            jinja_render(template_str=self.url.data)
        except Exception as e:
            self.url.errors.append('Invalid template syntax')
            result = False
@@ -522,6 +526,10 @@ class SingleExtraBrowser(Form):
    browser_connection_url = StringField('Browser connection URL', [validators.Optional()], render_kw={"placeholder": "wss://brightdata... wss://oxylabs etc", "size":50})
    # @todo do the validation here instead
+class DefaultUAInputForm(Form):
+    html_requests = StringField('Plaintext requests', validators=[validators.Optional()], render_kw={"placeholder": "<default>"})
+    if os.getenv("PLAYWRIGHT_DRIVER_URL") or os.getenv("WEBDRIVER_URL"):
+        html_webdriver = StringField('Chrome requests', validators=[validators.Optional()], render_kw={"placeholder": "<default>"})
# datastore.data['settings']['requests']..
class globalSettingsRequestForm(Form):
@@ -533,6 +541,8 @@ class globalSettingsRequestForm(Form):
    extra_proxies = FieldList(FormField(SingleExtraProxy), min_entries=5)
    extra_browsers = FieldList(FormField(SingleExtraBrowser), min_entries=5)
+    default_ua = FormField(DefaultUAInputForm, label="Default User-Agent overrides")
    def validate_extra_proxies(self, extra_validators=None):
        for e in self.data['extra_proxies']:
            if e.get('proxy_name') or e.get('proxy_url'):
@@ -550,7 +560,7 @@ class globalSettingsApplicationForm(commonSettingsForm):
                           render_kw={"placeholder": os.getenv('BASE_URL', 'Not set')}
                           )
    empty_pages_are_a_change = BooleanField('Treat empty pages as a change?', default=False)
-    fetch_backend = RadioField('Fetch Method', default="html_requests", choices=content_fetcher.available_fetchers(), validators=[ValidateContentFetcherIsReady()])
+    fetch_backend = RadioField('Fetch Method', default="html_requests", choices=content_fetchers.available_fetchers(), validators=[ValidateContentFetcherIsReady()])
    global_ignore_text = StringListField('Ignore Text', [ValidateListRegex()])
    global_subtractive_selectors = StringListField('Remove elements', [ValidateCSSJSONXPATHInput(allow_xpath=False, allow_json=False)])
    ignore_whitespace = BooleanField('Ignore whitespace')
@@ -562,6 +572,8 @@ class globalSettingsApplicationForm(commonSettingsForm):
    removepassword_button = SubmitField('Remove password', render_kw={"class": "pure-button pure-button-primary"})
    render_anchor_tag_content = BooleanField('Render anchor tag content', default=False)
    shared_diff_access = BooleanField('Allow access to view diff page when password is enabled', default=False, validators=[validators.Optional()])
+    rss_hide_muted_watches = BooleanField('Hide muted watches from RSS feed', default=True,
+                                          validators=[validators.Optional()])
    filter_failure_notification_threshold_attempts = IntegerField('Number of times the filter can be missing before sending a notification',
                                                                  render_kw={"style": "width: 5em;"},
                                                                  validators=[validators.NumberRange(min=0,
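The ValidateJinja2Template change above swaps Jinja2's plain Environment for ImmutableSandboxedEnvironment and adds a SecurityError handler, so notification templates that try to reach into Python internals are rejected at validation time rather than executed. A short standalone illustration of the sandbox behaviour this relies on (the token value is made up):

from jinja2 import BaseLoader
from jinja2.exceptions import SecurityError
from jinja2.sandbox import ImmutableSandboxedEnvironment

env = ImmutableSandboxedEnvironment(loader=BaseLoader)
env.globals.update({'watch_url': 'https://example.com'})

print(env.from_string("Changed: {{ watch_url }}").render())  # renders normally

try:
    # Underscore attribute access is blocked by the sandbox
    env.from_string("{{ ''.__class__.__mro__ }}").render()
except SecurityError as e:
    print(f"Rejected template: {e}")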

View File

@@ -3,8 +3,6 @@ from bs4 import BeautifulSoup
from inscriptis import get_text
from jsonpath_ng.ext import parse
from typing import List
-from inscriptis.css_profiles import CSS_PROFILES, HtmlElement
-from inscriptis.html_properties import Display
from inscriptis.model.config import ParserConfig
from xml.sax.saxutils import escape as xml_escape
import json
@@ -169,14 +167,14 @@ def xpath1_filter(xpath_filter, html_content, append_pretty_line_formatting=Fals
        # And where the matched result doesn't include something that will cause Inscriptis to add a newline
        # (This way each 'match' reliably has a new-line in the diff)
        # Divs are converted to 4 whitespaces by inscriptis
-        if append_pretty_line_formatting and len(html_block) and (not hasattr( element, 'tag' ) or not element.tag in (['br', 'hr', 'div', 'p'])):
+        if append_pretty_line_formatting and len(html_block) and (not hasattr(element, 'tag') or not element.tag in (['br', 'hr', 'div', 'p'])):
            html_block += TEXT_FILTER_LIST_LINE_SUFFIX
-        if type(element) == etree._ElementStringResult:
-            html_block += str(element)
-        elif type(element) == etree._ElementUnicodeResult:
-            html_block += str(element)
+        # Some kind of text, UTF-8 or other
+        if isinstance(element, (str, bytes)):
+            html_block += element
        else:
+            # Return the HTML which will get parsed as text
            html_block += etree.tostring(element, pretty_print=True).decode('utf-8')
    return html_block
@@ -196,12 +194,12 @@ def extract_element(find='title', html_content=''):
#
def _parse_json(json_data, json_filter):
-    if 'json:' in json_filter:
+    if json_filter.startswith("json:"):
        jsonpath_expression = parse(json_filter.replace('json:', ''))
        match = jsonpath_expression.find(json_data)
        return _get_stripped_text_from_json_match(match)
-    if 'jq:' in json_filter:
+    if json_filter.startswith("jq:") or json_filter.startswith("jqraw:"):
        try:
            import jq
@@ -209,10 +207,15 @@ def _parse_json(json_data, json_filter):
            # `jq` requires full compilation in windows and so isn't generally available
            raise Exception("jq not support not found")
-        jq_expression = jq.compile(json_filter.replace('jq:', ''))
-        match = jq_expression.input(json_data).all()
-        return _get_stripped_text_from_json_match(match)
+        if json_filter.startswith("jq:"):
+            jq_expression = jq.compile(json_filter.removeprefix("jq:"))
+            match = jq_expression.input(json_data).all()
+            return _get_stripped_text_from_json_match(match)
+        if json_filter.startswith("jqraw:"):
+            jq_expression = jq.compile(json_filter.removeprefix("jqraw:"))
+            match = jq_expression.input(json_data).all()
+            return '\n'.join(str(item) for item in match)
def _get_stripped_text_from_json_match(match):
    s = []
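The new jqraw: branch above compiles the same jq program as jq:, but joins str(item) per match instead of passing the matches back through _get_stripped_text_from_json_match(), so selected values come out one per line without being re-quoted. A rough standalone sketch of the difference (requires the optional jq package; the output shown is only an approximation of the real helper):

import json
import jq

data = json.loads('{"items": [{"name": "Widget"}, {"name": "Gadget"}]}')
matches = jq.compile('.items[].name').input(data).all()

# Roughly what a "jq:" filter yields - each match re-serialised as JSON
print('\n'.join(json.dumps(m) for m in matches))  # "Widget" / "Gadget" (quoted)

# Roughly what the new "jqraw:" filter yields - plain str() of each match
print('\n'.join(str(m) for m in matches))         # Widget / Gadget (unquoted)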

View File

@@ -2,6 +2,7 @@ from abc import ABC, abstractmethod
import time
import validators
from wtforms import ValidationError
+from loguru import logger
from changedetectionio.forms import validate_url
@@ -56,7 +57,7 @@ class import_url_list(Importer):
            # Flask wtform validators wont work with basic auth, use validators package
            # Up to 5000 per batch so we dont flood the server
-            # @todo validators.url failed on local hostnames (such as referring to ourself when using browserless)
+            # @todo validators.url will fail when you add your own IP etc
            if len(url) and 'http' in url.lower() and good < 5000:
                extras = None
                if processor:
@@ -195,7 +196,7 @@ class import_xlsx_wachete(Importer):
                try:
                    validate_url(data.get('url'))
                except ValidationError as e:
-                    print(">> import URL error", data.get('url'), str(e))
+                    logger.error(f">> Import URL error {data.get('url')} {str(e)}")
                    flash(f"Error processing row number {row_id}, URL value was incorrect, row was skipped.", 'error')
                    # Don't bother processing anything else on this row
                    continue
@@ -209,7 +210,7 @@ class import_xlsx_wachete(Importer):
                    self.new_uuids.append(new_uuid)
                    good += 1
                except Exception as e:
-                    print(e)
+                    logger.error(e)
                    flash(f"Error processing row number {row_id}, check all cell data types are correct, row was skipped.", 'error')
            else:
                row_id += 1
@@ -264,7 +265,7 @@ class import_xlsx_custom(Importer):
                    try:
                        validate_url(url)
                    except ValidationError as e:
-                        print(">> Import URL error", url, str(e))
+                        logger.error(f">> Import URL error {url} {str(e)}")
                        flash(f"Error processing row number {row_i}, URL value was incorrect, row was skipped.", 'error')
                        # Don't bother processing anything else on this row
                        url = None
@@ -293,7 +294,7 @@ class import_xlsx_custom(Importer):
                    self.new_uuids.append(new_uuid)
                    good += 1
                except Exception as e:
-                    print(e)
+                    logger.error(e)
                    flash(f"Error processing row number {row_i}, check all cell data types are correct, row was skipped.", 'error')
            else:
                row_i += 1

View File

@@ -5,7 +5,9 @@ from changedetectionio.notification import (
    default_notification_title,
)
+# Equal to or greater than this number of FilterNotFoundInResponse exceptions will trigger a filter-not-found notification
_FILTER_FAILURE_THRESHOLD_ATTEMPTS_DEFAULT = 6
+DEFAULT_SETTINGS_HEADERS_USERAGENT='Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36'
class model(dict):
    base_config = {
@@ -22,6 +24,10 @@ class model(dict):
            'time_between_check': {'weeks': None, 'days': None, 'hours': 3, 'minutes': None, 'seconds': None},
            'timeout': int(getenv("DEFAULT_SETTINGS_REQUESTS_TIMEOUT", "45")),  # Default 45 seconds
            'workers': int(getenv("DEFAULT_SETTINGS_REQUESTS_WORKERS", "10")),  # Number of threads, lower is better for slow connections
+            'default_ua': {
+                'html_requests': getenv("DEFAULT_SETTINGS_HEADERS_USERAGENT", DEFAULT_SETTINGS_HEADERS_USERAGENT),
+                'html_webdriver': None,
+            }
        },
        'application': {
            # Custom notification content
@@ -41,6 +47,8 @@ class model(dict):
            'pager_size': 50,
            'password': False,
            'render_anchor_tag_content': False,
+            'rss_access_token': None,
+            'rss_hide_muted_watches': True,
            'schema_version' : 0,
            'shared_diff_access': False,
            'webdriver_delay': None , # Extra delay in seconds before extracting text

View File

@@ -1,16 +1,18 @@
-from distutils.util import strtobool
-import logging
+from changedetectionio.strtobool import strtobool
+from changedetectionio.safe_jinja import render as jinja_render
import os
import re
import time
import uuid
from pathlib import Path
+from loguru import logger
# Allowable protocols, protects against javascript: etc
# file:// is further checked by ALLOW_FILE_URI
SAFE_PROTOCOL_REGEX='^(http|https|ftp|file):'
-minimum_seconds_recheck_time = int(os.getenv('MINIMUM_SECONDS_RECHECK_TIME', 60))
+minimum_seconds_recheck_time = int(os.getenv('MINIMUM_SECONDS_RECHECK_TIME', 3))
mtable = {'seconds': 1, 'minutes': 60, 'hours': 3600, 'days': 86400, 'weeks': 86400 * 7}
from changedetectionio.notification import (
@@ -45,6 +47,7 @@ base_config = {
    'last_error': False,
    'last_viewed': 0,  # history key value of the last viewed via the [diff] link
    'method': 'GET',
+    'notification_alert_count': 0,
    # Custom notification content
    'notification_body': None,
    'notification_format': default_notification_format_for_watch,
@@ -56,6 +59,8 @@ base_config = {
    'previous_md5': False,
    'previous_md5_before_filters': False,  # Used for skipping changedetection entirely
    'proxy': None,  # Preferred proxy connection
+    'remote_server_reply': None,  # From 'server' reply header
+    'sort_text_alphabetically': False,
    'subtractive_selectors': [],
    'tag': '',  # Old system of text name for a tag, to be removed
    'tags': [],  # list of UUIDs to App.Tags
@@ -64,6 +69,7 @@ base_config = {
    # Requires setting to None on submit if it's the same as the default
    # Should be all None by default, so we use the system default in this case.
    'time_between_check': {'weeks': None, 'days': None, 'hours': None, 'minutes': None, 'seconds': None},
+    'time_between_check_use_default': True,
    'title': None,
    'trigger_text': [],  # List of text or regex to wait for until a change is detected
    'url': '',
@@ -122,7 +128,7 @@ class model(dict):
    def ensure_data_dir_exists(self):
        if not os.path.isdir(self.watch_data_dir):
-            print ("> Creating data dir {}".format(self.watch_data_dir))
+            logger.debug(f"> Creating data dir {self.watch_data_dir}")
            os.mkdir(self.watch_data_dir)
    @property
@@ -134,12 +140,11 @@ class model(dict):
        ready_url = url
        if '{%' in url or '{{' in url:
-            from jinja2 import Environment
            # Jinja2 available in URLs along with https://pypi.org/project/jinja2-time/
-            jinja2_env = Environment(extensions=['jinja2_time.TimeExtension'])
            try:
-                ready_url = str(jinja2_env.from_string(url).render())
+                ready_url = jinja_render(template_str=url)
            except Exception as e:
+                logger.critical(f"Invalid URL template for: '{url}' - {str(e)}")
                from flask import (
                    flash, Markup, url_for
                )
@@ -211,7 +216,7 @@ class model(dict):
        # Read the history file as a dict
        fname = os.path.join(self.watch_data_dir, "history.txt")
        if os.path.isfile(fname):
-            logging.debug("Reading history index " + str(time.time()))
+            logger.debug(f"Reading watch history index for {self.get('uuid')}")
            with open(fname, "r") as f:
                for i in f.readlines():
                    if ',' in i:
@@ -233,6 +238,8 @@ class model(dict):
        if len(tmp_history):
            self.__newest_history_key = list(tmp_history.keys())[-1]
+        else:
+            self.__newest_history_key = None
        self.__history_n = len(tmp_history)
@@ -246,10 +253,10 @@ class model(dict):
    @property
    def has_browser_steps(self):
        has_browser_steps = self.get('browser_steps') and list(filter(
            lambda s: (s['operation'] and len(s['operation']) and s['operation'] != 'Choose one' and s['operation'] != 'Goto site'),
            self.get('browser_steps')))
        return has_browser_steps
    # Returns the newest key, but if theres only 1 record, then it's counted as not being new, so return 0.
    @property
@@ -323,12 +330,9 @@ class model(dict):
    def save_history_text(self, contents, timestamp, snapshot_id):
        import brotli
-        self.ensure_data_dir_exists()
-        # Small hack so that we sleep just enough to allow 1 second between history snapshots
-        # this is because history.txt indexes/keys snapshots by epoch seconds and we dont want dupe keys
-        if self.__newest_history_key and int(timestamp) == int(self.__newest_history_key):
-            time.sleep(timestamp - self.__newest_history_key)
+        logger.trace(f"{self.get('uuid')} - Updating history.txt with timestamp {timestamp}")
+        self.ensure_data_dir_exists()
        threshold = int(os.getenv('SNAPSHOT_BROTLI_COMPRESSION_THRESHOLD', 1024))
        skip_brotli = strtobool(os.getenv('DISABLE_BROTLI_TEXT_SNAPSHOT', 'False'))
@@ -359,6 +363,7 @@ class model(dict):
        # @todo bump static cache of the last timestamp so we dont need to examine the file to set a proper ''viewed'' status
        return snapshot_fname
+    @property
    @property
    def has_empty_checktime(self):
        # using all() + dictionary comprehension
@@ -520,8 +525,42 @@ class model(dict):
        # None is set
        return False
-    def get_last_fetched_before_filters(self):
+    def save_error_text(self, contents):
+        self.ensure_data_dir_exists()
+        target_path = os.path.join(self.watch_data_dir, "last-error.txt")
+        with open(target_path, 'w') as f:
+            f.write(contents)
+    def save_xpath_data(self, data, as_error=False):
+        import json
+        if as_error:
+            target_path = os.path.join(self.watch_data_dir, "elements-error.json")
+        else:
+            target_path = os.path.join(self.watch_data_dir, "elements.json")
+        self.ensure_data_dir_exists()
+        with open(target_path, 'w') as f:
+            f.write(json.dumps(data))
+            f.close()
+    # Save as PNG, PNG is larger but better for doing visual diff in the future
+    def save_screenshot(self, screenshot: bytes, as_error=False):
+        if as_error:
+            target_path = os.path.join(self.watch_data_dir, "last-error-screenshot.png")
+        else:
+            target_path = os.path.join(self.watch_data_dir, "last-screenshot.png")
+        self.ensure_data_dir_exists()
+        with open(target_path, 'wb') as f:
+            f.write(screenshot)
+            f.close()
+    def get_last_fetched_text_before_filters(self):
        import brotli
        filepath = os.path.join(self.watch_data_dir, 'last-fetched.br')
@@ -536,12 +575,56 @@ class model(dict):
        with open(filepath, 'rb') as f:
            return(brotli.decompress(f.read()).decode('utf-8'))
-    def save_last_fetched_before_filters(self, contents):
+    def save_last_text_fetched_before_filters(self, contents):
        import brotli
        filepath = os.path.join(self.watch_data_dir, 'last-fetched.br')
        with open(filepath, 'wb') as f:
            f.write(brotli.compress(contents, mode=brotli.MODE_TEXT))
+    def save_last_fetched_html(self, timestamp, contents):
+        import brotli
+        self.ensure_data_dir_exists()
+        snapshot_fname = f"{timestamp}.html.br"
+        filepath = os.path.join(self.watch_data_dir, snapshot_fname)
+        with open(filepath, 'wb') as f:
+            contents = contents.encode('utf-8') if isinstance(contents, str) else contents
+            try:
+                f.write(brotli.compress(contents))
+            except Exception as e:
+                logger.warning(f"{self.get('uuid')} - Unable to compress snapshot, saving as raw data to {filepath}")
+                logger.warning(e)
+                f.write(contents)
+        self._prune_last_fetched_html_snapshots()
+    def get_fetched_html(self, timestamp):
+        import brotli
+        snapshot_fname = f"{timestamp}.html.br"
+        filepath = os.path.join(self.watch_data_dir, snapshot_fname)
+        if os.path.isfile(filepath):
+            with open(filepath, 'rb') as f:
+                return (brotli.decompress(f.read()).decode('utf-8'))
+        return False
+    def _prune_last_fetched_html_snapshots(self):
+        dates = list(self.history.keys())
+        dates.reverse()
+        for index, timestamp in enumerate(dates):
+            snapshot_fname = f"{timestamp}.html.br"
+            filepath = os.path.join(self.watch_data_dir, snapshot_fname)
+            # Keep only the first 2
+            if index > 1 and os.path.isfile(filepath):
+                os.remove(filepath)
    @property
    def get_browsersteps_available_screenshots(self):
        "For knowing which screenshots are available to show the user in BrowserSteps UI"

View File

@@ -1,7 +1,8 @@
import apprise
-from jinja2 import Environment, BaseLoader
+import time
from apprise import NotifyFormat
import json
+from loguru import logger
valid_tokens = {
    'base_url': '',
@@ -47,7 +48,7 @@ from apprise.decorators import notify
def apprise_custom_api_call_wrapper(body, title, notify_type, *args, **kwargs):
    import requests
    from apprise.utils import parse_url as apprise_parse_url
-    from apprise.URLBase import URLBase
+    from apprise import URLBase
    url = kwargs['meta'].get('url')
@@ -114,13 +115,13 @@ def apprise_custom_api_call_wrapper(body, title, notify_type, *args, **kwargs):
def process_notification(n_object, datastore):
+    from .safe_jinja import render as jinja_render
+    now = time.time()
+    if n_object.get('notification_timestamp'):
+        logger.trace(f"Time since queued {now-n_object['notification_timestamp']:.3f}s")
    # Insert variables into the notification content
    notification_parameters = create_notification_parameters(n_object, datastore)
-    # Get the notification body from datastore
-    jinja2_env = Environment(loader=BaseLoader)
-    n_body = jinja2_env.from_string(n_object.get('notification_body', default_notification_body)).render(**notification_parameters)
-    n_title = jinja2_env.from_string(n_object.get('notification_title', default_notification_title)).render(**notification_parameters)
    n_format = valid_notification_formats.get(
        n_object.get('notification_format', default_notification_format),
        valid_notification_formats[default_notification_format],
@@ -131,103 +132,119 @@ def process_notification(n_object, datastore):
        # Initially text or whatever
        n_format = datastore.data['settings']['application'].get('notification_format', valid_notification_formats[default_notification_format])
+    logger.trace(f"Complete notification body including Jinja and placeholders calculated in {time.time() - now:.3f}s")
    # https://github.com/caronc/apprise/wiki/Development_LogCapture
    # Anything higher than or equal to WARNING (which covers things like Connection errors)
    # raise it as an exception
-    apobjs=[]
-    sent_objs=[]
+    sent_objs = []
    from .apprise_asset import asset
-    for url in n_object['notification_urls']:
-        url = jinja2_env.from_string(url).render(**notification_parameters)
-        apobj = apprise.Apprise(debug=True, asset=asset)
-        url = url.strip()
-        if len(url):
-            print(">> Process Notification: AppRise notifying {}".format(url))
-            with apprise.LogCapture(level=apprise.logging.DEBUG) as logs:
-                # Re 323 - Limit discord length to their 2000 char limit total or it wont send.
-                # Because different notifications may require different pre-processing, run each sequentially :(
-                # 2000 bytes minus -
-                #     200 bytes for the overhead of the _entire_ json payload, 200 bytes for {tts, wait, content} etc headers
-                #     Length of URL - Incase they specify a longer custom avatar_url
-                # So if no avatar_url is specified, add one so it can be correctly calculated into the total payload
-                k = '?' if not '?' in url else '&'
-                if not 'avatar_url' in url \
-                        and not url.startswith('mail') \
-                        and not url.startswith('post') \
-                        and not url.startswith('get') \
-                        and not url.startswith('delete') \
-                        and not url.startswith('put'):
-                    url += k + 'avatar_url=https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/changedetectionio/static/images/avatar-256x256.png'
-                if url.startswith('tgram://'):
-                    # Telegram only supports a limit subset of HTML, remove the '<br>' we place in.
-                    # re https://github.com/dgtlmoon/changedetection.io/issues/555
-                    # @todo re-use an existing library we have already imported to strip all non-allowed tags
-                    n_body = n_body.replace('<br>', '\n')
-                    n_body = n_body.replace('</br>', '\n')
-                    # real limit is 4096, but minus some for extra metadata
-                    payload_max_size = 3600
-                    body_limit = max(0, payload_max_size - len(n_title))
-                    n_title = n_title[0:payload_max_size]
-                    n_body = n_body[0:body_limit]
-                elif url.startswith('discord://') or url.startswith('https://discordapp.com/api/webhooks') or url.startswith('https://discord.com/api'):
-                    # real limit is 2000, but minus some for extra metadata
-                    payload_max_size = 1700
-                    body_limit = max(0, payload_max_size - len(n_title))
-                    n_title = n_title[0:payload_max_size]
-                    n_body = n_body[0:body_limit]
-                elif url.startswith('mailto'):
-                    # Apprise will default to HTML, so we need to override it
-                    # So that whats' generated in n_body is in line with what is going to be sent.
-                    # https://github.com/caronc/apprise/issues/633#issuecomment-1191449321
-                    if not 'format=' in url and (n_format == 'Text' or n_format == 'Markdown'):
-                        prefix = '?' if not '?' in url else '&'
-                        # Apprise format is lowercase text https://github.com/caronc/apprise/issues/633
-                        n_format = n_format.tolower()
-                        url = "{}{}format={}".format(url, prefix, n_format)
-                    # If n_format == HTML, then apprise email should default to text/html and we should be sending HTML only
-                apobj.add(url)
-                apobj.notify(
-                    title=n_title,
-                    body=n_body,
-                    body_format=n_format,
-                    # False is not an option for AppRise, must be type None
-                    attach=n_object.get('screenshot', None)
-                )
-                apobj.clear()
-                # Incase it needs to exist in memory for a while after to process(?)
-                apobjs.append(apobj)
-                # Returns empty string if nothing found, multi-line string otherwise
-                log_value = logs.getvalue()
-                if log_value and 'WARNING' in log_value or 'ERROR' in log_value:
-                    raise Exception(log_value)
-                sent_objs.append({'title': n_title,
-                                  'body': n_body,
-                                  'url' : url,
-                                  'body_format': n_format})
+    apobj = apprise.Apprise(debug=True, asset=asset)
+    if not n_object.get('notification_urls'):
+        return None
+    with apprise.LogCapture(level=apprise.logging.DEBUG) as logs:
+        for url in n_object['notification_urls']:
+            # Get the notification body from datastore
+            n_body = jinja_render(template_str=n_object.get('notification_body', ''), **notification_parameters)
+            n_title = jinja_render(template_str=n_object.get('notification_title', ''), **notification_parameters)
+            url = url.strip()
+            if not url:
+                logger.warning(f"Process Notification: skipping empty notification URL.")
+                continue
+            logger.info(">> Process Notification: AppRise notifying {}".format(url))
+            url = jinja_render(template_str=url, **notification_parameters)
+            # Re 323 - Limit discord length to their 2000 char limit total or it wont send.
+            # Because different notifications may require different pre-processing, run each sequentially :(
+            # 2000 bytes minus -
+            #     200 bytes for the overhead of the _entire_ json payload, 200 bytes for {tts, wait, content} etc headers
+            #     Length of URL - Incase they specify a longer custom avatar_url
+            # So if no avatar_url is specified, add one so it can be correctly calculated into the total payload
+            k = '?' if not '?' in url else '&'
+            if not 'avatar_url' in url \
+                    and not url.startswith('mail') \
+                    and not url.startswith('post') \
+                    and not url.startswith('get') \
+                    and not url.startswith('delete') \
+                    and not url.startswith('put'):
+                url += k + 'avatar_url=https://raw.githubusercontent.com/dgtlmoon/changedetection.io/master/changedetectionio/static/images/avatar-256x256.png'
+            if url.startswith('tgram://'):
+                # Telegram only supports a limit subset of HTML, remove the '<br>' we place in.
+                # re https://github.com/dgtlmoon/changedetection.io/issues/555
+                # @todo re-use an existing library we have already imported to strip all non-allowed tags
+                n_body = n_body.replace('<br>', '\n')
+                n_body = n_body.replace('</br>', '\n')
+                # real limit is 4096, but minus some for extra metadata
+                payload_max_size = 3600
+                body_limit = max(0, payload_max_size - len(n_title))
+                n_title = n_title[0:payload_max_size]
+                n_body = n_body[0:body_limit]
+            elif url.startswith('discord://') or url.startswith('https://discordapp.com/api/webhooks') or url.startswith(
+                    'https://discord.com/api'):
+                # real limit is 2000, but minus some for extra metadata
+                payload_max_size = 1700
+                body_limit = max(0, payload_max_size - len(n_title))
+                n_title = n_title[0:payload_max_size]
+                n_body = n_body[0:body_limit]
+            elif url.startswith('mailto'):
+                # Apprise will default to HTML, so we need to override it
+                # So that whats' generated in n_body is in line with what is going to be sent.
+                # https://github.com/caronc/apprise/issues/633#issuecomment-1191449321
+                if not 'format=' in url and (n_format == 'Text' or n_format == 'Markdown'):
+                    prefix = '?' if not '?' in url else '&'
+                    # Apprise format is lowercase text https://github.com/caronc/apprise/issues/633
+                    n_format = n_format.lower()
+                    url = f"{url}{prefix}format={n_format}"
+                # If n_format == HTML, then apprise email should default to text/html and we should be sending HTML only
+            apobj.add(url)
+            sent_objs.append({'title': n_title,
+                              'body': n_body,
+                              'url': url,
+                              'body_format': n_format})
+        # Blast off the notifications tht are set in .add()
+        apobj.notify(
+            title=n_title,
+            body=n_body,
+            body_format=n_format,
+            # False is not an option for AppRise, must be type None
+            attach=n_object.get('screenshot', None)
+        )
+        # Give apprise time to register an error
+        time.sleep(3)
+        # Returns empty string if nothing found, multi-line string otherwise
+        log_value = logs.getvalue()
+        if log_value and 'WARNING' in log_value or 'ERROR' in log_value:
+            raise Exception(log_value)
    # Return what was sent for better logging - after the for loop
    return sent_objs
# Notification title + body content parameters get created here.
+# ( Where we prepare the tokens in the notification to be replaced with actual values )
def create_notification_parameters(n_object, datastore):
    from copy import deepcopy
    # in the case we send a test notification from the main settings, there is no UUID.
    uuid = n_object['uuid'] if 'uuid' in n_object else ''
-    if uuid != '':
+    if uuid:
        watch_title = datastore.data['watching'][uuid].get('title', '')
        tag_list = []
        tags = datastore.get_all_tags_for_watch(uuid)
@@ -255,7 +272,7 @@ def create_notification_parameters(n_object, datastore):
    tokens.update(
        {
            'base_url': base_url,
-            'current_snapshot': n_object['current_snapshot'] if 'current_snapshot' in n_object else '',
+            'current_snapshot': n_object.get('current_snapshot', ''),
            'diff': n_object.get('diff', ''),  # Null default in the case we use a test
            'diff_added': n_object.get('diff_added', ''),  # Null default in the case we use a test
            'diff_full': n_object.get('diff_full', ''),  # Null default in the case we use a test
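process_notification() now builds one apprise.Apprise() object, renders each URL, title and body through the sandboxed jinja_render helper, add()s every target and fires a single notify() at the end, with LogCapture promoting WARNING/ERROR output to an exception. A trimmed-down sketch of that flow (the URLs and token values are placeholders):

import apprise

tokens = {'watch_url': 'https://example.com'}
urls = ['mailto://user:pass@example.com', 'json://example.com/webhook']  # placeholders

apobj = apprise.Apprise(debug=True)
with apprise.LogCapture(level=apprise.logging.DEBUG) as logs:
    for url in urls:
        # per-URL tweaks (length limits, format= hints, avatar_url) happen here in the real code
        apobj.add(url)

    apobj.notify(title='Change detected',
                 body=f"{tokens['watch_url']} had a change.",
                 body_format='text')

    log_value = logs.getvalue()
    if log_value and ('WARNING' in log_value or 'ERROR' in log_value):
        raise Exception(log_value)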

View File

@@ -1,10 +1,11 @@
from abc import abstractmethod
-import os
-import hashlib
-import re
-from changedetectionio import content_fetcher
+from changedetectionio.strtobool import strtobool
+from changedetectionio.model import Watch
from copy import deepcopy
-from distutils.util import strtobool
+from loguru import logger
+import hashlib
+import os
+import re
class difference_detection_processor():
@@ -21,7 +22,7 @@ class difference_detection_processor():
        self.watch = deepcopy(self.datastore.data['watching'].get(watch_uuid))
    def call_browser(self):
+        from requests.structures import CaseInsensitiveDict
        # Protect against file:// access
        if re.search(r'^file://', self.watch.get('url', '').strip(), re.IGNORECASE):
            if not strtobool(os.getenv('ALLOW_FILE_URI', 'false')):
@@ -43,14 +44,14 @@ class difference_detection_processor():
        # In the case that the preferred fetcher was a browser config with custom connection URL..
        # @todo - on save watch, if its extra_browser_ then it should be obvious it will use playwright (like if its requests now..)
-        browser_connection_url = None
+        custom_browser_connection_url = None
        if prefer_fetch_backend.startswith('extra_browser_'):
            (t, key) = prefer_fetch_backend.split('extra_browser_')
            connection = list(
                filter(lambda s: (s['browser_name'] == key), self.datastore.data['settings']['requests'].get('extra_browsers', [])))
            if connection:
-                prefer_fetch_backend = 'base_html_playwright'
-                browser_connection_url = connection[0].get('browser_connection_url')
+                prefer_fetch_backend = 'html_webdriver'
+                custom_browser_connection_url = connection[0].get('browser_connection_url')
        # PDF should be html_requests because playwright will serve it up (so far) in a embedded page
        # @todo https://github.com/dgtlmoon/changedetection.io/issues/2019
@@ -59,22 +60,33 @@ class difference_detection_processor():
            prefer_fetch_backend = "html_requests"
        # Grab the right kind of 'fetcher', (playwright, requests, etc)
-        if hasattr(content_fetcher, prefer_fetch_backend):
-            fetcher_obj = getattr(content_fetcher, prefer_fetch_backend)
+        from changedetectionio import content_fetchers
+        if hasattr(content_fetchers, prefer_fetch_backend):
+            # @todo TEMPORARY HACK - SWITCH BACK TO PLAYWRIGHT FOR BROWSERSTEPS
+            if prefer_fetch_backend == 'html_webdriver' and self.watch.has_browser_steps:
+                # This is never supported in selenium anyway
+                logger.warning("Using playwright fetcher override for possible puppeteer request in browsersteps, because puppetteer:browser steps is incomplete.")
+                from changedetectionio.content_fetchers.playwright import fetcher as playwright_fetcher
+                fetcher_obj = playwright_fetcher
+            else:
+                fetcher_obj = getattr(content_fetchers, prefer_fetch_backend)
        else:
-            # If the klass doesnt exist, just use a default
-            fetcher_obj = getattr(content_fetcher, "html_requests")
+            # What it referenced doesnt exist, Just use a default
+            fetcher_obj = getattr(content_fetchers, "html_requests")
        proxy_url = None
        if preferred_proxy_id:
-            proxy_url = self.datastore.proxy_list.get(preferred_proxy_id).get('url')
-            print(f"Using proxy Key: {preferred_proxy_id} as Proxy URL {proxy_url}")
+            # Custom browser endpoints should NOT have a proxy added
+            if not prefer_fetch_backend.startswith('extra_browser_'):
+                proxy_url = self.datastore.proxy_list.get(preferred_proxy_id).get('url')
+                logger.debug(f"Selected proxy key '{preferred_proxy_id}' as proxy URL '{proxy_url}' for {url}")
+            else:
+                logger.debug(f"Skipping adding proxy data when custom Browser endpoint is specified. ")
        # Now call the fetcher (playwright/requests/etc) with arguments that only a fetcher would need.
        # When browser_connection_url is None, it method should default to working out whats the best defaults (os env vars etc)
        self.fetcher = fetcher_obj(proxy_override=proxy_url,
-                                   browser_connection_url=browser_connection_url
+                                   custom_browser_connection_url=custom_browser_connection_url
                                   )
        if self.watch.has_browser_steps:
@@ -82,7 +94,13 @@ class difference_detection_processor():
            self.fetcher.browser_steps_screenshot_path = os.path.join(self.datastore.datastore_path, self.watch.get('uuid'))
        # Tweak the base config with the per-watch ones
-        request_headers = self.watch.get('headers', [])
+        request_headers = CaseInsensitiveDict()
+        ua = self.datastore.data['settings']['requests'].get('default_ua')
+        if ua and ua.get(prefer_fetch_backend):
+            request_headers.update({'User-Agent': ua.get(prefer_fetch_backend)})
+        request_headers.update(self.watch.get('headers', {}))
        request_headers.update(self.datastore.get_all_base_headers())
        request_headers.update(self.datastore.get_all_headers_in_textfile_for_watch(uuid=self.watch.get('uuid')))
@@ -121,7 +139,7 @@ class difference_detection_processor():
    # After init, call run_changedetection() which will do the actual change-detection
    @abstractmethod
-    def run_changedetection(self, uuid, skip_when_checksum_same=True):
+    def run_changedetection(self, watch: Watch, skip_when_checksum_same=True):
        update_obj = {'last_notification_error': False, 'last_error': False}
        some_data = 'xxxxx'
        update_obj["previous_md5"] = hashlib.md5(some_data.encode('utf-8')).hexdigest()

View File

@@ -1,8 +1,8 @@
+from . import difference_detection_processor
+from loguru import logger
import hashlib
import urllib3
-from . import difference_detection_processor
-from copy import deepcopy
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
@@ -19,10 +19,7 @@ class perform_site_check(difference_detection_processor):
    screenshot = None
    xpath_data = None
-    def run_changedetection(self, uuid, skip_when_checksum_same=True):
-        # DeepCopy so we can be sure we don't accidently change anything by reference
-        watch = deepcopy(self.datastore.data['watching'].get(uuid))
+    def run_changedetection(self, watch, skip_when_checksum_same=True):
        if not watch:
            raise Exception("Watch no longer exists.")
@@ -43,11 +40,13 @@ class perform_site_check(difference_detection_processor):
            fetched_md5 = hashlib.md5(self.fetcher.instock_data.encode('utf-8')).hexdigest()
            # 'Possibly in stock' comes from stock-not-in-stock.js when no string found above the fold.
            update_obj["in_stock"] = True if self.fetcher.instock_data == 'Possibly in stock' else False
+            logger.debug(f"Watch UUID {watch.get('uuid')} restock check returned '{self.fetcher.instock_data}' from JS scraper.")
        else:
            raise UnableToExtractRestockData(status_code=self.fetcher.status_code)
        # The main thing that all this at the moment comes down to :)
        changed_detected = False
+        logger.debug(f"Watch UUID {watch.get('uuid')} restock check - Previous MD5: {watch.get('previous_md5')}, Fetched MD5 {fetched_md5}")
        if watch.get('previous_md5') and watch.get('previous_md5') != fetched_md5:
            # Yes if we only care about it going to instock, AND we are in stock
@@ -60,5 +59,4 @@ class perform_site_check(difference_detection_processor):
        # Always record the new checksum
        update_obj["previous_md5"] = fetched_md5
-        return changed_detected, update_obj, self.fetcher.instock_data.encode('utf-8')
+        return changed_detected, update_obj, self.fetcher.instock_data.encode('utf-8').strip()
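Restock detection above boils down to hashing the scraped instock text and comparing it with the stored previous_md5. A stripped-down version of that comparison:

import hashlib

def restock_changed(previous_md5: str, instock_data: str) -> bool:
    fetched_md5 = hashlib.md5(instock_data.encode('utf-8')).hexdigest()
    # Only counts as a change when there is an earlier checksum to compare against
    return bool(previous_md5) and previous_md5 != fetched_md5

print(restock_changed('', 'Possibly in stock'))  # False - the first check just records the hash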

View File

@@ -2,25 +2,26 @@
import hashlib import hashlib
import json import json
import logging
import os import os
import re import re
import urllib3 import urllib3
from changedetectionio import content_fetcher, html_tools
from changedetectionio.blueprint.price_data_follower import PRICE_DATA_TRACK_ACCEPT, PRICE_DATA_TRACK_REJECT
from copy import deepcopy
from . import difference_detection_processor from . import difference_detection_processor
from ..html_tools import PERL_STYLE_REGEX, cdata_in_document_to_text from ..html_tools import PERL_STYLE_REGEX, cdata_in_document_to_text
from changedetectionio import html_tools, content_fetchers
from changedetectionio.blueprint.price_data_follower import PRICE_DATA_TRACK_ACCEPT, PRICE_DATA_TRACK_REJECT
from loguru import logger
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
name = 'Webpage Text/HTML, JSON and PDF changes' name = 'Webpage Text/HTML, JSON and PDF changes'
description = 'Detects all text changes where possible' description = 'Detects all text changes where possible'
json_filter_prefixes = ['json:', 'jq:'] json_filter_prefixes = ['json:', 'jq:', 'jqraw:']
class FilterNotFoundInResponse(ValueError): class FilterNotFoundInResponse(ValueError):
def __init__(self, msg): def __init__(self, msg, screenshot=None, xpath_data=None):
self.screenshot = screenshot
self.xpath_data = xpath_data
ValueError.__init__(self, msg) ValueError.__init__(self, msg)
@@ -33,14 +34,12 @@ class PDFToHTMLToolNotFound(ValueError):
# (set_proxy_from_list) # (set_proxy_from_list)
class perform_site_check(difference_detection_processor): class perform_site_check(difference_detection_processor):
def run_changedetection(self, uuid, skip_when_checksum_same=True): def run_changedetection(self, watch, skip_when_checksum_same=True):
changed_detected = False changed_detected = False
html_content = "" html_content = ""
screenshot = False # as bytes screenshot = False # as bytes
stripped_text_from_html = "" stripped_text_from_html = ""
# DeepCopy so we can be sure we don't accidently change anything by reference
watch = deepcopy(self.datastore.data['watching'].get(uuid))
if not watch: if not watch:
raise Exception("Watch no longer exists.") raise Exception("Watch no longer exists.")
@@ -60,7 +59,7 @@ class perform_site_check(difference_detection_processor):
update_obj['previous_md5_before_filters'] = hashlib.md5(self.fetcher.content.encode('utf-8')).hexdigest() update_obj['previous_md5_before_filters'] = hashlib.md5(self.fetcher.content.encode('utf-8')).hexdigest()
if skip_when_checksum_same: if skip_when_checksum_same:
if update_obj['previous_md5_before_filters'] == watch.get('previous_md5_before_filters'): if update_obj['previous_md5_before_filters'] == watch.get('previous_md5_before_filters'):
raise content_fetcher.checksumFromPreviousCheckWasTheSame() raise content_fetchers.exceptions.checksumFromPreviousCheckWasTheSame()
# Fetching complete, now filters # Fetching complete, now filters
@@ -115,10 +114,12 @@ class perform_site_check(difference_detection_processor):
# Better would be if Watch.model could access the global data also # Better would be if Watch.model could access the global data also
# and then use getattr https://docs.python.org/3/reference/datamodel.html#object.__getitem__ # and then use getattr https://docs.python.org/3/reference/datamodel.html#object.__getitem__
# https://realpython.com/inherit-python-dict/ instead of doing it procedurely # https://realpython.com/inherit-python-dict/ instead of doing it procedurely
include_filters_from_tags = self.datastore.get_tag_overrides_for_watch(uuid=uuid, attr='include_filters') include_filters_from_tags = self.datastore.get_tag_overrides_for_watch(uuid=watch.get('uuid'), attr='include_filters')
include_filters_rule = [*watch.get('include_filters', []), *include_filters_from_tags]
subtractive_selectors = [*self.datastore.get_tag_overrides_for_watch(uuid=uuid, attr='subtractive_selectors'), # 1845 - remove duplicated filters in both group and watch include filter
include_filters_rule = list(dict.fromkeys(watch.get('include_filters', []) + include_filters_from_tags))
subtractive_selectors = [*self.datastore.get_tag_overrides_for_watch(uuid=watch.get('uuid'), attr='subtractive_selectors'),
*watch.get("subtractive_selectors", []), *watch.get("subtractive_selectors", []),
*self.datastore.data["settings"]["application"].get("global_subtractive_selectors", []) *self.datastore.data["settings"]["application"].get("global_subtractive_selectors", [])
] ]
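
The merged include filters (watch plus tag group overrides) are now de-duplicated while keeping their original order; dict.fromkeys is the standard idiom for that. For example:

watch_filters = ['.price', 'xpath://div[@id="stock"]']
tag_filters = ['.price', '.title']
include_filters_rule = list(dict.fromkeys(watch_filters + tag_filters))
# ['.price', 'xpath://div[@id="stock"]', '.title']  (first occurrence wins)
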
@@ -185,7 +186,7 @@ class perform_site_check(difference_detection_processor):
append_pretty_line_formatting=not watch.is_source_type_url) append_pretty_line_formatting=not watch.is_source_type_url)
if not html_content.strip(): if not html_content.strip():
raise FilterNotFoundInResponse(include_filters_rule) raise FilterNotFoundInResponse(msg=include_filters_rule, screenshot=self.fetcher.screenshot, xpath_data=self.fetcher.xpath_data)
if has_subtractive_selectors: if has_subtractive_selectors:
html_content = html_tools.element_removal(subtractive_selectors, html_content) html_content = html_tools.element_removal(subtractive_selectors, html_content)
@@ -202,6 +203,12 @@ class perform_site_check(difference_detection_processor):
is_rss=is_rss # #1874 activate the <title workaround hack is_rss=is_rss # #1874 activate the <title workaround hack
) )
if watch.get('sort_text_alphabetically') and stripped_text_from_html:
# Note: Because a <p>something</p> will add an extra line feed to signify the paragraph gap
# we end up with 'Some text\n\n', sorting will add all those extra \n at the start, so we remove them here.
stripped_text_from_html = stripped_text_from_html.replace('\n\n', '\n')
stripped_text_from_html = '\n'.join( sorted(stripped_text_from_html.splitlines(), key=lambda x: x.lower() ))
# Re #340 - return the content before the 'ignore text' was applied # Re #340 - return the content before the 'ignore text' was applied
text_content_before_ignored_filter = stripped_text_from_html.encode('utf-8') text_content_before_ignored_filter = stripped_text_from_html.encode('utf-8')
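
The optional alphabetical sort first collapses the doubled line feeds that the HTML-to-text step emits for paragraph gaps, then sorts case-insensitively. Roughly:

text = "Zebra\n\napple\n\nMango\n"
text = text.replace('\n\n', '\n')
text = '\n'.join(sorted(text.splitlines(), key=lambda x: x.lower()))
# -> "apple\nMango\nZebra"
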
@@ -213,7 +220,7 @@ class perform_site_check(difference_detection_processor):
from .. import diff from .. import diff
# needs to not include (added) etc or it may get used twice # needs to not include (added) etc or it may get used twice
# Replace the processed text with the preferred result # Replace the processed text with the preferred result
rendered_diff = diff.render_diff(previous_version_file_contents=watch.get_last_fetched_before_filters(), rendered_diff = diff.render_diff(previous_version_file_contents=watch.get_last_fetched_text_before_filters(),
newest_version_file_contents=stripped_text_from_html, newest_version_file_contents=stripped_text_from_html,
include_equal=False, # not the same lines include_equal=False, # not the same lines
include_added=watch.get('filter_text_added', True), include_added=watch.get('filter_text_added', True),
@@ -222,7 +229,7 @@ class perform_site_check(difference_detection_processor):
line_feed_sep="\n", line_feed_sep="\n",
include_change_type_prefix=False) include_change_type_prefix=False)
watch.save_last_fetched_before_filters(text_content_before_ignored_filter) watch.save_last_text_fetched_before_filters(text_content_before_ignored_filter)
if not rendered_diff and stripped_text_from_html: if not rendered_diff and stripped_text_from_html:
# We had some content, but no differences were found # We had some content, but no differences were found
@@ -235,11 +242,12 @@ class perform_site_check(difference_detection_processor):
# Treat pages with no renderable text content as a change? No by default # Treat pages with no renderable text content as a change? No by default
empty_pages_are_a_change = self.datastore.data['settings']['application'].get('empty_pages_are_a_change', False) empty_pages_are_a_change = self.datastore.data['settings']['application'].get('empty_pages_are_a_change', False)
if not is_json and not empty_pages_are_a_change and len(stripped_text_from_html.strip()) == 0: if not is_json and not empty_pages_are_a_change and len(stripped_text_from_html.strip()) == 0:
raise content_fetcher.ReplyWithContentButNoText(url=url, raise content_fetchers.exceptions.ReplyWithContentButNoText(url=url,
status_code=self.fetcher.get_last_status_code(), status_code=self.fetcher.get_last_status_code(),
screenshot=screenshot, screenshot=self.fetcher.screenshot,
has_filters=has_filter_rule, has_filters=has_filter_rule,
html_content=html_content html_content=html_content,
xpath_data=self.fetcher.xpath_data
) )
# We rely on the actual text in the html output.. many sites have random script vars etc, # We rely on the actual text in the html output.. many sites have random script vars etc,
@@ -335,15 +343,17 @@ class perform_site_check(difference_detection_processor):
if not watch['title'] or not len(watch['title']): if not watch['title'] or not len(watch['title']):
update_obj['title'] = html_tools.extract_element(find='title', html_content=self.fetcher.content) update_obj['title'] = html_tools.extract_element(find='title', html_content=self.fetcher.content)
logger.debug(f"Watch UUID {watch.get('uuid')} content check - Previous MD5: {watch.get('previous_md5')}, Fetched MD5 {fetched_md5}")
if changed_detected: if changed_detected:
if watch.get('check_unique_lines', False): if watch.get('check_unique_lines', False):
has_unique_lines = watch.lines_contain_something_unique_compared_to_history(lines=stripped_text_from_html.splitlines()) has_unique_lines = watch.lines_contain_something_unique_compared_to_history(lines=stripped_text_from_html.splitlines())
# One or more lines? unsure? # One or more lines? unsure?
if not has_unique_lines: if not has_unique_lines:
logging.debug("check_unique_lines: UUID {} didnt have anything new setting change_detected=False".format(uuid)) logger.debug(f"check_unique_lines: UUID {watch.get('uuid')} didnt have anything new setting change_detected=False")
changed_detected = False changed_detected = False
else: else:
logging.debug("check_unique_lines: UUID {} had unique content".format(uuid)) logger.debug(f"check_unique_lines: UUID {watch.get('uuid')} had unique content")
# Always record the new checksum # Always record the new checksum
update_obj["previous_md5"] = fetched_md5 update_obj["previous_md5"] = fetched_md5
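
The check_unique_lines guard above only keeps changed_detected=True when at least one fetched line has never appeared in the watch history. A simplified sketch of that idea (illustrative, not the Watch model's actual method):

def lines_contain_something_unique(new_lines, history_lines):
    seen = {line.strip() for line in history_lines}
    return any(line.strip() not in seen for line in new_lines)

# lines_contain_something_unique(['a', 'b'], ['a'])  ->  True (the 'b' line is new)
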


@@ -1,118 +0,0 @@
function isItemInStock() {
// @todo Pass these in so the same list can be used in non-JS fetchers
const outOfStockTexts = [
' أخبرني عندما يتوفر',
'0 in stock',
'agotado',
'artikel zurzeit vergriffen',
'as soon as stock is available',
'ausverkauft', // sold out
'available for back order',
'back-order or out of stock',
'backordered',
'benachrichtigt mich', // notify me
'brak na stanie',
'brak w magazynie',
'coming soon',
'currently have any tickets for this',
'currently unavailable',
'dostępne wkrótce',
'dostępne wkrótce',
'en rupture de stock',
'ist derzeit nicht auf lager',
'ist derzeit nicht auf lager',
'item is no longer available',
'let me know when it\'s available',
'message if back in stock',
'nachricht bei',
'nicht auf lager',
'nicht lieferbar',
'nicht zur verfügung',
'niet leverbaar',
'niet beschikbaar',
'no disponible temporalmente',
'no longer in stock',
'no tickets available',
'not available',
'not currently available',
'not in stock',
'notify me when available',
'não estamos a aceitar encomendas',
'out of stock',
'out-of-stock',
'produkt niedostępny',
'sold out',
'sold-out',
'temporarily out of stock',
'temporarily unavailable',
'tickets unavailable',
'tijdelijk uitverkocht',
'unavailable tickets',
'we do not currently have an estimate of when this product will be back in stock.',
'zur zeit nicht an lager',
'品切れ',
'已售完',
'품절'
];
const negateOutOfStockRegexs = [
'[0-9] in stock'
]
var negateOutOfStockRegexs_r = [];
for (let i = 0; i < negateOutOfStockRegexs.length; i++) {
negateOutOfStockRegexs_r.push(new RegExp(negateOutOfStockRegexs[0], 'g'));
}
const elementsWithZeroChildren = Array.from(document.getElementsByTagName('*')).filter(element => element.children.length === 0);
// REGEXS THAT REALLY MEAN IT'S IN STOCK
for (let i = elementsWithZeroChildren.length - 1; i >= 0; i--) {
const element = elementsWithZeroChildren[i];
if (element.offsetWidth > 0 || element.offsetHeight > 0 || element.getClientRects().length > 0) {
var elementText="";
if (element.tagName.toLowerCase() === "input") {
elementText = element.value.toLowerCase();
} else {
elementText = element.textContent.toLowerCase();
}
if (elementText.length) {
// try which ones could mean its in stock
for (let i = 0; i < negateOutOfStockRegexs.length; i++) {
if (negateOutOfStockRegexs_r[i].test(elementText)) {
return 'Possibly in stock';
}
}
}
}
}
// OTHER STUFF THAT COULD BE THAT IT'S OUT OF STOCK
for (let i = elementsWithZeroChildren.length - 1; i >= 0; i--) {
const element = elementsWithZeroChildren[i];
if (element.offsetWidth > 0 || element.offsetHeight > 0 || element.getClientRects().length > 0) {
var elementText="";
if (element.tagName.toLowerCase() === "input") {
elementText = element.value.toLowerCase();
} else {
elementText = element.textContent.toLowerCase();
}
if (elementText.length) {
// and these mean its out of stock
for (const outOfStockText of outOfStockTexts) {
if (elementText.includes(outOfStockText)) {
return elementText; // item is out of stock
}
}
}
}
}
return 'Possibly in stock'; // possibly in stock, cant decide otherwise.
}
// returns the element text that makes it think it's out of stock
return isItemInStock();
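
The removed scraper above classifies visible element text against known out-of-stock phrases, with a small set of regexes that override the decision when they indicate stock (for example "3 in stock"). The same idea in a short Python sketch (illustrative, trimmed phrase list, not the replacement implementation):

import re

OUT_OF_STOCK_TEXTS = ['out of stock', 'sold out', 'ausverkauft', 'agotado', 'brak w magazynie']
IN_STOCK_REGEXES = [re.compile(r'[0-9] in stock')]

def classify(element_text):
    text = element_text.lower()
    if any(r.search(text) for r in IN_STOCK_REGEXES):
        return 'Possibly in stock'
    for phrase in OUT_OF_STOCK_TEXTS:
        if phrase in text:
            return phrase   # the phrase that marked it as out of stock
    return 'Possibly in stock'
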


@@ -2,20 +2,22 @@
# run some tests and look if the 'custom-browser-search-string=1' connect string appeared in the correct containers # run some tests and look if the 'custom-browser-search-string=1' connect string appeared in the correct containers
# @todo do it again but with the puppeteer one
# enable debug # enable debug
set -x set -x
# A extra browser is configured, but we never chose to use it, so it should NOT show in the logs # A extra browser is configured, but we never chose to use it, so it should NOT show in the logs
docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/custom_browser_url/test_custom_browser_url.py::test_request_not_via_custom_browser_url' docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/custom_browser_url/test_custom_browser_url.py::test_request_not_via_custom_browser_url'
docker logs browserless-custom-url &>log.txt docker logs sockpuppetbrowser-custom-url &>log-custom.txt
grep 'custom-browser-search-string=1' log.txt grep 'custom-browser-search-string=1' log-custom.txt
if [ $? -ne 1 ] if [ $? -ne 1 ]
then then
echo "Saw a request in 'browserless-custom-url' container with 'custom-browser-search-string=1' when I should not" echo "Saw a request in 'sockpuppetbrowser-custom-url' container with 'custom-browser-search-string=1' when I should not - log-custom.txt"
exit 1 exit 1
fi fi
docker logs browserless &>log.txt docker logs sockpuppetbrowser &>log.txt
grep 'custom-browser-search-string=1' log.txt grep 'custom-browser-search-string=1' log.txt
if [ $? -ne 1 ] if [ $? -ne 1 ]
then then
@@ -24,16 +26,16 @@ then
fi fi
# Special connect string should appear in the custom-url container, but not in the 'default' one # Special connect string should appear in the custom-url container, but not in the 'default' one
docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/custom_browser_url/test_custom_browser_url.py::test_request_via_custom_browser_url' docker run --rm -e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" --network changedet-network test-changedetectionio bash -c 'cd changedetectionio;pytest tests/custom_browser_url/test_custom_browser_url.py::test_request_via_custom_browser_url'
docker logs browserless-custom-url &>log.txt docker logs sockpuppetbrowser-custom-url &>log-custom.txt
grep 'custom-browser-search-string=1' log.txt grep 'custom-browser-search-string=1' log-custom.txt
if [ $? -ne 0 ] if [ $? -ne 0 ]
then then
echo "Did not see request in 'browserless-custom-url' container with 'custom-browser-search-string=1' when I should" echo "Did not see request in 'sockpuppetbrowser-custom-url' container with 'custom-browser-search-string=1' when I should - log-custom.txt"
exit 1 exit 1
fi fi
docker logs browserless &>log.txt docker logs sockpuppetbrowser &>log.txt
grep 'custom-browser-search-string=1' log.txt grep 'custom-browser-search-string=1' log.txt
if [ $? -ne 1 ] if [ $? -ne 1 ]
then then


@@ -10,41 +10,7 @@ set -x
docker run --network changedet-network -d --name squid-one --hostname squid-one --rm -v `pwd`/tests/proxy_list/squid.conf:/etc/squid/conf.d/debian.conf ubuntu/squid:4.13-21.10_edge docker run --network changedet-network -d --name squid-one --hostname squid-one --rm -v `pwd`/tests/proxy_list/squid.conf:/etc/squid/conf.d/debian.conf ubuntu/squid:4.13-21.10_edge
docker run --network changedet-network -d --name squid-two --hostname squid-two --rm -v `pwd`/tests/proxy_list/squid.conf:/etc/squid/conf.d/debian.conf ubuntu/squid:4.13-21.10_edge docker run --network changedet-network -d --name squid-two --hostname squid-two --rm -v `pwd`/tests/proxy_list/squid.conf:/etc/squid/conf.d/debian.conf ubuntu/squid:4.13-21.10_edge
# SOCKS5 related - start simple Socks5 proxy server # Used for configuring a custom proxy URL via the UI - with username+password auth
# SOCKSTEST=xyz should show in the logs of this service to confirm it fetched
docker run --network changedet-network -d --hostname socks5proxy --name socks5proxy -p 1080:1080 -e PROXY_USER=proxy_user123 -e PROXY_PASSWORD=proxy_pass123 serjs/go-socks5-proxy
docker run --network changedet-network -d --hostname socks5proxy-noauth -p 1081:1080 --name socks5proxy-noauth serjs/go-socks5-proxy
echo "---------------------------------- SOCKS5 -------------------"
# SOCKS5 related - test from proxies.json
docker run --network changedet-network \
-v `pwd`/tests/proxy_socks5/proxies.json-example:/app/changedetectionio/test-datastore/proxies.json \
--rm \
-e "SOCKSTEST=proxiesjson" \
test-changedetectionio \
bash -c 'cd changedetectionio && pytest tests/proxy_socks5/test_socks5_proxy_sources.py'
# SOCKS5 related - by manually entering in UI
docker run --network changedet-network \
--rm \
-e "SOCKSTEST=manual" \
test-changedetectionio \
bash -c 'cd changedetectionio && pytest tests/proxy_socks5/test_socks5_proxy.py'
# SOCKS5 related - test from proxies.json via playwright - NOTE- PLAYWRIGHT DOESNT SUPPORT AUTHENTICATING PROXY
docker run --network changedet-network \
-e "SOCKSTEST=manual-playwright" \
-v `pwd`/tests/proxy_socks5/proxies.json-example-noauth:/app/changedetectionio/test-datastore/proxies.json \
-e "PLAYWRIGHT_DRIVER_URL=ws://browserless:3000" \
--rm \
test-changedetectionio \
bash -c 'cd changedetectionio && pytest tests/proxy_socks5/test_socks5_proxy_sources.py'
echo "socks5 server logs"
docker logs socks5proxy
echo "----------------------------------"
# Used for configuring a custom proxy URL via the UI
docker run --network changedet-network -d \ docker run --network changedet-network -d \
--name squid-custom \ --name squid-custom \
--hostname squid-custom \ --hostname squid-custom \
@@ -60,15 +26,17 @@ docker run --network changedet-network \
test-changedetectionio \ test-changedetectionio \
bash -c 'cd changedetectionio && pytest tests/proxy_list/test_multiple_proxy.py' bash -c 'cd changedetectionio && pytest tests/proxy_list/test_multiple_proxy.py'
set +e
## Should be a request in the default "first" squid echo "- Looking for chosen.changedetection.io request in squid-one - it should NOT be here"
docker logs squid-one 2>/dev/null|grep chosen.changedetection.io docker logs squid-one 2>/dev/null|grep chosen.changedetection.io
if [ $? -ne 0 ] if [ $? -ne 1 ]
then then
echo "Did not see a request to chosen.changedetection.io in the squid logs (while checking preferred proxy - squid one)" echo "Saw a request to chosen.changedetection.io in the squid logs (while checking preferred proxy - squid one) WHEN I SHOULD NOT"
exit 1 exit 1
fi fi
set -e
echo "- Looking for chosen.changedetection.io request in squid-two"
# And one in the 'second' squid (user selects this as preferred) # And one in the 'second' squid (user selects this as preferred)
docker logs squid-two 2>/dev/null|grep chosen.changedetection.io docker logs squid-two 2>/dev/null|grep chosen.changedetection.io
if [ $? -ne 0 ] if [ $? -ne 0 ]
@@ -77,7 +45,6 @@ then
exit 1 exit 1
fi fi
# Test the UI configurable proxies # Test the UI configurable proxies
docker run --network changedet-network \ docker run --network changedet-network \
test-changedetectionio \ test-changedetectionio \
@@ -85,6 +52,7 @@ docker run --network changedet-network \
# Should see a request for one.changedetection.io in there # Should see a request for one.changedetection.io in there
echo "- Looking for .changedetection.io request in squid-custom"
docker logs squid-custom 2>/dev/null|grep "TCP_TUNNEL.200.*changedetection.io" docker logs squid-custom 2>/dev/null|grep "TCP_TUNNEL.200.*changedetection.io"
if [ $? -ne 0 ] if [ $? -ne 0 ]
then then
@@ -101,7 +69,7 @@ docker run --network changedet-network \
set +e set +e
# Check request was never seen in any container # Check request was never seen in any container
for c in $(echo "squid-one squid-two squid-custom"); do for c in $(echo "squid-one squid-two squid-custom"); do
echo Checking $c echo ....Checking $c
docker logs $c &> $c.txt docker logs $c &> $c.txt
grep noproxy $c.txt grep noproxy $c.txt
if [ $? -ne 1 ] if [ $? -ne 1 ]


@@ -0,0 +1,43 @@
#!/bin/bash
# exit when any command fails
set -e
# enable debug
set -x
# SOCKS5 related - start simple Socks5 proxy server
# SOCKSTEST=xyz should show in the logs of this service to confirm it fetched
docker run --network changedet-network -d --hostname socks5proxy --rm --name socks5proxy -p 1080:1080 -e PROXY_USER=proxy_user123 -e PROXY_PASSWORD=proxy_pass123 serjs/go-socks5-proxy
docker run --network changedet-network -d --hostname socks5proxy-noauth --rm -p 1081:1080 --name socks5proxy-noauth serjs/go-socks5-proxy
echo "---------------------------------- SOCKS5 -------------------"
# SOCKS5 related - test from proxies.json
docker run --network changedet-network \
-v `pwd`/tests/proxy_socks5/proxies.json-example:/app/changedetectionio/test-datastore/proxies.json \
--rm \
-e "SOCKSTEST=proxiesjson" \
test-changedetectionio \
bash -c 'cd changedetectionio && pytest tests/proxy_socks5/test_socks5_proxy_sources.py'
# SOCKS5 related - by manually entering in UI
docker run --network changedet-network \
--rm \
-e "SOCKSTEST=manual" \
test-changedetectionio \
bash -c 'cd changedetectionio && pytest tests/proxy_socks5/test_socks5_proxy.py'
# SOCKS5 related - test from proxies.json via playwright - NOTE- PLAYWRIGHT DOESNT SUPPORT AUTHENTICATING PROXY
docker run --network changedet-network \
-e "SOCKSTEST=manual-playwright" \
-v `pwd`/tests/proxy_socks5/proxies.json-example-noauth:/app/changedetectionio/test-datastore/proxies.json \
-e "PLAYWRIGHT_DRIVER_URL=ws://sockpuppetbrowser:3000" \
--rm \
test-changedetectionio \
bash -c 'cd changedetectionio && pytest tests/proxy_socks5/test_socks5_proxy_sources.py'
echo "socks5 server logs"
docker logs socks5proxy
echo "----------------------------------"
docker kill socks5proxy socks5proxy-noauth


@@ -0,0 +1,18 @@
"""
Safe Jinja2 render with max payload sizes
See https://jinja.palletsprojects.com/en/3.1.x/sandbox/#security-considerations
"""
import jinja2.sandbox
import typing as t
import os
JINJA2_MAX_RETURN_PAYLOAD_SIZE = 1024 * int(os.getenv("JINJA2_MAX_RETURN_PAYLOAD_SIZE_KB", 1024 * 10))
def render(template_str, **args: t.Any) -> str:
jinja2_env = jinja2.sandbox.ImmutableSandboxedEnvironment(extensions=['jinja2_time.TimeExtension'])
output = jinja2_env.from_string(template_str).render(args)
return output[:JINJA2_MAX_RETURN_PAYLOAD_SIZE]
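
A usage sketch for the sandboxed render helper above: the ImmutableSandboxedEnvironment blocks template code from reaching unsafe Python attributes, and the result is capped at JINJA2_MAX_RETURN_PAYLOAD_SIZE characters.

# assuming this module is importable, e.g. from changedetectionio import safe_jinja
rendered = render("Price is now {{ price }} EUR", price=42)
# -> "Price is now 42 EUR"
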

(Binary image not shown; previous version: 22 KiB)

@@ -0,0 +1,44 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
aria-hidden="true"
viewBox="0 0 19.966091 17.999964"
class="css-1oqmxjn"
version="1.1"
id="svg4"
sodipodi:docname="steps.svg"
width="19.966091"
height="17.999964"
inkscape:version="1.1.2 (0a00cf5339, 2022-02-04)"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
<defs
id="defs8" />
<sodipodi:namedview
id="namedview6"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageshadow="2"
inkscape:pageopacity="0.0"
inkscape:pagecheckerboard="0"
showgrid="false"
fit-margin-top="0"
fit-margin-left="0"
fit-margin-right="0"
fit-margin-bottom="0"
inkscape:zoom="8.6354167"
inkscape:cx="-1.3896261"
inkscape:cy="6.1375151"
inkscape:window-width="1280"
inkscape:window-height="667"
inkscape:window-x="2419"
inkscape:window-y="250"
inkscape:window-maximized="0"
inkscape:current-layer="svg4" />
<path
d="m 16.95807,12.000003 c -0.7076,0.0019 -1.3917,0.2538 -1.9316,0.7113 -0.5398,0.4575 -0.9005,1.091 -1.0184,1.7887 H 5.60804 c -0.80847,0.0297 -1.60693,-0.1865 -2.29,-0.62 -0.26632,-0.1847 -0.48375,-0.4315 -0.63356,-0.7189 -0.14982,-0.2874 -0.22753,-0.607 -0.22644,-0.9311 -0.02843,-0.3931 0.03646,-0.7873 0.1894,-1.1505 0.15293,-0.3632 0.38957,-0.6851 0.6906,-0.9395 0.66628,-0.4559004 1.4637,-0.6807004 2.27,-0.6400004 h 8.35003 c 0.8515,-0.0223 1.6727,-0.3206 2.34,-0.85 0.3971,-0.3622 0.7076,-0.8091 0.9084,-1.3077 0.2008,-0.49857 0.2868,-1.03596 0.2516,-1.57229 0.0113,-0.47161 -0.0887,-0.93924 -0.292,-1.36493 -0.2033,-0.4257 -0.5041,-0.79745 -0.878,-1.08507 -0.7801,-0.55815 -1.7212,-0.84609 -2.68,-0.82 H 5.95804 c -0.12537,-0.7417 -0.5248,-1.40924 -1.11913,-1.87032996 -0.59434,-0.46108 -1.3402,-0.68207 -2.08979,-0.61917 -0.74958,0.06291 -1.44818,0.40512 -1.95736,0.95881 C 0.28259,1.5230126 0,2.2477926 0,3.0000126 c 0,0.75222 0.28259,1.47699 0.79176,2.03068 0.50918,0.55369 1.20778,0.8959 1.95736,0.95881 0.74959,0.0629 1.49545,-0.15808 2.08979,-0.61917 0.59433,-0.46109 0.99376,-1.12863 1.11913,-1.87032 h 7.70003 c 0.7353,-0.03061 1.4599,0.18397 2.06,0.61 0.2548,0.19335 0.4595,0.445 0.597,0.73385 0.1375,0.28884 0.2036,0.60644 0.193,0.92615 0.0316,0.38842 -0.0247,0.77898 -0.165,1.14258 -0.1402,0.36361 -0.3607,0.69091 -0.645,0.95741 -0.5713,0.4398 -1.2799,0.663 -2,0.63 H 5.69804 c -1.03259,-0.0462 -2.05065,0.2568 -2.89,0.86 -0.43755,0.3361 -0.78838,0.7720004 -1.02322,1.2712004 -0.23484,0.4993 -0.34688,1.0474 -0.32678,1.5988 -0.00726,0.484 0.10591,0.9622 0.32934,1.3916 0.22344,0.4295 0.55012,0.7966 0.95066,1.0684 0.85039,0.5592 1.85274,0.8421 2.87,0.81 h 8.40003 c 0.0954,0.5643 0.3502,1.0896 0.7343,1.5138 0.3842,0.4242 0.8817,0.7297 1.4338,0.8803 0.5521,0.1507 1.1358,0.1403 1.6822,-0.0299 0.5464,-0.1702 1.0328,-0.4932 1.4016,-0.9308 0.3688,-0.4376 0.6048,-0.9716 0.6801,-1.5389 0.0752,-0.5673 -0.0134,-1.1444 -0.2554,-1.663 -0.242,-0.5186 -0.6273,-0.9572 -1.1104,-1.264 -0.4831,-0.3068 -1.0439,-0.469 -1.6162,-0.4675 z m 0,5 c -0.3956,0 -0.7823,-0.1173 -1.1112,-0.3371 -0.3289,-0.2197 -0.5852,-0.5321 -0.7366,-0.8975 -0.1514,-0.3655 -0.191,-0.7676 -0.1138,-1.1556 0.0772,-0.3879 0.2677,-0.7443 0.5474,-1.024 0.2797,-0.2797 0.636,-0.4702 1.024,-0.5474 0.388,-0.0771 0.7901,-0.0375 1.1555,0.1138 0.3655,0.1514 0.6778,0.4078 0.8976,0.7367 0.2198,0.3289 0.3371,0.7155 0.3371,1.1111 0,0.5304 -0.2107,1.0391 -0.5858,1.4142 -0.3751,0.3751 -0.8838,0.5858 -1.4142,0.5858 z"
id="path2"
style="fill:#777777;fill-opacity:1" />
</svg>

(Image preview omitted; new version: 3.7 KiB)


@@ -1,16 +1,7 @@
$(document).ready(function () { $(document).ready(function () {
// duplicate
var csrftoken = $('input[name=csrf_token]').val();
$.ajaxSetup({
beforeSend: function (xhr, settings) {
if (!/^(GET|HEAD|OPTIONS|TRACE)$/i.test(settings.type) && !this.crossDomain) {
xhr.setRequestHeader("X-CSRFToken", csrftoken)
}
}
})
var browsersteps_session_id; var browsersteps_session_id;
var browserless_seconds_remaining = 0; var browser_interface_seconds_remaining = 0;
var apply_buttons_disabled = false; var apply_buttons_disabled = false;
var include_text_elements = $("#include_text_elements"); var include_text_elements = $("#include_text_elements");
var xpath_data = false; var xpath_data = false;
@@ -26,7 +17,8 @@ $(document).ready(function () {
set_scale(); set_scale();
}); });
// Should always be disabled // Should always be disabled
$('#browser_steps >li:first-child select').val('Goto site').attr('disabled', 'disabled'); $('#browser_steps-0-operation option[value="Goto site"]').prop("selected", "selected");
$('#browser_steps-0-operation').attr('disabled', 'disabled');
$('#browsersteps-click-start').click(function () { $('#browsersteps-click-start').click(function () {
$("#browsersteps-click-start").fadeOut(); $("#browsersteps-click-start").fadeOut();
@@ -49,7 +41,7 @@ $(document).ready(function () {
$('#browsersteps-img').removeAttr('src'); $('#browsersteps-img').removeAttr('src');
$("#browsersteps-click-start").show(); $("#browsersteps-click-start").show();
$("#browsersteps-selector-wrapper .spinner").hide(); $("#browsersteps-selector-wrapper .spinner").hide();
browserless_seconds_remaining = 0; browser_interface_seconds_remaining = 0;
browsersteps_session_id = false; browsersteps_session_id = false;
apply_buttons_disabled = false; apply_buttons_disabled = false;
ctx.clearRect(0, 0, c.width, c.height); ctx.clearRect(0, 0, c.width, c.height);
@@ -61,12 +53,12 @@ $(document).ready(function () {
$('#browser_steps >li:first-child').css('opacity', '0.5'); $('#browser_steps >li:first-child').css('opacity', '0.5');
} }
// Show seconds remaining until playwright/browserless needs to restart the session // Show seconds remaining until the browser interface needs to restart the session
// (See comment at the top of changedetectionio/blueprint/browser_steps/__init__.py ) // (See comment at the top of changedetectionio/blueprint/browser_steps/__init__.py )
setInterval(() => { setInterval(() => {
if (browserless_seconds_remaining >= 1) { if (browser_interface_seconds_remaining >= 1) {
document.getElementById('browserless-seconds-remaining').innerText = browserless_seconds_remaining + " seconds remaining in session"; document.getElementById('browser-seconds-remaining').innerText = browser_interface_seconds_remaining + " seconds remaining in session";
browserless_seconds_remaining -= 1; browser_interface_seconds_remaining -= 1;
} }
}, "1000") }, "1000")
@@ -160,6 +152,12 @@ $(document).ready(function () {
e.offsetX > item.left * y_scale && e.offsetX < item.left * y_scale + item.width * y_scale e.offsetX > item.left * y_scale && e.offsetX < item.left * y_scale + item.width * y_scale
) { ) {
// Ignore really large ones, because we are scraping 'div' also from xpath_element_scraper but
// that div or whatever could be some wrapper and would generally make you select the whole page
if (item.width > 800 && item.height > 400) {
return
}
// There could be many elements here, record them all and then we'll find out which is the most 'useful' // There could be many elements here, record them all and then we'll find out which is the most 'useful'
// (input, textarea, button, A etc) // (input, textarea, button, A etc)
if (item.width < xpath_data['browser_width']) { if (item.width < xpath_data['browser_width']) {
@@ -261,7 +259,7 @@ $(document).ready(function () {
// This should trigger 'Goto site' // This should trigger 'Goto site'
console.log("Got startup response, requesting Goto-Site (first) step fake click"); console.log("Got startup response, requesting Goto-Site (first) step fake click");
$('#browser_steps >li:first-child .apply').click(); $('#browser_steps >li:first-child .apply').click();
browserless_seconds_remaining = 500; browser_interface_seconds_remaining = 500;
set_first_gotosite_disabled(); set_first_gotosite_disabled();
}).fail(function (data) { }).fail(function (data) {
console.log(data); console.log(data);


@@ -0,0 +1,10 @@
$(document).ready(function () {
var csrftoken = $('input[name=csrf_token]').val();
$.ajaxSetup({
beforeSend: function (xhr, settings) {
if (!/^(GET|HEAD|OPTIONS|TRACE)$/i.test(settings.type) && !this.crossDomain) {
xhr.setRequestHeader("X-CSRFToken", csrftoken)
}
}
})
});


@@ -1,12 +1,10 @@
$(document).ready(function () { $(document).ready(function () {
var csrftoken = $('input[name=csrf_token]').val(); $('.needs-localtime').each(function () {
$.ajaxSetup({ for (var option of this.options) {
beforeSend: function (xhr, settings) { var dateObject = new Date(option.value * 1000);
if (!/^(GET|HEAD|OPTIONS|TRACE)$/i.test(settings.type) && !this.crossDomain) { option.label = dateObject.toLocaleString(undefined, {dateStyle: "full", timeStyle: "medium"});
xhr.setRequestHeader("X-CSRFToken", csrftoken)
}
} }
}) });
// Load it when the #screenshot tab is in use, so we dont give a slow experience when waiting for the text diff to load // Load it when the #screenshot tab is in use, so we dont give a slow experience when waiting for the text diff to load
window.addEventListener('hashchange', function (e) { window.addEventListener('hashchange', function (e) {
@@ -41,6 +39,12 @@ $(document).ready(function () {
$("#highlightSnippet").remove(); $("#highlightSnippet").remove();
} }
// Listen for Escape key press
window.addEventListener('keydown', function (e) {
if (e.key === 'Escape') {
clean();
}
}, false);
function dragTextHandler(event) { function dragTextHandler(event) {
console.log('mouseupped'); console.log('mouseupped');
@@ -90,5 +94,10 @@ $(document).ready(function () {
} }
} }
$('#diff-form').on('submit', function (e) {
if ($('select[name=from_version]').val() === $('select[name=to_version]').val()) {
e.preventDefault();
alert('Error - You are trying to compare the same version.');
}
});
}); });


@@ -79,12 +79,7 @@ $(document).ready(function () {
$('#jump-next-diff').click(); $('#jump-next-diff').click();
} }
$('.needs-localtime').each(function () {
for (var option of this.options) {
var dateObject = new Date(option.value * 1000);
option.label = dateObject.toLocaleString(undefined, {dateStyle: "full", timeStyle: "medium"});
}
})
onDiffTypeChange( onDiffTypeChange(
document.querySelector('#settings [name="diff_type"]:checked'), document.querySelector('#settings [name="diff_type"]:checked'),
); );


@@ -13,27 +13,16 @@ $(document).ready(function() {
$('#send-test-notification').click(function (e) { $('#send-test-notification').click(function (e) {
e.preventDefault(); e.preventDefault();
// this can be global
var csrftoken = $('input[name=csrf_token]').val();
$.ajaxSetup({
beforeSend: function(xhr, settings) {
if (!/^(GET|HEAD|OPTIONS|TRACE)$/i.test(settings.type) && !this.crossDomain) {
xhr.setRequestHeader("X-CSRFToken", csrftoken)
}
}
})
data = { data = {
window_url : window.location.href, notification_body: $('#notification_body').val(),
notification_urls : $('.notification-urls').val(), notification_format: $('#notification_format').val(),
} notification_title: $('#notification_title').val(),
for (key in data) { notification_urls: $('.notification-urls').val(),
if (!data[key].length) { tags: $('#tags').val(),
alert(key+" is empty, cannot send test.") window_url: window.location.href,
return;
}
} }
$.ajax({ $.ajax({
type: "POST", type: "POST",
url: notification_base_url, url: notification_base_url,
@@ -46,7 +35,7 @@ $(document).ready(function() {
} }
}).done(function(data){ }).done(function(data){
console.log(data); console.log(data);
alert('Sent'); alert(data);
}).fail(function(data){ }).fail(function(data){
console.log(data); console.log(data);
alert('There was an error communicating with the server.'); alert('There was an error communicating with the server.');


@@ -0,0 +1,53 @@
function redirect_to_version(version) {
var currentUrl = window.location.href;
var baseUrl = currentUrl.split('?')[0]; // Base URL without query parameters
var anchor = '';
// Check if there is an anchor
if (baseUrl.indexOf('#') !== -1) {
anchor = baseUrl.substring(baseUrl.indexOf('#'));
baseUrl = baseUrl.substring(0, baseUrl.indexOf('#'));
}
window.location.href = baseUrl + '?version=' + version + anchor;
}
document.addEventListener('keydown', function (event) {
var selectElement = document.getElementById('preview-version');
if (selectElement) {
var selectedOption = selectElement.querySelector('option:checked');
if (selectedOption) {
if (event.key === 'ArrowLeft') {
if (selectedOption.previousElementSibling) {
redirect_to_version(selectedOption.previousElementSibling.value);
}
} else if (event.key === 'ArrowRight') {
if (selectedOption.nextElementSibling) {
redirect_to_version(selectedOption.nextElementSibling.value);
}
}
}
}
});
document.getElementById('preview-version').addEventListener('change', function () {
redirect_to_version(this.value);
});
var selectElement = document.getElementById('preview-version');
if (selectElement) {
var selectedOption = selectElement.querySelector('option:checked');
if (selectedOption) {
if (selectedOption.previousElementSibling) {
document.getElementById('btn-previous').href = "?version=" + selectedOption.previousElementSibling.value;
} else {
document.getElementById('btn-previous').remove()
}
if (selectedOption.nextElementSibling) {
document.getElementById('btn-next').href = "?version=" + selectedOption.nextElementSibling.value;
} else {
document.getElementById('btn-next').remove()
}
}
}


@@ -2,250 +2,258 @@
// All rights reserved. // All rights reserved.
// yes - this is really a hack, if you are a front-ender and want to help, please get in touch! // yes - this is really a hack, if you are a front-ender and want to help, please get in touch!
$(document).ready(function () { let runInClearMode = false;
var current_selected_i; $(document).ready(() => {
var state_clicked = false; let currentSelections = [];
let currentSelection = null;
let appendToList = false;
let c, xctx, ctx;
let xScale = 1, yScale = 1;
let selectorImage, selectorImageRect, selectorData;
var c;
// greyed out fill context // Global jQuery selectors with "Elem" appended
var xctx; const $selectorCanvasElem = $('#selector-canvas');
// redline highlight context const $includeFiltersElem = $("#include_filters");
var ctx; const $selectorBackgroundElem = $("img#selector-background");
const $selectorCurrentXpathElem = $("#selector-current-xpath span");
const $fetchingUpdateNoticeElem = $('.fetching-update-notice');
const $selectorWrapperElem = $("#selector-wrapper");
var current_default_xpath = []; // Color constants
var x_scale = 1; const FILL_STYLE_HIGHLIGHT = 'rgba(205,0,0,0.35)';
var y_scale = 1; const FILL_STYLE_GREYED_OUT = 'rgba(205,205,205,0.95)';
var selector_image; const STROKE_STYLE_HIGHLIGHT = 'rgba(255,0,0, 0.9)';
var selector_image_rect; const FILL_STYLE_REDLINE = 'rgba(255,0,0, 0.1)';
var selector_data; const STROKE_STYLE_REDLINE = 'rgba(225,0,0,0.9)';
$('#visualselector-tab').click(function () { $('#visualselector-tab').click(() => {
$("img#selector-background").off('load'); $selectorBackgroundElem.off('load');
state_clicked = false; currentSelections = [];
current_selected_i = false; bootstrapVisualSelector();
bootstrap_visualselector();
}); });
$(document).on('keydown', function (event) { function clearReset() {
if ($("img#selector-background").is(":visible")) { ctx.clearRect(0, 0, c.width, c.height);
if (event.key == "Escape") {
state_clicked = false; if ($includeFiltersElem.val().length) {
ctx.clearRect(0, 0, c.width, c.height); alert("Existing filters under the 'Filters & Triggers' tab were cleared.");
}
$includeFiltersElem.val('');
currentSelections = [];
// Means we ignore the xpaths from the scraper marked as sel.highlight_as_custom_filter (it matched a previous selector)
runInClearMode = true;
highlightCurrentSelected();
}
function splitToList(v) {
return v.split('\n').map(line => line.trim()).filter(line => line.length > 0);
}
function sortScrapedElementsBySize() {
// Sort the currentSelections array by area (width * height) in descending order
selectorData['size_pos'].sort((a, b) => {
const areaA = a.width * a.height;
const areaB = b.width * b.height;
return areaB - areaA;
});
}
$(document).on('keydown keyup', (event) => {
if (event.code === 'ShiftLeft' || event.code === 'ShiftRight') {
appendToList = event.type === 'keydown';
}
if (event.type === 'keydown') {
if ($selectorBackgroundElem.is(":visible") && event.key === "Escape") {
clearReset();
} }
} }
}); });
// For when the page loads $('#clear-selector').on('click', () => {
if (!window.location.hash || window.location.hash != '#visualselector') { clearReset();
$("img#selector-background").attr('src', ''); });
// So if they start switching between visualSelector and manual filters, stop it from rendering old filters
$('li.tab a').on('click', () => {
runInClearMode = true;
});
if (!window.location.hash || window.location.hash !== '#visualselector') {
$selectorBackgroundElem.attr('src', '');
return; return;
} }
// Handle clearing button/link bootstrapVisualSelector();
$('#clear-selector').on('click', function (event) {
if (!state_clicked) {
alert('Oops, Nothing selected!');
}
state_clicked = false;
ctx.clearRect(0, 0, c.width, c.height);
xctx.clearRect(0, 0, c.width, c.height);
$("#include_filters").val('');
});
function bootstrapVisualSelector() {
bootstrap_visualselector(); $selectorBackgroundElem
.on("error", () => {
$fetchingUpdateNoticeElem.html("<strong>Ooops!</strong> The VisualSelector tool needs at least one fetched page, please unpause the watch and/or wait for the watch to complete fetching and then reload this page.")
function bootstrap_visualselector() { .css('color', '#bb0000');
if (1) { $('#selector-current-xpath, #clear-selector').hide();
// bootstrap it, this will trigger everything else })
$("img#selector-background").on("error", function () { .on('load', () => {
$('.fetching-update-notice').html("<strong>Ooops!</strong> The VisualSelector tool needs atleast one fetched page, please unpause the watch and/or wait for the watch to complete fetching and then reload this page.");
$('.fetching-update-notice').css('color','#bb0000');
$('#selector-current-xpath').hide();
$('#clear-selector').hide();
}).bind('load', function () {
console.log("Loaded background..."); console.log("Loaded background...");
c = document.getElementById("selector-canvas"); c = document.getElementById("selector-canvas");
// greyed out fill context
xctx = c.getContext("2d"); xctx = c.getContext("2d");
// redline highlight context
ctx = c.getContext("2d"); ctx = c.getContext("2d");
if ($("#include_filters").val().trim().length) { fetchData();
current_default_xpath = $("#include_filters").val().split(/\r?\n/g); $selectorCanvasElem.off("mousemove mousedown");
} else { })
current_default_xpath = []; .attr("src", screenshot_url);
}
fetch_data(); let s = `${$selectorBackgroundElem.attr('src')}?${new Date().getTime()}`;
$('#selector-canvas').off("mousemove mousedown"); $selectorBackgroundElem.attr('src', s);
// screenshot_url defined in the edit.html template
}).attr("src", screenshot_url);
}
// Tell visualSelector that the image should update
var s = $("img#selector-background").attr('src') + "?" + new Date().getTime();
$("img#selector-background").attr('src', s)
} }
// This is fired once the img src is loaded in bootstrap_visualselector() function alertIfFilterNotFound() {
function fetch_data() { let existingFilters = splitToList($includeFiltersElem.val());
// Image is ready let sizePosXpaths = selectorData['size_pos'].map(sel => sel.xpath);
$('.fetching-update-notice').html("Fetching element data..");
for (let filter of existingFilters) {
if (!sizePosXpaths.includes(filter)) {
alert(`One or more of your existing filters was not found and will be removed when a new filter is selected.`);
break;
}
}
}
function fetchData() {
$fetchingUpdateNoticeElem.html("Fetching element data..");
$.ajax({ $.ajax({
url: watch_visual_selector_data_url, url: watch_visual_selector_data_url,
context: document.body context: document.body
}).done(function (data) { }).done((data) => {
$('.fetching-update-notice').html("Rendering.."); $fetchingUpdateNoticeElem.html("Rendering..");
selector_data = data; selectorData = data;
console.log("Reported browser width from backend: " + data['browser_width']); sortScrapedElementsBySize();
state_clicked = false; console.log(`Reported browser width from backend: ${data['browser_width']}`);
set_scale();
reflow_selector();
$('.fetching-update-notice').fadeOut();
});
// Little sanity check for the user, alert them if something missing
alertIfFilterNotFound();
setScale();
reflowSelector();
$fetchingUpdateNoticeElem.fadeOut();
});
} }
function updateFiltersText() {
// Assuming currentSelections is already defined and contains the selections
let uniqueSelections = new Set(currentSelections.map(sel => (sel[0] === '/' ? `xpath:${sel.xpath}` : sel.xpath)));
function set_scale() { if (currentSelections.length > 0) {
// Convert the Set back to an array and join with newline characters
// some things to check if the scaling doesnt work let textboxFilterText = Array.from(uniqueSelections).join("\n");
// - that the widths/sizes really are about the actual screen size cat elements.json |grep -o width......|sort|uniq $includeFiltersElem.val(textboxFilterText);
$("#selector-wrapper").show();
selector_image = $("img#selector-background")[0];
selector_image_rect = selector_image.getBoundingClientRect();
// make the canvas the same size as the image
$('#selector-canvas').attr('height', selector_image_rect.height);
$('#selector-canvas').attr('width', selector_image_rect.width);
$('#selector-wrapper').attr('width', selector_image_rect.width);
x_scale = selector_image_rect.width / selector_data['browser_width'];
y_scale = selector_image_rect.height / selector_image.naturalHeight;
ctx.strokeStyle = 'rgba(255,0,0, 0.9)';
ctx.fillStyle = 'rgba(255,0,0, 0.1)';
ctx.lineWidth = 3;
console.log("scaling set x: " + x_scale + " by y:" + y_scale);
$("#selector-current-xpath").css('max-width', selector_image_rect.width);
}
function reflow_selector() {
$(window).resize(function () {
set_scale();
highlight_current_selected_i();
});
var selector_currnt_xpath_text = $("#selector-current-xpath span");
set_scale();
console.log(selector_data['size_pos'].length + " selectors found");
// highlight the default one if we can find it in the xPath list
// or the xpath matches the default one
found = false;
if (current_default_xpath.length) {
// Find the first one that matches
// @todo In the future paint all that match
for (const c of current_default_xpath) {
for (var i = selector_data['size_pos'].length; i !== 0; i--) {
if (selector_data['size_pos'][i - 1].xpath.trim() === c.trim()) {
console.log("highlighting " + c);
current_selected_i = i - 1;
highlight_current_selected_i();
found = true;
break;
}
}
if (found) {
break;
}
}
if (!found) {
alert("Unfortunately your existing CSS/xPath Filter was no longer found!");
}
} }
}
function setScale() {
$selectorWrapperElem.show();
selectorImage = $selectorBackgroundElem[0];
selectorImageRect = selectorImage.getBoundingClientRect();
$('#selector-canvas').bind('mousemove', function (e) { $selectorCanvasElem.attr({
if (state_clicked) { 'height': selectorImageRect.height,
return; 'width': selectorImageRect.width
});
$selectorWrapperElem.attr('width', selectorImageRect.width);
$('#visual-selector-heading').css('max-width', selectorImageRect.width + "px")
xScale = selectorImageRect.width / selectorImage.naturalWidth;
yScale = selectorImageRect.height / selectorImage.naturalHeight;
ctx.strokeStyle = STROKE_STYLE_HIGHLIGHT;
ctx.fillStyle = FILL_STYLE_REDLINE;
ctx.lineWidth = 3;
console.log("Scaling set x: " + xScale + " by y:" + yScale);
$("#selector-current-xpath").css('max-width', selectorImageRect.width);
}
function reflowSelector() {
$(window).resize(() => {
setScale();
highlightCurrentSelected();
});
setScale();
console.log(selectorData['size_pos'].length + " selectors found");
let existingFilters = splitToList($includeFiltersElem.val());
selectorData['size_pos'].forEach(sel => {
if ((!runInClearMode && sel.highlight_as_custom_filter) || existingFilters.includes(sel.xpath)) {
console.log("highlighting " + c);
currentSelections.push(sel);
} }
ctx.clearRect(0, 0, c.width, c.height); });
current_selected_i = null;
// Add in offset
if ((typeof e.offsetX === "undefined" || typeof e.offsetY === "undefined") || (e.offsetX === 0 && e.offsetY === 0)) { highlightCurrentSelected();
var targetOffset = $(e.target).offset(); updateFiltersText();
$selectorCanvasElem.bind('mousemove', handleMouseMove.debounce(5));
$selectorCanvasElem.bind('mousedown', handleMouseDown.debounce(5));
$selectorCanvasElem.bind('mouseleave', highlightCurrentSelected.debounce(5));
function handleMouseMove(e) {
if (!e.offsetX && !e.offsetY) {
const targetOffset = $(e.target).offset();
e.offsetX = e.pageX - targetOffset.left; e.offsetX = e.pageX - targetOffset.left;
e.offsetY = e.pageY - targetOffset.top; e.offsetY = e.pageY - targetOffset.top;
} }
// Reverse order - the most specific one should be deeper/"laster" ctx.fillStyle = FILL_STYLE_HIGHLIGHT;
// Basically, find the most 'deepest'
var found = 0;
ctx.fillStyle = 'rgba(205,0,0,0.35)';
// Will be sorted by smallest width*height first
for (var i = 0; i <= selector_data['size_pos'].length; i++) {
// draw all of them? let them choose somehow?
var sel = selector_data['size_pos'][i];
// If we are in a bounding-box
if (e.offsetY > sel.top * y_scale && e.offsetY < sel.top * y_scale + sel.height * y_scale
&&
e.offsetX > sel.left * y_scale && e.offsetX < sel.left * y_scale + sel.width * y_scale
) { selectorData['size_pos'].forEach(sel => {
if (e.offsetY > sel.top * yScale && e.offsetY < sel.top * yScale + sel.height * yScale &&
// FOUND ONE e.offsetX > sel.left * yScale && e.offsetX < sel.left * yScale + sel.width * yScale) {
set_current_selected_text(sel.xpath); setCurrentSelectedText(sel.xpath);
ctx.strokeRect(sel.left * x_scale, sel.top * y_scale, sel.width * x_scale, sel.height * y_scale); drawHighlight(sel);
ctx.fillRect(sel.left * x_scale, sel.top * y_scale, sel.width * x_scale, sel.height * y_scale); currentSelections.push(sel);
currentSelection = sel;
// no need to keep digging highlightCurrentSelected();
// @todo or, O to go out/up, I to go in currentSelections.pop();
// or double click to go up/out the selector?
current_selected_i = i;
found += 1;
break;
} }
} })
}.debounce(5));
function set_current_selected_text(s) {
selector_currnt_xpath_text[0].innerHTML = s;
}
function highlight_current_selected_i() {
if (state_clicked) {
state_clicked = false;
xctx.clearRect(0, 0, c.width, c.height);
return;
}
var sel = selector_data['size_pos'][current_selected_i];
if (sel[0] == '/') {
// @todo - not sure just checking / is right
$("#include_filters").val('xpath:' + sel.xpath);
} else {
$("#include_filters").val(sel.xpath);
}
xctx.fillStyle = 'rgba(205,205,205,0.95)';
xctx.strokeStyle = 'rgba(225,0,0,0.9)';
xctx.lineWidth = 3;
xctx.fillRect(0, 0, c.width, c.height);
// Clear out what only should be seen (make a clear/clean spot)
xctx.clearRect(sel.left * x_scale, sel.top * y_scale, sel.width * x_scale, sel.height * y_scale);
xctx.strokeRect(sel.left * x_scale, sel.top * y_scale, sel.width * x_scale, sel.height * y_scale);
state_clicked = true;
set_current_selected_text(sel.xpath);
} }
$('#selector-canvas').bind('mousedown', function (e) { function setCurrentSelectedText(s) {
highlight_current_selected_i(); $selectorCurrentXpathElem[0].innerHTML = s;
}); }
function drawHighlight(sel) {
ctx.strokeRect(sel.left * xScale, sel.top * yScale, sel.width * xScale, sel.height * yScale);
ctx.fillRect(sel.left * xScale, sel.top * yScale, sel.width * xScale, sel.height * yScale);
}
function handleMouseDown() {
// If we are in 'appendToList' mode, grow the list, if not, just 1
currentSelections = appendToList ? [...currentSelections, currentSelection] : [currentSelection];
highlightCurrentSelected();
updateFiltersText();
}
} }
function highlightCurrentSelected() {
xctx.fillStyle = FILL_STYLE_GREYED_OUT;
xctx.strokeStyle = STROKE_STYLE_REDLINE;
xctx.lineWidth = 3;
xctx.clearRect(0, 0, c.width, c.height);
currentSelections.forEach(sel => {
//xctx.clearRect(sel.left * xScale, sel.top * yScale, sel.width * xScale, sel.height * yScale);
xctx.strokeRect(sel.left * xScale, sel.top * yScale, sel.width * xScale, sel.height * yScale);
});
}
}); });
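
For reference, the selector overlay logic above reduces to scaling the element rectangles reported by the backend into screenshot-display coordinates and hit-testing the mouse position against them. A language-neutral sketch in Python (illustrative, mirroring the xScale/yScale arithmetic):

def hit_test(mouse_x, mouse_y, elements, x_scale, y_scale):
    """Return the xpaths of scraped elements whose scaled bounding box contains the mouse point."""
    hits = []
    for el in elements:  # each el: {'left': .., 'top': .., 'width': .., 'height': .., 'xpath': ..}
        in_x = el['left'] * x_scale < mouse_x < (el['left'] + el['width']) * x_scale
        in_y = el['top'] * y_scale < mouse_y < (el['top'] + el['height']) * y_scale
        if in_x and in_y:
            hits.append(el['xpath'])
    return hits
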


@@ -1,3 +1,17 @@
function toggleOpacity(checkboxSelector, fieldSelector) {
const checkbox = document.querySelector(checkboxSelector);
const fields = document.querySelectorAll(fieldSelector);
function updateOpacity() {
const opacityValue = checkbox.checked ? 0.6 : 1;
fields.forEach(field => {
field.style.opacity = opacityValue;
});
}
// Initial setup
updateOpacity();
checkbox.addEventListener('change', updateOpacity);
}
$(document).ready(function () { $(document).ready(function () {
$('#notification-setting-reset-to-default').click(function (e) { $('#notification-setting-reset-to-default').click(function (e) {
$('#notification_title').val(''); $('#notification_title').val('');
@@ -10,4 +24,7 @@ $(document).ready(function () {
e.preventDefault(); e.preventDefault();
$('#notification-tokens-info').toggle(); $('#notification-tokens-info').toggle();
}); });
toggleOpacity('#time_between_check_use_default', '#time_between_check');
}); });


@@ -68,7 +68,7 @@
--color-last-checked: #bbb; --color-last-checked: #bbb;
--color-text-footer: #444; --color-text-footer: #444;
--color-border-watch-table-cell: #eee; --color-border-watch-table-cell: #eee;
--color-text-watch-tag-list: #e70069; --color-text-watch-tag-list: rgba(231, 0, 105, 0.4);
--color-background-new-watch-form: rgba(0, 0, 0, 0.05); --color-background-new-watch-form: rgba(0, 0, 0, 0.05);
--color-background-new-watch-input: var(--color-white); --color-background-new-watch-input: var(--color-white);
--color-text-new-watch-input: var(--color-text); --color-text-new-watch-input: var(--color-text);
@@ -111,7 +111,7 @@ html[data-darkmode="true"] {
--color-background-input: var(--color-grey-350); --color-background-input: var(--color-grey-350);
--color-text-input-description: var(--color-grey-600); --color-text-input-description: var(--color-grey-600);
--color-text-input-placeholder: var(--color-grey-600); --color-text-input-placeholder: var(--color-grey-600);
--color-text-watch-tag-list: #fa3e92; --color-text-watch-tag-list: rgba(250, 62, 146, 0.4);
--color-background-code: var(--color-grey-200); --color-background-code: var(--color-grey-200);
--color-background-tab: rgba(0, 0, 0, 0.2); --color-background-tab: rgba(0, 0, 0, 0.2);
--color-background-tab-hover: rgba(0, 0, 0, 0.5); --color-background-tab-hover: rgba(0, 0, 0, 0.5);
@@ -126,6 +126,8 @@ html[data-darkmode="true"] {
html[data-darkmode="true"] .watch-table .title-col a[target="_blank"]::after, html[data-darkmode="true"] .watch-table .title-col a[target="_blank"]::after,
html[data-darkmode="true"] .watch-table .current-diff-url::after { html[data-darkmode="true"] .watch-table .current-diff-url::after {
filter: invert(0.5) hue-rotate(10deg) brightness(2); } filter: invert(0.5) hue-rotate(10deg) brightness(2); }
html[data-darkmode="true"] .watch-table .status-browsersteps {
filter: invert(0.5) hue-rotate(10deg) brightness(1.5); }
html[data-darkmode="true"] .watch-table .watch-controls .state-off img { html[data-darkmode="true"] .watch-table .watch-controls .state-off img {
opacity: 0.3; } opacity: 0.3; }
html[data-darkmode="true"] .watch-table .watch-controls .state-on img { html[data-darkmode="true"] .watch-table .watch-controls .state-on img {


@@ -75,7 +75,7 @@
--color-text-footer: #444; --color-text-footer: #444;
--color-border-watch-table-cell: #eee; --color-border-watch-table-cell: #eee;
--color-text-watch-tag-list: #e70069; --color-text-watch-tag-list: rgba(231, 0, 105, 0.4);
--color-background-new-watch-form: rgba(0, 0, 0, 0.05); --color-background-new-watch-form: rgba(0, 0, 0, 0.05);
--color-background-new-watch-input: var(--color-white); --color-background-new-watch-input: var(--color-white);
--color-text-new-watch-input: var(--color-text); --color-text-new-watch-input: var(--color-text);
@@ -127,7 +127,7 @@ html[data-darkmode="true"] {
--color-background-input: var(--color-grey-350); --color-background-input: var(--color-grey-350);
--color-text-input-description: var(--color-grey-600); --color-text-input-description: var(--color-grey-600);
--color-text-input-placeholder: var(--color-grey-600); --color-text-input-placeholder: var(--color-grey-600);
--color-text-watch-tag-list: #fa3e92; --color-text-watch-tag-list: rgba(250, 62, 146, 0.4);
--color-background-code: var(--color-grey-200); --color-background-code: var(--color-grey-200);
--color-background-tab: rgba(0, 0, 0, 0.2); --color-background-tab: rgba(0, 0, 0, 0.2);
@@ -152,6 +152,10 @@ html[data-darkmode="true"] {
filter: invert(.5) hue-rotate(10deg) brightness(2); filter: invert(.5) hue-rotate(10deg) brightness(2);
} }
.status-browsersteps {
filter: invert(.5) hue-rotate(10deg) brightness(1.5);
}
.watch-controls { .watch-controls {
.state-off { .state-off {
img { img {


@@ -1,6 +1,8 @@
#selector-wrapper { #selector-wrapper {
height: 100%; height: 100%;
text-align: center;
max-height: 70vh; max-height: 70vh;
overflow-y: scroll; overflow-y: scroll;
position: relative; position: relative;


@@ -187,8 +187,11 @@ code {
} }
.watch-tag-list { .watch-tag-list {
color: var(--color-text-watch-tag-list); color: var(--color-white);
white-space: nowrap; white-space: nowrap;
background: var(--color-text-watch-tag-list);
border-radius: 5px;
padding: 2px 5px;
} }
.box { .box {
@@ -240,7 +243,6 @@ body::after {
body::before { body::before {
// background-image set in base.html so it works with reverse proxies etc // background-image set in base.html so it works with reverse proxies etc
content: ""; content: "";
background-size: cover
} }
body:after, body:after,
@@ -669,14 +671,25 @@ footer {
and also iPads specifically. and also iPads specifically.
*/ */
.watch-table { .watch-table {
/* make headings work on mobile */
thead {
display: block;
tr {
th {
display: inline-block;
}
}
.empty-cell {
display: none;
}
}
/* Force table to not be like tables anymore */ /* Force table to not be like tables anymore */
thead, tbody {
tbody, td,
th, tr {
td, display: block;
tr { }
display: block;
} }
.last-checked { .last-checked {
@@ -700,13 +713,6 @@ footer {
display: inline-block; display: inline-block;
} }
/* Hide table headers (but not display: none;, for accessibility) */
thead tr {
position: absolute;
top: -9999px;
left: -9999px;
}
.pure-table td, .pure-table td,
.pure-table th { .pure-table th {
border: none; border: none;
@@ -751,6 +757,7 @@ footer {
thead { thead {
background-color: var(--color-background-table-thead); background-color: var(--color-background-table-thead);
color: var(--color-text); color: var(--color-text);
border-bottom: 1px solid var(--color-background-table-thead);
} }
td, td,
@@ -925,23 +932,26 @@ body.full-width {
font-size: .875em; font-size: .875em;
} }
} }
- .text-filtering {
-   h3 {
-     margin-top: 0;
-   }
-   border: 1px solid #ccc;
-   padding: 1rem;
-   border-radius: 5px;
-   margin-bottom: 1rem;
-   fieldset:last-of-type {
-     padding-bottom: 0;
-     .pure-control-group {
-       padding-bottom: 0;
-     }
-   }
- }
- }
+ }
+ .border-fieldset {
+   h3 {
+     margin-top: 0;
+   }
+   border: 1px solid #ccc;
+   padding: 1rem;
+   border-radius: 5px;
+   margin-bottom: 1rem;
+   fieldset:last-of-type {
+     padding-bottom: 0;
+     .pure-control-group {
+       padding-bottom: 0;
+     }
+   }
+ }
ul { ul {
padding-left: 1em; padding-left: 1em;
padding-top: 0px; padding-top: 0px;
@@ -1016,6 +1026,11 @@ ul {
border-radius: 10px; border-radius: 10px;
margin-bottom: 1em; margin-bottom: 1em;
display: none; display: none;
button {
/* some space if they wrap the page */
margin-bottom: 3px;
margin-top: 3px;
}
} }
.checkbox-uuid { .checkbox-uuid {
@@ -1077,6 +1092,9 @@ ul {
li { li {
list-style: none; list-style: none;
font-size: 0.8rem; font-size: 0.8rem;
> * {
display: inline-block;
}
} }
} }
@@ -1096,3 +1114,16 @@ ul {
white-space: nowrap; white-space: nowrap;
} }
#chrome-extension-link {
img {
height: 21px;
padding: 2px;
vertical-align: middle;
}
padding: 9px;
border: 1px solid var(--color-grey-800);
border-radius: 10px;
vertical-align: middle;
}

View File

@@ -284,7 +284,7 @@ ul#requests-extra_browsers {
--color-last-checked: #bbb; --color-last-checked: #bbb;
--color-text-footer: #444; --color-text-footer: #444;
--color-border-watch-table-cell: #eee; --color-border-watch-table-cell: #eee;
- --color-text-watch-tag-list: #e70069;
+ --color-text-watch-tag-list: rgba(231, 0, 105, 0.4);
--color-background-new-watch-form: rgba(0, 0, 0, 0.05); --color-background-new-watch-form: rgba(0, 0, 0, 0.05);
--color-background-new-watch-input: var(--color-white); --color-background-new-watch-input: var(--color-white);
--color-text-new-watch-input: var(--color-text); --color-text-new-watch-input: var(--color-text);
@@ -327,7 +327,7 @@ html[data-darkmode="true"] {
--color-background-input: var(--color-grey-350); --color-background-input: var(--color-grey-350);
--color-text-input-description: var(--color-grey-600); --color-text-input-description: var(--color-grey-600);
--color-text-input-placeholder: var(--color-grey-600); --color-text-input-placeholder: var(--color-grey-600);
- --color-text-watch-tag-list: #fa3e92;
+ --color-text-watch-tag-list: rgba(250, 62, 146, 0.4);
--color-background-code: var(--color-grey-200); --color-background-code: var(--color-grey-200);
--color-background-tab: rgba(0, 0, 0, 0.2); --color-background-tab: rgba(0, 0, 0, 0.2);
--color-background-tab-hover: rgba(0, 0, 0, 0.5); --color-background-tab-hover: rgba(0, 0, 0, 0.5);
@@ -342,6 +342,8 @@ html[data-darkmode="true"] {
html[data-darkmode="true"] .watch-table .title-col a[target="_blank"]::after, html[data-darkmode="true"] .watch-table .title-col a[target="_blank"]::after,
html[data-darkmode="true"] .watch-table .current-diff-url::after { html[data-darkmode="true"] .watch-table .current-diff-url::after {
filter: invert(0.5) hue-rotate(10deg) brightness(2); } filter: invert(0.5) hue-rotate(10deg) brightness(2); }
html[data-darkmode="true"] .watch-table .status-browsersteps {
filter: invert(0.5) hue-rotate(10deg) brightness(1.5); }
html[data-darkmode="true"] .watch-table .watch-controls .state-off img { html[data-darkmode="true"] .watch-table .watch-controls .state-off img {
opacity: 0.3; } opacity: 0.3; }
html[data-darkmode="true"] .watch-table .watch-controls .state-on img { html[data-darkmode="true"] .watch-table .watch-controls .state-on img {
@@ -530,8 +532,11 @@ code {
margin: 0 3px 0 5px; } margin: 0 3px 0 5px; }
.watch-tag-list { .watch-tag-list {
-   color: var(--color-text-watch-tag-list);
-   white-space: nowrap; }
+   color: var(--color-white);
+   white-space: nowrap;
+   background: var(--color-text-watch-tag-list);
+   border-radius: 5px;
+   padding: 2px 5px; }
.box { .box {
max-width: 80%; max-width: 80%;
@@ -569,8 +574,7 @@ body::after {
opacity: 0.91; } opacity: 0.91; }
body::before { body::before {
content: ""; content: ""; }
background-size: cover; }
body:after, body:after,
body:before { body:before {
@@ -859,14 +863,17 @@ footer {
and also iPads specifically. and also iPads specifically.
*/ */
.watch-table {
-   /* Force table to not be like tables anymore */
-   /* Force table to not be like tables anymore */
-   /* Hide table headers (but not display: none;, for accessibility) */ }
-   .watch-table thead,
-   .watch-table tbody,
-   .watch-table th,
-   .watch-table td,
-   .watch-table tr {
-     display: block; }
+   /* make headings work on mobile */
+   /* Force table to not be like tables anymore */
+   /* Force table to not be like tables anymore */ }
+   .watch-table thead {
+     display: block; }
+   .watch-table thead tr th {
+     display: inline-block; }
+   .watch-table thead .empty-cell {
+     display: none; }
+   .watch-table tbody td,
+   .watch-table tbody tr {
+     display: block; }
.watch-table .last-checked > span { .watch-table .last-checked > span {
vertical-align: middle; } vertical-align: middle; }
@@ -878,10 +885,6 @@ footer {
content: "Last Changed "; } content: "Last Changed "; }
.watch-table td.inline { .watch-table td.inline {
display: inline-block; } display: inline-block; }
.watch-table thead tr {
position: absolute;
top: -9999px;
left: -9999px; }
.watch-table .pure-table td, .watch-table .pure-table td,
.watch-table .pure-table th { .watch-table .pure-table th {
border: none; } border: none; }
@@ -908,7 +911,8 @@ footer {
border-color: var(--color-border-table-cell); } border-color: var(--color-border-table-cell); }
.pure-table thead { .pure-table thead {
background-color: var(--color-background-table-thead); background-color: var(--color-background-table-thead);
-   color: var(--color-text); }
+   color: var(--color-text);
+   border-bottom: 1px solid var(--color-background-table-thead); }
.pure-table td, .pure-table td,
.pure-table th { .pure-table th {
border-left-color: var(--color-border-table-cell); } border-left-color: var(--color-border-table-cell); }
@@ -1036,17 +1040,18 @@ body.full-width .edit-form {
color: var(--color-text-input-description); } color: var(--color-text-input-description); }
.edit-form .pure-form-message-inline code { .edit-form .pure-form-message-inline code {
font-size: .875em; } font-size: .875em; }
- .edit-form .text-filtering {
-   border: 1px solid #ccc;
-   padding: 1rem;
-   border-radius: 5px;
-   margin-bottom: 1rem; }
-   .edit-form .text-filtering h3 {
-     margin-top: 0; }
-   .edit-form .text-filtering fieldset:last-of-type {
-     padding-bottom: 0; }
-     .edit-form .text-filtering fieldset:last-of-type .pure-control-group {
-       padding-bottom: 0; }
+ .border-fieldset {
+   border: 1px solid #ccc;
+   padding: 1rem;
+   border-radius: 5px;
+   margin-bottom: 1rem; }
+   .border-fieldset h3 {
+     margin-top: 0; }
+   .border-fieldset fieldset:last-of-type {
+     padding-bottom: 0; }
+     .border-fieldset fieldset:last-of-type .pure-control-group {
+       padding-bottom: 0; }
ul { ul {
padding-left: 1em; padding-left: 1em;
@@ -1060,6 +1065,7 @@ ul {
#selector-wrapper { #selector-wrapper {
height: 100%; height: 100%;
text-align: center;
max-height: 70vh; max-height: 70vh;
overflow-y: scroll; overflow-y: scroll;
position: relative; } position: relative; }
@@ -1122,6 +1128,10 @@ ul {
border-radius: 10px; border-radius: 10px;
margin-bottom: 1em; margin-bottom: 1em;
display: none; } display: none; }
#checkbox-operations button {
/* some space if they wrap the page */
margin-bottom: 3px;
margin-top: 3px; }
.checkbox-uuid > * { .checkbox-uuid > * {
vertical-align: middle; } vertical-align: middle; }
@@ -1167,6 +1177,8 @@ ul {
#quick-watch-processor-type ul li { #quick-watch-processor-type ul li {
list-style: none; list-style: none;
font-size: 0.8rem; } font-size: 0.8rem; }
#quick-watch-processor-type ul li > * {
display: inline-block; }
.restock-label { .restock-label {
padding: 3px; padding: 3px;
@@ -1178,3 +1190,13 @@ ul {
.restock-label.not-in-stock { .restock-label.not-in-stock {
background-color: var(--color-background-button-cancel); background-color: var(--color-background-button-cancel);
color: #777; } color: #777; }
#chrome-extension-link {
padding: 9px;
border: 1px solid var(--color-grey-800);
border-radius: 10px;
vertical-align: middle; }
#chrome-extension-link img {
height: 21px;
padding: 2px;
vertical-align: middle; }

View File

@@ -1,4 +1,4 @@
- from distutils.util import strtobool
+ from changedetectionio.strtobool import strtobool
from flask import ( from flask import (
flash flash
@@ -9,7 +9,6 @@ from copy import deepcopy, copy
from os import path, unlink from os import path, unlink
from threading import Lock from threading import Lock
import json import json
import logging
import os import os
import re import re
import requests import requests
@@ -17,6 +16,7 @@ import secrets
import threading import threading
import time import time
import uuid as uuid_builder import uuid as uuid_builder
from loguru import logger
# Because the server will run as a daemon and wont know the URL for notification links when firing off a notification # Because the server will run as a daemon and wont know the URL for notification links when firing off a notification
BASE_URL_NOT_SET_TEXT = '("Base URL" not set - see settings - notifications)' BASE_URL_NOT_SET_TEXT = '("Base URL" not set - see settings - notifications)'
@@ -42,7 +42,7 @@ class ChangeDetectionStore:
self.__data = App.model() self.__data = App.model()
self.datastore_path = datastore_path self.datastore_path = datastore_path
self.json_store_path = "{}/url-watches.json".format(self.datastore_path) self.json_store_path = "{}/url-watches.json".format(self.datastore_path)
print(">>> Datastore path is ", self.json_store_path) logger.info(f"Datastore path is '{self.json_store_path}'")
self.needs_write = False self.needs_write = False
self.start_time = time.time() self.start_time = time.time()
self.stop_thread = False self.stop_thread = False
@@ -83,12 +83,12 @@ class ChangeDetectionStore:
for uuid, watch in self.__data['watching'].items(): for uuid, watch in self.__data['watching'].items():
watch['uuid']=uuid watch['uuid']=uuid
self.__data['watching'][uuid] = Watch.model(datastore_path=self.datastore_path, default=watch) self.__data['watching'][uuid] = Watch.model(datastore_path=self.datastore_path, default=watch)
print("Watching:", uuid, self.__data['watching'][uuid]['url']) logger.info(f"Watching: {uuid} {self.__data['watching'][uuid]['url']}")
# First time ran, Create the datastore. # First time ran, Create the datastore.
except (FileNotFoundError): except (FileNotFoundError):
if include_default_watches: if include_default_watches:
print("No JSON DB found at {}, creating JSON store at {}".format(self.json_store_path, self.datastore_path)) logger.critical(f"No JSON DB found at {self.json_store_path}, creating JSON store at {self.datastore_path}")
self.add_watch(url='https://news.ycombinator.com/', self.add_watch(url='https://news.ycombinator.com/',
tag='Tech news', tag='Tech news',
extras={'fetch_backend': 'html_requests'}) extras={'fetch_backend': 'html_requests'})
@@ -124,12 +124,12 @@ class ChangeDetectionStore:
self.__data['app_guid'] = str(uuid_builder.uuid4()) self.__data['app_guid'] = str(uuid_builder.uuid4())
# Generate the URL access token for RSS feeds # Generate the URL access token for RSS feeds
- if not 'rss_access_token' in self.__data['settings']['application']:
+ if not self.__data['settings']['application'].get('rss_access_token'):
secret = secrets.token_hex(16) secret = secrets.token_hex(16)
self.__data['settings']['application']['rss_access_token'] = secret self.__data['settings']['application']['rss_access_token'] = secret
# Generate the API access token # Generate the API access token
- if not 'api_access_token' in self.__data['settings']['application']:
+ if not self.__data['settings']['application'].get('api_access_token'):
secret = secrets.token_hex(16) secret = secrets.token_hex(16)
self.__data['settings']['application']['api_access_token'] = secret self.__data['settings']['application']['api_access_token'] = secret
@@ -139,7 +139,7 @@ class ChangeDetectionStore:
save_data_thread = threading.Thread(target=self.save_datastore).start() save_data_thread = threading.Thread(target=self.save_datastore).start()
def set_last_viewed(self, uuid, timestamp): def set_last_viewed(self, uuid, timestamp):
logging.debug("Setting watch UUID: {} last viewed to {}".format(uuid, int(timestamp))) logger.debug(f"Setting watch UUID: {uuid} last viewed to {int(timestamp)}")
self.data['watching'][uuid].update({'last_viewed': int(timestamp)}) self.data['watching'][uuid].update({'last_viewed': int(timestamp)})
self.needs_write = True self.needs_write = True
@@ -163,7 +163,6 @@ class ChangeDetectionStore:
del (update_obj[dict_key]) del (update_obj[dict_key])
self.__data['watching'][uuid].update(update_obj) self.__data['watching'][uuid].update(update_obj)
self.needs_write = True self.needs_write = True
@property @property
@@ -178,7 +177,7 @@ class ChangeDetectionStore:
@property @property
def has_unviewed(self): def has_unviewed(self):
for uuid, watch in self.__data['watching'].items(): for uuid, watch in self.__data['watching'].items():
- if watch.viewed == False:
+ if watch.history_n >= 2 and watch.viewed == False:
return True return True
return False return False
@@ -243,6 +242,14 @@ class ChangeDetectionStore:
def clear_watch_history(self, uuid): def clear_watch_history(self, uuid):
import pathlib import pathlib
# JSON Data, Screenshots, Textfiles (history index and snapshots), HTML in the future etc
for item in pathlib.Path(os.path.join(self.datastore_path, uuid)).rglob("*.*"):
unlink(item)
# Force the attr to recalculate
bump = self.__data['watching'][uuid].history
# Do this last because it will trigger a recheck due to last_checked being zero
self.__data['watching'][uuid].update({ self.__data['watching'][uuid].update({
'browser_steps_last_error_step' : None, 'browser_steps_last_error_step' : None,
'check_count': 0, 'check_count': 0,
@@ -255,16 +262,10 @@ class ChangeDetectionStore:
'last_viewed': 0, 'last_viewed': 0,
'previous_md5': False, 'previous_md5': False,
'previous_md5_before_filters': False, 'previous_md5_before_filters': False,
'remote_server_reply': None,
'track_ldjson_price_data': None, 'track_ldjson_price_data': None,
}) })
# JSON Data, Screenshots, Textfiles (history index and snapshots), HTML in the future etc
for item in pathlib.Path(os.path.join(self.datastore_path, uuid)).rglob("*.*"):
unlink(item)
# Force the attr to recalculate
bump = self.__data['watching'][uuid].history
self.needs_write_urgent = True self.needs_write_urgent = True
def add_watch(self, url, tag='', extras=None, tag_uuids=None, write_to_disk_now=True): def add_watch(self, url, tag='', extras=None, tag_uuids=None, write_to_disk_now=True):
@@ -316,7 +317,7 @@ class ChangeDetectionStore:
apply_extras['include_filters'] = [res['css_filter']] apply_extras['include_filters'] = [res['css_filter']]
except Exception as e: except Exception as e:
logging.error("Error fetching metadata for shared watch link", url, str(e)) logger.error(f"Error fetching metadata for shared watch link {url} {str(e)}")
flash("Error fetching metadata for {}".format(url), 'error') flash("Error fetching metadata for {}".format(url), 'error')
return False return False
from .model.Watch import is_safe_url from .model.Watch import is_safe_url
@@ -345,7 +346,7 @@ class ChangeDetectionStore:
new_uuid = new_watch.get('uuid') new_uuid = new_watch.get('uuid')
logging.debug("Added URL {} - {}".format(url, new_uuid)) logger.debug(f"Adding URL {url} - {new_uuid}")
for k in ['uuid', 'history', 'last_checked', 'last_changed', 'newest_history_key', 'previous_md5', 'viewed']: for k in ['uuid', 'history', 'last_checked', 'last_changed', 'newest_history_key', 'previous_md5', 'viewed']:
if k in apply_extras: if k in apply_extras:
@@ -362,7 +363,7 @@ class ChangeDetectionStore:
if write_to_disk_now: if write_to_disk_now:
self.sync_to_json() self.sync_to_json()
print("added ", url) logger.debug(f"Added '{url}'")
return new_uuid return new_uuid
@@ -375,55 +376,14 @@ class ChangeDetectionStore:
return False return False
# Save as PNG, PNG is larger but better for doing visual diff in the future
def save_screenshot(self, watch_uuid, screenshot: bytes, as_error=False):
if not self.data['watching'].get(watch_uuid):
return
if as_error:
target_path = os.path.join(self.datastore_path, watch_uuid, "last-error-screenshot.png")
else:
target_path = os.path.join(self.datastore_path, watch_uuid, "last-screenshot.png")
self.data['watching'][watch_uuid].ensure_data_dir_exists()
with open(target_path, 'wb') as f:
f.write(screenshot)
f.close()
def save_error_text(self, watch_uuid, contents):
if not self.data['watching'].get(watch_uuid):
return
self.data['watching'][watch_uuid].ensure_data_dir_exists()
target_path = os.path.join(self.datastore_path, watch_uuid, "last-error.txt")
with open(target_path, 'w') as f:
f.write(contents)
def save_xpath_data(self, watch_uuid, data, as_error=False):
if not self.data['watching'].get(watch_uuid):
return
if as_error:
target_path = os.path.join(self.datastore_path, watch_uuid, "elements-error.json")
else:
target_path = os.path.join(self.datastore_path, watch_uuid, "elements.json")
self.data['watching'][watch_uuid].ensure_data_dir_exists()
with open(target_path, 'w') as f:
f.write(json.dumps(data))
f.close()
def sync_to_json(self): def sync_to_json(self):
logging.info("Saving JSON..") logger.info("Saving JSON..")
print("Saving JSON..")
try: try:
data = deepcopy(self.__data) data = deepcopy(self.__data)
except RuntimeError as e: except RuntimeError as e:
# Try again in 15 seconds # Try again in 15 seconds
time.sleep(15) time.sleep(15)
logging.error ("! Data changed when writing to JSON, trying again.. %s", str(e)) logger.error(f"! Data changed when writing to JSON, trying again.. {str(e)}")
self.sync_to_json() self.sync_to_json()
return return
else: else:
@@ -436,7 +396,7 @@ class ChangeDetectionStore:
json.dump(data, json_file, indent=4) json.dump(data, json_file, indent=4)
os.replace(self.json_store_path+".tmp", self.json_store_path) os.replace(self.json_store_path+".tmp", self.json_store_path)
except Exception as e: except Exception as e:
logging.error("Error writing JSON!! (Main JSON file save was skipped) : %s", str(e)) logger.error(f"Error writing JSON!! (Main JSON file save was skipped) : {str(e)}")
self.needs_write = False self.needs_write = False
self.needs_write_urgent = False self.needs_write_urgent = False
@@ -447,7 +407,16 @@ class ChangeDetectionStore:
while True: while True:
if self.stop_thread: if self.stop_thread:
print("Shutting down datastore thread") # Suppressing "Logging error in Loguru Handler #0" during CICD.
# Not a meaningful difference for a real use-case just for CICD.
# the side effect is a "Shutting down datastore thread" message
# at the end of each test.
# But still more looking better.
import sys
logger.remove()
logger.add(sys.stderr)
logger.critical("Shutting down datastore thread")
return return
if self.needs_write or self.needs_write_urgent: if self.needs_write or self.needs_write_urgent:
@@ -463,7 +432,7 @@ class ChangeDetectionStore:
# Go through the datastore path and remove any snapshots that are not mentioned in the index # Go through the datastore path and remove any snapshots that are not mentioned in the index
# This usually is not used, but can be handy. # This usually is not used, but can be handy.
def remove_unused_snapshots(self): def remove_unused_snapshots(self):
print ("Removing snapshots from datastore that are not in the index..") logger.info("Removing snapshots from datastore that are not in the index..")
index=[] index=[]
for uuid in self.data['watching']: for uuid in self.data['watching']:
@@ -476,7 +445,7 @@ class ChangeDetectionStore:
for uuid in self.data['watching']: for uuid in self.data['watching']:
for item in pathlib.Path(self.datastore_path).rglob(uuid+"/*.txt"): for item in pathlib.Path(self.datastore_path).rglob(uuid+"/*.txt"):
if not str(item) in index: if not str(item) in index:
print ("Removing",item) logger.info(f"Removing {item}")
unlink(item) unlink(item)
@property @property
@@ -545,7 +514,6 @@ class ChangeDetectionStore:
return os.path.isfile(filepath) return os.path.isfile(filepath)
def get_all_base_headers(self): def get_all_base_headers(self):
from .model.App import parse_headers_from_text_file
headers = {} headers = {}
# Global app settings # Global app settings
headers.update(self.data['settings'].get('headers', {})) headers.update(self.data['settings'].get('headers', {}))
@@ -562,7 +530,7 @@ class ChangeDetectionStore:
if os.path.isfile(filepath): if os.path.isfile(filepath):
headers.update(parse_headers_from_text_file(filepath)) headers.update(parse_headers_from_text_file(filepath))
except Exception as e: except Exception as e:
print(f"ERROR reading headers.txt at {filepath}", str(e)) logger.error(f"ERROR reading headers.txt at {filepath} {str(e)}")
watch = self.data['watching'].get(uuid) watch = self.data['watching'].get(uuid)
if watch: if watch:
@@ -573,7 +541,7 @@ class ChangeDetectionStore:
if os.path.isfile(filepath): if os.path.isfile(filepath):
headers.update(parse_headers_from_text_file(filepath)) headers.update(parse_headers_from_text_file(filepath))
except Exception as e: except Exception as e:
print(f"ERROR reading headers.txt at {filepath}", str(e)) logger.error(f"ERROR reading headers.txt at {filepath} {str(e)}")
# In /datastore/tag-name.txt # In /datastore/tag-name.txt
tags = self.get_all_tags_for_watch(uuid=uuid) tags = self.get_all_tags_for_watch(uuid=uuid)
@@ -584,7 +552,7 @@ class ChangeDetectionStore:
if os.path.isfile(filepath): if os.path.isfile(filepath):
headers.update(parse_headers_from_text_file(filepath)) headers.update(parse_headers_from_text_file(filepath))
except Exception as e: except Exception as e:
print(f"ERROR reading headers.txt at {filepath}", str(e)) logger.error(f"ERROR reading headers.txt at {filepath} {str(e)}")
return headers return headers
@@ -602,13 +570,13 @@ class ChangeDetectionStore:
def add_tag(self, name): def add_tag(self, name):
# If name exists, return that # If name exists, return that
n = name.strip().lower() n = name.strip().lower()
print (f">>> Adding new tag - '{n}'") logger.debug(f">>> Adding new tag - '{n}'")
if not n: if not n:
return False return False
for uuid, tag in self.__data['settings']['application'].get('tags', {}).items(): for uuid, tag in self.__data['settings']['application'].get('tags', {}).items():
if n == tag.get('title', '').lower().strip(): if n == tag.get('title', '').lower().strip():
print (f">>> Tag {name} already exists") logger.warning(f"Tag '{name}' already exists, skipping creation.")
return uuid return uuid
# Eventually almost everything todo with a watch will apply as a Tag # Eventually almost everything todo with a watch will apply as a Tag
@@ -648,7 +616,10 @@ class ChangeDetectionStore:
return res return res
def tag_exists_by_name(self, tag_name): def tag_exists_by_name(self, tag_name):
- return any(v.get('title', '').lower() == tag_name.lower() for k, v in self.__data['settings']['application']['tags'].items())
+ # Check if any tag dictionary has a 'title' attribute matching the provided tag_name
+ tags = self.__data['settings']['application']['tags'].values()
+ return next((v for v in tags if v.get('title', '').lower() == tag_name.lower()),
+             None)
def get_updates_available(self): def get_updates_available(self):
import inspect import inspect
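The reworked tag_exists_by_name above now returns the first matching tag entry (or None) rather than a plain boolean, using next() over a generator with a default. A minimal standalone sketch of that lookup pattern, with made-up tag data standing in for the real datastore:

# Illustrative only - 'tags' mimics the shape of self.__data['settings']['application']['tags']
tags = {
    "uuid-1": {"title": "Tech news"},
    "uuid-2": {"title": "Restock alerts"},
}

def tag_exists_by_name(tag_name):
    # next() yields the first matching tag dict, or None when nothing matches,
    # so callers can treat the result as both a truthy check and the tag itself
    return next((v for v in tags.values() if v.get('title', '').lower() == tag_name.lower()), None)

print(tag_exists_by_name("tech news"))   # {'title': 'Tech news'}
print(tag_exists_by_name("missing"))     # None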
@@ -670,7 +641,7 @@ class ChangeDetectionStore:
updates_available = self.get_updates_available() updates_available = self.get_updates_available()
for update_n in updates_available: for update_n in updates_available:
if update_n > self.__data['settings']['application']['schema_version']: if update_n > self.__data['settings']['application']['schema_version']:
print ("Applying update_{}".format((update_n))) logger.critical(f"Applying update_{update_n}")
# Wont exist on fresh installs # Wont exist on fresh installs
if os.path.exists(self.json_store_path): if os.path.exists(self.json_store_path):
shutil.copyfile(self.json_store_path, self.datastore_path+"/url-watches-before-{}.json".format(update_n)) shutil.copyfile(self.json_store_path, self.datastore_path+"/url-watches-before-{}.json".format(update_n))
@@ -678,8 +649,8 @@ class ChangeDetectionStore:
try: try:
update_method = getattr(self, "update_{}".format(update_n))() update_method = getattr(self, "update_{}".format(update_n))()
except Exception as e: except Exception as e:
print("Error while trying update_{}".format((update_n))) logger.error(f"Error while trying update_{update_n}")
print(e) logger.error(e)
# Don't run any more updates # Don't run any more updates
return return
else: else:
@@ -717,7 +688,7 @@ class ChangeDetectionStore:
with open(os.path.join(target_path, "history.txt"), "w") as f: with open(os.path.join(target_path, "history.txt"), "w") as f:
f.writelines(history) f.writelines(history)
else: else:
logging.warning("Datastore history directory {} does not exist, skipping history import.".format(target_path)) logger.warning(f"Datastore history directory {target_path} does not exist, skipping history import.")
# No longer needed, dynamically pulled from the disk when needed. # No longer needed, dynamically pulled from the disk when needed.
# But we should set it back to a empty dict so we don't break if this schema runs on an earlier version. # But we should set it back to a empty dict so we don't break if this schema runs on an earlier version.
@@ -860,3 +831,21 @@ class ChangeDetectionStore:
self.__data["watching"][awatch]['include_filters'][num] = 'xpath1:' + selector self.__data["watching"][awatch]['include_filters'][num] = 'xpath1:' + selector
if selector.startswith('xpath:'): if selector.startswith('xpath:'):
self.__data["watching"][awatch]['include_filters'][num] = selector.replace('xpath:', 'xpath1:', 1) self.__data["watching"][awatch]['include_filters'][num] = selector.replace('xpath:', 'xpath1:', 1)
# Use more obvious default time setting
def update_15(self):
for uuid in self.__data["watching"]:
if self.__data["watching"][uuid]['time_between_check'] == self.__data['settings']['requests']['time_between_check']:
# What the old logic was, which was pretty confusing
self.__data["watching"][uuid]['time_between_check_use_default'] = True
elif all(value is None or value == 0 for value in self.__data["watching"][uuid]['time_between_check'].values()):
self.__data["watching"][uuid]['time_between_check_use_default'] = True
else:
# Something custom here
self.__data["watching"][uuid]['time_between_check_use_default'] = False
# Correctly set datatype for older installs where 'tag' was string and update_12 did not catch it
def update_16(self):
for uuid, watch in self.data['watching'].items():
if isinstance(watch.get('tags'), str):
self.data['watching'][uuid]['tags'] = []
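Most of the store.py changes above swap ad-hoc print()/logging calls for loguru f-string logging, plus a handler reset used to quieten the shutdown message during CI. A small self-contained sketch of that pattern (the path and messages are illustrative only, not the application's real values):

import sys
from loguru import logger

json_store_path = "/tmp/url-watches.json"  # illustrative path only
logger.info(f"Datastore path is '{json_store_path}'")

# The shutdown pattern from save_datastore(): drop the default handler and
# re-add a plain stderr sink before the final message, so queued handler
# errors are not reported when the thread stops.
logger.remove()
logger.add(sys.stderr)
logger.critical("Shutting down datastore thread")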

View File

@@ -0,0 +1,23 @@
# Because strtobool was removed in python 3.12 distutils
_MAP = {
'y': True,
'yes': True,
't': True,
'true': True,
'on': True,
'1': True,
'n': False,
'no': False,
'f': False,
'false': False,
'off': False,
'0': False
}
def strtobool(value):
try:
return _MAP[str(value).lower()]
except KeyError:
raise ValueError('"{}" is not a valid bool value'.format(value))

View File

@@ -0,0 +1,6 @@
# Important notes about templates
Template names should always end in ".html", ".htm", ".xml", ".xhtml", ".svg", even the `import`'ed templates.
Jinja2's `def select_jinja_autoescape(self, filename: str) -> bool:` will check the filename extension and enable autoescaping
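For context, a small sketch of how extension-based autoescaping behaves in plain Jinja2, using jinja2.select_autoescape (which Flask's select_jinja_autoescape roughly mirrors); nothing here is changedetection.io specific:

from jinja2 import Environment, DictLoader, select_autoescape

env = Environment(
    loader=DictLoader({
        "note.html": "{{ body }}",   # .html -> autoescaping on
        "note.txt": "{{ body }}",    # .txt  -> autoescaping off
    }),
    autoescape=select_autoescape(),  # decides per template filename extension
)

payload = "<script>alert(1)</script>"
print(env.get_template("note.html").render(body=payload))  # &lt;script&gt;alert(1)&lt;/script&gt;
print(env.get_template("note.txt").render(body=payload))   # <script>alert(1)</script>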

View File

@@ -1,5 +1,5 @@
- {% from '_helpers.jinja' import render_field %}
+ {% from '_helpers.html' import render_field %}
{% macro render_common_settings_form(form, emailprefix, settings_application) %} {% macro render_common_settings_form(form, emailprefix, settings_application) %}
<div class="pure-control-group"> <div class="pure-control-group">
@@ -115,6 +115,12 @@
Warning: Contents of <code>{{ '{{diff}}' }}</code>, <code>{{ '{{diff_removed}}' }}</code>, and <code>{{ '{{diff_added}}' }}</code> depend on how the difference algorithm perceives the change. <br> Warning: Contents of <code>{{ '{{diff}}' }}</code>, <code>{{ '{{diff_removed}}' }}</code>, and <code>{{ '{{diff_added}}' }}</code> depend on how the difference algorithm perceives the change. <br>
For example, an addition or removal could be perceived as a change in some cases. <a target="_new" href="https://github.com/dgtlmoon/changedetection.io/wiki/Using-the-%7B%7Bdiff%7D%7D,-%7B%7Bdiff_added%7D%7D,-and-%7B%7Bdiff_removed%7D%7D-notification-tokens">More Here</a> <br> For example, an addition or removal could be perceived as a change in some cases. <a target="_new" href="https://github.com/dgtlmoon/changedetection.io/wiki/Using-the-%7B%7Bdiff%7D%7D,-%7B%7Bdiff_added%7D%7D,-and-%7B%7Bdiff_removed%7D%7D-notification-tokens">More Here</a> <br>
</p> </p>
<p>
For JSON payloads, use <strong>|tojson</strong> without quotes for automatic escaping, for example - <code>{ "name": {{ '{{ watch_title|tojson }}' }} }</code>
</p>
<p>
URL encoding, use <strong>|urlencode</strong>, for example - <code>gets://hook-website.com/test.php?title={{ '{{ watch_title|urlencode }}' }}</code>
</p>
</div> </div>
</div> </div>
<div class="pure-control-group"> <div class="pure-control-group">

View File

@@ -6,7 +6,9 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0" > <meta name="viewport" content="width=device-width, initial-scale=1.0" >
<meta name="description" content="Self hosted website change detection." > <meta name="description" content="Self hosted website change detection." >
<title>Change Detection{{extra_title}}</title> <title>Change Detection{{extra_title}}</title>
<link rel="alternate" type="application/rss+xml" title="Changedetection.io » Feed{% if active_tag %}- {{active_tag}}{% endif %}" href="{{ url_for('rss', tag=active_tag , token=app_rss_token)}}" > {% if app_rss_token %}
<link rel="alternate" type="application/rss+xml" title="Changedetection.io » Feed{% if active_tag_uuid %}- {{active_tag.title}}{% endif %}" href="{{ url_for('rss', tag=active_tag_uuid , token=app_rss_token)}}" >
{% endif %}
<link rel="stylesheet" href="{{url_for('static_content', group='styles', filename='pure-min.css')}}" > <link rel="stylesheet" href="{{url_for('static_content', group='styles', filename='pure-min.css')}}" >
<link rel="stylesheet" href="{{url_for('static_content', group='styles', filename='styles.css')}}?v={{ get_css_version() }}" > <link rel="stylesheet" href="{{url_for('static_content', group='styles', filename='styles.css')}}?v={{ get_css_version() }}" >
{% if extra_stylesheets %} {% if extra_stylesheets %}
@@ -24,13 +26,11 @@
<meta name="msapplication-TileColor" content="#da532c"> <meta name="msapplication-TileColor" content="#da532c">
<meta name="msapplication-config" content="favicons/browserconfig.xml"> <meta name="msapplication-config" content="favicons/browserconfig.xml">
<meta name="theme-color" content="#ffffff"> <meta name="theme-color" content="#ffffff">
- <style>
- body::before {
-   background-image: url({{url_for('static_content', group='images', filename='gradient-border.png') }});
- }
- </style>
+ <script>
+   const csrftoken="{{ csrf_token() }}";
+ </script>
<script src="{{url_for('static_content', group='js', filename='jquery-3.6.0.min.js')}}"></script>
+ <script src="{{url_for('static_content', group='js', filename='csrf.js')}}" defer></script>
</head> </head>
<body> <body>
@@ -89,8 +89,8 @@
<li class="pure-menu-item pure-form" id="search-menu-item"> <li class="pure-menu-item pure-form" id="search-menu-item">
<!-- We use GET here so it offers people a chance to set bookmarks etc --> <!-- We use GET here so it offers people a chance to set bookmarks etc -->
<form name="searchForm" action="" method="GET"> <form name="searchForm" action="" method="GET">
<input id="search-q" class="" name="q" placeholder="URL or Title {% if active_tag %}in '{{ active_tag }}'{% endif %}" required="" type="text" value=""> <input id="search-q" class="" name="q" placeholder="URL or Title {% if active_tag_uuid %}in '{{ active_tag.title }}'{% endif %}" required="" type="text" value="">
<input name="tags" type="hidden" value="{% if active_tag %}{{active_tag}}{% endif %}"> <input name="tags" type="hidden" value="{% if active_tag_uuid %}{{active_tag_uuid}}{% endif %}">
<button class="toggle-button " id="toggle-search" type="button" title="Search, or Use Alt+S Key" > <button class="toggle-button " id="toggle-search" type="button" title="Search, or Use Alt+S Key" >
{% include "svgs/search-icon.svg" %} {% include "svgs/search-icon.svg" %}
</button> </button>
@@ -147,7 +147,19 @@
<section class="content"> <section class="content">
<div id="overlay"> <div id="overlay">
<div class="content"> <div class="content">
- <strong>changedetection.io needs your support!</strong><br>
+ <h4>Try our Chrome extension</h4>
<p>
<a id="chrome-extension-link"
title="Try our new Chrome Extension!"
href="https://chromewebstore.google.com/detail/changedetectionio-website/kefcfmgmlhmankjmnbijimhofdjekbop">
<img src="{{url_for('static_content', group='images', filename='Google-Chrome-icon.png')}}">
Chrome Webstore
</a>
</p>
Easily add the current web-page from your browser directly into your changedetection.io tool, more great features coming soon!
<h4>Changedetection.io needs your support!</h4>
<p> <p>
You can help us by supporting changedetection.io on these platforms; You can help us by supporting changedetection.io on these platforms;
</p> </p>

View File

@@ -1,5 +1,5 @@
{% extends 'base.html' %} {% extends 'base.html' %}
- {% from '_helpers.jinja' import render_field, render_checkbox_field, render_button %}
+ {% from '_helpers.html' import render_field, render_checkbox_field, render_button %}
{% block content %} {% block content %}
<script> <script>
const screenshot_url="{{url_for('static_content', group='screenshot', filename=uuid)}}"; const screenshot_url="{{url_for('static_content', group='screenshot', filename=uuid)}}";
@@ -13,7 +13,7 @@
<script src="{{url_for('static_content', group='js', filename='diff-overview.js')}}" defer></script> <script src="{{url_for('static_content', group='js', filename='diff-overview.js')}}" defer></script>
<div id="settings"> <div id="settings">
<form class="pure-form " action="" method="GET"> <form class="pure-form " action="" method="GET" id="diff-form">
<fieldset> <fieldset>
{% if versions|length >= 1 %} {% if versions|length >= 1 %}
<strong>Compare</strong> <strong>Compare</strong>

View File

@@ -1,20 +1,21 @@
{% extends 'base.html' %} {% extends 'base.html' %}
{% block content %} {% block content %}
- {% from '_helpers.jinja' import render_field, render_checkbox_field, render_button %}
- {% from '_common_fields.jinja' import render_common_settings_form %}
+ {% from '_helpers.html' import render_field, render_checkbox_field, render_button %}
+ {% from '_common_fields.html' import render_common_settings_form %}
<script src="{{url_for('static_content', group='js', filename='tabs.js')}}" defer></script> <script src="{{url_for('static_content', group='js', filename='tabs.js')}}" defer></script>
<script src="{{url_for('static_content', group='js', filename='vis.js')}}" defer></script> <script src="{{url_for('static_content', group='js', filename='vis.js')}}" defer></script>
<script> <script>
const browser_steps_available_screenshots=JSON.parse('{{ watch.get_browsersteps_available_screenshots|tojson }}'); const browser_steps_available_screenshots=JSON.parse('{{ watch.get_browsersteps_available_screenshots|tojson }}');
const browser_steps_config=JSON.parse('{{ browser_steps_config|tojson }}'); const browser_steps_config=JSON.parse('{{ browser_steps_config|tojson }}');
- const browser_steps_fetch_screenshot_image_url="{{url_for('browser_steps.browser_steps_fetch_screenshot_image', uuid=uuid)}}";
+ <!-- Should be _external so that firefox and others load it more reliably -->
+ const browser_steps_fetch_screenshot_image_url="{{url_for('browser_steps.browser_steps_fetch_screenshot_image', uuid=uuid, _external=True)}}";
const browser_steps_last_error_step={{ watch.browser_steps_last_error_step|tojson }}; const browser_steps_last_error_step={{ watch.browser_steps_last_error_step|tojson }};
const browser_steps_start_url="{{url_for('browser_steps.browsersteps_start_session', uuid=uuid)}}"; const browser_steps_start_url="{{url_for('browser_steps.browsersteps_start_session', uuid=uuid)}}";
const browser_steps_sync_url="{{url_for('browser_steps.browsersteps_ui_update', uuid=uuid)}}"; const browser_steps_sync_url="{{url_for('browser_steps.browsersteps_ui_update', uuid=uuid)}}";
{% if emailprefix %} {% if emailprefix %}
const email_notification_prefix=JSON.parse('{{ emailprefix|tojson }}'); const email_notification_prefix=JSON.parse('{{ emailprefix|tojson }}');
{% endif %} {% endif %}
- const notification_base_url="{{url_for('ajax_callback_send_notification_test')}}";
+ const notification_base_url="{{url_for('ajax_callback_send_notification_test', watch_uuid=uuid)}}";
const playwright_enabled={% if playwright_enabled %} true {% else %} false {% endif %}; const playwright_enabled={% if playwright_enabled %} true {% else %} false {% endif %};
const recheck_proxy_start_url="{{url_for('check_proxies.start_check', uuid=uuid)}}"; const recheck_proxy_start_url="{{url_for('check_proxies.start_check', uuid=uuid)}}";
const proxy_recheck_status_url="{{url_for('check_proxies.get_recheck_status', uuid=uuid)}}"; const proxy_recheck_status_url="{{url_for('check_proxies.get_recheck_status', uuid=uuid)}}";
@@ -31,6 +32,7 @@
<script src="{{url_for('static_content', group='js', filename='browser-steps.js')}}" defer></script> <script src="{{url_for('static_content', group='js', filename='browser-steps.js')}}" defer></script>
{% endif %} {% endif %}
{% set has_tag_filters_extra="WARNING: Watch has tag/groups set with special filters\n" if has_special_tag_options else '' %}
<script src="{{url_for('static_content', group='js', filename='recheck-proxy.js')}}" defer></script> <script src="{{url_for('static_content', group='js', filename='recheck-proxy.js')}}" defer></script>
<div class="edit-form monospaced-textarea"> <div class="edit-form monospaced-textarea">
@@ -85,15 +87,9 @@
{{ render_field(form.tags) }} {{ render_field(form.tags) }}
<span class="pure-form-message-inline">Organisational tag/group name used in the main listing page</span> <span class="pure-form-message-inline">Organisational tag/group name used in the main listing page</span>
</div> </div>
<div class="pure-control-group"> <div class="pure-control-group time-between-check border-fieldset">
{{ render_field(form.time_between_check, class="time-check-widget") }} {{ render_field(form.time_between_check, class="time-check-widget") }}
{% if has_empty_checktime %} {{ render_checkbox_field(form.time_between_check_use_default, class="use-default-timecheck") }}
<span class="pure-form-message-inline">Currently using the <a
href="{{ url_for('settings_page', uuid=uuid) }}">default global settings</a>, change to another value if you want to be specific.</span>
{% else %}
<span class="pure-form-message-inline">Set to blank to use the <a
href="{{ url_for('settings_page', uuid=uuid) }}">default global settings</a>.</span>
{% endif %}
</div> </div>
<div class="pure-control-group"> <div class="pure-control-group">
{{ render_checkbox_field(form.extract_title_as_title) }} {{ render_checkbox_field(form.extract_title_as_title) }}
@@ -228,7 +224,7 @@ User-Agent: wonderbra 1.0") }}
</div> </div>
</div> </div>
<div id="browser-steps-fieldlist" style="padding-left: 1em; width: 350px; font-size: 80%;" > <div id="browser-steps-fieldlist" style="padding-left: 1em; width: 350px; font-size: 80%;" >
<span id="browserless-seconds-remaining">Loading</span> <span style="font-size: 80%;"> (<a target=_new href="https://github.com/dgtlmoon/changedetection.io/pull/478/files#diff-1a79d924d1840c485238e66772391268a89c95b781d69091384cf1ea1ac146c9R4">?</a>) </span> <span id="browser-seconds-remaining">Loading</span> <span style="font-size: 80%;"> (<a target=_new href="https://github.com/dgtlmoon/changedetection.io/pull/478/files#diff-1a79d924d1840c485238e66772391268a89c95b781d69091384cf1ea1ac146c9R4">?</a>) </span>
{{ render_field(form.browser_steps) }} {{ render_field(form.browser_steps) }}
</div> </div>
</div> </div>
@@ -280,7 +276,7 @@ User-Agent: wonderbra 1.0") }}
<div class="pure-control-group"> <div class="pure-control-group">
{% set field = render_field(form.include_filters, {% set field = render_field(form.include_filters,
rows=5, rows=5,
placeholder="#example placeholder=has_tag_filters_extra+"#example
xpath://body/div/span[contains(@class, 'example-class')]", xpath://body/div/span[contains(@class, 'example-class')]",
class="m-d") class="m-d")
%} %}
@@ -296,7 +292,7 @@ xpath://body/div/span[contains(@class, 'example-class')]",
<ul> <ul>
<li>JSONPath: Prefix with <code>json:</code>, use <code>json:$</code> to force re-formatting if required, <a href="https://jsonpath.com/" target="new">test your JSONPath here</a>.</li> <li>JSONPath: Prefix with <code>json:</code>, use <code>json:$</code> to force re-formatting if required, <a href="https://jsonpath.com/" target="new">test your JSONPath here</a>.</li>
{% if jq_support %} {% if jq_support %}
- <li>jq: Prefix with <code>jq:</code> and <a href="https://jqplay.org/" target="new">test your jq here</a>. Using <a href="https://stedolan.github.io/jq/" target="new">jq</a> allows for complex filtering and processing of JSON data with built-in functions, regex, filtering, and more. See examples and documentation <a href="https://stedolan.github.io/jq/manual/" target="new">here</a>.</li>
+ <li>jq: Prefix with <code>jq:</code> and <a href="https://jqplay.org/" target="new">test your jq here</a>. Using <a href="https://stedolan.github.io/jq/" target="new">jq</a> allows for complex filtering and processing of JSON data with built-in functions, regex, filtering, and more. See examples and documentation <a href="https://stedolan.github.io/jq/manual/" target="new">here</a>. Prefix <code>jqraw:</code> outputs the results as text instead of a JSON list.</li>
{% else %} {% else %}
<li>jq support not installed</li> <li>jq support not installed</li>
{% endif %} {% endif %}
@@ -316,18 +312,19 @@ xpath://body/div/span[contains(@class, 'example-class')]",
</span> </span>
</div> </div>
<fieldset class="pure-control-group"> <fieldset class="pure-control-group">
{{ render_field(form.subtractive_selectors, rows=5, placeholder="header {{ render_field(form.subtractive_selectors, rows=5, placeholder=has_tag_filters_extra+"header
footer footer
nav nav
.stockticker") }} .stockticker") }}
<span class="pure-form-message-inline"> <span class="pure-form-message-inline">
<ul> <ul>
<li> Remove HTML element(s) by CSS selector before text conversion. </li> <li> Remove HTML element(s) by CSS selector before text conversion. </li>
<li> Don't paste HTML here, use only CSS selectors </li>
<li> Add multiple elements or CSS selectors per line to ignore multiple parts of the HTML. </li> <li> Add multiple elements or CSS selectors per line to ignore multiple parts of the HTML. </li>
</ul> </ul>
</span> </span>
</fieldset> </fieldset>
<div class="text-filtering"> <div class="text-filtering border-fieldset">
<fieldset class="pure-group" id="text-filtering-type-options"> <fieldset class="pure-group" id="text-filtering-type-options">
<h3>Text filtering</h3> <h3>Text filtering</h3>
Limit trigger/ignore/block/extract to;<br> Limit trigger/ignore/block/extract to;<br>
@@ -339,6 +336,10 @@ nav
<span class="pure-form-message-inline">When content is merely moved in a list, it will also trigger an <strong>addition</strong>, consider enabling <code><strong>Only trigger when unique lines appear</strong></code></span> <span class="pure-form-message-inline">When content is merely moved in a list, it will also trigger an <strong>addition</strong>, consider enabling <code><strong>Only trigger when unique lines appear</strong></code></span>
</fieldset> </fieldset>
<fieldset class="pure-control-group">
{{ render_checkbox_field(form.sort_text_alphabetically) }}
<span class="pure-form-message-inline">Helps reduce changes detected caused by sites shuffling lines around, combine with <i>check unique lines</i> below.</span>
</fieldset>
<fieldset class="pure-control-group"> <fieldset class="pure-control-group">
{{ render_checkbox_field(form.check_unique_lines) }} {{ render_checkbox_field(form.check_unique_lines) }}
<span class="pure-form-message-inline">Good for websites that just move the content around, and you want to know when NEW content is added, compares new lines against all history for this watch.</span> <span class="pure-form-message-inline">Good for websites that just move the content around, and you want to know when NEW content is added, compares new lines against all history for this watch.</span>
@@ -401,6 +402,7 @@ Unavailable") }}
<li>Use <code>//(?aiLmsux))</code> type flags (more <a href="https://docs.python.org/3/library/re.html#index-15">information here</a>)<br></li> <li>Use <code>//(?aiLmsux))</code> type flags (more <a href="https://docs.python.org/3/library/re.html#index-15">information here</a>)<br></li>
<li>Keyword example &dash; example <code>Out of stock</code></li> <li>Keyword example &dash; example <code>Out of stock</code></li>
<li>Use groups to extract just that text &dash; example <code>/reports.+?(\d+)/i</code> returns a list of years only</li> <li>Use groups to extract just that text &dash; example <code>/reports.+?(\d+)/i</code> returns a list of years only</li>
<li>Example - match lines containing a keyword <code>/.*icecream.*/</code></li>
</ul> </ul>
</li> </li>
<li>One line per regular-expression/string match</li> <li>One line per regular-expression/string match</li>
@@ -430,8 +432,8 @@ Unavailable") }}
<fieldset> <fieldset>
<div class="pure-control-group"> <div class="pure-control-group">
{% if visualselector_enabled %} {% if visualselector_enabled %}
<span class="pure-form-message-inline"> <span class="pure-form-message-inline" id="visual-selector-heading">
The Visual Selector tool lets you select the <i>text</i> elements that will be used for the change detection &dash; after the <i>Browser Steps</i> has completed.<br><br> The Visual Selector tool lets you select the <i>text</i> elements that will be used for the change detection. It automatically fills-in the filters in the "CSS/JSONPath/JQ/XPath Filters" box of the <a href="#filters-and-triggers">Filters & Triggers</a> tab. Use <strong>Shift+Click</strong> to select multiple items.
</span> </span>
<div id="selector-header"> <div id="selector-header">
@@ -479,9 +481,17 @@ Unavailable") }}
<td>{{ "{:,}".format(watch.history|length) }}</td> <td>{{ "{:,}".format(watch.history|length) }}</td>
</tr> </tr>
<tr> <tr>
- <td>Last fetch time</td>
+ <td>Last fetch duration</td>
<td>{{ watch.fetch_time }}s</td> <td>{{ watch.fetch_time }}s</td>
</tr> </tr>
<tr>
<td>Notification alert count</td>
<td>{{ watch.notification_alert_count }}</td>
</tr>
<tr>
<td>Server type reply</td>
<td>{{ watch.get('remote_server_reply') }}</td>
</tr>
</tbody> </tbody>
</table> </table>
</div> </div>
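On the new jqraw: prefix mentioned in the filter help above: conceptually, the JSON-style filters keep the matches as a JSON list, while jqraw: emits the matched values as plain text lines. A rough illustration using the Python jq bindings, not changedetection.io's internal implementation:

import json
import jq  # pip install jq

doc = {"items": [{"name": "First"}, {"name": "Second"}]}
program = jq.compile(".items[].name")

# What a JSON-style filter keeps: a JSON list of the matches
print(json.dumps(program.input(doc).all()))          # ["First", "Second"]

# What a raw/text-style filter keeps: one matched value per line, as plain text
print("\n".join(str(v) for v in program.input(doc).all()))  # two text lines: First / Second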

View File

@@ -1,6 +1,6 @@
{% extends 'base.html' %} {% extends 'base.html' %}
{% block content %} {% block content %}
- {% from '_helpers.jinja' import render_field %}
+ {% from '_helpers.html' import render_field %}
<script src="{{url_for('static_content', group='js', filename='tabs.js')}}" defer></script> <script src="{{url_for('static_content', group='js', filename='tabs.js')}}" defer></script>
<div class="edit-form monospaced-textarea"> <div class="edit-form monospaced-textarea">
@@ -107,7 +107,7 @@
<option value="" style="color: #aaa"> -- none --</option> <option value="" style="color: #aaa"> -- none --</option>
<option value="url">URL</option> <option value="url">URL</option>
<option value="title">Title</option> <option value="title">Title</option>
<option value="include_filter">CSS/xPath filter</option> <option value="include_filters">CSS/xPath filter</option>
<option value="tag">Group / Tag name(s)</option> <option value="tag">Group / Tag name(s)</option>
<option value="interval_minutes">Recheck time (minutes)</option> <option value="interval_minutes">Recheck time (minutes)</option>
</select></td> </select></td>

View File

@@ -1,72 +1,103 @@
{% extends 'base.html' %} {% extends 'base.html' %}
{% block content %} {% block content %}
<script> <script>
const screenshot_url="{{url_for('static_content', group='screenshot', filename=uuid)}}"; const screenshot_url = "{{url_for('static_content', group='screenshot', filename=uuid)}}";
{% if last_error_screenshot %} {% if last_error_screenshot %}
const error_screenshot_url="{{url_for('static_content', group='screenshot', filename=uuid, error_screenshot=1) }}"; const error_screenshot_url = "{{url_for('static_content', group='screenshot', filename=uuid, error_screenshot=1) }}";
{% endif %}
const highlight_submit_ignore_url="{{url_for('highlight_submit_ignore_url', uuid=uuid)}}";
</script>
<script src="{{url_for('static_content', group='js', filename='diff-overview.js')}}" defer></script>
<script src="{{url_for('static_content', group='js', filename='tabs.js')}}" defer></script>
<div class="tabs">
<ul>
{% if last_error_text %}<li class="tab" id="error-text-tab"><a href="#error-text">Error Text</a></li> {% endif %}
{% if last_error_screenshot %}<li class="tab" id="error-screenshot-tab"><a href="#error-screenshot">Error Screenshot</a></li> {% endif %}
{% if history_n > 0 %}
<li class="tab" id="text-tab"><a href="#text">Text</a></li>
<li class="tab" id="screenshot-tab"><a href="#screenshot">Screenshot</a></li>
{% endif %} {% endif %}
</ul> const highlight_submit_ignore_url = "{{url_for('highlight_submit_ignore_url', uuid=uuid)}}";
</div> </script>
<form><input type="hidden" name="csrf_token" value="{{ csrf_token() }}"></form> <script src="{{ url_for('static_content', group='js', filename='diff-overview.js') }}" defer></script>
<div id="diff-ui"> <script src="{{ url_for('static_content', group='js', filename='preview.js') }}" defer></script>
<div class="tab-pane-inner" id="error-text"> <script src="{{ url_for('static_content', group='js', filename='tabs.js') }}" defer></script>
<div class="snapshot-age error">{{watch.error_text_ctime|format_seconds_ago}} seconds ago</div> {% if versions|length >= 2 %}
<pre> <div id="settings" style="text-align: center;">
<form class="pure-form " action="" method="POST">
<fieldset>
<label for="preview-version">Select timestamp</label> <select id="preview-version"
name="from_version"
class="needs-localtime">
{% for version in versions|reverse %}
<option value="{{ version }}" {% if version == current_version %} selected="" {% endif %}>
{{ version }}
</option>
{% endfor %}
</select>
<button type="submit" class="pure-button pure-button-primary">Go</button>
</fieldset>
</form>
<br>
<strong>Keyboard: </strong><a href="" class="pure-button pure-button-primary" id="btn-previous">
&larr; Previous</a> &nbsp; <a class="pure-button pure-button-primary" id="btn-next" href="">
&rarr; Next</a>
</div>
{% endif %}
<div class="tabs">
<ul>
{% if last_error_text %}
<li class="tab" id="error-text-tab"><a href="#error-text">Error Text</a></li> {% endif %}
{% if last_error_screenshot %}
<li class="tab" id="error-screenshot-tab"><a href="#error-screenshot">Error Screenshot</a>
</li> {% endif %}
{% if history_n > 0 %}
<li class="tab" id="text-tab"><a href="#text">Text</a></li>
<li class="tab" id="screenshot-tab"><a href="#screenshot">Screenshot</a></li>
{% endif %}
</ul>
</div>
<div id="diff-ui">
<div class="tab-pane-inner" id="error-text">
<div class="snapshot-age error">{{ watch.error_text_ctime|format_seconds_ago }} seconds ago</div>
<pre>
{{ last_error_text }} {{ last_error_text }}
</pre> </pre>
</div>
<div class="tab-pane-inner" id="error-screenshot">
<div class="snapshot-age error">{{ watch.snapshot_error_screenshot_ctime|format_seconds_ago }} seconds ago
</div>
<img id="error-screenshot-img" style="max-width: 80%"
alt="Current erroring screenshot from most recent request">
</div>
<div class="tab-pane-inner" id="text">
<div class="snapshot-age">{{ current_version|format_timestamp_timeago }}</div>
<span class="ignored">Grey lines are ignored</span> <span class="triggered">Blue lines are triggers</span>
<span class="tip"><strong>Pro-tip</strong>: Highlight text to add to ignore filters</span>
<table>
<tbody>
<tr>
<td id="diff-col" class="highlightable-filter">
{% for row in content %}
<div class="{{ row.classes }}">{{ row.line }}</div>
{% endfor %}
</td>
</tr>
</tbody>
</table>
</div>
<div class="tab-pane-inner" id="screenshot">
<div class="tip">
For now, Differences are performed on text, not graphically, only the latest screenshot is available.
</div>
<br>
{% if is_html_webdriver %}
{% if screenshot %}
<div class="snapshot-age">{{ watch.snapshot_screenshot_ctime|format_timestamp_timeago }}</div>
<img style="max-width: 80%" id="screenshot-img" alt="Current screenshot from most recent request">
{% else %}
No screenshot available just yet! Try rechecking the page.
{% endif %}
{% else %}
<strong>Screenshot requires Playwright/WebDriver enabled</strong>
{% endif %}
</div>
</div> </div>
<div class="tab-pane-inner" id="error-screenshot">
<div class="snapshot-age error">{{watch.snapshot_error_screenshot_ctime|format_seconds_ago}} seconds ago</div>
<img id="error-screenshot-img" style="max-width: 80%" alt="Current erroring screenshot from most recent request" >
</div>
<div class="tab-pane-inner" id="text">
<div class="snapshot-age">{{watch.snapshot_text_ctime|format_timestamp_timeago}}</div>
<span class="ignored">Grey lines are ignored</span> <span class="triggered">Blue lines are triggers</span> <span class="tip"><strong>Pro-tip</strong>: Highlight text to add to ignore filters</span>
<table>
<tbody>
<tr>
<td id="diff-col" class="highlightable-filter">
{% for row in content %}
<div class="{{row.classes}}">{{row.line}}</div>
{% endfor %}
</td>
</tr>
</tbody>
</table>
</div>
<div class="tab-pane-inner" id="screenshot">
<div class="tip">
For now, Differences are performed on text, not graphically, only the latest screenshot is available.
</div>
<br>
{% if is_html_webdriver %}
{% if screenshot %}
<div class="snapshot-age">{{watch.snapshot_screenshot_ctime|format_timestamp_timeago}}</div>
<img style="max-width: 80%" id="screenshot-img" alt="Current screenshot from most recent request" >
{% else %}
No screenshot available just yet! Try rechecking the page.
{% endif %}
{% else %}
<strong>Screenshot requires Playwright/WebDriver enabled</strong>
{% endif %}
</div>
</div>
{% endblock %} {% endblock %}

View File

@@ -1,10 +1,10 @@
{% extends 'base.html' %} {% extends 'base.html' %}
{% block content %} {% block content %}
- {% from '_helpers.jinja' import render_field, render_checkbox_field, render_button %}
- {% from '_common_fields.jinja' import render_common_settings_form %}
+ {% from '_helpers.html' import render_field, render_checkbox_field, render_button %}
+ {% from '_common_fields.html' import render_common_settings_form %}
<script> <script>
- const notification_base_url="{{url_for('ajax_callback_send_notification_test')}}";
+ const notification_base_url="{{url_for('ajax_callback_send_notification_test', mode="global-settings")}}";
{% if emailprefix %} {% if emailprefix %}
const email_notification_prefix=JSON.parse('{{emailprefix|tojson}}'); const email_notification_prefix=JSON.parse('{{emailprefix|tojson}}');
{% endif %} {% endif %}
@@ -31,7 +31,7 @@
<fieldset> <fieldset>
<div class="pure-control-group"> <div class="pure-control-group">
{{ render_field(form.requests.form.time_between_check, class="time-check-widget") }} {{ render_field(form.requests.form.time_between_check, class="time-check-widget") }}
<span class="pure-form-message-inline">Default time for all watches, when the watch does not have a specific time setting.</span> <span class="pure-form-message-inline">Default recheck time for all watches, current system minimum is <i>{{min_system_recheck_seconds}}</i> seconds (<a href="https://github.com/dgtlmoon/changedetection.io/wiki/Misc-system-settings#enviroment-variables">more info</a>).</span>
</div> </div>
<div class="pure-control-group"> <div class="pure-control-group">
{{ render_field(form.requests.form.jitter_seconds, class="jitter_seconds") }} {{ render_field(form.requests.form.jitter_seconds, class="jitter_seconds") }}
@@ -62,6 +62,9 @@
<span class="pure-form-message-inline">Allow access to view watch diff page when password is enabled (Good for sharing the diff page) <span class="pure-form-message-inline">Allow access to view watch diff page when password is enabled (Good for sharing the diff page)
</span> </span>
</div> </div>
<div class="pure-control-group">
{{ render_checkbox_field(form.application.form.rss_hide_muted_watches) }}
</div>
<div class="pure-control-group"> <div class="pure-control-group">
{{ render_field(form.application.form.pager_size) }} {{ render_field(form.application.form.pager_size) }}
<span class="pure-form-message-inline">Number of items per page in the watch overview list, 0 to disable.</span> <span class="pure-form-message-inline">Number of items per page in the watch overview list, 0 to disable.</span>
@@ -108,8 +111,6 @@
<p>Use the <strong>Basic</strong> method (default) where your watched sites don't need Javascript to render.</p> <p>Use the <strong>Basic</strong> method (default) where your watched sites don't need Javascript to render.</p>
<p>The <strong>Chrome/Javascript</strong> method requires a network connection to a running WebDriver+Chrome server, set by the ENV var 'WEBDRIVER_URL'. </p> <p>The <strong>Chrome/Javascript</strong> method requires a network connection to a running WebDriver+Chrome server, set by the ENV var 'WEBDRIVER_URL'. </p>
</span> </span>
<br>
Tip: <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration#brightdata-proxy-support">Connect using Bright Data and Oxylabs Proxies, find out more here.</a>
</div> </div>
<fieldset class="pure-group" id="webdriver-override-options" data-visible-for="application-fetch_backend=html_webdriver"> <fieldset class="pure-group" id="webdriver-override-options" data-visible-for="application-fetch_backend=html_webdriver">
<div class="pure-form-message-inline"> <div class="pure-form-message-inline">
@@ -121,6 +122,18 @@
{{ render_field(form.application.form.webdriver_delay) }} {{ render_field(form.application.form.webdriver_delay) }}
</div> </div>
</fieldset> </fieldset>
<div class="pure-control-group inline-radio">
{{ render_field(form.requests.form.default_ua) }}
<span class="pure-form-message-inline">
Applied to all requests.<br><br>
Note: Simply changing the User-Agent often does not defeat anti-robot technologies; it's important to consider <a href="https://changedetection.io/tutorial/what-are-main-types-anti-robot-mechanisms">all of the ways that the browser is detected</a>.
</span>
</div>
<div class="pure-control-group">
<br>
Tip: <a href="https://github.com/dgtlmoon/changedetection.io/wiki/Proxy-configuration#brightdata-proxy-support">Connect using Bright Data and Oxylabs Proxies, find out more here.</a>
</div>
</div> </div>
<div class="tab-pane-inner" id="filters"> <div class="tab-pane-inner" id="filters">
@@ -168,12 +181,12 @@ nav
</div> </div>
<div class="tab-pane-inner" id="api"> <div class="tab-pane-inner" id="api">
<h4>API Access</h4>
<p>Drive your changedetection.io via API. More about <a href="https://github.com/dgtlmoon/changedetection.io/wiki/API-Reference">API access here</a>.</p> <p>Drive your changedetection.io via API. More about <a href="https://github.com/dgtlmoon/changedetection.io/wiki/API-Reference">API access here</a>.</p>
<div class="pure-control-group"> <div class="pure-control-group">
{{ render_checkbox_field(form.application.form.api_access_token_enabled) }} {{ render_checkbox_field(form.application.form.api_access_token_enabled) }}
<div class="pure-form-message-inline">Restrict API access limit by using <code>x-api-key</code> header</div><br> <div class="pure-form-message-inline">Restrict API access limit by using <code>x-api-key</code> header - required for the Chrome Extension to work</div><br>
<div class="pure-form-message-inline"><br>API Key <span id="api-key">{{api_key}}</span> <div class="pure-form-message-inline"><br>API Key <span id="api-key">{{api_key}}</span>
<span style="display:none;" id="api-key-copy" >copy</span> <span style="display:none;" id="api-key-copy" >copy</span>
</div> </div>
@@ -181,6 +194,20 @@ nav
<div class="pure-control-group"> <div class="pure-control-group">
<a href="{{url_for('settings_reset_api_key')}}" class="pure-button button-small button-cancel">Regenerate API key</a> <a href="{{url_for('settings_reset_api_key')}}" class="pure-button button-small button-cancel">Regenerate API key</a>
</div> </div>
<div class="pure-control-group">
<h4>Chrome Extension</h4>
<p>Easily add any web-page to your changedetection.io installation from within Chrome.</p>
<strong>Step 1</strong> Install the extension, <strong>Step 2</strong> Navigate to this page,
<strong>Step 3</strong> Open the extension from the toolbar and click "<i>Sync API Access</i>"
<p>
<a id="chrome-extension-link"
title="Try our new Chrome Extension!"
href="https://chromewebstore.google.com/detail/changedetectionio-website/kefcfmgmlhmankjmnbijimhofdjekbop">
<img src="{{ url_for('static_content', group='images', filename='Google-Chrome-icon.png') }}" alt="Chrome">
Chrome Webstore
</a>
</p>
</div>
</div> </div>
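
As a side note on the API Access block above: once the API access token is enabled, every request must carry the key in the x-api-key header. The snippet below is only a minimal sketch, assuming a local instance on port 5000 and the v1 endpoints from the API-Reference wiki page; the base URL and key value are placeholders, not part of this diff.

import requests

BASE_URL = "http://localhost:5000"                      # placeholder: your changedetection.io instance
API_KEY = "value shown in the api-key span above"       # placeholder

# List all watches; the request is rejected when API access tokens are
# enabled and the x-api-key header is missing or wrong.
response = requests.get(f"{BASE_URL}/api/v1/watch", headers={"x-api-key": API_KEY})
response.raise_for_status()
for uuid, watch in response.json().items():
    print(uuid, watch.get("url"))

Per the note above, this is the same header the Chrome Extension relies on after "Sync API Access".
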
<div class="tab-pane-inner" id="proxies"> <div class="tab-pane-inner" id="proxies">
<div id="recommended-proxy"> <div id="recommended-proxy">

View File

@@ -1,6 +1,6 @@
{% extends 'base.html' %} {% extends 'base.html' %}
{% block content %} {% block content %}
{% from '_helpers.jinja' import render_simple_field, render_field, render_nolabel_field %} {% from '_helpers.html' import render_simple_field, render_field, render_nolabel_field, sort_by_title %}
<script src="{{url_for('static_content', group='js', filename='jquery-3.6.0.min.js')}}"></script> <script src="{{url_for('static_content', group='js', filename='jquery-3.6.0.min.js')}}"></script>
<script src="{{url_for('static_content', group='js', filename='watch-overview.js')}}" defer></script> <script src="{{url_for('static_content', group='js', filename='watch-overview.js')}}" defer></script>
@@ -13,7 +13,7 @@
<div id="watch-add-wrapper-zone"> <div id="watch-add-wrapper-zone">
{{ render_nolabel_field(form.url, placeholder="https://...", required=true) }} {{ render_nolabel_field(form.url, placeholder="https://...", required=true) }}
{{ render_nolabel_field(form.tags, value=tags[active_tag].title if active_tag else '', placeholder="watch label / tag") }} {{ render_nolabel_field(form.tags, value=active_tag.title if active_tag_uuid else '', placeholder="watch label / tag") }}
{{ render_nolabel_field(form.watch_submit_button, title="Watch this URL!" ) }} {{ render_nolabel_field(form.watch_submit_button, title="Watch this URL!" ) }}
{{ render_nolabel_field(form.edit_and_watch_submit_button, title="Edit first then Watch") }} {{ render_nolabel_field(form.edit_and_watch_submit_button, title="Edit first then Watch") }}
</div> </div>
@@ -37,6 +37,7 @@
<button class="pure-button button-secondary button-xsmall" name="op" value="assign-tag" id="checkbox-assign-tag">Tag</button> <button class="pure-button button-secondary button-xsmall" name="op" value="assign-tag" id="checkbox-assign-tag">Tag</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="mark-viewed">Mark viewed</button> <button class="pure-button button-secondary button-xsmall" name="op" value="mark-viewed">Mark viewed</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="notification-default">Use default notification</button> <button class="pure-button button-secondary button-xsmall" name="op" value="notification-default">Use default notification</button>
<button class="pure-button button-secondary button-xsmall" name="op" value="clear-errors">Clear errors</button>
<button class="pure-button button-secondary button-xsmall" style="background: #dd4242;" name="op" value="clear-history">Clear/reset history</button> <button class="pure-button button-secondary button-xsmall" style="background: #dd4242;" name="op" value="clear-history">Clear/reset history</button>
<button class="pure-button button-secondary button-xsmall" style="background: #dd4242;" name="op" value="delete">Delete</button> <button class="pure-button button-secondary button-xsmall" style="background: #dd4242;" name="op" value="delete">Delete</button>
</div> </div>
@@ -45,12 +46,14 @@
{% endif %} {% endif %}
{% if search_q %}<div id="search-result-info">Searching "<strong><i>{{search_q}}</i></strong>"</div>{% endif %} {% if search_q %}<div id="search-result-info">Searching "<strong><i>{{search_q}}</i></strong>"</div>{% endif %}
<div> <div>
<a href="{{url_for('index')}}" class="pure-button button-tag {{'active' if not active_tag }}">All</a> <a href="{{url_for('index')}}" class="pure-button button-tag {{'active' if not active_tag_uuid }}">All</a>
{% for uuid, tag in tags.items() %}
{% if tag != "" %} <!-- tag list -->
<a href="{{url_for('index', tag=uuid) }}" class="pure-button button-tag {{'active' if active_tag == uuid }}">{{ tag.title }}</a> {% for uuid, tag in tags %}
{% endif %} {% if tag != "" %}
{% endfor %} <a href="{{url_for('index', tag=uuid) }}" class="pure-button button-tag {{'active' if active_tag_uuid == uuid }}">{{ tag.title }}</a>
{% endif %}
{% endfor %}
</div> </div>
{% set sort_order = sort_order or 'asc' %} {% set sort_order = sort_order or 'asc' %}
@@ -64,18 +67,18 @@
<tr> <tr>
{% set link_order = "desc" if sort_order == 'asc' else "asc" %} {% set link_order = "desc" if sort_order == 'asc' else "asc" %}
{% set arrow_span = "" %} {% set arrow_span = "" %}
<th><input style="vertical-align: middle" type="checkbox" id="check-all" > <a class="{{ 'active '+link_order if sort_attribute == 'date_created' else 'inactive' }}" href="{{url_for('index', sort='date_created', order=link_order, tag=active_tag)}}"># <span class='arrow {{link_order}}'></span></a></th> <th><input style="vertical-align: middle" type="checkbox" id="check-all" > <a class="{{ 'active '+link_order if sort_attribute == 'date_created' else 'inactive' }}" href="{{url_for('index', sort='date_created', order=link_order, tag=active_tag_uuid)}}"># <span class='arrow {{link_order}}'></span></a></th>
<th></th> <th class="empty-cell"></th>
<th><a class="{{ 'active '+link_order if sort_attribute == 'label' else 'inactive' }}" href="{{url_for('index', sort='label', order=link_order, tag=active_tag)}}">Website <span class='arrow {{link_order}}'></span></a></th> <th><a class="{{ 'active '+link_order if sort_attribute == 'label' else 'inactive' }}" href="{{url_for('index', sort='label', order=link_order, tag=active_tag_uuid)}}">Website <span class='arrow {{link_order}}'></span></a></th>
<th><a class="{{ 'active '+link_order if sort_attribute == 'last_checked' else 'inactive' }}" href="{{url_for('index', sort='last_checked', order=link_order, tag=active_tag)}}">Last Checked <span class='arrow {{link_order}}'></span></a></th> <th><a class="{{ 'active '+link_order if sort_attribute == 'last_checked' else 'inactive' }}" href="{{url_for('index', sort='last_checked', order=link_order, tag=active_tag_uuid)}}">Last Checked <span class='arrow {{link_order}}'></span></a></th>
<th><a class="{{ 'active '+link_order if sort_attribute == 'last_changed' else 'inactive' }}" href="{{url_for('index', sort='last_changed', order=link_order, tag=active_tag)}}">Last Changed <span class='arrow {{link_order}}'></span></a></th> <th><a class="{{ 'active '+link_order if sort_attribute == 'last_changed' else 'inactive' }}" href="{{url_for('index', sort='last_changed', order=link_order, tag=active_tag_uuid)}}">Last Changed <span class='arrow {{link_order}}'></span></a></th>
<th></th> <th class="empty-cell"></th>
</tr> </tr>
</thead> </thead>
<tbody> <tbody>
{% if not watches|length %} {% if not watches|length %}
<tr> <tr>
<td colspan="6">No website watches configured, please add a URL in the box above, or <a href="{{ url_for('import_page')}}" >import a list</a>.</td> <td colspan="6" style="text-wrap: wrap;">No website watches configured, please add a URL in the box above, or <a href="{{ url_for('import_page')}}" >import a list</a>.</td>
</tr> </tr>
{% endif %} {% endif %}
{% for watch in (watches|sort(attribute=sort_attribute, reverse=sort_order == 'asc'))|pagination_slice(skip=pagination.skip) %} {% for watch in (watches|sort(attribute=sort_attribute, reverse=sort_order == 'asc'))|pagination_slice(skip=pagination.skip) %}
@@ -92,11 +95,11 @@
<td class="inline checkbox-uuid" ><input name="uuids" type="checkbox" value="{{ watch.uuid}} " > <span>{{ loop.index+pagination.skip }}</span></td> <td class="inline checkbox-uuid" ><input name="uuids" type="checkbox" value="{{ watch.uuid}} " > <span>{{ loop.index+pagination.skip }}</span></td>
<td class="inline watch-controls"> <td class="inline watch-controls">
{% if not watch.paused %} {% if not watch.paused %}
<a class="state-off" href="{{url_for('index', op='pause', uuid=watch.uuid, tag=active_tag)}}"><img src="{{url_for('static_content', group='images', filename='pause.svg')}}" alt="Pause checks" title="Pause checks" class="icon icon-pause" ></a> <a class="state-off" href="{{url_for('index', op='pause', uuid=watch.uuid, tag=active_tag_uuid)}}"><img src="{{url_for('static_content', group='images', filename='pause.svg')}}" alt="Pause checks" title="Pause checks" class="icon icon-pause" ></a>
{% else %} {% else %}
<a class="state-on" href="{{url_for('index', op='pause', uuid=watch.uuid, tag=active_tag)}}"><img src="{{url_for('static_content', group='images', filename='play.svg')}}" alt="UnPause checks" title="UnPause checks" class="icon icon-unpause" ></a> <a class="state-on" href="{{url_for('index', op='pause', uuid=watch.uuid, tag=active_tag_uuid)}}"><img src="{{url_for('static_content', group='images', filename='play.svg')}}" alt="UnPause checks" title="UnPause checks" class="icon icon-unpause" ></a>
{% endif %} {% endif %}
<a class="link-mute state-{{'on' if watch.notification_muted else 'off'}}" href="{{url_for('index', op='mute', uuid=watch.uuid, tag=active_tag)}}"><img src="{{url_for('static_content', group='images', filename='bell-off.svg')}}" alt="Mute notifications" title="Mute notifications" class="icon icon-mute" ></a> <a class="link-mute state-{{'on' if watch.notification_muted else 'off'}}" href="{{url_for('index', op='mute', uuid=watch.uuid, tag=active_tag_uuid)}}"><img src="{{url_for('static_content', group='images', filename='bell-off.svg')}}" alt="Mute notifications" title="Mute notifications" class="icon icon-mute" ></a>
</td> </td>
<td class="title-col inline">{{watch.title if watch.title is not none and watch.title|length > 0 else watch.url}} <td class="title-col inline">{{watch.title if watch.title is not none and watch.title|length > 0 else watch.url}}
<a class="external" target="_blank" rel="noopener" href="{{ watch.link.replace('source:','') }}"></a> <a class="external" target="_blank" rel="noopener" href="{{ watch.link.replace('source:','') }}"></a>
@@ -110,6 +113,7 @@
{% endif %} {% endif %}
{%if watch.is_pdf %}<img class="status-icon" src="{{url_for('static_content', group='images', filename='pdf-icon.svg')}}" title="Converting PDF to text" >{% endif %} {%if watch.is_pdf %}<img class="status-icon" src="{{url_for('static_content', group='images', filename='pdf-icon.svg')}}" title="Converting PDF to text" >{% endif %}
{% if watch.has_browser_steps %}<img class="status-icon status-browsersteps" src="{{url_for('static_content', group='images', filename='steps.svg')}}" title="Browser Steps is enabled" >{% endif %}
{% if watch.last_error is defined and watch.last_error != False %} {% if watch.last_error is defined and watch.last_error != False %}
<div class="fetch-error">{{ watch.last_error }} <div class="fetch-error">{{ watch.last_error }}
@@ -165,7 +169,7 @@
<td> <td>
<a {% if watch.uuid in queued_uuids %}disabled="true"{% endif %} href="{{ url_for('form_watch_checknow', uuid=watch.uuid, tag=request.args.get('tag')) }}" <a {% if watch.uuid in queued_uuids %}disabled="true"{% endif %} href="{{ url_for('form_watch_checknow', uuid=watch.uuid, tag=request.args.get('tag')) }}"
class="recheck pure-button pure-button-primary">{% if watch.uuid in queued_uuids %}Queued{% else %}Recheck{% endif %}</a> class="recheck pure-button pure-button-primary">{% if watch.uuid in queued_uuids %}Queued{% else %}Recheck{% endif %}</a>
<a href="{{ url_for('edit_page', uuid=watch.uuid)}}" class="pure-button pure-button-primary">Edit</a> <a href="{{ url_for('edit_page', uuid=watch.uuid)}}#general" class="pure-button pure-button-primary">Edit</a>
{% if watch.history_n >= 2 %} {% if watch.history_n >= 2 %}
{% if is_unviewed %} {% if is_unviewed %}
@@ -196,11 +200,11 @@
</li> </li>
{% endif %} {% endif %}
<li> <li>
<a href="{{ url_for('form_watch_checknow', tag=active_tag, with_errors=request.args.get('with_errors',0)) }}" class="pure-button button-tag ">Recheck <a href="{{ url_for('form_watch_checknow', tag=active_tag_uuid, with_errors=request.args.get('with_errors',0)) }}" class="pure-button button-tag ">Recheck
all {% if active_tag%} in "{{tags[active_tag].title}}"{%endif%}</a> all {% if active_tag_uuid %} in "{{active_tag.title}}"{%endif%}</a>
</li> </li>
<li> <li>
<a href="{{ url_for('rss', tag=active_tag , token=app_rss_token)}}"><img alt="RSS Feed" id="feed-icon" src="{{url_for('static_content', group='images', filename='Generic_Feed-icon.svg')}}" height="15"></a> <a href="{{ url_for('rss', tag=active_tag_uuid, token=app_rss_token)}}"><img alt="RSS Feed" id="feed-icon" src="{{url_for('static_content', group='images', filename='Generic_Feed-icon.svg')}}" height="15"></a>
</li> </li>
</ul> </ul>
{{ pagination.links }} {{ pagination.links }}

View File

@@ -4,6 +4,8 @@ import pytest
from changedetectionio import changedetection_app from changedetectionio import changedetection_app
from changedetectionio import store from changedetectionio import store
import os import os
import sys
from loguru import logger
# https://github.com/pallets/flask/blob/1.1.2/examples/tutorial/tests/test_auth.py # https://github.com/pallets/flask/blob/1.1.2/examples/tutorial/tests/test_auth.py
# Much better boilerplate than the docs # Much better boilerplate than the docs
@@ -11,6 +13,15 @@ import os
global app global app
# https://loguru.readthedocs.io/en/latest/resources/migration.html#replacing-caplog-fixture-from-pytest-library
# Show loguru logs only if CICD pytest fails.
from loguru import logger
@pytest.fixture
def reportlog(pytestconfig):
logging_plugin = pytestconfig.pluginmanager.getplugin("logging-plugin")
handler_id = logger.add(logging_plugin.report_handler, format="{message}")
yield
logger.remove(handler_id)
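
A hypothetical usage sketch of the reportlog fixture added above (not part of this diff): a test that requests the fixture has its loguru output routed into pytest's logging plugin for the duration of the test, so the log lines only surface in the report when the test fails.

from loguru import logger

def test_example_with_loguru_output(reportlog):
    # The handler added by the fixture forwards this message to pytest's report_handler
    logger.debug("only visible in the pytest report if this test fails")
    assert True
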
def cleanup(datastore_path): def cleanup(datastore_path):
import glob import glob
@@ -41,6 +52,18 @@ def app(request):
app_config = {'datastore_path': datastore_path, 'disable_checkver' : True} app_config = {'datastore_path': datastore_path, 'disable_checkver' : True}
cleanup(app_config['datastore_path']) cleanup(app_config['datastore_path'])
logger_level = 'TRACE'
logger.remove()
log_level_for_stdout = { 'DEBUG', 'SUCCESS' }
logger.configure(handlers=[
{"sink": sys.stdout, "level": logger_level,
"filter" : lambda record: record['level'].name in log_level_for_stdout},
{"sink": sys.stderr, "level": logger_level,
"filter": lambda record: record['level'].name not in log_level_for_stdout},
])
datastore = store.ChangeDetectionStore(datastore_path=app_config['datastore_path'], include_default_watches=False) datastore = store.ChangeDetectionStore(datastore_path=app_config['datastore_path'], include_default_watches=False)
app = changedetection_app(app_config, datastore) app = changedetection_app(app_config, datastore)

View File

@@ -7,10 +7,11 @@ from ..util import live_server_setup, wait_for_all_checks
def do_test(client, live_server, make_test_use_extra_browser=False): def do_test(client, live_server, make_test_use_extra_browser=False):
# Grep for this string in the logs? # Grep for this string in the logs?
test_url = f"https://changedetection.io/ci-test.html" test_url = f"https://changedetection.io/ci-test.html?non-custom-default=true"
# "non-custom-default" should not appear in the custom browser connection
custom_browser_name = 'custom browser URL' custom_browser_name = 'custom browser URL'
# needs to be set and something like 'ws://127.0.0.1:3000?stealth=1&--disable-web-security=true' # needs to be set and something like 'ws://127.0.0.1:3000'
assert os.getenv('PLAYWRIGHT_DRIVER_URL'), "Needs PLAYWRIGHT_DRIVER_URL set for this test" assert os.getenv('PLAYWRIGHT_DRIVER_URL'), "Needs PLAYWRIGHT_DRIVER_URL set for this test"
##################### #####################
@@ -19,9 +20,7 @@ def do_test(client, live_server, make_test_use_extra_browser=False):
data={"application-empty_pages_are_a_change": "", data={"application-empty_pages_are_a_change": "",
"requests-time_between_check-minutes": 180, "requests-time_between_check-minutes": 180,
'application-fetch_backend': "html_webdriver", 'application-fetch_backend': "html_webdriver",
# browserless-custom-url is setup in .github/workflows/test-only.yml 'requests-extra_browsers-0-browser_connection_url': 'ws://sockpuppetbrowser-custom-url:3000',
# the test script run_custom_browser_url_test.sh will look for 'custom-browser-search-string' in the container logs
'requests-extra_browsers-0-browser_connection_url': 'ws://browserless-custom-url:3000?stealth=1&--disable-web-security=true&custom-browser-search-string=1',
'requests-extra_browsers-0-browser_name': custom_browser_name 'requests-extra_browsers-0-browser_name': custom_browser_name
}, },
follow_redirects=True follow_redirects=True
@@ -51,7 +50,8 @@ def do_test(client, live_server, make_test_use_extra_browser=False):
res = client.post( res = client.post(
url_for("edit_page", uuid="first"), url_for("edit_page", uuid="first"),
data={ data={
"url": test_url, # 'run_customer_browser_url_tests.sh' will search for this string to know if we hit the right browser container or not
"url": f"https://changedetection.io/ci-test.html?custom-browser-search-string=1",
"tags": "", "tags": "",
"headers": "", "headers": "",
'fetch_backend': f"extra_browser_{custom_browser_name}", 'fetch_backend': f"extra_browser_{custom_browser_name}",

View File

@@ -37,4 +37,4 @@ def test_fetch_webdriver_content(client, live_server):
) )
logging.getLogger().info("Looking for correct fetched HTML (text) from server") logging.getLogger().info("Looking for correct fetched HTML (text) from server")
assert b'cool it works' in res.data assert b'cool it works' in res.data

View File

@@ -0,0 +1,56 @@
import os
from flask import url_for
from ..util import live_server_setup, wait_for_all_checks, extract_UUID_from_client
def test_execute_custom_js(client, live_server):
live_server_setup(live_server)
assert os.getenv('PLAYWRIGHT_DRIVER_URL'), "Needs PLAYWRIGHT_DRIVER_URL set for this test"
test_url = url_for('test_interactive_html_endpoint', _external=True)
test_url = test_url.replace('localhost.localdomain', 'cdio')
test_url = test_url.replace('localhost', 'cdio')
res = client.post(
url_for("form_quick_watch_add"),
data={"url": test_url, "tags": '', 'edit_and_watch_submit_button': 'Edit > Watch'},
follow_redirects=True
)
assert b"Watch added in Paused state, saving will unpause" in res.data
res = client.post(
url_for("edit_page", uuid="first", unpause_on_save=1),
data={
"url": test_url,
"tags": "",
'fetch_backend': "html_webdriver",
'webdriver_js_execute_code': 'document.querySelector("button[name=test-button]").click();',
'headers': "testheader: yes\buser-agent: MyCustomAgent",
},
follow_redirects=True
)
assert b"unpaused" in res.data
wait_for_all_checks(client)
uuid = extract_UUID_from_client(client)
assert live_server.app.config['DATASTORE'].data['watching'][uuid].history_n >= 1, "Watch history had at least 1 entry (everything fetched OK)"
assert b"This text should be removed" not in res.data
# Check HTML conversion was detected and worked
res = client.get(
url_for("preview_page", uuid=uuid),
follow_redirects=True
)
assert b"This text should be removed" not in res.data
assert b"I smell JavaScript because the button was pressed" in res.data
assert b"testheader: yes" in res.data
assert b"user-agent: mycustomagent" in res.data
client.get(
url_for("form_delete", uuid="all"),
follow_redirects=True
)

View File

@@ -1,6 +1,6 @@
#!/usr/bin/python3 #!/usr/bin/python3
import time import os
from flask import url_for from flask import url_for
from ..util import live_server_setup, wait_for_all_checks from ..util import live_server_setup, wait_for_all_checks
@@ -9,22 +9,20 @@ def test_preferred_proxy(client, live_server):
live_server_setup(live_server) live_server_setup(live_server)
url = "http://chosen.changedetection.io" url = "http://chosen.changedetection.io"
res = client.post( res = client.post(
url_for("import_page"), url_for("form_quick_watch_add"),
# Because a URL wont show in squid/proxy logs due it being SSLed data={"url": url, "tags": '', 'edit_and_watch_submit_button': 'Edit > Watch'},
# Use plain HTTP or a specific domain-name here
data={"urls": url},
follow_redirects=True follow_redirects=True
) )
assert b"Watch added in Paused state, saving will unpause" in res.data
assert b"1 Imported" in res.data
wait_for_all_checks(client) wait_for_all_checks(client)
res = client.post( res = client.post(
url_for("edit_page", uuid="first"), url_for("edit_page", uuid="first", unpause_on_save=1),
data={ data={
"include_filters": "", "include_filters": "",
"fetch_backend": "html_requests", "fetch_backend": 'html_webdriver' if os.getenv('PLAYWRIGHT_DRIVER_URL') else 'html_requests',
"headers": "", "headers": "",
"proxy": "proxy-two", "proxy": "proxy-two",
"tags": "", "tags": "",
@@ -32,6 +30,6 @@ def test_preferred_proxy(client, live_server):
}, },
follow_redirects=True follow_redirects=True
) )
assert b"Updated watch." in res.data assert b"unpaused" in res.data
wait_for_all_checks(client) wait_for_all_checks(client)
# Now the request should appear in the second-squid logs # Now the request should appear in the second-squid logs

View File

@@ -3,6 +3,7 @@
import time import time
from flask import url_for from flask import url_for
from ..util import live_server_setup, wait_for_all_checks from ..util import live_server_setup, wait_for_all_checks
import os
# just make a request, we will grep in the docker logs to see it actually got called # just make a request, we will grep in the docker logs to see it actually got called
def test_select_custom(client, live_server): def test_select_custom(client, live_server):
@@ -14,7 +15,7 @@ def test_select_custom(client, live_server):
data={ data={
"requests-time_between_check-minutes": 180, "requests-time_between_check-minutes": 180,
"application-ignore_whitespace": "y", "application-ignore_whitespace": "y",
"application-fetch_backend": "html_requests", "application-fetch_backend": 'html_webdriver' if os.getenv('PLAYWRIGHT_DRIVER_URL') else 'html_requests',
"requests-extra_proxies-0-proxy_name": "custom-test-proxy", "requests-extra_proxies-0-proxy_name": "custom-test-proxy",
# test:awesome is set in tests/proxy_list/squid-passwords.txt # test:awesome is set in tests/proxy_list/squid-passwords.txt
"requests-extra_proxies-0-proxy_url": "http://test:awesome@squid-custom:3128", "requests-extra_proxies-0-proxy_url": "http://test:awesome@squid-custom:3128",

View File

@@ -95,7 +95,7 @@ def test_restock_detection(client, live_server):
# We should have a notification # We should have a notification
time.sleep(2) time.sleep(2)
assert os.path.isfile("test-datastore/notification.txt") assert os.path.isfile("test-datastore/notification.txt"), "Notification received"
os.unlink("test-datastore/notification.txt") os.unlink("test-datastore/notification.txt")
# Default behaviour is to only fire notification when it goes OUT OF STOCK -> IN STOCK # Default behaviour is to only fire notification when it goes OUT OF STOCK -> IN STOCK
@@ -103,4 +103,9 @@ def test_restock_detection(client, live_server):
set_original_response() set_original_response()
client.get(url_for("form_watch_checknow"), follow_redirects=True) client.get(url_for("form_watch_checknow"), follow_redirects=True)
wait_for_all_checks(client) wait_for_all_checks(client)
assert not os.path.isfile("test-datastore/notification.txt") assert not os.path.isfile("test-datastore/notification.txt"), "No notification should have fired when it went OUT OF STOCK by default"
# BUT we should see that it correctly shows "not in stock"
res = client.get(url_for("index"))
assert b'not-in-stock' in res.data, "Correctly showing NOT IN STOCK in the list after it changed from IN STOCK"

View File

@@ -1,42 +1,51 @@
#!/usr/bin/python3 #!/usr/bin/python3
import smtpd import asyncio
import asyncore from aiosmtpd.controller import Controller
from aiosmtpd.smtp import SMTP
# Accept a SMTP message and offer a way to retrieve the last message via TCP Socket # Accept a SMTP message and offer a way to retrieve the last message via TCP Socket
last_received_message = b"Nothing" last_received_message = b"Nothing"
class CustomSMTPServer(smtpd.SMTPServer): class CustomSMTPHandler:
async def handle_DATA(self, server, session, envelope):
def process_message(self, peer, mailfrom, rcpttos, data, **kwargs):
global last_received_message global last_received_message
last_received_message = data last_received_message = envelope.content
print('Receiving message from:', peer) print('Receiving message from:', session.peer)
print('Message addressed from:', mailfrom) print('Message addressed from:', envelope.mail_from)
print('Message addressed to :', rcpttos) print('Message addressed to :', envelope.rcpt_tos)
print('Message length :', len(data)) print('Message length :', len(envelope.content))
print(data.decode('utf8')) print(envelope.content.decode('utf8'))
return return '250 Message accepted for delivery'
# Just print out the last message received on plain TCP socket server class EchoServerProtocol(asyncio.Protocol):
class EchoServer(asyncore.dispatcher): def connection_made(self, transport):
def __init__(self, host, port):
asyncore.dispatcher.__init__(self)
self.create_socket()
self.set_reuse_addr()
self.bind((host, port))
self.listen(5)
def handle_accepted(self, sock, addr):
global last_received_message global last_received_message
print('Incoming connection from %s' % repr(addr)) self.transport = transport
sock.send(last_received_message) peername = transport.get_extra_info('peername')
print('Incoming connection from {}'.format(peername))
self.transport.write(last_received_message)
last_received_message = b'' last_received_message = b''
self.transport.close()
server = CustomSMTPServer(('0.0.0.0', 11025), None) # SMTP mail goes here async def main():
server2 = EchoServer('0.0.0.0', 11080) # Echo back last message received # Start the SMTP server
asyncore.loop() controller = Controller(CustomSMTPHandler(), hostname='0.0.0.0', port=11025)
controller.start()
# Start the TCP Echo server
loop = asyncio.get_running_loop()
server = await loop.create_server(
lambda: EchoServerProtocol(),
'0.0.0.0', 11080
)
async with server:
await server.serve_forever()
if __name__ == "__main__":
asyncio.run(main())
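
A small usage sketch (an assumption for illustration, not part of this diff) of the plain-TCP echo port the rewritten script keeps on 11080: a client connects, the server writes back the last SMTP message it accepted on port 11025, clears it, and closes the connection, which is exactly what get_last_message_from_smtp_server() in the next file does.

import socket

# Hostname is a placeholder; the tests use their own smtp_test_server value.
with socket.create_connection(("localhost", 11080), timeout=5) as client_socket:
    data = client_socket.recv(50024)   # server sends the stored message, then closes
print(data.decode("utf8", errors="replace"))
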

View File

@@ -32,6 +32,8 @@ def get_last_message_from_smtp_server():
client_socket.connect((smtp_test_server, port)) # connect to the server client_socket.connect((smtp_test_server, port)) # connect to the server
data = client_socket.recv(50024).decode() # receive response data = client_socket.recv(50024).decode() # receive response
logging.info("get_last_message_from_smtp_server..")
logging.info(data)
client_socket.close() # close the connection client_socket.close() # close the connection
return data return data
@@ -71,6 +73,8 @@ def test_check_notification_email_formats_default_HTML(client, live_server):
wait_for_all_checks(client) wait_for_all_checks(client)
set_longer_modified_response() set_longer_modified_response()
time.sleep(2)
client.get(url_for("form_watch_checknow"), follow_redirects=True) client.get(url_for("form_watch_checknow"), follow_redirects=True)
wait_for_all_checks(client) wait_for_all_checks(client)
@@ -81,7 +85,7 @@ def test_check_notification_email_formats_default_HTML(client, live_server):
# The email should have two bodies, and the text/html part should be <br> # The email should have two bodies, and the text/html part should be <br>
assert 'Content-Type: text/plain' in msg assert 'Content-Type: text/plain' in msg
assert '(added) So let\'s see what happens.\n' in msg # The plaintext part with \n assert '(added) So let\'s see what happens.\r\n' in msg # The plaintext part with \r\n
assert 'Content-Type: text/html' in msg assert 'Content-Type: text/html' in msg
assert '(added) So let\'s see what happens.<br>' in msg # the html part assert '(added) So let\'s see what happens.<br>' in msg # the html part
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
@@ -97,6 +101,17 @@ def test_check_notification_email_formats_default_Text_override_HTML(client, liv
set_original_response() set_original_response()
global smtp_test_server global smtp_test_server
notification_url = f'mailto://changedetection@{smtp_test_server}:11025/?to=fff@home.com' notification_url = f'mailto://changedetection@{smtp_test_server}:11025/?to=fff@home.com'
notification_body = f"""<!DOCTYPE html>
<html lang="en">
<head>
<title>My Webpage</title>
</head>
<body>
<h1>Test</h1>
{default_notification_body}
</body>
</html>
"""
##################### #####################
# Set this up for when we remove the notification from the watch, it should fallback with these details # Set this up for when we remove the notification from the watch, it should fallback with these details
@@ -104,7 +119,7 @@ def test_check_notification_email_formats_default_Text_override_HTML(client, liv
url_for("settings_page"), url_for("settings_page"),
data={"application-notification_urls": notification_url, data={"application-notification_urls": notification_url,
"application-notification_title": "fallback-title " + default_notification_title, "application-notification_title": "fallback-title " + default_notification_title,
"application-notification_body": default_notification_body, "application-notification_body": notification_body,
"application-notification_format": 'Text', "application-notification_format": 'Text',
"requests-time_between_check-minutes": 180, "requests-time_between_check-minutes": 180,
'application-fetch_backend': "html_requests"}, 'application-fetch_backend': "html_requests"},
@@ -124,6 +139,7 @@ def test_check_notification_email_formats_default_Text_override_HTML(client, liv
wait_for_all_checks(client) wait_for_all_checks(client)
set_longer_modified_response() set_longer_modified_response()
time.sleep(2)
client.get(url_for("form_watch_checknow"), follow_redirects=True) client.get(url_for("form_watch_checknow"), follow_redirects=True)
wait_for_all_checks(client) wait_for_all_checks(client)
@@ -136,7 +152,7 @@ def test_check_notification_email_formats_default_Text_override_HTML(client, liv
# The email should not have two bodies, should be TEXT only # The email should not have two bodies, should be TEXT only
assert 'Content-Type: text/plain' in msg assert 'Content-Type: text/plain' in msg
assert '(added) So let\'s see what happens.\n' in msg # The plaintext part with \n assert '(added) So let\'s see what happens.\r\n' in msg # The plaintext part with \r\n
set_original_response() set_original_response()
# Now override as HTML format # Now override as HTML format
@@ -157,9 +173,14 @@ def test_check_notification_email_formats_default_Text_override_HTML(client, liv
# The email should have two bodies, and the text/html part should be <br> # The email should have two bodies, and the text/html part should be <br>
assert 'Content-Type: text/plain' in msg assert 'Content-Type: text/plain' in msg
assert '(removed) So let\'s see what happens.\n' in msg # The plaintext part with \n assert '(removed) So let\'s see what happens.\r\n' in msg # The plaintext part with \r\n
assert 'Content-Type: text/html' in msg assert 'Content-Type: text/html' in msg
assert '(removed) So let\'s see what happens.<br>' in msg # the html part assert '(removed) So let\'s see what happens.<br>' in msg # the html part
# https://github.com/dgtlmoon/changedetection.io/issues/2103
assert '<h1>Test</h1>' in msg
assert '&lt;' not in msg
assert 'Content-Type: text/html' in msg
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data assert b'Deleted' in res.data

View File

@@ -1,5 +1,5 @@
#!/usr/bin/python3 #!/usr/bin/python3
import os.path
import time import time
from flask import url_for from flask import url_for
from .util import live_server_setup, wait_for_all_checks from .util import live_server_setup, wait_for_all_checks
@@ -107,7 +107,6 @@ def test_check_add_line_contains_trigger(client, live_server):
#live_server_setup(live_server) #live_server_setup(live_server)
# Give the endpoint time to spin up # Give the endpoint time to spin up
time.sleep(1)
test_notification_url = url_for('test_notification_endpoint', _external=True).replace('http://', 'post://') + "?xxx={{ watch_url }}" test_notification_url = url_for('test_notification_endpoint', _external=True).replace('http://', 'post://') + "?xxx={{ watch_url }}"
res = client.post( res = client.post(
@@ -166,6 +165,7 @@ def test_check_add_line_contains_trigger(client, live_server):
# Takes a moment for apprise to fire # Takes a moment for apprise to fire
time.sleep(3) time.sleep(3)
assert os.path.isfile("test-datastore/notification.txt"), "Notification fired because I can see the output file"
with open("test-datastore/notification.txt", 'r') as f: with open("test-datastore/notification.txt", 'r') as f:
response= f.read() response= f.read()
assert '-Oh yes please-' in response assert '-Oh yes please-' in response

View File

@@ -149,6 +149,15 @@ def test_api_simple(client, live_server):
headers={'x-api-key': api_key}, headers={'x-api-key': api_key},
) )
assert b'which has this one new line' in res.data assert b'which has this one new line' in res.data
assert b'<div id' not in res.data
# Fetch the HTML of the latest one
res = client.get(
url_for("watchsinglehistory", uuid=watch_uuid, timestamp='latest')+"?html=1",
headers={'x-api-key': api_key},
)
assert b'which has this one new line' in res.data
assert b'<div id' in res.data
# Fetch the whole watch # Fetch the whole watch
res = client.get( res = client.get(
@@ -163,6 +172,7 @@ def test_api_simple(client, live_server):
# Loading the most recent snapshot should force viewed to become true # Loading the most recent snapshot should force viewed to become true
client.get(url_for("diff_history_page", uuid="first"), follow_redirects=True) client.get(url_for("diff_history_page", uuid="first"), follow_redirects=True)
time.sleep(3)
# Fetch the whole watch again, viewed should be true # Fetch the whole watch again, viewed should be true
res = client.get( res = client.get(
url_for("watch", uuid=watch_uuid), url_for("watch", uuid=watch_uuid),

View File

@@ -2,13 +2,12 @@
import time import time
from flask import url_for from flask import url_for
from . util import live_server_setup from .util import live_server_setup, wait_for_all_checks
def test_basic_auth(client, live_server): def test_basic_auth(client, live_server):
live_server_setup(live_server) live_server_setup(live_server)
# Give the endpoint time to spin up
time.sleep(1)
# Add our URL to the import page # Add our URL to the import page
test_url = url_for('test_basicauth_method', _external=True).replace("//","//myuser:mypass@") test_url = url_for('test_basicauth_method', _external=True).replace("//","//myuser:mypass@")
@@ -19,8 +18,8 @@ def test_basic_auth(client, live_server):
follow_redirects=True follow_redirects=True
) )
assert b"1 Imported" in res.data assert b"1 Imported" in res.data
wait_for_all_checks(client)
time.sleep(1) time.sleep(1)
# Check form validation # Check form validation
res = client.post( res = client.post(
url_for("edit_page", uuid="first"), url_for("edit_page", uuid="first"),
@@ -29,7 +28,7 @@ def test_basic_auth(client, live_server):
) )
assert b"Updated watch." in res.data assert b"Updated watch." in res.data
time.sleep(1) wait_for_all_checks(client)
res = client.get( res = client.get(
url_for("preview_page", uuid="first"), url_for("preview_page", uuid="first"),
follow_redirects=True follow_redirects=True

View File

@@ -100,11 +100,12 @@ def test_check_ldjson_price_autodetect(client, live_server):
# Accept it # Accept it
uuid = extract_UUID_from_client(client) uuid = extract_UUID_from_client(client)
time.sleep(1)
client.get(url_for('price_data_follower.accept', uuid=uuid, follow_redirects=True)) client.get(url_for('price_data_follower.accept', uuid=uuid, follow_redirects=True))
wait_for_all_checks(client) wait_for_all_checks(client)
# Trigger a check # Trigger a check
time.sleep(1)
client.get(url_for("form_watch_checknow"), follow_redirects=True) client.get(url_for("form_watch_checknow"), follow_redirects=True)
wait_for_all_checks(client) wait_for_all_checks(client)
# Offer should be gone # Offer should be gone

View File

@@ -3,7 +3,8 @@
import time import time
from flask import url_for from flask import url_for
from urllib.request import urlopen from urllib.request import urlopen
from .util import set_original_response, set_modified_response, live_server_setup, wait_for_all_checks, extract_rss_token_from_UI from .util import set_original_response, set_modified_response, live_server_setup, wait_for_all_checks, extract_rss_token_from_UI, \
extract_UUID_from_client
sleep_time_for_fetch_thread = 3 sleep_time_for_fetch_thread = 3
@@ -29,7 +30,7 @@ def test_check_basic_change_detection_functionality(client, live_server):
assert b"1 Imported" in res.data assert b"1 Imported" in res.data
time.sleep(sleep_time_for_fetch_thread) wait_for_all_checks(client)
# Do this a few times.. ensures we don't accidentally set the status # Do this a few times.. ensures we don't accidentally set the status
for n in range(3): for n in range(3):
@@ -62,9 +63,6 @@ def test_check_basic_change_detection_functionality(client, live_server):
# Make a change # Make a change
set_modified_response() set_modified_response()
res = urlopen(url_for('test_endpoint', _external=True))
assert b'which has this one new line' in res.read()
# Force recheck # Force recheck
res = client.get(url_for("form_watch_checknow"), follow_redirects=True) res = client.get(url_for("form_watch_checknow"), follow_redirects=True)
assert b'1 watches queued for rechecking.' in res.data assert b'1 watches queued for rechecking.' in res.data
@@ -135,12 +133,23 @@ def test_check_basic_change_detection_functionality(client, live_server):
# It should have picked up the <title> # It should have picked up the <title>
assert b'head title' in res.data assert b'head title' in res.data
# Be sure the last_viewed is going to be greater than the last snapshot
time.sleep(1)
# hit the mark all viewed link # hit the mark all viewed link
res = client.get(url_for("mark_all_viewed"), follow_redirects=True) res = client.get(url_for("mark_all_viewed"), follow_redirects=True)
assert b'Mark all viewed' not in res.data assert b'Mark all viewed' not in res.data
assert b'unviewed' not in res.data assert b'unviewed' not in res.data
# #2458 "clear history" should make the Watch object update its status correctly when the first snapshot lands again
uuid = extract_UUID_from_client(client)
client.get(url_for("clear_watch_history", uuid=uuid))
client.get(url_for("form_watch_checknow"), follow_redirects=True)
wait_for_all_checks(client)
res = client.get(url_for("index"))
assert b'preview/' in res.data
# #
# Cleanup everything # Cleanup everything
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)

View File

@@ -1,8 +1,8 @@
#!/usr/bin/python3 #!/usr/bin/python3
from .util import set_original_response, set_modified_response, live_server_setup, wait_for_all_checks from .util import set_original_response, live_server_setup, wait_for_all_checks
from flask import url_for from flask import url_for
from urllib.request import urlopen import io
from zipfile import ZipFile from zipfile import ZipFile
import re import re
import time import time
@@ -37,15 +37,10 @@ def test_backup(client, live_server):
# Should be PK/ZIP stream # Should be PK/ZIP stream
assert res.data.count(b'PK') >= 2 assert res.data.count(b'PK') >= 2
# ZipFile from buffer seems non-obvious, just save it instead backup = ZipFile(io.BytesIO(res.data))
with open("download.zip", 'wb') as f: l = backup.namelist()
f.write(res.data)
zip = ZipFile('download.zip')
l = zip.namelist()
uuid4hex = re.compile('^[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}.*txt', re.I) uuid4hex = re.compile('^[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}.*txt', re.I)
newlist = list(filter(uuid4hex.match, l)) # Read Note below newlist = list(filter(uuid4hex.match, l)) # Read Note below
# Should be two txt files in the archive (history and the snapshot) # Should be two txt files in the archive (history and the snapshot)
assert len(newlist) == 2 assert len(newlist) == 2

View File

@@ -3,7 +3,7 @@
import time import time
from flask import url_for from flask import url_for
from .util import live_server_setup from .util import live_server_setup, wait_for_all_checks
import pytest import pytest
@@ -27,9 +27,6 @@ def set_html_response():
def test_check_encoding_detection(client, live_server): def test_check_encoding_detection(client, live_server):
set_html_response() set_html_response()
# Give the endpoint time to spin up
time.sleep(1)
# Add our URL to the import page # Add our URL to the import page
test_url = url_for('test_endpoint', content_type="text/html", _external=True) test_url = url_for('test_endpoint', content_type="text/html", _external=True)
client.post( client.post(
@@ -39,7 +36,7 @@ def test_check_encoding_detection(client, live_server):
) )
# Give the thread time to pick it up # Give the thread time to pick it up
time.sleep(2) wait_for_all_checks(client)
res = client.get( res = client.get(
url_for("preview_page", uuid="first"), url_for("preview_page", uuid="first"),
@@ -56,9 +53,6 @@ def test_check_encoding_detection(client, live_server):
def test_check_encoding_detection_missing_content_type_header(client, live_server): def test_check_encoding_detection_missing_content_type_header(client, live_server):
set_html_response() set_html_response()
# Give the endpoint time to spin up
time.sleep(1)
# Add our URL to the import page # Add our URL to the import page
test_url = url_for('test_endpoint', _external=True) test_url = url_for('test_endpoint', _external=True)
client.post( client.post(
@@ -67,8 +61,7 @@ def test_check_encoding_detection_missing_content_type_header(client, live_serve
follow_redirects=True follow_redirects=True
) )
# Give the thread time to pick it up wait_for_all_checks(client)
time.sleep(2)
res = client.get( res = client.get(
url_for("preview_page", uuid="first"), url_for("preview_page", uuid="first"),

View File

@@ -3,7 +3,7 @@
import time import time
from flask import url_for from flask import url_for
from . util import live_server_setup from .util import live_server_setup, wait_for_all_checks
from ..html_tools import * from ..html_tools import *
@@ -30,7 +30,7 @@ def _runner_test_http_errors(client, live_server, http_code, expected_text):
assert b"1 Imported" in res.data assert b"1 Imported" in res.data
# Give the thread time to pick it up # Give the thread time to pick it up
time.sleep(2) wait_for_all_checks(client)
res = client.get(url_for("index")) res = client.get(url_for("index"))
# no change # no change
@@ -57,7 +57,7 @@ def _runner_test_http_errors(client, live_server, http_code, expected_text):
def test_http_error_handler(client, live_server): def test_http_error_handler(client, live_server):
_runner_test_http_errors(client, live_server, 403, 'Access denied') _runner_test_http_errors(client, live_server, 403, 'Access denied')
_runner_test_http_errors(client, live_server, 404, 'Page not found') _runner_test_http_errors(client, live_server, 404, 'Page not found')
_runner_test_http_errors(client, live_server, 500, '(Internal server Error) received') _runner_test_http_errors(client, live_server, 500, '(Internal server error) received')
_runner_test_http_errors(client, live_server, 400, 'Error - Request returned a HTTP error code 400') _runner_test_http_errors(client, live_server, 400, 'Error - Request returned a HTTP error code 400')
res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True) res = client.get(url_for("form_delete", uuid="all"), follow_redirects=True)
assert b'Deleted' in res.data assert b'Deleted' in res.data
@@ -76,7 +76,7 @@ def test_DNS_errors(client, live_server):
assert b"1 Imported" in res.data assert b"1 Imported" in res.data
# Give the thread time to pick it up # Give the thread time to pick it up
time.sleep(3) wait_for_all_checks(client)
res = client.get(url_for("index")) res = client.get(url_for("index"))
found_name_resolution_error = b"Temporary failure in name resolution" in res.data or b"Name or service not known" in res.data found_name_resolution_error = b"Temporary failure in name resolution" in res.data or b"Name or service not known" in res.data
@@ -104,7 +104,7 @@ def test_low_level_errors_clear_correctly(client, live_server):
follow_redirects=True follow_redirects=True
) )
assert b"1 Imported" in res.data assert b"1 Imported" in res.data
time.sleep(2) wait_for_all_checks(client)
# We should see the DNS error # We should see the DNS error
res = client.get(url_for("index")) res = client.get(url_for("index"))
@@ -121,7 +121,7 @@ def test_low_level_errors_clear_correctly(client, live_server):
) )
# Now the error should be gone # Now the error should be gone
time.sleep(2) wait_for_all_checks(client)
res = client.get(url_for("index")) res = client.get(url_for("index"))
found_name_resolution_error = b"Temporary failure in name resolution" in res.data or b"Name or service not known" in res.data found_name_resolution_error = b"Temporary failure in name resolution" in res.data or b"Name or service not known" in res.data
assert not found_name_resolution_error assert not found_name_resolution_error

View File

@@ -29,6 +29,7 @@ def test_check_extract_text_from_diff(client, live_server):
# Load in 5 different numbers/changes # Load in 5 different numbers/changes
last_date="" last_date=""
for n in range(5): for n in range(5):
time.sleep(1)
# Give the thread time to pick it up # Give the thread time to pick it up
print("Bumping snapshot and checking.. ", n) print("Bumping snapshot and checking.. ", n)
last_date = str(time.time()) last_date = str(time.time())

View File

@@ -21,10 +21,11 @@ def set_response_with_filter():
f.write(test_return_data) f.write(test_return_data)
return None return None
def run_filter_test(client, content_filter): def run_filter_test(client, live_server, content_filter):
# Response WITHOUT the filter ID element
set_original_response()
# Give the endpoint time to spin up
time.sleep(1)
# cleanup for the next # cleanup for the next
client.get( client.get(
url_for("form_delete", uuid="all"), url_for("form_delete", uuid="all"),
@@ -79,6 +80,7 @@ def run_filter_test(client, content_filter):
"include_filters": content_filter, "include_filters": content_filter,
"fetch_backend": "html_requests"}) "fetch_backend": "html_requests"})
# A POST here will also reset the filter failure counter (filter_failure_notification_threshold_attempts)
res = client.post( res = client.post(
url_for("edit_page", uuid="first"), url_for("edit_page", uuid="first"),
data=notification_form_data, data=notification_form_data,
@@ -91,20 +93,21 @@ def run_filter_test(client, content_filter):
# Now the notification should not exist, because we didnt reach the threshold # Now the notification should not exist, because we didnt reach the threshold
assert not os.path.isfile("test-datastore/notification.txt") assert not os.path.isfile("test-datastore/notification.txt")
# -2 because we would have checked twice above (on adding and on edit) # recheck it up to just before the threshold, including the fact that in the previous POST it would have rechecked (and incremented)
for i in range(0, App._FILTER_FAILURE_THRESHOLD_ATTEMPTS_DEFAULT-2): for i in range(0, App._FILTER_FAILURE_THRESHOLD_ATTEMPTS_DEFAULT-2):
res = client.get(url_for("form_watch_checknow"), follow_redirects=True) client.get(url_for("form_watch_checknow"), follow_redirects=True)
wait_for_all_checks(client) wait_for_all_checks(client)
assert not os.path.isfile("test-datastore/notification.txt"), f"test-datastore/notification.txt should not exist - Attempt {i}" time.sleep(2) # delay for apprise to fire
assert not os.path.isfile("test-datastore/notification.txt"), f"test-datastore/notification.txt should not exist - Attempt {i} when threshold is {App._FILTER_FAILURE_THRESHOLD_ATTEMPTS_DEFAULT}"
# We should see something in the frontend # We should see something in the frontend
res = client.get(url_for("index"))
assert b'Warning, no filters were found' in res.data assert b'Warning, no filters were found' in res.data
# One more check should trigger it (see -2 above) # One more check should trigger the _FILTER_FAILURE_THRESHOLD_ATTEMPTS_DEFAULT threshold
client.get(url_for("form_watch_checknow"), follow_redirects=True)
wait_for_all_checks(client)
client.get(url_for("form_watch_checknow"), follow_redirects=True) client.get(url_for("form_watch_checknow"), follow_redirects=True)
wait_for_all_checks(client) wait_for_all_checks(client)
time.sleep(2) # delay for apprise to fire
# Now it should exist and contain our "filter not found" alert # Now it should exist and contain our "filter not found" alert
assert os.path.isfile("test-datastore/notification.txt") assert os.path.isfile("test-datastore/notification.txt")
@@ -149,13 +152,9 @@ def test_setup(live_server):
live_server_setup(live_server) live_server_setup(live_server)
def test_check_include_filters_failure_notification(client, live_server): def test_check_include_filters_failure_notification(client, live_server):
set_original_response() run_filter_test(client, live_server,'#nope-doesnt-exist')
wait_for_all_checks(client)
run_filter_test(client, '#nope-doesnt-exist')
def test_check_xpath_filter_failure_notification(client, live_server): def test_check_xpath_filter_failure_notification(client, live_server):
set_original_response() run_filter_test(client, live_server, '//*[@id="nope-doesnt-exist"]')
time.sleep(1)
run_filter_test(client, '//*[@id="nope-doesnt-exist"]')
# Test that notification is never sent # Test that notification is never sent

Some files were not shown because too many files have changed in this diff.