mirror of https://github.com/dgtlmoon/changedetection.io.git
synced 2025-11-01 23:28:06 +00:00

Compare commits: test-speed ... 3486-RSS-A (6 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 03d56f3c8c | |
| | 80be1a30f2 | |
| | 93b4f79006 | |
| | 3009e46617 | |
| | 8f040a1a84 | |
| | 4dbab8d77a | |
```diff
@@ -2,7 +2,7 @@

 # Read more https://github.com/dgtlmoon/changedetection.io/wiki

-__version__ = '0.50.18'
+__version__ = '0.50.20'

 from changedetectionio.strtobool import strtobool
 from json.decoder import JSONDecodeError
```
```diff
@@ -1,5 +1,4 @@
 from loguru import logger
-from lxml import etree
 from typing import List
 import html
 import json
```
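The module-level `from lxml import etree` is dropped here and, per a later hunk, re-imported inside `subtractive_xpath_selector`. A minimal sketch of that deferred-import pattern (illustrative only, not the project's exact code):

```python
# Deferred import: lxml is resolved on first call instead of at module load,
# so importing this module stays cheap when the XPath path is never used.
def parse_html(html_content: str):
    from lxml import etree  # cached in sys.modules after the first call
    return etree.HTML(html_content)
```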
```diff
@@ -58,13 +57,17 @@ def include_filters(include_filters, html_content, append_pretty_line_formatting

     return html_block

-def subtractive_css_selector(css_selector, html_content):
+def subtractive_css_selector(css_selector, content):
     from bs4 import BeautifulSoup
-    soup = BeautifulSoup(html_content, "html.parser")
+    soup = BeautifulSoup(content, "html.parser")

     # So that the elements don't shift their index, build a list of elements here which will be pointers to their place in the DOM
     elements_to_remove = soup.select(css_selector)

+    if not elements_to_remove:
+        # Better to return the original than rebuild with BeautifulSoup
+        return content
+
     # Then, remove them in a separate loop
     for item in elements_to_remove:
         item.decompose()
```
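A runnable sketch of the early-return behaviour added above, assuming BeautifulSoup 4 (`bs4`) is installed; the names mirror the hunk but this is not the project's verbatim code:

```python
from bs4 import BeautifulSoup

def subtractive_css_selector(css_selector, content):
    soup = BeautifulSoup(content, "html.parser")
    elements_to_remove = soup.select(css_selector)
    if not elements_to_remove:
        # Nothing matched: return the original string untouched instead of
        # paying for a full re-serialisation through BeautifulSoup.
        return content
    for item in elements_to_remove:
        item.decompose()
    return str(soup)

# No match: the input comes back byte-for-byte identical.
html = "<div><p class='ad'>buy!</p><p>keep me</p></div>"
assert subtractive_css_selector(".nothing", html) == html
print(subtractive_css_selector(".ad", html))  # <div><p>keep me</p></div>
```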
```diff
@@ -72,6 +75,7 @@ def subtractive_css_selector(css_selector, html_content):
     return str(soup)

 def subtractive_xpath_selector(selectors: List[str], html_content: str) -> str:
+    from lxml import etree
     # Parse the HTML content using lxml
     html_tree = etree.HTML(html_content)
```
```diff
@@ -83,6 +87,10 @@ def subtractive_xpath_selector(selectors: List[str], html_content: str) -> str:
         # Collect elements for each selector
         elements_to_remove.extend(html_tree.xpath(selector))

+    # If no elements were found, return the original HTML content
+    if not elements_to_remove:
+        return html_content
+
     # Then, remove them in a separate loop
     for element in elements_to_remove:
         if element.getparent() is not None:  # Ensure the element has a parent before removing
```
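The same guard for the XPath variant; a self-contained sketch assuming lxml, simplified from the hunk above:

```python
from typing import List
from lxml import etree

def subtractive_xpath_selector(selectors: List[str], html_content: str) -> str:
    html_tree = etree.HTML(html_content)
    elements_to_remove = []
    for selector in selectors:
        elements_to_remove.extend(html_tree.xpath(selector))
    if not elements_to_remove:
        # No matches: skip the remove loop and the lossy re-serialisation.
        return html_content
    for element in elements_to_remove:
        if element.getparent() is not None:  # only detach nodes still in the tree
            element.getparent().remove(element)
    return etree.tostring(html_tree, encoding="unicode")
```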
```diff
@@ -94,24 +94,21 @@ class guess_stream_type():
             self.is_rss = True
         elif any(s in http_content_header for s in JSON_CONTENT_TYPES):
             self.is_json = True
-        elif 'pdf' in magic_content_header:
-            self.is_pdf = True
-        elif has_html_patterns or http_content_header == 'text/html':
-            self.is_html = True
-        elif any(s in magic_content_header for s in JSON_CONTENT_TYPES):
-            self.is_json = True
-        # magic will call a rss document 'xml'
-        # Rarely do endpoints give the right header, usually just text/xml, so we check also for <rss
-        # This also triggers the automatic CDATA text parser so the RSS goes back a nice content list
-        elif '<rss' in test_content_normalized or '<feed' in test_content_normalized or any(s in magic_content_header for s in RSS_XML_CONTENT_TYPES):
-            self.is_rss = True
-        elif any(s in http_content_header for s in XML_CONTENT_TYPES):
-            # Only mark as generic XML if not already detected as RSS
-            if not self.is_rss:
-                self.is_xml = True
+        elif 'pdf' in magic_content_header:
+            self.is_pdf = True
+        ###
+        elif has_html_patterns or http_content_header == 'text/html':
+            self.is_html = True
+        # If magic says text/plain and we found no HTML patterns, trust it
+        elif magic_result == 'text/plain':
+            self.is_plaintext = True
+            logger.debug(f"Trusting magic's text/plain result (no HTML patterns detected)")
+        elif any(s in magic_content_header for s in JSON_CONTENT_TYPES):
+            self.is_json = True
+        # magic will call a rss document 'xml'
+        elif '<rss' in test_content_normalized or '<feed' in test_content_normalized or any(s in magic_content_header for s in RSS_XML_CONTENT_TYPES):
+            self.is_rss = True
+        elif test_content_normalized.startswith('<?xml') or any(s in magic_content_header for s in XML_CONTENT_TYPES):
+            # Generic XML that's not RSS/Atom (RSS/Atom checked above)
+            self.is_xml = True
```
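The reordering matters because the checks form an `elif` chain: whichever test fires first wins. A toy sketch (not the project's class) of why RSS/Atom markers must be tested before the generic XML check:

```python
def sniff(http_content_header: str, body: str) -> str:
    body = body.lstrip().lower()
    # Feed markers first: a feed served as text/xml must not fall through
    # to the generic XML branch, or the CDATA handling never runs.
    if '<rss' in body or '<feed' in body:
        return 'rss'
    if body.startswith('<?xml') or 'xml' in http_content_header:
        return 'xml'
    return 'text'

assert sniff('text/xml', '<?xml version="1.0"?><rss><channel/></rss>') == 'rss'
assert sniff('text/xml', '<?xml version="1.0"?><inventory/>') == 'xml'
```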
```diff
@@ -122,4 +119,8 @@ class guess_stream_type():
         # Only trust magic for 'text' if no other patterns matched
         elif 'text' in magic_content_header:
             self.is_plaintext = True
+        # If magic says text/plain and we found no HTML patterns, trust it
+        elif magic_result == 'text/plain':
+            self.is_plaintext = True
+            logger.debug(f"Trusting magic's text/plain result (no HTML patterns detected)")
```
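For the `magic_result == 'text/plain'` fallback, a hedged sketch using the python-magic library (the project's detection pipeline is more involved; `has_html_patterns` stands in for its HTML-marker scan):

```python
import magic  # python-magic, a libmagic wrapper

def trust_plaintext(body: bytes, has_html_patterns: bool) -> bool:
    # Sniff only a prefix; libmagic does not need the whole document.
    magic_result = magic.from_buffer(body[:2048], mime=True)
    # Trust text/plain only when no HTML tag patterns were spotted.
    return magic_result == 'text/plain' and not has_html_patterns
```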
```diff
@@ -227,7 +227,7 @@ class ContentProcessor:
         """Convert CDATA/comments in RSS to usable text."""
         return cdata_in_document_to_text(html_content=content)

-    def preprocess_pdf(self, content, raw_content):
+    def preprocess_pdf(self, raw_content):
         """Convert PDF to HTML using external tool."""
         from shutil import which
         tool = os.getenv("PDF_TO_HTML_TOOL", "pdftohtml")
```
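A sketch of the conversion step, assuming pdftohtml from poppler-utils on PATH; `PDF_TO_HTML_TOOL` follows the hunk, while the temp-file handling and flags are illustrative rather than the project's exact invocation:

```python
import os
import subprocess
import tempfile
from shutil import which

def pdf_to_html(raw_content: bytes) -> str:
    tool = os.getenv("PDF_TO_HTML_TOOL", "pdftohtml")
    if not which(tool):
        raise RuntimeError(f"{tool} is not installed on PATH")
    with tempfile.NamedTemporaryFile(suffix=".pdf", delete=False) as f:
        f.write(raw_content)
        pdf_path = f.name
    try:
        # -stdout: emit HTML on stdout; -s: single document; -i: ignore images
        result = subprocess.run([tool, "-stdout", "-s", "-i", pdf_path],
                                capture_output=True, check=True)
        return result.stdout.decode("utf-8", errors="replace")
    finally:
        os.unlink(pdf_path)
```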
```diff
@@ -251,7 +251,7 @@
         metadata = (
             f"<p>Added by changedetection.io: Document checksum - "
             f"{hashlib.md5(raw_content).hexdigest().upper()} "
-            f"Filesize - {len(html_content)} bytes</p>"
+            f"Original file size - {len(raw_content)} bytes</p>"
         )
         return html_content.replace('</body>', metadata + '</body>')
```
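The fix here: both the checksum and the reported size now describe the raw PDF bytes, where the old string measured the size of the converted HTML. A minimal sketch of the corrected metadata:

```python
import hashlib

def build_metadata(raw_content: bytes) -> str:
    # Checksum and size both describe the original file, not the HTML output.
    return (
        "<p>Added by changedetection.io: Document checksum - "
        f"{hashlib.md5(raw_content).hexdigest().upper()} "
        f"Original file size - {len(raw_content)} bytes</p>"
    )

assert "Original file size - 9 bytes" in build_metadata(b"%PDF-1.4\n")
```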
```diff
@@ -384,7 +384,8 @@ class perform_site_check(difference_detection_processor):

         # PDF preprocessing
         if watch.is_pdf or stream_content_type.is_pdf:
-            content = content_processor.preprocess_pdf(content, self.fetcher.raw_content)
+            content = content_processor.preprocess_pdf(raw_content=self.fetcher.raw_content)
+            stream_content_type.is_html = True

         # JSON preprocessing
         if stream_content_type.is_json:
```
```diff
@@ -403,6 +404,8 @@
             html_content = content

         # Apply include filters (CSS, XPath, JSON)
+        # Except for plaintext (in case they tried to confuse the system, it will HTML escape)
+        #if not stream_content_type.is_plaintext:
         if filter_config.has_include_filters:
             html_content = content_processor.apply_include_filters(content, stream_content_type)
```
```diff
@@ -414,6 +417,9 @@
         if watch.is_source_type_url:
             # For source URLs, keep raw content
             stripped_text = html_content
+        elif stream_content_type.is_plaintext:
+            # For plaintext, keep as-is without HTML-to-text conversion
+            stripped_text = html_content
         else:
             # Extract text from HTML/RSS content (not generic XML)
             if stream_content_type.is_html or stream_content_type.is_rss:
```
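A condensed sketch of the new three-way branch, with a hypothetical stand-in (`html_to_text`) for the project's extraction step: plaintext now bypasses HTML-to-text conversion instead of being forced through it.

```python
import re

def html_to_text(markup: str) -> str:
    # Hypothetical stand-in for the real HTML-to-text extraction.
    return re.sub(r'<[^>]+>', '', markup)

def strip_text(content: str, is_source_url: bool, ct) -> str:
    if is_source_url:
        return content               # source: URLs keep the raw content
    if ct.is_plaintext:
        return content               # plaintext now passes through untouched
    if ct.is_html or ct.is_rss:      # only real markup goes through extraction
        return html_to_text(content)
    return content
```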
```diff
@@ -8,25 +8,30 @@ from .util import set_original_response, set_modified_response, live_server_setup
 # `subtractive_selectors` should still work in `source:` type requests
 def test_fetch_pdf(client, live_server, measure_memory_usage):
     import shutil
-    shutil.copy("tests/test.pdf", "test-datastore/endpoint-test.pdf")
     import os

+    shutil.copy("tests/test.pdf", "test-datastore/endpoint-test.pdf")
+    first_version_size = os.path.getsize("test-datastore/endpoint-test.pdf")

     # live_server_setup(live_server) # Setup on conftest per function
     test_url = url_for('test_pdf_endpoint', _external=True)
     # Add our URL to the import page
     uuid = client.application.config.get('DATASTORE').add_watch(url=test_url)
     client.get(url_for("ui.form_watch_checknow"), follow_redirects=True)

     wait_for_all_checks(client)

-    res = client.get(
-        url_for("ui.ui_views.preview_page", uuid="first"),
-        follow_redirects=True
-    )
+    watch = live_server.app.config['DATASTORE'].data['watching'][uuid]
+    dates = list(watch.history.keys())
+    snapshot_contents = watch.get_history_snapshot(dates[0])

     # PDF header should not be there (it was converted to text)
-    assert b'PDF' not in res.data[:10]
-    assert b'hello world' in res.data
+    assert 'PDF' not in snapshot_contents
     # Was converted away from HTML
+    assert 'pdftohtml' not in snapshot_contents.lower()  # Generator tag shouldn't be there
+    assert f'Original file size - {first_version_size}' in snapshot_contents
+    assert 'html' not in snapshot_contents.lower()  # is converted from html
+    assert 'body' not in snapshot_contents.lower()  # is converted from html
     # And our text content was there
+    assert 'hello world' in snapshot_contents

     # So we know if the file changes in other ways
     import hashlib
```
```diff
@@ -34,8 +39,7 @@ def test_fetch_pdf(client, live_server, measure_memory_usage):
     # We should have one
     assert len(original_md5) > 0
     # And it's going to be in the document
-    assert b'Document checksum - '+bytes(str(original_md5).encode('utf-8')) in res.data
-
+    assert f'Document checksum - {original_md5}' in snapshot_contents

     shutil.copy("tests/test2.pdf", "test-datastore/endpoint-test.pdf")
     changed_md5 = hashlib.md5(open("test-datastore/endpoint-test.pdf", 'rb').read()).hexdigest().upper()
```
```diff
@@ -58,7 +62,6 @@ def test_fetch_pdf(client, live_server, measure_memory_usage):
     assert original_md5.encode('utf-8') not in res.data
     assert changed_md5.encode('utf-8') in res.data

-
     res = client.get(
         url_for("ui.ui_views.diff_history_page", uuid="first"),
         follow_redirects=True
```
```diff
@@ -66,6 +69,16 @@ def test_fetch_pdf(client, live_server, measure_memory_usage):

     assert original_md5.encode('utf-8') in res.data
     assert changed_md5.encode('utf-8') in res.data

     assert b'here is a change' in res.data

+    dates = list(watch.history.keys())
+    # new snapshot was also OK, no HTML
+    snapshot_contents = watch.get_history_snapshot(dates[1])
+    assert 'html' not in snapshot_contents.lower()
+    assert f'Original file size - {os.path.getsize("test-datastore/endpoint-test.pdf")}' in snapshot_contents
+    assert 'here is a change' in snapshot_contents
+    assert os.path.getsize("test-datastore/endpoint-test.pdf") != first_version_size  # And the disk change worked
```
```diff
@@ -110,8 +110,9 @@ def test_basic_cdata_rss_markup(client, live_server, measure_memory_usage):

     set_original_cdata_xml()

-    test_url = url_for('test_endpoint', content_type="application/atom+xml; charset=UTF-8", _external=True)
+    # Rarely do endpoints give the right header, usually just text/xml, so we check also for <rss
+    # This also triggers the automatic CDATA text parser so the RSS goes back a nice content list
+    test_url = url_for('test_endpoint', content_type="text/xml; charset=UTF-8", _external=True)

     # Add our URL to the import page
     uuid = client.application.config.get('DATASTORE').add_watch(url=test_url)
```