Compare commits

...

4 Commits

Author SHA1 Message Date
dgtlmoon d80a38a1b1 API - watch.link was accidently a tuple, enforcing string 2026-04-29 19:36:34 +10:00
dgtlmoon e25387f588 Improve LiteLLM deps #4093 (#4102) 2026-04-29 09:08:20 +02:00
dgtlmoon e4bc048280 UI - AI/LLM - "Summary" button should set last viewed (#4095)
Build and push containers / metadata (push) Has been cancelled
Build and push containers / build-push-containers (push) Has been cancelled
Publish Python 🐍distribution 📦 to PyPI and TestPyPI / Build distribution 📦 (push) Has been cancelled
ChangeDetection.io Container Build Test / Build linux/amd64 (alpine) (push) Has been cancelled
ChangeDetection.io Container Build Test / Build linux/arm64 (alpine) (push) Has been cancelled
ChangeDetection.io Container Build Test / Build linux/amd64 (main) (push) Has been cancelled
ChangeDetection.io Container Build Test / Build linux/arm/v7 (main) (push) Has been cancelled
ChangeDetection.io Container Build Test / Build linux/arm/v8 (main) (push) Has been cancelled
ChangeDetection.io Container Build Test / Build linux/arm64 (main) (push) Has been cancelled
ChangeDetection.io App Test / lint-code (push) Has been cancelled
ChangeDetection.io App Test / lint-translations (push) Has been cancelled
ChangeDetection.io App Test / lint-template-i18n (push) Has been cancelled
Publish Python 🐍distribution 📦 to PyPI and TestPyPI / Test the built package works basically. (push) Has been cancelled
Publish Python 🐍distribution 📦 to PyPI and TestPyPI / Publish Python 🐍 distribution 📦 to PyPI (push) Has been cancelled
ChangeDetection.io App Test / test-application-3-10 (push) Has been cancelled
ChangeDetection.io App Test / test-application-3-11 (push) Has been cancelled
ChangeDetection.io App Test / test-application-3-12 (push) Has been cancelled
ChangeDetection.io App Test / test-application-3-13 (push) Has been cancelled
ChangeDetection.io App Test / test-application-3-14 (push) Has been cancelled
2026-04-28 19:47:15 +10:00
skkzsh 2839a4276e Ruff INT (flake8-gettext) (#4096) 2026-04-28 19:46:58 +10:00
7 changed files with 75 additions and 7 deletions
+2 -2
View File
@@ -11,8 +11,8 @@ jobs:
- name: Lint with Ruff
run: |
pip install ruff
# Check for syntax errors and undefined names
ruff check . --select E9,F63,F7,F82
# Check for syntax errors and undefined names, and gettext misuse
ruff check . --select E9,F63,F7,F82,INT
# Complete check with errors treated as warnings
ruff check . --exit-zero
- name: Validate OpenAPI spec
+5 -1
View File
@@ -20,10 +20,11 @@ exclude = [
select = [
"B", # flake8-bugbear
"B9",
"C",
"C",
"E", # pycodestyle
"F", # Pyflakes
"I", # isort
"INT", # flake8-gettext
"N", # pep8-naming
"UP", # pyupgrade
"W", # pycodestyle
@@ -43,6 +44,9 @@ ignore = [
[lint.mccabe]
max-complexity = 12
[lint.flake8-gettext]
extend-function-names = ["_l", "lazy_gettext", "pgettext", "npgettext"]
[format]
indent-style = "space"
quote-style = "preserve"
+1 -1
View File
@@ -103,7 +103,7 @@ class Watch(Resource):
# attr .last_changed will check for the last written text snapshot on change
watch['last_changed'] = watch_obj.last_changed
watch['viewed'] = watch_obj.viewed
watch['link'] = watch_obj.link,
watch['link'] = watch_obj.link
return watch
+4
View File
@@ -283,6 +283,8 @@ def construct_blueprint(datastore: ChangeDetectionStore):
# Check cache — keyed by version pair + prompt hash (invalidates if prompt changes)
cached = watch.get_llm_diff_summary(from_version, to_version, prompt=cache_prompt)
if cached:
import time
datastore.set_last_viewed(uuid, int(time.time()))
return jsonify({'summary': cached, 'error': None, 'cached': True})
# Check global monthly token budget before making an LLM call
@@ -316,6 +318,8 @@ def construct_blueprint(datastore: ChangeDetectionStore):
except Exception as e:
logger.warning(f"Could not cache llm summary for {uuid}: {e}")
import time
datastore.set_last_viewed(uuid, int(time.time()))
return jsonify({'summary': summary, 'error': None, 'cached': False})
@diff_blueprint.route("/diff/<uuid_str:uuid>/extract", methods=['GET'])
+2
View File
@@ -102,6 +102,8 @@ def test_api_simple(client, live_server, measure_memory_usage, datastore_path):
#705 `last_changed` should be zero on the first check
assert before_recheck_info['last_changed'] == 0
assert before_recheck_info['title'] == 'My test URL'
assert isinstance(before_recheck_info['link'], str), "link must be a plain string, not a tuple or list"
assert before_recheck_info['link'] == test_url
# Check the limit by tag doesnt return anything when nothing found
res = client.get(
@@ -336,6 +336,58 @@ def test_hardcoded_fallback_when_nothing_set(
delete_all_watches(client)
def test_llm_summary_ajax_sets_last_viewed(
client, live_server, measure_memory_usage, datastore_path):
"""
Calling /diff/<uuid>/llm-summary via AJAX should mark the watch as viewed
(set last_viewed) for both fresh and cached responses.
"""
from unittest.mock import patch, MagicMock
_configure_llm(client)
ds = client.application.config.get('DATASTORE')
test_url = url_for('test_endpoint', content_type='text/html', content='v1', _external=True)
uuid = ds.add_watch(url=test_url)
watch = ds.data['watching'][uuid]
watch.save_history_blob('old content\n', '4000000000', 'snap-old')
watch.save_history_blob('new content\n', '4000000001', 'snap-new')
assert watch['last_viewed'] == 0, "last_viewed should start at 0"
mock_response = MagicMock()
mock_response.choices = [MagicMock()]
mock_response.choices[0].message.content = 'Content changed from old to new.'
mock_response.usage = MagicMock(total_tokens=50, prompt_tokens=40, completion_tokens=10)
with patch('litellm.completion', return_value=mock_response):
res = client.get(
url_for('ui.ui_diff.diff_llm_summary', uuid=uuid,
from_version='4000000000', to_version='4000000001'),
)
assert res.status_code == 200
data = res.get_json()
assert data['summary'] == 'Content changed from old to new.'
assert watch['last_viewed'] > 0, "last_viewed should be set after fresh LLM summary"
# Reset and verify the cached path also sets last_viewed
watch['last_viewed'] = 0
with patch('litellm.completion', return_value=mock_response):
res2 = client.get(
url_for('ui.ui_diff.diff_llm_summary', uuid=uuid,
from_version='4000000000', to_version='4000000001'),
)
assert res2.status_code == 200
data2 = res2.get_json()
assert data2.get('cached') is True
assert watch['last_viewed'] > 0, "last_viewed should be set even when returning cached summary"
delete_all_watches(client)
def test_global_default_saved_and_loaded_via_settings_form(
client, live_server, measure_memory_usage, datastore_path):
"""
+9 -3
View File
@@ -99,6 +99,12 @@ pytest-mock ~=3.15
# OpenAPI validation support
openapi-core[flask] ~= 0.23
# openapi-spec-validator (pulled in by openapi-core) requires jsonschema>=4.24.0.
# litellm 1.83.1–1.83.14 exact-pin jsonschema==4.23.0, which is below that floor —
# the two can never coexist. Without this pin, pip walks back through ~14 litellm
# patch releases before finding 1.83.0 (jsonschema>=4.23.0,<5.0.0, accepts 4.24.x).
# Pinning >=4.24.0 here lets the resolver reject incompatible litellm versions immediately.
jsonschema>=4.24.0,<5.0.0
loguru
@@ -120,7 +126,7 @@ greenlet >= 3.0.3
# Default SOCKETIO_MODE=threading is recommended for better compatibility
gevent
referencing # Don't pin — jsonschema-path (required by openapi-core>=0.18) caps referencing<0.37.0, so pinning 0.37.0 forces openapi-core back to 0.17.2. Revisit once jsonschema-path>=0.3.5 relaxes the cap.
referencing==0.37.0 # jsonschema-path>=0.4.x allows <0.38.0; 0.37.0 is current latest
# For conditions
panzi-json-logic
@@ -131,7 +137,7 @@ price-parser
# Lightweight MIME type detection (saves ~14MB memory vs python-magic/libmagic)
# Used for detecting correct favicon type and content-type detection
puremagic
puremagic<2.0 # 2.x requires Python >=3.12; unpin once 3.10/3.11 support is dropped
# Scheduler - Windows seemed to miss a lot of default timezone info (even "UTC" !)
tzdata
@@ -141,7 +147,7 @@ tzdata
pluggy ~= 1.6
# LLM intent-based change evaluation (multi-provider via litellm)
litellm>=1.40.0
litellm>=1.40.0,<1.83.1 # 1.83.1–1.83.14 exact-pin jsonschema==4.23.0, conflicting with openapi-spec-validator's >=4.24.0 floor; re-evaluate when litellm fixes this
# BM25 relevance trimming for large snapshots (pure Python, no ML)
rank-bm25>=0.2.2