mirror of https://github.com/dgtlmoon/changedetection.io.git
synced 2026-01-20 06:00:20 +00:00

Compare commits: brotli-nat ... python-314
11 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | fe5beac2b2 |  |
|  | 5a1d44dc62 |  |
|  | 6db1085337 |  |
|  | 66553e106d |  |
|  | 5b01dbd9f8 |  |
|  | c86f214fc3 |  |
|  | 32149640d9 |  |
|  | 15f16455fc |  |
|  | 15cdfac9d9 |  |
|  | 04de397916 |  |
|  | 9421f7e279 |  |
11 .github/workflows/test-only.yml vendored
@@ -52,4 +52,13 @@ jobs:
     uses: ./.github/workflows/test-stack-reusable-workflow.yml
     with:
       python-version: '3.13'
-      skip-pypuppeteer: true
+      skip-pypuppeteer: true
+
+
+  test-application-3-14:
+    #if: github.event_name == 'push' && github.ref == 'refs/heads/master'
+    needs: lint-code
+    uses: ./.github/workflows/test-stack-reusable-workflow.yml
+    with:
+      python-version: '3.14'
+      skip-pypuppeteer: false
@@ -2,7 +2,7 @@

 # Read more https://github.com/dgtlmoon/changedetection.io/wiki
 # Semver means never use .01, or 00. Should be .1.
-__version__ = '0.52.4'
+__version__ = '0.52.6'

 from changedetectionio.strtobool import strtobool
 from json.decoder import JSONDecodeError
@@ -41,9 +41,10 @@ from loguru import logger
 #
 # IMPLEMENTATION:
 # 1. Explicit contexts everywhere (primary protection):
-#    - Watch.py: ctx = multiprocessing.get_context('spawn')
 #    - playwright.py: ctx = multiprocessing.get_context('spawn')
 #    - puppeteer.py: ctx = multiprocessing.get_context('spawn')
+#    - isolated_opencv.py: ctx = multiprocessing.get_context('spawn')
+#    - isolated_libvips.py: ctx = multiprocessing.get_context('spawn')
 #
 # 2. Global default (defense-in-depth, below):
 #    - Safety net if future code forgets explicit context
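For context, a minimal runnable sketch of the two-layer pattern these comments describe; the worker function is illustrative and not part of the repository:

    import multiprocessing

    def _worker(value):
        # Illustrative worker; must be module-level so 'spawn' can pickle it
        return value * 2

    if __name__ == '__main__':
        # 2. Global default (defense-in-depth): harmless no-op if already set
        try:
            multiprocessing.set_start_method('spawn', force=False)
        except RuntimeError:
            pass  # a start method was already chosen elsewhere

        # 1. Explicit context at each call site (primary protection)
        ctx = multiprocessing.get_context('spawn')
        proc = ctx.Process(target=_worker, args=(21,))
        proc.start()
        proc.join()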
@@ -2,7 +2,6 @@ import os
 import time

 from flask import Blueprint, request, make_response, render_template, redirect, url_for, flash, session
 from flask_login import current_user
 from flask_paginate import Pagination, get_page_parameter

 from changedetectionio import forms
@@ -85,6 +84,7 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe
         app_rss_token=datastore.data['settings']['application'].get('rss_access_token'),
         datastore=datastore,
         errored_count=errored_count,
+        extra_classes='has-queue' if len(update_q.queue) else '',
         form=form,
         generate_tag_colors=processors.generate_processor_badge_colors,
         guid=datastore.data['app_guid'],
@@ -92,9 +92,10 @@ def construct_blueprint(datastore: ChangeDetectionStore, update_q, queuedWatchMe
         hosted_sticky=os.getenv("SALTED_PASS", False) == False,
         now_time_server=round(time.time()),
         pagination=pagination,
+        processor_badge_css=processors.get_processor_badge_css(),
+        processor_badge_texts=processors.get_processor_badge_texts(),
         processor_descriptions=processors.get_processor_descriptions(),
-        processor_badge_css=processors.get_processor_badge_css(),
         queue_size=len(update_q.queue),
         queued_uuids=[q_uuid.item['uuid'] for q_uuid in update_q.queue],
         search_q=request.args.get('q', '').strip(),
         sort_attribute=request.args.get('sort') if request.args.get('sort') else request.cookies.get('sort'),
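The two hunks above thread queue state into the template (the queue_size value, the queue-size-int span, and the has-queue body class). A self-contained Flask sketch of that pattern, with a stand-in list instead of the project's real update queue:

    from flask import Flask, render_template_string

    app = Flask(__name__)
    QUEUE = ['job-1', 'job-2']  # stand-in for update_q.queue

    @app.route('/')
    def index():
        # Same idea as above: template receives the queue size and a body class
        return render_template_string(
            '<body class="{{ extra_classes }}">'
            'Queued size: <span id="queue-size-int">{{ queue_size }}</span></body>',
            extra_classes='has-queue' if len(QUEUE) else '',
            queue_size=len(QUEUE),
        )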
@@ -99,9 +99,14 @@ html[data-darkmode="true"] .watch-tag-list.tag-{{ class_name }} {
         data-confirm-message="{{ _('<p>Are you sure you want to delete the selected watches?</strong></p><p>This action cannot be undone.</p>') }}"
         data-confirm-button="{{ _('Delete') }}"><i data-feather="trash" style="width: 14px; height: 14px; stroke: white; margin-right: 4px;"></i>{{ _('Delete') }}</button>
     </div>
-    {%- if watches|length >= pagination.per_page -%}
-    {{ pagination.info }}
-    {%- endif -%}
+
+    <div id="stats_row">
+        <div class="left">{%- if watches|length >= pagination.per_page -%}{{ pagination.info }}{%- endif -%}</div>
+        <div class="right" >{{ _('Queued size') }}: <span id="queue-size-int">{{ queue_size }}</span></div>
+    </div>
+
+
+
     {%- if search_q -%}<div id="search-result-info">{{ _('Searching') }} "<strong><i>{{search_q}}</i></strong>"</div>{%- endif -%}
     <div>
     <a href="{{url_for('watchlist.index')}}" class="pure-button button-tag {{'active' if not active_tag_uuid }}">{{ _('All') }}</a>
@@ -156,6 +156,9 @@ class fetcher(Fetcher):
         from PIL import Image
         import io
         img = Image.open(io.BytesIO(screenshot_png))
+        # Convert to RGB if needed (JPEG doesn't support transparency)
+        if img.mode != 'RGB':
+            img = img.convert('RGB')
         jpeg_buffer = io.BytesIO()
         img.save(jpeg_buffer, format='JPEG', quality=int(os.getenv("SCREENSHOT_QUALITY", 72)))
         self.screenshot = jpeg_buffer.getvalue()
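A standalone sketch of the Pillow conversion used above; the function name is illustrative, not from the repository:

    import io
    from PIL import Image

    def png_bytes_to_jpeg(png_bytes: bytes, quality: int = 72) -> bytes:
        """Re-encode a PNG screenshot as a smaller JPEG."""
        img = Image.open(io.BytesIO(png_bytes))
        # JPEG has no alpha channel, so RGBA/P images must be converted first
        if img.mode != 'RGB':
            img = img.convert('RGB')
        buf = io.BytesIO()
        img.save(buf, format='JPEG', quality=quality)
        return buf.getvalue()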
@@ -18,21 +18,31 @@ BROTLI_COMPRESS_SIZE_THRESHOLD = int(os.getenv('SNAPSHOT_BROTLI_COMPRESSION_THRE
 minimum_seconds_recheck_time = int(os.getenv('MINIMUM_SECONDS_RECHECK_TIME', 3))
 mtable = {'seconds': 1, 'minutes': 60, 'hours': 3600, 'days': 86400, 'weeks': 86400 * 7}

-def _brotli_compress_worker(conn, filepath, mode=None):
+def _brotli_save(contents, filepath, mode=None, fallback_uncompressed=False):
     """
-    Worker function to compress data with brotli in a separate process.
-    This isolates memory - when process exits, OS reclaims all memory.
+    Save compressed data using native brotli.
+    Testing shows no memory leak when using gc.collect() after compression.

     Args:
-        conn: multiprocessing.Pipe connection to receive data
+        contents: data to compress (str or bytes)
         filepath: destination file path
         mode: brotli compression mode (e.g., brotli.MODE_TEXT)
+        fallback_uncompressed: if True, save uncompressed on failure; if False, raise exception
+
+    Returns:
+        str: actual filepath saved (may differ from input if fallback used)
+
+    Raises:
+        Exception: if compression fails and fallback_uncompressed is False
     """
     import brotli
+    import gc
+
+    # Ensure contents are bytes
+    if isinstance(contents, str):
+        contents = contents.encode('utf-8')

     try:
-        # Receive data from parent process via pipe (avoids pickle overhead)
-        contents = conn.recv()
         logger.debug(f"Starting brotli compression of {len(contents)} bytes.")

         if mode is not None:
@@ -43,111 +53,25 @@ def _brotli_compress_worker(conn, filepath, mode=None):
         with open(filepath, 'wb') as f:
             f.write(compressed_data)

-        # Send success status back
-        conn.send(True)
         logger.debug(f"Finished brotli compression - From {len(contents)} to {len(compressed_data)} bytes.")
-        # No need for explicit cleanup - process exit frees all memory
-    except Exception as e:
-        logger.critical(f"Brotli compression worker failed: {e}")
-        conn.send(False)
-    finally:
-        conn.close()

+        # Force garbage collection to prevent memory buildup
+        gc.collect()

-def _brotli_subprocess_save(contents, filepath, mode=None, timeout=30, fallback_uncompressed=False):
-    """
-    Save compressed data using subprocess to isolate memory.
-    Uses Pipe to avoid pickle overhead for large data.
-
-    Args:
-        contents: data to compress (str or bytes)
-        filepath: destination file path
-        mode: brotli compression mode (e.g., brotli.MODE_TEXT)
-        timeout: subprocess timeout in seconds
-        fallback_uncompressed: if True, save uncompressed on failure; if False, raise exception
-
-    Returns:
-        str: actual filepath saved (may differ from input if fallback used)
-
-    Raises:
-        Exception: if compression fails and fallback_uncompressed is False
-    """
-    import multiprocessing
-    import sys
-
-    # Ensure contents are bytes
-    if isinstance(contents, str):
-        contents = contents.encode('utf-8')
-
-    # Use explicit spawn context for thread safety (avoids fork() with multi-threaded parent)
-    # Always use spawn - consistent behavior in tests and production
-    ctx = multiprocessing.get_context('spawn')
-    parent_conn, child_conn = ctx.Pipe()
-
-    # Run compression in subprocess using spawn (not fork)
-    proc = ctx.Process(target=_brotli_compress_worker, args=(child_conn, filepath, mode))
-
-    # Windows-safe: Set daemon=False explicitly to avoid issues with process cleanup
-    proc.daemon = False
-    proc.start()
-
-    try:
-        # Send data to subprocess via pipe (avoids pickle)
-        parent_conn.send(contents)
-
-        # Wait for result with timeout
-        if parent_conn.poll(timeout):
-            success = parent_conn.recv()
-        else:
-            success = False
-            logger.warning(f"Brotli compression subprocess timed out after {timeout}s")
-            # Graceful termination with platform-aware cleanup
-            try:
-                proc.terminate()
-            except Exception as term_error:
-                logger.debug(f"Process termination issue (may be normal on Windows): {term_error}")
-
-        parent_conn.close()
-        proc.join(timeout=5)
-
-        # Force kill if still alive after graceful termination
-        if proc.is_alive():
-            try:
-                if sys.platform == 'win32':
-                    # Windows: use kill() which is more forceful
-                    proc.kill()
-                else:
-                    # Unix: terminate() already sent SIGTERM, now try SIGKILL
-                    proc.kill()
-                proc.join(timeout=2)
-            except Exception as kill_error:
-                logger.warning(f"Failed to kill brotli compression process: {kill_error}")
-
-        # Check if file was created successfully
-        if success and os.path.exists(filepath):
-            return filepath
+        return filepath

     except Exception as e:
-        logger.error(f"Brotli compression error: {e}")
-        try:
-            parent_conn.close()
-        except:
-            pass
-        try:
-            proc.terminate()
-            proc.join(timeout=2)
-        except:
-            pass
-
         # Compression failed
         if fallback_uncompressed:
             logger.warning(f"Brotli compression failed for {filepath}, saving uncompressed")
             fallback_path = filepath.replace('.br', '')
             with open(fallback_path, 'wb') as f:
                 f.write(contents)
             return fallback_path
         else:
-            raise Exception(f"Brotli compression subprocess failed for {filepath}")
+            raise Exception(f"Brotli compression failed for {filepath}: {e}")


 class model(watch_base):
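The rewrite above trades subprocess isolation for a plain in-process call. A minimal sketch of that native pattern, assuming the standard Brotli Python bindings; the helper name is illustrative:

    import gc
    import brotli

    def save_brotli(data: bytes, path: str) -> str:
        # Compress in-process; no worker process or Pipe needed
        compressed = brotli.compress(data, mode=brotli.MODE_TEXT)
        with open(path, 'wb') as f:
            f.write(compressed)
        del compressed
        # Per the commit's testing notes, gc.collect() keeps memory from building up
        gc.collect()
        return path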
@@ -523,7 +447,7 @@ class model(watch_base):

         if not os.path.exists(dest):
             try:
-                actual_dest = _brotli_subprocess_save(contents, dest, mode=brotli.MODE_TEXT, fallback_uncompressed=True)
+                actual_dest = _brotli_save(contents, dest, mode=brotli.MODE_TEXT, fallback_uncompressed=True)
                 if actual_dest != dest:
                     snapshot_fname = os.path.basename(actual_dest)
             except Exception as e:
@@ -949,13 +873,13 @@ class model(watch_base):
     def save_last_text_fetched_before_filters(self, contents):
         import brotli
         filepath = os.path.join(self.watch_data_dir, 'last-fetched.br')
-        _brotli_subprocess_save(contents, filepath, mode=brotli.MODE_TEXT, fallback_uncompressed=False)
+        _brotli_save(contents, filepath, mode=brotli.MODE_TEXT, fallback_uncompressed=False)

     def save_last_fetched_html(self, timestamp, contents):
         self.ensure_data_dir_exists()
         snapshot_fname = f"{timestamp}.html.br"
         filepath = os.path.join(self.watch_data_dir, snapshot_fname)
-        _brotli_subprocess_save(contents, filepath, mode=None, fallback_uncompressed=True)
+        _brotli_save(contents, filepath, mode=None, fallback_uncompressed=True)
         self._prune_last_fetched_html_snapshots()

     def get_fetched_html(self, timestamp):
@@ -13,14 +13,9 @@ Research: https://github.com/libvips/pyvips/issues/234

 import multiprocessing

-# CRITICAL: Use 'spawn' instead of 'fork' to avoid inheriting parent's
+# CRITICAL: Use 'spawn' context instead of 'fork' to avoid inheriting parent's
 # LibVIPS threading state which can cause hangs in gaussblur operations
 # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
-try:
-    multiprocessing.set_start_method('spawn', force=False)
-except RuntimeError:
-    # Already set, ignore
-    pass


 def _worker_generate_diff(conn, img_bytes_from, img_bytes_to, threshold, blur_sigma, max_width, max_height):
@@ -95,9 +90,10 @@ def generate_diff_isolated(img_bytes_from, img_bytes_to, threshold, blur_sigma,
     Returns:
         bytes: JPEG diff image or None on failure
     """
-    parent_conn, child_conn = multiprocessing.Pipe()
+    ctx = multiprocessing.get_context('spawn')
+    parent_conn, child_conn = ctx.Pipe()

-    p = multiprocessing.Process(
+    p = ctx.Process(
         target=_worker_generate_diff,
         args=(child_conn, img_bytes_from, img_bytes_to, threshold, blur_sigma, max_width, max_height)
     )
@@ -140,7 +136,8 @@ def calculate_change_percentage_isolated(img_bytes_from, img_bytes_to, threshold
     Returns:
         float: Change percentage
     """
-    parent_conn, child_conn = multiprocessing.Pipe()
+    ctx = multiprocessing.get_context('spawn')
+    parent_conn, child_conn = ctx.Pipe()

     def _worker_calculate(conn):
         try:
@@ -185,7 +182,7 @@ def calculate_change_percentage_isolated(img_bytes_from, img_bytes_to, threshold
     finally:
         conn.close()

-    p = multiprocessing.Process(target=_worker_calculate, args=(child_conn,))
+    p = ctx.Process(target=_worker_calculate, args=(child_conn,))
     p.start()

     result = 0.0
@@ -233,7 +230,8 @@ def compare_images_isolated(img_bytes_from, img_bytes_to, threshold, blur_sigma,
         tuple: (changed_detected, change_percentage)
     """
     print(f"[Parent] Starting compare_images_isolated subprocess", flush=True)
-    parent_conn, child_conn = multiprocessing.Pipe()
+    ctx = multiprocessing.get_context('spawn')
+    parent_conn, child_conn = ctx.Pipe()

     def _worker_compare(conn):
         try:
@@ -301,7 +299,7 @@ def compare_images_isolated(img_bytes_from, img_bytes_to, threshold, blur_sigma,
     finally:
         conn.close()

-    p = multiprocessing.Process(target=_worker_compare, args=(child_conn,))
+    p = ctx.Process(target=_worker_compare, args=(child_conn,))
     print(f"[Parent] Starting subprocess (pid will be assigned)", flush=True)
     p.start()
     print(f"[Parent] Subprocess started (pid={p.pid}), waiting for result (30s timeout)", flush=True)
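All of the hunks above apply one rule: the Pipe and the Process must come from the same 'spawn' context. A self-contained sketch of the pattern, with an illustrative worker rather than the repository's LibVIPS workers:

    import multiprocessing

    def _worker(conn, x):
        # Runs in a fresh 'spawn' process, so no parent threading state is inherited
        try:
            conn.send(x * x)
        finally:
            conn.close()

    if __name__ == '__main__':
        ctx = multiprocessing.get_context('spawn')
        parent_conn, child_conn = ctx.Pipe()
        p = ctx.Process(target=_worker, args=(child_conn, 7))
        p.start()
        # Wait with a timeout, like the 30s timeout mentioned above
        result = parent_conn.recv() if parent_conn.poll(30) else None
        p.join(timeout=5)
        if p.is_alive():
            p.kill()  # force-stop a hung worker rather than leak it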
@@ -76,7 +76,7 @@ $(document).ready(function () {

     // Cache DOM elements for performance
    const queueBubble = document.getElementById('queue-bubble');
-
+    const queueSizePagerInfoText = document.getElementById('queue-size-int');
     // Only try to connect if authentication isn't required or user is authenticated
     // The 'is_authenticated' variable will be set in the template
     if (typeof is_authenticated !== 'undefined' ? is_authenticated : true) {
@@ -118,6 +118,10 @@ $(document).ready(function () {

     socket.on('queue_size', function (data) {
         console.log(`${data.event_timestamp} - Queue size update: ${data.q_length}`);
+        if(queueSizePagerInfoText) {
+            queueSizePagerInfoText.textContent = parseInt(data.q_length).toLocaleString() || 'None';
+        }
+        document.body.classList.toggle('has-queue', parseInt(data.q_length) > 0);

         // Update queue bubble in action sidebar
         //if (queueBubble) {
@@ -1,6 +1,4 @@
 .pagination-page-info {
-  color: #fff;
-  font-size: 0.85rem;
   text-transform: capitalize;
 }

@@ -1,4 +1,32 @@
 /* table related */
+#stats_row {
+  display: flex;
+  align-items: center;
+  width: 100%;
+  color: #fff;
+  font-size: 0.85rem;
+  >* {
+    padding-bottom: 0.5rem;
+  }
+  .left {
+    text-align: left;
+  }
+
+  .right {
+    opacity: 0.5;
+    transition: opacity 0.6s ease;
+    margin-left: auto; /* pushes it to the far right */
+    text-align: right;
+  }
+}
+body.has-queue {
+  #stats_row {
+    .right {
+      opacity: 1.0;
+    }
+  }
+}
+
 .watch-table {
   width: 100%;
   font-size: 80%;
File diff suppressed because one or more lines are too long
@@ -2,6 +2,7 @@
 import psutil
 import time
 from threading import Thread
+import multiprocessing

 import pytest
 import arrow
@@ -97,6 +98,34 @@ def cleanup(datastore_path):
         if os.path.isfile(f):
             os.unlink(f)

+def pytest_configure(config):
+    """Configure pytest environment before tests run.
+
+    CRITICAL: Set multiprocessing start method to 'fork' for Python 3.14+ compatibility.
+
+    Python 3.14 changed the default start method from 'fork' to 'forkserver' on Linux.
+    The forkserver method requires all objects to be picklable, but pytest-flask's
+    LiveServer uses nested functions that can't be pickled.
+
+    Setting 'fork' explicitly:
+    - Maintains compatibility with Python 3.10-3.13 (where 'fork' was already default)
+    - Fixes Python 3.14 pickling errors
+    - Only affects Unix-like systems (Windows uses 'spawn' regardless)
+
+    See: https://github.com/python/cpython/issues/126831
+    See: https://docs.python.org/3/whatsnew/3.14.html
+    """
+    # Only set if not already set (respects existing configuration)
+    if multiprocessing.get_start_method(allow_none=True) is None:
+        try:
+            # 'fork' is available on Unix-like systems (Linux, macOS)
+            # On Windows, this will have no effect as 'spawn' is the only option
+            multiprocessing.set_start_method('fork', force=False)
+            logger.debug("Set multiprocessing start method to 'fork' for Python 3.14+ compatibility")
+        except (ValueError, RuntimeError):
+            # Already set, not available on this platform, or context already created
+            pass
+
 def pytest_addoption(parser):
     """Add custom command-line options for pytest.
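The pickling constraint the docstring describes is easy to demonstrate. A small sketch (not repository code) of why 'forkserver' and 'spawn' reject nested functions like those in pytest-flask's LiveServer:

    import pickle

    def make_runner():
        def run():  # nested function, analogous to LiveServer's worker
            pass
        return run

    try:
        pickle.dumps(make_runner())
    except (pickle.PicklingError, AttributeError) as e:
        # 'spawn' and 'forkserver' must pickle the process target, so this fails;
        # 'fork' inherits the parent's memory and never needs to pickle it.
        print(f"cannot pickle nested function: {e}")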
Binary file not shown.
File diff suppressed because it is too large
@@ -42,7 +42,7 @@ orjson~=3.11
 # jq not available on Windows so must be installed manually

 # Notification library
-apprise==1.9.5
+apprise==1.9.6

 diff_match_patch

@@ -70,7 +70,7 @@ lxml >=4.8.0,!=5.2.0,!=5.2.1,<7

 # XPath 2.0-3.1 support - 4.2.0 had issues, 4.1.5 stable
 # Consider updating to latest stable version periodically
-elementpath==5.0.4
+elementpath==5.1.0

 # For fast image comparison in screenshot change detection
 # opencv-python-headless is OPTIONAL (excluded from requirements.txt)