diff --git a/.github/workflows/test-stack-reusable-workflow.yml b/.github/workflows/test-stack-reusable-workflow.yml
index 57dd02fe..64603ff4 100644
--- a/.github/workflows/test-stack-reusable-workflow.yml
+++ b/.github/workflows/test-stack-reusable-workflow.yml
@@ -246,13 +246,21 @@ jobs:
           # @todo - scan the container log to see the right "graceful shutdown" text exists
           docker rm sig-test
 
-      - name: Dump container log
+      - name: Dump container log and memory report
         if: always()
         run: |
           mkdir output-logs
           docker logs test-cdio-basic-tests > output-logs/test-cdio-basic-tests-stdout-${{ env.PYTHON_VERSION }}.txt
           docker logs test-cdio-basic-tests 2> output-logs/test-cdio-basic-tests-stderr-${{ env.PYTHON_VERSION }}.txt
+          # Extract test-memory.log from the container
+          echo "Extracting test-memory.log from container..."
+          docker cp test-cdio-basic-tests:/app/changedetectionio/test-memory.log output-logs/test-memory-${{ env.PYTHON_VERSION }}.log || echo "test-memory.log not found in container"
+
+          # Display the memory log contents for immediate visibility in workflow output
+          echo "=== Memory Test Report ==="
+          cat output-logs/test-memory-${{ env.PYTHON_VERSION }}.log 2>/dev/null || echo "No memory log available"
+
       - name: Store everything including test-datastore
         if: always()
         uses: actions/upload-artifact@v4
diff --git a/changedetectionio/async_update_worker.py b/changedetectionio/async_update_worker.py
index a607ca09..ed090320 100644
--- a/changedetectionio/async_update_worker.py
+++ b/changedetectionio/async_update_worker.py
@@ -334,6 +334,10 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore):
                         if update_handler.fetcher.content or (not update_handler.fetcher.content and empty_pages_are_a_change):
                             watch.save_last_fetched_html(contents=update_handler.fetcher.content, timestamp=int(fetch_start_time))
 
+                        # Explicitly delete large content variables to free memory IMMEDIATELY after saving
+                        # These are no longer needed after being saved to history
+                        del contents
+
                         # Send notifications on second+ check
                         if watch.history_n >= 2:
                             logger.info(f"Change detected in UUID {uuid} - {watch['url']}")
@@ -372,6 +376,12 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore):
                     datastore.update_watch(uuid=uuid, update_obj={'fetch_time': round(time.time() - fetch_start_time, 3),
                                                                   'check_count': count})
 
+                    # NOW clear fetcher content - after all processing is complete
+                    # This is the last point where we need the fetcher data
+                    if update_handler and hasattr(update_handler, 'fetcher') and update_handler.fetcher:
+                        update_handler.fetcher.clear_content()
+                        logger.debug(f"Cleared fetcher content for UUID {uuid}")
+
                 except Exception as e:
                     logger.error(f"Worker {worker_id} unexpected error processing {uuid}: {e}")
                     logger.error(f"Worker {worker_id} traceback:", exc_info=True)
@@ -392,7 +402,28 @@ async def async_update_worker(worker_id, q, notification_q, app, datastore):
                         #logger.info(f"Worker {worker_id} sending completion signal for UUID {watch['uuid']}")
                         watch_check_update.send(watch_uuid=watch['uuid'])
 
-                    update_handler = None
+                    # Explicitly clean up update_handler and all its references
+                    if update_handler:
+                        # Clear fetcher content using the proper method
+                        if hasattr(update_handler, 'fetcher') and update_handler.fetcher:
+                            update_handler.fetcher.clear_content()
+
+                        # Clear processor references
+                        if hasattr(update_handler, 'content_processor'):
+                            update_handler.content_processor = None
+
+                        update_handler = None
+
+                    # Clear local contents variable if it still exists
+                    if 'contents' in locals():
+                        del contents
+
+                    # Note: We don't set watch = None here because:
+                    # 1. watch is just a local reference to datastore.data['watching'][uuid]
+                    # 2. Setting it to None doesn't affect the datastore
+                    # 3. GC can't collect the object anyway (still referenced by datastore)
+                    # 4. It would just cause confusion
+
                     logger.debug(f"Worker {worker_id} completed watch {uuid} in {time.time()-fetch_start_time:.2f}s")
                 except Exception as cleanup_error:
                     logger.error(f"Worker {worker_id} error during cleanup: {cleanup_error}")
diff --git a/changedetectionio/content_fetchers/base.py b/changedetectionio/content_fetchers/base.py
index 8be939ef..6cffe5aa 100644
--- a/changedetectionio/content_fetchers/base.py
+++ b/changedetectionio/content_fetchers/base.py
@@ -64,6 +64,19 @@ class Fetcher():
     # Time ONTOP of the system defined env minimum time
     render_extract_delay = 0
 
+    def clear_content(self):
+        """
+        Explicitly clear all content from memory to free up heap space.
+        Call this after content has been saved to disk.
+        """
+        self.content = None
+        if hasattr(self, 'raw_content'):
+            self.raw_content = None
+        self.screenshot = None
+        self.xpath_data = None
+        # Keep headers and status_code as they're small
+        logger.trace("Fetcher content cleared from memory")
+
     @abstractmethod
     def get_error(self):
         return self.error
diff --git a/changedetectionio/processors/magic.py b/changedetectionio/processors/magic.py
index 918ae9ea..5eebe93d 100644
--- a/changedetectionio/processors/magic.py
+++ b/changedetectionio/processors/magic.py
@@ -64,24 +64,31 @@ class guess_stream_type():
         # Remove whitespace between < and tag name for robust detection (handles '< html', '<\nhtml', etc.)
         test_content_normalized = re.sub(r'<\s+', '<', test_content)
 
-        # Magic will sometimes call text/plain as text/html!
+        # Use puremagic for lightweight MIME detection (saves ~14MB vs python-magic)
        magic_result = None
         try:
-            import magic
+            import puremagic
 
-            mime = magic.from_buffer(content[:200], mime=True) # Send the original content
-            logger.debug(f"Guessing mime type, original content_type '{http_content_header}', mime type detected '{mime}'")
-            if mime and "/" in mime:
-                magic_result = mime
-                # Ignore generic/fallback mime types from magic
-                if mime in ['application/octet-stream', 'application/x-empty', 'binary']:
-                    logger.debug(f"Ignoring generic mime type '{mime}' from magic library")
-                # Trust magic for non-text types immediately
-                elif mime not in ['text/html', 'text/plain']:
-                    magic_content_header = mime
+            # puremagic needs bytes, so encode if we have a string
+            content_bytes = content[:200].encode('utf-8') if isinstance(content, str) else content[:200]
+
+            # puremagic returns a list of PureMagic objects with confidence scores
+            detections = puremagic.magic_string(content_bytes)
+            if detections:
+                # Get the highest confidence detection
+                mime = detections[0].mime_type
+                logger.debug(f"Guessing mime type, original content_type '{http_content_header}', mime type detected '{mime}'")
+                if mime and "/" in mime:
+                    magic_result = mime
+                    # Ignore generic/fallback mime types
+                    if mime in ['application/octet-stream', 'application/x-empty', 'binary']:
+                        logger.debug(f"Ignoring generic mime type '{mime}' from puremagic library")
+                    # Trust puremagic for non-text types immediately
+                    elif mime not in ['text/html', 'text/plain']:
+                        magic_content_header = mime
         except Exception as e:
-            logger.error(f"Error getting a more precise mime type from 'magic' library ({str(e)}), using content-based detection")
+            logger.error(f"Error getting a more precise mime type from 'puremagic' library ({str(e)}), using content-based detection")
 
         # Content-based detection (most reliable for text formats)
         # Check for HTML patterns first - if found, override magic's text/plain
diff --git a/changedetectionio/processors/text_json_diff/processor.py b/changedetectionio/processors/text_json_diff/processor.py
index 32b646a9..c9baa6a5 100644
--- a/changedetectionio/processors/text_json_diff/processor.py
+++ b/changedetectionio/processors/text_json_diff/processor.py
@@ -556,6 +556,20 @@ class perform_site_check(difference_detection_processor):
             else:
                 logger.debug(f"check_unique_lines: UUID {watch.get('uuid')} had unique content")
 
+        # Note: Explicit cleanup is only needed here because text_json_diff handles
+        # large strings (100KB-300KB for RSS/HTML). The other processors work with
+        # small strings and don't need this.
+        #
+        # Python would clean these up automatically, but explicit `del` frees memory
+        # immediately rather than waiting for function return, reducing peak memory usage.
+        del content
+        if 'html_content' in locals() and html_content is not stripped_text:
+            del html_content
+        if 'text_content_before_ignored_filter' in locals() and text_content_before_ignored_filter is not stripped_text:
+            del text_content_before_ignored_filter
+        if 'text_for_checksuming' in locals() and text_for_checksuming is not stripped_text:
+            del text_for_checksuming
+
         return changed_detected, update_obj, stripped_text
 
     def _apply_diff_filtering(self, watch, stripped_text, text_before_filter):
diff --git a/requirements.txt b/requirements.txt
index 2a5a5f74..7a879970 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -125,8 +125,9 @@ price-parser
 
 # flask_socket_io - incorrect package name, already have flask-socketio above
 
-# So far for detecting correct favicon type, but for other things in the future
-python-magic
+# Lightweight MIME type detection (saves ~14MB memory vs python-magic/libmagic)
+# Used for detecting correct favicon type and content-type detection
+puremagic
 
 # Scheduler - Windows seemed to miss a lot of default timezone info (even "UTC" !)
 tzdata
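
For reviewers unfamiliar with puremagic: the hunk in changedetectionio/processors/magic.py only relies on puremagic.magic_string() returning a confidence-sorted list of matches whose entries expose a mime_type attribute. Below is a minimal standalone sketch of that detection-with-fallback pattern; the sniff_mime() helper, the fallback value, and the sample inputs are illustrative and not part of the patch.

# Sketch only - mirrors the detection/fallback logic in processors/magic.py, not project code.
import puremagic

def sniff_mime(content, fallback='text/plain'):
    # puremagic needs bytes; like the patch, only the first 200 bytes are inspected
    data = content[:200].encode('utf-8') if isinstance(content, str) else content[:200]
    try:
        detections = puremagic.magic_string(data)   # confidence-sorted, best match first
    except Exception:                               # empty input or puremagic failure - same broad catch as the patch
        return fallback
    if not detections:
        return fallback
    mime = detections[0].mime_type
    # Generic results carry no useful signal, same as the ignore list in the patch
    if not mime or mime in ('application/octet-stream', 'application/x-empty', 'binary'):
        return fallback
    return mime

print(sniff_mime(b'%PDF-1.7 ...'))          # detected from the magic bytes: application/pdf
print(sniff_mime('just some plain text'))   # no match -> falls back to text/plain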
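The early del statements added in async_update_worker.py and text_json_diff/processor.py are about lowering peak memory while the function is still running, not about whether Python eventually frees the strings. A rough, self-contained illustration of that effect follows; the sizes and the summarise() helper are invented for the example.

# Rough illustration only - not project code. Dropping a large local early lowers peak
# memory because the big intermediate no longer coexists with later allocations.
import tracemalloc

def summarise(doc, drop_early):
    expanded = doc * 4                    # stands in for html_content / text_before_filter
    digest = hash(expanded)
    if drop_early:
        del expanded                      # what the patch does: release it before more work
    scratch = doc * 4                     # a later allocation that also needs room
    return digest ^ hash(scratch)

for drop_early in (False, True):
    tracemalloc.start()
    summarise("x" * 300_000, drop_early)  # ~300 KB input, similar to a large RSS/HTML body
    _, peak = tracemalloc.get_traced_memory()
    tracemalloc.stop()
    print(f"drop_early={drop_early}: peak ~{peak // 1024} KiB")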