Mirror of https://github.com/dgtlmoon/changedetection.io.git (synced 2026-01-27 09:26:04 +00:00)

Compare commits: python-314...heuy-notif
38 commits in this comparison (SHA1):

8d4ebeb8ee, b139541763, 0d415d5da2, 6d70dca86f, 41e771ff91, 20c11623ef,
422215bc9a, c06d752d9e, 1fcbb9afc5, 03997fa2df, 9640764797, 0e97893b58,
2d511a2151, 90610444a7, cad164b267, 54d91118f6, 45096a9fd4, 7a08cbb691,
9bd704ec9f, 286da3b810, 237f08b8ed, 5fad56b548, c5eeafe2dd, 50c2e9761e,
5031aea0cc, 0b14972248, 1f3e065add, 06f2822967, f6e93f9250, cc6e078a79,
ca5bb8fe93, b62efa1470, 3e281f4ab5, becd32f549, 3c80738da5, b2cc8e5d0c,
4c36561653, 31a104f29e
@@ -124,6 +124,37 @@ jobs:
          name: test-cdio-basic-tests-output-py${{ env.PYTHON_VERSION }}
          path: output-logs

  notification-queue-storage-test:
    # The notifications are put into a retry queue so they can't be lost.
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 25
    env:
      PYTHON_VERSION: ${{ inputs.python-version }}
    steps:
      - uses: actions/checkout@v6

      - name: Download Docker image artifact
        uses: actions/download-artifact@v6
        with:
          name: test-changedetectionio-${{ env.PYTHON_VERSION }}
          path: /tmp

      - name: Load Docker image
        run: |
          docker load -i /tmp/test-changedetectionio.tar

      - name: Test Huey notification backend - SQLite
        run: |
          docker network inspect changedet-network >/dev/null 2>&1 || docker network create changedet-network
          docker run --name notification-basic-tests --rm --network changedet-network test-changedetectionio bash -c 'cd changedetectionio && QUEUE_STORAGE=sqlite pytest tests/test_notifications_huey.py -v'

      - name: Test Huey notification backend - Redis
        run: |
          docker network inspect changedet-network >/dev/null 2>&1 || docker network create changedet-network
          docker run --network changedet-network -d --name redis-test-huey --hostname redis-test-huey -p 127.0.0.1:6379:6379 redis:7
          docker run --name notification-basic-tests --rm --network changedet-network test-changedetectionio bash -c 'cd changedetectionio && pip3 install redis && QUEUE_STORAGE=redis REDIS_URL=redis://redis-test-huey:6379/0 pytest tests/test_notifications_huey.py -v'

  # Playwright tests
  playwright-tests:
    runs-on: ubuntu-latest
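The two test steps above run the same pytest file with `QUEUE_STORAGE=sqlite` and `QUEUE_STORAGE=redis`. A rough sketch of how that environment switch can map onto Huey storage backends follows; the real `init_huey()` in `changedetectionio/notification/task_queue.py` is not part of this diff, so the function below is illustrative only.

```python
# Illustrative only - the real init_huey() in changedetectionio/notification/task_queue.py
# is not part of this diff. Shows how QUEUE_STORAGE / REDIS_URL (as used by the job above)
# could pick between Huey's SQLite and Redis storage backends.
import os

from huey import RedisHuey, SqliteHuey


def build_huey(datastore_path="/datastore"):
    if os.getenv("QUEUE_STORAGE", "sqlite").lower() == "redis":
        # External Redis server, e.g. the redis:7 container started by the CI job above
        return RedisHuey("notifications", url=os.getenv("REDIS_URL", "redis://localhost:6379/0"))
    # Default: a SQLite file kept alongside the datastore so queued notifications survive restarts
    return SqliteHuey("notifications", filename=os.path.join(datastore_path, "huey-notifications.db"))


huey = build_huey()
```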
@@ -96,10 +96,10 @@ def sigshutdown_handler(_signo, _stack_frame):

    # Close janus queues properly
    try:
        from changedetectionio.flask_app import update_q, notification_q
        from changedetectionio.flask_app import update_q
        update_q.close()
        notification_q.close()
        logger.debug("Janus queues closed successfully")
        logger.debug("Janus update queue closed successfully")
        # notification_q is deprecated - now using Huey task queue which handles its own shutdown
    except Exception as e:
        logger.critical(f"CRITICAL: Failed to close janus queues: {e}")

changedetectionio/blueprint/notification_dashboard/__init__.py (new file, 168 lines)
@@ -0,0 +1,168 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
"""
|
||||
Notification Dashboard Blueprint
|
||||
Handles the notification queue dashboard UI and related functionality
|
||||
"""
|
||||
|
||||
from flask import Blueprint, render_template, request, redirect, url_for, flash, jsonify
|
||||
from changedetectionio.flask_app import login_optionally_required
|
||||
|
||||
|
||||
def construct_blueprint():
|
||||
"""Construct and return the notification dashboard blueprint"""
|
||||
notification_dashboard = Blueprint('notification_dashboard', __name__, template_folder='templates')
|
||||
|
||||
@notification_dashboard.route("/", methods=['GET'])
|
||||
@login_optionally_required
|
||||
def dashboard():
|
||||
"""Notification queue dashboard - shows all notification events in timeline view"""
|
||||
from changedetectionio.notification.task_queue import (
|
||||
get_all_notification_events,
|
||||
get_retry_config
|
||||
)
|
||||
|
||||
# Get filter parameter from query string
|
||||
filter_status = request.args.get('filter', '').lower()
|
||||
valid_filters = ['delivered', 'queued', 'retrying', 'failed']
|
||||
|
||||
# Validate filter
|
||||
if filter_status and filter_status not in valid_filters:
|
||||
filter_status = ''
|
||||
|
||||
# Get all notification events (delivered, queued, retrying, failed)
|
||||
all_events = get_all_notification_events(limit=100)
|
||||
|
||||
# Get retry configuration for display
|
||||
retry_config = get_retry_config()
|
||||
|
||||
# Count by status for summary (always show all counts)
|
||||
status_counts = {
|
||||
'delivered': sum(1 for e in all_events if e['status'] == 'delivered'),
|
||||
'queued': sum(1 for e in all_events if e['status'] == 'queued'),
|
||||
'retrying': sum(1 for e in all_events if e['status'] == 'retrying'),
|
||||
'failed': sum(1 for e in all_events if e['status'] == 'failed')
|
||||
}
|
||||
|
||||
# Filter events if a filter is active
|
||||
if filter_status:
|
||||
events = [e for e in all_events if e['status'] == filter_status]
|
||||
else:
|
||||
events = all_events
|
||||
|
||||
return render_template(
|
||||
'notification-dashboard.html',
|
||||
events=events,
|
||||
retry_config=retry_config,
|
||||
status_counts=status_counts,
|
||||
active_filter=filter_status
|
||||
)
|
||||
|
||||
@notification_dashboard.route("/log/<task_id>", methods=['GET'])
|
||||
@login_optionally_required
|
||||
def get_notification_log(task_id):
|
||||
"""Get Apprise log for a specific notification task"""
|
||||
from changedetectionio.notification.task_queue import get_task_apprise_log
|
||||
|
||||
log_data = get_task_apprise_log(task_id)
|
||||
|
||||
if log_data:
|
||||
return jsonify(log_data)
|
||||
else:
|
||||
return jsonify({'error': 'Log not found for this task'}), 404
|
||||
|
||||
@notification_dashboard.route("/send-now/<task_id>", methods=['GET'])
|
||||
@login_optionally_required
|
||||
def send_now(task_id):
|
||||
"""Manually retry a scheduled notification immediately"""
|
||||
from changedetectionio.notification.task_queue import retry_notification_now
|
||||
|
||||
success = retry_notification_now(task_id)
|
||||
if success:
|
||||
message = "✓ Notification sent successfully and removed from queue."
|
||||
flash(message, 'notice')
|
||||
else:
|
||||
message = "Failed to send notification. It has been re-queued for automatic retry."
|
||||
flash(message, 'error')
|
||||
|
||||
return redirect(url_for('notification_dashboard.dashboard'))
|
||||
|
||||
@notification_dashboard.route("/retry/<task_id>", methods=['POST'])
|
||||
@login_optionally_required
|
||||
def retry_notification(task_id):
|
||||
"""Retry a failed notification (from dead letter queue)"""
|
||||
from changedetectionio.notification.task_queue import retry_failed_notification, get_task_metadata
|
||||
|
||||
# Check if task_id exists first to provide better error message
|
||||
if not task_id or task_id == 'TASK_ID_PLACEHOLDER':
|
||||
flash("Invalid task ID. Please refresh the page and try again.", 'error')
|
||||
return redirect(url_for('notification_dashboard.dashboard'))
|
||||
|
||||
# Check if task exists in metadata
|
||||
task_metadata = get_task_metadata(task_id)
|
||||
if not task_metadata:
|
||||
flash(f"Task ID '{task_id}' not found. It may have been already retried or removed.", 'error')
|
||||
return redirect(url_for('notification_dashboard.dashboard'))
|
||||
|
||||
success = retry_failed_notification(task_id)
|
||||
|
||||
if success:
|
||||
flash("Notification queued for retry.", 'notice')
|
||||
else:
|
||||
flash("Failed to retry notification. The task may be missing notification data. Check logs for details.", 'error')
|
||||
|
||||
return redirect(url_for('notification_dashboard.dashboard'))
|
||||
|
||||
@notification_dashboard.route("/retry-all", methods=['POST'])
|
||||
@login_optionally_required
|
||||
def retry_all_notifications():
|
||||
"""Retry all failed notifications"""
|
||||
from changedetectionio.notification.task_queue import retry_all_failed_notifications
|
||||
|
||||
result = retry_all_failed_notifications()
|
||||
|
||||
if result['total'] == 0:
|
||||
flash("No failed notifications to retry.", 'notice')
|
||||
elif result['failed'] == 0:
|
||||
flash(f"Successfully queued {result['success']} notification(s) for retry.", 'notice')
|
||||
else:
|
||||
flash(f"Queued {result['success']} notification(s) for retry. {result['failed']} failed to queue.", 'error')
|
||||
|
||||
return redirect(url_for('notification_dashboard.dashboard'))
|
||||
|
||||
@notification_dashboard.route("/clear-failed", methods=['POST'])
|
||||
@login_optionally_required
|
||||
def clear_failed_notifications():
|
||||
"""Clear only failed notifications (dead letter queue)"""
|
||||
from changedetectionio.notification.task_queue import clear_failed_notifications
|
||||
|
||||
result = clear_failed_notifications()
|
||||
|
||||
if 'error' in result:
|
||||
flash(f"Error clearing failed notifications: {result['error']}", 'error')
|
||||
else:
|
||||
cleared_count = result.get('cleared', 0)
|
||||
if cleared_count == 0:
|
||||
flash("No failed notifications to clear.", 'notice')
|
||||
else:
|
||||
flash(f"Cleared {cleared_count} failed notification(s).", 'notice')
|
||||
|
||||
return redirect(url_for('notification_dashboard.dashboard'))
|
||||
|
||||
@notification_dashboard.route("/clear-all", methods=['POST'])
|
||||
@login_optionally_required
|
||||
def clear_all_notifications():
|
||||
"""Clear ALL notifications (delivered, pending, retrying, and failed)"""
|
||||
from changedetectionio.notification.task_queue import clear_all_notifications
|
||||
|
||||
result = clear_all_notifications()
|
||||
|
||||
if 'error' in result:
|
||||
flash(f"Error clearing notifications: {result['error']}", 'error')
|
||||
else:
|
||||
total_cleared = result.get('queue', 0) + result.get('schedule', 0) + result.get('results', 0) + result.get('delivered', 0)
|
||||
flash(f"Cleared {total_cleared} notification(s) (delivered, queued, retrying, and failed).", 'notice')
|
||||
|
||||
return redirect(url_for('notification_dashboard.dashboard'))
|
||||
|
||||
return notification_dashboard
|
||||
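For orientation, a minimal sketch of how this blueprint gets mounted; the `register_blueprint()` call mirrors the flask_app.py hunk further down in this diff, while the bare Flask app here is for illustration only (in changedetection.io the real app also provides `login_optionally_required`, CSRF and the template environment).

```python
# Minimal sketch, not the project's real bootstrap code.
from flask import Flask

import changedetectionio.blueprint.notification_dashboard as notification_dashboard

app = Flask(__name__)
app.register_blueprint(notification_dashboard.construct_blueprint(),
                       url_prefix='/notification-dashboard')

# Resulting routes (from the blueprint above):
#   GET  /notification-dashboard/                -> dashboard()
#   GET  /notification-dashboard/log/<task_id>   -> get_notification_log()
#   POST /notification-dashboard/retry/<task_id> -> retry_notification()
```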
@@ -0,0 +1,398 @@
|
||||
{% extends 'base.html' %}
|
||||
|
||||
{% block content %}
|
||||
<div class="edit-form">
|
||||
<div class="inner notifications-dashboard-split">
|
||||
|
||||
<h4>Notification Events</h4>
|
||||
|
||||
<!-- Summary Stats -->
|
||||
<div class="event-summary">
|
||||
<a href="{{ url_for('notification_dashboard.dashboard', filter='delivered') }}"
|
||||
class="stat-item stat-filter-link {% if active_filter == 'delivered' %}stat-filter-active{% endif %}"
|
||||
title="Show only delivered notifications">
|
||||
<span class="stat-badge stat-delivered">{{ status_counts.delivered }}</span> Delivered
|
||||
</a>
|
||||
<a href="{{ url_for('notification_dashboard.dashboard', filter='queued') }}"
|
||||
class="stat-item stat-filter-link {% if active_filter == 'queued' %}stat-filter-active{% endif %}"
|
||||
title="Show only queued notifications">
|
||||
<span class="stat-badge stat-queued">{{ status_counts.queued }}</span> Queued
|
||||
</a>
|
||||
<a href="{{ url_for('notification_dashboard.dashboard', filter='retrying') }}"
|
||||
class="stat-item stat-filter-link {% if active_filter == 'retrying' %}stat-filter-active{% endif %}"
|
||||
title="Show only retrying notifications">
|
||||
<span class="stat-badge stat-retrying">{{ status_counts.retrying }}</span> Retrying
|
||||
</a>
|
||||
<a href="{{ url_for('notification_dashboard.dashboard', filter='failed') }}"
|
||||
class="stat-item stat-filter-link {% if active_filter == 'failed' %}stat-filter-active{% endif %}"
|
||||
title="Show only failed notifications">
|
||||
<span class="stat-badge stat-failed">{{ status_counts.failed }}</span> Failed
|
||||
</a>
|
||||
{% if active_filter %}
|
||||
<a href="{{ url_for('notification_dashboard.dashboard') }}"
|
||||
class="stat-item stat-clear-filter"
|
||||
title="Clear filter and show all notifications">
|
||||
<svg width="14" height="14" viewBox="0 0 16 16" style="vertical-align: middle; margin-right: 4px;">
|
||||
<path d="M4 4l8 8m0-8l-8 8" stroke="currentColor" stroke-width="2"/>
|
||||
</svg>
|
||||
Clear Filter
|
||||
</a>
|
||||
{% endif %}
|
||||
</div>
|
||||
|
||||
<!-- Action Buttons -->
|
||||
<div class="dashboard-actions">
|
||||
{% if status_counts.failed > 0 %}
|
||||
<form method="POST" action="{{ url_for('notification_dashboard.retry_all_notifications') }}" style="display: inline;">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
|
||||
<button type="submit" class="pure-button button-small retry-all-btn">
|
||||
<svg width="14" height="14" viewBox="0 0 16 16" style="vertical-align: middle; margin-right: 4px;">
|
||||
<path d="M13.5 8a5.5 5.5 0 1 1-1.65-3.95" stroke="currentColor" stroke-width="1.5" fill="none"/>
|
||||
<path d="M13.5 2v4h-4" stroke="currentColor" stroke-width="1.5" fill="none"/>
|
||||
</svg>
|
||||
Retry All Failed ({{ status_counts.failed }})
|
||||
</button>
|
||||
</form>
|
||||
<form method="POST" action="{{ url_for('notification_dashboard.clear_failed_notifications') }}" style="display: inline;" onsubmit="return confirm('Clear all {{ status_counts.failed }} failed notification(s)?');">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
|
||||
<button type="submit" class="pure-button button-small clear-failed-btn">
|
||||
<svg width="14" height="14" viewBox="0 0 16 16" style="vertical-align: middle; margin-right: 4px;">
|
||||
<path d="M4 4l8 8m0-8l-8 8" stroke="currentColor" stroke-width="2"/>
|
||||
</svg>
|
||||
Clear Failed ({{ status_counts.failed }})
|
||||
</button>
|
||||
</form>
|
||||
{% endif %}
|
||||
{% if events|length > 0 %}
|
||||
<form method="POST" action="{{ url_for('notification_dashboard.clear_all_notifications') }}" style="display: inline;" onsubmit="return confirm('Clear all notifications (delivered, queued, retrying, and failed)?');">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
|
||||
<button type="submit" class="pure-button button-small clear-all-btn">Clear All ({{ events|length }})</button>
|
||||
</form>
|
||||
{% endif %}
|
||||
</div>
|
||||
|
||||
<!-- Two-Column Layout -->
|
||||
<div class="split-view">
|
||||
|
||||
<!-- LEFT COLUMN: Event List -->
|
||||
<div class="event-list">
|
||||
{% if events %}
|
||||
{% for event in events %}
|
||||
<div class="event-item {% if loop.first %}event-item-selected{% endif %}"
|
||||
data-event-id="{{ event.id }}"
|
||||
data-event-json='{{ event|tojson }}'>
|
||||
|
||||
<!-- Status Badge -->
|
||||
<span class="event-badge event-badge-{{ event.status }}">
|
||||
{% if event.status == 'delivered' %}
|
||||
<svg width="14" height="14" viewBox="0 0 16 16" xmlns="http://www.w3.org/2000/svg">
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" d="M12.28 5.22a.75.75 0 0 1 0 1.06l-4.75 4.75a.75.75 0 0 1-1.06 0l-2.5-2.5a.75.75 0 0 1 1.06-1.06L7 9.44l4.22-4.22a.75.75 0 0 1 1.06 0Z"></path>
|
||||
</svg>
|
||||
DELIVERED
|
||||
{% elif event.status == 'queued' %}
|
||||
<svg width="14" height="14" viewBox="0 0 16 16" xmlns="http://www.w3.org/2000/svg">
|
||||
<circle cx="8" cy="8" r="6" stroke="currentColor" stroke-width="1.5" fill="none"/>
|
||||
<path d="M8 4v4l3 3" stroke="currentColor" stroke-width="1.5" fill="none"/>
|
||||
</svg>
|
||||
QUEUED
|
||||
{% elif event.status == 'retrying' %}
|
||||
<svg width="14" height="14" viewBox="0 0 16 16" xmlns="http://www.w3.org/2000/svg">
|
||||
<path d="M13.5 8a5.5 5.5 0 1 1-1.65-3.95" stroke="currentColor" stroke-width="1.5" fill="none"/>
|
||||
<path d="M13.5 2v4h-4" stroke="currentColor" stroke-width="1.5" fill="none"/>
|
||||
</svg>
|
||||
RETRYING {% if event.retry_number %}({{ event.retry_number }}/{{ event.total_retries }}){% endif %}
|
||||
{% else %}
|
||||
<svg width="14" height="14" viewBox="0 0 16 16" xmlns="http://www.w3.org/2000/svg">
|
||||
<path d="M4 4l8 8m0-8l-8 8" stroke="currentColor" stroke-width="2"/>
|
||||
</svg>
|
||||
FAILED
|
||||
{% endif %}
|
||||
</span>
|
||||
|
||||
<!-- Watch Title -->
|
||||
<span class="event-title" title="{{ event.watch_url }}">
|
||||
{{ event.watch_title }}
|
||||
</span>
|
||||
|
||||
<!-- Timestamp -->
|
||||
<span class="event-time">
|
||||
<span class="time-display" data-timestamp="{{ event.timestamp }}">
|
||||
{{ event.timestamp_formatted if event.timestamp_formatted else 'N/A' }}
|
||||
</span>
|
||||
</span>
|
||||
</div>
|
||||
{% endfor %}
|
||||
{% else %}
|
||||
<div class="empty-state">
|
||||
<p>No notification events yet</p>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
|
||||
<!-- RIGHT COLUMN: Event Details -->
|
||||
<div class="event-details">
|
||||
<div id="event-details-content">
|
||||
{% if events %}
|
||||
<!-- Initial content will be loaded by JavaScript from first event -->
|
||||
<div class="loading">Select an event to view details</div>
|
||||
{% else %}
|
||||
<div class="empty-details">No events to display</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
|
||||
<!-- Retry Schedule Information (Collapsed by default) -->
|
||||
<details class="retry-schedule-details">
|
||||
<summary>ℹ️ Automatic Retry Configuration</summary>
|
||||
<div class="retry-schedule">
|
||||
<p>
|
||||
Notifications are automatically retried <strong>{{ retry_config.retry_count }} times</strong> with <strong>exponential backoff</strong> starting at {{ retry_config.retry_delay_seconds }} seconds.
|
||||
</p>
|
||||
<table class="pure-table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Attempt</th>
|
||||
<th>Time</th>
|
||||
<th>Action</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td><strong>1st</strong> (initial)</td>
|
||||
<td>T+0:00</td>
|
||||
<td>⚠️ Fails (e.g., SMTP server down)</td>
|
||||
</tr>
|
||||
{% for i in range(retry_config.retry_count) %}
|
||||
{% set delay = retry_config.retry_delays[i] %}
|
||||
{% set cumulative_time = retry_config.retry_delays[:i+1]|sum %}
|
||||
<tr>
|
||||
<td><strong>{{ i + 2 }}{{ ['st', 'nd', 'rd'][i + 1] if i + 1 < 3 else 'th' }}</strong> (retry {{ i + 1 }})</td>
|
||||
<td>T+{{ '%d:%02d' % (cumulative_time // 60, cumulative_time % 60) }}</td>
|
||||
<td>{% if i < retry_config.retry_count - 1 %}⚠️ Fails → Wait {{ delay }}s{% else %}⚠️ Fails → Give up{% endif %}</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</details>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script src="{{ url_for('static_content', group='js', filename='notification-dashboard.js') }}"></script>
|
||||
<script>
|
||||
// Event selection and detail display
|
||||
$(function() {
|
||||
let currentSelectedId = null;
|
||||
|
||||
// Time displays now show actual date/time from backend instead of relative format
|
||||
// Note: All user-controlled content is HTML-escaped on the backend for security
|
||||
|
||||
// Function to render event details
|
||||
function renderEventDetails(event) {
|
||||
if (!event) {
|
||||
return '<div class="empty-details">No event data available</div>';
|
||||
}
|
||||
|
||||
let html = '<div class="detail-container">';
|
||||
|
||||
// Header with resend button
|
||||
html += '<div class="detail-header">';
|
||||
html += `<h5>Event Details</h5>`;
|
||||
if (event.status === 'failed') {
|
||||
const retryUrl = '/notification-dashboard/retry/' + event.id;
|
||||
html += `<form method="POST" action="${retryUrl}" style="display: inline;">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
|
||||
<button type="submit" class="pure-button button-small resend-btn">
|
||||
<svg width="14" height="14" viewBox="0 0 16 16" style="vertical-align: middle; margin-right: 4px;">
|
||||
<path d="M13.5 8a5.5 5.5 0 1 1-1.65-3.95" stroke="currentColor" stroke-width="1.5" fill="none"/>
|
||||
<path d="M13.5 2v4h-4" stroke="currentColor" stroke-width="1.5" fill="none"/>
|
||||
</svg>
|
||||
Retry
|
||||
</button>
|
||||
</form>`;
|
||||
} else if (event.status === 'retrying') {
|
||||
const sendNowUrl = '/notification-dashboard/send-now/' + event.id;
|
||||
html += `<a href="${sendNowUrl}" class="pure-button button-small resend-btn">Send Now</a>`;
|
||||
}
|
||||
html += '</div>';
|
||||
|
||||
// Status section
|
||||
html += '<div class="detail-section">';
|
||||
html += '<div class="detail-row"><span class="detail-label">Status:</span>';
|
||||
let statusClass = 'status-' + (event.status || 'unknown');
|
||||
let statusText = (event.status || 'unknown').toUpperCase();
|
||||
if (event.status === 'retrying' && event.retry_number) {
|
||||
statusText += ` (Attempt ${event.retry_number}/${event.total_retries})`;
|
||||
}
|
||||
html += `<span class="detail-value"><span class="status-pill ${statusClass}">${statusText}</span></span></div>`;
|
||||
|
||||
// Timestamp (converted to browser's local timezone)
|
||||
if (event.timestamp) {
|
||||
html += '<div class="detail-row"><span class="detail-label">Timestamp:</span>';
|
||||
html += `<span class="detail-value">${window.formatTimestampLocal(event.timestamp)}</span></div>`;
|
||||
}
|
||||
|
||||
// Retry time (for retrying status, converted to browser's local timezone)
|
||||
if (event.status === 'retrying' && event.retry_at) {
|
||||
html += '<div class="detail-row"><span class="detail-label">Next Retry:</span>';
|
||||
html += `<span class="detail-value">${window.formatTimestampLocal(event.retry_at)}</span></div>`;
|
||||
}
|
||||
|
||||
// Task ID
|
||||
if (event.id) {
|
||||
html += '<div class="detail-row"><span class="detail-label">Task ID:</span>';
|
||||
html += `<span class="detail-value"><code>${event.id}</code></span></div>`;
|
||||
}
|
||||
html += '</div>';
|
||||
|
||||
// Watch section
|
||||
if (event.watch_url || event.watch_uuid || event.watch_title) {
|
||||
html += '<div class="detail-section">';
|
||||
html += '<h6>Watch</h6>';
|
||||
if (event.watch_url) {
|
||||
html += '<div class="detail-row"><span class="detail-label">URL:</span>';
|
||||
html += `<span class="detail-value"><a href="${event.watch_url}" target="_blank">${event.watch_url}</a></span></div>`;
|
||||
}
|
||||
if (event.watch_uuid) {
|
||||
html += '<div class="detail-row"><span class="detail-label">UUID:</span>';
|
||||
html += `<span class="detail-value"><code>${event.watch_uuid}</code> `;
|
||||
html += `<a href="/edit/${event.watch_uuid}" class="button-link">Edit</a></span></div>`;
|
||||
}
|
||||
html += '</div>';
|
||||
}
|
||||
|
||||
// Notification endpoints
|
||||
if (event.notification_urls && event.notification_urls.length > 0) {
|
||||
html += '<div class="detail-section">';
|
||||
html += '<h6>Notification Endpoints</h6>';
|
||||
event.notification_urls.forEach(url => {
|
||||
html += `<div class="endpoint-item"><code>${url}</code></div>`;
|
||||
});
|
||||
html += '</div>';
|
||||
}
|
||||
|
||||
// Payload (what was sent)
|
||||
if (event.payload) {
|
||||
html += '<div class="detail-section">';
|
||||
html += '<h6>Payload</h6>';
|
||||
|
||||
if (event.payload.notification_title) {
|
||||
html += '<div class="detail-row"><span class="detail-label">Title:</span>';
|
||||
html += `<span class="detail-value"><code>${event.payload.notification_title}</code></span></div>`;
|
||||
}
|
||||
|
||||
if (event.payload.notification_body) {
|
||||
html += '<div class="detail-row"><span class="detail-label">Body:</span>';
|
||||
html += `<span class="detail-value"><pre class="payload-content">${event.payload.notification_body}</pre></span></div>`;
|
||||
}
|
||||
|
||||
if (event.payload.notification_format) {
|
||||
html += '<div class="detail-row"><span class="detail-label">Format:</span>';
|
||||
html += `<span class="detail-value"><code>${event.payload.notification_format}</code></span></div>`;
|
||||
}
|
||||
|
||||
html += '</div>';
|
||||
}
|
||||
|
||||
// Apprise logs
|
||||
if (event.apprise_logs) {
|
||||
html += '<div class="detail-section">';
|
||||
html += '<h6>Apprise Logs</h6>';
|
||||
html += `<pre class="log-content">${event.apprise_logs}</pre>`;
|
||||
html += '</div>';
|
||||
}
|
||||
|
||||
// Error details
|
||||
if (event.error) {
|
||||
html += '<div class="detail-section detail-error">';
|
||||
html += '<h6>Error</h6>';
|
||||
html += `<pre class="error-content">${event.error}</pre>`;
|
||||
html += '</div>';
|
||||
}
|
||||
|
||||
// Retry attempts timeline (for retrying and failed notifications)
|
||||
if ((event.status === 'retrying' || event.status === 'failed') && event.retry_attempts && event.retry_attempts.length > 0) {
|
||||
html += '<div class="detail-section retry-timeline">';
|
||||
html += '<h6>Retry Attempts History</h6>';
|
||||
html += '<div class="timeline">';
|
||||
|
||||
// Sort by attempt number to ensure chronological order
|
||||
const attempts = [...event.retry_attempts].sort((a, b) => a.attempt_number - b.attempt_number);
|
||||
|
||||
attempts.forEach((attempt, index) => {
|
||||
html += '<div class="timeline-item">';
|
||||
html += `<div class="timeline-marker">${attempt.attempt_number}</div>`;
|
||||
html += '<div class="timeline-content">';
|
||||
html += `<div class="timeline-header">`;
|
||||
html += `<strong>Attempt #${attempt.attempt_number}</strong>`;
|
||||
if (attempt.timestamp) {
|
||||
html += ` <span class="timeline-time">${window.formatTimestampLocal(attempt.timestamp)}</span>`;
|
||||
}
|
||||
html += `</div>`;
|
||||
|
||||
if (attempt.error || attempt.error_message) {
|
||||
html += `<div class="timeline-error">⚠️ ${attempt.error || attempt.error_message}</div>`;
|
||||
}
|
||||
|
||||
html += '</div>';
|
||||
html += '</div>';
|
||||
});
|
||||
|
||||
// Add current status indicator
|
||||
if (event.status === 'retrying' && event.retry_at) {
|
||||
html += '<div class="timeline-item timeline-future">';
|
||||
html += `<div class="timeline-marker">⏰</div>`;
|
||||
html += '<div class="timeline-content">';
|
||||
html += `<div class="timeline-header">`;
|
||||
html += `<strong>Next Retry</strong>`;
|
||||
html += ` <span class="timeline-time">${window.formatTimestampLocal(event.retry_at)}</span>`;
|
||||
html += `</div>`;
|
||||
html += `<div class="timeline-status">Scheduled (Attempt ${event.retry_number + 1}/${event.total_retries})</div>`;
|
||||
html += '</div>';
|
||||
html += '</div>';
|
||||
} else if (event.status === 'failed') {
|
||||
html += '<div class="timeline-item timeline-end">';
|
||||
html += `<div class="timeline-marker">✖</div>`;
|
||||
html += '<div class="timeline-content">';
|
||||
html += `<div class="timeline-header"><strong>Exhausted Retries</strong></div>`;
|
||||
html += `<div class="timeline-status">All ${attempts.length} retry attempts failed</div>`;
|
||||
html += '</div>';
|
||||
html += '</div>';
|
||||
}
|
||||
|
||||
html += '</div>'; // Close timeline
|
||||
html += '</div>'; // Close detail-section
|
||||
}
|
||||
|
||||
html += '</div>';
|
||||
|
||||
return html;
|
||||
}
|
||||
|
||||
// Handle event item click
|
||||
$('.event-item').click(function(e) {
|
||||
e.preventDefault();
|
||||
|
||||
// Update selection state
|
||||
$('.event-item').removeClass('event-item-selected');
|
||||
$(this).addClass('event-item-selected');
|
||||
|
||||
// Get event data
|
||||
const eventData = $(this).data('event-json');
|
||||
currentSelectedId = eventData.id;
|
||||
|
||||
// Render details
|
||||
$('#event-details-content').html(renderEventDetails(eventData));
|
||||
});
|
||||
|
||||
// Auto-select first event on load
|
||||
if ($('.event-item').length > 0) {
|
||||
$('.event-item').first().click();
|
||||
}
|
||||
});
|
||||
</script>
|
||||
|
||||
{% endblock %}
|
||||
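The retry table in the template above simply accumulates `retry_config.retry_delays`. A small standalone sketch of that arithmetic, with illustrative delay values (the real ones come from `get_retry_config()`):

```python
# Standalone sketch of the arithmetic the retry table renders.
# The retry_config shape matches what the template uses; the delay values are illustrative.
retry_config = {'retry_count': 3, 'retry_delay_seconds': 60, 'retry_delays': [60, 120, 240]}

elapsed = 0
print("Attempt 1 (initial) at T+0:00")
for attempt, delay in enumerate(retry_config['retry_delays'], start=2):
    elapsed += delay  # same as retry_config.retry_delays[:i+1]|sum in the template
    print(f"Attempt {attempt} (retry {attempt - 1}) at T+{elapsed // 60}:{elapsed % 60:02d}")
# With these delays: retries land at T+1:00, T+3:00 and T+7:00, after which the task is dead-lettered.
```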
@@ -4,7 +4,7 @@ from datetime import datetime
|
||||
from zoneinfo import ZoneInfo, available_timezones
|
||||
import secrets
|
||||
import flask_login
|
||||
from flask import Blueprint, render_template, request, redirect, url_for, flash
|
||||
from flask import Blueprint, render_template, request, redirect, url_for, flash, jsonify
|
||||
from flask_babel import gettext
|
||||
|
||||
from changedetectionio.store import ChangeDetectionStore
|
||||
@@ -84,12 +84,12 @@ def construct_blueprint(datastore: ChangeDetectionStore):
|
||||
# Adjust worker count if it changed
|
||||
if new_worker_count != old_worker_count:
|
||||
from changedetectionio import worker_handler
|
||||
from changedetectionio.flask_app import update_q, notification_q, app, datastore as ds
|
||||
|
||||
from changedetectionio.flask_app import update_q, app, datastore as ds
|
||||
|
||||
result = worker_handler.adjust_async_worker_count(
|
||||
new_count=new_worker_count,
|
||||
update_q=update_q,
|
||||
notification_q=notification_q,
|
||||
notification_q=None, # Now using Huey task queue
|
||||
app=app,
|
||||
datastore=ds
|
||||
)
|
||||
@@ -187,4 +187,74 @@ def construct_blueprint(datastore: ChangeDetectionStore):
|
||||
logs=notification_debug_log if len(notification_debug_log) else ["Notification logs are empty - no notifications sent yet."])
|
||||
return output
|
||||
|
||||
# Legacy routes - redirect to new notification dashboard blueprint
|
||||
@settings_blueprint.route("/failed-notifications", methods=['GET'])
|
||||
@login_optionally_required
|
||||
def failed_notifications():
|
||||
"""Redirect to new notification dashboard"""
|
||||
return redirect(url_for('notification_dashboard.dashboard'))
|
||||
|
||||
@settings_blueprint.route("/notification-log/<task_id>", methods=['GET'])
|
||||
@login_optionally_required
|
||||
def get_notification_log(task_id):
|
||||
"""Redirect to new notification dashboard log endpoint"""
|
||||
return redirect(url_for('notification_dashboard.get_notification_log', task_id=task_id))
|
||||
|
||||
@settings_blueprint.route("/retry-notification/<task_id>", methods=['POST'])
|
||||
@login_optionally_required
|
||||
def retry_notification(task_id):
|
||||
"""Redirect to new notification dashboard retry endpoint"""
|
||||
return redirect(url_for('notification_dashboard.retry_notification', task_id=task_id), code=307)
|
||||
|
||||
@settings_blueprint.route("/send-now/<task_id>", methods=['GET'])
|
||||
@login_optionally_required
|
||||
def send_now(task_id):
|
||||
"""Redirect to new notification dashboard send now endpoint"""
|
||||
return redirect(url_for('notification_dashboard.send_now', task_id=task_id))
|
||||
|
||||
@settings_blueprint.route("/retry-all-notifications", methods=['POST'])
|
||||
@login_optionally_required
|
||||
def retry_all_notifications():
|
||||
"""Redirect to new notification dashboard retry all endpoint"""
|
||||
return redirect(url_for('notification_dashboard.retry_all_notifications'), code=307)
|
||||
|
||||
@settings_blueprint.route("/clear-all-notifications", methods=['POST'])
|
||||
@login_optionally_required
|
||||
def clear_all_notifications():
|
||||
"""Redirect to new notification dashboard clear all endpoint"""
|
||||
return redirect(url_for('notification_dashboard.clear_all_notifications'), code=307)
|
||||
|
||||
@settings_blueprint.route("/api/v1/notifications/failed", methods=['GET'])
|
||||
@login_optionally_required
|
||||
def api_get_failed_notifications():
|
||||
"""API endpoint to get list of failed notifications (dead letter queue)"""
|
||||
from changedetectionio.notification.task_queue import get_failed_notifications
|
||||
from flask import jsonify, request
|
||||
|
||||
limit = request.args.get('limit', default=100, type=int)
|
||||
limit = max(1, min(1000, limit)) # Clamp to 1-1000
|
||||
|
||||
failed = get_failed_notifications(limit=limit)
|
||||
|
||||
return jsonify({
|
||||
'count': len(failed),
|
||||
'limit': limit,
|
||||
'notifications': failed
|
||||
}), 200
|
||||
|
||||
@settings_blueprint.route("/api/v1/notifications/retry-all", methods=['POST'])
|
||||
@login_optionally_required
|
||||
def api_retry_all_notifications():
|
||||
"""API endpoint to retry all failed notifications"""
|
||||
from changedetectionio.notification.task_queue import retry_all_failed_notifications
|
||||
from flask import jsonify
|
||||
|
||||
result = retry_all_failed_notifications()
|
||||
|
||||
return jsonify({
|
||||
'status': 'success' if result['failed'] == 0 else 'partial',
|
||||
'message': f"Queued {result['success']} notifications for retry",
|
||||
'details': result
|
||||
}), 200
|
||||
|
||||
return settings_blueprint
|
||||
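A hedged usage sketch for the two JSON endpoints added above. The settings blueprint is mounted under `/settings` (see the flask_app.py hunk later in this diff); the base URL and the no-password assumption are illustrative.

```python
import requests

BASE = "http://localhost:5000"  # assumption: local instance with no password enabled

# List up to 50 dead-lettered notifications
r = requests.get(f"{BASE}/settings/api/v1/notifications/failed", params={"limit": 50})
r.raise_for_status()
print(r.json()["count"], "failed notification(s)")

# Re-queue everything in the dead letter queue
r = requests.post(f"{BASE}/settings/api/v1/notifications/retry-all")
print(r.json()["message"])
```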
@@ -0,0 +1,254 @@
|
||||
{% extends 'base.html' %}
|
||||
|
||||
{% block content %}
|
||||
<div class="edit-form">
|
||||
<div class="inner notifications-dashboard">
|
||||
|
||||
<h4>Notification Queue Dashboard</h4>
|
||||
|
||||
<!-- Last Successful Notification Reference -->
|
||||
{% if last_success %}
|
||||
<div class="last-success-box">
|
||||
<h5>✅ Most Recent Successful Notification</h5>
|
||||
<div class="details">
|
||||
<div>
|
||||
<strong>Timestamp:</strong> {{ last_success.timestamp_formatted }}
|
||||
</div>
|
||||
{% if last_success.watch_url %}
|
||||
<div>
|
||||
<strong>Watch URL:</strong> <a href="{{ last_success.watch_url }}" target="_blank" style="word-break: break-all;">{{ last_success.watch_url }}</a>
|
||||
</div>
|
||||
{% endif %}
|
||||
{% if last_success.notification_urls %}
|
||||
<div>
|
||||
<strong>Sent via:</strong>
|
||||
{% for url in last_success.notification_urls %}
|
||||
<code>{{ url }}</code>
|
||||
{% endfor %}
|
||||
</div>
|
||||
{% endif %}
|
||||
{% if last_success.apprise_logs %}
|
||||
<details>
|
||||
<summary>📋 View Apprise Logs</summary>
|
||||
<pre>{% for log_line in last_success.apprise_logs %}{{ log_line }}
|
||||
{% endfor %}</pre>
|
||||
</details>
|
||||
{% endif %}
|
||||
</div>
|
||||
<p class="note">
|
||||
Use this as a reference - this notification was sent successfully with the current settings.
|
||||
</p>
|
||||
</div>
|
||||
{% endif %}
|
||||
|
||||
<!-- Action Buttons (shown when there are any notifications) -->
|
||||
{% if (pending_count and pending_count > 0) or failed_notifications|length > 0 %}
|
||||
<div class="dashboard-actions">
|
||||
{% if failed_notifications|length > 0 %}
|
||||
<form method="POST" action="{{ url_for('settings.retry_all_notifications') }}" style="display: inline-block;">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}">
|
||||
<button type="submit" class="pure-button pure-button-primary retry-all-btn" data-confirm-action data-confirm-message="Retry all {{ failed_notifications|length }} failed notifications?">
|
||||
<span class="icon-repeat"></span> Retry All ({{ failed_notifications|length }})
|
||||
</button>
|
||||
</form>
|
||||
{% endif %}
|
||||
<form method="POST" action="{{ url_for('settings.clear_all_notifications') }}" style="display: inline-block;" onsubmit="return confirm('⚠️ WARNING: This will DELETE ALL notifications:\n\n- {{ pending_count if pending_count else 0 }} Pending/Retrying\n- {{ failed_notifications|length }} Failed\n- All retry attempts\n\nThis action cannot be undone!\n\nAre you sure?');">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}">
|
||||
<button type="submit" class="pure-button clear-all-btn">
|
||||
<span class="icon-trash"></span> Clear All
|
||||
</button>
|
||||
</form>
|
||||
</div>
|
||||
{% endif %}
|
||||
|
||||
<!-- Two-Column Dashboard Layout (responsive: stacks on mobile) -->
|
||||
<div class="dashboard-grid">
|
||||
|
||||
<!-- LEFT COLUMN: Pending/Retrying Notifications -->
|
||||
<div class="pending-column">
|
||||
<div class="column-header">
|
||||
<h5>🔄 Pending / Retrying</h5>
|
||||
<div class="count-badge">{{ pending_count if pending_count is not none else '?' }}</div>
|
||||
</div>
|
||||
<p class="column-description">Notifications currently queued or being retried</p>
|
||||
|
||||
{% if pending_list %}
|
||||
<div class="notification-cards">
|
||||
{% for item in pending_list %}
|
||||
<div class="notification-card pending-card" data-task-id="{{ item.task_id }}">
|
||||
<div class="card-header">
|
||||
<span class="status-badge {{ item.status }}">
|
||||
{% if item.status == 'queued' %}QUEUED{% else %}RETRYING{% if item.retry_number is defined and item.total_retries is defined %} ({{ item.retry_number }}/{{ item.total_retries }}){% endif %}{% endif %}
|
||||
</span>
|
||||
{% if item.watch_uuid %}
|
||||
<a href="/edit/{{ item.watch_uuid }}">Watch: {{ item.watch_uuid[:8] }}...</a>
|
||||
{% endif %}
|
||||
</div>
|
||||
{% if item.task_id %}
|
||||
<div class="notification-id">ID: {{ item.task_id[:20] }}...</div>
|
||||
{% endif %}
|
||||
{% if item.queued_at_formatted %}
|
||||
<div class="notification-queued-time">Queued: {{ item.queued_at_formatted }}</div>
|
||||
{% endif %}
|
||||
{% if item.watch_url %}
|
||||
<div class="notification-target"><strong>Target:</strong> {{ item.watch_url }}</div>
|
||||
{% endif %}
|
||||
{% if item.status == 'retrying' %}
|
||||
<div class="retry-info">
|
||||
<span>
|
||||
⏰ Next retry: <span class="retry-time" data-timestamp="{{ item.retry_at_timestamp }}">{{ item.retry_at_formatted }}</span>
|
||||
{% if item.retry_in_seconds > 0 %}
|
||||
(in {{ item.retry_in_seconds }}s)
|
||||
{% endif %}
|
||||
</span>
|
||||
{% if item.task_id %}
|
||||
<a href="{{ url_for('settings.send_now', task_id=item.task_id) }}" class="pure-button send-now-btn" title="Send this notification now (cancel scheduled retry)">Send Now</a>
|
||||
{% endif %}
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
{% endfor %}
|
||||
</div>
|
||||
{% else %}
|
||||
<div class="empty-state pending-empty">✅ No pending notifications</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
|
||||
<!-- RIGHT COLUMN: Failed (Dead Letter) Notifications -->
|
||||
<div class="failed-column">
|
||||
<div class="column-header">
|
||||
<h5>💀 Failed (Dead Letter)</h5>
|
||||
<div class="count-badge {% if failed_notifications|length == 0 %}no-failures{% else %}has-failures{% endif %}">
|
||||
{{ failed_notifications|length }}
|
||||
</div>
|
||||
</div>
|
||||
<p class="column-description">Exhausted all retry attempts</p>
|
||||
|
||||
{% if failed_notifications|length > 0 %}
|
||||
<div class="notification-cards">
|
||||
{% for notification in failed_notifications %}
|
||||
<div class="notification-card failed-card" data-task-id="{{ notification.task_id }}">
|
||||
<div class="card-header">
|
||||
<span class="status-badge failed">FAILED</span>
|
||||
{% if notification.notification_data and notification.notification_data.get('uuid') %}
|
||||
<a href="/edit/{{ notification.notification_data.get('uuid') }}">
|
||||
Watch: {{ notification.notification_data.get('uuid')[:8] }}...
|
||||
</a>
|
||||
{% endif %}
|
||||
</div>
|
||||
{% if notification.task_id %}
|
||||
<div class="notification-id">ID: {{ notification.task_id[:20] }}...</div>
|
||||
{% endif %}
|
||||
{% if notification.notification_data and notification.notification_data.get('watch_url') %}
|
||||
<div class="notification-target"><strong>Target:</strong> {{ notification.notification_data.get('watch_url') }}</div>
|
||||
{% endif %}
|
||||
{% if notification.notification_data and notification.notification_data.get('notification_urls') %}
|
||||
<div class="notification-endpoints">
|
||||
<strong>Notification endpoints:</strong>
|
||||
{% for url in notification.notification_data.get('notification_urls') %}
|
||||
<div class="endpoint-item">• {{ url }}</div>
|
||||
{% endfor %}
|
||||
</div>
|
||||
{% endif %}
|
||||
{% if notification.timestamp %}
|
||||
<div class="failure-time">
|
||||
Failed: {{ notification.timestamp_formatted }}
|
||||
{% if notification.days_ago is defined %}
|
||||
({{ notification.days_ago }} day{{ 's' if notification.days_ago != 1 else '' }} ago)
|
||||
{% endif %}
|
||||
</div>
|
||||
{% endif %}
|
||||
<form method="POST" action="{{ url_for('settings.retry_notification', task_id=notification.task_id) }}" class="retry-form">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}">
|
||||
<button type="submit" class="pure-button">
|
||||
<span class="icon-repeat"></span> Retry This
|
||||
</button>
|
||||
</form>
|
||||
</div>
|
||||
{% endfor %}
|
||||
</div>
|
||||
{% else %}
|
||||
<div class="empty-state failed-empty">✅ No failed notifications</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Last Clicked Notification Log Info -->
|
||||
<div id="last-log-info" style="display: none;" class="log-info-box">
|
||||
<div class="log-header">
|
||||
<h5>📋 Notification Log Details</h5>
|
||||
<button class="close-btn" onclick="document.getElementById('last-log-info').style.display='none'">✕</button>
|
||||
</div>
|
||||
<div class="log-content">
|
||||
<div class="log-meta">
|
||||
<div><strong>Task ID:</strong> <span id="log-task-id" class="notification-id"></span></div>
|
||||
<div id="log-watch-url-container" style="display: none;">
|
||||
<strong>Watch URL:</strong> <span id="log-watch-url"></span>
|
||||
</div>
|
||||
<div id="log-notification-urls-container" style="display: none;">
|
||||
<strong>Notification endpoints:</strong>
|
||||
<div id="log-notification-urls" class="endpoint-list"></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="log-body">
|
||||
<h6>Apprise Log:</h6>
|
||||
<pre id="log-apprise-content"></pre>
|
||||
</div>
|
||||
<div id="log-error-container" style="display: none;" class="log-error">
|
||||
<h6>Error:</h6>
|
||||
<pre id="log-error-content"></pre>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Retry Schedule Information -->
|
||||
<div class="retry-schedule">
|
||||
<h5>Automatic Retry Schedule (Exponential Backoff)</h5>
|
||||
<p>
|
||||
Notifications are automatically retried <strong>{{ retry_config.retry_count }} times</strong> with <strong>exponential backoff</strong> starting at {{ retry_config.retry_delay_seconds }} seconds.
|
||||
</p>
|
||||
<table class="pure-table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>Attempt</th>
|
||||
<th>Time</th>
|
||||
<th>Action</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td><strong>1st</strong> (initial)</td>
|
||||
<td>T+0:00</td>
|
||||
<td>⚠️ Fails (e.g., SMTP server down)</td>
|
||||
</tr>
|
||||
{% for i in range(retry_config.retry_count) %}
|
||||
{% set delay = retry_config.retry_delays[i] %}
|
||||
{% set cumulative_time = retry_config.retry_delays[:i+1]|sum %}
|
||||
<tr>
|
||||
<td><strong>{{ i + 2 }}{{ ['st', 'nd', 'rd'][i + 1] if i + 1 < 3 else 'th' }}</strong> (retry {{ i + 1 }})</td>
|
||||
<td>T+{{ '%d:%02d' % (cumulative_time // 60, cumulative_time % 60) }}</td>
|
||||
<td>{% if i < retry_config.retry_count - 1 %}⚠️ Fails → Wait {{ delay }}s ({{ '%d:%02d' % (delay // 60, delay % 60) }}){% else %}⚠️ Fails → Give up{% endif %}</td>
|
||||
</tr>
|
||||
{% endfor %}
|
||||
<tr style="background: #fff3cd;">
|
||||
<td><strong>Dead Letter</strong></td>
|
||||
<td>T+{{ '%d:%02d' % (retry_config.total_time_seconds // 60, retry_config.total_time_seconds % 60) }}</td>
|
||||
<td>💀 Moved to this failed notifications list</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
<p class="note">
|
||||
<strong>Total:</strong> {{ retry_config.total_attempts }} attempts over {{ '%d:%02d' % (retry_config.total_time_seconds // 60, retry_config.total_time_seconds % 60) }} (mm:ss).
|
||||
Failed notifications are kept for 30 days, then automatically deleted.
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<!-- Hidden data elements for JavaScript -->
|
||||
<div id="log-url-template" data-url="{{ url_for('settings.get_notification_log', task_id='TASK_ID') }}" style="display: none;"></div>
|
||||
|
||||
<script src="{{url_for('static_content', group='js', filename='notification-dashboard.js')}}" defer></script>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{% endblock %}
|
||||
@@ -46,7 +46,7 @@ def construct_blueprint(datastore: ChangeDetectionStore):
|
||||
if request.form.get('tags') and request.form['tags'].strip():
|
||||
for k in request.form['tags'].split(','):
|
||||
tag = datastore.tag_exists_by_name(k.strip())
|
||||
notification_urls = tag.get('notifications_urls') if tag and tag.get('notifications_urls') else None
|
||||
notification_urls = tag.get('notification_urls') if tag and tag.get('notification_urls') else None
|
||||
|
||||
if not notification_urls and not is_global_settings_form and not is_group_settings_form:
|
||||
# In the global settings, use only what is typed currently in the text box
|
||||
|
||||
@@ -12,7 +12,7 @@ from blinker import signal
|
||||
|
||||
from changedetectionio.strtobool import strtobool
|
||||
from threading import Event
|
||||
from changedetectionio.queue_handlers import RecheckPriorityQueue, NotificationQueue
|
||||
from changedetectionio.queue_handlers import RecheckPriorityQueue # NotificationQueue deprecated - now using Huey
|
||||
from changedetectionio import worker_handler
|
||||
|
||||
from flask import (
|
||||
@@ -52,9 +52,9 @@ datastore = None
|
||||
ticker_thread = None
|
||||
extra_stylesheets = []
|
||||
|
||||
# Use bulletproof janus-based queues for sync/async reliability
|
||||
# Use bulletproof janus-based queues for sync/async reliability
|
||||
update_q = RecheckPriorityQueue()
|
||||
notification_q = NotificationQueue()
|
||||
# notification_q = NotificationQueue() # DEPRECATED: Now using Huey task queue
|
||||
MAX_QUEUE_SIZE = 2000
|
||||
|
||||
app = Flask(__name__,
|
||||
@@ -195,7 +195,7 @@ def _get_worker_status_info():
|
||||
"""Get detailed worker status information for display"""
|
||||
status = worker_handler.get_worker_status()
|
||||
running_uuids = worker_handler.get_running_uuids()
|
||||
|
||||
|
||||
return {
|
||||
'count': status['worker_count'],
|
||||
'type': status['worker_type'],
|
||||
@@ -204,6 +204,17 @@ def _get_worker_status_info():
|
||||
'loop_running': status.get('async_loop_running', None)
|
||||
}
|
||||
|
||||
@app.template_global('get_failed_notifications_count')
|
||||
def _get_failed_notifications_count():
|
||||
"""Check if there are any failed notifications in dead letter queue"""
|
||||
try:
|
||||
from changedetectionio.notification.task_queue import get_failed_notifications
|
||||
failed = get_failed_notifications(limit=1) # Just check if any exist
|
||||
return len(failed)
|
||||
except Exception as e:
|
||||
logger.debug(f"Unable to get failed notifications count: {e}")
|
||||
return 0
|
||||
|
||||
|
||||
# We use the whole watch object from the store/JSON so we can see if there's some related status in terms of a thread
|
||||
# running or something similar.
|
||||
@@ -750,6 +761,9 @@ def changedetection_app(config=None, datastore_o=None):
|
||||
import changedetectionio.blueprint.settings as settings
|
||||
app.register_blueprint(settings.construct_blueprint(datastore), url_prefix='/settings')
|
||||
|
||||
import changedetectionio.blueprint.notification_dashboard as notification_dashboard
|
||||
app.register_blueprint(notification_dashboard.construct_blueprint(), url_prefix='/notification-dashboard')
|
||||
|
||||
import changedetectionio.conditions.blueprint as conditions
|
||||
app.register_blueprint(conditions.construct_blueprint(datastore), url_prefix='/conditions')
|
||||
|
||||
@@ -799,7 +813,7 @@ def changedetection_app(config=None, datastore_o=None):
|
||||
health_result = worker_handler.check_worker_health(
|
||||
expected_count=expected_workers,
|
||||
update_q=update_q,
|
||||
notification_q=notification_q,
|
||||
notification_q=None, # Now using Huey task queue
|
||||
app=app,
|
||||
datastore=datastore
|
||||
)
|
||||
@@ -860,11 +874,22 @@ def changedetection_app(config=None, datastore_o=None):
|
||||
# Can be overridden by ENV or use the default settings
|
||||
n_workers = int(os.getenv("FETCH_WORKERS", datastore.data['settings']['requests']['workers']))
|
||||
logger.info(f"Starting {n_workers} workers during app initialization")
|
||||
worker_handler.start_workers(n_workers, update_q, notification_q, app, datastore)
|
||||
# Pass None for notification_q - now using Huey task queue directly
|
||||
worker_handler.start_workers(n_workers, update_q, None, app, datastore)
|
||||
|
||||
# Initialize Huey task queue for notifications
|
||||
from changedetectionio.notification.task_queue import init_huey, init_huey_task, start_huey_consumer_with_watchdog
|
||||
init_huey(datastore.datastore_path)
|
||||
init_huey_task() # Apply task decorator
|
||||
|
||||
# Start Huey consumer for notification processing (replaces notification_runner)
|
||||
# Watchdog will automatically restart consumer if it crashes
|
||||
# Queued notifications are persistent and won't be lost during crashes
|
||||
# Uses app.config.exit for clean shutdown (same pattern as other threads)
|
||||
threading.Thread(target=start_huey_consumer_with_watchdog, args=(app,), daemon=True).start()
|
||||
|
||||
# @todo handle ctrl break
|
||||
ticker_thread = threading.Thread(target=ticker_thread_check_time_launch_checks, daemon=True, name="TickerThread-ScheduleChecker").start()
|
||||
threading.Thread(target=notification_runner, daemon=True, name="NotificationRunner").start()
|
||||
|
||||
in_pytest = "pytest" in sys.modules or "PYTEST_CURRENT_TEST" in os.environ
|
||||
# Check for new release version, but not when running in test/build or pytest
|
||||
@@ -904,56 +929,10 @@ def check_for_new_version():
|
||||
app.config.exit.wait(86400)
|
||||
|
||||
|
||||
def notification_runner():
|
||||
global notification_debug_log
|
||||
from datetime import datetime
|
||||
import json
|
||||
with app.app_context():
|
||||
while not app.config.exit.is_set():
|
||||
try:
|
||||
# At the moment only one thread runs (single runner)
|
||||
n_object = notification_q.get(block=False)
|
||||
except queue.Empty:
|
||||
app.config.exit.wait(1)
|
||||
|
||||
else:
|
||||
|
||||
now = datetime.now()
|
||||
sent_obj = None
|
||||
|
||||
try:
|
||||
from changedetectionio.notification.handler import process_notification
|
||||
|
||||
# Fallback to system config if not set
|
||||
if not n_object.get('notification_body') and datastore.data['settings']['application'].get('notification_body'):
|
||||
n_object['notification_body'] = datastore.data['settings']['application'].get('notification_body')
|
||||
|
||||
if not n_object.get('notification_title') and datastore.data['settings']['application'].get('notification_title'):
|
||||
n_object['notification_title'] = datastore.data['settings']['application'].get('notification_title')
|
||||
|
||||
if not n_object.get('notification_format') and datastore.data['settings']['application'].get('notification_format'):
|
||||
n_object['notification_format'] = datastore.data['settings']['application'].get('notification_format')
|
||||
if n_object.get('notification_urls', {}):
|
||||
sent_obj = process_notification(n_object, datastore)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Watch URL: {n_object['watch_url']} Error {str(e)}")
|
||||
|
||||
# UUID wont be present when we submit a 'test' from the global settings
|
||||
if 'uuid' in n_object:
|
||||
datastore.update_watch(uuid=n_object['uuid'],
|
||||
update_obj={'last_notification_error': "Notification error detected, goto notification log."})
|
||||
|
||||
log_lines = str(e).splitlines()
|
||||
notification_debug_log += log_lines
|
||||
|
||||
with app.app_context():
|
||||
app.config['watch_check_update_SIGNAL'].send(app_context=app, watch_uuid=n_object.get('uuid'))
|
||||
|
||||
# Process notifications
|
||||
notification_debug_log+= ["{} - SENDING - {}".format(now.strftime("%c"), json.dumps(sent_obj))]
|
||||
# Trim the log length
|
||||
notification_debug_log = notification_debug_log[-100:]
|
||||
# DEPRECATED: notification_runner has been replaced by Huey task queue
|
||||
# All logic from this function has been moved to changedetectionio/notification/task_queue.py
|
||||
# in the send_notification_task() function with automatic retry logic and persistent queuing
|
||||
# See: changedetectionio/notification/task_queue.py - send_notification_task()
|
||||
|
||||
|
||||
|
||||
@@ -977,7 +956,7 @@ def ticker_thread_check_time_launch_checks():
|
||||
health_result = worker_handler.check_worker_health(
|
||||
expected_count=expected_workers,
|
||||
update_q=update_q,
|
||||
notification_q=notification_q,
|
||||
notification_q=None, # Now using Huey task queue
|
||||
app=app,
|
||||
datastore=datastore
|
||||
)
|
||||
|
||||
changedetectionio/notification/exceptions.py (new file, 40 lines)
@@ -0,0 +1,40 @@
"""
Notification module exceptions
"""


class AppriseNotificationException(Exception):
    """
    Exception raised when an Apprise notification fails to send (network, authentication, etc.).

    These are transient failures that should be retried with exponential backoff.

    Includes the fully rendered notification content (sent_objs) that was attempted,
    so we can show exactly what failed even when the send doesn't succeed.

    Attributes:
        sent_objs: List of rendered notification objects with title, body, url
    """
    def __init__(self, message, sent_objs=None):
        super().__init__(message)
        self.sent_objs = sent_objs or []


class WatchNotFoundException(Exception):
    """
    Exception raised when the watch being notified for no longer exists.

    This is a non-recoverable error that should NOT be retried.
    The notification should be immediately marked as failed/dead-lettered.
    """
    pass


class NotificationConfigurationException(Exception):
    """
    Exception raised when the notification configuration is invalid.

    This is a non-recoverable error that should NOT be retried.
    Examples: invalid notification URLs, missing required fields, etc.
    """
    pass
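The docstrings above split failures into transient (retry) and non-recoverable (dead-letter) cases. A hedged sketch of how a Huey task could honour that split; the real send_notification_task() lives in task_queue.py, which is not shown in this diff, so the decorator arguments and helper functions below are assumptions.

```python
# Hedged sketch only - not the project's real send_notification_task().
from huey import SqliteHuey

from changedetectionio.notification.exceptions import (
    AppriseNotificationException,
    NotificationConfigurationException,
    WatchNotFoundException,
)

huey = SqliteHuey('notifications', filename='/tmp/huey-notifications.db')


def deliver(n_object):
    """Hypothetical stand-in for calling process_notification(n_object, datastore)."""
    ...


def record_dead_letter(n_object):
    """Hypothetical stand-in for marking the notification as permanently failed."""
    ...


@huey.task(retries=3, retry_delay=60)  # values are illustrative, not the project's real config
def send_notification_task(n_object):
    try:
        deliver(n_object)
    except (WatchNotFoundException, NotificationConfigurationException):
        # Non-recoverable: swallow so Huey does not retry this task again
        record_dead_letter(n_object)
    except AppriseNotificationException:
        # Transient: re-raise so Huey schedules another attempt (retries/retry_delay above)
        raise
```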
@@ -13,6 +13,7 @@ from ..diff import HTML_REMOVED_STYLE, REMOVED_PLACEMARKER_OPEN, REMOVED_PLACEMA
import re

from ..notification_service import NotificationContextData, add_rendered_diff_to_notification_vars
from .exceptions import AppriseNotificationException

newline_re = re.compile(r'\r\n|\r|\n')

@@ -443,8 +444,10 @@ def process_notification(n_object: NotificationContextData, datastore):
        if not '<pre' in n_body and not '<body' in n_body:  # No custom HTML-ish body was setup already
            n_body = as_monospaced_html_email(content=n_body, title=n_title)

        # Send the notification and capture return value (True if any succeeded, False if all failed)
        notification_success = True
        if not url.startswith('null://'):
            apobj.notify(
            notification_success = apobj.notify(
                title=n_title,
                body=n_body,
                # `body_format` Tell apprise what format the INPUT is in, specify a wrong/bad type and it will force skip conversion in apprise

@@ -457,9 +460,16 @@ def process_notification(n_object: NotificationContextData, datastore):
        # Returns empty string if nothing found, multi-line string otherwise
        log_value = logs.getvalue()

        if log_value and ('WARNING' in log_value or 'ERROR' in log_value):
            logger.critical(log_value)
            raise Exception(log_value)
        # Check both Apprise return value AND log capture for failures
        if not notification_success:
            error_msg = f"Apprise notification failed - all notification URLs returned False"
            if log_value:
                error_msg += f"\nApprise logs:\n{log_value}"
            logger.critical(error_msg)
            raise AppriseNotificationException(error_msg, sent_objs=sent_objs)
        elif log_value and ('WARNING' in log_value or 'ERROR' in log_value):
            logger.critical(f"Apprise warning/error detected:\n{log_value}")
            raise AppriseNotificationException(log_value, sent_objs=sent_objs)

    # Return what was sent for better logging - after the for loop
    return sent_objs

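The second hunk above keys failure off Apprise's boolean return value as well as the captured log. A tiny standalone illustration of that behaviour with the apprise library (the endpoint URL is deliberately unreachable and purely illustrative):

```python
import apprise

apobj = apprise.Apprise()
apobj.add('jsons://127.0.0.1:1/unreachable-endpoint')

# notify() returns False when every configured endpoint fails to send
ok = apobj.notify(title='changedetection.io test', body='hello')
if not ok:
    # handler.py raises AppriseNotificationException at this point, attaching the
    # rendered payload (sent_objs) so the dashboard can show exactly what failed.
    print('all notification URLs returned False')
```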
changedetectionio/notification/message_unpacker.py (new file, 153 lines)
@@ -0,0 +1,153 @@
|
||||
"""
|
||||
Huey Message Unpacker - Centralized pickle unpacking for Huey messages.
|
||||
|
||||
Eliminates duplicate code and centralizes error handling for unpacking
|
||||
Huey's pickled messages from queue and schedule storage.
|
||||
"""
|
||||
|
||||
import pickle
|
||||
from typing import Optional, Tuple, Dict
|
||||
from datetime import datetime
|
||||
from loguru import logger
|
||||
|
||||
|
||||
class HueyMessageUnpacker:
|
||||
"""
|
||||
Utility class for unpacking Huey messages safely and consistently.
|
||||
|
||||
Handles:
|
||||
- Pickle deserialization errors
|
||||
- Revoked task filtering
|
||||
- Notification data extraction
|
||||
- Scheduled task ETA extraction
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def unpack_queued_notification(pickled_bytes, huey) -> Optional[Tuple[str, Dict]]:
|
||||
"""
|
||||
Unpack a queued (immediate execution) Huey message.
|
||||
|
||||
Args:
|
||||
pickled_bytes: Pickled Huey message from queue
|
||||
huey: Huey instance for revocation checks
|
||||
|
||||
Returns:
|
||||
(task_id, notification_data) or None if revoked/invalid
|
||||
"""
|
||||
try:
|
||||
message = pickle.loads(pickled_bytes)
|
||||
|
||||
# Extract task ID
|
||||
task_id = message.id if hasattr(message, 'id') else None
|
||||
|
||||
# Skip revoked tasks
|
||||
if task_id and huey.is_revoked(task_id):
|
||||
logger.debug(f"Skipping revoked task {task_id}")
|
||||
return None
|
||||
|
||||
# Extract notification data from message args
|
||||
if hasattr(message, 'args') and message.args:
|
||||
notification_data = message.args[0]
|
||||
return (task_id, notification_data)
|
||||
else:
|
||||
logger.debug(f"Message {task_id} has no args")
|
||||
return None
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error unpacking queued message: {e}")
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def unpack_scheduled_notification(pickled_bytes, huey) -> Optional[Tuple[str, Dict, Optional[datetime]]]:
|
||||
"""
|
||||
Unpack a scheduled (retry/delayed) Huey message.
|
||||
|
||||
Args:
|
||||
pickled_bytes: Pickled Huey message from schedule
|
||||
huey: Huey instance for revocation checks
|
||||
|
||||
Returns:
|
||||
(task_id, notification_data, eta) or None if revoked/invalid
|
||||
eta is a datetime object representing when the task should execute
|
||||
"""
|
||||
try:
|
||||
message = pickle.loads(pickled_bytes)
|
||||
|
||||
# Extract task ID
|
||||
task_id = message.id if hasattr(message, 'id') else None
|
||||
|
||||
# Skip revoked tasks
|
||||
if task_id and huey.is_revoked(task_id):
|
||||
logger.debug(f"Skipping revoked scheduled task {task_id}")
|
||||
return None
|
||||
|
||||
# Extract notification data from message args
|
||||
if not (hasattr(message, 'args') and message.args):
|
||||
logger.debug(f"Scheduled message {task_id} has no args")
|
||||
return None
|
||||
|
||||
notification_data = message.args[0]
|
||||
|
||||
# Extract ETA (when task should execute)
|
||||
eta = message.eta if hasattr(message, 'eta') else None
|
||||
|
||||
return (task_id, notification_data, eta)
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error unpacking scheduled message: {e}")
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def calculate_retry_timing(eta: Optional[datetime]) -> Tuple[int, str]:
|
||||
"""
|
||||
Calculate retry timing information from ETA.
|
||||
|
||||
Args:
|
||||
eta: datetime when task should execute (may be naive or timezone-aware)
|
||||
|
||||
Returns:
|
||||
(retry_in_seconds, eta_formatted) - seconds until retry and formatted time
|
||||
"""
|
||||
if not eta:
|
||||
return (0, 'Unknown')
|
||||
|
||||
try:
|
||||
# Handle both naive and timezone-aware datetimes
|
||||
if eta.tzinfo is not None:
|
||||
# Timezone-aware
|
||||
now = datetime.now(eta.tzinfo)
|
||||
# Convert to local timezone for display
|
||||
local_tz = datetime.now().astimezone().tzinfo
|
||||
eta_local = eta.astimezone(local_tz)
|
||||
eta_formatted = eta_local.strftime('%Y-%m-%d %H:%M:%S %Z')
|
||||
else:
|
||||
# Naive datetime
|
||||
now = datetime.now()
|
||||
eta_formatted = eta.strftime('%Y-%m-%d %H:%M:%S')
|
||||
|
||||
retry_in_seconds = int((eta - now).total_seconds())
|
||||
return (retry_in_seconds, eta_formatted)
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error calculating retry timing: {e}")
|
||||
return (0, 'Unknown')
|
||||
|
||||
@staticmethod
|
||||
def extract_task_id_from_scheduled(pickled_bytes) -> Optional[str]:
|
||||
"""
|
||||
Quick extraction of just the task ID from a scheduled message.
|
||||
|
||||
Used for checking if a task is still scheduled without full unpacking.
|
||||
|
||||
Args:
|
||||
pickled_bytes: Pickled Huey message
|
||||
|
||||
Returns:
|
||||
task_id string or None
|
||||
"""
|
||||
try:
|
||||
message = pickle.loads(pickled_bytes)
|
||||
return message.id if hasattr(message, 'id') else None
|
||||
except Exception as e:
|
||||
logger.debug(f"Error extracting task ID: {e}")
|
||||
return None
|
||||
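A short usage sketch: the snippet below lists whatever is still sitting in the immediate queue, assuming an already-configured huey instance (the enqueued_items() call mirrors how the state retriever further down uses it).

from changedetectionio.notification.message_unpacker import HueyMessageUnpacker

def list_pending_watches(huey, limit=20):
    # Walk the immediate queue and print which watches still have notifications waiting
    for raw in list(huey.storage.enqueued_items(limit=limit)):
        unpacked = HueyMessageUnpacker.unpack_queued_notification(raw, huey)
        if unpacked is None:
            continue  # revoked or malformed message
        task_id, n_data = unpacked
        print(task_id, n_data.get('watch_url', 'Unknown'))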
273
changedetectionio/notification/retry_service.py
Normal file
@@ -0,0 +1,273 @@
"""
Notification Retry Service - Centralized retry logic for failed notifications.

Handles:
- Manual immediate retry ("Send Now" button)
- Failed notification retry from dead letter queue
- Batch retry of all failed notifications
- Config reload with cascading priority
"""

from loguru import logger
from changedetectionio.notification_service import NotificationContextData, _check_cascading_vars


class NotificationRetryService:
    """
    Service for retrying failed and scheduled notifications.

    Encapsulates all retry logic including config reloading, task revocation,
    and re-queueing with proper cleanup.
    """

    def __init__(self, huey, datastore):
        """
        Initialize retry service.

        Args:
            huey: Huey instance for task management
            datastore: ChangeDetectionStore instance for watch lookups
        """
        self.huey = huey
        self.datastore = datastore

    def retry_now(self, task_id):
        """
        Manually retry a scheduled/retrying notification immediately.

        Used by "Send Now" button in UI. Revokes the scheduled task and executes
        the notification synchronously in the current thread.

        Args:
            task_id: Huey task ID to retry immediately

        Returns:
            bool: True if successfully executed, False otherwise
        """
        if self.huey is None:
            logger.error("Huey not initialized")
            return False

        try:
            # Find the scheduled task
            notification_data = self._find_scheduled_task(task_id)
            if not notification_data:
                logger.error(f"Task {task_id} not found in schedule")
                return False

            # Revoke scheduled task FIRST to prevent race condition
            self.huey.revoke_by_id(task_id, revoke_once=True)
            logger.info(f"Revoked scheduled task {task_id} before execution")

            # Execute notification synchronously in current thread
            success = self._execute_notification_sync(notification_data, task_id)

            if success:
                # Clean up old metadata and result
                from changedetectionio.notification.task_queue import _delete_result, _delete_task_metadata
                _delete_result(task_id)
                _delete_task_metadata(task_id)
                logger.info(f"✓ Notification sent successfully for task {task_id}")
                return True
            else:
                # Re-queue for automatic retry if manual send failed
                self._requeue_for_retry(notification_data)
                return False

        except Exception as e:
            logger.error(f"Error executing scheduled notification {task_id}: {e}")
            return False

    def retry_failed(self, task_id):
        """
        Retry a failed notification from dead letter queue.

        Removes the task from dead letter queue and re-queues it.
        If it fails again, it will go back to the dead letter queue.

        Args:
            task_id: Huey task ID to retry

        Returns:
            bool: True if successfully queued for retry, False otherwise
        """
        if self.huey is None:
            logger.error("Huey not initialized")
            return False

        try:
            # Get task metadata from storage
            from changedetectionio.notification.task_queue import _get_task_metadata, _delete_result, _delete_task_metadata, queue_notification

            task_metadata = _get_task_metadata(task_id)
            if not task_metadata:
                logger.error(f"Task metadata for {task_id} not found")
                return False

            # Extract notification data
            notification_data = task_metadata.get('notification_data', {})
            if not notification_data:
                logger.error(f"No notification data found for task {task_id}")
                return False

            # Re-queue with current settings
            queue_notification(notification_data)

            # Remove from dead letter queue
            _delete_result(task_id)
            _delete_task_metadata(task_id)

            logger.info(f"Re-queued failed notification task {task_id}")
            return True

        except Exception as e:
            logger.error(f"Error retrying notification {task_id}: {e}")
            return False

    def retry_all_failed(self):
        """
        Retry all failed notifications in the dead letter queue.

        Returns:
            dict: {
                'success': int,  # Number successfully re-queued
                'failed': int,   # Number that failed to re-queue
                'total': int     # Total processed
            }
        """
        if self.huey is None:
            return {'success': 0, 'failed': 0, 'total': 0}

        success_count = 0
        failed_count = 0

        try:
            from huey.utils import Error as HueyError
            from changedetectionio.notification.task_queue import _enumerate_results

            # Get all failed tasks from result store
            results = _enumerate_results()

            for task_id, result in results.items():
                if isinstance(result, (Exception, HueyError)):
                    # Try to retry this failed notification
                    if self.retry_failed(task_id):
                        success_count += 1
                    else:
                        failed_count += 1

            total = success_count + failed_count
            logger.info(f"Retry all: {success_count} succeeded, {failed_count} failed, {total} total")

            return {
                'success': success_count,
                'failed': failed_count,
                'total': total
            }

        except Exception as e:
            logger.error(f"Error retrying all failed notifications: {e}")
            return {
                'success': success_count,
                'failed': failed_count,
                'total': success_count + failed_count
            }

    def reload_notification_config(self, n_object, watch):
        """
        Reload notification_urls and notification_format with cascading priority.

        Priority: Watch settings > Tag settings > Global settings

        This is done on every send/retry to allow operators to fix broken
        notification settings and retry with corrected configuration.

        Args:
            n_object: NotificationContextData object to update
            watch: Watch object
            datastore: Datastore instance

        Raises:
            Exception: If no notification_urls defined after cascading check
        """
        n_object['notification_urls'] = _check_cascading_vars(self.datastore, 'notification_urls', watch)
        n_object['notification_format'] = _check_cascading_vars(self.datastore, 'notification_format', watch)

        if not n_object.get('notification_urls'):
            raise Exception("No notification_urls defined after checking cascading (Watch > Tag > System)")

    # Private helper methods

    def _find_scheduled_task(self, task_id):
        """
        Find a scheduled task by ID and return its notification data.

        Args:
            task_id: Task ID to find

        Returns:
            dict: Notification data or None if not found
        """
        from changedetectionio.notification.message_unpacker import HueyMessageUnpacker

        try:
            scheduled_items = list(self.huey.storage.scheduled_items())

            for scheduled_bytes in scheduled_items:
                result = HueyMessageUnpacker.unpack_scheduled_notification(scheduled_bytes, self.huey)
                if result is None:
                    continue

                found_task_id, notification_data, _ = result
                if found_task_id == task_id:
                    return notification_data

        except Exception as e:
            logger.debug(f"Error finding scheduled task: {e}")

        return None

    def _execute_notification_sync(self, notification_data, task_id):
        """
        Execute notification synchronously in current thread.

        Args:
            notification_data: Notification data dict
            task_id: Task ID for logging

        Returns:
            bool: True if successful, False otherwise
        """
        try:
            from changedetectionio.notification.handler import process_notification

            # Wrap in NotificationContextData if needed
            if not isinstance(notification_data, NotificationContextData):
                notification_data = NotificationContextData(notification_data)

            # Execute synchronously (not via Huey queue)
            logger.info(f"Executing notification for task {task_id} immediately...")
            process_notification(notification_data, self.datastore)

            return True

        except Exception as e:
            logger.warning(f"Failed to send notification for task {task_id}: {e}")
            return False

    def _requeue_for_retry(self, notification_data):
        """
        Re-queue a notification for automatic retry after manual send failed.

        Args:
            notification_data: Notification data to re-queue
        """
        try:
            from changedetectionio.notification.task_queue import send_notification_task

            logger.info("Re-queueing notification for automatic retry after manual send failed")
            send_notification_task(notification_data)
            logger.info("Re-queued notification successfully")

        except Exception as e:
            logger.error(f"Failed to re-queue notification: {e}")
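For illustration, assuming huey and datastore objects are already constructed elsewhere in the application, re-queueing everything in the dead letter queue is only a few lines:

from changedetectionio.notification.retry_service import NotificationRetryService

def retry_everything(huey, datastore):
    service = NotificationRetryService(huey=huey, datastore=datastore)
    return service.retry_all_failed()  # e.g. {'success': 3, 'failed': 0, 'total': 3}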
655
changedetectionio/notification/state_retriever.py
Normal file
@@ -0,0 +1,655 @@
|
||||
"""
|
||||
Notification State Retriever - Centralized logic for retrieving notification state.
|
||||
|
||||
Handles:
|
||||
- Pending/queued notifications (from Huey queue and schedule)
|
||||
- Failed notifications (from dead letter queue)
|
||||
- Delivered notifications (from audit trail storage)
|
||||
- Unified event timeline for UI
|
||||
- Apprise logs for individual tasks
|
||||
"""
|
||||
|
||||
from loguru import logger
|
||||
from changedetectionio.notification.message_unpacker import HueyMessageUnpacker
|
||||
from changedetectionio.notification_service import timestamp_to_localtime
|
||||
|
||||
|
||||
class NotificationStateRetriever:
|
||||
"""
|
||||
Service for retrieving notification state from various sources.
|
||||
|
||||
Provides unified interface for accessing:
|
||||
- Pending notifications (queued + scheduled/retrying)
|
||||
- Failed notifications (dead letter queue)
|
||||
- Delivered notifications (audit trail)
|
||||
- Unified event timeline
|
||||
"""
|
||||
|
||||
def __init__(self, huey, task_data_manager, task_manager, retry_count=2):
|
||||
"""
|
||||
Initialize state retriever service.
|
||||
|
||||
Args:
|
||||
huey: Huey instance for queue/schedule access
|
||||
task_data_manager: Task data storage manager for retry attempts and delivered notifications
|
||||
task_manager: Task manager for result store and metadata access
|
||||
retry_count: Number of retries (default: 2, from NOTIFICATION_RETRY_COUNT)
|
||||
"""
|
||||
self.huey = huey
|
||||
self.task_data_manager = task_data_manager
|
||||
self.task_manager = task_manager
|
||||
self.retry_count = retry_count
|
||||
|
||||
def get_pending_notifications_count(self):
|
||||
"""
|
||||
Get count of pending notifications (immediate queue + scheduled/retrying).
|
||||
|
||||
This includes:
|
||||
- Tasks in the immediate queue (ready to execute now)
|
||||
- Tasks in the schedule (waiting for retry or delayed execution)
|
||||
|
||||
Supports FileStorage, SqliteStorage, and RedisStorage backends.
|
||||
|
||||
Returns:
|
||||
Integer count of pending notifications, or None if unable to determine
|
||||
"""
|
||||
if self.huey is None or self.task_manager is None:
|
||||
return 0
|
||||
|
||||
try:
|
||||
# Get counts using task manager (polymorphic, backend-agnostic)
|
||||
queue_count, schedule_count = self.task_manager.count_storage_items()
|
||||
|
||||
total_count = queue_count + schedule_count
|
||||
|
||||
if queue_count > 0:
|
||||
logger.debug(f"Pending notifications - queue: {queue_count}")
|
||||
if schedule_count > 0:
|
||||
logger.debug(f"Pending notifications - schedule: {schedule_count}")
|
||||
if total_count > 0:
|
||||
logger.info(f"Total pending/retrying notifications: {total_count}")
|
||||
|
||||
return total_count
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting pending notification count: {e}", exc_info=True)
|
||||
return None # Unable to determine
|
||||
|
||||
def get_pending_notifications(self, limit=50):
|
||||
"""
|
||||
Get list of pending/retrying notifications from queue and schedule.
|
||||
|
||||
Args:
|
||||
limit: Maximum number to return (default: 50)
|
||||
|
||||
Returns:
|
||||
List of dicts with pending notification info
|
||||
"""
|
||||
if self.huey is None:
|
||||
return []
|
||||
|
||||
pending = []
|
||||
|
||||
try:
|
||||
# Use Huey's built-in methods to get queued and scheduled items
|
||||
# These methods return pickled bytes that need to be unpickled
|
||||
|
||||
# Get queued tasks (immediate execution)
|
||||
if hasattr(self.huey.storage, 'enqueued_items'):
|
||||
try:
|
||||
queued_items = list(self.huey.storage.enqueued_items(limit=limit))
|
||||
for queued_bytes in queued_items:
|
||||
if len(pending) >= limit:
|
||||
break
|
||||
|
||||
# Use centralized unpacker
|
||||
result = HueyMessageUnpacker.unpack_queued_notification(queued_bytes, self.huey)
|
||||
if result is None:
|
||||
continue
|
||||
|
||||
task_id, notification_data = result
|
||||
|
||||
# Get metadata for timestamp
|
||||
metadata = self._get_task_metadata(task_id) if task_id else None
|
||||
queued_timestamp = metadata.get('timestamp') if metadata else None
|
||||
|
||||
# Format timestamp for display
|
||||
queued_at_formatted = timestamp_to_localtime(queued_timestamp) if queued_timestamp else 'Unknown'
|
||||
|
||||
pending.append({
|
||||
'status': 'queued',
|
||||
'watch_url': notification_data.get('watch_url', 'Unknown'),
|
||||
'watch_uuid': notification_data.get('uuid'),
|
||||
'task_id': task_id,
|
||||
'queued_at': queued_timestamp,
|
||||
'queued_at_formatted': queued_at_formatted,
|
||||
})
|
||||
except Exception as e:
|
||||
logger.debug(f"Error getting queued items: {e}")
|
||||
|
||||
# Get scheduled tasks (retrying)
|
||||
if hasattr(self.huey.storage, 'scheduled_items'):
|
||||
try:
|
||||
scheduled_items = list(self.huey.storage.scheduled_items(limit=limit))
|
||||
for scheduled_bytes in scheduled_items:
|
||||
if len(pending) >= limit:
|
||||
break
|
||||
|
||||
# Use centralized unpacker
|
||||
result = HueyMessageUnpacker.unpack_scheduled_notification(scheduled_bytes, self.huey)
|
||||
if result is None:
|
||||
continue
|
||||
|
||||
task_id, notification_data, eta = result
|
||||
|
||||
# Calculate retry timing using unpacker utility
|
||||
retry_in_seconds, eta_formatted = HueyMessageUnpacker.calculate_retry_timing(eta)
|
||||
|
||||
# Convert eta to Unix timestamp for JavaScript (with safety check)
|
||||
retry_at_timestamp = None
|
||||
if eta and hasattr(eta, 'timestamp'):
|
||||
try:
|
||||
# Huey stores ETA as naive datetime in UTC - need to add timezone info
|
||||
if eta.tzinfo is None:
|
||||
# Naive datetime - assume it's UTC (Huey's default)
|
||||
import datetime
|
||||
eta = eta.replace(tzinfo=datetime.timezone.utc)
|
||||
retry_at_timestamp = int(eta.timestamp())
|
||||
logger.debug(f"ETA after timezone fix: {eta}, Timestamp: {retry_at_timestamp}")
|
||||
except Exception as e:
|
||||
logger.debug(f"Error converting eta to timestamp: {e}")
|
||||
|
||||
# Get original queued timestamp from metadata
|
||||
metadata = self._get_task_metadata(task_id) if task_id else None
|
||||
queued_timestamp = metadata.get('timestamp') if metadata else None
|
||||
|
||||
# Format timestamp for display
|
||||
queued_at_formatted = timestamp_to_localtime(queued_timestamp) if queued_timestamp else 'Unknown'
|
||||
|
||||
# Get retry count from retry_attempts (using polymorphic task_data_manager)
|
||||
# Retry number represents which retry this is (1st retry, 2nd retry, etc.)
|
||||
# If there are N attempt files, we're currently on retry #N
|
||||
retry_number = 1 # Default to 1 (first retry after initial failure)
|
||||
total_attempts = self.retry_count + 1 # Initial attempt + retries
|
||||
watch_uuid = notification_data.get('uuid')
|
||||
retry_attempts = []
|
||||
notification_urls = []
|
||||
|
||||
# Load retry attempts using polymorphic manager
|
||||
if watch_uuid and self.task_data_manager is not None:
|
||||
try:
|
||||
retry_attempts = self.task_data_manager.load_retry_attempts(watch_uuid)
|
||||
|
||||
if len(retry_attempts) > 0:
|
||||
# Current retry number = number of attempt files
|
||||
# (1 file = 1st retry, 2 files = 2nd retry, etc.)
|
||||
retry_number = len(retry_attempts)
|
||||
logger.debug(f"Watch {watch_uuid[:8]}: Found {len(retry_attempts)} retry files, currently on retry #{retry_number}/{total_attempts}")
|
||||
|
||||
# Extract notification_urls from latest retry attempt
|
||||
latest_attempt = retry_attempts[-1]
|
||||
attempt_notification_data = latest_attempt.get('notification_data', {})
|
||||
if attempt_notification_data:
|
||||
notification_urls = attempt_notification_data.get('notification_urls', [])
|
||||
else:
|
||||
# No retry attempts yet - first retry
|
||||
retry_number = 1
|
||||
logger.debug(f"Watch {watch_uuid[:8]}: No retry attempts yet, first retry (retry #1/{total_attempts})")
|
||||
except Exception as e:
|
||||
logger.warning(f"Error reading retry attempts for {watch_uuid}: {e}, defaulting to attempt #1")
|
||||
retry_number = 1 # Fallback to 1 on error
|
||||
|
||||
pending.append({
|
||||
'status': 'retrying',
|
||||
'watch_url': notification_data.get('watch_url', 'Unknown'),
|
||||
'watch_uuid': notification_data.get('uuid'),
|
||||
'retry_at': eta,
|
||||
'retry_at_formatted': eta_formatted,
|
||||
'retry_at_timestamp': retry_at_timestamp,
|
||||
'retry_in_seconds': retry_in_seconds,
|
||||
'task_id': task_id,
|
||||
'queued_at': queued_timestamp,
|
||||
'queued_at_formatted': queued_at_formatted,
|
||||
'retry_number': retry_number,
|
||||
'total_retries': total_attempts,
|
||||
'retry_attempts': retry_attempts,
|
||||
'notification_urls': notification_urls,
|
||||
})
|
||||
except Exception as e:
|
||||
logger.debug(f"Error getting scheduled items: {e}")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting pending notifications: {e}", exc_info=True)
|
||||
|
||||
logger.debug(f"get_pending_notifications returning {len(pending)} items")
|
||||
return pending
|
||||
|
||||
def get_delivered_notifications(self, limit=50):
|
||||
"""
|
||||
Get list of delivered (successful) notifications (using polymorphic task_data_manager).
|
||||
|
||||
Each successful notification is stored in the task data manager.
|
||||
|
||||
Args:
|
||||
limit: Maximum number to return (default: 50)
|
||||
|
||||
Returns:
|
||||
List of dicts with delivered notification info (newest first)
|
||||
"""
|
||||
if self.task_data_manager is None:
|
||||
logger.debug("Task data manager not initialized")
|
||||
return []
|
||||
|
||||
try:
|
||||
# Load using polymorphic manager (handles FileStorage, SQLiteStorage, RedisStorage)
|
||||
notifications = self.task_data_manager.load_delivered_notifications()
|
||||
|
||||
# Apply limit
|
||||
if limit and len(notifications) > limit:
|
||||
notifications = notifications[:limit]
|
||||
|
||||
return notifications
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Unable to load delivered notifications: {e}")
|
||||
|
||||
return []
|
||||
|
||||
def get_last_successful_notification(self):
|
||||
"""
|
||||
Get the most recent successful notification for reference.
|
||||
|
||||
Returns:
|
||||
Dict with success info or None if no successful notifications yet
|
||||
"""
|
||||
delivered = self.get_delivered_notifications(limit=1)
|
||||
return delivered[0] if delivered else None
|
||||
|
||||
def get_failed_notifications(self, limit=100, max_age_days=30):
|
||||
"""
|
||||
Get list of failed notification tasks from Huey's result store.
|
||||
|
||||
Args:
|
||||
limit: Maximum number of failed tasks to return (default: 100)
|
||||
max_age_days: Auto-delete failed notifications older than this (default: 30 days)
|
||||
|
||||
Returns:
|
||||
List of dicts containing failed notification info:
|
||||
- task_id: Huey task ID
|
||||
- timestamp: When the task failed
|
||||
- error: Error message
|
||||
- notification_data: Original notification data
|
||||
- watch_url: URL of the watch
|
||||
- watch_uuid: UUID of the watch
|
||||
"""
|
||||
if self.huey is None or self.task_manager is None:
|
||||
return []
|
||||
|
||||
failed_tasks = []
|
||||
import time
|
||||
|
||||
try:
|
||||
# Query Huey's result storage for failed tasks using backend-agnostic helper
|
||||
cutoff_time = time.time() - (max_age_days * 86400)
|
||||
|
||||
# Use helper function that works with all storage backends
|
||||
results = self.task_manager.enumerate_results()
|
||||
|
||||
# Import Huey's Error class for checking failed tasks
|
||||
from huey.utils import Error as HueyError
|
||||
|
||||
for task_id, result in results.items():
|
||||
if isinstance(result, (Exception, HueyError)):
|
||||
# This is a failed task (either Exception or Huey Error object)
|
||||
# Check if task is still scheduled for retry
|
||||
# If it is, don't include it in failed list (still retrying)
|
||||
if self.huey.storage:
|
||||
try:
|
||||
# Check if this task is in the schedule queue (still being retried)
|
||||
task_still_scheduled = False
|
||||
|
||||
# Use Huey's built-in scheduled_items() method to get scheduled tasks
|
||||
try:
|
||||
if hasattr(self.huey.storage, 'scheduled_items'):
|
||||
scheduled_items = list(self.huey.storage.scheduled_items())
|
||||
for scheduled_bytes in scheduled_items:
|
||||
# Use centralized unpacker to extract just the task ID
|
||||
scheduled_task_id = HueyMessageUnpacker.extract_task_id_from_scheduled(scheduled_bytes)
|
||||
if scheduled_task_id == task_id:
|
||||
task_still_scheduled = True
|
||||
logger.debug(f"Task {task_id[:20]}... IS scheduled")
|
||||
break
|
||||
except Exception as se:
|
||||
logger.debug(f"Error checking schedule: {se}")
|
||||
|
||||
# Also check if task failed very recently
|
||||
# Handles race condition where result is written before retry is scheduled
|
||||
if not task_still_scheduled:
|
||||
task_metadata = self._get_task_metadata(task_id)
|
||||
if task_metadata:
|
||||
task_time = task_metadata.get('timestamp', 0)
|
||||
time_since_failure = time.time() - task_time if task_time else 999
|
||||
|
||||
# Grace period before marking as permanently failed (configurable for tests)
|
||||
# Default 5 seconds in production, but can be reduced to 1 second in tests
|
||||
import os
|
||||
grace_period = int(os.environ.get('NOTIFICATION_FAILED_GRACE_PERIOD', '5'))
|
||||
|
||||
# If task failed very recently, it might still be scheduling a retry
|
||||
# Be conservative and don't count it as permanently failed yet
|
||||
if time_since_failure < grace_period:
|
||||
logger.debug(f"Task {task_id[:20]}... failed only {time_since_failure:.1f}s ago (grace period: {grace_period}s), might still be scheduling retry")
|
||||
task_still_scheduled = True # Treat as potentially still retrying
|
||||
|
||||
# SMART DETECTION: Check retry attempt count
|
||||
# If this task has exhausted all retries, it's permanently failed
|
||||
# regardless of schedule state (handles Redis timing issues)
|
||||
task_metadata = self._get_task_metadata(task_id)
|
||||
if task_metadata:
|
||||
notification_watch_uuid = task_metadata.get('notification_data', {}).get('uuid')
|
||||
if notification_watch_uuid and self.task_data_manager:
|
||||
try:
|
||||
retry_attempts = self.task_data_manager.load_retry_attempts(notification_watch_uuid)
|
||||
num_attempts = len(retry_attempts)
|
||||
|
||||
# If we have retry_count + 1 attempts (initial + retries), it's exhausted
|
||||
if num_attempts >= self.retry_count + 1:
|
||||
logger.debug(f"Task {task_id[:20]}... has {num_attempts} attempts (max: {self.retry_count + 1}), marking as permanently failed")
|
||||
task_still_scheduled = False # Override - definitely failed
|
||||
except Exception as e:
|
||||
logger.debug(f"Error checking retry attempts: {e}")
|
||||
|
||||
# Skip this task if it's still scheduled for retry
|
||||
if task_still_scheduled:
|
||||
logger.debug(f"Task {task_id[:20]}... still scheduled for retry, not counting as failed yet")
|
||||
continue
|
||||
else:
|
||||
logger.debug(f"Task {task_id[:20]}... NOT in schedule or exhausted retries, counting as failed")
|
||||
except Exception as e:
|
||||
logger.debug(f"Error checking schedule for task {task_id}: {e}")
|
||||
|
||||
# Try to extract notification data from task metadata storage
|
||||
try:
|
||||
# Get task metadata from our metadata storage
|
||||
task_metadata = self._get_task_metadata(task_id)
|
||||
if task_metadata:
|
||||
task_time = task_metadata.get('timestamp', 0)
|
||||
notification_data = task_metadata.get('notification_data', {})
|
||||
|
||||
# Auto-cleanup old failed notifications to free memory
|
||||
if task_time and task_time < cutoff_time:
|
||||
logger.info(f"Auto-deleting old failed notification {task_id} (age: {(time.time() - task_time) / 86400:.1f} days)")
|
||||
self.task_manager.delete_result(task_id)
|
||||
self.task_manager.delete_task_metadata(task_id)
|
||||
continue
|
||||
|
||||
# Format timestamp for display with locale awareness
|
||||
timestamp_formatted = timestamp_to_localtime(task_time) if task_time else 'Unknown'
|
||||
days_ago = int((time.time() - task_time) / 86400) if task_time else 0
|
||||
|
||||
# Load retry attempts for this notification (using polymorphic task_data_manager)
|
||||
retry_attempts = []
|
||||
notification_watch_uuid = notification_data.get('uuid')
|
||||
if notification_watch_uuid and self.task_data_manager is not None:
|
||||
try:
|
||||
retry_attempts = self.task_data_manager.load_retry_attempts(notification_watch_uuid)
|
||||
except Exception as e:
|
||||
logger.debug(f"Error loading retry attempts for {notification_watch_uuid}: {e}")
|
||||
|
||||
# Merge notification_data from latest retry attempt (has reloaded notification_urls)
|
||||
if retry_attempts:
|
||||
latest_attempt = retry_attempts[-1]
|
||||
attempt_notification_data = latest_attempt.get('notification_data', {})
|
||||
if attempt_notification_data:
|
||||
notification_data.update(attempt_notification_data)
|
||||
|
||||
failed_tasks.append({
|
||||
'task_id': task_id,
|
||||
'timestamp': task_time,
|
||||
'timestamp_formatted': timestamp_formatted,
|
||||
'days_ago': days_ago,
|
||||
'error': str(result),
|
||||
'notification_data': notification_data,
|
||||
'retry_attempts': retry_attempts,
|
||||
})
|
||||
except Exception as e:
|
||||
logger.error(f"Error extracting failed task data: {e}")
|
||||
|
||||
if len(failed_tasks) >= limit:
|
||||
break
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error querying failed notifications: {e}")
|
||||
|
||||
return failed_tasks
|
||||
|
||||
def get_all_notification_events(self, limit=100):
|
||||
"""
|
||||
Get ALL notification events in a unified format for timeline view.
|
||||
Returns successful deliveries, queued, retrying, and failed notifications.
|
||||
|
||||
Returns list sorted by timestamp (newest first) with structure:
|
||||
{
|
||||
'id': 'task_id or unique_id',
|
||||
'status': 'delivered' | 'queued' | 'retrying' | 'failed',
|
||||
'timestamp': unix_timestamp,
|
||||
'timestamp_formatted': 'human readable',
|
||||
'watch_uuid': 'uuid',
|
||||
'watch_url': 'url',
|
||||
'watch_title': 'title or truncated url',
|
||||
'notification_urls': ['endpoint1', 'endpoint2'],
|
||||
'retry_number': 1, # for retrying status
|
||||
'total_retries': 3, # for retrying status
|
||||
'apprise_logs': 'logs text',
|
||||
'error': 'error text if failed'
|
||||
}
|
||||
"""
|
||||
events = []
|
||||
|
||||
# 1. Get delivered (successful) notifications (up to 100)
|
||||
delivered = self.get_delivered_notifications(limit=limit)
|
||||
for success in delivered:
|
||||
events.append({
|
||||
'id': success.get('task_id') or f"success-{success.get('timestamp', 0)}",
|
||||
'status': 'delivered',
|
||||
'timestamp': success.get('timestamp'),
|
||||
'timestamp_formatted': success.get('timestamp_formatted'),
|
||||
'watch_uuid': success.get('watch_uuid'),
|
||||
'watch_url': success.get('watch_url'),
|
||||
'watch_title': success.get('watch_url', 'Unknown')[:50],
|
||||
'notification_urls': success.get('notification_urls', []),
|
||||
'apprise_logs': '\n'.join(success.get('apprise_logs', [])) if isinstance(success.get('apprise_logs'), list) else success.get('apprise_logs', ''),
|
||||
'payload': success.get('payload'),
|
||||
'error': None
|
||||
})
|
||||
|
||||
# 2. Get pending/queued notifications
|
||||
pending = self.get_pending_notifications(limit=limit)
|
||||
for item in pending:
|
||||
status = 'retrying' if item.get('status') == 'retrying' else 'queued'
|
||||
|
||||
# Get apprise logs and payload for this task if available
|
||||
apprise_logs = None
|
||||
payload = None
|
||||
task_id = item.get('task_id')
|
||||
if task_id:
|
||||
log_data = self.get_task_apprise_log(task_id)
|
||||
if log_data and log_data.get('apprise_log'):
|
||||
apprise_logs = log_data.get('apprise_log')
|
||||
# Get payload from retry attempts if available
|
||||
retry_attempts = item.get('retry_attempts', [])
|
||||
if retry_attempts:
|
||||
payload = retry_attempts[-1].get('payload')
|
||||
|
||||
events.append({
|
||||
'id': task_id,
|
||||
'status': status,
|
||||
'timestamp': item.get('queued_at'),
|
||||
'timestamp_formatted': item.get('queued_at_formatted'),
|
||||
'watch_uuid': item.get('watch_uuid'),
|
||||
'watch_url': item.get('watch_url'),
|
||||
'watch_title': item.get('watch_url', 'Unknown')[:50],
|
||||
'notification_urls': item.get('notification_urls', []) if item.get('notification_urls') else [],
|
||||
'retry_number': item.get('retry_number'),
|
||||
'total_retries': item.get('total_retries'),
|
||||
'retry_at': item.get('retry_at_timestamp'),
|
||||
'retry_at_formatted': item.get('retry_at_formatted'),
|
||||
'retry_attempts': item.get('retry_attempts', []),
|
||||
'apprise_logs': apprise_logs,
|
||||
'payload': payload,
|
||||
'error': None
|
||||
})
|
||||
|
||||
# 3. Get failed notifications (dead letter)
|
||||
failed = self.get_failed_notifications(limit=limit)
|
||||
for item in failed:
|
||||
# Get apprise logs and payload for failed tasks
|
||||
apprise_logs = None
|
||||
payload = None
|
||||
task_id = item.get('task_id')
|
||||
if task_id:
|
||||
log_data = self.get_task_apprise_log(task_id)
|
||||
if log_data and log_data.get('apprise_log'):
|
||||
apprise_logs = log_data.get('apprise_log')
|
||||
|
||||
# Get payload from retry attempts (has the most recent attempt data)
|
||||
retry_attempts = item.get('retry_attempts', [])
|
||||
if retry_attempts:
|
||||
payload = retry_attempts[-1].get('payload')
|
||||
|
||||
events.append({
|
||||
'id': task_id,
|
||||
'status': 'failed',
|
||||
'timestamp': item.get('timestamp'),
|
||||
'timestamp_formatted': item.get('timestamp_formatted'),
|
||||
'watch_uuid': item.get('notification_data', {}).get('uuid'),
|
||||
'watch_url': item.get('notification_data', {}).get('watch_url'),
|
||||
'watch_title': item.get('notification_data', {}).get('watch_url', 'Unknown')[:50],
|
||||
'notification_urls': item.get('notification_data', {}).get('notification_urls', []),
|
||||
'retry_attempts': item.get('retry_attempts', []),
|
||||
'apprise_logs': apprise_logs,
|
||||
'payload': payload,
|
||||
'error': item.get('error')
|
||||
})
|
||||
|
||||
# Sort by timestamp (newest first)
|
||||
events.sort(key=lambda x: x.get('timestamp', 0) or 0, reverse=True)
|
||||
|
||||
# HTML escape user-controlled fields to prevent XSS in UI
|
||||
from changedetectionio.jinja2_custom.safe_jinja import render_fully_escaped
|
||||
for event in events:
|
||||
# Escape apprise logs
|
||||
if event.get('apprise_logs'):
|
||||
event['apprise_logs'] = render_fully_escaped(event['apprise_logs'])
|
||||
|
||||
# Escape error messages
|
||||
if event.get('error'):
|
||||
event['error'] = render_fully_escaped(event['error'])
|
||||
|
||||
# Escape payload fields (notification title, body, format)
|
||||
if event.get('payload') and isinstance(event['payload'], dict):
|
||||
if event['payload'].get('notification_title'):
|
||||
event['payload']['notification_title'] = render_fully_escaped(event['payload']['notification_title'])
|
||||
if event['payload'].get('notification_body'):
|
||||
event['payload']['notification_body'] = render_fully_escaped(event['payload']['notification_body'])
|
||||
if event['payload'].get('notification_format'):
|
||||
event['payload']['notification_format'] = render_fully_escaped(event['payload']['notification_format'])
|
||||
|
||||
# Limit results
|
||||
return events[:limit]
|
||||
|
||||
def get_task_apprise_log(self, task_id):
|
||||
"""
|
||||
Get the Apprise log for a specific task.
|
||||
|
||||
Returns dict with:
|
||||
- apprise_log: str (the log text)
|
||||
- task_id: str
|
||||
- watch_url: str (if available)
|
||||
- notification_urls: list (if available)
|
||||
- error: str (if failed)
|
||||
"""
|
||||
if self.huey is None:
|
||||
return None
|
||||
|
||||
try:
|
||||
# First check task metadata for notification data and logs
|
||||
metadata = self._get_task_metadata(task_id)
|
||||
|
||||
# Also check Huey result for error info (failed tasks)
|
||||
from huey.utils import Error as HueyError
|
||||
error_info = None
|
||||
try:
|
||||
result = self.huey.result(task_id, preserve=True)
|
||||
if result and isinstance(result, (Exception, HueyError)):
|
||||
error_info = str(result)
|
||||
except Exception as e:
|
||||
# If huey.result() raises an exception, that IS the error we want
|
||||
# (Huey raises the stored exception when calling result() on failed tasks)
|
||||
error_info = str(e)
|
||||
logger.debug(f"Got error from result for task {task_id}: {type(e).__name__}")
|
||||
|
||||
if metadata:
|
||||
# Get apprise logs from metadata (could be 'apprise_logs' list or 'apprise_log' string)
|
||||
apprise_logs = metadata.get('apprise_logs', [])
|
||||
apprise_log_text = '\n'.join(apprise_logs) if isinstance(apprise_logs, list) else metadata.get('apprise_log', '')
|
||||
|
||||
# If no logs in metadata but we have error_info, try to extract from error
|
||||
if not apprise_log_text and error_info and 'Apprise logs:' in error_info:
|
||||
parts = error_info.split('Apprise logs:', 1)
|
||||
if len(parts) > 1:
|
||||
apprise_log_text = parts[1].strip()
|
||||
# The exception string has escaped newlines (\n), convert to actual newlines
|
||||
apprise_log_text = apprise_log_text.replace('\\n', '\n')
|
||||
# Also remove trailing quotes and closing parens from exception repr
|
||||
apprise_log_text = apprise_log_text.rstrip("')")
|
||||
logger.debug(f"Extracted Apprise logs from error for task {task_id}: {len(apprise_log_text)} chars")
|
||||
|
||||
# Clean up error to not duplicate the Apprise logs
|
||||
# Only show the main error message, not the logs again
|
||||
error_parts = error_info.split('\nApprise logs:', 1)
|
||||
if len(error_parts) > 1:
|
||||
error_info = error_parts[0] # Keep only the main error message
|
||||
|
||||
# Use metadata for apprise_log and notification data, but also include error from result
|
||||
result = {
|
||||
'task_id': task_id,
|
||||
'apprise_log': apprise_log_text if apprise_log_text else 'No log available',
|
||||
'watch_url': metadata.get('notification_data', {}).get('watch_url'),
|
||||
'notification_urls': metadata.get('notification_data', {}).get('notification_urls', []),
|
||||
'error': error_info if error_info else metadata.get('error'),
|
||||
}
|
||||
|
||||
return result
|
||||
else:
|
||||
# Fallback: if no metadata, check result store
|
||||
try:
|
||||
result = self.huey.result(task_id, preserve=True)
|
||||
if result and isinstance(result, (Exception, HueyError)):
|
||||
error = str(result)
|
||||
return {
|
||||
'task_id': task_id,
|
||||
'apprise_log': f"Error: {error}",
|
||||
'error': error
|
||||
}
|
||||
except Exception as e:
|
||||
error = str(e)
|
||||
return {
|
||||
'task_id': task_id,
|
||||
'apprise_log': f"Error: {error}",
|
||||
'error': error
|
||||
}
|
||||
|
||||
return None
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting task apprise log: {e}")
|
||||
return None
|
||||
|
||||
# Private helper methods
|
||||
|
||||
def _get_task_metadata(self, task_id):
|
||||
"""Get task metadata from task manager."""
|
||||
if self.task_manager is None:
|
||||
return None
|
||||
return self.task_manager.get_task_metadata(task_id)
|
||||
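The retry timing shown above hinges on one subtle point: Huey stores the ETA as a naive datetime in UTC, so it has to be tagged as UTC before it can be converted to a Unix timestamp for the UI. A standalone illustration of that conversion:

from datetime import timezone

def eta_to_unix(eta):
    # Treat a naive Huey ETA as UTC, then return a Unix timestamp (None when there is no ETA)
    if eta is None:
        return None
    if eta.tzinfo is None:
        eta = eta.replace(tzinfo=timezone.utc)
    return int(eta.timestamp())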
66
changedetectionio/notification/task_data/__init__.py
Normal file
@@ -0,0 +1,66 @@
"""
Task data storage management for Huey notifications.

Provides polymorphic storage for retry attempt audit trails and delivered
notification confirmations that persist independently of the Huey queue backend.

This module uses the Strategy pattern to handle different storage backends
(FileStorage, SQLiteStorage, RedisStorage) without conditional logic in the main code.
"""

from .base import HueyTaskDataStorageManager
from .file_storage import FileTaskDataStorageManager
from .sqlite_storage import SqliteTaskDataStorageManager
from .redis_storage import RedisTaskDataStorageManager

__all__ = [
    'HueyTaskDataStorageManager',
    'FileTaskDataStorageManager',
    'SqliteTaskDataStorageManager',
    'RedisTaskDataStorageManager',
    'create_task_data_storage_manager',
]


def create_task_data_storage_manager(huey_storage, fallback_path=None):
    """
    Factory function to create the appropriate task data storage manager.

    Uses duck typing to detect storage backend type and return the appropriate manager.

    Args:
        huey_storage: Huey storage instance (FileStorage, SQLiteStorage, or RedisStorage)
        fallback_path: Fallback path for Redis storage (typically global datastore path)

    Returns:
        HueyTaskDataStorageManager: Appropriate manager for the storage backend

    Raises:
        ValueError: If storage type cannot be determined
    """
    if huey_storage is None:
        raise ValueError("huey_storage cannot be None")

    # Detect storage type using duck typing (check for distinguishing attributes)

    # FileStorage: has 'path' attribute
    if hasattr(huey_storage, 'path') and huey_storage.path is not None:
        from loguru import logger
        logger.debug(f"Detected FileStorage backend")
        return FileTaskDataStorageManager(huey_storage)

    # SQLiteStorage: has 'filename' attribute (path to .db file)
    if hasattr(huey_storage, 'filename') and huey_storage.filename is not None:
        from loguru import logger
        logger.debug(f"Detected SQLiteStorage backend")
        return SqliteTaskDataStorageManager(huey_storage)

    # RedisStorage: has 'conn' attribute (Redis connection)
    if hasattr(huey_storage, 'conn'):
        from loguru import logger
        logger.debug(f"Detected RedisStorage backend")
        return RedisTaskDataStorageManager(huey_storage, fallback_path=fallback_path)

    # Unknown storage type
    storage_type = type(huey_storage).__name__
    raise ValueError(f"Unknown Huey storage type: {storage_type}")
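A minimal usage sketch of the factory, assuming a huey instance built on one of the supported backends and the application's datastore path:

from changedetectionio.notification.task_data import create_task_data_storage_manager

def build_task_data_manager(huey, datastore_path):
    # fallback_path is only consulted for Redis, which has no on-disk storage path of its own
    return create_task_data_storage_manager(huey.storage, fallback_path=datastore_path)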
406
changedetectionio/notification/task_data/base.py
Normal file
@@ -0,0 +1,406 @@
|
||||
"""
|
||||
Base class for managing Huey task data (retry attempts and delivered notifications).
|
||||
|
||||
This provides polymorphic storage for audit trail data that persists
|
||||
independently of the Huey queue backend (FileStorage, SQLiteStorage, RedisStorage).
|
||||
"""
|
||||
|
||||
from loguru import logger
|
||||
|
||||
|
||||
class HueyTaskDataStorageManager:
|
||||
"""
|
||||
Abstract base class for managing task data storage.
|
||||
|
||||
Handles retry attempt audit trails and delivered notification confirmations
|
||||
that are stored as JSON files on disk, regardless of the Huey queue backend.
|
||||
"""
|
||||
|
||||
def __init__(self, storage, storage_path=None):
|
||||
"""
|
||||
Initialize the task data storage manager.
|
||||
|
||||
Args:
|
||||
storage: Huey storage instance (FileStorage, SQLiteStorage, or RedisStorage)
|
||||
storage_path: Optional explicit storage path (for testing)
|
||||
"""
|
||||
self.storage = storage
|
||||
self._explicit_storage_path = storage_path
|
||||
|
||||
@property
|
||||
def storage_path(self):
|
||||
"""
|
||||
Get the storage path for this backend.
|
||||
|
||||
This is where retry attempts and delivered notifications are stored as JSON files.
|
||||
Must be implemented by subclasses to handle backend-specific path logic.
|
||||
|
||||
Returns:
|
||||
str: Path to storage directory, or None if unavailable
|
||||
"""
|
||||
raise NotImplementedError(f"{self.__class__.__name__} must implement storage_path property")
|
||||
|
||||
def store_retry_attempt(self, watch_uuid, notification_data, error_message):
|
||||
"""
|
||||
Store a retry attempt as a JSON file for audit trail.
|
||||
|
||||
Args:
|
||||
watch_uuid: UUID of the watch
|
||||
notification_data: Dict containing notification data
|
||||
error_message: Error message from the failed attempt
|
||||
|
||||
Returns:
|
||||
bool: True if stored successfully
|
||||
"""
|
||||
import os
|
||||
import time
|
||||
from .file_utils import _atomic_json_write
|
||||
|
||||
storage_path = self.storage_path
|
||||
if not storage_path:
|
||||
logger.debug("No storage path available, cannot store retry attempt")
|
||||
return False
|
||||
|
||||
try:
|
||||
attempts_dir = os.path.join(storage_path, 'retry_attempts')
|
||||
os.makedirs(attempts_dir, exist_ok=True)
|
||||
|
||||
# Create unique filename with timestamp
|
||||
timestamp = time.time()
|
||||
attempt_number = len([f for f in os.listdir(attempts_dir)
|
||||
if f.startswith(f"{watch_uuid}.")]) + 1
|
||||
filename = f"{watch_uuid}.{attempt_number}.{int(timestamp)}.json"
|
||||
filepath = os.path.join(attempts_dir, filename)
|
||||
|
||||
# Extract payload if it's in notification_data
|
||||
payload = notification_data.pop('payload', None) if isinstance(notification_data, dict) else None
|
||||
|
||||
# Store retry attempt data
|
||||
retry_data = {
|
||||
'watch_uuid': watch_uuid,
|
||||
'timestamp': timestamp,
|
||||
'attempt_number': attempt_number,
|
||||
'error': error_message, # Using 'error' for backward compatibility
|
||||
'error_message': error_message, # Also keep error_message for clarity
|
||||
'notification_data': notification_data,
|
||||
'payload': payload # What was attempted to be sent to Apprise
|
||||
}
|
||||
|
||||
_atomic_json_write(filepath, retry_data)
|
||||
logger.debug(f"Stored retry attempt #{attempt_number} for watch {watch_uuid[:8]}")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error storing retry attempt: {e}")
|
||||
return False
|
||||
|
||||
def load_retry_attempts(self, watch_uuid):
|
||||
"""
|
||||
Load all retry attempts for a watch.
|
||||
|
||||
Args:
|
||||
watch_uuid: UUID of the watch
|
||||
|
||||
Returns:
|
||||
list: List of retry attempt dicts, sorted by timestamp
|
||||
"""
|
||||
import os
|
||||
import glob
|
||||
from .file_utils import _safe_json_load
|
||||
|
||||
storage_path = self.storage_path
|
||||
if not storage_path:
|
||||
return []
|
||||
|
||||
try:
|
||||
attempts_dir = os.path.join(storage_path, 'retry_attempts')
|
||||
if not os.path.exists(attempts_dir):
|
||||
return []
|
||||
|
||||
retry_attempts = []
|
||||
attempt_pattern = os.path.join(attempts_dir, f"{watch_uuid}.*.json")
|
||||
|
||||
for attempt_file in sorted(glob.glob(attempt_pattern)):
|
||||
try:
|
||||
attempt_data = _safe_json_load(attempt_file, 'retry_attempts', storage_path)
|
||||
if attempt_data:
|
||||
# Format timestamp for display
|
||||
attempt_time = attempt_data.get('timestamp')
|
||||
if attempt_time:
|
||||
from changedetectionio.notification_service import timestamp_to_localtime
|
||||
attempt_data['timestamp_formatted'] = timestamp_to_localtime(attempt_time)
|
||||
retry_attempts.append(attempt_data)
|
||||
except Exception as e:
|
||||
logger.debug(f"Unable to load retry attempt file {attempt_file}: {e}")
|
||||
|
||||
return retry_attempts
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error loading retry attempts for {watch_uuid}: {e}")
|
||||
return []
|
||||
|
||||
def store_delivered_notification(self, task_id, notification_data, apprise_logs=None):
|
||||
"""
|
||||
Store a delivered notification confirmation for audit trail.
|
||||
|
||||
Args:
|
||||
task_id: Huey task ID
|
||||
notification_data: Dict containing notification data
|
||||
apprise_logs: Optional Apprise logs from delivery
|
||||
|
||||
Returns:
|
||||
bool: True if stored successfully
|
||||
"""
|
||||
import os
|
||||
import time
|
||||
from .file_utils import _atomic_json_write
|
||||
|
||||
storage_path = self.storage_path
|
||||
if not storage_path:
|
||||
logger.debug("No storage path available, cannot store delivered notification")
|
||||
return False
|
||||
|
||||
try:
|
||||
success_dir = os.path.join(storage_path, 'success')
|
||||
os.makedirs(success_dir, exist_ok=True)
|
||||
|
||||
# Create unique filename with timestamp
|
||||
timestamp = int(time.time() * 1000) # milliseconds for uniqueness
|
||||
filename = f"success-{task_id}-{timestamp}.json"
|
||||
filepath = os.path.join(success_dir, filename)
|
||||
|
||||
# Store delivery confirmation data
|
||||
# Merge notification_data fields at top level for backward compatibility
|
||||
delivery_data = {
|
||||
'task_id': task_id,
|
||||
'timestamp': time.time(),
|
||||
'apprise_logs': apprise_logs or []
|
||||
}
|
||||
# Merge notification_data fields (watch_url, watch_uuid, notification_urls, payload)
|
||||
delivery_data.update(notification_data)
|
||||
|
||||
_atomic_json_write(filepath, delivery_data)
|
||||
logger.debug(f"Stored delivered notification confirmation for task {task_id[:8]}")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error storing delivered notification: {e}")
|
||||
return False
|
||||
|
||||
def load_delivered_notifications(self):
|
||||
"""
|
||||
Load all delivered notification confirmations.
|
||||
|
||||
Returns:
|
||||
list: List of delivered notification dicts, sorted by timestamp (newest first)
|
||||
"""
|
||||
import os
|
||||
from .file_utils import _safe_json_load
|
||||
|
||||
storage_path = self.storage_path
|
||||
if not storage_path:
|
||||
return []
|
||||
|
||||
try:
|
||||
success_dir = os.path.join(storage_path, 'success')
|
||||
if not os.path.exists(success_dir):
|
||||
return []
|
||||
|
||||
delivered = []
|
||||
for filename in os.listdir(success_dir):
|
||||
if not filename.startswith('success-') or not filename.endswith('.json'):
|
||||
continue
|
||||
|
||||
filepath = os.path.join(success_dir, filename)
|
||||
try:
|
||||
delivery_data = _safe_json_load(filepath, 'success', storage_path)
|
||||
if delivery_data:
|
||||
# Format timestamp for display
|
||||
delivery_time = delivery_data.get('timestamp')
|
||||
if delivery_time:
|
||||
from changedetectionio.notification_service import timestamp_to_localtime
|
||||
delivery_data['timestamp_formatted'] = timestamp_to_localtime(delivery_time)
|
||||
|
||||
# Add event_id for UI consistency
|
||||
delivery_data['event_id'] = filename.replace('success-', '').replace('.json', '')
|
||||
delivered.append(delivery_data)
|
||||
except Exception as e:
|
||||
logger.debug(f"Unable to load delivered notification file {filepath}: {e}")
|
||||
|
||||
# Sort by timestamp, newest first
|
||||
delivered.sort(key=lambda x: x.get('timestamp', 0), reverse=True)
|
||||
return delivered
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error loading delivered notifications: {e}")
|
||||
return []
|
||||
|
||||
def cleanup_old_retry_attempts(self, cutoff_time):
|
||||
"""
|
||||
Clean up retry attempt files older than cutoff time.
|
||||
|
||||
Args:
|
||||
            cutoff_time: Unix timestamp - files older than this will be deleted

        Returns:
            int: Number of files deleted
        """
        import os

        storage_path = self.storage_path
        if not storage_path:
            return 0

        deleted_count = 0
        try:
            attempts_dir = os.path.join(storage_path, 'retry_attempts')
            if not os.path.exists(attempts_dir):
                return 0

            for filename in os.listdir(attempts_dir):
                if not filename.endswith('.json'):
                    continue

                filepath = os.path.join(attempts_dir, filename)
                try:
                    # Check file modification time
                    file_mtime = os.path.getmtime(filepath)
                    if file_mtime < cutoff_time:
                        os.remove(filepath)
                        deleted_count += 1
                except Exception as e:
                    logger.debug(f"Error checking/deleting retry attempt file {filepath}: {e}")

            if deleted_count > 0:
                logger.info(f"Cleaned up {deleted_count} old retry attempt files")

        except Exception as e:
            logger.debug(f"Error cleaning up old retry attempts: {e}")

        return deleted_count

    def cleanup_old_delivered_notifications(self, cutoff_time):
        """
        Clean up delivered notification files older than cutoff time.

        Args:
            cutoff_time: Unix timestamp - files older than this will be deleted

        Returns:
            int: Number of files deleted
        """
        import os

        storage_path = self.storage_path
        if not storage_path:
            return 0

        deleted_count = 0
        try:
            success_dir = os.path.join(storage_path, 'success')
            if not os.path.exists(success_dir):
                return 0

            for filename in os.listdir(success_dir):
                if not filename.startswith('success-') or not filename.endswith('.json'):
                    continue

                filepath = os.path.join(success_dir, filename)
                try:
                    # Check file modification time
                    file_mtime = os.path.getmtime(filepath)
                    if file_mtime < cutoff_time:
                        os.remove(filepath)
                        deleted_count += 1
                except Exception as e:
                    logger.debug(f"Error checking/deleting delivered notification file {filepath}: {e}")

            if deleted_count > 0:
                logger.info(f"Cleaned up {deleted_count} old delivered notification files")

        except Exception as e:
            logger.debug(f"Error cleaning up old delivered notifications: {e}")

        return deleted_count

    def clear_retry_attempts(self, watch_uuid):
        """
        Clear all retry attempts for a specific watch.

        Called after successful notification delivery to clean up the audit trail.

        Args:
            watch_uuid: UUID of the watch to clear retry attempts for

        Returns:
            int: Number of retry attempts cleared
        """
        import os
        import glob

        storage_path = self.storage_path
        if not storage_path or not watch_uuid:
            return 0

        try:
            attempts_dir = os.path.join(storage_path, 'retry_attempts')
            if not os.path.exists(attempts_dir):
                return 0

            # Find all retry attempt files for this watch
            attempt_pattern = os.path.join(attempts_dir, f"{watch_uuid}.*.json")
            attempt_files = glob.glob(attempt_pattern)

            cleared_count = 0
            for attempt_file in attempt_files:
                try:
                    os.remove(attempt_file)
                    cleared_count += 1
                except Exception as e:
                    logger.debug(f"Error removing retry attempt file {attempt_file}: {e}")

            if cleared_count > 0:
                logger.debug(f"Cleared {cleared_count} retry attempts for watch {watch_uuid[:8]}")

            return cleared_count

        except Exception as e:
            logger.debug(f"Error clearing retry attempts for watch {watch_uuid}: {e}")
            return 0

    def clear_all_data(self):
        """
        Clear all retry attempts and delivered notifications.

        Returns:
            dict: Count of files cleared by type
        """
        import os

        storage_path = self.storage_path
        if not storage_path:
            return {'retry_attempts': 0, 'delivered': 0}

        cleared = {'retry_attempts': 0, 'delivered': 0}

        try:
            # Clear retry attempts
            attempts_dir = os.path.join(storage_path, 'retry_attempts')
            if os.path.exists(attempts_dir):
                for filename in os.listdir(attempts_dir):
                    if filename.endswith('.json'):
                        os.remove(os.path.join(attempts_dir, filename))
                        cleared['retry_attempts'] += 1

            # Clear delivered notifications
            success_dir = os.path.join(storage_path, 'success')
            if os.path.exists(success_dir):
                for filename in os.listdir(success_dir):
                    if filename.startswith('success-') and filename.endswith('.json'):
                        os.remove(os.path.join(success_dir, filename))
                        cleared['delivered'] += 1

        except Exception as e:
            logger.error(f"Error clearing task data: {e}")

        return cleared
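A minimal sketch of how a periodic housekeeping job might drive the two cleanup methods above; the 7-day retention window and the 'manager' argument (any concrete HueyTaskDataStorageManager) are illustrative assumptions, not part of this change:

import time

RETENTION_SECONDS = 7 * 24 * 60 * 60  # assumed retention window, not defined in this diff

def run_task_data_housekeeping(manager):
    # 'manager' is any concrete HueyTaskDataStorageManager instance
    cutoff_time = time.time() - RETENTION_SECONDS
    removed_attempts = manager.cleanup_old_retry_attempts(cutoff_time)
    removed_delivered = manager.cleanup_old_delivered_notifications(cutoff_time)
    return removed_attempts, removed_delivered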
36
changedetectionio/notification/task_data/file_storage.py
Normal file
@@ -0,0 +1,36 @@
"""
FileStorage backend task data manager for Huey notifications.

For local file-based storage (the default Huey backend).
"""

from loguru import logger
from .base import HueyTaskDataStorageManager


class FileTaskDataStorageManager(HueyTaskDataStorageManager):
    """Task data manager for FileStorage backend (local file-based storage)."""

    @property
    def storage_path(self):
        """
        Get storage path from FileStorage's 'path' attribute.

        FileStorage stores everything under a single directory specified by the 'path' attribute.

        Returns:
            str: Storage path, or None if unavailable
        """
        # Use explicit path if provided (for testing)
        if self._explicit_storage_path:
            return self._explicit_storage_path

        # FileStorage has a 'path' attribute pointing to its directory
        storage_path = getattr(self.storage, 'path', None)

        if storage_path:
            logger.debug(f"FileStorage path: {storage_path}")
        else:
            logger.warning("FileStorage has no 'path' attribute")

        return storage_path
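A quick usage sketch of the class above; the stand-in storage object, and the assumption that the base constructor accepts (storage, storage_path=None) as the Redis/SQLite subclasses later in this diff suggest, are illustrative and not confirmed here:

class _FakeFileStorage:
    # Stand-in for Huey's FileStorage; only the 'path' attribute matters for storage_path
    path = '/datastore/huey'

manager = FileTaskDataStorageManager(_FakeFileStorage())
print(manager.storage_path)  # -> '/datastore/huey'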
89
changedetectionio/notification/task_data/file_utils.py
Normal file
@@ -0,0 +1,89 @@
"""
File utility functions for atomic JSON operations.

Provides safe, atomic file operations for storing retry attempts
and delivered notification confirmations.
"""

import os
import json
import tempfile
from loguru import logger


def _atomic_json_write(filepath, data):
    """
    Atomically write JSON data to a file.

    Uses a temp file + rename pattern to ensure atomicity.
    This prevents corruption if the process is interrupted during write.

    Args:
        filepath: Destination file path
        data: Data to serialize as JSON

    Raises:
        IOError: If write fails
    """
    directory = os.path.dirname(filepath)

    # Create a temporary file in the same directory as the target
    # (ensures it's on the same filesystem for atomic rename)
    fd, temp_path = tempfile.mkstemp(
        dir=directory,
        prefix='.tmp_',
        suffix='.json'
    )

    try:
        # Write to temp file
        with os.fdopen(fd, 'w') as f:
            json.dump(data, f, indent=2)

        # Atomically replace the target file
        os.replace(temp_path, filepath)

    except Exception as e:
        # Clean up temp file on error
        try:
            os.unlink(temp_path)
        except:
            pass
        raise IOError(f"Failed to write {filepath}: {e}")


def _safe_json_load(filepath, data_type, storage_path):
    """
    Safely load JSON data from a file with corruption handling.

    Args:
        filepath: Path to JSON file
        data_type: Type of data for logging (e.g., 'retry_attempts', 'success')
        storage_path: Base storage path for moving corrupted files

    Returns:
        dict: Loaded JSON data, or None if file is corrupted/unreadable
    """
    try:
        with open(filepath, 'r') as f:
            return json.load(f)
    except json.JSONDecodeError as e:
        logger.warning(f"Corrupted {data_type} file {filepath}: {e}")

        # Move corrupted file to quarantine
        try:
            quarantine_dir = os.path.join(storage_path, 'corrupted')
            os.makedirs(quarantine_dir, exist_ok=True)

            corrupted_filename = f"corrupted_{os.path.basename(filepath)}"
            quarantine_path = os.path.join(quarantine_dir, corrupted_filename)

            os.rename(filepath, quarantine_path)
            logger.info(f"Moved corrupted {data_type} file to {quarantine_path}")
        except Exception as move_error:
            logger.error(f"Could not quarantine corrupted file: {move_error}")

        return None
    except Exception as e:
        logger.debug(f"Error loading {data_type} file {filepath}: {e}")
        return None
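A short round-trip illustrating the two helpers above, using only the standard library; the temporary directory and payload are made up for the example:

import os
import tempfile

workdir = tempfile.mkdtemp()
target = os.path.join(workdir, 'example.json')

_atomic_json_write(target, {'watch_uuid': 'abc123', 'attempt_number': 1})
data = _safe_json_load(target, 'retry_attempts', workdir)
assert data['attempt_number'] == 1
# A truncated or corrupted file would instead be moved to <workdir>/corrupted and None returned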
337
changedetectionio/notification/task_data/redis_storage.py
Normal file
@@ -0,0 +1,337 @@
|
||||
"""
|
||||
RedisStorage backend task data manager for Huey notifications.
|
||||
|
||||
For distributed deployments with Redis.
|
||||
|
||||
Uses native Redis keys to store retry attempts and delivered notifications
|
||||
as JSON strings for better performance and native Redis operations.
|
||||
"""
|
||||
|
||||
import json
|
||||
import time
|
||||
from loguru import logger
|
||||
from .base import HueyTaskDataStorageManager
|
||||
|
||||
|
||||
class RedisTaskDataStorageManager(HueyTaskDataStorageManager):
|
||||
"""Task data manager for RedisStorage backend - uses Redis keys for storage."""
|
||||
|
||||
def __init__(self, storage, storage_path=None, fallback_path=None):
|
||||
"""
|
||||
Initialize Redis task data manager.
|
||||
|
||||
Args:
|
||||
storage: Huey Redis storage instance
|
||||
storage_path: Optional explicit storage path (for testing)
|
||||
fallback_path: Fallback path when Redis has no local storage
|
||||
(typically the global datastore path)
|
||||
"""
|
||||
super().__init__(storage, storage_path)
|
||||
self._fallback_path = fallback_path
|
||||
|
||||
@property
|
||||
def storage_path(self):
|
||||
"""
|
||||
Get storage path for Redis backend.
|
||||
|
||||
Redis stores EVERYTHING natively in Redis (keys + JSON strings).
|
||||
This property returns None because Redis doesn't use filesystem storage.
|
||||
|
||||
All operations (store/load/cleanup) are implemented using native Redis commands
|
||||
and do not touch the filesystem.
|
||||
|
||||
Returns:
|
||||
None - Redis uses native database storage, not filesystem
|
||||
"""
|
||||
# Redis stores everything in Redis database, no filesystem path needed
|
||||
# If any code tries to use storage_path, it will get None and should fail fast
|
||||
return None
|
||||
|
||||
@property
|
||||
def redis_conn(self):
|
||||
"""Get Redis connection from storage."""
|
||||
return getattr(self.storage, 'conn', None)
|
||||
|
||||
def _get_key_prefix(self):
|
||||
"""Get Redis key prefix based on storage name."""
|
||||
name = getattr(self.storage, 'name', 'changedetection-notifications')
|
||||
return f"{name}:task_data"
|
||||
|
||||
def store_retry_attempt(self, watch_uuid, notification_data, error_message):
|
||||
"""Store retry attempt in Redis as JSON string."""
|
||||
if not self.redis_conn:
|
||||
logger.error("No Redis connection available")
|
||||
return False
|
||||
|
||||
try:
|
||||
# Get current attempt number
|
||||
key_prefix = self._get_key_prefix()
|
||||
pattern = f"{key_prefix}:retry:{watch_uuid}:*"
|
||||
existing_keys = self.redis_conn.keys(pattern)
|
||||
attempt_number = len(existing_keys) + 1
|
||||
|
||||
# Extract payload if present
|
||||
payload = notification_data.pop('payload', None) if isinstance(notification_data, dict) else None
|
||||
|
||||
# Prepare retry data
|
||||
timestamp = time.time()
|
||||
retry_data = {
|
||||
'watch_uuid': watch_uuid,
|
||||
'timestamp': timestamp,
|
||||
'attempt_number': attempt_number,
|
||||
'error': error_message,
|
||||
'error_message': error_message,
|
||||
'notification_data': notification_data,
|
||||
'payload': payload
|
||||
}
|
||||
|
||||
# Store as JSON string in Redis
|
||||
retry_key = f"{key_prefix}:retry:{watch_uuid}:{attempt_number}"
|
||||
self.redis_conn.set(retry_key, json.dumps(retry_data))
|
||||
|
||||
# Set expiration (30 days) to prevent unbounded growth
|
||||
self.redis_conn.expire(retry_key, 30 * 24 * 60 * 60)
|
||||
|
||||
logger.debug(f"Stored retry attempt #{attempt_number} for watch {watch_uuid[:8]} in Redis")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error storing retry attempt in Redis: {e}")
|
||||
return False
|
||||
|
||||
def load_retry_attempts(self, watch_uuid):
|
||||
"""Load all retry attempts for a watch from Redis."""
|
||||
if not self.redis_conn:
|
||||
logger.debug("No Redis connection available")
|
||||
return []
|
||||
|
||||
try:
|
||||
key_prefix = self._get_key_prefix()
|
||||
pattern = f"{key_prefix}:retry:{watch_uuid}:*"
|
||||
retry_keys = sorted(self.redis_conn.keys(pattern))
|
||||
|
||||
retry_attempts = []
|
||||
for key in retry_keys:
|
||||
try:
|
||||
data = self.redis_conn.get(key)
|
||||
if data:
|
||||
attempt_data = json.loads(data.decode('utf-8') if isinstance(data, bytes) else data)
|
||||
|
||||
# Format timestamp for display
|
||||
attempt_time = attempt_data.get('timestamp')
|
||||
if attempt_time:
|
||||
from changedetectionio.notification_service import timestamp_to_localtime
|
||||
attempt_data['timestamp_formatted'] = timestamp_to_localtime(attempt_time)
|
||||
|
||||
retry_attempts.append(attempt_data)
|
||||
except Exception as e:
|
||||
logger.debug(f"Error parsing retry attempt from Redis: {e}")
|
||||
|
||||
return retry_attempts
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error loading retry attempts from Redis: {e}")
|
||||
return []
|
||||
|
||||
def store_delivered_notification(self, task_id, notification_data, apprise_logs=None):
|
||||
"""Store delivered notification in Redis as JSON string."""
|
||||
if not self.redis_conn:
|
||||
logger.error("No Redis connection available")
|
||||
return False
|
||||
|
||||
try:
|
||||
timestamp = time.time()
|
||||
|
||||
# Merge all data at top level
|
||||
delivery_data = {
|
||||
'task_id': task_id,
|
||||
'timestamp': timestamp,
|
||||
'apprise_logs': apprise_logs or []
|
||||
}
|
||||
delivery_data.update(notification_data)
|
||||
|
||||
# Store as JSON string in Redis
|
||||
key_prefix = self._get_key_prefix()
|
||||
delivery_key = f"{key_prefix}:delivered:{task_id}"
|
||||
self.redis_conn.set(delivery_key, json.dumps(delivery_data))
|
||||
|
||||
# Set expiration (30 days) to prevent unbounded growth
|
||||
self.redis_conn.expire(delivery_key, 30 * 24 * 60 * 60)
|
||||
|
||||
# Add to sorted set for time-ordered retrieval
|
||||
delivered_index = f"{key_prefix}:delivered:index"
|
||||
self.redis_conn.zadd(delivered_index, {task_id: timestamp})
|
||||
|
||||
logger.debug(f"Stored delivered notification for task {task_id[:8]} in Redis")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error storing delivered notification in Redis: {e}")
|
||||
return False
|
||||
|
||||
def load_delivered_notifications(self):
|
||||
"""Load all delivered notifications from Redis (newest first)."""
|
||||
if not self.redis_conn:
|
||||
logger.debug("No Redis connection available")
|
||||
return []
|
||||
|
||||
try:
|
||||
key_prefix = self._get_key_prefix()
|
||||
delivered_index = f"{key_prefix}:delivered:index"
|
||||
|
||||
# Get task IDs sorted by timestamp (newest first)
|
||||
task_ids = self.redis_conn.zrevrange(delivered_index, 0, -1)
|
||||
|
||||
delivered = []
|
||||
for task_id in task_ids:
|
||||
try:
|
||||
task_id_str = task_id.decode('utf-8') if isinstance(task_id, bytes) else task_id
|
||||
delivery_key = f"{key_prefix}:delivered:{task_id_str}"
|
||||
data = self.redis_conn.get(delivery_key)
|
||||
|
||||
if data:
|
||||
delivery_data = json.loads(data.decode('utf-8') if isinstance(data, bytes) else data)
|
||||
|
||||
# Format timestamp for display
|
||||
delivery_time = delivery_data.get('timestamp')
|
||||
if delivery_time:
|
||||
from changedetectionio.notification_service import timestamp_to_localtime
|
||||
delivery_data['timestamp_formatted'] = timestamp_to_localtime(delivery_time)
|
||||
|
||||
# Add event_id for UI consistency
|
||||
delivery_data['event_id'] = delivery_data.get('task_id', '').replace('delivered-', '')
|
||||
|
||||
delivered.append(delivery_data)
|
||||
except Exception as e:
|
||||
logger.debug(f"Error parsing delivered notification from Redis: {e}")
|
||||
|
||||
return delivered
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error loading delivered notifications from Redis: {e}")
|
||||
return []
|
||||
|
||||
def cleanup_old_retry_attempts(self, cutoff_time):
|
||||
"""Clean up old retry attempts from Redis."""
|
||||
if not self.redis_conn:
|
||||
return 0
|
||||
|
||||
deleted_count = 0
|
||||
try:
|
||||
key_prefix = self._get_key_prefix()
|
||||
pattern = f"{key_prefix}:retry:*"
|
||||
retry_keys = self.redis_conn.keys(pattern)
|
||||
|
||||
for key in retry_keys:
|
||||
try:
|
||||
data = self.redis_conn.get(key)
|
||||
if data:
|
||||
attempt_data = json.loads(data.decode('utf-8') if isinstance(data, bytes) else data)
|
||||
timestamp = attempt_data.get('timestamp', 0)
|
||||
|
||||
if timestamp < cutoff_time:
|
||||
self.redis_conn.delete(key)
|
||||
deleted_count += 1
|
||||
except Exception as e:
|
||||
logger.debug(f"Error checking retry attempt key: {e}")
|
||||
|
||||
if deleted_count > 0:
|
||||
logger.info(f"Cleaned up {deleted_count} old retry attempts from Redis")
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error cleaning up old retry attempts from Redis: {e}")
|
||||
|
||||
return deleted_count
|
||||
|
||||
def cleanup_old_delivered_notifications(self, cutoff_time):
|
||||
"""Clean up old delivered notifications from Redis."""
|
||||
if not self.redis_conn:
|
||||
return 0
|
||||
|
||||
deleted_count = 0
|
||||
try:
|
||||
key_prefix = self._get_key_prefix()
|
||||
delivered_index = f"{key_prefix}:delivered:index"
|
||||
|
||||
# Get all task IDs with timestamp < cutoff_time
|
||||
old_task_ids = self.redis_conn.zrangebyscore(delivered_index, 0, cutoff_time)
|
||||
|
||||
for task_id in old_task_ids:
|
||||
try:
|
||||
task_id_str = task_id.decode('utf-8') if isinstance(task_id, bytes) else task_id
|
||||
delivery_key = f"{key_prefix}:delivered:{task_id_str}"
|
||||
|
||||
# Delete the data key
|
||||
self.redis_conn.delete(delivery_key)
|
||||
|
||||
# Remove from sorted set
|
||||
self.redis_conn.zrem(delivered_index, task_id)
|
||||
|
||||
deleted_count += 1
|
||||
except Exception as e:
|
||||
logger.debug(f"Error deleting old delivered notification: {e}")
|
||||
|
||||
if deleted_count > 0:
|
||||
logger.info(f"Cleaned up {deleted_count} old delivered notifications from Redis")
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error cleaning up old delivered notifications from Redis: {e}")
|
||||
|
||||
return deleted_count
|
||||
|
||||
def clear_retry_attempts(self, watch_uuid):
|
||||
"""Clear all retry attempts for a specific watch from Redis."""
|
||||
if not self.redis_conn or not watch_uuid:
|
||||
return 0
|
||||
|
||||
try:
|
||||
key_prefix = self._get_key_prefix()
|
||||
|
||||
# Find all retry attempt keys for this watch
|
||||
pattern = f"{key_prefix}:retry:{watch_uuid}:*"
|
||||
retry_keys = self.redis_conn.keys(pattern)
|
||||
|
||||
cleared_count = 0
|
||||
if retry_keys:
|
||||
self.redis_conn.delete(*retry_keys)
|
||||
cleared_count = len(retry_keys)
|
||||
|
||||
if cleared_count > 0:
|
||||
logger.debug(f"Cleared {cleared_count} retry attempts for watch {watch_uuid[:8]} from Redis")
|
||||
|
||||
return cleared_count
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error clearing retry attempts for watch {watch_uuid} from Redis: {e}")
|
||||
return 0
|
||||
|
||||
def clear_all_data(self):
|
||||
"""Clear all retry attempts and delivered notifications from Redis."""
|
||||
if not self.redis_conn:
|
||||
return {'retry_attempts': 0, 'delivered': 0}
|
||||
|
||||
try:
|
||||
key_prefix = self._get_key_prefix()
|
||||
|
||||
# Count and delete retry attempts
|
||||
retry_pattern = f"{key_prefix}:retry:*"
|
||||
retry_keys = self.redis_conn.keys(retry_pattern)
|
||||
retry_count = len(retry_keys)
|
||||
if retry_keys:
|
||||
self.redis_conn.delete(*retry_keys)
|
||||
|
||||
# Count and delete delivered notifications
|
||||
delivered_pattern = f"{key_prefix}:delivered:*"
|
||||
delivered_keys = self.redis_conn.keys(delivered_pattern)
|
||||
delivered_count = len(delivered_keys)
|
||||
if delivered_keys:
|
||||
self.redis_conn.delete(*delivered_keys)
|
||||
|
||||
return {
|
||||
'retry_attempts': retry_count,
|
||||
'delivered': delivered_count
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error clearing Redis task data: {e}")
|
||||
return {'retry_attempts': 0, 'delivered': 0}
|
||||
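For orientation, a minimal redis-py snippet showing the key layout the manager above produces; the connection URL and the default storage name 'changedetection-notifications' are assumptions:

import redis

r = redis.Redis.from_url('redis://localhost:6379/0')  # illustrative connection
prefix = 'changedetection-notifications:task_data'

# Retry attempts: one JSON string per attempt, keyed by watch UUID and attempt number
retry_keys = r.keys(f'{prefix}:retry:*')

# Delivered notifications: JSON strings plus a sorted-set index ordered by timestamp
newest_first = r.zrevrange(f'{prefix}:delivered:index', 0, 9)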
369
changedetectionio/notification/task_data/sqlite_storage.py
Normal file
@@ -0,0 +1,369 @@
|
||||
"""
|
||||
SQLiteStorage backend task data manager for Huey notifications.
|
||||
|
||||
For SQLite-based storage (local disk only, not network storage).
|
||||
|
||||
Uses native SQLite tables to store retry attempts and delivered notifications
|
||||
as JSON blobs for better performance and atomicity.
|
||||
"""
|
||||
|
||||
import os
|
||||
import json
|
||||
import sqlite3
|
||||
import time
|
||||
from loguru import logger
|
||||
from .base import HueyTaskDataStorageManager
|
||||
|
||||
|
||||
class SqliteTaskDataStorageManager(HueyTaskDataStorageManager):
|
||||
"""Task data manager for SQLiteStorage backend - uses SQLite tables for storage."""
|
||||
|
||||
def __init__(self, storage, storage_path=None):
|
||||
super().__init__(storage, storage_path)
|
||||
self._init_tables()
|
||||
|
||||
@property
|
||||
def storage_path(self):
|
||||
"""
|
||||
Get storage path by extracting directory from SQLiteStorage's 'filename' attribute.
|
||||
|
||||
Returns:
|
||||
str: Storage path (directory containing the SQLite database), or None if unavailable
|
||||
"""
|
||||
# Use explicit path if provided (for testing)
|
||||
if self._explicit_storage_path:
|
||||
return self._explicit_storage_path
|
||||
|
||||
# SQLiteStorage has a 'filename' attribute pointing to the .db file
|
||||
db_filename = getattr(self.storage, 'filename', None)
|
||||
if not db_filename:
|
||||
logger.warning("SQLiteStorage has no 'filename' attribute")
|
||||
return None
|
||||
|
||||
# Extract directory from database filename
|
||||
storage_path = os.path.dirname(db_filename)
|
||||
|
||||
if storage_path:
|
||||
logger.debug(f"SQLiteStorage path (from database directory): {storage_path}")
|
||||
else:
|
||||
logger.warning(f"Could not extract directory from SQLite filename: {db_filename}")
|
||||
|
||||
return storage_path
|
||||
|
||||
@property
|
||||
def db_filename(self):
|
||||
"""Get the SQLite database filename."""
|
||||
return getattr(self.storage, 'filename', None)
|
||||
|
||||
def _get_connection(self):
|
||||
"""Get SQLite database connection."""
|
||||
if not self.db_filename:
|
||||
raise ValueError("No SQLite database filename available")
|
||||
return sqlite3.connect(self.db_filename)
|
||||
|
||||
def _init_tables(self):
|
||||
"""Initialize SQLite tables for retry attempts and delivered notifications."""
|
||||
if not self.db_filename:
|
||||
logger.warning("Cannot initialize tables - no database filename")
|
||||
return
|
||||
|
||||
try:
|
||||
conn = self._get_connection()
|
||||
cursor = conn.cursor()
|
||||
|
||||
# Table for retry attempts (stores JSON blobs)
|
||||
cursor.execute("""
|
||||
CREATE TABLE IF NOT EXISTS notification_retry_attempts (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
watch_uuid TEXT NOT NULL,
|
||||
attempt_number INTEGER NOT NULL,
|
||||
timestamp REAL NOT NULL,
|
||||
data_json TEXT NOT NULL,
|
||||
created_at REAL DEFAULT (strftime('%s', 'now'))
|
||||
)
|
||||
""")
|
||||
|
||||
# Index for fast lookups by watch_uuid
|
||||
cursor.execute("""
|
||||
CREATE INDEX IF NOT EXISTS idx_retry_watch_uuid
|
||||
ON notification_retry_attempts(watch_uuid)
|
||||
""")
|
||||
|
||||
# Table for delivered notifications (stores JSON blobs)
|
||||
cursor.execute("""
|
||||
CREATE TABLE IF NOT EXISTS notification_delivered (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
task_id TEXT NOT NULL UNIQUE,
|
||||
watch_uuid TEXT,
|
||||
timestamp REAL NOT NULL,
|
||||
data_json TEXT NOT NULL,
|
||||
created_at REAL DEFAULT (strftime('%s', 'now'))
|
||||
)
|
||||
""")
|
||||
|
||||
# Index for fast lookups and sorting by timestamp
|
||||
cursor.execute("""
|
||||
CREATE INDEX IF NOT EXISTS idx_delivered_timestamp
|
||||
ON notification_delivered(timestamp DESC)
|
||||
""")
|
||||
|
||||
conn.commit()
|
||||
conn.close()
|
||||
logger.debug("Initialized SQLite tables for notification task data")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error initializing SQLite tables: {e}")
|
||||
|
||||
def store_retry_attempt(self, watch_uuid, notification_data, error_message):
|
||||
"""Store retry attempt in SQLite table as JSON blob."""
|
||||
try:
|
||||
conn = self._get_connection()
|
||||
cursor = conn.cursor()
|
||||
|
||||
# Get current attempt number
|
||||
cursor.execute(
|
||||
"SELECT COUNT(*) FROM notification_retry_attempts WHERE watch_uuid = ?",
|
||||
(watch_uuid,)
|
||||
)
|
||||
attempt_number = cursor.fetchone()[0] + 1
|
||||
|
||||
# Extract payload if present
|
||||
payload = notification_data.pop('payload', None) if isinstance(notification_data, dict) else None
|
||||
|
||||
# Prepare retry data
|
||||
timestamp = time.time()
|
||||
retry_data = {
|
||||
'watch_uuid': watch_uuid,
|
||||
'timestamp': timestamp,
|
||||
'attempt_number': attempt_number,
|
||||
'error': error_message,
|
||||
'error_message': error_message,
|
||||
'notification_data': notification_data,
|
||||
'payload': payload
|
||||
}
|
||||
|
||||
# Store as JSON blob
|
||||
cursor.execute("""
|
||||
INSERT INTO notification_retry_attempts
|
||||
(watch_uuid, attempt_number, timestamp, data_json)
|
||||
VALUES (?, ?, ?, ?)
|
||||
""", (watch_uuid, attempt_number, timestamp, json.dumps(retry_data)))
|
||||
|
||||
conn.commit()
|
||||
conn.close()
|
||||
|
||||
logger.debug(f"Stored retry attempt #{attempt_number} for watch {watch_uuid[:8]} in SQLite")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error storing retry attempt in SQLite: {e}")
|
||||
return False
|
||||
|
||||
def load_retry_attempts(self, watch_uuid):
|
||||
"""Load all retry attempts for a watch from SQLite table."""
|
||||
try:
|
||||
conn = self._get_connection()
|
||||
cursor = conn.cursor()
|
||||
|
||||
cursor.execute("""
|
||||
SELECT data_json FROM notification_retry_attempts
|
||||
WHERE watch_uuid = ?
|
||||
ORDER BY attempt_number ASC
|
||||
""", (watch_uuid,))
|
||||
|
||||
retry_attempts = []
|
||||
for row in cursor.fetchall():
|
||||
try:
|
||||
attempt_data = json.loads(row[0])
|
||||
|
||||
# Format timestamp for display
|
||||
attempt_time = attempt_data.get('timestamp')
|
||||
if attempt_time:
|
||||
from changedetectionio.notification_service import timestamp_to_localtime
|
||||
attempt_data['timestamp_formatted'] = timestamp_to_localtime(attempt_time)
|
||||
|
||||
retry_attempts.append(attempt_data)
|
||||
except Exception as e:
|
||||
logger.debug(f"Error parsing retry attempt JSON: {e}")
|
||||
|
||||
conn.close()
|
||||
return retry_attempts
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error loading retry attempts from SQLite: {e}")
|
||||
return []
|
||||
|
||||
def store_delivered_notification(self, task_id, notification_data, apprise_logs=None):
|
||||
"""Store delivered notification in SQLite table as JSON blob."""
|
||||
try:
|
||||
conn = self._get_connection()
|
||||
cursor = conn.cursor()
|
||||
|
||||
timestamp = time.time()
|
||||
watch_uuid = notification_data.get('watch_uuid')
|
||||
|
||||
# Merge all data at top level
|
||||
delivery_data = {
|
||||
'task_id': task_id,
|
||||
'timestamp': timestamp,
|
||||
'apprise_logs': apprise_logs or []
|
||||
}
|
||||
delivery_data.update(notification_data)
|
||||
|
||||
# Store as JSON blob
|
||||
cursor.execute("""
|
||||
INSERT INTO notification_delivered
|
||||
(task_id, watch_uuid, timestamp, data_json)
|
||||
VALUES (?, ?, ?, ?)
|
||||
""", (task_id, watch_uuid, timestamp, json.dumps(delivery_data)))
|
||||
|
||||
conn.commit()
|
||||
conn.close()
|
||||
|
||||
logger.debug(f"Stored delivered notification for task {task_id[:8]} in SQLite")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error storing delivered notification in SQLite: {e}")
|
||||
return False
|
||||
|
||||
def load_delivered_notifications(self):
|
||||
"""Load all delivered notifications from SQLite table."""
|
||||
try:
|
||||
conn = self._get_connection()
|
||||
cursor = conn.cursor()
|
||||
|
||||
cursor.execute("""
|
||||
SELECT data_json FROM notification_delivered
|
||||
ORDER BY timestamp DESC
|
||||
""")
|
||||
|
||||
delivered = []
|
||||
for row in cursor.fetchall():
|
||||
try:
|
||||
delivery_data = json.loads(row[0])
|
||||
|
||||
# Format timestamp for display
|
||||
delivery_time = delivery_data.get('timestamp')
|
||||
if delivery_time:
|
||||
from changedetectionio.notification_service import timestamp_to_localtime
|
||||
delivery_data['timestamp_formatted'] = timestamp_to_localtime(delivery_time)
|
||||
|
||||
# Add event_id for UI consistency
|
||||
delivery_data['event_id'] = delivery_data.get('task_id', '').replace('delivered-', '')
|
||||
|
||||
delivered.append(delivery_data)
|
||||
except Exception as e:
|
||||
logger.debug(f"Error parsing delivered notification JSON: {e}")
|
||||
|
||||
conn.close()
|
||||
return delivered
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error loading delivered notifications from SQLite: {e}")
|
||||
return []
|
||||
|
||||
def cleanup_old_retry_attempts(self, cutoff_time):
|
||||
"""Clean up old retry attempts from SQLite table."""
|
||||
try:
|
||||
conn = self._get_connection()
|
||||
cursor = conn.cursor()
|
||||
|
||||
cursor.execute("""
|
||||
DELETE FROM notification_retry_attempts
|
||||
WHERE timestamp < ?
|
||||
""", (cutoff_time,))
|
||||
|
||||
deleted_count = cursor.rowcount
|
||||
conn.commit()
|
||||
conn.close()
|
||||
|
||||
if deleted_count > 0:
|
||||
logger.info(f"Cleaned up {deleted_count} old retry attempts from SQLite")
|
||||
|
||||
return deleted_count
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error cleaning up old retry attempts from SQLite: {e}")
|
||||
return 0
|
||||
|
||||
def cleanup_old_delivered_notifications(self, cutoff_time):
|
||||
"""Clean up old delivered notifications from SQLite table."""
|
||||
try:
|
||||
conn = self._get_connection()
|
||||
cursor = conn.cursor()
|
||||
|
||||
cursor.execute("""
|
||||
DELETE FROM notification_delivered
|
||||
WHERE timestamp < ?
|
||||
""", (cutoff_time,))
|
||||
|
||||
deleted_count = cursor.rowcount
|
||||
conn.commit()
|
||||
conn.close()
|
||||
|
||||
if deleted_count > 0:
|
||||
logger.info(f"Cleaned up {deleted_count} old delivered notifications from SQLite")
|
||||
|
||||
return deleted_count
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error cleaning up old delivered notifications from SQLite: {e}")
|
||||
return 0
|
||||
|
||||
def clear_retry_attempts(self, watch_uuid):
|
||||
"""Clear all retry attempts for a specific watch from SQLite."""
|
||||
if not watch_uuid:
|
||||
return 0
|
||||
|
||||
try:
|
||||
conn = self._get_connection()
|
||||
cursor = conn.cursor()
|
||||
|
||||
# Delete all retry attempts for this watch
|
||||
cursor.execute(
|
||||
"DELETE FROM notification_retry_attempts WHERE watch_uuid = ?",
|
||||
(watch_uuid,)
|
||||
)
|
||||
|
||||
cleared_count = cursor.rowcount
|
||||
conn.commit()
|
||||
conn.close()
|
||||
|
||||
if cleared_count > 0:
|
||||
logger.debug(f"Cleared {cleared_count} retry attempts for watch {watch_uuid[:8]} from SQLite")
|
||||
|
||||
return cleared_count
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Error clearing retry attempts for watch {watch_uuid} from SQLite: {e}")
|
||||
return 0
|
||||
|
||||
def clear_all_data(self):
|
||||
"""Clear all retry attempts and delivered notifications from SQLite."""
|
||||
try:
|
||||
conn = self._get_connection()
|
||||
cursor = conn.cursor()
|
||||
|
||||
# Count before deletion
|
||||
cursor.execute("SELECT COUNT(*) FROM notification_retry_attempts")
|
||||
retry_count = cursor.fetchone()[0]
|
||||
|
||||
cursor.execute("SELECT COUNT(*) FROM notification_delivered")
|
||||
delivered_count = cursor.fetchone()[0]
|
||||
|
||||
# Delete all
|
||||
cursor.execute("DELETE FROM notification_retry_attempts")
|
||||
cursor.execute("DELETE FROM notification_delivered")
|
||||
|
||||
conn.commit()
|
||||
conn.close()
|
||||
|
||||
return {
|
||||
'retry_attempts': retry_count,
|
||||
'delivered': delivered_count
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error clearing SQLite task data: {e}")
|
||||
return {'retry_attempts': 0, 'delivered': 0}
|
||||
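A small inspection sketch for the tables created above; the database path is an assumption (in practice it comes from the Huey SQLiteStorage 'filename' attribute):

import json
import sqlite3

conn = sqlite3.connect('/datastore/huey/huey.db')  # illustrative path
cursor = conn.cursor()
cursor.execute("SELECT data_json FROM notification_delivered ORDER BY timestamp DESC LIMIT 5")
for (data_json,) in cursor.fetchall():
    event = json.loads(data_json)
    print(event.get('task_id'), event.get('timestamp'))
conn.close()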
1194
changedetectionio/notification/task_queue/__init__.py
Normal file
File diff suppressed because it is too large
127
changedetectionio/notification/task_queue/base.py
Normal file
@@ -0,0 +1,127 @@
|
||||
"""
|
||||
Abstract base class for Huey storage backend task managers.
|
||||
"""
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
|
||||
class HueyTaskManager(ABC):
|
||||
"""
|
||||
Abstract base class for Huey storage backend operations.
|
||||
|
||||
Provides a polymorphic interface for storage-specific operations like:
|
||||
- Enumerating results (failed notifications)
|
||||
- Deleting results
|
||||
- Counting pending notifications
|
||||
- Clearing all notifications
|
||||
|
||||
Each storage backend (FileStorage, SqliteStorage, RedisStorage) has its own
|
||||
concrete implementation that knows how to interact with that specific storage.
|
||||
"""
|
||||
|
||||
def __init__(self, storage, storage_path=None):
|
||||
"""
|
||||
Initialize task manager with storage instance.
|
||||
|
||||
Args:
|
||||
storage: Huey storage instance
|
||||
storage_path: Optional path for file-based storage
|
||||
"""
|
||||
self.storage = storage
|
||||
self.storage_path = storage_path
|
||||
|
||||
@abstractmethod
|
||||
def enumerate_results(self):
|
||||
"""
|
||||
Enumerate all results from storage.
|
||||
|
||||
Returns:
|
||||
dict: {task_id: result_data} for all stored results
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def delete_result(self, task_id):
|
||||
"""
|
||||
Delete a result from storage.
|
||||
|
||||
Args:
|
||||
task_id: Task ID to delete
|
||||
|
||||
Returns:
|
||||
bool: True if deleted successfully, False otherwise
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def count_storage_items(self):
|
||||
"""
|
||||
Count items in storage (queue + schedule).
|
||||
|
||||
Returns:
|
||||
tuple: (queue_count, schedule_count)
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def clear_all_notifications(self):
|
||||
"""
|
||||
Clear all notifications (queue, schedule, results, metadata).
|
||||
|
||||
Returns:
|
||||
dict: Counts of cleared items by type
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def store_task_metadata(self, task_id, metadata):
|
||||
"""
|
||||
Store task metadata for later retrieval.
|
||||
|
||||
Args:
|
||||
task_id: Task ID
|
||||
metadata: Metadata dictionary to store
|
||||
|
||||
Returns:
|
||||
bool: True if stored successfully, False otherwise
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_task_metadata(self, task_id):
|
||||
"""
|
||||
Retrieve task metadata.
|
||||
|
||||
Args:
|
||||
task_id: Task ID
|
||||
|
||||
Returns:
|
||||
dict: Metadata dictionary or None if not found
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def delete_task_metadata(self, task_id):
|
||||
"""
|
||||
Delete task metadata.
|
||||
|
||||
Args:
|
||||
task_id: Task ID
|
||||
|
||||
Returns:
|
||||
bool: True if deleted successfully, False otherwise
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def cleanup_old_retry_attempts(self, cutoff_time):
|
||||
"""
|
||||
Clean up retry attempt records older than cutoff_time.
|
||||
|
||||
Args:
|
||||
cutoff_time: Unix timestamp - delete records older than this
|
||||
|
||||
Returns:
|
||||
int: Number of retry attempts deleted
|
||||
"""
|
||||
pass
|
||||
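A minimal sketch of how a caller could pick the matching concrete manager for a given Huey storage backend; the class-name checks are an assumption, since the real wiring lives in task_queue/__init__.py whose diff is suppressed above:

def build_task_manager(storage, storage_path=None):
    # Hypothetical dispatcher over the concrete managers defined in this package
    backend = type(storage).__name__
    if backend == 'RedisStorage':
        from .redis_storage import RedisStorageTaskManager
        return RedisStorageTaskManager(storage, storage_path=storage_path)
    if backend == 'SqliteStorage':
        from .sqlite_storage import SqliteStorageTaskManager
        return SqliteStorageTaskManager(storage, storage_path=storage_path)
    from .file_storage import FileStorageTaskManager
    return FileStorageTaskManager(storage, storage_path=storage_path)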
384
changedetectionio/notification/task_queue/file_storage.py
Normal file
@@ -0,0 +1,384 @@
|
||||
"""
|
||||
FileStorage backend task manager for Huey notifications.
|
||||
|
||||
This is the default backend, optimized for NAS/CIFS compatibility.
|
||||
|
||||
Enhancements:
|
||||
- Atomic file writes (prevents corruption on crash/power failure)
|
||||
- JSON corruption detection and recovery
|
||||
- Lost-found directory for corrupted files
|
||||
"""
|
||||
|
||||
from loguru import logger
|
||||
|
||||
from .base import HueyTaskManager
|
||||
|
||||
import os
|
||||
|
||||
|
||||
def _atomic_json_write(filepath, data):
|
||||
"""
|
||||
Atomic JSON write - never leaves partial/corrupted files.
|
||||
|
||||
Critical for aerospace-grade reliability:
|
||||
- Writes to temp file first
|
||||
- Forces data to disk (fsync)
|
||||
- Atomic rename (POSIX guarantees atomicity)
|
||||
|
||||
If process crashes mid-write, you either get:
|
||||
- Old complete file (rename didn't happen)
|
||||
- New complete file (rename succeeded)
|
||||
Never a partial/corrupted file.
|
||||
|
||||
Args:
|
||||
filepath: Target file path
|
||||
data: Data to write (will be JSON encoded)
|
||||
|
||||
Returns:
|
||||
True on success
|
||||
|
||||
Raises:
|
||||
Exception on write failure
|
||||
"""
|
||||
import tempfile
|
||||
import json
|
||||
|
||||
directory = os.path.dirname(filepath)
|
||||
|
||||
# Write to temp file in same directory (same filesystem = atomic rename)
|
||||
fd, temp_path = tempfile.mkstemp(dir=directory, prefix='.tmp_', suffix='.json')
|
||||
|
||||
try:
|
||||
with os.fdopen(fd, 'w') as f:
|
||||
json.dump(data, f, indent=2)
|
||||
f.flush()
|
||||
os.fsync(f.fileno()) # Force to disk before rename
|
||||
|
||||
# Atomic rename (POSIX guarantees atomicity)
|
||||
# If crash happens here, worst case is orphaned temp file
|
||||
os.replace(temp_path, filepath)
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
# Clean up temp file on error
|
||||
try:
|
||||
os.unlink(temp_path)
|
||||
except:
|
||||
pass
|
||||
raise
|
||||
|
||||
|
||||
def _safe_json_load(filepath, schema_name, storage_path):
|
||||
"""
|
||||
Load JSON with corruption detection and recovery.
|
||||
|
||||
Corrupted files are moved to lost-found for forensic analysis.
|
||||
|
||||
Args:
|
||||
filepath: JSON file to load
|
||||
schema_name: Schema type (for lost-found directory)
|
||||
storage_path: Root storage path
|
||||
|
||||
Returns:
|
||||
Parsed JSON data or None if corrupted
|
||||
"""
|
||||
import json
|
||||
import shutil
|
||||
import time
|
||||
|
||||
try:
|
||||
with open(filepath, 'r') as f:
|
||||
return json.load(f)
|
||||
|
||||
except (json.JSONDecodeError, EOFError) as e:
|
||||
# Corrupted or incomplete JSON file
|
||||
file_size = os.path.getsize(filepath) if os.path.exists(filepath) else 0
|
||||
filename = os.path.basename(filepath)
|
||||
logger.warning(f"Corrupted {schema_name} file: {filename} ({file_size} bytes) - moving to lost-found")
|
||||
|
||||
try:
|
||||
lost_found_dir = os.path.join(storage_path, 'lost-found', schema_name)
|
||||
os.makedirs(lost_found_dir, exist_ok=True)
|
||||
|
||||
timestamp = int(time.time())
|
||||
lost_found_path = os.path.join(lost_found_dir, f"{filename}.{timestamp}.corrupted")
|
||||
|
||||
shutil.move(filepath, lost_found_path)
|
||||
logger.info(f"Moved corrupted {schema_name} file to {lost_found_path}")
|
||||
|
||||
except Exception as move_err:
|
||||
logger.error(f"Unable to move corrupted {schema_name} file: {move_err}")
|
||||
|
||||
return None
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Unable to load {schema_name} file {filepath}: {e}")
|
||||
return None
|
||||
|
||||
|
||||
class FileStorageTaskManager(HueyTaskManager):
|
||||
"""Task manager for FileStorage backend (default, NAS-safe)."""
|
||||
|
||||
def enumerate_results(self):
|
||||
"""Enumerate results by walking filesystem directories."""
|
||||
import os
|
||||
import pickle
|
||||
import struct
|
||||
import time
|
||||
|
||||
results = {}
|
||||
|
||||
if not self.storage_path:
|
||||
return results
|
||||
|
||||
results_dir = os.path.join(self.storage_path, 'results')
|
||||
|
||||
if not os.path.exists(results_dir):
|
||||
return results
|
||||
|
||||
# Walk through all subdirectories to find result files
|
||||
for root, dirs, files in os.walk(results_dir):
|
||||
for filename in files:
|
||||
if filename.startswith('.'):
|
||||
continue
|
||||
|
||||
filepath = os.path.join(root, filename)
|
||||
try:
|
||||
# Read and unpickle the result
|
||||
# Huey FileStorage format: 4-byte length + task_id + pickled data
|
||||
with open(filepath, 'rb') as f:
|
||||
# Read the task ID header (length-prefixed)
|
||||
task_id_len_bytes = f.read(4)
|
||||
if len(task_id_len_bytes) < 4:
|
||||
raise EOFError("Incomplete header")
|
||||
task_id_len = struct.unpack('>I', task_id_len_bytes)[0]
|
||||
task_id_bytes = f.read(task_id_len)
|
||||
if len(task_id_bytes) < task_id_len:
|
||||
raise EOFError("Incomplete task ID")
|
||||
task_id = task_id_bytes.decode('utf-8')
|
||||
|
||||
# Now unpickle the result data
|
||||
result_data = pickle.load(f)
|
||||
results[task_id] = result_data
|
||||
except (pickle.UnpicklingError, EOFError) as e:
|
||||
# Corrupted or incomplete result file
|
||||
file_size = os.path.getsize(filepath)
|
||||
logger.warning(f"Corrupted result file {filename} ({file_size} bytes) - moving to lost-found.")
|
||||
try:
|
||||
import shutil
|
||||
lost_found_dir = os.path.join(self.storage_path, 'lost-found', 'results')
|
||||
os.makedirs(lost_found_dir, exist_ok=True)
|
||||
|
||||
timestamp = int(time.time())
|
||||
lost_found_path = os.path.join(lost_found_dir, f"{filename}.{timestamp}.corrupted")
|
||||
|
||||
shutil.move(filepath, lost_found_path)
|
||||
logger.info(f"Moved corrupted file to {lost_found_path}")
|
||||
except Exception as move_err:
|
||||
logger.error(f"Unable to move corrupted file: {move_err}")
|
||||
except Exception as e:
|
||||
logger.debug(f"Unable to read result file {filename}: {e}")
|
||||
|
||||
return results
|
||||
|
||||
def delete_result(self, task_id):
|
||||
"""Delete result file from filesystem."""
|
||||
import hashlib
|
||||
|
||||
if not self.storage_path:
|
||||
return False
|
||||
|
||||
results_dir = os.path.join(self.storage_path, 'results')
|
||||
|
||||
# Huey uses MD5 hash to create subdirectories
|
||||
task_id_bytes = task_id.encode('utf-8')
|
||||
hex_hash = hashlib.md5(task_id_bytes).hexdigest()
|
||||
|
||||
# FileStorage creates subdirectories based on first 2 chars of hash
|
||||
subdir = hex_hash[:2]
|
||||
result_file = os.path.join(results_dir, subdir, task_id)
|
||||
|
||||
if os.path.exists(result_file):
|
||||
os.remove(result_file)
|
||||
logger.debug(f"Deleted result file for task {task_id}")
|
||||
return True
|
||||
else:
|
||||
logger.debug(f"Result file not found for task {task_id}")
|
||||
return False
|
||||
|
||||
def count_storage_items(self):
|
||||
"""Count items by walking filesystem directories."""
|
||||
queue_count = 0
|
||||
schedule_count = 0
|
||||
|
||||
if not self.storage_path:
|
||||
return queue_count, schedule_count
|
||||
|
||||
try:
|
||||
# Count queue files
|
||||
queue_dir = os.path.join(self.storage_path, 'queue')
|
||||
if os.path.exists(queue_dir):
|
||||
for root, dirs, files in os.walk(queue_dir):
|
||||
queue_count += len([f for f in files if not f.startswith('.')])
|
||||
|
||||
# Count schedule files
|
||||
schedule_dir = os.path.join(self.storage_path, 'schedule')
|
||||
if os.path.exists(schedule_dir):
|
||||
for root, dirs, files in os.walk(schedule_dir):
|
||||
schedule_count += len([f for f in files if not f.startswith('.')])
|
||||
except Exception as e:
|
||||
logger.debug(f"FileStorage count error: {e}")
|
||||
|
||||
return queue_count, schedule_count
|
||||
|
||||
def clear_all_notifications(self):
|
||||
"""Clear all notification files from filesystem."""
|
||||
cleared = {
|
||||
'queue': 0,
|
||||
'schedule': 0,
|
||||
'results': 0,
|
||||
'retry_attempts': 0,
|
||||
'task_metadata': 0,
|
||||
'delivered': 0
|
||||
}
|
||||
|
||||
if not self.storage_path:
|
||||
return cleared
|
||||
|
||||
# Clear queue
|
||||
queue_dir = os.path.join(self.storage_path, 'queue')
|
||||
if os.path.exists(queue_dir):
|
||||
for root, dirs, files in os.walk(queue_dir):
|
||||
for f in files:
|
||||
if not f.startswith('.'):
|
||||
os.remove(os.path.join(root, f))
|
||||
cleared['queue'] += 1
|
||||
|
||||
# Clear schedule
|
||||
schedule_dir = os.path.join(self.storage_path, 'schedule')
|
||||
if os.path.exists(schedule_dir):
|
||||
for root, dirs, files in os.walk(schedule_dir):
|
||||
for f in files:
|
||||
if not f.startswith('.'):
|
||||
os.remove(os.path.join(root, f))
|
||||
cleared['schedule'] += 1
|
||||
|
||||
# Clear results
|
||||
results_dir = os.path.join(self.storage_path, 'results')
|
||||
if os.path.exists(results_dir):
|
||||
for root, dirs, files in os.walk(results_dir):
|
||||
for f in files:
|
||||
if not f.startswith('.'):
|
||||
os.remove(os.path.join(root, f))
|
||||
cleared['results'] += 1
|
||||
|
||||
# Clear retry attempts
|
||||
attempts_dir = os.path.join(self.storage_path, 'retry_attempts')
|
||||
if os.path.exists(attempts_dir):
|
||||
for f in os.listdir(attempts_dir):
|
||||
if f.endswith('.json'):
|
||||
os.remove(os.path.join(attempts_dir, f))
|
||||
cleared['retry_attempts'] += 1
|
||||
|
||||
# Clear task metadata
|
||||
metadata_dir = os.path.join(self.storage_path, 'task_metadata')
|
||||
if os.path.exists(metadata_dir):
|
||||
for f in os.listdir(metadata_dir):
|
||||
if f.endswith('.json'):
|
||||
os.remove(os.path.join(metadata_dir, f))
|
||||
cleared['task_metadata'] += 1
|
||||
|
||||
# Clear delivered (success) notifications
|
||||
success_dir = os.path.join(self.storage_path, 'success')
|
||||
if os.path.exists(success_dir):
|
||||
for f in os.listdir(success_dir):
|
||||
if f.startswith('success-') and f.endswith('.json'):
|
||||
os.remove(os.path.join(success_dir, f))
|
||||
cleared['delivered'] += 1
|
||||
|
||||
return cleared
|
||||
|
||||
def store_task_metadata(self, task_id, metadata):
|
||||
"""Store task metadata as JSON file with atomic write."""
|
||||
import time
|
||||
|
||||
if not self.storage_path:
|
||||
return False
|
||||
|
||||
try:
|
||||
metadata_dir = os.path.join(self.storage_path, 'task_metadata')
|
||||
os.makedirs(metadata_dir, exist_ok=True)
|
||||
|
||||
metadata_file = os.path.join(metadata_dir, f"{task_id}.json")
|
||||
metadata_with_id = {
|
||||
'task_id': task_id,
|
||||
'timestamp': time.time(),
|
||||
**metadata
|
||||
}
|
||||
|
||||
# Use atomic write to prevent corruption
|
||||
_atomic_json_write(metadata_file, metadata_with_id)
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Unable to store task metadata: {e}")
|
||||
return False
|
||||
|
||||
def get_task_metadata(self, task_id):
|
||||
"""Retrieve task metadata from JSON file with corruption handling."""
|
||||
if not self.storage_path:
|
||||
return None
|
||||
|
||||
try:
|
||||
metadata_dir = os.path.join(self.storage_path, 'task_metadata')
|
||||
metadata_file = os.path.join(metadata_dir, f"{task_id}.json")
|
||||
|
||||
if os.path.exists(metadata_file):
|
||||
# Use safe JSON load with corruption handling
|
||||
return _safe_json_load(metadata_file, 'task_metadata', self.storage_path)
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"Unable to load task metadata for {task_id}: {e}")
|
||||
|
||||
return None
|
||||
|
||||
def delete_task_metadata(self, task_id):
|
||||
"""Delete task metadata JSON file."""
|
||||
if not self.storage_path:
|
||||
return False
|
||||
|
||||
try:
|
||||
metadata_dir = os.path.join(self.storage_path, 'task_metadata')
|
||||
metadata_file = os.path.join(metadata_dir, f"{task_id}.json")
|
||||
|
||||
if os.path.exists(metadata_file):
|
||||
os.remove(metadata_file)
|
||||
return True
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.debug(f"Unable to delete task metadata for {task_id}: {e}")
|
||||
return False
|
||||
|
||||
def cleanup_old_retry_attempts(self, cutoff_time):
|
||||
"""Clean up old retry attempt files from filesystem."""
|
||||
if not self.storage_path:
|
||||
return 0
|
||||
|
||||
deleted_count = 0
|
||||
try:
|
||||
attempts_dir = os.path.join(self.storage_path, 'retry_attempts')
|
||||
if os.path.exists(attempts_dir):
|
||||
for filename in os.listdir(attempts_dir):
|
||||
if filename.endswith('.json'):
|
||||
filepath = os.path.join(attempts_dir, filename)
|
||||
try:
|
||||
file_mtime = os.path.getmtime(filepath)
|
||||
if file_mtime < cutoff_time:
|
||||
os.remove(filepath)
|
||||
deleted_count += 1
|
||||
except Exception as fe:
|
||||
logger.debug(f"Unable to delete old retry attempt file {filename}: {fe}")
|
||||
except Exception as e:
|
||||
logger.debug(f"Error cleaning up old retry attempts: {e}")
|
||||
|
||||
return deleted_count
|
||||
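As a quick illustration of the on-disk result layout that delete_result above relies on (the datastore root is illustrative):

import hashlib
import os

def result_file_path(storage_path, task_id):
    # Huey's FileStorage shards results into subdirectories named after the first
    # two hex characters of the MD5 of the task id, mirroring delete_result above
    subdir = hashlib.md5(task_id.encode('utf-8')).hexdigest()[:2]
    return os.path.join(storage_path, 'results', subdir, task_id)

print(result_file_path('/datastore/huey', '123e4567-e89b-12d3-a456-426614174000'))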
245
changedetectionio/notification/task_queue/redis_storage.py
Normal file
@@ -0,0 +1,245 @@
|
||||
"""
|
||||
RedisStorage backend task manager for Huey notifications.
|
||||
|
||||
For distributed deployments with Redis.
|
||||
|
||||
Enhancements:
|
||||
- Redis provides atomic operations (ACID-like semantics)
|
||||
- Hybrid mode: queue data in Redis, retry attempts/success in JSON files
|
||||
- JSON files use atomic writes from file_storage module
|
||||
"""
|
||||
|
||||
from loguru import logger
|
||||
|
||||
from .base import HueyTaskManager
|
||||
|
||||
|
||||
class RedisStorageTaskManager(HueyTaskManager):
|
||||
"""Task manager for RedisStorage backend (distributed deployments)."""
|
||||
|
||||
def __init__(self, storage, storage_path=None):
|
||||
"""
|
||||
Initialize Redis task manager.
|
||||
|
||||
Args:
|
||||
storage: Huey Redis storage instance
|
||||
storage_path: Directory for file-based data (retry attempts, success)
|
||||
"""
|
||||
super().__init__(storage)
|
||||
self.storage_path = storage_path
|
||||
|
||||
def enumerate_results(self):
|
||||
import pickle
|
||||
"""Enumerate results using Redis commands."""
|
||||
results = {}
|
||||
|
||||
if not hasattr(self.storage, 'conn'):
|
||||
return results
|
||||
|
||||
try:
|
||||
# Redis stores results with keys like "{name}:result:{task_id}"
|
||||
name = self.storage.name
|
||||
pattern = f"{name}:result:*"
|
||||
|
||||
# Get all result keys
|
||||
result_keys = self.storage.conn.keys(pattern)
|
||||
|
||||
for key in result_keys:
|
||||
# Extract task_id from key
|
||||
task_id = key.decode('utf-8').split(':')[-1]
|
||||
|
||||
# Get result data
|
||||
result_data = self.storage.conn.get(key)
|
||||
if result_data:
|
||||
results[task_id] = pickle.loads(result_data)
|
||||
except Exception as e:
|
||||
logger.error(f"Error enumerating Redis results: {e}")
|
||||
|
||||
return results
|
||||
|
||||
def delete_result(self, task_id):
|
||||
"""Delete result from Redis."""
|
||||
if not hasattr(self.storage, 'conn'):
|
||||
return False
|
||||
|
||||
try:
|
||||
name = self.storage.name
|
||||
result_key = f"{name}:result:{task_id}"
|
||||
deleted = self.storage.conn.delete(result_key) > 0
|
||||
logger.debug(f"Deleted result from Redis for task {task_id}: {deleted}")
|
||||
return deleted
|
||||
except Exception as e:
|
||||
logger.error(f"Error deleting Redis result: {e}")
|
||||
return False
|
||||
|
||||
def count_storage_items(self):
|
||||
"""Count items using Redis commands."""
|
||||
queue_count = 0
|
||||
schedule_count = 0
|
||||
|
||||
if not hasattr(self.storage, 'conn'):
|
||||
return queue_count, schedule_count
|
||||
|
||||
try:
|
||||
name = self.storage.name
|
||||
|
||||
# Queue is a list
|
||||
queue_count = self.storage.conn.llen(f"{name}:queue")
|
||||
|
||||
# Schedule is a sorted set
|
||||
schedule_count = self.storage.conn.zcard(f"{name}:schedule")
|
||||
except Exception as e:
|
||||
logger.debug(f"Redis count error: {e}")
|
||||
|
||||
return queue_count, schedule_count
|
||||
|
||||
def clear_all_notifications(self):
|
||||
"""Clear all notifications from Redis including task data stored natively in Redis."""
|
||||
cleared = {
|
||||
'queue': 0,
|
||||
'schedule': 0,
|
||||
'results': 0,
|
||||
'retry_attempts': 0,
|
||||
'task_metadata': 0,
|
||||
'delivered': 0
|
||||
}
|
||||
|
||||
if not hasattr(self.storage, 'conn'):
|
||||
return cleared
|
||||
|
||||
try:
|
||||
name = self.storage.name
|
||||
|
||||
# Clear queue (list)
|
||||
cleared['queue'] = self.storage.conn.llen(f"{name}:queue")
|
||||
self.storage.conn.delete(f"{name}:queue")
|
||||
|
||||
# Clear schedule (sorted set)
|
||||
cleared['schedule'] = self.storage.conn.zcard(f"{name}:schedule")
|
||||
self.storage.conn.delete(f"{name}:schedule")
|
||||
|
||||
# Clear results (keys)
|
||||
result_keys = self.storage.conn.keys(f"{name}:result:*")
|
||||
if result_keys:
|
||||
cleared['results'] = len(result_keys)
|
||||
self.storage.conn.delete(*result_keys)
|
||||
|
||||
# Clear metadata (keys)
|
||||
metadata_keys = self.storage.conn.keys(f"{name}:metadata:*")
|
||||
if metadata_keys:
|
||||
cleared['task_metadata'] = len(metadata_keys)
|
||||
self.storage.conn.delete(*metadata_keys)
|
||||
|
||||
# Clear task data (retry attempts and delivered notifications) natively stored in Redis
|
||||
# These are now stored using RedisTaskDataStorageManager with keys like:
|
||||
# - {name}:task_data:retry:{watch_uuid}:{attempt_number}
|
||||
# - {name}:task_data:delivered:{task_id}
|
||||
task_data_keys = self.storage.conn.keys(f"{name}:task_data:*")
|
||||
if task_data_keys:
|
||||
# Count retry vs delivered
|
||||
for key in task_data_keys:
|
||||
key_str = key.decode('utf-8') if isinstance(key, bytes) else key
|
||||
if ':retry:' in key_str:
|
||||
cleared['retry_attempts'] += 1
|
||||
elif ':delivered:' in key_str:
|
||||
cleared['delivered'] += 1
|
||||
|
||||
# Delete all task data keys
|
||||
self.storage.conn.delete(*task_data_keys)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error clearing Redis notifications: {e}")
|
||||
|
||||
return cleared
|
||||
|
||||
def store_task_metadata(self, task_id, metadata):
|
||||
"""Store task metadata in Redis."""
|
||||
import json
|
||||
import time
|
||||
|
||||
if not hasattr(self.storage, 'conn'):
|
||||
return False
|
||||
|
||||
try:
|
||||
name = self.storage.name
|
||||
metadata_key = f"{name}:metadata:{task_id}"
|
||||
|
||||
metadata_with_id = {
|
||||
'task_id': task_id,
|
||||
'timestamp': time.time(),
|
||||
**metadata
|
||||
}
|
||||
|
||||
self.storage.conn.set(metadata_key, json.dumps(metadata_with_id))
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Error storing Redis task metadata: {e}")
|
||||
return False
|
||||
|
||||
def get_task_metadata(self, task_id):
|
||||
"""Retrieve task metadata from Redis."""
|
||||
import json
|
||||
|
||||
if not hasattr(self.storage, 'conn'):
|
||||
return None
|
||||
|
||||
try:
|
||||
name = self.storage.name
|
||||
metadata_key = f"{name}:metadata:{task_id}"
|
||||
|
||||
data = self.storage.conn.get(metadata_key)
|
||||
if data:
|
||||
return json.loads(data.decode('utf-8') if isinstance(data, bytes) else data)
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.debug(f"Error retrieving Redis task metadata: {e}")
|
||||
return None
|
||||
|
||||
def delete_task_metadata(self, task_id):
|
||||
"""Delete task metadata from Redis."""
|
||||
if not hasattr(self.storage, 'conn'):
|
||||
return False
|
||||
|
||||
try:
|
||||
name = self.storage.name
|
||||
metadata_key = f"{name}:metadata:{task_id}"
|
||||
deleted = self.storage.conn.delete(metadata_key) > 0
|
||||
return deleted
|
||||
except Exception as e:
|
||||
logger.debug(f"Error deleting Redis task metadata: {e}")
|
||||
return False
|
||||
|
||||
def cleanup_old_retry_attempts(self, cutoff_time):
|
||||
"""Clean up old retry attempts from Redis."""
|
||||
if not hasattr(self.storage, 'conn'):
|
||||
return 0
|
||||
|
||||
deleted_count = 0
|
||||
try:
|
||||
name = self.storage.name
|
||||
pattern = f"{name}:retry_attempts:*"
|
||||
|
||||
# Get all retry attempt keys
|
||||
retry_keys = self.storage.conn.keys(pattern)
|
||||
|
||||
for key in retry_keys:
|
||||
try:
|
||||
# Get the timestamp from the key's data
|
||||
data = self.storage.conn.get(key)
|
||||
if data:
|
||||
import json
|
||||
attempt_data = json.loads(data.decode('utf-8') if isinstance(data, bytes) else data)
|
||||
timestamp = attempt_data.get('timestamp', 0)
|
||||
|
||||
if timestamp < cutoff_time:
|
||||
self.storage.conn.delete(key)
|
||||
deleted_count += 1
|
||||
except Exception as ke:
|
||||
logger.debug(f"Error checking retry attempt key: {ke}")
|
||||
|
||||
if deleted_count > 0:
|
||||
logger.info(f"Cleaned up {deleted_count} old retry attempts from Redis")
|
||||
except Exception as e:
|
||||
logger.debug(f"Error cleaning up old Redis retry attempts: {e}")
|
||||
|
||||
return deleted_count
|
||||
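The same queue/schedule counting as count_storage_items above, done directly with redis-py for comparison; the storage name and connection URL are assumptions:

import redis

r = redis.Redis.from_url('redis://localhost:6379/0')  # illustrative connection
name = 'changedetection-notifications'
pending = r.llen(f'{name}:queue')        # the queue is a Redis list
scheduled = r.zcard(f'{name}:schedule')  # the schedule is a sorted set
print(f'{pending} queued, {scheduled} scheduled')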
308
changedetectionio/notification/task_queue/sqlite_storage.py
Normal file
@@ -0,0 +1,308 @@
|
||||
"""
|
||||
SQLiteStorage backend task manager for Huey notifications.
|
||||
|
||||
WARNING: Only use on local disk storage, NOT on NFS/CIFS network storage!
|
||||
|
||||
Enhancements:
|
||||
- SQLite provides ACID transactions (atomicity built-in)
|
||||
- Hybrid mode: queue data in SQLite, retry attempts/success in JSON files
|
||||
- JSON files use atomic writes from file_storage module
|
||||
"""
|
||||
|
||||
from loguru import logger
|
||||
|
||||
from .base import HueyTaskManager
|
||||
|
||||
|
||||
class SqliteStorageTaskManager(HueyTaskManager):
|
||||
"""Task manager for SqliteStorage backend (local disk only)."""
|
||||
|
||||
def __init__(self, storage, storage_path=None):
|
||||
"""
|
||||
Initialize SQLite task manager.
|
||||
|
||||
Args:
|
||||
storage: Huey SQLite storage instance
|
||||
storage_path: Directory for file-based data (retry attempts, success)
|
||||
"""
|
||||
super().__init__(storage)
|
||||
self.storage_path = storage_path
|
||||
|
||||
def enumerate_results(self):
|
||||
import pickle
|
||||
import sqlite3
|
||||
"""Enumerate results by querying SQLite database."""
|
||||
results = {}
|
||||
|
||||
if not hasattr(self.storage, 'filename') or self.storage.filename is None:
|
||||
logger.warning("SQLite storage has no filename, cannot enumerate results")
|
||||
return results
|
||||
|
||||
try:
|
||||
conn = sqlite3.connect(self.storage.filename)
|
||||
cursor = conn.cursor()
|
||||
|
||||
# SQLite storage uses 'kv' table for results, not 'results'
|
||||
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='kv'")
|
||||
if not cursor.fetchone():
|
||||
conn.close()
|
||||
return results
|
||||
|
||||
# Query all results from kv table
|
||||
# Huey SQLiteStorage stores everything in kv table with queue=<name>
|
||||
cursor.execute("SELECT key, value FROM kv WHERE queue = ?", (self.storage.name,))
|
||||
for row in cursor.fetchall():
|
||||
task_id = row[0]
|
||||
result_data = pickle.loads(row[1])
|
||||
results[task_id] = result_data
|
||||
|
||||
conn.close()
|
||||
except Exception as e:
|
||||
logger.debug(f"Error enumerating SQLite results: {e}")
|
||||
|
||||
return results
|
||||
|
||||
def delete_result(self, task_id):
|
||||
"""Delete result from SQLite database."""
|
||||
if not hasattr(self.storage, 'filename') or self.storage.filename is None:
|
||||
return False
|
||||
import sqlite3
|
||||
try:
|
||||
conn = sqlite3.connect(self.storage.filename)
|
||||
cursor = conn.cursor()
|
||||
# SQLite stores results in kv table
|
||||
cursor.execute("DELETE FROM kv WHERE queue = ? AND key = ?",
|
||||
(self.storage.name, task_id))
|
||||
conn.commit()
|
||||
deleted = cursor.rowcount > 0
|
||||
conn.close()
|
||||
logger.debug(f"Deleted result from SQLite for task {task_id}: {deleted}")
|
||||
return deleted
|
||||
except Exception as e:
|
||||
logger.error(f"Error deleting SQLite result: {e}")
|
||||
return False
|
||||
|
||||
def count_storage_items(self):
|
||||
"""Count items by querying SQLite database."""
|
||||
queue_count = 0
|
||||
schedule_count = 0
|
||||
|
||||
if not hasattr(self.storage, 'filename') or self.storage.filename is None:
|
||||
return queue_count, schedule_count
|
||||
import sqlite3
|
||||
try:
|
||||
conn = sqlite3.connect(self.storage.filename)
|
||||
cursor = conn.cursor()
|
||||
|
||||
# SQLite uses 'task' table for queue, 'schedule' for scheduled items
|
||||
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='task'")
|
||||
if cursor.fetchone():
|
||||
cursor.execute("SELECT COUNT(*) FROM task WHERE queue = ?", (self.storage.name,))
|
||||
queue_count = cursor.fetchone()[0]
|
||||
|
||||
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='schedule'")
|
||||
if cursor.fetchone():
|
||||
cursor.execute("SELECT COUNT(*) FROM schedule WHERE queue = ?", (self.storage.name,))
|
||||
schedule_count = cursor.fetchone()[0]
|
||||
|
||||
conn.close()
|
||||
except Exception as e:
|
||||
logger.debug(f"SQLite count error: {e}")
|
||||
|
||||
return queue_count, schedule_count
|
||||
|
||||
def clear_all_notifications(self):
|
||||
"""Clear all notifications from SQLite database and file-based retry attempts/success."""
|
||||
cleared = {
|
||||
'queue': 0,
|
||||
'schedule': 0,
|
||||
'results': 0,
|
||||
'retry_attempts': 0,
|
||||
'task_metadata': 0,
|
||||
'delivered': 0
|
||||
}
|
||||
|
||||
if not hasattr(self.storage, 'filename'):
|
||||
return cleared
|
||||
import sqlite3
|
||||
import os
|
||||
|
||||
try:
|
||||
conn = sqlite3.connect(self.storage.filename)
|
||||
cursor = conn.cursor()
|
||||
|
||||
# SQLite uses 'task' table for queue
|
||||
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='task'")
|
||||
if cursor.fetchone():
|
||||
cursor.execute("DELETE FROM task WHERE queue = ?", (self.storage.name,))
|
||||
cleared['queue'] = cursor.rowcount
|
||||
|
||||
# SQLite uses 'schedule' table
|
||||
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='schedule'")
|
||||
if cursor.fetchone():
|
||||
cursor.execute("DELETE FROM schedule WHERE queue = ?", (self.storage.name,))
|
||||
cleared['schedule'] = cursor.rowcount
|
||||
|
||||
# SQLite uses 'kv' table for results
|
||||
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='kv'")
|
||||
if cursor.fetchone():
|
||||
cursor.execute("DELETE FROM kv WHERE queue = ?", (self.storage.name,))
|
||||
cleared['results'] = cursor.rowcount
|
||||
|
||||
# Check and clear task_metadata table if it exists
|
||||
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='task_metadata'")
|
||||
if cursor.fetchone():
|
||||
cursor.execute("DELETE FROM task_metadata")
|
||||
cleared['task_metadata'] = cursor.rowcount
|
||||
|
||||
conn.commit()
|
||||
conn.close()
|
||||
|
||||
# Clear file-based retry attempts and success notifications
|
||||
# These are stored as JSON files even in SQLite mode (hybrid approach)
|
||||
if self.storage_path:
|
||||
# Clear retry attempts
|
||||
attempts_dir = os.path.join(self.storage_path, 'retry_attempts')
|
||||
if os.path.exists(attempts_dir):
|
||||
for f in os.listdir(attempts_dir):
|
||||
if f.endswith('.json'):
|
||||
os.remove(os.path.join(attempts_dir, f))
|
||||
cleared['retry_attempts'] += 1
|
||||
|
||||
# Clear delivered (success) notifications
|
||||
success_dir = os.path.join(self.storage_path, 'success')
|
||||
if os.path.exists(success_dir):
|
||||
for f in os.listdir(success_dir):
|
||||
if f.startswith('success-') and f.endswith('.json'):
|
||||
os.remove(os.path.join(success_dir, f))
|
||||
cleared['delivered'] += 1
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error clearing SQLite notifications: {e}")
|
||||
|
||||
return cleared
|
||||
|
||||
def store_task_metadata(self, task_id, metadata):
|
||||
"""Store task metadata in SQLite database."""
|
||||
import sqlite3
|
||||
import json
|
||||
import time
|
||||
|
||||
if not hasattr(self.storage, 'filename'):
|
||||
return False
|
||||
|
||||
try:
|
||||
conn = sqlite3.connect(self.storage.filename)
|
||||
cursor = conn.cursor()
|
||||
|
||||
# Create table if it doesn't exist
|
||||
cursor.execute("""
|
||||
CREATE TABLE IF NOT EXISTS task_metadata (
|
||||
task_id TEXT PRIMARY KEY,
|
||||
timestamp REAL,
|
||||
metadata TEXT
|
||||
)
|
||||
""")
|
||||
|
||||
metadata_with_id = {
|
||||
'task_id': task_id,
|
||||
'timestamp': time.time(),
|
||||
**metadata
|
||||
}
|
||||
|
||||
cursor.execute(
|
||||
"INSERT OR REPLACE INTO task_metadata (task_id, timestamp, metadata) VALUES (?, ?, ?)",
|
||||
(task_id, time.time(), json.dumps(metadata_with_id))
|
||||
)
|
||||
conn.commit()
|
||||
conn.close()
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Error storing SQLite task metadata: {e}")
|
||||
return False
|
||||
|
||||
def get_task_metadata(self, task_id):
|
||||
"""Retrieve task metadata from SQLite database."""
|
||||
import sqlite3
|
||||
import json
|
||||
|
||||
if not hasattr(self.storage, 'filename'):
|
||||
return None
|
||||
|
||||
try:
|
||||
conn = sqlite3.connect(self.storage.filename)
|
||||
cursor = conn.cursor()
|
||||
|
||||
# Check if table exists
|
||||
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='task_metadata'")
|
||||
if not cursor.fetchone():
|
||||
conn.close()
|
||||
return None
|
||||
|
||||
cursor.execute("SELECT metadata FROM task_metadata WHERE task_id = ?", (task_id,))
|
||||
row = cursor.fetchone()
|
||||
conn.close()
|
||||
|
||||
if row:
|
||||
return json.loads(row[0])
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.debug(f"Error retrieving SQLite task metadata: {e}")
|
||||
return None
|
||||
|
||||
def delete_task_metadata(self, task_id):
|
||||
"""Delete task metadata from SQLite database."""
|
||||
import sqlite3
|
||||
|
||||
if not hasattr(self.storage, 'filename'):
|
||||
return False
|
||||
|
||||
try:
|
||||
conn = sqlite3.connect(self.storage.filename)
|
||||
cursor = conn.cursor()
|
||||
|
||||
# Check if table exists
|
||||
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='task_metadata'")
|
||||
if not cursor.fetchone():
|
||||
conn.close()
|
||||
return False
|
||||
|
||||
cursor.execute("DELETE FROM task_metadata WHERE task_id = ?", (task_id,))
|
||||
conn.commit()
|
||||
deleted = cursor.rowcount > 0
|
||||
conn.close()
|
||||
return deleted
|
||||
except Exception as e:
|
||||
logger.debug(f"Error deleting SQLite task metadata: {e}")
|
||||
return False
|
||||
|
||||
def cleanup_old_retry_attempts(self, cutoff_time):
|
||||
"""Clean up old retry attempts from SQLite database."""
|
||||
import sqlite3
|
||||
|
||||
if not hasattr(self.storage, 'filename'):
|
||||
return 0
|
||||
|
||||
deleted_count = 0
|
||||
try:
|
||||
conn = sqlite3.connect(self.storage.filename)
|
||||
cursor = conn.cursor()
|
||||
|
||||
# Check if retry_attempts table exists
|
||||
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='retry_attempts'")
|
||||
if not cursor.fetchone():
|
||||
conn.close()
|
||||
return 0
|
||||
|
||||
# Delete old retry attempts
|
||||
cursor.execute("DELETE FROM retry_attempts WHERE timestamp < ?", (cutoff_time,))
|
||||
deleted_count = cursor.rowcount
|
||||
conn.commit()
|
||||
conn.close()
|
||||
|
||||
if deleted_count > 0:
|
||||
logger.info(f"Cleaned up {deleted_count} old retry attempts from SQLite")
|
||||
except Exception as e:
|
||||
logger.debug(f"Error cleaning up old SQLite retry attempts: {e}")
|
||||
|
||||
return deleted_count
|
||||
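The module docstring above notes that retry attempts and delivered notifications are kept as JSON files written atomically by the project's file_storage module, even in SQLite mode. That module is not shown in this diff; the usual pattern for such an atomic write (a sketch, not the project's actual code) is to stage the data in a temporary file and then `os.replace()` it into place:

```python
import json
import os
import tempfile

def atomic_write_json(path, data):
    """Write JSON so readers never observe a half-written file."""
    dir_name = os.path.dirname(path) or "."
    # Stage in the same directory so the final rename stays on one filesystem
    fd, tmp_path = tempfile.mkstemp(dir=dir_name, suffix=".tmp")
    try:
        with os.fdopen(fd, "w") as f:
            json.dump(data, f)
            f.flush()
            os.fsync(f.fileno())
        os.replace(tmp_path, path)  # atomic rename over the destination
    finally:
        if os.path.exists(tmp_path):
            os.unlink(tmp_path)
```

This is also why the docstring warns against NFS/CIFS: rename atomicity and fsync guarantees are only reliable on local disks.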
@@ -117,7 +117,8 @@ class NotificationContextData(dict):

def timestamp_to_localtime(timestamp):
    # Format the date using locale-aware formatting with timezone
    dt = datetime.datetime.fromtimestamp(int(timestamp))
    # Unix timestamps are always UTC, so use utcfromtimestamp to avoid double conversion
    dt = datetime.datetime.utcfromtimestamp(int(timestamp))
    dt = dt.replace(tzinfo=pytz.UTC)

    # Get local timezone-aware datetime
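For clarity, a small standalone illustration of why this hunk switches to `utcfromtimestamp`: a Unix epoch is already UTC, so reading it with `fromtimestamp` interprets it in the server's local zone, and then tagging that result with `pytz.UTC` mislabels local wall-clock time as UTC (the "double conversion" the comment refers to):

```python
import datetime
import pytz

epoch = 1700000000  # Unix timestamps are defined in UTC

# Wrong on a non-UTC server: local wall-clock time mislabelled as UTC
wrong = datetime.datetime.fromtimestamp(epoch).replace(tzinfo=pytz.UTC)

# Correct: read the epoch as UTC, then convert to whatever local zone is wanted
right = datetime.datetime.utcfromtimestamp(epoch).replace(tzinfo=pytz.UTC)
local = right.astimezone(pytz.timezone("Europe/Amsterdam"))
```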
@@ -218,9 +219,10 @@ class NotificationService:
    Standalone notification service that handles all notification functionality
    previously embedded in the update_worker class
    """

    def __init__(self, datastore, notification_q):

    def __init__(self, datastore, notification_q=None):
        self.datastore = datastore
        # notification_q is deprecated - now using Huey task queue directly
        self.notification_q = notification_q

    def queue_notification_for_watch(self, n_object: NotificationContextData, watch, date_index_from=-2, date_index_to=-1):
@@ -250,7 +252,6 @@ class NotificationService:
        if n_object.get('notification_format') == USE_SYSTEM_DEFAULT_NOTIFICATION_FORMAT_FOR_WATCH:
            n_object['notification_format'] = self.datastore.data['settings']['application'].get('notification_format')


        triggered_text = ''
        if len(trigger_text):
            from . import html_tools
@@ -273,12 +274,11 @@ class NotificationService:
            triggered_text=triggered_text,
            timestamp_changed=dates[date_index_to]))

        if self.notification_q:
            logger.debug("Queued notification for sending")
            self.notification_q.put(n_object)
        else:
            logger.debug("Not queued, no queue defined. Just returning processed data")
        return n_object
        # Queue notification to Huey for processing with retry logic
        from changedetectionio.notification.task_queue import queue_notification
        logger.debug("Queuing notification to Huey for sending with retry")
        queue_notification(n_object)
        return n_object

    def send_content_changed_notification(self, watch_uuid):
        """
@@ -302,22 +302,17 @@ class NotificationService:

        # Prefer - Individual watch settings > Tag settings > Global settings (in that order)
        # this change probably not needed?
        n_object['notification_urls'] = _check_cascading_vars(self.datastore, 'notification_urls', watch)

        n_object['notification_title'] = _check_cascading_vars(self.datastore,'notification_title', watch)
        n_object['notification_body'] = _check_cascading_vars(self.datastore,'notification_body', watch)
        n_object['notification_format'] = _check_cascading_vars(self.datastore,'notification_format', watch)


        # (Individual watch) Only prepare to notify if the rules above matched
        queued = False
        if n_object and n_object.get('notification_urls'):
            queued = True
            count = watch.get('notification_alert_count', 0) + 1
            self.datastore.update_watch(uuid=watch_uuid, update_obj={'notification_alert_count': count})
            self.queue_notification_for_watch(n_object=n_object, watch=watch)

            count = watch.get('notification_alert_count', 0) + 1
            self.datastore.update_watch(uuid=watch_uuid, update_obj={'notification_alert_count': count})

            self.queue_notification_for_watch(n_object=n_object, watch=watch)

        return queued
        return True

    def send_filter_failure_notification(self, watch_uuid):
        """
@@ -361,8 +356,9 @@ Thanks - Your omniscient changedetection.io installation.
                'uuid': watch_uuid,
                'screenshot': None
            })
            self.notification_q.put(n_object)
            logger.debug(f"Sent filter not found notification for {watch_uuid}")
            from changedetectionio.notification.task_queue import send_notification_task
            send_notification_task(n_object)
            logger.debug(f"Queued filter not found notification for {watch_uuid}")
        else:
            logger.debug(f"NOT sending filter not found notification for {watch_uuid} - no notification URLs")

@@ -409,13 +405,18 @@ Thanks - Your omniscient changedetection.io installation.
                'watch_url': watch['url'],
                'uuid': watch_uuid
            })
            self.notification_q.put(n_object)
            logger.error(f"Sent step not found notification for {watch_uuid}")
            from changedetectionio.notification.task_queue import send_notification_task
            send_notification_task(n_object)
            logger.error(f"Queued step not found notification for {watch_uuid}")


# Convenience functions for creating notification service instances
def create_notification_service(datastore, notification_q):
def create_notification_service(datastore, notification_q=None):
    """
    Factory function to create a NotificationService instance

    Args:
        datastore: The ChangeDetectionStore instance
        notification_q: Deprecated, no longer used (kept for backward compatibility)
    """
    return NotificationService(datastore, notification_q)
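A short illustration of how a caller interacts with the service after this change. The import path is assumed for the sketch (only the function names come from the diff above), and the `notification_q` argument can simply be omitted now that delivery goes through Huey:

```python
# Hypothetical caller; the import path is an assumption, not confirmed by this diff.
from changedetectionio.notification_service import create_notification_service

service = create_notification_service(datastore)  # no janus queue needed any more
sent = service.send_content_changed_notification(watch_uuid="some-watch-uuid")
# The rendered notification object is handed to the Huey task queue,
# which performs delivery and applies the retry policy.
```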
@@ -90,12 +90,8 @@ export HIDE_REFERER=True
pytest -vv -s --maxfail=1 tests/test_access_control.py

# Re-run a few tests that will trigger brotli based storage
export SNAPSHOT_BROTLI_COMPRESSION_THRESHOLD=5
pytest -vv -s --maxfail=1 tests/test_access_control.py
REMOVE_REQUESTS_OLD_SCREENSHOTS=false pytest tests/test_notification.py
pytest -vv -s --maxfail=1 tests/test_backend.py
pytest -vv -s --maxfail=1 tests/test_rss.py
pytest -vv -s --maxfail=1 tests/test_unique_lines.py
SNAPSHOT_BROTLI_COMPRESSION_THRESHOLD=5 pytest -n 30 --dist load -vv -s --maxfail=1 tests/test_access_control.py tests/test_backend.py tests/test_rss.py tests/test_unique_lines.py


# Try high concurrency
FETCH_WORKERS=50 pytest tests/test_history_consistency.py -vv -l -s

changedetectionio/static/images/notification-fail.svg (new file, 10 lines, 1018 B)
@@ -0,0 +1,10 @@
<svg width="20" height="20" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
  <!-- Bell icon (grey outline) -->
  <path d="M18 8C18 6.4087 17.3679 4.88258 16.2426 3.75736C15.1174 2.63214 13.5913 2 12 2C10.4087 2 8.88258 2.63214 7.75736 3.75736C6.63214 4.88258 6 6.4087 6 8C6 15 3 17 3 17H21C21 17 18 15 18 8Z"
        stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" style="opacity: 0.7;"/>
  <path d="M13.73 21C13.5542 21.3031 13.3019 21.5547 12.9982 21.7295C12.6946 21.9044 12.3504 21.9965 12 21.9965C11.6496 21.9965 11.3054 21.9044 11.0018 21.7295C10.6982 21.5547 10.4458 21.3031 10.27 21"
        stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" style="opacity: 0.7;"/>
  <!-- Red exclamation mark badge -->
  <circle cx="17" cy="7" r="5" fill="#dc3545"/>
  <text x="17" y="7" text-anchor="middle" dominant-baseline="central" fill="white" font-size="8" font-weight="bold" font-family="Arial, sans-serif">!</text>
</svg>

changedetectionio/static/js/notification-dashboard.js (new file, 64 lines)
@@ -0,0 +1,64 @@
|
||||
/**
|
||||
* Notification Dashboard - Interactive functionality
|
||||
* Handles timezone conversion, AJAX log fetching, and user interactions
|
||||
*/
|
||||
|
||||
// Global utility function to format Unix timestamp to local timezone
|
||||
window.formatTimestampLocal = function(timestamp) {
|
||||
if (!timestamp) return 'N/A';
|
||||
return new Intl.DateTimeFormat(undefined, {
|
||||
year: 'numeric', month: '2-digit', day: '2-digit',
|
||||
hour: '2-digit', minute: '2-digit', second: '2-digit',
|
||||
timeZoneName: 'short'
|
||||
}).format(timestamp * 1000);
|
||||
};
|
||||
|
||||
$(function() {
|
||||
// Convert retry timestamps to local timezone
|
||||
$('.retry-time[data-timestamp]').each(function() {
|
||||
var timestamp = parseInt($(this).data('timestamp'));
|
||||
if (timestamp) {
|
||||
$(this).text(window.formatTimestampLocal(timestamp));
|
||||
}
|
||||
});
|
||||
|
||||
// Handle notification card clicks to fetch and display logs
|
||||
$('.notification-card').css('cursor', 'pointer').click(function(e) {
|
||||
// Don't trigger if clicking on a button or form
|
||||
if ($(e.target).is('button, input') || $(e.target).closest('form, button').length) return;
|
||||
|
||||
var taskId = $(this).data('task-id');
|
||||
if (!taskId) return;
|
||||
|
||||
// Show loading state
|
||||
$('#last-log-info').show();
|
||||
$('#log-apprise-content').text('Loading...');
|
||||
|
||||
// Fetch log via AJAX
|
||||
var logUrl = $('#log-url-template').data('url').replace('TASK_ID', taskId);
|
||||
$.getJSON(logUrl)
|
||||
.done(function(data) {
|
||||
$('#log-task-id').text(data.task_id);
|
||||
$('#log-watch-url').text(data.watch_url || '').parent().toggle(!!data.watch_url);
|
||||
|
||||
if (data.notification_urls && data.notification_urls.length) {
|
||||
$('#log-notification-urls').html(data.notification_urls.map(url =>
|
||||
'<div class="endpoint-item">• ' + url + '</div>').join(''));
|
||||
$('#log-notification-urls-container').show();
|
||||
} else {
|
||||
$('#log-notification-urls-container').hide();
|
||||
}
|
||||
|
||||
$('#log-apprise-content').text(data.apprise_log || 'No log available');
|
||||
$('#log-error-content').text(data.error || '');
|
||||
$('#log-error-container').toggle(!!data.error);
|
||||
|
||||
// Scroll to log
|
||||
$('#last-log-info')[0].scrollIntoView({ behavior: 'smooth', block: 'nearest' });
|
||||
})
|
||||
.fail(function(xhr) {
|
||||
var error = xhr.responseJSON && xhr.responseJSON.error ? xhr.responseJSON.error : 'Failed to load log';
|
||||
$('#log-apprise-content').text('Error: ' + error);
|
||||
});
|
||||
});
|
||||
});
|
||||
File diff suppressed because it is too large
@@ -22,6 +22,7 @@
@use "parts/socket";
@use "parts/visualselector";
@use "parts/widgets";
@use "parts/notificationsdashboard";
@use "parts/diff_image";
@use "parts/modal";
@use "parts/language";
@@ -33,7 +34,6 @@
@use "parts/login_form";
@use "parts/tabs";


body {
  color: var(--color-text);
  background: var(--color-background-page);
@@ -1063,6 +1063,17 @@ ul {
  opacity: 0.8;
}

#failed-notifications-icon {
  display: none;
}

body.failed-notifications {
  #failed-notifications-icon {
    display: inline-block;
    vertical-align: middle;
  }
}

#bottom-horizontal-offscreen {
  position: fixed;
  bottom: 0;
@@ -1096,5 +1107,3 @@ ul#highlightSnippetActions {
  display: inline-block;
}
}

File diff suppressed because one or more lines are too long
@@ -141,6 +141,7 @@
            <a id="add-email-helper" class="pure-button button-secondary button-xsmall" >Add email <img style="height: 1em; display: inline-block" src="{{url_for('static_content', group='images', filename='email.svg')}}" alt="Add an email address"> </a>
        {% endif %}
        <a href="{{url_for('settings.notification_logs')}}" class="pure-button button-secondary button-xsmall" >Notification debug logs</a>
        <a href="{{url_for('notification_dashboard.dashboard')}}" class="pure-button button-secondary button-xsmall" >Notification Queue</a>
        <br>
        <div id="notification-test-log" style="display: none;"><span class="pure-form-message-inline">Processing..</span></div>
    </div>

@@ -47,7 +47,7 @@
    {% endif %}
  </head>

  <body class="{{extra_classes}}">
  <body class="{{extra_classes}}{% if get_failed_notifications_count() > 0 %} failed-notifications{% endif %}">
    <div class="header">
      <div {% if pure_menu_fixed != False %}class="pure-menu-fixed"{% endif %} style="width: 100%;">
        <div class="home-menu pure-menu pure-menu-horizontal" id="nav-menu">
@@ -81,6 +81,15 @@
              {% include "svgs/search-icon.svg" %}
            </button>
          </li>
          <li class="pure-menu-item" id="failed-notifications-icon">
            <a href="{{ url_for('notification_dashboard.dashboard')}}" class="pure-menu-link" title="Notification Queue - pending, retrying, and failed notifications" >
              <img src="{{url_for('static_content', group='images', filename='notification-fail.svg')}}" alt="Notification Queue" style="height: 22px;">
            </a>
          </li>
          {% else %}
          <li class="pure-menu-item">
            <a href="{{ url_for('ui.ui_edit.edit_page', uuid=uuid, next='diff') }}" class="pure-menu-link">EDIT</a>
          </li>

          {% endif %}
          {% endif %}

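The `get_failed_notifications_count()` helper used in the `<body>` class above is not part of this diff. For orientation only, one plausible way such a template global could be wired up in Flask is a context processor that asks the task-queue module for the size of the dead letter queue; every name below that is not in the diff is an assumption:

```python
# Hypothetical wiring only - the real helper lives elsewhere in the codebase.
from flask import Flask

app = Flask(__name__)

@app.context_processor
def inject_notification_helpers():
    def get_failed_notifications_count():
        # Assumed accessor on the task queue module; the name is illustrative.
        from changedetectionio.notification import task_queue
        try:
            return task_queue.get_failed_notification_count()
        except Exception:
            return 0
    return dict(get_failed_notifications_count=get_failed_notifications_count)
```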
@@ -87,6 +87,7 @@ def measure_memory_usage(request):

def cleanup(datastore_path):
    import glob
    import shutil
    # Unlink test output files
    for g in ["*.txt", "*.json", "*.pdf"]:
        files = glob.glob(os.path.join(datastore_path, g))
@@ -97,6 +98,15 @@ def cleanup(datastore_path):
        if os.path.isfile(f):
            os.unlink(f)

    # Clean up Huey retry_attempts directory to prevent test interference
    retry_attempts_dir = os.path.join(datastore_path, 'notification-queue', 'retry_attempts')
    if os.path.exists(retry_attempts_dir):
        try:
            shutil.rmtree(retry_attempts_dir)
            logger.debug(f"Cleaned up retry_attempts directory: {retry_attempts_dir}")
        except Exception as e:
            logger.warning(f"Error cleaning retry_attempts directory: {e}")

def pytest_addoption(parser):
    """Add custom command-line options for pytest.

@@ -183,13 +193,32 @@ def prepare_test_function(live_server, datastore_path):

    yield

    # Cleanup: Clear watches again after test
    # Cleanup: Clear watches and Huey queue after test
    try:
        datastore.data['watching'] = {}
        datastore.needs_write = True

        # Also clear Huey notification queue to prevent test interference
        from changedetectionio.notification.task_queue import clear_all_notifications
        try:
            clear_all_notifications()
            logger.debug("Cleared Huey notification queue after test")
        except Exception as he:
            logger.debug(f"Could not clear Huey queue: {he}")

    except Exception as e:
        logger.warning(f"Error during datastore cleanup: {e}")

    # Cleanup: Stop Huey consumer and clear queue state
    try:
        from changedetectionio.notification import task_queue
        if hasattr(task_queue, 'consumer_process') and task_queue.consumer_process:
            task_queue.consumer_process.terminate()
            task_queue.consumer_process.join(timeout=2)
            task_queue.consumer_process = None
    except Exception as e:
        logger.warning(f"Error stopping Huey consumer: {e}")


# So the app can also know which test name it was
@pytest.fixture(autouse=True)

@@ -170,16 +170,17 @@ def test_group_tag_notification(client, live_server, measure_memory_usage, datas
|
||||
set_original_response(datastore_path=datastore_path)
|
||||
|
||||
test_url = url_for('test_endpoint', _external=True)
|
||||
res = client.post(
|
||||
url_for("ui.ui_views.form_quick_watch_add"),
|
||||
data={"url": test_url, "tags": 'test-tag, other-tag'},
|
||||
follow_redirects=True
|
||||
)
|
||||
|
||||
assert b"Watch added" in res.data
|
||||
uuid = client.application.config.get('DATASTORE').add_watch(url=test_url, tag='test-tag')
|
||||
|
||||
notification_url = url_for('test_notification_endpoint', _external=True).replace('http', 'json')
|
||||
notification_form_data = {"notification_urls": notification_url,
|
||||
# Force recheck
|
||||
res = client.get(url_for("ui.form_watch_checknow"), follow_redirects=True)
|
||||
assert b'Queued 1 watch for rechecking.' in res.data
|
||||
wait_for_all_checks(client)
|
||||
|
||||
assert len(live_server.app.config['DATASTORE'].data['watching'][uuid]['tags']), "Should have tag associated"
|
||||
dest_notification_url = url_for('test_notification_endpoint', _external=True).replace('http://', 'post://')
|
||||
group_notification_form_data = {"notification_urls": dest_notification_url,
|
||||
"notification_title": "New GROUP TAG ChangeDetection.io Notification - {{watch_url}}",
|
||||
"notification_body": "BASE URL: {{base_url}}\n"
|
||||
"Watch URL: {{watch_url}}\n"
|
||||
@@ -198,20 +199,19 @@ def test_group_tag_notification(client, live_server, measure_memory_usage, datas
|
||||
"notification_screenshot": True,
|
||||
"notification_format": 'text',
|
||||
"title": "test-tag"}
|
||||
|
||||
res = client.post(
|
||||
url_for("tags.form_tag_edit_submit", uuid=get_UUID_for_tag_name(client, name="test-tag")),
|
||||
data=notification_form_data,
|
||||
data=group_notification_form_data,
|
||||
follow_redirects=True
|
||||
)
|
||||
assert b"Updated" in res.data
|
||||
|
||||
wait_for_all_checks(client)
|
||||
|
||||
# Now a change to the watch should trigger a notification
|
||||
set_modified_response(datastore_path=datastore_path)
|
||||
client.get(url_for("ui.form_watch_checknow"), follow_redirects=True)
|
||||
res = client.get(url_for("ui.form_watch_checknow"), follow_redirects=True)
|
||||
assert b'Queued 1 watch for rechecking.' in res.data
|
||||
wait_for_all_checks(client)
|
||||
|
||||
time.sleep(3)
|
||||
|
||||
assert os.path.isfile(os.path.join(datastore_path, "notification.txt"))
|
||||
@@ -226,9 +226,7 @@ def test_group_tag_notification(client, live_server, measure_memory_usage, datas
|
||||
assert test_url in notification_submission
|
||||
assert ':-)' in notification_submission
|
||||
assert "Diff Full: Some initial text" in notification_submission
|
||||
assert "New GROUP TAG ChangeDetection.io" in notification_submission
|
||||
assert "test-tag" in notification_submission
|
||||
assert "other-tag" in notification_submission
|
||||
|
||||
#@todo Test that multiple notifications fired
|
||||
#@todo Test that each of multiple notifications with different settings
|
||||
|
||||
@@ -3,6 +3,12 @@ import time
|
||||
from flask import url_for
|
||||
from .util import set_original_response, set_modified_response, live_server_setup, wait_for_all_checks
|
||||
import logging
|
||||
import pytest
|
||||
|
||||
# Set environment variable at module level to disable retries for dead-letter test
|
||||
# This must be done BEFORE the Flask app/Huey is initialized
|
||||
_original_retry_count = os.environ.get('NOTIFICATION_RETRY_COUNT')
|
||||
os.environ['NOTIFICATION_RETRY_COUNT'] = '0'
|
||||
|
||||
def test_check_notification_error_handling(client, live_server, measure_memory_usage, datastore_path):
|
||||
|
||||
|
||||
changedetectionio/tests/test_notifications_huey.py (new file, 1263 lines)
File diff suppressed because it is too large

changedetectionio/tests/unit/test_huey_filestorage.py (new file, 161 lines)
@@ -0,0 +1,161 @@
|
||||
"""
|
||||
Unit tests for Huey FileStorage task manager.
|
||||
|
||||
Tests the basic functionality of the FileStorage task manager without requiring
|
||||
a full Huey instance or changedetection.io app.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
import tempfile
|
||||
import shutil
|
||||
import os
|
||||
from changedetectionio.notification.task_queue.file_storage import FileStorageTaskManager
|
||||
|
||||
|
||||
class MockStorage:
|
||||
"""Mock storage object to simulate Huey FileStorage."""
|
||||
def __init__(self, path):
|
||||
self.path = path
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def temp_storage_dir():
|
||||
"""Create a temporary directory for testing."""
|
||||
temp_dir = tempfile.mkdtemp()
|
||||
yield temp_dir
|
||||
shutil.rmtree(temp_dir, ignore_errors=True)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def task_manager(temp_storage_dir):
|
||||
"""Create a FileStorageTaskManager instance for testing."""
|
||||
mock_storage = MockStorage(temp_storage_dir)
|
||||
return FileStorageTaskManager(mock_storage, temp_storage_dir)
|
||||
|
||||
|
||||
class TestFileStorageTaskManager:
|
||||
"""Tests for FileStorageTaskManager basic functionality."""
|
||||
|
||||
def test_store_and_get_metadata(self, task_manager):
|
||||
"""Test storing and retrieving task metadata."""
|
||||
task_id = "test-task-123"
|
||||
# Use realistic notification data structure matching actual app usage
|
||||
metadata = {
|
||||
'notification_data': {
|
||||
'watch_url': 'https://example.com/test',
|
||||
'uuid': 'test-watch-uuid-123',
|
||||
'current_snapshot': 'Test content snapshot',
|
||||
'diff': '+ New content added\n- Old content removed',
|
||||
'diff_clean': 'New content added\nOld content removed',
|
||||
'triggered_text': 'price: $99.99',
|
||||
'notification_urls': ['mailto://test@example.com'],
|
||||
'notification_title': 'Change detected on example.com',
|
||||
'notification_body': 'The page has changed',
|
||||
'notification_format': 'HTML'
|
||||
}
|
||||
}
|
||||
|
||||
# Store metadata
|
||||
result = task_manager.store_task_metadata(task_id, metadata)
|
||||
assert result is True, "Should successfully store metadata"
|
||||
|
||||
# Retrieve metadata
|
||||
retrieved = task_manager.get_task_metadata(task_id)
|
||||
assert retrieved is not None, "Should retrieve stored metadata"
|
||||
assert retrieved['task_id'] == task_id
|
||||
assert 'timestamp' in retrieved
|
||||
assert retrieved['notification_data'] == metadata['notification_data']
|
||||
|
||||
def test_delete_metadata(self, task_manager):
|
||||
"""Test deleting task metadata."""
|
||||
task_id = "test-task-456"
|
||||
metadata = {'notification_data': {'test': 'data'}}
|
||||
|
||||
# Store then delete
|
||||
task_manager.store_task_metadata(task_id, metadata)
|
||||
result = task_manager.delete_task_metadata(task_id)
|
||||
assert result is True, "Should successfully delete metadata"
|
||||
|
||||
# Verify it's gone
|
||||
retrieved = task_manager.get_task_metadata(task_id)
|
||||
assert retrieved is None, "Metadata should be deleted"
|
||||
|
||||
def test_delete_nonexistent_metadata(self, task_manager):
|
||||
"""Test deleting metadata that doesn't exist."""
|
||||
result = task_manager.delete_task_metadata("nonexistent-task")
|
||||
assert result is False, "Should return False for nonexistent metadata"
|
||||
|
||||
def test_get_nonexistent_metadata(self, task_manager):
|
||||
"""Test retrieving metadata that doesn't exist."""
|
||||
retrieved = task_manager.get_task_metadata("nonexistent-task")
|
||||
assert retrieved is None, "Should return None for nonexistent metadata"
|
||||
|
||||
def test_count_storage_items_empty(self, task_manager):
|
||||
"""Test counting storage items when empty."""
|
||||
queue_count, schedule_count = task_manager.count_storage_items()
|
||||
assert queue_count == 0, "Empty queue should have 0 items"
|
||||
assert schedule_count == 0, "Empty schedule should have 0 items"
|
||||
|
||||
def test_count_storage_items_with_files(self, task_manager, temp_storage_dir):
|
||||
"""Test counting storage items with files present."""
|
||||
# Create some queue files
|
||||
queue_dir = os.path.join(temp_storage_dir, 'queue')
|
||||
os.makedirs(queue_dir, exist_ok=True)
|
||||
|
||||
for i in range(3):
|
||||
with open(os.path.join(queue_dir, f"task-{i}"), 'w') as f:
|
||||
f.write("test")
|
||||
|
||||
# Create some schedule files
|
||||
schedule_dir = os.path.join(temp_storage_dir, 'schedule')
|
||||
os.makedirs(schedule_dir, exist_ok=True)
|
||||
|
||||
for i in range(2):
|
||||
with open(os.path.join(schedule_dir, f"scheduled-{i}"), 'w') as f:
|
||||
f.write("test")
|
||||
|
||||
queue_count, schedule_count = task_manager.count_storage_items()
|
||||
assert queue_count == 3, "Should count 3 queue items"
|
||||
assert schedule_count == 2, "Should count 2 schedule items"
|
||||
|
||||
def test_clear_all_notifications(self, task_manager, temp_storage_dir):
|
||||
"""Test clearing all notifications."""
|
||||
# Create test files in various directories
|
||||
for subdir in ['queue', 'schedule', 'results']:
|
||||
dir_path = os.path.join(temp_storage_dir, subdir)
|
||||
os.makedirs(dir_path, exist_ok=True)
|
||||
with open(os.path.join(dir_path, 'test-file'), 'w') as f:
|
||||
f.write("test")
|
||||
|
||||
# Create metadata files
|
||||
metadata_dir = os.path.join(temp_storage_dir, 'task_metadata')
|
||||
os.makedirs(metadata_dir, exist_ok=True)
|
||||
with open(os.path.join(metadata_dir, 'test-task.json'), 'w') as f:
|
||||
f.write('{"test": "data"}')
|
||||
|
||||
# Clear all
|
||||
cleared = task_manager.clear_all_notifications()
|
||||
|
||||
assert cleared['queue'] == 1, "Should clear 1 queue file"
|
||||
assert cleared['schedule'] == 1, "Should clear 1 schedule file"
|
||||
assert cleared['results'] == 1, "Should clear 1 result file"
|
||||
assert cleared['task_metadata'] == 1, "Should clear 1 metadata file"
|
||||
|
||||
def test_metadata_file_structure(self, task_manager, temp_storage_dir):
|
||||
"""Test that metadata files are created in the correct structure."""
|
||||
task_id = "test-structure-789"
|
||||
metadata = {'notification_data': {'test': 'value'}}
|
||||
|
||||
task_manager.store_task_metadata(task_id, metadata)
|
||||
|
||||
# Check file exists in correct location
|
||||
expected_path = os.path.join(temp_storage_dir, 'task_metadata', f"{task_id}.json")
|
||||
assert os.path.exists(expected_path), f"Metadata file should exist at {expected_path}"
|
||||
|
||||
# Check file contains valid JSON
|
||||
import json
|
||||
with open(expected_path, 'r') as f:
|
||||
data = json.load(f)
|
||||
assert data['task_id'] == task_id
|
||||
assert 'timestamp' in data
|
||||
assert 'notification_data' in data
|
||||
@@ -1548,6 +1548,65 @@ paths:
|
||||
'500':
|
||||
description: Server error
|
||||
|
||||
/notifications/retry-all:
|
||||
post:
|
||||
operationId: retryAllFailedNotifications
|
||||
tags: [Notification Management]
|
||||
summary: Retry all failed notifications
|
||||
description: |
|
||||
Retry all notifications that failed after exhausting all retry attempts (dead letter queue).
|
||||
|
||||
Failed notifications are removed from the dead letter queue and re-queued with current settings.
|
||||
If they fail again, they will return to the dead letter queue.
|
||||
|
||||
Useful after fixing notification settings (e.g., correcting SMTP server configuration).
|
||||
x-code-samples:
|
||||
- lang: 'curl'
|
||||
source: |
|
||||
curl -X POST "http://localhost:5000/api/v1/notifications/retry-all" \
|
||||
-H "x-api-key: YOUR_API_KEY"
|
||||
- lang: 'Python'
|
||||
source: |
|
||||
import requests
|
||||
|
||||
headers = {'x-api-key': 'YOUR_API_KEY'}
|
||||
response = requests.post('http://localhost:5000/api/v1/notifications/retry-all', headers=headers)
|
||||
print(response.json())
|
||||
responses:
|
||||
'200':
|
||||
description: Retry operation completed
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
status:
|
||||
type: string
|
||||
enum: [success, partial]
|
||||
description: Success if all notifications re-queued, partial if some failed
|
||||
message:
|
||||
type: string
|
||||
description: Human-readable result message
|
||||
details:
|
||||
type: object
|
||||
properties:
|
||||
success:
|
||||
type: integer
|
||||
description: Number of notifications successfully re-queued
|
||||
failed:
|
||||
type: integer
|
||||
description: Number of notifications that failed to re-queue
|
||||
total:
|
||||
type: integer
|
||||
description: Total number of failed notifications processed
|
||||
example:
|
||||
status: "success"
|
||||
message: "Queued 3 notifications for retry"
|
||||
details:
|
||||
success: 3
|
||||
failed: 0
|
||||
total: 3
|
||||
|
||||
/systeminfo:
|
||||
get:
|
||||
operationId: getSystemInfo
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -9,6 +9,7 @@ flask_expects_json~=1.7
flask_restful
flask_cors # For the Chrome extension to operate
janus # Thread-safe async/sync queue bridge
huey ~= 2.5 # Task queue for notification retries with FileHuey/SqliteHuey/RedisHuey support
flask_wtf~=1.2
flask~=3.1
flask-socketio~=5.6.0
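The pinned `huey ~= 2.5` ships the three storage flavours named in the requirement comment. As a rough sketch only (the project's real initialisation code is not shown in this diff, and the environment variable names QUEUE_STORAGE / REDIS_URL are assumptions here), selecting a backend at startup could look like this:

```python
# Illustrative only: mapping an environment setting onto the three Huey flavours
# named in the requirement line above. Names and defaults are assumptions.
import os
from huey import FileHuey, RedisHuey, SqliteHuey

def build_notification_huey(base_dir="./datastore/notification-queue"):
    backend = os.getenv("QUEUE_STORAGE", "file").lower()
    if backend == "redis":
        return RedisHuey("changedetectionio-notifications",
                         url=os.getenv("REDIS_URL", "redis://localhost:6379/0"))
    if backend == "sqlite":
        return SqliteHuey("changedetectionio-notifications",
                          filename=os.path.join(base_dir, "huey.db"))
    # Default: file-backed queue on local disk (not suitable for NFS/CIFS)
    return FileHuey("changedetectionio-notifications", path=base_dir)
```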