Compare commits

...

4 Commits

Author SHA1 Message Date
dgtlmoon
d40773d595 Attempt slim-bookworm ssl3 upgrade 2023-06-17 23:42:31 +02:00
dgtlmoon
a4c620c308 Code - Adding CI test for search (#1626) 2023-06-13 15:03:32 +02:00
dgtlmoon
9434eac72d 0.42.3 2023-06-12 15:28:51 +02:00
dgtlmoon
edb5e20de6 Bug fix - Fixed crash when deleting watch from UI when watch was already manually deleted from datadir (#1623) 2023-06-12 15:10:48 +02:00
4 changed files with 83 additions and 8 deletions

View File

@@ -1,5 +1,5 @@
# pip dependencies install stage
FROM python:3.10-slim as builder
FROM python:3.10-slim-bookworm as builder
# See `cryptography` pin comment in requirements.txt
ARG CRYPTOGRAPHY_DONT_BUILD_RUST=1
@@ -29,10 +29,10 @@ RUN pip install --target=/dependencies playwright~=1.27.1 \
|| echo "WARN: Failed to install Playwright. The application can still run, but the Playwright option will be disabled."
# Final image stage
FROM python:3.10-slim
FROM python:3.10-slim-bookworm
RUN apt-get update && apt-get install -y --no-install-recommends \
libssl1.1 \
libssl3 \
libxslt1.1 \
# For pdftohtml
poppler-utils \

View File

@@ -38,7 +38,7 @@ from flask_paginate import Pagination, get_page_parameter
from changedetectionio import html_tools
from changedetectionio.api import api_v1
__version__ = '0.42.2'
__version__ = '0.42.3'
datastore = None

View File

@@ -204,15 +204,16 @@ class ChangeDetectionStore:
# GitHub #30 also delete history records
for uuid in self.data['watching']:
path = pathlib.Path(os.path.join(self.datastore_path, uuid))
shutil.rmtree(path)
self.needs_write_urgent = True
if os.path.exists(path):
shutil.rmtree(path)
else:
path = pathlib.Path(os.path.join(self.datastore_path, uuid))
shutil.rmtree(path)
if os.path.exists(path):
shutil.rmtree(path)
del self.data['watching'][uuid]
self.needs_write_urgent = True
self.needs_write_urgent = True
# Clone a watch by UUID
def clone(self, uuid):

View File

@@ -0,0 +1,74 @@
from flask import url_for
from .util import set_original_response, set_modified_response, live_server_setup
import time
def test_setup(live_server):
    """One-time module setup: register the app's routes on the pytest live server fixture."""
    live_server_setup(live_server)
def test_basic_search(client, live_server):
    """The index page 'q' search parameter should filter watches by URL and by title."""
    first_url = 'https://localhost:12300?first-result=1'
    second_url = 'https://localhost:5000?second-result=1'

    # Import both watches in one go
    res = client.post(
        url_for("import_page"),
        data={"urls": "\r\n".join([first_url, second_url])},
        follow_redirects=True
    )
    assert b"2 Imported" in res.data

    # Search by a fragment of the first URL — only that watch should be listed
    res = client.get(url_for("index") + "?q=first-res")
    assert first_url.encode('utf-8') in res.data
    assert second_url.encode('utf-8') not in res.data

    # Give the first watch a title, then search by that title instead
    res = client.post(
        url_for("edit_page", uuid="first"),
        data={"title": "xxx-title", "url": first_url, "tag": "", "headers": "", 'fetch_backend': "html_requests"},
        follow_redirects=True
    )
    assert b"Updated watch." in res.data

    res = client.get(url_for("index") + "?q=xxx-title")
    assert first_url.encode('utf-8') in res.data
    assert second_url.encode('utf-8') not in res.data
def test_search_in_tag_limit(client, live_server):
    """Search should still match by URL and title when watches carry tags.

    Fix: the failure messages on the ``not in`` assertions previously echoed
    ``urls[0]`` even though the assertion was about ``urls[1]``, which would
    print a misleading URL on failure. The repeated ``split(' ')`` calls are
    also hoisted into named variables.
    """
    # "<url> <tag>" pairs — the import page treats text after the space as the tag
    urls = ['https://localhost:12300?first-result=1 tag-one',
            'https://localhost:5000?second-result=1 tag-two'
            ]
    first_url, first_tag = urls[0].split(' ')
    second_url = urls[1].split(' ')[0]

    res = client.post(
        url_for("import_page"),
        data={"urls": "\r\n".join(urls)},
        follow_redirects=True
    )
    assert b"2 Imported" in res.data

    # By URL fragment — only the first watch should appear
    res = client.get(url_for("index") + "?q=first-res")
    assert first_url.encode('utf-8') in res.data, first_url.encode('utf-8')
    assert second_url.encode('utf-8') not in res.data, second_url.encode('utf-8')

    # By title — set a title on the first watch, then search for it
    res = client.post(
        url_for("edit_page", uuid="first"),
        data={"title": "xxx-title", "url": first_url, "tag": first_tag, "headers": "",
              'fetch_backend': "html_requests"},
        follow_redirects=True
    )
    assert b"Updated watch." in res.data

    res = client.get(url_for("index") + "?q=xxx-title")
    assert first_url.encode('utf-8') in res.data, first_url.encode('utf-8')
    assert second_url.encode('utf-8') not in res.data, second_url.encode('utf-8')