Mirror of https://github.com/dgtlmoon/changedetection.io.git
Compare commits: windows-te...3482-JSON- (3 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 9d22c86e9d | |
| | 7fe504f3e9 | |
| | ca140c559e | |
```diff
@@ -303,70 +303,92 @@ def _get_stripped_text_from_json_match(match):
     return stripped_text_from_html
 
 
+def extract_json_blob_from_html(content, ensure_is_ldjson_info_type, json_filter):
+    from bs4 import BeautifulSoup
+    stripped_text_from_html = ''
+
+    # Foreach <script json></script> blob.. just return the first that matches json_filter
+    # As a last resort, try to parse the whole <body>
+    soup = BeautifulSoup(content, 'html.parser')
+
+    if ensure_is_ldjson_info_type:
+        bs_result = soup.find_all('script', {"type": "application/ld+json"})
+    else:
+        bs_result = soup.find_all('script')
+        bs_result += soup.find_all('body')
+
+    bs_jsons = []
+
+    for result in bs_result:
+        # result.text is how bs4 magically strips JSON from the body
+        content_start = result.text.lstrip("\ufeff").strip()[:100] if result.text else ''
+        # Skip empty tags, and things that dont even look like JSON
+        if not result.text or not (content_start[0] == '{' or content_start[0] == '['):
+            continue
+        try:
+            json_data = json.loads(result.text)
+            bs_jsons.append(json_data)
+        except json.JSONDecodeError:
+            # Skip objects which cannot be parsed
+            continue
+
+    if not bs_jsons:
+        raise JSONNotFound("No parsable JSON found in this document")
+
+    for json_data in bs_jsons:
+        stripped_text_from_html = _parse_json(json_data, json_filter)
+
+        if ensure_is_ldjson_info_type:
+            # Could sometimes be list, string or something else random
+            if isinstance(json_data, dict):
+                # If it has LD JSON 'key' @type, and @type is 'product', and something was found for the search
+                # (Some sites have multiple of the same ld+json @type='product', but some have the review part, some have the 'price' part)
+                # @type could also be a list although non-standard ("@type": ["Product", "SubType"],)
+                # LD_JSON auto-extract also requires some content PLUS the ldjson to be present
+                # 1833 - could be either str or dict, should not be anything else
+
+                t = json_data.get('@type')
+                if t and stripped_text_from_html:
+
+                    if isinstance(t, str) and t.lower() == ensure_is_ldjson_info_type.lower():
+                        break
+                    # The non-standard part, some have a list
+                    elif isinstance(t, list):
+                        if ensure_is_ldjson_info_type.lower() in [x.lower().strip() for x in t]:
+                            break
+
+        elif stripped_text_from_html:
+            break
+
+    return stripped_text_from_html
+
+
 # content - json
 # json_filter - ie json:$..price
 # ensure_is_ldjson_info_type - str "product", optional, "@type == product" (I dont know how to do that as a json selector)
 def extract_json_as_string(content, json_filter, ensure_is_ldjson_info_type=None):
-    from bs4 import BeautifulSoup
-
     stripped_text_from_html = False
     # https://github.com/dgtlmoon/changedetection.io/pull/2041#issuecomment-1848397161w
     # Try to parse/filter out the JSON, if we get some parser error, then maybe it's embedded within HTML tags
-    try:
-        # .lstrip("\ufeff") strings ByteOrderMark from UTF8 and still lets the UTF work
-        stripped_text_from_html = _parse_json(json.loads(content.lstrip("\ufeff") ), json_filter)
-    except json.JSONDecodeError as e:
-        logger.warning(str(e))
 
-        # Foreach <script json></script> blob.. just return the first that matches json_filter
-        # As a last resort, try to parse the whole <body>
-        soup = BeautifulSoup(content, 'html.parser')
-
-        if ensure_is_ldjson_info_type:
-            bs_result = soup.find_all('script', {"type": "application/ld+json"})
-        else:
-            bs_result = soup.find_all('script')
-            bs_result += soup.find_all('body')
-
-        bs_jsons = []
-        for result in bs_result:
-            # Skip empty tags, and things that dont even look like JSON
-            if not result.text or '{' not in result.text:
-                continue
-            try:
-                json_data = json.loads(result.text)
-                bs_jsons.append(json_data)
-            except json.JSONDecodeError:
-                # Skip objects which cannot be parsed
-                continue
-
-        if not bs_jsons:
-            raise JSONNotFound("No parsable JSON found in this document")
-
-        for json_data in bs_jsons:
-            stripped_text_from_html = _parse_json(json_data, json_filter)
-
-            if ensure_is_ldjson_info_type:
-                # Could sometimes be list, string or something else random
-                if isinstance(json_data, dict):
-                    # If it has LD JSON 'key' @type, and @type is 'product', and something was found for the search
-                    # (Some sites have multiple of the same ld+json @type='product', but some have the review part, some have the 'price' part)
-                    # @type could also be a list although non-standard ("@type": ["Product", "SubType"],)
-                    # LD_JSON auto-extract also requires some content PLUS the ldjson to be present
-                    # 1833 - could be either str or dict, should not be anything else
-
-                    t = json_data.get('@type')
-                    if t and stripped_text_from_html:
-
-                        if isinstance(t, str) and t.lower() == ensure_is_ldjson_info_type.lower():
-                            break
-                        # The non-standard part, some have a list
-                        elif isinstance(t, list):
-                            if ensure_is_ldjson_info_type.lower() in [x.lower().strip() for x in t]:
-                                break
-
-            elif stripped_text_from_html:
-                break
+    # Looks like clean JSON, dont bother extracting from HTML
+    content_start = content.lstrip("\ufeff").strip()[:100]
+    if content_start[0] == '{' or content_start[0] == '[':
+        try:
+            # .lstrip("\ufeff") strings ByteOrderMark from UTF8 and still lets the UTF work
+            stripped_text_from_html = _parse_json(json.loads(content.lstrip("\ufeff")), json_filter)
+        except json.JSONDecodeError as e:
+            logger.warning(f"Error processing JSON {content[:20]}...{str(e)})")
+    else:
+        # Probably something else, go fish inside for it
+        try:
+            stripped_text_from_html = extract_json_blob_from_html(content=content,
+                                                                  ensure_is_ldjson_info_type=ensure_is_ldjson_info_type,
+                                                                  json_filter=json_filter )
+        except json.JSONDecodeError as e:
+            logger.warning(f"Error processing JSON while extracting JSON from HTML blob {content[:20]}...{str(e)})")
 
     if not stripped_text_from_html:
         # Re 265 - Just return an empty string when filter not found
```
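In short: the `<script>`/`<body>` scanning that used to live inside the `except json.JSONDecodeError` branch of `extract_json_as_string()` is hoisted into its own `extract_json_blob_from_html()` helper, and the caller now sniffs the first meaningful character up front instead of paying for a failed `json.loads()` on every HTML page. A minimal standalone sketch of that dispatch test (hypothetical helper name, not part of the diff):

```python
def looks_like_bare_json(content: str) -> bool:
    # Same fast-path check as the hunk: drop a UTF-8 BOM, trim whitespace,
    # then peek at the first meaningful character.
    start = content.lstrip("\ufeff").strip()[:100]
    return bool(start) and start[0] in ('{', '[')

print(looks_like_bare_json('\ufeff{"price": 9.99}'))      # True  -> json.loads() directly
print(looks_like_bare_json('<html><body>{"price": 1}'))   # False -> fish inside the HTML
```

The hunks below update the text/JSON processor and its tests to match.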
```diff
@@ -20,7 +20,7 @@ urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
 name = 'Webpage Text/HTML, JSON and PDF changes'
 description = 'Detects all text changes where possible'
 
-json_filter_prefixes = ['json:', 'jq:', 'jqraw:']
+JSON_FILTER_PREFIXES = ['json:', 'jq:', 'jqraw:']
 
 # Assume it's this type if the server says nothing on content-type
 DEFAULT_WHEN_NO_CONTENT_TYPE_HEADER = 'text/html'
```
```diff
@@ -99,6 +99,10 @@ class FilterConfig:
     def has_include_filters(self):
         return bool(self.include_filters) and bool(self.include_filters[0].strip())
 
+    @property
+    def has_include_json_filters(self):
+        return any(f.strip().startswith(prefix) for f in self.include_filters for prefix in JSON_FILTER_PREFIXES)
+
     @property
     def has_subtractive_selectors(self):
         return bool(self.subtractive_selectors) and bool(self.subtractive_selectors[0].strip())
```
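`has_include_json_filters` is True as soon as any configured include filter starts with one of the `json:`/`jq:`/`jqraw:` prefixes. The double generator in isolation (standalone sketch, not the repo's code):

```python
JSON_FILTER_PREFIXES = ['json:', 'jq:', 'jqraw:']

def has_include_json_filters(include_filters):
    # True if any filter line, once stripped, starts with a JSON-style prefix
    return any(f.strip().startswith(prefix)
               for f in include_filters
               for prefix in JSON_FILTER_PREFIXES)

print(has_include_json_filters(['json:$..price']))           # True
print(has_include_json_filters(['xpath://div', ' jq:.a']))   # True
print(has_include_json_filters(['css:.price']))              # False
```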
```diff
@@ -255,15 +259,14 @@ class ContentProcessor:
         )
         return html_content.replace('</body>', metadata + '</body>')
 
-    def preprocess_json(self, content, has_filters):
+    def preprocess_json(self, raw_content):
         """Format and sort JSON content."""
-        # Force reformat if no filters specified
-        if not has_filters:
-            content = html_tools.extract_json_as_string(content=content, json_filter="json:$")
+        # Then we re-format it, else it does have filters (later on) which will reformat it anyway
+        content = html_tools.extract_json_as_string(content=raw_content, json_filter="json:$")
 
         # Sort JSON to avoid false alerts from reordering
         try:
-            content = json.dumps(json.loads(content), sort_keys=True)
+            content = json.dumps(json.loads(content), sort_keys=True, indent=4)
         except Exception:
             # Might be malformed JSON, continue anyway
             pass
```
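`preprocess_json()` now always round-trips the document through `extract_json_as_string(..., json_filter="json:$")` and emits `indent=4` output, so key order and whitespace stop producing false change alerts. The normalization step on its own:

```python
import json

# Two payloads that differ only in key order and whitespace
a = '{ "world": 123, "hello" : 123}'
b = '{"hello": 123, "world": 123}'

def normalize(s):
    # Sort keys and pretty-print, as the new preprocess_json() does
    return json.dumps(json.loads(s), sort_keys=True, indent=4)

assert normalize(a) == normalize(b)
```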
```diff
@@ -294,7 +297,7 @@ class ContentProcessor:
         )
 
         # JSON filters
-        elif any(filter_rule.startswith(prefix) for prefix in json_filter_prefixes):
+        elif any(filter_rule.startswith(prefix) for prefix in JSON_FILTER_PREFIXES):
             filtered_content += html_tools.extract_json_as_string(
                 content=content,
                 json_filter=filter_rule
```
```diff
@@ -387,9 +390,12 @@ class perform_site_check(difference_detection_processor):
             content = content_processor.preprocess_pdf(raw_content=self.fetcher.raw_content)
             stream_content_type.is_html = True
 
-        # JSON preprocessing
+        # JSON - Always reformat it nicely for consistency.
         if stream_content_type.is_json:
-            content = content_processor.preprocess_json(content, filter_config.has_include_filters)
+            if not filter_config.has_include_json_filters:
+                content = content_processor.preprocess_json(raw_content=content)
+            #else, otherwise it gets sorted/formatted in the filter stage anyway
 
         # HTML obfuscation workarounds
         if stream_content_type.is_html:
```
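The check-time gate: when a `json:`/`jq:`/`jqraw:` include filter is configured, the whole-document reformat is skipped, because the filter stage calls `extract_json_as_string()` itself and reformats its matches anyway. A stub sketch of that flow (stand-in callables, not the processor's real signatures):

```python
def handle_json(content, has_json_filters, preprocess_json, apply_filters):
    if not has_json_filters:
        # No JSON filter configured: normalize the whole document now
        content = preprocess_json(content)
    # else: the filter stage extracts and reformats its matches itself
    return apply_filters(content)

# Toy collaborators just to show which branch runs
out = handle_json('{"a":1}', has_json_filters=False,
                  preprocess_json=lambda c: '<formatted>',
                  apply_filters=lambda c: c)
print(out)  # <formatted>
```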
```diff
@@ -113,14 +113,8 @@ def set_original_ext_response():
     return None
 
 def set_modified_ext_response():
-    data = """
-    [
-        {
-            "isPriceLowered": false,
-            "status": "Sold",
-            "statusOrig": "sold"
-        },
-        {
+    # This should get reformatted
+    data = """ [ { "isPriceLowered": false, "status": "Sold", "statusOrig": "sold" }, {
             "_id": "5e7b3e1fb3262d306323ff1e",
             "listingsType": "consumer",
             "isPriceLowered": false,
```
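The fixture now deliberately ships its first array entries squashed onto one line; the point of the test is that the processor reformats it. The parser itself never cared about layout:

```python
import json

squashed = '[ { "isPriceLowered": false, "status": "Sold", "statusOrig": "sold" } ]'
parsed = json.loads(squashed)          # layout is irrelevant to json.loads
print(json.dumps(parsed, sort_keys=True, indent=4))
```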
```diff
@@ -230,30 +224,15 @@ def check_json_filter(json_filter, client, live_server):
 
     # Add our URL to the import page
     test_url = url_for('test_endpoint', content_type="application/json", _external=True)
-    uuid = client.application.config.get('DATASTORE').add_watch(url=test_url)
+    uuid = client.application.config.get('DATASTORE').add_watch(url=test_url, extras={"include_filters": json_filter.splitlines()})
     client.get(url_for("ui.form_watch_checknow"), follow_redirects=True)
 
     # Give the thread time to pick it up
     wait_for_all_checks(client)
 
-    # Goto the edit page, add our ignore text
-    # Add our URL to the import page
-    res = client.post(
-        url_for("ui.ui_edit.edit_page", uuid="first"),
-        data={"include_filters": json_filter,
-              "url": test_url,
-              "tags": "",
-              "headers": "",
-              "fetch_backend": "html_requests",
-              "time_between_check_use_default": "y"
-              },
-        follow_redirects=True
-    )
-    assert b"Updated watch." in res.data
-
     # Check it saved
     res = client.get(
-        url_for("ui.ui_edit.edit_page", uuid="first"),
+        url_for("ui.ui_edit.edit_page", uuid=uuid),
    )
     assert bytes(escape(json_filter).encode('utf-8')) in res.data
```
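Seeding filters via `add_watch(..., extras=...)` removes the edit-form round trip (the deleted `client.post(...)` block), and the tests stop hard-coding `uuid="first"`. A self-contained sketch of why `extras` plus `splitlines()` does the same job (hypothetical stand-in datastore, not the project's real class):

```python
class FakeDatastore:
    # Hypothetical stand-in for the DATASTORE used in the test
    def __init__(self):
        self.watches = {}

    def add_watch(self, url, extras=None):
        uuid = f"watch-{len(self.watches)}"
        self.watches[uuid] = {"url": url, **(extras or {})}
        return uuid

ds = FakeDatastore()
json_filter = "json:$.pricing\njq:.pricing"
uuid = ds.add_watch("http://example.com/api",
                    extras={"include_filters": json_filter.splitlines()})
print(ds.watches[uuid]["include_filters"])  # ['json:$.pricing', 'jq:.pricing']
```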
```diff
@@ -272,7 +251,7 @@ def check_json_filter(json_filter, client, live_server):
     assert b'has-unread-changes' in res.data
 
     # Should not see this, because its not in the JSONPath we entered
-    res = client.get(url_for("ui.ui_views.diff_history_page", uuid="first"))
+    res = client.get(url_for("ui.ui_views.diff_history_page", uuid=uuid))
 
     # But the change should be there, tho its hard to test the change was detected because it will show old and new versions
     # And #462 - check we see the proper utf-8 string there
```
```diff
@@ -294,32 +273,12 @@ def test_check_jqraw_filter(client, live_server, measure_memory_usage):
 def check_json_filter_bool_val(json_filter, client, live_server):
     set_original_response()
 
-    # Give the endpoint time to spin up
-    time.sleep(1)
-
     test_url = url_for('test_endpoint', content_type="application/json", _external=True)
-    uuid = client.application.config.get('DATASTORE').add_watch(url=test_url)
+    uuid = client.application.config.get('DATASTORE').add_watch(url=test_url, extras={"include_filters": [json_filter]})
     client.get(url_for("ui.form_watch_checknow"), follow_redirects=True)
 
     wait_for_all_checks(client)
-    # Goto the edit page, add our ignore text
-    # Add our URL to the import page
-    res = client.post(
-        url_for("ui.ui_edit.edit_page", uuid="first"),
-        data={"include_filters": json_filter,
-              "url": test_url,
-              "tags": "",
-              "headers": "",
-              "fetch_backend": "html_requests",
-              "time_between_check_use_default": "y"
-              },
-        follow_redirects=True
-    )
-    assert b"Updated watch." in res.data
-
-    # Give the thread time to pick it up
-    wait_for_all_checks(client)
-
     # Make a change
     set_modified_response()
```
```diff
@@ -353,21 +312,16 @@ def test_check_jqraw_filter_bool_val(client, live_server, measure_memory_usage):
 def check_json_ext_filter(json_filter, client, live_server):
     set_original_ext_response()
 
-    # Give the endpoint time to spin up
-    time.sleep(1)
-
     # Add our URL to the import page
     test_url = url_for('test_endpoint', content_type="application/json", _external=True)
     uuid = client.application.config.get('DATASTORE').add_watch(url=test_url)
     client.get(url_for("ui.form_watch_checknow"), follow_redirects=True)
 
-    # Give the thread time to pick it up
     wait_for_all_checks(client)
 
     # Goto the edit page, add our ignore text
     # Add our URL to the import page
     res = client.post(
-        url_for("ui.ui_edit.edit_page", uuid="first"),
+        url_for("ui.ui_edit.edit_page", uuid=uuid),
         data={"include_filters": json_filter,
               "url": test_url,
               "tags": "",
```
```diff
@@ -381,7 +335,7 @@ def check_json_ext_filter(json_filter, client, live_server):
 
     # Check it saved
     res = client.get(
-        url_for("ui.ui_edit.edit_page", uuid="first"),
+        url_for("ui.ui_edit.edit_page", uuid=uuid),
     )
     assert bytes(escape(json_filter).encode('utf-8')) in res.data
```
```diff
@@ -395,6 +349,12 @@ def check_json_ext_filter(json_filter, client, live_server):
     # Give the thread time to pick it up
     wait_for_all_checks(client)
 
+    watch = live_server.app.config['DATASTORE'].data['watching'][uuid]
+    dates = list(watch.history.keys())
+    snapshot_contents = watch.get_history_snapshot(dates[0])
+
+    assert snapshot_contents[0] == '['
+
     # It should have 'has-unread-changes'
     res = client.get(url_for("watchlist.index"))
     assert b'has-unread-changes' in res.data
```
```diff
@@ -456,7 +416,7 @@ def test_correct_header_detect(client, live_server, measure_memory_usage):
     # Like in https://github.com/dgtlmoon/changedetection.io/pull/1593
     # Specify extra html that JSON is sometimes wrapped in - when using SockpuppetBrowser / Puppeteer / Playwrightetc
     with open("test-datastore/endpoint-content.txt", "w") as f:
-        f.write('<html><body>{"hello" : 123, "world": 123}')
+        f.write('<html><body>{ "world": 123, "hello" : 123}')
 
     # Add our URL to the import page
     # Check weird casing is cleaned up and detected also
```
```diff
@@ -474,8 +434,18 @@ def test_correct_header_detect(client, live_server, measure_memory_usage):
         follow_redirects=True
     )
 
-    assert b'"hello": 123,' in res.data
-    assert b'"world": 123' in res.data
+    watch = live_server.app.config['DATASTORE'].data['watching'][uuid]
+    dates = list(watch.history.keys())
+    snapshot_contents = watch.get_history_snapshot(dates[0])
+
+    assert b'"hello": 123,' in res.data # properly html escaped in the front end
+
+    # Should be correctly formatted and sorted, ("world" goes to end)
+    assert snapshot_contents == """{
+    "hello": 123,
+    "world": 123
+}"""
 
     delete_all_watches(client)
```
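The expected snapshot literal is exactly what `json.dumps(..., sort_keys=True, indent=4)` yields for the fixture written earlier in this test, which is easy to confirm standalone:

```python
import json

raw = '{ "world": 123, "hello" : 123}'  # the body written to endpoint-content.txt
expected = """{
    "hello": 123,
    "world": 123
}"""
assert json.dumps(json.loads(raw), sort_keys=True, indent=4) == expected
```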