.oO SearXNG Developer Documentation Oo.
tracker_patterns.py
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Simple implementation to store TrackerPatterns data in a SQL database."""

from __future__ import annotations
import typing

__all__ = ["TrackerPatternsDB"]

import re
import pathlib
from collections.abc import Iterator
from urllib.parse import urlparse, urlunparse, parse_qsl, urlencode

import httpx

from searx.data.core import get_cache, log

RuleType = tuple[str, list[str], list[str]]


class TrackerPatternsDB:
    # pylint: disable=missing-class-docstring

    ctx_name = "data_tracker_patterns"
    json_file = pathlib.Path(__file__).parent / "tracker_patterns.json"

    CLEAR_LIST_URL = [
        # ClearURL rule lists, the first one that responds HTTP 200 is used
        "https://rules1.clearurls.xyz/data.minify.json",
        "https://rules2.clearurls.xyz/data.minify.json",
        "https://raw.githubusercontent.com/ClearURLs/Rules/refs/heads/master/data.min.json",
    ]

    class Fields:
        # pylint: disable=too-few-public-methods, invalid-name
        url_regexp: typing.Final = 0  # URL (regular expression) match condition of the link
        url_ignore: typing.Final = 1  # URL (regular expression) to ignore
        del_args: typing.Final = 2  # list of URL arguments (regular expression) to delete

    def __init__(self):
        self.cache = get_cache()

    def init(self):
        if self.cache.properties("tracker_patterns loaded") != "OK":
            self.load()
            self.cache.properties.set("tracker_patterns loaded", "OK")
        # FIXME: do we need a maintenance task?  Remember: the database is
        # stored in /tmp and will be rebuilt during reboot anyway.

    def load(self):
        log.debug("init searx.data.TRACKER_PATTERNS")
        for rule in self.iter_clear_list():
            self.add(rule)

    def add(self, rule: RuleType):
        self.cache.set(
            key=rule[self.Fields.url_regexp],
            value=(
                rule[self.Fields.url_ignore],
                rule[self.Fields.del_args],
            ),
            ctx=self.ctx_name,
            expire=None,
        )

    def rules(self) -> Iterator[RuleType]:
        self.init()
        for key, value in self.cache.pairs(ctx=self.ctx_name):
            yield key, value[0], value[1]

    def iter_clear_list(self) -> Iterator[RuleType]:
        resp = None
        for url in self.CLEAR_LIST_URL:
            resp = httpx.get(url, timeout=3)
            if resp.status_code == 200:
                break
            log.warning(f"TRACKER_PATTERNS: ClearURL ignore HTTP {resp.status_code} {url}")
            resp = None  # this list failed, try the next one

        if resp is None:
            log.error("TRACKER_PATTERNS: failed fetching ClearURL rule lists")
            return
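
        # The ClearURL data set is a JSON object keyed by "providers"; each
        # provider carries the keys used below.  Roughly (illustrative, not a
        # verbatim excerpt):
        #
        #   {"providers": {"example": {
        #       "urlPattern": "^https?:\\/\\/example\\.com",
        #       "exceptions": [], "rules": ["utm_source"]}}}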
        for rule in resp.json()["providers"].values():
            yield (
                rule["urlPattern"].replace("\\\\", "\\"),  # fix javascript regex syntax
                [exc.replace("\\\\", "\\") for exc in rule.get("exceptions", [])],
                rule.get("rules", []),
            )

    def clean_url(self, url: str) -> bool | str:
        """Normalize the URL's query arguments and strip tracker parameters.

        Returns ``True`` to use the URL unchanged (``False`` to ignore the
        URL).  If the URL should be modified, the returned string is the new
        URL to use.
        """

        new_url = url
        parsed_new_url = urlparse(url=new_url)

        for rule in self.rules():

            if not re.match(rule[self.Fields.url_regexp], new_url):
                # no match / ignore pattern
                continue

            do_ignore = False
            for pattern in rule[self.Fields.url_ignore]:
                if re.match(pattern, new_url):
                    do_ignore = True
                    break

            if do_ignore:
                # pattern is in the list of exceptions / ignore pattern
                # HINT: we can't break the outer pattern loop since we have
                # overlapping urlPattern like ".*"
                continue

            # remove tracker arguments from the url-query part
            query_args: list[tuple[str, str]] = list(parse_qsl(parsed_new_url.query))

            for name, val in query_args.copy():
                # remove URL arguments
                for pattern in rule[self.Fields.del_args]:
                    if re.match(pattern, name):
                        log.debug(
                            "TRACKER_PATTERNS: %s remove tracker arg: %s='%s'",
                            parsed_new_url.netloc, name, val,
                        )
                        query_args.remove((name, val))
                        break  # (name, val) is already removed, don't remove it twice

            parsed_new_url = parsed_new_url._replace(query=urlencode(query_args))
            new_url = urlunparse(parsed_new_url)

        if new_url != url:
            return new_url

        return True


if __name__ == "__main__":
    db = TrackerPatternsDB()
    for r in db.rules():
        print(r)
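    # A hypothetical extra check: exercise clean_url(); the result depends on
    # the rule lists fetched at runtime (the URL below is illustrative only):
    print(db.clean_url("https://example.com/?utm_source=x&id=42"))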