.oO SearXNG Developer Documentation Oo.
Loading...
Searching...
No Matches
annas_archive.py
Go to the documentation of this file.
1# SPDX-License-Identifier: AGPL-3.0-or-later
2"""`Anna's Archive`_ is a free non-profit online shadow library metasearch
3engine providing access to a variety of book resources (also via IPFS), created
4by a team of anonymous archivists (AnnaArchivist_).
5
6.. _Anna's Archive: https://annas-archive.org/
7.. _AnnaArchivist: https://annas-software.org/AnnaArchivist/annas-archive
8
9Configuration
10=============
11
12The engine has the following additional settings:
13
14- :py:obj:`aa_content`
15- :py:obj:`aa_ext`
16- :py:obj:`aa_sort`
17
18With these options a SearXNG maintainer is able to configure **additional**
19engines for specific searches in Anna's Archive. For example an engine to search
20for *newest* articles and journals (PDF) / by shortcut ``!aaa <search-term>``.
21
22.. code:: yaml
23
24 - name: annas articles
25 engine: annas_archive
26 shortcut: aaa
27 aa_content: 'magazine'
28 aa_ext: 'pdf'
29 aa_sort: 'newest'
30
31Implementations
32===============
33
34"""
35
36from typing import List, Dict, Any, Optional
37from urllib.parse import quote
38from lxml import html
39
40from searx.utils import extract_text, eval_xpath, eval_xpath_list
41from searx.enginelib.traits import EngineTraits
42from searx.data import ENGINE_TRAITS
43
# about
about: Dict[str, Any] = {
    "website": "https://annas-archive.org/",
    "wikidata_id": "Q115288326",
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": "HTML",
}

# engine dependent config
categories: List[str] = ["files"]
paging: bool = False  # this engine does not support result paging

# search-url
base_url: str = "https://annas-archive.org"
aa_content: str = ""
"""Anna's search form field **Content** / possible values::

    book_fiction, book_unknown, book_nonfiction,
    book_comic, magazine, standards_document

To not filter use an empty string (default).
"""
aa_sort: str = ''
"""Sort Anna's results, possible values::

    newest, oldest, largest, smallest

To sort by *most relevant* use an empty string (default)."""

aa_ext: str = ''
"""Filter Anna's results by a file ending. Common filters for example are
``pdf`` and ``epub``.

.. note::

   Anna's Archive is a beta release: Filter results by file extension does not
   really work on Anna's Archive.

"""
85
86
def init(engine_settings=None):  # pylint: disable=unused-argument
    """Validate the engine's additional settings (:py:obj:`aa_content`,
    :py:obj:`aa_sort`, :py:obj:`aa_ext`) against the values known from the
    engine traits.

    :raises ValueError: if one of the settings holds a value Anna's search
        form does not offer.
    """
    traits = EngineTraits(**ENGINE_TRAITS['annas archive'])

    # each setting is optional (empty string == no filter); a non-empty value
    # must be one of the values scraped from Anna's search form
    checks = (
        ('content', aa_content),
        ('sort', aa_sort),
        ('ext', aa_ext),
    )
    for name, value in checks:
        if value and value not in traits.custom[name]:
            raise ValueError(f'invalid setting {name}: {value}')
99
100
def request(query, params: Dict[str, Any]) -> Dict[str, Any]:
    """Assemble the GET request URL for Anna's Archive.

    The engine's additional settings (:py:obj:`aa_content`, :py:obj:`aa_ext`,
    :py:obj:`aa_sort`) are passed straight through as query arguments; the
    UI language is mapped to one of Anna's language codes via the engine
    traits (``traits`` is injected by the engine framework).
    """
    lang = traits.get_language(params["language"], traits.all_locale)  # type: ignore
    # argument order matters only for URL readability, not for Anna's backend
    args = (
        ('lang', lang or ''),
        ('content', aa_content),
        ('ext', aa_ext),
        ('sort', aa_sort),
        ('q', quote(query)),
    )
    query_string = "&".join(f"{key}={value}" for key, value in args)
    params["url"] = f"{base_url}/search?{query_string}"
    return params
106
107
def response(resp) -> List[Dict[str, Optional[str]]]:
    """Parse Anna's HTML result page into SearXNG result items."""
    dom = html.fromstring(resp.text)

    # the first (visible) results are plain anchor elements
    results: List[Dict[str, Optional[str]]] = [
        _get_result(link)
        for link in eval_xpath_list(dom, '//main//div[contains(@class, "h-[125]")]/a')
    ]

    # The rendering of the WEB page is very strange; except the first position
    # all other positions of Anna's result page are enclosed in SGML comments.
    # These comments are *uncommented* by some JS code, see query of class
    # '.js-scroll-hidden' in Anna's HTML template:
    # https://annas-software.org/AnnaArchivist/annas-archive/-/blob/main/allthethings/templates/macros/md5_list.html

    for hidden in eval_xpath_list(dom, '//main//div[contains(@class, "js-scroll-hidden")]'):
        # re-parse the commented-out markup so it can be queried like the rest
        uncommented = html.fromstring(hidden.xpath('./comment()')[0].text)
        results.append(_get_result(uncommented))

    return results
126
127
def _get_result(item):
    """Map a single result element of Anna's page onto SearXNG's
    ``paper.html`` result template."""
    url = base_url + item.xpath('./@href')[0]
    title = extract_text(eval_xpath(item, './/h3/text()[1]'))
    publisher = extract_text(eval_xpath(item, './/div[contains(@class, "text-sm")]'))
    # Anna's page shows a single (italic) author line per result
    authors = [extract_text(eval_xpath(item, './/div[contains(@class, "italic")]'))]
    content = extract_text(eval_xpath(item, './/div[contains(@class, "text-xs")]'))
    img_src = item.xpath('.//img/@src')[0]
    return {
        'template': 'paper.html',
        'url': url,
        'title': title,
        'publisher': publisher,
        'authors': authors,
        'content': content,
        'img_src': img_src,
    }
138
139
def fetch_traits(engine_traits: EngineTraits):
    """Fetch languages and other search arguments from Anna's search form.

    Scrapes Anna's ``/search`` page and fills ``engine_traits.languages``
    plus the ``custom`` lists ``content``, ``ext`` and ``sort`` from the
    form's input/select elements.

    :raises RuntimeError: if Anna's search page does not respond with OK.
    """
    # pylint: disable=import-outside-toplevel

    import babel
    from searx.network import get  # see https://github.com/searxng/searxng/issues/762
    from searx.locales import language_tag

    # reset traits before (re-)populating them
    engine_traits.all_locale = ''
    engine_traits.custom['content'] = []
    engine_traits.custom['ext'] = []
    engine_traits.custom['sort'] = []

    resp = get(base_url + '/search')
    if not resp.ok:  # type: ignore
        raise RuntimeError("Response from Anna's search page is not OK.")
    dom = html.fromstring(resp.text)  # type: ignore

    # supported language codes

    # NOTE(review): lang_map is empty, so the .get() below always falls back to
    # the scraped value — presumably a hook for manual engine→babel overrides.
    lang_map = {}
    for x in eval_xpath_list(dom, "//form//input[@name='lang']"):
        eng_lang = x.get("value")
        # skip pseudo/unsupported codes offered by the form
        if eng_lang in ('', '_empty', 'nl-BE', 'und'):
            continue
        try:
            locale = babel.Locale.parse(lang_map.get(eng_lang, eng_lang), sep='-')
        except babel.UnknownLocaleError:
            # silently ignore unknown languages
            # print("ERROR: %s -> %s is unknown by babel" % (x.get("data-name"), eng_lang))
            continue
        sxng_lang = language_tag(locale)
        conflict = engine_traits.languages.get(sxng_lang)
        if conflict:
            # two engine codes map to the same SearXNG tag: keep the first one
            if conflict != eng_lang:
                print("CONFLICT: babel %s --> %s, %s" % (sxng_lang, conflict, eng_lang))
            continue
        engine_traits.languages[sxng_lang] = eng_lang

    # possible values of the form's "content", "ext" and "sort" filters
    for x in eval_xpath_list(dom, "//form//input[@name='content']"):
        engine_traits.custom['content'].append(x.get("value"))

    for x in eval_xpath_list(dom, "//form//input[@name='ext']"):
        engine_traits.custom['ext'].append(x.get("value"))

    for x in eval_xpath_list(dom, "//form//select[@name='sort']//option"):
        engine_traits.custom['sort'].append(x.get("value"))
fetch_traits(EngineTraits engine_traits)
List[Dict[str, Optional[str]]] response(resp)
Dict[str, Any] request(query, Dict[str, Any] params)
init(engine_settings=None)