.oO SearXNG Developer Documentation Oo.
arxiv.py
# SPDX-License-Identifier: AGPL-3.0-or-later
"""arXiv is a free distribution service and an open-access archive for nearly
2.4 million scholarly articles in the fields of physics, mathematics, computer
science, quantitative biology, quantitative finance, statistics, electrical
engineering and systems science, and economics.

The engine uses the `arXiv API`_.

.. _arXiv API: https://info.arxiv.org/help/api/user-manual.html
"""

import typing as t

from datetime import datetime
from urllib.parse import urlencode

from lxml import etree
from lxml.etree import XPath
from searx.utils import eval_xpath, eval_xpath_list, eval_xpath_getindex
from searx.result_types import EngineResults

if t.TYPE_CHECKING:
    from searx.extended_types import SXNG_Response
    from searx.search.processors import OnlineParams

about = {
    "website": "https://arxiv.org",
    "wikidata_id": "Q118398",
    "official_api_documentation": "https://info.arxiv.org/help/api/user-manual.html",
    "use_official_api": True,
    "require_api_key": False,
    "results": "XML-RSS",
}

categories = ["science", "scientific publications"]
paging = True
arxiv_max_results = 10
arxiv_search_prefix = "all"
"""Search fields, for more details see `Details of Query Construction`_.

.. _Details of Query Construction:
    https://info.arxiv.org/help/api/user-manual.html#51-details-of-query-construction
"""
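
# For reference, the other ``search_query`` field prefixes documented in the
# arXiv API user manual (a sketch of the manual's table, not part of this
# module's configuration): ``ti`` (title), ``au`` (author), ``abs`` (abstract),
# ``co`` (comment), ``jr`` (journal reference), ``cat`` (subject category),
# ``rn`` (report number), ``id``, and ``all`` (all of the above).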

base_url = "https://export.arxiv.org/api/query"
"""`arXiv API`_ URL, for more details see Query-Interface_

.. _Query-Interface: https://info.arxiv.org/help/api/user-manual.html#_query_interface
"""
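
# Illustrative sketch of the query interface, paraphrasing the arXiv API user
# manual (not taken from this module):
#
#   https://export.arxiv.org/api/query?search_query=<prefix>:<terms>&start=<offset>&max_results=<count>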

arxiv_namespaces = {
    "atom": "http://www.w3.org/2005/Atom",
    "arxiv": "http://arxiv.org/schemas/atom",
}
xpath_entry = XPath("//atom:entry", namespaces=arxiv_namespaces)
xpath_title = XPath(".//atom:title", namespaces=arxiv_namespaces)
xpath_id = XPath(".//atom:id", namespaces=arxiv_namespaces)
xpath_summary = XPath(".//atom:summary", namespaces=arxiv_namespaces)
xpath_author_name = XPath(".//atom:author/atom:name", namespaces=arxiv_namespaces)
xpath_doi = XPath(".//arxiv:doi", namespaces=arxiv_namespaces)
xpath_pdf = XPath(".//atom:link[@title='pdf']", namespaces=arxiv_namespaces)
xpath_published = XPath(".//atom:published", namespaces=arxiv_namespaces)
xpath_journal = XPath(".//arxiv:journal_ref", namespaces=arxiv_namespaces)
xpath_category = XPath(".//atom:category/@term", namespaces=arxiv_namespaces)
xpath_comment = XPath("./arxiv:comment", namespaces=arxiv_namespaces)
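
# Schematic shape of an Atom ``<entry>`` these XPaths address (illustrative
# sketch based on the arXiv API user manual; values are placeholders and the
# real feed carries additional elements that are not used here):
#
#   <entry>
#     <id>http://arxiv.org/abs/...</id>
#     <published>2024-01-01T00:00:00Z</published>
#     <title>...</title>
#     <summary>...</summary>
#     <author><name>...</name></author>
#     <arxiv:doi>...</arxiv:doi>
#     <link title="pdf" href="http://arxiv.org/pdf/..." type="application/pdf"/>
#     <arxiv:comment>...</arxiv:comment>
#     <arxiv:journal_ref>...</arxiv:journal_ref>
#     <category term="..." scheme="http://arxiv.org/schemas/atom"/>
#   </entry>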


def request(query: str, params: "OnlineParams") -> None:

    args = {
        "search_query": f"{arxiv_search_prefix}:{query}",
        "start": (params["pageno"] - 1) * arxiv_max_results,
        "max_results": arxiv_max_results,
    }
    params["url"] = f"{base_url}?{urlencode(args)}"
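    # Illustrative example (a sketch, not part of the original source): for the
    # query "electron" on page 1 the resulting URL is
    #   https://export.arxiv.org/api/query?search_query=all%3Aelectron&start=0&max_results=10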


def response(resp: "SXNG_Response") -> EngineResults:

    res = EngineResults()

    dom = etree.fromstring(resp.content)
    for entry in eval_xpath_list(dom, xpath_entry):

        title: str = eval_xpath_getindex(entry, xpath_title, 0).text

        url: str = eval_xpath_getindex(entry, xpath_id, 0).text
        abstract: str = eval_xpath_getindex(entry, xpath_summary, 0).text

        authors: list[str] = [author.text for author in eval_xpath_list(entry, xpath_author_name)]

        # doi
        doi_element = eval_xpath_getindex(entry, xpath_doi, 0, default=None)
        doi: str = "" if doi_element is None else doi_element.text

        # pdf
        pdf_element = eval_xpath_getindex(entry, xpath_pdf, 0, default=None)
        pdf_url: str = "" if pdf_element is None else pdf_element.attrib.get("href")

        # journal
        journal_element = eval_xpath_getindex(entry, xpath_journal, 0, default=None)
        journal: str = "" if journal_element is None else journal_element.text

        # tags
        tag_elements = eval_xpath(entry, xpath_category)
        tags: list[str] = [str(tag) for tag in tag_elements]

        # comments
        comments_elements = eval_xpath_getindex(entry, xpath_comment, 0, default=None)
        comments: str = "" if comments_elements is None else comments_elements.text

        publishedDate = datetime.strptime(eval_xpath_getindex(entry, xpath_published, 0).text, "%Y-%m-%dT%H:%M:%SZ")

        res.add(
            res.types.Paper(
                url=url,
                title=title,
                publishedDate=publishedDate,
                content=abstract,
                doi=doi,
                authors=authors,
                journal=journal,
                tags=tags,
                comments=comments,
                pdf_url=pdf_url,
            )
        )

    return res
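
# Minimal sketch of exercising response() outside SearXNG (an assumption, not
# part of the module: at runtime any object exposing a ``content`` attribute
# with the raw Atom bytes suffices, since that is the only attribute read
# above; the feed file name below is hypothetical):
#
#   class FakeResponse:
#       def __init__(self, content: bytes):
#           self.content = content
#
#   raw = open("arxiv_feed.xml", "rb").read()
#   results = response(FakeResponse(raw))  # -> EngineResults of Paper items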