SearXNG Developer Documentation
semantic_scholar.py
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Semantic Scholar (Science)"""

from json import dumps
from datetime import datetime
from lxml import html

from flask_babel import gettext
from searx.network import get
from searx.utils import eval_xpath_getindex, html_to_text


about = {
    "website": 'https://www.semanticscholar.org/',
    "wikidata_id": 'Q22908627',
    "official_api_documentation": 'https://api.semanticscholar.org/',
    "use_official_api": True,
    "require_api_key": False,
    "results": 'JSON',
}

categories = ['science', 'scientific publications']
paging = True
search_url = 'https://www.semanticscholar.org/api/1/search'
base_url = 'https://www.semanticscholar.org'


def _get_ui_version():
    # The web front end publishes its build version in a <meta> tag; the
    # search endpoint expects that value in the X-S2-UI-Version header.
    resp = get(base_url)
    if not resp.ok:
        raise RuntimeError("Can't determine Semantic Scholar UI version")

    doc = html.fromstring(resp.text)
    ui_version = eval_xpath_getindex(doc, "//meta[@name='s2-ui-version']/@content", 0)
    if not ui_version:
        raise RuntimeError("Can't determine Semantic Scholar UI version")

    return ui_version


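# NOTE: _get_ui_version() costs one extra round trip per outgoing query,
# since request() calls it for every search.  A minimal memoisation sketch
# (the cache variable and helper below are illustrative, not part of the
# upstream engine):

_ui_version_cache = None


def _get_ui_version_cached():
    """Probe the front page once and reuse the value afterwards."""
    global _ui_version_cache  # pylint: disable=global-statement
    if _ui_version_cache is None:
        _ui_version_cache = _get_ui_version()
    return _ui_version_cache

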
def request(query, params):
    params['url'] = search_url
    params['method'] = 'POST'
    params['headers'].update(
        {
            'Content-Type': 'application/json',
            'X-S2-UI-Version': _get_ui_version(),
            'X-S2-Client': "webapp-browser",
        }
    )
    params['data'] = dumps(
        {
            "queryString": query,
            "page": params['pageno'],
            "pageSize": 10,
            "sort": "relevance",
            "getQuerySuggestions": False,
            "authors": [],
            "coAuthors": [],
            "venues": [],
            "performTitleMatch": True,
        }
    )
    return params


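# A rough sketch of how the engine framework drives request().  The params
# dict below only mimics the two fields this engine reads ('headers' and
# 'pageno'); calling it performs a live round trip for the UI version:

def _demo_request():
    params = {'headers': {}, 'pageno': 1}
    params = request('perovskite solar cells', params)
    # params['data'] now holds the JSON payload POSTed to search_url, e.g.
    # '{"queryString": "perovskite solar cells", "page": 1, "pageSize": 10, ...}'
    return params

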
def response(resp):
    res = resp.json()

    results = []
    for result in res['results']:
        # fall back through the possible locations of the paper's URL
        url = result.get('primaryPaperLink', {}).get('url')
        if not url and result.get('links'):
            url = result.get('links')[0]
        if not url:
            alternatePaperLinks = result.get('alternatePaperLinks')
            if alternatePaperLinks:
                url = alternatePaperLinks[0].get('url')
        if not url:
            url = base_url + '/paper/%s' % result['id']

        # publishedDate
        if 'pubDate' in result:
            publishedDate = datetime.strptime(result['pubDate'], "%Y-%m-%d")
        else:
            publishedDate = None

        # authors (each entry is a nested list, the first element carries the name)
        authors = [author[0]['name'] for author in result.get('authors', [])]

        # pick the first alternate link that is neither a crawler hit nor a DOI
        pdf_url = None
        for doc in result.get('alternatePaperLinks', []):
            if doc['linkType'] not in ('crawler', 'doi'):
                pdf_url = doc['url']
                break

        # comments
        comments = None
        if 'citationStats' in result:
            comments = gettext(
                '{numCitations} citations from the year {firstCitationVelocityYear} to {lastCitationVelocityYear}'
            ).format(
                numCitations=result['citationStats']['numCitations'],
                firstCitationVelocityYear=result['citationStats']['firstCitationVelocityYear'],
                lastCitationVelocityYear=result['citationStats']['lastCitationVelocityYear'],
            )

        results.append(
            {
                'template': 'paper.html',
                'url': url,
                'title': result['title']['text'],
                'content': html_to_text(result['paperAbstract']['text']),
                'journal': result.get('venue', {}).get('text') or result.get('journal', {}).get('name'),
                'doi': result.get('doiInfo', {}).get('doi'),
                'tags': result.get('fieldsOfStudy'),
                'authors': authors,
                'pdf_url': pdf_url,
                'publishedDate': publishedDate,
                'comments': comments,
            }
        )

    return results
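

# A minimal offline exercise of response() with a stubbed response object.
# The fixture below is illustrative and far smaller than a real API answer;
# running it still needs a SearXNG checkout on the import path:

if __name__ == "__main__":
    class _FakeResponse:
        """Stand-in for the network response the framework passes in."""

        def json(self):
            return {
                "results": [
                    {
                        "id": "0123456789abcdef",
                        "title": {"text": "An Example Paper"},
                        "paperAbstract": {"text": "Lorem ipsum."},
                        "authors": [[{"name": "A. Author"}]],
                        "pubDate": "2020-01-01",
                    }
                ]
            }

    # With no paper links in the fixture, the URL falls back to
    # base_url + '/paper/<id>'.
    for item in response(_FakeResponse()):
        print(item['title'], item['url'])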