.oO SearXNG Developer Documentation Oo.
Loading...
Searching...
No Matches
yandex.py
Go to the documentation of this file.
1# SPDX-License-Identifier: AGPL-3.0-or-later
2"""Yandex (Web, images)"""
3
4from json import loads
5from urllib.parse import urlencode
6from html import unescape
7from lxml import html
8from searx.exceptions import SearxEngineCaptchaException
9from searx.utils import humanize_bytes, eval_xpath, eval_xpath_list, extract_text, extr
10
11
# Engine metadata
about = {
    "website": 'https://yandex.com/',
    "wikidata_id": 'Q5281',
    "official_api_documentation": "?",
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# Engine configuration
categories = []
paging = True
# Selected per engine instance via settings: expected values are
# 'web' or 'images' (empty by default — see request()/response()).
search_type = ""

# Search URL
base_url_web = 'https://yandex.com/search/site/'
base_url_images = 'https://yandex.com/images/search'

# XPath selectors used to scrape the web (HTML) result page
results_xpath = '//li[contains(@class, "serp-item")]'
url_xpath = './/a[@class="b-serp-item__title-link"]/@href'
title_xpath = './/h3[@class="b-serp-item__title"]/a[@class="b-serp-item__title-link"]/span'
content_xpath = './/div[@class="b-serp-item__content"]//div[@class="b-serp-item__text"]'
36
def catch_bad_response(resp):
    """Detect Yandex's CAPTCHA interstitial and abort the engine request.

    Yandex answers suspected bot traffic by redirecting to a
    ``/showcaptcha`` URL rather than returning an HTTP error status, so
    the response URL path is the only reliable indicator.

    :param resp: the HTTP response object (``resp.url.path`` is inspected)
    :raises SearxEngineCaptchaException: when the response URL path
        starts with ``/showcaptcha``
    """
    if resp.url.path.startswith('/showcaptcha'):
        raise SearxEngineCaptchaException()
41
def request(query, params):
    """Build the outgoing Yandex request.

    Depending on the module-level ``search_type``, either the site-search
    (web) endpoint or the image-search endpoint is used; for any other
    value ``params['url']`` is left untouched.

    :param query: the user's search terms
    :param params: SearXNG request parameters (mutated and returned)
    :return: the updated ``params`` dict
    """
    web_args = {
        "tmpl_version": "releases",
        "text": query,
        "web": "1",
        "frame": "1",
        "searchid": "3131712",
    }
    image_args = {
        "text": query,
        "uinfo": "sw-1920-sh-1080-ww-1125-wh-999",
    }

    # Yandex pages are zero-based; SearXNG's pageno starts at 1.
    page = params['pageno']
    if page > 1:
        web_args["p"] = page - 1
        image_args["p"] = page - 1

    # Fixed cookie payload expected by Yandex (screen metrics + prefs).
    params["cookies"] = {'cookie': "yp=1716337604.sp.family%3A0#1685406411.szm.1:1920x1080:1920x999"}

    if search_type == 'web':
        params['url'] = f"{base_url_web}?{urlencode(web_args)}"
    elif search_type == 'images':
        params['url'] = f"{base_url_images}?{urlencode(image_args)}"

    return params
68
69
def response(resp):
    """Parse a Yandex response into SearXNG results.

    Web results are scraped from the HTML page via the module-level XPath
    selectors; image results are recovered from a JSON state blob embedded
    in the page markup.

    Fix: both branches must run :py:func:`catch_bad_response` first, so a
    CAPTCHA interstitial raises ``SearxEngineCaptchaException`` instead of
    being parsed as an (empty) result page.

    :param resp: the HTTP response object
    :return: list of result dicts (empty when ``search_type`` is unset)
    """
    if search_type == 'web':

        catch_bad_response(resp)

        dom = html.fromstring(resp.text)

        results = []

        for result in eval_xpath_list(dom, results_xpath):
            results.append(
                {
                    'url': extract_text(eval_xpath(result, url_xpath)),
                    'title': extract_text(eval_xpath(result, title_xpath)),
                    'content': extract_text(eval_xpath(result, content_xpath)),
                }
            )

        return results

    if search_type == 'images':

        catch_bad_response(resp)

        html_data = html.fromstring(resp.text)
        html_sample = unescape(html.tostring(html_data, encoding='unicode'))

        # The image metadata is a JSON object embedded in the page; cut it
        # out between two known anchors, then re-attach the anchors so the
        # string is valid JSON again.
        content_between_tags = extr(
            html_sample, '{"location":"/images/search/', 'advRsyaSearchColumn":null}}', default="fail"
        )
        json_data = '{"location":"/images/search/' + content_between_tags + 'advRsyaSearchColumn":null}}'

        if content_between_tags == "fail":
            # Fallback: some result pages use a different closing anchor.
            content_between_tags = extr(html_sample, '{"location":"/images/search/', 'false}}}')
            json_data = '{"location":"/images/search/' + content_between_tags + 'false}}}'

        json_resp = loads(json_data)

        results = []
        for _, item_data in json_resp['initialState']['serpList']['items']['entities'].items():
            title = item_data['snippet']['title']
            source = item_data['snippet']['url']
            thumb = item_data['image']
            # dups[0] is the first (full-size) duplicate of the image.
            fullsize_image = item_data['viewerData']['dups'][0]['url']
            height = item_data['viewerData']['dups'][0]['h']
            width = item_data['viewerData']['dups'][0]['w']
            filesize = item_data['viewerData']['dups'][0]['fileSizeInBytes']
            humanized_filesize = humanize_bytes(filesize)

            results.append(
                {
                    'title': title,
                    'url': source,
                    'img_src': fullsize_image,
                    'filesize': humanized_filesize,
                    'thumbnail_src': thumb,
                    'template': 'images.html',
                    'resolution': f'{width} x {height}',
                }
            )

        return results

    return []
request(query, params)
Definition yandex.py:42
catch_bad_response(resp)
Definition yandex.py:37