google_videos.py
# SPDX-License-Identifier: AGPL-3.0-or-later
"""This is the implementation of the Google Videos engine.

.. admonition:: Content-Security-Policy (CSP)

   This engine needs to allow images from the `data URLs`_ (prefixed with the
   ``data:`` scheme)::

     Header set Content-Security-Policy "img-src 'self' data: ;"

.. _data URLs:
   https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URIs

"""

from typing import TYPE_CHECKING

from urllib.parse import urlencode
from lxml import html

from searx.utils import (
    eval_xpath,
    eval_xpath_list,
    eval_xpath_getindex,
    extract_text,
)

from searx.engines.google import fetch_traits  # pylint: disable=unused-import
from searx.engines.google import (
    get_google_info,
    time_range_dict,
    filter_mapping,
    suggestion_xpath,
    detect_google_sorry,
)
from searx.enginelib.traits import EngineTraits
from searx.utils import get_embeded_stream_url

if TYPE_CHECKING:
    import logging

    logger: logging.Logger

traits: EngineTraits
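# ``traits`` is populated by SearXNG's engine setup with the shared Google
# traits (languages, regions and related custom properties); ``fetch_traits``
# is imported above (hence the pylint hint) so the traits update tooling can
# reuse the Google fetcher for this engine.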

# about
about = {
    "website": 'https://www.google.com',
    "wikidata_id": 'Q219885',
    "official_api_documentation": 'https://developers.google.com/custom-search',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config

categories = ['videos', 'web']
paging = True
max_page = 50
language_support = True
time_range_support = True
safesearch = True
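# For reference, a typical settings.yml entry enabling this engine looks
# roughly like (the shortcut is an assumption, check your instance's defaults):
#
#   - name: google videos
#     engine: google_videos
#     shortcut: gov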


def request(query, params):
    """Google-Video search request"""

    google_info = get_google_info(params, traits)

    query_url = (
        'https://'
        + google_info['subdomain']
        + '/search'
        + "?"
        + urlencode(
            {
                'q': query,
                # tbm=vid restricts the query to Google's video vertical
                'tbm': "vid",
                'start': 10 * params['pageno'],
                **google_info['params'],
                # asearch/async appear to request Google's asynchronous HTML
                # result format, which response() below parses
                'asearch': 'arc',
                'async': 'use_ac:true,_fmt:html',
            }
        )
    )

    if params['time_range'] in time_range_dict:
        query_url += '&' + urlencode({'tbs': 'qdr:' + time_range_dict[params['time_range']]})
    if 'safesearch' in params:
        query_url += '&' + urlencode({'safe': filter_mapping[params['safesearch']]})
    params['url'] = query_url

    params['cookies'] = google_info['cookies']
    params['headers'].update(google_info['headers'])
    return params


def response(resp):
    """Get response from google's search request"""
    results = []

    detect_google_sorry(resp)

    # convert the text to dom
    dom = html.fromstring(resp.text)

    # parse results
    for result in eval_xpath_list(dom, '//div[contains(@class, "g ")]'):

        thumbnail = eval_xpath_getindex(result, './/img/@src', 0, None)
        if thumbnail is None:
            continue

        title = extract_text(eval_xpath_getindex(result, './/a/h3[1]', 0))
        url = eval_xpath_getindex(result, './/a/h3[1]/../@href', 0)

        # ITZIwc / gqF9jc appear to be Google's generated (obfuscated) CSS
        # class names; they change over time, so these XPaths may need updates
        c_node = eval_xpath_getindex(result, './/div[@class="ITZIwc"]', 0)
        content = extract_text(c_node)
        pub_info = extract_text(eval_xpath(result, './/div[@class="gqF9jc"]'))

        results.append(
            {
                'url': url,
                'title': title,
                'content': content,
                'author': pub_info,
                'thumbnail': thumbnail,
                'iframe_src': get_embeded_stream_url(url),
                'template': 'videos.html',
            }
        )

    # parse suggestion
    for suggestion in eval_xpath_list(dom, suggestion_xpath):
        # append suggestion
        results.append({'suggestion': extract_text(suggestion)})

    return results
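# Rough usage sketch (hypothetical values; inside SearXNG the engine loader
# populates ``traits`` and drives these calls):
#
#   params = {'pageno': 1, 'time_range': '', 'safesearch': 1,
#             'headers': {}, 'searxng_locale': 'en-US'}
#   request('searxng', params)    # fills params['url'], cookies and headers
#   # ... the framework performs the HTTP request, then:
#   results = response(resp)      # video result dicts plus suggestion entries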