from searx.utils import extr, extract_text, eval_xpath, gen_useragent, html_to_text, humanize_bytes, remove_pua_from_str
170 """Get an actual ``sc`` argument from Startpage's search form (HTML page).
172 Startpage puts a ``sc`` argument on every HTML :py:obj:`search form
173 <search_form_xpath>`. Without this argument Startpage considers the request
174 is from a bot. We do not know what is encoded in the value of the ``sc``
175 argument, but it seems to be a kind of a *time-stamp*.
177 Startpage's search form generates a new sc-code on each request. This
178 function scrap a new sc-code from Startpage's home page every
179 :py:obj:`sc_code_cache_sec` seconds.
    global sc_code_ts, sc_code

    # reuse the cached sc-code as long as it is not expired
    if sc_code and (time() < (sc_code_ts + sc_code_cache_sec)):
        logger.debug("get_sc_code: reuse '%s'", sc_code)
        return sc_code

    headers = {**params['headers']}
    headers['Origin'] = base_url
    headers['Referer'] = base_url + '/'

    # add Accept-Language header
    if searxng_locale == 'all':
        searxng_locale = 'en-US'
    locale = babel.Locale.parse(searxng_locale, sep='-')

    if send_accept_language_header:
        ac_lang = locale.language
        if locale.territory:
            ac_lang = "%s-%s,%s;q=0.9,*;q=0.5" % (
                locale.language,
                locale.territory,
                locale.language,
            )
        headers['Accept-Language'] = ac_lang

    get_sc_url = base_url + '/?sc=%s' % (sc_code)
    logger.debug("query new sc time-stamp ... %s", get_sc_url)
    logger.debug("headers: %s", headers)
    resp = get(get_sc_url, headers=headers)

    if str(resp.url).startswith('https://www.startpage.com/sp/captcha'):
        raise SearxEngineCaptchaException(
            message="get_sc_code: got redirected to https://www.startpage.com/sp/captcha",
        )

    dom = lxml.html.fromstring(resp.text)

    try:
        sc_code = eval_xpath(dom, search_form_xpath + '//input[@name="sc"]/@value')[0]
    except IndexError as exc:
        logger.debug("suspend startpage API --> https://github.com/searxng/searxng/pull/695")
        raise SearxEngineResponseException(
            message="get_sc_code: [PR-695] query new sc time-stamp failed! (%s)" % resp.url,
        ) from exc

    sc_code_ts = time()
    logger.debug("get_sc_code: new value is: %s", sc_code)
    return sc_code
def _parse_published_date(content: str) -> tuple[str, datetime | None]:
    published_date = None

    # content may start with an absolute date like "2 Sep 2014 ... "
    if re.match(r"^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \.\.\. ", content):
        date_pos = content.find('...') + 4
        date_string = content[0 : date_pos - 5]
        # strip the date prefix from the content string
        content = content[date_pos:]
        published_date = dateutil.parser.parse(date_string, dayfirst=True)

    # content may start with a relative date like "5 days ago ... "
    elif re.match(r"^[0-9]+ days? ago \.\.\. ", content):
        date_pos = content.find('...') + 4
        date_string = content[0 : date_pos - 5]

        # calculate the datetime from the number of days
        published_date = datetime.now() - timedelta(days=int(re.match(r'\d+', date_string).group()))

        # strip the date prefix from the content string
        content = content[date_pos:]

    return content, published_date
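# Minimal illustration of _parse_published_date() on made-up result strings;
# this helper is only a sketch and is never called by SearXNG itself.
def _example_parse_published_date():
    # relative dates ("5 days ago ... ") are stripped and converted to a datetime
    content, date = _parse_published_date("5 days ago ... Lorem ipsum dolor sit amet.")
    assert content == "Lorem ipsum dolor sit amet."
    # date is roughly datetime.now() - timedelta(days=5)

    # absolute dates ("2 Sep 2014 ... ") are parsed with dateutil (dayfirst=True)
    content, date = _parse_published_date("2 Sep 2014 ... Lorem ipsum dolor sit amet.")
    assert content == "Lorem ipsum dolor sit amet."
    assert (date.year, date.month, date.day) == (2014, 9, 2)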
def _get_image_result(result) -> dict[str, Any] | None:
    url = result.get('altClickUrl')
    if not url:
        return None

    thumbnailUrl = None
    if result.get('thumbnailUrl'):
        thumbnailUrl = base_url + result['thumbnailUrl']

    resolution = None
    if result.get('width') and result.get('height'):
        resolution = f"{result['width']}x{result['height']}"

    filesize = None
    if result.get('filesize'):
        size_str = ''.join(filter(str.isdigit, result['filesize']))
        filesize = humanize_bytes(int(size_str))

    return {
        'template': 'images.html',
        'url': url,
        'title': html_to_text(result['title']),
        'img_src': result.get('rawImageUrl'),
        'thumbnail_src': thumbnailUrl,
        'resolution': resolution,
        'img_format': result.get('format'),
        'filesize': filesize,
    }
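# Illustrative sketch of the mapping done by _get_image_result().  The item
# below is made up; only the key names come from the function above, not from
# a real Startpage response.
def _example_image_result():
    item = {
        'altClickUrl': 'https://example.org/page.html',
        'thumbnailUrl': '/example/thumbnail.jpg',
        'rawImageUrl': 'https://example.org/image.jpg',
        'title': 'An <b>example</b> image',
        'width': 800,
        'height': 600,
        'format': 'jpeg',
        'filesize': '123456',
    }
    mapped = _get_image_result(item)
    # mapped['title'] == 'An example image', mapped['resolution'] == '800x600',
    # mapped['filesize'] is the humanized form of 123456 bytes
    return mapped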
428 """Fetch :ref:`languages <startpage languages>` and :ref:`regions <startpage
429 regions>` from Startpage."""
433 'User-Agent': gen_useragent(),
434 'Accept-Language':
"en-US,en;q=0.5",
436 resp = get(
'https://www.startpage.com/do/settings', headers=headers)
439 print(
"ERROR: response from Startpage is not OK.")
441 dom = lxml.html.fromstring(resp.text)
    # regions

    sp_region_names = []
    for option in dom.xpath('//form[@name="settings"]//select[@name="search_results_region"]/option'):
        sp_region_names.append(option.get('value'))

    for eng_tag in sp_region_names:
        if eng_tag == 'all':
            continue
        babel_region_tag = {'no_NO': 'nb_NO'}.get(eng_tag, eng_tag)  # Norway

        if '-' in babel_region_tag:
            l, r = babel_region_tag.split('-')
            sxng_tag = region_tag(babel.Locale.parse(l + '_' + r, sep='_'))

        else:
            try:
                sxng_tag = region_tag(babel.Locale.parse(babel_region_tag, sep='_'))

            except babel.UnknownLocaleError:
                print("ERROR: can't determine babel locale of startpage's locale %s" % eng_tag)
                continue

        conflict = engine_traits.regions.get(sxng_tag)
        if conflict:
            if conflict != eng_tag:
                print("CONFLICT: babel %s --> %s, %s" % (sxng_tag, conflict, eng_tag))
            continue
        engine_traits.regions[sxng_tag] = eng_tag
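    # Example of the mapping built above, assuming Startpage offers a region
    # value 'en-GB': babel parses it as 'en_GB' and the loop stores
    # engine_traits.regions['en-GB'] = 'en-GB'; the Norwegian special case ends
    # up as engine_traits.regions['nb-NO'] = 'no_NO'.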
    # languages

    catalog_engine2code = {name.lower(): lang_code for lang_code, name in babel.Locale('en').languages.items()}

    # add the native name of every language known by babel
    for lang_code in filter(lambda lang_code: lang_code.find('_') == -1, babel.localedata.locale_identifiers()):
        native_name = babel.Locale(lang_code).get_language_name()
        if not native_name:
            print(f"ERROR: language name of startpage's language {lang_code} is unknown by babel")
            continue
        native_name = native_name.lower()
        # add the native name exactly as it is
        catalog_engine2code[native_name] = lang_code

        # add the "normalized" name (e.g. français becomes francais)
        unaccented_name = ''.join(filter(lambda c: not combining(c), normalize('NFKD', native_name)))
        if len(unaccented_name) == len(unaccented_name.encode()):
            # add only if the result is plain ASCII, otherwise the normalization did not work
            catalog_engine2code[unaccented_name] = lang_code

    # names that can't be determined by babel's language names
    catalog_engine2code.update(
        {
            # traditional Chinese
            'fantizhengwen': 'zh_Hant',
        }
    )
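    # At this point the catalog resolves e.g. 'french' -> 'fr' (babel's English
    # name), 'francais' -> 'fr' (unaccented native name) and, via the manual
    # entries above, 'fantizhengwen' -> 'zh_Hant'.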
    skip_eng_tags = {
        'english_uk',  # 'en' is already mapped via 'english'
    }

    for option in dom.xpath('//form[@name="settings"]//select[@name="language"]/option'):

        eng_tag = option.get('value')
        if eng_tag in skip_eng_tags:
            continue
        name = extract_text(option).lower()

        sxng_tag = catalog_engine2code.get(eng_tag)
        if sxng_tag is None:
            sxng_tag = catalog_engine2code[name]

        conflict = engine_traits.languages.get(sxng_tag)
        if conflict:
            if conflict != eng_tag:
                print("CONFLICT: babel %s --> %s, %s" % (sxng_tag, conflict, eng_tag))
            continue
        engine_traits.languages[sxng_tag] = eng_tag
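# Illustrative sketch, not part of the engine module: fetching the traits by
# hand.  In SearXNG this normally happens through the engine-traits update
# tooling (searxng_extra/update/update_engine_traits.py).
def _example_fetch_traits():
    traits = EngineTraits()
    fetch_traits(traits)
    print(traits.regions)
    print(traits.languages)
    return traits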