2021-01-13 11:31:25 +01:00
|
|
|
# SPDX-License-Identifier: AGPL-3.0-or-later
|
2022-01-05 13:08:56 +01:00
|
|
|
# lint: pylint
|
2022-10-03 17:21:13 +02:00
|
|
|
"""Startpage's language & region selectors are a mess ..
|
|
|
|
|
|
|
|
.. _startpage regions:
|
|
|
|
|
|
|
|
Startpage regions
|
|
|
|
=================
|
|
|
|
|
|
|
|
In the list of regions there are tags we need to map to common region tags::
|
|
|
|
|
|
|
|
pt-BR_BR --> pt_BR
|
|
|
|
zh-CN_CN --> zh_Hans_CN
|
|
|
|
zh-TW_TW --> zh_Hant_TW
|
|
|
|
zh-TW_HK --> zh_Hant_HK
|
|
|
|
en-GB_GB --> en_GB
|
|
|
|
|
|
|
|
and there is at least one tag with a three letter language tag (ISO 639-2)::
|
|
|
|
|
|
|
|
fil_PH --> fil_PH
|
|
|
|
|
|
|
|
The locale code ``no_NO`` from Startpage does not exist and is mapped to
|
|
|
|
``nb-NO``::
|
|
|
|
|
|
|
|
babel.core.UnknownLocaleError: unknown locale 'no_NO'
|
|
|
|
|
|
|
|
For reference see languages-subtag at iana; ``no`` is the macrolanguage [1]_ and
|
|
|
|
W3C recommends subtag over macrolanguage [2]_.
|
|
|
|
|
|
|
|
.. [1] `iana: language-subtag-registry
|
|
|
|
<https://www.iana.org/assignments/language-subtag-registry/language-subtag-registry>`_ ::
|
|
|
|
|
|
|
|
type: language
|
|
|
|
Subtag: nb
|
|
|
|
Description: Norwegian Bokmål
|
|
|
|
Added: 2005-10-16
|
|
|
|
Suppress-Script: Latn
|
|
|
|
Macrolanguage: no
|
|
|
|
|
|
|
|
.. [2]
|
|
|
|
Use macrolanguages with care. Some language subtags have a Scope field set to
|
|
|
|
macrolanguage, i.e. this primary language subtag encompasses a number of more
|
|
|
|
specific primary language subtags in the registry. ... As we recommended for
|
|
|
|
the collection subtags mentioned above, in most cases you should try to use
|
|
|
|
the more specific subtags ... `W3: The primary language subtag
|
|
|
|
<https://www.w3.org/International/questions/qa-choosing-language-tags#langsubtag>`_
|
|
|
|
|
|
|
|
.. _startpage languages:
|
|
|
|
|
|
|
|
Startpage languages
|
|
|
|
===================
|
|
|
|
|
2022-10-30 11:23:20 +01:00
|
|
|
:py:obj:`send_accept_language_header`:
|
|
|
|
The displayed name in Startpage's settings page depend on the location of the
|
|
|
|
IP when ``Accept-Language`` HTTP header is unset. In :py:obj:`fetch_traits`
|
|
|
|
we use::
|
2022-10-03 17:21:13 +02:00
|
|
|
|
2022-10-30 11:23:20 +01:00
|
|
|
'Accept-Language': "en-US,en;q=0.5",
|
|
|
|
..
|
|
|
|
|
|
|
|
to get uniform names independent from the IP.
|
|
|
|
|
|
|
|
.. _startpage categories:
|
|
|
|
|
|
|
|
Startpage categories
|
|
|
|
====================
|
|
|
|
|
|
|
|
Startpage's category (for Web-search, News, Videos, ..) is set by
|
|
|
|
:py:obj:`startpage_categ` in settings.yml::
|
|
|
|
|
|
|
|
- name: startpage
|
|
|
|
engine: startpage
|
|
|
|
startpage_categ: web
|
|
|
|
...
|
|
|
|
|
|
|
|
.. hint::
|
|
|
|
|
|
|
|
The default category is ``web`` .. and other categories than ``web`` are not
|
|
|
|
yet implemented.
|
2022-01-05 13:08:56 +01:00
|
|
|
|
2021-01-13 11:31:25 +01:00
|
|
|
"""
|
2022-01-05 13:08:56 +01:00
|
|
|
|
2022-10-30 11:23:20 +01:00
|
|
|
from typing import TYPE_CHECKING
|
|
|
|
from collections import OrderedDict
|
2022-01-05 13:08:56 +01:00
|
|
|
import re
|
|
|
|
from unicodedata import normalize, combining
|
2022-10-30 11:23:20 +01:00
|
|
|
from time import time
|
2022-01-05 13:08:56 +01:00
|
|
|
from datetime import datetime, timedelta
|
2022-01-05 13:00:52 +01:00
|
|
|
|
2022-10-30 11:23:20 +01:00
|
|
|
import dateutil.parser
|
|
|
|
import lxml.html
|
|
|
|
import babel
|
2022-01-05 13:08:56 +01:00
|
|
|
|
2022-10-30 11:23:20 +01:00
|
|
|
from searx.utils import extract_text, eval_xpath, gen_useragent
|
2023-06-25 12:37:31 +02:00
|
|
|
from searx.network import get # see https://github.com/searxng/searxng/issues/762
|
2022-10-30 11:23:20 +01:00
|
|
|
from searx.exceptions import SearxEngineCaptchaException
|
|
|
|
from searx.locales import region_tag
|
2022-10-03 17:21:13 +02:00
|
|
|
from searx.enginelib.traits import EngineTraits
|
|
|
|
|
2022-10-30 11:23:20 +01:00
|
|
|
if TYPE_CHECKING:
|
|
|
|
import logging
|
|
|
|
|
|
|
|
logger: logging.Logger
|
|
|
|
|
2022-10-03 17:21:13 +02:00
|
|
|
traits: EngineTraits
|
2014-09-02 19:57:01 +02:00
|
|
|
|
2021-01-13 11:31:25 +01:00
|
|
|
# about
about = {
    "website": 'https://startpage.com',
    "wikidata_id": 'Q2333295',
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

startpage_categ = 'web'
"""Startpage's category, visit :ref:`startpage categories`.
"""

send_accept_language_header = True
"""Startpage tries to guess user's language and territory from the HTTP
``Accept-Language``.  Optionally the user can select a search-language (can be
different to the UI language) and a region filter.
"""

# engine dependent config
categories = ['general', 'web']
paging = True
max_page = 18
"""Tested 18 pages maximum (argument ``page``); to be safe the max is set to 18."""

time_range_support = True
safesearch = True

# maps SearXNG's time-range names to Startpage's ``with_date`` POST argument
time_range_dict = {'day': 'd', 'week': 'w', 'month': 'm', 'year': 'y'}
# maps SearXNG's safesearch level (0/1/2) to Startpage's family-filter cookie
# value -- Startpage only distinguishes off ('0') and on ('1')
safesearch_dict = {0: '0', 1: '1', 2: '1'}

# search-url
base_url = 'https://www.startpage.com'
search_url = base_url + '/sp/search'

# specific xpath variables
# ads xpath //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
# not ads: div[@class="result"] are the direct childs of div[@id="results"]
results_xpath = '//div[@class="w-gl__result__main"]'
link_xpath = './/a[@class="w-gl__result-title result-link"]'
content_xpath = './/p[@class="w-gl__description"]'
search_form_xpath = '//form[@id="search"]'
"""XPath of Startpage's origin search form

.. code: html

    <form action="/sp/search" method="post">
      <input type="text" name="query" value="" ..>
      <input type="hidden" name="t" value="device">
      <input type="hidden" name="lui" value="english">
      <input type="hidden" name="sc" value="Q7Mt5TRqowKB00">
      <input type="hidden" name="cat" value="web">
      <input type="hidden" class="abp" id="abp-input" name="abp" value="1">
    </form>
"""

# timestamp of the last fetch of 'sc' code
sc_code_ts = 0
# cached 'sc' code (empty string means "not fetched yet")
sc_code = ''
sc_code_cache_sec = 30
"""Time in seconds the sc-code is cached in memory :py:obj:`get_sc_code`."""
|
def get_sc_code(searxng_locale, params):
    """Get an actual ``sc`` argument from Startpage's search form (HTML page).

    Startpage puts a ``sc`` argument on every HTML :py:obj:`search form
    <search_form_xpath>`.  Without this argument Startpage considers the request
    is from a bot.  We do not know what is encoded in the value of the ``sc``
    argument, but it seems to be a kind of a *time-stamp*.

    Startpage's search form generates a new sc-code on each request.  This
    function scrapes a new sc-code from Startpage's home page every
    :py:obj:`sc_code_cache_sec` seconds.

    :param searxng_locale: SearXNG's locale tag (``-`` separated, or ``'all'``),
        used to build the ``Accept-Language`` header of the scrape request.
    :param params: request params of the running search; only
        ``params['headers']`` is read here (as a template for the headers of
        the scrape request).
    :returns: the (possibly cached) ``sc`` code as a string.
    :raises SearxEngineCaptchaException: when Startpage redirects to its
        CAPTCHA page or the scraped form has no ``sc`` input.
    """

    global sc_code_ts, sc_code  # pylint: disable=global-statement

    # reuse the cached code while it has not expired
    if sc_code and (time() < (sc_code_ts + sc_code_cache_sec)):
        logger.debug("get_sc_code: reuse '%s'", sc_code)
        return sc_code

    # copy the search request's headers and make the request look like it
    # originates from Startpage itself
    headers = {**params['headers']}
    headers['Origin'] = base_url
    headers['Referer'] = base_url + '/'
    # headers['Connection'] = 'keep-alive'
    # headers['Accept-Encoding'] = 'gzip, deflate, br'
    # headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8'
    # headers['User-Agent'] = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:105.0) Gecko/20100101 Firefox/105.0'

    # add Accept-Language header
    if searxng_locale == 'all':
        searxng_locale = 'en-US'
    locale = babel.Locale.parse(searxng_locale, sep='-')

    if send_accept_language_header:
        ac_lang = locale.language
        if locale.territory:
            ac_lang = "%s-%s,%s;q=0.9,*;q=0.5" % (
                locale.language,
                locale.territory,
                locale.language,
            )
        headers['Accept-Language'] = ac_lang

    # NOTE(review): the expired (or initially empty) old sc_code is sent along
    # when requesting a new one -- presumably intentional; verify upstream.
    get_sc_url = base_url + '/?sc=%s' % (sc_code)
    logger.debug("query new sc time-stamp ... %s", get_sc_url)
    logger.debug("headers: %s", headers)
    resp = get(get_sc_url, headers=headers)

    # ?? x = network.get('https://www.startpage.com/sp/cdn/images/filter-chevron.svg', headers=headers)
    # ?? https://www.startpage.com/sp/cdn/images/filter-chevron.svg
    # ?? ping-back URL: https://www.startpage.com/sp/pb?sc=TLsB0oITjZ8F21

    # a redirect to the CAPTCHA page means we are (temporarily) blocked
    if str(resp.url).startswith('https://www.startpage.com/sp/captcha'):  # type: ignore
        raise SearxEngineCaptchaException(
            message="get_sc_code: got redirected to https://www.startpage.com/sp/captcha",
        )

    dom = lxml.html.fromstring(resp.text)  # type: ignore

    try:
        # the sc-code is the value of the hidden <input name="sc"> element
        sc_code = eval_xpath(dom, search_form_xpath + '//input[@name="sc"]/@value')[0]
    except IndexError as exc:
        logger.debug("suspend startpage API --> https://github.com/searxng/searxng/pull/695")
        raise SearxEngineCaptchaException(
            message="get_sc_code: [PR-695] query new sc time-stamp failed! (%s)" % resp.url,  # type: ignore
        ) from exc

    sc_code_ts = time()
    logger.debug("get_sc_code: new value is: %s", sc_code)
    return sc_code
|
2022-01-06 18:29:04 +01:00
|
|
|
|
|
|
|
|
2022-10-30 11:23:20 +01:00
|
|
|
def request(query, params):
    """Assemble a Startpage request.

    To avoid CAPTCHA we need to send a well formed HTTP POST request with a
    cookie. We need to form a request that is identical to the request build by
    Startpage's search form:

    - in the cookie the **region** is selected
    - in the HTTP POST data the **language** is selected

    Additionally the arguments form Startpage's search form needs to be set in
    HTML POST data / compare ``<input>`` elements: :py:obj:`search_form_xpath`.

    :param query: the user's search terms
    :param params: SearXNG request params, updated in place and returned
    """
    if startpage_categ == 'web':
        return _request_cat_web(query, params)

    # BUGFIX: format string used '%' instead of '%s', which dropped the
    # category name from the log message (also fixed "Startpages's" typo).
    logger.error("Startpage's category '%s' is not yet implemented.", startpage_categ)
    return params
|
2022-01-06 18:29:04 +01:00
|
|
|
|
2014-01-24 09:35:27 +01:00
|
|
|
|
2022-10-30 11:23:20 +01:00
|
|
|
def _request_cat_web(query, params):
    """Build the HTTP POST request for Startpage's ``web`` category.

    Selects region (cookie) and language (POST data) from the engine traits,
    assembles the ``preferences`` cookie and the POST form data, and updates
    ``params`` in place.
    """
    engine_region = traits.get_region(params['searxng_locale'], 'en-US')
    engine_language = traits.get_language(params['searxng_locale'], 'en')

    # POST arguments, mirroring Startpage's own search form
    form_args = {
        'query': query,
        'cat': 'web',
        't': 'device',
        'sc': get_sc_code(params['searxng_locale'], params),  # hint: this func needs HTTP headers,
        'with_date': time_range_dict.get(params['time_range'], ''),
    }

    if engine_language:
        form_args['language'] = engine_language
        form_args['lui'] = engine_language

    form_args['abp'] = '1'
    if params['pageno'] > 1:
        form_args['page'] = params['pageno']

    # assemble the 'preferences' cookie
    lang_homepage = 'en'
    cookie = OrderedDict(
        [
            ('date_time', 'world'),
            ('disable_family_filter', safesearch_dict[params['safesearch']]),
            ('disable_open_in_new_window', '0'),
            ('enable_post_method', '1'),  # hint: POST
            ('enable_proxy_safety_suggest', '1'),
            ('enable_stay_control', '1'),
            ('instant_answers', '1'),
            ('lang_homepage', 's/device/%s/' % lang_homepage),
            ('num_of_results', '10'),
            ('suggestions', '1'),
            ('wt_unit', 'celsius'),
        ]
    )

    if engine_language:
        cookie['language'] = engine_language
        cookie['language_ui'] = engine_language

    if engine_region:
        cookie['search_results_region'] = engine_region

    # Startpage encodes the cookie as key-EEE-value pairs joined by N1N
    params['cookies']['preferences'] = 'N1N'.join("%sEEE%s" % pair for pair in cookie.items())
    logger.debug('cookie preferences: %s', params['cookies']['preferences'])

    # POST request
    logger.debug("data: %s", form_args)
    params['data'] = form_args
    params['method'] = 'POST'
    params['url'] = search_url
    params['headers']['Origin'] = base_url
    params['headers']['Referer'] = base_url + '/'
    # is the Accept header needed?
    # params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'

    return params
|
|
|
|
|
|
|
|
|
2014-09-02 19:57:01 +02:00
|
|
|
# get response from search-request
|
2013-10-19 18:29:39 +02:00
|
|
|
def response(resp):
    """Parse Startpage's response; dispatch on :py:obj:`startpage_categ`.

    :param resp: the HTTP response of the search request
    :returns: list of result dicts (empty for unimplemented categories)
    """
    dom = lxml.html.fromstring(resp.text)

    if startpage_categ == 'web':
        return _response_cat_web(dom)

    # BUGFIX: format string used '%' instead of '%s', which dropped the
    # category name from the log message (also fixed "Startpages's" typo).
    logger.error("Startpage's category '%s' is not yet implemented.", startpage_categ)
    return []
|
|
|
|
|
|
|
|
|
|
|
|
def _response_cat_web(dom):
    """Extract web-search results from Startpage's result page (DOM).

    Skips ad and self-referencing links, strips a leading date prefix from
    the content (e.g. ``"2 Sep 2014 ... "`` or ``"5 days ago ... "``) and
    reports it as ``publishedDate`` when present.
    """
    results = []

    # parse results
    for node in eval_xpath(dom, results_xpath):
        anchors = eval_xpath(node, link_xpath)
        if not anchors:
            continue
        anchor = anchors[0]
        url = anchor.attrib.get('href')

        # block google-ad url's
        if re.match(r"^http(s|)://(www\.)?google\.[a-z]+/aclk.*$", url):
            continue

        # block startpage search url's
        if re.match(r"^http(s|)://(www\.)?startpage\.com/do/search\?.*$", url):
            continue

        title = extract_text(anchor)

        content_nodes = eval_xpath(node, content_xpath)
        content: str = extract_text(content_nodes) if content_nodes else ''  # type: ignore

        published_date = None

        # check if search result starts with something like: "2 Sep 2014 ... "
        if re.match(r"^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \.\.\. ", content):
            date_pos = content.find('...') + 4
            date_string = content[0 : date_pos - 5]
            # fix content string
            content = content[date_pos:]
            try:
                published_date = dateutil.parser.parse(date_string, dayfirst=True)
            except ValueError:
                # e.g. localized month names dateutil does not know ('24 Ene 2013')
                pass

        # check if search result starts with something like: "5 days ago ... "
        elif re.match(r"^[0-9]+ days? ago \.\.\. ", content):
            date_pos = content.find('...') + 4
            date_string = content[0 : date_pos - 5]
            # calculate datetime
            published_date = datetime.now() - timedelta(days=int(re.match(r'\d+', date_string).group()))  # type: ignore
            # fix content string
            content = content[date_pos:]

        item = {'url': url, 'title': title, 'content': content}
        if published_date:
            item['publishedDate'] = published_date
        results.append(item)

    return results
|
2020-09-14 09:06:58 +02:00
|
|
|
|
|
|
|
|
2022-10-03 17:21:13 +02:00
|
|
|
def fetch_traits(engine_traits: EngineTraits):
    """Fetch :ref:`languages <startpage languages>` and :ref:`regions <startpage
    regions>` from Startpage.

    Scrapes Startpage's settings page and fills ``engine_traits.regions`` and
    ``engine_traits.languages`` by mapping Startpage's region / language
    option values to SearXNG (babel) locale tags.
    """
    # pylint: disable=too-many-branches

    headers = {
        'User-Agent': gen_useragent(),
        'Accept-Language': "en-US,en;q=0.5",  # bing needs to set the English language
    }
    resp = get('https://www.startpage.com/do/settings', headers=headers)

    if not resp.ok:  # type: ignore
        print("ERROR: response from Startpage is not OK.")

    dom = lxml.html.fromstring(resp.text)  # type: ignore

    # regions

    # collect the values of the region <select> options
    sp_region_names = []
    for option in dom.xpath('//form[@name="settings"]//select[@name="search_results_region"]/option'):
        sp_region_names.append(option.get('value'))

    for eng_tag in sp_region_names:
        if eng_tag == 'all':
            continue
        # 'no_NO' is not a babel locale, map it to 'nb_NO' (see module docstring)
        babel_region_tag = {'no_NO': 'nb_NO'}.get(eng_tag, eng_tag)  # norway

        if '-' in babel_region_tag:
            # tags like 'pt-BR_BR' / 'zh-TW_HK': keep the language part, use
            # the last territory part (see "Startpage regions" in the module
            # docstring)
            l, r = babel_region_tag.split('-')
            r = r.split('_')[-1]
            sxng_tag = region_tag(babel.Locale.parse(l + '_' + r, sep='_'))

        else:
            try:
                sxng_tag = region_tag(babel.Locale.parse(babel_region_tag, sep='_'))

            except babel.UnknownLocaleError:
                print("ERROR: can't determine babel locale of startpage's locale %s" % eng_tag)
                continue

        # keep the first mapping, report conflicting ones
        conflict = engine_traits.regions.get(sxng_tag)
        if conflict:
            if conflict != eng_tag:
                print("CONFLICT: babel %s --> %s, %s" % (sxng_tag, conflict, eng_tag))
            continue
        engine_traits.regions[sxng_tag] = eng_tag

    # languages

    # map of lower-cased (English and native) language names --> babel code
    catalog_engine2code = {name.lower(): lang_code for lang_code, name in babel.Locale('en').languages.items()}

    # get the native name of every language known by babel

    for lang_code in filter(
        lambda lang_code: lang_code.find('_') == -1, babel.localedata.locale_identifiers()  # type: ignore
    ):
        native_name = babel.Locale(lang_code).get_language_name().lower()  # type: ignore
        # add native name exactly as it is
        catalog_engine2code[native_name] = lang_code

        # add "normalized" language name (i.e. français becomes francais and español becomes espanol)
        unaccented_name = ''.join(filter(lambda c: not combining(c), normalize('NFKD', native_name)))
        if len(unaccented_name) == len(unaccented_name.encode()):
            # add only if result is ascii (otherwise "normalization" didn't work)
            catalog_engine2code[unaccented_name] = lang_code

    # values that can't be determined by babel's languages names

    catalog_engine2code.update(
        {
            # traditional chinese used in ..
            'fantizhengwen': 'zh_Hant',
            # Korean alphabet
            'hangul': 'ko',
            # Malayalam is one of 22 scheduled languages of India.
            'malayam': 'ml',
            'norsk': 'nb',
            'sinhalese': 'si',
        }
    )

    skip_eng_tags = {
        'english_uk',  # SearXNG lang 'en' already maps to 'english'
    }

    for option in dom.xpath('//form[@name="settings"]//select[@name="language"]/option'):

        eng_tag = option.get('value')
        if eng_tag in skip_eng_tags:
            continue
        name = extract_text(option).lower()  # type: ignore

        # try the option value first, fall back to the displayed name;
        # a KeyError here means a new/unknown Startpage language
        sxng_tag = catalog_engine2code.get(eng_tag)
        if sxng_tag is None:
            sxng_tag = catalog_engine2code[name]

        # keep the first mapping, report conflicting ones
        conflict = engine_traits.languages.get(sxng_tag)
        if conflict:
            if conflict != eng_tag:
                print("CONFLICT: babel %s --> %s, %s" % (sxng_tag, conflict, eng_tag))
            continue
        engine_traits.languages[sxng_tag] = eng_tag