# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Startpage (Web)

"""

import re
from time import time
from urllib.parse import urlencode
from unicodedata import normalize, combining
from datetime import datetime, timedelta

from dateutil import parser
from lxml import html
from babel import Locale
from babel.localedata import locale_identifiers

from searx.network import get
from searx.utils import extract_text, eval_xpath, match_language
from searx.exceptions import (
    SearxEngineResponseException,
    SearxEngineCaptchaException,
)

# about
about = {
    "website": 'https://startpage.com',
    "wikidata_id": 'Q2333295',
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['general', 'web']
# there is a mechanism to block "bot" searches (probably the parameter qid);
# it requires storing qid values between multiple search calls
paging = True
supported_languages_url = 'https://www.startpage.com/do/settings'

# search-url
base_url = 'https://startpage.com/'
search_url = base_url + 'sp/search?'

# specific xpath variables
# ads xpath //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
# not ads: div[@class="result"] are the direct children of div[@id="results"]
results_xpath = '//div[@class="w-gl__result__main"]'
link_xpath = './/a[@class="w-gl__result-title result-link"]'
content_xpath = './/p[@class="w-gl__description"]'

# timestamp of the last fetch of the 'sc' code
sc_code_ts = 0
sc_code = ''


def raise_captcha(resp):

    if str(resp.url).startswith('https://www.startpage.com/sp/captcha'):
        # suspend the engine for 7 days when a CAPTCHA is shown
        raise SearxEngineCaptchaException(suspended_time=7 * 24 * 3600)


def get_sc_code(headers):
    """Get an up-to-date `sc` argument from Startpage's home page.

    Startpage puts a `sc` argument on every link.  Without this argument
    Startpage considers the request to come from a bot.  We do not know what
    is encoded in the value of the `sc` argument, but it seems to be a kind
    of *time-stamp*.  This *time-stamp* is valid for a few hours.

    This function scrapes a new *time-stamp* from Startpage's home page
    roughly every hour (every 3000 seconds).

    """
    global sc_code_ts, sc_code  # pylint: disable=global-statement

    if time() > (sc_code_ts + 3000):
        logger.debug("query new sc time-stamp ...")

        resp = get(base_url, headers=headers)
        raise_captcha(resp)
        dom = html.fromstring(resp.text)

        try:
            # <input type="hidden" name="sc" value="...">
            sc_code = eval_xpath(dom, '//input[@name="sc"]/@value')[0]
        except IndexError as exc:
            # suspend startpage API --> https://github.com/searxng/searxng/pull/695
            raise SearxEngineResponseException(
                suspended_time=7 * 24 * 3600, message="PR-695: query new sc time-stamp failed!"
            ) from exc

        sc_code_ts = time()
        logger.debug("new value is: %s", sc_code)

    return sc_code
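
# A minimal usage sketch (illustrative only; the header dict is an assumption,
# any headers accepted by searx.network.get would do):
#
#     sc = get_sc_code({'User-Agent': 'Mozilla/5.0 ...'})  # fetches the home page
#     sc = get_sc_code({'User-Agent': 'Mozilla/5.0 ...'})  # returns the cached value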


# do search-request
def request(query, params):
    # pylint: disable=line-too-long
    # The format string from Startpage's FFox add-on [1]::
    #
    #     https://www.startpage.com/do/dsearch?query={searchTerms}&cat=web&pl=ext-ff&language=__MSG_extensionUrlLanguage__&extVersion=1.3.0
    #
    # [1] https://addons.mozilla.org/en-US/firefox/addon/startpage-private-search/

    args = {
        'query': query,
        'page': params['pageno'],
        'cat': 'web',
        # 'pl': 'ext-ff',
        # 'extVersion': '1.3.0',
        # 'abp': "-1",
        'sc': get_sc_code(params['headers']),
    }

    # set language if specified
    if params['language'] != 'all':
        lang_code = match_language(params['language'], supported_languages, fallback=None)
        if lang_code:
            language_name = supported_languages[lang_code]['alias']
            args['language'] = language_name
            args['lui'] = language_name

    params['url'] = search_url + urlencode(args)
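    # e.g. (illustrative) the query 'test' on page 1 yields an URL like
    #   https://startpage.com/sp/search?query=test&page=1&cat=web&sc=...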
    return params


# get response from search-request
def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    # parse results
    for result in eval_xpath(dom, results_xpath):
        links = eval_xpath(result, link_xpath)
        if not links:
            continue
        link = links[0]
        url = link.attrib.get('href')

        # block google-ad url's
        if re.match(r"^http(s|)://(www\.)?google\.[a-z]+/aclk.*$", url):
            continue

        # block startpage search url's
        if re.match(r"^http(s|)://(www\.)?startpage\.com/do/search\?.*$", url):
            continue

        title = extract_text(link)

        if eval_xpath(result, content_xpath):
            content = extract_text(eval_xpath(result, content_xpath))
        else:
            content = ''

        published_date = None

        # check if search result starts with something like: "2 Sep 2014 ... "
        if re.match(r"^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \.\.\. ", content):
            date_pos = content.find('...') + 4
            date_string = content[0 : date_pos - 5]
            # strip the date prefix from the content string
            content = content[date_pos:]
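
            # e.g. (illustrative) content = "2 Sep 2014 ... Rest of the snippet."
            # yields date_string = "2 Sep 2014" and content = "Rest of the snippet."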

            try:
                published_date = parser.parse(date_string, dayfirst=True)
            except ValueError:
                # ignore date strings dateutil cannot parse, e.g. localized
                # dates like '24 Ene 2013' (see issue #1870)
                pass

        # check if search result starts with something like: "5 days ago ... "
        elif re.match(r"^[0-9]+ days? ago \.\.\. ", content):
            date_pos = content.find('...') + 4
            date_string = content[0 : date_pos - 5]

            # calculate datetime
            published_date = datetime.now() - timedelta(days=int(re.match(r'\d+', date_string).group()))

            # strip the date prefix from the content string
            content = content[date_pos:]
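
            # e.g. (illustrative) content = "5 days ago ... Rest of the snippet."
            # yields published_date = datetime.now() - timedelta(days=5)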

        if published_date:
            # append result
            results.append({'url': url, 'title': title, 'content': content, 'publishedDate': published_date})
        else:
            # append result
            results.append({'url': url, 'title': title, 'content': content})

    # return results
    return results


# get supported languages from their site
def _fetch_supported_languages(resp):
    # Startpage's language selector is a mess: each option has a displayed name
    # and a value, either of which may represent the language name in the native
    # script, the language name in English, an English transliteration of the
    # native name, the English name of the writing script used by the language,
    # or occasionally something else entirely.

    # these cases are so special they need to be hardcoded; a couple of them are
    # misspellings
    language_names = {
        'english_uk': 'en-GB',
        'fantizhengwen': ['zh-TW', 'zh-HK'],
        'hangul': 'ko',
        'malayam': 'ml',
        'norsk': 'nb',
        'sinhalese': 'si',
        'sudanese': 'su',
    }

    # get the English name of every language known by babel
    language_names.update(
        {
            # fmt: off
            name.lower(): lang_code
            # pylint: disable=protected-access
            for lang_code, name in Locale('en')._data['languages'].items()
            # fmt: on
        }
    )
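
    # e.g. this update adds entries such as 'french' -> 'fr' and 'german' -> 'de'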

    # get the native name of every language known by babel
    for lang_code in filter(lambda lang_code: lang_code.find('_') == -1, locale_identifiers()):
        native_name = Locale(lang_code).get_language_name().lower()
        # add native name exactly as it is
        language_names[native_name] = lang_code

        # add "normalized" language name (i.e. français becomes francais and español becomes espanol)
        unaccented_name = ''.join(filter(lambda c: not combining(c), normalize('NFKD', native_name)))
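        # e.g. NFKD decomposes 'español' into 'espan', a combining tilde and 'ol';
        # dropping the combining marks leaves the pure ASCII name 'espanol'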
        if len(unaccented_name) == len(unaccented_name.encode()):
            # add only if result is ascii (otherwise "normalization" didn't work)
            language_names[unaccented_name] = lang_code

    dom = html.fromstring(resp.text)
    sp_lang_names = []
    for option in dom.xpath('//form[@name="settings"]//select[@name="language"]/option'):
        sp_lang_names.append((option.get('value'), extract_text(option).lower()))

    supported_languages = {}
    for sp_option_value, sp_option_text in sp_lang_names:
        lang_code = language_names.get(sp_option_value) or language_names.get(sp_option_text)
        if isinstance(lang_code, str):
            supported_languages[lang_code] = {'alias': sp_option_value}
        elif isinstance(lang_code, list):
            for _lc in lang_code:
                supported_languages[_lc] = {'alias': sp_option_value}
        else:
            print('Unknown language option in Startpage: {} ({})'.format(sp_option_value, sp_option_text))
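
    # e.g. (illustrative) the option value 'english_uk' ends up as
    #   supported_languages['en-GB'] = {'alias': 'english_uk'}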

    return supported_languages