# Startpage (Web)
#
# @website     https://startpage.com
# @provide-api no (nothing found)
#
# @using-api   no
# @results     HTML
# @stable      no (HTML can change)
# @parse       url, title, content

from lxml import html
from dateutil import parser
from datetime import datetime, timedelta
import re
from searx.engines.xpath import extract_text
from searx.languages import language_codes

# engine dependent config
categories = ['general']
# there is a mechanism to block "bot" searches
# (probably via the parameter qid), which requires
# storing qids between multiple search calls
paging = True
language_support = True

# search-url
base_url = 'https://startpage.com/'
search_url = base_url + 'do/search'

# specific xpath variables
# ads xpath (older layout): //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
# not ads (older layout): div[@class="result"] are the direct children of div[@id="results"]
results_xpath = '//div[@class="w-gl__result"]'
link_xpath = './/a[@class="w-gl__result-title"]'
content_xpath = './/p[@class="w-gl__description"]'
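
# for orientation, the selectors above assume result markup roughly of this
# shape (a sketch inferred from the class names; the live page may differ):
#
#   <div class="w-gl__result">
#       <a class="w-gl__result-title" href="https://example.com/">title</a>
#       <p class="w-gl__description">snippet text</p>
#   </div>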


# do search-request
def request(query, params):
    params['url'] = search_url
    params['method'] = 'POST'
    params['data'] = {
        'query': query,
        'page': params['pageno'],
        'cat': 'web',
        'cmd': 'process_search',
        'engine0': 'v1all',
    }

    # set language if specified
    if params['language'] != 'all':
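        # startpage's form fields take a language name rather than a locale
        # code; look it up in searx's language_codes table (default: english)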
        language = 'english'
        for lc, _, _, lang in language_codes:
            if lc == params['language']:
                language = lang
        params['data']['language'] = language
        params['data']['lui'] = language

    return params
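

# a usage sketch with hypothetical values: request('searx', {'pageno': 2,
# 'language': 'de-DE'}) fills params with a POST to
# https://startpage.com/do/search carrying the form data query=searx, page=2,
# cat=web, cmd=process_search, engine0=v1all, plus matching language/lui fields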


# get response from search-request
def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    # parse results
    for result in dom.xpath(results_xpath):
        links = result.xpath(link_xpath)
        if not links:
            continue
        link = links[0]
        url = link.attrib.get('href')

        # block google-ad URLs
        if re.match(r"^https?://(www\.)?google\.[a-z]+/aclk.*$", url):
            continue

        # block startpage search URLs
        if re.match(r"^https?://(www\.)?startpage\.com/do/search\?.*$", url):
            continue

        title = extract_text(link)

        if result.xpath(content_xpath):
            content = extract_text(result.xpath(content_xpath))
        else:
            content = ''

        published_date = None

        # check if search result starts with something like: "2 Sep 2014 ... "
        if re.match(r"^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \.\.\. ", content):
            date_pos = content.find('...') + 4
            date_string = content[0:date_pos - 5]
            published_date = parser.parse(date_string, dayfirst=True)

            # strip the date prefix from the content string
            content = content[date_pos:]
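
        # worked example with hypothetical content "2 Sep 2014 ... Lorem":
        # '...' starts at index 11, so date_pos is 15, date_string is
        # content[0:10] == "2 Sep 2014", and the remaining content is "Lorem"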

        # check if search result starts with something like: "5 days ago ... "
        elif re.match(r"^[0-9]+ days? ago \.\.\. ", content):
            date_pos = content.find('...') + 4
            date_string = content[0:date_pos - 5]

            # calculate datetime from the "n days ago" prefix
            published_date = datetime.now() - timedelta(days=int(re.match(r'\d+', date_string).group()))

            # strip the date prefix from the content string
            content = content[date_pos:]

        # append result; include the date only when one was found
        result_entry = {'url': url,
                        'title': title,
                        'content': content}
        if published_date:
            result_entry['publishedDate'] = published_date
        results.append(result_entry)

    # return results
    return results