# SPDX-License-Identifier: AGPL-3.0-or-later
"""Duden
"""

import re
from urllib.parse import quote, urljoin
from lxml import html
from searx.utils import extract_text, eval_xpath, eval_xpath_list, eval_xpath_getindex
from searx.network import raise_for_httperror

# about
about = {
    "website": 'https://www.duden.de',
    "wikidata_id": 'Q73624591',
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
    "language": 'de',
}

categories = ['dictionaries']
paging = True
# search-url
base_url = 'https://www.duden.de/'
search_url = base_url + 'suchen/dudenonline/{query}?search_api_fulltext=&page={offset}'


def request(query, params):
    offset = params['pageno'] - 1
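    # Duden serves the first result page from the plain
    # /suchen/dudenonline/{query} URL; later pages use the zero-based ``page``
    # query parameter of ``search_url``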
    if offset == 0:
        search_url_fmt = base_url + 'suchen/dudenonline/{query}'
        params['url'] = search_url_fmt.format(query=quote(query))
    else:
        params['url'] = search_url.format(offset=offset, query=quote(query))
    # after the last page of results, spelling corrections are returned after an
    # HTTP redirect, whatever the page number is
    params['soft_max_redirects'] = 1
    params['raise_for_httperror'] = False
    return params


def response(resp):
    results = []

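    # a 404 status means Duden has no entry for the query; return an empty
    # result list instead of raising an error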
    if resp.status_code == 404:
        return results

    raise_for_httperror(resp)

    dom = html.fromstring(resp.text)

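    # the active pager link carries the total hit count in its <span>; strip
    # everything but the digits before converting to an int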
    number_of_results_element = eval_xpath_getindex(
        dom, '//a[@class="active" and contains(@href,"/suchen/dudenonline")]/span/text()', 0, default=None
    )
    if number_of_results_element is not None:
        number_of_results_string = re.sub('[^0-9]', '', number_of_results_element)
        results.append({'number_of_results': int(number_of_results_string)})

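    # every <section> that is not an essay block is one dictionary entry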
    for result in eval_xpath_list(dom, '//section[not(contains(@class, "essay"))]'):
        url = eval_xpath_getindex(result, './/h2/a', 0).get('href')
        url = urljoin(base_url, url)
        title = eval_xpath(result, 'string(.//h2/a)').strip()
        content = extract_text(eval_xpath(result, './/p'))
        # append result
        results.append({'url': url, 'title': title, 'content': content})

    return results