# (scrape artifact — page header, not code)
# mirror of https://github.com/searxng/searxng.git, synced 2024-11-05 12:50:11 +01:00
# file: searx/engines/dictzone.py — 61 lines, 1.4 KiB, Python
# SPDX-License-Identifier: AGPL-3.0-or-later
# commit date (scrape artifact): 2016-09-06 16:36:04 +02:00
"""
Dictzone
"""
from urllib.parse import urljoin
# commit date (scrape artifact): 2016-09-06 11:47:27 +02:00
from lxml import html
from searx.utils import eval_xpath
# commit date (scrape artifact): 2016-09-06 11:47:27 +02:00
# about
about = {
    "website": 'https://dictzone.com/',
    "wikidata_id": None,
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine configuration
# NOTE: 'online_dictionnary' (sic) is the exact engine-type key the searx
# core dispatches on — do not correct the spelling here in isolation.
engine_type = 'online_dictionnary'
categories = ['general']

# URL template; from_lang/to_lang are English language names, e.g.
# https://dictzone.com/english-german-dictionary/hello
url = 'https://dictzone.com/{from_lang}-{to_lang}-dictionary/{query}'
weight = 100

# each result is one row of the table with id="r"
results_xpath = './/table[@id="r"]/tr'
https_support = True
def request(query, params):
    """Build the dictzone request URL into ``params['url']``.

    ``params['from_lang']`` / ``params['to_lang']`` are language tuples;
    index 2 holds the English language name used in the URL path
    (assumption from the searx online_dictionnary contract — confirm
    against the core dispatcher).
    """
    params['url'] = url.format(from_lang=params['from_lang'][2],
                               to_lang=params['to_lang'][2],
                               query=params['query'])
    return params
# commit date (scrape artifact): 2016-09-06 11:47:27 +02:00
def response(resp):
    """Parse the dictzone result page into a list of searx result dicts.

    Each table row yields one result: the first <td> is the source term
    (title), the second holds the translations, joined into ``content``.
    """
    results = []
    dom = html.fromstring(resp.text)

    # first row is the table header -> skip it
    for k, result in enumerate(eval_xpath(dom, results_xpath)[1:]):
        try:
            from_result, to_results_raw = eval_xpath(result, './td')
        except ValueError:
            # row does not contain exactly two <td> cells -> not a result row
            continue

        to_results = []
        for to_result in eval_xpath(to_results_raw, './p/a'):
            t = to_result.text_content()
            if t.strip():
                to_results.append(t)

        results.append({
            # append '?<row index>' so every row gets a distinct URL
            'url': urljoin(str(resp.url), '?%d' % k),
            'title': from_result.text_content(),
            'content': '; '.join(to_results)
        })

    return results