# Doku Wiki
#
# @website https://www.dokuwiki.org/
# @provide-api yes
# (https://www.dokuwiki.org/devel:xmlrpc)
#
# @using-api no
# @results HTML
# @stable yes
# @parse (general) url, title, content

from urllib.parse import urlencode
from lxml.html import fromstring
from searx.engines.xpath import extract_text
from searx.utils import eval_xpath

# engine dependent config
categories = ['general']  # TODO , 'images', 'music', 'videos', 'files'
paging = False
language_support = False
number_of_results = 5

# search-url
# Doku is OpenSearch compatible
base_url = 'http://localhost:8090'
search_url = '/?do=search'\
             '&{query}'
# TODO '&startRecord={offset}'\
# TODO '&maximumRecords={limit}'\
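
# Note: base_url above is a placeholder; a real deployment points it at the
# DokuWiki instance to search (e.g. via this engine's entry in settings.yml,
# which can override module-level defaults like base_url).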


# do search-request
def request(query, params):
    params['url'] = base_url +\
        search_url.format(query=urlencode({'id': query}))

    return params
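
# Example of the generated URL (hypothetical query, default base_url):
#   request('foo bar', {})['url']
#   == 'http://localhost:8090/?do=search&id=foo+bar'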


# get response from search-request
def response(resp):
    results = []

    doc = fromstring(resp.text)

    # parse results
    # Quickhits
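    # The quickhit markup is assumed to look roughly like this (inferred
    # from the XPath below, not from an official DokuWiki schema):
    #   <div class="search_quickresult">
    #     <ul><li><a class="wikilink1" href="..." title="...">...</a></li></ul>
    #   </div>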
    for r in eval_xpath(doc, '//div[@class="search_quickresult"]/ul/li'):
        try:
            res_url = eval_xpath(r, './/a[@class="wikilink1"]/@href')[-1]
        except IndexError:
            # no link found in this list item
            continue

        if not res_url:
            continue

        title = extract_text(eval_xpath(r, './/a[@class="wikilink1"]/@title'))

        # append result
        results.append({'title': title,
                        'content': "",
                        'url': base_url + res_url})

    # Search results
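    # Full results arrive as a definition list: each <dt> carries the page
    # link and title, and the <dd> that follows it carries the snippet, so
    # res_url and title are carried over from the last <dt> seen.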
    for r in eval_xpath(doc, '//dl[@class="search_results"]/*'):
        try:
            if r.tag == "dt":
                res_url = eval_xpath(r, './/a[@class="wikilink1"]/@href')[-1]
                title = extract_text(eval_xpath(r, './/a[@class="wikilink1"]/@title'))
            elif r.tag == "dd":
                content = extract_text(eval_xpath(r, '.'))

                # append result
                results.append({'title': title,
                                'content': content,
                                'url': base_url + res_url})
        except (IndexError, NameError):
            # skip malformed entries and a <dd> appearing before any <dt>
            continue

        if not res_url:
            continue

    # return results
    return results
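

# Minimal manual check (hypothetical; searx normally calls request() and
# response() itself). Assumes the `requests` package is installed and a
# DokuWiki instance is reachable at base_url.
if __name__ == '__main__':
    import requests

    params = request('wiki syntax', {})
    resp = requests.get(params['url'])
    for result in response(resp):
        print(result['url'], '-', result['title'])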