
# Yacy (Web, Images, Videos, Music, Files)
#
# @website     http://yacy.net
# @provide-api yes
#              (http://www.yacy-websuche.de/wiki/index.php/Dev:APIyacysearch)
#
# @using-api   yes
# @results     JSON
# @stable      yes
# @parse       (general) url, title, content, publishedDate
# @parse       (images)  url, title, img_src
#
# @todo        parse video, audio and file results

from json import loads
from dateutil import parser

from searx.url_utils import urlencode
from searx.utils import html_to_text

# engine dependent config
categories = ['general', 'images']  # TODO , 'music', 'videos', 'files'
paging = True
language_support = True
number_of_results = 5

# search-url
base_url = 'http://localhost:8090'
search_url = '/yacysearch.json?{query}'\
             '&startRecord={offset}'\
             '&maximumRecords={limit}'\
             '&contentdom={search_type}'\
             '&resource=global'

# yacy specific type-definitions
search_types = {'general': 'text',
                'images': 'image',
                'files': 'app',
                'music': 'audio',
                'videos': 'video'}
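
# Illustrative note (not used at runtime): with the defaults above, a first-page
# query in the 'general' category expands the template to
#   /yacysearch.json?query=<urlencoded query>&startRecord=0&maximumRecords=5
#   &contentdom=text&resource=global
# appended to base_url; the 'images' category sends contentdom=image instead.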


# do search-request
def request(query, params):
    offset = (params['pageno'] - 1) * number_of_results

    search_type = search_types.get(params.get('category'), '0')

    params['url'] = base_url +\
        search_url.format(query=urlencode({'query': query}),
                          offset=offset,
                          limit=number_of_results,
                          search_type=search_type)

    # add language tag if specified
    if params['language'] != 'all':
        params['url'] += '&lr=lang_' + params['language'].split('-')[0]

    return params
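
# Usage sketch (hypothetical params dict; at runtime searx passes a larger dict
# it prepares itself):
#
#   params = {'pageno': 2, 'language': 'de-DE', 'category': 'images'}
#   request('katzen', params)
#   # params['url'] is now
#   #   http://localhost:8090/yacysearch.json?query=katzen&startRecord=5
#   #   &maximumRecords=5&contentdom=image&resource=global&lr=lang_de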


# get response from search-request
def response(resp):
    results = []

    raw_search_results = loads(resp.text)

    # return empty array if there are no results
    if not raw_search_results:
        return []

    search_results = raw_search_results.get('channels', [])

    if len(search_results) == 0:
        return []

    for result in search_results[0].get('items', []):
        # parse image results
        if result.get('image'):

            result_url = ''
            if 'url' in result:
                result_url = result['url']
            elif 'link' in result:
                result_url = result['link']
            else:
                continue

            # append result
            results.append({'url': result_url,
                            'title': result['title'],
                            'content': '',
                            'img_src': result['image'],
                            'template': 'images.html'})

        # parse general results
        else:
            publishedDate = parser.parse(result['pubDate'])

            # append result
            results.append({'url': result['link'],
                            'title': result['title'],
                            'content': html_to_text(result['description']),
                            'publishedDate': publishedDate})

    # TODO parse video, audio and file results

    return results
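
# Sketch of the JSON this parser expects (field names taken from the code above;
# the overall shape is an assumption about YaCy's yacysearch.json output):
#
#   {"channels": [{"items": [
#       {"title": "...", "link": "http://...", "description": "...",
#        "pubDate": "Mon, 01 Jan 2018 00:00:00 +0000"},
#       {"title": "...", "link": "http://...", "image": "http://.../img.png"}
#   ]}]}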