# Source: searx/engines/yacy.py — mirror of https://github.com/searxng/searxng.git

# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""YaCy_ is a free distributed search engine, built on the principles of
peer-to-peer (P2P) networks.

API: Dev:APIyacysearch_

Releases:

- https://github.com/yacy/yacy_search_server/tags
- https://download.yacy.net/

.. _Yacy: https://yacy.net/
.. _Dev:APIyacysearch: https://wiki.yacy.net/index.php/Dev:APIyacysearch

Configuration
=============

The engine has the following (additional) settings:

- :py:obj:`http_digest_auth_user`
- :py:obj:`http_digest_auth_pass`
- :py:obj:`search_mode`
- :py:obj:`search_type`

.. code:: yaml

  - name: yacy
    engine: yacy
    categories: general
    search_type: text
    base_url: https://yacy.searchlab.eu
    shortcut: ya

  - name: yacy images
    engine: yacy
    categories: images
    search_type: image
    base_url: https://yacy.searchlab.eu
    shortcut: yai
    disabled: true

Implementations
===============

"""
# pylint: disable=fixme

from json import loads
from urllib.parse import urlencode

from dateutil import parser
from httpx import DigestAuth

from searx.utils import html_to_text
# Engine metadata shown on the instance's /about and /preferences pages.
about = dict(
    website='https://yacy.net/',
    wikidata_id='Q1759675',
    official_api_documentation='https://wiki.yacy.net/index.php/Dev:API',
    use_official_api=True,
    require_api_key=False,
    results='JSON',
)
# engine dependent config
categories = ['general']
paging = True

# Number of results requested from the instance per page.
number_of_results = 10

http_digest_auth_user = ""
"""HTTP digest user for the local YACY instance"""

http_digest_auth_pass = ""
"""HTTP digest password for the local YACY instance"""

search_mode = 'global'
"""Yacy search mode ``global`` or ``local``. By default, Yacy operates in ``global``
mode.

``global``
  Peer-to-Peer search

``local``
  Privacy or Stealth mode, restricts the search to local yacy instance.
"""

search_type = 'text'
"""One of ``text``, ``image`` / The search-types ``app``, ``audio`` and
``video`` are not yet implemented (Pull-Requests are welcome).
"""

# search-url: template filled in by request() with the query, paging
# window, content domain and search resource (global/local).
base_url = 'https://yacy.searchlab.eu'
search_url = (
    '/yacysearch.json?{query}'
    '&startRecord={offset}'
    '&maximumRecords={limit}'
    '&contentdom={search_type}'
    '&resource={resource}'
)
def init(_):
    """Validate the configured :py:obj:`search_type` at engine setup time."""
    supported = [
        'text',
        'image',
        # 'app', 'audio', 'video',
    ]
    if search_type in supported:
        return
    raise ValueError('search_type "%s" is not one of %s' % (search_type, supported))
def request(query, params):
    """Assemble the YaCy search request.

    Fills ``params['url']`` from :py:obj:`search_url`, optionally attaches
    HTTP digest credentials, and appends a language restriction when the
    user selected a specific language.
    """
    # startRecord is 0-based: page 1 -> offset 0, page 2 -> offset 10, ...
    offset = (params['pageno'] - 1) * number_of_results
    params['url'] = base_url + search_url.format(
        query=urlencode({'query': query}),
        offset=offset,
        limit=number_of_results,
        search_type=search_type,
        resource=search_mode,
    )

    # digest auth is only used when both user and password are configured
    if http_digest_auth_user and http_digest_auth_pass:
        params['auth'] = DigestAuth(http_digest_auth_user, http_digest_auth_pass)

    # add language tag if specified (e.g. 'de-DE' -> '&lr=lang_de')
    if params['language'] != 'all':
        params['url'] += '&lr=lang_' + params['language'].split('-')[0]

    return params
def response(resp):
    """Parse the JSON answer of a ``yacysearch.json`` request.

    Returns a list of result dicts.  When :py:obj:`search_type` is
    ``image`` the results use the ``images.html`` template, otherwise
    plain text results (with an optional publication date) are built.
    """
    results = []

    raw_search_results = loads(resp.text)

    # return empty array if there are no results
    if not raw_search_results:
        return []

    search_results = raw_search_results.get('channels', [])

    if len(search_results) == 0:
        return []

    for result in search_results[0].get('items', []):
        # parse image results
        if search_type == 'image':
            # peers answer with either 'url' or 'link'; items with
            # neither cannot be displayed and are skipped
            result_url = ''
            if 'url' in result:
                result_url = result['url']
            elif 'link' in result:
                result_url = result['link']
            else:
                continue

            # append result
            results.append(
                {
                    'url': result_url,
                    'title': result['title'],
                    'content': '',
                    'img_src': result['image'],
                    'template': 'images.html',
                }
            )

        # parse general results
        else:
            publishedDate = None
            if 'pubDate' in result:
                publishedDate = parser.parse(result['pubDate'])

            # append result
            results.append(
                {
                    'url': result['link'] or '',
                    'title': result['title'],
                    'content': html_to_text(result['description']),
                    'publishedDate': publishedDate,
                }
            )

    # TODO parse video, audio and file results

    return results