# mirror of https://github.com/searxng/searxng.git
# searxng/searx/engines/www1x.py
# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""1x (Images)
"""
from urllib.parse import urlencode, urljoin
from lxml import html, etree

from searx.utils import extract_text, eval_xpath_list, eval_xpath_getindex
# Engine metadata shown in the searx preferences / about page.
about = {
    "website": 'https://1x.com/',
    "wikidata_id": None,
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# Engine-dependent configuration: image search only, no pagination support.
categories = ['images']
paging = False
# search-url: the XML backend endpoint; thumbnails live on a separate host.
base_url = 'https://1x.com'
search_url = base_url + '/backend/search.php?{query}'
gallery_url = 'https://gallery.1x.com/'


# do search-request
def request(query, params):
    """Build the GET request for *query*.

    :param query: raw search terms entered by the user
    :param params: searx request-parameter dict; its ``url`` key is filled
        in with the fully encoded backend URL
    :returns: the same ``params`` dict (mutated in place)
    """
    # urlencode handles quoting of spaces and special characters in the query.
    params['url'] = search_url.format(query=urlencode({'q': query}))
    return params
# get response from search-request
def response(resp):
    """Parse the backend's XML answer into searx image results.

    The backend replies with an XML document whose first ``<data>`` element
    contains an HTML fragment of ``<a>`` gallery links, each wrapping an
    ``<img>`` thumbnail.

    :param resp: the HTTP response object for the search request
    :returns: list of image-result dicts for the ``images.html`` template
    """
    results = []

    # The payload is XML; the interesting HTML lives inside the first <data>.
    xmldom = etree.fromstring(resp.content)
    xmlsearchresult = eval_xpath_getindex(xmldom, '//data', 0)

    # Parse the embedded HTML fragment; a synthetic <div> parent makes the
    # fragment a single well-formed tree for XPath evaluation.
    dom = html.fragment_fromstring(xmlsearchresult.text, create_parent='div')
    for link in eval_xpath_list(dom, '//a'):
        url = urljoin(base_url, link.attrib.get('href'))
        title = extract_text(link)
        # Thumbnails are served from gallery.1x.com: strip any base_url
        # prefix from the img src and rebase it onto the gallery host.
        thumbnail_src = urljoin(
            gallery_url, (eval_xpath_getindex(link, './/img', 0).attrib['src']).replace(base_url, '')
        )
        # append result
        results.append(
            {
                'url': url,
                'title': title,
                'img_src': thumbnail_src,
                'content': '',
                'thumbnail_src': thumbnail_src,
                'template': 'images.html',
            }
        )

    # return results
    return results