# SPDX-License-Identifier: AGPL-3.0-or-later
"""
1x (Images)
"""
from lxml import html, etree
from urllib.parse import urlencode, urljoin
from searx.utils import extract_text, eval_xpath_list, eval_xpath_getindex

# about
about = {
    "website": 'https://1x.com/',
    "wikidata_id": None,
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['images']
paging = False

# search-url
base_url = 'https://1x.com'
search_url = base_url + '/backend/search.php?{query}'
gallery_url = 'https://gallery.1x.com/'


# do search-request
def request(query, params):
    params['url'] = search_url.format(query=urlencode({'q': query}))

    return params

# get response from search-request
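# Parsing note: the /backend/search.php endpoint appears to return an XML document
# whose <searchresult> element carries the result markup as an embedded HTML
# fragment (a table of links to photo pages, each containing a thumbnail <img>);
# the handler below relies on that structure.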
def response(resp):
    results = []

    # parse the XML answer, then parse the HTML fragment embedded in <searchresult>
    xmldom = etree.fromstring(resp.content)
    xmlsearchresult = eval_xpath_getindex(xmldom, '//searchresult', 0)
    dom = html.fragment_fromstring(xmlsearchresult.text, create_parent='div')

    for link in eval_xpath_list(dom, '/div/table/tr/td/div[2]//a'):
        url = urljoin(base_url, link.attrib.get('href'))
        title = extract_text(link)
        # thumbnail paths are relative to the gallery host
        thumbnail_src = urljoin(gallery_url, eval_xpath_getindex(link, './/img', 0).attrib['src'])

        # append result
        results.append({'url': url,
                        'title': title,
                        'img_src': thumbnail_src,
                        'content': '',
                        'thumbnail_src': thumbnail_src,
                        'template': 'images.html'})

    # return results
    return results
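

# A minimal sketch of how the two hooks fit together (normally driven by the
# SearXNG engine framework; ``resp`` stands in for the HTTP response object it
# passes to ``response()``):
#
#     params = request('nature', {})
#     # params['url'] == 'https://1x.com/backend/search.php?q=nature'
#     # resp = <HTTP GET of params['url']>
#     # results = response(resp)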