searxng/searx/engines/1337x.py


# SPDX-License-Identifier: AGPL-3.0-or-later
"""
1337x
"""
from urllib.parse import quote, urljoin

from lxml import html

from searx.utils import extract_text, get_torrent_size, eval_xpath, eval_xpath_list, eval_xpath_getindex

# about
about = {
    "website": 'https://1337x.to/',
    "wikidata_id": 'Q28134166',
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
url = 'https://1337x.to/'
search_url = url + 'search/{search_term}/{pageno}/'
categories = ['videos']
paging = True


def request(query, params):
    # quote the query and insert it, together with the page number, into the search URL
    params['url'] = search_url.format(search_term=quote(query), pageno=params['pageno'])

    return params


def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    # each row of the result table is one torrent
    for result in eval_xpath_list(dom, '//table[contains(@class, "table-list")]/tbody//tr'):
        href = urljoin(url, eval_xpath_getindex(result, './td[contains(@class, "name")]/a[2]/@href', 0))
        title = extract_text(eval_xpath(result, './td[contains(@class, "name")]/a[2]'))
        seed = extract_text(eval_xpath(result, './/td[contains(@class, "seeds")]'))
        leech = extract_text(eval_xpath(result, './/td[contains(@class, "leeches")]'))
        # the size cell's own text node holds e.g. "4.2 GB"; nested elements are skipped
        filesize_info = extract_text(eval_xpath(result, './/td[contains(@class, "size")]/text()'))
        filesize, filesize_multiplier = filesize_info.split()
        filesize = get_torrent_size(filesize, filesize_multiplier)

        results.append({'url': href,
                        'title': title,
                        'seed': seed,
                        'leech': leech,
                        'filesize': filesize,
                        'template': 'torrent.html'})

    return results
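
# A rough usage sketch (illustrative only; in practice the searx/searxng core
# issues the HTTP request, and `fetched_response` below is a placeholder for an
# HTTP response object whose .text attribute holds the fetched HTML):
#
#   params = request('ubuntu iso', {'pageno': 1})
#   # params['url'] == 'https://1337x.to/search/ubuntu%20iso/1/'
#   results = response(fetched_response)  # list of dicts for the 'torrent.html' template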