"""
 BTDigg (Videos, Music, Files)

 @website      https://btdig.com
 @provide-api  yes (on demand)

 @using-api    no
 @results      HTML (using search portal)
 @stable       no (HTML can change)
 @parse        url, title, content, filesize, files, magnetlink
"""
|
2015-01-21 18:02:29 +01:00
|
|
|
|
|
|
|
from lxml import html
|
|
|
|
from operator import itemgetter
|
|
|
|
from searx.engines.xpath import extract_text
|
2016-11-30 18:43:03 +01:00
|
|
|
from searx.url_utils import quote, urljoin
|
2016-08-13 14:55:47 +02:00
|
|
|
from searx.utils import get_torrent_size
|
2015-01-21 18:02:29 +01:00
|
|
|
|
|
|
|
# engine dependent config
categories = ['videos', 'music', 'files']  # searx categories this engine serves
paging = True  # btdig exposes result pages via the `p` query parameter

# search-url
url = 'https://btdig.com'  # base URL, also used to resolve relative result links
search_url = url + '/search?q={search_term}&p={pageno}'  # pageno is zero-based
|
2015-01-21 18:02:29 +01:00
|
|
|
|
|
|
|
|
|
|
|
# do search-request
|
|
|
|
def request(query, params):
    """Fill in the search URL for a btdig query.

    btdig pages are zero-based while searx's `pageno` starts at 1,
    hence the `- 1` offset.
    """
    page = params['pageno'] - 1
    params['url'] = search_url.format(search_term=quote(query), pageno=page)
    return params
|
|
|
|
|
|
|
|
|
|
|
|
# get response from search-request
|
|
|
|
def response(resp):
    """Parse the btdig HTML result page.

    Returns a list of result dicts (template `torrent.html`) with
    url, title, content, filesize, files and magnetlink keys.
    """
    results = []

    dom = html.fromstring(resp.text)

    search_res = dom.xpath('//div[@class="one_result"]')

    # return empty array if nothing is found
    if not search_res:
        return []

    # parse results
    for result in search_res:
        link = result.xpath('.//div[@class="torrent_name"]//a')[0]
        href = urljoin(url, link.attrib.get('href'))
        title = extract_text(link)

        excerpt = result.xpath('.//div[@class="torrent_excerpt"]')[0]
        content = html.tostring(excerpt, encoding='unicode', method='text', with_tail=False)
        # it is better to emit <br/> instead of |, but html tags are verboten
        content = content.strip().replace('\n', ' | ')
        content = ' '.join(content.split())

        # query the size span once and split into value + unit
        # (previously the same xpath ran twice)
        size_parts = result.xpath('.//span[@class="torrent_size"]/text()')[0].split()
        filesize = size_parts[0]
        filesize_multiplier = size_parts[1]
        # missing file-count span defaults to a single file
        files = (result.xpath('.//span[@class="torrent_files"]/text()') or ['1'])[0]

        # convert filesize to byte if possible
        filesize = get_torrent_size(filesize, filesize_multiplier)

        # convert files to int if possible; catch only conversion failures
        # instead of a bare `except`, which would also hide unrelated errors
        try:
            files = int(files)
        except (ValueError, TypeError):
            files = None

        magnetlink = result.xpath('.//div[@class="torrent_magnet"]//a')[0].attrib['href']

        # append result
        results.append({'url': href,
                        'title': title,
                        'content': content,
                        'filesize': filesize,
                        'files': files,
                        'magnetlink': magnetlink,
                        'template': 'torrent.html'})

    # btdig does not expose seeder counts, so results keep page order
    return results
|