
[mod] xpath, 1337x, acgsou, apkmirror, archlinux, arxiv: use eval_xpath_* functions

Alexandre Flament 2020-11-26 15:49:33 +01:00
parent de887c6347
commit ad72803ed9
6 changed files with 51 additions and 54 deletions
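
For readers skimming the diff: every change below follows the same recipe. Raw lxml `.xpath()` calls, `[0]` indexing, and the bare `try`/`except` blocks guarding them are replaced by the `eval_xpath_*` helpers from `searx.utils`, which report failures as descriptive engine-level errors instead of anonymous `IndexError`s. A minimal sketch of the assumed helper semantics, simplified from `searx/utils.py` (the real implementations cache compiled XPath expressions and raise `SearxEngineXPathException` subclasses rather than `ValueError`):

    _NOTSET = object()

    def eval_xpath(element, xpath_spec):
        # Evaluate an XPath expression, turning lxml errors into engine errors.
        return element.xpath(xpath_spec)

    def eval_xpath_list(element, xpath_spec, min_len=None):
        # Same, but guarantee a list result with at least `min_len` items.
        result = eval_xpath(element, xpath_spec)
        if not isinstance(result, list):
            raise ValueError('xpath result is not a list')
        if min_len is not None and len(result) < min_len:
            raise ValueError('not enough xpath results (min_len=%i)' % min_len)
        return result

    def eval_xpath_getindex(element, xpath_spec, index, default=_NOTSET):
        # Return result[index]; fall back to `default` if given, else raise.
        result = eval_xpath_list(element, xpath_spec)
        if -len(result) <= index < len(result):
            return result[index]
        if default is _NOTSET:
            raise ValueError('xpath index %i not found' % index)
        return default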

searx/engines/1337x.py

@@ -1,6 +1,6 @@
 from urllib.parse import quote, urljoin
 from lxml import html
-from searx.utils import extract_text, get_torrent_size
+from searx.utils import extract_text, get_torrent_size, eval_xpath, eval_xpath_list, eval_xpath_getindex

 url = 'https://1337x.to/'

@@ -20,12 +20,12 @@ def response(resp):
     dom = html.fromstring(resp.text)

-    for result in dom.xpath('//table[contains(@class, "table-list")]/tbody//tr'):
-        href = urljoin(url, result.xpath('./td[contains(@class, "name")]/a[2]/@href')[0])
-        title = extract_text(result.xpath('./td[contains(@class, "name")]/a[2]'))
-        seed = extract_text(result.xpath('.//td[contains(@class, "seeds")]'))
-        leech = extract_text(result.xpath('.//td[contains(@class, "leeches")]'))
-        filesize_info = extract_text(result.xpath('.//td[contains(@class, "size")]/text()'))
+    for result in eval_xpath_list(dom, '//table[contains(@class, "table-list")]/tbody//tr'):
+        href = urljoin(url, eval_xpath_getindex(result, './td[contains(@class, "name")]/a[2]/@href', 0))
+        title = extract_text(eval_xpath(result, './td[contains(@class, "name")]/a[2]'))
+        seed = extract_text(eval_xpath(result, './/td[contains(@class, "seeds")]'))
+        leech = extract_text(eval_xpath(result, './/td[contains(@class, "leeches")]'))
+        filesize_info = extract_text(eval_xpath(result, './/td[contains(@class, "size")]/text()'))
         filesize, filesize_multiplier = filesize_info.split()
         filesize = get_torrent_size(filesize, filesize_multiplier)
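
The pattern in this file is `result.xpath(...)[0]` becoming `eval_xpath_getindex(result, ..., 0)`, so a stale selector now fails with a descriptive error instead of a bare `IndexError`. A runnable illustration of the new call (markup invented for the example; assumes a searx checkout on the path):

    from lxml import html
    from searx.utils import eval_xpath_getindex

    # Made-up markup shaped like one 1337x result row (illustration only).
    dom = html.fromstring(
        '<table class="table-list"><tbody><tr>'
        '<td class="coll-1 name"><a href="/sub/20/">icon</a><a href="/torrent/1/example/">example</a></td>'
        '</tr></tbody></table>')
    href = eval_xpath_getindex(dom, '//td[contains(@class, "name")]/a[2]/@href', 0)
    assert href == '/torrent/1/example/'
    # With the old pattern, dom.xpath(...)[0] raised IndexError on a stale selector;
    # eval_xpath_getindex raises a descriptive engine exception instead.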

searx/engines/acgsou.py

@@ -11,7 +11,7 @@
 from urllib.parse import urlencode
 from lxml import html
-from searx.utils import extract_text, get_torrent_size
+from searx.utils import extract_text, get_torrent_size, eval_xpath_list, eval_xpath_getindex

 # engine dependent config
 categories = ['files', 'images', 'videos', 'music']

@@ -37,24 +37,21 @@ def request(query, params):
 def response(resp):
     results = []
     dom = html.fromstring(resp.text)
-    for result in dom.xpath(xpath_results):
+    for result in eval_xpath_list(dom, xpath_results):
         # defaults
         filesize = 0
         magnet_link = "magnet:?xt=urn:btih:{}&tr=http://tracker.acgsou.com:2710/announce"

-        try:
-            category = extract_text(result.xpath(xpath_category)[0])
-        except:
-            pass
-
-        page_a = result.xpath(xpath_title)[0]
+        category = extract_text(eval_xpath_getindex(result, xpath_category, 0, default=[]))
+        page_a = eval_xpath_getindex(result, xpath_title, 0)
         title = extract_text(page_a)
         href = base_url + page_a.attrib.get('href')
         magnet_link = magnet_link.format(page_a.attrib.get('href')[5:-5])
-        try:
-            filesize_info = result.xpath(xpath_filesize)[0]
-            filesize = filesize_info[:-2]
-            filesize_multiplier = filesize_info[-2:]
-            filesize = get_torrent_size(filesize, filesize_multiplier)
+        filesize_info = eval_xpath_getindex(result, xpath_filesize, 0, default=None)
+        if filesize_info:
+            try:
+                filesize = filesize_info[:-2]
+                filesize_multiplier = filesize_info[-2:]
+                filesize = get_torrent_size(filesize, filesize_multiplier)
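
The notable change here is the `default=` argument taking over from the old bare `except: pass`: when the category cell is missing, `eval_xpath_getindex` returns `default=[]`, and `extract_text` on an empty list yields an empty string, so no exception handling is needed. A self-contained illustration (markup and selector invented for the example):

    from lxml import html
    from searx.utils import extract_text, eval_xpath_getindex

    # A made-up result row with no category cell.
    result = html.fromstring(
        '<table><tr><td class="title"><a href="/show_12345.html">t</a></td></tr></table>')
    category = extract_text(eval_xpath_getindex(result, './/td[@class="category"]', 0, default=[]))
    assert category == ''   # empty string instead of an unhandled IndexError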

searx/engines/apkmirror.py

@@ -11,7 +11,7 @@
 from urllib.parse import urlencode
 from lxml import html
-from searx.utils import extract_text
+from searx.utils import extract_text, eval_xpath_list, eval_xpath_getindex

 # engine dependent config

@@ -42,12 +42,13 @@ def response(resp):
     dom = html.fromstring(resp.text)

     # parse results
-    for result in dom.xpath('.//div[@id="content"]/div[@class="listWidget"]/div[@class="appRow"]'):
-        link = result.xpath('.//h5/a')[0]
+    for result in eval_xpath_list(dom, './/div[@id="content"]/div[@class="listWidget"]/div[@class="appRow"]'):
+        link = eval_xpath_getindex(result, './/h5/a', 0)

         url = base_url + link.attrib.get('href') + '#downloads'
         title = extract_text(link)
-        thumbnail_src = base_url + result.xpath('.//img')[0].attrib.get('src').replace('&w=32&h=32', '&w=64&h=64')
+        thumbnail_src = base_url\
+            + eval_xpath_getindex(result, './/img', 0).attrib.get('src').replace('&w=32&h=32', '&w=64&h=64')

         res = {
             'url': url,

searx/engines/archlinux.py

@@ -13,7 +13,7 @@
 from urllib.parse import urlencode, urljoin
 from lxml import html
-from searx.utils import extract_text
+from searx.utils import extract_text, eval_xpath_list, eval_xpath_getindex

 # engine dependent config
 categories = ['it']

@@ -131,8 +131,8 @@ def response(resp):
     dom = html.fromstring(resp.text)

     # parse results
-    for result in dom.xpath(xpath_results):
-        link = result.xpath(xpath_link)[0]
+    for result in eval_xpath_list(dom, xpath_results):
+        link = eval_xpath_getindex(result, xpath_link, 0)
         href = urljoin(base_url, link.attrib.get('href'))
         title = extract_text(link)

searx/engines/arxiv.py

@@ -13,6 +13,7 @@
 from lxml import html
 from datetime import datetime
+from searx.utils import eval_xpath_list, eval_xpath_getindex

 categories = ['science']

@@ -42,29 +43,26 @@ def response(resp):
     results = []
     dom = html.fromstring(resp.content)

-    search_results = dom.xpath('//entry')
-
-    for entry in search_results:
-        title = entry.xpath('.//title')[0].text
-
-        url = entry.xpath('.//id')[0].text
+    for entry in eval_xpath_list(dom, '//entry'):
+        title = eval_xpath_getindex(entry, './/title', 0).text

+        url = eval_xpath_getindex(entry, './/id', 0).text
         content_string = '{doi_content}{abstract_content}'
-        abstract = entry.xpath('.//summary')[0].text
+        abstract = eval_xpath_getindex(entry, './/summary', 0).text

         # If a doi is available, add it to the snipppet
-        try:
-            doi_content = entry.xpath('.//link[@title="doi"]')[0].text
-            content = content_string.format(doi_content=doi_content, abstract_content=abstract)
-        except:
-            content = content_string.format(doi_content="", abstract_content=abstract)
+        doi_element = eval_xpath_getindex(entry, './/link[@title="doi"]', 0, default=None)
+        doi_content = doi_element.text if doi_element is not None else ''
+        content = content_string.format(doi_content=doi_content, abstract_content=abstract)

         if len(content) > 300:
             content = content[0:300] + "..."
         # TODO: center snippet on query term

-        publishedDate = datetime.strptime(entry.xpath('.//published')[0].text, '%Y-%m-%dT%H:%M:%SZ')
+        publishedDate = datetime.strptime(eval_xpath_getindex(entry, './/published', 0).text, '%Y-%m-%dT%H:%M:%SZ')

         res_dict = {'url': url,
                     'title': title,
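
The DOI handling shows the `default=None` variant for optional elements: fetch once, then a conditional expression replaces the old `try`/`except` around the snippet assembly. A hedged sketch with made-up Atom-like markup (real arXiv responses carry XML namespaces, which the engine sidesteps by parsing with lxml's HTML parser):

    from lxml import etree
    from searx.utils import eval_xpath_getindex

    entry = etree.fromstring('<entry><link title="doi">10.0000/example.doi</link></entry>')
    doi_element = eval_xpath_getindex(entry, './/link[@title="doi"]', 0, default=None)
    doi_content = doi_element.text if doi_element is not None else ''
    assert doi_content == '10.0000/example.doi'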

searx/engines/xpath.py

@@ -1,6 +1,6 @@
 from lxml import html
 from urllib.parse import urlencode
-from searx.utils import extract_text, extract_url, eval_xpath
+from searx.utils import extract_text, extract_url, eval_xpath, eval_xpath_list

 search_url = None
 url_xpath = None

@@ -42,21 +42,22 @@ def response(resp):
     is_onion = True if 'onions' in categories else False

     if results_xpath:
-        for result in eval_xpath(dom, results_xpath):
-            url = extract_url(eval_xpath(result, url_xpath), search_url)
-            title = extract_text(eval_xpath(result, title_xpath))
-            content = extract_text(eval_xpath(result, content_xpath))
+        for result in eval_xpath_list(dom, results_xpath):
+            url = extract_url(eval_xpath_list(result, url_xpath, min_len=1), search_url)
+            title = extract_text(eval_xpath_list(result, title_xpath, min_len=1))
+            content = extract_text(eval_xpath_list(result, content_xpath, min_len=1))
             tmp_result = {'url': url, 'title': title, 'content': content}

             # add thumbnail if available
             if thumbnail_xpath:
-                thumbnail_xpath_result = eval_xpath(result, thumbnail_xpath)
+                thumbnail_xpath_result = eval_xpath_list(result, thumbnail_xpath)
                 if len(thumbnail_xpath_result) > 0:
                     tmp_result['img_src'] = extract_url(thumbnail_xpath_result, search_url)

             # add alternative cached url if available
             if cached_xpath:
-                tmp_result['cached_url'] = cached_url + extract_text(result.xpath(cached_xpath))
+                tmp_result['cached_url'] = cached_url\
+                    + extract_text(eval_xpath_list(result, cached_xpath, min_len=1))

             if is_onion:
                 tmp_result['is_onion'] = True

@@ -66,19 +67,19 @@ def response(resp):
         if cached_xpath:
             for url, title, content, cached in zip(
                 (extract_url(x, search_url) for
-                 x in dom.xpath(url_xpath)),
-                map(extract_text, dom.xpath(title_xpath)),
-                map(extract_text, dom.xpath(content_xpath)),
-                map(extract_text, dom.xpath(cached_xpath))
+                 x in eval_xpath_list(dom, url_xpath)),
+                map(extract_text, eval_xpath_list(dom, title_xpath)),
+                map(extract_text, eval_xpath_list(dom, content_xpath)),
+                map(extract_text, eval_xpath_list(dom, cached_xpath))
             ):
                 results.append({'url': url, 'title': title, 'content': content,
                                 'cached_url': cached_url + cached, 'is_onion': is_onion})
         else:
             for url, title, content in zip(
                 (extract_url(x, search_url) for
-                 x in dom.xpath(url_xpath)),
-                map(extract_text, dom.xpath(title_xpath)),
-                map(extract_text, dom.xpath(content_xpath))
+                 x in eval_xpath_list(dom, url_xpath)),
+                map(extract_text, eval_xpath_list(dom, title_xpath)),
+                map(extract_text, eval_xpath_list(dom, content_xpath))
             ):
                 results.append({'url': url, 'title': title, 'content': content, 'is_onion': is_onion})
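
In the generic xpath engine, `min_len=1` is applied only to the mandatory fields (url, title, content), while the optional thumbnail lookup keeps its explicit length check: a result row missing a mandatory field now fails with a descriptive engine error instead of silently producing an empty value. A hedged sketch (markup and selector invented; the concrete exception is searx's XPath error type, simplified here to a generic catch):

    from lxml import html
    from searx.utils import eval_xpath_list

    result = html.fromstring('<div class="result"><h3>title only, no link</h3></div>')
    try:
        eval_xpath_list(result, './/a/@href', min_len=1)
    except Exception as exc:   # searx raises a SearxEngineXPathException subclass here
        print('mandatory field missing:', exc)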