1
0
mirror of https://github.com/searxng/searxng.git synced 2024-11-22 12:10:11 +01:00

[fix] engine deviantart: review of the result scraper

The deviantart site changed and hence deviantart is currently unusable.
This commit is contained in:
Bnyro 2023-09-08 12:08:14 +02:00 committed by Markus Heiser
parent 1f7366060e
commit e73a6f5d14

View File

@ -1,12 +1,14 @@
# SPDX-License-Identifier: AGPL-3.0-or-later # SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint # lint: pylint
""" """Deviantart (Images)
Deviantart (Images)
""" """
from urllib.parse import urlencode import urllib.parse
from lxml import html from lxml import html
from searx.utils import extract_text, eval_xpath, eval_xpath_list
# about # about
about = { about = {
"website": 'https://www.deviantart.com/', "website": 'https://www.deviantart.com/',
@ -20,31 +22,30 @@ about = {
# engine dependent config # engine dependent config
categories = ['images'] categories = ['images']
paging = True paging = True
time_range_support = True
time_range_dict = {
'day': 'popular-24-hours',
'week': 'popular-1-week',
'month': 'popular-1-month',
'year': 'most-recent',
}
# search-url
base_url = 'https://www.deviantart.com'

# xpath expressions used to scrape the result page
results_xpath = '//div[@class="_2pZkk"]/div/div/a'
url_xpath = './@href'
thumbnail_src_xpath = './div/img/@src'
img_src_xpath = './div/img/@srcset'
title_xpath = './@aria-label'
# text shown on deviations hidden behind a "watch the artist" overlay
premium_xpath = '../div/div/div/text()'
premium_keytext = 'Watch the artist to view this deviation'
# link carrying the cursor to the next result page
cursor_xpath = '(//a[@class="_1OGeq"]/@href)[last()]'


def request(query, params):
    """Build the request URL for a deviantart search.

    The first page uses a plain ``/search?q=...`` URL.  For subsequent
    pages the cursor URL scraped from the previous response (handed back
    by the engine framework via ``params['engine_data']``) is followed
    instead, because deviantart paginates with an opaque cursor rather
    than a page number.
    """
    # https://www.deviantart.com/search?q=foo

    nextpage_url = params['engine_data'].get('nextpage')
    # don't use nextpage when the user selected to jump back to page 1
    if params['pageno'] > 1 and nextpage_url is not None:
        params['url'] = nextpage_url
    else:
        params['url'] = f"{base_url}/search?{urllib.parse.urlencode({'q': query})}"

    return params
@ -52,29 +53,35 @@ def request(query, params):
def response(resp):
    """Parse the deviantart search result page.

    Returns a list of image results.  When a cursor link to the next
    result page is found, an ``engine_data``/``key: nextpage`` entry is
    appended so the framework can feed it back into :py:func:`request`
    for the following page.
    """
    results = []

    dom = html.fromstring(resp.text)

    for result in eval_xpath_list(dom, results_xpath):

        # skip images that are blurred (premium "watch the artist" overlay)
        _text = extract_text(eval_xpath(result, premium_xpath))
        if _text and premium_keytext in _text:
            continue

        img_src = extract_text(eval_xpath(result, img_src_xpath))
        if img_src:
            # srcset lists "<url> <width>," pairs: take the first URL and
            # strip the '/v1' resize suffix to get the full-size image
            img_src = img_src.split(' ')[0]
            parsed_url = urllib.parse.urlparse(img_src)
            img_src = parsed_url._replace(path=parsed_url.path.split('/v1')[0]).geturl()

        results.append(
            {
                'template': 'images.html',
                'url': extract_text(eval_xpath(result, url_xpath)),
                'img_src': img_src,
                'thumbnail_src': extract_text(eval_xpath(result, thumbnail_src_xpath)),
                'title': extract_text(eval_xpath(result, title_xpath)),
            }
        )

    # hand the next-page cursor back to request() via engine_data
    nextpage_url = extract_text(eval_xpath(dom, cursor_xpath))
    if nextpage_url:
        results.append(
            {
                'engine_data': nextpage_url.replace("http://", "https://"),
                'key': 'nextpage',
            }
        )

    return results