# SPDX-License-Identifier: AGPL-3.0-or-later
"""Google (Images)
|
|
|
|
|
|
|
|
:website: https://images.google.com (redirected to subdomain www.)
|
|
|
|
:provide-api: yes (https://developers.google.com/custom-search/)
|
|
|
|
:using-api: not the offical, since it needs registration to another service
|
|
|
|
:results: HTML
|
|
|
|
:stable: no
|
|
|
|
:template: images.html
|
|
|
|
:parse: url, title, content, source, thumbnail_src, img_src
|
|
|
|
|
|
|
|
For detailed description of the *REST-full* API see: `Query Parameter
|
|
|
|
Definitions`_.
|
|
|
|
|
|
|
|
.. _admonition:: Content-Security-Policy (CSP)
|
2015-05-02 15:45:17 +02:00
|
|
|
|
2020-07-07 21:59:15 +02:00
|
|
|
This engine needs to allow images from the `data URLs`_ (prefixed with the
|
|
|
|
``data:` scheme).::
|
|
|
|
|
|
|
|
Header set Content-Security-Policy "img-src 'self' data: ;"
|
|
|
|
|
|
|
|
.. _Query Parameter Definitions:
|
|
|
|
https://developers.google.com/custom-search/docs/xml_results#WebSearch_Query_Parameter_Definitions
|
2015-05-02 15:45:17 +02:00
|
|
|
|
|
|
|
"""

import urllib

from lxml import html
from flask_babel import gettext
from searx import logger
from searx.url_utils import urlencode, urlparse
from searx.utils import eval_xpath
from searx.engines.xpath import extract_text

# pylint: disable=unused-import
from searx.engines.google import (
    supported_languages_url,
    _fetch_supported_languages,
)
# pylint: enable=unused-import

from searx.engines.google import (
    get_lang_country,
    google_domains,
    time_range_dict,
)

logger = logger.getChild('google images')

# engine dependent config

categories = ['images']
paging = False
language_support = True
use_locale_domain = True
time_range_support = True
safesearch = True
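
# Google's "safe" URL parameter is derived from the user's safesearch setting
# (0, 1 or 2) via the filter_mapping table below; request() appends e.g.
# "&safe=active" to the query URL when safesearch is 1 or 2 (0 is falsy and
# adds nothing).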

filter_mapping = {
    0: 'images',
    1: 'active',
    2: 'active'
}


def scrap_out_thumbs(dom):
    """Scrap out thumbnail data from <script> tags."""
    ret_val = dict()
    for script in eval_xpath(dom, '//script[contains(., "_setImgSrc(")]'):
        _script = script.text
        # _setImgSrc('0','data:image\/jpeg;base64,\/9j\/4AAQSkZJR ....');
        _thumb_no, _img_data = _script[len("_setImgSrc("):-2].split(",", 1)
        _thumb_no = _thumb_no.replace("'", "")
        _img_data = _img_data.replace("'", "")
        _img_data = _img_data.replace(r"\/", r"/")
        ret_val[_thumb_no] = _img_data.replace(r"\x3d", "=")
    return ret_val
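
# Hypothetical usage sketch (the thumbnail number and base64 payload below are
# made up): given a response whose HTML contains
#
#     <script>_setImgSrc('0','data:image\/jpeg;base64,\/9j\/4AAQ...');</script>
#
# the returned dict maps each thumbnail number to its decoded data URL:
#
#     thumbs = scrap_out_thumbs(html.fromstring(resp.text))
#     thumbs['0']  # --> 'data:image/jpeg;base64,/9j/4AAQ...'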


def scrap_img_by_id(script, data_id):
    """Get full image URL by data-id in parent element."""
    img_url = ''
    _script = script.split('\n')
    for i, line in enumerate(_script):
        if 'gstatic.com/images' in line and data_id in line:
            url_line = _script[i + 1]
            img_url = url_line.split('"')[1]
            img_url = urllib.parse.unquote(img_url.replace(r'\u00', r'%'))
    return img_url
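
# Hedged sketch of the AF_initDataCallback layout this parser assumes (the
# URLs are illustrative, not real data): a line containing both the gstatic
# thumbnail URL and the data-id, followed by a line whose first quoted string
# is the full-size image URL::
#
#     ["https://encrypted-tbn0.gstatic.com/images?q=tbn:...", ...] <data-id> ...
#     ["https://upload.example.org/path/to/image.jpg", 1024, 768]
#
# scrap_img_by_id() returns the second URL, with \u00XX escapes unquoted.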


def request(query, params):
    """Google-Image search request"""

    language, country, lang_country = get_lang_country(
        # pylint: disable=undefined-variable
        params, supported_languages, language_aliases
    )
    subdomain = 'www.' + google_domains.get(country.upper(), 'google.com')

    query_url = 'https://' + subdomain + '/search' + "?" + urlencode({
        'q': query,
        'tbm': "isch",
        'hl': lang_country,
        'lr': "lang_" + language,
        'ie': "utf8",
        'oe': "utf8",
        'num': 30,
    })
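
    # Illustrative example (assumed values): for query 'tux' with
    # lang_country='en-US' and language='en', the URL so far is
    #     https://www.google.com/search?q=tux&tbm=isch&hl=en-US&lr=lang_en&ie=utf8&oe=utf8&num=30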

    if params['time_range'] in time_range_dict:
        query_url += '&' + urlencode({'tbs': 'qdr:' + time_range_dict[params['time_range']]})
    if params['safesearch']:
        query_url += '&' + urlencode({'safe': filter_mapping[params['safesearch']]})

    params['url'] = query_url
    logger.debug("query_url --> %s", query_url)
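
    # e.g. lang_country='en-US' and language='en' yield the header
    #     Accept-Language: en-US,en;q=0.8,en;q=0.5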
    params['headers']['Accept-Language'] = (
        "%s,%s;q=0.8,%s;q=0.5" % (lang_country, language, language))
    logger.debug(
        "HTTP Accept-Language --> %s", params['headers']['Accept-Language'])
    params['headers']['Accept'] = (
        'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
    )
    # params['google_subdomain'] = subdomain
    return params


def response(resp):
    """Get response from google's search request"""
    results = []

    # detect google sorry
    resp_url = urlparse(resp.url)
    if resp_url.netloc == 'sorry.google.com' or resp_url.path == '/sorry/IndexRedirect':
        raise RuntimeWarning('sorry.google.com')

    if resp_url.path.startswith('/sorry'):
        raise RuntimeWarning(gettext('CAPTCHA required'))

    # which subdomain ?
    # subdomain = resp.search_params.get('google_subdomain')

    # convert the text to dom
    dom = html.fromstring(resp.text)
    img_base64_map = scrap_out_thumbs(dom)
    img_src_script = eval_xpath(dom, '//script[contains(., "AF_initDataCallback({key: ")]')[1].text

    # parse results
    #
    # root element::
    #     <div id="islmp" ..>
    # result div per image::
    #     <div jsmodel="tTXmib"> / <div jsaction="..." data-id="..."
    #     The data-id matches an item in a JSON data structure in::
    #         <script nonce="I+vqelcy/01CKiBJi5Z1Ow">AF_initDataCallback({key: 'ds:1', ... data:function(){return [ ...
    #     In this structure the link to the original PNG, JPG or whatever is given
    # first link per image-div contains an <img> with the data-iid for base64 encoded image data::
    #     <img class="rg_i Q4LuWd" data-iid="0"
    # second link per image-div is the target link::
    #     <a class="VFACy kGQAp" href="https://en.wikipedia.org/wiki/The_Sacrament_of_the_Last_Supper">
    # the second link also contains two div tags with the *description* and *publisher*::
    #     <div class="WGvvNb">The Sacrament of the Last Supper ...</div>
    #     <div class="fxgdke">en.wikipedia.org</div>

    root = eval_xpath(dom, '//div[@id="islmp"]')
    if not root:
        logger.error("did not find root element id='islmp'")
        return results

    root = root[0]
    for img_node in eval_xpath(root, './/img[contains(@class, "rg_i")]'):

        try:
            img_alt = eval_xpath(img_node, '@alt')[0]

            img_base64_id = eval_xpath(img_node, '@data-iid')
            if img_base64_id:
                img_base64_id = img_base64_id[0]
                thumbnail_src = img_base64_map[img_base64_id]
            else:
                thumbnail_src = eval_xpath(img_node, '@src')
                if not thumbnail_src:
                    thumbnail_src = eval_xpath(img_node, '@data-src')
                if thumbnail_src:
                    thumbnail_src = thumbnail_src[0]
                else:
                    thumbnail_src = ''

            link_node = eval_xpath(img_node, '../../../a[2]')[0]
            url = eval_xpath(link_node, '@href')[0]

            pub_nodes = eval_xpath(link_node, './div/div')
            pub_descr = img_alt
            pub_source = ''
            if pub_nodes:
                pub_descr = extract_text(pub_nodes[0])
                pub_source = extract_text(pub_nodes[1])

            img_src_id = eval_xpath(img_node, '../../../@data-id')[0]
            src_url = scrap_img_by_id(img_src_script, img_src_id)
            if not src_url:
                src_url = thumbnail_src

            results.append({
                'url': url,
                'title': img_alt,
                'content': pub_descr,
                'source': pub_source,
                'img_src': src_url,
                # 'img_format': img_format,
                'thumbnail_src': thumbnail_src,
                'template': 'images.html'
            })

        except Exception as e:  # pylint: disable=broad-except
            logger.error(e, exc_info=True)
            # from lxml import etree
            # logger.debug(etree.tostring(img_node, pretty_print=True))
            # import pdb
            # pdb.set_trace()
            continue

    return results