# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""This is the implementation of the google images engine.

.. admonition:: Content-Security-Policy (CSP)

   This engine needs to allow images from the `data URLs`_ (prefixed with the
   ``data:`` scheme)::

       Header set Content-Security-Policy "img-src 'self' data: ;"

.. _data URLs:
   https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URIs
"""
import re
from urllib.parse import urlencode, unquote
from lxml import html
from searx.utils import (
eval_xpath,
eval_xpath_list,
eval_xpath_getindex,
extract_text,
)
from searx.engines.google import (
get_lang_info,
time_range_dict,
detect_google_sorry,
)
# pylint: disable=unused-import
from searx.engines.google import supported_languages_url, _fetch_supported_languages
# pylint: enable=unused-import
# about
# Engine metadata consumed by the searx "about" infrastructure: results are
# scraped from Google Images' HTML pages, not fetched via an official API.
about = {
    "website": 'https://images.google.com',
    "wikidata_id": 'Q521550',
    "official_api_documentation": 'https://developers.google.com/custom-search',
    "use_official_api": False,
    "require_api_key": False,
    # results are parsed out of the returned HTML (see the lxml/xpath imports)
    "results": 'HTML',
}
# engine dependent config
categories = ['images', 'web']
# pagination is disabled for this engine
paging = False
# use a localized google domain (resolved via the shared google engine helpers)
use_locale_domain = True
# time-range filtering is supported (see time_range_dict imported above)
time_range_support = True
safesearch = True
send_accept_language_header = True
# Maps searx safesearch levels (0 = off, 1 = moderate, 2 = strict) to values
# presumably sent as Google's safe-search URL parameter in the request builder
# (not visible in this chunk) — NOTE(review): confirm against request().
filter_mapping = {0: 'images', 1: 'active', 2: 'active'}
def scrap_out_thumbs(dom):
"""Scrap out thumbnail data from
#
#
# The second script contains the URLs of the images.
# The AF_initDataCallback(..) is called with very large dictionary, that
# looks like JSON but it is not JSON since it contains JS variables and
# constants like 'null' (we can't use a JSON parser for).
#
# The alternative is to parse the entire