2019-08-02 13:37:13 +02:00
|
|
|
# -*- coding: utf-8 -*-
|
2020-08-06 17:42:46 +02:00
|
|
|
import sys
|
2015-01-11 13:26:40 +01:00
|
|
|
import re
|
2020-10-05 12:50:08 +02:00
|
|
|
import importlib
|
2015-01-11 13:26:40 +01:00
|
|
|
|
2017-12-01 20:45:24 +01:00
|
|
|
from numbers import Number
|
2016-11-19 17:51:19 +01:00
|
|
|
from os.path import splitext, join
|
2014-04-25 01:46:40 +02:00
|
|
|
from random import choice
|
2020-08-06 17:42:46 +02:00
|
|
|
from html.parser import HTMLParser
|
2020-11-02 11:19:53 +01:00
|
|
|
from urllib.parse import urljoin, urlparse
|
2020-10-02 18:13:56 +02:00
|
|
|
|
|
|
|
from lxml import html
|
2020-11-26 15:12:11 +01:00
|
|
|
from lxml.etree import ElementBase, XPath, XPathError, XPathSyntaxError, _ElementStringResult, _ElementUnicodeResult
|
2020-08-06 17:42:46 +02:00
|
|
|
from babel.core import get_global
|
2014-04-25 01:46:40 +02:00
|
|
|
|
2020-10-02 18:13:56 +02:00
|
|
|
|
2018-03-01 05:30:48 +01:00
|
|
|
from searx import settings
|
2020-10-05 13:50:33 +02:00
|
|
|
from searx.data import USER_AGENTS
|
2021-07-27 18:37:46 +02:00
|
|
|
from searx.version import VERSION_TAG
|
2016-09-06 16:43:48 +02:00
|
|
|
from searx.languages import language_codes
|
2020-11-26 15:12:11 +01:00
|
|
|
from searx.exceptions import SearxXPathSyntaxException, SearxEngineXPathException
|
2015-01-11 13:26:40 +01:00
|
|
|
from searx import logger
|
2014-11-18 11:37:42 +01:00
|
|
|
|
2015-01-11 13:26:40 +01:00
|
|
|
|
|
|
|
# child logger for this module (inherits handlers/level from the searx logger)
logger = logger.getChild('utils')
|
2014-01-10 23:38:08 +01:00
|
|
|
|
2015-01-01 14:13:56 +01:00
|
|
|
# HTML tags whose text content must not be extracted
# (see HTMLTextExtractor.is_valid_tag)
blocked_tags = ('script',
                'style')
|
|
|
|
|
2019-08-02 13:37:13 +02:00
|
|
|
# "%uXXXX" escape sequences (4 hex digits), e.g. "%u5409" -- see ecma_unescape()
ecma_unescape4_re = re.compile(r'%u([0-9a-fA-F]{4})', re.UNICODE)
# "%XX" escape sequences (2 hex digits), e.g. "%20" -- see ecma_unescape()
ecma_unescape2_re = re.compile(r'%([0-9a-fA-F]{2})', re.UNICODE)
|
|
|
|
|
2019-11-15 09:31:37 +01:00
|
|
|
# cache of compiled lxml XPath objects, keyed by the XPath string (see get_xpath)
xpath_cache = dict()
# cache for _get_lang_to_lc_dict results, keyed by str(lang_list)
lang_to_lc_cache = dict()
|
|
|
|
|
2014-01-19 22:59:01 +01:00
|
|
|
|
2020-11-26 15:12:11 +01:00
|
|
|
class NotSetClass:
    # sentinel type: lets callers distinguish "argument not given" from an
    # explicit None (see eval_xpath_getindex)
    pass


# unique sentinel instance used as a default argument value
NOTSET = NotSetClass()
|
|
|
|
|
|
|
|
|
2014-10-17 12:34:51 +02:00
|
|
|
def searx_useragent():
    """Return the searx User Agent"""
    suffix = settings['outgoing']['useragent_suffix']
    ua = 'searx/{searx_version} {suffix}'.format(searx_version=VERSION_TAG, suffix=suffix)
    # strip() removes the trailing space when the configured suffix is empty
    return ua.strip()
|
2014-10-19 12:41:04 +02:00
|
|
|
|
|
|
|
|
2018-08-05 10:55:42 +02:00
|
|
|
def gen_useragent(os=None):
    """Return a random browser User Agent

    Args:
        * os (str, optional): OS string to embed; a random one from
          USER_AGENTS['os'] is chosen when not given.

    Returns:
        * str: formatted User Agent string

    See searx/data/useragents.json
    """
    # str.format already returns a str -- the old str() wrapper was redundant
    return USER_AGENTS['ua'].format(os=os or choice(USER_AGENTS['os']), version=choice(USER_AGENTS['versions']))
|
2017-05-28 15:46:45 +02:00
|
|
|
|
|
|
|
|
2020-09-11 10:23:56 +02:00
|
|
|
class HTMLTextExtractorException(Exception):
    """Raised by HTMLTextExtractor when the HTML is invalid (mismatched end tag)."""
    pass
|
|
|
|
|
|
|
|
|
2020-11-16 09:43:23 +01:00
|
|
|
class HTMLTextExtractor(HTMLParser):  # pylint: disable=W0223  # (see https://bugs.python.org/issue31844)
    """Collect the text content of an HTML fragment, skipping the content of
    <script> and <style> elements. Raises HTMLTextExtractorException on a
    mismatched end tag."""

    def __init__(self):
        HTMLParser.__init__(self)
        self.result = []  # collected text fragments
        self.tags = []    # stack of currently open tags

    def handle_starttag(self, tag, attrs):
        self.tags.append(tag)

    def handle_endtag(self, tag):
        if not self.tags:
            return

        # an end tag must close the innermost open tag
        if tag != self.tags[-1]:
            raise HTMLTextExtractorException()

        self.tags.pop()

    def is_valid_tag(self):
        # text is kept unless the innermost open tag is script/style
        return not self.tags or self.tags[-1] not in ('script', 'style')

    def handle_data(self, data):
        if self.is_valid_tag():
            self.result.append(data)

    def handle_charref(self, name):
        if not self.is_valid_tag():
            return
        # "&#xNN;" is hexadecimal, "&#NN;" is decimal
        codepoint = int(name[1:], 16) if name[0] in ('x', 'X') else int(name)
        self.result.append(chr(codepoint))

    def handle_entityref(self, name):
        if not self.is_valid_tag():
            return
        # the raw entity name is appended as-is (no name -> codepoint translation)
        self.result.append(name)

    def get_text(self):
        return ''.join(self.result).strip()
|
2013-11-08 23:44:26 +01:00
|
|
|
|
2014-01-19 22:59:01 +01:00
|
|
|
|
2020-10-02 18:17:01 +02:00
|
|
|
def html_to_text(html_str):
    """Extract text from a HTML string

    Args:
        * html_str (str): string HTML

    Returns:
        * str: extracted text

    Examples:
        >>> html_to_text('Example <span id="42">#2</span>')
        'Example #2'

        >>> html_to_text('<style>.span { color: red; }</style><span>Example</span>')
        'Example'
    """
    # collapse newlines and runs of whitespace before parsing
    html_str = ' '.join(html_str.replace('\n', ' ').split())
    extractor = HTMLTextExtractor()
    try:
        extractor.feed(html_str)
    except HTMLTextExtractorException:
        # invalid markup: keep whatever text was collected so far
        logger.debug("HTMLTextExtractor: invalid HTML\n%s", html_str)
    return extractor.get_text()
|
2013-11-15 18:55:18 +01:00
|
|
|
|
|
|
|
|
2020-11-26 15:12:11 +01:00
|
|
|
def extract_text(xpath_results, allow_none=False):
    """Extract text from a lxml result

    * if xpath_results is list, extract the text from each result and concat the list
    * if xpath_results is a xml element, extract all the text node from it
      ( text_content() method from lxml )
    * if xpath_results is a string element, then it's already done
    """
    if isinstance(xpath_results, list):
        # list of results: concatenate the text of each item (recursive)
        return ''.join(extract_text(item) for item in xpath_results).strip()
    if isinstance(xpath_results, ElementBase):
        # single element: serialize its text content
        text = html.tostring(xpath_results, encoding='unicode', method='text', with_tail=False)
        text = text.strip().replace('\n', ' ')
        return ' '.join(text.split())
    if isinstance(xpath_results, (_ElementStringResult, _ElementUnicodeResult, str, Number, bool)):
        # scalar result: stringify
        return str(xpath_results)
    if xpath_results is None:
        if allow_none:
            return None
        raise ValueError('extract_text(None, allow_none=False)')
    raise ValueError('unsupported type')
|
2020-10-02 18:13:56 +02:00
|
|
|
|
|
|
|
|
2020-10-03 10:02:50 +02:00
|
|
|
def normalize_url(url, base_url):
    """Normalize URL: add protocol, join URL with base_url, add trailing slash if there is no path

    Args:
        * url (str): Relative URL
        * base_url (str): Base URL, it must be an absolute URL.

    Example:
        >>> normalize_url('https://example.com', 'http://example.com/')
        'https://example.com/'
        >>> normalize_url('//example.com', 'http://example.com/')
        'http://example.com/'
        >>> normalize_url('//example.com', 'https://example.com/')
        'https://example.com/'
        >>> normalize_url('/path?a=1', 'https://example.com')
        'https://example.com/path?a=1'
        >>> normalize_url('', 'https://example.com')
        'https://example.com/'
        >>> normalize_url('/test', '/path')
        raise ValueError

    Raises:
        * lxml.etree.ParserError

    Returns:
        * str: normalized URL
    """
    if url.startswith('//'):
        # protocol-relative URL: borrow the scheme of base_url
        scheme = urlparse(base_url).scheme or 'http'
        url = '{0}:{1}'.format(scheme, url)
    elif url.startswith('/'):
        # relative to the search engine root
        url = urljoin(base_url, url)

    # relative URLs that fall through the cracks above
    if '://' not in url:
        url = urljoin(base_url, url)

    parts = urlparse(url)
    if not parts.netloc:
        raise ValueError('Cannot parse url')

    # ensure a path component, i.e. "https://example.com" -> "https://example.com/"
    return url if parts.path else url + '/'
|
|
|
|
|
|
|
|
|
2020-10-03 10:02:50 +02:00
|
|
|
def extract_url(xpath_results, base_url):
    """Extract and normalize URL from lxml Element

    Args:
        * xpath_results (Union[List[html.HtmlElement], html.HtmlElement]): lxml Element(s)
        * base_url (str): Base URL

    Example:
        >>> def f(s, search_url):
        >>>    return searx.utils.extract_url(html.fromstring(s), search_url)
        >>> f('<span id="42">https://example.com</span>', 'http://example.com/')
        'https://example.com/'
        >>> f('//example.com', 'http://example.com/')
        'http://example.com/'
        >>> searx.utils.extract_url([], 'https://example.com')
        raise ValueError

    Raises:
        * ValueError
        * lxml.etree.ParserError

    Returns:
        * str: normalized URL
    """
    if xpath_results == []:
        raise ValueError('Empty url resultset')

    # extract the raw text, then let normalize_url do the heavy lifting
    return normalize_url(extract_text(xpath_results), base_url)
|
2020-10-02 18:13:56 +02:00
|
|
|
|
|
|
|
|
2015-01-17 21:54:40 +01:00
|
|
|
def dict_subset(d, properties):
    """Extract a subset of a dict

    Keys listed in ``properties`` but missing from ``d`` are silently skipped.

    Examples:
        >>> dict_subset({'A': 'a', 'B': 'b', 'C': 'c'}, ['A', 'C'])
        {'A': 'a', 'C': 'c'}
        >>> dict_subset({'A': 'a', 'B': 'b', 'C': 'c'}, ['A', 'D'])
        {'A': 'a'}
    """
    return {k: d[k] for k in properties if k in d}
|
2015-01-29 19:44:52 +01:00
|
|
|
|
|
|
|
|
2016-08-13 14:55:47 +02:00
|
|
|
def get_torrent_size(filesize, filesize_multiplier):
    """Convert a file size with a unit into a number of bytes.

    Args:
        * filesize (str): size
        * filesize_multiplier (str): TB, GB, .... TiB, GiB...

    Returns:
        * int: number of bytes, or None if filesize is not a number

    Example:
        >>> get_torrent_size('5', 'GB')
        5000000000
        >>> get_torrent_size('3.14', 'MiB')
        3292528
    """
    # Decimal (SI) prefixes are powers of 1000, binary (IEC) prefixes powers
    # of 1024.  NOTE(review): the previous implementation had these swapped
    # (e.g. 'GB' used 1024**3 and 'GiB' used 1000**3).
    multipliers = {
        'KB': 1000,
        'MB': 1000 ** 2,
        'GB': 1000 ** 3,
        'TB': 1000 ** 4,
        'KiB': 1024,
        'MiB': 1024 ** 2,
        'GiB': 1024 ** 3,
        'TiB': 1024 ** 4,
    }
    try:
        # unknown multipliers fall back to 1 (plain bytes)
        return int(float(filesize) * multipliers.get(filesize_multiplier, 1))
    except ValueError:
        # filesize is not a number
        return None
|
2016-09-06 16:43:48 +02:00
|
|
|
|
|
|
|
|
2016-10-11 19:31:42 +02:00
|
|
|
def convert_str_to_int(number_str):
    """Convert number_str to int or 0 if number_str is not a number."""
    # EAFP: int() accepts negatives and surrounding whitespace, and rejects
    # characters like '²' for which str.isdigit() is True but int() raises
    # ValueError (the old isdigit() pre-check crashed on such input).
    try:
        return int(number_str)
    except ValueError:
        return 0
|
|
|
|
|
|
|
|
|
2017-09-04 20:05:04 +02:00
|
|
|
def int_or_zero(num):
    """Convert num to int or 0. num can be either a str or a list.
    If num is a list, the first element is converted to int (or return 0 if the list is empty).
    If num is a str, see convert_str_to_int
    """
    if isinstance(num, list):
        # empty list -> 0, otherwise convert the first element
        return convert_str_to_int(num[0]) if num else 0
    return convert_str_to_int(num)
|
|
|
|
|
|
|
|
|
2016-09-06 16:43:48 +02:00
|
|
|
def is_valid_lang(lang):
    """Return language code and name if lang describe a language.

    Examples:
        >>> is_valid_lang('zz')
        False
        >>> is_valid_lang('uk')
        (True, 'uk', 'ukrainian')
        >>> is_valid_lang(b'uk')
        (True, 'uk', 'ukrainian')
        >>> is_valid_lang('en')
        (True, 'en', 'english')
        >>> searx.utils.is_valid_lang('Español')
        (True, 'es', 'spanish')
        >>> searx.utils.is_valid_lang('Spanish')
        (True, 'es', 'spanish')
    """
    if isinstance(lang, bytes):
        lang = lang.decode()
    # two characters -> treat as an ISO 639-1 style abbreviation
    is_abbr = len(lang) == 2
    lang = lang.lower()
    for item in language_codes:
        if is_abbr:
            found = item[0][:2] == lang
        else:
            # match either the native name or the english name
            found = lang in (item[1].lower(), item[3].lower())
        if found:
            return (True, item[0][:2], item[3].lower())
    return False
|
2016-11-19 17:51:19 +01:00
|
|
|
|
|
|
|
|
2019-07-18 21:32:17 +02:00
|
|
|
def _get_lang_to_lc_dict(lang_list):
    """Return (and cache) a mapping from bare language code to the first full
    locale of that language found in lang_list (e.g. 'en' -> 'en-US')."""
    key = str(lang_list)
    mapping = lang_to_lc_cache.get(key)
    if mapping is None:
        mapping = {}
        for locale in lang_list:
            # keep the first locale seen for each bare language code
            mapping.setdefault(locale.split('-')[0], locale)
        lang_to_lc_cache[key] = mapping
    return mapping
|
|
|
|
|
|
|
|
|
2020-11-16 09:43:23 +01:00
|
|
|
def _match_language(lang_code, lang_list=[], custom_aliases={}):  # pylint: disable=W0102
    """auxiliary function to match lang_code in lang_list"""
    # apply a custom alias if one is defined for this code
    lang_code = custom_aliases.get(lang_code, lang_code)

    if lang_code in lang_list:
        return lang_code

    # try the most likely country for this language (babel likely_subtags)
    subtags = get_global('likely_subtags').get(lang_code)
    if subtags:
        parts = subtags.split('_')
        candidate = parts[0] + '-' + parts[-1]
        candidate = custom_aliases.get(candidate, candidate)
        if candidate in lang_list:
            return candidate

    # fall back to any supported country for this language
    return _get_lang_to_lc_dict(lang_list).get(lang_code, None)
|
2018-03-01 05:30:48 +01:00
|
|
|
|
|
|
|
|
2020-11-16 09:43:23 +01:00
|
|
|
def match_language(locale_code, lang_list=[], custom_aliases={}, fallback='en-US'):  # pylint: disable=W0102
    """get the language code from lang_list that best matches locale_code"""
    def try_match(code):
        # helper: skip the lookup entirely for a falsy candidate
        return _match_language(code, lang_list, custom_aliases) if code else None

    # 1. exact match on the given locale_code
    language = try_match(locale_code)
    if language:
        return language

    locale_parts = locale_code.split('-')
    lang_code = locale_parts[0]

    # 2. try an equivalent country code (babel territory alias)
    if len(locale_parts) > 1:
        country_alias = get_global('territory_aliases').get(locale_parts[-1])
        if country_alias:
            language = try_match(lang_code + '-' + country_alias[0])
            if language:
                return language

    # 3. try an equivalent language code (babel language alias)
    language = try_match(get_global('language_aliases').get(lang_code))
    if language:
        return language

    # 4. try the bare language code, without the country part
    if lang_code != locale_code:
        language = try_match(lang_code)

    return language or fallback
|
|
|
|
|
|
|
|
|
2016-11-19 17:51:19 +01:00
|
|
|
def load_module(filename, module_dir):
    """Load the Python module ``module_dir``/``filename``, register it in
    sys.modules under its stem name, and return it."""
    modname = splitext(filename)[0]
    # drop a previously loaded module of the same name so it is re-executed
    sys.modules.pop(modname, None)
    filepath = join(module_dir, filename)
    # and https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
    spec = importlib.util.spec_from_file_location(modname, filepath)
    module = importlib.util.module_from_spec(spec)
    sys.modules[modname] = module
    spec.loader.exec_module(module)
    return module
|
2017-07-20 15:44:02 +02:00
|
|
|
|
|
|
|
|
2017-12-01 20:45:24 +01:00
|
|
|
def to_string(obj):
    """Convert obj to its string representation."""
    if isinstance(obj, str):
        return obj
    # str() covers numbers and every other object (all objects define
    # __str__, falling back to __repr__).  The old explicit
    # hasattr(obj, '__str__') branch was always true -- making the __repr__
    # branch unreachable -- and calling obj.__str__() directly crashed on
    # class objects (unbound descriptor).
    return str(obj)
|
2019-08-02 13:37:13 +02:00
|
|
|
|
|
|
|
|
|
|
|
def ecma_unescape(s):
    """Python implementation of the unescape javascript function

    https://www.ecma-international.org/ecma-262/6.0/#sec-unescape-string
    https://developer.mozilla.org/fr/docs/Web/JavaScript/Reference/Objets_globaux/unescape

    Examples:
        >>> ecma_unescape('%u5409')
        '吉'
        >>> ecma_unescape('%20')
        ' '
        >>> ecma_unescape('%F3')
        'ó'
    """
    def _decode(match):
        # hex digits -> the corresponding unicode character
        return chr(int(match.group(1), 16))

    # four-digit "%uXXXX" sequences first ("%u5409" becomes "吉") ...
    s = ecma_unescape4_re.sub(_decode, s)
    # ... then two-digit "%XX" sequences ("%20" becomes " ", "%F3" becomes "ó")
    return ecma_unescape2_re.sub(_decode, s)
|
2019-09-23 17:14:32 +02:00
|
|
|
|
|
|
|
|
2020-10-26 19:25:28 +01:00
|
|
|
def get_string_replaces_function(replaces):
    """Build and return a function that applies every old -> new substitution
    of the ``replaces`` dict to its ``text`` argument in a single regex pass."""
    escaped = {re.escape(old): new for old, new in replaces.items()}
    # one alternation matching any of the (escaped) search strings
    combined = re.compile('|'.join(escaped.keys()))

    def replace_all(text):
        return combined.sub(lambda match: escaped[re.escape(match.group(0))], text)

    return replace_all
|
|
|
|
|
|
|
|
|
2019-09-23 17:14:32 +02:00
|
|
|
def get_engine_from_settings(name):
    """Return engine configuration from settings.yml of a given engine name"""
    if 'engines' not in settings:
        return {}

    for engine in settings['engines']:
        # engines without a 'name' key are skipped
        if 'name' in engine and engine['name'] == name:
            return engine

    # no engine of that name configured
    return {}
|
2019-11-15 09:31:37 +01:00
|
|
|
|
|
|
|
|
2020-11-26 15:12:11 +01:00
|
|
|
def get_xpath(xpath_spec):
    """Return cached compiled XPath

    There is no thread lock.
    Worst case scenario, xpath_str is compiled more than one time.

    Args:
        * xpath_spec (str|lxml.etree.XPath): XPath as a str or lxml.etree.XPath

    Returns:
        * result (bool, float, list, str): Results.

    Raises:
        * TypeError: Raise when xpath_spec is neither a str nor a lxml.etree.XPath
        * SearxXPathSyntaxException: Raise when there is a syntax error in the XPath
    """
    # already compiled: nothing to do
    if isinstance(xpath_spec, XPath):
        return xpath_spec

    if not isinstance(xpath_spec, str):
        raise TypeError('xpath_spec must be either a str or a lxml.etree.XPath')

    compiled = xpath_cache.get(xpath_spec)
    if compiled is None:
        try:
            compiled = XPath(xpath_spec)
        except XPathSyntaxError as e:
            raise SearxXPathSyntaxException(xpath_spec, str(e.msg)) from e
        xpath_cache[xpath_spec] = compiled
    return compiled
|
|
|
|
|
|
|
|
|
|
|
|
def eval_xpath(element, xpath_spec):
    """Equivalent of element.xpath(xpath_str) but compile xpath_str once for all.
    See https://lxml.de/xpathxslt.html#xpath-return-values

    Args:
        * element (ElementBase): [description]
        * xpath_spec (str|lxml.etree.XPath): XPath as a str or lxml.etree.XPath

    Returns:
        * result (bool, float, list, str): Results.

    Raises:
        * TypeError: Raise when xpath_spec is neither a str nor a lxml.etree.XPath
        * SearxXPathSyntaxException: Raise when there is a syntax error in the XPath
        * SearxEngineXPathException: Raise when the XPath can't be evaluated.
    """
    compiled = get_xpath(xpath_spec)
    try:
        return compiled(element)
    except XPathError as e:
        # wrap the lxml error so the failing xpath_spec is recorded
        arg = ' '.join(str(i) for i in e.args)
        raise SearxEngineXPathException(xpath_spec, arg) from e
|
2020-11-26 15:12:11 +01:00
|
|
|
|
|
|
|
|
|
|
|
def eval_xpath_list(element, xpath_spec, min_len=None):
    """Same as eval_xpath, check if the result is a list

    Args:
        * element (ElementBase): [description]
        * xpath_spec (str|lxml.etree.XPath): XPath as a str or lxml.etree.XPath
        * min_len (int, optional): [description]. Defaults to None.

    Raises:
        * TypeError: Raise when xpath_spec is neither a str nor a lxml.etree.XPath
        * SearxXPathSyntaxException: Raise when there is a syntax error in the XPath
        * SearxEngineXPathException: raise if the result is not a list

    Returns:
        * result (bool, float, list, str): Results.
    """
    result = eval_xpath(element, xpath_spec)
    if not isinstance(result, list):
        raise SearxEngineXPathException(xpath_spec, 'the result is not a list')
    # optionally enforce a minimum number of results
    if min_len is not None and len(result) < min_len:
        raise SearxEngineXPathException(xpath_spec, 'len(xpath_str) < ' + str(min_len))
    return result
|
|
|
|
|
|
|
|
|
2020-11-26 15:12:11 +01:00
|
|
|
def eval_xpath_getindex(elements, xpath_spec, index, default=NOTSET):
    """Call eval_xpath_list then get one element using the index parameter.
    If the index does not exist, raise an exception if default is not set,
    otherwise return the default value (can be None).

    Args:
        * elements (ElementBase): lxml element to apply the xpath.
        * xpath_spec (str|lxml.etree.XPath): XPath as a str or lxml.etree.XPath.
        * index (int): index to get
        * default (Object, optional): Defaults if index doesn't exist.

    Raises:
        * TypeError: Raise when xpath_spec is neither a str nor a lxml.etree.XPath
        * SearxXPathSyntaxException: Raise when there is a syntax error in the XPath
        * SearxEngineXPathException: if the index is not found. Also see eval_xpath.

    Returns:
        * result (bool, float, list, str): Results.
    """
    result = eval_xpath_list(elements, xpath_spec)
    # negative indices are allowed, python-style
    if -len(result) <= index < len(result):
        return result[index]
    # identity check: "default == NOTSET" would invoke default.__eq__, which
    # can misbehave for arbitrary caller-supplied defaults (e.g. numpy arrays)
    if default is NOTSET:
        # raise an SearxEngineXPathException instead of IndexError
        # to record xpath_spec
        raise SearxEngineXPathException(xpath_spec, 'index ' + str(index) + ' not found')
    return default
|