[mod] remove unused import
Use "from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url  # NOQA" so that all unused imports can easily be removed with autoflake:

autoflake --in-place --recursive --remove-all-unused-imports searx tests
parent 6489a560ea
commit 3038052c79
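The pattern is easiest to see in miniature. The sketch below is a hypothetical engine module (its names are illustrative, not a file from this commit): the re-exported names look unused inside the module itself, so without the # NOQA marker autoflake's --remove-all-unused-imports pass would strip them and break the code that looks them up on this module.

# hypothetical_engine.py - minimal sketch of the # NOQA re-export pattern
# assumed by this commit (module name and function body are illustrative).

# A name the module really uses; autoflake keeps this import because
# match_language appears in the code below.
from searx.utils import match_language

# Names imported purely for re-export: searx resolves
# _fetch_supported_languages and supported_languages_url on the engine
# module when fetching its supported languages. They are never referenced
# in this file, so the # NOQA marker is what stops autoflake (and linters)
# from deleting the line.
from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url  # NOQA


def request(query, params):
    # A genuine use of match_language, so its import survives the cleanup.
    params['language'] = match_language(params.get('language', 'en-US'),
                                        ['en-US', 'de-DE', 'fr-FR'])
    return params

With every intentional re-export tagged this way, the whole tree can be cleaned in a single pass:

autoflake --in-place --recursive --remove-all-unused-imports searx tests

The hunks below apply the same pattern across engines, tests, and core modules (file headers were not preserved in this mirror view).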
@@ -11,7 +11,7 @@
 from urllib.parse import urlencode
 from lxml import html
-from searx.utils import extract_text, get_torrent_size, int_or_zero
+from searx.utils import extract_text, get_torrent_size

 # engine dependent config
 categories = ['files', 'images', 'videos', 'music']
@@ -11,7 +11,6 @@
 More info on api: https://arxiv.org/help/api/user-manual
 """

-from urllib.parse import urlencode
 from lxml import html
 from datetime import datetime

@@ -16,8 +16,8 @@
 import re
 from urllib.parse import urlencode
 from lxml import html
-from searx import logger, utils
-from searx.utils import extract_text, match_language, gen_useragent, eval_xpath
+from searx import logger
+from searx.utils import eval_xpath, extract_text, match_language

 logger = logger.getChild('bing engine')

@@ -98,7 +98,6 @@ def response(resp):
         result_len = int(result_len_container)
     except Exception as e:
         logger.debug('result error :\n%s', e)
-        pass

     if result_len and _get_offset_from_pageno(resp.search_params.get("pageno", 0)) > result_len:
         return []
@@ -15,10 +15,10 @@
 from urllib.parse import urlencode
 from lxml import html
 from json import loads
 import re
 from searx.utils import match_language

-from searx.engines.bing import _fetch_supported_languages, supported_languages_url, language_aliases
+from searx.engines.bing import language_aliases
+from searx.engines.bing import _fetch_supported_languages, supported_languages_url  # NOQA

 # engine dependent config
 categories = ['images']
@@ -16,7 +16,8 @@ from dateutil import parser
 from urllib.parse import urlencode, urlparse, parse_qsl
 from lxml import etree
 from searx.utils import list_get, match_language
-from searx.engines.bing import _fetch_supported_languages, supported_languages_url, language_aliases
+from searx.engines.bing import language_aliases
+from searx.engines.bing import _fetch_supported_languages, supported_languages_url  # NOQA

 # engine dependent config
 categories = ['news']
@@ -15,7 +15,8 @@ from lxml import html
 from urllib.parse import urlencode
 from searx.utils import match_language

-from searx.engines.bing import _fetch_supported_languages, supported_languages_url, language_aliases
+from searx.engines.bing import language_aliases
+from searx.engines.bing import _fetch_supported_languages, supported_languages_url  # NOQA

 categories = ['videos']
 paging = True
@@ -11,7 +11,6 @@
 """

 from lxml import html
-from operator import itemgetter
 from urllib.parse import quote, urljoin
 from searx.utils import extract_text, get_torrent_size

@@ -18,7 +18,6 @@ import re
 from os.path import expanduser, isabs, realpath, commonprefix
 from shlex import split as shlex_split
 from subprocess import Popen, PIPE
-from time import time
 from threading import Thread

 from searx import logger
@@ -1,10 +1,7 @@
-import json
 import re
 import unicodedata

-from datetime import datetime
-
-from searx.data import CURRENCIES
+from searx.data import CURRENCIES  # NOQA


 categories = []
@@ -15,7 +15,6 @@
 from lxml import html
 import re
 from urllib.parse import urlencode
 from searx.utils import extract_text


 # engine dependent config
@@ -12,10 +12,8 @@

 import random
 import string
 from dateutil import parser
 from json import loads
 from urllib.parse import urlencode
 from lxml import html
 from datetime import datetime

 # engine dependent config
@@ -15,8 +15,6 @@

 from lxml.html import fromstring
 from json import loads
 from urllib.parse import urlencode
 from searx.poolrequests import get
 from searx.utils import extract_text, match_language, eval_xpath

 # engine dependent config
@@ -15,11 +15,11 @@ from lxml import html

 from searx import logger
 from searx.data import WIKIDATA_UNITS
-from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url, language_aliases
+from searx.engines.duckduckgo import language_aliases
+from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url  # NOQA
 from searx.utils import extract_text, html_to_text, match_language, get_string_replaces_function
 from searx.external_urls import get_external_url, get_earth_coordinates_url, area_to_osm_zoom


 logger = logger.getChild('duckduckgo_definitions')

 URL = 'https://api.duckduckgo.com/'\
@@ -15,12 +15,9 @@

 from json import loads
 from urllib.parse import urlencode
-from searx.engines.duckduckgo import (
-    _fetch_supported_languages, supported_languages_url,
-    get_region_code, language_aliases
-)
+from searx.engines.duckduckgo import get_region_code
+from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url  # NOQA
 from searx.poolrequests import get
 from searx.utils import extract_text

 # engine dependent config
 categories = ['images']
@@ -60,7 +60,6 @@ def response(resp):

     except:
         logger.debug("Couldn't read number of results.")
-        pass

     for result in eval_xpath(dom, '//section[not(contains(@class, "essay"))]'):
         try:
@@ -1,8 +1,5 @@
 from json import loads, dumps
-from lxml import html
-from urllib.parse import quote, urljoin
 from requests.auth import HTTPBasicAuth
-from searx.utils import extract_text, get_torrent_size


 base_url = 'http://localhost:9200'
@@ -29,12 +29,9 @@ from lxml import html
 from flask_babel import gettext
 from searx import logger
 from searx.utils import extract_text, eval_xpath
+from searx.engines.google import _fetch_supported_languages, supported_languages_url  # NOQA

-# pylint: disable=unused-import
-from searx.engines.google import (
-    supported_languages_url,
-    _fetch_supported_languages,
-)
-# pylint: enable=unused-import

 from searx.engines.google import (
@@ -12,8 +12,8 @@

 from urllib.parse import urlencode
 from lxml import html
-from searx.engines.google import _fetch_supported_languages, supported_languages_url
 from searx.utils import match_language
+from searx.engines.google import _fetch_supported_languages, supported_languages_url  # NOQA

 # search-url
 categories = ['news']
@@ -11,7 +11,6 @@
 """

 from datetime import date, timedelta
 from json import loads
 from urllib.parse import urlencode
 from lxml import html
 from searx.utils import extract_text
@@ -12,8 +12,8 @@ from json import loads
 from datetime import datetime
 from operator import itemgetter

-from urllib.parse import quote, urljoin
-from searx.utils import extract_text, get_torrent_size
+from urllib.parse import quote
+from searx.utils import get_torrent_size

 # engine dependent config
 categories = ["videos", "music", "files"]
@@ -14,7 +14,6 @@ import re
 from json import loads
 from lxml import html
 from dateutil import parser
 from io import StringIO
 from urllib.parse import quote_plus, urlencode
 from searx import logger
 from searx.poolrequests import get as http_get
@@ -17,7 +17,6 @@ import re
 from unicodedata import normalize, combining
 from babel import Locale
 from babel.localedata import locale_identifiers
 from searx.languages import language_codes
 from searx.utils import extract_text, eval_xpath, match_language

 # engine dependent config
@@ -21,9 +21,9 @@ from babel.dates import format_datetime, format_date, format_time, get_datetime_format
 from searx import logger
 from searx.data import WIKIDATA_UNITS
 from searx.poolrequests import post, get
-from searx.engines.wikipedia import _fetch_supported_languages, supported_languages_url
 from searx.utils import match_language, searx_useragent, get_string_replaces_function
 from searx.external_urls import get_external_url, get_earth_coordinates_url, area_to_osm_zoom
+from searx.engines.wikipedia import _fetch_supported_languages, supported_languages_url  # NOQA

 logger = logger.getChild('wikidata')

@@ -13,9 +13,8 @@ import re
 from datetime import datetime, timedelta
 from urllib.parse import urlencode
 from lxml import html
-from searx.engines.yahoo import (
-    parse_url, _fetch_supported_languages, supported_languages_url, language_aliases
-)
+from searx.engines.yahoo import parse_url, language_aliases
+from searx.engines.yahoo import _fetch_supported_languages, supported_languages_url  # NOQA
 from dateutil import parser
 from searx.utils import extract_text, extract_url, match_language

@@ -11,7 +11,6 @@
 from functools import reduce
 from json import loads
 from urllib.parse import quote_plus
 from searx.utils import extract_text, list_get

 # engine dependent config
 categories = ['videos', 'music']
@@ -20,7 +20,6 @@ from importlib import import_module
 from os import listdir, makedirs, remove, stat, utime
 from os.path import abspath, basename, dirname, exists, join
 from shutil import copyfile
 from traceback import print_exc

 from searx import logger, settings, static_path

@@ -1,5 +1,4 @@
 import re
 from collections import defaultdict
 from operator import itemgetter
 from threading import RLock
 from urllib.parse import urlparse, unquote
@@ -7,7 +7,7 @@ from numbers import Number
 from os.path import splitext, join
 from random import choice
 from html.parser import HTMLParser
-from urllib.parse import urljoin, urlparse, unquote
+from urllib.parse import urljoin, urlparse

 from lxml import html
 from lxml.etree import XPath, _ElementStringResult, _ElementUnicodeResult
@@ -40,7 +40,7 @@ from datetime import datetime, timedelta
 from time import time
 from html import escape
 from io import StringIO
-from urllib.parse import urlencode, urlparse, urljoin, urlsplit
+from urllib.parse import urlencode, urljoin, urlparse

 from pygments import highlight
 from pygments.lexers import get_lexer_by_name
@@ -14,7 +14,6 @@ along with searx. If not, see < http://www.gnu.org/licenses/ >.

 '''

-from sys import version_info

 from searx.engines import command as command_engine
 from searx.testing import SearxTestCase
@@ -5,7 +5,7 @@ from searx.preferences import Preferences
 from searx.engines import engines

 import searx.search
-from searx.search import EngineRef, SearchQuery
+from searx.search import EngineRef
 from searx.webadapter import validate_engineref_list
