
Merge branch 'master' into fix-engine-spotify

This commit is contained in:
Markus Heiser 2019-12-29 09:47:06 +01:00 committed by GitHub
commit 36e72a4619
244 changed files with 10745 additions and 11499 deletions

.dir-locals.el Normal file

@ -0,0 +1,133 @@
;;; .dir-locals.el
;;
;; If you get ``*** EPC Error ***`` (even after a jedi:install-server) in your
;; emacs session, most likely you have jedi-mode enabled but the Python
;; environment is missing. The Python environment has to be next to the
;; ``<repo>/.dir-locals.el`` in::
;;
;; ./local/py3
;;
;; In Emacs, some buffer locals are referencing the project environment:
;;
;; - prj-root --> <repo>/
;; - python-environment-directory --> <repo>/local
;; - python-environment-default-root-name --> py3
;; - python-shell-virtualenv-root --> <repo>/local/py3
;; When this variable is set with the path of the virtualenv to use,
;; `process-environment' and `exec-path' get proper values in order to run
;; shells inside the specified virtualenv, example::
;; (setq python-shell-virtualenv-root "/path/to/env/")
;;
;; To set up such an environment, build target 'pyenv' or 'pyenvinstall'::
;;
;; $ make pyenvinstall
;;
;; Alternatively create the virtualenv, source it and install jedi + epc
;; (required by `emacs-jedi <https://tkf.github.io/emacs-jedi>`_)::
;;
;; $ virtualenv --python=python3 "--no-site-packages" ./local/py3
;; ...
;; $ source ./local/py3/bin/activate
;; (py3)$ # now install into the activated 'py3' environment ..
;; (py3)$ pip install jedi epc
;; ...
;;
;; Here is what I also found useful to add to my .emacs::
;;
;; (global-set-key [f6] 'flycheck-mode)
;; (add-hook 'python-mode-hook 'my:python-mode-hook)
;;
;; (defun my:python-mode-hook ()
;; (add-to-list 'company-backends 'company-jedi)
;; (require 'jedi-core)
;; (jedi:setup)
;; (define-key python-mode-map (kbd "C-c C-d") 'jedi:show-doc)
;; (define-key python-mode-map (kbd "M-.") 'jedi:goto-definition)
;; (define-key python-mode-map (kbd "M-,") 'jedi:goto-definition-pop-marker)
;; )
;;
((nil
. ((fill-column . 80)
))
(python-mode
. ((indent-tabs-mode . nil)
;; project root folder is where the `.dir-locals.el' is located
(eval . (setq-local
prj-root (locate-dominating-file default-directory ".dir-locals.el")))
(eval . (setq-local
python-environment-directory (expand-file-name "./local" prj-root)))
;; use 'py3' environment as default
(eval . (setq-local
python-environment-default-root-name "py3"))
(eval . (setq-local
python-shell-virtualenv-root
(concat python-environment-directory
"/"
python-environment-default-root-name)))
;; python-shell-virtualenv-path is obsolete, use python-shell-virtualenv-root!
;; (eval . (setq-local
;; python-shell-virtualenv-path python-shell-virtualenv-root))
(eval . (setq-local
python-shell-interpreter
(expand-file-name "bin/python" python-shell-virtualenv-root)))
(eval . (setq-local
python-environment-virtualenv
(list (expand-file-name "bin/virtualenv" python-shell-virtualenv-root)
;;"--system-site-packages"
"--quiet")))
(eval . (setq-local
pylint-command
(expand-file-name "bin/pylint" python-shell-virtualenv-root)))
;; pylint will find the '.pylintrc' file next to the CWD
;; https://pylint.readthedocs.io/en/latest/user_guide/run.html#command-line-options
(eval . (setq-local
flycheck-pylintrc ".pylintrc"))
;; flycheck & other python stuff should use the local py3 environment
(eval . (setq-local
flycheck-python-pylint-executable python-shell-interpreter))
;; use 'M-x jedi:show-setup-info' and 'M-x epc:controller' to inspect jedi server
;; https://tkf.github.io/emacs-jedi/latest/#jedi:environment-root -- You
;; can specify a full path instead of a name (relative path). In that case,
;; python-environment-directory is ignored and Python virtual environment
;; is created at the specified path.
(eval . (setq-local jedi:environment-root python-shell-virtualenv-root))
;; https://tkf.github.io/emacs-jedi/latest/#jedi:server-command
(eval .(setq-local
jedi:server-command
(list python-shell-interpreter
jedi:server-script)
))
;; jedi:environment-virtualenv --> see above 'python-environment-virtualenv'
;; is set buffer local! No need to setup jedi:environment-virtualenv:
;;
;; Virtualenv command to use. A list of string. If it is nil,
;; python-environment-virtualenv is used instead. You must set non-nil
;; value to jedi:environment-root in order to make this setting work.
;;
;; https://tkf.github.io/emacs-jedi/latest/#jedi:environment-virtualenv
;;
;; (eval . (setq-local
;; jedi:environment-virtualenv
;; (list (expand-file-name "bin/virtualenv" python-shell-virtualenv-root)
;; ;;"--python"
;; ;;"/usr/bin/python3.4"
;; )))
;; jedi:server-args
)))

.gitignore vendored

@ -18,3 +18,9 @@ setup.cfg
 node_modules/
 .tx/
+build/
+dist/
+local/
+gh-pages/
+searx.egg-info/

.pylintrc Normal file

@ -0,0 +1,444 @@
# -*- coding: utf-8; mode: conf -*-
# lint Python modules using external checkers.
#
# This is the main checker controlling the other ones and the reports
# generation. It is itself both a raw checker and an astng checker in order
# to:
# * handle message activation / deactivation at the module level
# * handle some basic but necessary stats data (number of classes, methods...)
#
[MASTER]
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loaded into the active Python interpreter and may
# run arbitrary code.
extension-pkg-whitelist=
# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=CVS, .git, .svn
# Add files or directories matching the regex patterns to the blacklist. The
# regex matches against base names, not paths.
ignore-patterns=
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=
# Use multiple processes to speed up Pylint.
jobs=1
# List of plugins (as comma separated values of python modules names) to load,
# usually to register additional checkers.
load-plugins=
# Pickle collected data for later comparisons.
persistent=yes
# Specify a configuration file.
#rcfile=
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
[MESSAGES CONTROL]
# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
confidence=
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once).You can also use "--disable=all" to
# disable everything first and then reenable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
disable=bad-whitespace, duplicate-code
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
enable=
[REPORTS]
# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables 'error', 'warning', 'refactor' and
# 'convention', which contain the number of messages in each category, as well
# as 'statement', which contains the total number of statements analyzed. This
# is used by the global evaluation report (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
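# A worked example with assumed numbers: a module with 1 error, 2 warnings, no
# refactor or convention messages and 100 statements is scored
# 10.0 - ((5*1 + 2 + 0 + 0) / 100) * 10 = 9.3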
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
# HINT: do not set this here, use argument --msg-template=...
#msg-template={path}:{line}: [{msg_id}({symbol}),{obj}] {msg}
# Set the output format. Available formats are text, parseable, colorized, json
# and msvs (visual studio).You can also give a reporter class, eg
# mypackage.mymodule.MyReporterClass.
# HINT: do not set this here, use argument --output-format=...
#output-format=text
# Tells whether to display a full report or only the messages
reports=no
# Activate the evaluation score.
score=yes
[REFACTORING]
# Maximum number of nested blocks for function / method body
max-nested-blocks=5
[BASIC]
# List of builtin function names that should not be used, separated by a comma
bad-functions=map,filter,apply,input
# Naming hint for argument names
argument-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$
# Regular expression matching correct argument names
argument-rgx=(([a-z][a-zA-Z0-9_]{2,30})|(_[a-z0-9_]*))$
# Naming hint for attribute names
attr-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$
# Regular expression matching correct attribute names
attr-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*)|([A-Z0-9_]*))$
# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata
# Naming hint for class attribute names
class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
# Regular expression matching correct class attribute names
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
# Naming hint for class names
class-name-hint=[A-Z_][a-zA-Z0-9]+$
# Regular expression matching correct class names
class-rgx=[A-Z_][a-zA-Z0-9]+$
# Naming hint for constant names
const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
# Regular expression matching correct constant names
const-rgx=(([a-zA-Z_][a-zA-Z0-9_]*)|(__.*__))$
#const-rgx=[f]?[A-Z_][a-zA-Z0-9_]{2,30}$
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1
# Naming hint for function names
function-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$
# Regular expression matching correct function names
function-rgx=(([a-z][a-zA-Z0-9_]{2,30})|(_[a-z0-9_]*))$
# Good variable names which should always be accepted, separated by a comma
good-names=i,j,k,ex,Run,_,log,cfg,id
# Include a hint for the correct naming format with invalid-name
include-naming-hint=no
# Naming hint for inline iteration names
inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
# Regular expression matching correct inline iteration names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
# Naming hint for method names
method-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$
# Regular expression matching correct method names
method-rgx=(([a-z][a-zA-Z0-9_]{2,30})|(_[a-z0-9_]*))$
# Naming hint for module names
module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
# Regular expression matching correct module names
#module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
module-rgx=([a-z_][a-z0-9_]*)$
# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=
# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_
# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
property-classes=abc.abstractproperty
# Naming hint for variable names
variable-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$
# Regular expression matching correct variable names
variable-rgx=(([a-z][a-zA-Z0-9_]{2,30})|(_[a-z0-9_]*)|([a-z]))$
[FORMAT]
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
# Maximum number of characters on a single line.
max-line-length=120
# Maximum number of lines in a module
max-module-lines=2000
# List of optional constructs for which whitespace checking is disabled. `dict-
# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
# `trailing-comma` allows a space between comma and closing bracket: (a, ).
# `empty-line` allows space-only lines.
no-space-check=trailing-comma,dict-separator
# Allow the body of a class to be on the same line as the declaration if body
# contains single statement.
single-line-class-stmt=no
# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no
[LOGGING]
# Logging modules to check that the string format arguments are in logging
# function parameter format
logging-modules=logging
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,XXX,TODO
[SIMILARITIES]
# Ignore comments when computing similarities.
ignore-comments=yes
# Ignore docstrings when computing similarities.
ignore-docstrings=yes
# Ignore imports when computing similarities.
ignore-imports=no
# Minimum lines number of a similarity.
min-similarity-lines=4
[SPELLING]
# Spelling dictionary name. Available dictionaries: none. To make it work,
# install the python-enchant package.
spelling-dict=
# List of comma separated words that should not be checked.
spelling-ignore-words=
# A path to a file that contains private dictionary; one word per line.
spelling-private-dict-file=
# Tells whether to store unknown words to indicated private dictionary in
# --spelling-private-dict-file option instead of raising a message.
spelling-store-unknown-words=no
[TYPECHECK]
# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=
# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
ignore-mixin-members=yes
# This flag controls whether pylint should warn about no-member and similar
# checks whenever an opaque object is returned when inferring. The inference
# can return multiple potential results while evaluating a Python object, but
# some branches might not be evaluated, which results in partial inference. In
# that case, it might be useful to still emit no-member and other checks for
# the rest of the inferred objects.
ignore-on-opaque-inference=yes
# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis. It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=
# Show a hint with possible names when a member name was not found. The aspect
# of finding the hint is based on edit distance.
missing-member-hint=yes
# The minimum edit distance a name should have in order to be considered a
# similar match for a missing member name.
missing-member-hint-distance=1
# The total number of similar names that should be taken in consideration when
# showing a hint for a missing member.
missing-member-max-choices=1
[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
additional-builtins=
# Tells whether unused global variables should be treated as a violation.
allow-global-unused-variables=yes
# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,_cb
# A regular expression matching the name of dummy variables (i.e. expectedly
# not used).
dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
# Argument names that match this expression will be ignored. Defaults to names
# with a leading underscore.
ignored-argument-names=_.*|^ignored_|^unused_
# Tells whether we should check for unused import in __init__ files.
init-import=no
# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six.moves,future.builtins
[CLASSES]
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,__new__,setUp
# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,_fields,_replace,_source,_make
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls
# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs
[DESIGN]
# Maximum number of arguments for function / method
max-args=8
# Maximum number of attributes for a class (see R0902).
max-attributes=20
# Maximum number of boolean expressions in a if statement
max-bool-expr=5
# Maximum number of branch for function / method body
max-branches=12
# Maximum number of locals for function / method body
max-locals=20
# Maximum number of parents for a class (see R0901).
max-parents=7
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
# Maximum number of return / yield for function / method body
max-returns=6
# Maximum number of statements in function / method body
max-statements=50
# Minimum number of public methods for a class (see R0903).
min-public-methods=2
[IMPORTS]
# Allow wildcard imports from modules that define __all__.
allow-wildcard-with-all=no
# Analyse import fallback blocks. This can be used to support both Python 2 and
# 3 compatible code, which means that the block might have code that exists
# only in one or another interpreter, leading to false positives when analysed.
analyse-fallback-blocks=no
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=optparse,tkinter.tix
# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=
# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled)
import-graph=
# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=
# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=
# Force import order to recognize a module as part of a third party library.
known-third-party=enchant
[EXCEPTIONS]
# Exceptions that will emit a warning when being caught. Defaults to
# "Exception"
overgeneral-exceptions=Exception

AUTHORS

@ -1,4 +1,4 @@
-Searx was created by Adam Tauber and is maintained by Adam Tauber, Alexandre Flament and Noémi Ványi.
+Searx was created by Adam Tauber and is maintained by Adam Tauber, Alexandre Flament, Noémi Ványi, @pofilo and Markus Heiser.
 Major contributing authors:
@ -9,6 +9,8 @@ Major contributing authors:
 - @Cqoicebordel
 - Noémi Ványi
 - Marc Abonce Seguin @a01200356
+- @pofilo
+- Markus Heiser @return42
 People who have submitted patches/translations, reported bugs, consulted features or
 generally made searx better:

Dockerfile

@ -1,32 +1,36 @@
 FROM alpine:3.10
+ENTRYPOINT ["/sbin/tini","--","/usr/local/searx/dockerfiles/docker-entrypoint.sh"]
+EXPOSE 8080
+VOLUME /etc/searx
+VOLUME /var/log/uwsgi
-ARG VERSION_GITCOMMIT=unknow
+ARG VERSION_GITCOMMIT=unknown
-ARG SEARX_GIT_VERSION=unknow
+ARG SEARX_GIT_VERSION=unknown
-ARG SEARX_GID=1000
+ARG SEARX_GID=977
-ARG SEARX_UID=1000
+ARG SEARX_UID=977
+RUN addgroup -g ${SEARX_GID} searx && \
+    adduser -u ${SEARX_UID} -D -h /usr/local/searx -s /bin/sh -G searx searx
 ARG TIMESTAMP_SETTINGS=0
 ARG TIMESTAMP_UWSGI=0
 ARG LABEL_VCS_REF=
 ARG LABEL_VCS_URL=
-ENV BASE_URL= \
+ENV INSTANCE_NAME=searx \
+    AUTOCOMPLETE= \
+    BASE_URL= \
     MORTY_KEY= \
     MORTY_URL=
-EXPOSE 8080
-VOLUME /etc/searx
-VOLUME /var/log/uwsgi
 WORKDIR /usr/local/searx
-RUN addgroup -g ${SEARX_GID} searx && \
-    adduser -u ${SEARX_UID} -D -h /usr/local/searx -s /bin/sh -G searx searx
 COPY requirements.txt ./requirements.txt
-RUN apk -U upgrade \
-    && apk add -t build-dependencies \
+RUN apk upgrade --no-cache \
+    && apk add --no-cache -t build-dependencies \
        build-base \
        py3-setuptools \
        python3-dev \
@ -36,7 +40,7 @@ RUN apk -U upgrade \
        openssl-dev \
        tar \
        git \
-    && apk add \
+    && apk add --no-cache \
        ca-certificates \
        su-exec \
        python3 \
@ -48,8 +52,7 @@ RUN apk -U upgrade \
        uwsgi-python3 \
     && pip3 install --upgrade pip \
     && pip3 install --no-cache -r requirements.txt \
-    && apk del build-dependencies \
-    && rm -f /var/cache/apk/*
+    && apk del build-dependencies
 COPY --chown=searx:searx . .
@ -60,7 +63,6 @@ RUN su searx -c "/usr/bin/python3 -m compileall -q searx"; \
echo "VERSION_STRING = VERSION_STRING + \"-$VERSION_GITCOMMIT\"" >> /usr/local/searx/searx/version.py; \ echo "VERSION_STRING = VERSION_STRING + \"-$VERSION_GITCOMMIT\"" >> /usr/local/searx/searx/version.py; \
fi fi
ENTRYPOINT ["/sbin/tini","--","/usr/local/searx/dockerfiles/docker-entrypoint.sh"]
# Keep this argument at the end since it change each time # Keep this argument at the end since it change each time
ARG LABEL_DATE= ARG LABEL_DATE=
@ -69,7 +71,7 @@ LABEL maintainer="searx <https://github.com/asciimoo/searx>" \
       version="${SEARX_GIT_VERSION}" \
       org.label-schema.schema-version="1.0" \
       org.label-schema.name="searx" \
-      org.label-schema.schema-version="${SEARX_GIT_VERSION}" \
+      org.label-schema.version="${SEARX_GIT_VERSION}" \
       org.label-schema.url="${LABEL_VCS_URL}" \
       org.label-schema.vcs-ref=${LABEL_VCS_REF} \
       org.label-schema.vcs-url=${LABEL_VCS_URL} \

Makefile Normal file

@ -0,0 +1,89 @@
# -*- coding: utf-8; mode: makefile-gmake -*-
export GIT_URL=https://github.com/asciimoo/searx
export SEARX_URL=https://searx.me
export DOCS_URL=https://asciimoo.github.io/searx
PYOBJECTS = searx
DOC = docs
PY_SETUP_EXTRAS ?= \[test\]
PYDIST=./dist/py
PYBUILD=./build/py
include utils/makefile.include
include utils/makefile.python
include utils/makefile.sphinx
all: clean install

PHONY += help
help:
	@echo  '  test      - run developer tests'
	@echo  '  docs      - build documentation'
	@echo  '  docs-live - autobuild HTML documentation while editing'
	@echo  '  run       - run developer instance'
	@echo  '  install   - developer install (./local)'
	@echo  '  uninstall - uninstall (./local)'
	@echo  '  gh-pages  - build docs & deploy on gh-pages branch'
	@echo  '  clean     - drop builds and environments'
	@echo  ''
	@$(MAKE) -s -f utils/makefile.include make-help
	@echo  ''
	@$(MAKE) -s -f utils/makefile.python python-help

PHONY += install
install: pyenvinstall

PHONY += uninstall
uninstall: pyenvuninstall

PHONY += clean
clean: pyclean
	$(call cmd,common_clean)

PHONY += run
run: pyenvinstall
	$(Q) ( \
	sed -i -e "s/debug : False/debug : True/g" ./searx/settings.yml ; \
	sleep 2 ; \
	xdg-open http://127.0.0.1:8888/ ; \
	sleep 3 ; \
	sed -i -e "s/debug : True/debug : False/g" ./searx/settings.yml ; \
	) &
	$(PY_ENV)/bin/python ./searx/webapp.py

# docs
# ----

PHONY += docs
docs: pyenvinstall sphinx-doc
	$(call cmd,sphinx,html,docs,docs)

PHONY += docs-live
docs-live: pyenvinstall sphinx-live
	$(call cmd,sphinx_autobuild,html,docs,docs)

$(GH_PAGES)::
	@echo "doc available at --> $(DOCS_URL)"

# test
# ----

PHONY += test test.pylint test.pep8 test.unit test.robot

# TODO: balance linting with pylint
test: test.pep8 test.unit test.robot
	- make pylint

test.pep8: pyenvinstall
	$(PY_ENV_ACT); ./manage.sh pep8_check

test.unit: pyenvinstall
	$(PY_ENV_ACT); ./manage.sh unit_tests

test.robot: pyenvinstall
	$(PY_ENV_ACT); ./manage.sh install_geckodriver
	$(PY_ENV_ACT); ./manage.sh robot_tests

.PHONY: $(PHONY)

README.rst

@ -23,13 +23,13 @@ Go to the `searx-docker <https://github.com/searx/searx-docker>`__ project.
 Without Docker
 ------
-For all the details, follow this `step by step installation <https://asciimoo.github.io/searx/dev/install/installation.html>`__.
+For all of the details, follow this `step by step installation <https://asciimoo.github.io/searx/dev/install/installation.html>`__.
 Note: the documentation needs to be updated.
-If you are in hurry
+If you are in a hurry
 ------
-- clone source:
+- clone the source:
 ``git clone https://github.com/asciimoo/searx.git && cd searx``
 - install dependencies: ``./manage.sh update_packages``
 - edit your

dockerfiles/docker-entrypoint.sh

@ -29,6 +29,8 @@ do
printf " -f Always update on the configuration files (existing files are renamed with the .old suffix)\n" printf " -f Always update on the configuration files (existing files are renamed with the .old suffix)\n"
printf " Without this option, new configuration files are copied with the .new suffix\n" printf " Without this option, new configuration files are copied with the .new suffix\n"
printf "\nEnvironment variables:\n\n" printf "\nEnvironment variables:\n\n"
printf " INSTANCE_NAME settings.yml : general.instance_name\n"
printf " AUTOCOMPLETE settings.yml : search.autocomplete\n"
printf " BASE_URL settings.yml : server.base_url\n" printf " BASE_URL settings.yml : server.base_url\n"
printf " MORTY_URL settings.yml : result_proxy.url\n" printf " MORTY_URL settings.yml : result_proxy.url\n"
printf " MORTY_KEY settings.yml : result_proxy.key\n" printf " MORTY_KEY settings.yml : result_proxy.key\n"
@ -53,6 +55,8 @@ patch_searx_settings() {
     # update settings.yml
     sed -i -e "s|base_url : False|base_url : ${BASE_URL}|g" \
+        -e "s/instance_name : \"searx\"/instance_name : \"${INSTANCE_NAME}\"/g" \
+        -e "s/autocomplete : \"\"/autocomplete : \"${AUTOCOMPLETE}\"/g" \
         -e "s/ultrasecretkey/$(openssl rand -hex 32)/g" \
         "${CONF}"
@ -71,7 +75,7 @@ EOF
 }
 update_conf() {
-    FORCE_CONF_UPDATE="$1"
+    FORCE_CONF_UPDATE=$1
     CONF="$2"
     NEW_CONF="${2}.new"
     OLD_CONF="${2}.old"
@ -81,7 +85,7 @@ update_conf() {
     if [ -f "${CONF}" ]; then
         if [ "${REF_CONF}" -nt "${CONF}" ]; then
             # There is a new version
-            if [ $FORCE_CONF_UPDATE ]; then
+            if [ $FORCE_CONF_UPDATE -ne 0 ]; then
                 # Replace the current configuration
                 printf '⚠️  Automatically update %s to the new version\n' "${CONF}"
                 if [ ! -f "${OLD_CONF}" ]; then
@ -107,7 +111,7 @@ update_conf() {
 }
 # make sure there are uwsgi settings
-update_conf "${FORCE_CONF_UPDATE}" "${UWSGI_SETTINGS_PATH}" "/usr/local/searx/dockerfiles/uwsgi.ini" "patch_uwsgi_settings"
+update_conf ${FORCE_CONF_UPDATE} "${UWSGI_SETTINGS_PATH}" "/usr/local/searx/dockerfiles/uwsgi.ini" "patch_uwsgi_settings"
 # make sure there are searx settings
 update_conf "${FORCE_CONF_UPDATE}" "${SEARX_SETTINGS_PATH}" "/usr/local/searx/searx/settings.yml" "patch_searx_settings"

docs/_themes/searx/static/searx.css vendored Normal file

@ -0,0 +1,130 @@
@import url("pocoo.css");
a, a.reference, a.footnote-reference {
color: #004b6b;
border-color: #004b6b;
}
a:hover {
color: #6d4100;
border-color: #6d4100;
}
p.version-warning {
background-color: #004b6b;
}
div.sidebar {
background-color: whitesmoke;
border-color: lightsteelblue;
border-radius: 3pt;
}
p.sidebar-title, .sidebar p {
margin: 6pt;
}
.sidebar li,
.hlist li {
list-style-type: disclosure-closed;
}
/* admonitions
*/
div.admonition, div.topic {
background-color: #fafafa;
margin: 8px 0px;
padding: 1em;
border-radius: 3pt 0 0 3pt;
border-top: none;
border-right: none;
border-bottom: none;
border-left: 5pt solid #ccc;
}
p.admonition-title:after {
content: none;
}
.admonition.hint { border-color: #416dc0b0; }
.admonition.note { border-color: #6c856cb0; }
.admonition.tip { border-color: #85c5c2b0; }
.admonition.attention { border-color: #ecec97b0; }
.admonition.caution { border-color: #a6c677b0; }
.admonition.danger { border-color: #d46262b0; }
.admonition.important { border-color: #dfa3a3b0; }
.admonition.error { border-color: red; }
.admonition.warning { border-color: darkred; }
.admonition.admonition-generic-admonition-title {
border-color: #416dc0b0;
}
/* admonitions with (rendered) reST markup examples (:class: rst-example)
*
* .. admonition:: title of the example
* :class: rst-example
* ....
*/
div.rst-example {
background-color: inherit;
margin: 0;
border-top: none;
border-right: 1px solid #ccc;
border-bottom: none;
border-left: none;
border-radius: 0;
padding: 0;
}
div.rst-example > p.admonition-title {
font-family: sans-serif;
font-style: italic;
font-size: 0.8em;
display: block;
border-bottom: 1px solid #ccc;
padding: 0.5em 1em;
text-align: right;
}
/* code block in figures
*/
div.highlight pre {
text-align: left;
}
/* Table theme
*/
thead, tfoot {
background-color: #fff;
}
th:hover, td:hover {
background-color: #ffc;
}
thead th, tfoot th, tfoot td, tbody th {
background-color: #fffaef;
}
tbody tr:nth-child(odd) {
background-color: #fff;
}
tbody tr:nth-child(even) {
background-color: #fafafa;
}
caption {
font-family: sans-serif;
padding: 0.5em;
margin: 0.5em 0 0.5em 0;
caption-side: top;
text-align: left;
}

docs/_themes/searx/theme.conf vendored Normal file

@ -0,0 +1,6 @@
[theme]
inherit = pocoo
stylesheet = searx.css
[options]
touch_icon =

docs/admin/api.rst Normal file

@ -0,0 +1,96 @@
.. _adminapi:
==================
Administration API
==================
Get configuration data
======================
.. code:: http
GET /config HTTP/1.1
Sample response
---------------
.. code:: json
{
"autocomplete": "",
"categories": [
"map",
"it",
"images",
],
"default_locale": "",
"default_theme": "oscar",
"engines": [
{
"categories": [
"map"
],
"enabled": true,
"name": "openstreetmap",
"shortcut": "osm"
},
{
"categories": [
"it"
],
"enabled": true,
"name": "arch linux wiki",
"shortcut": "al"
},
{
"categories": [
"images"
],
"enabled": true,
"name": "google images",
"shortcut": "goi"
},
{
"categories": [
"it"
],
"enabled": false,
"name": "bitbucket",
"shortcut": "bb"
},
],
"instance_name": "searx",
"locales": {
"de": "Deutsch (German)",
"en": "English",
"eo": "Esperanto (Esperanto)",
},
"plugins": [
{
"enabled": true,
"name": "HTTPS rewrite"
},
{
"enabled": false,
"name": "Vim-like hotkeys"
}
],
"safe_search": 0
}
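
For scripted access the endpoint behaves like any JSON API. A minimal sketch
using Python's requests package (the instance URL is an assumption, adjust it
to your setup):

.. code:: python

   import requests

   # fetch the configuration of a (hypothetical) local searx instance
   resp = requests.get("http://127.0.0.1:8888/config")
   resp.raise_for_status()
   config = resp.json()

   # list the enabled engines and their shortcuts
   for engine in config["engines"]:
       if engine["enabled"]:
           print(engine["name"], "->", engine["shortcut"])
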
Embed search bar
================
The search bar can be embedded into websites. Just paste the example into the
HTML of the site. The URL of the searx instance and the values are customizable.
.. code:: html
<form method="post" action="https://searx.me/">
<!-- search --> <input type="text" name="q" />
<!-- categories --> <input type="hidden" name="categories" value="general,social media" />
<!-- language --> <input type="hidden" name="lang" value="all" />
<!-- locale --> <input type="hidden" name="locale" value="en" />
<!-- date filter --> <input type="hidden" name="time_range" value="month" />
</form>
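
The same query can also be issued programmatically; a minimal sketch with
Python's requests package (the instance URL and the values are just examples):

.. code:: python

   import requests

   # submit the same fields the embedded form above would send
   resp = requests.post("https://searx.me/", data={
       "q": "privacy",                        # search terms
       "categories": "general,social media",  # categories
       "lang": "all",                         # language
       "locale": "en",                        # locale
       "time_range": "month",                 # date filter
   })
   print(resp.status_code)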

docs/admin/arch_public.dot Normal file

@ -0,0 +1,33 @@
digraph G {
node [style=filled, shape=box, fillcolor="#ffffcc", fontname="Sans"];
edge [fontname="Sans"];
browser [label="Browser", shape=Mdiamond];
rp [label="Reverse Proxy", href="url to configure reverse proxy"];
filtron [label="Filtron", href="https://github.com/asciimoo/filtron"];
morty [label="Morty", href="https://github.com/asciimoo/morty"];
static [label="Static files", href="url to configure static files"];
uwsgi [label="uwsgi", href="url to configure uwsgi"]
searx1 [label="Searx #1"];
searx2 [label="Searx #2"];
searx3 [label="Searx #3"];
searx4 [label="Searx #4"];
browser -> rp [label="HTTPS"]
subgraph cluster_searx {
label = "Searx instance" fontname="Sans";
bgcolor="#fafafa";
{ rank=same; static rp };
rp -> morty [label="optional: images and HTML pages proxy"];
rp -> static [label="optional: reverse proxy serves directly static files"];
rp -> filtron [label="HTTP"];
filtron -> uwsgi [label="HTTP"];
uwsgi -> searx1;
uwsgi -> searx2;
uwsgi -> searx3;
uwsgi -> searx4;
}
}

docs/admin/architecture.rst Normal file

@ -0,0 +1,24 @@
.. _architecture:
============
Architecture
============
.. sidebar:: Needs work!
This article needs some work / Searx is a collaborative effort. If you have
any contribution, feel welcome to send us your :pull:`PR <../pulls>`, see
:ref:`how to contribute`.
Herein you will find some hints and suggestions about typical architectures of
searx infrastructures.
We start with a contribution from :pull:`@dalf <1776#issuecomment-567917320>`.
It shows a *reference* setup for public searx instances.
.. _arch public:
.. kernel-figure:: arch_public.dot
:alt: arch_public.dot
Reference architecture of a public searx setup.

docs/admin/buildhosts.rst Normal file

@ -0,0 +1,103 @@
.. _buildhosts:
==========
Buildhosts
==========
.. sidebar:: This article needs some work
If you have any contribution send us your :pull:`PR <../pulls>`, see
:ref:`how to contribute`.
To get the best results from the build, it is recommended to install
additional packages on the build hosts.
.. _docs build:
Build docs
==========
.. _Graphviz: https://graphviz.gitlab.io
.. _ImageMagick: https://www.imagemagick.org
.. _XeTeX: https://tug.org/xetex/
.. _dvisvgm: https://dvisvgm.de/
.. sidebar:: Sphinx build needs
- ImageMagick_
- Graphviz_
- XeTeX_
- dvisvgm_
Most of the sphinx requirements are installed from :origin:`setup.py` and the
docs can be built from scratch with ``make docs``. For better math and image
processing, additional packages are needed. XeTeX_ is needed not only for PDF
creation, it is also needed for :ref:`math` when HTML output is built.
To be able to do :ref:`sphinx:math-support` without CDNs, the math is rendered
as images (``sphinx.ext.imgmath`` extension). If your docs build (``make
docs``) shows warnings like this::
WARNING: dot(1) not found, for better output quality install \
graphviz from http://www.graphviz.org
..
WARNING: LaTeX command 'latex' cannot be run (needed for math \
display), check the imgmath_latex setting
you need to install additional packages on your build host, to get better HTML
output.
.. _system requirements:
.. tabs::
.. group-tab:: Ubuntu / debian
.. code-block:: sh
$ sudo apt install graphviz imagemagick texlive-xetex librsvg2-bin
.. group-tab:: Arch Linux
.. code-block:: sh
$ sudo pacman -S graphviz imagemagick texlive-bin extra/librsvg
.. group-tab:: Fedora / RHEL
.. code-block:: sh
$ sudo dnf install graphviz graphviz-gd texlive-xetex-bin librsvg2-tools
For PDF output you also need:
.. tabs::
.. group-tab:: Ubuntu / debian
.. code:: sh
$ sudo apt install texlive-latex-recommended texlive-extra-utils ttf-dejavu
.. group-tab:: Arch Linux
.. code:: sh
$ sudo pacman -S texlive-core texlive-latexextra ttf-dejavu
.. group-tab:: Fedora / RHEL
.. code:: sh
$ sudo dnf install \
texlive-collection-fontsrecommended texlive-collection-latex \
dejavu-sans-fonts dejavu-serif-fonts dejavu-sans-mono-fonts
.. _system requirements END:
.. literalinclude:: ../conf.py
:language: python
:start-after: # sphinx.ext.imgmath setup
:end-before: # sphinx.ext.imgmath setup END

docs/admin/engines.rst Normal file

@ -0,0 +1,68 @@
.. _engines generic:
=======
engines
=======
.. sidebar:: Further reading ..
- :ref:`engines generic`
- :ref:`configured engines`
- :ref:`engine settings`
- :ref:`engine file`
============= =========== ==================== ============
:ref:`engine settings`    :ref:`engine file`
------------------------- ---------------------------------
Name (cfg)                Categories
------------------------- ---------------------------------
Engine        ..          Paging support       **P**
------------------------- -------------------- ------------
Shortcut      **S**       Language support     **L**
Timeout       **TO**      Time range support   **TR**
Disabled      **D**       Offline              **O**
------------- ----------- -------------------- ------------
Suspend end   **SE**
------------- ----------- ---------------------------------
Safe search   **SS**
============= =========== =================================
Configuration defaults (at build time):
.. _configured engines:
.. jinja:: webapp
.. flat-table:: Engines configured at build time (defaults)
:header-rows: 1
:stub-columns: 2
* - Name (cfg)
- S
- Engine
- TO
- Categories
- P
- L
- SS
- D
- TR
- O
- SE
{% for name, mod in engines.items() %}
* - {{name}}
- !{{mod.shortcut}}
- {{mod.__name__}}
- {{mod.timeout}}
- {{", ".join(mod.categories)}}
- {{(mod.paging and "y") or ""}}
- {{(mod.language_support and "y") or ""}}
- {{(mod.safesearch and "y") or ""}}
- {{(mod.disabled and "y") or ""}}
- {{(mod.time_range_support and "y") or ""}}
- {{(mod.offline and "y") or ""}}
- {{mod.suspend_end_time}}
{% endfor %}

docs/admin/filtron.rst Normal file

@ -0,0 +1,148 @@
==========================
How to protect an instance
==========================
Searx depends on external search services. To avoid the abuse of these services
it is advised to limit the number of requests processed by searx.
An application firewall, ``filtron`` solves exactly this problem. Information
on how to install it can be found at the `project page of filtron
<https://github.com/asciimoo/filtron>`__.
Sample configuration of filtron
===============================
An example configuration can be found below. This configuration limits the
access of:

- scripts or applications (roboagent limit)
- webcrawlers (botlimit)
- IPs which send too many requests (IP limit)
- too many json, csv, etc. requests (rss/json limit)
- too many requests from the same User-Agent (useragent limit)
.. code:: json
[{
"name":"search request",
"filters":[
"Param:q",
"Path=^(/|/search)$"
],
"interval":"<time-interval-in-sec (int)>",
"limit":"<max-request-number-in-interval (int)>",
"subrules":[
{
"name":"roboagent limit",
"interval":"<time-interval-in-sec (int)>",
"limit":"<max-request-number-in-interval (int)>",
"filters":[
"Header:User-Agent=(curl|cURL|Wget|python-requests|Scrapy|FeedFetcher|Go-http-client)"
],
"actions":[
{
"name":"block",
"params":{
"message":"Rate limit exceeded"
}
}
]
},
{
"name":"botlimit",
"limit":0,
"stop":true,
"filters":[
"Header:User-Agent=(Googlebot|bingbot|Baiduspider|yacybot|YandexMobileBot|YandexBot|Yahoo! Slurp|MJ12bot|AhrefsBot|archive.org_bot|msnbot|MJ12bot|SeznamBot|linkdexbot|Netvibes|SMTBot|zgrab|James BOT)"
],
"actions":[
{
"name":"block",
"params":{
"message":"Rate limit exceeded"
}
}
]
},
{
"name":"IP limit",
"interval":"<time-interval-in-sec (int)>",
"limit":"<max-request-number-in-interval (int)>",
"stop":true,
"aggregations":[
"Header:X-Forwarded-For"
],
"actions":[
{
"name":"block",
"params":{
"message":"Rate limit exceeded"
}
}
]
},
{
"name":"rss/json limit",
"interval":"<time-interval-in-sec (int)>",
"limit":"<max-request-number-in-interval (int)>",
"stop":true,
"filters":[
"Param:format=(csv|json|rss)"
],
"actions":[
{
"name":"block",
"params":{
"message":"Rate limit exceeded"
}
}
]
},
{
"name":"useragent limit",
"interval":"<time-interval-in-sec (int)>",
"limit":"<max-request-number-in-interval (int)>",
"aggregations":[
"Header:User-Agent"
],
"actions":[
{
"name":"block",
"params":{
"message":"Rate limit exceeded"
}
}
]
}
]
}]
Route request through filtron
=============================
Filtron can be started using the following command:
.. code:: sh
$ filtron -rules rules.json
It listens on ``127.0.0.1:4004`` and forwards filtered requests to
``127.0.0.1:8888`` by default.
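
A quick way to verify the chain is to send a request to filtron directly and
check that it reaches searx. A small sketch in Python (ports as above; the
query parameter ``q`` matches the ``Param:q`` filter of the sample rules):

.. code:: python

   import requests

   # request via filtron (port 4004); unfiltered requests are answered by
   # the searx instance listening on port 8888 behind it
   resp = requests.get("http://127.0.0.1:4004/search", params={"q": "test"})
   # 200 while below the limits; the configured block action answers otherwise
   print(resp.status_code)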
Use it along with ``nginx`` with the following example configuration.
.. code:: nginx
location / {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_pass http://127.0.0.1:4004/;
}
Requests come in on port 4004, pass through filtron, and are then forwarded to
port 8888, where searx is running.

docs/admin/index.rst Normal file

@ -0,0 +1,15 @@
===========================
Administrator documentation
===========================
.. toctree::
:maxdepth: 1
installation
api
architecture
filtron
morty
engines
plugins
buildhosts

docs/admin/installation.rst Normal file

@ -0,0 +1,341 @@
.. _installation:
============
Installation
============
.. contents::
:depth: 3
Basic installation
==================
Step by step installation for Debian/Ubuntu with virtualenv. For Ubuntu, be
sure to have enabled the universe repository.
Install packages:
.. code:: sh
$ sudo -H apt-get install \
git build-essential libxslt-dev \
python-dev python-virtualenv python-babel \
zlib1g-dev libffi-dev libssl-dev
Install searx:
.. code:: sh
cd /usr/local
sudo -H git clone https://github.com/asciimoo/searx.git
sudo -H useradd searx -d /usr/local/searx
sudo -H chown searx:searx -R /usr/local/searx
Install dependencies in a virtualenv:
.. code:: sh
cd /usr/local/searx
sudo -H -u searx -i
.. code:: sh
(searx)$ virtualenv searx-ve
(searx)$ . ./searx-ve/bin/activate
(searx)$ ./manage.sh update_packages
Configuration
==============
.. code:: sh
sed -i -e "s/ultrasecretkey/`openssl rand -hex 16`/g" searx/settings.yml
Edit searx/settings.yml if necessary.
Check
=====
Start searx:
.. code:: sh
python searx/webapp.py
Go to http://localhost:8888
If everything works fine, disable the debug option in settings.yml:
.. code:: sh
sed -i -e "s/debug : True/debug : False/g" searx/settings.yml
At this point searx is not daemonized; uwsgi allows this.
You can exit the virtualenv and the searx user bash (enter the exit command
twice).
uwsgi
=====
Install packages:
.. code:: sh
sudo -H apt-get install \
uwsgi uwsgi-plugin-python
Create the configuration file ``/etc/uwsgi/apps-available/searx.ini`` with this
content:
.. code:: ini
[uwsgi]
# Who will run the code
uid = searx
gid = searx
# disable logging for privacy
disable-logging = true
# Number of workers (usually CPU count)
workers = 4
# The right granted on the created socket
chmod-socket = 666
# Plugin to use and interpreter config
single-interpreter = true
master = true
plugin = python
lazy-apps = true
enable-threads = true
# Module to import
module = searx.webapp
# Virtualenv and python path
virtualenv = /usr/local/searx/searx-ve/
pythonpath = /usr/local/searx/
chdir = /usr/local/searx/searx/
Activate the uwsgi application and restart:
.. code:: sh
cd /etc/uwsgi/apps-enabled
ln -s ../apps-available/searx.ini
/etc/init.d/uwsgi restart
Web server
==========
with nginx
----------
If nginx is not installed yet, install it (uwsgi will not work with the
package nginx-light):
.. code:: sh
sudo -H apt-get install nginx
Hosted at /
~~~~~~~~~~~
Create the configuration file ``/etc/nginx/sites-available/searx`` with this
content:
.. code:: nginx
server {
listen 80;
server_name searx.example.com;
root /usr/local/searx;
location / {
include uwsgi_params;
uwsgi_pass unix:/run/uwsgi/app/searx/socket;
}
}
Create a symlink to sites-enabled:
.. code:: sh
sudo -H ln -s /etc/nginx/sites-available/searx /etc/nginx/sites-enabled/searx
Restart service:
.. code:: sh
sudo -H service nginx restart
sudo -H service uwsgi restart
from subdirectory URL (/searx)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Add this configuration in the server config file
``/etc/nginx/sites-enabled/default``:
.. code:: nginx
location = /searx { rewrite ^ /searx/; }
location /searx {
try_files $uri @searx;
}
location @searx {
uwsgi_param SCRIPT_NAME /searx;
include uwsgi_params;
uwsgi_modifier1 30;
uwsgi_pass unix:/run/uwsgi/app/searx/socket;
}
**OR** use a reverse proxy (please note that a reverse proxy is only advised
for single-user or low-traffic instances):
.. code:: nginx
location /searx {
proxy_pass http://127.0.0.1:8888;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name /searx;
proxy_buffering off;
}
Enable ``base_url`` in ``searx/settings.yml``
.. code:: yaml
base_url : http://your.domain.tld/searx/
Restart service:
.. code:: sh
sudo -H service nginx restart
sudo -H service uwsgi restart
disable logs
^^^^^^^^^^^^
For better privacy you can disable the nginx logs for searx.
To do so, below ``uwsgi_pass`` in ``/etc/nginx/sites-available/default``
add:
.. code:: nginx
access_log /dev/null;
error_log /dev/null;
Restart service:
.. code:: sh
sudo -H service nginx restart
with apache
-----------
Add wsgi mod:
.. code:: sh
sudo -H apt-get install libapache2-mod-uwsgi
sudo -H a2enmod uwsgi
Add this configuration in the file ``/etc/apache2/apache2.conf``:
.. code:: apache
<Location />
Options FollowSymLinks Indexes
SetHandler uwsgi-handler
uWSGISocket /run/uwsgi/app/searx/socket
</Location>
Note that if your instance of searx is not at the root, you should replace
``<Location />`` with the location of your instance, like ``<Location /searx>``.
Restart Apache:
.. code:: sh
sudo -H /etc/init.d/apache2 restart
disable logs
~~~~~~~~~~~~
For better privacy you can disable Apache logs.
.. warning::
You can only disable logs for the whole (virtual) server not for a specific
path.
Go back to ``/etc/apache2/apache2.conf`` and above ``<Location />`` add:
.. code:: apache
CustomLog /dev/null combined
Restart Apache:
.. code:: sh
sudo -H /etc/init.d/apache2 restart
How to update
=============
.. code:: sh
cd /usr/local/searx
sudo -H -u searx -i
.. code:: sh
(searx)$ . ./searx-ve/bin/activate
(searx)$ git stash
(searx)$ git pull origin master
(searx)$ git stash apply
(searx)$ ./manage.sh update_packages
.. code:: sh
sudo -H service uwsgi restart
Docker
======
Make sure you have installed Docker. For instance, you can deploy searx like this:
.. code:: sh
docker pull wonderfall/searx
docker run -d --name searx -p $PORT:8888 wonderfall/searx
Go to ``http://localhost:$PORT``.
See https://hub.docker.com/r/wonderfall/searx/ for more information. It's also
possible to build searx from the embedded Dockerfile.
.. code:: sh
git clone https://github.com/asciimoo/searx.git
cd searx
docker build -t whatever/searx .
References
==========
* https://about.okhin.fr/posts/Searx/ with some additions
* How to: `Setup searx in a couple of hours with a free SSL certificate
<https://www.reddit.com/r/privacytoolsIO/comments/366kvn/how_to_setup_your_own_privacy_respecting_search/>`__

docs/admin/morty.rst Normal file

@ -0,0 +1,26 @@
=========================
How to setup result proxy
=========================
.. _morty: https://github.com/asciimoo/morty
.. _morty's README: https://github.com/asciimoo/morty
By default searx can only act as an image proxy for result images, but it is
possible to proxify all the result URLs with an external service, morty_.
To use this feature, morty has to be installed and activated in searx's
``settings.yml``.
Add the following snippet to your ``settings.yml`` and restart searx:
.. code:: yaml
result_proxy:
url : http://127.0.0.1:3000/
key : your_morty_proxy_key
``url``
Is the address of the running morty service.
``key``
Is an optional argument, see `morty's README`_ for more information.

docs/admin/plugins.rst Normal file

@ -0,0 +1,39 @@
.. _plugins generic:
===============
Plugins builtin
===============
.. sidebar:: Further reading ..
- :ref:`dev plugin`
Configuration defaults (at build time):
:DO: Default on
.. _configured plugins:
.. jinja:: webapp
.. flat-table:: Plugins configured at build time (defaults)
:header-rows: 1
:stub-columns: 1
:widths: 3 1 9
* - Name
- DO
- Description
JS & CSS dependencies
{% for plgin in plugins %}
* - {{plgin.name}}
- {{(plgin.default_on and "y") or ""}}
- {{plgin.description}}
{% for dep in (plgin.js_dependencies + plgin.css_dependencies) %}
| ``{{dep}}`` {% endfor %}
{% endfor %}

docs/blog/admin.rst Normal file

@ -0,0 +1,43 @@
=============================================================
Searx admin interface
=============================================================
.. _searx-admin: https://github.com/kvch/searx-admin#searx-admin
.. _NLnet Foundation: https://nlnet.nl/
manage your instance from your browser
.. sidebar:: Installation
Installation guide can be found in the repository of searx-admin_.
One of the biggest advantages of searx is that it is extremely customizable,
but at first it can be daunting to newcomers. One barrier to taking advantage
of this feature is our ugly settings file, which is sometimes hard to
understand and edit.
To make self-hosting searx more accessible a new tool is introduced, called
``searx-admin``. It is a web application which is capable of managing your
instance and manipulating its settings via a web UI. It aims to replace editing
of ``settings.yml`` for less experienced administrators or people who prefer
graphical admin interfaces.
.. figure:: searx-admin-engines.png
:alt: Screenshot of engine list
Configuration page of engines
Since ``searx-admin`` acts as a supervisor for searx, we have decided to
implement it as a standalone tool instead of part of searx. Another reason for
making it a standalone tool is that the codebase and dependencies of searx
should not grow because of a fully optional feature, which does not affect
existing instances.
Acknowledgements
================
This development was sponsored by `NLnet Foundation`_.
| Happy hacking.
| kvch // 2017.08.22 21:25

docs/blog/index.rst Normal file

@ -0,0 +1,10 @@
====
Blog
====
.. toctree::
:maxdepth: 1
python3
admin
intro-offline

docs/blog/intro-offline.rst Normal file

@ -0,0 +1,77 @@
===============================
Preparation for offline engines
===============================
Offline engines
===============
To extend the functionality of searx, offline engines are going to be
introduced. An offline engine is an engine which does not need an Internet
connection to perform a search and does not use HTTP to communicate.
Offline engines can be configured like online engines, by adding them to the
`engines` list of :origin:`settings.yml <searx/settings.yml>`. Thus, searx
finds the engine file and imports it.
Example skeleton for the new engines:
.. code:: python
from subprocess import PIPE, Popen
categories = ['general']
offline = True
def init(settings):
pass
def search(query, params):
process = Popen(['ls', query], stdout=PIPE)
return_code = process.wait()
if return_code != 0:
raise RuntimeError('non-zero return code', return_code)
results = []
line = process.stdout.readline()
while line:
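# parse_line() is an engine specific helper (not shown in this skeleton):
# it turns one line of raw output into a result dict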
result = parse_line(line)
results.append(result)
line = process.stdout.readline()
return results
Development progress
====================
First, a proposal has been created as a Github issue. Then it was moved to the
wiki as a design document. You can read it here: :wiki:`Offline-engines`.
In this development step, searx core was prepared to accept and perform offline
searches. Offline search requests are scheduled together with regular online
requests.
As offline searches can return arbitrary results depending on the engine, the
current result templates were insufficient to present such results. Thus, a new
template is introduced which is capable of presenting arbitrary key-value pairs
as a table. For more details, check out the pull request :pull:`1700`.
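
For illustration, a result for such a key-value template might look like the
sketch below; the field names and the template name are assumptions for this
example, the exact format is described in :pull:`1700`:

.. code:: python

   # hypothetical result of an offline engine: apart from 'template', every
   # key/value pair becomes one row in the rendered result table
   result = {
       'template': 'key-value.html',
       'command': 'ls',
       'returncode': 0,
       'output_lines': 42,
   }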
Next steps
==========
Today, it is possible to create and run an offline engine. However, it is
available to everyone who knows the searx instance. So the next step is to
introduce token-based access for engines. This way administrators are able to
limit the access to private engines.
Acknowledgement
===============
This development was sponsored by `Search and Discovery Fund`_ of `NLnet Foundation`_.
.. _Search and Discovery Fund: https://nlnet.nl/discovery
.. _NLnet Foundation: https://nlnet.nl/
| Happy hacking.
| kvch // 2019.10.21 17:03

docs/blog/python3.rst Normal file

@ -0,0 +1,68 @@
============================
Introducing Python 3 support
============================
.. _Python 2.7 clock: https://pythonclock.org/
.. sidebar:: Python 2.7 to 3 upgrade
   This chapter exists for historical reasons. Python 2.7's release schedule
   ends (`Python 2.7 clock`_) after Python 3 has existed for 11 years.

Most operating systems come with Python 3 installed by default, so it is time
for searx to support Python 3. But don't worry, support for Python 2.7 won't
be dropped.
.. image:: searxpy3.png
:scale: 50 %
:alt: hurray
:align: center
How to run searx using Python 3
===============================
Please make sure that you run at least Python 3.5.
To run searx, first a Python3 virtualenv should be created. After entering the
virtualenv, dependencies must be installed. Then run searx with python3 instead
of the usual python command.
.. code:: sh
virtualenv -p python3 venv3
source venv3/bin/activate
pip3 install -r requirements.txt
python3 searx/webapp.py
If you want to run searx using Python 2.7, you don't have to do anything
differently than before.
Fun facts
=========
- 115 files were changed when implementing the support for both Python versions.
- All of the dependencies were compatible except for robotframework, which was
  used for browser tests. Thus, these tests were migrated to splinter. From now
  on both versions are tested on Travis and can be tested locally.
If you found bugs
=================
Please open an issue on `GitHub`_. Make sure that you mention your Python
version in your issue, so we can investigate it properly.
.. _GitHub: https://github.com/asciimoo/searx/issues
Acknowledgment
==============
This development was sponsored by `NLnet Foundation`_.
.. _NLnet Foundation: https://nlnet.nl/
| Happy hacking.
| kvch // 2017.05.13 22:57

BIN
docs/blog/searx-admin-engines.png Normal file (binary image, 50 KiB, not shown)

BIN
docs/blog/searxpy3.png Normal file (binary image, 30 KiB, not shown)
docs/conf.py Normal file

@ -0,0 +1,114 @@
# -*- coding: utf-8 -*-
import sys, os
from searx.version import VERSION_STRING
from pallets_sphinx_themes import ProjectLink
GIT_URL = os.environ.get("GIT_URL", "https://github.com/asciimoo/searx")
SEARX_URL = os.environ.get("SEARX_URL", "https://searx.me")
DOCS_URL = os.environ.get("DOCS_URL", "https://asciimoo.github.io/searx/")
# Project --------------------------------------------------------------
project = u'searx'
copyright = u'2015-2019, Adam Tauber, Noémi Ványi'
author = u'Adam Tauber'
release, version = VERSION_STRING, VERSION_STRING
highlight_language = 'none'
# General --------------------------------------------------------------
master_doc = "index"
source_suffix = '.rst'
numfig = True
from searx import webapp
jinja_contexts = {
'webapp': dict(**webapp.__dict__)
}
# usage:: lorem :patch:`f373169` ipsum
extlinks = {}
# upstream links
extlinks['wiki'] = ('https://github.com/asciimoo/searx/wiki/%s', ' ')
extlinks['pull'] = ('https://github.com/asciimoo/searx/pull/%s', 'PR ')
# links to custom brand
extlinks['origin'] = (GIT_URL + '/blob/master/%s', 'git://')
extlinks['patch'] = (GIT_URL + '/commit/%s', '#')
extlinks['search'] = (SEARX_URL + '/%s', '#')
extlinks['docs'] = (DOCS_URL + '/%s', 'docs: ')
extlinks['pypi'] = ('https://pypi.org/project/%s', 'PyPi: ')
extlinks['man'] = ('https://manpages.debian.org/jump?q=%s', '')
#extlinks['role'] = (
# 'https://www.sphinx-doc.org/en/master/usage/restructuredtext/roles.html#role-%s', '')
extlinks['duref'] = (
'http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html#%s', '')
extlinks['durole'] = (
'http://docutils.sourceforge.net/docs/ref/rst/roles.html#%s', '')
extlinks['dudir'] = (
'http://docutils.sourceforge.net/docs/ref/rst/directives.html#%s', '')
extlinks['ctan'] = (
'https://ctan.org/pkg/%s', 'CTAN: ')
extensions = [
'sphinx.ext.imgmath',
'sphinx.ext.extlinks',
'sphinx.ext.viewcode',
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"pallets_sphinx_themes",
"sphinx_issues", # https://github.com/sloria/sphinx-issues/blob/master/README.rst
"sphinxcontrib.jinja", # https://github.com/tardyp/sphinx-jinja
'linuxdoc.rstFlatTable', # Implementation of the 'flat-table' reST-directive.
'linuxdoc.kfigure', # Sphinx extension which implements scalable image handling.
"sphinx_tabs.tabs", # https://github.com/djungelorm/sphinx-tabs
]
intersphinx_mapping = {
"python": ("https://docs.python.org/3/", None),
"flask": ("https://flask.palletsprojects.com/", None),
# "werkzeug": ("https://werkzeug.palletsprojects.com/", None),
"jinja": ("https://jinja.palletsprojects.com/", None),
"linuxdoc" : ("https://return42.github.io/linuxdoc/", None),
"sphinx" : ("https://www.sphinx-doc.org/en/master/", None),
}
issues_github_path = "asciimoo/searx"
# HTML -----------------------------------------------------------------
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes']
html_theme = "searx"
# sphinx.ext.imgmath setup
html_math_renderer = 'imgmath'
imgmath_image_format = 'svg'
imgmath_font_size = 14
# sphinx.ext.imgmath setup END
html_theme_options = {"index_sidebar_logo": True}
html_context = {
"project_links": [
ProjectLink("Source", GIT_URL),
ProjectLink("Wiki", "https://github.com/asciimoo/searx/wiki"),
ProjectLink("Public instances", "https://github.com/asciimoo/searx/wiki/Searx-instances"),
ProjectLink("Twitter", "https://twitter.com/Searx_engine"),
]
}
html_sidebars = {
"**": ["project.html", "relations.html", "searchbox.html"],
}
singlehtml_sidebars = {"index": ["project.html", "localtoc.html"]}
html_static_path = ["static"]
html_logo = "static/img/searx_logo_small.png"
html_title = "Searx Documentation ({})".format("Searx-{}.tex".format(VERSION_STRING))
html_show_sourcelink = False
# LaTeX ----------------------------------------------------------------
latex_documents = [
(master_doc, "searx-{}.tex".format(VERSION_STRING), html_title, author, "manual")
]

View File

@ -0,0 +1,180 @@
.. _how to contribute:
=================
How to contribute
=================
Prime directives: Privacy, Hackability
======================================
Searx has two prime directives, **privacy-by-design and hackability** . The
hackability comes in three levels:
- support of search engines
- plugins to alter search behaviour
- hacking searx itself
Note the lack of "world domination" among the directives. Searx has no
intention of wide mass adoption, rounded corners, etc. The prime directive
"privacy" deserves a separate chapter, as it is unfortunately quite uncommon.
Privacy-by-design
-----------------
Searx was born out of the need for a **privacy-respecting** search tool which
can be extended easily to maximize both its search and its privacy-protecting
capabilities.
A few widely used features work differently, are turned off by default, or are
not implemented at all **as a consequence of privacy-by-design**.
If a feature reduces the privacy-preserving aspects of searx, it should be
switched off by default or not implemented at all. There are plenty of search
engines already providing such features. If a feature reduces the protection
of searx, users must be informed about the effect of choosing to enable it.
Features that protect privacy but differ from the expectations of the user
should also be explained.
Also, if you think that something works oddly with searx, it might be because
the tool you use is designed in a way that interferes with privacy.
Submitting a bug report to the vendor of the misbehaving tool might be good
feedback for it to reconsider the disrespect towards its customers (e.g.
``GET`` vs ``POST`` requests in various browsers).
Remember that the other prime directive of searx is to be hackable, so if the
above privacy concerns are not to your liking, simply fork it.
*Happy hacking.*
Code
====
.. _PEP8: https://www.python.org/dev/peps/pep-0008/
.. _Conventional Commits: https://www.conventionalcommits.org/
.. _Git Commit Good Practice: https://wiki.openstack.org/wiki/GitCommitMessages
.. _Structural split of changes:
https://wiki.openstack.org/wiki/GitCommitMessages#Structural_split_of_changes
.. _gitmoji: https://gitmoji.carloscuesta.me/
.. _Semantic PR: https://github.com/zeke/semantic-pull-requests
.. sidebar:: Create good commits!
- `Structural split of changes`_
- `Conventional Commits`_
- `Git Commit Good Practice`_
- some like to use: gitmoji_
- not yet active: `Semantic PR`_
In order to submit a patch, please follow the steps below:
- Follow coding conventions.
- PEP8_ standards apply, except for the convention on line length
- Maximum line length is 120 characters
- The cardinal rule for creating good commits is to ensure there is only one
*logical change* per commit / read `Structural split of changes`_
- Check if your code breaks existing tests. If so, update the tests or fix your
code.
- If your code can be unit-tested, add unit tests.
- Add yourself to the :origin:`AUTHORS.rst` file.
- Choose meaningful commit messages, read `Conventional Commits`_; a sample
message is shown after this list
.. code::
<type>[optional scope]: <description>
[optional body]
[optional footer(s)]
- Create a pull request.
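For example, a commit message following this convention might look like this
(type, scope and description are invented for illustration):

.. code::

   fix(engines): use one-based page numbers in the example engine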
For more help on getting started with searx development, see :ref:`devquickstart`.
Translation
===========
Translation currently takes place on :ref:`transifex <translation>`.
.. caution::
Please do not update translation files in the repo.
.. _contrib docs:
Documentation
=============
.. _Sphinx: http://www.sphinx-doc.org
.. _reST: http://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html
.. sidebar:: The reST sources
have been moved from ``gh-branch`` into ``master`` (:origin:`docs`).
The documentation is built using Sphinx_. So in order to be able to generate
the required files, you have to install it on your system. Much easier: use
our :ref:`makefile`.
Here is an example which makes a complete rebuild:
.. code:: sh
$ make docs-clean docs
...
The HTML pages are in dist/docs.
.. _make docs-live:
live build
----------
.. sidebar:: docs-clean
It is recommended to do a complete rebuild before deploying (use
``docs-clean``).
Live build is like WYSIWYG: if you want to edit the documentation, it is the
recommended way to work. The Makefile target ``docs-live`` builds the docs,
opens the URL in your favorite browser and rebuilds every time a reST file has
been changed.
.. code:: sh
$ make docs-live
...
The HTML pages are in dist/docs.
... Serving on http://0.0.0.0:8080
... Start watching changes
.. _deploy on github.io:
deploy on github.io
-------------------
To deploy documentation at :docs:`github.io <.>` use Makefile target
:ref:`make gh-pages`, which builds the documentation, clones searx into a sub
folder ``gh-pages``, cleans it, copies the doc build into it and runs all the
needed git add, commit and push steps:
.. code:: sh
$ make docs-clean gh-pages
...
SPHINX docs --> file://<...>/dist/docs
The HTML pages are in dist/docs.
...
Cloning into 'gh-pages' ...
...
cd gh-pages; git checkout gh-pages >/dev/null
Switched to a new branch 'gh-pages'
...
doc available at --> https://asciimoo.github.io/searx

6
docs/dev/csv_table.txt Normal file
View File

@ -0,0 +1,6 @@
stub col row 1, column, "loremLorem ipsum dolor sit amet, consetetur sadipscing elitr, sed diam nonumy
eirmod tempor invidunt ut labore et dolore magna aliquyam erat, sed diam
voluptua."
stub col row 1, "At vero eos et accusam et justo duo dolores et ea rebum. Stet clita
kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet.", column
stub col row 1, column, column

View File

@ -0,0 +1,267 @@
.. _engines-dev:
===============
Engine overview
===============
.. _metasearch-engine: https://en.wikipedia.org/wiki/Metasearch_engine
searx is a metasearch-engine_, so it uses different search engines to provide
better results.
Because there is no general search API which could be used for every search
engine, an adapter has to be built between searx and the external search
engines. Adapters are stored under the folder :origin:`searx/engines`.
.. contents::
:depth: 3
:backlinks: entry
general engine configuration
============================
It is required to tell searx the type of results the engine provides. The
arguments can be set in the engine file or in the settings file
(normally ``settings.yml``). The arguments in the settings file override
the ones in the engine file.
It does not matter if an option is stored in the engine file or in the
settings. However, the standard way is the following:
.. _engine file:
engine file
-----------
======================= =========== ===========================================
argument                type        information
======================= =========== ===========================================
categories              list        pages in which the engine is working
paging                  boolean     support multiple pages
language_support        boolean     support language choosing
time_range_support      boolean     support search time range
offline                 boolean     engine runs offline
======================= =========== ===========================================
.. _engine settings:
settings.yml
------------
======================= =========== ===========================================
argument                type        information
======================= =========== ===========================================
name                    string      name of search-engine
engine                  string      name of searx-engine
                                    (filename without ``.py``)
shortcut                string      shortcut of search-engine
timeout                 string      specific timeout for search-engine
======================= =========== ===========================================
overrides
---------
A few of the options have default values in the engine, but are often
overwritten by the settings. If ``None`` is assigned to an option in the engine
file, it has to be redefined in the settings, otherwise searx will not start
with that engine.
The naming of overrides is arbitrary. But the recommended overrides are the
following:
======================= =========== ===========================================
argument                type        information
======================= =========== ===========================================
base_url                string      base-url, can be overwritten to use same
                                    engine on other URL
number_of_results       int         maximum number of results per request
language                string      ISO code of language and country like en_US
api_key                 string      api-key if required by engine
======================= =========== ===========================================
example code
------------
.. code:: python
# engine dependent config
categories = ['general']
paging = True
language_support = True
making a request
================
To perform a search, a URL has to be specified. In addition to specifying the
URL, arguments can be passed to the query.
passed arguments
----------------
These arguments can be used to construct the search query. Furthermore,
parameters with default value can be redefined for special purposes.
====================== ============ ========================================================================
argument               type         default-value, information
====================== ============ ========================================================================
url                    string       ``''``
method                 string       ``'GET'``
headers                set          ``{}``
data                   set          ``{}``
cookies                set          ``{}``
verify                 boolean      ``True``
headers.User-Agent     string       a random User-Agent
category               string       current category, like ``'general'``
started                datetime     current date-time
pageno                 int          current pagenumber
language               string       specific language code like ``'en_US'``, or ``'all'`` if unspecified
====================== ============ ========================================================================
parsed arguments
----------------
The function ``def request(query, params):`` always returns the ``params``
variable. Inside searx, the following parameters can be used to specify a
search request:
============ =========== =========================================================
argument     type        information
============ =========== =========================================================
url          string      requested url
method       string      HTTP request method
headers      set         HTTP header information
data         set         HTTP data information (parsed if ``method != 'GET'``)
cookies      set         HTTP cookies
verify       boolean     performing SSL-validity check
============ =========== =========================================================
example code
------------
.. code:: python
# search-url
base_url = 'https://example.com/'
search_string = 'search?{query}&page={page}'
# do search-request
def request(query, params):
search_path = search_string.format(
query=urlencode({'q': query}),
page=params['pageno'])
params['url'] = base_url + search_path
return params
returned results
================
Searx is able to return results of different media-types. Currently the
following media-types are supported:
- default_
- images_
- videos_
- torrent_
- map_
To set another media-type as default, the parameter ``template`` must be set to
the desired type.
default
-------
========================= =====================================================
result-parameter          information
========================= =====================================================
url                       string, url of the result
title                     string, title of the result
content                   string, general result-text
publishedDate             :py:class:`datetime.datetime`, time of publish
========================= =====================================================
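The sketch below shows how such default results could be produced. It is a
minimal example only: the XPath expressions and the page structure are
invented for illustration and have to be adapted to the concrete search
service.

.. code:: python

   from lxml import html
   from searx.engines.xpath import extract_text


   # get response from search-request
   def response(resp):
       results = []
       dom = html.fromstring(resp.text)

       # hypothetical page structure; adapt the XPath to the real service
       for result in dom.xpath('//div[@class="result"]'):
           link = result.xpath('.//h3/a')[0]
           results.append({'url': link.attrib.get('href'),
                           'title': extract_text(link),
                           'content': extract_text(result.xpath('.//p'))})

       return results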
images
------
To use this template, set the parameters as follows:
========================= =====================================================
result-parameter          information
========================= =====================================================
template                  is set to ``images.html``
url                       string, url to the result site
title                     string, title of the result *(partly implemented)*
content                   *(partly implemented)*
publishedDate             :py:class:`datetime.datetime`,
                          time of publish *(partly implemented)*
img\_src                  string, url to the result image
thumbnail\_src            string, url to a small-preview image
========================= =====================================================
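For instance, an engine delivering image results might append dicts like the
following (all values are placeholders):

.. code:: python

   results.append({'template': 'images.html',
                   'url': 'https://example.org/gallery/42',
                   'title': 'Example image',
                   'img_src': 'https://example.org/images/42.jpg',
                   'thumbnail_src': 'https://example.org/thumbs/42.jpg'})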
videos
------
========================= =====================================================
result-parameter          information
========================= =====================================================
template                  is set to ``videos.html``
url                       string, url of the result
title                     string, title of the result
content                   *(not implemented yet)*
publishedDate             :py:class:`datetime.datetime`, time of publish
thumbnail                 string, url to a small-preview image
========================= =====================================================
torrent
-------
.. _magnetlink: https://en.wikipedia.org/wiki/Magnet_URI_scheme
========================= =====================================================
result-parameter          information
========================= =====================================================
template                  is set to ``torrent.html``
url                       string, url of the result
title                     string, title of the result
content                   string, general result-text
publishedDate             :py:class:`datetime.datetime`,
                          time of publish *(not implemented yet)*
seed                      int, number of seeders
leech                     int, number of leechers
filesize                  int, size of file in bytes
files                     int, number of files
magnetlink                string, magnetlink_ of the result
torrentfile               string, torrentfile of the result
========================= =====================================================
map
---
========================= =====================================================
result-parameter          information
========================= =====================================================
url                       string, url of the result
title                     string, title of the result
content                   string, general result-text
publishedDate             :py:class:`datetime.datetime`, time of publish
latitude                  latitude of result (in decimal format)
longitude                 longitude of result (in decimal format)
boundingbox               boundingbox of result (array of 4 values
                          ``[lat-min, lat-max, lon-min, lon-max]``)
geojson                   geojson of result (http://geojson.org)
osm.type                  type of osm-object (if OSM-Result)
osm.id                    id of osm-object (if OSM-Result)
address.name              name of object
address.road              street name of object
address.house_number      house number of object
address.locality          city, place of object
address.postcode          postcode of object
address.country           country of object
========================= =====================================================
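Following the ``osm.*`` and ``address.*`` parameters from the table above, a
map result could be appended like this (one plausible shape; the coordinates
and address values are made up):

.. code:: python

   results.append({'url': 'https://www.openstreetmap.org/relation/1',
                   'title': 'Example place',
                   'content': '',
                   'latitude': 52.52,
                   'longitude': 13.40,
                   'boundingbox': [52.33, 52.68, 13.08, 13.76],
                   'osm': {'type': 'relation', 'id': 1},
                   'address': {'name': 'Example place',
                               'country': 'Germany'}})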

3
docs/dev/hello.dot Normal file
View File

@ -0,0 +1,3 @@
graph G {
Hello -- World
}

15
docs/dev/index.rst Normal file
View File

@ -0,0 +1,15 @@
=======================
Developer documentation
=======================
.. toctree::
:maxdepth: 1
quickstart
contribution_guide
engine_overview
search_api
plugins
translation
makefile
reST

221
docs/dev/makefile.rst Normal file
View File

@ -0,0 +1,221 @@
.. _makefile:
================
Makefile Targets
================
.. _gnu-make: https://www.gnu.org/software/make/manual/make.html#Introduction
.. sidebar:: build environment
Before looking deeper at the targets, first read about :ref:`makefile setup`
and :ref:`make pyenv`.
With the aim of simplifying development cycles, a ``Makefile`` based
boilerplate was added with :pull:`1756`. If you are not familiar with
Makefiles, we recommend reading the gnu-make_ introduction.
The usage is simple, just type ``make {target-name}`` to *build* a target.
Calling the ``help`` target gives a first overview::
$ make help
test - run developer tests
docs - build documentation
docs-live - autobuild HTML documentation while editing
run - run developer instance
install - developer install (./local)
uninstall - uninstall (./local)
gh-pages - build docs & deploy on gh-pages branch
clean - drop builds and environments
...
.. contents:: Contents
:depth: 2
:local:
:backlinks: entry
.. _makefile setup:
Setup
=====
.. _git stash: https://git-scm.com/docs/git-stash
The main setup is done in the :origin:`Makefile`::
export GIT_URL=https://github.com/asciimoo/searx
export SEARX_URL=https://searx.me
export DOCS_URL=https://asciimoo.github.io/searx
.. sidebar:: fork & upstream
Commit changes in your (local) branch, fork or whatever, but do not push them
upstream / `git stash`_ is your friend.
:GIT_URL: Change this to point to your searx fork.
:SEARX_URL: Change this to point to your searx instance.
:DOCS_URL: If you host your own (branded) documentation, change this URL.
.. _make pyenv:
Python environment
==================
.. sidebar:: activate environment
``source ./local/py3/bin/activate``
With the Makefile we no longer need to build up the virtualenv manually (as
described in the :ref:`devquickstart` guide). Jump into your git working tree
and run ``make pyenv``:
.. code:: sh
$ cd ~/searx-clone
$ make pyenv
PYENV usage: source ./local/py3/bin/activate
...
With target ``pyenv`` a development environment (aka virtualenv) is built up
in ``./local/py3/``. To make a *developer install* of searx
(:origin:`setup.py`) into this environment, use make target ``install``:
.. code:: sh
$ make install
PYENV usage: source ./local/py3/bin/activate
PYENV using virtualenv from ./local/py3
PYENV install .
You never have to think about intermediate targets like ``pyenv`` or
``install``; the ``Makefile`` chains them as prerequisites. Just run your main
target.
.. sidebar:: drop environment
To get rid of the existing environment before re-building, use the
:ref:`clean target <make clean>` first.
If you think something has gone wrong with your ./local environment, or you
changed the :origin:`setup.py` file (or the requirements listed in
:origin:`requirements-dev.txt` and :origin:`requirements.txt`), you have to
call :ref:`make clean`.
.. _make run:
``make run``
============
To get a developer instance up and running, simply call ``make run``. This
enables the *debug* option in :origin:`searx/settings.yml`, starts a
``./searx/webapp.py`` instance, disables the *debug* option again and opens
the URL in your favorite web browser (:man:`xdg-open`):
.. code:: sh
$ make run
PYENV usage: source ./local/py3/bin/activate
PYENV install .
./local/py3/bin/python ./searx/webapp.py
...
INFO:werkzeug: * Running on http://127.0.0.1:8888/ (Press CTRL+C to quit)
...
.. _make clean:
``make clean``
==============
Drop all intermediate files and all builds, but keep sources untouched. This
includes target ``pyclean``, which drops the ./local environment. Before
calling ``make clean``, stop all processes that use the :ref:`make pyenv`
environment.
.. code:: sh
$ make clean
CLEAN pyclean
CLEAN clean
.. _make docs:
``make docs docs-live docs-clean``
==================================
We describe the usage of the ``doc*`` targets in the :ref:`How to contribute /
Documentation <contrib docs>` section. If you want to edit the documentation,
read our :ref:`make docs-live` section. If you are working on your own brand,
adjust your :ref:`Makefile setup <makefile setup>`.
.. _make gh-pages:
``make gh-pages``
=================
To deploy on github.io, first adjust your :ref:`Makefile setup <makefile
setup>`. For further details, read :ref:`deploy on github.io`.
.. _make test:
``make test``
=============
Runs a series of tests: ``test.pep8``, ``test.unit``, ``test.robot`` and does
additional :ref:`pylint checks <make pylint>`. You can run tests selectively,
e.g.:
.. code:: sh
$ make test.pep8 test.unit
. ./local/py3/bin/activate; ./manage.sh pep8_check
[!] Running pep8 check
. ./local/py3/bin/activate; ./manage.sh unit_tests
[!] Running unit tests
.. _make pylint:
``make pylint``
===============
.. _Pylint: https://www.pylint.org/
Before committing, it is recommended to do some (more) linting. Pylint_ is
known as one of the best source-code, bug and quality checkers for the Python
programming language. Pylint_ is not yet a quality gate within our searx
project (like :ref:`test.pep8 <make test>` is), but Pylint_ can help to
improve code quality anyway. The pylint profile we use in the searx project is
found in the project's root folder: :origin:`.pylintrc`.
Code quality is an ongoing process. Don't try to fix all messages from Pylint;
run Pylint and check whether your changed lines bring up new messages. If so,
fix them. This way code quality gets incrementally better, and if the day
comes that the linting is balanced out, we might decide to add Pylint as a
quality gate.
``make pybuild``
================
.. _PyPi: https://pypi.org/
.. _twine: https://twine.readthedocs.io/en/latest/
Build Python packages in ``./dist/py``.
.. code:: sh
$ make pybuild
...
BUILD pybuild
running sdist
running egg_info
...
$ ls ./dist/py/
searx-0.15.0-py3-none-any.whl searx-0.15.0.tar.gz
To upload packages to PyPi_, there is also an ``upload-pypi`` target. It needs
twine_ to be installed. Since you are not the owner of :pypi:`searx`, you will
never need the latter.

54
docs/dev/plugins.rst Normal file
View File

@ -0,0 +1,54 @@
.. _dev plugin:
=======
Plugins
=======
.. sidebar:: Further reading ..
- :ref:`plugins generic`
Plugins can extend or replace functionality of various components of searx.
Example plugin
==============
.. code:: python
name = 'Example plugin'
description = 'This plugin extends the suggestions with the word "example"'
default_on = False # disabled by default
js_dependencies = tuple() # optional, list of static js files
css_dependencies = tuple() # optional, list of static css files
# attach callback to the post search hook
# request: flask request object
# ctx: the whole local context of the post search hook
def post_search(request, ctx):
ctx['search'].suggestions.add('example')
return True
Plugin entry points
===================
Entry points (hooks) define when a plugin runs. Right now only three hooks are
implemented, so feel free to implement a new hook if none of the existing ones
fits the behaviour of your plugin.
Pre search hook
---------------
Runs BEFORE the search request. Function to implement: ``pre_search``
Post search hook
----------------
Runs AFTER the search request. Function to implement: ``post_search``
Result hook
-----------
Runs when a new result is added to the result list. Function to implement:
``on_result``
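As an illustration, a hypothetical result hook could rewrite each result
before it is shown. The signature below is assumed by analogy with
``post_search`` above, with the result dict as described in the engine
overview; check the bundled plugins for the exact calling convention.

.. code:: python

   name = 'HTTPS upgrade example'
   description = 'Hypothetical plugin which upgrades result links to HTTPS'
   default_on = False

   # request: flask request object
   # ctx: the local context of the hook
   # result: the result dict (url, title, content, ...)
   def on_result(request, ctx, result):
       url = result.get('url', '')
       if url.startswith('http://'):
           result['url'] = 'https://' + url[len('http://'):]
       return True  # True keeps the result in the result list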

132
docs/dev/quickstart.rst Normal file
View File

@ -0,0 +1,132 @@
.. _devquickstart:
======================
Development Quickstart
======================
.. sidebar:: :ref:`makefile`
For additional developer purposes there is the :ref:`makefile`.
This quickstart guide gets your environment set up with searx. Furthermore, it
gives a short introduction to the ``manage.sh`` script.
How to setup your development environment
=========================================
.. sidebar:: :ref:`make pyenv <make pyenv>`
Alternatively use the :ref:`make pyenv`.
First, clone the source code of searx to the desired folder. In this case the
source is cloned to ``~/myprojects/searx``. Then create and activate the
searx-ve virtualenv and install the required packages using ``manage.sh``.
.. code:: sh
cd ~/myprojects
git clone https://github.com/asciimoo/searx.git
cd searx
virtualenv searx-ve
. ./searx-ve/bin/activate
./manage.sh update_dev_packages
How to run tests
================
.. sidebar:: :ref:`make test.unit <make test>`
Alternatively use the ``test.pep8``, ``test.unit``, ``test.robot`` targets.
Tests can be run using the ``manage.sh`` script. The following tests and
checks are available:
- Unit tests
- Selenium tests
- PEP8 validation
- Unit test coverage check
For example, unit tests are run with the command below:
.. code:: sh
./manage.sh unit_tests
For further test options, please consult the help of the ``manage.sh`` script or
read :ref:`make test`.
How to compile styles and javascript
====================================
.. _less: http://lesscss.org/
.. _NodeJS: https://nodejs.org
How to build styles
-------------------
Less_ is required to build the styles of searx. Less_ can be installed using
either NodeJS_ or Apt.
.. code:: sh
sudo -H apt-get install nodejs
sudo -H npm install -g less
OR
.. code:: sh
sudo -H apt-get install node-less
After satisfying the requirements, the styles can be built using ``manage.sh``:
.. code:: sh
./manage.sh styles
How to build the source of the oscar theme
==========================================
.. _grunt: https://gruntjs.com/
Grunt_ must be installed in order to build the javascript sources. It depends on
NodeJS, so first Node has to be installed.
.. code:: sh
sudo -H apt-get install nodejs
sudo -H npm install -g grunt-cli
After installing grunt, the files can be built using the following command:
.. code:: sh
./manage.sh grunt_build
Tips for debugging/development
==============================
.. sidebar:: :ref:`make run`
Makefile target ``run`` already enables debug option for your developer
session / see :ref:`make run`.
Turn on debug logging
Whether you are working on a new engine or trying to eliminate a bug, it is
always a good idea to turn on debug logging. When debug logging is enabled a
stack trace appears instead of the cryptic ``Internal Server Error``
message. It can be turned on by changing ``debug: False`` to ``debug: True``
in :origin:`settings.yml <searx/settings.yml>`.
.. sidebar:: :ref:`make test`
Alternatively use the :ref:`make test` targets.
Run ``./manage.sh tests`` before creating a PR.
A failing build on Travis is commonly caused by the PEP8 checks, and then a
new commit must be created containing these format fixes. This phase can be
skipped if ``./manage.sh tests`` is run locally before creating a PR.

1428
docs/dev/reST.rst Normal file

File diff suppressed because it is too large

119
docs/dev/search_api.rst Normal file
View File

@ -0,0 +1,119 @@
==========
Search API
==========
The search supports both ``GET`` and ``POST``.
Furthermore, two endpoints ``/`` and ``/search`` are available for querying.
``GET /``
``GET /search``
Parameters
==========
.. sidebar:: Further reading ..
- :ref:`engines generic`
- :ref:`configured engines`
- :ref:`engine settings`
- :ref:`engine file`
``q`` : required
The search query. This string is passed to external search services. Thus,
searx supports the syntax of each search service. For example,
``site:github.com searx`` is a valid query for Google. However, if simply the
query above is passed to any search engine which does not filter its results
based on this syntax, you might not get the results you wanted.
See more at :ref:`search-syntax`
``categories`` : optional
Comma separated list, specifies the active search categories
``engines``: optional
Comma separated list, specifies the active search engines.
``lang``: default ``all``
Code of the language.
``pageno``: default ``1``
Search page number.
``time_range``: optional
[ ``day``, ``month``, ``year`` ]
Time range of search for engines which support it. See if an engine supports
time range search in the preferences page of an instance.
``format``: optional
[ ``json``, ``csv``, ``rss`` ]
Output format of results.
``results_on_new_tab``: default ``0``
[ ``0``, ``1`` ]
Open search results on new tab.
``image_proxy``: default ``False``
[ ``True``, ``False`` ]
Proxy image results through searx.
``autocomplete``: default *empty*
[ ``google``, ``dbpedia``, ``duckduckgo``, ``startpage``, ``wikipedia`` ]
Service which completes words as you type.
``safesearch``: default ``None``
[ ``0``, ``1``, ``None`` ]
Filter search results of engines which support safe search. See if an engine
supports safe search in the preferences page of an instance.
``theme``: default ``oscar``
[ ``oscar``, ``simple``, ``legacy``, ``pix-art``, ``courgette`` ]
Theme of instance.
Please note, available themes depend on an instance. It is possible that an
instance administrator deleted, created or renamed themes on his/her instance.
See the available options in the preferences page of the instance.
``oscar-style``: default ``logicodev``
[ ``pointhi``, ``logicodev`` ]
Style of Oscar theme. It is only parsed if the theme of an instance is
``oscar``.
Please note, available styles depend on an instance. It is possible that an
instance administrator deleted, created or renamed styles on his/her
instance. See the available options in the preferences page of the instance.
``enabled_plugins``: optional
List of enabled plugins.
:default: ``HTTPS_rewrite``, ``Self_Informations``,
``Search_on_category_select``, ``Tracker_URL_remover``
:values: [ ``DOAI_rewrite``, ``HTTPS_rewrite``, ``Infinite_scroll``,
``Vim-like_hotkeys``, ``Self_Informations``, ``Tracker_URL_remover``,
``Search_on_category_select`` ]
``disabled_plugins``: optional
List of disabled plugins.
:default: ``DOAI_rewrite``, ``Infinite_scroll``, ``Vim-like_hotkeys``
:values: ``DOAI_rewrite``, ``HTTPS_rewrite``, ``Infinite_scroll``,
``Vim-like_hotkeys``, ``Self_Informations``, ``Tracker_URL_remover``,
``Search_on_category_select``
``enabled_engines``: optional : *all* :origin:`engines <searx/engines>`
List of enabled engines.
``disabled_engines``: optional : *all* :origin:`engines <searx/engines>`
List of disabled engines.
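As a usage sketch, the request below asks a placeholder instance
(``searx.example.org``) for JSON results; it assumes the ``json`` output
format is enabled on that instance.

.. code:: python

   import requests

   params = {'q': 'privacy', 'format': 'json', 'lang': 'en', 'pageno': 1}
   resp = requests.get('https://searx.example.org/search', params=params)

   # print the first three results
   for result in resp.json().get('results', [])[:3]:
       print(result['title'], '->', result['url'])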

10
docs/dev/svg_image.svg Normal file
View File

@ -0,0 +1,10 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- originate: https://commons.wikimedia.org/wiki/File:Variable_Resistor.svg -->
<svg xmlns="http://www.w3.org/2000/svg"
version="1.1" baseProfile="full"
width="70px" height="40px" viewBox="0 0 700 400">
<line x1="0" y1="200" x2="700" y2="200" stroke="black" stroke-width="20px"/>
<rect x="100" y="100" width="500" height="200" fill="white" stroke="black" stroke-width="20px"/>
<line x1="180" y1="370" x2="500" y2="50" stroke="black" stroke-width="15px"/>
<polygon points="585 0 525 25 585 50" transform="rotate(135 525 25)"/>
</svg>


71
docs/dev/translation.rst Normal file
View File

@ -0,0 +1,71 @@
.. _translation:
===========
Translation
===========
.. _searx@transifex: https://www.transifex.com/asciimoo/searx/
Translation currently takes place on `searx@transifex`_
Requirements
============
* Transifex account
* Installed CLI tool of Transifex
Init Transifex project
======================
After installing ``transifex`` using pip, run the following command to
initialize the project.
.. code:: sh
tx init # Transifex instance: https://www.transifex.com/asciimoo/searx/
After ``$HOME/.transifexrc`` is created, get a Transifex API key and insert it
into the configuration file.
Create a configuration file for ``tx`` named ``$HOME/.tx/config``.
.. code:: ini
[main]
host = https://www.transifex.com
[searx.messagespo]
file_filter = searx/translations/<lang>/LC_MESSAGES/messages.po
source_file = messages.pot
source_lang = en
type = PO
Then run ``tx set``:
.. code:: shell
tx set --auto-local -r searx.messagespo 'searx/translations/<lang>/LC_MESSAGES/messages.po' \
--source-lang en --type PO --source-file messages.pot --execute
Update translations
===================
To retrieve the latest translations, pull it from Transifex.
.. code:: sh
tx pull -a
Then check the new languages. If not enough strings are translated, delete
those folders, because they should not be compiled. Call the command below to
compile the ``.po`` files.
.. code:: shell
pybabel compile -d searx/translations
After the compilation is finished, commit the ``.po`` and ``.mo`` files and
create a PR.

32
docs/index.rst Normal file
View File

@ -0,0 +1,32 @@
================
Welcome to searx
================
Search without being tracked.
.. sidebar:: Features
- Self hosted
- No user tracking
- No user profiling
- About 70 supported search engines
- Easy integration with any search engine
- Cookies are not used by default
- Secure, encrypted connections (HTTPS/SSL)
- Hosted by organizations, such as *La Quadrature du Net*, which promote
digital rights
Searx is a free internet metasearch engine which aggregates results from more
than 70 search services. Users are neither tracked nor profiled. Additionally,
searx can be used over Tor for online anonymity.
Get started with searx by using one of the :wiki:`Searx-instances`. If you
don't trust anyone, you can set up your own, see :ref:`installation`.
.. toctree::
:maxdepth: 2
user/index
admin/index
dev/index
blog/index

BIN
docs/static/img/searx_logo_small.png vendored Normal file

Binary file not shown.


9
docs/user/index.rst Normal file
View File

@ -0,0 +1,9 @@
==================
User documentation
==================
.. toctree::
:maxdepth: 1
search_syntax
own-instance

View File

@ -0,0 +1,77 @@
===========================
Why use a private instance?
===========================
"Is it worth to run my own instance?" is a common question among searx users.
Before answering this question, see what options a searx user has.
Public instances are open to everyone who has access to their URL. Usually,
these are operated by unknown parties (from the users' point of view). Private
instances can be used by a select group of people, for example a searx
instance for a group of friends or a company, which can be accessed through a
VPN. It can also be a single-user instance running on the user's laptop.
To gain more insight into how these instances work, let's dive into how searx
protects its users.
How does searx protect privacy?
===============================
Searx protects the privacy of its users in multiple ways regardless of the type
of the instance (private, public). Removal of private data from search requests
comes in three forms:
1. removal of private data from requests going to search services
2. not forwarding anything from a third party services through search services
(e.g. advertisement)
3. removal of private data from requests going to the result pages
Removing private data means not sending cookies to external search engines and
generating a random browser profile for every request. Thus, it does not
matter if a public or private instance handles the request, because it is
anonymized in both cases. The IP address sent will be the IP of the instance,
but searx can also be configured to use a proxy or Tor. A `result proxy
<https://github.com/asciimoo/morty>`__ is supported, too.
Searx does not serve ads or tracking content unlike most search services. So
private data is not forwarded to third parties who might monetize it. Besides
protecting users from search services, both referring page and search query are
hidden from visited result pages.
What are the consequences of using public instances?
----------------------------------------------------
If someone uses a public instance, he/she has to trust the administrator of
that instance. This means that the user of the public instance does not know
whether his/her requests are logged, aggregated and sent or sold to a third
party.
Also, public instances without proper protection are more vulnerable to abuse
of the search service, in which case the external service in turn returns
CAPTCHAs or bans the IP of the instance. Thus, search requests return fewer
results.
I see. What about private instances?
------------------------------------
If users run their own instances, everything is under their control: the
source code, the logging settings and their private data. Unknown instance
administrators do not have to be trusted.
Furthermore, as the default settings of their instance are editable, there is
no need to use cookies to tailor searx to their needs. So preferences will not
be reset to defaults when clearing browser cookies. As the settings are stored
on their computer, they will not be accessible to others as long as their
computer is not compromised.
Conclusion
==========
Always use an instance which is operated by people you trust. The privacy
features of searx are available to users no matter what kind of instance they
use.
If someone is on the go or just wants to try searx for the first time, public
instances are the best choice. Additionally, public instances make the world a
better place, because those who cannot or do not want to run an instance still
have access to a privacy-respecting search service.

View File

@ -0,0 +1,42 @@
.. _search-syntax:
=============
Search syntax
=============
Searx allows you to modify the default categories, engines and search language
via the search query.
Prefix: ``!``
to set Category/engine
Prefix: ``:``
to set language
Prefix: ``?``
to add engines and categories to the currently selected categories
Abbreviations of the engines and languages are also accepted. Engine/category
modifiers are chainable and inclusive (e.g. with :search:`!it !ddg !wp qwer
<?q=%21it%20%21ddg%20%21wp%20qwer>` search in IT category **and** duckduckgo
**and** wikipedia for ``qwer``).
See the :search:`/preferences page <preferences>` for the list of engines,
categories and languages.
Examples
========
Search in wikipedia for ``qwer``:
- :search:`!wp qwer <?q=%21wp%20qwer>` or
- :search:`!wikipedia qwer <?q=%21wikipedia%20qwer>`
Image search:
- :search:`!images Cthulhu <?q=%21images%20Cthulhu>`
Custom language in wikipedia:
- :search:`:hu !wp hackerspace <?q=%3Ahu%20%21wp%20hackerspace>`

View File

@ -18,12 +18,12 @@ ACTION="$1"
update_packages() { update_packages() {
pip install --upgrade pip pip install --upgrade pip
pip install --upgrade setuptools pip install --upgrade setuptools
pip install -r "$BASE_DIR/requirements.txt" pip install -Ur "$BASE_DIR/requirements.txt"
} }
update_dev_packages() { update_dev_packages() {
update_packages update_packages
pip install -r "$BASE_DIR/requirements-dev.txt" pip install -Ur "$BASE_DIR/requirements-dev.txt"
} }
install_geckodriver() { install_geckodriver() {
@ -70,6 +70,11 @@ locales() {
pybabel compile -d "$SEARX_DIR/translations" pybabel compile -d "$SEARX_DIR/translations"
} }
update_useragents() {
echo '[!] Updating user agent versions'
python utils/fetch_firefox_version.py
}
pep8_check() { pep8_check() {
echo '[!] Running pep8 check' echo '[!] Running pep8 check'
# ignored rules: # ignored rules:
@ -152,6 +157,8 @@ styles() {
} }
grunt_build() { grunt_build() {
npm_path_setup
echo '[!] Grunt build : oscar theme' echo '[!] Grunt build : oscar theme'
grunt --gruntfile "$SEARX_DIR/static/themes/oscar/gruntfile.js" grunt --gruntfile "$SEARX_DIR/static/themes/oscar/gruntfile.js"
echo '[!] Grunt build : simple theme' echo '[!] Grunt build : simple theme'
@ -243,7 +250,8 @@ Commands
update_packages - Check & update production dependency changes update_packages - Check & update production dependency changes
update_dev_packages - Check & update development and production dependency changes update_dev_packages - Check & update development and production dependency changes
install_geckodriver - Download & install geckodriver if not already installed (required for robot_tests) install_geckodriver - Download & install geckodriver if not already installed (required for robot_tests)
npm_packages - Download & install npm dependencies (source manage.sh to update the PATH) npm_packages - Download & install npm dependencies
update_useragents - Update useragents.json with the most recent versions of Firefox
Build Build
----- -----

View File

@ -1,3 +1,6 @@
pallets-sphinx-themes
Sphinx
sphinx-issues
mock==2.0.0 mock==2.0.0
nose2[coverage_plugin] nose2[coverage_plugin]
cov-core==1.15.0 cov-core==1.15.0
@ -8,3 +11,6 @@ transifex-client==0.12.2
unittest2==1.1.0 unittest2==1.1.0
zope.testrunner==4.5.1 zope.testrunner==4.5.1
selenium==3.141.0 selenium==3.141.0
linuxdoc @ git+http://github.com/return42/linuxdoc.git
sphinx-jinja
sphinx-tabs

View File

@ -1,14 +1,15 @@
{ {
"ua": "Mozilla/5.0 ({os}; rv:{version}) Gecko/20100101 Firefox/{version}",
"versions": [ "versions": [
"61.0.1", "70.0.1",
"61.0", "70.0",
"60.0.2", "69.0.3",
"60.0.1", "69.0.2",
"60.0" "69.0.1",
"69.0"
], ],
"os": [ "os": [
"Windows NT 10; WOW64", "Windows NT 10; WOW64",
"X11; Linux x86_64" "X11; Linux x86_64"
] ],
"ua": "Mozilla/5.0 ({os}; rv:{version}) Gecko/20100101 Firefox/{version}"
} }

View File

@ -27,7 +27,7 @@ from json import loads
from requests import get from requests import get
from searx import settings from searx import settings
from searx import logger from searx import logger
from searx.utils import load_module, match_language from searx.utils import load_module, match_language, get_engine_from_settings
logger = logger.getChild('engines') logger = logger.getChild('engines')
@ -53,7 +53,8 @@ engine_default_args = {'paging': False,
'disabled': False, 'disabled': False,
'suspend_end_time': 0, 'suspend_end_time': 0,
'continuous_errors': 0, 'continuous_errors': 0,
'time_range_support': False} 'time_range_support': False,
'offline': False}
def load_engine(engine_data): def load_engine(engine_data):
@ -128,14 +129,16 @@ def load_engine(engine_data):
engine.stats = { engine.stats = {
'result_count': 0, 'result_count': 0,
'search_count': 0, 'search_count': 0,
'page_load_time': 0,
'page_load_count': 0,
'engine_time': 0, 'engine_time': 0,
'engine_time_count': 0, 'engine_time_count': 0,
'score_count': 0, 'score_count': 0,
'errors': 0 'errors': 0
} }
if not engine.offline:
engine.stats['page_load_time'] = 0
engine.stats['page_load_count'] = 0
for category_name in engine.categories: for category_name in engine.categories:
categories.setdefault(category_name, []).append(engine) categories.setdefault(category_name, []).append(engine)
@ -173,11 +176,6 @@ def get_engines_stats():
results_num = \ results_num = \
engine.stats['result_count'] / float(engine.stats['search_count']) engine.stats['result_count'] / float(engine.stats['search_count'])
if engine.stats['page_load_count'] != 0:
load_times = engine.stats['page_load_time'] / float(engine.stats['page_load_count']) # noqa
else:
load_times = 0
if engine.stats['engine_time_count'] != 0: if engine.stats['engine_time_count'] != 0:
this_engine_time = engine.stats['engine_time'] / float(engine.stats['engine_time_count']) # noqa this_engine_time = engine.stats['engine_time'] / float(engine.stats['engine_time_count']) # noqa
else: else:
@ -189,14 +187,19 @@ def get_engines_stats():
else: else:
score = score_per_result = 0.0 score = score_per_result = 0.0
max_pageload = max(load_times, max_pageload) if not engine.offline:
load_times = 0
if engine.stats['page_load_count'] != 0:
load_times = engine.stats['page_load_time'] / float(engine.stats['page_load_count']) # noqa
max_pageload = max(load_times, max_pageload)
pageloads.append({'avg': load_times, 'name': engine.name})
max_engine_times = max(this_engine_time, max_engine_times) max_engine_times = max(this_engine_time, max_engine_times)
max_results = max(results_num, max_results) max_results = max(results_num, max_results)
max_score = max(score, max_score) max_score = max(score, max_score)
max_score_per_result = max(score_per_result, max_score_per_result) max_score_per_result = max(score_per_result, max_score_per_result)
max_errors = max(max_errors, engine.stats['errors']) max_errors = max(max_errors, engine.stats['errors'])
pageloads.append({'avg': load_times, 'name': engine.name})
engine_times.append({'avg': this_engine_time, 'name': engine.name}) engine_times.append({'avg': this_engine_time, 'name': engine.name})
results.append({'avg': results_num, 'name': engine.name}) results.append({'avg': results_num, 'name': engine.name})
scores.append({'avg': score, 'name': engine.name}) scores.append({'avg': score, 'name': engine.name})
@ -255,7 +258,7 @@ def initialize_engines(engine_list):
load_engines(engine_list) load_engines(engine_list)
def engine_init(engine_name, init_fn): def engine_init(engine_name, init_fn):
init_fn() init_fn(get_engine_from_settings(engine_name))
logger.debug('%s engine: Initialized', engine_name) logger.debug('%s engine: Initialized', engine_name)
for engine_name, engine in engines.items(): for engine_name, engine in engines.items():

View File

@ -17,6 +17,7 @@ from searx.url_utils import urlencode
categories = ['science'] categories = ['science']
paging = True
base_url = 'http://export.arxiv.org/api/query?search_query=all:'\ base_url = 'http://export.arxiv.org/api/query?search_query=all:'\
+ '{query}&start={offset}&max_results={number_of_results}' + '{query}&start={offset}&max_results={number_of_results}'
@ -29,7 +30,7 @@ def request(query, params):
# basic search # basic search
offset = (params['pageno'] - 1) * number_of_results offset = (params['pageno'] - 1) * number_of_results
string_args = dict(query=query, string_args = dict(query=query.decode('utf-8'),
offset=offset, offset=offset,
number_of_results=number_of_results) number_of_results=number_of_results)

View File

@ -13,10 +13,14 @@
@todo publishedDate @todo publishedDate
""" """
import re
from lxml import html from lxml import html
from searx import logger, utils
from searx.engines.xpath import extract_text from searx.engines.xpath import extract_text
from searx.url_utils import urlencode from searx.url_utils import urlencode
from searx.utils import match_language, gen_useragent from searx.utils import match_language, gen_useragent, eval_xpath
logger = logger.getChild('bing engine')
# engine dependent config # engine dependent config
categories = ['general'] categories = ['general']
@ -30,9 +34,13 @@ base_url = 'https://www.bing.com/'
search_string = 'search?{query}&first={offset}' search_string = 'search?{query}&first={offset}'
def _get_offset_from_pageno(pageno):
return (pageno - 1) * 10 + 1
# do search-request # do search-request
def request(query, params): def request(query, params):
offset = (params['pageno'] - 1) * 10 + 1 offset = _get_offset_from_pageno(params.get('pageno', 0))
if params['language'] == 'all': if params['language'] == 'all':
lang = 'EN' lang = 'EN'
@ -47,29 +55,21 @@ def request(query, params):
params['url'] = base_url + search_path params['url'] = base_url + search_path
params['headers']['User-Agent'] = gen_useragent('Windows NT 6.3; WOW64')
return params return params
# get response from search-request # get response from search-request
def response(resp): def response(resp):
results = [] results = []
result_len = 0
dom = html.fromstring(resp.text) dom = html.fromstring(resp.text)
try:
results.append({'number_of_results': int(dom.xpath('//span[@class="sb_count"]/text()')[0]
.split()[0].replace(',', ''))})
except:
pass
# parse results # parse results
for result in dom.xpath('//div[@class="sa_cc"]'): for result in eval_xpath(dom, '//div[@class="sa_cc"]'):
link = result.xpath('.//h3/a')[0] link = eval_xpath(result, './/h3/a')[0]
url = link.attrib.get('href') url = link.attrib.get('href')
title = extract_text(link) title = extract_text(link)
content = extract_text(result.xpath('.//p')) content = extract_text(eval_xpath(result, './/p'))
# append result # append result
results.append({'url': url, results.append({'url': url,
@ -77,18 +77,35 @@ def response(resp):
'content': content}) 'content': content})
# parse results again if nothing is found yet # parse results again if nothing is found yet
for result in dom.xpath('//li[@class="b_algo"]'): for result in eval_xpath(dom, '//li[@class="b_algo"]'):
link = result.xpath('.//h2/a')[0] link = eval_xpath(result, './/h2/a')[0]
url = link.attrib.get('href') url = link.attrib.get('href')
title = extract_text(link) title = extract_text(link)
content = extract_text(result.xpath('.//p')) content = extract_text(eval_xpath(result, './/p'))
# append result # append result
results.append({'url': url, results.append({'url': url,
'title': title, 'title': title,
'content': content}) 'content': content})
# return results try:
result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]/text()'))
result_len_container = utils.to_string(result_len_container)
if "-" in result_len_container:
# Remove the part "from-to" for paginated request ...
result_len_container = result_len_container[result_len_container.find("-") * 2 + 2:]
result_len_container = re.sub('[^0-9]', '', result_len_container)
if len(result_len_container) > 0:
result_len = int(result_len_container)
except Exception as e:
logger.debug('result error :\n%s', e)
pass
if _get_offset_from_pageno(resp.search_params.get("pageno", 0)) > result_len:
return []
results.append({'number_of_results': result_len})
return results return results
@ -96,9 +113,9 @@ def response(resp):
def _fetch_supported_languages(resp): def _fetch_supported_languages(resp):
supported_languages = [] supported_languages = []
dom = html.fromstring(resp.text) dom = html.fromstring(resp.text)
options = dom.xpath('//div[@id="limit-languages"]//input') options = eval_xpath(dom, '//div[@id="limit-languages"]//input')
for option in options: for option in options:
code = option.xpath('./@id')[0].replace('_', '-') code = eval_xpath(option, './@id')[0].replace('_', '-')
if code == 'nb': if code == 'nb':
code = 'no' code = 'no'
supported_languages.append(code) supported_languages.append(code)

View File

@ -15,7 +15,7 @@
from json import loads from json import loads
from datetime import datetime from datetime import datetime
from searx.url_utils import urlencode from searx.url_utils import urlencode
from searx.utils import match_language from searx.utils import match_language, html_to_text
# engine dependent config # engine dependent config
categories = ['videos'] categories = ['videos']
@ -59,7 +59,7 @@ def response(resp):
for res in search_res['list']: for res in search_res['list']:
title = res['title'] title = res['title']
url = res['url'] url = res['url']
content = res['description'] content = html_to_text(res['description'])
thumbnail = res['thumbnail_360_url'] thumbnail = res['thumbnail_360_url']
publishedDate = datetime.fromtimestamp(res['created_time'], None) publishedDate = datetime.fromtimestamp(res['created_time'], None)
embedded = embedded_url.format(videoid=res['id']) embedded = embedded_url.format(videoid=res['id'])

View File

@ -24,7 +24,7 @@ time_range_support = True
# search-url # search-url
base_url = 'https://www.deviantart.com/' base_url = 'https://www.deviantart.com/'
search_url = base_url + 'browse/all/?offset={offset}&{query}' search_url = base_url + 'search?page={page}&{query}'
time_range_url = '&order={range}' time_range_url = '&order={range}'
time_range_dict = {'day': 11, time_range_dict = {'day': 11,
@ -37,9 +37,7 @@ def request(query, params):
if params['time_range'] and params['time_range'] not in time_range_dict: if params['time_range'] and params['time_range'] not in time_range_dict:
return params return params
offset = (params['pageno'] - 1) * 24 params['url'] = search_url.format(page=params['pageno'],
params['url'] = search_url.format(offset=offset,
query=urlencode({'q': query})) query=urlencode({'q': query}))
if params['time_range'] in time_range_dict: if params['time_range'] in time_range_dict:
params['url'] += time_range_url.format(range=time_range_dict[params['time_range']]) params['url'] += time_range_url.format(range=time_range_dict[params['time_range']])
@ -57,28 +55,27 @@ def response(resp):
dom = html.fromstring(resp.text) dom = html.fromstring(resp.text)
regex = re.compile(r'\/200H\/')
# parse results # parse results
for result in dom.xpath('.//span[@class="thumb wide"]'): for row in dom.xpath('//div[contains(@data-hook, "content_row")]'):
link = result.xpath('.//a[@class="torpedo-thumb-link"]')[0] for result in row.xpath('./div'):
url = link.attrib.get('href') link = result.xpath('.//a[@data-hook="deviation_link"]')[0]
title = extract_text(result.xpath('.//span[@class="title"]')) url = link.attrib.get('href')
thumbnail_src = link.xpath('.//img')[0].attrib.get('src') title = link.attrib.get('title')
img_src = regex.sub('/', thumbnail_src) thumbnail_src = result.xpath('.//img')[0].attrib.get('src')
img_src = thumbnail_src
# http to https, remove domain sharding # http to https, remove domain sharding
thumbnail_src = re.sub(r"https?://(th|fc)\d+.", "https://th01.", thumbnail_src) thumbnail_src = re.sub(r"https?://(th|fc)\d+.", "https://th01.", thumbnail_src)
thumbnail_src = re.sub(r"http://", "https://", thumbnail_src) thumbnail_src = re.sub(r"http://", "https://", thumbnail_src)
url = re.sub(r"http://(.*)\.deviantart\.com/", "https://\\1.deviantart.com/", url) url = re.sub(r"http://(.*)\.deviantart\.com/", "https://\\1.deviantart.com/", url)
# append result # append result
results.append({'url': url, results.append({'url': url,
'title': title, 'title': title,
'img_src': img_src, 'img_src': img_src,
'thumbnail_src': thumbnail_src, 'thumbnail_src': thumbnail_src,
'template': 'images.html'}) 'template': 'images.html'})
# return results # return results
return results return results

View File

@ -11,11 +11,11 @@
import re import re
from lxml import html from lxml import html
from searx.utils import is_valid_lang from searx.utils import is_valid_lang, eval_xpath
from searx.url_utils import urljoin from searx.url_utils import urljoin
categories = ['general'] categories = ['general']
url = u'http://dictzone.com/{from_lang}-{to_lang}-dictionary/{query}' url = u'https://dictzone.com/{from_lang}-{to_lang}-dictionary/{query}'
weight = 100 weight = 100
parser_re = re.compile(b'.*?([a-z]+)-([a-z]+) ([^ ]+)$', re.I) parser_re = re.compile(b'.*?([a-z]+)-([a-z]+) ([^ ]+)$', re.I)
@ -47,14 +47,14 @@ def response(resp):
dom = html.fromstring(resp.text) dom = html.fromstring(resp.text)
for k, result in enumerate(dom.xpath(results_xpath)[1:]): for k, result in enumerate(eval_xpath(dom, results_xpath)[1:]):
try: try:
from_result, to_results_raw = result.xpath('./td') from_result, to_results_raw = eval_xpath(result, './td')
except: except:
continue continue
to_results = [] to_results = []
for to_result in to_results_raw.xpath('./p/a'): for to_result in eval_xpath(to_results_raw, './p/a'):
t = to_result.text_content() t = to_result.text_content()
if t.strip(): if t.strip():
to_results.append(to_result.text_content()) to_results.append(to_result.text_content())

View File

@@ -15,7 +15,8 @@ import string
 from dateutil import parser
 from json import loads
 from lxml import html
-from searx.url_utils import quote_plus
+from searx.url_utils import urlencode
+from datetime import datetime
 # engine dependent config
 categories = ['news', 'social media']
@@ -23,7 +24,7 @@ paging = True
 # search-url
 base_url = 'https://digg.com/'
-search_url = base_url + 'api/search/{query}.json?position={position}&format=html'
+search_url = base_url + 'api/search/?{query}&from={position}&size=20&format=html'
 # specific xpath variables
 results_xpath = '//article'
@@ -38,9 +39,9 @@ digg_cookie_chars = string.ascii_uppercase + string.ascii_lowercase +\
 # do search-request
 def request(query, params):
-    offset = (params['pageno'] - 1) * 10
+    offset = (params['pageno'] - 1) * 20
     params['url'] = search_url.format(position=offset,
-                                      query=quote_plus(query))
+                                      query=urlencode({'q': query}))
     params['cookies']['frontend.auid'] = ''.join(random.choice(
         digg_cookie_chars) for _ in range(22))
     return params
@@ -52,30 +53,17 @@ def response(resp):
     search_result = loads(resp.text)
-    if 'html' not in search_result or search_result['html'] == '':
-        return results
-    dom = html.fromstring(search_result['html'])
     # parse results
-    for result in dom.xpath(results_xpath):
-        url = result.attrib.get('data-contenturl')
-        thumbnail = result.xpath('.//img')[0].attrib.get('src')
-        title = ''.join(result.xpath(title_xpath))
-        content = ''.join(result.xpath(content_xpath))
-        pubdate = result.xpath(pubdate_xpath)[0].attrib.get('datetime')
-        publishedDate = parser.parse(pubdate)
-        # http to https
-        thumbnail = thumbnail.replace("http://static.digg.com", "https://static.digg.com")
+    for result in search_result['mapped']:
+        published = datetime.strptime(result['created']['ISO'], "%Y-%m-%d %H:%M:%S")
         # append result
-        results.append({'url': url,
-                        'title': title,
-                        'content': content,
+        results.append({'url': result['url'],
+                        'title': result['title'],
+                        'content': result['excerpt'],
                         'template': 'videos.html',
-                        'publishedDate': publishedDate,
-                        'thumbnail': thumbnail})
+                        'publishedDate': published,
+                        'thumbnail': result['images']['thumbImage']})
     # return results
     return results

View File

@@ -11,6 +11,7 @@
 from lxml.html import fromstring
 from searx.engines.xpath import extract_text
+from searx.utils import eval_xpath
 from searx.url_utils import urlencode
 # engine dependent config
@@ -45,16 +46,16 @@ def response(resp):
     # parse results
     # Quickhits
-    for r in doc.xpath('//div[@class="search_quickresult"]/ul/li'):
+    for r in eval_xpath(doc, '//div[@class="search_quickresult"]/ul/li'):
         try:
-            res_url = r.xpath('.//a[@class="wikilink1"]/@href')[-1]
+            res_url = eval_xpath(r, './/a[@class="wikilink1"]/@href')[-1]
         except:
             continue
         if not res_url:
             continue
-        title = extract_text(r.xpath('.//a[@class="wikilink1"]/@title'))
+        title = extract_text(eval_xpath(r, './/a[@class="wikilink1"]/@title'))
         # append result
         results.append({'title': title,
@@ -62,13 +63,13 @@ def response(resp):
                         'url': base_url + res_url})
     # Search results
-    for r in doc.xpath('//dl[@class="search_results"]/*'):
+    for r in eval_xpath(doc, '//dl[@class="search_results"]/*'):
         try:
             if r.tag == "dt":
-                res_url = r.xpath('.//a[@class="wikilink1"]/@href')[-1]
-                title = extract_text(r.xpath('.//a[@class="wikilink1"]/@title'))
+                res_url = eval_xpath(r, './/a[@class="wikilink1"]/@href')[-1]
+                title = extract_text(eval_xpath(r, './/a[@class="wikilink1"]/@title'))
             elif r.tag == "dd":
-                content = extract_text(r.xpath('.'))
+                content = extract_text(eval_xpath(r, '.'))
                 # append result
                 results.append({'title': title,

View File

@@ -18,7 +18,7 @@ from json import loads
 from searx.engines.xpath import extract_text
 from searx.poolrequests import get
 from searx.url_utils import urlencode
-from searx.utils import match_language
+from searx.utils import match_language, eval_xpath
 # engine dependent config
 categories = ['general']
@@ -65,21 +65,36 @@ def get_region_code(lang, lang_list=[]):
 def request(query, params):
-    if params['time_range'] and params['time_range'] not in time_range_dict:
+    if params['time_range'] not in (None, 'None', '') and params['time_range'] not in time_range_dict:
         return params
     offset = (params['pageno'] - 1) * 30
     region_code = get_region_code(params['language'], supported_languages)
-    if region_code:
-        params['url'] = url.format(
-            query=urlencode({'q': query, 'kl': region_code}), offset=offset, dc_param=offset)
+    params['url'] = 'https://duckduckgo.com/html/'
+    if params['pageno'] > 1:
+        params['method'] = 'POST'
+        params['data']['q'] = query
+        params['data']['s'] = offset
+        params['data']['dc'] = 30
+        params['data']['nextParams'] = ''
+        params['data']['v'] = 'l'
+        params['data']['o'] = 'json'
+        params['data']['api'] = '/d.js'
+        if params['time_range'] in time_range_dict:
+            params['data']['df'] = time_range_dict[params['time_range']]
+        if region_code:
+            params['data']['kl'] = region_code
     else:
-        params['url'] = url.format(
-            query=urlencode({'q': query}), offset=offset, dc_param=offset)
+        if region_code:
+            params['url'] = url.format(
+                query=urlencode({'q': query, 'kl': region_code}), offset=offset, dc_param=offset)
+        else:
+            params['url'] = url.format(
+                query=urlencode({'q': query}), offset=offset, dc_param=offset)
-    if params['time_range'] in time_range_dict:
-        params['url'] += time_range_url.format(range=time_range_dict[params['time_range']])
+        if params['time_range'] in time_range_dict:
+            params['url'] += time_range_url.format(range=time_range_dict[params['time_range']])
     return params
@@ -91,17 +106,19 @@ def response(resp):
     doc = fromstring(resp.text)
     # parse results
-    for r in doc.xpath(result_xpath):
+    for i, r in enumerate(eval_xpath(doc, result_xpath)):
+        if i >= 30:
+            break
         try:
-            res_url = r.xpath(url_xpath)[-1]
+            res_url = eval_xpath(r, url_xpath)[-1]
         except:
             continue
         if not res_url:
             continue
-        title = extract_text(r.xpath(title_xpath))
-        content = extract_text(r.xpath(content_xpath))
+        title = extract_text(eval_xpath(r, title_xpath))
+        content = extract_text(eval_xpath(r, content_xpath))
         # append result
         results.append({'title': title,

View File

@@ -1,3 +1,14 @@
+"""
+DuckDuckGo (definitions)
+- `Instant Answer API`_
+- `DuckDuckGo query`_
+.. _Instant Answer API: https://duckduckgo.com/api
+.. _DuckDuckGo query: https://api.duckduckgo.com/?q=DuckDuckGo&format=json&pretty=1
+"""
 import json
 from lxml import html
 from re import compile
@@ -25,7 +36,8 @@ def result_to_text(url, text, htmlResult):
 def request(query, params):
     params['url'] = url.format(query=urlencode({'q': query}))
     language = match_language(params['language'], supported_languages, language_aliases)
-    params['headers']['Accept-Language'] = language.split('-')[0]
+    language = language.split('-')[0]
+    params['headers']['Accept-Language'] = language
     return params
@@ -43,8 +55,9 @@ def response(resp):
     # add answer if there is one
     answer = search_res.get('Answer', '')
-    if answer != '':
-        results.append({'answer': html_to_text(answer)})
+    if answer:
+        if search_res.get('AnswerType', '') not in ['calc']:
+            results.append({'answer': html_to_text(answer)})
     # add infobox
     if 'Definition' in search_res:

View File

@@ -11,6 +11,7 @@
 from lxml import html, etree
 import re
 from searx.engines.xpath import extract_text
+from searx.utils import eval_xpath
 from searx.url_utils import quote, urljoin
 from searx import logger
@@ -52,9 +53,9 @@ def response(resp):
     dom = html.fromstring(resp.text)
     try:
-        number_of_results_string = re.sub('[^0-9]', '', dom.xpath(
-            '//a[@class="active" and contains(@href,"/suchen/dudenonline")]/span/text()')[0]
-        )
+        number_of_results_string =\
+            re.sub('[^0-9]', '',
+                   eval_xpath(dom, '//a[@class="active" and contains(@href,"/suchen/dudenonline")]/span/text()')[0])
         results.append({'number_of_results': int(number_of_results_string)})
@@ -62,12 +63,12 @@ def response(resp):
         logger.debug("Couldn't read number of results.")
         pass
-    for result in dom.xpath('//section[not(contains(@class, "essay"))]'):
+    for result in eval_xpath(dom, '//section[not(contains(@class, "essay"))]'):
         try:
-            url = result.xpath('.//h2/a')[0].get('href')
+            url = eval_xpath(result, './/h2/a')[0].get('href')
             url = urljoin(base_url, url)
-            title = result.xpath('string(.//h2/a)').strip()
-            content = extract_text(result.xpath('.//p'))
+            title = eval_xpath(result, 'string(.//h2/a)').strip()
+            content = extract_text(eval_xpath(result, './/p'))
             # append result
             results.append({'url': url,
                             'title': title,

View File

@@ -18,13 +18,13 @@ categories = ['files']
 paging = True
 # search-url
-base_url = 'https://f-droid.org/'
-search_url = base_url + 'repository/browse/?{query}'
+base_url = 'https://search.f-droid.org/'
+search_url = base_url + '?{query}'
 # do search-request
 def request(query, params):
-    query = urlencode({'fdfilter': query, 'fdpage': params['pageno']})
+    query = urlencode({'q': query, 'page': params['pageno'], 'lang': ''})
     params['url'] = search_url.format(query=query)
     return params
@@ -35,17 +35,16 @@ def response(resp):
     dom = html.fromstring(resp.text)
-    for app in dom.xpath('//div[@id="appheader"]'):
-        url = app.xpath('./ancestor::a/@href')[0]
-        title = app.xpath('./p/span/text()')[0]
-        img_src = app.xpath('.//img/@src')[0]
-        content = extract_text(app.xpath('./p')[0])
-        content = content.replace(title, '', 1).strip()
-        results.append({'url': url,
-                        'title': title,
-                        'content': content,
-                        'img_src': img_src})
+    for app in dom.xpath('//a[@class="package-header"]'):
+        app_url = app.xpath('./@href')[0]
+        app_title = extract_text(app.xpath('./div/h4[@class="package-name"]/text()'))
+        app_content = extract_text(app.xpath('./div/div/span[@class="package-summary"]')).strip() \
+            + ' - ' + extract_text(app.xpath('./div/div/span[@class="package-license"]')).strip()
+        app_img_src = app.xpath('./img[@class="package-icon"]/@src')[0]
+        results.append({'url': app_url,
+                        'title': app_title,
+                        'content': app_content,
+                        'img_src': app_img_src})
     return results

View File

@@ -16,7 +16,8 @@ from json import loads
 from time import time
 import re
 from searx.engines import logger
-from searx.url_utils import urlencode, unquote
+from searx.url_utils import urlencode
+from searx.utils import ecma_unescape, html_to_text
 logger = logger.getChild('flickr-noapi')
@@ -75,11 +76,10 @@ def response(resp):
     for index in legend:
         photo = model_export['main'][index[0]][int(index[1])][index[2]][index[3]][int(index[4])]
-        author = unquote(photo.get('realname', ''))
-        source = unquote(photo.get('username', '')) + ' @ Flickr'
-        title = unquote(photo.get('title', ''))
-        content = unquote(photo.get('description', ''))
+        author = ecma_unescape(photo.get('realname', ''))
+        source = ecma_unescape(photo.get('username', '')) + ' @ Flickr'
+        title = ecma_unescape(photo.get('title', ''))
+        content = html_to_text(ecma_unescape(photo.get('description', '')))
         img_src = None
         # From the biggest to the lowest format
         for image_size in image_sizes:

View File

@@ -10,7 +10,10 @@
 @parse       url, title, content, thumbnail, img_src
 """
-from cgi import escape
+try:
+    from cgi import escape
+except:
+    from html import escape
 from lxml import html
 from searx.engines.xpath import extract_text
 from searx.url_utils import urljoin, urlencode

View File

@@ -14,7 +14,9 @@ import random
 from json import loads
 from time import time
 from lxml.html import fromstring
+from searx.poolrequests import get
 from searx.url_utils import urlencode
+from searx.utils import eval_xpath
 # engine dependent config
 categories = ['general']
@@ -30,13 +32,9 @@ search_string = 'search?{query}'\
     '&c=main'\
     '&s={offset}'\
     '&format=json'\
-    '&qh=0'\
-    '&qlang={lang}'\
+    '&langcountry={lang}'\
     '&ff={safesearch}'\
-    '&rxiec={rxieu}'\
-    '&ulse={ulse}'\
-    '&rand={rxikd}'  # current unix timestamp
+    '&rand={rxikd}'
 # specific xpath variables
 results_xpath = '//response//result'
 url_xpath = './/url'
@@ -45,9 +43,26 @@ content_xpath = './/sum'
 supported_languages_url = 'https://gigablast.com/search?&rxikd=1'
+extra_param = ''  # gigablast requires a random extra parameter
+# which can be extracted from the source code of the search page
+
+
+def parse_extra_param(text):
+    global extra_param
+    param_lines = [x for x in text.splitlines() if x.startswith('var url=') or x.startswith('url=url+')]
+    extra_param = ''
+    for l in param_lines:
+        extra_param += l.split("'")[1]
+    extra_param = extra_param.split('&')[-1]
+
+
+def init(engine_settings=None):
+    parse_extra_param(get('http://gigablast.com/search?c=main&qlangcountry=en-us&q=south&s=10').text)
 # do search-request
 def request(query, params):
+    print("EXTRAPARAM:", extra_param)
     offset = (params['pageno'] - 1) * number_of_results
     if params['language'] == 'all':
@@ -66,13 +81,11 @@ def request(query, params):
     search_path = search_string.format(query=urlencode({'q': query}),
                                        offset=offset,
                                        number_of_results=number_of_results,
-                                       rxikd=int(time() * 1000),
-                                       rxieu=random.randint(1000000000, 9999999999),
-                                       ulse=random.randint(100000000, 999999999),
                                        lang=language,
+                                       rxikd=int(time() * 1000),
                                        safesearch=safesearch)
-    params['url'] = base_url + search_path
+    params['url'] = base_url + search_path + '&' + extra_param
     return params
@@ -82,7 +95,11 @@ def response(resp):
     results = []
     # parse results
-    response_json = loads(resp.text)
+    try:
+        response_json = loads(resp.text)
+    except:
+        parse_extra_param(resp.text)
+        raise Exception('extra param expired, please reload')
     for result in response_json['results']:
         # append result
@@ -98,9 +115,9 @@ def response(resp):
 def _fetch_supported_languages(resp):
     supported_languages = []
     dom = fromstring(resp.text)
-    links = dom.xpath('//span[@id="menu2"]/a')
+    links = eval_xpath(dom, '//span[@id="menu2"]/a')
     for link in links:
-        href = link.xpath('./@href')[0].split('lang%3A')
+        href = eval_xpath(link, './@href')[0].split('lang%3A')
         if len(href) == 2:
             code = href[1].split('_')
             if len(code) == 2:
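Editor's note on parse_extra_param above: it stitches Gigablast's random request parameter together from inline JavaScript served on the search page. A self-contained sketch of that extraction; the script lines below are made up for illustration:

# hypothetical inline-script text similar to what the search page serves
text = "var url='/search?c=main&q=test';\nurl=url+'&nsab=0.1234';"

param_lines = [x for x in text.splitlines()
               if x.startswith('var url=') or x.startswith('url=url+')]
extra_param = ''
for line in param_lines:
    extra_param += line.split("'")[1]   # first single-quoted chunk of each line
extra_param = extra_param.split('&')[-1]  # keep only the trailing &-separated piece
print(extra_param)  # nsab=0.1234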

View File

@@ -14,7 +14,7 @@ from lxml import html, etree
 from searx.engines.xpath import extract_text, extract_url
 from searx import logger
 from searx.url_utils import urlencode, urlparse, parse_qsl
-from searx.utils import match_language
+from searx.utils import match_language, eval_xpath
 logger = logger.getChild('google engine')
@@ -107,13 +107,12 @@ images_path = '/images'
 supported_languages_url = 'https://www.google.com/preferences?#languages'
 # specific xpath variables
-results_xpath = '//div[@class="g"]'
-url_xpath = './/h3/a/@href'
-title_xpath = './/h3'
-content_xpath = './/span[@class="st"]'
-content_misc_xpath = './/div[@class="f slp"]'
-suggestion_xpath = '//p[@class="_Bmc"]'
-spelling_suggestion_xpath = '//a[@class="spell"]'
+results_xpath = '//div[contains(@class, "ZINbbc")]'
+url_xpath = './/div[@class="kCrYT"][1]/a/@href'
+title_xpath = './/div[@class="kCrYT"][1]/a/div[1]'
+content_xpath = './/div[@class="kCrYT"][2]//div[contains(@class, "BNeawe")]//div[contains(@class, "BNeawe")]'
+suggestion_xpath = '//div[contains(@class, "ZINbbc")][last()]//div[@class="rVLSBd"]/a//div[contains(@class, "BNeawe")]'
+spelling_suggestion_xpath = '//div[@id="scc"]//a'
 # map : detail location
 map_address_xpath = './/div[@class="s"]//table//td[2]/span/text()'
@@ -156,7 +155,7 @@ def parse_url(url_string, google_hostname):
 # returns extract_text on the first result selected by the xpath or None
 def extract_text_from_dom(result, xpath):
-    r = result.xpath(xpath)
+    r = eval_xpath(result, xpath)
     if len(r) > 0:
         return extract_text(r[0])
     return None
@@ -199,9 +198,6 @@ def request(query, params):
     params['headers']['Accept-Language'] = language + ',' + language + '-' + country
     params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
-    # Force Internet Explorer 12 user agent to avoid loading the new UI that Searx can't parse
-    params['headers']['User-Agent'] = "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)"
     params['google_hostname'] = google_hostname
     return params
@@ -226,21 +222,21 @@ def response(resp):
     # convert the text to dom
     dom = html.fromstring(resp.text)
-    instant_answer = dom.xpath('//div[@id="_vBb"]//text()')
+    instant_answer = eval_xpath(dom, '//div[@id="_vBb"]//text()')
     if instant_answer:
         results.append({'answer': u' '.join(instant_answer)})
     try:
-        results_num = int(dom.xpath('//div[@id="resultStats"]//text()')[0]
+        results_num = int(eval_xpath(dom, '//div[@id="resultStats"]//text()')[0]
                           .split()[1].replace(',', ''))
         results.append({'number_of_results': results_num})
     except:
         pass
     # parse results
-    for result in dom.xpath(results_xpath):
+    for result in eval_xpath(dom, results_xpath):
         try:
-            title = extract_text(result.xpath(title_xpath)[0])
-            url = parse_url(extract_url(result.xpath(url_xpath), google_url), google_hostname)
+            title = extract_text(eval_xpath(result, title_xpath)[0])
+            url = parse_url(extract_url(eval_xpath(result, url_xpath), google_url), google_hostname)
             parsed_url = urlparse(url, google_hostname)
             # map result
@@ -249,7 +245,7 @@ def response(resp):
                 continue
             # if parsed_url.path.startswith(maps_path) or parsed_url.netloc.startswith(map_hostname_start):
             #     print "yooooo"*30
-            #     x = result.xpath(map_near)
+            #     x = eval_xpath(result, map_near)
             #     if len(x) > 0:
             #         # map : near the location
             #         results = results + parse_map_near(parsed_url, x, google_hostname)
@@ -273,9 +269,7 @@ def response(resp):
                 content = extract_text_from_dom(result, content_xpath)
                 if content is None:
                     continue
-                content_misc = extract_text_from_dom(result, content_misc_xpath)
-                if content_misc is not None:
-                    content = content_misc + "<br />" + content
                 # append result
                 results.append({'url': url,
                                 'title': title,
@@ -286,11 +280,11 @@ def response(resp):
             continue
     # parse suggestion
-    for suggestion in dom.xpath(suggestion_xpath):
+    for suggestion in eval_xpath(dom, suggestion_xpath):
         # append suggestion
         results.append({'suggestion': extract_text(suggestion)})
-    for correction in dom.xpath(spelling_suggestion_xpath):
+    for correction in eval_xpath(dom, spelling_suggestion_xpath):
         results.append({'correction': extract_text(correction)})
     # return results
@@ -299,9 +293,9 @@ def response(resp):
 def parse_images(result, google_hostname):
     results = []
-    for image in result.xpath(images_xpath):
-        url = parse_url(extract_text(image.xpath(image_url_xpath)[0]), google_hostname)
-        img_src = extract_text(image.xpath(image_img_src_xpath)[0])
+    for image in eval_xpath(result, images_xpath):
+        url = parse_url(extract_text(eval_xpath(image, image_url_xpath)[0]), google_hostname)
+        img_src = extract_text(eval_xpath(image, image_img_src_xpath)[0])
         # append result
         results.append({'url': url,
@@ -388,10 +382,10 @@ def attributes_to_html(attributes):
 def _fetch_supported_languages(resp):
     supported_languages = {}
     dom = html.fromstring(resp.text)
-    options = dom.xpath('//*[@id="langSec"]//input[@name="lr"]')
+    options = eval_xpath(dom, '//*[@id="langSec"]//input[@name="lr"]')
     for option in options:
-        code = option.xpath('./@value')[0].split('_')[-1]
-        name = option.xpath('./@data-name')[0].title()
+        code = eval_xpath(option, './@value')[0].split('_')[-1]
+        name = eval_xpath(option, './@data-name')[0].title()
         supported_languages[code] = {"name": name}
     return supported_languages

View File

@@ -70,11 +70,21 @@ def response(resp):
         try:
             metadata = loads(result)
-            img_format = "{0} {1}x{2}".format(metadata['ity'], str(metadata['ow']), str(metadata['oh']))
-            source = "{0} ({1})".format(metadata['st'], metadata['isu'])
+            img_format = metadata.get('ity', '')
+            img_width = metadata.get('ow', '')
+            img_height = metadata.get('oh', '')
+            if img_width and img_height:
+                img_format += " {0}x{1}".format(img_width, img_height)
+            source = metadata.get('st', '')
+            source_url = metadata.get('isu', '')
+            if source_url:
+                source += " ({0})".format(source_url)
             results.append({'url': metadata['ru'],
                             'title': metadata['pt'],
-                            'content': metadata['s'],
+                            'content': metadata.get('s', ''),
                             'source': source,
                             'img_format': img_format,
                             'thumbnail_src': metadata['tu'],

View File

@@ -75,15 +75,17 @@ def response(resp):
         # get thumbnails
         script = str(dom.xpath('//script[contains(., "_setImagesSrc")]')[0].text)
-        id = result.xpath('.//div[@class="s"]//img/@id')[0]
-        thumbnails_data = re.findall('s=\'(.*?)(?:\\\\[a-z,1-9,\\\\]+\'|\')\;var ii=\[(?:|[\'vidthumb\d+\',]+)\'' + id,
-                                     script)
-        tmp = []
-        if len(thumbnails_data) != 0:
-            tmp = re.findall('(data:image/jpeg;base64,[a-z,A-Z,0-9,/,\+]+)', thumbnails_data[0])
-        thumbnail = ''
-        if len(tmp) != 0:
-            thumbnail = tmp[-1]
+        ids = result.xpath('.//div[@class="s"]//img/@id')
+        if len(ids) > 0:
+            thumbnails_data = \
+                re.findall('s=\'(.*?)(?:\\\\[a-z,1-9,\\\\]+\'|\')\;var ii=\[(?:|[\'vidthumb\d+\',]+)\'' + ids[0],
+                           script)
+            tmp = []
+            if len(thumbnails_data) != 0:
+                tmp = re.findall('(data:image/jpeg;base64,[a-z,A-Z,0-9,/,\+]+)', thumbnails_data[0])
+            thumbnail = ''
+            if len(tmp) != 0:
+                thumbnail = tmp[-1]
         # append result
         results.append({'url': url,

100
searx/engines/invidious.py Normal file
View File

@@ -0,0 +1,100 @@
# Invidious (Videos)
#
# @website https://invidio.us/
# @provide-api yes (https://github.com/omarroth/invidious/wiki/API)
#
# @using-api yes
# @results JSON
# @stable yes
# @parse url, title, content, publishedDate, thumbnail, embedded
from searx.url_utils import quote_plus
from dateutil import parser
import time
# engine dependent config
categories = ["videos", "music"]
paging = True
language_support = True
time_range_support = True
# search-url
base_url = "https://invidio.us/"
# do search-request
def request(query, params):
time_range_dict = {
"day": "today",
"week": "week",
"month": "month",
"year": "year",
}
search_url = base_url + "api/v1/search?q={query}"
params["url"] = search_url.format(
query=quote_plus(query)
) + "&page={pageno}".format(pageno=params["pageno"])
if params["time_range"] in time_range_dict:
params["url"] += "&date={timerange}".format(
timerange=time_range_dict[params["time_range"]]
)
if params["language"] != "all":
lang = params["language"].split("-")
if len(lang) == 2:
params["url"] += "&range={lrange}".format(lrange=lang[1])
return params
# get response from search-request
def response(resp):
results = []
search_results = resp.json()
embedded_url = (
'<iframe width="540" height="304" '
+ 'data-src="'
+ base_url
+ 'embed/{videoid}" '
+ 'frameborder="0" allowfullscreen></iframe>'
)
base_invidious_url = base_url + "watch?v="
for result in search_results:
rtype = result.get("type", None)
if rtype == "video":
videoid = result.get("videoId", None)
if not videoid:
continue
url = base_invidious_url + videoid
embedded = embedded_url.format(videoid=videoid)
thumbs = result.get("videoThumbnails", [])
thumb = next(
(th for th in thumbs if th["quality"] == "sddefault"), None
)
if thumb:
thumbnail = thumb.get("url", "")
else:
thumbnail = ""
publishedDate = parser.parse(
time.ctime(result.get("published", 0))
)
results.append(
{
"url": url,
"title": result.get("title", ""),
"content": result.get("description", ""),
"template": "videos.html",
"publishedDate": publishedDate,
"embedded": embedded,
"thumbnail": thumbnail,
}
)
return results
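Editor's note: the new invidious engine builds its whole request as URL parameters. A minimal standalone sketch of what request() produces for hypothetical inputs (quote_plus from the standard library stands in for searx.url_utils):

try:
    from urllib.parse import quote_plus  # Python 3
except ImportError:
    from urllib import quote_plus        # Python 2

base_url = "https://invidio.us/"
time_range_dict = {"day": "today", "week": "week", "month": "month", "year": "year"}

# hypothetical searx request parameters
params = {"pageno": 2, "time_range": "week", "language": "en-US"}
query = "free software"

url = base_url + "api/v1/search?q={0}".format(quote_plus(query))
url += "&page={0}".format(params["pageno"])
if params["time_range"] in time_range_dict:
    url += "&date={0}".format(time_range_dict[params["time_range"]])
if params["language"] != "all":
    lang = params["language"].split("-")
    if len(lang) == 2:
        url += "&range={0}".format(lang[1])
print(url)
# https://invidio.us/api/v1/search?q=free+software&page=2&date=week&range=US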

View File

@@ -24,7 +24,7 @@ result_base_url = 'https://openstreetmap.org/{osm_type}/{osm_id}'
 # do search-request
 def request(query, params):
-    params['url'] = base_url + search_string.format(query=query)
+    params['url'] = base_url + search_string.format(query=query.decode('utf-8'))
     return params

View File

@@ -50,6 +50,7 @@ def request(query, params):
     language = match_language(params['language'], supported_languages, language_aliases)
     params['url'] += '&locale=' + language.replace('-', '_').lower()
+    params['headers']['User-Agent'] = 'Mozilla/5.0 (X11; Linux x86_64; rv:69.0) Gecko/20100101 Firefox/69.0'
     return params

78
searx/engines/seedpeer.py Normal file
View File

@@ -0,0 +1,78 @@
# Seedpeer (Videos, Music, Files)
#
# @website https://seedpeer.me
# @provide-api no (nothing found)
#
# @using-api no
# @results HTML (using search portal)
# @stable yes (HTML can change)
# @parse url, title, content, seed, leech, magnetlink
from lxml import html
from json import loads
from operator import itemgetter
from searx.url_utils import quote, urljoin
from searx.engines.xpath import extract_text
url = 'https://seedpeer.me/'
search_url = url + 'search/{search_term}?page={page_no}'
torrent_file_url = url + 'torrent/{torrent_hash}'
# specific xpath variables
script_xpath = '//script[@type="text/javascript"][not(@src)]'
torrent_xpath = '(//table)[2]/tbody/tr'
link_xpath = '(./td)[1]/a/@href'
age_xpath = '(./td)[2]'
size_xpath = '(./td)[3]'
# do search-request
def request(query, params):
params['url'] = search_url.format(search_term=quote(query),
page_no=params['pageno'])
return params
# get response from search-request
def response(resp):
results = []
dom = html.fromstring(resp.text)
result_rows = dom.xpath(torrent_xpath)
try:
script_element = dom.xpath(script_xpath)[0]
json_string = script_element.text[script_element.text.find('{'):]
torrents_json = loads(json_string)
except:
return []
# parse results
for torrent_row, torrent_json in zip(result_rows, torrents_json['data']['list']):
title = torrent_json['name']
seed = int(torrent_json['seeds'])
leech = int(torrent_json['peers'])
size = int(torrent_json['size'])
torrent_hash = torrent_json['hash']
torrentfile = torrent_file_url.format(torrent_hash=torrent_hash)
magnetlink = 'magnet:?xt=urn:btih:{}'.format(torrent_hash)
age = extract_text(torrent_row.xpath(age_xpath))
link = torrent_row.xpath(link_xpath)[0]
href = urljoin(url, link)
# append result
results.append({'url': href,
'title': title,
'content': age,
'seed': seed,
'leech': leech,
'filesize': size,
'torrentfile': torrentfile,
'magnetlink': magnetlink,
'template': 'torrent.html'})
# return results sorted by seeder
return sorted(results, key=itemgetter('seed'), reverse=True)

View File

@@ -51,7 +51,9 @@ def get_client_id():
     if response.ok:
         tree = html.fromstring(response.content)
-        script_tags = tree.xpath("//script[contains(@src, '/assets/app')]")
+        # script_tags has been moved from /assets/app/ to /assets/ path.  I
+        # found client_id in https://a-v2.sndcdn.com/assets/49-a0c01933-3.js
+        script_tags = tree.xpath("//script[contains(@src, '/assets/')]")
         app_js_urls = [script_tag.get('src') for script_tag in script_tags if script_tag is not None]
         # extracts valid app_js urls from soundcloud.com content
@@ -66,7 +68,7 @@ def get_client_id():
     return ""
-def init():
+def init(engine_settings=None):
     global guest_client_id
     # api-key
     guest_client_id = get_client_id()

View File

@@ -15,6 +15,8 @@ from dateutil import parser
 from datetime import datetime, timedelta
 import re
 from searx.engines.xpath import extract_text
+from searx.languages import language_codes
+from searx.utils import eval_xpath
 # engine dependent config
 categories = ['general']
@@ -22,7 +24,7 @@ categories = ['general']
 # (probably the parameter qid), require
 # storing of qid's between mulitble search-calls
-# paging = False
+paging = True
 language_support = True
 # search-url
@@ -32,23 +34,32 @@ search_url = base_url + 'do/search'
 # specific xpath variables
 # ads xpath //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
 # not ads: div[@class="result"] are the direct childs of div[@id="results"]
-results_xpath = '//li[contains(@class, "search-result") and contains(@class, "search-item")]'
-link_xpath = './/h3/a'
-content_xpath = './p[@class="search-item__body"]'
+results_xpath = '//div[@class="w-gl__result"]'
+link_xpath = './/a[@class="w-gl__result-title"]'
+content_xpath = './/p[@class="w-gl__description"]'
 # do search-request
 def request(query, params):
-    offset = (params['pageno'] - 1) * 10
     params['url'] = search_url
     params['method'] = 'POST'
-    params['data'] = {'query': query,
-                      'startat': offset}
+    params['data'] = {
+        'query': query,
+        'page': params['pageno'],
+        'cat': 'web',
+        'cmd': 'process_search',
+        'engine0': 'v1all',
+    }
     # set language if specified
     if params['language'] != 'all':
-        params['data']['with_language'] = ('lang_' + params['language'].split('-')[0])
+        language = 'english'
+        for lc, _, _, lang in language_codes:
+            if lc == params['language']:
+                language = lang
+        params['data']['language'] = language
+        params['data']['lui'] = language
     return params
@@ -60,8 +71,8 @@ def response(resp):
     dom = html.fromstring(resp.text)
     # parse results
-    for result in dom.xpath(results_xpath):
-        links = result.xpath(link_xpath)
+    for result in eval_xpath(dom, results_xpath):
+        links = eval_xpath(result, link_xpath)
         if not links:
             continue
         link = links[0]
@@ -77,8 +88,8 @@ def response(resp):
         title = extract_text(link)
-        if result.xpath(content_xpath):
-            content = extract_text(result.xpath(content_xpath))
+        if eval_xpath(result, content_xpath):
+            content = extract_text(eval_xpath(result, content_xpath))
         else:
             content = ''

View File

@@ -16,7 +16,7 @@ from searx.poolrequests import get
 from searx.engines.xpath import extract_text
 from searx.engines.wikipedia import _fetch_supported_languages, supported_languages_url
 from searx.url_utils import urlencode
-from searx.utils import match_language
+from searx.utils import match_language, eval_xpath
 from json import loads
 from lxml.html import fromstring
@@ -57,22 +57,6 @@ language_fallback_xpath = '//sup[contains(@class,"wb-language-fallback-indicator
 calendar_name_xpath = './/sup[contains(@class,"wb-calendar-name")]'
 media_xpath = value_xpath + '//div[contains(@class,"commons-media-caption")]//a'
-# xpath_cache
-xpath_cache = {}
-
-
-def get_xpath(xpath_str):
-    result = xpath_cache.get(xpath_str, None)
-    if not result:
-        result = etree.XPath(xpath_str)
-        xpath_cache[xpath_str] = result
-    return result
-
-
-def eval_xpath(element, xpath_str):
-    xpath = get_xpath(xpath_str)
-    return xpath(element)
-
 def get_id_cache(result):
     id_cache = {}
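Editor's note: the helper removed here is not gone; this commit centralizes it in searx.utils, which the other touched engines now import eval_xpath from. A minimal self-contained sketch of the caching idea, mirroring the removed block:

from lxml import etree, html

xpath_cache = {}

def get_xpath(xpath_str):
    # compile each XPath expression once, then reuse the compiled object
    result = xpath_cache.get(xpath_str, None)
    if not result:
        result = etree.XPath(xpath_str)
        xpath_cache[xpath_str] = result
    return result

def eval_xpath(element, xpath_str):
    return get_xpath(xpath_str)(element)

dom = html.fromstring('<ul><li>a</li><li>b</li></ul>')
print([e.text for e in eval_xpath(dom, '//li')])  # ['a', 'b']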

View File

@@ -21,7 +21,8 @@ search_url = base_url + u'w/api.php?'\
     'action=query'\
     '&format=json'\
     '&{query}'\
-    '&prop=extracts|pageimages'\
+    '&prop=extracts|pageimages|pageprops'\
+    '&ppprop=disambiguation'\
     '&exintro'\
     '&explaintext'\
     '&pithumbsize=300'\
@@ -79,12 +80,15 @@ def response(resp):
     # wikipedia article's unique id
     # first valid id is assumed to be the requested article
+    if 'pages' not in search_result['query']:
+        return results
+
     for article_id in search_result['query']['pages']:
         page = search_result['query']['pages'][article_id]
         if int(article_id) > 0:
             break
-    if int(article_id) < 0:
+    if int(article_id) < 0 or 'disambiguation' in page.get('pageprops', {}):
         return []
     title = page.get('title')
@@ -96,6 +100,7 @@ def response(resp):
     extract = page.get('extract')
     summary = extract_first_paragraph(extract, title, image)
+    summary = summary.replace('() ', '')
     # link to wikipedia article
     wikipedia_link = base_url.format(language=url_lang(resp.search_params['language'])) \

View File

@@ -55,7 +55,7 @@ def obtain_token():
     return token
-def init():
+def init(engine_settings=None):
     obtain_token()

View File

@@ -11,8 +11,8 @@
 """
 from lxml import html
-import re
 from searx.url_utils import urlencode, urljoin
+from searx.engines.xpath import extract_text
 # engine dependent config
 categories = ['images']
@@ -34,41 +34,18 @@ def request(query, params):
 def response(resp):
     results = []
-    # get links from result-text
-    regex = re.compile('(</a>|<a)')
-    results_parts = re.split(regex, resp.text)
-    cur_element = ''
-    # iterate over link parts
-    for result_part in results_parts:
-        # processed start and end of link
-        if result_part == '<a':
-            cur_element = result_part
-            continue
-        elif result_part != '</a>':
-            cur_element += result_part
-            continue
-        cur_element += result_part
-        # fix xml-error
-        cur_element = cur_element.replace('"></a>', '"/></a>')
-        dom = html.fromstring(cur_element)
-        link = dom.xpath('//a')[0]
+    dom = html.fromstring(resp.text)
+    for res in dom.xpath('//div[@class="List-item MainListing"]'):
+        # processed start and end of link
+        link = res.xpath('//a')[0]
         url = urljoin(base_url, link.attrib.get('href'))
-        title = link.attrib.get('title', '')
-        thumbnail_src = urljoin(base_url, link.xpath('.//img')[0].attrib['src'])
+        title = extract_text(link)
+        thumbnail_src = urljoin(base_url, res.xpath('.//img')[0].attrib['src'])
         # TODO: get image with higher resolution
         img_src = thumbnail_src
-        # check if url is showing to a photo
-        if '/photo/' not in url:
-            continue
         # append result
         results.append({'url': url,
                         'title': title,

View File

@@ -1,6 +1,6 @@
 from lxml import html
 from lxml.etree import _ElementStringResult, _ElementUnicodeResult
-from searx.utils import html_to_text
+from searx.utils import html_to_text, eval_xpath
 from searx.url_utils import unquote, urlencode, urljoin, urlparse
 search_url = None
@@ -104,15 +104,15 @@ def response(resp):
     results = []
     dom = html.fromstring(resp.text)
     if results_xpath:
-        for result in dom.xpath(results_xpath):
-            url = extract_url(result.xpath(url_xpath), search_url)
-            title = extract_text(result.xpath(title_xpath))
-            content = extract_text(result.xpath(content_xpath))
+        for result in eval_xpath(dom, results_xpath):
+            url = extract_url(eval_xpath(result, url_xpath), search_url)
+            title = extract_text(eval_xpath(result, title_xpath))
+            content = extract_text(eval_xpath(result, content_xpath))
             tmp_result = {'url': url, 'title': title, 'content': content}
             # add thumbnail if available
             if thumbnail_xpath:
-                thumbnail_xpath_result = result.xpath(thumbnail_xpath)
+                thumbnail_xpath_result = eval_xpath(result, thumbnail_xpath)
                 if len(thumbnail_xpath_result) > 0:
                     tmp_result['img_src'] = extract_url(thumbnail_xpath_result, search_url)
@@ -120,14 +120,14 @@ def response(resp):
     else:
         for url, title, content in zip(
             (extract_url(x, search_url) for
-             x in dom.xpath(url_xpath)),
-            map(extract_text, dom.xpath(title_xpath)),
-            map(extract_text, dom.xpath(content_xpath))
+             x in eval_xpath(dom, url_xpath)),
+            map(extract_text, eval_xpath(dom, title_xpath)),
+            map(extract_text, eval_xpath(dom, content_xpath))
         ):
             results.append({'url': url, 'title': title, 'content': content})
     if not suggestion_xpath:
         return results
-    for suggestion in dom.xpath(suggestion_xpath):
+    for suggestion in eval_xpath(dom, suggestion_xpath):
         results.append({'suggestion': extract_text(suggestion)})
     return results

View File

@@ -14,7 +14,7 @@
 from lxml import html
 from searx.engines.xpath import extract_text, extract_url
 from searx.url_utils import unquote, urlencode
-from searx.utils import match_language
+from searx.utils import match_language, eval_xpath
 # engine dependent config
 categories = ['general']
@@ -109,21 +109,21 @@ def response(resp):
     dom = html.fromstring(resp.text)
     try:
-        results_num = int(dom.xpath('//div[@class="compPagination"]/span[last()]/text()')[0]
+        results_num = int(eval_xpath(dom, '//div[@class="compPagination"]/span[last()]/text()')[0]
                           .split()[0].replace(',', ''))
         results.append({'number_of_results': results_num})
     except:
         pass
     # parse results
-    for result in dom.xpath(results_xpath):
+    for result in eval_xpath(dom, results_xpath):
         try:
-            url = parse_url(extract_url(result.xpath(url_xpath), search_url))
-            title = extract_text(result.xpath(title_xpath)[0])
+            url = parse_url(extract_url(eval_xpath(result, url_xpath), search_url))
+            title = extract_text(eval_xpath(result, title_xpath)[0])
         except:
             continue
-        content = extract_text(result.xpath(content_xpath)[0])
+        content = extract_text(eval_xpath(result, content_xpath)[0])
         # append result
         results.append({'url': url,
@@ -131,7 +131,7 @@ def response(resp):
                         'content': content})
     # if no suggestion found, return results
-    suggestions = dom.xpath(suggestion_xpath)
+    suggestions = eval_xpath(dom, suggestion_xpath)
     if not suggestions:
         return results
@@ -148,9 +148,9 @@ def response(resp):
 def _fetch_supported_languages(resp):
     supported_languages = []
     dom = html.fromstring(resp.text)
-    options = dom.xpath('//div[@id="yschlang"]/span/label/input')
+    options = eval_xpath(dom, '//div[@id="yschlang"]/span/label/input')
     for option in options:
-        code_parts = option.xpath('./@value')[0][5:].split('_')
+        code_parts = eval_xpath(option, './@value')[0][5:].split('_')
         if len(code_parts) == 2:
             code = code_parts[0] + '-' + code_parts[1].upper()
         else:

View File

@@ -67,12 +67,8 @@ def response(resp):
             if videoid is not None:
                 url = base_youtube_url + videoid
                 thumbnail = 'https://i.ytimg.com/vi/' + videoid + '/hqdefault.jpg'
-                title = video.get('title', {}).get('simpleText', videoid)
-                description_snippet = video.get('descriptionSnippet', {})
-                if 'runs' in description_snippet:
-                    content = reduce(lambda a, b: a + b.get('text', ''), description_snippet.get('runs'), '')
-                else:
-                    content = description_snippet.get('simpleText', '')
+                title = get_text_from_json(video.get('title', {}))
+                content = get_text_from_json(video.get('descriptionSnippet', {}))
                 embedded = embedded_url.format(videoid=videoid)
                 # append result
@@ -85,3 +81,10 @@ def response(resp):
     # return results
     return results
+
+
+def get_text_from_json(element):
+    if 'runs' in element:
+        return reduce(lambda a, b: a + b.get('text', ''), element.get('runs'), '')
+    else:
+        return element.get('simpleText', '')
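Editor's note: get_text_from_json covers the two text shapes YouTube's JSON uses, a list of 'runs' fragments or a plain 'simpleText'. A quick check with hypothetical payloads:

from functools import reduce

def get_text_from_json(element):
    if 'runs' in element:
        return reduce(lambda a, b: a + b.get('text', ''), element.get('runs'), '')
    else:
        return element.get('simpleText', '')

print(get_text_from_json({'simpleText': 'plain title'}))                    # plain title
print(get_text_from_json({'runs': [{'text': 'two '}, {'text': 'parts'}]}))  # two parts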

View File

@@ -28,5 +28,6 @@ class SearxParameterException(SearxException):
         else:
             message = 'Invalid value "' + value + '" for parameter ' + name
         super(SearxParameterException, self).__init__(message)
+        self.message = message
         self.parameter_name = name
         self.parameter_value = value

View File

@@ -225,6 +225,9 @@ def https_url_rewrite(result):
 def on_result(request, search, result):
+    if 'parsed_url' not in result:
+        return True
+
     if result['parsed_url'].scheme == 'http':
         https_url_rewrite(result)
     return True

View File

@@ -35,6 +35,9 @@ def get_doi_resolver(args, preference_doi_resolver):
 def on_result(request, search, result):
+    if 'parsed_url' not in result:
+        return True
+
     doi = extract_doi(result['parsed_url'])
     if doi and len(doi) < 50:
         for suffix in ('/', '.pdf', '/full', '/meta', '/abstract'):

View File

@@ -17,10 +17,10 @@ along with searx. If not, see < http://www.gnu.org/licenses/ >.
 from flask_babel import gettext
 import re
-from searx.url_utils import urlunparse
+from searx.url_utils import urlunparse, parse_qsl, urlencode
-regexes = {re.compile(r'utm_[^&]+&?'),
-           re.compile(r'(wkey|wemail)[^&]+&?'),
+regexes = {re.compile(r'utm_[^&]+'),
+           re.compile(r'(wkey|wemail)[^&]*'),
            re.compile(r'&$')}
 name = gettext('Tracker URL remover')
@@ -30,16 +30,23 @@ preference_section = 'privacy'
 def on_result(request, search, result):
+    if 'parsed_url' not in result:
+        return True
     query = result['parsed_url'].query
     if query == "":
         return True
+    parsed_query = parse_qsl(query)
-    for reg in regexes:
-        query = reg.sub('', query)
-    if query != result['parsed_url'].query:
-        result['parsed_url'] = result['parsed_url']._replace(query=query)
-        result['url'] = urlunparse(result['parsed_url'])
+    changes = 0
+    for i, (param_name, _) in enumerate(list(parsed_query)):
+        for reg in regexes:
+            if reg.match(param_name):
+                parsed_query.pop(i - changes)
+                changes += 1
+                result['parsed_url'] = result['parsed_url']._replace(query=urlencode(parsed_query))
+                result['url'] = urlunparse(result['parsed_url'])
+                break
     return True
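Editor's note: the rewritten plugin drops whole query parameters via parse_qsl/urlencode instead of regex-substituting the raw query string, which avoids corrupting adjacent parameters. A standalone sketch of the loop above:

import re
try:
    from urllib.parse import parse_qsl, urlencode  # Python 3; searx.url_utils wraps these
except ImportError:
    from urlparse import parse_qsl
    from urllib import urlencode

regexes = {re.compile(r'utm_[^&]+'), re.compile(r'(wkey|wemail)[^&]*')}

parsed_query = parse_qsl('q=searx&utm_source=feed&utm_medium=rss')
changes = 0
for i, (param_name, _) in enumerate(list(parsed_query)):
    for reg in regexes:
        if reg.match(param_name):
            parsed_query.pop(i - changes)  # the list shrinks after each pop
            changes += 1
            break
print(urlencode(parsed_query))  # q=searx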

View File

@@ -43,6 +43,7 @@ class RawTextQuery(object):
         self.query_parts = []
         self.engines = []
         self.languages = []
+        self.timeout_limit = None
         self.specific = False
     # parse query, if tags are set, which
@@ -69,6 +70,21 @@ class RawTextQuery(object):
                 self.query_parts.append(query_part)
                 continue
+            # this force the timeout
+            if query_part[0] == '<':
+                try:
+                    raw_timeout_limit = int(query_part[1:])
+                    if raw_timeout_limit < 100:
+                        # below 100, the unit is the second ( <3 = 3 seconds timeout )
+                        self.timeout_limit = float(raw_timeout_limit)
+                    else:
+                        # 100 or above, the unit is the millisecond ( <850 = 850 milliseconds timeout )
+                        self.timeout_limit = raw_timeout_limit / 1000.0
+                    parse_next = True
+                except ValueError:
+                    # error not reported to the user
+                    pass
             # this force a language
             if query_part[0] == ':':
                 lang = query_part[1:].lower().replace('_', '-')
@@ -161,14 +177,15 @@ class RawTextQuery(object):
 class SearchQuery(object):
     """container for all the search parameters (query, language, etc...)"""
-    def __init__(self, query, engines, categories, lang, safesearch, pageno, time_range):
+    def __init__(self, query, engines, categories, lang, safesearch, pageno, time_range, timeout_limit=None):
         self.query = query.encode('utf-8')
         self.engines = engines
         self.categories = categories
         self.lang = lang
         self.safesearch = safesearch
         self.pageno = pageno
-        self.time_range = time_range
+        self.time_range = None if time_range in ('', 'None', None) else time_range
+        self.timeout_limit = timeout_limit
     def __str__(self):
         return str(self.query) + ";" + str(self.engines)
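Editor's note: the new '<' prefix gives a per-query timeout; a bare number below 100 is read as seconds, 100 or above as milliseconds. A tiny sketch of that interpretation:

def parse_timeout(query_part):
    # mirrors the branch added above: '<3' -> 3.0 s, '<850' -> 0.85 s
    raw = int(query_part[1:])
    return float(raw) if raw < 100 else raw / 1000.0

print(parse_timeout('<3'))    # 3.0
print(parse_timeout('<850'))  # 0.85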

View File

@@ -67,8 +67,9 @@ def merge_two_infoboxes(infobox1, infobox2):
         for url2 in infobox2.get('urls', []):
             unique_url = True
-            for url1 in infobox1.get('urls', []):
-                if compare_urls(urlparse(url1.get('url', '')), urlparse(url2.get('url', ''))):
+            parsed_url2 = urlparse(url2.get('url', ''))
+            for url1 in urls1:
+                if compare_urls(urlparse(url1.get('url', '')), parsed_url2):
                     unique_url = False
                     break
             if unique_url:
@@ -188,8 +189,9 @@ class ResultContainer(object):
         add_infobox = True
         infobox_id = infobox.get('id', None)
         if infobox_id is not None:
+            parsed_url_infobox_id = urlparse(infobox_id)
             for existingIndex in self.infoboxes:
-                if compare_urls(urlparse(existingIndex.get('id', '')), urlparse(infobox_id)):
+                if compare_urls(urlparse(existingIndex.get('id', '')), parsed_url_infobox_id):
                     merge_two_infoboxes(existingIndex, infobox)
                     add_infobox = False
@@ -197,6 +199,13 @@ class ResultContainer(object):
             self.infoboxes.append(infobox)
     def _merge_result(self, result, position):
+        if 'url' in result:
+            self.__merge_url_result(result, position)
+            return
+
+        self.__merge_result_no_url(result, position)
+
+    def __merge_url_result(self, result, position):
         result['parsed_url'] = urlparse(result['url'])
         # if the result has no scheme, use http as default
@@ -210,51 +219,60 @@ class ResultContainer(object):
         if result.get('content'):
             result['content'] = WHITESPACE_REGEX.sub(' ', result['content'])
-        # check for duplicates
-        duplicated = False
+        duplicated = self.__find_duplicated_http_result(result)
+        if duplicated:
+            self.__merge_duplicated_http_result(duplicated, result, position)
+            return
+
+        # if there is no duplicate found, append result
+        result['positions'] = [position]
+        with RLock():
+            self._merged_results.append(result)
+
+    def __find_duplicated_http_result(self, result):
         result_template = result.get('template')
         for merged_result in self._merged_results:
+            if 'parsed_url' not in merged_result:
+                continue
             if compare_urls(result['parsed_url'], merged_result['parsed_url'])\
                and result_template == merged_result.get('template'):
                 if result_template != 'images.html':
                     # not an image, same template, same url : it's a duplicate
-                    duplicated = merged_result
-                    break
+                    return merged_result
                 else:
                     # it's an image
                     # it's a duplicate if the parsed_url, template and img_src are differents
                     if result.get('img_src', '') == merged_result.get('img_src', ''):
-                        duplicated = merged_result
-                        break
+                        return merged_result
+        return None
-        # merge duplicates together
-        if duplicated:
-            # using content with more text
-            if result_content_len(result.get('content', '')) >\
-                    result_content_len(duplicated.get('content', '')):
-                duplicated['content'] = result['content']
+    def __merge_duplicated_http_result(self, duplicated, result, position):
+        # using content with more text
+        if result_content_len(result.get('content', '')) >\
+                result_content_len(duplicated.get('content', '')):
+            duplicated['content'] = result['content']
         # merge all result's parameters not found in duplicate
         for key in result.keys():
            if not duplicated.get(key):
                duplicated[key] = result.get(key)
         # add the new position
         duplicated['positions'].append(position)
         # add engine to list of result-engines
         duplicated['engines'].add(result['engine'])
         # using https if possible
         if duplicated['parsed_url'].scheme != 'https' and result['parsed_url'].scheme == 'https':
             duplicated['url'] = result['parsed_url'].geturl()
             duplicated['parsed_url'] = result['parsed_url']
-        # if there is no duplicate found, append result
-        else:
+    def __merge_result_no_url(self, result, position):
+        result['engines'] = set([result['engine']])
         result['positions'] = [position]
         with RLock():
             self._merged_results.append(result)
     def order_results(self):
         for result in self._merged_results:
View File

@ -45,6 +45,16 @@ if sys.version_info[0] == 3:
logger = logger.getChild('search') logger = logger.getChild('search')
number_of_searches = 0 number_of_searches = 0
max_request_timeout = settings.get('outgoing', {}).get('max_request_timeout' or None)
if max_request_timeout is None:
logger.info('max_request_timeout={0}'.format(max_request_timeout))
else:
if isinstance(max_request_timeout, float):
logger.info('max_request_timeout={0} second(s)'.format(max_request_timeout))
else:
logger.critical('outgoing.max_request_timeout if defined has to be float')
from sys import exit
exit(1)
def send_http_request(engine, request_params): def send_http_request(engine, request_params):
@ -67,7 +77,7 @@ def send_http_request(engine, request_params):
return req(request_params['url'], **request_args) return req(request_params['url'], **request_args)
def search_one_request(engine, query, request_params): def search_one_http_request(engine, query, request_params):
# update request parameters dependent on # update request parameters dependent on
# search-engine (contained in engines folder) # search-engine (contained in engines folder)
engine.request(query, request_params) engine.request(query, request_params)
@ -87,7 +97,53 @@ def search_one_request(engine, query, request_params):
return engine.response(response) return engine.response(response)
def search_one_offline_request(engine, query, request_params):
return engine.search(query, request_params)
def search_one_request_safe(engine_name, query, request_params, result_container, start_time, timeout_limit):
    if engines[engine_name].offline:
        return search_one_offline_request_safe(engine_name, query, request_params, result_container, start_time, timeout_limit)  # noqa
    return search_one_http_request_safe(engine_name, query, request_params, result_container, start_time, timeout_limit)

def search_one_offline_request_safe(engine_name, query, request_params, result_container, start_time, timeout_limit):
    engine = engines[engine_name]

    try:
        search_results = search_one_offline_request(engine, query, request_params)

        if search_results:
            result_container.extend(engine_name, search_results)

            engine_time = time() - start_time
            result_container.add_timing(engine_name, engine_time, engine_time)
            with threading.RLock():
                engine.stats['engine_time'] += engine_time
                engine.stats['engine_time_count'] += 1

    except ValueError as e:
        record_offline_engine_stats_on_error(engine, result_container, start_time)
        logger.exception('engine {0} : invalid input : {1}'.format(engine_name, e))
    except Exception as e:
        record_offline_engine_stats_on_error(engine, result_container, start_time)
        result_container.add_unresponsive_engine((
            engine_name,
            u'{0}: {1}'.format(gettext('unexpected crash'), e),
        ))
        logger.exception('engine {0} : exception : {1}'.format(engine_name, e))

def record_offline_engine_stats_on_error(engine, result_container, start_time):
    engine_time = time() - start_time
    result_container.add_timing(engine.name, engine_time, engine_time)

    with threading.RLock():
        engine.stats['errors'] += 1
def search_one_http_request_safe(engine_name, query, request_params, result_container, start_time, timeout_limit):
    # set timeout for all HTTP requests
    requests_lib.set_timeout_for_thread(timeout_limit, start_time=start_time)
    # reset the HTTP total time

@@ -101,7 +157,7 @@ def search_one_request_safe(engine_name, query, request_params, result_container
    try:
        # send requests and parse the results
        search_results = search_one_http_request(engine, query, request_params)

        # check if the engine accepted the request
        if search_results is not None:
@@ -265,6 +321,18 @@ def get_search_query_from_webapp(preferences, form):
    # query_engines
    query_engines = raw_text_query.engines

    # timeout_limit
    query_timeout = raw_text_query.timeout_limit
    if query_timeout is None and 'timeout_limit' in form:
        raw_time_limit = form.get('timeout_limit')
        if raw_time_limit in ['None', '']:
            raw_time_limit = None
        else:
            try:
                query_timeout = float(raw_time_limit)
            except ValueError:
                raise SearxParameterException('timeout_limit', raw_time_limit)
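So a per-query timeout can come either from the raw query text or from the timeout_limit form field, where a float string sets the timeout, the literals 'None' and '' count as unset, and anything else is rejected. A standalone sketch of the form handling (hypothetical helper, not searx's own):

def parse_timeout_limit(form):
    raw = form.get('timeout_limit')
    if raw in (None, 'None', ''):
        return None
    return float(raw)  # a ValueError here maps to SearxParameterException

assert parse_timeout_limit({'timeout_limit': '3.0'}) == 3.0
assert parse_timeout_limit({'timeout_limit': 'None'}) is None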
    # query_categories
    query_categories = []
@@ -338,7 +406,8 @@ def get_search_query_from_webapp(preferences, form):
    query_engines = deduplicate_query_engines(query_engines)

    return (SearchQuery(query, query_engines, query_categories,
                        query_lang, query_safesearch, query_pageno,
                        query_time_range, query_timeout),
            raw_text_query)
@@ -351,6 +420,7 @@ class Search(object):
        super(Search, self).__init__()
        self.search_query = search_query
        self.result_container = ResultContainer()
        self.actual_timeout = None

    # do search-request
    def search(self):
@@ -380,7 +450,7 @@ class Search(object):
        search_query = self.search_query

        # max of all selected engine timeout
        default_timeout = 0

        # start search-request for all selected engines
        for selected_engine in search_query.engines:

@@ -403,29 +473,51 @@ class Search(object):
                continue

            # set default request parameters
            request_params = {}
            if not engine.offline:
                request_params = default_request_params()
                request_params['headers']['User-Agent'] = user_agent

                if hasattr(engine, 'language') and engine.language:
                    request_params['language'] = engine.language
                else:
                    request_params['language'] = search_query.lang

                request_params['safesearch'] = search_query.safesearch
                request_params['time_range'] = search_query.time_range

            request_params['category'] = selected_engine['category']
            request_params['pageno'] = search_query.pageno

            # append request to list
            requests.append((selected_engine['name'], search_query.query, request_params))

            # update default_timeout
            default_timeout = max(default_timeout, engine.timeout)

        # adjust timeout
        self.actual_timeout = default_timeout
        query_timeout = self.search_query.timeout_limit

        if max_request_timeout is None and query_timeout is None:
            # No max, no user query: default_timeout
            pass
        elif max_request_timeout is None and query_timeout is not None:
            # No max, but user query: From user query except if above default
            self.actual_timeout = min(default_timeout, query_timeout)
        elif max_request_timeout is not None and query_timeout is None:
            # Max, no user query: Default except if above max
            self.actual_timeout = min(default_timeout, max_request_timeout)
        elif max_request_timeout is not None and query_timeout is not None:
            # Max & user query: From user query except if above max
            self.actual_timeout = min(query_timeout, max_request_timeout)

        logger.debug("actual_timeout={0} (default_timeout={1}, ?timeout_limit={2}, max_request_timeout={3})"
                     .format(self.actual_timeout, default_timeout, query_timeout, max_request_timeout))
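The four branches reduce to one rule: take the smallest applicable limit, and let the user's query timeout exceed the engine default only when a max_request_timeout is configured. A standalone sketch with worked values (hypothetical helper, not part of the patch):

def resolve_actual_timeout(default_timeout, query_timeout, max_request_timeout):
    # mirrors the branch logic above
    if max_request_timeout is None and query_timeout is None:
        return default_timeout
    if max_request_timeout is None:
        return min(default_timeout, query_timeout)
    if query_timeout is None:
        return min(default_timeout, max_request_timeout)
    return min(query_timeout, max_request_timeout)

assert resolve_actual_timeout(3.0, None, None) == 3.0  # engine default wins
assert resolve_actual_timeout(3.0, 1.5, None) == 1.5   # user may lower the timeout
assert resolve_actual_timeout(3.0, 9.0, None) == 3.0   # but not raise it without a max
assert resolve_actual_timeout(3.0, 9.0, 10.0) == 9.0   # with a max, raising is allowed up to it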
        # send all search-request
        if requests:
            search_multiple_requests(requests, self.result_container, start_time, self.actual_timeout)
            start_new_thread(gc.collect, tuple())

        # return results, suggestions, answers and infoboxes


@@ -34,7 +34,8 @@ ui:
#   key : !!binary "your_morty_proxy_key"

outgoing: # communication with search engines
    request_timeout : 2.0       # default timeout in seconds, can be overridden by engine
    # max_request_timeout: 10.0 # the maximum timeout in seconds
    useragent_suffix : ""       # suffix of searx_useragent, could contain information such as an email address of the administrator
    pool_connections : 100      # Number of different hosts
    pool_maxsize : 10           # Number of simultaneous requests by host
@@ -160,11 +161,12 @@ engines:
    weight : 2
    disabled : True

# cloudflare protected
#  - name : digbt
#    engine : digbt
#    shortcut : dbt
#    timeout : 6.0
#    disabled : True

  - name : digg
    engine : digg
@@ -203,11 +205,11 @@ engines:
  - name : etymonline
    engine : xpath
    paging : True
    search_url : https://etymonline.com/search?page={pageno}&q={query}
    url_xpath : //a[contains(@class, "word__name--")]/@href
    title_xpath : //a[contains(@class, "word__name--")]
    content_xpath : //section[contains(@class, "word__defination")]
    first_page_num : 1
    shortcut : et
    disabled : True
@@ -392,6 +394,12 @@ engines:
    timeout : 6.0
    disabled : True

  - name : invidious
    engine : invidious
    base_url : 'https://invidio.us/'
    shortcut: iv
    timeout : 5.0
  - name: kickass
    engine : kickass
    shortcut : kc

@@ -400,7 +408,7 @@ engines:
  - name : library genesis
    engine : xpath
    search_url : https://libgen.is/search.php?req={query}
    url_xpath : //a[contains(@href,"bookfi.net")]/@href
    title_xpath : //a[contains(@href,"book/")]/text()[1]
    content_xpath : //td/a[1][contains(@href,"=author")]/text()
@@ -456,7 +464,7 @@ engines:
  - name : openairedatasets
    engine : json_engine
    paging : True
    search_url : https://api.openaire.eu/search/datasets?format=json&page={pageno}&size=10&title={query}
    results_query : response/results/result
    url_query : metadata/oaf:entity/oaf:result/children/instance/webresource/url/$
    title_query : metadata/oaf:entity/oaf:result/title/$
@@ -468,7 +476,7 @@ engines:
  - name : openairepublications
    engine : json_engine
    paging : True
    search_url : https://api.openaire.eu/search/publications?format=json&page={pageno}&size=10&title={query}
    results_query : response/results/result
    url_query : metadata/oaf:entity/oaf:result/children/instance/webresource/url/$
    title_query : metadata/oaf:entity/oaf:result/title/$
@@ -699,9 +707,9 @@ engines:
    shortcut: vo
    categories: social media
    search_url : https://searchvoat.co/?t={query}
    url_xpath : //div[@class="entry"]//p[@class="title"]/a/@href
    title_xpath : //div[@class="entry"]//p[@class="title"]/a/text()
    content_xpath : //div[@class="entry"]//span[@class="domain"]/a/text()
    timeout : 10.0
    disabled : True
@@ -739,10 +747,15 @@ engines:
    title_xpath : ./h2
    content_xpath : ./p[@class="s"]
    suggestion_xpath : /html/body//div[@class="top-info"]/p[@class="top-info spell"]/a
    first_page_num : 0
    page_size : 10
    disabled : True

  - name : seedpeer
    shortcut : speu
    engine : seedpeer
    categories: files, music, videos
#  - name : yacy
#    engine : yacy
#    shortcut : ya

@@ -802,7 +815,7 @@ locales:
doi_resolvers :
    oadoi.org : 'https://oadoi.org/'
    doi.org : 'https://doi.org/'
    doai.io : 'https://doai.io/'
    sci-hub.tw : 'https://sci-hub.tw/'

default_doi_resolver : 'oadoi.org'


@@ -43,7 +43,7 @@ locales:
doi_resolvers :
    oadoi.org : 'https://oadoi.org/'
    doi.org : 'https://doi.org/'
    doai.io : 'https://doai.io/'
    sci-hub.tw : 'https://sci-hub.tw/'

default_doi_resolver : 'oadoi.org'



@@ -125,6 +125,14 @@ $(document).ready(function() {
        }
    });

    function nextResult(current, direction) {
        var next = current[direction]();
        while (!next.is('.result') && next.length !== 0) {
            next = next[direction]();
        }
        return next;
    }
    function highlightResult(which) {
        return function() {
            var current = $('.result[data-vim-selected]');

@@ -157,13 +165,13 @@ $(document).ready(function() {
                }
                break;
            case 'down':
                next = nextResult(current, 'next');
                if (next.length === 0) {
                    next = $('.result:first');
                }
                break;
            case 'up':
                next = nextResult(current, 'prev');
                if (next.length === 0) {
                    next = $('.result:last');
                }


@@ -1 +1 @@
#search_submit,#sidebar,.right{right:auto;left:0}.q{padding:.5em 1em .5em 3em}.result .favicon{float:right;margin-left:.5em;margin-right:0}#results{padding:0 32px 0 272px}.search.center{padding-right:0;padding-left:17em}#pagination form+form{float:left;margin-top:-2em}.engine-table{text-align:right}



@@ -325,6 +325,10 @@ a {
    font-size: 0.9em;
}

.result .engines {
    text-align: right;
}

.result .content {
    margin: 0;
    color: #666;



@@ -376,6 +376,10 @@ table {
    width: 100%;
}

.result-table {
    margin-bottom: 10px;
}

td {
    padding: 0 4px;
}


@@ -0,0 +1,732 @@
.searx-navbar {
background: #29314d;
height: 2.3rem;
font-size: 1.3rem;
line-height: 1.3rem;
padding: 0.5rem;
font-weight: bold;
margin-bottom: 0.8rem;
}
.searx-navbar a,
.searx-navbar a:hover {
margin-right: 2.0rem;
color: white;
text-decoration: none;
}
.searx-navbar .instance a {
color: #01d7d4;
margin-left: 2.0rem;
}
#main-logo {
margin-top: 20vh;
margin-bottom: 25px;
}
#main-logo > img {
max-width: 350px;
width: 80%;
}
* {
border-radius: 0 !important;
}
html {
position: relative;
min-height: 100%;
color: #29314d;
}
body {
/* Margin bottom by footer height */
font-family: 'Roboto', Helvetica, Arial, sans-serif;
margin-bottom: 80px;
background-color: white;
}
body a {
color: #0088cc;
}
.footer {
position: absolute;
bottom: 0;
width: 100%;
/* Set the fixed height of the footer here */
height: 60px;
text-align: center;
color: #999;
}
input[type=checkbox]:checked + .label_hide_if_checked,
input[type=checkbox]:checked + .label_hide_if_not_checked + .label_hide_if_checked {
display: none;
}
input[type=checkbox]:not(:checked) + .label_hide_if_not_checked,
input[type=checkbox]:not(:checked) + .label_hide_if_checked + .label_hide_if_not_checked {
display: none;
}
.onoff-checkbox {
width: 15%;
}
.onoffswitch {
position: relative;
width: 110px;
-webkit-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
}
.onoffswitch-checkbox {
display: none;
}
.onoffswitch-label {
display: block;
overflow: hidden;
cursor: pointer;
border: 2px solid #FFFFFF !important;
border-radius: 50px !important;
}
.onoffswitch-inner {
display: block;
transition: margin 0.3s ease-in 0s;
}
.onoffswitch-inner:before,
.onoffswitch-inner:after {
display: block;
float: left;
width: 50%;
height: 30px;
padding: 0;
line-height: 40px;
font-size: 20px;
box-sizing: border-box;
content: "";
background-color: #EEEEEE;
}
.onoffswitch-switch {
display: block;
width: 37px;
background-color: #01d7d4;
position: absolute;
top: 0;
bottom: 0;
right: 0px;
border: 2px solid #FFFFFF !important;
border-radius: 50px !important;
transition: all 0.3s ease-in 0s;
}
.onoffswitch-checkbox:checked + .onoffswitch-label .onoffswitch-inner {
margin-right: 0;
}
.onoffswitch-checkbox:checked + .onoffswitch-label .onoffswitch-switch {
right: 71px;
background-color: #A1A1A1;
}
.result_header {
margin-top: 0px;
margin-bottom: 2px;
font-size: 16px;
}
.result_header .favicon {
margin-bottom: -3px;
}
.result_header a {
color: #29314d;
text-decoration: none;
}
.result_header a:hover {
color: #0088cc;
}
.result_header a:visited {
color: #684898;
}
.result_header a .highlight {
background-color: #f6f9fa;
}
.result-content,
.result-format,
.result-source {
margin-top: 2px;
margin-bottom: 0;
word-wrap: break-word;
color: #666666;
font-size: 13px;
}
.result-content .highlight,
.result-format .highlight,
.result-source .highlight {
font-weight: bold;
}
.result-source {
font-size: 10px;
float: left;
}
.result-format {
font-size: 10px;
float: right;
}
.external-link {
color: #069025;
font-size: 12px;
margin-bottom: 15px;
}
.external-link a {
margin-right: 3px;
}
.result-default,
.result-code,
.result-torrent,
.result-videos,
.result-map {
clear: both;
padding: 2px 4px;
}
.result-default:hover,
.result-code:hover,
.result-torrent:hover,
.result-videos:hover,
.result-map:hover {
background-color: #f6f9fa;
}
.result-images {
float: left !important;
width: 24%;
margin: .5%;
}
.result-images a {
display: block;
width: 100%;
background-size: cover;
}
.img-thumbnail {
margin: 5px;
max-height: 128px;
min-height: 128px;
}
.result-videos {
clear: both;
}
.result-videos hr {
margin: 5px 0 15px 0;
}
.result-videos .collapse {
width: 100%;
}
.result-videos .in {
margin-bottom: 8px;
}
.result-torrent {
clear: both;
}
.result-torrent b {
margin-right: 5px;
margin-left: 5px;
}
.result-torrent .seeders {
color: #2ecc71;
}
.result-torrent .leechers {
color: #f35e77;
}
.result-map {
clear: both;
}
.result-code {
clear: both;
}
.result-code .code-fork,
.result-code .code-fork a {
color: #666666;
}
.suggestion_item {
margin: 2px 5px;
max-width: 100%;
}
.suggestion_item .btn {
max-width: 100%;
white-space: normal;
word-wrap: break-word;
text-align: left;
}
.result_download {
margin-right: 5px;
}
#pagination {
margin-top: 30px;
padding-bottom: 60px;
}
.label-default {
color: #a4a4a4;
background: transparent;
}
.result .text-muted small {
word-wrap: break-word;
}
.modal-wrapper {
box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5);
}
.modal-wrapper {
background-clip: padding-box;
background-color: #fff;
border: 1px solid rgba(0, 0, 0, 0.2);
border-radius: 6px;
box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5);
outline: 0 none;
position: relative;
}
.infobox .panel-heading {
background-color: #f6f9fa;
}
.infobox .panel-heading .panel-title {
font-weight: 700;
}
.infobox p {
font-family: "DejaVu Serif", Georgia, Cambria, "Times New Roman", Times, serif !important;
font-style: italic;
}
.infobox .btn {
background-color: #2ecc71;
border: none;
}
.infobox .btn a {
color: white;
margin: 5px;
}
.infobox .infobox_part {
margin-bottom: 20px;
word-wrap: break-word;
table-layout: fixed;
}
.infobox .infobox_part:last-child {
margin-bottom: 0;
}
.search_categories,
#categories {
text-transform: capitalize;
margin-bottom: 0.5rem;
display: flex;
flex-wrap: wrap;
flex-flow: row wrap;
align-content: stretch;
}
.search_categories label,
#categories label,
.search_categories .input-group-addon,
#categories .input-group-addon {
flex-grow: 1;
flex-basis: auto;
font-size: 1.2rem;
font-weight: normal;
background-color: white;
border: #dddddd 1px solid;
border-right: none;
color: #666666;
padding-bottom: 0.4rem;
padding-top: 0.4rem;
text-align: center;
min-width: 50px;
}
.search_categories label:last-child,
#categories label:last-child,
.search_categories .input-group-addon:last-child,
#categories .input-group-addon:last-child {
border-right: #dddddd 1px solid;
}
.search_categories input[type="checkbox"]:checked + label,
#categories input[type="checkbox"]:checked + label {
color: #29314d;
font-weight: bold;
border-bottom: #01d7d4 5px solid;
}
#main-logo {
margin-top: 10vh;
margin-bottom: 25px;
}
#main-logo > img {
max-width: 350px;
width: 80%;
}
#q {
box-shadow: none;
border-right: none;
border-color: #a4a4a4;
}
#search_form .input-group-btn .btn {
border-color: #a4a4a4;
}
#search_form .input-group-btn .btn:hover {
background-color: #2ecc71;
color: white;
}
.custom-select {
appearance: none;
-webkit-appearance: none;
-moz-appearance: none;
font-size: 1.2rem;
font-weight: normal;
background-color: white;
border: #dddddd 1px solid;
color: #666666;
background: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA8AAAAPCAQAAACR313BAAAABGdBTUEAALGPC/xhBQAAACBjSFJN
AAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAAAmJLR0QA/4ePzL8AAAAJcEhZ
cwAABFkAAARZAVnbJUkAAAAHdElNRQfgBxgLDwB20OFsAAAAbElEQVQY073OsQ3CMAAEwJMYwJGn
sAehpoXJItltBkmcdZBYgIIiQoLglnz3ui+eP+bk5uneteTMZJa6OJuIqvYzSJoqwqBq8gdmTTW8
6/dghxAUq4xsVYT9laBYXCw93Aajh7GPEF23t4fkBYevGFTANkPRAAAAJXRFWHRkYXRlOmNyZWF0
ZQAyMDE2LTA3LTI0VDExOjU1OjU4KzAyOjAwRFqFOQAAACV0RVh0ZGF0ZTptb2RpZnkAMjAxNi0w
Ny0yNFQxMToxNTowMCswMjowMP7RDgQAAAAZdEVYdFNvZnR3YXJlAHd3dy5pbmtzY2FwZS5vcmeb
7jwaAAAAAElFTkSuQmCC) 96% no-repeat;
}
.search-margin {
margin-bottom: 0.6em;
}
#advanced-search-container {
display: none;
text-align: left;
margin-bottom: 1rem;
clear: both;
}
#advanced-search-container label,
#advanced-search-container .input-group-addon {
font-size: 1.2rem;
font-weight: normal;
background-color: white;
border: #dddddd 1px solid;
border-right: none;
color: #666666;
padding-bottom: 0.4rem;
padding-right: 0.7rem;
padding-left: 0.7rem;
}
#advanced-search-container label:last-child,
#advanced-search-container .input-group-addon:last-child {
border-right: #dddddd 1px solid;
}
#advanced-search-container input[type="radio"] {
display: none;
}
#advanced-search-container input[type="radio"]:checked + label {
color: #29314d;
font-weight: bold;
border-bottom: #01d7d4 5px solid;
}
#check-advanced {
display: none;
}
#check-advanced:checked ~ #advanced-search-container {
display: block;
}
.advanced {
padding: 0;
margin-top: 0.3rem;
text-align: right;
}
.advanced label,
.advanced select {
cursor: pointer;
}
.cursor-text {
cursor: text !important;
}
.cursor-pointer {
cursor: pointer !important;
}
pre,
code {
font-family: 'Ubuntu Mono', 'Courier New', 'Lucida Console', monospace !important;
}
.lineno {
margin-right: 5px;
}
.highlight .hll {
background-color: #ffffcc;
}
.highlight {
background: #f8f8f8;
}
.highlight .c {
color: #556366;
font-style: italic;
}
/* Comment */
.highlight .err {
border: 1px solid #ffa92f;
}
/* Error */
.highlight .k {
color: #BE74D5;
font-weight: bold;
}
/* Keyword */
.highlight .o {
color: #d19a66;
}
/* Operator */
.highlight .cm {
color: #556366;
font-style: italic;
}
/* Comment.Multiline */
.highlight .cp {
color: #bc7a00;
}
/* Comment.Preproc */
.highlight .c1 {
color: #556366;
font-style: italic;
}
/* Comment.Single */
.highlight .cs {
color: #556366;
font-style: italic;
}
/* Comment.Special */
.highlight .gd {
color: #a00000;
}
/* Generic.Deleted */
.highlight .ge {
font-style: italic;
}
/* Generic.Emph */
.highlight .gr {
color: #ff0000;
}
/* Generic.Error */
.highlight .gh {
color: #000080;
font-weight: bold;
}
/* Generic.Heading */
.highlight .gi {
color: #00a000;
}
/* Generic.Inserted */
.highlight .go {
color: #888888;
}
/* Generic.Output */
.highlight .gp {
color: #000080;
font-weight: bold;
}
/* Generic.Prompt */
.highlight .gs {
font-weight: bold;
}
/* Generic.Strong */
.highlight .gu {
color: #800080;
font-weight: bold;
}
/* Generic.Subheading */
.highlight .gt {
color: #0044dd;
}
/* Generic.Traceback */
.highlight .kc {
color: #BE74D5;
font-weight: bold;
}
/* Keyword.Constant */
.highlight .kd {
color: #BE74D5;
font-weight: bold;
}
/* Keyword.Declaration */
.highlight .kn {
color: #BE74D5;
font-weight: bold;
}
/* Keyword.Namespace */
.highlight .kp {
color: #be74d5;
}
/* Keyword.Pseudo */
.highlight .kr {
color: #BE74D5;
font-weight: bold;
}
/* Keyword.Reserved */
.highlight .kt {
color: #d46c72;
}
/* Keyword.Type */
.highlight .m {
color: #d19a66;
}
/* Literal.Number */
.highlight .s {
color: #86c372;
}
/* Literal.String */
.highlight .na {
color: #7d9029;
}
/* Name.Attribute */
.highlight .nb {
color: #be74d5;
}
/* Name.Builtin */
.highlight .nc {
color: #61AFEF;
font-weight: bold;
}
/* Name.Class */
.highlight .no {
color: #d19a66;
}
/* Name.Constant */
.highlight .nd {
color: #aa22ff;
}
/* Name.Decorator */
.highlight .ni {
color: #999999;
font-weight: bold;
}
/* Name.Entity */
.highlight .ne {
color: #D2413A;
font-weight: bold;
}
/* Name.Exception */
.highlight .nf {
color: #61afef;
}
/* Name.Function */
.highlight .nl {
color: #a0a000;
}
/* Name.Label */
.highlight .nn {
color: #61AFEF;
font-weight: bold;
}
/* Name.Namespace */
.highlight .nt {
color: #BE74D5;
font-weight: bold;
}
/* Name.Tag */
.highlight .nv {
color: #dfc06f;
}
/* Name.Variable */
.highlight .ow {
color: #AA22FF;
font-weight: bold;
}
/* Operator.Word */
.highlight .w {
color: #d7dae0;
}
/* Text.Whitespace */
.highlight .mf {
color: #d19a66;
}
/* Literal.Number.Float */
.highlight .mh {
color: #d19a66;
}
/* Literal.Number.Hex */
.highlight .mi {
color: #d19a66;
}
/* Literal.Number.Integer */
.highlight .mo {
color: #d19a66;
}
/* Literal.Number.Oct */
.highlight .sb {
color: #86c372;
}
/* Literal.String.Backtick */
.highlight .sc {
color: #86c372;
}
/* Literal.String.Char */
.highlight .sd {
color: #86C372;
font-style: italic;
}
/* Literal.String.Doc */
.highlight .s2 {
color: #86c372;
}
/* Literal.String.Double */
.highlight .se {
color: #BB6622;
font-weight: bold;
}
/* Literal.String.Escape */
.highlight .sh {
color: #86c372;
}
/* Literal.String.Heredoc */
.highlight .si {
color: #BB6688;
font-weight: bold;
}
/* Literal.String.Interpol */
.highlight .sx {
color: #be74d5;
}
/* Literal.String.Other */
.highlight .sr {
color: #bb6688;
}
/* Literal.String.Regex */
.highlight .s1 {
color: #86c372;
}
/* Literal.String.Single */
.highlight .ss {
color: #dfc06f;
}
/* Literal.String.Symbol */
.highlight .bp {
color: #be74d5;
}
/* Name.Builtin.Pseudo */
.highlight .vc {
color: #dfc06f;
}
/* Name.Variable.Class */
.highlight .vg {
color: #dfc06f;
}
/* Name.Variable.Global */
.highlight .vi {
color: #dfc06f;
}
/* Name.Variable.Instance */
.highlight .il {
color: #d19a66;
}
/* Literal.Number.Integer.Long */
.highlight .lineno {
-webkit-touch-callout: none;
-webkit-user-select: none;
-khtml-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
cursor: default;
color: #556366;
}
.highlight .lineno::selection {
background: transparent;
/* WebKit/Blink Browsers */
}
.highlight .lineno::-moz-selection {
background: transparent;
/* Gecko Browsers */
}
.highlight pre {
background-color: #282C34;
color: #D7DAE0;
border: none;
margin-bottom: 25px;
font-size: 15px;
padding: 20px 10px;
}
.highlight {
font-weight: 700;
}
.table > tbody > tr > td,
.table > tbody > tr > th {
vertical-align: middle !important;
}



@@ -0,0 +1,931 @@
* {
border-radius: 0 !important;
}
html {
position: relative;
min-height: 100%;
color: #29314d;
}
body {
/* Margin bottom by footer height */
font-family: 'Roboto', Helvetica, Arial, sans-serif;
margin-bottom: 80px;
background-color: white;
}
body a {
color: #0088cc;
}
.footer {
position: absolute;
bottom: 0;
width: 100%;
/* Set the fixed height of the footer here */
height: 60px;
text-align: center;
color: #999;
}
input[type=checkbox]:checked + .label_hide_if_checked,
input[type=checkbox]:checked + .label_hide_if_not_checked + .label_hide_if_checked {
display: none;
}
input[type=checkbox]:not(:checked) + .label_hide_if_not_checked,
input[type=checkbox]:not(:checked) + .label_hide_if_checked + .label_hide_if_not_checked {
display: none;
}
.onoff-checkbox {
width: 15%;
}
.onoffswitch {
position: relative;
width: 110px;
-webkit-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
}
.onoffswitch-checkbox {
display: none;
}
.onoffswitch-label {
display: block;
overflow: hidden;
cursor: pointer;
border: 2px solid #FFFFFF !important;
border-radius: 50px !important;
}
.onoffswitch-inner {
display: block;
transition: margin 0.3s ease-in 0s;
}
.onoffswitch-inner:before,
.onoffswitch-inner:after {
display: block;
float: left;
width: 50%;
height: 30px;
padding: 0;
line-height: 40px;
font-size: 20px;
box-sizing: border-box;
content: "";
background-color: #EEEEEE;
}
.onoffswitch-switch {
display: block;
width: 37px;
background-color: #01d7d4;
position: absolute;
top: 0;
bottom: 0;
right: 0px;
border: 2px solid #FFFFFF !important;
border-radius: 50px !important;
transition: all 0.3s ease-in 0s;
}
.onoffswitch-checkbox:checked + .onoffswitch-label .onoffswitch-inner {
margin-right: 0;
}
.onoffswitch-checkbox:checked + .onoffswitch-label .onoffswitch-switch {
right: 71px;
background-color: #A1A1A1;
}
.result_header {
margin-top: 0px;
margin-bottom: 2px;
font-size: 16px;
}
.result_header .favicon {
margin-bottom: -3px;
}
.result_header a {
color: #29314d;
text-decoration: none;
}
.result_header a:hover {
color: #0088cc;
}
.result_header a:visited {
color: #684898;
}
.result_header a .highlight {
background-color: #f6f9fa;
}
.result-content,
.result-format,
.result-source {
margin-top: 2px;
margin-bottom: 0;
word-wrap: break-word;
color: #666666;
font-size: 13px;
}
.result-content .highlight,
.result-format .highlight,
.result-source .highlight {
font-weight: bold;
}
.result-source {
font-size: 10px;
float: left;
}
.result-format {
font-size: 10px;
float: right;
}
.external-link {
color: #069025;
font-size: 12px;
margin-bottom: 15px;
}
.external-link a {
margin-right: 3px;
}
.result-default,
.result-code,
.result-torrent,
.result-videos,
.result-map {
clear: both;
padding: 2px 4px;
}
.result-default:hover,
.result-code:hover,
.result-torrent:hover,
.result-videos:hover,
.result-map:hover {
background-color: #f6f9fa;
}
.result-images {
float: left !important;
width: 24%;
margin: .5%;
}
.result-images a {
display: block;
width: 100%;
background-size: cover;
}
.img-thumbnail {
margin: 5px;
max-height: 128px;
min-height: 128px;
}
.result-videos {
clear: both;
}
.result-videos hr {
margin: 5px 0 15px 0;
}
.result-videos .collapse {
width: 100%;
}
.result-videos .in {
margin-bottom: 8px;
}
.result-torrent {
clear: both;
}
.result-torrent b {
margin-right: 5px;
margin-left: 5px;
}
.result-torrent .seeders {
color: #2ecc71;
}
.result-torrent .leechers {
color: #f35e77;
}
.result-map {
clear: both;
}
.result-code {
clear: both;
}
.result-code .code-fork,
.result-code .code-fork a {
color: #666666;
}
.suggestion_item {
margin: 2px 5px;
max-width: 100%;
}
.suggestion_item .btn {
max-width: 100%;
white-space: normal;
word-wrap: break-word;
text-align: left;
}
.result_download {
margin-right: 5px;
}
#pagination {
margin-top: 30px;
padding-bottom: 60px;
}
.label-default {
color: #a4a4a4;
background: transparent;
}
.result .text-muted small {
word-wrap: break-word;
}
.modal-wrapper {
box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5);
}
.modal-wrapper {
background-clip: padding-box;
background-color: #fff;
border: 1px solid rgba(0, 0, 0, 0.2);
border-radius: 6px;
box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5);
outline: 0 none;
position: relative;
}
.infobox .panel-heading {
background-color: #f6f9fa;
}
.infobox .panel-heading .panel-title {
font-weight: 700;
}
.infobox p {
font-family: "DejaVu Serif", Georgia, Cambria, "Times New Roman", Times, serif !important;
font-style: italic;
}
.infobox .btn {
background-color: #2ecc71;
border: none;
}
.infobox .btn a {
color: white;
margin: 5px;
}
.infobox .infobox_part {
margin-bottom: 20px;
word-wrap: break-word;
table-layout: fixed;
}
.infobox .infobox_part:last-child {
margin-bottom: 0;
}
.search_categories,
#categories {
text-transform: capitalize;
margin-bottom: 0.5rem;
display: flex;
flex-wrap: wrap;
flex-flow: row wrap;
align-content: stretch;
}
.search_categories label,
#categories label,
.search_categories .input-group-addon,
#categories .input-group-addon {
flex-grow: 1;
flex-basis: auto;
font-size: 1.2rem;
font-weight: normal;
background-color: white;
border: #dddddd 1px solid;
border-right: none;
color: #666666;
padding-bottom: 0.4rem;
padding-top: 0.4rem;
text-align: center;
min-width: 50px;
}
.search_categories label:last-child,
#categories label:last-child,
.search_categories .input-group-addon:last-child,
#categories .input-group-addon:last-child {
border-right: #dddddd 1px solid;
}
.search_categories input[type="checkbox"]:checked + label,
#categories input[type="checkbox"]:checked + label {
color: #29314d;
font-weight: bold;
border-bottom: #01d7d4 5px solid;
}
#main-logo {
margin-top: 10vh;
margin-bottom: 25px;
}
#main-logo > img {
max-width: 350px;
width: 80%;
}
#q {
box-shadow: none;
border-right: none;
border-color: #a4a4a4;
}
#search_form .input-group-btn .btn {
border-color: #a4a4a4;
}
#search_form .input-group-btn .btn:hover {
background-color: #2ecc71;
color: white;
}
.custom-select {
appearance: none;
-webkit-appearance: none;
-moz-appearance: none;
font-size: 1.2rem;
font-weight: normal;
background-color: white;
border: #dddddd 1px solid;
color: #666666;
background: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA8AAAAPCAQAAACR313BAAAABGdBTUEAALGPC/xhBQAAACBjSFJN
AAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAAAmJLR0QA/4ePzL8AAAAJcEhZ
cwAABFkAAARZAVnbJUkAAAAHdElNRQfgBxgLDwB20OFsAAAAbElEQVQY073OsQ3CMAAEwJMYwJGn
sAehpoXJItltBkmcdZBYgIIiQoLglnz3ui+eP+bk5uneteTMZJa6OJuIqvYzSJoqwqBq8gdmTTW8
6/dghxAUq4xsVYT9laBYXCw93Aajh7GPEF23t4fkBYevGFTANkPRAAAAJXRFWHRkYXRlOmNyZWF0
ZQAyMDE2LTA3LTI0VDExOjU1OjU4KzAyOjAwRFqFOQAAACV0RVh0ZGF0ZTptb2RpZnkAMjAxNi0w
Ny0yNFQxMToxNTowMCswMjowMP7RDgQAAAAZdEVYdFNvZnR3YXJlAHd3dy5pbmtzY2FwZS5vcmeb
7jwaAAAAAElFTkSuQmCC) 96% no-repeat;
}
.search-margin {
margin-bottom: 0.6em;
}
#advanced-search-container {
display: none;
text-align: left;
margin-bottom: 1rem;
clear: both;
}
#advanced-search-container label,
#advanced-search-container .input-group-addon {
font-size: 1.2rem;
font-weight: normal;
background-color: white;
border: #dddddd 1px solid;
border-right: none;
color: #666666;
padding-bottom: 0.4rem;
padding-right: 0.7rem;
padding-left: 0.7rem;
}
#advanced-search-container label:last-child,
#advanced-search-container .input-group-addon:last-child {
border-right: #dddddd 1px solid;
}
#advanced-search-container input[type="radio"] {
display: none;
}
#advanced-search-container input[type="radio"]:checked + label {
color: #29314d;
font-weight: bold;
border-bottom: #01d7d4 5px solid;
}
#check-advanced {
display: none;
}
#check-advanced:checked ~ #advanced-search-container {
display: block;
}
.advanced {
padding: 0;
margin-top: 0.3rem;
text-align: right;
}
.advanced label,
.advanced select {
cursor: pointer;
}
.cursor-text {
cursor: text !important;
}
.cursor-pointer {
cursor: pointer !important;
}
pre,
code {
font-family: 'Ubuntu Mono', 'Courier New', 'Lucida Console', monospace !important;
}
.lineno {
margin-right: 5px;
}
.highlight .hll {
background-color: #ffffcc;
}
.highlight {
background: #f8f8f8;
}
.highlight .c {
color: #556366;
font-style: italic;
}
/* Comment */
.highlight .err {
border: 1px solid #ffa92f;
}
/* Error */
.highlight .k {
color: #BE74D5;
font-weight: bold;
}
/* Keyword */
.highlight .o {
color: #d19a66;
}
/* Operator */
.highlight .cm {
color: #556366;
font-style: italic;
}
/* Comment.Multiline */
.highlight .cp {
color: #bc7a00;
}
/* Comment.Preproc */
.highlight .c1 {
color: #556366;
font-style: italic;
}
/* Comment.Single */
.highlight .cs {
color: #556366;
font-style: italic;
}
/* Comment.Special */
.highlight .gd {
color: #a00000;
}
/* Generic.Deleted */
.highlight .ge {
font-style: italic;
}
/* Generic.Emph */
.highlight .gr {
color: #ff0000;
}
/* Generic.Error */
.highlight .gh {
color: #000080;
font-weight: bold;
}
/* Generic.Heading */
.highlight .gi {
color: #00a000;
}
/* Generic.Inserted */
.highlight .go {
color: #888888;
}
/* Generic.Output */
.highlight .gp {
color: #000080;
font-weight: bold;
}
/* Generic.Prompt */
.highlight .gs {
font-weight: bold;
}
/* Generic.Strong */
.highlight .gu {
color: #800080;
font-weight: bold;
}
/* Generic.Subheading */
.highlight .gt {
color: #0044dd;
}
/* Generic.Traceback */
.highlight .kc {
color: #BE74D5;
font-weight: bold;
}
/* Keyword.Constant */
.highlight .kd {
color: #BE74D5;
font-weight: bold;
}
/* Keyword.Declaration */
.highlight .kn {
color: #BE74D5;
font-weight: bold;
}
/* Keyword.Namespace */
.highlight .kp {
color: #be74d5;
}
/* Keyword.Pseudo */
.highlight .kr {
color: #BE74D5;
font-weight: bold;
}
/* Keyword.Reserved */
.highlight .kt {
color: #d46c72;
}
/* Keyword.Type */
.highlight .m {
color: #d19a66;
}
/* Literal.Number */
.highlight .s {
color: #86c372;
}
/* Literal.String */
.highlight .na {
color: #7d9029;
}
/* Name.Attribute */
.highlight .nb {
color: #be74d5;
}
/* Name.Builtin */
.highlight .nc {
color: #61AFEF;
font-weight: bold;
}
/* Name.Class */
.highlight .no {
color: #d19a66;
}
/* Name.Constant */
.highlight .nd {
color: #aa22ff;
}
/* Name.Decorator */
.highlight .ni {
color: #999999;
font-weight: bold;
}
/* Name.Entity */
.highlight .ne {
color: #D2413A;
font-weight: bold;
}
/* Name.Exception */
.highlight .nf {
color: #61afef;
}
/* Name.Function */
.highlight .nl {
color: #a0a000;
}
/* Name.Label */
.highlight .nn {
color: #61AFEF;
font-weight: bold;
}
/* Name.Namespace */
.highlight .nt {
color: #BE74D5;
font-weight: bold;
}
/* Name.Tag */
.highlight .nv {
color: #dfc06f;
}
/* Name.Variable */
.highlight .ow {
color: #AA22FF;
font-weight: bold;
}
/* Operator.Word */
.highlight .w {
color: #d7dae0;
}
/* Text.Whitespace */
.highlight .mf {
color: #d19a66;
}
/* Literal.Number.Float */
.highlight .mh {
color: #d19a66;
}
/* Literal.Number.Hex */
.highlight .mi {
color: #d19a66;
}
/* Literal.Number.Integer */
.highlight .mo {
color: #d19a66;
}
/* Literal.Number.Oct */
.highlight .sb {
color: #86c372;
}
/* Literal.String.Backtick */
.highlight .sc {
color: #86c372;
}
/* Literal.String.Char */
.highlight .sd {
color: #86C372;
font-style: italic;
}
/* Literal.String.Doc */
.highlight .s2 {
color: #86c372;
}
/* Literal.String.Double */
.highlight .se {
color: #BB6622;
font-weight: bold;
}
/* Literal.String.Escape */
.highlight .sh {
color: #86c372;
}
/* Literal.String.Heredoc */
.highlight .si {
color: #BB6688;
font-weight: bold;
}
/* Literal.String.Interpol */
.highlight .sx {
color: #be74d5;
}
/* Literal.String.Other */
.highlight .sr {
color: #bb6688;
}
/* Literal.String.Regex */
.highlight .s1 {
color: #86c372;
}
/* Literal.String.Single */
.highlight .ss {
color: #dfc06f;
}
/* Literal.String.Symbol */
.highlight .bp {
color: #be74d5;
}
/* Name.Builtin.Pseudo */
.highlight .vc {
color: #dfc06f;
}
/* Name.Variable.Class */
.highlight .vg {
color: #dfc06f;
}
/* Name.Variable.Global */
.highlight .vi {
color: #dfc06f;
}
/* Name.Variable.Instance */
.highlight .il {
color: #d19a66;
}
/* Literal.Number.Integer.Long */
.highlight .lineno {
-webkit-touch-callout: none;
-webkit-user-select: none;
-khtml-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
cursor: default;
color: #556366;
}
.highlight .lineno::selection {
background: transparent;
/* WebKit/Blink Browsers */
}
.highlight .lineno::-moz-selection {
background: transparent;
/* Gecko Browsers */
}
.highlight pre {
background-color: #282C34;
color: #D7DAE0;
border: none;
margin-bottom: 25px;
font-size: 15px;
padding: 20px 10px;
}
.highlight {
font-weight: 700;
}
.table > tbody > tr > td,
.table > tbody > tr > th {
vertical-align: middle !important;
}
/*Global*/
body {
background: #1d1f21 none !important;
color: #D5D8D7 !important;
}
a {
color: #41a2ce !important;
text-decoration: none !important;
}
a:hover {
color: #5F89AC !important;
}
input,
button,
textarea,
select {
border: 1px solid #282a2e !important;
background-color: #444 !important;
color: #BBB !important;
}
input:focus,
button:focus,
textarea:focus,
select:focus {
border: 1px solid #C5C8C6 !important;
box-shadow: initial !important;
}
div#advanced-search-container div#categories label {
background: none;
border: 1px solid #282a2e;
}
ul.nav li a {
border: 0 !important;
border-bottom: 1px solid #4d3f43 !important;
}
#categories *,
.modal-wrapper * {
background: #1d1f21 none !important;
color: #D5D8D7 !important;
}
#categories * {
border: 1px solid #3d3f43 !important;
}
#categories *:checked + label {
border-bottom: 4px solid #3d9f94 !important;
}
.result-content,
.result-source,
.result-format {
color: #B5B8B7 !important;
}
.external-link {
color: #35B887 !important;
}
.table-striped tr td,
.table-striped tr th {
border-color: #4d3f43 !important;
}
.highlight {
background: #333333 !important;
}
/*nav*/
.navbar {
background: #1d1f21 none;
border: none;
}
.navbar .active,
.menu {
background: none !important;
}
.label-default {
background: none;
color: #BBB;
}
.navbar-default .navbar-nav > .active > a,
.navbar-default .navbar-nav > .active > a:hover,
.navbar-default .navbar-nav > .active > a:focus,
.nav-tabs.nav-justified > .active > a {
background-color: #282a2e !important;
}
/*Search Page*/
.result-default:hover,
.result-code:hover,
.result-torrent:hover,
.result-videos:hover,
.result-map:hover {
background-color: #222426;
}
/*buttons*/
.btn {
color: #BBB;
background-color: #444 ;
border: 1px solid #282a2e;
}
.btn:hover {
color: #444 !important;
background-color: #BBB !important;
}
.btn-primary.active {
color: #C5C8C6;
background-color: #5F89AC;
border-color: #5F89AC;
}
/*Right Pannels*/
.panel {
border: 1px solid #111;
background: none;
}
.panel-heading {
color: #C5C8C6 !important;
background: #282a2e !important;
border-bottom: none;
}
.panel-body {
color: #C5C8C6 !important;
background: #1d1f21 !important;
border-color: #111 !important;
}
p.btn.btn-default {
background: none;
}
.table-striped > tbody > tr:nth-child(odd) > td,
.table-striped > tbody > tr:nth-child(odd) > th,
.table-striped > thead > tr:nth-child(odd) > th {
background: #2d2f32 none !important;
color: #D5D8D7 !important;
}
.label-success {
background: #1d6f42 none !important;
}
.label-danger {
background: #ad1f12 none !important;
}
.searx-navbar {
background: #333334;
height: 2.3rem;
font-size: 1.3rem;
line-height: 1.3rem;
padding: 0.5rem;
font-weight: bold;
margin-bottom: 0.8rem;
}
.searx-navbar a,
.searx-navbar a:hover {
margin-right: 2.0rem;
color: white;
text-decoration: none;
}
.searx-navbar .instance a {
color: #01d7d4;
margin-left: 2.0rem;
}
#main-logo {
margin-top: 20vh;
margin-bottom: 25px;
}
#main-logo > img {
max-width: 350px;
width: 80%;
}
.onoffswitch-inner:before,
.onoffswitch-inner:after {
background: #1d1f21 none !important;
}
.onoffswitch-switch,
.onoffswitch-label {
border: 2px solid #3d3f43 !important;
}
.nav > li > a:hover,
.nav > li > a:focus {
background-color: #3d3f43 !important;
}
/*Images search*/
.img-thumbnail,
.thumbnail {
padding: 0px;
line-height: 1.42857143;
background: none;
border: none;
}
.modal-content {
background: #1d1f21 none !important;
}
/*Preferences*/
.table > thead > tr > td.danger,
.table > tbody > tr > td.danger,
.table > tfoot > tr > td.danger,
.table > thead > tr > th.danger,
.table > tbody > tr > th.danger,
.table > tfoot > tr > th.danger,
.table > thead > tr.danger > td,
.table > tbody > tr.danger > td,
.table > tfoot > tr.danger > td,
.table > thead > tr.danger > th,
.table > tbody > tr.danger > th,
.table > tfoot > tr.danger > th {
background: rgba(240, 0, 0, 0.56) !important;
color: #C5C8C6 !important;
}
.table-hover > tbody > tr > td.danger:hover,
.table-hover > tbody > tr > th.danger:hover,
.table-hover > tbody > tr.danger:hover > td,
.table-hover > tbody > tr:hover > .danger,
.table-hover > tbody > tr.danger:hover > th {
background: rgba(237, 59, 59, 0.61) !important;
color: #C5C8C6 !important;
}
.table-hover > tbody > tr:hover > td,
.table-hover > tbody > tr:hover > th {
background: #66696e !important;
}
.btn-success {
color: #C5C8C6;
background: #449d44;
}
.btn-danger {
color: #C5C8C6;
background: #d9534f;
}
.well {
background: #444;
border-color: #282a2e;
}
.highlight {
background-color: transparent !important;
}



@@ -0,0 +1,562 @@
html {
position: relative;
min-height: 100%;
}
body {
/* Margin bottom by footer height */
margin-bottom: 80px;
}
.footer {
position: absolute;
bottom: 0;
width: 100%;
/* Set the fixed height of the footer here */
height: 60px;
}
input[type=checkbox]:checked + .label_hide_if_checked,
input[type=checkbox]:checked + .label_hide_if_not_checked + .label_hide_if_checked {
display: none;
}
input[type=checkbox]:not(:checked) + .label_hide_if_not_checked,
input[type=checkbox]:not(:checked) + .label_hide_if_checked + .label_hide_if_not_checked {
display: none;
}
.onoff-checkbox {
width: 15%;
}
.onoffswitch {
position: relative;
width: 110px;
-webkit-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
}
.onoffswitch-checkbox {
display: none;
}
.onoffswitch-label {
display: block;
overflow: hidden;
cursor: pointer;
border: 2px solid #FFFFFF !important;
border-radius: 50px !important;
}
.onoffswitch-inner {
display: block;
transition: margin 0.3s ease-in 0s;
}
.onoffswitch-inner:before,
.onoffswitch-inner:after {
display: block;
float: left;
width: 50%;
height: 30px;
padding: 0;
line-height: 40px;
font-size: 20px;
box-sizing: border-box;
content: "";
background-color: #EEEEEE;
}
.onoffswitch-switch {
display: block;
width: 37px;
background-color: #00CC00;
position: absolute;
top: 0;
bottom: 0;
right: 0px;
border: 2px solid #FFFFFF !important;
border-radius: 50px !important;
transition: all 0.3s ease-in 0s;
}
.onoffswitch-checkbox:checked + .onoffswitch-label .onoffswitch-inner {
margin-right: 0;
}
.onoffswitch-checkbox:checked + .onoffswitch-label .onoffswitch-switch {
right: 71px;
background-color: #A1A1A1;
}
.result_header {
margin-bottom: 5px;
margin-top: 20px;
}
.result_header .favicon {
margin-bottom: -3px;
}
.result_header a {
vertical-align: bottom;
}
.result_header a .highlight {
font-weight: bold;
}
.result-content {
margin-top: 5px;
word-wrap: break-word;
}
.result-content .highlight {
font-weight: bold;
}
.result-default {
clear: both;
}
.result-images {
float: left !important;
height: 138px;
}
.img-thumbnail {
margin: 5px;
max-height: 128px;
}
.result-videos {
clear: both;
}
.result-torrents {
clear: both;
}
.result-map {
clear: both;
}
.result-code {
clear: both;
}
.suggestion_item {
margin: 2px 5px;
max-width: 100%;
}
.suggestion_item .btn {
max-width: 100%;
white-space: normal;
word-wrap: break-word;
text-align: left;
}
.result_download {
margin-right: 5px;
}
#pagination {
margin-top: 30px;
padding-bottom: 50px;
}
.label-default {
color: #AAA;
background: #FFF;
}
.result .text-muted small {
word-wrap: break-word;
}
.modal-wrapper {
box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5);
}
.modal-wrapper {
background-clip: padding-box;
background-color: #fff;
border: 1px solid rgba(0, 0, 0, 0.2);
border-radius: 6px;
box-shadow: 0 3px 9px rgba(0, 0, 0, 0.5);
outline: 0 none;
position: relative;
}
.infobox .infobox_part {
margin-bottom: 20px;
word-wrap: break-word;
table-layout: fixed;
}
.infobox .infobox_part:last-child {
margin-bottom: 0;
}
.search_categories,
#categories {
text-transform: capitalize;
margin-bottom: 1.5rem;
margin-top: 1.5rem;
display: flex;
flex-wrap: wrap;
align-content: stretch;
}
.search_categories label,
#categories label,
.search_categories .input-group-addon,
#categories .input-group-addon {
flex-grow: 1;
flex-basis: auto;
font-size: 1.3rem;
font-weight: normal;
background-color: white;
border: #DDD 1px solid;
border-right: none;
color: #333;
padding-bottom: 0.8rem;
padding-top: 0.8rem;
text-align: center;
min-width: 50px;
}
.search_categories label:last-child,
#categories label:last-child,
.search_categories .input-group-addon:last-child,
#categories .input-group-addon:last-child {
border-right: #DDD 1px solid;
}
.search_categories input[type="checkbox"]:checked + label,
#categories input[type="checkbox"]:checked + label {
color: black;
font-weight: bold;
background-color: #EEE;
}
#advanced-search-container {
display: none;
text-align: center;
margin-bottom: 1rem;
clear: both;
}
#advanced-search-container label,
#advanced-search-container .input-group-addon {
font-size: 1.3rem;
font-weight: normal;
background-color: white;
border: #DDD 1px solid;
border-right: none;
color: #333;
padding-bottom: 0.8rem;
padding-left: 1.2rem;
padding-right: 1.2rem;
}
#advanced-search-container label:last-child,
#advanced-search-container .input-group-addon:last-child {
border-right: #DDD 1px solid;
}
#advanced-search-container input[type="radio"] {
display: none;
}
#advanced-search-container input[type="radio"]:checked + label {
color: black;
font-weight: bold;
background-color: #EEE;
}
#check-advanced {
display: none;
}
#check-advanced:checked ~ #advanced-search-container {
display: block;
}
.advanced {
padding: 0;
margin-top: 0.3rem;
text-align: right;
}
.advanced label,
.advanced select {
cursor: pointer;
}
.cursor-text {
cursor: text !important;
}
.cursor-pointer {
cursor: pointer !important;
}
.highlight .hll {
background-color: #ffffcc;
}
.highlight {
background: #f8f8f8;
}
.highlight .c {
color: #408080;
font-style: italic;
}
/* Comment */
.highlight .err {
border: 1px solid #ff0000;
}
/* Error */
.highlight .k {
color: #008000;
font-weight: bold;
}
/* Keyword */
.highlight .o {
color: #666666;
}
/* Operator */
.highlight .cm {
color: #408080;
font-style: italic;
}
/* Comment.Multiline */
.highlight .cp {
color: #bc7a00;
}
/* Comment.Preproc */
.highlight .c1 {
color: #408080;
font-style: italic;
}
/* Comment.Single */
.highlight .cs {
color: #408080;
font-style: italic;
}
/* Comment.Special */
.highlight .gd {
color: #a00000;
}
/* Generic.Deleted */
.highlight .ge {
font-style: italic;
}
/* Generic.Emph */
.highlight .gr {
color: #ff0000;
}
/* Generic.Error */
.highlight .gh {
color: #000080;
font-weight: bold;
}
/* Generic.Heading */
.highlight .gi {
color: #00a000;
}
/* Generic.Inserted */
.highlight .go {
color: #888888;
}
/* Generic.Output */
.highlight .gp {
color: #000080;
font-weight: bold;
}
/* Generic.Prompt */
.highlight .gs {
font-weight: bold;
}
/* Generic.Strong */
.highlight .gu {
color: #800080;
font-weight: bold;
}
/* Generic.Subheading */
.highlight .gt {
color: #0044dd;
}
/* Generic.Traceback */
.highlight .kc {
color: #008000;
font-weight: bold;
}
/* Keyword.Constant */
.highlight .kd {
color: #008000;
font-weight: bold;
}
/* Keyword.Declaration */
.highlight .kn {
color: #008000;
font-weight: bold;
}
/* Keyword.Namespace */
.highlight .kp {
color: #008000;
}
/* Keyword.Pseudo */
.highlight .kr {
color: #008000;
font-weight: bold;
}
/* Keyword.Reserved */
.highlight .kt {
color: #b00040;
}
/* Keyword.Type */
.highlight .m {
color: #666666;
}
/* Literal.Number */
.highlight .s {
color: #ba2121;
}
/* Literal.String */
.highlight .na {
color: #7d9029;
}
/* Name.Attribute */
.highlight .nb {
color: #008000;
}
/* Name.Builtin */
.highlight .nc {
color: #0000FF;
font-weight: bold;
}
/* Name.Class */
.highlight .no {
color: #880000;
}
/* Name.Constant */
.highlight .nd {
color: #aa22ff;
}
/* Name.Decorator */
.highlight .ni {
color: #999999;
font-weight: bold;
}
/* Name.Entity */
.highlight .ne {
color: #D2413A;
font-weight: bold;
}
/* Name.Exception */
.highlight .nf {
color: #0000ff;
}
/* Name.Function */
.highlight .nl {
color: #a0a000;
}
/* Name.Label */
.highlight .nn {
color: #0000FF;
font-weight: bold;
}
/* Name.Namespace */
.highlight .nt {
color: #008000;
font-weight: bold;
}
/* Name.Tag */
.highlight .nv {
color: #19177c;
}
/* Name.Variable */
.highlight .ow {
color: #AA22FF;
font-weight: bold;
}
/* Operator.Word */
.highlight .w {
color: #bbbbbb;
}
/* Text.Whitespace */
.highlight .mf {
color: #666666;
}
/* Literal.Number.Float */
.highlight .mh {
color: #666666;
}
/* Literal.Number.Hex */
.highlight .mi {
color: #666666;
}
/* Literal.Number.Integer */
.highlight .mo {
color: #666666;
}
/* Literal.Number.Oct */
.highlight .sb {
color: #ba2121;
}
/* Literal.String.Backtick */
.highlight .sc {
color: #ba2121;
}
/* Literal.String.Char */
.highlight .sd {
color: #BA2121;
font-style: italic;
}
/* Literal.String.Doc */
.highlight .s2 {
color: #ba2121;
}
/* Literal.String.Double */
.highlight .se {
color: #BB6622;
font-weight: bold;
}
/* Literal.String.Escape */
.highlight .sh {
color: #ba2121;
}
/* Literal.String.Heredoc */
.highlight .si {
color: #BB6688;
font-weight: bold;
}
/* Literal.String.Interpol */
.highlight .sx {
color: #008000;
}
/* Literal.String.Other */
.highlight .sr {
color: #bb6688;
}
/* Literal.String.Regex */
.highlight .s1 {
color: #ba2121;
}
/* Literal.String.Single */
.highlight .ss {
color: #19177c;
}
/* Literal.String.Symbol */
.highlight .bp {
color: #008000;
}
/* Name.Builtin.Pseudo */
.highlight .vc {
color: #19177c;
}
/* Name.Variable.Class */
.highlight .vg {
color: #19177c;
}
/* Name.Variable.Global */
.highlight .vi {
color: #19177c;
}
/* Name.Variable.Instance */
.highlight .il {
color: #666666;
}
/* Literal.Number.Integer.Long */
.highlight .lineno {
-webkit-touch-callout: none;
-webkit-user-select: none;
-khtml-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
cursor: default;
}
.highlight .lineno::selection {
background: transparent;
/* WebKit/Blink Browsers */
}
.highlight .lineno::-moz-selection {
background: transparent;
/* Gecko Browsers */
}
.searx-navbar {
background: #eee;
color: #aaa;
height: 2.3rem;
font-size: 1.3rem;
line-height: 1.3rem;
padding: 0.5rem;
font-weight: bold;
margin-bottom: 1.3rem;
}
.searx-navbar a,
.searx-navbar a:hover {
margin-right: 2.0rem;
text-decoration: none;
}
.searx-navbar .instance a {
color: #444;
margin-left: 2.0rem;
}
.table > tbody > tr > td,
.table > tbody > tr > th {
vertical-align: middle !important;
}


@ -55,7 +55,7 @@ module.exports = function(grunt) {
"css/logicodev-dark.min.css": "less/logicodev-dark/oscar.less"} "css/logicodev-dark.min.css": "less/logicodev-dark/oscar.less"}
}, },
/* /*
// built with ./manage.sh styles // built with ./manage.sh styles
bootstrap: { bootstrap: {
options: { options: {
paths: ["less/bootstrap"], paths: ["less/bootstrap"],

Some files were not shown because too many files have changed in this diff Show More