commit 0d93ad2018
pw3t 2014-01-05 17:57:55 +01:00
14 changed files with 98 additions and 55 deletions

engines.cfg_sample

@@ -5,7 +5,7 @@ number_of_results = 1
 
 [bing]
 engine = bing
-language = en-us
+locale = en-US
 
 [cc]
 engine=currency_convert
@@ -20,6 +20,7 @@ engine = duckduckgo_definitions
 
 [duckduckgo]
 engine = duckduckgo
+locale = en-us
 
 [flickr]
 engine = flickr
@@ -63,17 +64,17 @@ categories = social media
 [urbandictionary]
 engine = xpath
 search_url = http://www.urbandictionary.com/define.php?term={query}
-url_xpath = //div[@id="entries"]//div[@class="word"]//a
-title_xpath = //div[@id="entries"]//div[@class="word"]//span//text()
-content_xpath = //div[@id="entries"]//div[@class="text"]//div[@class="definition"]//text()
+url_xpath = //div[@id="entries"]//div[@class="word"]/a/@href
+title_xpath = //div[@id="entries"]//div[@class="word"]/span
+content_xpath = //div[@id="entries"]//div[@class="text"]/div[@class="definition"]
 
 [yahoo]
 engine = xpath
 search_url = http://search.yahoo.com/search?p={query}
 results_xpath = //div[@class="res"]
-url_xpath = .//span[@class="url"]//text()
-content_xpath = .//div[@class="abstr"]//text()
-title_xpath = .//h3/a//text()
+url_xpath = .//h3/a/@href
+title_xpath = .//h3/a
+content_xpath = .//div[@class="abstr"]
 suggestion_xpath = //div[@id="satat"]//a
 
 [youtube]
@@ -82,5 +83,6 @@ categories = videos
 
 [dailymotion]
 engine = dailymotion
+locale = en_US
 categories = videos
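
The xpath changes above pair with the reworked extract_text()/extract_url() helpers in searx/engines/xpath.py further down: an xpath engine can now consume attribute results (/a/@href), whole elements (whose text is taken via text_content()), or plain text nodes. A minimal lxml sketch of the three result shapes (the HTML snippet is invented for illustration):

    from lxml import html

    dom = html.fromstring('<div class="word"><a href="/define?x=1"><span>demo</span></a></div>')
    print(dom.xpath('//div[@class="word"]/a/@href'))  # ['/define?x=1'] -- attribute strings
    print(dom.xpath('//div[@class="word"]//span'))    # [<Element span>] -- elements
    print(dom.xpath('//div[@class="word"]//text()'))  # ['demo'] -- text nodes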

searx/engines/__init__.py

@@ -261,7 +261,7 @@ def get_engines_stats():
     for engine in errors:
         if max_errors:
-            engine['percentage'] = int(engine['avg']/max_errors*100)
+            engine['percentage'] = int(float(engine['avg'])/max_errors*100)
         else:
             engine['percentage'] = 0
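
Note on the one-character fix above: under Python 2, dividing two integers floors the result, so engine['avg']/max_errors was 0 whenever avg was smaller than max_errors and every error bar rendered as 0%. Casting one operand to float restores the intended percentage:

    avg, max_errors = 2, 5
    print(int(avg / max_errors * 100))         # 0 on Python 2: 2/5 truncates to 0
    print(int(float(avg) / max_errors * 100))  # 40, the intended percentage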

searx/engines/bing.py

@@ -4,11 +4,11 @@ from cgi import escape
 
 base_url = 'http://www.bing.com/'
 search_string = 'search?{query}'
-language = 'en-us' # see http://msdn.microsoft.com/en-us/library/dd251064.aspx
+locale = 'en-US' # see http://msdn.microsoft.com/en-us/library/dd251064.aspx
 
 def request(query, params):
-    search_path = search_string.format(query=urlencode({'q': query, 'setmkt': language}))
+    search_path = search_string.format(query=urlencode({'q': query, 'setmkt': locale}))
 
     #if params['category'] == 'images':
     #    params['url'] = base_url + 'images/' + search_path
     params['url'] = base_url + search_path
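
The rename from language to locale is cosmetic; the value still ends up in Bing's setmkt market parameter (see the MSDN link above). A quick sketch of the URL being built, with an invented query:

    from urllib import urlencode  # Python 2; urllib.parse.urlencode on Python 3

    print('search?' + urlencode({'q': 'searx', 'setmkt': 'en-US'}))
    # -> search?q=searx&setmkt=en-US (parameter order may vary)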

searx/engines/dailymotion.py

@@ -1,16 +1,17 @@
 from urllib import urlencode
+from lxml import html
 from json import loads
-from cgi import escape
 
 categories = ['videos']
 
-localization = 'en'
+locale = 'en_US'
 
 # see http://www.dailymotion.com/doc/api/obj-video.html
 search_url = 'https://api.dailymotion.com/videos?fields=title,description,duration,url,thumbnail_360_url&sort=relevance&limit=25&page=1&{query}'
 
 def request(query, params):
     global search_url
-    params['url'] = search_url.format(query=urlencode({'search': query, 'localization': localization }))
+    params['url'] = search_url.format(query=urlencode({'search': query, 'localization': locale }))
     return params
@@ -27,6 +28,11 @@ def response(resp):
         else:
             content = ''
         if res['description']:
-            content += escape(res['description'][:500])
+            description = text_content_from_html(res['description'])
+            content += description[:500]
         results.append({'url': url, 'title': title, 'content': content})
     return results
+
+def text_content_from_html(html_string):
+    desc_html = html.fragment_fromstring(html_string, create_parent=True)
+    return desc_html.text_content()
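
Dailymotion descriptions arrive as HTML; the old code escaped them, so tags showed up literally in results. The new helper parses the fragment and keeps only its text. A sketch with an invented description:

    from lxml import html

    def text_content_from_html(html_string):
        # create_parent=True wraps the fragment so mixed text/element content parses as one node
        desc_html = html.fragment_fromstring(html_string, create_parent=True)
        return desc_html.text_content()

    print(text_content_from_html('A <b>great</b> video &amp; more'))  # A great video & more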

searx/engines/duckduckgo.py

@@ -3,10 +3,11 @@ from urllib import urlencode
 from searx.utils import html_to_text
 
 url = 'https://duckduckgo.com/'
-search_url = url + 'd.js?{query}&l=us-en&p=1&s=0'
+search_url = url + 'd.js?{query}&p=1&s=0'
+locale = 'us-en'
 
 def request(query, params):
-    params['url'] = search_url.format(query=urlencode({'q': query}))
+    params['url'] = search_url.format(query=urlencode({'q': query, 'l': locale}))
     return params

searx/engines/duckduckgo_definitions.py

@@ -1,7 +1,7 @@
 import json
 from urllib import urlencode
 
-url = 'http://api.duckduckgo.com/?{query}&format=json&pretty=0'
+url = 'http://api.duckduckgo.com/?{query}&format=json&pretty=0&no_redirect=1'
 
 def request(query, params):
     params['url'] = url.format(query=urlencode({'q': query}))
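
Per DuckDuckGo's zero-click API documentation, no_redirect=1 stops the API from answering !bang queries with an HTTP redirect, so the engine always gets JSON back. With an invented query:

    from urllib import urlencode

    url = 'http://api.duckduckgo.com/?{query}&format=json&pretty=0&no_redirect=1'
    print(url.format(query=urlencode({'q': '!w searx'})))
    # without no_redirect=1, a bang query like this comes back as a redirect, not JSON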

searx/engines/filecrop.py

@@ -1,6 +1,4 @@
-from json import loads
 from urllib import urlencode
-from searx.utils import html_to_text
+from HTMLParser import HTMLParser
 
 url = 'http://www.filecrop.com/'

searx/engines/flickr.py Executable file → Normal file

searx/engines/google_images.py Executable file → Normal file

searx/engines/startpage.py

@@ -19,14 +19,13 @@ def response(resp):
     global base_url
     results = []
     dom = html.fromstring(resp.content)
-    for result in dom.xpath('//div[@class="result"]'):
+    # ads xpath //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
+    # not ads: div[@class="result"] are the direct children of div[@id="results"]
+    for result in dom.xpath('//div[@id="results"]/div[@class="result"]'):
         link = result.xpath('.//h3/a')[0]
         url = link.attrib.get('href')
-        parsed_url = urlparse(url)
-        # TODO better google link detection
-        if parsed_url.netloc.find('www.google.com') >= 0:
-            continue
-        title = ' '.join(link.xpath('.//text()'))
-        content = escape(' '.join(result.xpath('.//p[@class="desc"]//text()')))
+        title = link.text_content()
+        content = result.xpath('./p[@class="desc"]')[0].text_content()
         results.append({'url': url, 'title': title, 'content': content})
     return results
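
The tightened XPath relies on the child axis (/) instead of the descendant axis (//): sponsored results sit inside div#sponsored, so only direct children of div#results are organic hits. A small demonstration against invented markup:

    from lxml import html

    page = html.fromstring(
        '<div id="results">'
        '<div id="sponsored"><div class="result">ad</div></div>'
        '<div class="result">organic</div>'
        '</div>')
    print([r.text for r in page.xpath('//div[@class="result"]')])
    # ['ad', 'organic'] -- the descendant axis also matches the sponsored block
    print([r.text for r in page.xpath('//div[@id="results"]/div[@class="result"]')])
    # ['organic'] -- direct children only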

searx/engines/xpath.py

@@ -1,5 +1,5 @@
 from lxml import html
-from urllib import urlencode
+from urllib import urlencode, unquote
 from urlparse import urlparse, urljoin
 from cgi import escape
 from lxml.etree import _ElementStringResult
@@ -11,32 +11,64 @@ title_xpath = None
 suggestion_xpath = ''
 results_xpath = ''
 
-def extract_url(xpath_results):
-    url = ''
-    parsed_search_url = urlparse(search_url)
+'''
+if xpath_results is a list, extract the text from each result and concat the list
+if xpath_results is an xml element, extract all the text nodes from it (text_content() method from lxml)
+if xpath_results is a string element, then it's already done
+'''
+def extract_text(xpath_results):
     if type(xpath_results) == list:
+        # it's a list of results: concat everything using a recursive call
         if not len(xpath_results):
             raise Exception('Empty url resultset')
-        if type(xpath_results[0]) == _ElementStringResult:
-            url = ''.join(xpath_results)
-            if url.startswith('//'):
-                url = parsed_search_url.scheme+url
-            elif url.startswith('/'):
-                url = urljoin(search_url, url)
-            #TODO
-        else:
-            url = xpath_results[0].attrib.get('href')
+        result = ''
+        for e in xpath_results:
+            result = result + extract_text(e)
+        return result
+    elif type(xpath_results) == _ElementStringResult:
+        # it's a string
+        return ''.join(xpath_results)
     else:
-        url = xpath_results.attrib.get('href')
-    if not url.startswith('http://') and not url.startswith('https://'):
-        url = 'http://'+url
+        # it's an element
+        return xpath_results.text_content()
+
+def extract_url(xpath_results):
+    url = extract_text(xpath_results)
+    if url.startswith('//'):
+        # add http or https to this kind of url: //example.com/
+        parsed_search_url = urlparse(search_url)
+        url = parsed_search_url.scheme+url
+    elif url.startswith('/'):
+        # fix relative url to the search engine
+        url = urljoin(search_url, url)
+    # normalize url
+    url = normalize_url(url)
+    return url
+
+def normalize_url(url):
+    parsed_url = urlparse(url)
+    # add a / at the end of the url if there is no path
+    if not parsed_url.netloc:
+        raise Exception('Cannot parse url')
+    if not parsed_url.path:
+        url += '/'
+    # FIXME: hack for yahoo
+    if parsed_url.hostname == 'search.yahoo.com' and parsed_url.path.startswith('/r'):
+        p = parsed_url.path
+        mark = p.find('/**')
+        if mark != -1:
+            return unquote(p[mark+3:]).decode('utf-8')
+    return url
+
 def request(query, params):
     query = urlencode({'q': query})[2:]
     params['url'] = search_url.format(query=query)
@@ -50,15 +82,19 @@ def response(resp):
     if results_xpath:
         for result in dom.xpath(results_xpath):
             url = extract_url(result.xpath(url_xpath))
-            title = ' '.join(result.xpath(title_xpath))
-            content = escape(' '.join(result.xpath(content_xpath)))
+            title = extract_text(result.xpath(title_xpath)[0])
+            content = extract_text(result.xpath(content_xpath)[0])
             results.append({'url': url, 'title': title, 'content': content})
     else:
-        for content, url, title in zip(dom.xpath(content_xpath), map(extract_url, dom.xpath(url_xpath)), dom.xpath(title_xpath)):
+        for url, title, content in zip(
+                map(extract_url, dom.xpath(url_xpath)),
+                map(extract_text, dom.xpath(title_xpath)),
+                map(extract_text, dom.xpath(content_xpath)),
+        ):
             results.append({'url': url, 'title': title, 'content': content})
 
     if not suggestion_xpath:
         return results
     for suggestion in dom.xpath(suggestion_xpath):
-        results.append({'suggestion': escape(''.join(suggestion.xpath('.//text()')))})
+        results.append({'suggestion': extract_text(suggestion)})
     return results
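
The yahoo hack in normalize_url() exists because Yahoo wraps result links in a redirect URL whose percent-encoded target follows a /** marker in the path. A worked example with an invented redirect URL (Python 2 imports, matching the module):

    from urllib import unquote
    from urlparse import urlparse

    u = 'http://search.yahoo.com/r/_ylt=abc/RV=2/**http%3a%2f%2fexample.com%2Fpage'
    p = urlparse(u).path
    mark = p.find('/**')
    print(unquote(p[mark+3:]))  # http://example.com/page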

searx/engines/yacy.py

@@ -1,5 +1,5 @@
 from json import loads
-from urllib import urlencode, quote
+from urllib import urlencode
 
 url = 'http://localhost:8090'
 search_url = '/yacysearch.json?{query}&maximumRecords=10'

searx/templates/about.html

@@ -37,7 +37,7 @@
 <p>It's ok if you don't trust us regarding the logs, <a href="https://github.com/asciimoo/searx">take the code</a> and run it yourself! decentralize!</p>
 <h3>How to add to firefox?</h3>
 <p><a href="#" onclick="window.external.AddSearchProvider(window.location.protocol + '//' + window.location.host + '/opensearch.xml')">Install</a> searx as a search engine on any version of Firefox! (javascript required)</p>
-<h2 id="faq">Developer FAQ</h2>
+<h2 id="dev_faq">Developer FAQ</h2>
 <h3>New engines?</h3>
 <p><ul>
 <li>Edit your engines.cfg, see <a href="https://raw.github.com/asciimoo/searx/master/engines.cfg_sample">sample config</a></li>

searx/webapp.py

@@ -152,7 +152,8 @@ def preferences():
             selected_categories.append(category)
     if selected_categories:
         resp = make_response(redirect('/'))
-        resp.set_cookie('categories', ','.join(selected_categories))
+        # cookie max age: 4 weeks
+        resp.set_cookie('categories', ','.join(selected_categories), max_age=60*60*24*7*4)
         return resp
     return render('preferences.html')
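
Without max_age, Flask's set_cookie issues a session cookie that disappears when the browser closes; 60*60*24*7*4 is 2419200 seconds, i.e. four weeks. A minimal sketch of the same pattern, assuming Flask (route and cookie values invented):

    from flask import Flask, make_response

    app = Flask(__name__)

    @app.route('/save')
    def save():
        resp = make_response('saved')
        # persist the preference for four weeks instead of one browser session
        resp.set_cookie('categories', 'general,videos', max_age=60 * 60 * 24 * 7 * 4)
        return resp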