Mirror of https://github.com/searxng/searxng.git (synced 2024-11-19 02:40:11 +01:00)

[enh] engine cfg compatibility

This commit is contained in:
asciimoo 2013-10-23 23:55:37 +02:00
parent 39d229e110
commit 74b6be3991
9 changed files with 36 additions and 39 deletions

View File

@ -1,8 +1,11 @@
from json import loads from json import loads
from urllib import urlencode
url = 'https://duckduckgo.com/'
search_url = url + 'd.js?{query}&l=us-en&p=1&s=0'
def request(query, params): def request(query, params):
params['url'] = 'https://duckduckgo.com/d.js?q=%s&l=us-en&p=1&s=0' % query params['url'] = search_url.format(query=urlencode({'q': query}))
return params return params

View File

@ -1,7 +1,10 @@
import json import json
from urllib import urlencode
url = 'http://api.duckduckgo.com/?{query}&format=json&pretty=0'
def request(query, params): def request(query, params):
params['url'] = 'http://api.duckduckgo.com/?q=%s&format=json&pretty=0' % query params['url'] = url.format(query=urlencode({'q': query}))
return params return params

View File

@ -1,18 +1,16 @@
#!/usr/bin/env python #!/usr/bin/env python
from urllib import quote from urllib import urlencode
from lxml import html from lxml import html
from urlparse import urljoin from urlparse import urljoin
categories = ['images'] categories = ['images']
base_url = 'https://secure.flickr.com/' url = 'https://secure.flickr.com/'
search_url = base_url+'search/?q=' search_url = url+'search/?q={query}'
def request(query, params): def request(query, params):
global search_url params['url'] = search_url.format(query=urlencode({'q': query}))
query = quote(query.replace(' ', '+'), safe='+')
params['url'] = search_url + query
return params return params
def response(resp): def response(resp):
@ -20,11 +18,11 @@ def response(resp):
results = [] results = []
dom = html.fromstring(resp.text) dom = html.fromstring(resp.text)
for result in dom.xpath('//div[@id="thumbnails"]//a[@class="rapidnofollow photo-click" and @data-track="photo-click"]'): for result in dom.xpath('//div[@id="thumbnails"]//a[@class="rapidnofollow photo-click" and @data-track="photo-click"]'):
url = urljoin(base_url, result.attrib.get('href')) href = urljoin(url, result.attrib.get('href'))
img = result.xpath('.//img')[0] img = result.xpath('.//img')[0]
title = img.attrib.get('alt', '') title = img.attrib.get('alt', '')
img_src = img.attrib.get('data-defer-src') img_src = img.attrib.get('data-defer-src')
if not img_src: if not img_src:
continue continue
results.append({'url': url, 'title': title, 'img_src': img_src, 'template': 'images.html'}) results.append({'url': href, 'title': title, 'img_src': img_src, 'template': 'images.html'})
return results return results

View File

@ -4,11 +4,11 @@ from cgi import escape
categories = ['it'] categories = ['it']
search_url = 'https://api.github.com/search/repositories?sort=stars&order=desc&' search_url = 'https://api.github.com/search/repositories?sort=stars&order=desc&{query}'
def request(query, params): def request(query, params):
global search_url global search_url
params['url'] = search_url + urlencode({'q': query}) params['url'] = search_url.format(query=urlencode({'q': query}))
params['headers']['Accept'] = 'application/vnd.github.preview.text-match+json' params['headers']['Accept'] = 'application/vnd.github.preview.text-match+json'
return params return params

View File

@ -5,15 +5,14 @@ from json import loads
categories = ['images'] categories = ['images']
search_url = 'https://ajax.googleapis.com/ajax/services/search/images?v=1.0&start=0&rsz=large&safe=off&filter=off&' url = 'https://ajax.googleapis.com/'
search_url = url + 'ajax/services/search/images?v=1.0&start=0&rsz=large&safe=off&filter=off&{query}'
def request(query, params): def request(query, params):
global search_url
params['url'] = search_url + urlencode({'q': query}) params['url'] = search_url + urlencode({'q': query})
return params return params
def response(resp): def response(resp):
global base_url
results = [] results = []
search_res = loads(resp.text) search_res = loads(resp.text)
if not search_res.get('responseData'): if not search_res.get('responseData'):
@ -21,9 +20,9 @@ def response(resp):
if not search_res['responseData'].get('results'): if not search_res['responseData'].get('results'):
return [] return []
for result in search_res['responseData']['results']: for result in search_res['responseData']['results']:
url = result['originalContextUrl'] href = result['originalContextUrl']
title = result['title'] title = result['title']
if not result['url']: if not result['url']:
continue continue
results.append({'url': url, 'title': title, 'content': '', 'img_src': result['url'], 'template': 'images.html'}) results.append({'url': href, 'title': title, 'content': '', 'img_src': result['url'], 'template': 'images.html'})
return results return results

View File

@ -5,21 +5,18 @@ from urllib import quote
categories = ['videos', 'music'] categories = ['videos', 'music']
base_url = 'https://thepiratebay.sx/' url = 'https://thepiratebay.sx/'
search_url = base_url + 'search/{search_term}/0/99/{search_type}' search_url = url + 'search/{search_term}/0/99/{search_type}'
search_types = {'videos': '200' search_types = {'videos': '200'
,'music' : '100' ,'music' : '100'
} }
def request(query, params): def request(query, params):
global search_url, search_types
# 200 is the video category
params['url'] = search_url.format(search_term=quote(query), search_type=search_types.get(params['category'])) params['url'] = search_url.format(search_term=quote(query), search_type=search_types.get(params['category']))
return params return params
def response(resp): def response(resp):
global base_url
results = [] results = []
dom = html.fromstring(resp.text) dom = html.fromstring(resp.text)
search_res = dom.xpath('//table[@id="searchResult"]//tr') search_res = dom.xpath('//table[@id="searchResult"]//tr')
@ -27,12 +24,12 @@ def response(resp):
return results return results
for result in search_res[1:]: for result in search_res[1:]:
link = result.xpath('.//div[@class="detName"]//a')[0] link = result.xpath('.//div[@class="detName"]//a')[0]
url = urljoin(base_url, link.attrib.get('href')) href = urljoin(url, link.attrib.get('href'))
title = ' '.join(link.xpath('.//text()')) title = ' '.join(link.xpath('.//text()'))
content = escape(' '.join(result.xpath('.//font[@class="detDesc"]//text()'))) content = escape(' '.join(result.xpath('.//font[@class="detDesc"]//text()')))
seed, leech = result.xpath('.//td[@align="right"]/text()')[:2] seed, leech = result.xpath('.//td[@align="right"]/text()')[:2]
content += '<br />Seed: %s, Leech: %s' % (seed, leech) content += '<br />Seed: %s, Leech: %s' % (seed, leech)
magnetlink = result.xpath('.//a[@title="Download this torrent using magnet"]')[0] magnetlink = result.xpath('.//a[@title="Download this torrent using magnet"]')[0]
content += '<br /><a href="%s">magnet link</a>' % urljoin(base_url, magnetlink.attrib['href']) content += '<br /><a href="%s">magnet link</a>' % urljoin(url, magnetlink.attrib['href'])
results.append({'url': url, 'title': title, 'content': content}) results.append({'url': href, 'title': title, 'content': content})
return results return results

View File

@ -1,13 +1,15 @@
from json import loads from json import loads
from urllib import urlencode
categories = ['music'] categories = ['music']
guest_client_id = 'b45b1aa10f1ac2941910a7f0d10f8e28' guest_client_id = 'b45b1aa10f1ac2941910a7f0d10f8e28'
search_url = 'https://api.soundcloud.com/search?q=%s&facet=model&limit=10&offset=0&linked_partitioning=1&client_id='+guest_client_id url = 'https://api.soundcloud.com/'
search_url = url + 'search?{query}&facet=model&limit=20&offset=0&linked_partitioning=1&client_id='+guest_client_id
def request(query, params): def request(query, params):
global search_url global search_url
params['url'] = search_url % query params['url'] = search_url.format(query=urlencode({'q': query}))
return params return params

View File

@ -5,23 +5,21 @@ from urllib import urlencode
categories = ['it'] categories = ['it']
base_url = 'http://stackoverflow.com/' url = 'http://stackoverflow.com/'
search_url = base_url+'search?' search_url = url+'search?'
def request(query, params): def request(query, params):
global search_url
params['url'] = search_url + urlencode({'q': query}) params['url'] = search_url + urlencode({'q': query})
return params return params
def response(resp): def response(resp):
global base_url
results = [] results = []
dom = html.fromstring(resp.text) dom = html.fromstring(resp.text)
for result in dom.xpath('//div[@class="question-summary search-result"]'): for result in dom.xpath('//div[@class="question-summary search-result"]'):
link = result.xpath('.//div[@class="result-link"]//a')[0] link = result.xpath('.//div[@class="result-link"]//a')[0]
url = urljoin(base_url, link.attrib.get('href')) href = urljoin(url, link.attrib.get('href'))
title = ' '.join(link.xpath('.//text()')) title = ' '.join(link.xpath('.//text()'))
content = escape(' '.join(result.xpath('.//div[@class="excerpt"]//text()'))) content = escape(' '.join(result.xpath('.//div[@class="excerpt"]//text()')))
results.append({'url': url, 'title': title, 'content': content}) results.append({'url': href, 'title': title, 'content': content})
return results return results

View File

@ -1,15 +1,12 @@
from json import loads from json import loads
from urllib import quote from urllib import urlencode
categories = ['videos'] categories = ['videos']
search_url = 'https://gdata.youtube.com/feeds/api/videos?alt=json&q=' search_url = 'https://gdata.youtube.com/feeds/api/videos?alt=json&{query}'
def request(query, params): def request(query, params):
global search_url params['url'] = search_url.format(query=urlencode({'q': query}))
query = quote(query.replace(' ', '+'), safe='+')
params['url'] = search_url + query
return params return params