## Google (Web)
#
# @website https://www.google.com
# @provide-api yes (https://developers.google.com/custom-search/)
#
# @using-api no
# @results HTML
# @stable no (HTML can change)
# @parse url, title, content, suggestion
from urllib import urlencode
from urlparse import urlparse, parse_qsl
from lxml import html
from searx.engines.xpath import extract_text, extract_url
# engine dependent config
categories = ['general']
paging = True
language_support = True
# search-url
google_hostname = 'www.google.com'
search_path = '/search'
redirect_path = '/url'
images_path = '/images'
search_url = 'https://' + google_hostname + search_path + '?{query}&start={offset}&gbv=1'
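# e.g. with offset=0 and query='q=searx' the template expands to
#   'https://www.google.com/search?q=searx&start=0&gbv=1'
# (gbv=1 selects the basic html version of the result page)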
# specific xpath variables
results_xpath = '//li[@class="g"]'
url_xpath = './/h3/a/@href'
title_xpath = './/h3'
content_xpath = './/span[@class="st"]'
suggestion_xpath = '//p[@class="_Bmc"]'
images_xpath = './/div/a'
image_url_xpath = './@href'
image_img_src_xpath = './img/@src'


# remove google-specific tracking-url
def parse_url(url_string):
    parsed_url = urlparse(url_string)
    if parsed_url.netloc in [google_hostname, ''] and parsed_url.path == redirect_path:
        query = dict(parse_qsl(parsed_url.query))
        return query['q']
    else:
        return url_string
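
# e.g. parse_url('/url?q=https://example.com/&sa=U') returns
# 'https://example.com/' (a made-up redirect-url for illustration);
# any other url is returned unchanged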


# do search-request
def request(query, params):
    offset = (params['pageno'] - 1) * 10

    if params['language'] == 'all':
        language = 'en'
    else:
        language = params['language'].replace('_', '-').lower()

    params['url'] = search_url.format(offset=offset,
                                      query=urlencode({'q': query}))

    params['headers']['Accept-Language'] = language

    return params
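
# e.g. request('searx', {'pageno': 2, 'language': 'all', 'headers': {}})
# sets params['url'] to
#   'https://www.google.com/search?q=searx&start=10&gbv=1'
# and params['headers']['Accept-Language'] to 'en'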


# get response from search-request
def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    # parse results
    for result in dom.xpath(results_xpath):
        title = extract_text(result.xpath(title_xpath)[0])

        try:
            url = parse_url(extract_url(result.xpath(url_xpath), search_url))
            parsed_url = urlparse(url)

            if parsed_url.netloc == google_hostname and parsed_url.path == search_path:
                # remove the link to google news
                continue

            if parsed_url.netloc == google_hostname and parsed_url.path == images_path:
                # images result
                results = results + parse_images(result)
            else:
                # normal result
                content = extract_text(result.xpath(content_xpath)[0])

                # append result
                results.append({'url': url,
                                'title': title,
                                'content': content})
        except Exception:
            # skip results that could not be parsed
            continue

    # parse suggestion
    for suggestion in dom.xpath(suggestion_xpath):
        # append suggestion
        results.append({'suggestion': extract_text(suggestion)})

    # return results
    return results
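

# parse image-results inlined into a result-entry of the main result page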
def parse_images(result):
    results = []

    for image in result.xpath(images_xpath):
        url = parse_url(extract_text(image.xpath(image_url_xpath)[0]))
        img_src = extract_text(image.xpath(image_img_src_xpath)[0])

        # append result
        results.append({'url': url,
                        'title': '',
                        'content': '',
                        'img_src': img_src,
                        'template': 'images.html'})

    return results
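
# minimal manual test, as a sketch (searx normally drives these functions
# from its search framework; fetching the url with requests is an
# assumption for standalone use, not part of the engine api):
#
#   import requests
#   params = request('searx', {'pageno': 1, 'language': 'all', 'headers': {}})
#   resp = requests.get(params['url'], headers=params['headers'])
#   print response(resp)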