
[mod] len() removed from conditions

asciimoo committed 2014-02-11 13:13:51 +01:00
parent 239299d45e
commit c1d7d30b8e
8 changed files with 19 additions and 17 deletions
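
The whole commit applies one Python idiom: empty sequences and mappings are falsy, so testing len(x) before using a container is redundant, and PEP 8 recommends the bare form. A minimal illustration of the rewrite (editorial, not part of the commit):

    # Empty built-in containers are falsy, so len() adds nothing here:
    for value in ([], '', {}, ()):
        assert not value and len(value) == 0

    results = []
    if not len(results):   # the pattern this commit removes
        print('no results')
    if not results:        # the equivalent replacement
        print('no results')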

File 1 of 8

@@ -163,7 +163,7 @@ def score_results(results):
                 duplicated = new_res
                 break
         if duplicated:
-            if len(res.get('content', '')) > len(duplicated.get('content', '')): # noqa
+            if res.get('content') > duplicated.get('content'):
                 duplicated['content'] = res['content']
             duplicated['score'] += score
             duplicated['engines'].append(res['engine'])
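
One caveat on the hunk above, since it differs from the purely stylistic rewrites below: the old condition compared the lengths of the two content strings, while the new one compares the strings themselves, which in Python is lexicographic order; dropping the '' default also means res.get('content') can now return None. A short demonstration of the divergence (editorial, not part of the commit):

    # Length order and lexicographic order can disagree:
    a, b = 'zz', 'aaaa'
    assert not len(a) > len(b)   # old condition is False: a is shorter
    assert a > b                 # new condition is True: 'z' sorts after 'a'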

File 2 of 8

@@ -39,7 +39,7 @@ def parse(query):

 def do_query(data, q):
     ret = []
-    if not len(q):
+    if not q:
         return ret

     qkey = q[0]

File 3 of 8

@@ -35,7 +35,7 @@ def response(resp):
         title = link.text_content()

         content = ''
-        if len(result.xpath('./p[@class="desc"]')):
+        if result.xpath('./p[@class="desc"]'):
             content = result.xpath('./p[@class="desc"]')[0].text_content()

         results.append({'url': url, 'title': title, 'content': content})

File 4 of 8

@@ -23,7 +23,7 @@ if xpath_results is a string element, then it's already done
 def extract_text(xpath_results):
     if type(xpath_results) == list:
         # it's list of result : concat everything using recursive call
-        if not len(xpath_results):
+        if not xpath_results:
             raise Exception('Empty url resultset')
         result = ''
         for e in xpath_results:

File 5 of 8

@@ -13,7 +13,7 @@ def request(query, params):
 def response(resp):
     raw_search_results = loads(resp.text)

-    if not len(raw_search_results):
+    if not raw_search_results:
         return []

     search_results = raw_search_results.get('channels', {})[0].get('items', [])
@@ -26,10 +26,10 @@ def response(resp):
         tmp_result['url'] = result['link']
         tmp_result['content'] = ''

-        if len(result['description']):
+        if result['description']:
             tmp_result['content'] += result['description'] + "<br/>"

-        if len(result['pubDate']):
+        if result['pubDate']:
             tmp_result['content'] += result['pubDate'] + "<br/>"

         if result['size'] != '-1':

File 6 of 8

@@ -22,9 +22,10 @@ def response(resp):
     if not 'feed' in search_results:
         return results
     feed = search_results['feed']
     for result in feed['entry']:
         url = [x['href'] for x in result['link'] if x['type'] == 'text/html']
-        if not len(url):
+        if not url:
             return
         # remove tracking
         url = url[0].replace('feature=youtube_gdata', '')
@@ -32,12 +33,13 @@ def response(resp):
             url = url[:-1]

         title = result['title']['$t']
         content = ''
+        thumbnail = ''

-        if len(result['media$group']['media$thumbnail']):
+        if result['media$group']['media$thumbnail']:
             thumbnail = result['media$group']['media$thumbnail'][0]['url']
             content += '<a href="{0}" title="{0}" ><img src="{1}" /></a>'.format(url, thumbnail) # noqa

-        if len(content):
+        if content:
             content += '<br />' + result['content']['$t']
         else:
             content = result['content']['$t']

File 7 of 8

@@ -49,7 +49,7 @@ class Search(object):

         self.categories = []

-        if len(self.engines):
+        if self.engines:
             self.categories = list(set(engine['category']
                                        for engine in self.engines))
         else:
@@ -59,13 +59,13 @@ class Search(object):
                     if not category in categories:
                         continue
                     self.categories.append(category)
-        if not len(self.categories):
+        if not self.categories:
             cookie_categories = request.cookies.get('categories', '')
             cookie_categories = cookie_categories.split(',')
             for ccateg in cookie_categories:
                 if ccateg in categories:
                     self.categories.append(ccateg)
-        if not len(self.categories):
+        if not self.categories:
             self.categories = ['general']

         for categ in self.categories:
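
The two hunks above chain the same falsy test into a fallback cascade: categories selected in the request win, else the categories cookie is consulted, else a hard default applies. A condensed sketch of that flow with hypothetical inputs (editorial, outside the Search class):

    # Falsy-test fallback cascade, mirroring the Search hunks above:
    known = ('general', 'news', 'images')
    categories = []                      # nothing selected in the request
    if not categories:                   # fall back to the cookie
        cookie_categories = 'news,bogus'.split(',')
        categories = [c for c in cookie_categories if c in known]
    if not categories:                   # last-resort default
        categories = ['general']
    assert categories == ['news']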

File 8 of 8

@@ -91,7 +91,7 @@ def render(template_name, **kwargs):
         for ccateg in cookie_categories:
             if ccateg in categories:
                 kwargs['selected_categories'].append(ccateg)
-        if not len(kwargs['selected_categories']):
+        if not kwargs['selected_categories']:
            kwargs['selected_categories'] = ['general']

    return render_template(template_name, **kwargs)
@@ -150,12 +150,12 @@ def index():
     elif search.request_data.get('format') == 'csv':
         csv = UnicodeWriter(cStringIO.StringIO())
         keys = ('title', 'url', 'content', 'host', 'engine', 'score')
-        if len(search.results):
+        if search.results:
             csv.writerow(keys)
             for row in search.results:
                 row['host'] = row['parsed_url'].netloc
                 csv.writerow([row.get(key, '') for key in keys])
         csv.stream.seek(0)
         response = Response(csv.stream.read(), mimetype='application/csv')
         cont_disp = 'attachment;Filename=searx_-_{0}.csv'.format(search.query)
         response.headers.add('Content-Disposition', cont_disp)