From 4c9b8b29ee420a3d111cb225e1fb4ad14c5b3e17 Mon Sep 17 00:00:00 2001
From: Markus Heiser
Date: Thu, 30 Sep 2021 16:40:00 +0200
Subject: [PATCH 1/3] [mod] engine duckduckgo - use DuckDuckGo-Lite

Implement a scraper for DuckDuckGo-Lite [1].  The existing DuckDuckGo [2]
engine does not support paging.  DuckDuckGo-Lite is much faster, less verbose
and does have a paging option (reverse-engineered from the input form of [1]).

[1] https://lite.duckduckgo.com/lite
[2] https://duckduckgo.com/

Signed-off-by: Markus Heiser
---
 searx/engines/duckduckgo.py | 146 ++++++++++++++++++++++++------------
 1 file changed, 97 insertions(+), 49 deletions(-)

diff --git a/searx/engines/duckduckgo.py b/searx/engines/duckduckgo.py
index 3c086f81b..e1ef140e6 100644
--- a/searx/engines/duckduckgo.py
+++ b/searx/engines/duckduckgo.py
@@ -1,16 +1,24 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-"""
- DuckDuckGo (Web)
+# lint: pylint
+"""DuckDuckGo Lite
 """
 
-from lxml.html import fromstring
 from json import loads
-from searx.utils import extract_text, match_language, eval_xpath, dict_subset
+
+from lxml.html import fromstring
+
+from searx.utils import (
+    dict_subset,
+    eval_xpath,
+    eval_xpath_getindex,
+    extract_text,
+    match_language,
+)
 from searx.network import get
 
 # about
 about = {
-    "website": 'https://duckduckgo.com/',
+    "website": 'https://lite.duckduckgo.com/lite',
     "wikidata_id": 'Q12805',
     "official_api_documentation": 'https://duckduckgo.com/api',
     "use_official_api": False,
@@ -20,7 +28,7 @@ about = {
 
 # engine dependent config
 categories = ['general']
-paging = False
+paging = True
 supported_languages_url = 'https://duckduckgo.com/util/u172.js'
 time_range_support = True
 
@@ -34,21 +42,16 @@ language_aliases = {
     'zh-HK': 'tzh-HK'
 }
 
+time_range_dict = {
+    'day': 'd',
+    'week': 'w',
+    'month': 'm',
+    'year': 'y'
+}
+
 # search-url
-url = 'https://html.duckduckgo.com/html'
-url_ping = 'https://duckduckgo.com/t/sl_h'
-time_range_dict = {'day': 'd',
-                   'week': 'w',
-                   'month': 'm',
-                   'year': 'y'}
-
-# specific xpath variables
-result_xpath = '//div[@class="result results_links results_links_deep web-result "]'  # noqa
-url_xpath = './/a[@class="result__a"]/@href'
-title_xpath = './/a[@class="result__a"]'
-content_xpath = './/a[@class="result__snippet"]'
-correction_xpath = '//div[@id="did_you_mean"]//a'
-
+url = 'https://lite.duckduckgo.com/lite'
+url_ping = 'https://duckduckgo.com/t/sl_l'
 
 # match query's language to a region code that duckduckgo will accept
 def get_region_code(lang, lang_list=None):
@@ -63,66 +66,111 @@ def request(query, params):
-    if params['time_range'] is not None and params['time_range'] not in time_range_dict:
-        return params
 
     params['url'] = url
     params['method'] = 'POST'
+
     params['data']['q'] = query
-    params['data']['b'] = ''
+
+    # The API is not documented, so we do some reverse engineering and
+    # emulate what https://lite.duckduckgo.com/lite/ does when you press the
+    # "next page" link again and again ..
+
+    params['headers']['Content-Type'] = 'application/x-www-form-urlencoded'
+
+    # the initial page does not send an offset
+    if params['pageno'] == 2:
+        # the second page has an offset of 30
+        offset = (params['pageno'] - 1) * 30
+        params['data']['s'] = offset
+        params['data']['dc'] = offset + 1
+
+    elif params['pageno'] > 2:
+        # the third and following pages have an offset of 30 + n*50
+        offset = 30 + (params['pageno'] - 2) * 50
+        params['data']['s'] = offset
+        params['data']['dc'] = offset + 1
+
+    # the initial page does not send additional data in its input form
+    if params['pageno'] > 1:
+        # requests for the second and following pages need the 'o' and 'api' arguments
+        params['data']['o'] = 'json'
+        params['data']['api'] = 'd.js'
+
+    # the initial and the second page do not send these additional form fields
+    if params['pageno'] > 2:
+        # requests for the third and following pages need some more arguments
+        params['data']['nextParams'] = ''
+        params['data']['v'] = ''
+        params['data']['vqd'] = ''
 
     region_code = get_region_code(params['language'], supported_languages)
     if region_code:
         params['data']['kl'] = region_code
         params['cookies']['kl'] = region_code
 
+    params['data']['df'] = ''
     if params['time_range'] in time_range_dict:
         params['data']['df'] = time_range_dict[params['time_range']]
+        params['cookies']['df'] = time_range_dict[params['time_range']]
 
-    params['allow_redirects'] = False
+    logger.debug("param data: %s", params['data'])
+    logger.debug("param cookies: %s", params['cookies'])
     return params
 
-
 # get response from search-request
 def response(resp):
-    if resp.status_code == 303:
-        return []
-
     # ping
     headers_ping = dict_subset(resp.request.headers, ['User-Agent', 'Accept-Encoding', 'Accept', 'Cookie'])
     get(url_ping, headers=headers_ping)
 
-    # parse the response
+    if resp.status_code == 303:
+        return []
+
     results = []
     doc = fromstring(resp.text)
-    for i, r in enumerate(eval_xpath(doc, result_xpath)):
-        if i >= 30:
-            break
-        try:
-            res_url = eval_xpath(r, url_xpath)[-1]
-        except:
+
+    result_table = eval_xpath(doc, '//html/body/form/div[@class="filters"]/table')
+    if not len(result_table) >= 3:
+        # no more results
+        return []
+    result_table = result_table[2]
+
+    tr_rows = eval_xpath(result_table, './/tr')
+
+    # the last row contains the form with the 'previous/next page' links
+    tr_rows = tr_rows[:-1]
+
+    len_tr_rows = len(tr_rows)
+    offset = 0
+
+    while len_tr_rows >= offset + 4:
+
+        # assemble the table rows we need to scrape
+        tr_title = tr_rows[offset]
+        tr_content = tr_rows[offset + 1]
+        offset += 4
+
+        # ignore sponsored ads
+        if tr_content.get('class') == 'result-sponsored':
             continue
-        if not res_url:
+
+        a_tag = eval_xpath_getindex(tr_title, './/td//a[@class="result-link"]', 0, None)
+        if a_tag is None:
             continue
-        title = extract_text(eval_xpath(r, title_xpath))
-        content = extract_text(eval_xpath(r, content_xpath))
+        td_content = eval_xpath_getindex(tr_content, './/td[@class="result-snippet"]', 0, None)
+        if td_content is None:
+            continue
 
-        # append result
-        results.append({'title': title,
-                        'content': content,
-                        'url': res_url})
+        results.append({
+            'title': a_tag.text_content(),
+            'content': extract_text(td_content),
+            'url': a_tag.get('href'),
+        })
 
-    # parse correction
-    for correction in eval_xpath(doc, correction_xpath):
-        # append correction
-        results.append({'correction': extract_text(correction)})
-
-    # return results
     return results
 
-
 # get supported languages from their site
 def _fetch_supported_languages(resp):
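
The paging scheme reverse-engineered in PATCH 1/3 is easiest to check in
isolation.  A minimal sketch of the offset arithmetic from request(); the
helper name ddg_lite_offset is invented for illustration, only the numbers
come from the patch:

    def ddg_lite_offset(pageno):
        # page 1 sends no 's' field at all, page 2 sends s=30, page 3 and
        # following send s=30 + n*50; 'dc' is always this value plus one
        if pageno <= 1:
            return None
        if pageno == 2:
            return (pageno - 1) * 30   # always 30
        return 30 + (pageno - 2) * 50  # 80, 130, 180, ...

    assert [ddg_lite_offset(n) for n in range(1, 5)] == [None, 30, 80, 130]
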
From a5b7ed9550c234a24064a678bcbed4fa5ec4327f Mon Sep 17 00:00:00 2001
From: Markus Heiser
Date: Thu, 30 Sep 2021 16:49:12 +0200
Subject: [PATCH 2/3] [mod] engine duckduckgo - update supported_languages_url

Signed-off-by: Markus Heiser
---
 searx/engines/duckduckgo.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/searx/engines/duckduckgo.py b/searx/engines/duckduckgo.py
index e1ef140e6..d283af81d 100644
--- a/searx/engines/duckduckgo.py
+++ b/searx/engines/duckduckgo.py
@@ -29,7 +29,7 @@ about = {
 # engine dependent config
 categories = ['general']
 paging = True
-supported_languages_url = 'https://duckduckgo.com/util/u172.js'
+supported_languages_url = 'https://duckduckgo.com/util/u588.js'
 time_range_support = True
 
 language_aliases = {
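
For context on the language handling touched by PATCH 2/3 and the data update
below: DuckDuckGo's 'kl' form field and cookie expect region codes with the
country part first (e.g. 'us-en'), while the list fetched from
supported_languages_url is stored language-first (e.g. 'en-US').  A rough
sketch of the flip that get_region_code() performs; the helper name
flip_to_region is invented, and the match_language() normalisation the real
function runs first is left out:

    def flip_to_region(lang_code):
        # 'en-IL' (language-REGION) -> 'il-en' (region-language), the
        # format DuckDuckGo accepts in its 'kl' form field and cookie
        lang, region = lang_code.split('-')
        return region.lower() + '-' + lang.lower()

    assert flip_to_region('en-IL') == 'il-en'
    assert flip_to_region('tzh-HK') == 'hk-tzh'
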
From 97419a770d7c4c102ed4c04c35f0d099c196d533 Mon Sep 17 00:00:00 2001
From: Markus Heiser
Date: Fri, 1 Oct 2021 08:45:48 +0200
Subject: [PATCH 3/3] [upd] make data.languages

Signed-off-by: Markus Heiser
---
 searx/data/engines_languages.json | 36 +++++++++++++------------------
 1 file changed, 15 insertions(+), 21 deletions(-)

diff --git a/searx/data/engines_languages.json b/searx/data/engines_languages.json
index fe7e467fc..a4b0a8a5d 100644
--- a/searx/data/engines_languages.json
+++ b/searx/data/engines_languages.json
@@ -24570,13 +24570,17 @@
     "en-CA",
     "en-ID",
     "en-IE",
+    "en-IL",
     "en-IN",
     "en-MY",
     "en-NZ",
     "en-PH",
+    "en-PK",
     "en-SG",
+    "en-TH",
     "en-UK",
     "en-US",
+    "en-VN",
     "en-ZA",
     "es-AR",
     "es-CL",
@@ -24591,17 +24595,13 @@
     "fr-CA",
     "fr-CH",
     "fr-FR",
-    "he-IL",
     "hr-HR",
     "hu-HU",
-    "id-ID",
-    "it-CH",
     "it-IT",
     "jp-JP",
     "kr-KR",
     "lt-LT",
     "lv-LV",
-    "ms-MY",
     "nl-BE",
     "nl-NL",
     "no-NO",
@@ -24613,12 +24613,10 @@
     "sk-SK",
     "sl-SL",
     "sv-SE",
-    "th-TH",
-    "tl-PH",
     "tr-TR",
     "tzh-HK",
     "tzh-TW",
-    "vi-VN",
+    "uk-UA",
     "wt-WT",
     "zh-CN"
 ],
@@ -24637,13 +24635,17 @@
     "en-CA",
     "en-ID",
     "en-IE",
+    "en-IL",
     "en-IN",
     "en-MY",
     "en-NZ",
     "en-PH",
+    "en-PK",
     "en-SG",
+    "en-TH",
     "en-UK",
     "en-US",
+    "en-VN",
     "en-ZA",
     "es-AR",
     "es-CL",
@@ -24658,17 +24660,13 @@
     "fr-CA",
     "fr-CH",
     "fr-FR",
-    "he-IL",
     "hr-HR",
     "hu-HU",
-    "id-ID",
-    "it-CH",
     "it-IT",
     "jp-JP",
     "kr-KR",
     "lt-LT",
     "lv-LV",
-    "ms-MY",
     "nl-BE",
     "nl-NL",
     "no-NO",
@@ -24680,12 +24678,10 @@
     "sk-SK",
     "sl-SL",
     "sv-SE",
-    "th-TH",
-    "tl-PH",
     "tr-TR",
     "tzh-HK",
     "tzh-TW",
-    "vi-VN",
+    "uk-UA",
     "wt-WT",
     "zh-CN"
 ],
@@ -24704,13 +24700,17 @@
     "en-CA",
     "en-ID",
     "en-IE",
+    "en-IL",
     "en-IN",
     "en-MY",
     "en-NZ",
     "en-PH",
+    "en-PK",
     "en-SG",
+    "en-TH",
     "en-UK",
     "en-US",
+    "en-VN",
     "en-ZA",
     "es-AR",
     "es-CL",
@@ -24725,17 +24725,13 @@
     "fr-CA",
     "fr-CH",
     "fr-FR",
-    "he-IL",
     "hr-HR",
     "hu-HU",
-    "id-ID",
-    "it-CH",
     "it-IT",
     "jp-JP",
     "kr-KR",
     "lt-LT",
     "lv-LV",
-    "ms-MY",
     "nl-BE",
     "nl-NL",
     "no-NO",
@@ -24747,12 +24743,10 @@
     "sk-SK",
     "sl-SL",
     "sv-SE",
-    "th-TH",
-    "tl-PH",
     "tr-TR",
     "tzh-HK",
     "tzh-TW",
-    "vi-VN",
+    "uk-UA",
     "wt-WT",
     "zh-CN"
 ],
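
To close the series, the markup shape that the response() parser from
PATCH 1/3 relies on: DuckDuckGo-Lite apparently renders each hit as a group
of four <tr> rows, of which the patch consumes the first (title link) and the
second (snippet) and skips the rest.  A self-contained sketch of that walk
over a saved result page, using plain lxml instead of the searx helpers; the
file name results.html is only an example, and the four-row grouping is
inferred from the offset += 4 step in the patch:

    from lxml.html import fromstring

    with open('results.html', encoding='utf-8') as f:
        doc = fromstring(f.read())

    # the third table below the 'filters' <div> holds the result rows
    table = doc.xpath('//html/body/form/div[@class="filters"]/table')[2]
    rows = table.xpath('.//tr')[:-1]  # last row is the previous/next page form

    for offset in range(0, len(rows) - 3, 4):
        tr_title, tr_content = rows[offset], rows[offset + 1]
        if tr_content.get('class') == 'result-sponsored':
            continue  # skip sponsored ads
        links = tr_title.xpath('.//td//a[@class="result-link"]')
        snippets = tr_content.xpath('.//td[@class="result-snippet"]')
        if links and snippets:
            print(links[0].get('href'), '|', snippets[0].text_content().strip())
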