# SPDX-License-Identifier: AGPL-3.0-or-later
"""CORE (science)
"""
from datetime import datetime
from urllib.parse import urlencode
from searx.exceptions import SearxEngineAPIException
about = {
    "website": 'https://core.ac.uk',
    "wikidata_id": 'Q22661180',
    "official_api_documentation": 'https://core.ac.uk/documentation/api/',
    "use_official_api": True,
    "require_api_key": True,
    "results": 'JSON',
}
categories = ['science', 'scientific publications']
paging = True
nb_per_page = 10
api_key = 'unset'
base_url = 'https://core.ac.uk:443/api-v2/search/'
search_string = '{query}?page={page}&pageSize={nb_per_page}&apiKey={apikey}'
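
# A sketch of enabling this engine in settings.yml; the shortcut and other
# values are illustrative, but ``api_key`` must be set for ``request()``
# below to work:
#
#   - name: core.ac.uk
#     engine: core
#     categories: science
#     shortcut: cor
#     api_key: "<your CORE API key>"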


def request(query, params):
    # fail early when the mandatory API key has not been configured
    if api_key == 'unset':
        raise SearxEngineAPIException('missing CORE API key')

    search_path = search_string.format(
        query=urlencode({'q': query}),
        nb_per_page=nb_per_page,
        page=params['pageno'],
        apikey=api_key,
    )
    params['url'] = base_url + search_path
    return params
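
# Illustration only: with the defaults above, a query for "machine learning"
# on page 1 produces a URL of this shape (the API key is a placeholder):
#
#   https://core.ac.uk:443/api-v2/search/q=machine+learning?page=1&pageSize=10&apiKey=<api_key>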


def response(resp):
    results = []
    json_data = resp.json()

    for result in json_data['data']:
        source = result['_source']

        # pick a URL for the result, trying in order: a listed URL upgraded
        # to HTTPS, the DOI resolver, the download URL and finally an ARK
        # identifier
        url = None
        if source.get('urls'):
            url = source['urls'][0].replace('http://', 'https://', 1)
        if url is None and source.get('doi'):
            # use the DOI reference
            url = 'https://doi.org/' + source['doi']
        if url is None and source.get('downloadUrl'):
            # use the downloadUrl
            url = source['downloadUrl']
        if url is None and source.get('identifiers'):
            # try to find an ark id, see
            # https://www.wikidata.org/wiki/Property:P8091
            # and https://en.wikipedia.org/wiki/Archival_Resource_Key
            arkids = [
                identifier[5:]  # 5 is the length of "ark:/"
                for identifier in source.get('identifiers')
                if isinstance(identifier, str) and identifier.startswith('ark:/')
            ]
            if arkids:
                url = 'https://n2t.net/' + arkids[0]
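            # illustration with an invented identifier: 'ark:/12025/abc123'
            # yields arkids[0] == '12025/abc123' and the URL
            # 'https://n2t.net/12025/abc123'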
        if url is None:
            continue
        publishedDate = None
        # 'publishedDate' / 'depositedDate' are milliseconds since the epoch
        time_ms = source['publishedDate'] or source['depositedDate']
        if time_ms:
            publishedDate = datetime.fromtimestamp(time_ms / 1000)

        # a journal's 'title' can be None, so filter those entries out
        journals = [j['title'] for j in (source.get('journals') or []) if j['title']]

        publisher = source['publisher']
        if publisher:
            publisher = publisher.strip("'")
        results.append(
            {
                'template': 'paper.html',
                'title': source['title'],
                'url': url,
                'content': source['description'] or '',
                # 'comments': '',
                'tags': source['topics'],
                'publishedDate': publishedDate,
                'type': (source['types'] or [None])[0],
                'authors': source['authors'],
                'editor': ', '.join(source['contributors'] or []),
                'publisher': publisher,
                'journal': ', '.join(journals),
                # 'volume': '',
                # 'pages': '',
                # 'number': '',
                'doi': source['doi'],
                'issn': [x for x in [source.get('issn')] if x],
                'isbn': [x for x in [source.get('isbn')] if x],  # exists in the rawRecordXml
                'pdf_url': source.get('repositoryDocument', {}).get('pdfOrigin'),
            }
        )
    return results
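

# A minimal smoke test, illustration only: the payload below mimics the
# shape ``response()`` expects from the CORE v2 API, using invented field
# values (this is not real API output).  Run from a searxng checkout so
# that the ``searx`` package is importable.
if __name__ == '__main__':

    class _FakeResponse:  # hypothetical stand-in for the HTTP response object
        def json(self):
            return {
                'data': [
                    {
                        '_source': {
                            'title': 'An example paper',
                            'description': 'An illustrative abstract.',
                            'urls': ['http://example.org/paper'],
                            'doi': '10.1000/example.123',
                            'publishedDate': 1616757769000,  # ms since the epoch
                            'depositedDate': None,
                            'journals': None,
                            'publisher': "'Example Press'",
                            'topics': ['illustration'],
                            'types': ['article'],
                            'authors': ['Doe, Jane'],
                            'contributors': None,
                        }
                    }
                ]
            }

    for paper in response(_FakeResponse()):
        print(paper['title'], '->', paper['url'])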