def response(resp):
    """Build the engine's result list from a Semantic Scholar JSON response.

    ``resp.text`` is expected to hold a JSON document with a top-level
    ``results`` array; each entry is mapped onto one ``paper.html`` result
    dict.  Relies on module-level names from the enclosing engine module:
    ``loads``, ``datetime``, ``gettext`` and ``paper_url``.
    """
    payload = loads(resp.text)
    entries = []

    for item in payload['results']:
        # Landing-page URL: primary link first, then the raw links list,
        # then the first alternate link, finally a URL derived from the id.
        url = item.get('primaryPaperLink', {}).get('url')
        if not url and item.get('links'):
            url = item.get('links')[0]
        if not url:
            alternates = item.get('alternatePaperLinks')
            if alternates:
                url = alternates[0].get('url')
        if not url:
            url = paper_url + '/%s' % item['id']

        # Publication date only when supplied; format appears to be
        # "YYYY-MM-DD" — strptime will raise on anything else.
        published = None
        if 'pubDate' in item:
            published = datetime.strptime(item['pubDate'], "%Y-%m-%d")

        # Each authors entry is itself a list; the display name sits in
        # its first element.
        author_names = [group[0]['name'] for group in item.get('authors', [])]

        # The first alternate link that is neither a crawler nor a DOI
        # link is treated as a direct document (PDF) link.
        direct_link = next(
            (
                link['url']
                for link in item.get('alternatePaperLinks', [])
                if link['linkType'] not in ('crawler', 'doi')
            ),
            None,
        )

        # Citation summary, surfaced through the "comments" slot.
        comments = None
        if 'citationStats' in item:
            stats = item['citationStats']
            comments = gettext(
                '{numCitations} citations from the year {firstCitationVelocityYear} to {lastCitationVelocityYear}'
            ).format(
                numCitations=stats['numCitations'],
                firstCitationVelocityYear=stats['firstCitationVelocityYear'],
                lastCitationVelocityYear=stats['lastCitationVelocityYear'],
            )

        entries.append(
            {
                'template': 'paper.html',
                'url': url,
                'title': item['title']['text'],
                'content': item['paperAbstract']['text'],
                'journal': item.get('venue', {}).get('text') or item.get('journal', {}).get('name'),
                'doi': item.get('doiInfo', {}).get('doi'),
                'tags': item.get('fieldsOfStudy'),
                'authors': author_names,
                'pdf_url': direct_link,
                'publishedDate': published,
                'comments': comments,
            }
        )

    return entries