def response(resp):
    """Parse a Semantic Scholar style JSON response into searx paper results.

    Args:
        resp: HTTP response object whose ``.json()`` yields a dict with a
            ``results`` list of paper records.

    Returns:
        list[dict]: one ``paper.html``-template result dict per record.
    """
    res = resp.json()

    results = []
    for result in res['results']:
        # Best-available URL: primary link, then first generic link, then
        # first alternate link, finally the paper's own detail page.
        url = result.get('primaryPaperLink', {}).get('url')
        if not url and result.get('links'):
            url = result.get('links')[0]
        if not url:
            alternatePaperLinks = result.get('alternatePaperLinks')
            if alternatePaperLinks:
                url = alternatePaperLinks[0].get('url')
        if not url:
            url = base_url + '/paper/%s' % result['id']

        # pubDate is expected as YYYY-MM-DD; tolerate malformed values from
        # the remote API instead of aborting the whole result page.
        publishedDate = None
        if 'pubDate' in result:
            try:
                publishedDate = datetime.strptime(result['pubDate'], "%Y-%m-%d")
            except ValueError:
                publishedDate = None

        # Each author entry is a list of name variants; take the first one.
        # Skip empty alias lists (previously raised IndexError on author[0]).
        authors = [author[0]['name'] for author in result.get('authors', []) if author]

        # A PDF link is the first alternate link that is neither a crawler
        # nor a DOI link.
        pdf_url = None
        for doc in result.get('alternatePaperLinks', []):
            if doc['linkType'] not in ('crawler', 'doi'):
                pdf_url = doc['url']
                break

        # Citation summary, localized via gettext when stats are present.
        comments = None
        if 'citationStats' in result:
            comments = gettext(
                '{numCitations} citations from the year {firstCitationVelocityYear} to {lastCitationVelocityYear}'
            ).format(
                numCitations=result['citationStats']['numCitations'],
                firstCitationVelocityYear=result['citationStats']['firstCitationVelocityYear'],
                lastCitationVelocityYear=result['citationStats']['lastCitationVelocityYear'],
            )

        results.append(
            {
                'template': 'paper.html',
                'url': url,
                'title': result['title']['text'],
                'content': html_to_text(result['paperAbstract']['text']),
                'journal': result.get('venue', {}).get('text') or result.get('journal', {}).get('name'),
                'doi': result.get('doiInfo', {}).get('doi'),
                'tags': result.get('fieldsOfStudy'),
                'authors': authors,
                'pdf_url': pdf_url,
                'publishedDate': publishedDate,
                'comments': comments,
            }
        )

    return results