def response(resp):
    """Build searx paper results from a Semantic Scholar JSON response.

    Each entry of ``res['results']`` is mapped to a ``paper.html`` result
    dict; optional fields (date, citations, pdf link) degrade to ``None``.
    """
    payload = resp.json()
    entries = []

    for record in payload['results']:
        # Landing-page URL: try the preferred sources in order, then fall
        # back to the paper page on base_url.
        link = record.get('primaryPaperLink', {}).get('url')
        if not link and record.get('links'):
            link = record.get('links')[0]
        if not link:
            alternates = record.get('alternatePaperLinks')
            if alternates:
                link = alternates[0].get('url')
        if not link:
            link = base_url + '/paper/%s' % record['id']

        # Publication date is optional in the API payload.
        published = (
            datetime.strptime(record['pubDate'], "%Y-%m-%d")
            if 'pubDate' in record
            else None
        )

        # Each author entry is a list whose first element carries the name.
        author_names = [entry[0]['name'] for entry in record.get('authors', [])]

        # First alternate link that is neither a crawler nor a DOI entry is
        # exposed as the direct document link.
        direct_link = next(
            (
                alt['url']
                for alt in record.get('alternatePaperLinks', [])
                if alt['linkType'] not in ('crawler', 'doi')
            ),
            None,
        )

        # Citation statistics, when present, become the "comments" line.
        note = None
        if 'citationStats' in record:
            stats = record['citationStats']
            note = gettext(
                '{numCitations} citations from the year {firstCitationVelocityYear} to {lastCitationVelocityYear}'
            ).format(
                numCitations=stats['numCitations'],
                firstCitationVelocityYear=stats['firstCitationVelocityYear'],
                lastCitationVelocityYear=stats['lastCitationVelocityYear'],
            )

        entries.append(
            {
                'template': 'paper.html',
                'url': link,
                'title': record['title']['text'],
                'content': html_to_text(record['paperAbstract']['text']),
                'journal': record.get('venue', {}).get('text') or record.get('journal', {}).get('name'),
                'doi': record.get('doiInfo', {}).get('doi'),
                'tags': record.get('fieldsOfStudy'),
                'authors': author_names,
                'pdf_url': direct_link,
                'publishedDate': published,
                'comments': note,
            }
        )

    return entries