Kexin2000 committed on
Commit f93a84b
1 Parent(s): 81728a3

Update app.py

Files changed (1): app.py +345 -62

app.py CHANGED
@@ -10,12 +10,235 @@ from sklearn.neighbors import NearestNeighbors
 import requests
 from cachetools import cached, TTLCache
 
-CACHE_TIME = 60 * 60 * 6  # 6 hours
-
-# Global recommender object
-recommender = None
+
+def download_pdf(url, output_path):
+    urllib.request.urlretrieve(url, output_path)
+
+
+def preprocess(text):
+    text = text.replace('\n', ' ')
+    text = re.sub(r'\s+', ' ', text)
+    return text
+
+
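+# Read pages start_page..end_page (1-indexed, inclusive) from a PDF with PyMuPDF
+# (fitz) and return one cleaned string per page.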
+def pdf_to_text(path, start_page=1, end_page=None):
+    doc = fitz.open(path)
+    total_pages = doc.page_count
+
+    if end_page is None:
+        end_page = total_pages
+
+    text_list = []
+
+    for i in range(start_page - 1, end_page):
+        text = doc.load_page(i).get_text("text")
+        text = preprocess(text)
+        text_list.append(text)
+
+    doc.close()
+    return text_list
+
+
+ text_toks = [t.split(' ') for t in texts]
44
+ page_nums = []
45
+ chunks = []
46
+
47
+ for idx, words in enumerate(text_toks):
48
+ for i in range(0, len(words), word_length):
49
+ chunk = words[i:i + word_length]
50
+ if (i + word_length) > len(words) and (len(chunk) < word_length) and (
51
+ len(text_toks) != (idx + 1)):
52
+ text_toks[idx + 1] = chunk + text_toks[idx + 1]
53
+ continue
54
+ chunk = ' '.join(chunk).strip()
55
+ chunk = f'[Page no. {idx + start_page}]' + ' ' + '"' + chunk + '"'
56
+ chunks.append(chunk)
57
+ return chunks
58
+
59
+
60
+ class SemanticSearch:
61
+
62
+ def __init__(self):
63
+ self.use = hub.load('https://tfhub.dev/google/universal-sentence-encoder/4')
64
+ self.fitted = False
65
+
66
+ def fit(self, data, batch=1000, n_neighbors=5):
67
+ self.data = data
68
+ self.embeddings = self.get_text_embedding(data, batch=batch)
69
+ n_neighbors = min(n_neighbors, len(self.embeddings))
70
+ self.nn = NearestNeighbors(n_neighbors=n_neighbors)
71
+ self.nn.fit(self.embeddings)
72
+ self.fitted = True
73
+
74
+ def __call__(self, text, return_data=True):
75
+ inp_emb = self.use([text])
76
+ neighbors = self.nn.kneighbors(inp_emb, return_distance=False)[0]
77
+
78
+ if return_data:
79
+ return [self.data[i] for i in neighbors]
80
+ else:
81
+ return neighbors
82
+
83
+ def get_text_embedding(self, texts, batch=1000):
84
+ embeddings = []
85
+ for i in range(0, len(texts), batch):
86
+ text_batch = texts[i:(i + batch)]
87
+ emb_batch = self.use(text_batch)
88
+ embeddings.append(emb_batch)
89
+ embeddings = np.vstack(embeddings)
90
+ return embeddings
91
+
92
+
93
+ def load_recommender(path, start_page=1):
94
+ global recommender
95
+ texts = pdf_to_text(path, start_page=start_page)
96
+ chunks = text_to_chunks(texts, start_page=start_page)
97
+ recommender.fit(chunks)
98
+ return 'Corpus Loaded.'
99
+
100
+
101
+ def generate_text(openAI_key, prompt, model="gpt-3.5-turbo"):
102
+ openai.api_key = openAI_key
103
+ temperature = 0.7
104
+ max_tokens = 256
105
+ top_p = 1
106
+ frequency_penalty = 0
107
+ presence_penalty = 0
108
+
109
+ if model == "text-davinci-003":
110
+ completions = openai.Completion.create(
111
+ engine=model,
112
+ prompt=prompt,
113
+ max_tokens=max_tokens,
114
+ n=1,
115
+ stop=None,
116
+ temperature=temperature,
117
+ )
118
+ message = completions.choices[0].text
119
+ else:
120
+ message = openai.ChatCompletion.create(
121
+ model=model,
122
+ messages=[
123
+ {"role": "system", "content": "You are a helpful assistant."},
124
+ {"role": "assistant", "content": "Here is some initial assistant message."},
125
+ {"role": "user", "content": prompt}
126
+ ],
127
+ temperature=.3,
128
+ max_tokens=max_tokens,
129
+ top_p=top_p,
130
+ frequency_penalty=frequency_penalty,
131
+ presence_penalty=presence_penalty,
132
+ ).choices[0].message['content']
133
+ return message
134
+
135
+
136
+ def generate_answer(question, openAI_key, model):
137
+ topn_chunks = recommender(question)
138
+ prompt = 'search results:\n\n'
139
+ for c in topn_chunks:
140
+ prompt += c + '\n\n'
141
+
142
+ prompt += "Instructions: Compose a comprehensive reply to the query using the search results given. " \
143
+ "Cite each reference using [ Page Number] notation. " \
144
+ "Only answer what is asked. The answer should be short and concise. \n\nQuery: "
145
+
146
+ prompt += f"{question}\nAnswer:"
147
+ answer = generate_text(openAI_key, prompt, model)
148
+ return answer
149
+
150
+
151
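+# Top-level handler for the chat tab: validates inputs, loads the PDF into the
+# recommender (from URL or upload), and appends (question, answer) to the history.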
+def question_answer(chat_history, url, file, question, openAI_key, model):
+    try:
+        if openAI_key.strip() == '':
+            return '[ERROR]: Please enter your Open AI Key. Get your key here : https://platform.openai.com/account/api-keys'
+        if url.strip() == '' and file is None:
+            return '[ERROR]: Both URL and PDF are empty. Provide at least one.'
+        if url.strip() != '' and file is not None:
+            return '[ERROR]: Both URL and PDF are provided. Please provide only one (either URL or PDF).'
+        if model is None or model == '':
+            return '[ERROR]: You have not selected any model. Please choose an LLM model.'
+        if url.strip() != '':
+            glob_url = url
+            download_pdf(glob_url, 'corpus.pdf')
+            load_recommender('corpus.pdf')
+        else:
+            # rename the Gradio temp file to drop its random suffix
+            old_file_name = file.name
+            file_name = file.name
+            file_name = file_name[:-12] + file_name[-4:]
+            os.rename(old_file_name, file_name)
+            load_recommender(file_name)
+        if question.strip() == '':
+            return '[ERROR]: Question field is empty'
+        if model == "text-davinci-003" or model == "gpt-4" or model == "gpt-4-32k":
+            answer = generate_answer_text_davinci_003(question, openAI_key)
+        else:
+            answer = generate_answer(question, openAI_key, model)
+        chat_history.append([question, answer])
+        return chat_history
+    except openai.error.InvalidRequestError as e:
+        return '[ERROR]: Either you do not have access to GPT4 or you have exhausted your quota!'
+
+
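+# Legacy Completions-API path: question_answer() sends text-davinci-003, gpt-4 and
+# gpt-4-32k selections through the two functions below, which always call text-davinci-003.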
+def generate_text_text_davinci_003(openAI_key, prompt, engine="text-davinci-003"):
+    openai.api_key = openAI_key
+    completions = openai.Completion.create(
+        engine=engine,
+        prompt=prompt,
+        max_tokens=512,
+        n=1,
+        stop=None,
+        temperature=0.7,
+    )
+    message = completions.choices[0].text
+    return message
+
+
+def generate_answer_text_davinci_003(question, openAI_key):
+    topn_chunks = recommender(question)
+    prompt = ""
+    prompt += 'search results:\n\n'
+    for c in topn_chunks:
+        prompt += c + '\n\n'
+
+    prompt += "Instructions: Compose a comprehensive reply to the query using the search results given. " \
+              "Cite each reference using [ Page Number] notation (every result has this number at the beginning). " \
+              "Citation should be done at the end of each sentence. If the search results mention multiple subjects " \
+              "with the same name, create separate answers for each. Only include information found in the results and " \
+              "don't add any additional information. Make sure the answer is correct and don't output false content. " \
+              "If the text does not relate to the query, simply state 'Found Nothing'. Ignore outlier " \
+              "search results which have nothing to do with the question. Only answer what is asked. The " \
+              "answer should be short and concise. \n\n"
+
+    prompt += f"Query: {question}\nAnswer:"
+    answer = generate_text_text_davinci_003(openAI_key, prompt, "text-davinci-003")
+    return answer
+
+
+# Pre-defined questions
+questions = [
+    "What did the study investigate?",
+    "Can you provide a summary of this paper?",
+    "What are the methodologies used in this study?",
+    "What are the data intervals used in this study? Give me the start and end dates.",
+    "What are the main limitations of this study?",
+    "What are the main shortcomings of this study?",
+    "What are the main findings of the study?",
+    "What are the main results of the study?",
+    "What are the main contributions of this study?",
+    "What is the conclusion of this paper?",
+    "What are the input features used in this study?",
+    "What is the dependent variable in this study?",
+]
+
+# =============================================================================
+CACHE_TIME = 60 * 60 * 6  # 6 hours
+
+
+def parse_arxiv_id_from_paper_url(url):
+    return url.split("/")[-1]
+
 
-# Global variables for the second feature
 @cached(cache=TTLCache(maxsize=500, ttl=CACHE_TIME))
 def get_recommendations_from_semantic_scholar(semantic_scholar_id: str):
     try:
@@ -29,11 +252,13 @@ def get_recommendations_from_semantic_scholar(semantic_scholar_id: str):
         return r.json()["recommendedPapers"]
     except KeyError as e:
         raise gr.Error(
-            "Error getting recommendations; if this is a new paper or it has not yet been indexed by Semantic Scholar, there may be no recommendations yet."
+            "Error getting recommendations, if this is a new paper it may not yet have"
+            " been indexed by Semantic Scholar."
         ) from e
 
 
 def filter_recommendations(recommendations, max_paper_count=5):
+    # include only arxiv papers
     arxiv_paper = [
         r for r in recommendations if r["externalIds"].get("ArXiv", None) is not None
     ]
  "title"
275
  ]
276
  except Exception as e:
277
+ print(f"Error getting paper title for {arxiv_id}: {e}")
278
+ raise gr.Error("Error getting paper title for {arxiv_id}: {e}") from e
279
 
280
 
281
  def format_recommendation_into_markdown(arxiv_id, recommendations):
282
+ # title = get_paper_title_from_arxiv_id(arxiv_id)
283
+ # url = f"https://huggingface.co/papers/{arxiv_id}"
284
+ # comment = f"Recommended papers for [{title}]({url})\n\n"
285
+ comment = "The following papers were recommended by the Semantic Scholar API \n\n"
286
  for r in recommendations:
287
  hub_paper_url = f"https://huggingface.co/papers/{r['externalIds']['ArXiv']}"
288
  comment += f"* [{r['title']}]({hub_paper_url}) ({r['year']})\n"
 
295
@@ -67,63 +295,118 @@ def return_recommendations(url):
     filtered_recommendations = filter_recommendations(recommendations)
     return format_recommendation_into_markdown(arxiv_id, filtered_recommendations)
 
-
-# Gradio interface
-title = 'PDF GPT Turbo'
-description = """ PDF GPT Turbo allows you to chat with your PDF files. It uses Google's Universal Sentence Encoder with a Deep Averaging Network (DAN) to give hallucination-free responses by improving the quality of OpenAI's embeddings. It cites page numbers in square brackets ([Page No.]), showing where the information is located and adding credibility to the responses."""
-
-# Pre-defined questions
-questions = [
-    "What did the study investigate?",
-    "Can you provide a summary of this paper?",
-    "What methods were used in this study?",
-    # Add more questions as needed
-]
-
-with gr.Blocks(css="""#chatbot { font-size: 14px; min-height: 1200; }""") as demo:
-    gr.Markdown(f'<center><h3>{title}</h3></center>')
-    gr.Markdown(description)
-
-    with gr.Row():
-        with gr.Group():
-            gr.Markdown(f'<p style="text-align:center">Get your Open AI API key <a href="https://platform.openai.com/account/api-keys">here</a></p>')
-            with gr.Accordion("API Key"):
-                openAI_key = gr.Textbox(label='Enter your OpenAI API key here')
-            url = gr.Textbox(label='Enter PDF URL here (Example: https://arxiv.org/pdf/1706.03762.pdf )')
-            gr.Markdown("<center><h4>OR<h4></center>")
-            file = gr.File(label='Upload your PDF/ Research Paper / Book here', file_types=['.pdf'])
-            question = gr.Textbox(label='Enter your question here')
-            gr.Examples(
-                [[q] for q in questions],
-                inputs=[question],
-                label="Pre-defined questions: Click on a question to auto-fill the input box, then press Enter!",
-            )
-            model = gr.Radio([
-                'gpt-3.5-turbo',
-                'gpt-3.5-turbo-16k',
-                'gpt-3.5-turbo-0613',
-                'gpt-3.5-turbo-16k-0613',
-                'text-davinci-003',
-                'gpt-4',
-                'gpt-4-32k'
-            ], label='Select Model', default='gpt-3.5-turbo')
-            btn = gr.Button(value='Submit')
-            btn.style(full_width=True)
-        with gr.Group():
-            chatbot = gr.Chatbot(placeholder="Chat History", label="Chat History", lines=50, elem_id="chatbot")
-
-    # Bind the button's click event to the question_answer function
-    btn.click(
-        question_answer,
-        inputs=[chatbot, url, file, question, openAI_key, model],
-        outputs=[chatbot],
-    )
-
-    # Second tab
-    gr.Tab("Paper Recommendations", [
-        gr.Textbox(label="Enter a Hugging Face Papers URL", lines=1),
-        gr.Button("Get Recommendations", return_recommendations),
-        gr.Markdown(),
-    ])
-
-demo.launch()
+# ==============================================================================================
+
+
+recommender = SemanticSearch()
+
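+# Shared global instance: load_recommender() fits it and the answer generators query it.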
+# title = 'PDF GPT Turbo'
+# description = """ PDF GPT Turbo allows you to chat with your PDF files. It uses Google's Universal Sentence Encoder with Deep averaging network (DAN) to give hallucination free response by improving the embedding quality of OpenAI. It cites the page number in square brackets([Page No.]) and shows where the information is located, adding credibility to the responses."""
+#
+# with gr.Blocks(css="""#chatbot { font-size: 14px; min-height: 1200; }""") as demo:
+#     gr.Markdown(f'<center><h3>{title}</h3></center>')
+#     gr.Markdown(description)
+#
+#     with gr.Row():
+#         with gr.Group():
+#             gr.Markdown(
+#                 f'<p style="text-align:center">Get your Open AI API key <a href="https://platform.openai.com/account/api-keys">here</a></p>')
+#             with gr.Accordion("API Key"):
+#                 openAI_key = gr.Textbox(label='Enter your OpenAI API key here', password=True)
+#             url = gr.Textbox(label='Enter PDF URL here (Example: https://arxiv.org/pdf/1706.03762.pdf )')
+#             gr.Markdown("<center><h4>OR<h4></center>")
+#             file = gr.File(label='Upload your PDF/ Research Paper / Book here', file_types=['.pdf'])
+#             question = gr.Textbox(label='Enter your question here')
+#             gr.Examples(
+#                 [[q] for q in questions],
+#                 inputs=[question],
+#                 label="PRE-DEFINED QUESTIONS: Click on a question to auto-fill the input box, then press Enter!",
+#             )
+#             model = gr.Radio([
+#                 'gpt-3.5-turbo',
+#                 'gpt-3.5-turbo-16k',
+#                 'gpt-3.5-turbo-0613',
+#                 'gpt-3.5-turbo-16k-0613',
+#                 'text-davinci-003',
+#                 'gpt-4',
+#                 'gpt-4-32k'
+#             ], label='Select Model', default='gpt-3.5-turbo')
+#             btn = gr.Button(value='Submit')
+#
+#             btn.style(full_width=True)
+#
+#         with gr.Group():
+#             chatbot = gr.Chatbot(placeholder="Chat History", label="Chat History", lines=50, elem_id="chatbot")
+#
+#     # Bind the click event of the button to the question_answer function
+#     btn.click(
+#         question_answer,
+#         inputs=[chatbot, url, file, question, openAI_key, model],
+#         outputs=[chatbot],
+#     )
+#
+# demo.launch()
+
+# Content from the first file
+title_1 = "Semantic Scholar Paper Recommender"
+description_1 = (
+    "Paste a link to a paper on Hugging Face Papers and get recommendations for similar"
+    " papers from Semantic Scholar. **Note**: Some papers may not have recommendations"
+    " yet if they are new or have not been indexed by Semantic Scholar."
+)
+examples_1 = [
+    "https://huggingface.co/papers/2309.12307",
+    "https://huggingface.co/papers/2211.10086",
+]
+
+# Content from the second file
+title_2 = "PDF GPT Turbo"
+description_2 = (
+    "PDF GPT Turbo allows you to chat with your PDF files. It uses Google's Universal Sentence Encoder with Deep averaging network (DAN) to give hallucination free response by improving the embedding quality of OpenAI. It cites the page number in square brackets([Page No.]) and shows where the information is located, adding credibility to the responses."
+)
+
+# Create the first interface
+interface_1 = gr.Interface(
+    return_recommendations,
+    gr.Textbox(lines=1),
+    gr.Markdown(),
+    examples=examples_1,
+    title=title_1,
+    description=description_1,
+)
+
+# Create the second interface
+interface_2 = gr.Interface(
+    question_answer,
+    [
+        gr.Chatbot(placeholder="Chat History", label="Chat History", lines=50, elem_id="chatbot"),
+        gr.Textbox(label='Enter PDF URL here (Example: https://arxiv.org/pdf/1706.03762.pdf )'),
+        gr.File(label='Upload your PDF/ Research Paper / Book here', file_types=['.pdf']),
+        gr.Textbox(label='Enter your question here'),
+        gr.Textbox(label='Enter your OpenAI API key here', password=True),
+        gr.Radio(['gpt-3.5-turbo',
+                  'gpt-3.5-turbo-16k',
+                  'gpt-3.5-turbo-0613',
+                  'gpt-3.5-turbo-16k-0613',
+                  'text-davinci-003',
+                  'gpt-4',
+                  'gpt-4-32k'
+                  ], label='Select Model', default='gpt-3.5-turbo'),
+    ],
+    [gr.Chatbot(placeholder="Chat History", label="Chat History", lines=50, elem_id="chatbot")],
+    examples=[],
+    title=title_2,
+    description=description_2,
+)
+
+# Create the interface with two tabs
+# (gr.TabbedInterface pairs each interface with its tab name)
+tabbed_interface = gr.TabbedInterface(
+    [interface_1, interface_2],
+    ["Semantic Scholar Recommender", "OpenAI Chatbot"],
+)
+
+# Launch the interface
+tabbed_interface.launch()