zfb committed on
Commit 85f3d92
1 Parent(s): 5335124

fix bug: deprecation warning and infinite loop

Files changed (4)
  1. .gitignore +6 -0
  2. app.py +13 -13
  3. optimizeOpenAI.py +4 -1
  4. requirements.txt +0 -1
.gitignore ADDED
@@ -0,0 +1,6 @@
+# Ignore all __pycache__ directories
+__pycache__/
+/__pycache__
+# Ignore all .pyc files
+*.pyc
+flagged/
app.py CHANGED
@@ -808,8 +808,8 @@ Use ChatGPT to summary the papers.Star our Github [🌟ChatPaper](https://github
     '''

     api_input = [
-        gradio.inputs.Textbox(label="请输入你的API-key(必填, 多个API-key请用英文逗号隔开)",
-                              default="",
+        gradio.Textbox(label="请输入你的API-key(必填, 多个API-key请用英文逗号隔开)",
+                       value="",
                        type='password')
     ]
     api_gui = gradio.Interface(fn=valid_apikey,
@@ -841,28 +841,28 @@ Use ChatGPT to summary the papers.Star our Github [🌟ChatPaper](https://github
     '''
     # 创建Gradio界面
     ip = [
-        gradio.inputs.Textbox(label="请输入你的API-key(必填, 多个API-key请用英文逗号隔开),不需要空格",
-                              default="",
+        gradio.Textbox(label="请输入你的API-key(必填, 多个API-key请用英文逗号隔开),不需要空格",
+                       value="",
                        type='password'),
-        gradio.inputs.Textbox(
+        gradio.Textbox(
            label="请输入论文大标题索引(用英文逗号隔开,必填)",
-            default=
+            value=
            "'Abstract,Introduction,Related Work,Background,Preliminary,Problem Formulation,Methods,Methodology,Method,Approach,Approaches,Materials and Methods,Experiment Settings,Experiment,Experimental Results,Evaluation,Experiments,Results,Findings,Data Analysis,Discussion,Results and Discussion,Conclusion,References'"
        ),
-        gradio.inputs.Radio(choices=["gpt-3.5-turbo", "gpt-3.5-turbo-0301"],
-                            default="gpt-3.5-turbo",
+        gradio.Radio(choices=["gpt-3.5-turbo", "gpt-3.5-turbo-0301"],
+                     value="gpt-3.5-turbo",
                      label="Select model"),
-        gradio.inputs.Slider(minimum=-0,
+        gradio.Slider(minimum=-0,
                       maximum=1.0,
-                      default=1.0,
+                      value=1.0,
                       step=0.05,
                       label="Top-p (nucleus sampling)"),
-        gradio.inputs.Slider(minimum=-0,
+        gradio.Slider(minimum=-0,
                       maximum=5.0,
-                      default=0.5,
+                      value=0.5,
                       step=0.5,
                       label="Temperature"),
-        gradio.inputs.File(label="请上传论文PDF(必填)")
+        gradio.File(label="请上传论文PDF(必填)")
     ]

     chatpaper_gui = gradio.Interface(fn=upload_pdf,
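Note on the app.py hunks above: the deprecation warning came from the pre-3.x `gradio.inputs.*` namespace and its `default=` argument; Gradio 3.x exposes components at the top level and takes `value=` instead, which is what this commit switches to. A minimal, self-contained sketch of the new pattern (the `echo_key` callback is illustrative only, not the app's `valid_apikey`):

import gradio

def echo_key(api_key):
    # Placeholder callback; app.py wires valid_apikey / upload_pdf here instead.
    return f"got an API key of length {len(api_key)}"

demo = gradio.Interface(
    fn=echo_key,
    # Old style: gradio.inputs.Textbox(default="", ...) -> DeprecationWarning
    # New style: top-level component, with value= instead of default=
    inputs=[gradio.Textbox(label="API key", value="", type="password")],
    outputs="text",
)

if __name__ == "__main__":
    demo.launch()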
optimizeOpenAI.py CHANGED
@@ -92,6 +92,9 @@ class chatPaper:
         full_conversation = ""
         for x in self.conversation[convo_id]:
             full_conversation = str(x["content"]) + "\n" + full_conversation
+        max_len_full_conversation = self.max_tokens - self.decrease_step
+        if len(ENCODER.encode(full_conversation)) > max_len_full_conversation:
+            full_conversation = full_conversation[:-max_len_full_conversation]
         while True:
             if (len(ENCODER.encode(full_conversation+query)) > self.max_tokens):
                 query = query[:self.decrease_step]
@@ -208,7 +211,7 @@ class chatPaper:
         input = input[self.decrease_step:]
         prompt = prompt.replace("{conversation}", input)
         self.reset(convo_id='conversationSummary')
-        response = self.ask(prompt,convo_id='conversationSummary')
+        response = self.ask(prompt,convo_id='conversationSummary')[0]
         while self.token_str(str(response))>self.max_tokens:
             response = response[:-self.decrease_step]
         self.reset(convo_id='conversationSummary',system_prompt='Summariaze our diaglog')
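Context for the infinite-loop fix above: if the accumulated `full_conversation` is by itself already over `max_tokens`, the original `while True` keeps shaving characters off `query` without ever bringing the combined length under the limit, so it never exits. The commit caps the history before entering the loop. Below is a simplified, self-contained sketch of that guard, using character counts as a stand-in for the project's `ENCODER.encode` tokenizer (the names and trimming direction are illustrative, not the module's exact code):

def token_len(text: str) -> int:
    # Stand-in for len(ENCODER.encode(text)); 1 character == 1 "token" in this sketch.
    return len(text)

def fit_prompt(full_conversation: str, query: str, max_tokens: int, decrease_step: int) -> str:
    # Guard added by the fix (simplified): cap the history first so the loop can terminate.
    history_budget = max_tokens - decrease_step
    if token_len(full_conversation) > history_budget:
        full_conversation = full_conversation[:history_budget]
    # Trimming the query now eventually brings the total under max_tokens.
    while token_len(full_conversation + query) > max_tokens and query:
        query = query[:-decrease_step]
    return full_conversation + query

# Example: an oversized history no longer hangs the loop.
print(len(fit_prompt("h" * 500, "q" * 100, max_tokens=120, decrease_step=10)))  # -> 120

The other change, indexing `self.ask(...)[0]`, suggests `ask` returns the reply text as the first element of a tuple; the subsequent `while` loop then measures and trims a string rather than the whole return value.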
requirements.txt CHANGED
@@ -1,7 +1,6 @@
 arxiv==1.4.3
 PyMuPDF==1.21.1
 requests==2.26.0
-tiktoken==0.2.0
 tenacity==8.2.2
 pybase64==1.2.3
 Pillow==9.4.0