shaocongma committed
Commit 08c0f29
Parent: c731c18

add error catch for openai api.

assets/idealab.png ADDED
chat.py → idealab.py RENAMED
@@ -8,7 +8,8 @@ from utils.prompts import SYSTEM
 openai_key = os.getenv("OPENAI_API_KEY")
 default_model = os.getenv("DEFAULT_MODEL")
 if default_model is None:
-    default_model = "gpt-3.5-turbo-16k"
+    # default_model = "gpt-3.5-turbo-16k"
+    default_model = "gpt-4"
 
 openai.api_key = openai_key
 
@@ -67,11 +68,14 @@ Instruction:
 
 
 ANNOUNCEMENT = """
-# Paper Bot
+<h1 style="text-align: center"><img src='/file=assets/idealab.png' width=36px style="display: inline"/>灵感实验室IdeaLab</h1>
 
-Criticize your paper's contribution by searching related references online! This nice bot will also give you some suggestions.
+<p>灵感实验室IdeaLab可以为你选择你下一篇论文的研究方向! 输入你的研究领域或者任何想法, 灵感实验室会自动生成若干个论文标题+论文的主要贡献供你选择. </p>
+
+<p>除此之外, 输入你的论文标题+主要贡献, 它会自动搜索相关文献, 来验证这个想法是不是有人做过了.</p>
 """
 
+
 def criticize_my_idea(title, contributions, max_tokens=4096):
     ref = References(title=title, description=f"{contributions}")
     keywords, _ = llm(systems=SYSTEM["keywords"], prompts=title, return_json=True)
@@ -97,13 +101,18 @@ def generate_choices(thoughts):
     return output
 
 
+# def translate_json(json_input):
+#     system_prompt = "You are a translation bot. The user will input a JSON format string. You need to translate it into Chinese and return in the same formmat."
+#     output, _ = llm(systems=system_prompt, prompts=str(json_input), return_json=True)
+#     return output
+
 
 with gr.Blocks() as demo:
     llm = GPTModel(model=default_model)
-    gr.Markdown(ANNOUNCEMENT)
 
+    gr.HTML(ANNOUNCEMENT)
     with gr.Row():
-        with gr.Tab("Make it an idea!"):
+        with gr.Tab("生成论文想法 (Generate Paper Ideas)"):
             thoughts_input = gr.Textbox(label="Thoughts")
             with gr.Accordion("Show prompts", open=False):
                 prompts_1 = gr.Textbox(label="Prompts", interactive=False, value=paper_system_prompt)
@@ -111,7 +120,7 @@ with gr.Blocks() as demo:
             with gr.Row():
                 button_generate_idea = gr.Button("Make it an idea!", variant="primary")
 
-        with gr.Tab("Criticize my idea!"):
+        with gr.Tab("验证想法可行性 (Validate Feasibility)"):
             title_input = gr.Textbox(label="Title")
             contribution_input = gr.Textbox(label="Contributions", lines=5)
             with gr.Accordion("Show prompts", open=False):
@@ -120,15 +129,16 @@ with gr.Blocks() as demo:
             with gr.Row():
                 button_submit = gr.Button("Criticize my idea!", variant="primary")
 
-        with gr.Tab("Make it a paper!"):
-            gr.Markdown("## Coming Soon!")
+        with gr.Tab("生成论文 (Generate Paper)"):
+            gr.Markdown("...")
 
         with gr.Column(scale=1):
             contribution_output = gr.JSON(label="Contributions")
+            # cn_output = gr.JSON(label="主要贡献")
             with gr.Accordion("References", open=False):
                 references_output = gr.JSON(label="References")
 
     button_submit.click(fn=criticize_my_idea, inputs=[title_input, contribution_input], outputs=[contribution_output, references_output])
-    button_generate_idea.click(fn=generate_choices, inputs=thoughts_input, outputs=contribution_output)
+    button_generate_idea.click(fn=generate_choices, inputs=thoughts_input, outputs=contribution_output)#.success(translate_json, contribution_output, cn_output)
 demo.queue(concurrency_count=1, max_size=5, api_open=False)
 demo.launch(show_error=True)
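
The translation step referenced above is only sketched in comments in this commit. If the commented-out translate_json helper and cn_output panel were re-enabled, the event wiring could look roughly like the snippet below. This is a hedged sketch rather than part of the commit: it assumes the surrounding with gr.Blocks() as demo: context of idealab.py, the same callable llm object, and a Gradio version whose event listeners support .success() chaining.

# Hypothetical wiring of the commented-out translation step (not in this commit).
def translate_json(json_input):
    system_prompt = ("You are a translation bot. The user will input a JSON format string. "
                     "You need to translate it into Chinese and return in the same format.")
    output, _ = llm(systems=system_prompt, prompts=str(json_input), return_json=True)
    return output

# Inside the existing `with gr.Blocks() as demo:` block:
cn_output = gr.JSON(label="主要贡献")  # extra panel for the translated contributions

button_generate_idea.click(
    fn=generate_choices, inputs=thoughts_input, outputs=contribution_output
).success(translate_json, contribution_output, cn_output)

Because .success() only fires after the preceding event completes without an error, a failed idea-generation call would not trigger the extra translation request.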
utils/gpt_interaction.py CHANGED
@@ -94,41 +94,25 @@ class GPTModel:
             {"role": "user", "content": prompts}
         ]
         for _ in range(self.max_attempts):
-            response = openai.ChatCompletion.create(
-                model=self.model,
-                messages=conversation_history,
-                n=1,
-                temperature=self.temperature,
-                presence_penalty=self.presence_penalty,
-                frequency_penalty=self.frequency_penalty,
-                stream=False
-            )
-            assistant_message = response['choices'][0]["message"]["content"]
-            usage = response['usage']
-            log.info(assistant_message)
-            time.sleep(15)
-            if return_json:
-                assistant_message = json.loads(assistant_message)
-            return assistant_message, usage
-            # try:
-            #     response = openai.ChatCompletion.create(
-            #         model=self.model,
-            #         messages=conversation_history,
-            #         n=1,
-            #         temperature=self.temperature,
-            #         presence_penalty=self.presence_penalty,
-            #         frequency_penalty=self.frequency_penalty,
-            #         stream=False
-            #     )
-            #     assistant_message = response['choices'][0]["message"]["content"]
-            #     usage = response['usage']
-            #     log.info(assistant_message)
-            #     if return_json:
-            #         assistant_message = json.loads(assistant_message)
-            #     return assistant_message, usage
-            # except Exception as e:
-            #     print(f"Failed to get response. Error: {e}")
-            #     time.sleep(self.delay)
+            try:
+                response = openai.ChatCompletion.create(
+                    model=self.model,
+                    messages=conversation_history,
+                    n=1,
+                    temperature=self.temperature,
+                    presence_penalty=self.presence_penalty,
+                    frequency_penalty=self.frequency_penalty,
+                    stream=False
+                )
+                assistant_message = response['choices'][0]["message"]["content"]
+                usage = response['usage']
+                log.info(assistant_message)
+                if return_json:
+                    assistant_message = json.loads(assistant_message)
+                return assistant_message, usage
+            except openai.error.APIConnectionError as e:
+                print(f"Failed to get response. Error: {e}")
+                time.sleep(self.delay)
         raise RuntimeError("Failed to get response from OpenAI.")
 
 
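
For reference, a minimal usage sketch of the retried call above. Assumptions are flagged in the comments: the import paths follow this repo's layout, OPENAI_API_KEY is set, and the example prompt is made up. Only openai.error.APIConnectionError is retried, so other OpenAI errors still surface on the first attempt.

# Hedged usage sketch, not part of this commit. GPTModel is called the same way
# idealab.py calls it: llm(systems=..., prompts=..., return_json=True).
import openai
from utils.gpt_interaction import GPTModel  # assumed import path
from utils.prompts import SYSTEM

llm = GPTModel(model="gpt-4")

try:
    keywords, usage = llm(systems=SYSTEM["keywords"],
                          prompts="An example paper title",  # hypothetical input
                          return_json=True)
    print(keywords, usage)
except openai.error.OpenAIError as e:
    # Anything other than APIConnectionError is not retried by the loop above.
    print(f"OpenAI request failed: {e}")
except RuntimeError as e:
    # Raised once max_attempts connection retries are exhausted.
    print(e)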