Spaces:
Running
Running
shaocongma
committed on
Commit
•
1de6eae
1
Parent(s):
668208d
update models.
Browse files- app.py +9 -4
- section_generator.py +1 -1
- utils/gpt_interaction.py +36 -20
app.py
CHANGED
@@ -40,12 +40,14 @@ else:
|
|
40 |
except openai.error.AuthenticationError:
|
41 |
IS_OPENAI_API_KEY_AVAILABLE = False
|
42 |
|
43 |
-
DEFAULT_MODEL = "gpt-4" if GPT4_ENABLE else
|
44 |
GPT4_INTERACTIVE = True if GPT4_ENABLE else False
|
45 |
DEFAULT_SECTIONS = ["introduction", "related works", "backgrounds", "methodology", "experiments",
|
46 |
"conclusion", "abstract"] if GPT4_ENABLE \
|
47 |
else ["introduction", "related works"]
|
48 |
|
|
|
|
|
49 |
#######################################################################################################################
|
50 |
# Load the list of templates & knowledge databases
|
51 |
#######################################################################################################################
|
@@ -69,6 +71,9 @@ ANNOUNCEMENT = """
|
|
69 |
## 主要功能
|
70 |
通过输入想要生成的论文名称(比如Playing atari with deep reinforcement learning),即可由AI辅助生成论文模板.
|
71 |
|
|
|
|
|
|
|
72 |
***2023-06-13 Update***:
|
73 |
1. 新增‘高级选项-Prompts模式’. 这个模式仅会输出用于生成论文的Prompts而不会生成论文本身. 可以根据自己的需求修改Prompts, 也可以把Prompts复制给其他语言模型.
|
74 |
2. 把默认的ICLR 2022模板改成了Default模板. 不再显示ICLR的页眉页尾.
|
@@ -219,7 +224,7 @@ with gr.Blocks(theme=theme) as demo:
|
|
219 |
template = gr.Dropdown(label="Template", choices=ALL_TEMPLATES, value="Default",
|
220 |
interactive=True,
|
221 |
info="生成论文的模板.")
|
222 |
-
model_selection = gr.Dropdown(label="Model", choices=
|
223 |
value=DEFAULT_MODEL,
|
224 |
interactive=GPT4_INTERACTIVE,
|
225 |
info="生成论文用到的语言模型.")
|
@@ -242,7 +247,7 @@ with gr.Blocks(theme=theme) as demo:
|
|
242 |
interactive=True, label="MAX_KW_REFS",
|
243 |
info="每个Keyword搜索几篇参考文献", visible=False)
|
244 |
|
245 |
-
max_tokens_ref_slider = gr.Slider(minimum=256, maximum=
|
246 |
interactive=True, label="MAX_TOKENS",
|
247 |
info="参考文献内容占用Prompts中的Token数")
|
248 |
|
@@ -263,7 +268,7 @@ with gr.Blocks(theme=theme) as demo:
|
|
263 |
query_counts_slider = gr.Slider(minimum=1, maximum=20, value=10, step=1,
|
264 |
interactive=True, label="QUERY_COUNTS",
|
265 |
info="从知识库内检索多少条内容", visible=False)
|
266 |
-
max_tokens_kd_slider = gr.Slider(minimum=256, maximum=
|
267 |
interactive=True, label="MAX_TOKENS",
|
268 |
info="知识库内容占用Prompts中的Token数")
|
269 |
# template = gr.Dropdown(label="Template", choices=ALL_TEMPLATES, value="Default",
|
|
|
40 |
except openai.error.AuthenticationError:
|
41 |
IS_OPENAI_API_KEY_AVAILABLE = False
|
42 |
|
43 |
+
DEFAULT_MODEL = "gpt-4" if GPT4_ENABLE else 'gpt-3.5-turbo-16k'
|
44 |
GPT4_INTERACTIVE = True if GPT4_ENABLE else False
|
45 |
DEFAULT_SECTIONS = ["introduction", "related works", "backgrounds", "methodology", "experiments",
|
46 |
"conclusion", "abstract"] if GPT4_ENABLE \
|
47 |
else ["introduction", "related works"]
|
48 |
|
49 |
+
MODEL_LIST = ['gpt-4', 'gpt-3.5-turbo', 'gpt-3.5-turbo-16k']
|
50 |
+
|
51 |
#######################################################################################################################
|
52 |
# Load the list of templates & knowledge databases
|
53 |
#######################################################################################################################
|
|
|
71 |
## 主要功能
|
72 |
通过输入想要生成的论文名称(比如Playing atari with deep reinforcement learning),即可由AI辅助生成论文模板.
|
73 |
|
74 |
+
***2023-06-13 Update***:
|
75 |
+
- 增加了最新的gpt-3.5-turbo-16k模型的支持.
|
76 |
+
|
77 |
***2023-06-13 Update***:
|
78 |
1. 新增‘高级选项-Prompts模式’. 这个模式仅会输出用于生成论文的Prompts而不会生成论文本身. 可以根据自己的需求修改Prompts, 也可以把Prompts复制给其他语言模型.
|
79 |
2. 把默认的ICLR 2022模板改成了Default模板. 不再显示ICLR的页眉页尾.
|
|
|
224 |
template = gr.Dropdown(label="Template", choices=ALL_TEMPLATES, value="Default",
|
225 |
interactive=True,
|
226 |
info="生成论文的模板.")
|
227 |
+
model_selection = gr.Dropdown(label="Model", choices=MODEL_LIST,
|
228 |
value=DEFAULT_MODEL,
|
229 |
interactive=GPT4_INTERACTIVE,
|
230 |
info="生成论文用到的语言模型.")
|
|
|
247 |
interactive=True, label="MAX_KW_REFS",
|
248 |
info="每个Keyword搜索几篇参考文献", visible=False)
|
249 |
|
250 |
+
max_tokens_ref_slider = gr.Slider(minimum=256, maximum=8192, value=2048, step=2,
|
251 |
interactive=True, label="MAX_TOKENS",
|
252 |
info="参考文献内容占用Prompts中的Token数")
|
253 |
|
|
|
268 |
query_counts_slider = gr.Slider(minimum=1, maximum=20, value=10, step=1,
|
269 |
interactive=True, label="QUERY_COUNTS",
|
270 |
info="从知识库内检索多少条内容", visible=False)
|
271 |
+
max_tokens_kd_slider = gr.Slider(minimum=256, maximum=8192, value=2048, step=2,
|
272 |
interactive=True, label="MAX_TOKENS",
|
273 |
info="知识库内容占用Prompts中的Token数")
|
274 |
# template = gr.Dropdown(label="Template", choices=ALL_TEMPLATES, value="Default",
|
section_generator.py
CHANGED
@@ -66,7 +66,7 @@ def section_generation(paper, section, save_to_path, model, research_field="mach
|
|
66 |
model=model, temperature=0.4)
|
67 |
paper["body"][section] = output
|
68 |
tex_file = os.path.join(save_to_path, f"{section}.tex")
|
69 |
-
with open(tex_file, "w") as f:
|
70 |
f.write(output)
|
71 |
time.sleep(5)
|
72 |
return usage
|
|
|
66 |
model=model, temperature=0.4)
|
67 |
paper["body"][section] = output
|
68 |
tex_file = os.path.join(save_to_path, f"{section}.tex")
|
69 |
+
with open(tex_file, "w", encoding="utf-8") as f:
|
70 |
f.write(output)
|
71 |
time.sleep(5)
|
72 |
return usage
|
utils/gpt_interaction.py
CHANGED
@@ -46,6 +46,7 @@ class GPTModel_API2D_SUPPORT:
|
|
46 |
headers = {
|
47 |
"Content-Type": "application/json",
|
48 |
"Authorization": f"Bearer {self.key}",
|
|
|
49 |
}
|
50 |
|
51 |
data = {
|
@@ -93,30 +94,45 @@ class GPTModel:
|
|
93 |
{"role": "user", "content": prompts}
|
94 |
]
|
95 |
for _ in range(self.max_attempts):
|
96 |
-
|
97 |
-
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
-
|
102 |
-
|
103 |
-
|
104 |
-
|
105 |
-
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
115 |
raise RuntimeError("Failed to get response from OpenAI.")
|
116 |
|
117 |
|
118 |
|
119 |
if __name__ == "__main__":
|
120 |
-
bot = GPTModel()
|
121 |
r = bot("You are an assistant.", "Hello.")
|
122 |
print(r)
|
|
|
46 |
headers = {
|
47 |
"Content-Type": "application/json",
|
48 |
"Authorization": f"Bearer {self.key}",
|
49 |
+
'Content-type': 'text/plain; charset=utf-8'
|
50 |
}
|
51 |
|
52 |
data = {
|
|
|
94 |
{"role": "user", "content": prompts}
|
95 |
]
|
96 |
for _ in range(self.max_attempts):
|
97 |
+
response = openai.ChatCompletion.create(
|
98 |
+
model=self.model,
|
99 |
+
messages=conversation_history,
|
100 |
+
n=1,
|
101 |
+
temperature=self.temperature,
|
102 |
+
presence_penalty=self.presence_penalty,
|
103 |
+
frequency_penalty=self.frequency_penalty,
|
104 |
+
stream=False
|
105 |
+
)
|
106 |
+
assistant_message = response['choices'][0]["message"]["content"]
|
107 |
+
usage = response['usage']
|
108 |
+
log.info(assistant_message)
|
109 |
+
if return_json:
|
110 |
+
assistant_message = json.loads(assistant_message)
|
111 |
+
return assistant_message, usage
|
112 |
+
# try:
|
113 |
+
# response = openai.ChatCompletion.create(
|
114 |
+
# model=self.model,
|
115 |
+
# messages=conversation_history,
|
116 |
+
# n=1,
|
117 |
+
# temperature=self.temperature,
|
118 |
+
# presence_penalty=self.presence_penalty,
|
119 |
+
# frequency_penalty=self.frequency_penalty,
|
120 |
+
# stream=False
|
121 |
+
# )
|
122 |
+
# assistant_message = response['choices'][0]["message"]["content"]
|
123 |
+
# usage = response['usage']
|
124 |
+
# log.info(assistant_message)
|
125 |
+
# if return_json:
|
126 |
+
# assistant_message = json.loads(assistant_message)
|
127 |
+
# return assistant_message, usage
|
128 |
+
# except Exception as e:
|
129 |
+
# print(f"Failed to get response. Error: {e}")
|
130 |
+
# time.sleep(self.delay)
|
131 |
raise RuntimeError("Failed to get response from OpenAI.")
|
132 |
|
133 |
|
134 |
|
135 |
if __name__ == "__main__":
|
136 |
+
bot = GPTModel(model="gpt-3.5-turbo-16k")
|
137 |
r = bot("You are an assistant.", "Hello.")
|
138 |
print(r)
|