tiannian committed on
Commit
d00e98b
1 Parent(s): 7199b7d

Create chat.py

Browse files
Files changed (1) hide show
  1. chat.py +406 -0
chat.py ADDED
@@ -0,0 +1,406 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import gradio as gr
3
+ # import openai
4
+ import os
5
+ import sys
6
+ import traceback
7
+ import requests
8
+ # import markdown
9
+ import csv
10
+
11
# --- Configuration constants ---
# SECURITY NOTE(review): an OpenAI API key was hard-coded here and committed
# to the repository — it is leaked and should be revoked. The environment
# variable OPENAI_API_KEY now takes precedence; the literal remains only as a
# fallback so behavior is unchanged when the variable is unset.
my_api_key = os.environ.get(
    "OPENAI_API_KEY",
    "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",  # enter your API key here
)
HIDE_MY_KEY = False  # set to True to hide the API key field in the UI

initial_prompt = "You are a helpful assistant."  # default system prompt
API_URL = "https://878787.top/v1/chat/completions"  # chat-completions endpoint (proxy)
HISTORY_DIR = "history"      # saved conversations (.json) live here
TEMPLATES_DIR = "templates"  # prompt-template collections (.csv) live here
18
+
19
+
20
+
21
# --- Docker / deployment detection ---
# The Docker image sets dockerrun=yes; in that mode the API key and optional
# basic-auth credentials are taken from environment variables.
if os.environ.get('dockerrun') == 'yes':
    dockerflag = True
else:
    dockerflag = False

if dockerflag:
    my_api_key = os.environ.get('my_api_key')
    if my_api_key == "empty":
        print("Please give a api key!")
        sys.exit(1)
    # Basic-auth is enabled only when both USERNAME and PASSWORD are set.
    username = os.environ.get('USERNAME')
    password = os.environ.get('PASSWORD')
    # `is None` is the idiomatic null check (was `isinstance(x, type(None))`).
    if username is None or password is None:
        authflag = False
    else:
        authflag = True
39
+
40
+
41
def parse_text(text):
    """Render model output as HTML for the Gradio chatbot.

    Fenced ``` blocks become <pre><code class="..."> sections whose contents
    are escaped so the browser shows them literally; every line after the
    first is prefixed with <br>; empty lines are dropped.
    """
    # Ordered (needle, replacement) pairs applied to lines inside a fence.
    # Order matters: the backtick must be handled before replacements that
    # themselves insert backticks.
    code_escapes = (
        ("`", "\\`"),
        ('"', '`"`'),
        ("'", "`'`"),
        # ("&", "&amp;"),
        ("<", "&lt;"),
        (">", "&gt;"),
        (" ", "&nbsp;"),
        ("*", "&ast;"),
        ("_", "&lowbar;"),
        ("-", "&#45;"),
        (".", "&#46;"),
        ("!", "&#33;"),
        ("(", "&#40;"),
        (")", "&#41;"),
        ("$", "&#36;"),
    )
    chunks = [ln for ln in text.split("\n") if ln != ""]
    fence_count = 0
    for idx, chunk in enumerate(chunks):
        if "```" in chunk:
            fence_count += 1
            lang_tag = chunk.split("`")[-1]
            if fence_count % 2 == 1:
                # Opening fence: carry over any language tag for CSS.
                chunks[idx] = f'<pre><code class="{lang_tag}">'
            else:
                chunks[idx] = "</code></pre>"
        elif idx > 0:
            if fence_count % 2 == 1:
                # Inside a code block: escape for literal HTML display.
                for needle, repl in code_escapes:
                    chunk = chunk.replace(needle, repl)
            chunks[idx] = "<br>" + chunk
    return "".join(chunks)
76
+
77
def predict(inputs, top_p, temperature, openai_api_key, chatbot=[], history=[], system_prompt=initial_prompt, retry=False, summary=False, summary_on_crash = False, stream = True): # repetition_penalty, top_k
    """Send the conversation to the chat-completions API and yield
    (chatbot, history, status_text) tuples that Gradio streams to the UI.

    `chatbot` holds rendered (user_html, assistant_html) pairs; `history`
    holds raw turns as a flat list [user, assistant, user, ...].
    `retry` regenerates the last reply; `summary` asks the model to condense
    the conversation (used when the context grows too long); `stream`
    requests server-sent-event streaming and is forced off for summaries.
    NOTE(review): chatbot/history are mutable default arguments — Gradio
    always supplies them, but direct callers should pass fresh lists.
    NOTE(review): `summary_on_crash` is accepted but never read here.
    """

    # A summary is requested as a single, non-streaming completion.
    if summary:
        stream = False

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai_api_key}"
    }

    # One "round" = a user turn plus an assistant turn in `history`.
    chat_counter = len(history) // 2

    print(f"chat_counter - {chat_counter}")

    # Rebuild the OpenAI-style message list from the flat history.
    messages = [compose_system(system_prompt)]
    if chat_counter:
        for index in range(0, 2*chat_counter, 2):
            temp1 = {}
            temp1["role"] = "user"
            temp1["content"] = history[index]
            temp2 = {}
            temp2["role"] = "assistant"
            temp2["content"] = history[index+1]
            if temp1["content"] != "":
                # Skip rounds whose assistant reply is empty unless retrying.
                if temp2["content"] != "" or retry:
                    messages.append(temp1)
                    messages.append(temp2)
            else:
                # Empty user turn: fold the reply into the previous message.
                messages[-1]['content'] = temp2['content']
    if retry and chat_counter:
        # Drop the last assistant reply so the model regenerates it.
        messages.pop()
    elif summary:
        # Replace history with the tail of the context plus a marker turn.
        history = [*[i["content"] for i in messages[-2:]], "我们刚刚聊了什么?"]
        messages.append(compose_user(
            "请帮我总结一下上述对话的内容,实现减少字数的同时,保证对话的质量。在总结中不要加入这一句话。"))
    else:
        temp3 = {}
        temp3["role"] = "user"
        temp3["content"] = inputs
        messages.append(temp3)
        chat_counter += 1
    # messages
    payload = {
        "model": "gpt-3.5-turbo",
        "messages": messages, # [{"role": "user", "content": f"{inputs}"}],
        "temperature": temperature, # 1.0,
        "top_p": top_p, # 1.0,
        "n": 1,
        "stream": stream,
        "presence_penalty": 0,
        "frequency_penalty": 0,
    }

    if not summary:
        history.append(inputs)
    else:
        print("精简中...")
    # make a POST request to the API endpoint using the requests.post method, passing in stream=True
    response = requests.post(API_URL, headers=headers,
                             json=payload, stream=True)

    token_counter = 0
    partial_words = ""

    counter = 0
    if stream:
        chatbot.append((parse_text(history[-1]), ""))
        for chunk in response.iter_lines():
            # The very first SSE line is skipped (carries no delta content).
            if counter == 0:
                counter += 1
                continue
            counter += 1
            # check whether each line is non-empty
            if chunk:
                # decode each line as response data is in bytes;
                # chunk.decode()[6:] strips the leading "data: " SSE prefix.
                try:
                    # An empty delta marks the final chunk of the stream.
                    if len(json.loads(chunk.decode()[6:])['choices'][0]["delta"]) == 0:
                        chunkjson = json.loads(chunk.decode()[6:])
                        status_text = f"id: {chunkjson['id']}, finish_reason: {chunkjson['choices'][0]['finish_reason']}"
                        yield chatbot, history, status_text
                        break
                except Exception as e:
                    # Parse failure — presumably the context was too long:
                    # condense the conversation, then retry the request once.
                    traceback.print_exc()
                    print("Context 过长,正在尝试精简……")
                    chatbot.pop()
                    chatbot, history, status_text = next(predict(inputs, top_p, temperature, openai_api_key, chatbot, history, system_prompt, retry, summary=True, summary_on_crash=True, stream=False))
                    yield chatbot, history, status_text
                    if not "ERROR" in status_text:
                        print("精简完成,正在尝试重新生成……")
                        yield next(predict(inputs, top_p, temperature, openai_api_key, chatbot, history, system_prompt, retry, summary=False, summary_on_crash=True, stream=False))
                    else:
                        print("精简出错了,可能是网络原因。")
                    break
                chunkjson = json.loads(chunk.decode()[6:])
                status_text = f"id: {chunkjson['id']}, finish_reason: {chunkjson['choices'][0]['finish_reason']}"
                partial_words = partial_words + \
                    json.loads(chunk.decode()[6:])[
                        'choices'][0]["delta"]["content"]
                # First token starts a new history entry; later tokens update it.
                if token_counter == 0:
                    history.append(" " + partial_words)
                else:
                    history[-1] = partial_words
                chatbot[-1] = (parse_text(history[-2]), parse_text(history[-1]))
                token_counter += 1
                yield chatbot, history, status_text
    else:
        # Non-streaming (summary) path: read the whole JSON body at once.
        try:
            responsejson = json.loads(response.text)
            content = responsejson["choices"][0]["message"]["content"]
            history.append(content)
            chatbot.append((parse_text(history[-2]), parse_text(content)))
            status_text = "精简完成"
        except:
            chatbot.append((parse_text(history[-1]), "☹️发生了错误,请检查网络连接或者稍后再试。"))
            status_text = "status: ERROR"
        yield chatbot, history, status_text
193
+
194
+
195
+
196
def delete_last_conversation(chatbot, history):
    """Remove the most recent exchange from the visible chat and the history.

    If the last chatbot entry is an error bubble, only that entry is dropped
    (the failed turn never reached `history`); otherwise the last
    user/assistant pair is popped from `history`.

    Returns the (chatbot, history) pair for the Gradio outputs.
    """
    if not chatbot:
        # Nothing to delete — previously this raised IndexError on chatbot[-1].
        return chatbot, history
    if "☹️发生了错误" in chatbot[-1][1]:
        chatbot.pop()
        print(history)
        return chatbot, history
    # history stores a round as two separate entries; guard short histories.
    if len(history) >= 2:
        history.pop()
        history.pop()
    print(history)
    return chatbot, history
205
+
206
def save_chat_history(filename, system, history, chatbot):
    """Persist the current conversation as HISTORY_DIR/<filename>.json.

    No-op when filename is empty. The JSON carries the system prompt, the raw
    history, and the rendered chatbot pairs so load_chat_history() can fully
    restore the session.
    """
    if filename == "":
        return
    if not filename.endswith(".json"):
        filename += ".json"
    os.makedirs(HISTORY_DIR, exist_ok=True)
    json_s = {"system": system, "history": history, "chatbot": chatbot}
    print(json_s)
    # Explicit utf-8 + ensure_ascii=False keeps Chinese text readable on disk
    # and independent of the platform's default encoding.
    with open(os.path.join(HISTORY_DIR, filename), "w", encoding="utf-8") as f:
        json.dump(json_s, f, ensure_ascii=False)
216
+
217
+
218
def load_chat_history(filename):
    """Load a saved conversation from HISTORY_DIR/<filename>.

    Returns (filename, system_prompt, history, chatbot), matching the Gradio
    outputs [saveFileName, systemPromptTxt, history, chatbot].
    """
    # Explicit utf-8 matches save_chat_history and avoids decode errors on
    # platforms whose default encoding cannot represent the stored text.
    with open(os.path.join(HISTORY_DIR, filename), "r", encoding="utf-8") as f:
        json_s = json.load(f)
    print(json_s)
    return filename, json_s["system"], json_s["history"], json_s["chatbot"]
223
+
224
+
225
def get_file_names(dir, plain=False, filetype=".json"):
    """List the files in *dir* whose names end with *filetype*, sorted.

    Returns the plain list when plain=True, otherwise a Gradio dropdown
    update carrying the names as choices. A missing directory yields an
    empty listing instead of raising.
    """
    try:
        names = [entry for entry in os.listdir(dir) if entry.endswith(filetype)]
        names.sort()
    except FileNotFoundError:
        names = []
    if plain:
        return names
    return gr.Dropdown.update(choices=names)
235
+
236
def get_history_names(plain=False):
    """Saved-conversation filenames from HISTORY_DIR (see get_file_names)."""
    return get_file_names(HISTORY_DIR, plain=plain)
238
+
239
def load_template(filename, mode=0):
    """Read a prompt-template CSV (one name,prompt pair per row; header skipped).

    mode 1 -> sorted list of template names;
    mode 2 -> {name: prompt} mapping;
    any other mode -> (mapping, dropdown update listing the sorted names).
    """
    path = os.path.join(TEMPLATES_DIR, filename)
    with open(path, "r", encoding="utf8") as csvfile:
        rows = list(csv.reader(csvfile))[1:]  # drop the header row
    names = sorted(row[0] for row in rows)
    mapping = {row[0]: row[1] for row in rows}
    if mode == 1:
        return names
    if mode == 2:
        return mapping
    return mapping, gr.Dropdown.update(choices=names)
251
+
252
def get_template_names(plain=False):
    """Template-collection CSV filenames from TEMPLATES_DIR."""
    return get_file_names(TEMPLATES_DIR, plain=plain, filetype=".csv")
254
+
255
def reset_state():
    """Start a fresh conversation: empty chatbot pairs, empty history."""
    return list(), list()
257
+
258
+
259
def compose_system(system_prompt):
    """Wrap a system prompt as an OpenAI chat message dict."""
    return dict(role="system", content=system_prompt)
261
+
262
+
263
def compose_user(user_input):
    """Wrap a user message as an OpenAI chat message dict."""
    return dict(role="user", content=user_input)
265
+
266
+
267
def reset_textbox():
    """Clear the input textbox after a message has been submitted."""
    return gr.update(value="")
269
+
270
# HTML heading rendered at the top of the page.
title = """<h1 align="center">ChatGPT 🚀</h1>"""
# Markdown/HTML footer shown below the controls (links to the upstream
# ChuanhuChatGPT project; notes that the app uses gpt-3.5-turbo).
description = """<div align=center>


访问ChatGPT的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本

此App使用 `gpt-3.5-turbo` 大语言模型
</div>
"""
# Custom CSS injected into the Blocks app: inline <code> chips and dark
# <pre> panels for the fenced code blocks produced by parse_text().
customCSS = """
code {
    display: inline;
    white-space: break-spaces;
    border-radius: 6px;
    margin: 0 2px 0 2px;
    padding: .2em .4em .1em .4em;
    background-color: rgba(175,184,193,0.2);
}
pre {
    display: block;
    white-space: pre;
    background-color: hsla(0, 0%, 0%, 72%);
    border: solid 5px var(--color-border-primary) !important;
    border-radius: 8px;
    padding: 0 1.2rem 1.2rem;
    margin-top: 1em !important;
    color: #FFF;
    box-shadow: inset 0px 8px 16px hsla(0, 0%, 0%, .2)
}
pre code, pre code code {
    background-color: transparent !important;
    margin: 0;
    padding: 0;
}
"""
305
+
306
# --- Gradio UI: layout and event wiring ---
with gr.Blocks(css=customCSS) as demo:
    gr.HTML(title)
    # API-key box; hidden entirely when HIDE_MY_KEY is set.
    keyTxt = gr.Textbox(show_label=True, placeholder=f"在这里输入你的OpenAI API-key...",
                        value=my_api_key, label="API Key", type="password", visible=not HIDE_MY_KEY).style(container=True)
    chatbot = gr.Chatbot() # .style(color_map=("#1D51EE", "#585A5B"))
    history = gr.State([])
    # Preload the first template collection as a {name: prompt} mapping.
    promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2))
    # Constant True/False states used to flip predict()'s retry/summary flags.
    # NOTE(review): "TRUECOMSTANT" is misspelled but used consistently below.
    TRUECOMSTANT = gr.State(True)
    FALSECONSTANT = gr.State(False)
    topic = gr.State("未命名对话历史记录")

    with gr.Row():
        with gr.Column(scale=12):
            txt = gr.Textbox(show_label=False, placeholder="在这里输入").style(
                container=False)
        with gr.Column(min_width=50, scale=1):
            submitBtn = gr.Button("🚀", variant="primary")
    with gr.Row():
        emptyBtn = gr.Button("🧹 新的对话")
        retryBtn = gr.Button("🔄 重新生成")
        delLastBtn = gr.Button("🗑️ 删除上条对话")
        reduceTokenBtn = gr.Button("♻️ 总结对话")
    statusDisplay = gr.Markdown("status: ready")
    systemPromptTxt = gr.Textbox(show_label=True, placeholder=f"在这里输入System Prompt...",
                                 label="System prompt", value=initial_prompt).style(container=True)
    # Prompt-template loader: pick a CSV collection, then a template from it.
    with gr.Accordion(label="加载Prompt模板", open=False):
        with gr.Column():
            with gr.Row():
                with gr.Column(scale=6):
                    templateFileSelectDropdown = gr.Dropdown(label="选择Prompt模板集合文件(.csv)", choices=get_template_names(plain=True), multiselect=False)
                with gr.Column(scale=1):
                    templateRefreshBtn = gr.Button("🔄 刷新")
                    # NOTE(review): "templae" is a typo, kept for consistency.
                    templaeFileReadBtn = gr.Button("📂 读入模板")
            with gr.Row():
                with gr.Column(scale=6):
                    templateSelectDropdown = gr.Dropdown(label="从Prompt模板中加载", choices=load_template(get_template_names(plain=True)[0], mode=1), multiselect=False)
                with gr.Column(scale=1):
                    templateApplyBtn = gr.Button("⬇️ 应用")
    # Save/load of conversations as JSON files under HISTORY_DIR.
    with gr.Accordion(label="保存/加载对话历史记录(在文本框中输入文件名,点击“保存对话”按钮,历史记录文件会被存储到Python文件旁边)", open=False):
        with gr.Column():
            with gr.Row():
                with gr.Column(scale=6):
                    saveFileName = gr.Textbox(
                        show_label=True, placeholder=f"在这里输入保存的文件名...", label="设置保存文件名", value="对话历史记录").style(container=True)
                with gr.Column(scale=1):
                    saveBtn = gr.Button("💾 保存对话")
            with gr.Row():
                with gr.Column(scale=6):
                    historyFileSelectDropdown = gr.Dropdown(label="从列表中加载对话", choices=get_history_names(plain=True), multiselect=False)
                with gr.Column(scale=1):
                    historyRefreshBtn = gr.Button("🔄 刷新")
                    historyReadBtn = gr.Button("📂 读入对话")
    #inputs, top_p, temperature, top_k, repetition_penalty
    with gr.Accordion("参数", open=False):
        top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.05,
                          interactive=True, label="Top-p (nucleus sampling)",)
        temperature = gr.Slider(minimum=-0, maximum=5.0, value=1.0,
                                step=0.1, interactive=True, label="Temperature",)
        #top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",)
        #repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", )
    gr.Markdown(description)


    # Event wiring: Enter key and 🚀 button both run predict, then clear input.
    txt.submit(predict, [txt, top_p, temperature, keyTxt,
               chatbot, history, systemPromptTxt], [chatbot, history, statusDisplay])
    txt.submit(reset_textbox, [], [txt])
    submitBtn.click(predict, [txt, top_p, temperature, keyTxt, chatbot,
                    history, systemPromptTxt], [chatbot, history, statusDisplay], show_progress=True)
    submitBtn.click(reset_textbox, [], [txt])
    emptyBtn.click(reset_state, outputs=[chatbot, history])
    # Retry: TRUECOMSTANT feeds predict()'s `retry` parameter.
    retryBtn.click(predict, [txt, top_p, temperature, keyTxt, chatbot, history,
                   systemPromptTxt, TRUECOMSTANT], [chatbot, history, statusDisplay], show_progress=True)
    delLastBtn.click(delete_last_conversation, [chatbot, history], [
                     chatbot, history], show_progress=True)
    # Summarize: FALSECONSTANT -> retry=False, TRUECOMSTANT -> summary=True.
    reduceTokenBtn.click(predict, [txt, top_p, temperature, keyTxt, chatbot, history,
                         systemPromptTxt, FALSECONSTANT, TRUECOMSTANT], [chatbot, history, statusDisplay], show_progress=True)
    saveBtn.click(save_chat_history, [
                  saveFileName, systemPromptTxt, history, chatbot], None, show_progress=True)
    saveBtn.click(get_history_names, None, [historyFileSelectDropdown])
    historyRefreshBtn.click(get_history_names, None, [historyFileSelectDropdown])
    historyReadBtn.click(load_chat_history, [historyFileSelectDropdown], [saveFileName, systemPromptTxt, history, chatbot], show_progress=True)
    templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown])
    templaeFileReadBtn.click(load_template, [templateFileSelectDropdown], [promptTemplates, templateSelectDropdown], show_progress=True)
    templateApplyBtn.click(lambda x, y: x[y], [promptTemplates, templateSelectDropdown], [systemPromptTxt], show_progress=True)

print("温馨提示:访问 http://localhost:7860 查看界面")
# By default: run a local server, reachable by IP, without a public share link.
demo.title = "川虎ChatGPT 🚀"

#if running in Docker
if dockerflag:
    if authflag:
        demo.queue().launch(server_name="0.0.0.0", server_port=7860,auth=(username, password))
    else:
        demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=False)
#if not running in Docker
else:
    demo.queue().launch(share=False) # change to share=True to create a public share link
    #demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=False) # customizable port
    #demo.queue().launch(server_name="0.0.0.0", server_port=7860,auth=("username here", "password here")) # enable username/password auth
    #demo.queue().launch(auth=("username here", "password here")) # suitable behind an Nginx reverse proxy