JohnSmith9982 committed
Commit 1829379
1 Parent(s): ee87114

Upload 3 files

Files changed (3)
  1. ChuanhuChatbot.py +146 -0
  2. presets.py +31 -0
  3. utils.py +52 -37
ChuanhuChatbot.py ADDED
@@ -0,0 +1,146 @@
+import gradio as gr
+# import openai
+import os
+import sys
+from utils import *
+from presets import *
+
+my_api_key = "" # 在这里输入你的 API 密钥
+HIDE_MY_KEY = False # 如果你想在UI中隐藏你的 API 密钥,将此值设置为 True
+
+gr.Chatbot.postprocess = postprocess
+
+#if we are running in Docker
+if os.environ.get('dockerrun') == 'yes':
+    dockerflag = True
+else:
+    dockerflag = False
+
+authflag = False
+
+if dockerflag:
+    my_api_key = os.environ.get('my_api_key')
+    if my_api_key == "empty":
+        print("Please give a api key!")
+        sys.exit(1)
+    #auth
+    username = os.environ.get('USERNAME')
+    password = os.environ.get('PASSWORD')
+    if not (isinstance(username, type(None)) or isinstance(password, type(None))):
+        authflag = True
+else:
+    if os.path.exists("api_key.txt"):
+        with open("api_key.txt", "r") as f:
+            my_api_key = f.read().strip()
+    if os.path.exists("auth.json"):
+        with open("auth.json", "r") as f:
+            auth = json.load(f)
+            username = auth["username"]
+            password = auth["password"]
+            if username != "" and password != "":
+                authflag = True
+
+with gr.Blocks(css=customCSS) as demo:
+    gr.HTML(title)
+    keyTxt = gr.Textbox(show_label=True, placeholder=f"在这里输入你的OpenAI API-key...",
+                        value=my_api_key, label="API Key", type="password", visible=not HIDE_MY_KEY).style(container=True)
+    chatbot = gr.Chatbot() # .style(color_map=("#1D51EE", "#585A5B"))
+    history = gr.State([])
+    promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2))
+    TRUECOMSTANT = gr.State(True)
+    FALSECONSTANT = gr.State(False)
+    topic = gr.State("未命名对话历史记录")
+
+    with gr.Row():
+        with gr.Column(scale=12):
+            txt = gr.Textbox(show_label=False, placeholder="在这里输入").style(
+                container=False)
+        with gr.Column(min_width=50, scale=1):
+            submitBtn = gr.Button("🚀", variant="primary")
+    with gr.Row():
+        emptyBtn = gr.Button("🧹 新的对话")
+        retryBtn = gr.Button("🔄 重新生成")
+        delLastBtn = gr.Button("🗑️ 删除上条对话")
+        reduceTokenBtn = gr.Button("♻️ 总结对话")
+    statusDisplay = gr.Markdown("status: ready")
+    systemPromptTxt = gr.Textbox(show_label=True, placeholder=f"在这里输入System Prompt...",
+                                 label="System prompt", value=initial_prompt).style(container=True)
+    with gr.Accordion(label="加载Prompt模板", open=False):
+        with gr.Column():
+            with gr.Row():
+                with gr.Column(scale=6):
+                    templateFileSelectDropdown = gr.Dropdown(label="选择Prompt模板集合文件(.csv)", choices=get_template_names(plain=True), multiselect=False, value=get_template_names(plain=True)[0])
+                with gr.Column(scale=1):
+                    templateRefreshBtn = gr.Button("🔄 刷新")
+                    templaeFileReadBtn = gr.Button("📂 读入模板")
+            with gr.Row():
+                with gr.Column(scale=6):
+                    templateSelectDropdown = gr.Dropdown(label="从Prompt模板中加载", choices=load_template(get_template_names(plain=True)[0], mode=1), multiselect=False, value=load_template(get_template_names(plain=True)[0], mode=1)[0])
+                with gr.Column(scale=1):
+                    templateApplyBtn = gr.Button("⬇️ 应用")
+    with gr.Accordion(label="保存/加载对话历史记录", open=False):
+        with gr.Column():
+            with gr.Row():
+                with gr.Column(scale=6):
+                    saveFileName = gr.Textbox(
+                        show_label=True, placeholder=f"在这里输入保存的文件名...", label="设置保存文件名", value="对话历史记录").style(container=True)
+                with gr.Column(scale=1):
+                    saveHistoryBtn = gr.Button("💾 保存对话")
+            with gr.Row():
+                with gr.Column(scale=6):
+                    historyFileSelectDropdown = gr.Dropdown(label="从列表中加载对话", choices=get_history_names(plain=True), multiselect=False, value=get_history_names(plain=True)[0])
+                with gr.Column(scale=1):
+                    historyRefreshBtn = gr.Button("🔄 刷新")
+                    historyReadBtn = gr.Button("📂 读入对话")
+    #inputs, top_p, temperature, top_k, repetition_penalty
+    with gr.Accordion("参数", open=False):
+        top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.05,
+                          interactive=True, label="Top-p (nucleus sampling)",)
+        temperature = gr.Slider(minimum=-0, maximum=5.0, value=1.0,
+                                step=0.1, interactive=True, label="Temperature",)
+        #top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",)
+        #repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", )
+    gr.Markdown(description)
+
+
+    txt.submit(predict, [txt, top_p, temperature, keyTxt,
+               chatbot, history, systemPromptTxt], [chatbot, history, statusDisplay])
+    txt.submit(reset_textbox, [], [txt])
+    submitBtn.click(predict, [txt, top_p, temperature, keyTxt, chatbot,
+                    history, systemPromptTxt], [chatbot, history, statusDisplay], show_progress=True)
+    submitBtn.click(reset_textbox, [], [txt])
+    emptyBtn.click(reset_state, outputs=[chatbot, history])
+    retryBtn.click(predict, [txt, top_p, temperature, keyTxt, chatbot, history,
+                   systemPromptTxt, TRUECOMSTANT], [chatbot, history, statusDisplay], show_progress=True)
+    delLastBtn.click(delete_last_conversation, [chatbot, history], [
+                     chatbot, history], show_progress=True)
+    reduceTokenBtn.click(predict, [txt, top_p, temperature, keyTxt, chatbot, history,
+                         systemPromptTxt, FALSECONSTANT, TRUECOMSTANT], [chatbot, history, statusDisplay], show_progress=True)
+    saveHistoryBtn.click(save_chat_history, [
+                         saveFileName, systemPromptTxt, history, chatbot], None, show_progress=True)
+    saveHistoryBtn.click(get_history_names, None, [historyFileSelectDropdown])
+    historyRefreshBtn.click(get_history_names, None, [historyFileSelectDropdown])
+    historyReadBtn.click(load_chat_history, [historyFileSelectDropdown, systemPromptTxt, history, chatbot], [saveFileName, systemPromptTxt, history, chatbot], show_progress=True)
+    templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown])
+    templaeFileReadBtn.click(load_template, [templateFileSelectDropdown], [promptTemplates, templateSelectDropdown], show_progress=True)
+    templateApplyBtn.click(get_template_content, [promptTemplates, templateSelectDropdown, systemPromptTxt], [systemPromptTxt], show_progress=True)
+
+print("川虎的温馨提示:访问 http://localhost:7860 查看界面")
+# 默认开启本地服务器,默认可以直接从IP访问,默认不创建公开分享链接
+demo.title = "川虎ChatGPT 🚀"
+
+#if running in Docker
+if dockerflag:
+    if authflag:
+        demo.queue().launch(server_name="0.0.0.0", server_port=7860,auth=(username, password))
+    else:
+        demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=False)
+#if not running in Docker
+else:
+    if authflag:
+        demo.queue().launch(share=False, auth=(username, password))
+    else:
+        demo.queue().launch(share=False) # 改为 share=True 可以创建公开分享链接
+        #demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=False) # 可自定义端口
+        #demo.queue().launch(server_name="0.0.0.0", server_port=7860,auth=("在这里填写用户名", "在这里填写密码")) # 可设置用户名与密码
+        #demo.queue().launch(auth=("在这里填写用户名", "在这里填写密码")) # 适合Nginx反向代理
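
Note on the startup block above: when not running in Docker, ChuanhuChatbot.py reads an optional api_key.txt (whitespace-stripped) and an optional auth.json whose non-empty "username" and "password" fields enable login. A minimal sketch of creating those two files for local testing; the key and credentials below are placeholders, not part of this commit:

# sketch: write the optional local config files that ChuanhuChatbot.py looks for on startup
import json

with open("api_key.txt", "w") as f:
    f.write("sk-...")  # placeholder OpenAI key; the app calls .strip() on this value

with open("auth.json", "w") as f:
    # authflag is only set when both fields are non-empty strings
    json.dump({"username": "admin", "password": "change-me"}, f)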
presets.py ADDED
@@ -0,0 +1,31 @@
+title = """<h1 align="center">川虎ChatGPT 🚀</h1>"""
+description = """<div align=center>
+
+由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发
+
+访问川虎ChatGPT的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本
+
+此App使用 `gpt-3.5-turbo` 大语言模型
+</div>
+"""
+customCSS = """
+code {
+    display: inline;
+    white-space: break-spaces;
+    border-radius: 6px;
+    margin: 0 2px 0 2px;
+    padding: .2em .4em .1em .4em;
+    background-color: rgba(175,184,193,0.2);
+}
+pre code {
+    display: block;
+    white-space: pre;
+    background-color: hsla(0, 0%, 0%, 72%);
+    border: solid 5px var(--color-border-primary) !important;
+    border-radius: 10px;
+    padding: 0 1.2rem 1.2rem;
+    margin-top: 1em !important;
+    color: #FFF;
+    box-shadow: inset 0px 8px 16px hsla(0, 0%, 0%, .2)
+}
+"""
utils.py CHANGED
@@ -13,6 +13,7 @@ import requests
 # import markdown
 import csv
 import mdtex2html
+from pypinyin import lazy_pinyin
 
 if TYPE_CHECKING:
     from typing import TypedDict
@@ -41,7 +42,7 @@ def postprocess(
         y[i] = (
             # None if message is None else markdown.markdown(message),
             # None if response is None else markdown.markdown(response),
-            None if message is None else mdtex2html.convert(message),
+            None if message is None else mdtex2html.convert((message)),
             None if response is None else mdtex2html.convert(response),
         )
     return y
@@ -62,13 +63,7 @@ def parse_text(text):
         else:
             if i > 0:
                 if count % 2 == 1:
-                    # line = line.replace("‘", "'")
-                    # line = line.replace("“", '"')
                     line = line.replace("`", "\`")
-                    # line = line.replace("\"", "`\"`")
-                    # line = line.replace("\'", "`\'`")
-                    # line = line.replace("'``'", "''")
-                    # line = line.replace("&", "&amp;")
                     line = line.replace("<", "&lt;")
                     line = line.replace(">", "&gt;")
                     line = line.replace(" ", "&nbsp;")
@@ -86,13 +81,6 @@
 
 def predict(inputs, top_p, temperature, openai_api_key, chatbot=[], history=[], system_prompt=initial_prompt, retry=False, summary=False, retry_on_crash = False, stream = True): # repetition_penalty, top_k
 
-    if "猫娘" in inputs:
-        chatbot.append((inputs, '喵~主人请点击<a href="https://www.bilibili.com/video/BV1GJ411x7h7/">这个链接</a>查看刺激内容哦~'))
-        yield chatbot, history, "status: 喵~"
-        print(f"########## 有人上钩了: {inputs} ##########")
-        return
-    print(f"====== 收到问题: {inputs} =======")
-
     if retry_on_crash:
         retry = True
 
@@ -158,8 +146,8 @@
         response = requests.post(API_URL, headers=headers, json=payload, stream=True)
     except:
         history.append("")
-        chatbot.append(inputs, "")
-        yield history, chatbot, f"出现了网络错误"
+        chatbot.append((inputs, ""))
+        yield history, chatbot, f"获取请求失败,请检查网络连接。"
         return
 
     token_counter = 0
@@ -183,7 +171,6 @@
                     yield chatbot, history, status_text
                    break
        except Exception as e:
-            traceback.print_exc()
            if not retry_on_crash:
                print("正在尝试使用缩短的context重新生成……")
                chatbot.pop()
@@ -223,14 +210,18 @@
 
 
 def delete_last_conversation(chatbot, history):
-    if "☹️发生了错误" in chatbot[-1][1]:
+    try:
+        if "☹️发生了错误" in chatbot[-1][1]:
+            chatbot.pop()
+            print(history)
+            return chatbot, history
+        history.pop()
+        history.pop()
         chatbot.pop()
         print(history)
         return chatbot, history
-    history.pop()
-    history.pop()
-    print(history)
-    return chatbot, history
+    except:
+        return chatbot, history
 
 def save_chat_history(filename, system, history, chatbot):
     if filename == "":
@@ -244,19 +235,31 @@
         json.dump(json_s, f)
 
 
-def load_chat_history(filename):
-    with open(os.path.join(HISTORY_DIR, filename), "r") as f:
-        json_s = json.load(f)
-        print(json_s)
-        return filename, json_s["system"], json_s["history"], json_s["chatbot"]
+def load_chat_history(filename, system, history, chatbot):
+    try:
+        print("Loading from history...")
+        with open(os.path.join(HISTORY_DIR, filename), "r") as f:
+            json_s = json.load(f)
+            print(json_s)
+            return filename, json_s["system"], json_s["history"], json_s["chatbot"]
+    except FileNotFoundError:
+        print("File not found.")
+        return filename, system, history, chatbot
 
+def sorted_by_pinyin(list):
+    return sorted(list, key=lambda char: lazy_pinyin(char)[0][0])
 
-def get_file_names(dir, plain=False, filetype=".json"):
+def get_file_names(dir, plain=False, filetypes=[".json"]):
     # find all json files in the current directory and return their names
+    files = []
     try:
-        files = sorted([f for f in os.listdir(dir) if f.endswith(filetype)])
+        for type in filetypes:
+            files += [f for f in os.listdir(dir) if f.endswith(type)]
     except FileNotFoundError:
         files = []
+    files = sorted_by_pinyin(files)
+    if files == []:
+        files = [""]
     if plain:
         return files
     else:
@@ -267,24 +270,36 @@
 
 def load_template(filename, mode=0):
     lines = []
-    with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as csvfile:
-        reader = csv.reader(csvfile)
-        lines = list(reader)
-        lines = lines[1:]
+    print("Loading template...")
+    if filename.endswith(".json"):
+        with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as f:
+            lines = json.load(f)
+            lines = [[i["act"], i["prompt"]] for i in lines]
+    else:
+        with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as csvfile:
+            reader = csv.reader(csvfile)
+            lines = list(reader)
+            lines = lines[1:]
     if mode == 1:
-        return sorted([row[0] for row in lines])
+        return sorted_by_pinyin([row[0] for row in lines])
     elif mode == 2:
         return {row[0]:row[1] for row in lines}
     else:
-        return {row[0]:row[1] for row in lines}, gr.Dropdown.update(choices=sorted([row[0] for row in lines]))
+        choices = sorted_by_pinyin([row[0] for row in lines])
+        return {row[0]:row[1] for row in lines}, gr.Dropdown.update(choices=choices, value=choices[0])
 
 def get_template_names(plain=False):
-    return get_file_names(TEMPLATES_DIR, plain, filetype=".csv")
+    return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", "json"])
+
+def get_template_content(templates, selection, original_system_prompt):
+    try:
+        return templates[selection]
+    except:
+        return original_system_prompt
 
 def reset_state():
     return [], []
 
-
 def compose_system(system_prompt):
     return {"role": "system", "content": system_prompt}
 
305