JohnSmith9982 committed on
Commit
85095bb
1 Parent(s): 4399c5c

Upload 3 files

Browse files
Files changed (3) hide show
  1. app.py +19 -25
  2. presets.py +31 -6
  3. utils.py +34 -13
app.py CHANGED
@@ -43,7 +43,7 @@ else:
43
 
44
  gr.Chatbot.postprocess = postprocess
45
 
46
- with gr.Blocks(css=customCSS,) as demo:
47
  history = gr.State([])
48
  token_count = gr.State([])
49
  promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2))
@@ -51,13 +51,11 @@ with gr.Blocks(css=customCSS,) as demo:
51
  FALSECONSTANT = gr.State(False)
52
  topic = gr.State("未命名对话历史记录")
53
 
54
- # gr.HTML("""
55
- # <div style="text-align: center; margin-top: 20px;">
56
- # """)
57
- gr.HTML(title)
58
 
59
  with gr.Row(scale=1).style(equal_height=True):
60
-
61
  with gr.Column(scale=5):
62
  with gr.Row(scale=1):
63
  chatbot = gr.Chatbot().style(height=600) # .style(color_map=("#1D51EE", "#585A5B"))
@@ -73,11 +71,8 @@ with gr.Blocks(css=customCSS,) as demo:
73
  delLastBtn = gr.Button("🗑️ 删除一条对话")
74
  reduceTokenBtn = gr.Button("♻️ 总结对话")
75
 
76
-
77
-
78
  with gr.Column():
79
  with gr.Column(min_width=50,scale=1):
80
- status_display = gr.Markdown("status: ready")
81
  with gr.Tab(label="ChatGPT"):
82
  keyTxt = gr.Textbox(show_label=True, placeholder=f"OpenAI API-key...",value=my_api_key, type="password", visible=not HIDE_MY_KEY, label="API-Key")
83
  model_select_dropdown = gr.Dropdown(label="选择模型", choices=MODELS, multiselect=False, value=MODELS[0])
@@ -104,27 +99,27 @@ with gr.Blocks(css=customCSS,) as demo:
104
 
105
  with gr.Tab(label="保存/加载"):
106
  with gr.Accordion(label="保存/加载对话历史记录", open=True):
 
107
  with gr.Column():
108
  with gr.Row():
109
  with gr.Column(scale=6):
110
  saveFileName = gr.Textbox(
111
- show_label=True, placeholder=f"在这里输入保存的文件名...", label="设置保存文件名", value="对话历史记录").style(container=True)
112
  with gr.Column(scale=1):
113
  saveHistoryBtn = gr.Button("💾 保存对话")
 
114
  with gr.Row():
115
  with gr.Column(scale=6):
116
  historyFileSelectDropdown = gr.Dropdown(label="从列表中加载对话", choices=get_history_names(plain=True), multiselect=False, value=get_history_names(plain=True)[0])
117
  with gr.Column(scale=1):
118
  historyRefreshBtn = gr.Button("🔄 刷新")
 
 
 
119
 
120
-
121
-
122
- gr.HTML("""
123
- <div style="text-align: center; margin-top: 20px; margin-bottom: 20px;">
124
- """)
125
  gr.Markdown(description)
126
 
127
-
128
  user_input.submit(predict, [keyTxt, systemPromptTxt, history, user_input, chatbot, token_count, top_p, temperature, use_streaming_checkbox, model_select_dropdown, use_websearch_checkbox], [chatbot, history, status_display, token_count], show_progress=True)
129
  user_input.submit(reset_textbox, [], [user_input])
130
 
@@ -140,20 +135,19 @@ with gr.Blocks(css=customCSS,) as demo:
140
 
141
  reduceTokenBtn.click(reduce_token_size, [keyTxt, systemPromptTxt, history, chatbot, token_count, top_p, temperature, use_streaming_checkbox, model_select_dropdown], [chatbot, history, status_display, token_count], show_progress=True)
142
 
143
- saveHistoryBtn.click(save_chat_history, [
144
- saveFileName, systemPromptTxt, history, chatbot], None, show_progress=True)
 
 
145
 
 
 
146
  saveHistoryBtn.click(get_history_names, None, [historyFileSelectDropdown])
147
-
148
  historyRefreshBtn.click(get_history_names, None, [historyFileSelectDropdown])
149
-
150
  historyFileSelectDropdown.change(load_chat_history, [historyFileSelectDropdown, systemPromptTxt, history, chatbot], [saveFileName, systemPromptTxt, history, chatbot], show_progress=True)
 
151
 
152
- templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown])
153
-
154
- templateFileSelectDropdown.change(load_template, [templateFileSelectDropdown], [promptTemplates, templateSelectDropdown], show_progress=True)
155
-
156
- templateSelectDropdown.change(get_template_content, [promptTemplates, templateSelectDropdown, systemPromptTxt], [systemPromptTxt], show_progress=True)
157
 
158
  logging.info(colorama.Back.GREEN + "\n川虎的温馨提示:访问 http://localhost:7860 查看界面" + colorama.Style.RESET_ALL)
159
  # 默认开启本地服务器,默认可以直接从IP访问,默认不创建公开分享链接
 
43
 
44
  gr.Chatbot.postprocess = postprocess
45
 
46
+ with gr.Blocks(css=customCSS) as demo:
47
  history = gr.State([])
48
  token_count = gr.State([])
49
  promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2))
 
51
  FALSECONSTANT = gr.State(False)
52
  topic = gr.State("未命名对话历史记录")
53
 
54
+ with gr.Row():
55
+ gr.HTML(title)
56
+ status_display = gr.Markdown("status: ready", elem_id="status_display")
 
57
 
58
  with gr.Row(scale=1).style(equal_height=True):
 
59
  with gr.Column(scale=5):
60
  with gr.Row(scale=1):
61
  chatbot = gr.Chatbot().style(height=600) # .style(color_map=("#1D51EE", "#585A5B"))
 
71
  delLastBtn = gr.Button("🗑️ 删除一条对话")
72
  reduceTokenBtn = gr.Button("♻️ 总结对话")
73
 
 
 
74
  with gr.Column():
75
  with gr.Column(min_width=50,scale=1):
 
76
  with gr.Tab(label="ChatGPT"):
77
  keyTxt = gr.Textbox(show_label=True, placeholder=f"OpenAI API-key...",value=my_api_key, type="password", visible=not HIDE_MY_KEY, label="API-Key")
78
  model_select_dropdown = gr.Dropdown(label="选择模型", choices=MODELS, multiselect=False, value=MODELS[0])
 
99
 
100
  with gr.Tab(label="保存/加载"):
101
  with gr.Accordion(label="保存/加载对话历史记录", open=True):
102
+ gr.Markdown("对话历史默认保存在history文件夹中。")
103
  with gr.Column():
104
  with gr.Row():
105
  with gr.Column(scale=6):
106
  saveFileName = gr.Textbox(
107
+ show_label=True, placeholder=f"设置文件名: 默认为.json,可选为.md", label="设置保存文件名", value="对话历史记录").style(container=True)
108
  with gr.Column(scale=1):
109
  saveHistoryBtn = gr.Button("💾 保存对话")
110
+ exportMarkdownBtn = gr.Button("📝 导出为Markdown")
111
  with gr.Row():
112
  with gr.Column(scale=6):
113
  historyFileSelectDropdown = gr.Dropdown(label="从列表中加载对话", choices=get_history_names(plain=True), multiselect=False, value=get_history_names(plain=True)[0])
114
  with gr.Column(scale=1):
115
  historyRefreshBtn = gr.Button("🔄 刷新")
116
+ with gr.Row():
117
+ with gr.Column():
118
+ downloadFile = gr.File(interactive=True)
119
 
 
 
 
 
 
120
  gr.Markdown(description)
121
 
122
+ # Chatbot
123
  user_input.submit(predict, [keyTxt, systemPromptTxt, history, user_input, chatbot, token_count, top_p, temperature, use_streaming_checkbox, model_select_dropdown, use_websearch_checkbox], [chatbot, history, status_display, token_count], show_progress=True)
124
  user_input.submit(reset_textbox, [], [user_input])
125
 
 
135
 
136
  reduceTokenBtn.click(reduce_token_size, [keyTxt, systemPromptTxt, history, chatbot, token_count, top_p, temperature, use_streaming_checkbox, model_select_dropdown], [chatbot, history, status_display, token_count], show_progress=True)
137
 
138
+ # Template
139
+ templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown])
140
+ templateFileSelectDropdown.change(load_template, [templateFileSelectDropdown], [promptTemplates, templateSelectDropdown], show_progress=True)
141
+ templateSelectDropdown.change(get_template_content, [promptTemplates, templateSelectDropdown, systemPromptTxt], [systemPromptTxt], show_progress=True)
142
 
143
+ # S&L
144
+ saveHistoryBtn.click(save_chat_history, [saveFileName, systemPromptTxt, history, chatbot], downloadFile, show_progress=True)
145
  saveHistoryBtn.click(get_history_names, None, [historyFileSelectDropdown])
146
+ exportMarkdownBtn.click(export_markdown, [saveFileName, systemPromptTxt, history, chatbot], downloadFile, show_progress=True)
147
  historyRefreshBtn.click(get_history_names, None, [historyFileSelectDropdown])
 
148
  historyFileSelectDropdown.change(load_chat_history, [historyFileSelectDropdown, systemPromptTxt, history, chatbot], [saveFileName, systemPromptTxt, history, chatbot], show_progress=True)
149
+ downloadFile.change(load_chat_history, [downloadFile, systemPromptTxt, history, chatbot], [saveFileName, systemPromptTxt, history, chatbot])
150
 
 
 
 
 
 
151
 
152
  logging.info(colorama.Back.GREEN + "\n川虎的温馨提示:访问 http://localhost:7860 查看界面" + colorama.Style.RESET_ALL)
153
  # 默认开启本地服务器,默认可以直接从IP访问,默认不创建公开分享链接
presets.py CHANGED
@@ -1,6 +1,6 @@
1
  # -*- coding:utf-8 -*-
2
- title = """<h1 align="left">川虎ChatGPT 🚀</h1>"""
3
- description = """<div align=center>
4
 
5
  由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发
6
 
@@ -10,6 +10,33 @@ description = """<div align=center>
10
  </div>
11
  """
12
  customCSS = """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
  code {
14
  display: inline;
15
  white-space: break-spaces;
@@ -30,11 +57,9 @@ pre code {
30
  box-shadow: inset 0px 8px 16px hsla(0, 0%, 0%, .2)
31
  }
32
 
33
- *{
34
  transition: all 0.6s;
35
  }
36
-
37
-
38
  """
39
 
40
  summarize_prompt = "你是谁?我们刚才聊了什么?" # 总结对话时的 prompt
@@ -58,7 +83,7 @@ ssl_error_prompt = "SSL错误,无法获取对话。" # SSL 错误
58
  no_apikey_msg = "API key长度不是51位,请检查是否输入正确。" # API key 长度不足 51 位
59
 
60
  max_token_streaming = 3500 # 流式对话时的最大 token 数
61
- timeout_streaming = 15 # 流式对话时的超时时间
62
  max_token_all = 3500 # 非流式对话时的最大 token 数
63
  timeout_all = 200 # 非流式对话时的超时时间
64
  enable_streaming_option = True # 是否启用选择选择是否实时显示回答的勾选框
 
1
  # -*- coding:utf-8 -*-
2
+ title = """<h1 align="left" style="min-width:200px; margin-top:0;">川虎ChatGPT 🚀</h1>"""
3
+ description = """<div align="center" style="margin:16px 0">
4
 
5
  由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发
6
 
 
10
  </div>
11
  """
12
  customCSS = """
13
+ #status_display {
14
+ display: flex;
15
+ min-height: 2.5em;
16
+ align-items: flex-end;
17
+ justify-content: flex-end;
18
+ }
19
+ #status_display p {
20
+ font-size: .85em;
21
+ font-family: monospace;
22
+ color: var(--text-color-subdued) !important;
23
+ }
24
+ [class *= "message"] {
25
+ border-radius: var(--radius-xl) !important;
26
+ border: none;
27
+ padding: var(--spacing-xl) !important;
28
+ font-size: var(--text-md) !important;
29
+ line-height: var(--line-md) !important;
30
+ }
31
+ [data-testid = "bot"] {
32
+ max-width: 85%;
33
+ border-bottom-left-radius: 0 !important;
34
+ }
35
+ [data-testid = "user"] {
36
+ max-width: 85%;
37
+ width: auto !important;
38
+ border-bottom-right-radius: 0 !important;
39
+ }
40
  code {
41
  display: inline;
42
  white-space: break-spaces;
 
57
  box-shadow: inset 0px 8px 16px hsla(0, 0%, 0%, .2)
58
  }
59
 
60
+ * {
61
  transition: all 0.6s;
62
  }
 
 
63
  """
64
 
65
  summarize_prompt = "你是谁?我们刚才聊了什么?" # 总结对话时的 prompt
 
83
  no_apikey_msg = "API key长度不是51位,请检查是否输入正确。" # API key 长度不足 51 位
84
 
85
  max_token_streaming = 3500 # 流式对话时的最大 token 数
86
+ timeout_streaming = 30 # 流式对话时的超时时间
87
  max_token_all = 3500 # 非流式对话时的最大 token 数
88
  timeout_all = 200 # 非流式对话时的超时时间
89
  enable_streaming_option = True # 是否启用选择选择是否实时显示回答的勾选框
utils.py CHANGED
@@ -48,13 +48,14 @@ def postprocess(
48
  y[i] = (
49
  # None if message is None else markdown.markdown(message),
50
  # None if response is None else markdown.markdown(response),
51
- None if message is None else mdtex2html.convert((message)),
52
  None if response is None else mdtex2html.convert(response),
53
  )
54
  return y
55
 
56
- def count_token(input_str):
57
  encoding = tiktoken.get_encoding("cl100k_base")
 
58
  length = len(encoding.encode(input_str))
59
  return length
60
 
@@ -142,10 +143,10 @@ def stream_predict(openai_api_key, system_prompt, history, inputs, chatbot, all_
142
  chatbot.append((parse_text(inputs), ""))
143
  user_token_count = 0
144
  if len(all_token_counts) == 0:
145
- system_prompt_token_count = count_token(system_prompt)
146
- user_token_count = count_token(inputs) + system_prompt_token_count
147
  else:
148
- user_token_count = count_token(inputs)
149
  all_token_counts.append(user_token_count)
150
  logging.info(f"输入token计数: {user_token_count}")
151
  yield get_return_value()
@@ -204,7 +205,7 @@ def predict_all(openai_api_key, system_prompt, history, inputs, chatbot, all_tok
204
  history.append(construct_user(inputs))
205
  history.append(construct_assistant(""))
206
  chatbot.append((parse_text(inputs), ""))
207
- all_token_counts.append(count_token(inputs))
208
  try:
209
  response = get_response(openai_api_key, system_prompt, history, temperature, top_p, False, selected_model)
210
  except requests.exceptions.ConnectTimeout:
@@ -322,22 +323,42 @@ def delete_last_conversation(chatbot, history, previous_token_count):
322
  return chatbot, history, previous_token_count, construct_token_message(sum(previous_token_count))
323
 
324
 
325
- def save_chat_history(filename, system, history, chatbot):
326
  logging.info("保存对话历史中……")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
327
  if filename == "":
328
  return
329
  if not filename.endswith(".json"):
330
  filename += ".json"
331
- os.makedirs(HISTORY_DIR, exist_ok=True)
332
- json_s = {"system": system, "history": history, "chatbot": chatbot}
333
- logging.info(json_s)
334
- with open(os.path.join(HISTORY_DIR, filename), "w") as f:
335
- json.dump(json_s, f, ensure_ascii=False, indent=4)
336
- logging.info("保存对话历史完毕")
 
 
337
 
338
 
339
  def load_chat_history(filename, system, history, chatbot):
340
  logging.info("加载对话历史中……")
 
 
341
  try:
342
  with open(os.path.join(HISTORY_DIR, filename), "r") as f:
343
  json_s = json.load(f)
 
48
  y[i] = (
49
  # None if message is None else markdown.markdown(message),
50
  # None if response is None else markdown.markdown(response),
51
+ None if message is None else message,
52
  None if response is None else mdtex2html.convert(response),
53
  )
54
  return y
55
 
56
+ def count_token(message):
57
  encoding = tiktoken.get_encoding("cl100k_base")
58
+ input_str = f"role: {message['role']}, content: {message['content']}"
59
  length = len(encoding.encode(input_str))
60
  return length
61
 
 
143
  chatbot.append((parse_text(inputs), ""))
144
  user_token_count = 0
145
  if len(all_token_counts) == 0:
146
+ system_prompt_token_count = count_token(construct_system(system_prompt))
147
+ user_token_count = count_token(construct_user(inputs)) + system_prompt_token_count
148
  else:
149
+ user_token_count = count_token(construct_user(inputs))
150
  all_token_counts.append(user_token_count)
151
  logging.info(f"输入token计数: {user_token_count}")
152
  yield get_return_value()
 
205
  history.append(construct_user(inputs))
206
  history.append(construct_assistant(""))
207
  chatbot.append((parse_text(inputs), ""))
208
+ all_token_counts.append(count_token(construct_user(inputs)))
209
  try:
210
  response = get_response(openai_api_key, system_prompt, history, temperature, top_p, False, selected_model)
211
  except requests.exceptions.ConnectTimeout:
 
323
  return chatbot, history, previous_token_count, construct_token_message(sum(previous_token_count))
324
 
325
 
326
+ def save_file(filename, system, history, chatbot):
327
  logging.info("保存对话历史中……")
328
+ os.makedirs(HISTORY_DIR, exist_ok=True)
329
+ if filename.endswith(".json"):
330
+ json_s = {"system": system, "history": history, "chatbot": chatbot}
331
+ print(json_s)
332
+ with open(os.path.join(HISTORY_DIR, filename), "w") as f:
333
+ json.dump(json_s, f)
334
+ elif filename.endswith(".md"):
335
+ md_s = f"system: \n- {system} \n"
336
+ for data in history:
337
+ md_s += f"\n{data['role']}: \n- {data['content']} \n"
338
+ with open(os.path.join(HISTORY_DIR, filename), "w", encoding="utf8") as f:
339
+ f.write(md_s)
340
+ logging.info("保存对话历史完毕")
341
+ return os.path.join(HISTORY_DIR, filename)
342
+
343
+ def save_chat_history(filename, system, history, chatbot):
344
  if filename == "":
345
  return
346
  if not filename.endswith(".json"):
347
  filename += ".json"
348
+ return save_file(filename, system, history, chatbot)
349
+
350
+ def export_markdown(filename, system, history, chatbot):
351
+ if filename == "":
352
+ return
353
+ if not filename.endswith(".md"):
354
+ filename += ".md"
355
+ return save_file(filename, system, history, chatbot)
356
 
357
 
358
  def load_chat_history(filename, system, history, chatbot):
359
  logging.info("加载对话历史中……")
360
+ if type(filename) != str:
361
+ filename = filename.name
362
  try:
363
  with open(os.path.join(HISTORY_DIR, filename), "r") as f:
364
  json_s = json.load(f)