JohnSmith9982 committed on
Commit
8ba98ee
1 Parent(s): a97e5c1

Upload 38 files

CITATION.cff ADDED
@@ -0,0 +1,20 @@
1
+ cff-version: 1.2.0
2
+ title: ChuanhuChatGPT
3
+ message: >-
4
+ If you use this software, please cite it using these
5
+ metadata.
6
+ type: software
7
+ authors:
8
+ - given-names: Chuanhu
9
+ orcid: https://orcid.org/0000-0001-8954-8598
10
+ - given-names: MZhao
11
+ orcid: https://orcid.org/0000-0003-2298-6213
12
+ - given-names: Keldos
13
+ orcid: https://orcid.org/0009-0005-0357-272X
14
+ repository-code: 'https://github.com/GaiZhenbiao/ChuanhuChatGPT'
15
+ url: 'https://github.com/GaiZhenbiao/ChuanhuChatGPT'
16
+ abstract: Provided a light and easy to use interface for ChatGPT API
17
+ license: GPL-3.0
18
+ commit: bd0034c37e5af6a90bd9c2f7dd073f6cd27c61af
19
+ version: '20230405'
20
+ date-released: '2023-04-05'
ChuanhuChatbot.py CHANGED
@@ -10,31 +10,33 @@ from modules.config import *
10
  from modules.utils import *
11
  from modules.presets import *
12
  from modules.overwrites import *
13
- from modules.chat_func import *
14
- from modules.openai_func import get_usage
15
 
 
 
16
  gr.Chatbot.postprocess = postprocess
17
  PromptHelper.compact_text_chunks = compact_text_chunks
18
 
19
  with open("assets/custom.css", "r", encoding="utf-8") as f:
20
  customCSS = f.read()
21
 
 
 
 
22
  with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
23
  user_name = gr.State("")
24
- history = gr.State([])
25
- token_count = gr.State([])
26
  promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2))
27
- user_api_key = gr.State(my_api_key)
28
  user_question = gr.State("")
29
- outputing = gr.State(False)
30
- topic = gr.State("未命名对话历史记录")
 
 
31
 
32
  with gr.Row():
33
- with gr.Column():
34
- gr.HTML(title)
35
- user_info = gr.Markdown(value="", elem_id="user_info")
36
- gr.HTML('<center><a href="https://huggingface.co/spaces/JohnSmith9982/ChuanhuChatGPT?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a></center>')
37
  status_display = gr.Markdown(get_geoip(), elem_id="status_display")
 
 
38
 
39
  # https://github.com/gradio-app/gradio/pull/3296
40
  def create_greeting(request: gr.Request):
@@ -50,176 +52,235 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
50
  with gr.Row():
51
  chatbot = gr.Chatbot(elem_id="chuanhu_chatbot").style(height="100%")
52
  with gr.Row():
53
- with gr.Column(scale=12):
54
  user_input = gr.Textbox(
55
  elem_id="user_input_tb",
56
- show_label=False, placeholder="在这里输入"
57
  ).style(container=False)
58
- with gr.Column(min_width=70, scale=1):
59
- submitBtn = gr.Button("发送", variant="primary")
60
- cancelBtn = gr.Button("取消", variant="secondary", visible=False)
61
  with gr.Row():
62
  emptyBtn = gr.Button(
63
- "🧹 新的对话",
64
  )
65
- retryBtn = gr.Button("🔄 重新生成")
66
- delFirstBtn = gr.Button("🗑️ 删除最旧对话")
67
- delLastBtn = gr.Button("🗑️ 删除最新对话")
68
- reduceTokenBtn = gr.Button("♻️ 总结对话")
69
 
70
  with gr.Column():
71
  with gr.Column(min_width=50, scale=1):
72
- with gr.Tab(label="ChatGPT"):
73
  keyTxt = gr.Textbox(
74
  show_label=True,
75
  placeholder=f"OpenAI API-key...",
76
- value=hide_middle_chars(my_api_key),
77
  type="password",
78
  visible=not HIDE_MY_KEY,
79
  label="API-Key",
80
  )
81
  if multi_api_key:
82
- usageTxt = gr.Markdown("多账号模式已开启,无需输入key,可直接开始对话", elem_id="usage_display")
83
  else:
84
- usageTxt = gr.Markdown("**发送消息** 或 **提交key** 以显示额度", elem_id="usage_display")
85
  model_select_dropdown = gr.Dropdown(
86
- label="选择模型", choices=MODELS, multiselect=False, value=MODELS[0]
87
  )
88
- use_streaming_checkbox = gr.Checkbox(
89
- label="实时传输回答", value=True, visible=enable_streaming_option
90
  )
91
- use_websearch_checkbox = gr.Checkbox(label="使用在线搜索", value=False)
 
 
 
 
 
92
  language_select_dropdown = gr.Dropdown(
93
- label="选择回复语言(针对搜索&索引功能)",
94
  choices=REPLY_LANGUAGES,
95
  multiselect=False,
96
  value=REPLY_LANGUAGES[0],
97
  )
98
- index_files = gr.Files(label="上传索引文件", type="file", multiple=True)
99
- two_column = gr.Checkbox(label="双栏pdf", value=advance_docs["pdf"].get("two_column", False))
100
  # TODO: 公式ocr
101
- # formula_ocr = gr.Checkbox(label="识别公式", value=advance_docs["pdf"].get("formula_ocr", False))
102
 
103
  with gr.Tab(label="Prompt"):
104
  systemPromptTxt = gr.Textbox(
105
  show_label=True,
106
- placeholder=f"在这里输入System Prompt...",
107
  label="System prompt",
108
- value=initial_prompt,
109
  lines=10,
110
  ).style(container=False)
111
- with gr.Accordion(label="加载Prompt模板", open=True):
112
  with gr.Column():
113
  with gr.Row():
114
  with gr.Column(scale=6):
115
  templateFileSelectDropdown = gr.Dropdown(
116
- label="选择Prompt模板集合文件",
117
  choices=get_template_names(plain=True),
118
  multiselect=False,
119
  value=get_template_names(plain=True)[0],
120
  ).style(container=False)
121
  with gr.Column(scale=1):
122
- templateRefreshBtn = gr.Button("🔄 刷新")
123
  with gr.Row():
124
  with gr.Column():
125
  templateSelectDropdown = gr.Dropdown(
126
- label="从Prompt模板中加载",
127
  choices=load_template(
128
  get_template_names(plain=True)[0], mode=1
129
  ),
130
  multiselect=False,
131
  ).style(container=False)
132
 
133
- with gr.Tab(label="保存/加载"):
134
- with gr.Accordion(label="保存/加载对话历史记录", open=True):
135
  with gr.Column():
136
  with gr.Row():
137
  with gr.Column(scale=6):
138
  historyFileSelectDropdown = gr.Dropdown(
139
- label="从列表中加载对话",
140
  choices=get_history_names(plain=True),
141
  multiselect=False,
142
  value=get_history_names(plain=True)[0],
143
  )
144
  with gr.Column(scale=1):
145
- historyRefreshBtn = gr.Button("🔄 刷新")
146
  with gr.Row():
147
  with gr.Column(scale=6):
148
  saveFileName = gr.Textbox(
149
  show_label=True,
150
- placeholder=f"设置文件名: 默认为.json,可选为.md",
151
- label="设置保存文件名",
152
- value="对话历史记录",
153
  ).style(container=True)
154
  with gr.Column(scale=1):
155
- saveHistoryBtn = gr.Button("💾 保存对话")
156
- exportMarkdownBtn = gr.Button("📝 导出为Markdown")
157
- gr.Markdown("默认保存于history文件夹")
158
  with gr.Row():
159
  with gr.Column():
160
  downloadFile = gr.File(interactive=True)
161
 
162
- with gr.Tab(label="高级"):
163
- gr.Markdown("# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置")
164
- default_btn = gr.Button("🔙 恢复默认设置")
165
-
166
- with gr.Accordion("参数", open=False):
167
- top_p = gr.Slider(
168
  minimum=-0,
169
  maximum=1.0,
170
  value=1.0,
171
  step=0.05,
172
  interactive=True,
173
- label="Top-p",
174
  )
175
- temperature = gr.Slider(
176
- minimum=-0,
177
  maximum=2.0,
178
- value=1.0,
179
- step=0.1,
180
  interactive=True,
181
- label="Temperature",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
182
  )
183
 
184
- with gr.Accordion("网络设置", open=False, visible=False):
185
  # 优先展示自定义的api_host
186
  apihostTxt = gr.Textbox(
187
  show_label=True,
188
- placeholder=f"在这里输入API-Host...",
189
  label="API-Host",
190
  value=config.api_host or shared.API_HOST,
191
  lines=1,
192
  )
193
- changeAPIURLBtn = gr.Button("🔄 切换API地址")
194
  proxyTxt = gr.Textbox(
195
  show_label=True,
196
- placeholder=f"在这里输入代理地址...",
197
- label="代理地址(示例:http://127.0.0.1:10809)",
198
  value="",
199
  lines=2,
200
  )
201
- changeProxyBtn = gr.Button("🔄 设置代理地址")
 
202
 
203
- gr.Markdown(description)
204
- gr.HTML(footer.format(versions=versions_html()), elem_id="footer")
205
  chatgpt_predict_args = dict(
206
  fn=predict,
207
  inputs=[
208
- user_api_key,
209
- systemPromptTxt,
210
- history,
211
  user_question,
212
  chatbot,
213
- token_count,
214
- top_p,
215
- temperature,
216
  use_streaming_checkbox,
217
- model_select_dropdown,
218
  use_websearch_checkbox,
219
  index_files,
220
  language_select_dropdown,
221
  ],
222
- outputs=[chatbot, history, status_display, token_count],
223
  show_progress=True,
224
  )
225
 
@@ -243,12 +304,18 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
243
  )
244
 
245
  get_usage_args = dict(
246
- fn=get_usage, inputs=[user_api_key], outputs=[usageTxt], show_progress=False
247
  )
248
 
249
 
250
  # Chatbot
251
- cancelBtn.click(cancel_outputing, [], [])
252
 
253
  user_input.submit(**transfer_input_args).then(**chatgpt_predict_args).then(**end_outputing_args)
254
  user_input.submit(**get_usage_args)
@@ -256,9 +323,12 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
256
  submitBtn.click(**transfer_input_args).then(**chatgpt_predict_args).then(**end_outputing_args)
257
  submitBtn.click(**get_usage_args)
258
 
 
 
259
  emptyBtn.click(
260
- reset_state,
261
- outputs=[chatbot, history, token_count, status_display],
 
262
  show_progress=True,
263
  )
264
  emptyBtn.click(**reset_textbox_args)
@@ -266,61 +336,42 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
266
  retryBtn.click(**start_outputing_args).then(
267
  retry,
268
  [
269
- user_api_key,
270
- systemPromptTxt,
271
- history,
272
  chatbot,
273
- token_count,
274
- top_p,
275
- temperature,
276
  use_streaming_checkbox,
277
- model_select_dropdown,
 
278
  language_select_dropdown,
279
  ],
280
- [chatbot, history, status_display, token_count],
281
  show_progress=True,
282
  ).then(**end_outputing_args)
283
  retryBtn.click(**get_usage_args)
284
 
285
  delFirstBtn.click(
286
  delete_first_conversation,
287
- [history, token_count],
288
- [history, token_count, status_display],
289
  )
290
 
291
  delLastBtn.click(
292
  delete_last_conversation,
293
- [chatbot, history, token_count],
294
- [chatbot, history, token_count, status_display],
295
- show_progress=True,
296
- )
297
-
298
- reduceTokenBtn.click(
299
- reduce_token_size,
300
- [
301
- user_api_key,
302
- systemPromptTxt,
303
- history,
304
- chatbot,
305
- token_count,
306
- top_p,
307
- temperature,
308
- gr.State(sum(token_count.value[-4:])),
309
- model_select_dropdown,
310
- language_select_dropdown,
311
- ],
312
- [chatbot, history, status_display, token_count],
313
- show_progress=True,
314
  )
315
- reduceTokenBtn.click(**get_usage_args)
316
 
317
  two_column.change(update_doc_config, [two_column], None)
318
 
319
- # ChatGPT
320
- keyTxt.change(submit_key, keyTxt, [user_api_key, status_display]).then(**get_usage_args)
321
  keyTxt.submit(**get_usage_args)
 
 
 
322
 
323
  # Template
 
324
  templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown])
325
  templateFileSelectDropdown.change(
326
  load_template,
@@ -338,31 +389,33 @@ with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
338
  # S&L
339
  saveHistoryBtn.click(
340
  save_chat_history,
341
- [saveFileName, systemPromptTxt, history, chatbot, user_name],
342
  downloadFile,
343
  show_progress=True,
344
  )
345
  saveHistoryBtn.click(get_history_names, [gr.State(False), user_name], [historyFileSelectDropdown])
346
  exportMarkdownBtn.click(
347
  export_markdown,
348
- [saveFileName, systemPromptTxt, history, chatbot, user_name],
349
  downloadFile,
350
  show_progress=True,
351
  )
352
  historyRefreshBtn.click(get_history_names, [gr.State(False), user_name], [historyFileSelectDropdown])
353
- historyFileSelectDropdown.change(
354
- load_chat_history,
355
- [historyFileSelectDropdown, systemPromptTxt, history, chatbot, user_name],
356
- [saveFileName, systemPromptTxt, history, chatbot],
357
- show_progress=True,
358
- )
359
- downloadFile.change(
360
- load_chat_history,
361
- [downloadFile, systemPromptTxt, history, chatbot, user_name],
362
- [saveFileName, systemPromptTxt, history, chatbot],
363
- )
364
 
365
  # Advanced
366
  default_btn.click(
367
  reset_default, [], [apihostTxt, proxyTxt, status_display], show_progress=True
368
  )
@@ -385,39 +438,15 @@ logging.info(
385
  + colorama.Style.RESET_ALL
386
  )
387
  # 默认开启本地服务器,默认可以直接从IP访问,默认不创建公开分享链接
388
- demo.title = "川虎ChatGPT 🚀"
389
 
390
  if __name__ == "__main__":
391
  reload_javascript()
392
- # if running in Docker
393
- if dockerflag:
394
- if authflag:
395
- demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
396
- server_name="0.0.0.0",
397
- server_port=7860,
398
- auth=auth_list,
399
- favicon_path="./assets/favicon.ico",
400
- )
401
- else:
402
- demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
403
- server_name="0.0.0.0",
404
- server_port=7860,
405
- share=False,
406
- favicon_path="./assets/favicon.ico",
407
- )
408
- # if not running in Docker
409
- else:
410
- if authflag:
411
- demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
412
- share=False,
413
- auth=auth_list,
414
- favicon_path="./assets/favicon.ico",
415
- inbrowser=True,
416
- )
417
- else:
418
- demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
419
- share=False, favicon_path="./assets/favicon.ico", inbrowser=True
420
- ) # 改为 share=True 可以创建公开分享链接
421
- # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=7860, share=False) # 可自定义端口
422
- # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=7860,auth=("在这里填写用户名", "在这里填写密码")) # 可设置用户名与密码
423
- # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(auth=("在这里填写用户名", "在这里填写密码")) # 适合Nginx反向代理
10
  from modules.utils import *
11
  from modules.presets import *
12
  from modules.overwrites import *
13
+ from modules.models import get_model
 
14
 
15
+
16
+ gr.Chatbot._postprocess_chat_messages = postprocess_chat_messages
17
  gr.Chatbot.postprocess = postprocess
18
  PromptHelper.compact_text_chunks = compact_text_chunks
19
 
20
  with open("assets/custom.css", "r", encoding="utf-8") as f:
21
  customCSS = f.read()
22
 
23
+ def create_new_model():
24
+ return get_model(model_name = MODELS[DEFAULT_MODEL], access_key = my_api_key)[0]
25
+
26
  with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
27
  user_name = gr.State("")
 
 
28
  promptTemplates = gr.State(load_template(get_template_names(plain=True)[0], mode=2))
 
29
  user_question = gr.State("")
30
+ user_api_key = gr.State(my_api_key)
31
+ current_model = gr.State(create_new_model)
32
+
33
+ topic = gr.State(i18n("未命名对话历史记录"))
34
 
35
  with gr.Row():
36
+ gr.HTML(CHUANHU_TITLE, elem_id="app_title")
 
 
 
37
  status_display = gr.Markdown(get_geoip(), elem_id="status_display")
38
+ with gr.Row(elem_id="float_display"):
39
+ user_info = gr.Markdown(value="getting user info...", elem_id="user_info")
40
 
41
  # https://github.com/gradio-app/gradio/pull/3296
42
  def create_greeting(request: gr.Request):
52
  with gr.Row():
53
  chatbot = gr.Chatbot(elem_id="chuanhu_chatbot").style(height="100%")
54
  with gr.Row():
55
+ with gr.Column(min_width=225, scale=12):
56
  user_input = gr.Textbox(
57
  elem_id="user_input_tb",
58
+ show_label=False, placeholder=i18n("在这里输入")
59
  ).style(container=False)
60
+ with gr.Column(min_width=42, scale=1):
61
+ submitBtn = gr.Button(value="", variant="primary", elem_id="submit_btn")
62
+ cancelBtn = gr.Button(value="", variant="secondary", visible=False, elem_id="cancel_btn")
63
  with gr.Row():
64
  emptyBtn = gr.Button(
65
+ i18n("🧹 新的对话"),
66
  )
67
+ retryBtn = gr.Button(i18n("🔄 重新生成"))
68
+ delFirstBtn = gr.Button(i18n("🗑️ 删除最旧对话"))
69
+ delLastBtn = gr.Button(i18n("🗑️ 删除最新对话"))
 
70
 
71
  with gr.Column():
72
  with gr.Column(min_width=50, scale=1):
73
+ with gr.Tab(label=i18n("模型")):
74
  keyTxt = gr.Textbox(
75
  show_label=True,
76
  placeholder=f"OpenAI API-key...",
77
+ value=hide_middle_chars(user_api_key.value),
78
  type="password",
79
  visible=not HIDE_MY_KEY,
80
  label="API-Key",
81
  )
82
  if multi_api_key:
83
+ usageTxt = gr.Markdown(i18n("多账号模式已开启,无需输入key,可直接开始对话"), elem_id="usage_display", elem_classes="insert_block")
84
  else:
85
+ usageTxt = gr.Markdown(i18n("**发送消息** 或 **提交key** 以显示额度"), elem_id="usage_display", elem_classes="insert_block")
86
  model_select_dropdown = gr.Dropdown(
87
+ label=i18n("选择模型"), choices=MODELS, multiselect=False, value=MODELS[DEFAULT_MODEL], interactive=True
88
  )
89
+ lora_select_dropdown = gr.Dropdown(
90
+ label=i18n("选择LoRA模型"), choices=[], multiselect=False, interactive=True, visible=False
91
  )
92
+ with gr.Row():
93
+ use_streaming_checkbox = gr.Checkbox(
94
+ label=i18n("实时传输回答"), value=True, visible=ENABLE_STREAMING_OPTION
95
+ )
96
+ single_turn_checkbox = gr.Checkbox(label=i18n("单轮对话"), value=False)
97
+ use_websearch_checkbox = gr.Checkbox(label=i18n("使用在线搜索"), value=False)
98
  language_select_dropdown = gr.Dropdown(
99
+ label=i18n("选择回复语言(针对搜索&索引功能)"),
100
  choices=REPLY_LANGUAGES,
101
  multiselect=False,
102
  value=REPLY_LANGUAGES[0],
103
  )
104
+ index_files = gr.Files(label=i18n("上传索引文件"), type="file")
105
+ two_column = gr.Checkbox(label=i18n("双栏pdf"), value=advance_docs["pdf"].get("two_column", False))
106
  # TODO: 公式ocr
107
+ # formula_ocr = gr.Checkbox(label=i18n("识别公式"), value=advance_docs["pdf"].get("formula_ocr", False))
108
 
109
  with gr.Tab(label="Prompt"):
110
  systemPromptTxt = gr.Textbox(
111
  show_label=True,
112
+ placeholder=i18n("在这里输入System Prompt..."),
113
  label="System prompt",
114
+ value=INITIAL_SYSTEM_PROMPT,
115
  lines=10,
116
  ).style(container=False)
117
+ with gr.Accordion(label=i18n("加载Prompt模板"), open=True):
118
  with gr.Column():
119
  with gr.Row():
120
  with gr.Column(scale=6):
121
  templateFileSelectDropdown = gr.Dropdown(
122
+ label=i18n("选择Prompt模板集合文件"),
123
  choices=get_template_names(plain=True),
124
  multiselect=False,
125
  value=get_template_names(plain=True)[0],
126
  ).style(container=False)
127
  with gr.Column(scale=1):
128
+ templateRefreshBtn = gr.Button(i18n("🔄 刷新"))
129
  with gr.Row():
130
  with gr.Column():
131
  templateSelectDropdown = gr.Dropdown(
132
+ label=i18n("从Prompt模板中加载"),
133
  choices=load_template(
134
  get_template_names(plain=True)[0], mode=1
135
  ),
136
  multiselect=False,
137
  ).style(container=False)
138
 
139
+ with gr.Tab(label=i18n("保存/加载")):
140
+ with gr.Accordion(label=i18n("保存/加载对话历史记录"), open=True):
141
  with gr.Column():
142
  with gr.Row():
143
  with gr.Column(scale=6):
144
  historyFileSelectDropdown = gr.Dropdown(
145
+ label=i18n("从列表中加载对话"),
146
  choices=get_history_names(plain=True),
147
  multiselect=False,
148
  value=get_history_names(plain=True)[0],
149
  )
150
  with gr.Column(scale=1):
151
+ historyRefreshBtn = gr.Button(i18n("🔄 刷新"))
152
  with gr.Row():
153
  with gr.Column(scale=6):
154
  saveFileName = gr.Textbox(
155
  show_label=True,
156
+ placeholder=i18n("设置文件名: 默认为.json,可选为.md"),
157
+ label=i18n("设置保存文件名"),
158
+ value=i18n("对话历史记录"),
159
  ).style(container=True)
160
  with gr.Column(scale=1):
161
+ saveHistoryBtn = gr.Button(i18n("💾 保存对话"))
162
+ exportMarkdownBtn = gr.Button(i18n("📝 导出为Markdown"))
163
+ gr.Markdown(i18n("默认保存于history文件夹"))
164
  with gr.Row():
165
  with gr.Column():
166
  downloadFile = gr.File(interactive=True)
167
 
168
+ with gr.Tab(label=i18n("高级")):
169
+ gr.Markdown(i18n("# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置"))
170
+ gr.HTML(APPEARANCE_SWITCHER, elem_classes="insert_block")
171
+ with gr.Accordion(i18n("参数"), open=False):
172
+ temperature_slider = gr.Slider(
173
+ minimum=-0,
174
+ maximum=2.0,
175
+ value=1.0,
176
+ step=0.1,
177
+ interactive=True,
178
+ label="temperature",
179
+ )
180
+ top_p_slider = gr.Slider(
181
  minimum=-0,
182
  maximum=1.0,
183
  value=1.0,
184
  step=0.05,
185
  interactive=True,
186
+ label="top-p",
187
  )
188
+ n_choices_slider = gr.Slider(
189
+ minimum=1,
190
+ maximum=10,
191
+ value=1,
192
+ step=1,
193
+ interactive=True,
194
+ label="n choices",
195
+ )
196
+ stop_sequence_txt = gr.Textbox(
197
+ show_label=True,
198
+ placeholder=i18n("在这里输入停止符,用英文逗号隔开..."),
199
+ label="stop",
200
+ value="",
201
+ lines=1,
202
+ )
203
+ max_context_length_slider = gr.Slider(
204
+ minimum=1,
205
+ maximum=32768,
206
+ value=2000,
207
+ step=1,
208
+ interactive=True,
209
+ label="max context",
210
+ )
211
+ max_generation_slider = gr.Slider(
212
+ minimum=1,
213
+ maximum=32768,
214
+ value=1000,
215
+ step=1,
216
+ interactive=True,
217
+ label="max generations",
218
+ )
219
+ presence_penalty_slider = gr.Slider(
220
+ minimum=-2.0,
221
  maximum=2.0,
222
+ value=0.0,
223
+ step=0.01,
224
+ interactive=True,
225
+ label="presence penalty",
226
+ )
227
+ frequency_penalty_slider = gr.Slider(
228
+ minimum=-2.0,
229
+ maximum=2.0,
230
+ value=0.0,
231
+ step=0.01,
232
  interactive=True,
233
+ label="frequency penalty",
234
+ )
235
+ logit_bias_txt = gr.Textbox(
236
+ show_label=True,
237
+ placeholder=f"word:likelihood",
238
+ label="logit bias",
239
+ value="",
240
+ lines=1,
241
+ )
242
+ user_identifier_txt = gr.Textbox(
243
+ show_label=True,
244
+ placeholder=i18n("用于定位滥用行为"),
245
+ label=i18n("用户名"),
246
+ value=user_name.value,
247
+ lines=1,
248
  )
249
 
250
+ with gr.Accordion(i18n("网络设置"), open=False):
251
  # 优先展示自定义的api_host
252
  apihostTxt = gr.Textbox(
253
  show_label=True,
254
+ placeholder=i18n("在这里输入API-Host..."),
255
  label="API-Host",
256
  value=config.api_host or shared.API_HOST,
257
  lines=1,
258
  )
259
+ changeAPIURLBtn = gr.Button(i18n("🔄 切换API地址"))
260
  proxyTxt = gr.Textbox(
261
  show_label=True,
262
+ placeholder=i18n("在这里输入代理地址..."),
263
+ label=i18n("代理地址(示例:http://127.0.0.1:10809)"),
264
  value="",
265
  lines=2,
266
  )
267
+ changeProxyBtn = gr.Button(i18n("🔄 设置代理地址"))
268
+ default_btn = gr.Button(i18n("🔙 恢复默认设置"))
269
 
270
+ gr.Markdown(CHUANHU_DESCRIPTION, elem_id="description")
271
+ gr.HTML(FOOTER.format(versions=versions_html()), elem_id="footer")
272
  chatgpt_predict_args = dict(
273
  fn=predict,
274
  inputs=[
275
+ current_model,
 
 
276
  user_question,
277
  chatbot,
 
 
 
278
  use_streaming_checkbox,
 
279
  use_websearch_checkbox,
280
  index_files,
281
  language_select_dropdown,
282
  ],
283
+ outputs=[chatbot, status_display],
284
  show_progress=True,
285
  )
286
 
304
  )
305
 
306
  get_usage_args = dict(
307
+ fn=billing_info, inputs=[current_model], outputs=[usageTxt], show_progress=False
308
+ )
309
+
310
+ load_history_from_file_args = dict(
311
+ fn=load_chat_history,
312
+ inputs=[current_model, historyFileSelectDropdown, chatbot, user_name],
313
+ outputs=[saveFileName, systemPromptTxt, chatbot]
314
  )
315
 
316
 
317
  # Chatbot
318
+ cancelBtn.click(interrupt, [current_model], [])
319
 
320
  user_input.submit(**transfer_input_args).then(**chatgpt_predict_args).then(**end_outputing_args)
321
  user_input.submit(**get_usage_args)
323
  submitBtn.click(**transfer_input_args).then(**chatgpt_predict_args).then(**end_outputing_args)
324
  submitBtn.click(**get_usage_args)
325
 
326
+ index_files.change(handle_file_upload, [current_model, index_files, chatbot], [index_files, chatbot, status_display])
327
+
328
  emptyBtn.click(
329
+ reset,
330
+ inputs=[current_model],
331
+ outputs=[chatbot, status_display],
332
  show_progress=True,
333
  )
334
  emptyBtn.click(**reset_textbox_args)
336
  retryBtn.click(**start_outputing_args).then(
337
  retry,
338
  [
339
+ current_model,
 
 
340
  chatbot,
 
 
 
341
  use_streaming_checkbox,
342
+ use_websearch_checkbox,
343
+ index_files,
344
  language_select_dropdown,
345
  ],
346
+ [chatbot, status_display],
347
  show_progress=True,
348
  ).then(**end_outputing_args)
349
  retryBtn.click(**get_usage_args)
350
 
351
  delFirstBtn.click(
352
  delete_first_conversation,
353
+ [current_model],
354
+ [status_display],
355
  )
356
 
357
  delLastBtn.click(
358
  delete_last_conversation,
359
+ [current_model, chatbot],
360
+ [chatbot, status_display],
361
+ show_progress=False
362
  )
 
363
 
364
  two_column.change(update_doc_config, [two_column], None)
365
 
366
+ # LLM Models
367
+ keyTxt.change(set_key, [current_model, keyTxt], [user_api_key, status_display]).then(**get_usage_args)
368
  keyTxt.submit(**get_usage_args)
369
+ single_turn_checkbox.change(set_single_turn, [current_model, single_turn_checkbox], None)
370
+ model_select_dropdown.change(get_model, [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, top_p_slider, systemPromptTxt], [current_model, status_display, lora_select_dropdown], show_progress=True)
371
+ lora_select_dropdown.change(get_model, [model_select_dropdown, lora_select_dropdown, user_api_key, temperature_slider, top_p_slider, systemPromptTxt], [current_model, status_display], show_progress=True)
372
 
373
  # Template
374
+ systemPromptTxt.change(set_system_prompt, [current_model, systemPromptTxt], None)
375
  templateRefreshBtn.click(get_template_names, None, [templateFileSelectDropdown])
376
  templateFileSelectDropdown.change(
377
  load_template,
389
  # S&L
390
  saveHistoryBtn.click(
391
  save_chat_history,
392
+ [current_model, saveFileName, chatbot, user_name],
393
  downloadFile,
394
  show_progress=True,
395
  )
396
  saveHistoryBtn.click(get_history_names, [gr.State(False), user_name], [historyFileSelectDropdown])
397
  exportMarkdownBtn.click(
398
  export_markdown,
399
+ [current_model, saveFileName, chatbot, user_name],
400
  downloadFile,
401
  show_progress=True,
402
  )
403
  historyRefreshBtn.click(get_history_names, [gr.State(False), user_name], [historyFileSelectDropdown])
404
+ historyFileSelectDropdown.change(**load_history_from_file_args)
405
+ downloadFile.change(**load_history_from_file_args)
406
 
407
  # Advanced
408
+ max_context_length_slider.change(set_token_upper_limit, [current_model, max_context_length_slider], None)
409
+ temperature_slider.change(set_temperature, [current_model, temperature_slider], None)
410
+ top_p_slider.change(set_top_p, [current_model, top_p_slider], None)
411
+ n_choices_slider.change(set_n_choices, [current_model, n_choices_slider], None)
412
+ stop_sequence_txt.change(set_stop_sequence, [current_model, stop_sequence_txt], None)
413
+ max_generation_slider.change(set_max_tokens, [current_model, max_generation_slider], None)
414
+ presence_penalty_slider.change(set_presence_penalty, [current_model, presence_penalty_slider], None)
415
+ frequency_penalty_slider.change(set_frequency_penalty, [current_model, frequency_penalty_slider], None)
416
+ logit_bias_txt.change(set_logit_bias, [current_model, logit_bias_txt], None)
417
+ user_identifier_txt.change(set_user_identifier, [current_model, user_identifier_txt], None)
418
+
419
  default_btn.click(
420
  reset_default, [], [apihostTxt, proxyTxt, status_display], show_progress=True
421
  )
438
  + colorama.Style.RESET_ALL
439
  )
440
  # 默认开启本地服务器,默认可以直接从IP访问,默认不创建公开分享链接
441
+ demo.title = i18n("川虎Chat 🚀")
442
 
443
  if __name__ == "__main__":
444
  reload_javascript()
445
+ demo.queue(concurrency_count=CONCURRENT_COUNT).launch(
446
+ auth=auth_list if authflag else None,
447
+ favicon_path="./assets/favicon.ico",
448
+ inbrowser=not dockerflag, # 禁止在docker下开启inbrowser
449
+ )
450
+ # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=7860, share=False) # 可自定义端口
451
+ # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", server_port=7860,auth=("在这里填写用户名", "在这里填写密码")) # 可设置用户名与密码
452
+ # demo.queue(concurrency_count=CONCURRENT_COUNT).launch(auth=("在这里填写用户名", "在这里填写密码")) # 适合Nginx反向代理
 
 
 
Dockerfile CHANGED
@@ -1,7 +1,9 @@
1
  FROM python:3.9 as builder
2
  RUN apt-get update && apt-get install -y build-essential
3
  COPY requirements.txt .
 
4
  RUN pip install --user -r requirements.txt
 
5
 
6
  FROM python:3.9
7
  MAINTAINER iskoldt
@@ -9,6 +11,5 @@ COPY --from=builder /root/.local /root/.local
9
  ENV PATH=/root/.local/bin:$PATH
10
  COPY . /app
11
  WORKDIR /app
12
- ENV my_api_key empty
13
  ENV dockerrun yes
14
  CMD ["python3", "-u", "ChuanhuChatbot.py", "2>&1", "|", "tee", "/var/log/application.log"]
1
  FROM python:3.9 as builder
2
  RUN apt-get update && apt-get install -y build-essential
3
  COPY requirements.txt .
4
+ COPY requirements_advanced.txt .
5
  RUN pip install --user -r requirements.txt
6
+ # RUN pip install --user -r requirements_advanced.txt
7
 
8
  FROM python:3.9
9
  MAINTAINER iskoldt
11
  ENV PATH=/root/.local/bin:$PATH
12
  COPY . /app
13
  WORKDIR /app
 
14
  ENV dockerrun yes
15
  CMD ["python3", "-u", "ChuanhuChatbot.py", "2>&1", "|", "tee", "/var/log/application.log"]
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🐯
4
  colorFrom: green
5
  colorTo: red
6
  sdk: gradio
7
- sdk_version: 3.24.1
8
  app_file: ChuanhuChatbot.py
9
  pinned: false
10
  license: gpl-3.0
4
  colorFrom: green
5
  colorTo: red
6
  sdk: gradio
7
+ sdk_version: 3.25.0
8
  app_file: ChuanhuChatbot.py
9
  pinned: false
10
  license: gpl-3.0
assets/custom.css CHANGED
@@ -3,14 +3,27 @@
3
  --chatbot-color-dark: #121111;
4
  }
5
 
 
 
6
  /* 覆盖gradio的页脚信息QAQ */
7
- footer {
8
  display: none !important;
9
- }
10
- #footer{
11
  text-align: center;
12
  }
13
- #footer div{
14
  display: inline-block;
15
  }
16
  #footer .versions{
@@ -18,16 +31,34 @@ footer {
18
  opacity: 0.85;
19
  }
20
 
 
 
 
 
21
  /* user_info */
22
  #user_info {
23
  white-space: nowrap;
24
- margin-top: -1.3em !important;
25
- padding-left: 112px !important;
 
 
 
26
  }
27
  #user_info p {
28
- font-size: .85em;
29
- font-family: monospace;
30
- color: var(--body-text-color-subdued);
 
 
 
31
  }
32
 
33
  /* status_display */
@@ -43,14 +74,18 @@ footer {
43
  color: var(--body-text-color-subdued);
44
  }
45
 
46
- #chuanhu_chatbot, #status_display {
47
  transition: all 0.6s;
48
  }
 
 
 
49
 
50
  /* usage_display */
51
- #usage_display {
52
  position: relative;
53
  margin: 0;
 
54
  box-shadow: var(--block-shadow);
55
  border-width: var(--block-border-width);
56
  border-color: var(--block-border-color);
@@ -62,7 +97,6 @@ footer {
62
  }
63
  #usage_display p, #usage_display span {
64
  margin: 0;
65
- padding: .5em 1em;
66
  font-size: .85em;
67
  color: var(--body-text-color-subdued);
68
  }
@@ -74,7 +108,7 @@ footer {
74
  overflow: hidden;
75
  }
76
  .progress {
77
- background-color: var(--block-title-background-fill);;
78
  height: 100%;
79
  border-radius: 10px;
80
  text-align: right;
@@ -88,38 +122,107 @@ footer {
88
  padding-right: 10px;
89
  line-height: 20px;
90
  }
 
 
 
 
91
  /* list */
92
  ol:not(.options), ul:not(.options) {
93
  padding-inline-start: 2em !important;
94
  }
95
 
96
- /* 亮色 */
97
- @media (prefers-color-scheme: light) {
 
 
 
 
 
 
98
  #chuanhu_chatbot {
99
- background-color: var(--chatbot-color-light) !important;
100
- color: #000000 !important;
101
  }
102
- [data-testid = "bot"] {
103
- background-color: #FFFFFF !important;
104
- }
105
- [data-testid = "user"] {
106
- background-color: #95EC69 !important;
107
  }
108
  }
109
- /* 暗色 */
110
- @media (prefers-color-scheme: dark) {
111
  #chuanhu_chatbot {
112
- background-color: var(--chatbot-color-dark) !important;
113
- color: #FFFFFF !important;
114
  }
115
- [data-testid = "bot"] {
116
- background-color: #2C2C2C !important;
117
  }
118
- [data-testid = "user"] {
119
- background-color: #26B561 !important;
120
  }
121
- body {
122
- background-color: var(--neutral-950) !important;
123
  }
124
  }
125
  /* 对话气泡 */
3
  --chatbot-color-dark: #121111;
4
  }
5
 
6
+ #app_title {
7
+ font-weight: var(--prose-header-text-weight);
8
+ font-size: var(--text-xxl);
9
+ line-height: 1.3;
10
+ text-align: left;
11
+ margin-top: 6px;
12
+ white-space: nowrap;
13
+ }
14
+ #description {
15
+ text-align: center;
16
+ margin:16px 0
17
+ }
18
+
19
  /* 覆盖gradio的页脚信息QAQ */
20
+ /* footer {
21
  display: none !important;
22
+ } */
23
+ #footer {
24
  text-align: center;
25
  }
26
+ #footer div {
27
  display: inline-block;
28
  }
29
  #footer .versions{
31
  opacity: 0.85;
32
  }
33
 
34
+ #float_display {
35
+ position: absolute;
36
+ max-height: 30px;
37
+ }
38
  /* user_info */
39
  #user_info {
40
  white-space: nowrap;
41
+ position: absolute; left: 8em; top: .2em;
42
+ z-index: var(--layer-2);
43
+ box-shadow: var(--block-shadow);
44
+ border: none; border-radius: var(--block-label-radius);
45
+ background: var(--color-accent);
46
+ padding: var(--block-label-padding);
47
+ font-size: var(--block-label-text-size); line-height: var(--line-sm);
48
+ width: auto; min-height: 30px!important;
49
+ opacity: 1;
50
+ transition: opacity 0.3s ease-in-out;
51
+ }
52
+ #user_info .wrap {
53
+ opacity: 0;
54
  }
55
  #user_info p {
56
+ color: white;
57
+ font-weight: var(--block-label-text-weight);
58
+ }
59
+ #user_info.hideK {
60
+ opacity: 0;
61
+ transition: opacity 1s ease-in-out;
62
  }
63
 
64
  /* status_display */
74
  color: var(--body-text-color-subdued);
75
  }
76
 
77
+ #status_display {
78
  transition: all 0.6s;
79
  }
80
+ #chuanhu_chatbot {
81
+ transition: height 0.3s ease;
82
+ }
83
 
84
  /* usage_display */
85
+ .insert_block {
86
  position: relative;
87
  margin: 0;
88
+ padding: .5em 1em;
89
  box-shadow: var(--block-shadow);
90
  border-width: var(--block-border-width);
91
  border-color: var(--block-border-color);
97
  }
98
  #usage_display p, #usage_display span {
99
  margin: 0;
 
100
  font-size: .85em;
101
  color: var(--body-text-color-subdued);
102
  }
108
  overflow: hidden;
109
  }
110
  .progress {
111
+ background-color: var(--block-title-background-fill);
112
  height: 100%;
113
  border-radius: 10px;
114
  text-align: right;
122
  padding-right: 10px;
123
  line-height: 20px;
124
  }
125
+
126
+ .apSwitch {
127
+ top: 2px;
128
+ display: inline-block;
129
+ height: 24px;
130
+ position: relative;
131
+ width: 48px;
132
+ border-radius: 12px;
133
+ }
134
+ .apSwitch input {
135
+ display: none !important;
136
+ }
137
+ .apSlider {
138
+ background-color: var(--block-label-background-fill);
139
+ bottom: 0;
140
+ cursor: pointer;
141
+ left: 0;
142
+ position: absolute;
143
+ right: 0;
144
+ top: 0;
145
+ transition: .4s;
146
+ font-size: 18px;
147
+ border-radius: 12px;
148
+ }
149
+ .apSlider::before {
150
+ bottom: -1.5px;
151
+ left: 1px;
152
+ position: absolute;
153
+ transition: .4s;
154
+ content: "🌞";
155
+ }
156
+ input:checked + .apSlider {
157
+ background-color: var(--block-label-background-fill);
158
+ }
159
+ input:checked + .apSlider::before {
160
+ transform: translateX(23px);
161
+ content:"🌚";
162
+ }
163
+
164
+ #submit_btn, #cancel_btn {
165
+ height: 42px !important;
166
+ }
167
+ #submit_btn::before {
168
+ content: url("data:image/svg+xml, %3Csvg width='21px' height='20px' viewBox='0 0 21 20' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='page' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cg id='send' transform='translate(0.435849, 0.088463)' fill='%23FFFFFF' fill-rule='nonzero'%3E %3Cpath d='M0.579148261,0.0428666046 C0.301105539,-0.0961547561 -0.036517765,0.122307382 0.0032026237,0.420210298 L1.4927172,18.1553639 C1.5125774,18.4334066 1.79062012,18.5922882 2.04880264,18.4929872 L8.24518329,15.8913017 L11.6412765,19.7441794 C11.8597387,19.9825018 12.2370824,19.8832008 12.3165231,19.5852979 L13.9450591,13.4882182 L19.7839562,11.0255541 C20.0619989,10.8865327 20.0818591,10.4694687 19.7839562,10.3105871 L0.579148261,0.0428666046 Z M11.6138902,17.0883151 L9.85385903,14.7195502 L0.718169621,0.618812241 L12.69945,12.9346347 L11.6138902,17.0883151 Z' id='shape'%3E%3C/path%3E %3C/g%3E %3C/g%3E %3C/svg%3E");
169
+ height: 21px;
170
+ }
171
+ #cancel_btn::before {
172
+ content: url("data:image/svg+xml,%3Csvg width='21px' height='21px' viewBox='0 0 21 21' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='pg' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cpath d='M10.2072007,20.088463 C11.5727865,20.088463 12.8594566,19.8259823 14.067211,19.3010209 C15.2749653,18.7760595 16.3386126,18.0538087 17.2581528,17.1342685 C18.177693,16.2147282 18.8982283,15.1527965 19.4197586,13.9484733 C19.9412889,12.7441501 20.202054,11.4557644 20.202054,10.0833163 C20.202054,8.71773046 19.9395733,7.43106036 19.4146119,6.22330603 C18.8896505,5.01555169 18.1673997,3.95018885 17.2478595,3.0272175 C16.3283192,2.10424615 15.2646719,1.3837109 14.0569176,0.865611739 C12.8491633,0.34751258 11.5624932,0.088463 10.1969073,0.088463 C8.83132146,0.088463 7.54636692,0.34751258 6.34204371,0.865611739 C5.1377205,1.3837109 4.07407321,2.10424615 3.15110186,3.0272175 C2.22813051,3.95018885 1.5058797,5.01555169 0.984349419,6.22330603 C0.46281914,7.43106036 0.202054,8.71773046 0.202054,10.0833163 C0.202054,11.4557644 0.4645347,12.7441501 0.9894961,13.9484733 C1.5144575,15.1527965 2.23670831,16.2147282 3.15624854,17.1342685 C4.07578877,18.0538087 5.1377205,18.7760595 6.34204371,19.3010209 C7.54636692,19.8259823 8.83475258,20.088463 10.2072007,20.088463 Z M10.2072007,18.2562448 C9.07493099,18.2562448 8.01471483,18.0452309 7.0265522,17.6232031 C6.03838956,17.2011753 5.17031614,16.6161693 4.42233192,15.8681851 C3.6743477,15.1202009 3.09105726,14.2521274 2.67246059,13.2639648 C2.25386392,12.2758022 2.04456558,11.215586 2.04456558,10.0833163 C2.04456558,8.95104663 2.25386392,7.89083047 2.67246059,6.90266784 C3.09105726,5.9145052 3.6743477,5.04643178 4.42233192,4.29844756 C5.17031614,3.55046334 6.036674,2.9671729 7.02140552,2.54857623 C8.00613703,2.12997956 9.06463763,1.92068122 10.1969073,1.92068122 C11.329177,1.92068122 12.3911087,2.12997956 13.3827025,2.54857623 C14.3742962,2.9671729 15.2440852,3.55046334 15.9920694,4.29844756 C16.7400537,5.04643178 17.3233441,5.9145052 17.7419408,6.90266784 C18.1605374,7.89083047 18.3698358,8.95104663 18.3698358,10.0833163 C18.3698358,11.215586 18.1605374,12.2758022 17.7419408,13.2639648 C17.3233441,14.2521274 16.7400537,15.1202009 15.9920694,15.8681851 C15.2440852,16.6161693 14.3760118,17.2011753 13.3878492,17.6232031 C12.3996865,18.0452309 11.3394704,18.2562448 10.2072007,18.2562448 Z M7.65444721,13.6242324 L12.7496608,13.6242324 C13.0584616,13.6242324 13.3003556,13.5384544 13.4753427,13.3668984 C13.6503299,13.1953424 13.7378234,12.9585951 13.7378234,12.6566565 L13.7378234,7.49968276 C13.7378234,7.19774418 13.6503299,6.96099688 13.4753427,6.78944087 C13.3003556,6.61788486 13.0584616,6.53210685 12.7496608,6.53210685 L7.65444721,6.53210685 C7.33878414,6.53210685 7.09345904,6.61788486 6.91847191,6.78944087 C6.74348478,6.96099688 6.65599121,7.19774418 6.65599121,7.49968276 L6.65599121,12.6566565 C6.65599121,12.9585951 6.74348478,13.1953424 6.91847191,13.3668984 C7.09345904,13.5384544 7.33878414,13.6242324 7.65444721,13.6242324 Z' id='shape' fill='%23FF3B30' fill-rule='nonzero'%3E%3C/path%3E %3C/g%3E %3C/svg%3E");
173
+ height: 21px;
174
+ }
175
  /* list */
176
  ol:not(.options), ul:not(.options) {
177
  padding-inline-start: 2em !important;
178
  }
179
 
180
+ /* 亮色(默认) */
181
+ #chuanhu_chatbot {
182
+ background-color: var(--chatbot-color-light) !important;
183
+ color: #000000 !important;
184
+ }
185
+ [data-testid = "bot"] {
186
+ background-color: #FFFFFF !important;
187
+ }
188
+ [data-testid = "user"] {
189
+ background-color: #95EC69 !important;
190
+ }
191
+ /* 暗色 */
192
+ .dark #chuanhu_chatbot {
193
+ background-color: var(--chatbot-color-dark) !important;
194
+ color: #FFFFFF !important;
195
+ }
196
+ .dark [data-testid = "bot"] {
197
+ background-color: #2C2C2C !important;
198
+ }
199
+ .dark [data-testid = "user"] {
200
+ background-color: #26B561 !important;
201
+ }
202
+
203
+ /* 屏幕宽度大于等于500px的设备 */
204
+ /* update on 2023.4.8: 高度的细致调整已写入JavaScript */
205
+ @media screen and (min-width: 500px) {
206
  #chuanhu_chatbot {
207
+ height: calc(100vh - 200px);
 
208
  }
209
+ #chuanhu_chatbot .wrap {
210
+ max-height: calc(100vh - 200px - var(--line-sm)*1rem - 2*var(--block-label-margin) );
 
 
 
211
  }
212
  }
213
+ /* 屏幕宽度小于500px的设备 */
214
+ @media screen and (max-width: 499px) {
215
  #chuanhu_chatbot {
216
+ height: calc(100vh - 140px);
 
217
  }
218
+ #chuanhu_chatbot .wrap {
219
+ max-height: calc(100vh - 140px - var(--line-sm)*1rem - 2*var(--block-label-margin) );
220
  }
221
+ [data-testid = "bot"] {
222
+ max-width: 98% !important;
223
  }
224
+ #app_title h1{
225
+ letter-spacing: -1px; font-size: 22px;
226
  }
227
  }
228
  /* 对话气泡 */
assets/custom.js CHANGED
@@ -1,70 +1,224 @@
 
1
  // custom javascript here
 
2
  const MAX_HISTORY_LENGTH = 32;
3
 
4
  var key_down_history = [];
5
  var currentIndex = -1;
6
  var user_input_ta;
7
 
 
 
8
  var ga = document.getElementsByTagName("gradio-app");
9
  var targetNode = ga[0];
10
- var observer = new MutationObserver(function(mutations) {
 
 
 
11
  for (var i = 0; i < mutations.length; i++) {
12
- if (mutations[i].addedNodes.length) {
13
- var user_input_tb = document.getElementById('user_input_tb');
14
- if (user_input_tb) {
15
- // 监听到user_input_tb被添加到DOM树中
16
- // 这里可以编写元素加载完成后需要执行的代码
17
- user_input_ta = user_input_tb.querySelector("textarea");
18
- if (user_input_ta){
19
- observer.disconnect(); // 停止监听
20
- // textarea 上监听 keydown 事件
21
- user_input_ta.addEventListener("keydown", function (event) {
22
- var value = user_input_ta.value.trim();
23
- // 判断按下的是否为方向键
24
- if (event.code === 'ArrowUp' || event.code === 'ArrowDown') {
25
- // 如果按下的是方向键,且输入框中有内容,且历史记录中没有该内容,则不执行操作
26
- if(value && key_down_history.indexOf(value) === -1)
27
- return;
28
- // 对于需要响应的动作,阻止默认行为。
29
- event.preventDefault();
30
- var length = key_down_history.length;
31
- if(length === 0) {
32
- currentIndex = -1; // 如果历史记录为空,直接将当前选中的记录重置
33
- return;
34
- }
35
- if (currentIndex === -1) {
36
- currentIndex = length;
37
- }
38
- if (event.code === 'ArrowUp' && currentIndex > 0) {
39
- currentIndex--;
40
- user_input_ta.value = key_down_history[currentIndex];
41
- } else if (event.code === 'ArrowDown' && currentIndex < length - 1) {
42
- currentIndex++;
43
- user_input_ta.value = key_down_history[currentIndex];
44
- }
45
- user_input_ta.selectionStart = user_input_ta.value.length;
46
- user_input_ta.selectionEnd = user_input_ta.value.length;
47
- const input_event = new InputEvent("input", {bubbles: true, cancelable: true});
48
- user_input_ta.dispatchEvent(input_event);
49
- }else if(event.code === "Enter") {
50
- if (value) {
51
- currentIndex = -1;
52
- if(key_down_history.indexOf(value) === -1){
53
- key_down_history.push(value);
54
- if (key_down_history.length > MAX_HISTORY_LENGTH) {
55
- key_down_history.shift();
56
- }
57
- }
 
 
 
 
58
  }
59
  }
60
- });
61
- break;
62
  }
63
- }
 
 
64
  }
65
- }
66
- });
 
 
 
 
67
 
68
- // 监听目标节点的子节点列表是否发生变化
69
- observer.observe(targetNode, { childList: true , subtree: true });
 
 
70
 
 
 
 
 
1
+
2
  // custom javascript here
3
+
4
  const MAX_HISTORY_LENGTH = 32;
5
 
6
  var key_down_history = [];
7
  var currentIndex = -1;
8
  var user_input_ta;
9
 
10
+ var gradioContainer = null;
11
+ var user_input_ta = null;
12
+ var user_input_tb = null;
13
+ var userInfoDiv = null;
14
+ var appTitleDiv = null;
15
+ var chatbot = null;
16
+ var apSwitch = null;
17
+
18
  var ga = document.getElementsByTagName("gradio-app");
19
  var targetNode = ga[0];
20
+ var isInIframe = (window.self !== window.top);
21
+
22
+ // gradio 页面加载好了么??? 我能动你的元素了么??
23
+ function gradioLoaded(mutations) {
24
  for (var i = 0; i < mutations.length; i++) {
25
+ if (mutations[i].addedNodes.length) {
26
+ gradioContainer = document.querySelector(".gradio-container");
27
+ user_input_tb = document.getElementById('user_input_tb');
28
+ userInfoDiv = document.getElementById("user_info");
29
+ appTitleDiv = document.getElementById("app_title");
30
+ chatbot = document.querySelector('#chuanhu_chatbot');
31
+ apSwitch = document.querySelector('.apSwitch input[type="checkbox"]');
32
+
33
+ if (gradioContainer && apSwitch) { // gradioCainter 加载出来了没?
34
+ adjustDarkMode();
35
+ }
36
+ if (user_input_tb) { // user_input_tb 加载出来了没?
37
+ selectHistory();
38
+ }
39
+ if (userInfoDiv && appTitleDiv) { // userInfoDiv 和 appTitleDiv 加载出来了没?
40
+ setTimeout(showOrHideUserInfo(), 2000);
41
+ }
42
+ if (chatbot) { // chatbot 加载出来了没?
43
+ setChatbotHeight()
44
+ }
45
+ }
46
+ }
47
+ }
48
+
49
+ function selectHistory() {
50
+ user_input_ta = user_input_tb.querySelector("textarea");
51
+ if (user_input_ta) {
52
+ observer.disconnect(); // 停止监听
53
+ // textarea 上监听 keydown 事件
54
+ user_input_ta.addEventListener("keydown", function (event) {
55
+ var value = user_input_ta.value.trim();
56
+ // 判断按下的是否为方向键
57
+ if (event.code === 'ArrowUp' || event.code === 'ArrowDown') {
58
+ // 如果按下的是方向键,且输入框中有内容,且历史记录中没有该内容,则不执行操作
59
+ if (value && key_down_history.indexOf(value) === -1)
60
+ return;
61
+ // 对于需要响应的动作,阻止默认行为。
62
+ event.preventDefault();
63
+ var length = key_down_history.length;
64
+ if (length === 0) {
65
+ currentIndex = -1; // 如果历史记录为空,直接将当前选中的记录重置
66
+ return;
67
+ }
68
+ if (currentIndex === -1) {
69
+ currentIndex = length;
70
+ }
71
+ if (event.code === 'ArrowUp' && currentIndex > 0) {
72
+ currentIndex--;
73
+ user_input_ta.value = key_down_history[currentIndex];
74
+ } else if (event.code === 'ArrowDown' && currentIndex < length - 1) {
75
+ currentIndex++;
76
+ user_input_ta.value = key_down_history[currentIndex];
77
+ }
78
+ user_input_ta.selectionStart = user_input_ta.value.length;
79
+ user_input_ta.selectionEnd = user_input_ta.value.length;
80
+ const input_event = new InputEvent("input", { bubbles: true, cancelable: true });
81
+ user_input_ta.dispatchEvent(input_event);
82
+ } else if (event.code === "Enter") {
83
+ if (value) {
84
+ currentIndex = -1;
85
+ if (key_down_history.indexOf(value) === -1) {
86
+ key_down_history.push(value);
87
+ if (key_down_history.length > MAX_HISTORY_LENGTH) {
88
+ key_down_history.shift();
89
  }
90
  }
91
+ }
 
92
  }
93
+ });
94
+ }
95
+ }
96
+
97
+ function toggleUserInfoVisibility(shouldHide) {
98
+ if (userInfoDiv) {
99
+ if (shouldHide) {
100
+ userInfoDiv.classList.add("hideK");
101
+ } else {
102
+ userInfoDiv.classList.remove("hideK");
103
  }
104
+ }
105
+ }
106
+ function showOrHideUserInfo() {
107
+ var sendBtn = document.getElementById("submit_btn");
108
+
109
+ // Bind mouse/touch events to show/hide user info
110
+ appTitleDiv.addEventListener("mouseenter", function () {
111
+ toggleUserInfoVisibility(false);
112
+ });
113
+ userInfoDiv.addEventListener("mouseenter", function () {
114
+ toggleUserInfoVisibility(false);
115
+ });
116
+ sendBtn.addEventListener("mouseenter", function () {
117
+ toggleUserInfoVisibility(false);
118
+ });
119
+
120
+ appTitleDiv.addEventListener("mouseleave", function () {
121
+ toggleUserInfoVisibility(true);
122
+ });
123
+ userInfoDiv.addEventListener("mouseleave", function () {
124
+ toggleUserInfoVisibility(true);
125
+ });
126
+ sendBtn.addEventListener("mouseleave", function () {
127
+ toggleUserInfoVisibility(true);
128
+ });
129
+
130
+ appTitleDiv.ontouchstart = function () {
131
+ toggleUserInfoVisibility(false);
132
+ };
133
+ userInfoDiv.ontouchstart = function () {
134
+ toggleUserInfoVisibility(false);
135
+ };
136
+ sendBtn.ontouchstart = function () {
137
+ toggleUserInfoVisibility(false);
138
+ };
139
+
140
+ appTitleDiv.ontouchend = function () {
141
+ setTimeout(function () {
142
+ toggleUserInfoVisibility(true);
143
+ }, 3000);
144
+ };
145
+ userInfoDiv.ontouchend = function () {
146
+ setTimeout(function () {
147
+ toggleUserInfoVisibility(true);
148
+ }, 3000);
149
+ };
150
+ sendBtn.ontouchend = function () {
151
+ setTimeout(function () {
152
+ toggleUserInfoVisibility(true);
153
+ }, 3000); // Delay 1 second to hide user info
154
+ };
155
+
156
+ // Hide user info after 2 second
157
+ setTimeout(function () {
158
+ toggleUserInfoVisibility(true);
159
+ }, 2000);
160
+ }
161
 
162
+ function toggleDarkMode(isEnabled) {
163
+ if (isEnabled) {
164
+ gradioContainer.classList.add("dark");
165
+ document.body.style.setProperty("background-color", "var(--neutral-950)", "important");
166
+ } else {
167
+ gradioContainer.classList.remove("dark");
168
+ document.body.style.backgroundColor = "";
169
+ }
170
+ }
171
+ function adjustDarkMode() {
172
+ const darkModeQuery = window.matchMedia("(prefers-color-scheme: dark)");
173
 
174
+ // 根据当前颜色模式设置初始状态
175
+ apSwitch.checked = darkModeQuery.matches;
176
+ toggleDarkMode(darkModeQuery.matches);
177
+ // 监听颜色模式变化
178
+ darkModeQuery.addEventListener("change", (e) => {
179
+ apSwitch.checked = e.matches;
180
+ toggleDarkMode(e.matches);
181
+ });
182
+ // apSwitch = document.querySelector('.apSwitch input[type="checkbox"]');
183
+ apSwitch.addEventListener("change", (e) => {
184
+ toggleDarkMode(e.target.checked);
185
+ });
186
+ }
187
+
188
+ function setChatbotHeight() {
189
+ const screenWidth = window.innerWidth;
190
+ const statusDisplay = document.querySelector('#status_display');
191
+ const statusDisplayHeight = statusDisplay ? statusDisplay.offsetHeight : 0;
192
+ const wrap = chatbot.querySelector('.wrap');
193
+ const vh = window.innerHeight * 0.01;
194
+ document.documentElement.style.setProperty('--vh', `${vh}px`);
195
+ if (isInIframe) {
196
+ chatbot.style.height = `700px`;
197
+ wrap.style.maxHeight = `calc(700px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`
198
+ } else {
199
+ if (screenWidth <= 320) {
200
+ chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 150}px)`;
201
+ wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 150}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`;
202
+ } else if (screenWidth <= 499) {
203
+ chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 100}px)`;
204
+ wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 100}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`;
205
+ } else {
206
+ chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 160}px)`;
207
+ wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 160}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`;
208
+ }
209
+ }
210
+ }
211
+
212
+ // 监视页面内部 DOM 变动
213
+ var observer = new MutationObserver(function (mutations) {
214
+ gradioLoaded(mutations);
215
+ });
216
+ observer.observe(targetNode, { childList: true, subtree: true });
217
+
218
+ // 监视页面变化
219
+ window.addEventListener("DOMContentLoaded", function () {
220
+ isInIframe = (window.self !== window.top);
221
+ });
222
+ window.addEventListener('resize', setChatbotHeight);
223
+ window.addEventListener('scroll', setChatbotHeight);
224
+ window.matchMedia("(prefers-color-scheme: dark)").addEventListener("change", adjustDarkMode);
config_example.json ADDED
@@ -0,0 +1,32 @@
1
+ {
2
+ // 你的OpenAI API Key,一般必填,
3
+ // 若缺省填为 "openai_api_key": "" 则必须再在图形界面中填入API Key
4
+ "openai_api_key": "",
5
+ "language": "auto",
6
+ // 如果使用代理,请取消注释下面的两行,并替换代理URL
7
+ // "https_proxy": "http://127.0.0.1:1079",
8
+ // "http_proxy": "http://127.0.0.1:1079",
9
+ "users": [], // 用户列表,[[用户名1, 密码1], [用户名2, 密码2], ...]
10
+ "local_embedding": false, //是否在本地编制索引
11
+ "default_model": "gpt-3.5-turbo", // 默认模型
12
+ "advance_docs": {
13
+ "pdf": {
14
+ // 是否认为PDF是双栏的
15
+ "two_column": false,
16
+ // 是否使用OCR识别PDF中的公式
17
+ "formula_ocr": true
18
+ }
19
+ },
20
+ // 是否多个API Key轮换使用
21
+ "multi_api_key": false,
22
+ "api_key_list": [
23
+ "sk-xxxxxxxxxxxxxxxxxxxxxxxx1",
24
+ "sk-xxxxxxxxxxxxxxxxxxxxxxxx2",
25
+ "sk-xxxxxxxxxxxxxxxxxxxxxxxx3"
26
+ ],
27
+ // 如果使用自定义端口、自定义ip,请取消注释并替换对应内容
28
+ // "server_name": "0.0.0.0",
29
+ // "server_port": 7860,
30
+ // 如果要share到gradio,设置为true
31
+ // "share": false,
32
+ }
configs/ds_config_chatbot.json ADDED
@@ -0,0 +1,17 @@
1
+ {
2
+ "fp16": {
3
+ "enabled": false
4
+ },
5
+ "bf16": {
6
+ "enabled": true
7
+ },
8
+ "comms_logger": {
9
+ "enabled": false,
10
+ "verbose": false,
11
+ "prof_all": false,
12
+ "debug": false
13
+ },
14
+ "steps_per_print": 20000000000000000,
15
+ "train_micro_batch_size_per_gpu": 1,
16
+ "wall_clock_breakdown": false
17
+ }
locale/en_US.json ADDED
@@ -0,0 +1,73 @@
1
+ {
2
+ "未命名对话历史记录": "Unnamed Dialog History",
3
+ "在这里输入": "Type in here",
4
+ "🧹 新的对话": "🧹 New Dialogue",
5
+ "🔄 重新生成": "🔄 Regeneration",
6
+ "🗑️ 删除最旧对话": "🗑️ Delete oldest dialog",
7
+ "🗑️ 删除最新对话": "🗑️ Delete latest dialog",
8
+ "模型": "Model",
9
+ "多账号模式已开启,无需输入key,可直接开始对话": "Multi-account mode is enabled, no need to enter key, you can start the dialogue directly",
10
+ "**发送消息** 或 **提交key** 以显示额度": "**Send message** or **Submit key** to display credit",
11
+ "选择模型": "Select Model",
12
+ "选择LoRA模型": "Select LoRA Model",
13
+ "实时传输回答": "Stream output",
14
+ "单轮对话": "Single-turn dialogue",
15
+ "使用在线搜索": "Use online search",
16
+ "选择回复语言(针对搜索&索引功能)": "Select reply language (for search & index)",
17
+ "上传索引文件": "Upload index file",
18
+ "双栏pdf": "Two-column pdf",
19
+ "识别公式": "formula OCR",
20
+ "在这里输入System Prompt...": "Type in System Prompt here...",
21
+ "加载Prompt模板": "Load Prompt Template",
22
+ "选择Prompt模板集合文件": "Select Prompt Template Collection File",
23
+ "🔄 刷新": "🔄 Refresh",
24
+ "从Prompt模板中加载": "Load from Prompt Template",
25
+ "保存/加载": "Save/Load",
26
+ "保存/加载对话历史记录": "Save/Load Dialog History",
27
+ "从列表中加载对话": "Load dialog from list",
28
+ "设置文件名: 默认为.json,可选为.md": "Set file name: default is .json, optional is .md",
29
+ "设置保存文件名": "Set save file name",
30
+ "对话历史记录": "Dialog History",
31
+ "💾 保存对话": "💾 Save Dialog",
32
+ "📝 导出为Markdown": "📝 Export as Markdown",
33
+ "默认保存于history文件夹": "Default save in history folder",
34
+ "高级": "Advanced",
35
+ "# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置": "# ⚠️ Caution: Changes require care. ⚠️\n\nIf unable to use, restore default settings.",
36
+ "参数": "Parameters",
37
+ "在这里输入停止符,用英文逗号隔开...": "Type in stop token here, separated by comma...",
38
+ "用于定位滥用行为": "Used to locate abuse",
39
+ "用户名": "Username",
40
+ "网络设置": "Network Settings",
41
+ "在这里输入API-Host...": "Type in API-Host here...",
42
+ "🔄 切换API地址": "🔄 Switch API Address",
43
+ "在这里输入代理地址...": "Type in proxy address here...",
44
+ "代理地址(示例:http://127.0.0.1:10809)": "Proxy address (example: http://127.0.0.1:10809)",
45
+ "🔄 设置代理地址": "🔄 Set Proxy Address",
46
+ "🔙 恢复默认设置": "🔙 Restore Default Settings",
47
+ "川虎Chat 🚀": "Chuanhu Chat 🚀",
48
+ "开始实时传输回答……": "Start streaming output...",
49
+ "Token 计数: ": "Token Count: ",
50
+ ",本次对话累计消耗了 ": ",Total cost for this dialogue is ",
51
+ "**获取API使用情况失败**": "**Failed to get API usage**",
52
+ "**本月使用金额** ": "**Monthly usage** ",
53
+ "获取API使用情况失败:": "Failed to get API usage:",
54
+ "API密钥更改为了": "The API key is changed to",
55
+ "JSON解析错误,收到的内容: ": "JSON parsing error, received content: ",
56
+ "模型设置为了:": "Model is set to: ",
57
+ "☹️发生了错误:": "☹️Error: ",
58
+ "获取对话时发生错误,请查看后台日志": "Error occurred when getting dialogue, check the background log",
59
+ "请检查网络连接,或者API-Key是否有效。": "Check the network connection or whether the API-Key is valid.",
60
+ "连接超时,无法获取对话。": "Connection timed out, unable to get dialogue.",
61
+ "读取超时,无法获取对话。": "Read timed out, unable to get dialogue.",
62
+ "代理错误,无法获取对话。": "Proxy error, unable to get dialogue.",
63
+ "SSL错误,无法获取对话。": "SSL error, unable to get dialogue.",
64
+ "API key为空,请检查是否输入正确。": "API key is empty, check whether it is entered correctly.",
65
+ "请输入对话内容。": "Enter the content of the conversation.",
66
+ "账单信息不适用": "Billing information is not applicable",
67
+ "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发<br />访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "developor: Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) and [明昭MZhao](https://space.bilibili.com/24807452)\n\nDownload latest code from [GitHub](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
68
+ "切换亮暗色主题": "Switch light/dark theme",
69
+ "您的IP区域:未知。": "Your IP region: Unknown.",
70
+ "获取IP地理位置失败。原因:": "Failed to get IP location. Reason: ",
71
+ "。你仍然可以使用聊天功能。": ". You can still use the chat function.",
72
+ "您的IP区域:": "Your IP region: "
73
+ }
locale/extract_locale.py ADDED
@@ -0,0 +1,26 @@
1
+ import os
2
+ import json
3
+ import re
4
+
5
+ # Define regular expression patterns
6
+ pattern = r'i18n\((\"{3}.*?\"{3}|\".*?\")\)'
7
+
8
+ # Load the .py file
9
+ with open('ChuanhuChatbot.py', 'r', encoding='utf-8') as f:
10
+ contents = f.read()
11
+
12
+ # Load the .py files in the modules folder
13
+ for filename in os.listdir("modules"):
14
+ if filename.endswith(".py"):
15
+ with open(os.path.join("modules", filename), "r", encoding="utf-8") as f:
16
+ contents += f.read()
17
+
18
+ # Matching with regular expressions
19
+ matches = re.findall(pattern, contents, re.DOTALL)
20
+
21
+ # Convert to key/value pairs
22
+ data = {match.strip('()"'): '' for match in matches}
23
+
24
+ # Save as a JSON file
25
+ with open('labels.json', 'w', encoding='utf-8') as f:
26
+ json.dump(data, f, ensure_ascii=False, indent=4)
locale/ja_JP.json ADDED
@@ -0,0 +1,73 @@
1
+ {
2
+ "未命名对话历史记录": "名無しの会話履歴",
3
+ "在这里输入": "ここに入力",
4
+ "🧹 新的对话": "🧹 新しい会話",
5
+ "🔄 重新生成": "🔄 再生成",
6
+ "🗑️ 删除最旧对话": "🗑️ 最古の会話削除",
7
+ "🗑️ 删除最新对话": "🗑️ 最新の会話削除",
8
+ "模型": "LLMモデル",
9
+ "多账号模式已开启,无需输入key,可直接开始对话": "複数アカウントモードがオンになっています。キーを入力する必要はありません。会話を開始できます",
10
+ "**发送消息** 或 **提交key** 以显示额度": "**メッセージを送信** または **キーを送信** して、クレジットを表示します",
11
+ "选择模型": "LLMモデルを選択",
12
+ "选择LoRA模型": "LoRAモデルを選択",
13
+ "实时传输回答": "ストリーム出力",
14
+ "单轮对话": "単発会話",
15
+ "使用在线搜索": "オンライン検索を使用",
16
+ "选择回复语言(针对搜索&索引功能)": "回答言語を選択(検索とインデックス機能に対して)",
17
+ "上传索引文件": "インデックスファイルをアップロード",
18
+ "双栏pdf": "2カラムpdf",
19
+ "识别公式": "formula OCR",
20
+ "在这里输入System Prompt...": "System Promptを入力してください...",
21
+ "加载Prompt模板": "Promptテンプレートを読込",
22
+ "选择Prompt模板集合文件": "Promptテンプレートコレクションを選択",
23
+ "🔄 刷新": "🔄 更新",
24
+ "从Prompt模板中加载": "Promptテンプレートから読込",
25
+ "保存/加载": "保存/読込",
26
+ "保存/加载对话历史记录": "会話履歴を保存/読込",
27
+ "从列表中加载对话": "リストから会話を読込",
28
+ "设置文件名: 默认为.json,可选为.md": "ファイル名を設定: デフォルトは.json、.mdを選択できます",
29
+ "设置保存文件名": "保存ファイル名を設定",
30
+ "对话历史记录": "会話履歴",
31
+ "💾 保存对话": "💾 会話を保存",
32
+ "📝 导出为Markdown": "📝 Markdownでエクスポート",
33
+ "默认保存于history文件夹": "デフォルトでhistoryフォルダに保存されます",
34
+ "高级": "Advanced",
35
+ "# ⚠️ 务必谨慎更改 ⚠️\n\n如果无法使用请恢复默认设置": "# ⚠️ 変更には慎重に ⚠️\n\nもし動作しない場合は、デフォルト設定に戻してください。",
36
+ "参数": "パラメータ",
37
+ "在这里输入停止符,用英文逗号隔开...": "ここにストップ文字を英語のカンマで区切って入力してください...",
38
+ "用于定位滥用行为": "不正行為を特定するために使用されます",
39
+ "用户名": "ユーザー名",
40
+ "网络设置": "ネットワーク設定",
41
+ "在这里输入API-Host...": "API-Hostを入力してください...",
42
+ "🔄 切换API地址": "🔄 APIアドレスを切り替え",
43
+ "在这里输入代理地址...": "プロキシアドレスを入力してください...",
44
+ "代理地址(示例:http://127.0.0.1:10809)": "プロキシアドレス(例:http://127.0.0.1:10809)",
45
+ "🔄 设置代理地址": "🔄 プロキシアドレスを設定",
46
+ "🔙 恢复默认设置": "🔙 デフォルト設定に戻す",
47
+ "川虎Chat 🚀": "川虎Chat 🚀",
48
+ "开始实时传输回答……": "ストリーム出力開始……",
49
+ "Token 计数: ": "Token数: ",
50
+ ",本次对话累计消耗了 ": ", 今の会話で消費合計 ",
51
+ "**获取API使用情况失败**": "**API使用状況の取得に失敗しました**",
52
+ "**本月使用金额** ": "**今月の使用料金** ",
53
+ "获取API使用情况失败:": "API使用状況の取得に失敗しました:",
54
+ "API密钥更改为了": "APIキーが変更されました",
55
+ "JSON解析错误,收到的内容: ": "JSON解析エラー、受信内容: ",
56
+ "模型设置为了:": "LLMモデルを設定しました: ",
57
+ "☹️发生了错误:": "エラーが発生しました: ",
58
+ "获取对话时发生错误,请查看后台日志": "会話取得時にエラー発生、あとのログを確認してください",
59
+ "请检查网络连接,或者API-Key是否有效。": "ネットワーク接続を確認するか、APIキーが有効かどうかを確認してください。",
60
+ "连接超时,无法获取对话。": "接続タイムアウト、会話を取得できません。",
61
+ "读取超时,无法获取对话。": "読み込みタイムアウト、会話を取得できません。",
62
+ "代理错误,无法获取对话。": "プロキシエラー、会話を取得できません。",
63
+ "SSL错误,无法获取对话。": "SSLエラー、会話を取得できません。",
64
+ "API key为空,请检查是否输入正确。": "APIキーが入力されていません。正しく入力されているか確認してください。",
65
+ "请输入对话内容。": "会話内容を入力してください。",
66
+ "账单信息不适用": "課金情報は対象外です",
67
+ "由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发<br />访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本": "開発:Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) と [明昭MZhao](https://space.bilibili.com/24807452)\n\n最新コードは川虎Chatのサイトへ [GitHubプロジェクト](https://github.com/GaiZhenbiao/ChuanhuChatGPT)",
68
+ "切换亮暗色主题": "テーマの明暗切替",
69
+ "您的IP区域:未知。": "あなたのIPアドレス地域:不明",
70
+ "获取IP地理位置失败。原因:": "IPアドレス地域の取得に失敗しました。理由:",
71
+ "。你仍然可以使用聊天功能。": "。あなたはまだチャット機能を使用できます。",
72
+ "您的IP区域:": "あなたのIPアドレス地域:"
73
+ }
modules/__init__.py ADDED
File without changes
modules/base_model.py ADDED
@@ -0,0 +1,550 @@
1
+ from __future__ import annotations
2
+ from typing import TYPE_CHECKING, List
3
+
4
+ import logging
5
+ import json
6
+ import commentjson as cjson
7
+ import os
8
+ import sys
9
+ import requests
10
+ import urllib3
11
+ import traceback
12
+
13
+ from tqdm import tqdm
14
+ import colorama
15
+ from duckduckgo_search import ddg
16
+ import asyncio
17
+ import aiohttp
18
+ from enum import Enum
19
+
20
+ from .presets import *
21
+ from .llama_func import *
22
+ from .utils import *
23
+ from . import shared
24
+ from .config import retrieve_proxy
25
+
26
+
27
+ class ModelType(Enum):
28
+ Unknown = -1
29
+ OpenAI = 0
30
+ ChatGLM = 1
31
+ LLaMA = 2
32
+ XMBot = 3
33
+
34
+ @classmethod
35
+ def get_type(cls, model_name: str):
36
+ model_type = None
37
+ model_name_lower = model_name.lower()
38
+ if "gpt" in model_name_lower:
39
+ model_type = ModelType.OpenAI
40
+ elif "chatglm" in model_name_lower:
41
+ model_type = ModelType.ChatGLM
42
+ elif "llama" in model_name_lower or "alpaca" in model_name_lower:
43
+ model_type = ModelType.LLaMA
44
+ elif "xmbot" in model_name_lower:
45
+ model_type = ModelType.XMBot
46
+ else:
47
+ model_type = ModelType.Unknown
48
+ return model_type
49
+
50
+
51
+ class BaseLLMModel:
52
+ def __init__(
53
+ self,
54
+ model_name,
55
+ system_prompt="",
56
+ temperature=1.0,
57
+ top_p=1.0,
58
+ n_choices=1,
59
+ stop=None,
60
+ max_generation_token=None,
61
+ presence_penalty=0,
62
+ frequency_penalty=0,
63
+ logit_bias=None,
64
+ user="",
65
+ ) -> None:
66
+ self.history = []
67
+ self.all_token_counts = []
68
+ self.model_name = model_name
69
+ self.model_type = ModelType.get_type(model_name)
70
+ try:
71
+ self.token_upper_limit = MODEL_TOKEN_LIMIT[model_name]
72
+ except KeyError:
73
+ self.token_upper_limit = DEFAULT_TOKEN_LIMIT
74
+ self.interrupted = False
75
+ self.system_prompt = system_prompt
76
+ self.api_key = None
77
+ self.need_api_key = False
78
+ self.single_turn = False
79
+
80
+ self.temperature = temperature
81
+ self.top_p = top_p
82
+ self.n_choices = n_choices
83
+ self.stop_sequence = stop
84
+ self.max_generation_token = None
85
+ self.presence_penalty = presence_penalty
86
+ self.frequency_penalty = frequency_penalty
87
+ self.logit_bias = logit_bias
88
+ self.user_identifier = user
89
+
90
+ def get_answer_stream_iter(self):
91
+ """stream predict, need to be implemented
92
+ conversations are stored in self.history, with the most recent question, in OpenAI format
93
+ should return a generator that yields the answer text generated so far (str)
94
+ """
95
+ logging.warning("stream predict not implemented, using at once predict instead")
96
+ response, _ = self.get_answer_at_once()
97
+ yield response
98
+
99
+ def get_answer_at_once(self):
100
+ """predict at once, need to be implemented
101
+ conversations are stored in self.history, with the most recent question, in OpenAI format
102
+ Should return:
103
+ the answer (str)
104
+ total token count (int)
105
+ """
106
+ logging.warning("at once predict not implemented, using stream predict instead")
107
+ response_iter = self.get_answer_stream_iter()
108
+ count = 0
109
+ for response in response_iter:
110
+ count += 1
111
+ return response, sum(self.all_token_counts) + count
112
+
113
+ def billing_info(self):
114
+ """get billing infomation, inplement if needed"""
115
+ logging.warning("billing info not implemented, using default")
116
+ return BILLING_NOT_APPLICABLE_MSG
117
+
118
+ def count_token(self, user_input):
119
+ """get token count from input, implement if needed"""
120
+ logging.warning("token count not implemented, using default")
121
+ return len(user_input)
122
+
123
+ def stream_next_chatbot(self, inputs, chatbot, fake_input=None, display_append=""):
124
+ def get_return_value():
125
+ return chatbot, status_text
126
+
127
+ status_text = i18n("开始实时传输回答……")
128
+ if fake_input:
129
+ chatbot.append((fake_input, ""))
130
+ else:
131
+ chatbot.append((inputs, ""))
132
+
133
+ user_token_count = self.count_token(inputs)
134
+ self.all_token_counts.append(user_token_count)
135
+ logging.debug(f"输入token计数: {user_token_count}")
136
+
137
+ stream_iter = self.get_answer_stream_iter()
138
+
139
+ for partial_text in stream_iter:
140
+ chatbot[-1] = (chatbot[-1][0], partial_text + display_append)
141
+ self.all_token_counts[-1] += 1
142
+ status_text = self.token_message()
143
+ yield get_return_value()
144
+ if self.interrupted:
145
+ self.recover()
146
+ break
147
+ self.history.append(construct_assistant(partial_text))
148
+
149
+ def next_chatbot_at_once(self, inputs, chatbot, fake_input=None, display_append=""):
150
+ if fake_input:
151
+ chatbot.append((fake_input, ""))
152
+ else:
153
+ chatbot.append((inputs, ""))
154
+ if fake_input is not None:
155
+ user_token_count = self.count_token(fake_input)
156
+ else:
157
+ user_token_count = self.count_token(inputs)
158
+ self.all_token_counts.append(user_token_count)
159
+ ai_reply, total_token_count = self.get_answer_at_once()
160
+ self.history.append(construct_assistant(ai_reply))
161
+ if fake_input is not None:
162
+ self.history[-2] = construct_user(fake_input)
163
+ chatbot[-1] = (chatbot[-1][0], ai_reply + display_append)
164
+ if fake_input is not None:
165
+ self.all_token_counts[-1] += count_token(construct_assistant(ai_reply))
166
+ else:
167
+ self.all_token_counts[-1] = total_token_count - sum(self.all_token_counts)
168
+ status_text = self.token_message()
169
+ return chatbot, status_text
170
+
171
+ def handle_file_upload(self, files, chatbot):
172
+ """if the model accepts multi modal input, implement this function"""
173
+ status = gr.Markdown.update()
174
+ if files:
175
+ construct_index(self.api_key, file_src=files)
176
+ status = "索引构建完成"
177
+ return gr.Files.update(), chatbot, status
178
+
179
+ def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot):
180
+ fake_inputs = None
181
+ display_append = []
182
+ limited_context = False
183
+ fake_inputs = real_inputs
184
+ if files:
185
+ from llama_index.indices.vector_store.base_query import GPTVectorStoreIndexQuery
186
+ from llama_index.indices.query.schema import QueryBundle
187
+ from langchain.embeddings.huggingface import HuggingFaceEmbeddings
188
+ from langchain.chat_models import ChatOpenAI
189
+ from llama_index import (
190
+ GPTSimpleVectorIndex,
191
+ ServiceContext,
192
+ LangchainEmbedding,
193
+ OpenAIEmbedding,
194
+ )
195
+ limited_context = True
196
+ msg = "加载索引中……"
197
+ logging.info(msg)
198
+ # yield chatbot + [(inputs, "")], msg
199
+ index = construct_index(self.api_key, file_src=files)
200
+ assert index is not None, "获取索引失败"
201
+ msg = "索引获取成功,生成回答中……"
202
+ logging.info(msg)
203
+ if local_embedding or self.model_type != ModelType.OpenAI:
204
+ embed_model = LangchainEmbedding(HuggingFaceEmbeddings())
205
+ else:
206
+ embed_model = OpenAIEmbedding()
207
+ # yield chatbot + [(inputs, "")], msg
208
+ with retrieve_proxy():
209
+ prompt_helper = PromptHelper(
210
+ max_input_size=4096,
211
+ num_output=5,
212
+ max_chunk_overlap=20,
213
+ chunk_size_limit=600,
214
+ )
215
+ from llama_index import ServiceContext
216
+
217
+ service_context = ServiceContext.from_defaults(
218
+ prompt_helper=prompt_helper, embed_model=embed_model
219
+ )
220
+ query_object = GPTVectorStoreIndexQuery(
221
+ index.index_struct,
222
+ service_context=service_context,
223
+ similarity_top_k=5,
224
+ vector_store=index._vector_store,
225
+ docstore=index._docstore,
226
+ )
227
+ query_bundle = QueryBundle(real_inputs)
228
+ nodes = query_object.retrieve(query_bundle)
229
+ reference_results = [n.node.text for n in nodes]
230
+ reference_results = add_source_numbers(reference_results, use_source=False)
231
+ display_append = add_details(reference_results)
232
+ display_append = "\n\n" + "".join(display_append)
233
+ real_inputs = (
234
+ replace_today(PROMPT_TEMPLATE)
235
+ .replace("{query_str}", real_inputs)
236
+ .replace("{context_str}", "\n\n".join(reference_results))
237
+ .replace("{reply_language}", reply_language)
238
+ )
239
+ elif use_websearch:
240
+ limited_context = True
241
+ search_results = ddg(real_inputs, max_results=5)
242
+ reference_results = []
243
+ for idx, result in enumerate(search_results):
244
+ logging.debug(f"搜索结果{idx + 1}:{result}")
245
+ domain_name = urllib3.util.parse_url(result["href"]).host
246
+ reference_results.append([result["body"], result["href"]])
247
+ display_append.append(
248
+ f"{idx+1}. [{domain_name}]({result['href']})\n"
249
+ )
250
+ reference_results = add_source_numbers(reference_results)
251
+ display_append = "\n\n" + "".join(display_append)
252
+ real_inputs = (
253
+ replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
254
+ .replace("{query}", real_inputs)
255
+ .replace("{web_results}", "\n\n".join(reference_results))
256
+ .replace("{reply_language}", reply_language)
257
+ )
258
+ else:
259
+ display_append = ""
260
+ return limited_context, fake_inputs, display_append, real_inputs, chatbot
261
+
262
+ def predict(
263
+ self,
264
+ inputs,
265
+ chatbot,
266
+ stream=False,
267
+ use_websearch=False,
268
+ files=None,
269
+ reply_language="中文",
270
+ should_check_token_count=True,
271
+ ): # repetition_penalty, top_k
272
+
273
+ status_text = "开始生成回答……"
274
+ logging.info(
275
+ "输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL
276
+ )
277
+ if should_check_token_count:
278
+ yield chatbot + [(inputs, "")], status_text
279
+ if reply_language == "跟随问题语言(不稳定)":
280
+ reply_language = "the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch."
281
+
282
+ limited_context, fake_inputs, display_append, inputs, chatbot = self.prepare_inputs(real_inputs=inputs, use_websearch=use_websearch, files=files, reply_language=reply_language, chatbot=chatbot)
283
+ yield chatbot + [(fake_inputs, "")], status_text
284
+
285
+ if (
286
+ self.need_api_key and
287
+ self.api_key is None
288
+ and not shared.state.multi_api_key
289
+ ):
290
+ status_text = STANDARD_ERROR_MSG + NO_APIKEY_MSG
291
+ logging.info(status_text)
292
+ chatbot.append((inputs, ""))
293
+ if len(self.history) == 0:
294
+ self.history.append(construct_user(inputs))
295
+ self.history.append("")
296
+ self.all_token_counts.append(0)
297
+ else:
298
+ self.history[-2] = construct_user(inputs)
299
+ yield chatbot + [(inputs, "")], status_text
300
+ return
301
+ elif len(inputs.strip()) == 0:
302
+ status_text = STANDARD_ERROR_MSG + NO_INPUT_MSG
303
+ logging.info(status_text)
304
+ yield chatbot + [(inputs, "")], status_text
305
+ return
306
+
307
+ if self.single_turn:
308
+ self.history = []
309
+ self.all_token_counts = []
310
+ self.history.append(construct_user(inputs))
311
+
312
+ try:
313
+ if stream:
314
+ logging.debug("使用流式传输")
315
+ iter = self.stream_next_chatbot(
316
+ inputs,
317
+ chatbot,
318
+ fake_input=fake_inputs,
319
+ display_append=display_append,
320
+ )
321
+ for chatbot, status_text in iter:
322
+ yield chatbot, status_text
323
+ else:
324
+ logging.debug("不使用流式传输")
325
+ chatbot, status_text = self.next_chatbot_at_once(
326
+ inputs,
327
+ chatbot,
328
+ fake_input=fake_inputs,
329
+ display_append=display_append,
330
+ )
331
+ yield chatbot, status_text
332
+ except Exception as e:
333
+ traceback.print_exc()
334
+ status_text = STANDARD_ERROR_MSG + str(e)
335
+ yield chatbot, status_text
336
+
337
+ if len(self.history) > 1 and self.history[-1]["content"] != inputs:
338
+ logging.info(
339
+ "回答为:"
340
+ + colorama.Fore.BLUE
341
+ + f"{self.history[-1]['content']}"
342
+ + colorama.Style.RESET_ALL
343
+ )
344
+
345
+ if limited_context:
346
+ # self.history = self.history[-4:]
347
+ # self.all_token_counts = self.all_token_counts[-2:]
348
+ self.history = []
349
+ self.all_token_counts = []
350
+
351
+ max_token = self.token_upper_limit - TOKEN_OFFSET
352
+
353
+ if sum(self.all_token_counts) > max_token and should_check_token_count:
354
+ count = 0
355
+ while (
356
+ sum(self.all_token_counts)
357
+ > self.token_upper_limit * REDUCE_TOKEN_FACTOR
358
+ and sum(self.all_token_counts) > 0
359
+ ):
360
+ count += 1
361
+ del self.all_token_counts[0]
362
+ del self.history[:2]
363
+ logging.info(status_text)
364
+ status_text = f"为了防止token超限,模型忘记了早期的 {count} 轮对话"
365
+ yield chatbot, status_text
366
+
367
+ def retry(
368
+ self,
369
+ chatbot,
370
+ stream=False,
371
+ use_websearch=False,
372
+ files=None,
373
+ reply_language="中文",
374
+ ):
375
+ logging.debug("重试中……")
376
+ if len(self.history) > 0:
377
+ inputs = self.history[-2]["content"]
378
+ del self.history[-2:]
379
+ self.all_token_counts.pop()
380
+ elif len(chatbot) > 0:
381
+ inputs = chatbot[-1][0]
382
+ else:
383
+ yield chatbot, f"{STANDARD_ERROR_MSG}上下文是空的"
384
+ return
385
+
386
+ iter = self.predict(
387
+ inputs,
388
+ chatbot,
389
+ stream=stream,
390
+ use_websearch=use_websearch,
391
+ files=files,
392
+ reply_language=reply_language,
393
+ )
394
+ for x in iter:
395
+ yield x
396
+ logging.debug("重试完毕")
397
+
398
+ # def reduce_token_size(self, chatbot):
399
+ # logging.info("开始减少token数量……")
400
+ # chatbot, status_text = self.next_chatbot_at_once(
401
+ # summarize_prompt,
402
+ # chatbot
403
+ # )
404
+ # max_token_count = self.token_upper_limit * REDUCE_TOKEN_FACTOR
405
+ # num_chat = find_n(self.all_token_counts, max_token_count)
406
+ # logging.info(f"previous_token_count: {self.all_token_counts}, keeping {num_chat} chats")
407
+ # chatbot = chatbot[:-1]
408
+ # self.history = self.history[-2*num_chat:] if num_chat > 0 else []
409
+ # self.all_token_counts = self.all_token_counts[-num_chat:] if num_chat > 0 else []
410
+ # msg = f"保留了最近{num_chat}轮对话"
411
+ # logging.info(msg)
412
+ # logging.info("减少token数量完毕")
413
+ # return chatbot, msg + "," + self.token_message(self.all_token_counts if len(self.all_token_counts) > 0 else [0])
414
+
415
+ def interrupt(self):
416
+ self.interrupted = True
417
+
418
+ def recover(self):
419
+ self.interrupted = False
420
+
421
+ def set_token_upper_limit(self, new_upper_limit):
422
+ self.token_upper_limit = new_upper_limit
423
+ print(f"token上限设置为{new_upper_limit}")
424
+
425
+ def set_temperature(self, new_temperature):
426
+ self.temperature = new_temperature
427
+
428
+ def set_top_p(self, new_top_p):
429
+ self.top_p = new_top_p
430
+
431
+ def set_n_choices(self, new_n_choices):
432
+ self.n_choices = new_n_choices
433
+
434
+ def set_stop_sequence(self, new_stop_sequence: str):
435
+ new_stop_sequence = new_stop_sequence.split(",")
436
+ self.stop_sequence = new_stop_sequence
437
+
438
+ def set_max_tokens(self, new_max_tokens):
439
+ self.max_generation_token = new_max_tokens
440
+
441
+ def set_presence_penalty(self, new_presence_penalty):
442
+ self.presence_penalty = new_presence_penalty
443
+
444
+ def set_frequency_penalty(self, new_frequency_penalty):
445
+ self.frequency_penalty = new_frequency_penalty
446
+
447
+ def set_logit_bias(self, logit_bias):
448
+ logit_bias = logit_bias.split()
449
+ bias_map = {}
450
+ encoding = tiktoken.get_encoding("cl100k_base")
451
+ for line in logit_bias:
452
+ word, bias_amount = line.split(":")
453
+ if word:
454
+ for token in encoding.encode(word):
455
+ bias_map[token] = float(bias_amount)
456
+ self.logit_bias = bias_map
457
+
458
+ def set_user_identifier(self, new_user_identifier):
459
+ self.user_identifier = new_user_identifier
460
+
461
+ def set_system_prompt(self, new_system_prompt):
462
+ self.system_prompt = new_system_prompt
463
+
464
+ def set_key(self, new_access_key):
465
+ self.api_key = new_access_key.strip()
466
+ msg = f"API密钥更改为了{hide_middle_chars(self.api_key)}"
467
+ logging.info(msg)
468
+ return new_access_key, msg
469
+
470
+ def set_single_turn(self, new_single_turn):
471
+ self.single_turn = new_single_turn
472
+
473
+ def reset(self):
474
+ self.history = []
475
+ self.all_token_counts = []
476
+ self.interrupted = False
477
+ return [], self.token_message([0])
478
+
479
+ def delete_first_conversation(self):
480
+ if self.history:
481
+ del self.history[:2]
482
+ del self.all_token_counts[0]
483
+ return self.token_message()
484
+
485
+ def delete_last_conversation(self, chatbot):
486
+ if len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]:
487
+ msg = "由于包含报错信息,只删除chatbot记录"
488
+ chatbot.pop()
489
+ return chatbot, self.history
490
+ if len(self.history) > 0:
491
+ self.history.pop()
492
+ self.history.pop()
493
+ if len(chatbot) > 0:
494
+ msg = "删除了一组chatbot对话"
495
+ chatbot.pop()
496
+ if len(self.all_token_counts) > 0:
497
+ msg = "删除了一组对话的token计数记录"
498
+ self.all_token_counts.pop()
499
+ msg = "删除了一组对话"
500
+ return chatbot, msg
501
+
502
+ def token_message(self, token_lst=None):
503
+ if token_lst is None:
504
+ token_lst = self.all_token_counts
505
+ token_sum = 0
506
+ for i in range(len(token_lst)):
507
+ token_sum += sum(token_lst[: i + 1])
508
+ return i18n("Token 计数: ") + f"{sum(token_lst)}" + i18n(",本次对话累计消耗了 ") + f"{token_sum} tokens"
509
+
510
+ def save_chat_history(self, filename, chatbot, user_name):
511
+ if filename == "":
512
+ return
513
+ if not filename.endswith(".json"):
514
+ filename += ".json"
515
+ return save_file(filename, self.system_prompt, self.history, chatbot, user_name)
516
+
517
+ def export_markdown(self, filename, chatbot, user_name):
518
+ if filename == "":
519
+ return
520
+ if not filename.endswith(".md"):
521
+ filename += ".md"
522
+ return save_file(filename, self.system_prompt, self.history, chatbot, user_name)
523
+
524
+ def load_chat_history(self, filename, chatbot, user_name):
525
+ logging.debug(f"{user_name} 加载对话历史中……")
526
+ if type(filename) != str:
527
+ filename = filename.name
528
+ try:
529
+ with open(os.path.join(HISTORY_DIR, user_name, filename), "r") as f:
530
+ json_s = json.load(f)
531
+ try:
532
+ if type(json_s["history"][0]) == str:
533
+ logging.info("历史记录格式为旧版,正在转换……")
534
+ new_history = []
535
+ for index, item in enumerate(json_s["history"]):
536
+ if index % 2 == 0:
537
+ new_history.append(construct_user(item))
538
+ else:
539
+ new_history.append(construct_assistant(item))
540
+ json_s["history"] = new_history
541
+ logging.info(new_history)
542
+ except:
543
+ # 没有对话历史
544
+ pass
545
+ logging.debug(f"{user_name} 加载对话历史完毕")
546
+ self.history = json_s["history"]
547
+ return filename, json_s["system"], json_s["chatbot"]
548
+ except FileNotFoundError:
549
+ logging.warning(f"{user_name} 没有找到对话历史文件,不执行任何操作")
550
+ return filename, self.system_prompt, chatbot
modules/config.py CHANGED
@@ -3,9 +3,10 @@ from contextlib import contextmanager
3
  import os
4
  import logging
5
  import sys
6
- import json
7
 
8
  from . import shared
 
9
 
10
 
11
  __all__ = [
@@ -18,6 +19,9 @@ __all__ = [
18
  "advance_docs",
19
  "update_doc_config",
20
  "multi_api_key",
 
 
 
21
  ]
22
 
23
  # 添加一个统一的config文件,避免文件过多造成的疑惑(优先级最低)
@@ -28,6 +32,34 @@ if os.path.exists("config.json"):
28
  else:
29
  config = {}
30
 
31
  ## 处理docker if we are running in Docker
32
  dockerflag = config.get("dockerflag", False)
33
  if os.environ.get("dockerrun") == "yes":
@@ -35,7 +67,7 @@ if os.environ.get("dockerrun") == "yes":
35
 
36
  ## 处理 api-key 以及 允许的用户列表
37
  my_api_key = config.get("openai_api_key", "") # 在这里输入你的 API 密钥
38
- my_api_key = os.environ.get("my_api_key", my_api_key)
39
 
40
  ## 多账户机制
41
  multi_api_key = config.get("multi_api_key", False) # 是否开启多账户机制
@@ -54,35 +86,6 @@ api_host = os.environ.get("api_host", config.get("api_host", ""))
54
  if api_host:
55
  shared.state.set_api_host(api_host)
56
 
57
- if dockerflag:
58
- if my_api_key == "empty":
59
- logging.error("Please give a api key!")
60
- sys.exit(1)
61
- # auth
62
- username = os.environ.get("USERNAME")
63
- password = os.environ.get("PASSWORD")
64
- if not (isinstance(username, type(None)) or isinstance(password, type(None))):
65
- auth_list.append((os.environ.get("USERNAME"), os.environ.get("PASSWORD")))
66
- authflag = True
67
- else:
68
- if (
69
- not my_api_key
70
- and os.path.exists("api_key.txt")
71
- and os.path.getsize("api_key.txt")
72
- ):
73
- with open("api_key.txt", "r") as f:
74
- my_api_key = f.read().strip()
75
- if os.path.exists("auth.json"):
76
- authflag = True
77
- with open("auth.json", "r", encoding='utf-8') as f:
78
- auth = json.load(f)
79
- for _ in auth:
80
- if auth[_]["username"] and auth[_]["password"]:
81
- auth_list.append((auth[_]["username"], auth[_]["password"]))
82
- else:
83
- logging.error("请检查auth.json文件中的用户名和密码!")
84
- sys.exit(1)
85
-
86
  @contextmanager
87
  def retrieve_openai_api(api_key = None):
88
  old_api_key = os.environ.get("OPENAI_API_KEY", "")
@@ -111,6 +114,8 @@ https_proxy = os.environ.get("HTTPS_PROXY", https_proxy)
111
  os.environ["HTTP_PROXY"] = ""
112
  os.environ["HTTPS_PROXY"] = ""
113
 
 
 
114
  @contextmanager
115
  def retrieve_proxy(proxy=None):
116
  """
@@ -137,9 +142,29 @@ advance_docs = defaultdict(lambda: defaultdict(dict))
137
  advance_docs.update(config.get("advance_docs", {}))
138
  def update_doc_config(two_column_pdf):
139
  global advance_docs
140
- if two_column_pdf:
141
- advance_docs["pdf"]["two_column"] = True
142
  else:
143
- advance_docs["pdf"]["two_column"] = False
144
 
145
- logging.info(f"更新后的文件参数为:{advance_docs}")
3
  import os
4
  import logging
5
  import sys
6
+ import commentjson as json
7
 
8
  from . import shared
9
+ from . import presets
10
 
11
 
12
  __all__ = [
19
  "advance_docs",
20
  "update_doc_config",
21
  "multi_api_key",
22
+ "server_name",
23
+ "server_port",
24
+ "share",
25
  ]
26
 
27
  # 添加一个统一的config文件,避免文件过多造成的疑惑(优先级最低)
32
  else:
33
  config = {}
34
 
35
+ language = config.get("language", "auto") # 界面语言
36
+ language = os.environ.get("LANGUAGE", language)
37
+
38
+
39
+ if os.path.exists("api_key.txt"):
40
+ logging.info("检测到api_key.txt文件,正在进行迁移...")
41
+ with open("api_key.txt", "r") as f:
42
+ config["openai_api_key"] = f.read().strip()
43
+ os.rename("api_key.txt", "api_key(deprecated).txt")
44
+ with open("config.json", "w", encoding='utf-8') as f:
45
+ json.dump(config, f, indent=4)
46
+
47
+ if os.path.exists("auth.json"):
48
+ logging.info("检测到auth.json文件,正在进行迁移...")
49
+ auth_list = []
50
+ with open("auth.json", "r", encoding='utf-8') as f:
51
+ auth = json.load(f)
52
+ for _ in auth:
53
+ if auth[_]["username"] and auth[_]["password"]:
54
+ auth_list.append((auth[_]["username"], auth[_]["password"]))
55
+ else:
56
+ logging.error("请检查auth.json文件中的用户名和密码!")
57
+ sys.exit(1)
58
+ config["users"] = auth_list
59
+ os.rename("auth.json", "auth(deprecated).json")
60
+ with open("config.json", "w", encoding='utf-8') as f:
61
+ json.dump(config, f, indent=4)
62
+
63
  ## 处理docker if we are running in Docker
64
  dockerflag = config.get("dockerflag", False)
65
  if os.environ.get("dockerrun") == "yes":
67
 
68
  ## 处理 api-key 以及 允许的用户列表
69
  my_api_key = config.get("openai_api_key", "") # 在这里输入你的 API 密钥
70
+ my_api_key = os.environ.get("OPENAI_API_KEY", my_api_key)
71
 
72
  ## 多账户机制
73
  multi_api_key = config.get("multi_api_key", False) # 是否开启多账户机制
86
  if api_host:
87
  shared.state.set_api_host(api_host)
88
 
89
  @contextmanager
90
  def retrieve_openai_api(api_key = None):
91
  old_api_key = os.environ.get("OPENAI_API_KEY", "")
114
  os.environ["HTTP_PROXY"] = ""
115
  os.environ["HTTPS_PROXY"] = ""
116
 
117
+ local_embedding = config.get("local_embedding", False) # 是否使用本地embedding
118
+
119
  @contextmanager
120
  def retrieve_proxy(proxy=None):
121
  """
142
  advance_docs.update(config.get("advance_docs", {}))
143
  def update_doc_config(two_column_pdf):
144
  global advance_docs
145
+ advance_docs["pdf"]["two_column"] = two_column_pdf
146
+
147
+ logging.info(f"更新后的文件参数为:{advance_docs}")
148
+
149
+ ## 处理gradio.launch参数
150
+ server_name = config.get("server_name", None)
151
+ server_port = config.get("server_port", None)
152
+ if server_name is None:
153
+ if dockerflag:
154
+ server_name = "0.0.0.0"
155
  else:
156
+ server_name = "127.0.0.1"
157
+ if server_port is None:
158
+ if dockerflag:
159
+ server_port = 7860
160
+
161
+ assert server_port is None or type(server_port) == int, "要求port设置为int类型"
162
+
163
+ # 设置默认model
164
+ default_model = config.get("default_model", "")
165
+ try:
166
+ presets.DEFAULT_MODEL = presets.MODELS.index(default_model)
167
+ except ValueError:
168
+ pass
169
 
170
+ share = config.get("share", False)
modules/llama_func.py CHANGED
@@ -15,6 +15,8 @@ from tqdm import tqdm
15
 
16
  from modules.presets import *
17
  from modules.utils import *
 
 
18
 
19
  def get_index_name(file_src):
20
  file_paths = [x.name for x in file_src]
@@ -28,6 +30,7 @@ def get_index_name(file_src):
28
 
29
  return md5_hash.hexdigest()
30
 
 
31
  def block_split(text):
32
  blocks = []
33
  while len(text) > 0:
@@ -35,6 +38,7 @@ def block_split(text):
35
  text = text[1000:]
36
  return blocks
37
 
 
38
  def get_documents(file_src):
39
  documents = []
40
  logging.debug("Loading documents...")
@@ -44,37 +48,45 @@ def get_documents(file_src):
44
  filename = os.path.basename(filepath)
45
  file_type = os.path.splitext(filepath)[1]
46
  logging.info(f"loading file: {filename}")
47
- if file_type == ".pdf":
48
- logging.debug("Loading PDF...")
49
- try:
50
- from modules.pdf_func import parse_pdf
51
- from modules.config import advance_docs
52
- two_column = advance_docs["pdf"].get("two_column", False)
53
- pdftext = parse_pdf(filepath, two_column).text
54
- except:
55
- pdftext = ""
56
- with open(filepath, 'rb') as pdfFileObj:
57
- pdfReader = PyPDF2.PdfReader(pdfFileObj)
58
- for page in tqdm(pdfReader.pages):
59
- pdftext += page.extract_text()
60
- text_raw = pdftext
61
- elif file_type == ".docx":
62
- logging.debug("Loading Word...")
63
- DocxReader = download_loader("DocxReader")
64
- loader = DocxReader()
65
- text_raw = loader.load_data(file=filepath)[0].text
66
- elif file_type == ".epub":
67
- logging.debug("Loading EPUB...")
68
- EpubReader = download_loader("EpubReader")
69
- loader = EpubReader()
70
- text_raw = loader.load_data(file=filepath)[0].text
71
- elif file_type == ".xlsx":
72
- logging.debug("Loading Excel...")
73
- text_raw = excel_to_string(filepath)
74
- else:
75
- logging.debug("Loading text file...")
76
- with open(filepath, "r", encoding="utf-8") as f:
77
- text_raw = f.read()
78
  text = add_space(text_raw)
79
  # text = block_split(text)
80
  # documents += text
@@ -84,27 +96,36 @@ def get_documents(file_src):
84
 
85
 
86
  def construct_index(
87
- api_key,
88
- file_src,
89
- max_input_size=4096,
90
- num_outputs=5,
91
- max_chunk_overlap=20,
92
- chunk_size_limit=600,
93
- embedding_limit=None,
94
- separator=" "
95
  ):
96
  from langchain.chat_models import ChatOpenAI
97
- from llama_index import GPTSimpleVectorIndex, ServiceContext
 
98
 
99
- os.environ["OPENAI_API_KEY"] = api_key
 
 
 
 
100
  chunk_size_limit = None if chunk_size_limit == 0 else chunk_size_limit
101
  embedding_limit = None if embedding_limit == 0 else embedding_limit
102
  separator = " " if separator == "" else separator
103
 
104
- llm_predictor = LLMPredictor(
105
- llm=ChatOpenAI(model_name="gpt-3.5-turbo-0301", openai_api_key=api_key)
 
 
 
 
 
106
  )
107
- prompt_helper = PromptHelper(max_input_size = max_input_size, num_output = num_outputs, max_chunk_overlap = max_chunk_overlap, embedding_limit=embedding_limit, chunk_size_limit=600, separator=separator)
108
  index_name = get_index_name(file_src)
109
  if os.path.exists(f"./index/{index_name}.json"):
110
  logging.info("找到了缓存的索引文件,加载中……")
@@ -112,11 +133,19 @@ def construct_index(
112
  else:
113
  try:
114
  documents = get_documents(file_src)
 
 
 
 
115
  logging.info("构建索引中……")
116
  with retrieve_proxy():
117
- service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper, chunk_size_limit=chunk_size_limit)
 
 
 
 
118
  index = GPTSimpleVectorIndex.from_documents(
119
- documents, service_context=service_context
120
  )
121
  logging.debug("索引构建完成!")
122
  os.makedirs("./index", exist_ok=True)
15
 
16
  from modules.presets import *
17
  from modules.utils import *
18
+ from modules.config import local_embedding
19
+
20
 
21
  def get_index_name(file_src):
22
  file_paths = [x.name for x in file_src]
30
 
31
  return md5_hash.hexdigest()
32
 
33
+
34
  def block_split(text):
35
  blocks = []
36
  while len(text) > 0:
38
  text = text[1000:]
39
  return blocks
40
 
41
+
42
  def get_documents(file_src):
43
  documents = []
44
  logging.debug("Loading documents...")
48
  filename = os.path.basename(filepath)
49
  file_type = os.path.splitext(filepath)[1]
50
  logging.info(f"loading file: {filename}")
51
+ try:
52
+ if file_type == ".pdf":
53
+ logging.debug("Loading PDF...")
54
+ try:
55
+ from modules.pdf_func import parse_pdf
56
+ from modules.config import advance_docs
57
+
58
+ two_column = advance_docs["pdf"].get("two_column", False)
59
+ pdftext = parse_pdf(filepath, two_column).text
60
+ except:
61
+ pdftext = ""
62
+ with open(filepath, "rb") as pdfFileObj:
63
+ pdfReader = PyPDF2.PdfReader(pdfFileObj)
64
+ for page in tqdm(pdfReader.pages):
65
+ pdftext += page.extract_text()
66
+ text_raw = pdftext
67
+ elif file_type == ".docx":
68
+ logging.debug("Loading Word...")
69
+ DocxReader = download_loader("DocxReader")
70
+ loader = DocxReader()
71
+ text_raw = loader.load_data(file=filepath)[0].text
72
+ elif file_type == ".epub":
73
+ logging.debug("Loading EPUB...")
74
+ EpubReader = download_loader("EpubReader")
75
+ loader = EpubReader()
76
+ text_raw = loader.load_data(file=filepath)[0].text
77
+ elif file_type == ".xlsx":
78
+ logging.debug("Loading Excel...")
79
+ text_list = excel_to_string(filepath)
80
+ for elem in text_list:
81
+ documents.append(Document(elem))
82
+ continue
83
+ else:
84
+ logging.debug("Loading text file...")
85
+ with open(filepath, "r", encoding="utf-8") as f:
86
+ text_raw = f.read()
87
+ except Exception as e:
88
+ logging.error(f"Error loading file: {filename}")
89
+ pass
90
  text = add_space(text_raw)
91
  # text = block_split(text)
92
  # documents += text
96
 
97
 
98
  def construct_index(
99
+ api_key,
100
+ file_src,
101
+ max_input_size=4096,
102
+ num_outputs=5,
103
+ max_chunk_overlap=20,
104
+ chunk_size_limit=600,
105
+ embedding_limit=None,
106
+ separator=" ",
107
  ):
108
  from langchain.chat_models import ChatOpenAI
109
+ from langchain.embeddings.huggingface import HuggingFaceEmbeddings
110
+ from llama_index import GPTSimpleVectorIndex, ServiceContext, LangchainEmbedding, OpenAIEmbedding
111
 
112
+ if api_key:
113
+ os.environ["OPENAI_API_KEY"] = api_key
114
+ else:
115
+ # 由于一个依赖的愚蠢的设计,这里必须要有一个API KEY
116
+ os.environ["OPENAI_API_KEY"] = "sk-xxxxxxx"
117
  chunk_size_limit = None if chunk_size_limit == 0 else chunk_size_limit
118
  embedding_limit = None if embedding_limit == 0 else embedding_limit
119
  separator = " " if separator == "" else separator
120
 
121
+ prompt_helper = PromptHelper(
122
+ max_input_size=max_input_size,
123
+ num_output=num_outputs,
124
+ max_chunk_overlap=max_chunk_overlap,
125
+ embedding_limit=embedding_limit,
126
+ chunk_size_limit=600,
127
+ separator=separator,
128
  )
 
129
  index_name = get_index_name(file_src)
130
  if os.path.exists(f"./index/{index_name}.json"):
131
  logging.info("找到了缓存的索引文件,加载中……")
133
  else:
134
  try:
135
  documents = get_documents(file_src)
136
+ if local_embedding:
137
+ embed_model = LangchainEmbedding(HuggingFaceEmbeddings())
138
+ else:
139
+ embed_model = OpenAIEmbedding()
140
  logging.info("构建索引中……")
141
  with retrieve_proxy():
142
+ service_context = ServiceContext.from_defaults(
143
+ prompt_helper=prompt_helper,
144
+ chunk_size_limit=chunk_size_limit,
145
+ embed_model=embed_model,
146
+ )
147
  index = GPTSimpleVectorIndex.from_documents(
148
+ documents, service_context=service_context
149
  )
150
  logging.debug("索引构建完成!")
151
  os.makedirs("./index", exist_ok=True)
modules/models.py ADDED
@@ -0,0 +1,585 @@
1
+ from __future__ import annotations
2
+ from typing import TYPE_CHECKING, List
3
+
4
+ import logging
5
+ import json
6
+ import commentjson as cjson
7
+ import os
8
+ import sys
9
+ import requests
10
+ import urllib3
11
+ import platform
12
+
13
+ from tqdm import tqdm
14
+ import colorama
15
+ from duckduckgo_search import ddg
16
+ import asyncio
17
+ import aiohttp
18
+ from enum import Enum
19
+ import uuid
20
+
21
+ from .presets import *
22
+ from .llama_func import *
23
+ from .utils import *
24
+ from . import shared
25
+ from .config import retrieve_proxy
26
+ from modules import config
27
+ from .base_model import BaseLLMModel, ModelType
28
+
29
+
30
+ class OpenAIClient(BaseLLMModel):
31
+ def __init__(
32
+ self,
33
+ model_name,
34
+ api_key,
35
+ system_prompt=INITIAL_SYSTEM_PROMPT,
36
+ temperature=1.0,
37
+ top_p=1.0,
38
+ ) -> None:
39
+ super().__init__(
40
+ model_name=model_name,
41
+ temperature=temperature,
42
+ top_p=top_p,
43
+ system_prompt=system_prompt,
44
+ )
45
+ self.api_key = api_key
46
+ self.need_api_key = True
47
+ self._refresh_header()
48
+
49
+ def get_answer_stream_iter(self):
50
+ response = self._get_response(stream=True)
51
+ if response is not None:
52
+ iter = self._decode_chat_response(response)
53
+ partial_text = ""
54
+ for i in iter:
55
+ partial_text += i
56
+ yield partial_text
57
+ else:
58
+ yield STANDARD_ERROR_MSG + GENERAL_ERROR_MSG
59
+
60
+ def get_answer_at_once(self):
61
+ response = self._get_response()
62
+ response = json.loads(response.text)
63
+ content = response["choices"][0]["message"]["content"]
64
+ total_token_count = response["usage"]["total_tokens"]
65
+ return content, total_token_count
66
+
67
+ def count_token(self, user_input):
68
+ input_token_count = count_token(construct_user(user_input))
69
+ if self.system_prompt is not None and len(self.all_token_counts) == 0:
70
+ system_prompt_token_count = count_token(
71
+ construct_system(self.system_prompt)
72
+ )
73
+ return input_token_count + system_prompt_token_count
74
+ return input_token_count
75
+
76
+ def billing_info(self):
77
+ try:
78
+ curr_time = datetime.datetime.now()
79
+ last_day_of_month = get_last_day_of_month(
80
+ curr_time).strftime("%Y-%m-%d")
81
+ first_day_of_month = curr_time.replace(day=1).strftime("%Y-%m-%d")
82
+ usage_url = f"{shared.state.usage_api_url}?start_date={first_day_of_month}&end_date={last_day_of_month}"
83
+ try:
84
+ usage_data = self._get_billing_data(usage_url)
85
+ except Exception as e:
86
+ logging.error(f"获取API使用情况失败:" + str(e))
87
+ return i18n("**获取API使用情况失败**")
88
+ rounded_usage = "{:.5f}".format(usage_data["total_usage"] / 100)
89
+ return i18n("**本月使用金额** ") + f"\u3000 ${rounded_usage}"
90
+ except requests.exceptions.ConnectTimeout:
91
+ status_text = (
92
+ STANDARD_ERROR_MSG + CONNECTION_TIMEOUT_MSG + ERROR_RETRIEVE_MSG
93
+ )
94
+ return status_text
95
+ except requests.exceptions.ReadTimeout:
96
+ status_text = STANDARD_ERROR_MSG + READ_TIMEOUT_MSG + ERROR_RETRIEVE_MSG
97
+ return status_text
98
+ except Exception as e:
99
+ logging.error(i18n("获取API使用情况失败:") + str(e))
100
+ return STANDARD_ERROR_MSG + ERROR_RETRIEVE_MSG
101
+
102
+ def set_token_upper_limit(self, new_upper_limit):
103
+ pass
104
+
105
+ def set_key(self, new_access_key):
106
+ self.api_key = new_access_key.strip()
107
+ self._refresh_header()
108
+ msg = i18n("API密钥更改为了") + f"{hide_middle_chars(self.api_key)}"
109
+ logging.info(msg)
110
+ return msg
111
+
112
+ @shared.state.switching_api_key # 在不开启多账号模式的时候,这个装饰器不会起作用
113
+ def _get_response(self, stream=False):
114
+ openai_api_key = self.api_key
115
+ system_prompt = self.system_prompt
116
+ history = self.history
117
+ logging.debug(colorama.Fore.YELLOW +
118
+ f"{history}" + colorama.Fore.RESET)
119
+ headers = {
120
+ "Content-Type": "application/json",
121
+ "Authorization": f"Bearer {openai_api_key}",
122
+ }
123
+
124
+ if system_prompt is not None:
125
+ history = [construct_system(system_prompt), *history]
126
+
127
+ payload = {
128
+ "model": self.model_name,
129
+ "messages": history,
130
+ "temperature": self.temperature,
131
+ "top_p": self.top_p,
132
+ "n": self.n_choices,
133
+ "stream": stream,
134
+ "presence_penalty": self.presence_penalty,
135
+ "frequency_penalty": self.frequency_penalty,
136
+ }
137
+
138
+ if self.max_generation_token is not None:
139
+ payload["max_tokens"] = self.max_generation_token
140
+ if self.stop_sequence is not None:
141
+ payload["stop"] = self.stop_sequence
142
+ if self.logit_bias is not None:
143
+ payload["logit_bias"] = self.logit_bias
144
+ if self.user_identifier is not None:
145
+ payload["user"] = self.user_identifier
146
+
147
+ if stream:
148
+ timeout = TIMEOUT_STREAMING
149
+ else:
150
+ timeout = TIMEOUT_ALL
151
+
152
+ # 如果有自定义的api-host,使用自定义host发送请求,否则使用默认设置发送请求
153
+ if shared.state.completion_url != COMPLETION_URL:
154
+ logging.info(f"使用自定义API URL: {shared.state.completion_url}")
155
+
156
+ with retrieve_proxy():
157
+ try:
158
+ response = requests.post(
159
+ shared.state.completion_url,
160
+ headers=headers,
161
+ json=payload,
162
+ stream=stream,
163
+ timeout=timeout,
164
+ )
165
+ except:
166
+ return None
167
+ return response
168
+
169
+ def _refresh_header(self):
170
+ self.headers = {
171
+ "Content-Type": "application/json",
172
+ "Authorization": f"Bearer {self.api_key}",
173
+ }
174
+
175
+ def _get_billing_data(self, billing_url):
176
+ with retrieve_proxy():
177
+ response = requests.get(
178
+ billing_url,
179
+ headers=self.headers,
180
+ timeout=TIMEOUT_ALL,
181
+ )
182
+
183
+ if response.status_code == 200:
184
+ data = response.json()
185
+ return data
186
+ else:
187
+ raise Exception(
188
+ f"API request failed with status code {response.status_code}: {response.text}"
189
+ )
190
+
191
+ def _decode_chat_response(self, response):
192
+ error_msg = ""
193
+ for chunk in response.iter_lines():
194
+ if chunk:
195
+ chunk = chunk.decode()
196
+ chunk_length = len(chunk)
197
+ try:
198
+ chunk = json.loads(chunk[6:])
199
+ except json.JSONDecodeError:
200
+ print(i18n("JSON解析错误,收到的内容: ") + f"{chunk}")
201
+ error_msg += chunk
202
+ continue
203
+ if chunk_length > 6 and "delta" in chunk["choices"][0]:
204
+ if chunk["choices"][0]["finish_reason"] == "stop":
205
+ break
206
+ try:
207
+ yield chunk["choices"][0]["delta"]["content"]
208
+ except Exception as e:
209
+ # logging.error(f"Error: {e}")
210
+ continue
211
+ if error_msg:
212
+ raise Exception(error_msg)
213
+
214
+
215
+ class ChatGLM_Client(BaseLLMModel):
216
+ def __init__(self, model_name) -> None:
217
+ super().__init__(model_name=model_name)
218
+ from transformers import AutoTokenizer, AutoModel
219
+ import torch
220
+ global CHATGLM_TOKENIZER, CHATGLM_MODEL
221
+ if CHATGLM_TOKENIZER is None or CHATGLM_MODEL is None:
222
+ system_name = platform.system()
223
+ model_path = None
224
+ if os.path.exists("models"):
225
+ model_dirs = os.listdir("models")
226
+ if model_name in model_dirs:
227
+ model_path = f"models/{model_name}"
228
+ if model_path is not None:
229
+ model_source = model_path
230
+ else:
231
+ model_source = f"THUDM/{model_name}"
232
+ CHATGLM_TOKENIZER = AutoTokenizer.from_pretrained(
233
+ model_source, trust_remote_code=True
234
+ )
235
+ quantified = False
236
+ if "int4" in model_name:
237
+ quantified = True
238
+ model = AutoModel.from_pretrained(
239
+ model_source, trust_remote_code=True
240
+ )
241
+ if torch.cuda.is_available():
242
+ # run on CUDA
243
+ logging.info("CUDA is available, using CUDA")
244
+ model = model.half().cuda()
245
+ # mps加速还存在一些问题,暂时不使用
246
+ elif system_name == "Darwin" and model_path is not None and not quantified:
247
+ logging.info("Running on macOS, using MPS")
248
+ # running on macOS and model already downloaded
249
+ model = model.half().to("mps")
250
+ else:
251
+ logging.info("GPU is not available, using CPU")
252
+ model = model.float()
253
+ model = model.eval()
254
+ CHATGLM_MODEL = model
255
+
256
+ def _get_glm_style_input(self):
257
+ history = [x["content"] for x in self.history]
258
+ query = history.pop()
259
+ logging.debug(colorama.Fore.YELLOW +
260
+ f"{history}" + colorama.Fore.RESET)
261
+ assert (
262
+ len(history) % 2 == 0
263
+ ), f"History should be even length. current history is: {history}"
264
+ history = [[history[i], history[i + 1]]
265
+ for i in range(0, len(history), 2)]
266
+ return history, query
267
+
268
+ def get_answer_at_once(self):
269
+ history, query = self._get_glm_style_input()
270
+ response, _ = CHATGLM_MODEL.chat(
271
+ CHATGLM_TOKENIZER, query, history=history)
272
+ return response, len(response)
273
+
274
+ def get_answer_stream_iter(self):
275
+ history, query = self._get_glm_style_input()
276
+ for response, history in CHATGLM_MODEL.stream_chat(
277
+ CHATGLM_TOKENIZER,
278
+ query,
279
+ history,
280
+ max_length=self.token_upper_limit,
281
+ top_p=self.top_p,
282
+ temperature=self.temperature,
283
+ ):
284
+ yield response
285
+
286
+
287
+ class LLaMA_Client(BaseLLMModel):
288
+ def __init__(
289
+ self,
290
+ model_name,
291
+ lora_path=None,
292
+ ) -> None:
293
+ super().__init__(model_name=model_name)
294
+ from lmflow.datasets.dataset import Dataset
295
+ from lmflow.pipeline.auto_pipeline import AutoPipeline
296
+ from lmflow.models.auto_model import AutoModel
297
+ from lmflow.args import ModelArguments, DatasetArguments, InferencerArguments
298
+
299
+ self.max_generation_token = 1000
300
+ self.end_string = "\n\n"
301
+ # We don't need input data
302
+ data_args = DatasetArguments(dataset_path=None)
303
+ self.dataset = Dataset(data_args)
304
+ self.system_prompt = ""
305
+
306
+ global LLAMA_MODEL, LLAMA_INFERENCER
307
+ if LLAMA_MODEL is None or LLAMA_INFERENCER is None:
308
+ model_path = None
309
+ if os.path.exists("models"):
310
+ model_dirs = os.listdir("models")
311
+ if model_name in model_dirs:
312
+ model_path = f"models/{model_name}"
313
+ if model_path is not None:
314
+ model_source = model_path
315
+ else:
316
+ model_source = f"decapoda-research/{model_name}"
317
+ # raise Exception(f"models目录下没有这个模型: {model_name}")
318
+ if lora_path is not None:
319
+ lora_path = f"lora/{lora_path}"
320
+ model_args = ModelArguments(model_name_or_path=model_source, lora_model_path=lora_path, model_type=None, config_overrides=None, config_name=None, tokenizer_name=None, cache_dir=None,
321
+ use_fast_tokenizer=True, model_revision='main', use_auth_token=False, torch_dtype=None, use_lora=False, lora_r=8, lora_alpha=32, lora_dropout=0.1, use_ram_optimized_load=True)
322
+ pipeline_args = InferencerArguments(
323
+ local_rank=0, random_seed=1, deepspeed='configs/ds_config_chatbot.json', mixed_precision='bf16')
324
+
325
+ with open(pipeline_args.deepspeed, "r") as f:
326
+ ds_config = json.load(f)
327
+ LLAMA_MODEL = AutoModel.get_model(
328
+ model_args,
329
+ tune_strategy="none",
330
+ ds_config=ds_config,
331
+ )
332
+ LLAMA_INFERENCER = AutoPipeline.get_pipeline(
333
+ pipeline_name="inferencer",
334
+ model_args=model_args,
335
+ data_args=data_args,
336
+ pipeline_args=pipeline_args,
337
+ )
338
+ # Chats
339
+ # model_name = model_args.model_name_or_path
340
+ # if model_args.lora_model_path is not None:
341
+ # model_name += f" + {model_args.lora_model_path}"
342
+
343
+ # context = (
344
+ # "You are a helpful assistant who follows the given instructions"
345
+ # " unconditionally."
346
+ # )
347
+
348
+ def _get_llama_style_input(self):
349
+ history = []
350
+ instruction = ""
351
+ if self.system_prompt:
352
+ instruction = (f"Instruction: {self.system_prompt}\n")
353
+ for x in self.history:
354
+ if x["role"] == "user":
355
+ history.append(f"{instruction}Input: {x['content']}")
356
+ else:
357
+ history.append(f"Output: {x['content']}")
358
+ context = "\n\n".join(history)
359
+ context += "\n\nOutput: "
360
+ return context
361
+
362
+ def get_answer_at_once(self):
363
+ context = self._get_llama_style_input()
364
+
365
+ input_dataset = self.dataset.from_dict(
366
+ {"type": "text_only", "instances": [{"text": context}]}
367
+ )
368
+
369
+ output_dataset = LLAMA_INFERENCER.inference(
370
+ model=LLAMA_MODEL,
371
+ dataset=input_dataset,
372
+ max_new_tokens=self.max_generation_token,
373
+ temperature=self.temperature,
374
+ )
375
+
376
+ response = output_dataset.to_dict()["instances"][0]["text"]
377
+ return response, len(response)
378
+
379
+ def get_answer_stream_iter(self):
380
+ context = self._get_llama_style_input()
381
+ partial_text = ""
382
+ step = 1
383
+ for _ in range(0, self.max_generation_token, step):
384
+ input_dataset = self.dataset.from_dict(
385
+ {"type": "text_only", "instances": [
386
+ {"text": context + partial_text}]}
387
+ )
388
+ output_dataset = LLAMA_INFERENCER.inference(
389
+ model=LLAMA_MODEL,
390
+ dataset=input_dataset,
391
+ max_new_tokens=step,
392
+ temperature=self.temperature,
393
+ )
394
+ response = output_dataset.to_dict()["instances"][0]["text"]
395
+ if response == "" or response == self.end_string:
396
+ break
397
+ partial_text += response
398
+ yield partial_text
399
+
400
+
401
+ class XMBot_Client(BaseLLMModel):
402
+ def __init__(self, api_key):
403
+ super().__init__(model_name="xmbot")
404
+ self.api_key = api_key
405
+ self.session_id = None
406
+ self.reset()
407
+ self.image_bytes = None
408
+ self.image_path = None
409
+ self.xm_history = []
410
+ self.url = "https://xmbot.net/web"
411
+
412
+ def reset(self):
413
+ self.session_id = str(uuid.uuid4())
414
+ return [], "已重置"
415
+
416
+ def try_read_image(self, filepath):
417
+ import base64
418
+
419
+ def is_image_file(filepath):
420
+ # 判断文件是否为图片
421
+ valid_image_extensions = [".jpg", ".jpeg", ".png", ".bmp", ".gif", ".tiff"]
422
+ file_extension = os.path.splitext(filepath)[1].lower()
423
+ return file_extension in valid_image_extensions
424
+
425
+ def read_image_as_bytes(filepath):
426
+ # 读取图片文件并返回比特流
427
+ with open(filepath, "rb") as f:
428
+ image_bytes = f.read()
429
+ return image_bytes
430
+
431
+ if is_image_file(filepath):
432
+ logging.info(f"读取图片文件: {filepath}")
433
+ image_bytes = read_image_as_bytes(filepath)
434
+ base64_encoded_image = base64.b64encode(image_bytes).decode()
435
+ self.image_bytes = base64_encoded_image
436
+ self.image_path = filepath
437
+ else:
438
+ self.image_bytes = None
439
+ self.image_path = None
440
+
441
+ def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot):
442
+ fake_inputs = real_inputs
443
+ display_append = ""
444
+ limited_context = False
445
+ return limited_context, fake_inputs, display_append, real_inputs, chatbot
446
+
447
+ def handle_file_upload(self, files, chatbot):
448
+ """if the model accepts multi modal input, implement this function"""
449
+ if files:
450
+ for file in files:
451
+ if file.name:
452
+ logging.info(f"尝试读取图像: {file.name}")
453
+ self.try_read_image(file.name)
454
+ if self.image_path is not None:
455
+ chatbot = chatbot + [((self.image_path,), None)]
456
+ if self.image_bytes is not None:
457
+ logging.info("使用图片作为输入")
458
+ conv_id = str(uuid.uuid4())
459
+ data = {
460
+ "user_id": self.api_key,
461
+ "session_id": self.session_id,
462
+ "uuid": conv_id,
463
+ "data_type": "imgbase64",
464
+ "data": self.image_bytes
465
+ }
466
+ response = requests.post(self.url, json=data)
467
+ response = json.loads(response.text)
468
+ logging.info(f"图片回复: {response['data']}")
469
+ return None, chatbot, None
470
+
471
+ def get_answer_at_once(self):
472
+ question = self.history[-1]["content"]
473
+ conv_id = str(uuid.uuid4())
474
+ data = {
475
+ "user_id": self.api_key,
476
+ "session_id": self.session_id,
477
+ "uuid": conv_id,
478
+ "data_type": "text",
479
+ "data": question
480
+ }
481
+ response = requests.post(self.url, json=data)
482
+ try:
483
+ response = json.loads(response.text)
484
+ return response["data"], len(response["data"])
485
+ except Exception as e:
486
+ return response.text, len(response.text)
487
+
488
+
489
+
490
+
491
+ def get_model(
492
+ model_name,
493
+ lora_model_path=None,
494
+ access_key=None,
495
+ temperature=None,
496
+ top_p=None,
497
+ system_prompt=None,
498
+ ) -> BaseLLMModel:
499
+ msg = i18n("模型设置为了:") + f" {model_name}"
500
+ model_type = ModelType.get_type(model_name)
501
+ lora_selector_visibility = False
502
+ lora_choices = []
503
+ dont_change_lora_selector = False
504
+ if model_type != ModelType.OpenAI:
505
+ config.local_embedding = True
506
+ # del current_model.model
507
+ model = None
508
+ try:
509
+ if model_type == ModelType.OpenAI:
510
+ logging.info(f"正在加载OpenAI模型: {model_name}")
511
+ model = OpenAIClient(
512
+ model_name=model_name,
513
+ api_key=access_key,
514
+ system_prompt=system_prompt,
515
+ temperature=temperature,
516
+ top_p=top_p,
517
+ )
518
+ elif model_type == ModelType.ChatGLM:
519
+ logging.info(f"正在加载ChatGLM模型: {model_name}")
520
+ model = ChatGLM_Client(model_name)
521
+ elif model_type == ModelType.LLaMA and lora_model_path == "":
522
+ msg = f"现在请为 {model_name} 选择LoRA模型"
523
+ logging.info(msg)
524
+ lora_selector_visibility = True
525
+ if os.path.isdir("lora"):
526
+ lora_choices = get_file_names(
527
+ "lora", plain=True, filetypes=[""])
528
+ lora_choices = ["No LoRA"] + lora_choices
529
+ elif model_type == ModelType.LLaMA and lora_model_path != "":
530
+ logging.info(f"正在加载LLaMA模型: {model_name} + {lora_model_path}")
531
+ dont_change_lora_selector = True
532
+ if lora_model_path == "No LoRA":
533
+ lora_model_path = None
534
+ msg += " + No LoRA"
535
+ else:
536
+ msg += f" + {lora_model_path}"
537
+ model = LLaMA_Client(model_name, lora_model_path)
538
+ elif model_type == ModelType.XMBot:
539
+ model = XMBot_Client(api_key=access_key)
540
+ elif model_type == ModelType.Unknown:
541
+ raise ValueError(f"未知模型: {model_name}")
542
+ logging.info(msg)
543
+ except Exception as e:
544
+ logging.error(e)
545
+ msg = f"{STANDARD_ERROR_MSG}: {e}"
546
+ if dont_change_lora_selector:
547
+ return model, msg
548
+ else:
549
+ return model, msg, gr.Dropdown.update(choices=lora_choices, visible=lora_selector_visibility)
550
+
551
+
552
+ if __name__ == "__main__":
553
+ with open("config.json", "r") as f:
554
+ openai_api_key = cjson.load(f)["openai_api_key"]
555
+ # set logging level to debug
556
+ logging.basicConfig(level=logging.DEBUG)
557
+ # client = ModelManager(model_name="gpt-3.5-turbo", access_key=openai_api_key)
558
+ client = get_model(model_name="chatglm-6b-int4")
559
+ chatbot = []
560
+ stream = False
561
+ # 测试账单功能
562
+ logging.info(colorama.Back.GREEN + "测试账单功能" + colorama.Back.RESET)
563
+ logging.info(client.billing_info())
564
+ # 测试问答
565
+ logging.info(colorama.Back.GREEN + "测试问答" + colorama.Back.RESET)
566
+ question = "巴黎是中国的首都吗?"
567
+ for i in client.predict(inputs=question, chatbot=chatbot, stream=stream):
568
+ logging.info(i)
569
+ logging.info(f"测试问答后history : {client.history}")
570
+ # 测试记忆力
571
+ logging.info(colorama.Back.GREEN + "测试记忆力" + colorama.Back.RESET)
572
+ question = "我刚刚问了你什么问题?"
573
+ for i in client.predict(inputs=question, chatbot=chatbot, stream=stream):
574
+ logging.info(i)
575
+ logging.info(f"测试记忆力后history : {client.history}")
576
+ # 测试重试功能
577
+ logging.info(colorama.Back.GREEN + "测试重试功能" + colorama.Back.RESET)
578
+ for i in client.retry(chatbot=chatbot, stream=stream):
579
+ logging.info(i)
580
+ logging.info(f"重试后history : {client.history}")
581
+ # # 测试总结功能
582
+ # print(colorama.Back.GREEN + "测试总结功能" + colorama.Back.RESET)
583
+ # chatbot, msg = client.reduce_token_size(chatbot=chatbot)
584
+ # print(chatbot, msg)
585
+ # print(f"总结后history: {client.history}")
modules/overwrites.py CHANGED
@@ -4,6 +4,7 @@ import logging
4
  from llama_index import Prompt
5
  from typing import List, Tuple
6
  import mdtex2html
 
7
 
8
  from modules.presets import *
9
  from modules.llama_func import *
@@ -20,23 +21,60 @@ def compact_text_chunks(self, prompt: Prompt, text_chunks: List[str]) -> List[st
20
 
21
 
22
  def postprocess(
23
- self, y: List[Tuple[str | None, str | None]]
24
- ) -> List[Tuple[str | None, str | None]]:
25
- """
26
- Parameters:
27
- y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format.
28
- Returns:
29
- List of tuples representing the message and response. Each message and response will be a string of HTML.
30
- """
31
- if y is None or y == []:
32
- return []
33
- user, bot = y[-1]
34
- if not detect_converted_mark(user):
35
- user = convert_asis(user)
36
- if not detect_converted_mark(bot):
37
- bot = convert_mdtext(bot)
38
- y[-1] = (user, bot)
39
- return y
 
 
40
 
41
  with open("./assets/custom.js", "r", encoding="utf-8") as f, open("./assets/Kelpy-Codos.js", "r", encoding="utf-8") as f2:
42
  customJS = f.read()
4
  from llama_index import Prompt
5
  from typing import List, Tuple
6
  import mdtex2html
7
+ from gradio_client import utils as client_utils
8
 
9
  from modules.presets import *
10
  from modules.llama_func import *
21
 
22
 
23
  def postprocess(
24
+ self,
25
+ y: List[List[str | Tuple[str] | Tuple[str, str] | None] | Tuple],
26
+ ) -> List[List[str | Dict | None]]:
27
+ """
28
+ Parameters:
29
+ y: List of lists representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed.
30
+ Returns:
31
+ List of lists representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information. Or None if the message is not to be displayed.
32
+ """
33
+ if y is None:
34
+ return []
35
+ processed_messages = []
36
+ for message_pair in y:
37
+ assert isinstance(
38
+ message_pair, (tuple, list)
39
+ ), f"Expected a list of lists or list of tuples. Received: {message_pair}"
40
+ assert (
41
+ len(message_pair) == 2
42
+ ), f"Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}"
43
+
44
+ processed_messages.append(
45
+ [
46
+ self._postprocess_chat_messages(message_pair[0], "user"),
47
+ self._postprocess_chat_messages(message_pair[1], "bot"),
48
+ ]
49
+ )
50
+ return processed_messages
51
+
52
+ def postprocess_chat_messages(
53
+ self, chat_message: str | Tuple | List | None, message_type: str
54
+ ) -> str | Dict | None:
55
+ if chat_message is None:
56
+ return None
57
+ elif isinstance(chat_message, (tuple, list)):
58
+ filepath = chat_message[0]
59
+ mime_type = client_utils.get_mimetype(filepath)
60
+ filepath = self.make_temp_copy_if_needed(filepath)
61
+ return {
62
+ "name": filepath,
63
+ "mime_type": mime_type,
64
+ "alt_text": chat_message[1] if len(chat_message) > 1 else None,
65
+ "data": None, # These last two fields are filled in by the frontend
66
+ "is_file": True,
67
+ }
68
+ elif isinstance(chat_message, str):
69
+ if message_type == "bot":
70
+ if not detect_converted_mark(chat_message):
71
+ chat_message = convert_mdtext(chat_message)
72
+ elif message_type == "user":
73
+ if not detect_converted_mark(chat_message):
74
+ chat_message = convert_asis(chat_message)
75
+ return chat_message
76
+ else:
77
+ raise ValueError(f"Invalid message for Chatbot component: {chat_message}")
78
 
79
  with open("./assets/custom.js", "r", encoding="utf-8") as f, open("./assets/Kelpy-Codos.js", "r", encoding="utf-8") as f2:
80
  customJS = f.read()
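For context on the patched `postprocess` above, here is a minimal sketch of the per-pair message shapes it now accepts; the file name and alt text are made-up examples, not values from this commit:

```python
# Illustrative only: value shapes handled by the patched postprocess() for each [user, bot] pair.
history = [
    ["**hello**", "Hi, how can I help?"],   # strings: user -> convert_asis, bot -> convert_mdtext
    [("cat.png", "a cat photo"), None],     # (filepath_or_url, alt_text) -> media dict; None is not displayed
]
# Tuples are returned as {"name": ..., "mime_type": ..., "alt_text": ..., "data": None, "is_file": True}.
```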
modules/presets.py CHANGED
@@ -1,89 +1,117 @@
1
  # -*- coding:utf-8 -*-
2
- import gradio as gr
3
  from pathlib import Path
 
 
 
 
 
 
 
 
 
4
 
5
  # ChatGPT 设置
6
- initial_prompt = "You are a helpful assistant."
7
  API_HOST = "api.openai.com"
8
  COMPLETION_URL = "https://api.openai.com/v1/chat/completions"
9
  BALANCE_API_URL="https://api.openai.com/dashboard/billing/credit_grants"
10
  USAGE_API_URL="https://api.openai.com/dashboard/billing/usage"
11
  HISTORY_DIR = Path("history")
 
12
  TEMPLATES_DIR = "templates"
13
 
14
  # 错误信息
15
- standard_error_msg = "☹️发生了错误:" # 错误信息的标准前缀
16
- error_retrieve_prompt = "请检查网络连接,或者API-Key是否有效。" # 获取对话时发生错误
17
- connection_timeout_prompt = "连接超时,无法获取对话。" # 连接超时
18
- read_timeout_prompt = "读取超时,无法获取对话。" # 读取超时
19
- proxy_error_prompt = "代理错误,无法获取对话。" # 代理错误
20
- ssl_error_prompt = "SSL错误,无法获取对话。" # SSL 错误
21
- no_apikey_msg = "API key长度不是51位,请检查是否输入正确。" # API key 长度不足 51 位
22
- no_input_msg = "请输入对话内容。" # 未输入对话内容
23
-
24
- timeout_streaming = 30 # 流式对话时的超时时间
25
- timeout_all = 200 # 非流式对话时的超时时间
26
- enable_streaming_option = True # 是否启用选择选择是否实时显示回答的勾选框
 
 
27
  HIDE_MY_KEY = False # 如果你想在UI中隐藏你的 API 密钥,将此值设置为 True
28
  CONCURRENT_COUNT = 100 # 允许同时使用的用户数量
29
 
30
  SIM_K = 5
31
  INDEX_QUERY_TEMPRATURE = 1.0
32
 
33
- title = """<h1 align="left" style="min-width:200px; margin-top:6px; white-space: nowrap;">川虎ChatGPT 🚀</h1>"""
34
- description = """\
35
- <div align="center" style="margin:16px 0">
36
 
37
- 由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发
38
 
39
- 访问川虎ChatGPT的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本
40
 
41
- 此App使用 `gpt-3.5-turbo` 大语言模型
 
 
 
 
 
 
42
  </div>
43
  """
44
 
45
- footer = """\
46
- <div class="versions">{versions}</div>
47
- """
48
-
49
- summarize_prompt = "你是谁?我们刚才聊了什么?" # 总结对话时的 prompt
50
 
51
- MODELS = [
52
  "gpt-3.5-turbo",
53
  "gpt-3.5-turbo-0301",
54
  "gpt-4",
55
  "gpt-4-0314",
56
  "gpt-4-32k",
57
  "gpt-4-32k-0314",
58
- ] # 可选的模型
59
-
60
- MODEL_SOFT_TOKEN_LIMIT = {
61
- "gpt-3.5-turbo": {
62
- "streaming": 3500,
63
- "all": 3500
64
- },
65
- "gpt-3.5-turbo-0301": {
66
- "streaming": 3500,
67
- "all": 3500
68
- },
69
- "gpt-4": {
70
- "streaming": 7500,
71
- "all": 7500
72
- },
73
- "gpt-4-0314": {
74
- "streaming": 7500,
75
- "all": 7500
76
- },
77
- "gpt-4-32k": {
78
- "streaming": 31000,
79
- "all": 31000
80
- },
81
- "gpt-4-32k-0314": {
82
- "streaming": 31000,
83
- "all": 31000
84
- }
 
 
 
 
 
 
 
 
 
 
 
 
85
  }
86
 
 
 
 
 
87
  REPLY_LANGUAGES = [
88
  "简体中文",
89
  "繁體中文",
1
  # -*- coding:utf-8 -*-
2
+ import os
3
  from pathlib import Path
4
+ import gradio as gr
5
+ from .webui_locale import I18nAuto
6
+
7
+ i18n = I18nAuto() # internationalization
8
+
9
+ CHATGLM_MODEL = None
10
+ CHATGLM_TOKENIZER = None
11
+ LLAMA_MODEL = None
12
+ LLAMA_INFERENCER = None
13
 
14
  # ChatGPT 设置
15
+ INITIAL_SYSTEM_PROMPT = "You are a helpful assistant."
16
  API_HOST = "api.openai.com"
17
  COMPLETION_URL = "https://api.openai.com/v1/chat/completions"
18
  BALANCE_API_URL="https://api.openai.com/dashboard/billing/credit_grants"
19
  USAGE_API_URL="https://api.openai.com/dashboard/billing/usage"
20
  HISTORY_DIR = Path("history")
21
+ HISTORY_DIR = "history"
22
  TEMPLATES_DIR = "templates"
23
 
24
  # 错误信息
25
+ STANDARD_ERROR_MSG = i18n("☹️发生了错误:") # 错误信息的标准前缀
26
+ GENERAL_ERROR_MSG = i18n("获取对话时发生错误,请查看后台日志")
27
+ ERROR_RETRIEVE_MSG = i18n("请检查网络连接,或者API-Key是否有效。")
28
+ CONNECTION_TIMEOUT_MSG = i18n("连接超时,无法获取对话。") # 连接超时
29
+ READ_TIMEOUT_MSG = i18n("读取超时,无法获取对话。") # 读取超时
30
+ PROXY_ERROR_MSG = i18n("代理错误,无法获取对话。") # 代理错误
31
+ SSL_ERROR_PROMPT = i18n("SSL错误,无法获取对话。") # SSL 错误
32
+ NO_APIKEY_MSG = i18n("API key为空,请检查是否输入正确。") # API key 为空时的提示
33
+ NO_INPUT_MSG = i18n("请输入对话内容。") # 未输入对话内容
34
+ BILLING_NOT_APPLICABLE_MSG = i18n("账单信息不适用") # 本地运行的模型返回的账单信息
35
+
36
+ TIMEOUT_STREAMING = 60 # 流式对话时的超时时间
37
+ TIMEOUT_ALL = 200 # 非流式对话时的超时时间
38
+ ENABLE_STREAMING_OPTION = True # 是否启用选择是否实时显示回答的勾选框
39
  HIDE_MY_KEY = False # 如果你想在UI中隐藏你的 API 密钥,将此值设置为 True
40
  CONCURRENT_COUNT = 100 # 允许同时使用的用户数量
41
 
42
  SIM_K = 5
43
  INDEX_QUERY_TEMPRATURE = 1.0
44
 
45
+ CHUANHU_TITLE = i18n("川虎Chat 🚀")
 
 
46
 
47
+ CHUANHU_DESCRIPTION = i18n("由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发<br />访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本")
48
 
49
+ FOOTER = """<div class="versions">{versions}</div>"""
50
 
51
+ APPEARANCE_SWITCHER = """
52
+ <div style="display: flex; justify-content: space-between;">
53
+ <span style="margin-top: 4px !important;">"""+ i18n("切换亮暗色主题") + """</span>
54
+ <span><label class="apSwitch" for="checkbox">
55
+ <input type="checkbox" id="checkbox">
56
+ <div class="apSlider"></div>
57
+ </label></span>
58
  </div>
59
  """
60
 
61
+ SUMMARIZE_PROMPT = "你是谁?我们刚才聊了什么?" # 总结对话时的 prompt
 
 
 
 
62
 
63
+ ONLINE_MODELS = [
64
  "gpt-3.5-turbo",
65
  "gpt-3.5-turbo-0301",
66
  "gpt-4",
67
  "gpt-4-0314",
68
  "gpt-4-32k",
69
  "gpt-4-32k-0314",
70
+ "xmbot",
71
+ ]
72
+
73
+ LOCAL_MODELS = [
74
+ "chatglm-6b",
75
+ "chatglm-6b-int4",
76
+ "chatglm-6b-int4-qe",
77
+ "llama-7b-hf",
78
+ "llama-7b-hf-int4",
79
+ "llama-7b-hf-int8",
80
+ "llama-13b-hf",
81
+ "llama-13b-hf-int4",
82
+ "llama-30b-hf",
83
+ "llama-30b-hf-int4",
84
+ "llama-65b-hf"
85
+ ]
86
+
87
+ if os.environ.get('HIDE_LOCAL_MODELS', 'false') == 'true':
88
+ MODELS = ONLINE_MODELS
89
+ else:
90
+ MODELS = ONLINE_MODELS + LOCAL_MODELS
91
+
92
+ DEFAULT_MODEL = 0
93
+
94
+ os.makedirs("models", exist_ok=True)
95
+ os.makedirs("lora", exist_ok=True)
96
+ os.makedirs("history", exist_ok=True)
97
+ for dir_name in os.listdir("models"):
98
+ if os.path.isdir(os.path.join("models", dir_name)):
99
+ if dir_name not in MODELS:
100
+ MODELS.append(dir_name)
101
+
102
+ MODEL_TOKEN_LIMIT = {
103
+ "gpt-3.5-turbo": 4096,
104
+ "gpt-3.5-turbo-0301": 4096,
105
+ "gpt-4": 8192,
106
+ "gpt-4-0314": 8192,
107
+ "gpt-4-32k": 32768,
108
+ "gpt-4-32k-0314": 32768
109
  }
110
 
111
+ TOKEN_OFFSET = 1000 # 模型的token上限减去这个值,得到软上限。到达软上限之后,自动尝试减少token占用。
112
+ DEFAULT_TOKEN_LIMIT = 3000 # 默认的token上限
113
+ REDUCE_TOKEN_FACTOR = 0.5 # 与模型token上限相乘,得到目标token数。减少token占用时,将token占用减少到目标token数以下。
114
+
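Spelled out, the three constants above combine roughly like this (a hedged sketch using one entry from the table; it mirrors the comments, it is not code from the commit):

```python
hard_limit = MODEL_TOKEN_LIMIT["gpt-3.5-turbo"]     # 4096 tokens for this model
soft_limit = hard_limit - TOKEN_OFFSET              # 3096: past this point, token reduction kicks in
target     = int(hard_limit * REDUCE_TOKEN_FACTOR)  # 2048: history is trimmed back below this count
```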
115
  REPLY_LANGUAGES = [
116
  "简体中文",
117
  "繁體中文",
modules/shared.py CHANGED
@@ -41,11 +41,11 @@ class State:
41
  def switching_api_key(self, func):
42
  if not hasattr(self, "api_key_queue"):
43
  return func
44
-
45
  def wrapped(*args, **kwargs):
46
  api_key = self.api_key_queue.get()
47
- args = list(args)[1:]
48
- ret = func(api_key, *args, **kwargs)
49
  self.api_key_queue.put(api_key)
50
  return ret
51
 
41
  def switching_api_key(self, func):
42
  if not hasattr(self, "api_key_queue"):
43
  return func
44
+
45
  def wrapped(*args, **kwargs):
46
  api_key = self.api_key_queue.get()
47
+ args[0].api_key = api_key
48
+ ret = func(*args, **kwargs)
49
  self.api_key_queue.put(api_key)
50
  return ret
51
 
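The rewritten `wrapped` above no longer strips the first positional argument; it assumes that argument is the model object and swaps the rotating key onto it. A self-contained sketch of that assumed behaviour (the class and key names are invented for illustration):

```python
import queue

class FakeModel:
    api_key = None
    def ask(self, prompt):
        return f"({self.api_key}) {prompt}"

class State:
    def __init__(self, keys):
        self.api_key_queue = queue.Queue()
        for k in keys:
            self.api_key_queue.put(k)

    def switching_api_key(self, func):
        def wrapped(*args, **kwargs):
            api_key = self.api_key_queue.get()   # take the next key from the pool
            args[0].api_key = api_key            # patch it onto the model, i.e. the first argument
            ret = func(*args, **kwargs)
            self.api_key_queue.put(api_key)      # rotate the key back for the next caller
            return ret
        return wrapped

state = State(["sk-aaa", "sk-bbb"])
ask = state.switching_api_key(FakeModel.ask)
model = FakeModel()
print(ask(model, "hi"))   # answered with sk-aaa
print(ask(model, "hi"))   # answered with sk-bbb
```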
modules/utils.py CHANGED
@@ -34,6 +34,85 @@ if TYPE_CHECKING:
34
  headers: List[str]
35
  data: List[List[str | int | bool]]
36
 
 
 
37
 
38
  def count_token(message):
39
  encoding = tiktoken.get_encoding("cl100k_base")
@@ -121,10 +200,13 @@ def convert_asis(userinput):
121
 
122
 
123
  def detect_converted_mark(userinput):
124
- if userinput.endswith(ALREADY_CONVERTED_MARK):
 
 
 
 
 
125
  return True
126
- else:
127
- return False
128
 
129
 
130
  def detect_language(code):
@@ -153,107 +235,22 @@ def construct_assistant(text):
153
  return construct_text("assistant", text)
154
 
155
 
156
- def construct_token_message(tokens: List[int]):
157
- token_sum = 0
158
- for i in range(len(tokens)):
159
- token_sum += sum(tokens[: i + 1])
160
- return f"Token 计数: {sum(tokens)},本次对话累计消耗了 {token_sum} tokens"
161
-
162
-
163
- def delete_first_conversation(history, previous_token_count):
164
- if history:
165
- del history[:2]
166
- del previous_token_count[0]
167
- return (
168
- history,
169
- previous_token_count,
170
- construct_token_message(previous_token_count),
171
- )
172
-
173
-
174
- def delete_last_conversation(chatbot, history, previous_token_count):
175
- if len(chatbot) > 0 and standard_error_msg in chatbot[-1][1]:
176
- logging.info("由于包含报错信息,只删除chatbot记录")
177
- chatbot.pop()
178
- return chatbot, history
179
- if len(history) > 0:
180
- logging.info("删除了一组对话历史")
181
- history.pop()
182
- history.pop()
183
- if len(chatbot) > 0:
184
- logging.info("删除了一组chatbot对话")
185
- chatbot.pop()
186
- if len(previous_token_count) > 0:
187
- logging.info("删除了一组对话的token计数记录")
188
- previous_token_count.pop()
189
- return (
190
- chatbot,
191
- history,
192
- previous_token_count,
193
- construct_token_message(previous_token_count),
194
- )
195
-
196
-
197
  def save_file(filename, system, history, chatbot, user_name):
198
- logging.info(f"{user_name} 保存对话历史中……")
199
- os.makedirs(HISTORY_DIR / user_name, exist_ok=True)
200
  if filename.endswith(".json"):
201
  json_s = {"system": system, "history": history, "chatbot": chatbot}
202
  print(json_s)
203
- with open(os.path.join(HISTORY_DIR / user_name, filename), "w") as f:
204
  json.dump(json_s, f)
205
  elif filename.endswith(".md"):
206
  md_s = f"system: \n- {system} \n"
207
  for data in history:
208
  md_s += f"\n{data['role']}: \n- {data['content']} \n"
209
- with open(os.path.join(HISTORY_DIR / user_name, filename), "w", encoding="utf8") as f:
210
  f.write(md_s)
211
- logging.info(f"{user_name} 保存对话历史完毕")
212
- return os.path.join(HISTORY_DIR / user_name, filename)
213
-
214
-
215
- def save_chat_history(filename, system, history, chatbot, user_name):
216
- if filename == "":
217
- return
218
- if not filename.endswith(".json"):
219
- filename += ".json"
220
- return save_file(filename, system, history, chatbot, user_name)
221
-
222
-
223
- def export_markdown(filename, system, history, chatbot, user_name):
224
- if filename == "":
225
- return
226
- if not filename.endswith(".md"):
227
- filename += ".md"
228
- return save_file(filename, system, history, chatbot, user_name)
229
-
230
-
231
- def load_chat_history(filename, system, history, chatbot, user_name):
232
- logging.info(f"{user_name} 加载对话历史中……")
233
- if type(filename) != str:
234
- filename = filename.name
235
- try:
236
- with open(os.path.join(HISTORY_DIR / user_name, filename), "r") as f:
237
- json_s = json.load(f)
238
- try:
239
- if type(json_s["history"][0]) == str:
240
- logging.info("历史记录格式为旧版,正在转换……")
241
- new_history = []
242
- for index, item in enumerate(json_s["history"]):
243
- if index % 2 == 0:
244
- new_history.append(construct_user(item))
245
- else:
246
- new_history.append(construct_assistant(item))
247
- json_s["history"] = new_history
248
- logging.info(new_history)
249
- except:
250
- # 没有对话历史
251
- pass
252
- logging.info(f"{user_name} 加载对话历史完毕")
253
- return filename, json_s["system"], json_s["history"], json_s["chatbot"]
254
- except FileNotFoundError:
255
- logging.info(f"{user_name} 没有找到对话历史文件,不执行任何操作")
256
- return filename, system, history, chatbot
257
 
258
 
259
  def sorted_by_pinyin(list):
@@ -261,7 +258,7 @@ def sorted_by_pinyin(list):
261
 
262
 
263
  def get_file_names(dir, plain=False, filetypes=[".json"]):
264
- logging.info(f"获取文件名列表,目录为{dir},文件类型为{filetypes},是否为纯文本列表{plain}")
265
  files = []
266
  try:
267
  for type in filetypes:
@@ -279,14 +276,13 @@ def get_file_names(dir, plain=False, filetypes=[".json"]):
279
 
280
 
281
  def get_history_names(plain=False, user_name=""):
282
- logging.info(f"从用户 {user_name} 中获取历史记录文件名列表")
283
- return get_file_names(HISTORY_DIR / user_name, plain)
284
 
285
 
286
  def load_template(filename, mode=0):
287
- logging.info(f"加载模板文件{filename},模式为{mode}(0为返回字典和下拉菜单,1为返回下拉菜单,2为返回字典)")
288
  lines = []
289
- logging.info("Loading template...")
290
  if filename.endswith(".json"):
291
  with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as f:
292
  lines = json.load(f)
@@ -310,23 +306,18 @@ def load_template(filename, mode=0):
310
 
311
 
312
  def get_template_names(plain=False):
313
- logging.info("获取模板文件名列表")
314
  return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", "json"])
315
 
316
 
317
  def get_template_content(templates, selection, original_system_prompt):
318
- logging.info(f"应用模板中,选择为{selection},原始系统提示为{original_system_prompt}")
319
  try:
320
  return templates[selection]
321
  except:
322
  return original_system_prompt
323
 
324
 
325
- def reset_state():
326
- logging.info("重置状态")
327
- return [], [], [], construct_token_message([0])
328
-
329
-
330
  def reset_textbox():
331
  logging.debug("重置文本框")
332
  return gr.update(value="")
@@ -388,16 +379,16 @@ def get_geoip():
388
  logging.warning(f"无法获取IP地址信息。\n{data}")
389
  if data["reason"] == "RateLimited":
390
  return (
391
- f"获取IP地理位置失败,因为达到了检测IP的速率限制。聊天功能可能仍然可用。"
392
  )
393
  else:
394
- return f"获取IP地理位置失败。原因:{data['reason']}。你仍然可以使用聊天功能。"
395
  else:
396
  country = data["country_name"]
397
  if country == "China":
398
  text = "**您的IP区域:中国。请立即检查代理设置,在不受支持的地区使用API可能导致账号被封禁。**"
399
  else:
400
- text = f"您的IP区域:{country}。"
401
  logging.info(text)
402
  return text
403
 
@@ -418,7 +409,7 @@ def find_n(lst, max_num):
418
 
419
  def start_outputing():
420
  logging.debug("显示取消按钮,隐藏发送按钮")
421
- return gr.Button.update(visible=True), gr.Button.update(visible=False)
422
 
423
 
424
  def end_outputing():
@@ -440,8 +431,8 @@ def transfer_input(inputs):
440
  return (
441
  inputs,
442
  gr.update(value=""),
443
- gr.Button.update(visible=True),
444
  gr.Button.update(visible=False),
 
445
  )
446
 
447
 
@@ -504,15 +495,15 @@ def add_details(lst):
504
  return nodes
505
 
506
 
507
- def sheet_to_string(sheet):
508
- result = ""
509
  for index, row in sheet.iterrows():
510
  row_string = ""
511
  for column in sheet.columns:
512
  row_string += f"{column}: {row[column]}, "
513
  row_string = row_string.rstrip(", ")
514
  row_string += "."
515
- result += row_string + "\n"
516
  return result
517
 
518
  def excel_to_string(file_path):
@@ -520,17 +511,23 @@ def excel_to_string(file_path):
520
  excel_file = pd.read_excel(file_path, engine='openpyxl', sheet_name=None)
521
 
522
  # 初始化结果字符串
523
- result = ""
524
 
525
  # 遍历每一个工作表
526
  for sheet_name, sheet_data in excel_file.items():
527
- # 将工作表名称添加到结果字符串
528
- result += f"Sheet: {sheet_name}\n"
529
 
530
  # 处理当前工作表并添加到结果字符串
531
- result += sheet_to_string(sheet_data)
532
 
533
- # 在不同工作表之间添加分隔符
534
- result += "\n" + ("-" * 20) + "\n\n"
535
 
536
  return result
 
 
 
 
 
 
 
 
 
 
34
  headers: List[str]
35
  data: List[List[str | int | bool]]
36
 
37
+ def predict(current_model, *args):
38
+ iter = current_model.predict(*args)
39
+ for i in iter:
40
+ yield i
41
+
42
+ def billing_info(current_model):
43
+ return current_model.billing_info()
44
+
45
+ def set_key(current_model, *args):
46
+ return current_model.set_key(*args)
47
+
48
+ def load_chat_history(current_model, *args):
49
+ return current_model.load_chat_history(*args)
50
+
51
+ def interrupt(current_model, *args):
52
+ return current_model.interrupt(*args)
53
+
54
+ def reset(current_model, *args):
55
+ return current_model.reset(*args)
56
+
57
+ def retry(current_model, *args):
58
+ iter = current_model.retry(*args)
59
+ for i in iter:
60
+ yield i
61
+
62
+ def delete_first_conversation(current_model, *args):
63
+ return current_model.delete_first_conversation(*args)
64
+
65
+ def delete_last_conversation(current_model, *args):
66
+ return current_model.delete_last_conversation(*args)
67
+
68
+ def set_system_prompt(current_model, *args):
69
+ return current_model.set_system_prompt(*args)
70
+
71
+ def save_chat_history(current_model, *args):
72
+ return current_model.save_chat_history(*args)
73
+
74
+ def export_markdown(current_model, *args):
75
+ return current_model.export_markdown(*args)
76
+
77
+ def load_chat_history(current_model, *args):
78
+ return current_model.load_chat_history(*args)
79
+
80
+ def set_token_upper_limit(current_model, *args):
81
+ return current_model.set_token_upper_limit(*args)
82
+
83
+ def set_temperature(current_model, *args):
84
+ current_model.set_temperature(*args)
85
+
86
+ def set_top_p(current_model, *args):
87
+ current_model.set_top_p(*args)
88
+
89
+ def set_n_choices(current_model, *args):
90
+ current_model.set_n_choices(*args)
91
+
92
+ def set_stop_sequence(current_model, *args):
93
+ current_model.set_stop_sequence(*args)
94
+
95
+ def set_max_tokens(current_model, *args):
96
+ current_model.set_max_tokens(*args)
97
+
98
+ def set_presence_penalty(current_model, *args):
99
+ current_model.set_presence_penalty(*args)
100
+
101
+ def set_frequency_penalty(current_model, *args):
102
+ current_model.set_frequency_penalty(*args)
103
+
104
+ def set_logit_bias(current_model, *args):
105
+ current_model.set_logit_bias(*args)
106
+
107
+ def set_user_identifier(current_model, *args):
108
+ current_model.set_user_identifier(*args)
109
+
110
+ def set_single_turn(current_model, *args):
111
+ current_model.set_single_turn(*args)
112
+
113
+ def handle_file_upload(current_model, *args):
114
+ return current_model.handle_file_upload(*args)
115
+
116
 
117
  def count_token(message):
118
  encoding = tiktoken.get_encoding("cl100k_base")
200
 
201
 
202
  def detect_converted_mark(userinput):
203
+ try:
204
+ if userinput.endswith(ALREADY_CONVERTED_MARK):
205
+ return True
206
+ else:
207
+ return False
208
+ except:
209
  return True
 
 
210
 
211
 
212
  def detect_language(code):
235
  return construct_text("assistant", text)
236
 
237
 
 
 
 
238
  def save_file(filename, system, history, chatbot, user_name):
239
+ logging.debug(f"{user_name} 保存对话历史中……")
240
+ os.makedirs(os.path.join(HISTORY_DIR, user_name), exist_ok=True)
241
  if filename.endswith(".json"):
242
  json_s = {"system": system, "history": history, "chatbot": chatbot}
243
  print(json_s)
244
+ with open(os.path.join(HISTORY_DIR, user_name, filename), "w") as f:
245
  json.dump(json_s, f)
246
  elif filename.endswith(".md"):
247
  md_s = f"system: \n- {system} \n"
248
  for data in history:
249
  md_s += f"\n{data['role']}: \n- {data['content']} \n"
250
+ with open(os.path.join(HISTORY_DIR, user_name, filename), "w", encoding="utf8") as f:
251
  f.write(md_s)
252
+ logging.debug(f"{user_name} 保存对话历史完毕")
253
+ return os.path.join(HISTORY_DIR, user_name, filename)
 
 
 
 
 
254
 
255
 
256
  def sorted_by_pinyin(list):
258
 
259
 
260
  def get_file_names(dir, plain=False, filetypes=[".json"]):
261
+ logging.debug(f"获取文件名列表,目录为{dir},文件类型为{filetypes},是否为纯文本列表{plain}")
262
  files = []
263
  try:
264
  for type in filetypes:
276
 
277
 
278
  def get_history_names(plain=False, user_name=""):
279
+ logging.debug(f"从用户 {user_name} 中获取历史记录文件名列表")
280
+ return get_file_names(os.path.join(HISTORY_DIR, user_name), plain)
281
 
282
 
283
  def load_template(filename, mode=0):
284
+ logging.debug(f"加载模板文件{filename},模式为{mode}(0为返回字典和下拉菜单,1为返回下拉菜单,2为返回字典)")
285
  lines = []
 
286
  if filename.endswith(".json"):
287
  with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as f:
288
  lines = json.load(f)
306
 
307
 
308
  def get_template_names(plain=False):
309
+ logging.debug("获取模板文件名列表")
310
  return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", "json"])
311
 
312
 
313
  def get_template_content(templates, selection, original_system_prompt):
314
+ logging.debug(f"应用模板中,选择为{selection},原始系统提示为{original_system_prompt}")
315
  try:
316
  return templates[selection]
317
  except:
318
  return original_system_prompt
319
 
320
 
 
 
 
 
 
321
  def reset_textbox():
322
  logging.debug("重置文本框")
323
  return gr.update(value="")
379
  logging.warning(f"无法获取IP地址信息。\n{data}")
380
  if data["reason"] == "RateLimited":
381
  return (
382
+ i18n("您的IP区域:未知。")
383
  )
384
  else:
385
+ return i18n("获取IP地理位置失败。原因:") + f"{data['reason']}" + i18n("。你仍然可以使用聊天功能。")
386
  else:
387
  country = data["country_name"]
388
  if country == "China":
389
  text = "**您的IP区域:中国。请立即检查代理设置,在不受支持的地区使用API可能导致账号被封禁。**"
390
  else:
391
+ text = i18n("您的IP区域:") + f"{country}。"
392
  logging.info(text)
393
  return text
394
 
409
 
410
  def start_outputing():
411
  logging.debug("显示取消按钮,隐藏发送按钮")
412
+ return gr.Button.update(visible=False), gr.Button.update(visible=True)
413
 
414
 
415
  def end_outputing():
431
  return (
432
  inputs,
433
  gr.update(value=""),
 
434
  gr.Button.update(visible=False),
435
+ gr.Button.update(visible=True),
436
  )
437
 
438
 
495
  return nodes
496
 
497
 
498
+ def sheet_to_string(sheet, sheet_name = None):
499
+ result = []
500
  for index, row in sheet.iterrows():
501
  row_string = ""
502
  for column in sheet.columns:
503
  row_string += f"{column}: {row[column]}, "
504
  row_string = row_string.rstrip(", ")
505
  row_string += "."
506
+ result.append(row_string)
507
  return result
508
 
509
  def excel_to_string(file_path):
511
  excel_file = pd.read_excel(file_path, engine='openpyxl', sheet_name=None)
512
 
513
  # 初始化结果字符串
514
+ result = []
515
 
516
  # 遍历每一个工作表
517
  for sheet_name, sheet_data in excel_file.items():
 
 
518
 
519
  # 处理当前工作表并添加到结果字符串
520
+ result += sheet_to_string(sheet_data, sheet_name=sheet_name)
521
 
 
 
522
 
523
  return result
524
+
525
+ def get_last_day_of_month(any_day):
526
+ # The day 28 exists in every month. 4 days later, it's always next month
527
+ next_month = any_day.replace(day=28) + datetime.timedelta(days=4)
528
+ # subtracting the number of the current day brings us back one month
529
+ return next_month - datetime.timedelta(days=next_month.day)
530
+
531
+ def get_model_source(model_name, alternative_source):
532
+ if model_name == "gpt2-medium":
533
+ return "https://huggingface.co/gpt2-medium"
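One small addition above worth illustrating is `get_last_day_of_month` and the day-28 trick it relies on. A quick, runnable check (the dates are arbitrary examples):

```python
import datetime

def get_last_day_of_month(any_day):
    # Day 28 exists in every month, so adding 4 days always lands in the next month.
    next_month = any_day.replace(day=28) + datetime.timedelta(days=4)
    # Subtracting that day-of-month steps back to the last day of the original month.
    return next_month - datetime.timedelta(days=next_month.day)

print(get_last_day_of_month(datetime.date(2023, 2, 10)))   # 2023-02-28
print(get_last_day_of_month(datetime.date(2024, 2, 10)))   # 2024-02-29 (leap year)
print(get_last_day_of_month(datetime.date(2023, 12, 5)))   # 2023-12-31
```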
modules/webui_locale.py ADDED
@@ -0,0 +1,26 @@
 
 
1
+ import os
2
+ import locale
3
+ import commentjson as json
4
+
5
+ class I18nAuto:
6
+ def __init__(self):
7
+ if os.path.exists("config.json"):
8
+ with open("config.json", "r", encoding='utf-8') as f:
9
+ config = json.load(f)
10
+ else:
11
+ config = {}
12
+ language = config.get("language", "auto") # 从 config.json 读取界面语言设置,默认为 "auto"(跟随系统)
13
+ language = os.environ.get("LANGUAGE", language)
14
+ if language == "auto":
15
+ language = locale.getdefaultlocale()[0] # get the language code of the system (ex. zh_CN)
16
+ self.language_map = {}
17
+ self.file_is_exists = os.path.isfile(f"./locale/{language}.json")
18
+ if self.file_is_exists:
19
+ with open(f"./locale/{language}.json", "r", encoding="utf-8") as f:
20
+ self.language_map.update(json.load(f))
21
+
22
+ def __call__(self, key):
23
+ if self.file_is_exists and key in self.language_map:
24
+ return self.language_map[key]
25
+ else:
26
+ return key
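To make the lookup concrete: `I18nAuto` uses the original strings as keys and `locale/<lang>.json` supplies the translations; a missing file or key simply falls back to the key itself. A small illustration (the locale file contents shown are a made-up example, not part of this commit):

```python
# Hypothetical locale/en_US.json:
#   { "☹️发生了错误:": "☹️ An error occurred: " }
from modules.webui_locale import I18nAuto

i18n = I18nAuto()                 # picks the language from config.json, the LANGUAGE env var, or the system locale
print(i18n("☹️发生了错误:"))       # translated when the key exists in locale/<lang>.json
print(i18n("untranslated text"))  # unknown keys are returned unchanged
```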
readme/README_en.md ADDED
@@ -0,0 +1,127 @@
 
 
1
+ <div align="right">
2
+ <!-- Language: -->
3
+ <a title="Chinese" href="../README.md">简体中文</a> | English | <a title="Japanese" href="README_ja.md">日本語</a>
4
+ </div>
5
+
6
+ <h1 align="center">川虎 Chat 🐯 Chuanhu Chat</h1>
7
+ <div align="center">
8
+ <a href="https://github.com/GaiZhenBiao/ChuanhuChatGPT">
9
+ <img src="https://user-images.githubusercontent.com/70903329/227087087-93b37d64-7dc3-4738-a518-c1cf05591c8a.png" alt="Logo" height="156">
10
+ </a>
11
+
12
+ <p align="center">
13
+ <h3>Lightweight and User-friendly Web-UI for LLMs including ChatGPT/ChatGLM/LLaMA</h3>
14
+ <p align="center">
15
+ <a href="https://github.com/GaiZhenbiao/ChuanhuChatGPT/blob/main/LICENSE">
16
+ <img alt="Tests Passing" src="https://img.shields.io/github/license/GaiZhenbiao/ChuanhuChatGPT" />
17
+ </a>
18
+ <a href="https://gradio.app/">
19
+ <img alt="GitHub Contributors" src="https://img.shields.io/badge/Base-Gradio-fb7d1a?style=flat" />
20
+ </a>
21
+ <a href="https://t.me/tkdifferent">
22
+ <img alt="GitHub pull requests" src="https://img.shields.io/badge/Telegram-Group-blue.svg?logo=telegram" />
23
+ </a>
24
+ <p>
25
+ Streaming / Unlimited conversations / Save history / Preset prompts / Chat with files / Web search <br />
26
+ LaTeX rendering / Table rendering / Code highlighting <br />
27
+ Auto dark mode / Adaptive web interface / WeChat-like theme <br />
28
+ Multi-parameters tuning / Multi-API-Key support / Multi-user support <br />
29
+ Compatible with GPT-4 / Local deployment for LLMs
30
+ </p>
31
+ <a href="https://www.youtube.com/watch?v=MtxS4XZWbJE"><strong>Video Tutorial</strong></a>
32
+ ·
33
+ <a href="https://www.youtube.com/watch?v=77nw7iimYDE"><strong>2.0 Introduction</strong></a>
34
+ ·
35
+ <a href="https://www.youtube.com/watch?v=x-O1jjBqgu4"><strong>3.0 Introduction & Tutorial</strong></a>
36
+ ||
37
+ <a href="https://huggingface.co/spaces/JohnSmith9982/ChuanhuChatGPT"><strong>Online trial</strong></a>
38
+ ·
39
+ <a href="https://huggingface.co/login?next=%2Fspaces%2FJohnSmith9982%2FChuanhuChatGPT%3Fduplicate%3Dtrue"><strong>One-Click deployment</strong></a>
40
+ </p>
41
+ <p align="center">
42
+ <img alt="Animation Demo" src="https://user-images.githubusercontent.com/51039745/226255695-6b17ff1f-ea8d-464f-b69b-a7b6b68fffe8.gif" />
43
+ </p>
44
+ </p>
45
+ </div>
46
+
47
+ ## Usage Tips
48
+
49
+ - To better control ChatGPT, use the System Prompt.
50
+ - To use a Prompt Template, select the Prompt Template Collection file first, then choose a prompt from the drop-down menu.
51
+ - If a response is unsatisfactory, click the `🔄 Regenerate` button to try again.
52
+ - To start a new line in the input box, press <kbd>Shift</kbd> + <kbd>Enter</kbd>.
53
+ - To quickly switch between input history entries, press the <kbd>↑</kbd> and <kbd>↓</kbd> keys in the input box.
54
+ - To deploy the program onto a server, change the last line of the program to `demo.launch(server_name="0.0.0.0", server_port=<your port number>)` (see the sketch after this list).
55
+ - To get a public shared link, change the last line of the program to `demo.launch(share=True)`. Note that the program must be running in order to be accessed via the public link.
56
+ - To use it in Hugging Face Spaces: It is recommended to **Duplicate Space** and run the program in your own Space for a faster and more secure experience.
57
+
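A concrete version of the two deployment tips above; the port number is only an example, any free port works:

```python
# Last line of ChuanhuChatbot.py — serve on the LAN, or share a temporary public link instead.
demo.launch(server_name="0.0.0.0", server_port=7860)   # reachable from other machines on port 7860
# demo.launch(share=True)                              # generates a temporary public gradio.live URL
```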
58
+ ## Installation
59
+
60
+ ```shell
61
+ git clone https://github.com/GaiZhenbiao/ChuanhuChatGPT.git
62
+ cd ChuanhuChatGPT
63
+ pip install -r requirements.txt
64
+ ```
65
+
66
+ Then make a copy of `config_example.json`, rename it to `config.json`, and then fill in your API-Key and other settings in the file.
67
+
68
+ ```shell
69
+ python ChuanhuChatbot.py
70
+ ```
71
+
72
+ A browser window will open and you will be able to chat with ChatGPT.
73
+
74
+ > **Note**
75
+ >
76
+ > Please check our [wiki page](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程) for detailed instructions.
77
+
78
+ ## Troubleshooting
79
+
80
+ When you encounter problems, you should try manually pulling the latest changes of this project first. The steps are as follows:
81
+
82
+ 1. Download the latest code archive by clicking on `Download ZIP` on the webpage, or
83
+ ```shell
84
+ git pull https://github.com/GaiZhenbiao/ChuanhuChatGPT.git main -f
85
+ ```
86
+ 2. Try installing the dependencies again (as this project may have introduced new dependencies)
87
+ ```
88
+ pip install -r requirements.txt
89
+ ```
90
+ 3. Update Gradio
91
+ ```
92
+ pip install gradio --upgrade --force-reinstall
93
+ ```
94
+
95
+ Generally, you can solve most problems by following these steps.
96
+
97
+ If the problem persists, please refer to this page: [Frequently Asked Questions (FAQ)](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题)
98
+
99
+ This page lists almost all the possible problems and solutions. Please read it carefully.
100
+
101
+ ## More Information
102
+
103
+ More information can be found in our [wiki](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki):
104
+
105
+ - [How to contribute a translation](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/Localization)
106
+ - [How to make a contribution](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/贡献指南)
107
+ - [How to cite the project](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可#如何引用该项目)
108
+ - [Project changelog](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/更新日志)
109
+ - [Project license](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可)
110
+
111
+ ## Starchart
112
+
113
+ [![Star History Chart](https://api.star-history.com/svg?repos=GaiZhenbiao/ChuanhuChatGPT&type=Date)](https://star-history.com/#GaiZhenbiao/ChuanhuChatGPT&Date)
114
+
115
+ ## Contributors
116
+
117
+ <a href="https://github.com/GaiZhenbiao/ChuanhuChatGPT/graphs/contributors">
118
+ <img src="https://contrib.rocks/image?repo=GaiZhenbiao/ChuanhuChatGPT" />
119
+ </a>
120
+
121
+ ## Sponsor
122
+
123
+ 🐯 If you find this project helpful, feel free to buy me a coke or a cup of coffee~
124
+
125
+ <a href="https://www.buymeacoffee.com/ChuanhuChat" target="_blank"><img src="https://img.buymeacoffee.com/button-api/?text=Buy me a coffee&emoji=&slug=ChuanhuChat&button_colour=1aae59&font_colour=ffffff&font_family=Poppins&outline_colour=ffffff&coffee_colour=FFDD00" alt="Buy Me A Coffee" width="250"></a>
126
+
127
+ <img width="250" alt="image" src="https://user-images.githubusercontent.com/51039745/226920291-e8ec0b0a-400f-4c20-ac13-dafac0c3aeeb.JPG">
readme/README_ja.md ADDED
@@ -0,0 +1,126 @@
 
 
1
+ <div align="right">
2
+ <!-- Language: -->
3
+ <a title="Chinese" href="../README.md">简体中文</a> | <a title="English" href="README_en.md">English</a> | 日本語
4
+ </div>
5
+
6
+ <h1 align="center">川虎 Chat 🐯 Chuanhu Chat</h1>
7
+ <div align="center">
8
+ <a href="https://github.com/GaiZhenBiao/ChuanhuChatGPT">
9
+ <img src="https://user-images.githubusercontent.com/70903329/227087087-93b37d64-7dc3-4738-a518-c1cf05591c8a.png" alt="Logo" height="156">
10
+ </a>
11
+
12
+ <p align="center">
13
+ <h3>ChatGPT/ChatGLM/LLaMAなどのLLMのための軽量でユーザーフレンドリーなWeb-UI</h3>
14
+ <p align="center">
15
+ <a href="https://github.com/GaiZhenbiao/ChuanhuChatGPT/blob/main/LICENSE">
16
+ <img alt="Tests Passing" src="https://img.shields.io/github/license/GaiZhenbiao/ChuanhuChatGPT" />
17
+ </a>
18
+ <a href="https://gradio.app/">
19
+ <img alt="GitHub Contributors" src="https://img.shields.io/badge/Base-Gradio-fb7d1a?style=flat" />
20
+ </a>
21
+ <a href="https://t.me/tkdifferent">
22
+ <img alt="GitHub pull requests" src="https://img.shields.io/badge/Telegram-Group-blue.svg?logo=telegram" />
23
+ </a>
24
+ <p>
25
+ ストリーム出力/会話回数無制限/履歴保存/プリセットプロンプト/ファイルへの質問チャット<br>
26
+ ウェブ検索/LaTeXレンダリング/表レンダリング/コードハイライト<br>
27
+ オートダークモード/アダプティブ・ウェブ・インターフェイス/WeChatライク・テーマ<br />
28
+ マルチパラメーターチューニング/マルチAPI-Key対応/マルチユーザー対応<br>
29
+ GPT-4対応/LLMのローカルデプロイ可能。
30
+ </p>
31
+ <a href="https://www.youtube.com/watch?v=MtxS4XZWbJE"><strong>動画チュートリアル</strong></a>
32
+ ·
33
+ <a href="https://www.youtube.com/watch?v=77nw7iimYDE"><strong>2.0 イントロダクション</strong></a>
34
+ ·
35
+ <a href="https://www.youtube.com/watch?v=x-O1jjBqgu4"><strong>3.0 イントロダクション & チュートリアル</strong></a>
36
+ ||
37
+ <a href="https://huggingface.co/spaces/JohnSmith9982/ChuanhuChatGPT"><strong>オンライントライアル</strong></a>
38
+ ·
39
+ <a href="https://huggingface.co/login?next=%2Fspaces%2FJohnSmith9982%2FChuanhuChatGPT%3Fduplicate%3Dtrue"><strong>ワンクリックデプロイ</strong></a>
40
+ </p>
41
+ <p align="center">
42
+ <img alt="Animation Demo" src="https://user-images.githubusercontent.com/51039745/226255695-6b17ff1f-ea8d-464f-b69b-a7b6b68fffe8.gif" />
43
+ </p>
44
+ </p>
45
+ </div>
46
+
47
+ ## 使う上でのTips
48
+
49
+ - ChatGPTをより適切に制御するために、システムプロンプトを使用できます。
50
+ - プロンプトテンプレートを使用するには、プロンプトテンプレートコレクションを選択し、ドロップダウンメニューから特定のプロンプトを選択。回答が不十分な場合は、`🔄再生成`ボタンを使って再試行します。
51
+ - 入力ボックスで改行するには、<kbd>Shift</kbd> + <kbd>Enter</kbd>キーを押してください。
52
+ - 入力履歴を素早く切り替えるには、入力ボックスで <kbd>↑</kbd>と<kbd>↓</kbd>キーを押す。
53
+ - プログラムをサーバにデプロイするには、プログラムの最終行を `demo.launch(server_name="0.0.0.0", server_port=<your port number>)`に変更します。
54
+ - 共有リンクを取得するには、プログラムの最後の行を `demo.launch(share=True)` に変更してください。なお、公開リンクでアクセスするためには、プログラムが実行されている必要があることに注意してください。
55
+ - Hugging Face Spacesで使用する場合: より速く、より安全に利用するために、**Duplicate Space**を使用し、自分のスペースでプログラムを実行することをお勧めします。
56
+
57
+ ## インストール
58
+
59
+ ```shell
60
+ git clone https://github.com/GaiZhenbiao/ChuanhuChatGPT.git
61
+ cd ChuanhuChatGPT
62
+ pip install -r requirements.txt
63
+ ```
64
+
65
+ 次に `config_example.json`をコピーして `config.json`にリネームし、そのファイルにAPI-Keyなどの設定を記入する。
66
+
67
+ ```shell
68
+ python ChuanhuChatbot.py
69
+ ```
70
+
71
+ ブラウザのウィンドウが開き、ChatGPTとチャットできるようになります。
72
+
73
+ > **Note**
74
+ >
75
+ > 詳しい手順は[wikiページ](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程)をご確認ください。
76
+
77
+ ## トラブルシューティング
78
+
79
+ 問題が発生した場合は、まずこのプロジェクトの最新の変更点を手動で引っ張ってみるのがよいでしょう。その手順は以下の通りです:
80
+
81
+ 1. ウェブページの `Download ZIP` をクリックして最新のコードアーカイブをダウンロードするか、または
82
+ ```shell
83
+ git pull https://github.com/GaiZhenbiao/ChuanhuChatGPT.git main -f
84
+ ```
85
+ 2. 新しい依存関係が導入されている可能性があるため、依存関係を再度インストールしてみてください。
86
+ ```
87
+ pip install -r requirements.txt
88
+ ```
89
+ 3. Gradioを更新
90
+ ```
91
+ pip install gradio --upgrade --force-reinstall
92
+ ```
93
+
94
+ 一般的に、以下の手順でほとんどの問題を解決することができます。
95
+
96
+ それでも問題が解決しない場合は、こちらのページをご参照ください: [よくある質問(FAQ)](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题)
97
+
98
+ このページでは、考えられるほぼすべての問題点と解決策を掲載しています。よくお読みください。
99
+
100
+ ## More Information
101
+
102
+ より詳細な情報は、[wiki](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki) をご覧ください。:
103
+
104
+ - [How to contribute a translation](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/Localization)
105
+ - [How to make a contribution](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/贡献指南)
106
+ - [How to cite the project](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可#如何引用该项目)
107
+ - [Project changelog](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/更新日志)
108
+ - [Project license](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可)
109
+
110
+ ## Starchart
111
+
112
+ [![Star History Chart](https://api.star-history.com/svg?repos=GaiZhenbiao/ChuanhuChatGPT&type=Date)](https://star-history.com/#GaiZhenbiao/ChuanhuChatGPT&Date)
113
+
114
+ ## Contributors
115
+
116
+ <a href="https://github.com/GaiZhenbiao/ChuanhuChatGPT/graphs/contributors">
117
+ <img src="https://contrib.rocks/image?repo=GaiZhenbiao/ChuanhuChatGPT" />
118
+ </a>
119
+
120
+ ## Sponsor
121
+
122
+ 🐯 この企画が役に立ったら、遠慮なくコーラかコーヒーでもおごってください〜。
123
+
124
+ <a href="https://www.buymeacoffee.com/ChuanhuChat" target="_blank"><img src="https://img.buymeacoffee.com/button-api/?text=Buy me a coffee&emoji=&slug=ChuanhuChat&button_colour=1aae59&font_colour=ffffff&font_family=Poppins&outline_colour=ffffff&coffee_colour=FFDD00" alt="Buy Me A Coffee" width="250"></a>
125
+
126
+ <img width="250" alt="image" src="https://user-images.githubusercontent.com/51039745/226920291-e8ec0b0a-400f-4c20-ac13-dafac0c3aeeb.JPG">
requirements.txt CHANGED
@@ -1,4 +1,4 @@
1
- gradio
2
  mdtex2html
3
  pypinyin
4
  tiktoken
@@ -7,9 +7,19 @@ tqdm
7
  colorama
8
  duckduckgo_search
9
  Pygments
10
- llama_index==0.5.5
11
  langchain
12
  markdown
13
  PyPDF2
14
  pdfplumber
15
  pandas
 
 
 
 
 
 
 
 
 
 
1
+ gradio==3.25.0
2
  mdtex2html
3
  pypinyin
4
  tiktoken
7
  colorama
8
  duckduckgo_search
9
  Pygments
10
+ llama_index==0.5.13
11
  langchain
12
  markdown
13
  PyPDF2
14
  pdfplumber
15
  pandas
16
+ commentjson
17
+ openpyxl
18
+
19
+ transformers
20
+ torch
21
+ icetk
22
+ protobuf==3.19.0
23
+ git+https://github.com/OptimalScale/LMFlow.git
24
+ cpm-kernels
25
+ sentence_transformers
run_Linux.sh CHANGED
@@ -1,10 +1,10 @@
1
  #!/bin/bash
2
 
3
  # 获取脚本所在目录
4
- script_dir=$(dirname "$0")
5
 
6
  # 将工作目录更改为脚本所在目录
7
- cd "$script_dir"
8
 
9
  # 检查Git仓库是否有更新
10
  git remote update
@@ -23,3 +23,9 @@ if ! git status -uno | grep 'up to date' > /dev/null; then
23
  # 重新启动服务器
24
  nohup python3 ChuanhuChatbot.py &
25
  fi
 
 
 
 
 
 
1
  #!/bin/bash
2
 
3
  # 获取脚本所在目录
4
+ script_dir=$(dirname "$(readlink -f "$0")")
5
 
6
  # 将工作目录更改为脚本所在目录
7
+ cd "$script_dir" || exit
8
 
9
  # 检查Git仓库是否有更新
10
  git remote update
23
  # 重新启动服务器
24
  nohup python3 ChuanhuChatbot.py &
25
  fi
26
+
27
+ # 检查ChuanhuChatbot.py是否在运行
28
+ if ! pgrep -f ChuanhuChatbot.py > /dev/null; then
29
+ # 如果没有运行,启动服务器
30
+ nohup python3 ChuanhuChatbot.py &
31
+ fi
run_macOS.command CHANGED
@@ -1,10 +1,10 @@
1
  #!/bin/bash
2
 
3
  # 获取脚本所在目录
4
- script_dir=$(dirname "$0")
5
 
6
  # 将工作目录更改为脚本所在目录
7
- cd "$script_dir"
8
 
9
  # 检查Git仓库是否有更新
10
  git remote update
@@ -23,3 +23,9 @@ if ! git status -uno | grep 'up to date' > /dev/null; then
23
  # 重新启动服务器
24
  nohup python3 ChuanhuChatbot.py &
25
  fi
 
 
 
 
 
 
1
  #!/bin/bash
2
 
3
  # 获取脚本所在目录
4
+ script_dir=$(dirname "$(readlink -f "$0")")
5
 
6
  # 将工作目录更改为脚本所在目录
7
+ cd "$script_dir" || exit
8
 
9
  # 检查Git仓库是否有更新
10
  git remote update
23
  # 重新启动服务器
24
  nohup python3 ChuanhuChatbot.py &
25
  fi
26
+
27
+ # 检查ChuanhuChatbot.py是否在运行
28
+ if ! pgrep -f ChuanhuChatbot.py > /dev/null; then
29
+ # 如果没有运行,启动服务器
30
+ nohup python3 ChuanhuChatbot.py &
31
+ fi
templates/5 日本語Prompts.json ADDED
@@ -0,0 +1,34 @@
 
 
1
+ [
2
+ {
3
+ "act":"専門家",
4
+ "prompt":"あなたは、プロの【その分野の専門家】です。\n以下の制約条件と入力文をもとに、【出力内容】を出力してください。\n\n# 制約条件:\n【前提条件や決まりごと】\n\n# 入力文:\n【期待する出力結果や大まかな指示】"
5
+ },
6
+ {
7
+ "act":"要約",
8
+ "prompt": "以下のテキストを要約し、最も重要なポイントを箇条書きにまとめてください。\n\nテキスト: 【テキスト】"
9
+ },
10
+ {
11
+ "act":"キーワード抽出",
12
+ "prompt":"以下のテキストからキーワードを抽出してください。\n\nテキスト:【テキスト】\n\nキーワード:"
13
+ },
14
+ {
15
+ "act": "質問させる",
16
+ "prompt": "【達成したいこと】を達成するために質問してください。\n\n- この条件が満たされるまで、またはこの目標を達成するために質問してください。\n- 質問項目は1つにしてください。\n- 日本語で質問してください。"
17
+ },
18
+ {
19
+ "act": "英会話教師",
20
+ "prompt": "あなたは私の英会話の相手として、ネイティブ話者として振る舞ってください。\n私の発言に対して、以下のフォーマットで1回に1つずつ回答します。\n説明は書かないでください。まとめて会話内容を書かないでください。\n\n#フォーマット:\n【修正】:\n{私の英文を自然な英語に直してください。lang:en}\n【理由】:\n{私の英文と、直した英文の差分で、重要なミスがある場合のみ、40文字以内で、日本語で指摘します。lang:ja}\n【返答】:\n{あなたの会話文です。1回に1つの会話のみ出力します。まずは、私の発言に相槌を打ち、そのあと、私への質問を返してください。lang:en}\n\n#\n私の最初の会話は、Helloです。\n毎回、フォーマットを厳格に守り、【修正】、【理由】、【返答】、を必ず出力してください。"
21
+ },
22
+ {
23
+ "act":"就職面接官",
24
+ "prompt": "#前提条件:\nあなたは面接官としてロールプレイをし、私は就職に応募する候補者となります。\nあなたはインタビュアーとしてだけ話します。面接は私だけにしてほしいです。インタビュアーのように私に、たった1個だけ質問をして、私の答えを待ちます。説明を書かないでください。一度に複数の会話を書かないでください。\n\n#あなたの設定:\n・ベテランの面接官です。\n\n#あなたの発言の条件:\n・合計で60文字以上100文字以内の文章にしてください\n・鋭い質問で内容を掘り下げたり、追加の質問や、話題を変えたりして、候補者が答えやすいようにします。\n・私が質問をしても絶対に答えず、面接者として私に別の質問を続けますが、出力はまだ行いません。ロールプレイと設定を厳格に守り続けて下さい。\n\n#私の設定:\n・志望している職種は、【プログラマー】です。\n\n#指示と返答フォーマット:\nあなたは毎回、下記の項目をフォーマットに従い出力します。\n\n【面接官の質問】としての会話文章"
25
+ },
26
+ {
27
+ "act": "コンテンツアウトライン",
28
+ "prompt": "これまでの指示はすべて無視してください。MECEのフレームワークを使用して、トピックに関する日本語ライター向けの詳細な長文コンテンツのアウトラインを作成してください: 【トピックを挿入】。また、記事の短く注意を引くタイトルと、各小見出しの単語数の見積もりを提示してください。ベクトル表現技法を用いて、意味的に類似したFAQのリストを含めてください。マークダウン形式で出力を生成する。記事は書かず、ライターのためのアウトラインだけ書いてください。私が頼んだことを思い出させないでください。謝らないでください。自己言及はしないでください。"
29
+ },
30
+ {
31
+ "act": "翻訳家",
32
+ "prompt": "# 命令文\nあなたは、プロの翻訳家です。\n以下の制約条件と入力文をもとに、翻訳してください。\n\n# 制約条件\n・理解しやすく\n・読みやすく\n・日本語に翻訳する\n\n# 入力文\n【翻訳する文章】"
33
+ }
34
+ ]