Commit 7778502 by qingxu99 (parent: 10882b6)

update the error handling of moss and chatglm

check_proxy.py CHANGED
@@ -94,7 +94,7 @@ def get_current_version():
    return current_version


-def auto_update():
+def auto_update(raise_error=False):
    """
    一键更新协议:查询版本和用户意见
    """
@@ -126,14 +126,22 @@ def auto_update():
                try:
                    patch_and_restart(path)
                except:
-                    print('更新失败。')
+                    msg = '更新失败。'
+                    if raise_error:
+                        from toolbox import trimmed_format_exc
+                        msg += trimmed_format_exc()
+                    print(msg)
            else:
                print('自动更新程序:已禁用')
                return
        else:
            return
    except:
-        print('自动更新程序:已禁用')
+        msg = '自动更新程序:已禁用'
+        if raise_error:
+            from toolbox import trimmed_format_exc
+            msg += trimmed_format_exc()
+        print(msg)

def warm_up_modules():
    print('正在执行一些模块的预热...')
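Note that, despite its name, `raise_error` does not re-raise: as the hunk shows, it only appends the output of `toolbox.trimmed_format_exc()` to the message that gets printed. A minimal sketch of how a caller might use the flag (the calling code is illustrative, not part of this commit):

```python
# Hypothetical caller -- only auto_update(raise_error=...) itself comes from this commit.
from check_proxy import auto_update

auto_update()                  # default: failures are reported with a short message only
auto_update(raise_error=True)  # verbose: the trimmed traceback is appended to that message
```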
config.py CHANGED
@@ -46,7 +46,7 @@ MAX_RETRY = 2

# OpenAI模型选择是(gpt4现在只对申请成功的人开放,体验gpt-4可以试试api2d)
LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing"]

# 本地LLM模型如ChatGLM的执行方式 CPU/GPU
LOCAL_MODEL_DEVICE = "cpu" # 可选 "cuda"
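Adding `"moss"` to `AVAIL_LLM_MODELS` is what makes the new bridge selectable alongside `"chatglm"`. A small sanity check of the kind a startup script could run against this whitelist (assumed, not part of this commit):

```python
# Hypothetical startup check: the configured default must be a whitelisted backend.
from config import LLM_MODEL, AVAIL_LLM_MODELS

assert LLM_MODEL in AVAIL_LLM_MODELS, \
    f"LLM_MODEL={LLM_MODEL!r} is not listed in AVAIL_LLM_MODELS"
```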
docs/waifu_plugin/autoload.js CHANGED
@@ -16,6 +16,13 @@ try {
    live2d_settings['canTakeScreenshot'] = false;
    live2d_settings['canTurnToHomePage'] = false;
    live2d_settings['canTurnToAboutPage'] = false;
+    live2d_settings['showHitokoto'] = false;        // 显示一言
+    live2d_settings['showF12Status'] = false;       // 显示加载状态
+    live2d_settings['showF12Message'] = false;      // 显示看板娘消息
+    live2d_settings['showF12OpenMsg'] = false;      // 显示控制台打开提示
+    live2d_settings['showCopyMessage'] = false;     // 显示 复制内容 提示
+    live2d_settings['showWelcomeMessage'] = true;   // 显示进入面页欢迎词
+
    /* 在 initModel 前添加 */
    initModel("file=docs/waifu_plugin/waifu-tips.json");
}});
request_llm/bridge_chatglm.py CHANGED
@@ -87,7 +87,7 @@ class GetGLMHandle(Process):
global glm_handle
glm_handle = None
#################################################################################
-def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
+def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
    """
    多线程方法
    函数的说明请见 request_llm/bridge_all.py
@@ -95,7 +95,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
    global glm_handle
    if glm_handle is None:
        glm_handle = GetGLMHandle()
-        observe_window[0] = load_message + "\n\n" + glm_handle.info
+        if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + glm_handle.info
        if not glm_handle.success:
            error = glm_handle.info
            glm_handle = None
@@ -110,7 +110,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
    watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
    response = ""
    for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
-        observe_window[0] = response
+        if len(observe_window) >= 1: observe_window[0] = response
        if len(observe_window) >= 2:
            if (time.time()-observe_window[1]) > watch_dog_patience:
                raise RuntimeError("程序终止。")
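The point of these hunks is that `observe_window` now defaults to an empty list and every write to slot 0 is guarded by `len(observe_window) >= 1`, so a caller that passes nothing no longer hits `None[0]` (a `TypeError`) or `[][0]` (an `IndexError`). The convention implied by the code: slot 0 carries the latest partial reply for the UI, slot 1 carries a watchdog timestamp the caller must keep refreshing. A runnable stand-in for the guarded loop (the helper name and demo values are mine, not from the repo):

```python
import time

def stream_with_window(chunks, observe_window=[]):
    # Mirrors the guarded writes in bridge_chatglm.py: an empty list means
    # "no UI to update"; a 2-element list also carries a watchdog heartbeat.
    watch_dog_patience = 5
    response = ""
    for response in chunks:
        if len(observe_window) >= 1:
            observe_window[0] = response                      # latest partial reply
        if len(observe_window) >= 2:
            if (time.time() - observe_window[1]) > watch_dog_patience:
                raise RuntimeError("watchdog expired")        # caller stopped feeding slot 1
    return response

print(stream_with_window(["你", "你好"]))                      # new default: no window, no crash
print(stream_with_window(["你", "你好"], ["", time.time()]))   # UI + watchdog variant
```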
request_llm/bridge_moss.py CHANGED
@@ -153,7 +153,8 @@ class GetGLMHandle(Process):
                print(response.lstrip('\n'))
                self.child.send(response.lstrip('\n'))
            except:
-                self.child.send('[Local Message] Call MOSS fail.')
+                from toolbox import trimmed_format_exc
+                self.child.send('[Local Message] Call MOSS fail.' + '\n```\n' + trimmed_format_exc() + '\n```\n')
            # 请求处理结束,开始下一个循环
            self.child.send('[Finish]')
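This change pipes the failure reason back to the parent process instead of a bare "Call MOSS fail." string, wrapped in a fenced block so it renders readably in the chat UI. `toolbox.trimmed_format_exc` itself is not shown in this diff; a helper like it is usually just `traceback.format_exc()` with the absolute working directory trimmed out of the paths (assumed shape, the real implementation may differ):

```python
import os
import traceback

def trimmed_format_exc():
    # Format the current exception, but hide the absolute install path.
    text = traceback.format_exc()
    return text.replace(os.getcwd(), ".")
```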
 
@@ -217,6 +218,10 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
        if not moss_handle.success:
            moss_handle = None
            return
+    else:
+        response = "[Local Message]: 等待MOSS响应中 ..."
+        chatbot[-1] = (inputs, response)
+        yield from update_ui(chatbot=chatbot, history=history)

    if additional_fn is not None:
        import core_functional
@@ -231,15 +236,12 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
        history_feedin.append([history[2*i], history[2*i+1]] )

    # 开始接收chatglm的回复
-    response = "[Local Message]: 等待MOSS响应中 ..."
-    chatbot[-1] = (inputs, response)
-    yield from update_ui(chatbot=chatbot, history=history)
    for response in moss_handle.stream_chat(query=inputs, history=history_feedin, sys_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
-        chatbot[-1] = (inputs, response)
+        chatbot[-1] = (inputs, response.strip('<|MOSS|>: '))
        yield from update_ui(chatbot=chatbot, history=history)

    # 总结输出
    if response == "[Local Message]: 等待MOSS响应中 ...":
        response = "[Local Message]: MOSS响应异常 ..."
-    history.extend([inputs, response])
+    history.extend([inputs, response.strip('<|MOSS|>: ')])
    yield from update_ui(chatbot=chatbot, history=history)
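One Python detail worth keeping in mind when reading the `.strip('<|MOSS|>: ')` calls above: `str.strip` removes any leading or trailing characters drawn from that set, not the literal prefix, so it can also eat extra `M`, `O`, `S`, `|`, `<`, `>`, colons and spaces at either end of the reply. Dropping only the speaker tag would be `removeprefix` on Python 3.9+; a quick comparison (values are just an illustration):

```python
reply = "<|MOSS|>: SOS received: MOSS here."

print(reply.strip('<|MOSS|>: '))         # 'received: MOSS here.'     -- the leading 'SOS ' is eaten too
print(reply.removeprefix('<|MOSS|>: '))  # 'SOS received: MOSS here.' -- only the tag is removed
```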