qingxu99 committed
Commit b3e5cdb
1 Parent(s): 6595ab8

Add some comments

Files changed (1)
  1. request_llm/bridge_chatglm.py +13 -2
request_llm/bridge_chatglm.py CHANGED
@@ -32,6 +32,7 @@ class GetGLMHandle(Process):
         return self.chatglm_model is not None
 
     def run(self):
+        # Executed in the child process
         # On first run, load the parameters
         retry = 0
         while True:
@@ -53,17 +54,24 @@ class GetGLMHandle(Process):
                     self.child.send('[Local Message] Call ChatGLM fail 不能正常加载ChatGLM的参数。')
                     raise RuntimeError("不能正常加载ChatGLM的参数!")
 
-        # Enter the task-waiting state
         while True:
+            # Enter the task-waiting state
             kwargs = self.child.recv()
+            # A request has arrived, start handling it
             try:
                 for response, history in self.chatglm_model.stream_chat(self.chatglm_tokenizer, **kwargs):
                     self.child.send(response)
+                    # # Receive a possible termination command mid-stream (if any)
+                    # if self.child.poll():
+                    #     command = self.child.recv()
+                    #     if command == '[Terminate]': break
             except:
                 self.child.send('[Local Message] Call ChatGLM fail.')
+            # Request handled, start the next loop iteration
             self.child.send('[Finish]')
 
     def stream_chat(self, **kwargs):
+        # Executed in the main process
         self.parent.send(kwargs)
         while True:
             res = self.parent.recv()
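The run()/stream_chat() pair in this hunk is a plain request/response protocol over a multiprocessing.Pipe: the parent sends one kwargs dict, the child streams partial responses back, and the string '[Finish]' acts as the end-of-stream sentinel. Below is a minimal, runnable sketch of that pattern, with a hypothetical EchoWorker standing in for the ChatGLM model; the word-by-word "streaming" is purely illustrative.

# Minimal sketch of the Pipe-based worker pattern used by GetGLMHandle.
# EchoWorker is a hypothetical stand-in; only the protocol ('[Finish]'
# sentinel, blocking recv/send on both ends) mirrors the diff above.
from multiprocessing import Process, Pipe

class EchoWorker(Process):
    def __init__(self):
        super().__init__(daemon=True)
        self.parent, self.child = Pipe()

    def run(self):
        # Child process: wait for a task, stream partial results back,
        # then signal completion with the '[Finish]' sentinel.
        while True:
            kwargs = self.child.recv()
            try:
                for chunk in kwargs['query'].split():   # fake "streaming"
                    self.child.send(chunk)
            except Exception:
                self.child.send('[Local Message] Call worker fail.')
            self.child.send('[Finish]')

    def stream_chat(self, **kwargs):
        # Parent process: send the request, then yield partial results
        # until the sentinel arrives.
        self.parent.send(kwargs)
        while True:
            res = self.parent.recv()
            if res == '[Finish]':
                break
            yield res

if __name__ == '__main__':
    worker = EchoWorker()
    worker.start()
    for partial in worker.stream_chat(query="hello streaming world"):
        print(partial)

Keeping both pipe ends as attributes of the Process subclass matches the diff: the child only ever touches self.child, the parent only self.parent.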
@@ -130,14 +138,17 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
         if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs)  # Get the pre-processing function (if any)
         inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
 
+    # Assemble the conversation history
     history_feedin = []
     history_feedin.append(["What can I do?", system_prompt] )
     for i in range(len(history)//2):
         history_feedin.append([history[2*i], history[2*i+1]] )
 
+    # Start receiving ChatGLM's reply
     for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
         chatbot[-1] = (inputs, response)
         yield from update_ui(chatbot=chatbot, history=history)
 
+    # Finalize the output
     history.extend([inputs, response])
-    yield from update_ui(chatbot=chatbot, history=history)
+    yield from update_ui(chatbot=chatbot, history=history)
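The history_feedin construction in the last hunk turns the flat history list (alternating user/assistant turns) into the list of [user, assistant] pairs that ChatGLM's stream_chat expects, with the system prompt passed as the answer to a fixed "What can I do?" turn. A small sketch with illustrative values:

# Sketch of the history pairing done in predict(); the history strings
# below are made up for illustration.
def build_history_feedin(history, system_prompt):
    history_feedin = [["What can I do?", system_prompt]]
    for i in range(len(history) // 2):
        history_feedin.append([history[2 * i], history[2 * i + 1]])
    return history_feedin

history = ["Hi", "Hello! How can I help?", "Summarize this file", "Sure, here is a summary..."]
print(build_history_feedin(history, system_prompt="You are a helpful assistant."))
# [['What can I do?', 'You are a helpful assistant.'],
#  ['Hi', 'Hello! How can I help?'],
#  ['Summarize this file', 'Sure, here is a summary...']]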
 