3v324v23 committed
Commit ce1fc3a
Parent: 9481405

Fix a bug where chatglm did not remember conversation context

Files changed (1):
  1. request_llm/bridge_chatglm.py +6 -3
request_llm/bridge_chatglm.py CHANGED
@@ -92,8 +92,8 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
 
     # chatglm has no sys_prompt interface, so the prompt is added to the history
     history_feedin = []
+    history_feedin.append(["What can I do?", sys_prompt])
     for i in range(len(history)//2):
-        history_feedin.append(["What can I do?", sys_prompt] )
         history_feedin.append([history[2*i], history[2*i+1]] )
 
     watch_dog_patience = 5 # watchdog patience; 5 seconds is enough
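The first hunk fixes how the fake system-prompt exchange is built. Previously the ["What can I do?", sys_prompt] pair was appended on every loop iteration, so a conversation with N past exchanges injected N copies of it into the model's history, while a fresh conversation (empty history) never sent the system prompt at all. A minimal sketch of the corrected construction, written as a standalone helper for illustration (the repo inlines this logic rather than naming a function):

def build_history_feedin(history, sys_prompt):
    """Rebuild ChatGLM-style [user, assistant] pairs from the flat history
    list [user0, bot0, user1, bot1, ...] that gpt_academic passes around.
    ChatGLM's stream_chat takes no system prompt, so the prompt rides along
    as a fake first exchange."""
    history_feedin = [["What can I do?", sys_prompt]]  # exactly one fake turn, even when history is empty
    for i in range(len(history)//2):
        history_feedin.append([history[2*i], history[2*i+1]])
    return history_feedin

pairs = build_history_feedin(["Hi", "Hello!", "What is 2+2?", "4"],
                             "You are a concise assistant.")
assert pairs == [["What can I do?", "You are a concise assistant."],
                 ["Hi", "Hello!"],
                 ["What is 2+2?", "4"]]  # the old loop would have interleaved two fake turns here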
@@ -131,10 +131,13 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt
     inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
 
     history_feedin = []
+    history_feedin.append(["What can I do?", system_prompt] )
     for i in range(len(history)//2):
-        history_feedin.append(["What can I do?", system_prompt] )
         history_feedin.append([history[2*i], history[2*i+1]] )
 
     for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
         chatbot[-1] = (inputs, response)
-        yield from update_ui(chatbot=chatbot, history=history)
+        yield from update_ui(chatbot=chatbot, history=history)
+
+    history.extend([inputs, response])
+    yield from update_ui(chatbot=chatbot, history=history)
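The second hunk is the one that cures the "does not remember context" symptom named in the commit message: history is the flat list that update_ui round-trips through the UI state, and before this change it was never extended with the finished exchange, so the next call rebuilt history_feedin from a list that still lacked the previous turn. A hedged sketch of the fixed control flow, reusing build_history_feedin from the sketch above and stubbing out the model handle (fake_stream_chat and predict_sketch are illustrations, not the project's API):

def fake_stream_chat(query, history):
    """Stand-in for glm_handle.stream_chat: yields an incrementally growing reply."""
    reply = ""
    for word in ["The", " answer", " is", " 4."]:
        reply += word
        yield reply

def predict_sketch(inputs, history):
    """Mimics the fixed control flow of predict() in bridge_chatglm.py."""
    history_feedin = build_history_feedin(history, "You are a concise assistant.")
    response = ""
    for response in fake_stream_chat(inputs, history_feedin):
        pass  # the real code pushes each partial reply to the chatbot UI here
    history.extend([inputs, response])  # the fix: persist the finished turn
    return response

history = []
predict_sketch("What is 2+2?", history)
print(history)  # now ['What is 2+2?', 'The answer is 4.'], visible to the next call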