# I-am-agent / app.py
# (Hugging Face Space page-header residue — "jianuo's picture / Update app.py /
#  commit d1fcadc" — kept as comments so the file parses as Python.)
import os
import re
os.environ['KMP_DUPLICATE_LIB_OK'] = "TRUE"
from modelscope.utils.config import Config
from my_modelscope_agent.agent import AgentExecutor
from my_modelscope_agent.output_parser import MsOutputParser
from my_modelscope_agent.prompt import MSPromptGenerator
import gradio as gr
# Pre-filled sample completion for the player's "LLM reply" box: an
# acknowledgement plus a <|startofthink|> tool-call block that invokes the
# ModelScope address-parsing model.
default_text = """收到!
<|startofthink|>
{
"api_name": "modelscope_text-address",
"parameters": {
"input": "浙江杭州市江干区九堡镇三村村一区"}
}
<|endofthink|>"""
# System prompt shown to the player who is acting as the LLM
# (tongue-in-cheek persona text; fed to MSPromptGenerator below).
MS_DEFAULT_SYSTEM_TEMPLATE = """<|system|>:你是Datawhale与ModelScope联合培养的人工大模型,拥有超百万亿的参数(神经突触),遥遥领先于GPT-4,你有多种能力,可以通过插件集成魔搭社区的模型api来回复用户的问题,还能解答用户使用模型遇到的问题和模型知识相关问答。
"""
# Markdown intro / game rules rendered at the top of the page.
hello_info = """# 关于我转生变成Agent这档事
众所周知,换位思考可以增进人与人之间的理解。假如我们能换一个角度,站在LLM的角度上去处理用户提出的各种问题,会碰撞出什么样的火花呢?
在这小游戏里,用户要充当LLM,一行行的看着modelscope-agnet框架提交给LLM的prompt来规划,行动……
玩法很简单,左上角是用户对Agent说的话,左下角是Agent的中枢LLM回复用户的聊天框;右上角是Agent可用的Tools,右下角是大家平时看到的聊天框(没做解析,音频视频放不了)
点击“Agent,启动!”开始游戏。
"""
# Canned example instructions offered next to the query box.
examples = ['使用地址识别模型,从下面的地址中找到省市区等元素,地址:浙江杭州市江干区九堡镇三村村一区', '写一个20字左右的小故事,并用女声念出来', '单词 submission 的中文是什么?']
class my_llm:
    """Minimal stand-in LLM: the human player supplies the completions.

    The AgentExecutor only needs an object exposing ``set_agent_type``;
    everything an LLM would normally generate is typed in via the UI.
    """

    def set_agent_type(self, agent_type):
        """Remember the agent type chosen by the executor."""
        self.agent_type = agent_type
def generate_history(txt):
    """Parse an agent prompt transcript into gr.Chatbot history pairs.

    The prompt is a concatenation of role-tagged segments such as
    ``<|user|>:...`` and ``<|assistant|>:...``.  The leading system segment
    is dropped and the remaining non-empty segments are paired up as
    ``[user_msg, assistant_msg]`` rows.

    Args:
        txt: Raw prompt string.  Any falsy value (``''``, ``[]``, ``None``)
            yields an empty history.

    Returns:
        list[list[str]]: chat rows; the final assistant slot is ``''`` when
        the transcript ends with an open role tag awaiting a reply.
    """
    if not txt:
        return []
    # Raw strings: '\|' inside a plain literal is an invalid escape sequence
    # (SyntaxWarning on Python >= 3.12).  Compile once instead of building
    # the same pattern twice for split and findall.
    role_tag = re.compile(r'<\|.*?\|>:')
    segments = role_tag.split(txt)
    tags = role_tag.findall(txt)
    # Drop empty fragments, then drop the system segment (first remaining).
    segments = [seg for seg in segments if seg != ''][1:]
    tags = tags[1:]
    # One more tag than segments means a trailing role tag with no text yet:
    # leave an empty assistant slot for the player to fill in.
    if len(segments) + 1 == len(tags):
        segments.append('')
    # Pair even-indexed (user) with odd-indexed (assistant) segments.
    return [[user, reply] for user, reply in zip(segments[::2], segments[1::2])]
# Shared placeholder LLM (the human player fills this role) and the tool
# configuration loaded from a local template file alongside this script.
llm = my_llm()
tool_cfg = Config.from_file(r'cfg_tool_template.json')
def agent_remake(state_llm, history, agent):
    """Reset everything for a fresh round.

    Empties the per-round LLM state dict and the chat history in place,
    resets the agent, and returns cleared values for the wired Gradio
    outputs (prompt box, history state, chatbot, LLM state).
    """
    for mutable in (state_llm, history):
        mutable.clear()
    agent.reset()
    return '', history, history, state_llm
def agent_init(init_cmd, state_llm, history, agent, enable_list):
    """Start a game round: run the agent's init step and build the first prompt.

    Restricts the agent to the tools ticked in the UI, runs the executor's
    initialisation for ``init_cmd``, generates the first LLM prompt, and
    stashes every intermediate the later steps need into ``state_llm``.
    Returns (prompt text, history, history, state) for the Gradio outputs.
    """
    agent.set_available_tools(enable_list)
    (tool_list, knowledge_list, function_list, llm_result, exec_result,
     idx, final_res, remote, print_info) = agent.custom_run_init(init_cmd, remote=True)
    llm_artifacts, idx = agent.custom_gene_prompt(llm_result, exec_result, idx)
    # Carry every intermediate over to the next deal_LLM() call.
    state_llm.update(
        tool_list=tool_list,
        knowledge_list=knowledge_list,
        function_list=function_list,
        exec_result=exec_result,
        idx=idx,
        final_res=final_res,
        remote=remote,
        print_info=print_info,
        llm_artifacts=llm_artifacts,
        is_end=False,
    )
    history = generate_history(llm_artifacts)
    return llm_artifacts, history, history, state_llm
def deal_LLM(input_data, history, state_llm, agent, enable_list):
    """Feed the player's hand-written "LLM reply" back into the agent loop.

    ``input_data`` is treated as the LLM completion for the prompt currently
    stored in ``state_llm['llm_artifacts']``; the agent parses it and the
    round advances one step.  Returns (prompt text, history state, chatbot
    value, LLM state) matching the Gradio outputs wired below.
    """
    agent.set_available_tools(enable_list)
    llm_artifacts = state_llm['llm_artifacts']
    llm_result = input_data
    idx = state_llm['idx']
    final_res = state_llm['final_res']
    remote = state_llm['remote']
    print_info = state_llm['print_info']
    history = generate_history(llm_artifacts)
    # custom_parse_llm returns a sequence; only the first result is used
    # here.  Exactly one of the keys below is expected to be present.
    result = agent.custom_parse_llm(llm_artifacts, llm_result, idx, final_res, remote, print_info)[0]
    if 'end_res' in result:
        # Conversation finished: record the final answer and append the
        # player's last reply into the open assistant slot of the history.
        state_llm['is_end'] = True
        state_llm['final_res'] = result['end_res']
        history[-1][1] += '\n' + llm_result
        return '', history, history, state_llm
    elif 'exec_result' in result:
        # A tool call was executed: build the next prompt from its output
        # and refresh the displayed history.
        llm_artifacts, idx = agent.custom_gene_prompt(llm_result, result['exec_result'], idx)
        state_llm['llm_artifacts'] = llm_artifacts
        state_llm['idx'] = idx
        history = generate_history(llm_artifacts)
        return llm_artifacts, history, history, state_llm
    elif 'no_stop' in result:
        # The agent needs another LLM round: carry the intermediates over
        # and regenerate the prompt from them.
        state_llm['llm_result'] = result['no_stop']['llm_result']
        state_llm['exec_result'] = result['no_stop']['exec_result']
        state_llm['idx'] = result['no_stop']['idx']
        state_llm['final_res'] = result['no_stop']['final_res']
        llm_artifacts, idx = agent.custom_gene_prompt(state_llm['llm_result'], state_llm['exec_result'],
                                                      state_llm['idx'])
        history = generate_history(llm_artifacts)
        state_llm['llm_artifacts'] = llm_artifacts
        state_llm['idx'] = idx
        return llm_artifacts, history, history, state_llm
    else:
        raise ValueError('Unknown result type')
# Build and launch the Gradio UI.  Left column: instruction box + tool
# checklist + start/reset buttons.  Right side: the prompt shown to the
# "LLM" (the player), the player's reply box, and the rendered chat.
with gr.Blocks() as demo:
    gr.Markdown(hello_info)
    prompt_generator = MSPromptGenerator(system_template=MS_DEFAULT_SYSTEM_TEMPLATE)
    output_parser = MsOutputParser()
    # Wrapped in gr.State so each browser session gets its own executor.
    agent = gr.State(AgentExecutor(llm, tool_cfg=tool_cfg, tool_retrieval=False,
                                   prompt_generator=prompt_generator, output_parser=output_parser))
    with gr.Row():
        with gr.Column():
            query_box = gr.TextArea(label="给Agent的指令",
                                    value=examples[0])
            gr.Examples(examples, query_box)
        enable_list = gr.CheckboxGroup(agent.value.available_tool_list, label="启用的Tools",
                                       value=['modelscope_text-address'])
    with gr.Row():
        agent_start = gr.Button("Agent, 启动!")
        agent_reset = gr.Button("Agent, 重置!")
    with gr.Row():
        with gr.Column():
            # Input widgets: the prompt handed to the player and the box
            # where the player types the LLM's reply.
            prompt_box = gr.Text(label="Prompt Box")
            input_box = gr.TextArea(label="Input Box", max_lines=100, value=default_text)
            # Submit button for the player's reply.
            chatbot_btn = gr.Button("Chat")
        # Output widget: the rendered conversation.
        output = gr.Chatbot(elem_id="chatbot", height=900)
    # Per-session mutable state shared across callbacks.
    history = gr.State([])
    state_llm = gr.State({})
    # Wire up the button click events to the callbacks defined above.
    agent_start.click(agent_init, [query_box, state_llm, history, agent, enable_list],
                      [prompt_box, history, output, state_llm])
    chatbot_btn.click(deal_LLM, [input_box, history, state_llm, agent, enable_list],
                      [prompt_box, history, output, state_llm])
    agent_reset.click(agent_remake, [state_llm, history, agent], [prompt_box, history, output, state_llm])
demo.launch()