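"""Gradio demo: "That Time I Got Reincarnated as an Agent".

Rather than wiring a real LLM into my_modelscope_agent's AgentExecutor, the
user plays the LLM: enter an instruction, read the prompt the agent builds,
and paste the "model" reply (including <|startofthink|>/<|endofthink|> tool
calls) into the Input Box. The agent then parses the reply, executes the
ModelScope tool, and generates the next prompt.
"""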
import os
import re

# Allow a duplicate OpenMP runtime; a common workaround for the Intel OpenMP
# "libiomp5 already initialized" abort that can occur once modelscope/torch loads.
os.environ['KMP_DUPLICATE_LIB_OK'] = "TRUE"

import gradio as gr
from modelscope.utils.config import Config

from my_modelscope_agent.agent import AgentExecutor
from my_modelscope_agent.output_parser import MsOutputParser
from my_modelscope_agent.prompt import MSPromptGenerator

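# Example hand-written "LLM" reply containing a <|startofthink|> tool call;
# pre-filled into the Input Box.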
default_text = """收到!
<|startofthink|>
{
    "api_name": "modelscope_text-address",
    "parameters": {
        "input": "浙江杭州市江干区九堡镇三村村一区"}
}
<|endofthink|>"""

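# System prompt (kept in Chinese) used by MSPromptGenerator for the <|system|> turn.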
MS_DEFAULT_SYSTEM_TEMPLATE = """<|system|>:你是Datawhale与ModelScope联合培养的人工大模型,拥有超百万亿的参数(神经突触),遥遥领先于GPT-4,你有多种能力,可以通过插件集成魔搭社区的模型api来回复用户的问题,还能解答用户使用模型遇到的问题和模型知识相关问答。
"""


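# Intro markdown rendered at the top of the page.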
hello_info = """# 关于我转生变成Agent这档事
众所周知,换位思考可以增进人与人之间的理解。假如我们能换一个角度,站在LLM的角度上去处理用户提出的各种问题,会碰撞出什么样的火花呢?
"""


# Placeholder "LLM": AgentExecutor expects an LLM object, but in this demo the
# human plays the model and pastes its replies into the Input Box.
class my_llm:
    def set_agent_type(self, agent_type):
        self.agent_type = agent_type


def generate_history(txt):
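    """Turn a prompt string into gr.Chatbot history.

    The prompt is split on <|role|>: markers; the leading system segment is
    dropped and the remaining segments are paired into [user, assistant]
    turns, padding an empty assistant turn when the prompt ends with a bare
    marker (e.g. a trailing <|assistant|>: awaiting a reply).
    """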
    def split_and_extract(input_string):
        # Split on the <|role|>: markers
        split_strings = re.split(r'<\|.*?\|>:', input_string)
        # Extract the <|role|>: markers themselves
        extracted = re.findall(r'<\|.*?\|>:', input_string)
        return split_strings, extracted

    if not txt:
        return []
    split_strings, extracted = split_and_extract(txt)
    split_strings = [i for i in split_strings if i != ''][1:]
    extracted = extracted[1:]
    if len(split_strings) + 1 == len(extracted):
        split_strings.append('')

    history = []

    # Split the segments into alternating user / assistant turns
    split_strings_odd = split_strings[::2]
    split_strings_even = split_strings[1::2]

    for user_turn, bot_turn in zip(split_strings_odd, split_strings_even):
        history.append([user_turn, bot_turn])

    return history


llm = my_llm()
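# Tool configuration loaded from a JSON template in the working directory
# (presumably ModelScope API settings for the enabled tools).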
tool_cfg = Config.from_file(r'cfg_tool_template.json')


def agent_remake(state_llm, history, agent):
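    """Reset the session: clear the stored LLM state and chat history, reset
    the agent, and empty the prompt box."""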
    state_llm.clear()
    history.clear()
    agent.reset()

    return '', history, history, state_llm


def agent_init(init_cmd, state_llm, history, agent, enable_list):
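    """Kick off a run: restrict the agent to the enabled tools, run its init
    step on the instruction, build the first prompt, and stash every
    intermediate value in state_llm so deal_LLM can continue the loop."""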
    agent.set_available_tools(enable_list)

    tool_list, knowledge_list, function_list, llm_result, exec_result, idx, final_res, remote, print_info = agent.custom_run_init(
        init_cmd, remote=True)
    llm_artifacts, idx = agent.custom_gene_prompt(llm_result, exec_result, idx)

    state_llm['tool_list'] = tool_list
    state_llm['knowledge_list'] = knowledge_list
    state_llm['function_list'] = function_list
    state_llm['exec_result'] = exec_result
    state_llm['idx'] = idx
    state_llm['final_res'] = final_res
    state_llm['remote'] = remote
    state_llm['print_info'] = print_info
    state_llm['llm_artifacts'] = llm_artifacts
    state_llm['is_end'] = False

    history = generate_history(llm_artifacts)

    return llm_artifacts, history, history, state_llm


def deal_LLM(input_data, history, state_llm, agent, enable_list):
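    """Consume one pasted "LLM" reply and advance the agent one step.

    Depending on what custom_parse_llm returns, either finish the run
    ('end_res'), execute the parsed tool call and build the next prompt
    ('exec_result'), or carry the intermediate state forward and re-prompt
    ('no_stop').
    """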
    agent.set_available_tools(enable_list)

    llm_artifacts = state_llm['llm_artifacts']
    llm_result = input_data
    idx = state_llm['idx']
    final_res = state_llm['final_res']
    remote = state_llm['remote']
    print_info = state_llm['print_info']

    history = generate_history(llm_artifacts)

    result = agent.custom_parse_llm(llm_artifacts, llm_result, idx, final_res, remote, print_info)[0]
    if 'end_res' in result:
        state_llm['is_end'] = True
        state_llm['final_res'] = result['end_res']
        history[-1][1] += '\n' + llm_result

        return '', history, history, state_llm

    elif 'exec_result' in result:
        llm_artifacts, idx = agent.custom_gene_prompt(llm_result, result['exec_result'], idx)
        state_llm['llm_artifacts'] = llm_artifacts
        state_llm['idx'] = idx
        history = generate_history(llm_artifacts)
        return llm_artifacts, history, history, state_llm

    elif 'no_stop' in result:
        state_llm['llm_result'] = result['no_stop']['llm_result']
        state_llm['exec_result'] = result['no_stop']['exec_result']
        state_llm['idx'] = result['no_stop']['idx']
        state_llm['final_res'] = result['no_stop']['final_res']

        llm_artifacts, idx = agent.custom_gene_prompt(state_llm['llm_result'], state_llm['exec_result'],
                                                      state_llm['idx'])
        history = generate_history(llm_artifacts)
        state_llm['llm_artifacts'] = llm_artifacts
        state_llm['idx'] = idx
        return llm_artifacts, history, history, state_llm
    else:
        raise ValueError('Unknown result type')


with gr.Blocks() as demo:
    gr.Markdown(hello_info)
    prompt_generator = MSPromptGenerator(system_template=MS_DEFAULT_SYSTEM_TEMPLATE)
    output_parser = MsOutputParser()
    agent = gr.State(AgentExecutor(llm, tool_cfg=tool_cfg, tool_retrieval=False,
                                   prompt_generator=prompt_generator, output_parser=output_parser))

    with gr.Row():
        query_box = gr.TextArea(label="给Agent的指令",
                                value='使用地址识别模型,从下面的地址中找到省市区等元素,地址:浙江杭州市江干区九堡镇三村村一区')
        enable_list = gr.CheckboxGroup(agent.value.available_tool_list, label="启用的Tools",
                                       value=['modelscope_text-address'])

    with gr.Row():
        agent_start = gr.Button("Agent, 启动!")
        agent_reset = gr.Button("Agent, 重置!")

    with gr.Row():
        with gr.Column():
            # Input components
            prompt_box = gr.Text(label="Prompt Box")

            input_box = gr.TextArea(label="Input Box", max_lines=100, value=default_text)
            # Chat button
            chatbot_btn = gr.Button("Chat")
        # Output component: the chat window
        output = gr.Chatbot(elem_id="chatbot", height=900)

    history = gr.State([])
    state_llm = gr.State({})

    # Wire the buttons to their callbacks
    agent_start.click(agent_init, [query_box, state_llm, history, agent, enable_list],
                      [prompt_box, history, output, state_llm])
    chatbot_btn.click(deal_LLM, [input_box, history, state_llm, agent, enable_list],
                      [prompt_box, history, output, state_llm])
    agent_reset.click(agent_remake, [state_llm, history, agent], [prompt_box, history, output, state_llm])

demo.launch()