starsaround committed
Commit 993584d · 1 Parent: fd506d6

Update app.py
update the chat mode with memory.

app.py CHANGED
@@ -18,6 +18,19 @@ from g4f.Provider import (
     GetGpt
 )
 import os
+import json
+import pandas as pd
+
+from models_for_langchain.model import CustomLLM
+from langchain.memory import ConversationBufferWindowMemory, ConversationTokenBufferMemory
+from langchain import LLMChain, PromptTemplate
+from langchain.prompts import (
+    ChatPromptTemplate,
+    PromptTemplate,
+    SystemMessagePromptTemplate,
+    AIMessagePromptTemplate,
+    HumanMessagePromptTemplate,
+)
 
 provider_dict = {
     'Ails': Ails,
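The new chat mode drives generations through a `CustomLLM` imported from `models_for_langchain.model`, a module that is not part of this commit. Below is a minimal, hypothetical sketch of what such a wrapper might look like, assuming the LangChain 0.0.x custom-LLM interface and a provider lookup equivalent to app.py's `provider_dict`; it is an illustration, not the committed implementation.

```python
# Hypothetical sketch of models_for_langchain/model.py (not in this commit).
# It adapts g4f to LangChain's custom-LLM interface so LLMChain can call it
# like any other model.
from typing import List, Optional

import g4f
from langchain.llms.base import LLM


class CustomLLM(LLM):
    # bot() mutates these two fields per request to pick model and provider.
    model_name: str = 'gpt-3.5-turbo'
    provider_name: str = 'GetGpt'

    @property
    def _llm_type(self) -> str:
        return "g4f"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        # Blocking call that returns the full completion as one string.
        # getattr on g4f.Provider stands in for app.py's provider_dict lookup.
        return g4f.ChatCompletion.create(
            model=self.model_name,
            provider=getattr(g4f.Provider, self.provider_name),
            messages=[{"role": "user", "content": prompt}],
        )
```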
@@ -37,92 +50,113 @@ provider_dict = {
     'GetGpt': GetGpt
 }
 
+prompt_set_list = {}
+for prompt_file in os.listdir("prompt_set"):
+    key = prompt_file
+    if '.csv' in key:
+        df = pd.read_csv("prompt_set/" + prompt_file)
+        prompt_dict = dict(zip(df['act'], df['prompt']))
+    else:
+        with open("prompt_set/" + prompt_file, encoding='utf-8') as f:
+            ds = json.load(f)
+        prompt_dict = {item["act"]: item["prompt"] for item in ds}
+    prompt_set_list[key] = prompt_dict
+
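The loader just added accepts two layouts under `prompt_set/`: CSV files with `act` and `prompt` columns, and JSON files holding a list of `{"act": ..., "prompt": ...}` records (the default file, "1 中文提示词.json", is a set of Chinese prompts; the dropdown labels 提示词集合 and 提示词 mean "prompt set" and "prompt"). A quick self-contained check of both branches; the sample strings are made up for illustration:

```python
import io
import json

import pandas as pd

# CSV branch: two columns, 'act' and 'prompt'.
csv_text = "act,prompt\nTranslator,Translate everything I say into English.\n"
df = pd.read_csv(io.StringIO(csv_text))
print(dict(zip(df['act'], df['prompt'])))

# JSON branch: a list of {"act": ..., "prompt": ...} records.
json_text = '[{"act": "Translator", "prompt": "Translate everything I say into English."}]'
print({item["act"]: item["prompt"] for item in json.loads(json_text)})

# Both print: {'Translator': 'Translate everything I say into English.'}
```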
 with gr.Blocks() as demo:
-    [old lines 41-64: the previous UI setup and the start of the old bot() and update_memory() handlers; their text is not preserved in this diff view, and the recovered lines below begin mid-prompt]
-```
-历史对话中的关键信息:
-```
-{}
-```
-用户最新的话:
-```
-{}
-```
-请回答:
-"""
-        bot_msg = g4f.ChatCompletion.create(model=model_name,
-                                            provider=provider_dict[provider_name],
-                                            messages=[{"role": "user",
-                                                       "content": prompt.format(system_msg,
-                                                                                memory_msg,
-                                                                                history[-1][0])}],
-                                            stream=True)
-        for c in bot_msg:
-            history[-1][1] += c
-            yield history
 
-
-
-        return gr.update(value=memory_msg, interactive=False)
-        prompt = """
-请你记录对话内容,尤其是用户提供的各种关键信息。已知的对话内容总结如下:
-```
-{}
-```
-最新的对话内容如下:
-```
-用户:{}
-AI:{}
-```
-总结格式如下:
-```
-关键信息总结:XXX
-```
-请对已知对话和最新对话的关键信息进行总结,以应对后续的聊天:
-"""
-        memory = g4f.ChatCompletion.create(model=model_name,
-                                           provider=provider_dict[provider_name],
-                                           messages=[
-                                               {
-                                                   'role':'user',
-                                                   'content': prompt.format(memory_msg,
-                                                                            history[-1][0],
-                                                                            history[-1][1])
-                                               }
-                                           ])
-        return gr.update(value=memory, interactive=False)
-
-    response = msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
-        bot, [chatbot, model_name, provider_name, system_msg, memory_msg], chatbot
-    )
-    response.then(lambda: gr.update(interactive=True), None, [msg], queue=False)
-    remember.click(
-        update_memory, [memory_msg, chatbot, model_name, provider_name], memory_msg
-    )
-    clear.click(lambda: (None, None), None, [chatbot, memory_msg], queue=False)
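This removed block is the old hand-rolled memory: the deleted prompts ask the model (roughly, from the Chinese: "record the conversation, especially key information the user provides, and summarize it as 关键信息总结 'key information summary'") to compress each exchange into a summary string via a second `g4f.ChatCompletion.create` call, which the `remember` button wrote back into the `memory_msg` textbox. The commit drops that button and textbox entirely and replaces the scheme with LangChain's `ConversationBufferWindowMemory`, which keeps the last `k` exchanges verbatim. A small sketch of its behavior, using `k=2` so the truncation is visible:

```python
from langchain.memory import ConversationBufferWindowMemory

memory = ConversationBufferWindowMemory(k=2, memory_key="chat_history")
memory.save_context({"input": "My name is Ada."}, {"output": "Nice to meet you, Ada."})
memory.save_context({"input": "I like chess."}, {"output": "A classic game."})
memory.save_context({"input": "What's my name?"}, {"output": "Ada."})

# Only the most recent k=2 exchanges survive; the first one is dropped.
print(memory.load_memory_variables({})["chat_history"])
```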
+    llm = CustomLLM()
+
+    template = """
+Chat with human based on following instructions:
+```
+{system_instruction}
+```
+The following is a conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
+{{chat_history}}
+Human: {{human_input}}
+Chatbot:"""
+
+    memory = ConversationBufferWindowMemory(k=10, memory_key="chat_history")
+
+    chatbot = gr.Chatbot([[None, None]], label='AI')
+    msg = gr.Textbox(value="", label='请输入:')
+    with gr.Row():
+        clear = gr.Button("清空对话", scale=2)
+        chat_mode = gr.Checkbox(value=True, label='聊天模式', interactive=True, scale=1)
+    system_msg = gr.Textbox(value="你是一名助手,可以解答问题。", label='系统提示')
+    with gr.Row():
+        default_prompt_set = "1 中文提示词.json"
+        prompt_set_name = gr.Dropdown(prompt_set_list.keys(), value=default_prompt_set, label='提示词集合')
+        prompt_name = gr.Dropdown(prompt_set_list[default_prompt_set].keys(), label='提示词')
+    with gr.Row():
+        model_name = gr.Dropdown(['gpt-3.5-turbo', 'gpt-4'], value='gpt-3.5-turbo', label='模型')
+        provider_name = gr.Dropdown(provider_dict.keys(), value='GetGpt', label='提供者')
+
+    def change_prompt_set(prompt_set_name):
+        return gr.Dropdown.update(choices=list(prompt_set_list[prompt_set_name].keys()))
+
+    def change_prompt(prompt_set_name, prompt_name):
+        return gr.update(value=prompt_set_list[prompt_set_name][prompt_name])
+
+    def user(user_message, history):
+        return gr.update(value="", interactive=False), history + [[user_message, None]]
 
+    def bot(history, model_name, provider_name, system_msg, chat_mode):
+        history[-1][1] = ''
+        if len(system_msg)>3000:
+            system_msg = system_msg[:2000] + system_msg[-1000:]
 
+        if chat_mode:
+            global template, memory
+            llm.model_name = model_name
+            llm.provider_name = provider_name
+            prompt = PromptTemplate(
+                input_variables=["chat_history", "human_input"], template=template.format(system_instruction=system_msg)
+            )
+            llm_chain = LLMChain(
+                llm=llm,
+                prompt=prompt,
+                verbose=False,
+                memory=memory,
+            )
+            bot_msg = llm_chain.run(history[-1][0])
+            for c in bot_msg:
+                history[-1][1] += c
+                yield history
+        else:
+            prompt = """
+请你仔细阅读以下提示,然后针对用户的话进行回答。
+提示:
+```
+{}
+```
+用户最新的话:
+```
+{}
+```
+请回答:
+"""
+            bot_msg = g4f.ChatCompletion.create(model=model_name,
+                                                provider=provider_dict[provider_name],
+                                                messages=[{"role": "user",
+                                                           "content": prompt.format(system_msg,
+                                                                                    history[-1][0])}],
+                                                stream=True)
+            for c in bot_msg:
+                history[-1][1] += c
+                yield history
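One behavioral detail worth noting: in the `else` branch (whose Chinese prompt roughly says "read the prompt below carefully, then answer the user's latest message"), g4f's `stream=True` yields real chunks as they arrive, but in chat mode `llm_chain.run()` blocks and returns the finished reply as one string, so the `for c in bot_msg:` loop only simulates streaming by re-yielding the history one character at a time. A tiny standalone illustration of that pattern:

```python
def stream_chars(history, reply):
    # Append the already-complete reply one character at a time; each yield
    # lets Gradio repaint the Chatbot, so the UI looks like it is streaming.
    history[-1][1] = ""
    for c in reply:  # iterating a str yields single characters
        history[-1][1] += c
        yield history

for h in stream_chars([["hi", None]], "hello"):
    print(h[-1][1])  # h, he, hel, hell, hello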
 
+    def empty_chat():
+        global memory
+        memory = ConversationBufferWindowMemory(k=10, memory_key="chat_history")
+        return None
+    response = msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
+        bot, [chatbot, model_name, provider_name, system_msg, chat_mode], chatbot
+    )
+    prompt_set_name.select(change_prompt_set, prompt_set_name, prompt_name)
+    prompt_name.select(change_prompt, [prompt_set_name, prompt_name], system_msg)
 
+    response.then(lambda: gr.update(interactive=True), None, [msg], queue=False)
+    clear.click(empty_chat, None, [chatbot], queue=False)
 
 demo.title = "AI Chat"
 demo.queue()
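Putting the added pieces together: on every call, `bot()` fills the system instruction into `template` with `str.format` (the doubled braces survive as `{chat_history}` and `{human_input}` placeholders for LangChain to fill later), builds an `LLMChain` over the shared windowed memory, and runs it. A condensed, self-contained sketch of that flow, reusing the hypothetical `CustomLLM` from the first note and a simplified template without the backtick fences:

```python
from langchain import LLMChain, PromptTemplate
from langchain.memory import ConversationBufferWindowMemory

# {system_instruction} is filled now by str.format; the doubled braces
# become {chat_history} / {human_input} for PromptTemplate to fill per turn.
template = "Instructions: {system_instruction}\n{{chat_history}}\nHuman: {{human_input}}\nChatbot:"

prompt = PromptTemplate(
    input_variables=["chat_history", "human_input"],
    template=template.format(system_instruction="You are a helpful assistant."),
)
memory = ConversationBufferWindowMemory(k=10, memory_key="chat_history")
llm_chain = LLMChain(llm=CustomLLM(), prompt=prompt, memory=memory)

# Each run appends the exchange to memory, so the next call sees the history.
print(llm_chain.run("Hello!"))
```

Because `memory` lives at module level inside the `with gr.Blocks()` block and `empty_chat()` simply rebuilds it, the 清空对话 ("clear chat") button wipes the window; note that this single memory object is shared by all concurrent users of the Space, a limitation of module-level state rather than per-session memory.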