# NOTE: Hugging Face Spaces page residue (build status, file size, commit
# hashes, and the line-number gutter) removed — it was not part of the source.
import g4f
import gradio as gr
from g4f.Provider import (
Ails,
You,
Bing,
Yqcloud,
Theb,
Aichat,
Bard,
Vercel,
Forefront,
Lockchat,
Liaobots,
H2o,
ChatgptLogin,
DeepAi,
GetGpt
)
import os
import json
import pandas as pd
from models_for_langchain.model import CustomLLM
from langchain.memory import ConversationBufferWindowMemory, ConversationTokenBufferMemory
from langchain import LLMChain, PromptTemplate
from langchain.prompts import (
ChatPromptTemplate,
PromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
# Map UI dropdown names to their g4f provider classes.
provider_dict = dict(
    Ails=Ails,
    You=You,
    Bing=Bing,
    Yqcloud=Yqcloud,
    Theb=Theb,
    Aichat=Aichat,
    Bard=Bard,
    Vercel=Vercel,
    Forefront=Forefront,
    Lockchat=Lockchat,
    Liaobots=Liaobots,
    H2o=H2o,
    ChatgptLogin=ChatgptLogin,
    DeepAi=DeepAi,
    GetGpt=GetGpt,
)
# Load every prompt collection found under prompt_set/.
# CSV files provide 'act'/'prompt' columns; anything else is parsed as a JSON
# list of {"act": ..., "prompt": ...} records.
# Result shape: {filename: {act: prompt, ...}, ...}
prompt_set_list = {}
for prompt_file in os.listdir("prompt_set"):
    path = "prompt_set/" + prompt_file
    if '.csv' in prompt_file:
        frame = pd.read_csv(path)
        mapping = dict(zip(frame['act'], frame['prompt']))
    else:
        with open(path, encoding='utf-8') as fp:
            records = json.load(fp)
        mapping = {record["act"]: record["prompt"] for record in records}
    prompt_set_list[prompt_file] = mapping
with gr.Blocks() as demo:
    # LLM wrapper whose model/provider are reassigned per request (see bot()).
    llm = CustomLLM()
    # Outer prompt template for the LangChain path: {system_instruction} is
    # filled first via .format(); the doubled braces survive that pass and
    # become the real {chat_history}/{human_input} placeholders later.
    template = """
Chat with human based on following instructions:
```
{system_instruction}
```
The following is a conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
{{chat_history}}
Human: {{human_input}}
Chatbot:"""
    # Sliding-window conversation memory (last 10 exchanges), shared with bot()
    # and reset by empty_chat().
    memory = ConversationBufferWindowMemory(k=10, memory_key="chat_history")
    chatbot = gr.Chatbot([], label='AI')
    msg = gr.Textbox(value="", label='请输入:')
    with gr.Row():
        clear = gr.Button("清空对话", scale=2)
        chat_mode = gr.Checkbox(value=True, label='聊天模式', interactive=True, scale=1)
    system_msg = gr.Textbox(value="你是一名助手,可以解答问题。", label='系统提示')
    with gr.Row():
        # Prompt-set dropdown drives the prompt dropdown (see the .select wiring
        # below); the chosen prompt text is copied into system_msg.
        default_prompt_set = "1 中文提示词.json"
        prompt_set_name = gr.Dropdown(prompt_set_list.keys(), value=default_prompt_set, label='提示词集合')
        prompt_name = gr.Dropdown(prompt_set_list[default_prompt_set].keys(), label='提示词', min_width=20)
    with gr.Row():
        model_name = gr.Dropdown(['gpt-3.5-turbo', 'gpt-4'], value='gpt-3.5-turbo', label='模型')
        provider_name = gr.Dropdown(provider_dict.keys(), value='GetGpt', label='提供者', min_width=20)
def change_prompt_set(prompt_set_name):
return gr.Dropdown.update(choices=list(prompt_set_list[prompt_set_name].keys()))
def change_prompt(prompt_set_name, prompt_name):
return gr.update(value=prompt_set_list[prompt_set_name][prompt_name])
def user(user_message, history = []):
return gr.update(value="", interactive=False), history + [[user_message, None]]
    def bot(history, model_name, provider_name, system_msg, chat_mode):
        """Generate the reply for the last user turn, streaming it into
        history[-1][1] and yielding the updated history for the UI.

        chat_mode=False -> LangChain LLMChain with windowed memory.
        chat_mode=True  -> direct g4f streaming call built from history.
        """
        history[-1][1] = ''
        # Cap over-long system prompts: keep the first 2000 and last 1000 chars.
        if len(system_msg)>3000:
            system_msg = system_msg[:2000] + system_msg[-1000:]
        if not chat_mode:
            global template, memory
            llm.model_name = model_name
            llm.provider_name = provider_name
            # Fill {system_instruction} now; the escaped {{...}} placeholders
            # become chat_history / human_input for the chain.
            prompt = PromptTemplate(
                input_variables=["chat_history", "human_input"], template=template.format(system_instruction=system_msg)
            )
            llm_chain = LLMChain(
                llm=llm,
                prompt=prompt,
                verbose=False,
                memory=memory,
            )
            bot_msg = llm_chain.run(history[-1][0])
            # run() returns the complete answer; re-emit it character by
            # character so the UI still appears to stream.
            for c in bot_msg:
                history[-1][1] += c
                yield history
        else:
            # NOTE(review): this wrapper prompt is built but never used below —
            # system_msg is effectively ignored in chat mode; confirm intent.
            prompt = """
请你仔细阅读以下提示,然后针对用户的话进行回答。
提示:
```
{}
```
用户最新的话:
```
{}
```
请回答:
"""
            # Rebuild an OpenAI-style message list from the visible history.
            messages = []
            for user_message, assistant_message in history[:-1]:
                messages.append({"role": "user", "content": user_message})
                messages.append({"role": "assistant", "content": assistant_message})
            messages.append({"role": "user", "content": history[-1][0]})
            bot_msg = g4f.ChatCompletion.create(
                model=model_name,
                provider=provider_dict[provider_name],
                messages=messages,
                stream=True)
            # g4f yields chunks; append and yield each one as it arrives.
            for c in bot_msg:
                history[-1][1] += c
                print(c, flush=True, end='')
                yield history
def empty_chat():
global memory
memory = ConversationBufferWindowMemory(k=10, memory_key="chat_history")
return None
response = msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
bot, [chatbot, model_name, provider_name, system_msg, chat_mode], chatbot
)
prompt_set_name.select(change_prompt_set, prompt_set_name, prompt_name)
prompt_name.select(change_prompt, [prompt_set_name, prompt_name], system_msg)
response.then(lambda: gr.update(interactive=True), None, [msg], queue=False)
clear.click(empty_chat, None, [chatbot], queue=False)
demo.title = "AI Chat"
# Queuing is required for the streaming (generator) callbacks to work.
demo.queue()
# Fix: removed a stray trailing '|' after launch() — scrape residue that made
# the file a syntax error.
demo.launch()