import gradio as gr
from shared import path_manager
import modules.async_worker as worker
from pathlib import Path
import json
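
# Each assistant lives in its own folder under chatbots/ and is described by
# an info.json. A minimal sketch of the expected layout, inferred from the
# keys read below ("avatar" is optional and defaults to <folder>/avatar.png;
# "embed" is optional and defaults to an empty list):
#
#   chatbots/my_bot/info.json
#   {
#       "name": "My Bot",
#       "greeting": "Hello!",
#       "system": "You are a helpful assistant.",
#       "avatar": "chatbots/my_bot/avatar.png",
#       "embed": []
#   }
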
def create_chat():
    def llama_get_assistants():
        # Scan chatbots/ for assistant folders; each is identified by its
        # info.json. Returns (display name, folder path) pairs for the
        # dropdown, sorted case-insensitively by name.
        names = []
        folder_path = Path("chatbots")
        for path in folder_path.rglob("*"):
            if path.is_dir():
                try:
                    with open(path / "info.json", "r", encoding="utf-8") as f:
                        info = json.load(f)
                    names.append((info["name"], str(path)))
                except Exception as e:
                    print(f"ERROR: in {path}: {e}")
        names.sort(key=lambda x: x[0].casefold())
        return names
    def gr_llama_get_assistants():
        return {
            llama_assistants: gr.update(
                choices=llama_get_assistants(),
            )
        }
    def _llama_select_assistant(dropdown):
        # Load the selected assistant's info.json. "embed" is re-serialized
        # to a JSON string because it is carried in a hidden gr.Textbox.
        folder = Path(dropdown)
        try:
            with open(folder / "info.json", "r", encoding="utf-8") as f:
                info = json.load(f)
            if "avatar" not in info:
                info["avatar"] = folder / "avatar.png"
            if "embed" in info:
                info["embed"] = json.dumps(info["embed"])
            else:
                info["embed"] = json.dumps([])
        except Exception as e:
            # Fall back to an error persona so the UI still renders.
            print(f"ERROR: {dropdown}: {e}")
            info = {
                "name": "Error",
                "greeting": "Error!",
                "avatar": "html/error.png",
                "system": "Everything is broken.",
                "embed": json.dumps([]),
            }
        info["chatstart"] = [{"role": "assistant", "content": info["greeting"]}]
        return info
    def llama_select_assistant(dropdown):
        info = _llama_select_assistant(dropdown)
        return {
            llama_chat: gr.update(value=info["chatstart"]),
            llama_msg: gr.update(value=""),
            llama_avatar: gr.update(
                value=info["avatar"],
                label=info["name"],
            ),
            llama_system: gr.update(value=info["system"]),
            llama_embed: gr.update(value=info["embed"]),
        }
    with gr.Blocks() as app_llama_chat:
        with gr.Row():
            with gr.Column(scale=3), gr.Group():
                # FIXME!!! start value should be read from some info.json
                default_bot = "chatbots/rf_support_troll"
                # Load the default assistant once instead of re-reading
                # info.json for every component below.
                default_info = _llama_select_assistant(default_bot)
                llama_chat = gr.Chatbot(
                    label="",
                    show_label=False,
                    height=600,
                    type="messages",
                    allow_tags=["think", "thinking"],
                    value=default_info["chatstart"],
                )
                llama_msg = gr.Textbox(
                    show_label=False,
                )
                # Hidden relay: the submitted message is copied here so the
                # visible textbox can be cleared before the response streams.
                llama_sent = gr.Textbox(visible=False)
            with gr.Column(scale=2), gr.Group():
                llama_avatar = gr.Image(
                    value=default_info["avatar"],
                    label=default_info["name"],
                    height=400,
                    width=400,
                    show_label=True,
                )
                with gr.Row():
                    llama_assistants = gr.Dropdown(
                        choices=llama_get_assistants(),
                        # Choices are (name, path) tuples, so the value must
                        # be the folder path, not the display name.
                        value=default_bot,
                        show_label=False,
                        interactive=True,
                        scale=7,
                    )
                    llama_reload = gr.Button(
                        value="↻",
                        scale=1,
                    )
                # Hidden textboxes carry the active system prompt and embed
                # data between callbacks.
                llama_system = gr.Textbox(
                    visible=False,
                    value=default_info["system"],
                )
                llama_embed = gr.Textbox(
                    visible=False,
                    value=default_info["embed"],
                )
        def llama_get_text(message):
            # Clear the visible textbox and forward the message to llama_sent.
            return "", message

        def llama_respond(message, system, embed, chat_history):
            chat_history.append({"role": "user", "content": message})
            gen_data = {
                "task_type": "llama",
                "system": system,
                "embed": embed,
                "history": chat_history,
            }
            # Queue the task with the async worker ...
            task_id = worker.add_task(gen_data.copy())
            # ... then poll for results, streaming previews to the chatbot
            # until the final answer arrives.
            finished = False
            while not finished:
                flag, product = worker.task_result(task_id)
                if flag == "preview":
                    yield product
                elif flag == "results":
                    finished = True
                    chat_history.append({"role": "assistant", "content": product})
                    yield chat_history
        llama_msg.submit(
            llama_get_text,
            [llama_msg],
            [llama_msg, llama_sent],
        ).then(
            llama_respond,
            [llama_sent, llama_system, llama_embed, llama_chat],
            [llama_chat],
        )
        llama_assistants.select(
            llama_select_assistant,
            [llama_assistants],
            [llama_chat, llama_msg, llama_avatar, llama_system, llama_embed],
        )
        llama_reload.click(gr_llama_get_assistants, None, [llama_assistants])
    return app_llama_chat
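
# Hypothetical usage from an app entry point (create_chat() returns a
# gr.Blocks, so it can be launched directly or mounted in a larger UI):
#
#   app = create_chat()
#   app.launch()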