import gradio as gr

from load_llms import model_choices, llm_intro, load_model
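# `load_llms` is a local helper module that is not shown in this file. From how it
# is used below, it is assumed to expose roughly the following interface:
#   model_choices() -> list[str]       # names of available GPT4All .gguf models
#   llm_intro(model_name) -> str       # short description of the given model
#   load_model(model_name) -> model    # e.g. wraps gpt4all.GPT4All(model_name)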

# Construct the chatbot: build the prompt from past turns, query the selected model,
# and return the cleared input box plus the updated chat history.
def generate_response(model_name, message, chat_history):
    model = load_model(model_name)
    chat_history = chat_history or []
    if chat_history:
        # Flatten previous (user, assistant) pairs into a single context string,
        # skipping empty slots such as the None user side of the opening greeting.
        past_chat = " ".join(text for turn in chat_history for text in turn if text)
        input_text = past_chat + " " + message
    else:
        input_text = message
    response = model.generate(input_text, max_tokens=100)
    chat_history.append((message, response))
    return "", chat_history
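
# Hypothetical quick check of generate_response outside the UI (not part of the
# original script); it assumes the orca-mini model is available to load_model:
#   _, history = generate_response("orca-mini-3b-gguf2-q4_0.gguf", "Hello!", [])
#   print(history[-1][1])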

# Create the Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("# GPT4All Chatbot")
    with gr.Row():
        with gr.Column(scale=1):
            model_dropdown = gr.Dropdown(
                choices=model_choices(),
                multiselect=False,
                type="value",
                value="orca-mini-3b-gguf2-q4_0.gguf",
                label="LLMs to choose from",
            )
            explanation = gr.Textbox(
                label="Model Description",
                interactive=False,
                value=llm_intro("orca-mini-3b-gguf2-q4_0.gguf"),
            )

            # Link the dropdown with the textbox so the description updates
            # whenever a different model is selected
            model_dropdown.change(fn=llm_intro, inputs=model_dropdown, outputs=explanation)

        with gr.Column(scale=4):
            chatbot = gr.Chatbot(label="Chatroom", value=[(None, "How may I help you today?")])
            message = gr.Textbox(label="Message")
            message.submit(
                generate_response,
                inputs=[model_dropdown, message, chatbot],
                outputs=[message, chatbot],
            )

# Launch the Gradio app
demo.launch()

# Optional: a button that clears the message box and chat history. If enabled, it
# should be created inside the gr.Blocks() context above.
# clear = gr.ClearButton([message, chatbot])
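
# Assumed usage (not part of the original file): run this script directly with Python.
# demo.launch() starts a local Gradio server and prints its URL (typically
# http://127.0.0.1:7860). The first request for a model may trigger a download of the
# corresponding .gguf weights if GPT4All has not cached them locally.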