|
import gradio as gr |
|
from gpt4all import GPT4All |
|
from urllib.request import urlopen |
|
import json |
|
import time |
|
|
|
# GPT4All's published model catalog; each entry describes one downloadable model
# (filename, description, size, ...).
url = "https://raw.githubusercontent.com/nomic-ai/gpt4all/main/gpt4all-chat/metadata/models3.json"

# Use a context manager so the HTTP response/socket is closed deterministically
# instead of being leaked (the original never called response.close()).
with urlopen(url) as response:
    data_json = json.loads(response.read())
|
|
|
def model_choices():
    """Return the list of model filenames available in the GPT4All catalog.

    Returns:
        list[str]: one ``filename`` per catalog entry, in catalog order.
    """
    # Iterate the parsed JSON list directly rather than indexing via
    # range(len(...)) — same result, idiomatic and less error-prone.
    return [entry['filename'] for entry in data_json]
|
|
|
# Map each model filename to its human-readable description for O(1) lookup
# when the user changes the dropdown selection.
model_description = {model['filename']: model['description'] for model in data_json}
|
|
|
def llm_intro(selected_model):
    """Look up the catalog description for *selected_model*.

    Returns a fixed fallback message when the filename is not present
    in the catalog mapping.
    """
    fallback = "No description available for this model selection."
    return model_description.get(selected_model, fallback)
|
|
|
# Cache of already-loaded models, keyed by filename, so repeated generations
# don't reload the model weights from disk on every single call.
_loaded_models = {}


def generate_text(input_text, selected_model):
    """Generate up to 100 tokens of text from *input_text* with *selected_model*.

    Args:
        input_text: the prompt string to feed the model.
        selected_model: GPT4All model filename (as listed in the catalog).

    Returns:
        The generated text returned by ``GPT4All.generate``.
    """
    model = _loaded_models.get(selected_model)
    if model is None:
        # Instantiating GPT4All loads the weights (and may download the model
        # file) — expensive, so do it at most once per filename.
        model = GPT4All(selected_model)
        _loaded_models[selected_model] = model
    return model.generate(input_text, max_tokens=100)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
with gr.Blocks() as demo:
    gr.Markdown("## GPT4All Text Generation Experiment")

    with gr.Row():
        model_selection = gr.Dropdown(choices=model_choices(),
                                      multiselect=False,
                                      label="LLMs to choose from",
                                      type="value",
                                      value="orca-mini-3b-gguf2-q4_0.gguf")

        explanation = gr.Textbox(label="Model Description", lines=3, interactive=False, value=llm_intro("orca-mini-3b-gguf2-q4_0.gguf"))

    # Refresh the description box whenever a different model is picked.
    model_selection.change(fn=llm_intro, inputs=model_selection, outputs=explanation)

    chatbot = gr.Chatbot()
    input_text = gr.Textbox(lines=10, label="Input Text")

    clear = gr.ClearButton([input_text, chatbot])

    def respond(message, chat_history, selected_model):
        """Run the selected model on *message* and append the exchange.

        Bug fix: the original appended a freshly-constructed gr.Textbox
        *component* to the history (rendering a widget repr, not text) and
        never invoked the model at all; it also slept 2s as a placeholder.
        It now calls generate_text() to produce a real reply.
        """
        bot_reply = generate_text(message, selected_model)
        chat_history.append((message, bot_reply))
        return "", chat_history

    # Pass the dropdown through as an extra input so respond() knows which
    # model to run; Gradio supplies the dropdown's current value.
    input_text.submit(respond, [input_text, chatbot, model_selection], [input_text, chatbot])

demo.launch()
|
|