Update app.py
app.py CHANGED
@@ -21,7 +21,7 @@ def llm_intro(selected_model):
 model_cache = {}  # Global cache
 
 
-def
+def load_model(model_name):
     """
     This function checks the cache before loading a model.
     If the model is cached, it returns the cached version.
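The hunk above shows only the head of load_model; its body lies outside the diff context. As a minimal sketch of the cache-then-load pattern the docstring describes (assuming the GPT4All class from the gpt4all package, consistent with the rest of the file):

    from gpt4all import GPT4All

    def load_model(model_name):
        """
        This function checks the cache before loading a model.
        If the model is cached, it returns the cached version.
        """
        # Serve a previously loaded model straight from the global cache
        if model_name in model_cache:
            return model_cache[model_name]
        # First request for this model: load it once, then remember it
        model = GPT4All(model_name)
        model_cache[model_name] = model
        return model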
@@ -63,39 +63,68 @@ def generate_text(input_text, selected_model):
 # analytics_enabled=True,
 # title="GPT4All Text Generation Experiment").launch()
 
-with gr.Blocks() as demo:
-    gr.Markdown("## GPT4All Text Generation Experiment")
-
-    with gr.Row():
-
-        model_selection = gr.Dropdown(choices=model_choices(),
-                                      multiselect=False,
-                                      label="LLMs to choose from",
-                                      type="value",
-                                      value="orca-mini-3b-gguf2-q4_0.gguf")
-
-        explanation = gr.Textbox(label="Model Description", lines=3, interactive=False, value=llm_intro("orca-mini-3b-gguf2-q4_0.gguf"))
-
-    # Link the dropdown with the textbox to update the description based on the selected model
-    model_selection.change(fn=llm_intro, inputs=model_selection, outputs=explanation)
-
-    chatbot = gr.Chatbot()
-    input_text = gr.Textbox(lines=3, label="Press shift+Enter to submit")
-    # output_text = gr.Textbox(lines=10, label="Generated Text")
-    clear = gr.ClearButton([input_text, chatbot])
-
-    def respond(message, chat_history, selected_model):
-        bot_message = generate_text(message, selected_model)
-        chat_history.append((message, bot_message))
-        time.sleep(2)  # Simulating processing delay if necessary
-
-        return bot_message, chat_history
-
-    input_text.submit(respond, [input_text, chatbot, model_selection], [chatbot])
-
-    # # Button to generate text
-    # generate_btn = gr.Button("Generate")
-    # generate_btn.click(fn=generate_text, inputs=[input_text, model_selection], outputs=output_text)
-
-demo.launch()
+# with gr.Blocks() as demo:
+#     gr.Markdown("## GPT4All Text Generation Experiment")
+
+#     with gr.Row():
+
+#         model_selection = gr.Dropdown(choices=model_choices(),
+#                                       multiselect=False,
+#                                       label="LLMs to choose from",
+#                                       type="value",
+#                                       value="orca-mini-3b-gguf2-q4_0.gguf")
+
+#         explanation = gr.Textbox(label="Model Description", lines=3, interactive=False, value=llm_intro("orca-mini-3b-gguf2-q4_0.gguf"))
+
+#     # Link the dropdown with the textbox to update the description based on the selected model
+#     model_selection.change(fn=llm_intro, inputs=model_selection, outputs=explanation)
+
+#     chatbot = gr.Chatbot()
+#     input_text = gr.Textbox(lines=3, label="Press shift+Enter to submit")
+#     # output_text = gr.Textbox(lines=10, label="Generated Text")
+#     clear = gr.ClearButton([input_text, chatbot])
+
+#     def respond(message, chat_history, selected_model):
+#         bot_message = generate_text(message, selected_model)
+#         chat_history.append((message, bot_message))
+#         time.sleep(2)  # Simulating processing delay if necessary
+
+#         return bot_message, chat_history
+
+#     input_text.submit(respond, [input_text, chatbot, model_selection], [chatbot])
+
+#     # # Button to generate text
+#     # generate_btn = gr.Button("Generate")
+#     # generate_btn.click(fn=generate_text, inputs=[input_text, model_selection], outputs=output_text)
+
+# demo.launch()
+
+# Define the chatbot function
+def chatbot(model_name, message, chat_history):
+    model = load_model(model_name)
+    response = model.generate(message, chat_history)
+    chat_history.append((message, response))
+    return chat_history, response
+
+# Create the Gradio interface
+with gr.Blocks() as demo:
+    gr.Markdown("# GPT4All Chatbot")
+    with gr.Row():
+        with gr.Column(scale=1):
+            model_dropdown = gr.Dropdown(
+                choices=model_choices(),
+                multiselect=False,
+                type="value",
+                value="orca-mini-3b-gguf2-q4_0.gguf",
+                label="LLMs to choose from"
+            )
+        with gr.Column(scale=4):
+            chatbot = gr.Chatbot(label="Conversation")
+            message = gr.Textbox(label="Message")
+            state = gr.State()
+
+    message.submit(chatbot, inputs=[model_dropdown, message, state], outputs=[chatbot, state])
+
+# Launch the Gradio app
+demo.launch()
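One caveat on the new block as committed: the chatbot function is immediately shadowed by the chatbot = gr.Chatbot(...) component, so message.submit(chatbot, ...) ends up wiring the component rather than the function; gr.State() starts the history as None, so chat_history.append(...) would fail on the first message; and GPT4All's generate() takes a prompt string, not a (message, history) pair. A rough reworking under those assumptions, with respond and chat_window introduced here purely for illustration:

    import gradio as gr

    def respond(model_name, message, chat_history):
        # Fetch the selected model (load_model caches across calls)
        model = load_model(model_name)
        chat_history = chat_history or []  # guard against a None state
        # Generate from the prompt alone; history stays on the Gradio side
        response = model.generate(message)
        chat_history.append((message, response))
        # The same history feeds both the Chatbot widget and the State
        return chat_history, chat_history

    with gr.Blocks() as demo:
        gr.Markdown("# GPT4All Chatbot")
        with gr.Row():
            with gr.Column(scale=1):
                model_dropdown = gr.Dropdown(
                    choices=model_choices(),
                    multiselect=False,
                    type="value",
                    value="orca-mini-3b-gguf2-q4_0.gguf",
                    label="LLMs to choose from"
                )
            with gr.Column(scale=4):
                chat_window = gr.Chatbot(label="Conversation")  # distinct name; nothing is shadowed
                message = gr.Textbox(label="Message")
                state = gr.State([])  # seed the history with an empty list

        message.submit(respond, inputs=[model_dropdown, message, state], outputs=[chat_window, state])

    demo.launch()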