yxmauw committed on
Commit
844418d
1 Parent(s): 98cb8e3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -4
app.py CHANGED
@@ -28,8 +28,7 @@ def load_model(model_name):
28
  Otherwise, it loads the model, caches it, and then returns it.
29
  """
30
  if model_name not in model_cache:
31
- # Load the model. This is a placeholder; replace with actual model loading logic.
32
- model = GPT4All(model_name) # Placeholder function
33
  model_cache[model_name] = model
34
  return model_cache[model_name]
35
 
@@ -101,7 +100,7 @@ def generate_text(input_text, selected_model):
101
  # demo.launch()
102
 
103
  # Define the chatbot function
104
- def chatbot(model_name, message, chat_history):
105
  model = load_model(model_name)
106
  response = model.generate(message, chat_history)
107
  chat_history.append((message, response))
@@ -124,7 +123,7 @@ with gr.Blocks() as demo:
124
  message = gr.Textbox(label="Message")
125
  state = gr.State()
126
 
127
- message.submit(chatbot, inputs=[model_dropdown, message, state], outputs=[chatbot, state])
128
 
129
  # Launch the Gradio app
130
  demo.launch()
 
28
  Otherwise, it loads the model, caches it, and then returns it.
29
  """
30
  if model_name not in model_cache:
31
+ model = GPT4All(model_name)
 
32
  model_cache[model_name] = model
33
  return model_cache[model_name]
34
 
 
100
  # demo.launch()
101
 
102
  # Define the chatbot function
103
+ def generate_response(model_name, message, chat_history):
104
  model = load_model(model_name)
105
  response = model.generate(message, chat_history)
106
  chat_history.append((message, response))
 
123
  message = gr.Textbox(label="Message")
124
  state = gr.State()
125
 
126
+ message.submit(generate_response, inputs=[model_dropdown, message, state], outputs=[chatbot, state])
127
 
128
  # Launch the Gradio app
129
  demo.launch()