hereoncollab committed on
Commit
751924a
1 Parent(s): 8fd6ec4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -11
app.py CHANGED
@@ -1,24 +1,19 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
- # Initialize the text generation pipeline with the LLaMA model
5
- pipe = pipeline("text-generation", model="mlabonne/Hermes-3-Llama-3.1-8B-lorablated")
6
 
7
- # Define the function that generates a response
8
  def generate_response(prompt):
9
- # Generate text using the pipeline
10
- responses = pipe(prompt, max_length=100, num_return_sequences=1)
11
- # Extract and return the text from the generated responses
12
  return responses[0]['generated_text']
13
 
14
- # Create the Gradio interface
15
  interface = gr.Interface(
16
  fn=generate_response,
17
- inputs="text",
18
  outputs="text",
19
- title="LLaMA Chatbot",
20
- description="A simple chatbot using LLaMA for text generation. Enter a prompt and get a response."
21
  )
22
 
23
- # Launch the Gradio app
24
  interface.launch()
 
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
# DistilGPT-2 is a distilled GPT-2 checkpoint, chosen to keep memory usage low.
pipe = pipeline(task="text-generation", model="distilgpt2")
6
 
 
7
def generate_response(prompt):
    """Generate a text completion for *prompt* via the module-level pipeline.

    Returns the first generated string (HF text-generation pipelines
    presumably include the prompt itself as a prefix of the output —
    confirm against the pipeline's `return_full_text` default).
    """
    # Use max_new_tokens rather than max_length: max_length counts the
    # prompt's tokens too, so a long prompt would leave no budget for the
    # generated continuation. max_new_tokens bounds only the new text.
    responses = pipe(prompt, max_new_tokens=50, num_return_sequences=1)
    return responses[0]['generated_text']
10
 
 
11
# Build the web UI: one multi-line text box in, generated text out.
prompt_box = gr.Textbox(
    label="Enter your message:",
    lines=2,
    placeholder="Type your message here...",
)
interface = gr.Interface(
    fn=generate_response,
    inputs=prompt_box,
    outputs="text",
    title="Smaller Model Chatbot",
    description="A chatbot using a smaller model for lower memory usage.",
)
18
 
 
19
# Start the Gradio server; this call blocks until the app is shut down.
interface.launch()