IEEEVITPune-AI-Team committed
Commit 76a93f3
1 Parent(s): 81f3c54

Update app.py

Files changed (1)
  1. app.py +2 -1
app.py CHANGED
@@ -1,4 +1,5 @@
 import gradio as gr
+import transformers
 from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 
 # Load the model and tokenizer
@@ -9,7 +10,7 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
 # Define the function to generate response
 def generate_response(prompt):
     instruction = f"### Instruction:\n{prompt}\n\n### Response:\n"
-    pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=256)
+    pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=64)
     result = pipe(instruction)
     generated_text = result[0]['generated_text'][len(instruction):].strip()
     return generated_text
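
For context, here is a minimal sketch of what the full app.py plausibly looks like after this commit. Only the two hunks above are visible in the diff, so the checkpoint name and the Gradio interface wiring are assumptions (placeholders), not part of the actual repository.

import gradio as gr
import transformers  # added in this commit
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer
# NOTE: the real checkpoint is not shown in the diff; this name is a placeholder.
model_name = "your-org/your-instruction-tuned-model"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Define the function to generate response
def generate_response(prompt):
    instruction = f"### Instruction:\n{prompt}\n\n### Response:\n"
    # As of this commit the pipeline is rebuilt on every call with max_length=64,
    # which caps the combined length of the prompt and the generated tokens.
    pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=64)
    result = pipe(instruction)
    generated_text = result[0]['generated_text'][len(instruction):].strip()
    return generated_text

# Gradio wiring (assumed; not visible in the changed hunks)
demo = gr.Interface(fn=generate_response, inputs="text", outputs="text")
demo.launch()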