import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer once at startup
model_name = "IEEEVITPune-AI-Team/ChatbotAlpha0.7"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Build the text-generation pipeline once, rather than re-creating it on every request
pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=64)

# Wrap the user prompt in the instruction template, generate a completion,
# and strip the prompt back out of the returned text
def generate_response(prompt):
    instruction = f"### Instruction:\n{prompt}\n\n### Response:\n"
    result = pipe(instruction)
    generated_text = result[0]["generated_text"][len(instruction):].strip()
    return generated_text

# Create the Gradio interface (gr.inputs/gr.outputs were removed in Gradio 4.x;
# use the top-level components instead)
input_text = gr.Textbox(lines=3, label="Enter your prompt")
output_text = gr.Textbox(label="Response")

gr.Interface(
    fn=generate_response,
    inputs=input_text,
    outputs=output_text,
    title="Chatbot",
    description="Try asking: What is IEEE?",
).launch()
ad("models/IEEEVITPune-AI-Team/ChatbotAlpha0.7").launch()