# Test_ChatBot / app.py: a Zephyr 7B chatbot served with Gradio
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import gradio as gr
# Load Zephyr 7B (no authentication required)
zephyr_tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-alpha")
zephyr_model = AutoModelForCausalLM.from_pretrained(
    "HuggingFaceH4/zephyr-7b-alpha",
    torch_dtype=torch.float16,  # Half-precision cuts memory use and speeds up inference
    device_map="auto",          # Place the model on GPU automatically if one is available
)
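# If GPU memory is tight, a 4-bit quantized load is one alternative. This is a
# hedged sketch (it assumes the bitsandbytes package is installed), not part
# of the original app:
#
#   from transformers import BitsAndBytesConfig
#   zephyr_model = AutoModelForCausalLM.from_pretrained(
#       "HuggingFaceH4/zephyr-7b-alpha",
#       quantization_config=BitsAndBytesConfig(load_in_4bit=True),
#       device_map="auto",
#   )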
def generate_response(prompt):
    # Tokenize the input prompt and move the tensors to the model's device
    inputs = zephyr_tokenizer(prompt, return_tensors="pt").to(zephyr_model.device)
    # Generate up to 200 new tokens; max_new_tokens (rather than max_length)
    # keeps the generation budget independent of the prompt's length
    outputs = zephyr_model.generate(**inputs, max_new_tokens=200)
    # Decode only the newly generated tokens so the reply does not echo the prompt
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return zephyr_tokenizer.decode(new_tokens, skip_special_tokens=True)
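# Zephyr is a chat-tuned model, so it answers best when the prompt follows its
# chat format. A minimal sketch of one way to do that with the tokenizer's
# apply_chat_template; generate_with_template is a hypothetical helper, not
# part of the original app:
def generate_with_template(user_message):
    messages = [{"role": "user", "content": user_message}]
    # Render the message list into Zephyr's expected prompt string
    chat_prompt = zephyr_tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    return generate_response(chat_prompt)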
# Gradio interface: a thin wrapper so Gradio calls the model for each prompt
def chatbot(prompt):
    return generate_response(prompt)
interface = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="text",
    title="Zephyr 7B Chatbot",
    description="Ask questions and get answers from Zephyr 7B!",
)
# Launch the app
interface.launch()
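# On shared hardware, enabling Gradio's request queue is one way to serve
# several concurrent users without timeouts (a sketch, not the original call):
# interface.queue().launch()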