# app.py — "AISpace" Hugging Face Space by arawindsg
import gradio as gr
from transformers import (
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    pipeline,
)
# Use a small, distilled BlenderBot that actually runs on CPU.
# BUGFIX: BlenderBot is an encoder-decoder (seq2seq) model. Loading it with
# AutoModelForCausalLM raises a ValueError ("Unrecognized configuration class
# ... for AutoModelForCausalLM"), and the "text-generation" pipeline task is
# reserved for causal LMs. It must be loaded with AutoModelForSeq2SeqLM and
# served through the "text2text-generation" task. The 3B checkpoint was also
# far too large for the "small model on CPU" intent stated in the original
# comment; the 400M distilled checkpoint matches it.
model_name = "facebook/blenderbot-400M-distill"

# Load model and tokenizer once at startup (downloaded/cached by transformers).
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Create a chatbot pipeline; the task must match the model class (seq2seq).
chat = pipeline("text2text-generation", model=model, tokenizer=tokenizer)
def chatbot_response(message, *, generator=None):
    """Generate a chatbot reply for *message*.

    Parameters
    ----------
    message : str
        The user's input text.
    generator : callable, optional
        A text-generation callable with the Hugging Face pipeline interface
        (called with the prompt plus generation kwargs, returning a list of
        dicts carrying a ``"generated_text"`` key). Defaults to the
        module-level ``chat`` pipeline; injectable for testing.

    Returns
    -------
    str
        The generated text with any echoed prompt prefix removed and
        surrounding whitespace stripped.
    """
    gen = chat if generator is None else generator
    response = gen(message, max_length=100, do_sample=True, temperature=0.7)
    text = response[0]["generated_text"]
    # Causal-LM pipelines echo the prompt at the START of generated_text.
    # BUGFIX: the original used text.replace(message, ""), which also deletes
    # the message if it happens to reappear inside the reply itself;
    # removeprefix strips only the leading echo (and is a no-op when the
    # pipeline does not echo the prompt at all).
    return text.removeprefix(message).strip()
# Create Gradio interface: wires chatbot_response into a one-box web UI —
# a 2-line textbox for the user's question, plain-text output for the reply.
iface = gr.Interface(
    fn=chatbot_response,
    inputs=gr.Textbox(lines=2, placeholder="Ask me anything..."),
    outputs="text",
    # NOTE(review): title below looks mojibake-encoded (likely a robot emoji
    # whose UTF-8 bytes were decoded as Latin-1) — confirm the file encoding
    # before "fixing" the literal.
    title="πŸ€– LLM Chatbot with Hugging Face",
    description="Ask any question and get a response from an open-source LLM!"
)
# Launch the app: starts Gradio's local web server and blocks until stopped.
iface.launch()