# BasicChatBot / app.py
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load the pre-trained Gemma 7B model and tokenizer
# (gated model: requires accepting the license and a Hugging Face access token)
model_name = "google/gemma-7b"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
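
# A minimal sketch of a lower-memory load (not in the original app): the dtype and
# device choices below are assumptions about the runtime hardware, so keep the plain
# from_pretrained call above if a full-precision CPU load works in your environment.
# model = AutoModelForCausalLM.from_pretrained(
#     model_name,
#     torch_dtype=torch.bfloat16,  # half-precision weights, roughly halves memory use
#     device_map="auto",           # requires the `accelerate` package; spreads layers across available devices
# )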
# Define a function for generating a response
def generate_response(prompt):
    # Tokenize the input prompt
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    # Generate a response; do_sample=True is required for temperature to take effect,
    # and max_new_tokens counts only generated tokens (unlike max_length, which includes the prompt)
    output = model.generate(
        input_ids, max_new_tokens=50, num_return_sequences=1,
        do_sample=True, temperature=0.9,
    )
    # Decode the generated tokens back into text
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    return response
# Spaces-compatible function: takes a dict with a "text" key, returns a dict with a "response" key
def spaces_chatbot(input_dict):
    prompt = input_dict["text"]
    response = generate_response(prompt)
    return {"response": response}
# Quick local smoke test of the Spaces-compatible function
if __name__ == "__main__":
    sample_input = {"text": "Hello, how are you?"}
    print(spaces_chatbot(sample_input))
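
# A minimal sketch of serving the chatbot as a Hugging Face Spaces UI. The original file
# stops at the smoke test above, so this Gradio wrapper (and the helper name chat_fn)
# is an assumption about how the Space is meant to be served, not part of the original app.
# import gradio as gr
#
# def chat_fn(message):
#     # Adapt Gradio's plain-text input to the dict-based spaces_chatbot interface
#     return spaces_chatbot({"text": message})["response"]
#
# demo = gr.Interface(fn=chat_fn, inputs="text", outputs="text", title="BasicChatBot")
# demo.launch()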