import torch
from transformers import pipeline
import streamlit as st
from huggingface_hub import login

# Authenticate with the Hugging Face Hub using the API token stored in Streamlit secrets
# (required for gated models such as the meta-llama family)
login(token=st.secrets["HF_TOKEN"])

# Load the meta-llama model once and cache it across Streamlit reruns;
# without caching, every widget interaction would reload the model from scratch
model_id = "meta-llama/Llama-3.2-3B-Instruct"

@st.cache_resource
def load_pipeline():
    return pipeline(
        "text-generation",
        model=model_id,
        torch_dtype=torch.float32,  # switch to torch.bfloat16 for better performance on GPUs with Tensor Cores
        device_map="auto",  # automatically place the model on the available device (CPU/GPU)
    )

pipe = load_pipeline()

# Streamlit interface
st.title("Talk With Historical Figures - Chatbot")
st.write("Chat with historical figures and hear their responses!")

# Inputs: character selection and the user's question
character = st.text_input("Enter the name of a historical figure (e.g., Albert Einstein, Cleopatra):")
user_input = st.text_input("Ask a question:")

# Generate a response once both fields are filled in
if character and user_input:
    # Use the chat-message format so the pipeline applies the model's chat template;
    # with a raw string prompt, generated_text would echo the prompt back ahead of the reply
    messages = [
        {"role": "system", "content": f"You are {character}. Respond in the style of {character}."},
        {"role": "user", "content": user_input},
    ]
    outputs = pipe(messages, max_new_tokens=256)
    # For chat input, generated_text is the full conversation; the last message is the assistant's reply
    response = outputs[0]["generated_text"][-1]["content"]

    # Display the response
    st.write(f"**{character} says:** {response}")
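
# --- Usage note (a sketch, assuming this file is saved as app.py in a Streamlit
# project or Hugging Face Space; the filename and token value are placeholders) ---
# st.secrets reads from .streamlit/secrets.toml when running locally, so create
# that file with your own Hugging Face token before launching:
#
#   # .streamlit/secrets.toml
#   HF_TOKEN = "hf_your_token_here"
#
# Then start the app with:
#
#   streamlit run app.py
#
# On a Hugging Face Space, set HF_TOKEN under the Space's secrets settings instead.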