import streamlit as st
import os
from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Access token from environment variables
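# (in a Hugging Face Space this is usually added as a secret under Settings -> Variables and secrets,
# which the Space then exposes to the app as an environment variable)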
hf_token = os.getenv("HUGGINGFACE_API_KEY")
login(token=hf_token)

model_name = "meta-llama/Llama-3.2-3B-Instruct"
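# Explicit rope_scaling override: older transformers releases fail to parse the "llama3"
# rope_scaling block in this model's config, so it is passed in here. The values below are
# intended to mirror the model's published config.json; on a recent transformers version the
# override should not be needed at all.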
rope_scaling = {
    "rope_type": "llama3",                     # "type" is also accepted for backward compatibility
    "factor": 32.0,                            # scaling factor, adjustable to your context needs
    "original_max_position_embeddings": 8192,  # the "llama3" rope type expects these extra fields
    "low_freq_factor": 1.0,
    "high_freq_factor": 4.0,
}

# Ensure the model loading process uses the corrected `rope_scaling`
# (the tokenizer does not use rope_scaling, so it is loaded without the override)
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, rope_scaling=rope_scaling)
# Build the text-generation pipeline around the LLaMA model and tokenizer
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
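# The pipeline returns a list of dicts; each dict's 'generated_text' contains the prompt followed
# by the continuation, so the prompt is sliced off below before a response is reused or displayed.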
def generate_debate(topic):
    # Generate response from Bot A (Proponent)
    bot_a_prompt = f"Let's debate about the topic '{topic}'. What are your thoughts?"
    bot_a_output = generator(bot_a_prompt, max_new_tokens=200, num_return_sequences=1)[0]['generated_text']
    # Drop the echoed prompt so only Bot A's actual answer is kept
    bot_a_response = bot_a_output[len(bot_a_prompt):].strip()

    # Generate response from Bot B (Opponent) based on Bot A's response
    bot_b_prompt = f"Bot B, respond to the following: {bot_a_response} What is your counterargument?"
    bot_b_output = generator(bot_b_prompt, max_new_tokens=200, num_return_sequences=1)[0]['generated_text']
    bot_b_response = bot_b_output[len(bot_b_prompt):].strip()

    # Display the debate in paragraph format without introductory text
    st.subheader("Bot A (Proponent) Response:")
    st.write(bot_a_response)
    st.subheader("Bot B (Opponent) Response:")
    st.write(bot_b_response)
# Streamlit interface for the user to enter a debate topic
st.title("Debate Bot")
topic_input = st.text_input("Enter debate topic:", "Dogs Are Cute If They Are Small")
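# Streamlit re-runs the whole script on each interaction, so a fresh debate is generated
# whenever the topic text changes.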
if topic_input:
    generate_debate(topic_input)