import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM


@st.cache_resource
def _load_model():
    """Load and cache the GPT-2 tokenizer and model.

    Wrapped in ``st.cache_resource`` so Streamlit's script re-runs
    (triggered by every widget interaction) do not reload the weights
    from disk each time.
    """
    tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
    model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
    model.eval()  # inference only: disables dropout
    return tokenizer, model


tokenizer, model = _load_model()


def generate_text(input_text, max_length=100):
    """Generate a continuation of ``input_text`` with GPT-2.

    Args:
        input_text: The prompt to continue.
        max_length: Total sequence length (prompt + continuation) in
            tokens. Defaults to 100, matching the original behavior.

    Returns:
        The decoded generated text with special tokens stripped.
    """
    input_ids = tokenizer.encode(input_text, return_tensors='pt')
    with st.spinner("Generating text..."):
        # no_grad: inference needs no autograd graph (saves memory/time).
        with torch.no_grad():
            output = model.generate(
                input_ids,
                max_length=max_length,
                num_return_sequences=1,
                # do_sample is required for temperature to take effect;
                # without it, greedy decoding silently ignores temperature.
                do_sample=True,
                temperature=0.7,
                # GPT-2 defines no pad token; using EOS avoids the
                # "Setting pad_token_id to eos_token_id" runtime warning.
                pad_token_id=tokenizer.eos_token_id,
            )
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return generated_text


def main():
    """Render the Streamlit UI: title, prompt box, generate button, output."""
    st.title("GPT-2 Text Generator")
    input_text = st.text_area("Enter your prompt here:")
    if st.button("Generate Text"):
        if input_text:
            generated_text = generate_text(input_text)
            st.success("Generated text:")
            st.write(generated_text)
        else:
            st.error("Please enter a prompt.")


if __name__ == "__main__":
    main()