import streamlit as st
from transformers import GPT2LMHeadModel, GPT2Tokenizer


# Cache the tokenizer and model so they are loaded only once per session.
# (Newer Streamlit releases replace this decorator with @st.cache_resource.)
@st.cache(allow_output_mutation=True)
def load_model():
    tokenizer = GPT2Tokenizer.from_pretrained("gpt2-large")
    model = GPT2LMHeadModel.from_pretrained("gpt2-large")
    return tokenizer, model


def generate_blog_post(topic, max_length=200):
    tokenizer, model = load_model()
    # Use the topic as the prompt; max_length counts the prompt plus generated tokens.
    input_ids = tokenizer.encode(topic, return_tensors="pt")
    output = model.generate(
        input_ids,
        max_length=max_length,
        num_return_sequences=1,
        no_repeat_ngram_size=2,
        pad_token_id=tokenizer.eos_token_id,
    )
    blog_post = tokenizer.decode(output[0], skip_special_tokens=True)
    return blog_post


st.title("Blog Post Generator")
st.write("Enter a topic to generate a blog post using GPT-2 large.")

topic = st.text_input("Topic:", "")
length = st.slider("Post Length (in tokens):", min_value=50, max_value=500, value=200)

if st.button("Generate"):
    if topic:
        blog_post = generate_blog_post(topic, max_length=length)
        st.subheader("Generated Blog Post")
        st.write(blog_post)
    else:
        st.write("Please enter a topic to generate a blog post.")