from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import streamlit as st
# Load the GPT-2 large model and tokenizer
model_name = "gpt2-large"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Add padding token to the tokenizer
tokenizer.pad_token = tokenizer.eos_token # Set padding token to EOS token
model = AutoModelForCausalLM.from_pretrained(model_name)
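# Optional sketch (an assumption, not part of the original app): move the model
# to a GPU when one is available. If enabled, the tensors built in generate_blog
# would also need .to(device) before calling model.generate.
# device = "cuda" if torch.cuda.is_available() else "cpu"
# model = model.to(device)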
# Function to generate a blog post based on a topic title
def generate_blog(topic_title, max_length=200):
    # Step 1: Tokenize the input title
    inputs = tokenizer(topic_title, return_tensors='pt', padding=True)
    input_ids = inputs['input_ids']
    attention_mask = inputs['attention_mask']
    # Step 2: Generate a continuation (greedy decoding by default)
    output_ids = model.generate(
        input_ids,
        attention_mask=attention_mask,
        max_length=max_length,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Step 3: Decode the generated token IDs back to text
    blog_post = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    return blog_post
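# Sketch (an assumption, not in the original code): greedy decoding tends to
# loop on GPT-2, so enabling sampling usually yields more varied blog text:
# output_ids = model.generate(
#     input_ids,
#     attention_mask=attention_mask,
#     max_length=max_length,
#     do_sample=True,
#     top_p=0.95,
#     temperature=0.8,
#     pad_token_id=tokenizer.eos_token_id,
# )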
# Example command-line usage (kept commented out for the Streamlit app):
#topic_title = input("Enter a topic title for the blog post: ")
#blog_post = generate_blog(topic_title)
#print("\nGenerated Blog Post:\n")
#print(blog_post)
title = st.text_area('Enter a topic title for the blog post')
if title:
    blog_post = generate_blog(title)
    st.write(blog_post)  # the model returns plain text, so render it as text rather than JSON
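# To run the app locally: streamlit run app.py
# Note (an assumption, not in the original code): wrapping the model/tokenizer
# loading in a function decorated with @st.cache_resource would avoid reloading
# gpt2-large on every Streamlit rerun.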