# NOTE: scraped Hugging Face Space page metadata (status banner, file size,
# commit hashes, and blame line numbers) removed — it was not part of the program.
import streamlit as st
from transformers import pipeline
from diffusers import StableDiffusionPipeline
from PIL import Image
import torch
# Initialize the text generation model
@st.cache_resource
def initialize_text_generator():
    """Load the GPT-2 text-generation pipeline.

    Cached with st.cache_resource so the model is loaded once per server
    process instead of on every Streamlit script rerun.

    Returns:
        The transformers text-generation pipeline, or None if loading
        failed (the error is shown in the UI via st.error).
    """
    try:
        text_generator = pipeline('text-generation', model='gpt2')
    except Exception as e:
        st.error(f"Error loading text generation model: {e}")
        return None
    return text_generator
# Initialize the image generation model
@st.cache_resource
def initialize_image_generator():
    """Load the Stable Diffusion v1-4 image-generation pipeline.

    Cached with st.cache_resource — this download/load is expensive, so it
    must not repeat on every Streamlit script rerun. The pipeline is moved
    to GPU when CUDA is available, otherwise it runs on CPU.

    Returns:
        The StableDiffusionPipeline on the selected device, or None if
        loading failed (the error is shown in the UI via st.error).
    """
    try:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        image_generator = StableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4"
        ).to(device)
    except Exception as e:
        st.error(f"Error loading image generation model: {e}")
        return None
    return image_generator
# Load both models once at script start; each is None if its load failed,
# which generate_blog checks before doing any work.
text_generator = initialize_text_generator()
image_generator = initialize_image_generator()
def generate_blog(title):
    """Produce blog text and a matching illustration for *title*.

    Returns:
        A (content, image) tuple. On model-load failure the content is the
        sentinel string "Failed to load models" and the image is None —
        the UI code compares against that exact string.
    """
    # Guard clause: bail out early if either model failed to load.
    if text_generator is None or image_generator is None:
        return "Failed to load models", None

    # Seed GPT-2 with the title and keep the single generated sequence.
    generated = text_generator(title, max_length=500, num_return_sequences=1)
    blog_content = generated[0]['generated_text']

    # Ask Stable Diffusion for one cover image themed on the title.
    image_prompt = f"An image representing {title}"
    result = image_generator(image_prompt)
    return blog_content, result.images[0]
# Streamlit app — page layout and interaction flow.
st.title('Blog Generator')
title = st.text_input('Enter the title of your blog:')

# Only generate once the user has typed a non-empty title.
if title:
    with st.spinner('Generating blog content and image...'):
        blog_content, image = generate_blog(title)
    # generate_blog signals model-load failure with this exact sentinel string.
    if blog_content == "Failed to load models":
        st.error(blog_content)
    else:
        st.success('Blog generated successfully!')
        st.subheader('Blog Content')
        st.write(blog_content)
        st.subheader('Generated Image')
        st.image(image, caption='Generated Image')