import streamlit as st
import torch
from diffusers import StableDiffusionPipeline

st.title("Text to Image Generation")

# Load the pretrained text-to-image pipeline once and cache it across reruns.
# Stable Diffusion bundles a CLIP text encoder, so the prompt is encoded with
# CLIP internally before it guides the diffusion process; no separate CLIP
# model needs to be loaded.
@st.cache_resource
def load_pipeline():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32
    pipe = StableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", torch_dtype=dtype
    )
    return pipe.to(device)

pipeline = load_pipeline()

text = st.text_area("Enter a description:")
if st.button("Generate Image") and text:
    # Run the diffusion process on the prompt; the pipeline returns PIL images
    with st.spinner("Generating image..."):
        image = pipeline(text).images[0]

    # Display the generated image
    st.image(image, caption="Generated Image", use_column_width=True)