Tharunika1601 committed on
Commit
0e39422
1 Parent(s): 411a856

Update app.py

Files changed (1)
  1. app.py +17 -26
app.py CHANGED
@@ -1,31 +1,22 @@
-import requests
-import os
-from dotenv import load_dotenv  # Install the 'python-dotenv' package
-
-# Load environment variables from a .env file
-load_dotenv()
-
-API_URL = "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5"
-API_TOKEN = os.getenv("HF_API_TOKEN")  # Use os.getenv to retrieve the API token from environment variables
-
-if API_TOKEN is None:
-    raise ValueError("HF_API_TOKEN environment variable not set")
-
-headers = {"Authorization": f"Bearer {API_TOKEN}"}
+import streamlit as st
+from transformers import CLIPProcessor, CLIPModel, DiffusionModel
+import torch
+from PIL import Image
 
-def query(payload):
-    response = requests.post(API_URL, headers=headers, json=payload)
-    return response.content
+st.title("Text to Image Generation")
 
-image_bytes = query({
-    "inputs": "cat is drinking milk ",
-})
+# Load pretrained models
+clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")
+clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch16")
+diffusion_model = DiffusionModel.from_pretrained("openai/guided-diffusion-clipped-coco")
 
-import io
-from PIL import Image
+text = st.text_area("Enter a description:")
+if st.button("Generate Image") and text:
+    # Process text and get CLIP features
+    text_features = clip_processor(text, return_tensors="pt", padding=True)
 
-image = Image.open(io.BytesIO(image_bytes))
-image.show()
+    # Generate image from text using Guided Diffusion
+    image = diffusion_model.generate_text_to_image(text_features["pixel_values"])
 
-# If you want to save the generated image to a file, you can use the save method
-image.save("generated_image.png")
+    # Display the generated image
+    st.image(image, caption="Generated Image", use_column_width=True)
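
For reference, a minimal runnable sketch of the new app.py, assuming the goal is Stable Diffusion text-to-image inside Streamlit: transformers does not provide a DiffusionModel class, so this sketch swaps in the diffusers StableDiffusionPipeline and reuses the runwayml/stable-diffusion-v1-5 checkpoint that the removed script called through the Inference API. The pipeline choice and generation parameters are assumptions, not part of the commit.

# Sketch only: substitutes diffusers' StableDiffusionPipeline for the
# transformers.DiffusionModel referenced in the commit (assumption).
import streamlit as st
import torch
from diffusers import StableDiffusionPipeline

st.title("Text to Image Generation")

@st.cache_resource  # load the pipeline once and reuse it across reruns
def load_pipeline():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32
    pipe = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", torch_dtype=dtype
    )
    return pipe.to(device)

pipe = load_pipeline()

text = st.text_area("Enter a description:")
if st.button("Generate Image") and text:
    # The pipeline encodes the prompt with its own CLIP text encoder,
    # so no separate CLIPProcessor/CLIPModel is needed here.
    image = pipe(text, num_inference_steps=30).images[0]
    st.image(image, caption="Generated Image", use_column_width=True)

Run with streamlit run app.py; as in the committed version, the image is generated only after the button is pressed and the text area is non-empty.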