Initial Commit
- app.py +222 -0
- main_inference.py +252 -0
- requirements.txt +8 -0
- utils.py +75 -0
app.py
ADDED
@@ -0,0 +1,222 @@
import gradio as gr
from torchvision import transforms
import torch
from main_inference import generate_mixed_image, generate_image, progress_video
import matplotlib.colors as mcolors


def run_generate_mixed_image(prompt1, prompt2, noise_checkbox):
    image = generate_mixed_image(prompt1, prompt2, noised_image=noise_checkbox)
    return image


def run_generate_image(prompt1, noise_checkbox):
    image = generate_image(prompt1, noised_image=noise_checkbox)
    return image


def run_generate_image_with_color_dominance(prompt1, color, color_loss_scale, noised_image_checkbox_1):
    # Convert the hexadecimal color code to RGB values in [0, 1]
    rgba_color = mcolors.hex2color(color)
    # Multiply the RGB values by 255 to get them into the [0, 255] range expected by color_loss
    rgb_values = [int(val * 255) for val in rgba_color]
    image = generate_image(prompt1, True, rgb_values, color_loss_scale, noised_image_checkbox_1)
    return image


def run_process_video(prompt):
    # Generate an image, then build a video from the saved per-step frames
    video = progress_video(prompt)
    return video


description_text_to_image = """ ### Text to Image Generation

1. Write a text prompt.

2. The output is an image generated from the text prompt.

3. Tick the checkbox if you want to see a noised version of the image.

"""

description_generate_mixed_image = """ ### Mixed Image Generation

1. Write two text prompts.

2. The output is an image that mixes both prompts.

3. Tick the checkbox if you want to see a noised version of the image.

"""

description_generate_image_with_color_dominance = """ ### Generate Images with Color Dominance

1. Write a text prompt.

2. Select a color.

3. Choose a color loss value.

4. Get the generated image.

5. Tick the checkbox if you want to see a noised version of the image.

"""

description_progress_video = """ ### Get the Full Generation Process Video

1. Write a text prompt.

2. The output is a video whose frames show the generated image at various inference steps.

"""

# Page title
title = "<center><strong><font size='8'>The Stable Diffusion</font></strong></center>"

image_input1 = gr.Image(type='pil')
image_input2 = gr.Image(type='filepath')
image_input3 = gr.Image(type='pil')
image_input4 = gr.Image(type='pil')
text_input = gr.Text(label="Enter Text Prompt")
text_input2 = gr.Text(label="Enter Text Prompt")
text_input3 = gr.Text(label="Enter Text Prompt")
text_input4 = gr.Text(label="Enter Text Prompt")
text_input5 = gr.Text(label="Enter Text Prompt")
video_output = gr.Video()

color = gr.ColorPicker(label="Select a Color", info="Choose a color from the color picker:")
noised_image_checkbox = gr.Checkbox(value=False, label="Show Noised Image")
noised_image_checkbox_1 = gr.Checkbox(value=False, label="Show Noised Image")
noised_image_checkbox_2 = gr.Checkbox(value=False, label="Show Noised Image")
color_loss_scale = gr.Slider(minimum=0, maximum=255, value=40, step=1, label="Color Loss")
css = "h1 { text-align: center } .about { text-align: justify; padding-left: 10%; padding-right: 10%; }"

with gr.Blocks(css=css, title='Play with Stable Diffusion') as demo:
    with gr.Row():
        with gr.Column(scale=1):
            # Title
            gr.Markdown(title)

    with gr.Tab("Generate Image"):
        # Inputs and output image
        with gr.Row(variant="panel"):
            with gr.Column(scale=1):
                text_input.render()
                noised_image_checkbox.render()
            with gr.Column(scale=1):
                image_input1.render()

        # Submit & Clear
        with gr.Row():
            with gr.Column():
                run_generate_image_button = gr.Button("generate_image", variant='primary')
                clear_btn_text_to_image = gr.Button("Clear", variant="secondary")
                gr.Markdown(description_text_to_image)
                # gr.Examples(examples=["A White cat", "a dog playing in garden", "people enjoying around sea"],
                #             inputs=[text_input, noised_image_checkbox],
                #             outputs=image_input1,
                #             fn=run_generate_image,
                #             cache_examples=True,
                #             examples_per_page=4)

                run_generate_image_button.click(run_generate_image,
                                                inputs=[text_input, noised_image_checkbox],
                                                outputs=image_input1)

    with gr.Tab("Generate Image with Color Dominance"):
        # Inputs and output image
        with gr.Row(variant="panel"):
            with gr.Column(scale=1):
                text_input4.render()
                color_loss_scale.render()
                noised_image_checkbox_1.render()
                color.render()
            with gr.Column(scale=1):
                image_input3.render()

        # Submit & Clear
        with gr.Row():
            with gr.Column():
                run_generate_image_with_color_dominance_button = gr.Button("generate_image_with_color_dominance", variant='primary')
                clear_btn_color_dominance = gr.Button("Clear", variant="secondary")
                gr.Markdown(description_generate_image_with_color_dominance)
                # gr.Examples(examples=["A White cat", "a dog playing in garden", "people enjoying around sea"],
                #             inputs=[text_input4, color, color_loss_scale, noised_image_checkbox_1],
                #             outputs=image_input3,
                #             fn=run_generate_image_with_color_dominance,
                #             cache_examples=True,
                #             examples_per_page=4)

                run_generate_image_with_color_dominance_button.click(run_generate_image_with_color_dominance,
                                                                     inputs=[text_input4, color, color_loss_scale, noised_image_checkbox_1],
                                                                     outputs=image_input3)

    ####################################################################################################################
    with gr.Tab("Generate Mixed Image"):
        # Inputs and output image
        with gr.Row(variant="panel"):
            with gr.Column(scale=1):
                text_input2.render()
                text_input3.render()
                noised_image_checkbox_2.render()
            with gr.Column(scale=1):
                image_input4.render()

        # Submit & Clear
        with gr.Row():
            with gr.Column():
                run_generate_mixed_image_button = gr.Button("generate_mixed_image", variant='primary')
                clear_btn_image_to_image = gr.Button("Clear", variant="secondary")

                gr.Markdown(description_generate_mixed_image)
                # gr.Examples(examples=["examples/12830823_87d2654e31.jpg", "examples/27782020_4dab210360.jpg", "examples/44129946_9eeb385d77.jpg"],
                #             inputs=[text_input2, text_input3, noised_image_checkbox_2],
                #             outputs=image_input4,
                #             fn=run_generate_mixed_image,
                #             cache_examples=True,
                #             examples_per_page=4)

                run_generate_mixed_image_button.click(run_generate_mixed_image,
                                                      inputs=[text_input2, text_input3, noised_image_checkbox_2],
                                                      outputs=image_input4)

    ####################################################################################################################
    with gr.Tab("progress_video"):
        # Input prompt and output video
        with gr.Row(variant="panel"):
            with gr.Column(scale=1):
                text_input5.render()

            with gr.Column(scale=1):
                video_output.render()

        # Submit & Clear
        with gr.Row():
            with gr.Column():
                run_progress_video_button = gr.Button("progress_video", variant='primary')
                clear_btn_progress_video = gr.Button("Clear", variant="secondary")

                gr.Markdown(description_progress_video)
                # gr.Examples(examples=["examples/12830823_87d2654e31.jpg", "examples/27782020_4dab210360.jpg", "examples/44129946_9eeb385d77.jpg"],
                #             inputs=[text_input5],
                #             outputs=video_output,
                #             fn=run_process_video,
                #             examples_per_page=4)

                run_progress_video_button.click(run_process_video,
                                                inputs=[text_input5],
                                                outputs=video_output)

    #######################################################################################################################
    # Clear handlers: reset each tab's inputs and outputs
    def clear():
        return None, None

    def clear_text():
        return None, None, None

    clear_btn_text_to_image.click(clear, outputs=[text_input, image_input1])
    clear_btn_color_dominance.click(clear, outputs=[text_input4, image_input3])
    clear_btn_image_to_image.click(clear_text, outputs=[text_input2, text_input3, image_input4])
    clear_btn_progress_video.click(clear, outputs=[text_input5, video_output])

demo.queue()
demo.launch(debug=True)
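
For reference, the wrappers above can also be exercised without the Gradio UI. The following is a minimal, hypothetical smoke test (not part of this commit); it assumes the Stable Diffusion weights download successfully when main_inference is imported, and it will be slow on CPU:

# Hypothetical smoke test for the wrapper functions; not part of the Space itself.
from main_inference import generate_image, generate_mixed_image

if __name__ == "__main__":
    # Plain text-to-image, no color guidance, no added noise
    img = generate_image("a watercolor painting of a lighthouse", noised_image=False)
    img.save("lighthouse.png")

    # Blend two prompts with the fixed mix factor of 0.4
    mixed = generate_mixed_image("a dog", "a cat")
    mixed.save("dog_cat_mix.png")
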
main_inference.py
ADDED
@@ -0,0 +1,252 @@
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, UNet2DConditionModel, LMSDiscreteScheduler
from tqdm.auto import tqdm
from torch import autocast
from PIL import Image
from matplotlib import pyplot as plt
import numpy
from torchvision import transforms as tfms
import shutil
# For video writing:
import cv2
# Notebook-only helpers, not needed when running as a Space:
# from IPython.display import HTML
# from base64 import b64encode
import os
from utils import color_loss, latents_to_pil, pil_to_latent, sketch_loss

# Set device
torch_device = "cpu"

vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")

# Load the tokenizer and text encoder to tokenize and encode the text.
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

# The UNet model for generating the latents.
unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet")

# The noise scheduler
scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
vae = vae.to(torch_device)
text_encoder = text_encoder.to(torch_device)
unet = unet.to(torch_device)

scheduler.set_timesteps(15)


def generate_mixed_image(prompt1, prompt2, noised_image=False):
    mix_factor = 0.4
    height = 512                        # default height of Stable Diffusion
    width = 512                         # default width of Stable Diffusion
    num_inference_steps = 50            # Number of denoising steps
    guidance_scale = 8                  # Scale for classifier-free guidance
    generator = torch.manual_seed(32)   # Seed generator to create the initial latent noise
    batch_size = 1

    # Prep text: embed both prompts
    text_input1 = tokenizer([prompt1], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
    with torch.no_grad():
        text_embeddings1 = text_encoder(text_input1.input_ids.to(torch_device))[0]
    text_input2 = tokenizer([prompt2], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
    with torch.no_grad():
        text_embeddings2 = text_encoder(text_input2.input_ids.to(torch_device))[0]
    # Take the weighted average of the two prompt embeddings
    text_embeddings = (text_embeddings1 * mix_factor +
                       text_embeddings2 * (1 - mix_factor))
    # And the unconditional input as before:
    max_length = max(text_input1.input_ids.shape[-1], text_input2.input_ids.shape[-1])
    uncond_input = tokenizer(
        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
    )
    with torch.no_grad():
        uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
    text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

    # Prep scheduler
    scheduler.set_timesteps(num_inference_steps)

    # Prep latents
    latents = torch.randn(
        (batch_size, unet.in_channels, height // 8, width // 8),
        generator=generator,
    )
    latents = latents.to(torch_device)
    latents = latents * scheduler.sigmas[0]  # Need to scale to match k

    # Sampling loop
    with autocast("cuda"):
        for i, t in tqdm(enumerate(scheduler.timesteps)):
            # Expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
            latent_model_input = torch.cat([latents] * 2)
            sigma = scheduler.sigmas[i]
            latent_model_input = latent_model_input / ((sigma**2 + 1) ** 0.5)

            # Predict the noise residual
            with torch.no_grad():
                noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

            # Perform guidance
            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # Compute the previous noisy sample x_t -> x_t-1
            latents = scheduler.step(noise_pred, i, latents)["prev_sample"]

    if noised_image:
        output = generate_noised_version_of_image(latents_to_pil(latents, vae)[0])
    else:
        output = latents_to_pil(latents, vae)[0]

    return output


def generate_image(prompt, color_postprocessing=False, postprocessing_color=None, color_loss_scale=40, noised_image=False):
    # Store the predicted outputs and the next frame at every step for later viewing
    # prompt = 'A campfire (oil on canvas)'
    height = 512                        # default height of Stable Diffusion
    width = 512                         # default width of Stable Diffusion
    num_inference_steps = 50            # Number of denoising steps
    guidance_scale = 8                  # Scale for classifier-free guidance
    generator = torch.manual_seed(32)   # Seed generator to create the initial latent noise
    batch_size = 1

    # Directory that holds the per-step frames
    directory_name = "steps"

    # Check if the directory exists, and if so, delete it
    if os.path.exists(directory_name):
        shutil.rmtree(directory_name)

    # Create the directory
    os.makedirs(directory_name)

    # Prep text
    text_input = tokenizer([prompt], padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
    with torch.no_grad():
        text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]

    # And the unconditional input as before:
    max_length = text_input.input_ids.shape[-1]
    uncond_input = tokenizer(
        [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
    )
    with torch.no_grad():
        uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0]
    text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

    # Prep scheduler
    scheduler.set_timesteps(num_inference_steps)

    # Prep latents
    latents = torch.randn(
        (batch_size, unet.in_channels, height // 8, width // 8),
        generator=generator,
    )
    latents = latents.to(torch_device)
    latents = latents * scheduler.sigmas[0]  # Need to scale to match k

    # Sampling loop
    with autocast("cuda"):
        for i, t in tqdm(enumerate(scheduler.timesteps)):
            # Expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
            latent_model_input = torch.cat([latents] * 2)
            sigma = scheduler.sigmas[i]
            latent_model_input = latent_model_input / ((sigma**2 + 1) ** 0.5)

            # Predict the noise residual
            with torch.no_grad():
                noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

            # Perform classifier-free guidance
            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            #### ADDITIONAL GUIDANCE ###
            if color_postprocessing:
                # Requires grad on the latents
                latents = latents.detach().requires_grad_()

                # Get the predicted x0:
                latents_x0 = latents - sigma * noise_pred

                # Decode to image space
                denoised_images = vae.decode((1 / 0.18215) * latents_x0) / 2 + 0.5  # range (0, 1)

                # Calculate the loss against the requested color
                # loss = sketch_loss(denoised_images) * color_loss_scale
                loss = color_loss(denoised_images, postprocessing_color) * color_loss_scale
                if i % 10 == 0:
                    print(i, 'loss:', loss.item())

                # Get the gradient of the loss with respect to the latents
                cond_grad = -torch.autograd.grad(loss, latents)[0]

                # Modify the latents based on this gradient
                latents = latents.detach() + cond_grad * sigma**2

            ### Save the intermediate result on every step so progress_video can build a video ###
            # Get the predicted x0:
            latents_x0 = latents - sigma * noise_pred
            im_t0 = latents_to_pil(latents_x0, vae)[0]

            # And compute the previous noisy sample x_t -> x_t-1
            latents = scheduler.step(noise_pred, i, latents)["prev_sample"]
            im_next = latents_to_pil(latents, vae)[0]

            # Combine the two images and save for later viewing
            im = Image.new('RGB', (1024, 512))
            im.paste(im_next, (0, 0))
            im.paste(im_t0, (512, 0))
            im.save(f'steps/{i:04}.jpeg')

    if noised_image:
        output = generate_noised_version_of_image(latents_to_pil(latents, vae)[0])
    else:
        output = latents_to_pil(latents, vae)[0]

    return output


def progress_video(prompt):
    pil_image = generate_image(prompt)
    # Collect the per-step frames written by generate_image (named 0000.jpeg, 0001.jpeg, ...)
    num_frames = len(os.listdir("steps/"))
    image_files = [f"steps/{i:04d}.jpeg" for i in range(num_frames)]
    # Read the first image to get its size (assuming all images have the same size)
    first_image = cv2.imread(image_files[0])
    height, width, _ = first_image.shape

    # Define the output video writer
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # Codec for MP4
    out = cv2.VideoWriter('out.mp4', fourcc, 12, (width, height))

    for image_file in image_files:
        frame = cv2.imread(image_file)
        out.write(frame)

    out.release()
    return "out.mp4"


def generate_noised_version_of_image(pil_image):
    # View a noised version of the image
    encoded = pil_to_latent(pil_image, vae)
    noise = torch.randn_like(encoded)  # Random noise
    timestep = 150  # i.e. equivalent to that at 150/1000 training steps
    encoded_and_noised = scheduler.add_noise(encoded, noise, timestep)
    return latents_to_pil(encoded_and_noised, vae)[0]  # Display


# if __name__ == "__main__":
#     prompt = 'A campfire (oil on canvas)'
#     color_loss_scale = 40
#     color_postprocessing = False
#     pil_image = generate_mixed_image("a dog", "a cat")
#     # pil_image = generate_image(prompt, color_postprocessing, color_loss_scale)
#     # pil_image = generate_noised_version_of_image(Image.open('output.png').resize((512, 512)))
#     pil_image.save("output1.png")

if __name__ == "__main__":
    progress_video("lol")
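
The core of the color-dominance feature is the gradient step latents = latents.detach() + cond_grad * sigma**2 inside generate_image. Below is a self-contained toy sketch of that update on dummy tensors, so it runs without downloading any models; the fake_decode stand-in and the target color are assumptions made only for illustration:

import torch

def fake_decode(latents):
    # Stand-in for vae.decode: map 4-channel latents to a 3-channel "image" in roughly (-1, 1)
    return torch.tanh(latents[:, :3])

latents = torch.randn(1, 4, 64, 64, requires_grad=True)
sigma = 0.8                               # current noise level, scheduler.sigmas[i] in the real loop
color_loss_scale = 40
target = torch.tensor([0.9, 0.1, 0.1])    # dominant red, scaled like utils.color_loss

# Decode the predicted x0 into (0, 1) image space, as generate_image does
decoded = fake_decode((1 / 0.18215) * latents) / 2 + 0.5
# Per-channel mean absolute deviation from the target color
loss = sum(torch.abs(decoded[:, c] - target[c]).mean() for c in range(3)) * color_loss_scale

# Gradient of the loss w.r.t. the latents, then the update used in the sampling loop
cond_grad = -torch.autograd.grad(loss, latents)[0]
latents = latents.detach() + cond_grad * sigma**2
print(latents.shape)  # torch.Size([1, 4, 64, 64])
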
requirements.txt
ADDED
@@ -0,0 +1,8 @@
transformers
diffusers==0.2.4
sentence_transformers
gradio
torch
torchvision
matplotlib
opencv-python
utils.py
ADDED
@@ -0,0 +1,75 @@
import torch
import cv2
import numpy as np
from torchvision import transforms as tfms
from torchvision.transforms import functional as F
import torchvision.transforms as transforms
from PIL import Image, ImageEnhance, ImageOps, ImageFilter
# from legofy import legofy_image

to_tensor_tfm = tfms.ToTensor()
torch_device = "cpu"


def pil_to_latent(input_im, vae):
    # Single image -> single latent in a batch (so size 1, 4, 64, 64)
    with torch.no_grad():
        latent = vae.encode(to_tensor_tfm(input_im).unsqueeze(0).to(torch_device) * 2 - 1)  # Note scaling to (-1, 1)
    return 0.18215 * latent.mode()  # or .mean or .sample


def latents_to_pil(latents, vae):
    # Batch of latents -> list of PIL images
    latents = (1 / 0.18215) * latents
    with torch.no_grad():
        image = vae.decode(latents)
    image = (image / 2 + 0.5).clamp(0, 1)
    image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
    images = (image * 255).round().astype("uint8")
    pil_images = [Image.fromarray(image) for image in images]
    return pil_images


def color_loss(images, color):
    # Scale the incoming 0-255 RGB values into roughly the (0, 0.9) range of the decoded images
    red, green, blue = (color[0] / 255) * 0.9, (color[1] / 255) * 0.9, (color[2] / 255) * 0.9

    # Mean absolute deviation of each channel from the target color
    red_channel_error = torch.abs(images[:, 0, :, :] - red).mean()
    green_channel_error = torch.abs(images[:, 1, :, :] - green).mean()
    blue_channel_error = torch.abs(images[:, 2, :, :] - blue).mean()
    print(red_channel_error, green_channel_error, blue_channel_error)
    error = red_channel_error + green_channel_error + blue_channel_error
    return error


def sketch_loss(image):
    # Convert PyTorch tensor to a PIL image
    to_pil = transforms.ToPILImage()
    pil_image = to_pil(image[0])

    # Convert the PIL image to grayscale
    gray_image = ImageOps.grayscale(pil_image)

    # Apply an inverted pencil sketch effect
    inverted_image = ImageOps.invert(gray_image)

    # Apply a blur effect to smooth the sketch
    pencil_sketch = inverted_image.filter(ImageFilter.GaussianBlur(radius=5))

    # Convert the PIL image back to a PyTorch tensor
    to_tensor = transforms.ToTensor()
    sketch_tensor = to_tensor(pencil_sketch).unsqueeze(0)
    sketch_tensor.requires_grad = True  # Enable gradients

    # if num_channels == 3:
    #     # If the input was originally in CHW format (3 channels), permute it back to NCHW
    sketch_tensor = sketch_tensor.permute(0, 3, 1, 2)

    # Calculate the loss: penalize pixels far from the 0.9 threshold
    loss = torch.abs(sketch_tensor - 0.9).mean()  # Modify 0.9 to your desired threshold

    return loss
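
As a quick sanity check of color_loss (a hypothetical snippet, not part of the repo), a batch that already matches the target color should score lower than one that does not:

import torch
from utils import color_loss

# Two fake "decoded image" batches in the (0, 1) range, shape (N, 3, H, W)
red_batch = torch.zeros(1, 3, 64, 64)
red_batch[:, 0] = 0.9                      # mostly red
blue_batch = torch.zeros(1, 3, 64, 64)
blue_batch[:, 2] = 0.9                     # mostly blue

target_rgb = [255, 0, 0]                   # 0-255 RGB values, as passed in from app.py

print(color_loss(red_batch, target_rgb))   # ~0: already matches the target color
print(color_loss(blue_batch, target_rgb))  # ~1.8: penalized on the red and blue channels
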