remove notebook - too big
Files changed:
- .gitignore          +3 -0
- image_generator.py  +15 -24
- requirements.txt    +8 -0
.gitignore  ADDED
@@ -0,0 +1,3 @@
+.venv/**
+.DS_Store
+__pycache__
image_generator.py  CHANGED
@@ -16,6 +16,7 @@ from diffusers import AutoencoderKL, UNet2DConditionModel
 from diffusers import LMSDiscreteScheduler
 from tqdm.auto import tqdm
 
+
 logging.disable(logging.WARNING)
 class ImageGenerator():
     def __init__(self,
@@ -27,23 +28,16 @@ class ImageGenerator():
         self.height = 512
         self.generator = torch.manual_seed(32)
         self.bs = 1
-        if torch.cuda.is_available():
-            self.device = torch.device("cuda")
-            self.dtype = torch.float16
-        else:
-            self.device = torch.device("cpu")
-            self.dtype = torch.float32
 
-        print(f"Working on device: {self.device=}")
     def __repr__(self):
         return f"Image Generator with {self.g=}"
 
     def load_models(self):
-        self.tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14",
-        self.text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14",
-        # vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-ema",
-        self.vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4",
-        self.unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4",
+        self.tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=torch.float16)
+        self.text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=torch.float16).to("cuda")
+        # vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-ema", torch_dtype=torch.float16).to("cuda")
+        self.vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae").to("cuda")
+        self.unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet").to("cuda") # torch_dtype=torch.float16,
 
     def load_scheduler( self,
                         beta_start : float=0.00085,
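This hunk trades the device/dtype detection for a hard-coded "cuda", so the Space will no longer fall back to CPU. A minimal sketch of keeping both paths while still loading the same components, assuming the transformers/diffusers APIs used in the diff (not part of the commit):

import torch
from transformers import CLIPTokenizer, CLIPTextModel
from diffusers import AutoencoderKL, UNet2DConditionModel

# pick GPU when present, CPU otherwise; half precision only pays off on GPU
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
dtype = torch.float16 if device.type == "cuda" else torch.float32

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=dtype).to(device)
vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae", torch_dtype=dtype).to(device)
unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet", torch_dtype=dtype).to(device)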
@@ -57,13 +51,10 @@ class ImageGenerator():
                         beta_schedule="scaled_linear",
                         num_train_timesteps=num_train_timesteps)
 
-    def load_image(self, filepath:str)
+    def load_image(self, filepath:str):
         return Image.open(filepath).resize(size=(self.width,self.height))
         #.convert("RGB") # RGB = 3 dimensions, RGBA = 4 dimensions
 
-    def nparray_to_pil(self, np_image: np.array) -> Image:
-        return Image.fromarray(np_image).resize(size=(self.width,self.height))
-
     def pil_to_latent(self, image: Image) -> torch.Tensor:
         with torch.no_grad():
             np_img = np.transpose( (( np.array(image) / 255)-0.5)*2, (2,0,1)) # turn pil image into np array with values between -1 and 1
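The pil_to_latent context above relies on the usual Stable Diffusion preprocessing: pixel values are rescaled from [0, 255] to [-1, 1] and the array is transposed from HWC to CHW. A small self-contained check of that mapping and its inverse (illustrative only, using a hypothetical solid-color test image):

import numpy as np
from PIL import Image

img = Image.new("RGB", (512, 512), color=(128, 64, 255))             # hypothetical test image
np_img = np.transpose(((np.array(img) / 255) - 0.5) * 2, (2, 0, 1))  # HWC -> CHW, [0,255] -> [-1,1]
assert np_img.shape == (3, 512, 512)
assert np_img.min() >= -1.0 and np_img.max() <= 1.0

# the inverse mapping recovers the original pixels exactly
restored = ((np.transpose(np_img, (1, 2, 0)) / 2 + 0.5) * 255).round().astype(np.uint8)
assert np.array_equal(np.array(img), restored)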
@@ -72,7 +63,7 @@ class ImageGenerator():
             np_images = np.repeat(np_img[np.newaxis, :, :], self.bs, axis=0) # adding a new dimension and repeating the image for each prompt
             # print(f"{np_images.shape=}")
 
-            decoded_latent = torch.from_numpy(np_images).to(
+            decoded_latent = torch.from_numpy(np_images).to("cuda").float() # <-- stability-ai vae uses half(), compvis vae uses float?
             # print(f"{decoded_latent.shape=}")
 
             encoded_latent = 0.18215 * self.vae.encode(decoded_latent).latent_dist.sample()
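The 0.18215 multiplier in the context line is the latent scaling factor from the Stable Diffusion v1 VAE config; it normalizes the latents to roughly unit variance before they reach the UNet, so whatever encodes with it must decode with its inverse. A sketch of the matching decode path, assuming a loaded AutoencoderKL named vae and a preprocessed batch tensor named decoded_latent as above:

import torch

with torch.no_grad():
    latent = 0.18215 * vae.encode(decoded_latent).latent_dist.sample()  # 3x512x512 image -> 4x64x64 latent
    image_out = vae.decode(latent / 0.18215).sample                     # undo the scaling before decoding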
@@ -84,7 +75,7 @@ class ImageGenerator():
         # noise = torch.randn_like(latent) # missing generator parameter
         noise = torch.randn(
                     size = (self.bs, self.unet.config.in_channels, self.height//8, self.width//8),
-                    generator = self.generator).to(
+                    generator = self.generator).to("cuda")
         timesteps = torch.tensor([self.scheduler.timesteps[scheduler_steps]])
         noisy_latent = self.scheduler.add_noise(latent, noise, timesteps)
         # print(f"add_noise: {timesteps.shape=} {timesteps=} {noisy_latent.shape=}")
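Passing the stored generator into torch.randn is what the older, commented-out torch.randn_like call could not do, and it is what makes the added noise reproducible across runs. A quick demonstration:

import torch

g = torch.manual_seed(32)
a = torch.randn(size=(1, 4, 64, 64), generator=g)
g = torch.manual_seed(32)                # reset the generator state
b = torch.randn(size=(1, 4, 64, 64), generator=g)
assert torch.equal(a, b)                 # same seed, same noise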
@@ -112,7 +103,7 @@ class ImageGenerator():
         if maxlen is None: maxlen = self.tokenizer.model_max_length
 
         inp = self.tokenizer([prompt], padding="max_length", max_length=maxlen, truncation=True, return_tensors="pt")
-        return self.text_encoder(inp.input_ids.to(
+        return self.text_encoder(inp.input_ids.to("cuda"))[0].float()
 
     def tensor_to_pil(self, t:torch.Tensor) -> Image:
         '''transforms a tensor decoded by the vae to a pil image'''
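The return line above produces the CLIP hidden states that condition the UNet: with clip-vit-large-patch14 and its default max length of 77 tokens, each prompt becomes a (1, 77, 768) tensor. A CPU-only sketch of that call chain, using the same model name as the diff (the prompt is an arbitrary example):

import torch
from transformers import CLIPTokenizer, CLIPTextModel

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

inp = tokenizer(["a watercolor painting of an otter"], padding="max_length",
                max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
with torch.no_grad():
    emb = text_encoder(inp.input_ids)[0]   # last_hidden_state
print(emb.shape)                           # torch.Size([1, 77, 768])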
@@ -135,7 +126,7 @@ class ImageGenerator():
                  seed : int=32,
                  steps : int=30,
                  start_step_ratio : float=1/5,
-                 init_image :
+                 init_image : str=None,
                  latent_callback_mod : int=10):
         self.latent_images = []
         if not negative_prompt: negative_prompt = ""
@@ -162,13 +153,13 @@ class ImageGenerator():
         else:
             start_steps = int(steps * start_step_ratio) # 0%: too much noise, 100% no noise
             # print(f"{start_steps=}")
-
-            latents =self.pil_to_latent(
+            img = self.load_image(init_image)
+            latents = self.pil_to_latent(img)
             self.latent_callback(latents)
-            latents = self.add_noise(latents, start_steps).to(
+            latents = self.add_noise(latents, start_steps).to("cuda").float()
             self.latent_callback(latents)
 
-            latents = latents.to(
+            latents = latents.to("cuda").float()
 
         for i,ts in enumerate(tqdm(self.scheduler.timesteps, leave=False)):
             if i >= start_steps:
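As the inline comment notes, start_step_ratio decides how much of the init image survives: the loop below it only denoises timesteps with i >= start_steps. A worked example of the arithmetic, using the defaults from the signature (steps=30, start_step_ratio=1/5):

steps = 30
start_step_ratio = 1 / 5
start_steps = int(steps * start_step_ratio)   # = 6
# timesteps 0-5 are skipped and the remaining 24 are denoised;
# ratio 0 -> pure noise (plain text-to-image), ratio 1 -> the init image passes through untouched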
requirements.txt  ADDED
@@ -0,0 +1,8 @@
+diffusers
+transformers
+fastcore
+matplotlib
+scipy
+torch
+torchvision
+gradio
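With the notebook gone, the Space rebuilds from these eight unpinned packages. Locally, the same environment can be recreated with python -m venv .venv followed by pip install -r requirements.txt, producing exactly the .venv/ tree the new .gitignore entry keeps out of the repo.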