mattb512 committed
Commit e57df08
1 Parent(s): 29f02ac

fix image type and float size

Files changed (2)
  1. app.py +2 -2
  2. image_generator.py +18 -13
app.py CHANGED
@@ -6,7 +6,7 @@ print(ig)
 ig.load_models()
 ig.load_scheduler()
 
-def greet(prompt, mix_prompt, mix_ratio, negative_prompt, steps, init_image ):
+def call(prompt, mix_prompt, mix_ratio, negative_prompt, steps, init_image ):
 
     print(f"{prompt=} {mix_prompt=} {mix_ratio=} {negative_prompt=} {steps=} {init_image=} ")
     generated_image, latents = ig.generate(
@@ -26,7 +26,7 @@ def greet(prompt, mix_prompt, mix_ratio, negative_prompt, steps, init_image ):
     return generated_image, noisy_latent
 
 iface = gr.Interface(
-    fn=greet,
+    fn=call,
     inputs=[
         gr.Textbox(value="a cute dog", label="Prompt", info="primary prompt used to generate an image"),
         gr.Textbox(value=None, label="Secondary Prompt", info="secondary prompt to mix with the primary embeddings"),
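
The app.py change is only a rename so the handler and the `fn=` argument stay in sync. As a minimal, hedged sketch of that wiring (standalone illustration, not code from this repo; the real handler calls ig.generate(...) and returns an image plus a latent preview):

# Minimal sketch (assumed, simplified): a handler named `call` registered with
# gr.Interface via fn=call, mirroring the rename in this commit.
import gradio as gr

def call(prompt):
    # Placeholder body: the real app calls ig.generate(...) and returns images.
    return f"would generate an image for: {prompt!r}"

iface = gr.Interface(
    fn=call,  # must reference the renamed function, otherwise a NameError at startup
    inputs=gr.Textbox(value="a cute dog", label="Prompt"),
    outputs=gr.Textbox(label="Result"),
)

if __name__ == "__main__":
    iface.launch()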
image_generator.py CHANGED
@@ -28,16 +28,22 @@ class ImageGenerator():
         self.height = 512
         self.generator = torch.manual_seed(32)
         self.bs = 1
+        if torch.cuda.is_available():
+            self.device = torch.device("cuda")
+            self.float_size = torch.float16
+        else:
+            self.device = torch.device("cpu")
+            self.float_size = torch.float32
 
     def __repr__(self):
         return f"Image Generator with {self.g=}"
 
     def load_models(self):
-        self.tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=torch.float16)
-        self.text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=torch.float16 ).to("cuda")
-        # vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-ema", torch_dtype=torch.float16 ).to("cuda")
-        self.vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae" ).to("cuda")
-        self.unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet" ).to("cuda") #torch_dtype=torch.float16,
+        self.tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=self.float_size)
+        self.text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14", torch_dtype=self.float_size).to(self.device)
+        # vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-ema", torch_dtype=torch.float16 ).to(self.device)
+        self.vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae").to(self.device)
+        self.unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet").to(self.device) #torch_dtype=torch.float16,
 
     def load_scheduler( self,
                         beta_start : float=0.00085,
@@ -63,7 +69,7 @@ class ImageGenerator():
         np_images = np.repeat(np_img[np.newaxis, :, :], self.bs, axis=0) # adding a new dimension and repeating the image for each prompt
         # print(f"{np_images.shape=}")
 
-        decoded_latent = torch.from_numpy(np_images).to("cuda").float() #<-- stability-ai vae uses half(), compvis vae uses float?
+        decoded_latent = torch.from_numpy(np_images).to(self.device).float() #<-- stability-ai vae uses half(), compvis vae uses float?
         # print(f"{decoded_latent.shape=}")
 
         encoded_latent = 0.18215 * self.vae.encode(decoded_latent).latent_dist.sample()
@@ -75,7 +81,7 @@ class ImageGenerator():
         # noise = torch.randn_like(latent) # missing generator parameter
         noise = torch.randn(
             size = (self.bs, self.unet.config.in_channels, self.height//8, self.width//8),
-            generator = self.generator).to("cuda")
+            generator = self.generator).to(self.device)
         timesteps = torch.tensor([self.scheduler.timesteps[scheduler_steps]])
         noisy_latent = self.scheduler.add_noise(latent, noise, timesteps)
         # print(f"add_noise: {timesteps.shape=} {timesteps=} {noisy_latent.shape=}")
@@ -103,7 +109,7 @@ class ImageGenerator():
         if maxlen is None: maxlen = self.tokenizer.model_max_length
 
         inp = self.tokenizer([prompt], padding="max_length", max_length=maxlen, truncation=True, return_tensors="pt")
-        return self.text_encoder(inp.input_ids.to("cuda"))[0].float()
+        return self.text_encoder(inp.input_ids.to(self.device))[0].float()
 
     def tensor_to_pil(self, t:torch.Tensor) -> Image:
         '''transforms a tensor decoded by the vae to a pil image'''
@@ -126,7 +132,7 @@ class ImageGenerator():
                  seed : int=32,
                  steps : int=30,
                  start_step_ratio : float=1/5,
-                 init_image : str=None,
+                 init_image : Image=None,
                  latent_callback_mod : int=10):
         self.latent_images = []
         if not negative_prompt: negative_prompt = ""
@@ -153,13 +159,12 @@ class ImageGenerator():
         else:
             start_steps = int(steps * start_step_ratio) # 0%: too much noise, 100% no noise
             # print(f"{start_steps=}")
-            img = self.load_image(init_image)
-            latents = self.pil_to_latent(img)
+            latents = self.pil_to_latent(init_image)
             self.latent_callback(latents)
-            latents = self.add_noise(latents, start_steps).to("cuda").float()
+            latents = self.add_noise(latents, start_steps).to(self.device).float()
             self.latent_callback(latents)
 
-            latents = latents.to("cuda").float()
+            latents = latents.to(self.device).float()
 
         for i,ts in enumerate(tqdm(self.scheduler.timesteps, leave=False)):
             if i >= start_steps:
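
The substance of the commit is the device/dtype fallback: pick CUDA with float16 when a GPU is available, otherwise CPU with float32, and replace the hard-coded .to("cuda") calls with .to(self.device). A minimal sketch of that pattern in isolation (assumed, standalone; the variable names here are illustrative, not taken from the repo):

# Minimal, standalone sketch (not repo code) of the fallback this commit introduces.
import torch

if torch.cuda.is_available():
    device = torch.device("cuda")
    float_size = torch.float16   # half precision on GPU saves memory
else:
    device = torch.device("cpu")
    float_size = torch.float32   # full precision on CPU, where float16 support is limited

# Downstream code then uses .to(device) instead of a hard-coded .to("cuda"),
# so the same script also runs on CPU-only machines, just slower.
latent = torch.randn(1, 4, 64, 64, generator=torch.manual_seed(32)).to(device)
print(latent.device, float_size)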