sachit-menon committed
Commit
9277006
1 Parent(s): d46fb22

Create sd_model.py

Files changed (1)
  1. sd_model.py +297 -0
sd_model.py ADDED
@@ -0,0 +1,297 @@
+ from dataclasses import dataclass, field
+ from typing import Optional
+
+ import torch
+ from torch import nn
+
+ from diffusers import AutoencoderKL, UNet2DConditionModel
+ from diffusers.configuration_utils import ConfigMixin
+ from diffusers.models.modeling_utils import ModelMixin
+ from diffusers.training_utils import EMAModel
+ from diffusers.utils import logging
+ from diffusers.utils.hub_utils import PushToHubMixin
+ from transformers import CLIPTextModel, CLIPTokenizer
+
+ from trainer.noise_schedulers.scheduling_ddpm_zerosnr import DDPMScheduler
+
+ # from hydra.utils import instantiate
+ from peft import get_peft_model
+ from peft import LoraConfig as PeftLoraConfig  # runtime config passed to get_peft_model (distinct from the hydra-style dataclass below)
+
+ from layers import PositionalEncodingPermute1D
+ from einops import rearrange, repeat
+ from omegaconf import II
+
+ logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+ @dataclass
+ class BaseModelConfig:
+     pass
+
+
+ @dataclass
+ class LoraConfig:
+     _target_: str = "peft.LoraConfig"
+     r: int = 8
+     lora_alpha: int = 32
+     target_modules: list = field(default_factory=lambda: ["to_q", "to_v", "query", "value"])
+     lora_dropout: float = 0.0
+     bias: str = "none"
+
+
+ @dataclass
+ class SDModelConfig(BaseModelConfig):
+     _target_: str = "trainer.models.sd_model.SDModel"
+     pretrained_model_name_or_path: str = "runwayml/stable-diffusion-v1-5"
+     conditioning_dropout_prob: float = 0.05
+     use_ema: bool = True
+     concat_all_steps: bool = II("dataset.concat_all_steps")
+     positional_encoding_type: Optional[str] = "sinusoidal"
+     positional_encoding_length: Optional[int] = None
+     image_positional_encoding_type: Optional[str] = None  # "sinusoidal"
+     image_positional_encoding_length: Optional[int] = None
+     broadcast_positional_encoding: bool = True
+     sequence_length: Optional[int] = II("dataset.sequence_length")  # TODO consider changing interp on next line to this +1?
+     text_sequence_length: Optional[int] = II("dataset.text_sequence_length")
+     use_lora: bool = False
+     # lora_cfg: Any = LoraConfig()
+     zero_snr: bool = True
+     # seed: int = 42  # TODO: inherit from higher config
+     # lora: LoraConfig = LoraConfig(
+     # )
+
+
+ class SDModel(ModelMixin, ConfigMixin, PushToHubMixin):
+     def __init__(self, cfg: SDModelConfig) -> None:
+         super().__init__()
+         self.cfg = cfg
+         self.noise_scheduler = DDPMScheduler.from_pretrained(
+             self.cfg.pretrained_model_name_or_path,
+             subfolder="scheduler",
+             zero_snr=self.cfg.zero_snr)
+
+         self.text_encoder = CLIPTextModel.from_pretrained(
+             self.cfg.pretrained_model_name_or_path, subfolder="text_encoder",
+         )
+         self.tokenizer = CLIPTokenizer.from_pretrained(
+             self.cfg.pretrained_model_name_or_path, subfolder="tokenizer"
+         )
+
+         self.vae = AutoencoderKL.from_pretrained(self.cfg.pretrained_model_name_or_path, subfolder="vae")
+         self.unet = UNet2DConditionModel.from_pretrained(
+             self.cfg.pretrained_model_name_or_path, subfolder="unet"
+         )
+
+         in_channels = 8  # TODO make part of cfg
+         out_channels = self.unet.conv_in.out_channels
+         self.unet.register_to_config(in_channels=in_channels)
+
+         with torch.no_grad():
+             new_conv_in = nn.Conv2d(
+                 in_channels, out_channels, self.unet.conv_in.kernel_size, self.unet.conv_in.stride, self.unet.conv_in.padding
+             )
+             new_conv_in.weight.zero_()
+             new_conv_in.weight[:, :4, :, :].copy_(self.unet.conv_in.weight)  # copy the pretrained weights, leave the rest as zero
+             new_conv_in.bias.copy_(self.unet.conv_in.bias)  # EXTREMELY IMPORTANT MODIFICATION FROM INITIAL DIFFUSERS CODE
+             self.unet.conv_in = new_conv_in
+
+         self.init_pos()
+         self.init_image_pos()
+
+         if self.cfg.use_lora:
+             config = PeftLoraConfig(  # peft's LoraConfig, not the hydra-style dataclass above
+                 r=8,
+                 lora_alpha=32,
+                 target_modules=["to_q", "to_v", "query", "value"],
+                 lora_dropout=0.0,
+                 bias="none",
+             )
+             self.unet = get_peft_model(self.unet, config)
+             self.unet.conv_in.requires_grad_(True)  # NOTE: this makes the whole input conv trainable, not just the new parameters! consider if that's what you really want
+             self.unet.print_trainable_parameters()
+             print(self.unet)
+
+         self.vae.requires_grad_(False)
+         self.text_encoder.requires_grad_(False)
+
+         # use_ema = True
+         # if use_ema:
+         if self.cfg.use_ema:
+             self.ema_unet = EMAModel(self.unet.parameters(), model_cls=UNet2DConditionModel, model_config=self.unet.config)
+
+         self.generator = None  # torch.Generator(self.unet.device).manual_seed(42)  # seed: int = 42  # TODO: inherit from higher config
+
+     def init_pos(self):
+         self.cfg.positional_encoding_length = self.cfg.text_sequence_length
+         if not self.cfg.broadcast_positional_encoding:
+             self.cfg.positional_encoding_length *= 77
+         if self.cfg.positional_encoding_type == 'sinusoidal':
+             self.unet.pos = PositionalEncodingPermute1D(self.cfg.positional_encoding_length)
+         elif self.cfg.positional_encoding_type is None or self.cfg.positional_encoding_type == 'None':
+             self.unet.pos = nn.Identity()
+         else:
+             raise ValueError(f'Unknown positional encoding type {self.cfg.positional_encoding_type}')
+
+     def init_image_pos(self):
+         self.cfg.image_positional_encoding_length = self.cfg.sequence_length
+         if self.cfg.image_positional_encoding_type == 'sinusoidal':
+             self.unet.image_pos = PositionalEncodingPermute1D(self.cfg.image_positional_encoding_length)
+         elif self.cfg.image_positional_encoding_type is None:
+             self.unet.image_pos = nn.Identity()
+         else:
+             raise ValueError(f'Unknown image positional encoding type {self.cfg.image_positional_encoding_type}')
+
+     def tokenize_captions(self, captions):
+         inputs = self.tokenizer(
+             captions, max_length=self.tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt"
+         )
+         return inputs.input_ids
+
+     def forward(self, batch):  # replace with input_ids, edited_pixel_values, original_pixel_values
+         batch_size = batch["input_ids"].shape[0]
+         condition_image = batch["original_pixel_values"]
+         input_ids = batch["input_ids"].to(self.text_encoder.device)
+         # We want to learn the denoising process w.r.t the edited images which
+         # are conditioned on the original image (which was edited) and the edit instruction.
+         # So, first, convert images to latent space.
+         edited_images = batch["edited_pixel_values"]  # .to(self.cfg.weight_dtype)  # TODO check dtype thing
+         output_seq_length = edited_images.shape[1]
+         # edited_images = edited_images.flatten(0,1)
+         edited_images = rearrange(edited_images, 'b s c h w -> (b s) c h w')
+
+         latents = self.vae.encode(edited_images).latent_dist.sample()
+         latents = latents * self.vae.config.scaling_factor
+
+         latents = rearrange(latents, '(b s) c h w -> b c (s h) w', s=output_seq_length)
+         # latents = latents.unflatten(0,(batch_size,output_seq_length)).transpose(1,2).flatten(2,3)  # TODO: change the (batch_size, 3) to (batch_size, output_seq_length)
+         # Sample noise that we'll add to the latents
+         noise = torch.randn_like(latents)
+         bsz = latents.shape[0]
+         # Sample a random timestep for each image
+         timesteps = torch.randint(0, self.noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
+         timesteps = timesteps.long()
+
+         # Add noise to the latents according to the noise magnitude at each timestep
+         # (this is the forward diffusion process)
+         noisy_latents = self.noise_scheduler.add_noise(latents, noise, timesteps)
+
+         if self.cfg.image_positional_encoding_type is not None:
+             latents = self.apply_image_positional_encoding(noisy_latents, output_seq_length)  # TODO: should this assign to noisy_latents instead? as written it only affects the v-prediction target below
+
+         if len(input_ids.shape) == 2:
+             input_ids = input_ids.unsqueeze(0)
+
+         encoder_hidden_states = self.input_ids_to_text_condition(input_ids)
+         if self.cfg.positional_encoding_type is not None:
+             encoder_hidden_states = self.apply_step_positional_encoding(encoder_hidden_states)
+
+         # Get the additional image embedding for conditioning.
+         # Instead of getting a diagonal Gaussian here, we simply take the mode.
+         original_image_embeds = self.vae.encode(condition_image).latent_dist.mode()  # .to(self.cfg.weight_dtype)).latent_dist.mode()  # TODO check dtype thing
+
+         # Conditioning dropout to support classifier-free guidance during inference. For more details
+         # check out section 3.2.1 of the original paper https://arxiv.org/abs/2211.09800.
+         if self.cfg.conditioning_dropout_prob is not None:
+             encoder_hidden_states, original_image_embeds = self.apply_conditioning_dropout(encoder_hidden_states, original_image_embeds)
+
+         # original_image_embeds = original_image_embeds.repeat(1,1,2,1)
+         # original_image_embeds = original_image_embeds.unsqueeze(2).expand(-1, -1, output_seq_length, -1, -1).reshape(batch_size, 4, 32*output_seq_length, 32)
+         original_image_embeds = repeat(original_image_embeds, 'b c h w -> b c (s h) w', s=output_seq_length)  # TODO unify with pipeline get_image_latents
+
+         # Concatenate the `original_image_embeds` with the `noisy_latents`.
+         concatenated_noisy_latents = torch.cat([noisy_latents, original_image_embeds], dim=1)
+
+         target = self.get_loss_target(latents, noise, timesteps)
+
+         # Predict the noise residual and compute loss
+         model_pred = self.unet(concatenated_noisy_latents, timesteps, encoder_hidden_states).sample
+         return model_pred, target
+
+     def get_loss_target(self, latents, noise, timesteps):
+         # Get the target for loss depending on the prediction type
+         if self.noise_scheduler.config.prediction_type == "epsilon":
+             target = noise
+         elif self.noise_scheduler.config.prediction_type == "v_prediction":
+             target = self.noise_scheduler.get_velocity(latents, noise, timesteps)
+         else:
+             raise ValueError(f"Unknown prediction type {self.noise_scheduler.config.prediction_type}")
+         return target
+
+     def apply_conditioning_dropout(self, encoder_hidden_states, original_image_embeds):
+         bsz = original_image_embeds.shape[0]  # taken from original_image_embeds rather than latents, but the batch size should be the same. TODO check
+         random_p = torch.rand(bsz, device=encoder_hidden_states.device, generator=self.generator)  # was originally latents.device, TODO check
+         # Sample masks for the edit prompts.
+         prompt_mask = random_p < 2 * self.cfg.conditioning_dropout_prob
+         prompt_mask = prompt_mask.reshape(bsz, 1, 1)
+         # Final text conditioning.
+         null_conditioning = self.get_null_conditioning()
+         encoder_hidden_states = torch.where(prompt_mask, null_conditioning, encoder_hidden_states)
+
+         # Sample masks for the original images.
+         image_mask_dtype = original_image_embeds.dtype
+         image_mask = 1 - (
+             (random_p >= self.cfg.conditioning_dropout_prob).to(image_mask_dtype)
+             * (random_p < 3 * self.cfg.conditioning_dropout_prob).to(image_mask_dtype)
+         )
+         image_mask = image_mask.reshape(bsz, 1, 1, 1)
+         # Final image conditioning.
+         original_image_embeds = image_mask * original_image_embeds
+         return encoder_hidden_states, original_image_embeds
+
+     def get_null_conditioning(self):
+         null_token = self.tokenize_captions([""]).to(self.text_encoder.device)
+         # null_conditioning = self.input_ids_to_text_condition(null_token)  # would apply positional encoding twice
+         null_conditioning = self.text_encoder(null_token)[0]  # TODO fuse with input_ids_to_text_condition
+         if not self.cfg.concat_all_steps:
+             null_conditioning = repeat(null_conditioning, 'b t l -> b (s t) l', s=self.cfg.text_sequence_length)
+         return null_conditioning
+
+     def input_ids_to_text_condition(self, input_ids):
+         # Get the text embedding for conditioning.
+         if self.cfg.concat_all_steps:
+             encoder_hidden_states = self.text_encoder(input_ids)[0]  # text padded to 77 tokens; encoder_hidden_states.shape = (bsz, 77, 768)
+         else:
+             input_ids = rearrange(input_ids, 'b s t -> (b s) t')
+             encoder_hidden_states = self.text_encoder(input_ids)[0]  # text padded to 77 tokens; encoder_hidden_states.shape = (bsz, 77, 768)  # TODO check why this doesn't match concatenating the encodings of the three tokens; the ones that don't match are the 769-1535 dims of the feature, for tokens 15-76
+
+             # if args.use_positional_encoding:  # old way: added before concat, which doesn't make sense
+             #     encoder_hidden_states = pos(encoder_hidden_states) + encoder_hidden_states
+             encoder_hidden_states = rearrange(encoder_hidden_states, '(b s) t d -> b (s t) d', s=self.cfg.text_sequence_length)
+
+         return encoder_hidden_states
+
+     def apply_step_positional_encoding(self, encoder_hidden_states):
+         positional_encoding = self.unet.pos(encoder_hidden_states)
+         if self.cfg.broadcast_positional_encoding:
+             positional_encoding = repeat(positional_encoding, 'b s d -> b (s t) d', t=77)  # TODO check this
+         encoder_hidden_states = positional_encoding + encoder_hidden_states
+         return encoder_hidden_states
+
+     def apply_image_positional_encoding(self, latents, output_seq_length):
+         original_latents_shape = latents.shape
+         h = original_latents_shape[2] // output_seq_length
+         latents = rearrange(latents, 'b c (s h) w -> b s (c h w)', s=output_seq_length)
+         image_pos = self.unet.image_pos(latents)
+         latents = latents + image_pos
+         latents = rearrange(latents, 'b s (c h w) -> b c (s h) w', s=output_seq_length, c=original_latents_shape[1], h=h, w=original_latents_shape[3])  # confirmed that without the pos addition in between, this reshaping brings it back to the original tensor
+         return latents
+
+
+     def instantiate_pipeline(self):
+         pass
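
Note on the II(...) defaults in SDModelConfig: these are OmegaConf interpolations, so they only resolve once the model config is composed under a root config that also contains a `dataset` node. A minimal sketch of that resolution, assuming hypothetical dataset values that are not part of this file:

from omegaconf import OmegaConf

root = OmegaConf.create({
    # hypothetical dataset node; the real one lives elsewhere in the trainer config
    "dataset": {"concat_all_steps": False, "sequence_length": 3, "text_sequence_length": 3},
    "model": OmegaConf.structured(SDModelConfig),
})
print(root.model.sequence_length)   # 3, resolved from dataset.sequence_length
print(root.model.concat_all_steps)  # False, resolved from dataset.concat_all_steps

# to_object resolves the interpolations and returns a plain SDModelConfig dataclass
model = SDModel(OmegaConf.to_object(root.model))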