| import einops |
| import torch |
| import torch as th |
| import torch.nn as nn |
|
|
| from ldm.modules.diffusionmodules.util import ( |
| conv_nd, |
| linear, |
| zero_module, |
| timestep_embedding, |
| ) |
|
|
| from einops import rearrange, repeat |
| from torchvision.utils import make_grid |
| from ldm.modules.attention import SpatialTransformer |
from ldm.modules.diffusionmodules.openaimodel import (
    UNetModel, TimestepEmbedSequential, ResBlock, Upsample, Downsample,
    AttentionBlock, normalization,
)
| from ldm.models.diffusion.ddpm import LatentDiffusion |
| from ldm.util import log_txt_as_img, exists, instantiate_from_config |
| from ldm.models.diffusion.ddim import DDIMSampler |
|
|
|
|
def count_parameters(params):
    """Return the number of trainable parameters, in millions.

    Tensors of shape (N, D, D) are counted as N square blocks whose only free
    entries are the strictly upper-triangular ones (as for skew-symmetric
    generators): N * D * (D - 1) / 2. All other tensors use numel().
    """
    num_params = 0
    for p in params:
        shape = p.shape
        if len(shape) == 3 and shape[1] == shape[2]:
            N, D, _ = shape
            num_params += N * D * (D - 1) // 2
        else:
            num_params += p.numel()
    return round(num_params / 1e6, 1)
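
# Worked example for the counting rule above: a parameter of shape (4, 8, 8)
# contributes 4 * 8 * 7 // 2 = 112 free entries (strictly upper-triangular),
# not 4 * 8 * 8 = 256.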
|
|
| def set_requires_grad(model, requires_grad=True): |
| for param in model.parameters(): |
| param.requires_grad = requires_grad |
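
# Illustrative use: freeze a submodule before training the control branch,
# e.g. set_requires_grad(model.first_stage_model, False) to lock the VAE
# (the attribute name follows LatentDiffusion; adjust to your model).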
|
|
class ControlledUnetModel(UNetModel):
    def forward(self, x, timesteps=None, context=None, control=None, only_mid_control=False, **kwargs):
        hs = []
        t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
        emb = self.time_embed(t_emb)

        h = x.type(self.dtype)
        for module in self.input_blocks:
            if control is not None:
                # inject the control signal once, right after the first input
                # block, then drop it so the remaining blocks run unmodified
                h = module(h, emb, context)
                h += control
                control = None
            else:
                h = module(h, emb, context)
            hs.append(h)
        h = self.middle_block(h, emb, context)
        for module in self.output_blocks:
            h = th.cat([h, hs.pop()], dim=1)
            h = module(h, emb, context)
        h = h.type(x.dtype)
        return self.out(h)
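
# Minimal usage sketch (config values are illustrative, not prescriptive):
#   unet = ControlledUnetModel(image_size=32, in_channels=4, out_channels=4,
#                              model_channels=320, num_res_blocks=2,
#                              attention_resolutions=[4, 2, 1])
#   eps = unet(x_noisy, timesteps=t, context=txt_emb, control=guided_hint)
# Unlike stock ControlNet, `control` here is a single tensor added once after
# the first input block rather than a list consumed block by block.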
|
|
|
|
| class ControlNet(nn.Module): |
| def __init__( |
| self, |
| image_size, |
| in_channels, |
| model_channels, |
| out_channels, |
| hint_channels, |
| num_res_blocks, |
| attention_resolutions, |
| dropout=0, |
| channel_mult=(1, 2, 4, 8), |
| conv_resample=True, |
| dims=2, |
| use_checkpoint=False, |
| use_fp16=False, |
| num_heads=-1, |
| num_head_channels=-1, |
| num_heads_upsample=-1, |
| use_scale_shift_norm=False, |
| resblock_updown=False, |
| use_new_attention_order=False, |
| use_spatial_transformer=False, |
| transformer_depth=1, |
| context_dim=None, |
| n_embed=None, |
| legacy=True, |
| disable_self_attentions=None, |
| num_attention_blocks=None, |
| disable_middle_self_attn=False, |
| use_linear_in_transformer=False, |
| ): |
| super().__init__() |
| if use_spatial_transformer: |
| assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' |
|
|
| if context_dim is not None: |
| assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' |
| from omegaconf.listconfig import ListConfig |
| if type(context_dim) == ListConfig: |
| context_dim = list(context_dim) |
|
|
| if num_heads_upsample == -1: |
| num_heads_upsample = num_heads |
|
|
| if num_heads == -1: |
| assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' |
|
|
| if num_head_channels == -1: |
| assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' |
|
|
| self.dims = dims |
| self.image_size = image_size |
| self.in_channels = in_channels |
| self.model_channels = model_channels |
| if isinstance(num_res_blocks, int): |
| self.num_res_blocks = len(channel_mult) * [num_res_blocks] |
| else: |
| if len(num_res_blocks) != len(channel_mult): |
| raise ValueError("provide num_res_blocks either as an int (globally constant) or " |
| "as a list/tuple (per-level) with the same length as channel_mult") |
| self.num_res_blocks = num_res_blocks |
        if disable_self_attentions is not None:
            # should be a list of booleans, one per level of channel_mult,
            # saying whether to disable self-attention at that level
            assert len(disable_self_attentions) == len(channel_mult)
| if num_attention_blocks is not None: |
| assert len(num_attention_blocks) == len(self.num_res_blocks) |
| assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) |
| print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. " |
| f"This option has LESS priority than attention_resolutions {attention_resolutions}, " |
| f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " |
| f"attention will still not be set.") |
|
|
| self.attention_resolutions = attention_resolutions |
| self.dropout = dropout |
| self.channel_mult = channel_mult |
| self.conv_resample = conv_resample |
| self.use_checkpoint = use_checkpoint |
| self.dtype = th.float16 if use_fp16 else th.float32 |
| self.num_heads = num_heads |
| self.num_head_channels = num_head_channels |
| self.num_heads_upsample = num_heads_upsample |
| self.predict_codebook_ids = n_embed is not None |
|
|
| time_embed_dim = model_channels * 4 |
| self.time_embed = nn.Sequential( |
| linear(model_channels, time_embed_dim), |
| nn.SiLU(), |
| linear(time_embed_dim, time_embed_dim), |
| ) |
|
|
| self.input_hint_block = TimestepEmbedSequential( |
| conv_nd(dims, hint_channels, 16, 3, padding=1), |
| nn.SiLU(), |
| conv_nd(dims, 16, 16, 3, padding=1), |
| nn.SiLU(), |
| conv_nd(dims, 16, 32, 3, padding=1, stride=2), |
| nn.SiLU(), |
| conv_nd(dims, 32, 32, 3, padding=1), |
| nn.SiLU(), |
| conv_nd(dims, 32, 96, 3, padding=1, stride=2), |
| nn.SiLU(), |
| conv_nd(dims, 96, 96, 3, padding=1), |
| nn.SiLU(), |
| conv_nd(dims, 96, 256, 3, padding=1, stride=2), |
| nn.SiLU(), |
| zero_module(conv_nd(dims, 256, model_channels, 3, padding=1)) |
| ) |
|
|
|
|
    def forward(self, x, hint, timesteps, context, **kwargs):
        # `x` is accepted for signature compatibility with the full ControlNet
        # but is not used here: only the hint is encoded
        t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
        emb = self.time_embed(t_emb)

        guided_hint = self.input_hint_block(hint, emb, context)
        return guided_hint
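
# Usage sketch (shapes assume a 512x512 RGB hint; illustrative only):
#   control_model = ControlNet(image_size=32, in_channels=4, model_channels=320,
#                              out_channels=4, hint_channels=3, num_res_blocks=2,
#                              attention_resolutions=[4, 2, 1])
#   guided_hint = control_model(x=z_noisy, hint=hint_bchw, timesteps=t, context=txt_emb)
#   # guided_hint: (B, model_channels, 64, 64), passed to ControlledUnetModel
#   # via its `control` argument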
|
|
|
|
| class ControlLDM(LatentDiffusion): |
| def __init__(self, control_stage_config, control_key, only_mid_control, *args, **kwargs): |
| super().__init__(*args, **kwargs) |
| self.control_model = instantiate_from_config(control_stage_config) |
| self.control_key = control_key |
| self.only_mid_control = only_mid_control |
        self.control_scales = [1.0] * 13  # not referenced elsewhere in this file (vestigial from stock ControlNet)
|
|
| @torch.no_grad() |
| def get_input(self, batch, k, bs=None, *args, **kwargs): |
| x, c = super().get_input(batch, self.first_stage_key, *args, **kwargs) |
| control = batch[self.control_key] |
| if bs is not None: |
| control = control[:bs] |
| control = control.to(self.device) |
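        # control images are assumed channels-last with values in [0, 1]
        # (hence the *2-1 rescale when logging); convert to NCHW float here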
| control = einops.rearrange(control, 'b h w c -> b c h w') |
| control = control.to(memory_format=torch.contiguous_format).float() |
| return x, dict(c_crossattn=[c], c_concat=[control]) |
|
|
    def apply_model(self, x_noisy, t, cond, *args, **kwargs):
        assert isinstance(cond, dict)
        diffusion_model = self.model.diffusion_model

        cond_txt = torch.cat(cond['c_crossattn'], 1)

        if cond['c_concat'] is None:
            eps = diffusion_model(x=x_noisy, timesteps=t, context=cond_txt, control=None, only_mid_control=self.only_mid_control)
        else:
            control = self.control_model(x=x_noisy, hint=torch.cat(cond['c_concat'], 1), timesteps=t, context=cond_txt)
            eps = diffusion_model(x=x_noisy, timesteps=t, context=cond_txt, control=control, only_mid_control=self.only_mid_control)

        return eps
|
|
| @torch.no_grad() |
| def get_unconditional_conditioning(self, N): |
| return self.get_learned_conditioning([""] * N) |
|
|
| @torch.no_grad() |
| def log_images(self, batch, N=4, n_row=2, sample=False, ddim_steps=50, ddim_eta=0.0, return_keys=None, |
| quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, |
| plot_diffusion_rows=False, unconditional_guidance_scale=9.0, unconditional_guidance_label=None, |
| use_ema_scope=True, num_samples=1, |
| **kwargs): |
| use_ddim = ddim_steps is not None |
|
|
| log = dict() |
| z, c = self.get_input(batch, self.first_stage_key, bs=N) |
| c_cat, c = c["c_concat"][0][:N], c["c_crossattn"][0][:N] |
| N = min(z.shape[0], N) |
| n_row = min(z.shape[0], n_row) |
| log["reconstruction"] = self.decode_first_stage(z) |
| log["control"] = c_cat * 2.0 - 1.0 |
| log["conditioning"] = log_txt_as_img((512, 512), batch[self.cond_stage_key], size=16) |
|
|
        if plot_diffusion_rows:
            # visualize the forward diffusion process on the first n_row latents
            diffusion_row = list()
| z_start = z[:n_row] |
| for t in range(self.num_timesteps): |
| if t % self.log_every_t == 0 or t == self.num_timesteps - 1: |
| t = repeat(torch.tensor([t]), '1 -> b', b=n_row) |
| t = t.to(self.device).long() |
| noise = torch.randn_like(z_start) |
| z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) |
| diffusion_row.append(self.decode_first_stage(z_noisy)) |
|
|
| diffusion_row = torch.stack(diffusion_row) |
| diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') |
| diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') |
| diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) |
| log["diffusion_row"] = diffusion_grid |
|
|
        if sample:
            # unguided sampling (no classifier-free guidance)
            samples, z_denoise_row = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
| batch_size=N, ddim=use_ddim, |
| ddim_steps=ddim_steps, eta=ddim_eta) |
| x_samples = self.decode_first_stage(samples) |
| log["samples"] = x_samples |
| if plot_denoise_rows: |
| denoise_grid = self._get_denoise_row_from_list(z_denoise_row) |
| log["denoise_row"] = denoise_grid |
|
|
        # default to the train-style logging path when no split is provided
        if kwargs.get('split', 'train') == 'train':
            if unconditional_guidance_scale > 1.0:
                uc_cross = self.get_unconditional_conditioning(N)
                uc_cat = c_cat  # alternatively torch.zeros_like(c_cat) to drop the control
|
|
| uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]} |
| samples_cfg, _ = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]}, |
| batch_size=N, ddim=use_ddim, |
| ddim_steps=ddim_steps, eta=ddim_eta, |
| unconditional_guidance_scale=unconditional_guidance_scale, |
| unconditional_conditioning=uc_full, |
| ) |
| x_samples_cfg = self.decode_first_stage(samples_cfg) |
| log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg |
| |
        else:
            if unconditional_guidance_scale > 1.0:
                # eval-style logging: repeat the first control image and prompt
                # num_samples times, then sample with classifier-free guidance
                c_cat = torch.stack([c_cat[0] for _ in range(num_samples)], dim=0).clone()

                cond = {"c_concat": [c_cat], "c_crossattn": [self.get_learned_conditioning([batch['txt'][0]] * num_samples)]}
                uc_full = {"c_concat": [c_cat], "c_crossattn": [self.get_learned_conditioning([''] * num_samples)]}
|
|
| samples_cfg, _ = self.sample_log(cond=cond, |
| batch_size=num_samples, ddim=use_ddim, |
| ddim_steps=ddim_steps, eta=ddim_eta, |
| unconditional_guidance_scale=unconditional_guidance_scale, |
| unconditional_conditioning=uc_full, |
| ) |
| x_samples_cfg = self.decode_first_stage(samples_cfg) |
| log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg |
|
|
| return log |
|
|
| @torch.no_grad() |
| def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): |
| ddim_sampler = DDIMSampler(self) |
| b, c, h, w = cond["c_concat"][0].shape |
| shape = (self.channels, h // 8, w // 8) |
| samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) |
| return samples, intermediates |
|
|
    def configure_optimizers(self):
        lr = self.learning_rate
        params = list(self.control_model.parameters())

        # pick up any UNet parameters that are already marked trainable
        names = []
        for name, param in self.model.diffusion_model.named_parameters():
            if param.requires_grad:
                params.append(param)
                names.append(name)

        if not self.sd_locked:
            params += list(self.model.diffusion_model.output_blocks.parameters())
            params += list(self.model.diffusion_model.out.parameters())
        opt = torch.optim.AdamW(params, lr=lr)

        # note: this turns on gradients for the whole UNet, but only the
        # parameters collected above are passed to the optimizer
        set_requires_grad(self.model.diffusion_model, True)

        num_params = count_parameters(params)
        print(f"\nTotal number of trainable parameters: {num_params}M\n")

        return opt
|
|
| def low_vram_shift(self, is_diffusing): |
| if is_diffusing: |
| self.model = self.model.cuda() |
| self.control_model = self.control_model.cuda() |
| self.first_stage_model = self.first_stage_model.cpu() |
| self.cond_stage_model = self.cond_stage_model.cpu() |
| else: |
| self.model = self.model.cpu() |
| self.control_model = self.control_model.cpu() |
| self.first_stage_model = self.first_stage_model.cuda() |
| self.cond_stage_model = self.cond_stage_model.cuda() |
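

if __name__ == "__main__":
    # Lightweight sanity check of the dependency-free helper above; the rest
    # of this file requires the full ldm package and pretrained weights.
    dummy = [th.zeros(4, 8, 8), th.zeros(320, 4, 3, 3)]
    # (4, 8, 8) -> 4 * 8 * 7 // 2 = 112; (320, 4, 3, 3) -> 11520; ~0.0 M total
    print(f"count_parameters(dummy) = {count_parameters(dummy)} M")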
|
|