import math
import logging
import torch
import torch.nn as nn
import numpy as np
from einops import repeat, rearrange

from comfy.ldm.util import instantiate_from_config


class AlphaBlender(nn.Module):
    """Blend a spatial and a temporal feature map with a mixing factor that is
    fixed, learned, or learned per-frame via an image-only indicator."""

    strategies = ["learned", "fixed", "learned_with_images"]

    def __init__(
        self,
        alpha: float,
        merge_strategy: str = "learned_with_images",
        rearrange_pattern: str = "b t -> (b t) 1 1",
    ):
        super().__init__()
        self.merge_strategy = merge_strategy
        self.rearrange_pattern = rearrange_pattern

        assert (
            merge_strategy in self.strategies
        ), f"merge_strategy needs to be in {self.strategies}"

        if self.merge_strategy == "fixed":
            self.register_buffer("mix_factor", torch.Tensor([alpha]))
        elif (
            self.merge_strategy == "learned"
            or self.merge_strategy == "learned_with_images"
        ):
            self.register_parameter(
                "mix_factor", torch.nn.Parameter(torch.Tensor([alpha]))
            )
        else:
            raise ValueError(f"unknown merge strategy {self.merge_strategy}")

    def get_alpha(self, image_only_indicator: torch.Tensor, device) -> torch.Tensor:
        if self.merge_strategy == "fixed":
            # constant blend weight stored as a buffer
            alpha = self.mix_factor.to(device)
        elif self.merge_strategy == "learned":
            # single learned weight, squashed to (0, 1)
            alpha = torch.sigmoid(self.mix_factor.to(device))
        elif self.merge_strategy == "learned_with_images":
            # per-frame weight: image-only frames get alpha = 1 (pure spatial),
            # video frames use the learned sigmoid weight
            if image_only_indicator is None:
                alpha = rearrange(torch.sigmoid(self.mix_factor.to(device)), "... -> ... 1")
            else:
                alpha = torch.where(
                    image_only_indicator.bool(),
                    torch.ones(1, 1, device=image_only_indicator.device),
                    rearrange(torch.sigmoid(self.mix_factor.to(image_only_indicator.device)), "... -> ... 1"),
                )
                alpha = rearrange(alpha, self.rearrange_pattern)
        else:
            raise NotImplementedError()
        return alpha

    def forward(
        self,
        x_spatial,
        x_temporal,
        image_only_indicator=None,
    ) -> torch.Tensor:
        alpha = self.get_alpha(image_only_indicator, x_spatial.device)
        x = (
            alpha.to(x_spatial.dtype) * x_spatial
            + (1.0 - alpha).to(x_spatial.dtype) * x_temporal
        )
        return x


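# Illustrative sketch, not part of the module API: minimal use of AlphaBlender
# with the "fixed" strategy. The tensor shapes are assumptions for the demo only.
def _example_alpha_blender():
    blender = AlphaBlender(alpha=0.5, merge_strategy="fixed")
    x_spatial = torch.randn(2, 4, 16)   # hypothetical (batch, tokens, channels)
    x_temporal = torch.randn(2, 4, 16)
    # constant-weight blend: 0.5 * x_spatial + 0.5 * x_temporal
    return blender(x_spatial, x_temporal)

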
def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
    if schedule == "linear":
        betas = (
            torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
        )
    elif schedule == "cosine":
        timesteps = (
            torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
        )
        alphas = timesteps / (1 + cosine_s) * np.pi / 2
        alphas = torch.cos(alphas).pow(2)
        alphas = alphas / alphas[0]
        betas = 1 - alphas[1:] / alphas[:-1]
        betas = torch.clamp(betas, min=0, max=0.999)
    elif schedule == "squaredcos_cap_v2":
        return betas_for_alpha_bar(
            n_timestep,
            lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
        )
    elif schedule == "sqrt_linear":
        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
    elif schedule == "sqrt":
        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
    else:
        raise ValueError(f"schedule '{schedule}' unknown.")
    return betas


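# Illustrative sketch: downstream code consumes these betas as a cumulative
# alpha product. The arguments here are just this function's own defaults.
def _example_beta_schedule():
    betas = make_beta_schedule("linear", 1000, linear_start=1e-4, linear_end=2e-2)
    alphas_cumprod = torch.cumprod(1. - betas, dim=0)  # shape [1000], float64
    return betas, alphas_cumprod

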
def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
    if ddim_discr_method == 'uniform':
        c = num_ddpm_timesteps // num_ddim_timesteps
        ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
    elif ddim_discr_method == 'quad':
        ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
    else:
        raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')

    # add one to get the final alpha values right (the ones from first scale to data during sampling)
    steps_out = ddim_timesteps + 1
    if verbose:
        logging.info(f'Selected timesteps for ddim sampler: {steps_out}')
    return steps_out


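# Illustrative sketch: 'uniform' with 1000 DDPM steps and 50 DDIM steps picks
# every 20th timestep, then shifts by one -> [1, 21, 41, ..., 981].
def _example_ddim_timesteps():
    return make_ddim_timesteps('uniform', num_ddim_timesteps=50,
                               num_ddpm_timesteps=1000, verbose=False)

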
def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
    # select alphas for computing the variance schedule
    alphas = alphacums[ddim_timesteps]
    alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())

    # sigma_t schedule according to the DDIM paper, https://arxiv.org/abs/2010.02502
    sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
    if verbose:
        logging.info(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
        logging.info(f'For the chosen value of eta, which is {eta}, '
                     f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
    return sigmas, alphas, alphas_prev


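# Illustrative sketch chaining the helpers above. `alphacums` is assumed to be
# a NumPy array of cumulative alphas; eta=0.0 yields deterministic DDIM
# (all sigmas zero).
def _example_ddim_parameters():
    betas = make_beta_schedule("linear", 1000)
    alphacums = torch.cumprod(1. - betas, dim=0).numpy()
    timesteps = make_ddim_timesteps('uniform', 50, 1000, verbose=False)
    return make_ddim_sampling_parameters(alphacums, timesteps, eta=0.0, verbose=False)

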
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function,
    which defines the cumulative product of (1-beta) over time from t = [0,1].

    :param num_diffusion_timesteps: the number of betas to produce.
    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
                      produces the cumulative product of (1-beta) up to that
                      part of the diffusion process.
    :param max_beta: the maximum beta to use; use values lower than 1 to
                     prevent singularities.
    """
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return np.array(betas)


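# Illustrative sketch: the "squaredcos_cap_v2" branch of make_beta_schedule is
# exactly this helper applied to the Nichol & Dhariwal cosine alpha_bar.
def _example_alpha_bar_betas():
    return betas_for_alpha_bar(
        1000, lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    )

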
def extract_into_tensor(a, t, x_shape):
    # gather one value of `a` per timestep in `t` and reshape the result so it
    # broadcasts against a tensor of shape `x_shape`
    b, *_ = t.shape
    out = a.gather(-1, t)
    return out.reshape(b, *((1,) * (len(x_shape) - 1)))


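# Illustrative sketch: pick one schedule coefficient per batch element and
# broadcast it over an image-shaped tensor.
def _example_extract():
    schedule = torch.linspace(0., 1., 1000)  # e.g. an alphas_cumprod table
    t = torch.tensor([0, 499, 999])          # one timestep per batch item
    x = torch.randn(3, 4, 8, 8)
    coeff = extract_into_tensor(schedule, t, x.shape)  # shape [3, 1, 1, 1]
    return coeff * x

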
def checkpoint(func, inputs, params, flag):
    """
    Evaluate a function without caching intermediate activations, allowing for
    reduced memory at the expense of extra compute in the backward pass.

    :param func: the function to evaluate.
    :param inputs: the argument sequence to pass to `func`.
    :param params: a sequence of parameters `func` depends on but does not
                   explicitly take as arguments.
    :param flag: if False, disable gradient checkpointing.
    """
    if flag:
        args = tuple(inputs) + tuple(params)
        return CheckpointFunction.apply(func, len(inputs), *args)
    else:
        return func(*inputs)


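# Illustrative sketch: checkpointing a single layer. The parameters are passed
# explicitly so torch.autograd.grad can return gradients for them in backward.
def _example_checkpoint():
    layer = nn.Linear(16, 16)
    x = torch.randn(2, 16, requires_grad=True)
    y = checkpoint(layer, (x,), tuple(layer.parameters()), flag=True)
    return y.sum()

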
class CheckpointFunction(torch.autograd.Function):
    @staticmethod
    def forward(ctx, run_function, length, *args):
        ctx.run_function = run_function
        ctx.input_tensors = list(args[:length])
        ctx.input_params = list(args[length:])
        ctx.gpu_autocast_kwargs = {"enabled": torch.is_autocast_enabled(),
                                   "dtype": torch.get_autocast_gpu_dtype(),
                                   "cache_enabled": torch.is_autocast_cache_enabled()}
        with torch.no_grad():
            output_tensors = ctx.run_function(*ctx.input_tensors)
        return output_tensors

    @staticmethod
    def backward(ctx, *output_grads):
        ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
        with torch.enable_grad(), \
                torch.cuda.amp.autocast(**ctx.gpu_autocast_kwargs):
            # Fixes a bug where the first op in run_function modifies the
            # Tensor storage in place, which is not allowed for detach()'d
            # Tensors.
            shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
            output_tensors = ctx.run_function(*shallow_copies)
        input_grads = torch.autograd.grad(
            output_tensors,
            ctx.input_tensors + ctx.input_params,
            output_grads,
            allow_unused=True,
        )
        del ctx.input_tensors
        del ctx.input_params
        del output_tensors
        return (None, None) + input_grads


def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
    """
    Create sinusoidal timestep embeddings.

    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :param repeat_only: if True, skip the sinusoids and simply repeat each
                        timestep value `dim` times.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    if not repeat_only:
        half = dim // 2
        freqs = torch.exp(
            -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32, device=timesteps.device) / half
        )
        args = timesteps[:, None].float() * freqs[None]
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        if dim % 2:
            embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    else:
        embedding = repeat(timesteps, 'b -> b d', d=dim)
    return embedding


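# Illustrative sketch: the sinusoidal embedding that conditions a UNet on the
# diffusion timestep; dim is assumed even here so no zero-padding is appended.
def _example_timestep_embedding():
    t = torch.tensor([0, 250, 999])
    return timestep_embedding(t, dim=320)  # shape [3, 320]

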
def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    for p in module.parameters():
        p.detach().zero_()
    return module


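# Illustrative note: zero-initialising the last layer of a residual branch
# makes the whole block start as an identity mapping, a common trick in these
# diffusion UNets for stable training. A minimal sketch:
def _example_zero_module():
    out_proj = zero_module(nn.Conv2d(64, 64, 3, padding=1))
    x = torch.randn(1, 64, 8, 8)
    return out_proj(x)  # all zeros before any optimizer step

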
def scale_module(module, scale):
    """
    Scale the parameters of a module and return it.
    """
    for p in module.parameters():
        p.detach().mul_(scale)
    return module


def mean_flat(tensor):
    """
    Take the mean over all non-batch dimensions.
    """
    return tensor.mean(dim=list(range(1, len(tensor.shape))))


def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.
    """
    if dims == 1:
        return nn.AvgPool1d(*args, **kwargs)
    elif dims == 2:
        return nn.AvgPool2d(*args, **kwargs)
    elif dims == 3:
        return nn.AvgPool3d(*args, **kwargs)
    raise ValueError(f"unsupported dimensions: {dims}")


class HybridConditioner(nn.Module):

    def __init__(self, c_concat_config, c_crossattn_config):
        super().__init__()
        self.concat_conditioner = instantiate_from_config(c_concat_config)
        self.crossattn_conditioner = instantiate_from_config(c_crossattn_config)

    def forward(self, c_concat, c_crossattn):
        c_concat = self.concat_conditioner(c_concat)
        c_crossattn = self.crossattn_conditioner(c_crossattn)
        return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]}


def noise_like(shape, device, repeat=False):
    repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
    noise = lambda: torch.randn(shape, device=device)
    return repeat_noise() if repeat else noise()