import importlib

import torch
import numpy as np
from inspect import isfunction


def instantiate_from_config(config):
    # Build an object described by a config dict of the form
    # {"target": "<dotted.path.to.Class>", "params": {...}}.
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", dict()))
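
# Usage sketch (illustrative only; torch.nn.Linear is a stand-in target, not
# something this module depends on):
#   layer = instantiate_from_config(
#       {"target": "torch.nn.Linear", "params": {"in_features": 4, "out_features": 2}}
#   )
#   # -> an initialized torch.nn.Linear(4, 2)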


def get_obj_from_str(string, reload=False):
    # Resolve a dotted import path (e.g. "pkg.module.ClassName") to the object
    # it names; with reload=True, re-import the containing module first.
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
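
# Usage sketch: resolve a dotted path to the object it names.
#   cls = get_obj_from_str("collections.OrderedDict")  # the class, not an instance
#   d = cls(a=1)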


def exists(x):
    return x is not None


def default(val, d):
    if exists(val):
        return val
    return d() if isfunction(d) else d
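
# Usage sketch: `default` evaluates callables lazily, so an expensive fallback
# is only computed when `val` is actually None.
#   default(5, 10)                         # -> 5
#   default(None, 10)                      # -> 10
#   default(None, lambda: torch.zeros(3))  # lambda runs only because val is None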


def noise_like(shape, device, repeat=False):
    repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
    noise = lambda: torch.randn(shape, device=device)
    return repeat_noise() if repeat else noise()
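
# Usage sketch: repeat=True draws one noise sample and tiles it across the
# batch, so every element sees identical noise; repeat=False draws i.i.d. noise.
#   n = noise_like((4, 3, 8, 8), device="cpu", repeat=True)
#   assert torch.equal(n[0], n[3])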


def extract_into_tensor(a, t, x_shape):
    # Gather a[t] per batch element and reshape to (b, 1, ..., 1) so the
    # result broadcasts against a tensor of shape x_shape.
    b, *_ = t.shape
    out = a.gather(-1, t)
    return out.reshape(b, *((1,) * (len(x_shape) - 1)))
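
# Usage sketch: pick per-timestep coefficients from a precomputed schedule and
# broadcast them over an image batch.
#   a = torch.linspace(0, 1, 1000)          # e.g. a schedule of length 1000
#   t = torch.tensor([0, 500, 999])         # one timestep per batch element
#   coef = extract_into_tensor(a, t, (3, 4, 64, 64))  # shape (3, 1, 1, 1)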


def right_pad_dims_to(x, t):
    # Append singleton dims to t until it has as many dims as x.
    padding_dims = x.ndim - t.ndim
    if padding_dims <= 0:
        return t
    return t.view(*t.shape, *((1,) * padding_dims))
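
# Usage sketch:
#   x = torch.zeros(8, 3, 64, 64)
#   t = torch.rand(8)
#   right_pad_dims_to(x, t).shape  # -> torch.Size([8, 1, 1, 1])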


def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
    if schedule in ("linear", "scaled_linear"):
        betas = (
            torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
        )
    elif schedule == "cosine":
        timesteps = (
            torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
        )
        alphas = timesteps / (1 + cosine_s) * np.pi / 2
        alphas = torch.cos(alphas).pow(2)
        alphas = alphas / alphas[0]
        betas = 1 - alphas[1:] / alphas[:-1]
        # clamp in torch (not np.clip) so betas stays a tensor and the
        # final .numpy() call below still works
        betas = torch.clamp(betas, min=0, max=0.999)
    elif schedule == "sqrt_linear":
        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
    elif schedule == "sqrt":
        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
    else:
        raise ValueError(f"schedule '{schedule}' unknown.")
    return betas.numpy()
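
# Usage sketch: build a 1000-step schedule and derive the cumulative alphas
# that the DDIM helpers below consume.
#   betas = make_beta_schedule("linear", 1000)
#   alphas_cumprod = np.cumprod(1.0 - betas)  # alpha-bar_t, shape (1000,)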


def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
    if ddim_discr_method == 'uniform':
        c = num_ddpm_timesteps // num_ddim_timesteps
        ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
    elif ddim_discr_method == 'quad':
        ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
    else:
        raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')

    # assert ddim_timesteps.shape[0] == num_ddim_timesteps
    # add one to get the final alpha values right (the ones from first scale to data during sampling)
    steps_out = ddim_timesteps + 1
    if verbose:
        print(f'Selected timesteps for ddim sampler: {steps_out}')
    return steps_out
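
# Usage sketch: thin 1000 DDPM steps down to 50 DDIM steps.
#   steps = make_ddim_timesteps("uniform", 50, 1000, verbose=False)
#   # -> array([  1,  21,  41, ..., 981]), i.e. 50 evenly spaced indices shifted by one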


def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
    # select alphas for computing the variance schedule
    alphas = alphacums[ddim_timesteps]
    alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())

    # according to the formula provided in https://arxiv.org/abs/2010.02502
    sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
    if verbose:
        print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
        print(f'For the chosen value of eta, which is {eta}, '
              f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
    return sigmas, alphas, alphas_prev
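
# Usage sketch, chaining the helpers above: eta=0.0 makes every sigma_t zero
# (deterministic DDIM), while eta=1.0 recovers DDPM-like stochasticity.
#   betas = make_beta_schedule("linear", 1000)
#   alphacums = np.cumprod(1.0 - betas)
#   steps = make_ddim_timesteps("uniform", 50, 1000, verbose=False)
#   sigmas, alphas, alphas_prev = make_ddim_sampling_parameters(
#       alphacums, steps, eta=0.0, verbose=False)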


def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function,
    which defines the cumulative product of (1-beta) over time from t = [0,1].

    :param num_diffusion_timesteps: the number of betas to produce.
    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
                      produces the cumulative product of (1-beta) up to that
                      part of the diffusion process.
    :param max_beta: the maximum beta to use; use values lower than 1 to
                     prevent singularities.
    """
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return np.array(betas)
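
# Usage sketch: the cosine schedule of Nichol & Dhariwal (2021), expressed as
# an alpha_bar callable (the 0.008 offset follows that paper).
#   import math
#   betas = betas_for_alpha_bar(
#       1000, lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2)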