from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import os import math import json from glob import glob from functools import partial import torch import torch.nn as nn import torch.utils.checkpoint import torch.nn.functional as F import numpy as np from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.loaders import UNet2DConditionLoadersMixin from diffusers.utils import BaseOutput, logging, is_torch_version from diffusers.models.modeling_utils import ModelMixin from diffusers.models.unet_2d_blocks import ( CrossAttnDownBlock2D, CrossAttnUpBlock2D, DownBlock2D, UNetMidBlock2DCrossAttn, UNetMidBlock2DSimpleCrossAttn, UpBlock2D, get_down_block as gdb, get_up_block as gub, ) from diffusers.models.embeddings import ( GaussianFourierProjection, ImageHintTimeEmbedding, ImageProjection, ImageTimeEmbedding, TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps, ) from diffusers.models.activations import get_activation from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor, Attention, AttnAddedKVProcessor, AttnAddedKVProcessor2_0 from diffusers.models.resnet import Downsample2D, FirDownsample2D, FirUpsample2D, KDownsample2D, KUpsample2D, ResnetBlock2D, Upsample2D from diffusers.models.transformer_2d import Transformer2DModel from diffusers.models.dual_transformer_2d import DualTransformer2DModel class CrossAttnDownBlock2DWithFlow(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, flow_channels: int, # Added dropout: float = 0.0, num_layers: int = 1, transformer_layers_per_block: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads=1, cross_attention_dim=1280, output_scale_factor=1.0, downsample_padding=1, add_downsample=True, dual_cross_attention=False, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, ): super().__init__() resnets = [] attentions = [] flow_convs = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) if not dual_cross_attention: attentions.append( Transformer2DModel( num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, ) ) else: attentions.append( DualTransformer2DModel( num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) flow_convs.append( nn.Conv2d( flow_channels, out_channels, kernel_size=3, padding=1, bias=False, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.flow_convs = nn.ModuleList(flow_convs) if add_downsample: self.downsamplers = nn.ModuleList( [ Downsample2D( out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op" ) ] ) else: self.downsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, additional_residuals=None, flow: Optional[torch.FloatTensor] = None, # Added ): output_states = () blocks = list(zip(self.resnets, self.attentions, self.flow_convs)) for i, (resnet, attn, flow_conv) in enumerate(blocks): if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs, ) hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(attn, return_dict=False), hidden_states, encoder_hidden_states, None, # timestep None, # class_labels cross_attention_kwargs, attention_mask, encoder_attention_mask, **ckpt_kwargs, )[0] else: hidden_states = resnet(hidden_states, temb) if flow is not None: hidden_states = hidden_states + flow_conv(flow) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] # apply additional residuals to the output of the last pair of resnet and attention blocks if i == len(blocks) - 1 and additional_residuals is not None: hidden_states = hidden_states + additional_residuals output_states = output_states + (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states = output_states + (hidden_states,) return hidden_states, output_states class UNetMidBlock2DCrossAttnWithFlow(nn.Module): def __init__( self, in_channels: int, temb_channels: int, flow_channels: int, # Added dropout: float = 0.0, num_layers: int = 1, transformer_layers_per_block: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads=1, output_scale_factor=1.0, cross_attention_dim=1280, dual_cross_attention=False, use_linear_projection=False, upcast_attention=False, ): super().__init__() self.has_cross_attention = True self.num_attention_heads = num_attention_heads resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) # there is always at least one resnet resnets = [ ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ] flow_convs = [ nn.Conv2d( flow_channels, in_channels, kernel_size=3, padding=1, bias=False, ) ] attentions = [] for _ in range(num_layers): if not dual_cross_attention: attentions.append( Transformer2DModel( num_attention_heads, in_channels // num_attention_heads, in_channels=in_channels, num_layers=transformer_layers_per_block, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, ) ) else: attentions.append( DualTransformer2DModel( num_attention_heads, in_channels // num_attention_heads, in_channels=in_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) flow_convs.append( nn.Conv2d( flow_channels, in_channels, kernel_size=3, padding=1, bias=False, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.flow_convs = nn.ModuleList(flow_convs) def forward( self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, flow: Optional[torch.FloatTensor] = None, # Added ) -> torch.FloatTensor: hidden_states = self.resnets[0](hidden_states, temb) hidden_states = hidden_states + self.flow_convs[0](flow) for attn, resnet, flow_conv in zip(self.attentions, self.resnets[1:], self.flow_convs[1:]): hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] hidden_states = resnet(hidden_states, temb) hidden_states = hidden_states + flow_conv(flow) return hidden_states class CrossAttnUpBlock2DWithFlow(nn.Module): def __init__( self, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, flow_channels: int, # Added dropout: float = 0.0, num_layers: int = 1, transformer_layers_per_block: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads=1, cross_attention_dim=1280, output_scale_factor=1.0, add_upsample=True, dual_cross_attention=False, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, ): super().__init__() resnets = [] attentions = [] flow_convs = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads for i in range(num_layers): res_skip_channels = in_channels if (i == num_layers - 1) else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) if not dual_cross_attention: attentions.append( Transformer2DModel( num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=transformer_layers_per_block, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, ) ) else: attentions.append( DualTransformer2DModel( num_attention_heads, out_channels // num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) flow_convs.append( nn.Conv2d( flow_channels, out_channels, kernel_size=3, padding=1, bias=False, ) ) self.attentions = nn.ModuleList(attentions) self.resnets = nn.ModuleList(resnets) self.flow_convs = nn.ModuleList(flow_convs) if add_upsample: self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)]) else: self.upsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.FloatTensor, res_hidden_states_tuple: Tuple[torch.FloatTensor, ...], temb: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, upsample_size: Optional[int] = None, attention_mask: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, flow: Optional[torch.FloatTensor] = None, # Added ): for resnet, attn, flow_conv in zip(self.resnets, self.attentions, self.flow_convs): # pop res hidden states res_hidden_states = res_hidden_states_tuple[-1] res_hidden_states_tuple = res_hidden_states_tuple[:-1] hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1) if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(resnet), hidden_states, temb, **ckpt_kwargs, ) hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(attn, return_dict=False), hidden_states, encoder_hidden_states, None, # timestep None, # class_labels cross_attention_kwargs, attention_mask, encoder_attention_mask, **ckpt_kwargs, )[0] else: hidden_states = resnet(hidden_states, temb) hidden_states = hidden_states + flow_conv(flow) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, attention_mask=attention_mask, encoder_attention_mask=encoder_attention_mask, return_dict=False, )[0] if self.upsamplers is not None: for upsampler in self.upsamplers: hidden_states = upsampler(hidden_states, upsample_size) return hidden_states def get_sin_cos_pos_embed(embed_dim: int, x: torch.Tensor): bsz, _ = x.shape x = x.reshape(-1)[:, None] div_term = torch.exp(torch.arange(0, embed_dim, 2) * (-math.log(10000.0) / embed_dim)).to(x.device) pos = x * div_term pos = torch.cat([torch.sin(pos), torch.cos(pos)], dim=-1).reshape(bsz, -1) return pos def get_down_block( with_concatenated_flow: bool = False, *args, **kwargs, ): if not with_concatenated_flow or args[0] == "DownBlock2D": kwargs.pop("flow_channels", None) return gdb(*args, **kwargs) elif args[0] == "CrossAttnDownBlock2D": kwargs.pop("downsample_type", None) kwargs.pop("attention_head_dim", None) kwargs.pop("resnet_skip_time_act", None) kwargs.pop("resnet_out_scale_factor", None) kwargs.pop("cross_attention_norm", None) return CrossAttnDownBlock2DWithFlow(*args[1:], **kwargs) else: raise ValueError(f"Unknown down block type: {args[0]}") def get_up_block( with_concatenated_flow: bool = False, *args, **kwargs, ): if not with_concatenated_flow or args[0] == "UpBlock2D": kwargs.pop("flow_channels", None) return gub(*args, **kwargs) elif args[0] == "CrossAttnUpBlock2D": kwargs.pop("upsample_type", None) kwargs.pop("attention_head_dim", None) kwargs.pop("resnet_skip_time_act", None) kwargs.pop("resnet_out_scale_factor", None) kwargs.pop("cross_attention_norm", None) return CrossAttnUpBlock2DWithFlow(*args[1:], **kwargs) else: raise ValueError(f"Unknown up block type: {args[0]}") logger = logging.get_logger(__name__) # pylint: disable=invalid-name def avg_pool_nd(dims, *args, **kwargs): """ Create a 1D, 2D, or 3D average pooling module. """ if dims == 1: return nn.AvgPool1d(*args, **kwargs) elif dims == 2: return nn.AvgPool2d(*args, **kwargs) elif dims == 3: return nn.AvgPool3d(*args, **kwargs) raise ValueError(f"unsupported dimensions: {dims}") def conv_nd(dims, *args, **kwargs): """ Create a 1D, 2D, or 3D convolution module. """ if dims == 1: return nn.Conv1d(*args, **kwargs) elif dims == 2: return nn.Conv2d(*args, **kwargs) elif dims == 3: return nn.Conv3d(*args, **kwargs) raise ValueError(f"unsupported dimensions: {dims}") class Downsample(nn.Module): """ A downsampling layer with an optional convolution. :param channels: channels in the inputs and outputs. :param use_conv: a bool determining if a convolution is applied. :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then downsampling occurs in the inner-two dimensions. """ def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): super().__init__() self.channels = channels self.out_channels = out_channels or channels self.use_conv = use_conv self.dims = dims stride = 2 if dims != 3 else (1, 2, 2) if use_conv: self.op = conv_nd( dims, self.channels, self.out_channels, 3, stride=stride, padding=padding ) else: assert self.channels == self.out_channels self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) def forward(self, x): assert x.shape[1] == self.channels return self.op(x) class ResnetBlock(nn.Module): def __init__(self, in_c, out_c, down, ksize=3, sk=False, use_conv=True): super().__init__() ps = ksize // 2 if in_c != out_c or sk == False: self.in_conv = nn.Conv2d(in_c, out_c, ksize, 1, ps) else: # print('n_in') self.in_conv = None self.block1 = nn.Conv2d(out_c, out_c, 3, 1, 1) self.act = nn.ReLU() self.block2 = nn.Conv2d(out_c, out_c, ksize, 1, ps) if sk == False: # self.skep = nn.Conv2d(in_c, out_c, ksize, 1, ps) # edit by zhouxiawang self.skep = nn.Conv2d(out_c, out_c, ksize, 1, ps) else: self.skep = None self.down = down if self.down == True: self.down_opt = Downsample(in_c, use_conv=use_conv) def forward(self, x): if self.down == True: x = self.down_opt(x) if self.in_conv is not None: # edit x = self.in_conv(x) h = self.block1(x) h = self.act(h) h = self.block2(h) if self.skep is not None: return h + self.skep(x) else: return h + x class Adapter(nn.Module): def __init__(self, channels=[320, 640, 1280, 1280], nums_rb=3, cin=64, ksize=3, sk=False, use_conv=True): super(Adapter, self).__init__() self.unshuffle = nn.PixelUnshuffle(16) self.channels = channels self.nums_rb = nums_rb self.body = [] for i in range(len(channels)): for j in range(nums_rb): if (i != 0) and (j == 0): self.body.append( ResnetBlock(channels[i - 1], channels[i], down=True, ksize=ksize, sk=sk, use_conv=use_conv)) else: self.body.append( ResnetBlock(channels[i], channels[i], down=False, ksize=ksize, sk=sk, use_conv=use_conv)) self.body = nn.ModuleList(self.body) self.conv_in = nn.Conv2d(cin * 16 * 16, channels[0], 3, 1, 1) def forward(self, x): # unshuffle x = self.unshuffle(x) # extract features features = [] x = self.conv_in(x) for i in range(len(self.channels)): for j in range(self.nums_rb): idx = i * self.nums_rb + j x = self.body[idx](x) features.append(x) return features class OneSidedAttnProcessor: r""" Processor for performing attention-related computations where the key and value are always from the upper half batch """ def __call__( self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None, ): assert encoder_hidden_states is None residual = hidden_states if attn.spatial_norm is not None: hidden_states = attn.spatial_norm(hidden_states, temb) input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) assert batch_size % 2 == 0, "batch size must be even" half_batch_size = batch_size // 2 hidden_states_1, hidden_states_2 = hidden_states.chunk(2, dim=0) attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, half_batch_size) if attn.group_norm is not None: hidden_states_1 = attn.group_norm(hidden_states_1.transpose(1, 2)).transpose(1, 2) hidden_states_2 = attn.group_norm(hidden_states_2.transpose(1, 2)).transpose(1, 2) query_1 = attn.to_q(hidden_states_1) query_2 = attn.to_q(hidden_states_2) key = attn.to_k(hidden_states_1) value = attn.to_v(hidden_states_1) query = torch.cat([query_1, query_2], dim=0) key = torch.cat([key, key], dim=0) value = torch.cat([value, value], dim=0) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states class UNet2DDragConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): r""" A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample shaped output. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). Parameters: sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): Height and width of input/output sample. in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample. out_channels (`int`, *optional*, defaults to 4): Number of channels in the output. center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample. flip_sin_to_cos (`bool`, *optional*, defaults to `False`): Whether to flip the sin to cos in the time embedding. freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): The tuple of downsample blocks to use. mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`): Block type for middle of UNet, it can be either `UNetMidBlock2DCrossAttn` or `UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped. up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`): The tuple of upsample blocks to use. only_cross_attention(`bool` or `Tuple[bool]`, *optional*, default to `False`): Whether to include self-attention in the basic transformer blocks, see [`~models.attention.BasicTransformerBlock`]. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution. mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block. act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. If `None`, normalization and activation layers is skipped in post-processing. norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization. cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280): The dimension of the cross attention features. transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1): The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`], [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`]. encoder_hid_dim (`int`, *optional*, defaults to None): If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim` dimension to `cross_attention_dim`. encoder_hid_dim_type (`str`, *optional*, defaults to `None`): If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`. attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads. num_attention_heads (`int`, *optional*): The number of attention heads. If not defined, defaults to `attention_head_dim` resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`. class_embed_type (`str`, *optional*, defaults to `None`): The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`, `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`. addition_embed_type (`str`, *optional*, defaults to `None`): Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or "text". "text" will use the `TextTimeEmbedding` layer. addition_time_embed_dim: (`int`, *optional*, defaults to `None`): Dimension for the timestep embeddings. num_class_embeds (`int`, *optional*, defaults to `None`): Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing class conditioning with `class_embed_type` equal to `None`. time_embedding_type (`str`, *optional*, defaults to `positional`): The type of position embedding to use for timesteps. Choose from `positional` or `fourier`. time_embedding_dim (`int`, *optional*, defaults to `None`): An optional override for the dimension of the projected time embedding. time_embedding_act_fn (`str`, *optional*, defaults to `None`): Optional activation function to use only once on the time embeddings before they are passed to the rest of the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`. timestep_post_act (`str`, *optional*, defaults to `None`): The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`. time_cond_proj_dim (`int`, *optional*, defaults to `None`): The dimension of `cond_proj` layer in the timestep embedding. conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer. conv_out_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_out` layer. projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when `class_embed_type="projection"`. class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time embeddings with the class embeddings. mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`): Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. If `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Default to `False` otherwise. """ _supports_gradient_checkpointing = True @register_to_config def __init__( self, sample_size: Optional[int] = None, in_channels: int = 4, flow_channels: int = 3, out_channels: int = 4, center_input_sample: bool = False, flip_sin_to_cos: bool = True, freq_shift: int = 0, down_block_types: Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ), mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn", up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"), only_cross_attention: Union[bool, Tuple[bool]] = False, block_out_channels: Tuple[int] = (320, 640, 1280, 1280), layers_per_block: Union[int, Tuple[int]] = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, act_fn: str = "silu", norm_num_groups: Optional[int] = 32, norm_eps: float = 1e-5, cross_attention_dim: Union[int, Tuple[int]] = 1280, transformer_layers_per_block: Union[int, Tuple[int]] = 1, encoder_hid_dim: Optional[int] = None, encoder_hid_dim_type: Optional[str] = None, attention_head_dim: Union[int, Tuple[int]] = 8, num_attention_heads: Optional[Union[int, Tuple[int]]] = None, dual_cross_attention: bool = False, use_linear_projection: bool = False, class_embed_type: Optional[str] = None, addition_embed_type: Optional[str] = None, addition_time_embed_dim: Optional[int] = None, num_class_embeds: Optional[int] = None, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", resnet_skip_time_act: bool = False, resnet_out_scale_factor: int = 1.0, time_embedding_type: str = "positional", time_embedding_dim: Optional[int] = None, time_embedding_act_fn: Optional[str] = None, timestep_post_act: Optional[str] = None, time_cond_proj_dim: Optional[int] = None, conv_in_kernel: int = 3, conv_out_kernel: int = 3, projection_class_embeddings_input_dim: Optional[int] = None, class_embeddings_concat: bool = False, mid_block_only_cross_attention: Optional[bool] = None, cross_attention_norm: Optional[str] = None, addition_embed_type_num_heads=64, # Added clip_embedding_dim: int = 1024, num_clip_in: int = 25, dragging_embedding_dim: int = 256, use_drag_tokens: bool = True, single_drag_token: bool = False, num_drags: int = 10, class_dropout_prob: float = 0.1, flow_original_res: bool = False, flow_size: int = 512, input_concat_dragging: bool = True, attn_concat_dragging: bool = False, flow_multi_resolution_conv: bool = False, flow_in_old_version: bool = True, ): super().__init__() assert input_concat_dragging or attn_concat_dragging or flow_multi_resolution_conv if flow_multi_resolution_conv: assert not attn_concat_dragging and not input_concat_dragging self.sample_size = sample_size self.drag_dropout_prob = class_dropout_prob if num_attention_heads is not None: raise ValueError( "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. num_attention_heads = num_attention_heads or attention_head_dim # Check inputs if len(down_block_types) != len(up_block_types): raise ValueError( f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." ) if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): raise ValueError( f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." ) if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." ) if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." ) if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): raise ValueError( f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." ) # input conv_in_padding = (conv_in_kernel - 1) // 2 self.num_drags = num_drags self.attn_concat_dragging = attn_concat_dragging if self.attn_concat_dragging: self.drag_extra_dim = 4 * self.num_drags self.flow_multi_resolution_conv = flow_multi_resolution_conv if self.flow_multi_resolution_conv: self.flow_adapter = Adapter( channels=block_out_channels[:1] + block_out_channels[:-1], nums_rb=2, cin=3, sk=True, use_conv=False, ) self.input_concat_dragging = input_concat_dragging self.flow_in_old_version = flow_in_old_version if self.input_concat_dragging: if self.flow_in_old_version: self.conv_in_flow = nn.Conv2d( in_channels + flow_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding ) else: self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding ) self.conv_in_flow = nn.Conv2d( flow_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding, bias=False ) else: self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding ) self.flow_original_res = flow_original_res if flow_original_res and self.input_concat_dragging: self.num_flow_down_layers = 0 cur_sample_size = sample_size while flow_size > cur_sample_size: assert flow_size % cur_sample_size == 0 self.num_flow_down_layers += 1 cur_sample_size *= 2 self.flow_preprocess = nn.ModuleList([]) for _ in range(self.num_flow_down_layers): self.flow_preprocess.append(nn.Conv2d( flow_channels, flow_channels, kernel_size=3, padding=1 )) self.flow_proj_act = get_activation(act_fn) self.num_clip_in = num_clip_in self.clip_proj = nn.ModuleList([]) for i in range(num_clip_in): self.clip_proj.append(nn.Linear(clip_embedding_dim, clip_embedding_dim)) self.clip_final = nn.Linear(clip_embedding_dim, cross_attention_dim) self.use_drag_tokens = use_drag_tokens self.single_drag_token = single_drag_token if use_drag_tokens: self.dragging_embedding_dim = dragging_embedding_dim self.drag_proj = nn.Linear(dragging_embedding_dim * 4, dragging_embedding_dim * 4) self.drag_final = nn.Linear(dragging_embedding_dim * 4, cross_attention_dim) self.proj_act = get_activation(act_fn) # time if time_embedding_type == "fourier": time_embed_dim = time_embedding_dim or block_out_channels[0] * 2 if time_embed_dim % 2 != 0: raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.") self.time_proj = GaussianFourierProjection( time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos ) timestep_input_dim = time_embed_dim elif time_embedding_type == "positional": time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] else: raise ValueError( f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`." ) self.time_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn, post_act_fn=timestep_post_act, cond_proj_dim=time_cond_proj_dim, ) if encoder_hid_dim_type is None and encoder_hid_dim is not None: encoder_hid_dim_type = "text_proj" self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") if encoder_hid_dim is None and encoder_hid_dim_type is not None: raise ValueError( f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}." ) if encoder_hid_dim_type == "text_proj": self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) elif encoder_hid_dim_type == "text_image_proj": # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image_proj"` (Kadinsky 2.1)` self.encoder_hid_proj = TextImageProjection( text_embed_dim=encoder_hid_dim, image_embed_dim=cross_attention_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type == "image_proj": # Kandinsky 2.2 self.encoder_hid_proj = ImageProjection( image_embed_dim=encoder_hid_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type is not None: raise ValueError( f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'." ) else: self.encoder_hid_proj = None # class embedding if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == "timestep": self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) elif class_embed_type == "identity": self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) elif class_embed_type == "projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" ) # The projection `class_embed_type` is the same as the timestep `class_embed_type` except # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings # 2. it projects from an arbitrary input dimension. # # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. # As a result, `TimestepEmbedding` can be passed arbitrary vectors. self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif class_embed_type == "simple_projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" ) self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) else: self.class_embedding = None if addition_embed_type == "text": if encoder_hid_dim is not None: text_time_embedding_from_dim = encoder_hid_dim else: text_time_embedding_from_dim = cross_attention_dim self.add_embedding = TextTimeEmbedding( text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads ) elif addition_embed_type == "text_image": # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image"` (Kadinsky 2.1)` self.add_embedding = TextImageTimeEmbedding( text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim ) elif addition_embed_type == "text_time": self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif addition_embed_type == "image": # Kandinsky 2.2 self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type == "image_hint": # Kandinsky 2.2 ControlNet self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type is not None: raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") if time_embedding_act_fn is None: self.time_embed_act = None else: self.time_embed_act = get_activation(time_embedding_act_fn) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): if mid_block_only_cross_attention is None: mid_block_only_cross_attention = only_cross_attention only_cross_attention = [only_cross_attention] * len(down_block_types) if mid_block_only_cross_attention is None: mid_block_only_cross_attention = False if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if class_embeddings_concat: # The time embeddings are concatenated with the class embeddings. The dimension of the # time embeddings passed to the down, middle, and up blocks is twice the dimension of the # regular time embeddings blocks_time_embed_dim = time_embed_dim * 2 else: blocks_time_embed_dim = time_embed_dim # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( self.attn_concat_dragging, down_block_type, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=blocks_time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim[i], num_attention_heads=num_attention_heads[i], downsample_padding=downsample_padding, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, flow_channels=self.drag_extra_dim if self.attn_concat_dragging else None, ) self.down_blocks.append(down_block) # mid if mid_block_type == "UNetMidBlock2DCrossAttn": mid_block_kwargs = dict( transformer_layers_per_block=transformer_layers_per_block[-1], in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim[-1], num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, ) if self.attn_concat_dragging: mid_block_kwargs["flow_channels"] = self.drag_extra_dim mid_block_type += "WithFlow" self.mid_block = eval(mid_block_type)( **mid_block_kwargs ) elif mid_block_type == "UNetMidBlock2DSimpleCrossAttn": raise NotImplementedError self.mid_block = UNetMidBlock2DSimpleCrossAttn( in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, cross_attention_dim=cross_attention_dim[-1], attention_head_dim=attention_head_dim[-1], resnet_groups=norm_num_groups, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, only_cross_attention=mid_block_only_cross_attention, cross_attention_norm=cross_attention_norm, ) elif mid_block_type is None: self.mid_block = None else: raise ValueError(f"unknown mid_block_type : {mid_block_type}") # count how many layers upsample the images self.num_upsamplers = 0 # up reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) reversed_layers_per_block = list(reversed(layers_per_block)) reversed_cross_attention_dim = list(reversed(cross_attention_dim)) reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block)) only_cross_attention = list(reversed(only_cross_attention)) output_channel = reversed_block_out_channels[0] for i, up_block_type in enumerate(up_block_types): is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] # add upsample block for all BUT final layer if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False up_block = get_up_block( self.attn_concat_dragging, up_block_type, num_layers=reversed_layers_per_block[i] + 1, transformer_layers_per_block=reversed_transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=blocks_time_embed_dim, add_upsample=add_upsample, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=reversed_cross_attention_dim[i], num_attention_heads=reversed_num_attention_heads[i], dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, flow_channels=self.drag_extra_dim if self.attn_concat_dragging else None, ) self.up_blocks.append(up_block) prev_output_channel = output_channel # out if norm_num_groups is not None: self.conv_norm_out = nn.GroupNorm( num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps ) self.conv_act = get_activation(act_fn) else: self.conv_norm_out = None self.conv_act = None conv_out_padding = (conv_out_kernel - 1) // 2 self.conv_out = nn.Conv2d( block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding ) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. """ # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "set_processor"): processors[f"{name}.processor"] = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ self.set_attn_processor(AttnProcessor()) def set_attention_slice(self, slice_size): r""" Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor in slices to compute attention in several steps. This is useful for saving some memory in exchange for a small decrease in speed. Args: slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. """ sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) # retrieve number of attention layers for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == "max": # make smallest slice possible slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError( f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." ) for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f"size {size} has to be smaller or equal to {dim}.") # Recursively walk through all the children. # Any children which exposes the set_attention_slice method # gets the message def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, "set_attention_slice"): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D, CrossAttnUpBlock2D, UpBlock2D)): module.gradient_checkpointing = value def _convert_drag_to_concatting_image(self, drag: torch.Tensor, current_resolution: int) -> torch.Tensor: assert self.drag_extra_dim == 4 * self.num_drags bsz = drag.shape[0] concatting_image = -torch.ones(bsz, self.drag_extra_dim, current_resolution, current_resolution) concatting_image = concatting_image.to(drag.device) not_all_zeros = drag.any(dim=-1).repeat_interleave(4, dim=1).unsqueeze(-1).unsqueeze(-1) y_grid, x_grid = torch.meshgrid(torch.arange(current_resolution), torch.arange(current_resolution), indexing="ij") y_grid = y_grid.to(drag.device).unsqueeze(0).unsqueeze(0) # (1, 1, res, res) x_grid = x_grid.to(drag.device).unsqueeze(0).unsqueeze(0) x0 = (drag[..., 0] * current_resolution - 0.5).round().clip(0, current_resolution - 1) x_src = (drag[..., 0] * current_resolution - x0).unsqueeze(-1).unsqueeze(-1) # (bsz, num_drags, 1, 1) x0 = x0.unsqueeze(-1).unsqueeze(-1) x0 = torch.stack([x0, x0, torch.zeros_like(x0) - 1, torch.zeros_like(x0) - 1], dim=2).view(bsz, 4 * self.num_drags, 1, 1) y0 = (drag[..., 1] * current_resolution - 0.5).round().clip(0, current_resolution - 1) y_src = (drag[..., 1] * current_resolution - y0).unsqueeze(-1).unsqueeze(-1) y0 = y0.unsqueeze(-1).unsqueeze(-1) y0 = torch.stack([y0, y0, torch.zeros_like(y0) - 1, torch.zeros_like(y0) - 1], dim=2).view(bsz, 4 * self.num_drags, 1, 1) x1 = (drag[..., 2] * current_resolution - 0.5).round().clip(0, current_resolution - 1) x_tgt = (drag[..., 2] * current_resolution - x1).unsqueeze(-1).unsqueeze(-1) x1 = x1.unsqueeze(-1).unsqueeze(-1) x1 = torch.stack([torch.zeros_like(x1) - 1, torch.zeros_like(x1) - 1, x1, x1], dim=2).view(bsz, 4 * self.num_drags, 1, 1) y1 = (drag[..., 3] * current_resolution - 0.5).round().clip(0, current_resolution - 1) y_tgt = (drag[..., 3] * current_resolution - y1).unsqueeze(-1).unsqueeze(-1) y1 = y1.unsqueeze(-1).unsqueeze(-1) y1 = torch.stack([torch.zeros_like(y1) - 1, torch.zeros_like(y1) - 1, y1, y1], dim=2).view(bsz, 4 * self.num_drags, 1, 1) # assert torch.all(x_src >= 0) and torch.all(x_src <= 1) # assert torch.all(y_src >= 0) and torch.all(y_src <= 1) # assert torch.all(x_tgt >= 0) and torch.all(x_tgt <= 1) # assert torch.all(y_tgt >= 0) and torch.all(y_tgt <= 1) value_image = torch.stack([x_src, y_src, x_tgt, y_tgt], dim=2).view(bsz, 4 * self.num_drags, 1, 1) value_image = value_image.expand(bsz, 4 * self.num_drags, current_resolution, current_resolution) concatting_image[(x_grid == x0) & (y_grid == y0) & not_all_zeros] = value_image[(x_grid == x0) & (y_grid == y0) & not_all_zeros] concatting_image[(x_grid == x1) & (y_grid == y1) & not_all_zeros] = value_image[(x_grid == x1) & (y_grid == y1) & not_all_zeros] return concatting_image def forward( self, # sample: torch.FloatTensor, # timestep: Union[torch.Tensor, float, int], # encoder_hidden_states: torch.Tensor, # class_labels: Optional[torch.Tensor] = None, # timestep_cond: Optional[torch.Tensor] = None, # attention_mask: Optional[torch.Tensor] = None, # cross_attention_kwargs: Optional[Dict[str, Any]] = None, # added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, # down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None, # mid_block_additional_residual: Optional[torch.Tensor] = None, # encoder_attention_mask: Optional[torch.Tensor] = None, # return_dict: bool = True, x: torch.FloatTensor, t: torch.Tensor, x_cond: torch.FloatTensor, x_cond_extra: Optional[torch.Tensor] = None, force_drop_ids: Optional[torch.Tensor] = None, hidden_cls: Optional[torch.Tensor] = None, drags: Optional[torch.Tensor] = None, save_features: bool = False, ) -> torch.Tensor: r""" The [`UNet2DConditionModel`] forward method. Args: sample (`torch.FloatTensor`): The noisy input tensor with the following shape `(batch, channel, height, width)`. timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input. encoder_hidden_states (`torch.FloatTensor`): The encoder hidden states with shape `(batch, sequence_length, feature_dim)`. encoder_attention_mask (`torch.Tensor`): A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias, which adds large negative values to the attention scores corresponding to "discard" tokens. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttnProcessor`]. added_cond_kwargs: (`dict`, *optional*): A kwargs dictionary containin additional embeddings that if specified are added to the embeddings that are passed along to the UNet blocks. Returns: [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`: If `return_dict` is True, an [`~models.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise a `tuple` is returned where the first element is the sample tensor. """ # By default samples have to be AT least a multiple of the overall upsampling factor. # The overall upsampling factor is equal to 2 ** (# num of upsampling layers). # However, the upsampling interpolation output size can be forced to fit any upsampling size # on the fly if necessary. default_overall_up_factor = 2 ** self.num_upsamplers # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor` forward_upsample_size = False upsample_size = None if any(s % default_overall_up_factor != 0 for s in x.shape[-2:]): logger.info("Forward upsample size to force interpolation output size.") forward_upsample_size = True # ensure attention_mask is a bias, and give it a singleton query_tokens dimension # expects mask of shape: # [batch, key_tokens] # adds singleton query_tokens dimension: # [batch, 1, key_tokens] # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes: # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn) # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn) # if attention_mask is not None: # assume that mask is expressed as: # (1 = keep, 0 = discard) # convert mask into a bias that can be added to attention scores: # (keep = +0, discard = -10000.0) # attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 # attention_mask = attention_mask.unsqueeze(1) # convert encoder_attention_mask to a bias the same way we do for attention_mask # if encoder_attention_mask is not None: # encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0 # encoder_attention_mask = encoder_attention_mask.unsqueeze(1) if self.flow_original_res and self.input_concat_dragging: for i in range(self.num_flow_down_layers): x_cond_extra = self.flow_preprocess[i](x_cond_extra) x_cond_extra = self.flow_proj_act(x_cond_extra) x_cond_extra = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)(x_cond_extra) if self.input_concat_dragging: assert x_cond_extra.shape[-1] == x.shape[-1], f"{x_cond_extra.shape} != {x.shape}" bsz, num_drags, drag_dim = drags.shape assert num_drags == self.num_drags if (self.train and self.drag_dropout_prob > 0) or force_drop_ids is not None: if force_drop_ids is None: drop_ids = torch.rand(bsz, device=x_cond_extra.device) < self.drag_dropout_prob else: drop_ids = force_drop_ids == 1 x_cond_extra = torch.where( drop_ids[:, None, None, None].expand_as(x_cond_extra), torch.zeros_like(x_cond_extra), x_cond_extra, ) drags = torch.where( drop_ids[:, None, None].expand_as(drags), torch.zeros_like(drags), drags, ) if not self.input_concat_dragging: sample = torch.cat([x_cond, x], dim=0) else: sample_noised = torch.cat([x, x_cond_extra], dim=1) sample_input = torch.cat([x_cond, torch.zeros_like(x_cond_extra)], dim=1) sample = torch.cat([sample_input, sample_noised], dim=0) drags = torch.cat([torch.zeros_like(drags), drags], dim=0) if self.flow_multi_resolution_conv: x_cond_extra = torch.cat([torch.zeros_like(x_cond_extra), x_cond_extra], dim=0) flow_multi_resolution_features = self.flow_adapter(x_cond_extra) # -1. (new) get encoder_hidden_states if self.use_drag_tokens: assert drag_dim == 4 drags = drags.reshape(-1, 4) drags = get_sin_cos_pos_embed(embed_dim=self.dragging_embedding_dim, x=drags) drags = drags.reshape(-1, num_drags, self.dragging_embedding_dim * 4) drag_states = self.drag_proj(drags) drag_states = self.proj_act(drag_states) drag_states = self.drag_final(drag_states) assert hidden_cls.shape[1] >= self.num_clip_in cls_proj = 0 for i in range(self.num_clip_in): current_cls = hidden_cls[:, -(i+1), :] cls_proj += self.clip_proj[i](current_cls) cls_proj = cls_proj / self.num_clip_in cls_proj = self.proj_act(cls_proj) cls_proj = self.clip_final(cls_proj) if self.use_drag_tokens: if not self.single_drag_token: encoder_hidden_states = torch.cat([drag_states, torch.concat([cls_proj[:, None, :], cls_proj[:, None, :]], dim=0)], dim=1) assert encoder_hidden_states.shape[1] == num_drags + 1 else: encoder_hidden_states = torch.cat([torch.mean(drag_states, dim=1, keepdim=True), torch.concat([cls_proj[:, None, :], cls_proj[:, None, :]], dim=0)], dim=1) assert encoder_hidden_states.shape[1] == 2 else: encoder_hidden_states = cls_proj[:, None, :] assert encoder_hidden_states.shape[1] == 1 encoder_hidden_states = torch.cat([encoder_hidden_states, encoder_hidden_states], dim=0) # 0. center input if necessary assert not self.config.center_input_sample, "center_input_sample is not supported yet." if self.config.center_input_sample: sample = 2 * sample - 1.0 # 1. time timesteps = t if len(timesteps.shape) == 0: timesteps = timesteps[None].to(sample.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timesteps = torch.cat([timesteps, timesteps], dim=0) t_emb = self.time_proj(timesteps) # `Timesteps` does not contain any weights and will always return f32 tensors # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=sample.dtype) emb = self.time_embedding(t_emb, None) aug_emb = None if self.class_embedding is not None: if class_labels is None: raise ValueError("class_labels should be provided when num_class_embeds > 0") if self.config.class_embed_type == "timestep": class_labels = self.time_proj(class_labels) # `Timesteps` does not contain any weights and will always return f32 tensors # there might be better ways to encapsulate this. class_labels = class_labels.to(dtype=sample.dtype) class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype) if self.config.class_embeddings_concat: emb = torch.cat([emb, class_emb], dim=-1) else: emb = emb + class_emb if self.config.addition_embed_type == "text": aug_emb = self.add_embedding(encoder_hidden_states) elif self.config.addition_embed_type == "text_image": # Kandinsky 2.1 - style if "image_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`" ) image_embs = added_cond_kwargs.get("image_embeds") text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states) aug_emb = self.add_embedding(text_embs, image_embs) elif self.config.addition_embed_type == "text_time": # SDXL - style if "text_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`" ) text_embeds = added_cond_kwargs.get("text_embeds") if "time_ids" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`" ) time_ids = added_cond_kwargs.get("time_ids") time_embeds = self.add_time_proj(time_ids.flatten()) time_embeds = time_embeds.reshape((text_embeds.shape[0], -1)) add_embeds = torch.concat([text_embeds, time_embeds], dim=-1) add_embeds = add_embeds.to(emb.dtype) aug_emb = self.add_embedding(add_embeds) elif self.config.addition_embed_type == "image": # Kandinsky 2.2 - style if "image_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`" ) image_embs = added_cond_kwargs.get("image_embeds") aug_emb = self.add_embedding(image_embs) elif self.config.addition_embed_type == "image_hint": # Kandinsky 2.2 - style if "image_embeds" not in added_cond_kwargs or "hint" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`" ) image_embs = added_cond_kwargs.get("image_embeds") hint = added_cond_kwargs.get("hint") aug_emb, hint = self.add_embedding(image_embs, hint) sample = torch.cat([sample, hint], dim=1) emb = emb + aug_emb if aug_emb is not None else emb if self.time_embed_act is not None: emb = self.time_embed_act(emb) if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj": encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states) elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj": # Kadinsky 2.1 - style if "image_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`" ) image_embeds = added_cond_kwargs.get("image_embeds") encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds) elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "image_proj": # Kandinsky 2.2 - style if "image_embeds" not in added_cond_kwargs: raise ValueError( f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`" ) image_embeds = added_cond_kwargs.get("image_embeds") encoder_hidden_states = self.encoder_hid_proj(image_embeds) # 2. pre-process if self.input_concat_dragging: if self.flow_in_old_version: sample = self.conv_in_flow(sample) else: sample_x, sample_flow = torch.split(sample, 4, dim=1) sample_x = self.conv_in(sample_x) sample_flow = self.conv_in_flow(sample_flow) sample = sample_x + sample_flow else: sample = self.conv_in(sample) # 3. down down_block_res_samples = (sample,) for idx, downsample_block in enumerate(self.down_blocks): if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: # For t2i-adapter CrossAttnDownBlock2D additional_residuals = {} down_forward_kwargs = dict( hidden_states=sample if not self.flow_multi_resolution_conv else (sample + flow_multi_resolution_features[idx]), temb=emb, encoder_hidden_states=encoder_hidden_states, attention_mask=None, cross_attention_kwargs=None, encoder_attention_mask=None, **additional_residuals, ) if self.attn_concat_dragging: down_forward_kwargs["flow"] = self._convert_drag_to_concatting_image(drags, sample.shape[-1]) sample, res_samples = downsample_block( **down_forward_kwargs ) else: sample, res_samples = downsample_block( hidden_states=sample if not self.flow_multi_resolution_conv else (sample + flow_multi_resolution_features[idx]), temb=emb ) down_block_res_samples += res_samples # 4. mid if self.mid_block is not None: if self.attn_concat_dragging: sample = self.mid_block( sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=None, cross_attention_kwargs=None, encoder_attention_mask=None, flow=self._convert_drag_to_concatting_image(drags, sample.shape[-1]), ) else: sample = self.mid_block( sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=None, cross_attention_kwargs=None, encoder_attention_mask=None, ) # 5. up for i, upsample_block in enumerate(self.up_blocks): is_final_block = i == len(self.up_blocks) - 1 res_samples = down_block_res_samples[-len(upsample_block.resnets) :] down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] # if we have not reached the final block and need to forward the # upsample size, we do it here if not is_final_block and forward_upsample_size: upsample_size = down_block_res_samples[-1].shape[2:] if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: up_block_forward_kwargs = dict( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, encoder_hidden_states=encoder_hidden_states, attention_mask=None, cross_attention_kwargs=None, encoder_attention_mask=None, ) if self.attn_concat_dragging: up_block_forward_kwargs["flow"] = self._convert_drag_to_concatting_image(drags, sample.shape[-1]) sample = upsample_block( **up_block_forward_kwargs ) else: sample = upsample_block( hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size ) # 6. post-process if self.conv_norm_out: sample = self.conv_norm_out(sample) sample = self.conv_act(sample) sample = self.conv_out(sample) return sample[bsz:] def forward_with_cfg( self, x: torch.FloatTensor, t: torch.Tensor, x_cond: torch.FloatTensor, x_cond_extra: Optional[torch.Tensor] = None, hidden_cls: Optional[torch.Tensor] = None, drags: Optional[torch.Tensor] = None, cfg_scale: float = 1, ) -> torch.Tensor: half = x[: len(x) // 2] combined = torch.cat([half, half], dim=0) force_drop_ids = torch.arange(len(combined), device=combined.device) < len(half) model_out = self.forward(combined, t, x_cond, x_cond_extra, force_drop_ids=force_drop_ids, hidden_cls=hidden_cls, drags=drags) # For exact reproducibility reasons, we apply classifier-free guidance on only # three channels by default. The standard approach to cfg applies it to all channels. # This can be done by uncommenting the following line and commenting-out the line following that. # eps, rest = model_out[:, :3], model_out[:, 3:] eps, rest = model_out[:, :4], model_out[:, 4:] uncond_eps, cond_eps = torch.split(eps, len(eps) // 2, dim=0) half_eps = uncond_eps + cfg_scale * (cond_eps - uncond_eps) eps = torch.cat([half_eps, half_eps], dim=0) return torch.cat([eps, rest], dim=1) @classmethod def from_pretrained_sd(cls, pretrained_model_path, subfolder="unet", unet_additional_kwargs=None, load=True): if subfolder is not None: pretrained_model_path = os.path.join(pretrained_model_path, subfolder) print(f"loading unet's pretrained weights from {pretrained_model_path} ...") config_file = os.path.join(pretrained_model_path, 'config.json') if not os.path.isfile(config_file): raise RuntimeError(f"{config_file} does not exist") with open(config_file, "r") as f: config = json.load(f) config["_class_name"] = cls.__name__ from diffusers.utils import WEIGHTS_NAME one_sided_attn = unet_additional_kwargs.pop("one_sided_attn", True) if unet_additional_kwargs is not None else True model = cls.from_config(config, **unet_additional_kwargs) if unet_additional_kwargs is not None else cls.from_config(config) model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME) if not os.path.isfile(model_file): raise RuntimeError(f"{model_file} does not exist") if load: state_dict = torch.load(model_file, map_location="cpu") m, u = model.load_state_dict(state_dict, strict=False) # Set the attention processor to always take k, v from the input (upper) branch if one_sided_attn: attn_processors_dict={ "down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor": OneSidedAttnProcessor(), "down_blocks.0.attentions.0.transformer_blocks.0.attn2.processor": AttnProcessor(), "down_blocks.0.attentions.1.transformer_blocks.0.attn1.processor": OneSidedAttnProcessor(), "down_blocks.0.attentions.1.transformer_blocks.0.attn2.processor": AttnProcessor(), "down_blocks.1.attentions.0.transformer_blocks.0.attn1.processor": OneSidedAttnProcessor(), "down_blocks.1.attentions.0.transformer_blocks.0.attn2.processor": AttnProcessor(), "down_blocks.1.attentions.1.transformer_blocks.0.attn1.processor": OneSidedAttnProcessor(), "down_blocks.1.attentions.1.transformer_blocks.0.attn2.processor": AttnProcessor(), "down_blocks.2.attentions.0.transformer_blocks.0.attn1.processor": OneSidedAttnProcessor(), "down_blocks.2.attentions.0.transformer_blocks.0.attn2.processor": AttnProcessor(), "down_blocks.2.attentions.1.transformer_blocks.0.attn1.processor": OneSidedAttnProcessor(), "down_blocks.2.attentions.1.transformer_blocks.0.attn2.processor": AttnProcessor(), "up_blocks.1.attentions.0.transformer_blocks.0.attn1.processor": OneSidedAttnProcessor(), "up_blocks.1.attentions.0.transformer_blocks.0.attn2.processor": AttnProcessor(), "up_blocks.1.attentions.1.transformer_blocks.0.attn1.processor": OneSidedAttnProcessor(), "up_blocks.1.attentions.1.transformer_blocks.0.attn2.processor": AttnProcessor(), "up_blocks.1.attentions.2.transformer_blocks.0.attn1.processor": OneSidedAttnProcessor(), "up_blocks.1.attentions.2.transformer_blocks.0.attn2.processor": AttnProcessor(), "up_blocks.2.attentions.0.transformer_blocks.0.attn1.processor": OneSidedAttnProcessor(), "up_blocks.2.attentions.0.transformer_blocks.0.attn2.processor": AttnProcessor(), "up_blocks.2.attentions.1.transformer_blocks.0.attn1.processor": OneSidedAttnProcessor(), "up_blocks.2.attentions.1.transformer_blocks.0.attn2.processor": AttnProcessor(), "up_blocks.2.attentions.2.transformer_blocks.0.attn1.processor": OneSidedAttnProcessor(), "up_blocks.2.attentions.2.transformer_blocks.0.attn2.processor": AttnProcessor(), "up_blocks.3.attentions.0.transformer_blocks.0.attn1.processor": OneSidedAttnProcessor(), "up_blocks.3.attentions.0.transformer_blocks.0.attn2.processor": AttnProcessor(), "up_blocks.3.attentions.1.transformer_blocks.0.attn1.processor": OneSidedAttnProcessor(), "up_blocks.3.attentions.1.transformer_blocks.0.attn2.processor": AttnProcessor(), "up_blocks.3.attentions.2.transformer_blocks.0.attn1.processor": OneSidedAttnProcessor(), "up_blocks.3.attentions.2.transformer_blocks.0.attn2.processor": AttnProcessor(), "mid_block.attentions.0.transformer_blocks.0.attn1.processor": OneSidedAttnProcessor(), "mid_block.attentions.0.transformer_blocks.0.attn2.processor": AttnProcessor(), } model.set_attn_processor(attn_processors_dict) return model