|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from dataclasses import dataclass |
|
from typing import Any, Dict, List, Optional, Tuple, Union |
|
import os |
|
|
|
import torch |
|
import torch.nn as nn |
|
import torch.utils.checkpoint |
|
|
|
from diffusers.configuration_utils import ConfigMixin, register_to_config |
|
from diffusers.loaders import UNet2DConditionLoadersMixin |
|
from diffusers.utils import BaseOutput, logging |
|
from diffusers.models.activations import get_activation |
|
from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor |
|
from diffusers.models.embeddings import ( |
|
GaussianFourierProjection, |
|
ImageHintTimeEmbedding, |
|
ImageProjection, |
|
ImageTimeEmbedding, |
|
TextImageProjection, |
|
TextImageTimeEmbedding, |
|
TextTimeEmbedding, |
|
TimestepEmbedding, |
|
Timesteps, |
|
) |
|
from diffusers.models.modeling_utils import ModelMixin, load_state_dict, _load_state_dict_into_model |
|
from diffusers.models.unets.unet_2d_blocks import ( |
|
CrossAttnDownBlock2D, |
|
CrossAttnUpBlock2D, |
|
DownBlock2D, |
|
UNetMidBlock2DCrossAttn, |
|
UNetMidBlock2DSimpleCrossAttn, |
|
UpBlock2D, |
|
) |
|
from diffusers.utils import ( |
|
CONFIG_NAME, |
|
FLAX_WEIGHTS_NAME, |
|
SAFETENSORS_WEIGHTS_NAME, |
|
WEIGHTS_NAME, |
|
_add_variant, |
|
_get_model_file, |
|
deprecate, |
|
is_torch_version, |
|
|
) |
|
from diffusers.utils.import_utils import is_accelerate_available |
|
from diffusers.utils.hub_utils import HF_HUB_OFFLINE |
|
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE |
|
DIFFUSERS_CACHE = HUGGINGFACE_HUB_CACHE |
|
|
|
from diffusers import __version__ |
|
from .unet_mv2d_blocks import ( |
|
CrossAttnDownBlockMV2D, |
|
CrossAttnUpBlockMV2D, |
|
UNetMidBlockMV2DCrossAttn, |
|
get_down_block, |
|
get_up_block, |
|
) |
|
from einops import rearrange, repeat |
|
|
|
|
|
|
|
|
logger = logging.get_logger(__name__) |
|
|
|
|
|
@dataclass |
|
class UNetMV2DConditionOutput(BaseOutput): |
|
""" |
|
    The output of [`UNetMV2DConditionModel`].
|
|
|
Args: |
|
sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): |
|
The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model. |
|
""" |
|
|
|
sample: torch.FloatTensor = None |
|
|
|
|
|
class ResidualBlock(nn.Module): |
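    """A small residual MLP block: Linear -> SiLU -> Linear with an additive skip connection, followed by SiLU."""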
|
def __init__(self, dim): |
|
super(ResidualBlock, self).__init__() |
|
self.linear1 = nn.Linear(dim, dim) |
|
self.activation = nn.SiLU() |
|
self.linear2 = nn.Linear(dim, dim) |
|
|
|
def forward(self, x): |
|
identity = x |
|
out = self.linear1(x) |
|
out = self.activation(out) |
|
out = self.linear2(out) |
|
out += identity |
|
out = self.activation(out) |
|
return out |
|
|
|
class ResidualLiner(nn.Module): |
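    """A residual MLP head: an input projection, `num_block` ResidualBlocks, and an output projection.

    Used below for the elevation and focal-length regressors.
    """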
|
def __init__(self, in_features, out_features, dim, act=None, num_block=1): |
|
super(ResidualLiner, self).__init__() |
|
self.linear_in = nn.Sequential(nn.Linear(in_features, dim), nn.SiLU()) |
|
|
|
blocks = nn.ModuleList() |
|
for _ in range(num_block): |
|
blocks.append(ResidualBlock(dim)) |
|
self.blocks = blocks |
|
|
|
self.linear_out = nn.Linear(dim, out_features) |
|
self.act = act |
|
|
|
def forward(self, x): |
|
out = self.linear_in(x) |
|
for block in self.blocks: |
|
out = block(out) |
|
out = self.linear_out(out) |
|
if self.act is not None: |
|
out = self.act(out) |
|
return out |
|
|
|
class BasicConvBlock(nn.Module): |
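    """A GroupNorm/SiLU residual convolution block used by the optional `addition_downsample` branch."""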
|
def __init__(self, in_channels, out_channels, stride=1): |
|
super(BasicConvBlock, self).__init__() |
|
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False) |
|
        self.norm1 = nn.GroupNorm(num_groups=8, num_channels=out_channels, affine=True)  # normalizes conv1's output channels
|
self.act = nn.SiLU() |
|
self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False) |
|
        self.norm2 = nn.GroupNorm(num_groups=8, num_channels=out_channels, affine=True)  # normalizes conv2's output channels
|
self.downsample = nn.Sequential() |
|
if stride != 1 or in_channels != out_channels: |
|
self.downsample = nn.Sequential( |
|
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False), |
|
                nn.GroupNorm(num_groups=8, num_channels=out_channels, affine=True)
|
) |
|
|
|
def forward(self, x): |
|
identity = x |
|
out = self.conv1(x) |
|
out = self.norm1(out) |
|
out = self.act(out) |
|
out = self.conv2(out) |
|
out = self.norm2(out) |
|
out += self.downsample(identity) |
|
out = self.act(out) |
|
return out |
|
|
|
class UNetMV2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): |
|
r""" |
|
    A conditional multi-view 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample
|
shaped output. |
|
|
|
    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
|
for all models (such as downloading or saving). |
|
|
|
Parameters: |
|
sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): |
|
Height and width of input/output sample. |
|
in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample. |
|
out_channels (`int`, *optional*, defaults to 4): Number of channels in the output. |
|
center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample. |
|
        flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
|
Whether to flip the sin to cos in the time embedding. |
|
freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. |
|
        down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlockMV2D", "CrossAttnDownBlockMV2D", "CrossAttnDownBlockMV2D", "DownBlock2D")`):
|
The tuple of downsample blocks to use. |
|
        mid_block_type (`str`, *optional*, defaults to `"UNetMidBlockMV2DCrossAttn"`):
            Block type for the middle of the UNet. It can be `UNetMidBlockMV2DCrossAttn`, `UNetMidBlock2DCrossAttn`,
            or `UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped.
|
        up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlockMV2D", "CrossAttnUpBlockMV2D", "CrossAttnUpBlockMV2D")`):
|
The tuple of upsample blocks to use. |
|
        only_cross_attention (`bool` or `Tuple[bool]`, *optional*, defaults to `False`):
|
Whether to include self-attention in the basic transformer blocks, see |
|
[`~models.attention.BasicTransformerBlock`]. |
|
block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): |
|
The tuple of output channels for each block. |
|
layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. |
|
downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution. |
|
mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block. |
|
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. |
|
norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. |
|
            If `None`, normalization and activation layers are skipped in post-processing.
|
norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization. |
|
cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280): |
|
The dimension of the cross attention features. |
|
transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1): |
|
The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for |
|
[`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`], |
|
[`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`]. |
|
encoder_hid_dim (`int`, *optional*, defaults to None): |
|
If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim` |
|
dimension to `cross_attention_dim`. |
|
encoder_hid_dim_type (`str`, *optional*, defaults to `None`): |
|
If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text |
|
            embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`.
|
attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads. |
|
num_attention_heads (`int`, *optional*): |
|
            The number of attention heads. If not defined, defaults to `attention_head_dim`.
|
resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config |
|
for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`. |
|
class_embed_type (`str`, *optional*, defaults to `None`): |
|
The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`, |
|
`"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`. |
|
addition_embed_type (`str`, *optional*, defaults to `None`): |
|
Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or |
|
"text". "text" will use the `TextTimeEmbedding` layer. |
|
        addition_time_embed_dim (`int`, *optional*, defaults to `None`):
|
Dimension for the timestep embeddings. |
|
num_class_embeds (`int`, *optional*, defaults to `None`): |
|
Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing |
|
class conditioning with `class_embed_type` equal to `None`. |
|
time_embedding_type (`str`, *optional*, defaults to `positional`): |
|
The type of position embedding to use for timesteps. Choose from `positional` or `fourier`. |
|
time_embedding_dim (`int`, *optional*, defaults to `None`): |
|
An optional override for the dimension of the projected time embedding. |
|
time_embedding_act_fn (`str`, *optional*, defaults to `None`): |
|
Optional activation function to use only once on the time embeddings before they are passed to the rest of |
|
the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`. |
|
timestep_post_act (`str`, *optional*, defaults to `None`): |
|
The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`. |
|
time_cond_proj_dim (`int`, *optional*, defaults to `None`): |
|
The dimension of `cond_proj` layer in the timestep embedding. |
|
        conv_in_kernel (`int`, *optional*, defaults to `3`): The kernel size of the `conv_in` layer.
        conv_out_kernel (`int`, *optional*, defaults to `3`): The kernel size of the `conv_out` layer.
|
projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when |
|
`class_embed_type="projection"`. Required when `class_embed_type="projection"`. |
|
class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time |
|
embeddings with the class embeddings. |
|
mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`): |
|
Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. If |
|
`only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the |
|
            `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Defaults to `False`
|
otherwise. |
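
    Example:

        A minimal construction sketch. The argument values below are illustrative assumptions, not recommended
        settings:

        ```py
        unet = UNetMV2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            cross_attention_dim=1024,
            num_views=6,
        )
        ```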
|
""" |
|
|
|
_supports_gradient_checkpointing = True |
|
|
|
@register_to_config |
|
def __init__( |
|
self, |
|
sample_size: Optional[int] = None, |
|
in_channels: int = 4, |
|
out_channels: int = 4, |
|
center_input_sample: bool = False, |
|
flip_sin_to_cos: bool = True, |
|
freq_shift: int = 0, |
|
down_block_types: Tuple[str] = ( |
|
"CrossAttnDownBlockMV2D", |
|
"CrossAttnDownBlockMV2D", |
|
"CrossAttnDownBlockMV2D", |
|
"DownBlock2D", |
|
), |
|
mid_block_type: Optional[str] = "UNetMidBlockMV2DCrossAttn", |
|
up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlockMV2D", "CrossAttnUpBlockMV2D", "CrossAttnUpBlockMV2D"), |
|
only_cross_attention: Union[bool, Tuple[bool]] = False, |
|
block_out_channels: Tuple[int] = (320, 640, 1280, 1280), |
|
layers_per_block: Union[int, Tuple[int]] = 2, |
|
downsample_padding: int = 1, |
|
mid_block_scale_factor: float = 1, |
|
act_fn: str = "silu", |
|
norm_num_groups: Optional[int] = 32, |
|
norm_eps: float = 1e-5, |
|
cross_attention_dim: Union[int, Tuple[int]] = 1280, |
|
transformer_layers_per_block: Union[int, Tuple[int]] = 1, |
|
encoder_hid_dim: Optional[int] = None, |
|
encoder_hid_dim_type: Optional[str] = None, |
|
attention_head_dim: Union[int, Tuple[int]] = 8, |
|
num_attention_heads: Optional[Union[int, Tuple[int]]] = None, |
|
dual_cross_attention: bool = False, |
|
use_linear_projection: bool = False, |
|
class_embed_type: Optional[str] = None, |
|
addition_embed_type: Optional[str] = None, |
|
addition_time_embed_dim: Optional[int] = None, |
|
num_class_embeds: Optional[int] = None, |
|
upcast_attention: bool = False, |
|
resnet_time_scale_shift: str = "default", |
|
resnet_skip_time_act: bool = False, |
|
        resnet_out_scale_factor: float = 1.0,
|
time_embedding_type: str = "positional", |
|
time_embedding_dim: Optional[int] = None, |
|
time_embedding_act_fn: Optional[str] = None, |
|
timestep_post_act: Optional[str] = None, |
|
time_cond_proj_dim: Optional[int] = None, |
|
conv_in_kernel: int = 3, |
|
conv_out_kernel: int = 3, |
|
projection_class_embeddings_input_dim: Optional[int] = None, |
|
projection_camera_embeddings_input_dim: Optional[int] = None, |
|
class_embeddings_concat: bool = False, |
|
mid_block_only_cross_attention: Optional[bool] = None, |
|
cross_attention_norm: Optional[str] = None, |
|
addition_embed_type_num_heads=64, |
|
num_views: int = 1, |
|
cd_attention_last: bool = False, |
|
cd_attention_mid: bool = False, |
|
multiview_attention: bool = True, |
|
sparse_mv_attention: bool = False, |
|
selfattn_block: str = "custom", |
|
mvcd_attention: bool = False, |
|
regress_elevation: bool = False, |
|
regress_focal_length: bool = False, |
|
num_regress_blocks: int = 4, |
|
use_dino: bool = False, |
|
addition_downsample: bool = False, |
|
addition_channels: Optional[Tuple[int]] = (1280, 1280, 1280), |
|
): |
|
super().__init__() |
|
|
|
self.sample_size = sample_size |
|
self.num_views = num_views |
|
self.mvcd_attention = mvcd_attention |
|
if num_attention_heads is not None: |
|
raise ValueError( |
|
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
num_attention_heads = num_attention_heads or attention_head_dim |
|
|
|
|
|
if len(down_block_types) != len(up_block_types): |
|
raise ValueError( |
|
f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." |
|
) |
|
|
|
if len(block_out_channels) != len(down_block_types): |
|
raise ValueError( |
|
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." |
|
) |
|
|
|
if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): |
|
raise ValueError( |
|
f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." |
|
) |
|
|
|
if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): |
|
raise ValueError( |
|
f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." |
|
) |
|
|
|
if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): |
|
raise ValueError( |
|
f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." |
|
) |
|
|
|
if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): |
|
raise ValueError( |
|
f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." |
|
) |
|
|
|
if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): |
|
raise ValueError( |
|
f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." |
|
) |
|
|
|
|
|
conv_in_padding = (conv_in_kernel - 1) // 2 |
|
self.conv_in = nn.Conv2d( |
|
in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding |
|
) |
|
|
|
|
|
if time_embedding_type == "fourier": |
|
time_embed_dim = time_embedding_dim or block_out_channels[0] * 2 |
|
if time_embed_dim % 2 != 0: |
|
raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.") |
|
self.time_proj = GaussianFourierProjection( |
|
time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos |
|
) |
|
timestep_input_dim = time_embed_dim |
|
elif time_embedding_type == "positional": |
|
time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 |
|
|
|
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) |
|
timestep_input_dim = block_out_channels[0] |
|
else: |
|
raise ValueError( |
|
f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`." |
|
) |
|
|
|
self.time_embedding = TimestepEmbedding( |
|
timestep_input_dim, |
|
time_embed_dim, |
|
act_fn=act_fn, |
|
post_act_fn=timestep_post_act, |
|
cond_proj_dim=time_cond_proj_dim, |
|
) |
|
|
|
if encoder_hid_dim_type is None and encoder_hid_dim is not None: |
|
encoder_hid_dim_type = "text_proj" |
|
self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) |
|
logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") |
|
|
|
if encoder_hid_dim is None and encoder_hid_dim_type is not None: |
|
raise ValueError( |
|
f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}." |
|
) |
|
|
|
if encoder_hid_dim_type == "text_proj": |
|
self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) |
|
elif encoder_hid_dim_type == "text_image_proj": |
|
|
|
|
|
|
|
self.encoder_hid_proj = TextImageProjection( |
|
text_embed_dim=encoder_hid_dim, |
|
image_embed_dim=cross_attention_dim, |
|
cross_attention_dim=cross_attention_dim, |
|
) |
|
elif encoder_hid_dim_type == "image_proj": |
|
|
|
self.encoder_hid_proj = ImageProjection( |
|
image_embed_dim=encoder_hid_dim, |
|
cross_attention_dim=cross_attention_dim, |
|
) |
|
elif encoder_hid_dim_type is not None: |
|
raise ValueError( |
|
                f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj', 'text_image_proj' or 'image_proj'."
|
) |
|
else: |
|
self.encoder_hid_proj = None |
|
|
|
|
|
if class_embed_type is None and num_class_embeds is not None: |
|
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) |
|
elif class_embed_type == "timestep": |
|
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) |
|
elif class_embed_type == "identity": |
|
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) |
|
elif class_embed_type == "projection": |
|
if projection_class_embeddings_input_dim is None: |
|
raise ValueError( |
|
"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) |
|
elif class_embed_type == "simple_projection": |
|
if projection_class_embeddings_input_dim is None: |
|
raise ValueError( |
|
"`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" |
|
) |
|
self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) |
|
else: |
|
self.class_embedding = None |
|
|
|
if addition_embed_type == "text": |
|
if encoder_hid_dim is not None: |
|
text_time_embedding_from_dim = encoder_hid_dim |
|
else: |
|
text_time_embedding_from_dim = cross_attention_dim |
|
|
|
self.add_embedding = TextTimeEmbedding( |
|
text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads |
|
) |
|
elif addition_embed_type == "text_image": |
|
|
|
|
|
|
|
self.add_embedding = TextImageTimeEmbedding( |
|
text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim |
|
) |
|
elif addition_embed_type == "text_time": |
|
self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) |
|
self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) |
|
elif addition_embed_type == "image": |
|
|
|
self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) |
|
elif addition_embed_type == "image_hint": |
|
|
|
self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) |
|
elif addition_embed_type is not None: |
|
            raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text', 'text_image', 'text_time', 'image' or 'image_hint'.")
|
|
|
if time_embedding_act_fn is None: |
|
self.time_embed_act = None |
|
else: |
|
self.time_embed_act = get_activation(time_embedding_act_fn) |
|
|
|
self.down_blocks = nn.ModuleList([]) |
|
self.up_blocks = nn.ModuleList([]) |
|
|
|
if isinstance(only_cross_attention, bool): |
|
if mid_block_only_cross_attention is None: |
|
mid_block_only_cross_attention = only_cross_attention |
|
|
|
only_cross_attention = [only_cross_attention] * len(down_block_types) |
|
|
|
if mid_block_only_cross_attention is None: |
|
mid_block_only_cross_attention = False |
|
|
|
if isinstance(num_attention_heads, int): |
|
num_attention_heads = (num_attention_heads,) * len(down_block_types) |
|
|
|
if isinstance(attention_head_dim, int): |
|
attention_head_dim = (attention_head_dim,) * len(down_block_types) |
|
|
|
if isinstance(cross_attention_dim, int): |
|
cross_attention_dim = (cross_attention_dim,) * len(down_block_types) |
|
|
|
if isinstance(layers_per_block, int): |
|
layers_per_block = [layers_per_block] * len(down_block_types) |
|
|
|
if isinstance(transformer_layers_per_block, int): |
|
transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) |
|
|
|
if class_embeddings_concat: |
|
|
|
|
|
|
|
blocks_time_embed_dim = time_embed_dim * 2 |
|
else: |
|
blocks_time_embed_dim = time_embed_dim |
|
|
|
|
|
output_channel = block_out_channels[0] |
|
for i, down_block_type in enumerate(down_block_types): |
|
input_channel = output_channel |
|
output_channel = block_out_channels[i] |
|
is_final_block = i == len(block_out_channels) - 1 |
|
|
|
down_block = get_down_block( |
|
down_block_type, |
|
num_layers=layers_per_block[i], |
|
transformer_layers_per_block=transformer_layers_per_block[i], |
|
in_channels=input_channel, |
|
out_channels=output_channel, |
|
temb_channels=blocks_time_embed_dim, |
|
add_downsample=not is_final_block, |
|
resnet_eps=norm_eps, |
|
resnet_act_fn=act_fn, |
|
resnet_groups=norm_num_groups, |
|
cross_attention_dim=cross_attention_dim[i], |
|
num_attention_heads=num_attention_heads[i], |
|
downsample_padding=downsample_padding, |
|
dual_cross_attention=dual_cross_attention, |
|
use_linear_projection=use_linear_projection, |
|
only_cross_attention=only_cross_attention[i], |
|
upcast_attention=upcast_attention, |
|
resnet_time_scale_shift=resnet_time_scale_shift, |
|
resnet_skip_time_act=resnet_skip_time_act, |
|
resnet_out_scale_factor=resnet_out_scale_factor, |
|
cross_attention_norm=cross_attention_norm, |
|
attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, |
|
num_views=num_views, |
|
cd_attention_last=cd_attention_last, |
|
cd_attention_mid=cd_attention_mid, |
|
multiview_attention=multiview_attention, |
|
sparse_mv_attention=sparse_mv_attention, |
|
selfattn_block=selfattn_block, |
|
mvcd_attention=mvcd_attention, |
|
use_dino=use_dino |
|
) |
|
self.down_blocks.append(down_block) |
|
|
|
|
|
if mid_block_type == "UNetMidBlock2DCrossAttn": |
|
self.mid_block = UNetMidBlock2DCrossAttn( |
|
transformer_layers_per_block=transformer_layers_per_block[-1], |
|
in_channels=block_out_channels[-1], |
|
temb_channels=blocks_time_embed_dim, |
|
resnet_eps=norm_eps, |
|
resnet_act_fn=act_fn, |
|
output_scale_factor=mid_block_scale_factor, |
|
resnet_time_scale_shift=resnet_time_scale_shift, |
|
cross_attention_dim=cross_attention_dim[-1], |
|
num_attention_heads=num_attention_heads[-1], |
|
resnet_groups=norm_num_groups, |
|
dual_cross_attention=dual_cross_attention, |
|
use_linear_projection=use_linear_projection, |
|
upcast_attention=upcast_attention, |
|
) |
|
|
|
elif mid_block_type == "UNetMidBlockMV2DCrossAttn": |
|
self.mid_block = UNetMidBlockMV2DCrossAttn( |
|
transformer_layers_per_block=transformer_layers_per_block[-1], |
|
in_channels=block_out_channels[-1], |
|
temb_channels=blocks_time_embed_dim, |
|
resnet_eps=norm_eps, |
|
resnet_act_fn=act_fn, |
|
output_scale_factor=mid_block_scale_factor, |
|
resnet_time_scale_shift=resnet_time_scale_shift, |
|
cross_attention_dim=cross_attention_dim[-1], |
|
num_attention_heads=num_attention_heads[-1], |
|
resnet_groups=norm_num_groups, |
|
dual_cross_attention=dual_cross_attention, |
|
use_linear_projection=use_linear_projection, |
|
upcast_attention=upcast_attention, |
|
num_views=num_views, |
|
cd_attention_last=cd_attention_last, |
|
cd_attention_mid=cd_attention_mid, |
|
multiview_attention=multiview_attention, |
|
sparse_mv_attention=sparse_mv_attention, |
|
selfattn_block=selfattn_block, |
|
mvcd_attention=mvcd_attention, |
|
use_dino=use_dino |
|
) |
|
elif mid_block_type == "UNetMidBlock2DSimpleCrossAttn": |
|
self.mid_block = UNetMidBlock2DSimpleCrossAttn( |
|
in_channels=block_out_channels[-1], |
|
temb_channels=blocks_time_embed_dim, |
|
resnet_eps=norm_eps, |
|
resnet_act_fn=act_fn, |
|
output_scale_factor=mid_block_scale_factor, |
|
cross_attention_dim=cross_attention_dim[-1], |
|
attention_head_dim=attention_head_dim[-1], |
|
resnet_groups=norm_num_groups, |
|
resnet_time_scale_shift=resnet_time_scale_shift, |
|
skip_time_act=resnet_skip_time_act, |
|
only_cross_attention=mid_block_only_cross_attention, |
|
cross_attention_norm=cross_attention_norm, |
|
) |
|
elif mid_block_type is None: |
|
self.mid_block = None |
|
else: |
|
raise ValueError(f"unknown mid_block_type : {mid_block_type}") |
|
|
|
self.addition_downsample = addition_downsample |
|
if self.addition_downsample: |
|
inc = block_out_channels[-1] |
|
self.downsample = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) |
|
self.conv_block = nn.ModuleList() |
|
            self.conv_block.append(BasicConvBlock(inc, addition_channels[0], stride=1))
            # Chain intermediate blocks so each block's input width matches the previous block's output width.
            for in_dim, out_dim in zip(addition_channels[:-2], addition_channels[1:-1]):
                self.conv_block.append(BasicConvBlock(in_dim, out_dim, stride=1))
            self.conv_block.append(BasicConvBlock(addition_channels[-2], inc))
|
self.addition_conv_out = nn.Conv2d(inc, inc, kernel_size=1, bias=False) |
|
nn.init.zeros_(self.addition_conv_out.weight.data) |
|
self.addition_act_out = nn.SiLU() |
|
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) |
|
|
|
self.regress_elevation = regress_elevation |
|
self.regress_focal_length = regress_focal_length |
|
if regress_elevation or regress_focal_length: |
|
self.pool = nn.AdaptiveAvgPool2d((1, 1)) |
|
self.camera_embedding = TimestepEmbedding(projection_camera_embeddings_input_dim, time_embed_dim=time_embed_dim) |
|
|
|
        regress_in_dim = block_out_channels[-1] * 2 if mvcd_attention else block_out_channels[-1]
|
|
|
if regress_elevation: |
|
self.elevation_regressor = ResidualLiner(regress_in_dim, 1, 1280, act=None, num_block=num_regress_blocks) |
|
if regress_focal_length: |
|
self.focal_regressor = ResidualLiner(regress_in_dim, 1, 1280, act=None, num_block=num_regress_blocks) |
|
''' |
|
self.regress_elevation = regress_elevation |
|
self.regress_focal_length = regress_focal_length |
|
if regress_elevation and (not regress_focal_length): |
|
print("Regressing elevation") |
|
cam_dim = 1 |
|
elif regress_focal_length and (not regress_elevation): |
|
print("Regressing focal length") |
|
cam_dim = 6 |
|
elif regress_elevation and regress_focal_length: |
|
print("Regressing both elevation and focal length") |
|
cam_dim = 7 |
|
else: |
|
cam_dim = 0 |
|
assert projection_camera_embeddings_input_dim == 2*cam_dim, "projection_camera_embeddings_input_dim should be 2*cam_dim" |
|
if regress_elevation or regress_focal_length: |
|
self.elevation_regressor = nn.ModuleList([ |
|
nn.Linear(block_out_channels[-1], 1280), |
|
nn.SiLU(), |
|
nn.Linear(1280, 1280), |
|
nn.SiLU(), |
|
nn.Linear(1280, cam_dim) |
|
]) |
|
self.pool = nn.AdaptiveAvgPool2d((1, 1)) |
|
self.focal_act = nn.Softmax(dim=-1) |
|
self.camera_embedding = TimestepEmbedding(projection_camera_embeddings_input_dim, time_embed_dim=time_embed_dim) |
|
''' |
|
|
|
|
|
self.num_upsamplers = 0 |
|
|
|
|
|
reversed_block_out_channels = list(reversed(block_out_channels)) |
|
reversed_num_attention_heads = list(reversed(num_attention_heads)) |
|
reversed_layers_per_block = list(reversed(layers_per_block)) |
|
reversed_cross_attention_dim = list(reversed(cross_attention_dim)) |
|
reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block)) |
|
only_cross_attention = list(reversed(only_cross_attention)) |
|
|
|
output_channel = reversed_block_out_channels[0] |
|
for i, up_block_type in enumerate(up_block_types): |
|
is_final_block = i == len(block_out_channels) - 1 |
|
|
|
prev_output_channel = output_channel |
|
output_channel = reversed_block_out_channels[i] |
|
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] |
|
|
|
|
|
if not is_final_block: |
|
add_upsample = True |
|
self.num_upsamplers += 1 |
|
else: |
|
add_upsample = False |
|
|
|
up_block = get_up_block( |
|
up_block_type, |
|
num_layers=reversed_layers_per_block[i] + 1, |
|
transformer_layers_per_block=reversed_transformer_layers_per_block[i], |
|
in_channels=input_channel, |
|
out_channels=output_channel, |
|
prev_output_channel=prev_output_channel, |
|
temb_channels=blocks_time_embed_dim, |
|
add_upsample=add_upsample, |
|
resnet_eps=norm_eps, |
|
resnet_act_fn=act_fn, |
|
resnet_groups=norm_num_groups, |
|
cross_attention_dim=reversed_cross_attention_dim[i], |
|
num_attention_heads=reversed_num_attention_heads[i], |
|
dual_cross_attention=dual_cross_attention, |
|
use_linear_projection=use_linear_projection, |
|
only_cross_attention=only_cross_attention[i], |
|
upcast_attention=upcast_attention, |
|
resnet_time_scale_shift=resnet_time_scale_shift, |
|
resnet_skip_time_act=resnet_skip_time_act, |
|
resnet_out_scale_factor=resnet_out_scale_factor, |
|
cross_attention_norm=cross_attention_norm, |
|
attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, |
|
num_views=num_views, |
|
cd_attention_last=cd_attention_last, |
|
cd_attention_mid=cd_attention_mid, |
|
multiview_attention=multiview_attention, |
|
sparse_mv_attention=sparse_mv_attention, |
|
selfattn_block=selfattn_block, |
|
mvcd_attention=mvcd_attention, |
|
use_dino=use_dino |
|
) |
|
self.up_blocks.append(up_block) |
|
prev_output_channel = output_channel |
|
|
|
|
|
if norm_num_groups is not None: |
|
self.conv_norm_out = nn.GroupNorm( |
|
num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps |
|
) |
|
|
|
self.conv_act = get_activation(act_fn) |
|
|
|
else: |
|
self.conv_norm_out = None |
|
self.conv_act = None |
|
|
|
conv_out_padding = (conv_out_kernel - 1) // 2 |
|
self.conv_out = nn.Conv2d( |
|
block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding |
|
) |
|
|
|
@property |
|
def attn_processors(self) -> Dict[str, AttentionProcessor]: |
|
r""" |
|
Returns: |
|
            `dict` of attention processors: A dictionary containing all attention processors used in the model,
            indexed by their weight names.
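
        Example:

            A minimal sketch that lists every attention processor, assuming the model is already instantiated
            as `unet`:

            ```py
            for name, processor in unet.attn_processors.items():
                print(name, type(processor).__name__)
            ```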
|
""" |
|
|
|
processors = {} |
|
|
|
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): |
|
if hasattr(module, "set_processor"): |
|
processors[f"{name}.processor"] = module.processor |
|
|
|
for sub_name, child in module.named_children(): |
|
fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) |
|
|
|
return processors |
|
|
|
for name, module in self.named_children(): |
|
fn_recursive_add_processors(name, module, processors) |
|
|
|
return processors |
|
|
|
def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): |
|
r""" |
|
Sets the attention processor to use to compute attention. |
|
|
|
Parameters: |
|
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): |
|
The instantiated processor class or a dictionary of processor classes that will be set as the processor |
|
for **all** `Attention` layers. |
|
|
|
If `processor` is a dict, the key needs to define the path to the corresponding cross attention |
|
processor. This is strongly recommended when setting trainable attention processors. |
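
        Example:

            A minimal sketch that assigns the default processor to every attention layer (equivalent to calling
            [`~UNetMV2DConditionModel.set_default_attn_processor`]); assumes the model is already instantiated
            as `unet`:

            ```py
            from diffusers.models.attention_processor import AttnProcessor

            unet.set_attn_processor(AttnProcessor())
            ```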
|
|
|
""" |
|
count = len(self.attn_processors.keys()) |
|
|
|
if isinstance(processor, dict) and len(processor) != count: |
|
raise ValueError( |
|
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" |
|
f" number of attention layers: {count}. Please make sure to pass {count} processor classes." |
|
) |
|
|
|
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): |
|
if hasattr(module, "set_processor"): |
|
if not isinstance(processor, dict): |
|
module.set_processor(processor) |
|
else: |
|
module.set_processor(processor.pop(f"{name}.processor")) |
|
|
|
for sub_name, child in module.named_children(): |
|
fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) |
|
|
|
for name, module in self.named_children(): |
|
fn_recursive_attn_processor(name, module, processor) |
|
|
|
def set_default_attn_processor(self): |
|
""" |
|
Disables custom attention processors and sets the default attention implementation. |
|
""" |
|
self.set_attn_processor(AttnProcessor()) |
|
|
|
def set_attention_slice(self, slice_size): |
|
r""" |
|
Enable sliced attention computation. |
|
|
|
When this option is enabled, the attention module splits the input tensor in slices to compute attention in |
|
several steps. This is useful for saving some memory in exchange for a small decrease in speed. |
|
|
|
Args: |
|
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): |
|
When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If |
|
`"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is |
|
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` |
|
must be a multiple of `slice_size`. |
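
        Example:

            A minimal sketch, assuming the model is already instantiated as `unet`:

            ```py
            unet.set_attention_slice("auto")
            ```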
|
""" |
|
sliceable_head_dims = [] |
|
|
|
def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): |
|
if hasattr(module, "set_attention_slice"): |
|
sliceable_head_dims.append(module.sliceable_head_dim) |
|
|
|
for child in module.children(): |
|
fn_recursive_retrieve_sliceable_dims(child) |
|
|
|
|
|
for module in self.children(): |
|
fn_recursive_retrieve_sliceable_dims(module) |
|
|
|
num_sliceable_layers = len(sliceable_head_dims) |
|
|
|
if slice_size == "auto": |
|
|
|
|
|
slice_size = [dim // 2 for dim in sliceable_head_dims] |
|
elif slice_size == "max": |
|
|
|
slice_size = num_sliceable_layers * [1] |
|
|
|
slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size |
|
|
|
if len(slice_size) != len(sliceable_head_dims): |
|
raise ValueError( |
|
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" |
|
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." |
|
) |
|
|
|
for i in range(len(slice_size)): |
|
size = slice_size[i] |
|
dim = sliceable_head_dims[i] |
|
if size is not None and size > dim: |
|
raise ValueError(f"size {size} has to be smaller or equal to {dim}.") |
|
|
|
|
|
|
|
|
|
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): |
|
if hasattr(module, "set_attention_slice"): |
|
module.set_attention_slice(slice_size.pop()) |
|
|
|
for child in module.children(): |
|
fn_recursive_set_attention_slice(child, slice_size) |
|
|
|
reversed_slice_size = list(reversed(slice_size)) |
|
for module in self.children(): |
|
fn_recursive_set_attention_slice(module, reversed_slice_size) |
|
|
|
def _set_gradient_checkpointing(self, module, value=False): |
|
if isinstance(module, (CrossAttnDownBlock2D, CrossAttnDownBlockMV2D, DownBlock2D, CrossAttnUpBlock2D, CrossAttnUpBlockMV2D, UpBlock2D)): |
|
module.gradient_checkpointing = value |
|
|
|
def forward( |
|
self, |
|
sample: torch.FloatTensor, |
|
timestep: Union[torch.Tensor, float, int], |
|
encoder_hidden_states: torch.Tensor, |
|
class_labels: Optional[torch.Tensor] = None, |
|
timestep_cond: Optional[torch.Tensor] = None, |
|
attention_mask: Optional[torch.Tensor] = None, |
|
cross_attention_kwargs: Optional[Dict[str, Any]] = None, |
|
added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, |
|
down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None, |
|
mid_block_additional_residual: Optional[torch.Tensor] = None, |
|
encoder_attention_mask: Optional[torch.Tensor] = None, |
|
dino_feature: Optional[torch.Tensor] = None, |
|
return_dict: bool = True, |
|
vis_max_min: bool = False, |
|
) -> Union[UNetMV2DConditionOutput, Tuple]: |
|
r""" |
|
        The [`UNetMV2DConditionModel`] forward method.
|
|
|
Args: |
|
sample (`torch.FloatTensor`): |
|
The noisy input tensor with the following shape `(batch, channel, height, width)`. |
|
timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input. |
|
encoder_hidden_states (`torch.FloatTensor`): |
|
The encoder hidden states with shape `(batch, sequence_length, feature_dim)`. |
|
encoder_attention_mask (`torch.Tensor`): |
|
A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If |
|
`True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias, |
|
which adds large negative values to the attention scores corresponding to "discard" tokens. |
|
return_dict (`bool`, *optional*, defaults to `True`): |
|
Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain |
|
tuple. |
|
cross_attention_kwargs (`dict`, *optional*): |
|
A kwargs dictionary that if specified is passed along to the [`AttnProcessor`]. |
|
            added_cond_kwargs (`dict`, *optional*):
                A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
|
are passed along to the UNet blocks. |
|
|
|
Returns: |
|
[`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`: |
|
If `return_dict` is True, an [`~models.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise |
|
a `tuple` is returned where the first element is the sample tensor. |
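
        Example:

            An illustrative sketch of a single denoising call. The shapes below are assumptions for a 6-view
            batch of 32x32 latents; the last dimension of `encoder_hidden_states` must match the model's
            `cross_attention_dim`:

            ```py
            import torch

            sample = torch.randn(6, 4, 32, 32)
            timestep = torch.tensor([10])
            encoder_hidden_states = torch.randn(6, 77, 1280)

            noise_pred = unet(sample, timestep, encoder_hidden_states, return_dict=False)[0]
            ```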
|
""" |
|
record_max_min = {} |
|
|
|
|
|
|
|
|
|
default_overall_up_factor = 2**self.num_upsamplers |
|
|
|
|
|
forward_upsample_size = False |
|
upsample_size = None |
|
|
|
if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]): |
|
logger.info("Forward upsample size to force interpolation output size.") |
|
forward_upsample_size = True |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if attention_mask is not None: |
|
|
|
|
|
|
|
|
|
attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 |
|
attention_mask = attention_mask.unsqueeze(1) |
|
|
|
|
|
if encoder_attention_mask is not None: |
|
encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0 |
|
encoder_attention_mask = encoder_attention_mask.unsqueeze(1) |
|
|
|
|
|
if self.config.center_input_sample: |
|
sample = 2 * sample - 1.0 |
|
if vis_max_min: record_max_min["sample"] = (sample.min().detach().float().cpu().numpy().tolist(), sample.max().detach().float().cpu().numpy().tolist()) |
|
|
|
timesteps = timestep |
|
if not torch.is_tensor(timesteps): |
|
|
|
|
|
is_mps = sample.device.type == "mps" |
|
if isinstance(timestep, float): |
|
dtype = torch.float32 if is_mps else torch.float64 |
|
else: |
|
dtype = torch.int32 if is_mps else torch.int64 |
|
timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) |
|
elif len(timesteps.shape) == 0: |
|
timesteps = timesteps[None].to(sample.device) |
|
|
|
|
|
timesteps = timesteps.expand(sample.shape[0]) |
|
|
|
t_emb = self.time_proj(timesteps) |
|
|
|
|
|
|
|
|
|
t_emb = t_emb.to(dtype=sample.dtype) |
|
|
|
emb = self.time_embedding(t_emb, timestep_cond) |
|
aug_emb = None |
|
if vis_max_min: record_max_min["t_emb"] = (t_emb.min().detach().float().cpu().numpy().tolist(), t_emb.max().detach().float().cpu().numpy().tolist()) |
|
if self.class_embedding is not None: |
|
if class_labels is None: |
|
raise ValueError("class_labels should be provided when num_class_embeds > 0") |
|
|
|
if self.config.class_embed_type == "timestep": |
|
class_labels = self.time_proj(class_labels) |
|
|
|
|
|
|
|
class_labels = class_labels.to(dtype=sample.dtype) |
|
|
|
class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype) |
|
if vis_max_min: record_max_min["class_emb"] = (class_emb.min().detach().float().cpu().numpy().tolist(), class_emb.max().detach().float().cpu().numpy().tolist()) |
|
if self.config.class_embeddings_concat: |
|
emb = torch.cat([emb, class_emb], dim=-1) |
|
else: |
|
emb = emb + class_emb |
|
|
|
if self.config.addition_embed_type == "text": |
|
aug_emb = self.add_embedding(encoder_hidden_states) |
|
elif self.config.addition_embed_type == "text_image": |
|
|
|
if "image_embeds" not in added_cond_kwargs: |
|
raise ValueError( |
|
f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`" |
|
) |
|
|
|
image_embs = added_cond_kwargs.get("image_embeds") |
|
text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states) |
|
aug_emb = self.add_embedding(text_embs, image_embs) |
|
elif self.config.addition_embed_type == "text_time": |
|
|
|
if "text_embeds" not in added_cond_kwargs: |
|
raise ValueError( |
|
f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`" |
|
) |
|
text_embeds = added_cond_kwargs.get("text_embeds") |
|
if "time_ids" not in added_cond_kwargs: |
|
raise ValueError( |
|
f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`" |
|
) |
|
time_ids = added_cond_kwargs.get("time_ids") |
|
time_embeds = self.add_time_proj(time_ids.flatten()) |
|
time_embeds = time_embeds.reshape((text_embeds.shape[0], -1)) |
|
|
|
add_embeds = torch.concat([text_embeds, time_embeds], dim=-1) |
|
add_embeds = add_embeds.to(emb.dtype) |
|
aug_emb = self.add_embedding(add_embeds) |
|
elif self.config.addition_embed_type == "image": |
|
|
|
if "image_embeds" not in added_cond_kwargs: |
|
raise ValueError( |
|
f"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`" |
|
) |
|
image_embs = added_cond_kwargs.get("image_embeds") |
|
aug_emb = self.add_embedding(image_embs) |
|
elif self.config.addition_embed_type == "image_hint": |
|
|
|
if "image_embeds" not in added_cond_kwargs or "hint" not in added_cond_kwargs: |
|
raise ValueError( |
|
f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`" |
|
) |
|
image_embs = added_cond_kwargs.get("image_embeds") |
|
hint = added_cond_kwargs.get("hint") |
|
aug_emb, hint = self.add_embedding(image_embs, hint) |
|
sample = torch.cat([sample, hint], dim=1) |
|
|
|
emb = emb + aug_emb if aug_emb is not None else emb |
|
if aug_emb is not None and vis_max_min: record_max_min["aug_emb"] = (aug_emb.min().detach().float().cpu().numpy().tolist(), aug_emb.max().detach().float().cpu().numpy().tolist()) |
|
emb_pre_act = emb |
|
if self.time_embed_act is not None: |
|
emb = self.time_embed_act(emb) |
|
|
|
if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj": |
|
encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states) |
|
elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj": |
|
|
|
if "image_embeds" not in added_cond_kwargs: |
|
raise ValueError( |
|
f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`" |
|
) |
|
|
|
image_embeds = added_cond_kwargs.get("image_embeds") |
|
encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds) |
|
elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "image_proj": |
|
|
|
if "image_embeds" not in added_cond_kwargs: |
|
raise ValueError( |
|
f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`" |
|
) |
|
image_embeds = added_cond_kwargs.get("image_embeds") |
|
encoder_hidden_states = self.encoder_hid_proj(image_embeds) |
|
|
|
sample = self.conv_in(sample) |
|
if vis_max_min: record_max_min["conv_in"] = (sample.min().detach().float().cpu().numpy().tolist(), sample.max().detach().float().cpu().numpy().tolist()) |
|
|
|
|
|
is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None |
|
is_adapter = mid_block_additional_residual is None and down_block_additional_residuals is not None |
|
|
|
down_block_res_samples = (sample,) |
|
for i, downsample_block in enumerate(self.down_blocks): |
|
if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: |
|
|
|
additional_residuals = {} |
|
if is_adapter and len(down_block_additional_residuals) > 0: |
|
additional_residuals["additional_residuals"] = down_block_additional_residuals.pop(0) |
|
|
|
sample, res_samples = downsample_block( |
|
hidden_states=sample, |
|
temb=emb, |
|
encoder_hidden_states=encoder_hidden_states, |
|
dino_feature=dino_feature, |
|
attention_mask=attention_mask, |
|
cross_attention_kwargs=cross_attention_kwargs, |
|
encoder_attention_mask=encoder_attention_mask, |
|
**additional_residuals, |
|
) |
|
else: |
|
sample, res_samples = downsample_block(hidden_states=sample, temb=emb) |
|
|
|
if is_adapter and len(down_block_additional_residuals) > 0: |
|
sample += down_block_additional_residuals.pop(0) |
|
|
|
down_block_res_samples += res_samples |
|
if vis_max_min: record_max_min[f"down_block_{i}"] = (sample.min().detach().float().cpu().numpy().tolist(), sample.max().detach().float().cpu().numpy().tolist()) |
|
|
|
if is_controlnet: |
|
new_down_block_res_samples = () |
|
|
|
for down_block_res_sample, down_block_additional_residual in zip( |
|
down_block_res_samples, down_block_additional_residuals |
|
): |
|
down_block_res_sample = down_block_res_sample + down_block_additional_residual |
|
new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,) |
|
|
|
down_block_res_samples = new_down_block_res_samples |
|
|
|
if self.addition_downsample: |
|
global_sample = sample |
|
global_sample = self.downsample(global_sample) |
|
for layer in self.conv_block: |
|
global_sample = layer(global_sample) |
|
global_sample = self.addition_act_out(self.addition_conv_out(global_sample)) |
|
global_sample = self.upsample(global_sample) |
|
if vis_max_min: record_max_min["global_sample"] = (global_sample.min().detach().float().cpu().numpy().tolist(), global_sample.max().detach().float().cpu().numpy().tolist()) |
|
|
|
if self.mid_block is not None: |
|
sample = self.mid_block( |
|
sample, |
|
emb, |
|
encoder_hidden_states=encoder_hidden_states, |
|
dino_feature=dino_feature, |
|
attention_mask=attention_mask, |
|
cross_attention_kwargs=cross_attention_kwargs, |
|
encoder_attention_mask=encoder_attention_mask, |
|
) |
|
if vis_max_min: record_max_min["mid_block"] = (sample.min().detach().float().cpu().numpy().tolist(), sample.max().detach().float().cpu().numpy().tolist()) |
|
|
|
|
|
|
|
if self.regress_elevation or self.regress_focal_length: |
|
pool_embeds = self.pool(sample.detach()).squeeze(-1).squeeze(-1) |
|
if self.mvcd_attention: |
|
pool_embeds_normal, pool_embeds_color = torch.chunk(pool_embeds, 2, dim=0) |
|
pool_embeds = torch.cat([pool_embeds_normal, pool_embeds_color], dim=-1) |
|
pose_pred = [] |
|
if self.regress_elevation: |
|
ele_pred = self.elevation_regressor(pool_embeds) |
|
ele_pred = rearrange(ele_pred, '(b v) c -> b v c', v=self.num_views) |
|
ele_pred = torch.mean(ele_pred, dim=1) |
|
pose_pred.append(ele_pred) |
|
if vis_max_min: record_max_min["ele_pred"] = (ele_pred.min().detach().float().cpu().numpy().tolist(), ele_pred.max().detach().float().cpu().numpy().tolist()) |
|
|
|
if self.regress_focal_length: |
|
focal_pred = self.focal_regressor(pool_embeds) |
|
focal_pred = rearrange(focal_pred, '(b v) c -> b v c', v=self.num_views) |
|
focal_pred = torch.mean(focal_pred, dim=1) |
|
pose_pred.append(focal_pred) |
|
if vis_max_min: record_max_min["focal_pred"] = (focal_pred.min().detach().float().cpu().numpy().tolist(), focal_pred.max().detach().float().cpu().numpy().tolist()) |
|
pose_pred = torch.cat(pose_pred, dim=-1) |
|
|
|
pose_embeds = torch.cat([ |
|
torch.sin(pose_pred), |
|
torch.cos(pose_pred) |
|
], dim=-1) |
|
pose_embeds = self.camera_embedding(pose_embeds) |
|
pose_embeds = torch.repeat_interleave(pose_embeds, self.num_views, 0) |
|
if vis_max_min: record_max_min["pose_embeds"] = (pose_embeds.min().detach().float().cpu().numpy().tolist(), pose_embeds.max().detach().float().cpu().numpy().tolist()) |
|
if self.mvcd_attention: |
|
pose_embeds = torch.cat([pose_embeds,] * 2, dim=0) |
|
|
|
emb = pose_embeds + emb_pre_act |
|
if self.time_embed_act is not None: |
|
emb = self.time_embed_act(emb) |
|
|
|
''' |
|
if self.regress_elevation or self.regress_focal_length: |
|
pose_pred = self.pool(sample.detach()).squeeze(-1).squeeze(-1) # (B, C) |
|
|
|
for liner in self.elevation_regressor: |
|
pose_pred = liner(pose_pred) |
|
|
|
pose_pred = torch.cat([ |
|
pose_pred[:, 0:1], |
|
self.focal_act(pose_pred[:, 1:]) |
|
], dim=-1) |
|
# 'e_de_da_sincos', (B, 2) |
|
pose_embeds = torch.cat([ |
|
torch.sin(pose_pred), |
|
torch.cos(pose_pred) |
|
], dim=-1) |
|
pose_embeds = self.camera_embedding(pose_embeds) |
|
emb = pose_embeds + emb_pre_act |
|
if self.time_embed_act is not None: |
|
emb = self.time_embed_act(emb) |
|
''' |
|
if is_controlnet: |
|
sample = sample + mid_block_additional_residual |
|
|
|
if self.addition_downsample: |
|
sample = sample + global_sample |
|
|
|
|
|
for i, upsample_block in enumerate(self.up_blocks): |
|
is_final_block = i == len(self.up_blocks) - 1 |
|
|
|
res_samples = down_block_res_samples[-len(upsample_block.resnets) :] |
|
down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)] |
|
|
|
|
|
|
|
if not is_final_block and forward_upsample_size: |
|
upsample_size = down_block_res_samples[-1].shape[2:] |
|
|
|
if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention: |
|
sample = upsample_block( |
|
hidden_states=sample, |
|
temb=emb, |
|
res_hidden_states_tuple=res_samples, |
|
encoder_hidden_states=encoder_hidden_states, |
|
dino_feature=dino_feature, |
|
cross_attention_kwargs=cross_attention_kwargs, |
|
upsample_size=upsample_size, |
|
attention_mask=attention_mask, |
|
encoder_attention_mask=encoder_attention_mask, |
|
) |
|
else: |
|
sample = upsample_block( |
|
hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size |
|
) |
|
if vis_max_min: record_max_min[f"upsample_block_{i}"] = (torch.abs(sample.min().detach().float()).cpu().numpy().tolist(), sample.max().detach().float().cpu().numpy().tolist()) |
|
up_s = sample |
|
if torch.isnan(sample).any() or torch.isinf(sample).any(): |
|
print("NAN in sample, stop training.") |
|
exit() |
|
|
|
if self.conv_norm_out: |
|
sample = self.conv_norm_out(sample) |
|
if vis_max_min: record_max_min[f"conv_norm_out"] = (sample.min().detach().float().cpu().numpy().tolist(), sample.max().detach().float().cpu().numpy().tolist()) |
|
sample = self.conv_act(sample) |
|
sample = self.conv_out(sample) |
|
if vis_max_min: record_max_min[f"conv_out"] = (sample.min().detach().float().cpu().numpy().tolist(), sample.max().detach().float().cpu().numpy().tolist()) |
|
if not return_dict: |
|
return (sample,) |
|
|
|
if self.regress_elevation or self.regress_focal_length: |
|
return UNetMV2DConditionOutput(sample=sample), pose_pred, record_max_min, up_s |
|
else: |
|
return UNetMV2DConditionOutput(sample=sample), up_s |
|
|
|
|
|
@classmethod |
|
def from_pretrained_2d( |
|
cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], |
|
camera_embedding_type: str, num_views: int, sample_size: int, |
|
zero_init_conv_in: bool = True, zero_init_camera_projection: bool = False, |
|
projection_camera_embeddings_input_dim: int=2, |
|
cd_attention_last: bool = False, num_regress_blocks: int = 4, |
|
cd_attention_mid: bool = False, multiview_attention: bool = True, |
|
sparse_mv_attention: bool = False, selfattn_block: str = 'custom', mvcd_attention: bool = False, |
|
in_channels: int = 8, out_channels: int = 4, unclip: bool = False, regress_elevation: bool = False, regress_focal_length: bool = False, |
|
init_mvattn_with_selfattn: bool= False, use_dino: bool = False, addition_downsample: bool = False, use_face_adapter: bool=True, |
|
**kwargs |
|
): |
|
r""" |
|
Instantiate a pretrained PyTorch model from a pretrained model configuration. |
|
|
|
The model is set in evaluation mode - `model.eval()` - by default, and dropout modules are deactivated. To |
|
train the model, set it back in training mode with `model.train()`. |
|
|
|
Parameters: |
|
pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): |
|
Can be either: |
|
|
|
- A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on |
|
the Hub. |
|
- A path to a *directory* (for example `./my_model_directory`) containing the model weights saved |
|
with [`~ModelMixin.save_pretrained`]. |
|
|
|
cache_dir (`Union[str, os.PathLike]`, *optional*): |
|
Path to a directory where a downloaded pretrained model configuration is cached if the standard cache |
|
is not used. |
|
torch_dtype (`str` or `torch.dtype`, *optional*): |
|
Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the |
|
dtype is automatically derived from the model's weights. |
|
force_download (`bool`, *optional*, defaults to `False`): |
|
Whether or not to force the (re-)download of the model weights and configuration files, overriding the |
|
cached versions if they exist. |
|
resume_download (`bool`, *optional*, defaults to `False`): |
|
Whether or not to resume downloading the model weights and configuration files. If set to `False`, any |
|
incompletely downloaded files are deleted. |
|
proxies (`Dict[str, str]`, *optional*): |
|
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', |
|
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. |
|
output_loading_info (`bool`, *optional*, defaults to `False`): |
|
Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. |
|
local_files_only(`bool`, *optional*, defaults to `False`): |
|
Whether to only load local model weights and configuration files or not. If set to `True`, the model |
|
won't be downloaded from the Hub. |
|
use_auth_token (`str` or *bool*, *optional*): |
|
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from |
|
`diffusers-cli login` (stored in `~/.huggingface`) is used. |
|
revision (`str`, *optional*, defaults to `"main"`): |
|
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier |
|
allowed by Git. |
|
from_flax (`bool`, *optional*, defaults to `False`): |
|
Load the model weights from a Flax checkpoint save file. |
|
subfolder (`str`, *optional*, defaults to `""`): |
|
The subfolder location of a model file within a larger model repository on the Hub or locally. |
|
mirror (`str`, *optional*): |
|
Mirror source to resolve accessibility issues if you're downloading a model in China. We do not |
|
guarantee the timeliness or safety of the source, and you should refer to the mirror site for more |
|
information. |
|
device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*): |
|
A map that specifies where each submodule should go. It doesn't need to be defined for each |
|
parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the |
|
same device. |
|
|
|
Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For |
|
more information about each option see [designing a device |
|
map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). |
|
max_memory (`Dict`, *optional*): |
|
                A dictionary mapping each device identifier to its maximum memory. Defaults to the maximum memory available for |
|
each GPU and the available CPU RAM if unset. |
|
offload_folder (`str` or `os.PathLike`, *optional*): |
|
The path to offload weights if `device_map` contains the value `"disk"`. |
|
offload_state_dict (`bool`, *optional*): |
|
If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if |
|
the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` |
|
when there is some disk offload. |
|
low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`): |
|
                Speed up model loading by loading only the pretrained weights and not initializing them. This also |
|
tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model. |
|
Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this |
|
argument to `True` will raise an error. |
|
variant (`str`, *optional*): |
|
Load weights from a specified `variant` filename such as `"fp16"` or `"ema"`. This is ignored when |
|
loading `from_flax`. |
|
use_safetensors (`bool`, *optional*, defaults to `None`): |
|
If set to `None`, the `safetensors` weights are downloaded if they're available **and** if the |
|
`safetensors` library is installed. If set to `True`, the model is forcibly loaded from `safetensors` |
|
weights. If set to `False`, `safetensors` weights are not loaded. |
|
|
|
<Tip> |
|
|
|
To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with |
|
`huggingface-cli login`. You can also activate the special |
|
["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a |
|
firewalled environment. |
|
|
|
</Tip> |
|
|
|
Example: |
|
|
|
```py |
|
        # illustrative arguments; adapt the multiview-specific values to your setup |
        unet = UNetMV2DConditionModel.from_pretrained_2d( |
            "runwayml/stable-diffusion-v1-5", subfolder="unet", |
            camera_embedding_type="e_de_da_sincos", num_views=6, sample_size=32, |
        ) |
|
``` |
|
|
|
If you get the error message below, you need to finetune the weights for your downstream task: |
|
|
|
```bash |
|
Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match: |
|
- conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated |
|
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. |
|
``` |
|
""" |
|
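        # Pop the standard loading kwargs; anything left in `kwargs` is forwarded to |
        # `load_config` and, via `unused_kwargs`, to `from_config`. |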
cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE) |
|
ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False) |
|
force_download = kwargs.pop("force_download", False) |
|
from_flax = kwargs.pop("from_flax", False) |
|
resume_download = kwargs.pop("resume_download", False) |
|
proxies = kwargs.pop("proxies", None) |
|
output_loading_info = kwargs.pop("output_loading_info", False) |
|
local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE) |
|
use_auth_token = kwargs.pop("use_auth_token", None) |
|
revision = kwargs.pop("revision", None) |
|
torch_dtype = kwargs.pop("torch_dtype", None) |
|
subfolder = kwargs.pop("subfolder", None) |
|
device_map = kwargs.pop("device_map", None) |
|
max_memory = kwargs.pop("max_memory", None) |
|
offload_folder = kwargs.pop("offload_folder", None) |
|
offload_state_dict = kwargs.pop("offload_state_dict", False) |
|
variant = kwargs.pop("variant", None) |
|
use_safetensors = kwargs.pop("use_safetensors", None) |
|
|
|
        try: |
            import safetensors  # noqa: F401 |
            _safetensors_available = True |
        except ImportError: |
            _safetensors_available = False |
| 
        if use_safetensors and not _safetensors_available: |
            raise ValueError( |
                "`use_safetensors`=True but safetensors is not installed. Please install it with `pip install safetensors`." |
            ) |
| 
        allow_pickle = False |
        if use_safetensors is None: |
            # prefer safetensors when available, but allow falling back to the .bin weights |
            use_safetensors = _safetensors_available |
            allow_pickle = True |
|
|
|
if device_map is not None and not is_accelerate_available(): |
|
raise NotImplementedError( |
|
"Loading and dispatching requires `accelerate`. Please make sure to install accelerate or set" |
|
" `device_map=None`. You can install accelerate with `pip install accelerate`." |
|
) |
|
|
|
|
|
if device_map is not None and not is_torch_version(">=", "1.9.0"): |
|
raise NotImplementedError( |
|
"Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set" |
|
" `device_map=None`." |
|
) |
|
|
|
|
|
config_path = pretrained_model_name_or_path |
|
|
|
user_agent = { |
|
"diffusers": __version__, |
|
"file_type": "model", |
|
"framework": "pytorch", |
|
} |
|
|
|
|
|
config, unused_kwargs, commit_hash = cls.load_config( |
|
config_path, |
|
cache_dir=cache_dir, |
|
return_unused_kwargs=True, |
|
return_commit_hash=True, |
|
force_download=force_download, |
|
resume_download=resume_download, |
|
proxies=proxies, |
|
local_files_only=local_files_only, |
|
use_auth_token=use_auth_token, |
|
revision=revision, |
|
subfolder=subfolder, |
|
device_map=device_map, |
|
max_memory=max_memory, |
|
offload_folder=offload_folder, |
|
offload_state_dict=offload_state_dict, |
|
user_agent=user_agent, |
|
**kwargs, |
|
) |
|
|
|
|
|
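        # Patch the loaded 2D UNet config for the multiview variant: record the extra |
        # conditioning options and swap the standard 2D blocks for their MV counterparts |
        # so that `from_config` instantiates the correct architecture below. |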
config["_class_name"] = cls.__name__ |
|
config['in_channels'] = in_channels |
|
config['out_channels'] = out_channels |
|
config['sample_size'] = sample_size |
|
config['num_views'] = num_views |
|
config['cd_attention_last'] = cd_attention_last |
|
config['cd_attention_mid'] = cd_attention_mid |
|
config['multiview_attention'] = multiview_attention |
|
config['sparse_mv_attention'] = sparse_mv_attention |
|
config['selfattn_block'] = selfattn_block |
|
config['mvcd_attention'] = mvcd_attention |
|
config["down_block_types"] = [ |
|
"CrossAttnDownBlockMV2D", |
|
"CrossAttnDownBlockMV2D", |
|
"CrossAttnDownBlockMV2D", |
|
"DownBlock2D" |
|
] |
|
config['mid_block_type'] = "UNetMidBlockMV2DCrossAttn" |
|
config["up_block_types"] = [ |
|
"UpBlock2D", |
|
"CrossAttnUpBlockMV2D", |
|
"CrossAttnUpBlockMV2D", |
|
"CrossAttnUpBlockMV2D" |
|
] |
|
|
|
|
|
config['regress_elevation'] = regress_elevation |
|
config['regress_focal_length'] = regress_focal_length |
|
config['projection_camera_embeddings_input_dim'] = projection_camera_embeddings_input_dim |
|
config['use_dino'] = use_dino |
|
config['num_regress_blocks'] = num_regress_blocks |
|
config['addition_downsample'] = addition_downsample |
|
|
|
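        # Resolve the checkpoint file: prefer safetensors weights when requested and |
        # available, otherwise fall back to the PyTorch `.bin` weights. |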
model_file = None |
|
if from_flax: |
|
            raise NotImplementedError("Loading Flax checkpoints is not supported by `from_pretrained_2d`.") |
|
else: |
|
if use_safetensors: |
|
try: |
|
model_file = _get_model_file( |
|
pretrained_model_name_or_path, |
|
weights_name=_add_variant(SAFETENSORS_WEIGHTS_NAME, variant), |
|
cache_dir=cache_dir, |
|
force_download=force_download, |
|
resume_download=resume_download, |
|
proxies=proxies, |
|
local_files_only=local_files_only, |
|
use_auth_token=use_auth_token, |
|
revision=revision, |
|
subfolder=subfolder, |
|
user_agent=user_agent, |
|
commit_hash=commit_hash, |
|
) |
|
except IOError as e: |
|
if not allow_pickle: |
|
raise e |
|
                    pass  # fall through to the PyTorch .bin weights below |
|
if model_file is None: |
|
model_file = _get_model_file( |
|
pretrained_model_name_or_path, |
|
weights_name=_add_variant(WEIGHTS_NAME, variant), |
|
cache_dir=cache_dir, |
|
force_download=force_download, |
|
resume_download=resume_download, |
|
proxies=proxies, |
|
local_files_only=local_files_only, |
|
use_auth_token=use_auth_token, |
|
revision=revision, |
|
subfolder=subfolder, |
|
user_agent=user_agent, |
|
commit_hash=commit_hash, |
|
) |
|
|
|
model = cls.from_config(config, **unused_kwargs) |
|
import copy |
|
state_dict_pretrain = load_state_dict(model_file, variant=variant) |
|
state_dict = copy.deepcopy(state_dict_pretrain) |
|
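        # Optionally seed the new multiview attention (attn_mv) and its norm from the |
        # pretrained self-attention (attn1), zeroing the output projection so the extra |
        # branch starts as a no-op. |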
if init_mvattn_with_selfattn: |
|
for key in state_dict_pretrain: |
|
if 'attn1' in key: |
|
key_mv = key.replace('attn1', 'attn_mv') |
|
state_dict[key_mv] = state_dict_pretrain[key] |
|
if 'to_out.0.weight' in key: |
|
nn.init.zeros_(state_dict[key_mv].data) |
|
if 'transformer_blocks' in key and 'norm1' in key: |
|
key_mv = key.replace('norm1', 'norm_mv') |
|
state_dict[key_mv] = state_dict_pretrain[key] |
|
del state_dict_pretrain |
|
|
|
model._convert_deprecated_attention_blocks(state_dict) |
|
|
|
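        # Keep the pretrained 4-channel conv_in/conv_out kernels; the loader below drops |
        # mismatched tensors, so they are copied back into the inflated layers afterwards. |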
conv_in_weight = state_dict['conv_in.weight'] |
|
conv_out_weight = state_dict['conv_out.weight'] |
|
model, missing_keys, unexpected_keys, mismatched_keys, error_msgs = cls._load_pretrained_model_2d( |
|
model, |
|
state_dict, |
|
model_file, |
|
pretrained_model_name_or_path, |
|
ignore_mismatched_sizes=True, |
|
) |
|
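        # Inflate conv_in: copy the pretrained weights into the first 4 input channels and, |
        # if requested, zero-initialize the extra channels. |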
        if any(key == 'conv_in.weight' for key, _, _ in mismatched_keys): |
|
|
|
model.conv_in.weight.data[:,:4] = conv_in_weight |
|
|
|
|
|
if zero_init_conv_in: |
|
model.conv_in.weight.data[:,4:] = 0. |
|
|
|
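        # Inflate conv_out likewise; with out_channels == 8 the pretrained kernel is |
        # duplicated into the second group of 4 output channels. |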
        if any(key == 'conv_out.weight' for key, _, _ in mismatched_keys): |
|
|
|
model.conv_out.weight.data[:,:4] = conv_out_weight |
|
if out_channels == 8: |
|
model.conv_out.weight.data[:, 4:] = conv_out_weight |
|
|
|
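        # Optionally zero the last parameter of `camera_embedding` so the pose |
        # conditioning starts from a neutral initialization. |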
if (regress_elevation or regress_focal_length) and zero_init_camera_projection: |
|
            params = list(model.camera_embedding.parameters()) |
|
torch.nn.init.zeros_(params[-1].data) |
|
|
|
loading_info = { |
|
"missing_keys": missing_keys, |
|
"unexpected_keys": unexpected_keys, |
|
"mismatched_keys": mismatched_keys, |
|
"error_msgs": error_msgs, |
|
} |
|
|
|
if torch_dtype is not None and not isinstance(torch_dtype, torch.dtype): |
|
raise ValueError( |
|
f"{torch_dtype} needs to be of type `torch.dtype`, e.g. `torch.float16`, but is {type(torch_dtype)}." |
|
) |
|
elif torch_dtype is not None: |
|
model = model.to(torch_dtype) |
|
|
|
model.register_to_config(_name_or_path=pretrained_model_name_or_path) |
|
|
|
|
|
model.eval() |
|
if output_loading_info: |
|
return model, loading_info |
|
return model |
|
|
|
@classmethod |
|
def _load_pretrained_model_2d( |
|
cls, |
|
model, |
|
state_dict, |
|
resolved_archive_file, |
|
pretrained_model_name_or_path, |
|
ignore_mismatched_sizes=False, |
|
): |
|
|
|
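        # Compare checkpoint keys against the freshly instantiated multiview model; any |
        # modules absent from the 2D checkpoint show up as missing keys and keep their |
        # initialization. |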
model_state_dict = model.state_dict() |
|
loaded_keys = list(state_dict.keys()) |
|
|
|
expected_keys = list(model_state_dict.keys()) |
|
|
|
original_loaded_keys = loaded_keys |
|
|
|
missing_keys = list(set(expected_keys) - set(loaded_keys)) |
|
unexpected_keys = list(set(loaded_keys) - set(expected_keys)) |
|
|
|
|
|
model_to_load = model |
|
|
|
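        # Record shape mismatches and drop those tensors from the state dict so that |
        # `_load_state_dict_into_model` does not fail; the dropped conv_in/conv_out weights |
        # are re-applied by `from_pretrained_2d` after loading. |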
def _find_mismatched_keys( |
|
state_dict, |
|
model_state_dict, |
|
loaded_keys, |
|
ignore_mismatched_sizes, |
|
): |
|
mismatched_keys = [] |
|
if ignore_mismatched_sizes: |
|
for checkpoint_key in loaded_keys: |
|
model_key = checkpoint_key |
|
|
|
if ( |
|
model_key in model_state_dict |
|
and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape |
|
): |
|
mismatched_keys.append( |
|
(checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape) |
|
) |
|
del state_dict[checkpoint_key] |
|
return mismatched_keys |
|
|
|
if state_dict is not None: |
|
|
|
mismatched_keys = _find_mismatched_keys( |
|
state_dict, |
|
model_state_dict, |
|
original_loaded_keys, |
|
ignore_mismatched_sizes, |
|
) |
|
error_msgs = _load_state_dict_into_model(model_to_load, state_dict) |
|
|
|
if len(error_msgs) > 0: |
|
error_msg = "\n\t".join(error_msgs) |
|
if "size mismatch" in error_msg: |
|
error_msg += ( |
|
"\n\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method." |
|
) |
|
raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}") |
|
|
|
if len(unexpected_keys) > 0: |
|
logger.warning( |
|
f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when" |
|
f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are" |
|
f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task" |
|
" or with another architecture (e.g. initializing a BertForSequenceClassification model from a" |
|
" BertForPreTraining model).\n- This IS NOT expected if you are initializing" |
|
f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly" |
|
" identical (initializing a BertForSequenceClassification model from a" |
|
" BertForSequenceClassification model)." |
|
) |
|
else: |
|
logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n") |
|
if len(missing_keys) > 0: |
|
logger.warning( |
|
f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" |
|
f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably" |
|
" TRAIN this model on a down-stream task to be able to use it for predictions and inference." |
|
) |
|
elif len(mismatched_keys) == 0: |
|
logger.info( |
|
f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at" |
|
f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the" |
|
f" checkpoint was trained on, you can already use {model.__class__.__name__} for predictions" |
|
" without further training." |
|
) |
|
if len(mismatched_keys) > 0: |
|
mismatched_warning = "\n".join( |
|
[ |
|
f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated" |
|
for key, shape1, shape2 in mismatched_keys |
|
] |
|
) |
|
logger.warning( |
|
f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" |
|
f" {pretrained_model_name_or_path} and are newly initialized because the shapes did not" |
|
f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be" |
|
" able to use it for predictions and inference." |
|
) |
|
|
|
return model, missing_keys, unexpected_keys, mismatched_keys, error_msgs |
|
|
|
|