# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional

import torch
import torch.nn.functional as F
from torch import nn


class Attention(nn.Module):
r""" | |
A cross attention layer. | |
Parameters: | |
query_dim (`int`): | |
The number of channels in the query. | |
cross_attention_dim (`int`, *optional*): | |
The number of channels in the encoder_hidden_states. If not given, defaults to `query_dim`. | |
heads (`int`, *optional*, defaults to 8): | |
The number of heads to use for multi-head attention. | |
dim_head (`int`, *optional*, defaults to 64): | |
The number of channels in each head. | |
dropout (`float`, *optional*, defaults to 0.0): | |
The dropout probability to use. | |
bias (`bool`, *optional*, defaults to False): | |
Set to `True` for the query, key, and value linear layers to contain a bias parameter. | |
upcast_attention (`bool`, *optional*, defaults to False): | |
Set to `True` to upcast the attention computation to `float32`. | |
upcast_softmax (`bool`, *optional*, defaults to False): | |
Set to `True` to upcast the softmax computation to `float32`. | |
cross_attention_norm (`str`, *optional*, defaults to `None`): | |
The type of normalization to use for the cross attention. Can be `None`, `layer_norm`, or `group_norm`. | |
cross_attention_norm_num_groups (`int`, *optional*, defaults to 32): | |
The number of groups to use for the group norm in the cross attention. | |
added_kv_proj_dim (`int`, *optional*, defaults to `None`): | |
The number of channels to use for the added key and value projections. If `None`, no projection is used. | |
norm_num_groups (`int`, *optional*, defaults to `None`): | |
The number of groups to use for the group norm in the attention. | |
spatial_norm_dim (`int`, *optional*, defaults to `None`): | |
The number of channels to use for the spatial normalization. | |
out_bias (`bool`, *optional*, defaults to `True`): | |
Set to `True` to use a bias in the output linear layer. | |
scale_qk (`bool`, *optional*, defaults to `True`): | |
Set to `True` to scale the query and key by `1 / sqrt(dim_head)`. | |
only_cross_attention (`bool`, *optional*, defaults to `False`): | |
Set to `True` to only use cross attention and not added_kv_proj_dim. Can only be set to `True` if | |
`added_kv_proj_dim` is not `None`. | |
eps (`float`, *optional*, defaults to 1e-5): | |
An additional value added to the denominator in group normalization that is used for numerical stability. | |
rescale_output_factor (`float`, *optional*, defaults to 1.0): | |
A factor to rescale the output by dividing it with this value. | |
residual_connection (`bool`, *optional*, defaults to `False`): | |
Set to `True` to add the residual connection to the output. | |
_from_deprecated_attn_block (`bool`, *optional*, defaults to `False`): | |
Set to `True` if the attention block is loaded from a deprecated state dict. | |
processor (`AttnProcessor`, *optional*, defaults to `None`): | |
The attention processor to use. If `None`, defaults to `AttnProcessor2_0` if `torch 2.x` is used and | |
`AttnProcessor` otherwise. | |
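
    Examples:
        A minimal self-attention usage sketch (the sizes below are illustrative, not taken from any particular
        model):

        ```py
        >>> import torch
        >>> attn = Attention(query_dim=64, heads=4, dim_head=16)
        >>> hidden_states = torch.randn(2, 77, 64)  # (batch_size, seq_len, query_dim)
        >>> attn(hidden_states).shape
        torch.Size([2, 77, 64])
        ```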
""" | |
def __init__( | |
self, | |
query_dim: int, | |
cross_attention_dim: Optional[int] = None, | |
heads: int = 8, | |
dim_head: int = 64, | |
dropout: float = 0.0, | |
bias: bool = False, | |
upcast_attention: bool = False, | |
upcast_softmax: bool = False, | |
cross_attention_norm: Optional[str] = None, | |
cross_attention_norm_num_groups: int = 32, | |
added_kv_proj_dim: Optional[int] = None, | |
norm_num_groups: Optional[int] = None, | |
out_bias: bool = True, | |
scale_qk: bool = True, | |
only_cross_attention: bool = False, | |
eps: float = 1e-5, | |
rescale_output_factor: float = 1.0, | |
residual_connection: bool = False, | |
_from_deprecated_attn_block: bool = False, | |
processor: Optional["AttnProcessor"] = None, | |
out_dim: int = None, | |
): | |
        super().__init__()
        self.inner_dim = out_dim if out_dim is not None else dim_head * heads
        self.query_dim = query_dim
        self.cross_attention_dim = (
            cross_attention_dim if cross_attention_dim is not None else query_dim
        )
        self.upcast_attention = upcast_attention
        self.upcast_softmax = upcast_softmax
        self.rescale_output_factor = rescale_output_factor
        self.residual_connection = residual_connection
        self.dropout = dropout
        self.fused_projections = False
        self.out_dim = out_dim if out_dim is not None else query_dim

        # we make use of this private variable to know whether this class is loaded
        # with a deprecated state dict so that we can convert it on the fly
        self._from_deprecated_attn_block = _from_deprecated_attn_block

        self.scale_qk = scale_qk
        self.scale = dim_head**-0.5 if self.scale_qk else 1.0

        self.heads = out_dim // dim_head if out_dim is not None else heads
        # for slice_size > 0 the attention score computation
        # is split across the batch axis to save memory
        # You can set slice_size with `set_attention_slice`
        self.sliceable_head_dim = heads

        self.added_kv_proj_dim = added_kv_proj_dim
        self.only_cross_attention = only_cross_attention

        if self.added_kv_proj_dim is None and self.only_cross_attention:
            raise ValueError(
                "`only_cross_attention` can only be set to True if `added_kv_proj_dim` is not None. Make sure to set either `only_cross_attention=False` or define `added_kv_proj_dim`."
            )

        if norm_num_groups is not None:
            self.group_norm = nn.GroupNorm(
                num_channels=query_dim, num_groups=norm_num_groups, eps=eps, affine=True
            )
        else:
            self.group_norm = None

        self.spatial_norm = None

        if cross_attention_norm is None:
            self.norm_cross = None
        elif cross_attention_norm == "layer_norm":
            self.norm_cross = nn.LayerNorm(self.cross_attention_dim)
        elif cross_attention_norm == "group_norm":
            if self.added_kv_proj_dim is not None:
                # The given `encoder_hidden_states` are initially of shape
                # (batch_size, seq_len, added_kv_proj_dim) before being projected
                # to (batch_size, seq_len, cross_attention_dim). The norm is applied
                # before the projection, so we need to use `added_kv_proj_dim` as
                # the number of channels for the group norm.
                norm_cross_num_channels = added_kv_proj_dim
            else:
                norm_cross_num_channels = self.cross_attention_dim

            self.norm_cross = nn.GroupNorm(
                num_channels=norm_cross_num_channels,
                num_groups=cross_attention_norm_num_groups,
                eps=1e-5,
                affine=True,
            )
        else:
            raise ValueError(
                f"unknown cross_attention_norm: {cross_attention_norm}. Should be None, 'layer_norm' or 'group_norm'"
            )

        linear_cls = nn.Linear
        self.linear_cls = linear_cls

        self.to_q = linear_cls(query_dim, self.inner_dim, bias=bias)

        if not self.only_cross_attention:
            # only relevant for the `AddedKVProcessor` classes
            self.to_k = linear_cls(self.cross_attention_dim, self.inner_dim, bias=bias)
            self.to_v = linear_cls(self.cross_attention_dim, self.inner_dim, bias=bias)
        else:
            self.to_k = None
            self.to_v = None

        if self.added_kv_proj_dim is not None:
            self.add_k_proj = linear_cls(added_kv_proj_dim, self.inner_dim)
            self.add_v_proj = linear_cls(added_kv_proj_dim, self.inner_dim)

        self.to_out = nn.ModuleList([])
        self.to_out.append(linear_cls(self.inner_dim, self.out_dim, bias=out_bias))
        self.to_out.append(nn.Dropout(dropout))

        # set attention processor
        # We use the AttnProcessor2_0 by default when torch 2.x is used which uses
        # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention
        # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1
        if processor is None:
            processor = (
                AttnProcessor2_0()
                if hasattr(F, "scaled_dot_product_attention") and self.scale_qk
                else AttnProcessor()
            )
        self.set_processor(processor)

    def set_processor(self, processor: "AttnProcessor") -> None:
        r"""Set the attention processor to use for this layer."""
        self.processor = processor

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        **cross_attention_kwargs,
    ) -> torch.Tensor:
        r"""
        The forward method of the `Attention` class.

        Args:
            hidden_states (`torch.Tensor`):
                The hidden states of the query.
            encoder_hidden_states (`torch.Tensor`, *optional*):
                The hidden states of the encoder.
            attention_mask (`torch.Tensor`, *optional*):
                The attention mask to use. If `None`, no mask is applied.
            **cross_attention_kwargs:
                Additional keyword arguments to pass along to the cross attention.

        Returns:
            `torch.Tensor`: The output of the attention layer.
        """
        # The `Attention` class can call different attention processors / attention functions
        # here we simply pass along all tensors to the selected processor class
        # For standard processors that are defined here, `**cross_attention_kwargs` is empty
        return self.processor(
            self,
            hidden_states,
            encoder_hidden_states=encoder_hidden_states,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )

    def batch_to_head_dim(self, tensor: torch.Tensor) -> torch.Tensor:
        r"""
        Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size // heads, seq_len, dim * heads]`. `heads`
        is the number of heads initialized while constructing the `Attention` class.

        Args:
            tensor (`torch.Tensor`): The tensor to reshape.

        Returns:
            `torch.Tensor`: The reshaped tensor.
        """
        head_size = self.heads
        batch_size, seq_len, dim = tensor.shape
        tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim)
        tensor = tensor.permute(0, 2, 1, 3).reshape(
            batch_size // head_size, seq_len, dim * head_size
        )
        return tensor

    def head_to_batch_dim(self, tensor: torch.Tensor, out_dim: int = 3) -> torch.Tensor:
        r"""
        Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size, heads, seq_len, dim // heads]`. `heads`
        is the number of heads initialized while constructing the `Attention` class.

        Args:
            tensor (`torch.Tensor`): The tensor to reshape.
            out_dim (`int`, *optional*, defaults to `3`): The output dimension of the tensor. If `3`, the tensor is
                reshaped to `[batch_size * heads, seq_len, dim // heads]`.

        Returns:
            `torch.Tensor`: The reshaped tensor.
        """
        head_size = self.heads
        batch_size, seq_len, dim = tensor.shape
        tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size)
        tensor = tensor.permute(0, 2, 1, 3)

        if out_dim == 3:
            tensor = tensor.reshape(batch_size * head_size, seq_len, dim // head_size)

        return tensor
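
    # Illustrative (hypothetical) shapes for the two reshaping helpers above, assuming heads=4
    # acting on a tensor of shape (2, 77, 64):
    #   head_to_batch_dim:  (2, 77, 64) -> (8, 77, 16)   # batch axis absorbs the heads
    #   batch_to_head_dim:  (8, 77, 16) -> (2, 77, 64)   # heads folded back into the channel axis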

    def get_attention_scores(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        r"""
        Compute the attention scores.

        Args:
            query (`torch.Tensor`): The query tensor.
            key (`torch.Tensor`): The key tensor.
            attention_mask (`torch.Tensor`, *optional*): The attention mask to use. If `None`, no mask is applied.

        Returns:
            `torch.Tensor`: The attention probabilities/scores.
        """
        dtype = query.dtype
        if self.upcast_attention:
            query = query.float()
            key = key.float()

        if attention_mask is None:
            baddbmm_input = torch.empty(
                query.shape[0],
                query.shape[1],
                key.shape[1],
                dtype=query.dtype,
                device=query.device,
            )
            beta = 0
        else:
            baddbmm_input = attention_mask
            beta = 1

        attention_scores = torch.baddbmm(
            baddbmm_input,
            query,
            key.transpose(-1, -2),
            beta=beta,
            alpha=self.scale,
        )
        del baddbmm_input

        if self.upcast_softmax:
            attention_scores = attention_scores.float()

        attention_probs = attention_scores.softmax(dim=-1)
        del attention_scores

        attention_probs = attention_probs.to(dtype)

        return attention_probs
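
    # For reference, `get_attention_scores` is (up to the baddbmm fusion and optional upcasting)
    # equivalent to this naive sketch, where `attention_mask` is an additive bias broadcastable
    # to (batch * heads, query_len, key_len):
    #
    #     scores = self.scale * (query @ key.transpose(-1, -2))
    #     if attention_mask is not None:
    #         scores = scores + attention_mask
    #     probs = scores.softmax(dim=-1)
    #
    # `torch.baddbmm` fuses the mask addition and the scaled matmul into a single kernel call.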

    def prepare_attention_mask(
        self,
        attention_mask: Optional[torch.Tensor],
        target_length: int,
        batch_size: int,
        out_dim: int = 3,
    ) -> torch.Tensor:
        r"""
        Prepare the attention mask for the attention computation.

        Args:
            attention_mask (`torch.Tensor`):
                The attention mask to prepare.
            target_length (`int`):
                The target length of the attention mask. This is the length of the attention mask after padding.
            batch_size (`int`):
                The batch size, which is used to repeat the attention mask.
            out_dim (`int`, *optional*, defaults to `3`):
                The output dimension of the attention mask. Can be either `3` or `4`.

        Returns:
            `torch.Tensor`: The prepared attention mask.
        """
        head_size = self.heads
        if attention_mask is None:
            return attention_mask

        current_length: int = attention_mask.shape[-1]
        if current_length != target_length:
            if attention_mask.device.type == "mps":
                # HACK: MPS: Does not support padding by greater than dimension of input tensor.
                # Instead, we can manually construct the padding tensor.
                padding_shape = (
                    attention_mask.shape[0],
                    attention_mask.shape[1],
                    target_length,
                )
                padding = torch.zeros(
                    padding_shape,
                    dtype=attention_mask.dtype,
                    device=attention_mask.device,
                )
                attention_mask = torch.cat([attention_mask, padding], dim=2)
            else:
                # TODO: for pipelines such as stable-diffusion, padding cross-attn mask:
                #       we want to instead pad by (0, remaining_length), where remaining_length is:
                #       remaining_length: int = target_length - current_length
                # TODO: re-enable tests/models/test_models_unet_2d_condition.py#test_model_xattn_padding
                attention_mask = F.pad(attention_mask, (0, target_length), value=0.0)

        if out_dim == 3:
            if attention_mask.shape[0] < batch_size * head_size:
                attention_mask = attention_mask.repeat_interleave(head_size, dim=0)
        elif out_dim == 4:
            attention_mask = attention_mask.unsqueeze(1)
            attention_mask = attention_mask.repeat_interleave(head_size, dim=1)

        return attention_mask
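
    # Illustrative (hypothetical) example of mask preparation, assuming heads=4, batch_size=2,
    # and an additive mask of shape (2, 1, 77) whose key length already matches target_length=77:
    #   out_dim=3: the mask is repeated per head along the batch axis -> (8, 1, 77)
    #   out_dim=4: a head axis is inserted and repeated instead       -> (2, 4, 1, 77)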

    def norm_encoder_hidden_states(
        self, encoder_hidden_states: torch.Tensor
    ) -> torch.Tensor:
        r"""
        Normalize the encoder hidden states. Requires `self.norm_cross` to be specified when constructing the
        `Attention` class.

        Args:
            encoder_hidden_states (`torch.Tensor`): Hidden states of the encoder.

        Returns:
            `torch.Tensor`: The normalized encoder hidden states.
        """
        assert (
            self.norm_cross is not None
        ), "self.norm_cross must be defined to call self.norm_encoder_hidden_states"

        if isinstance(self.norm_cross, nn.LayerNorm):
            encoder_hidden_states = self.norm_cross(encoder_hidden_states)
        elif isinstance(self.norm_cross, nn.GroupNorm):
            # Group norm norms along the channels dimension and expects
            # input to be in the shape of (N, C, *). In this case, we want
            # to norm along the hidden dimension, so we need to move
            # (batch_size, sequence_length, hidden_size) ->
            # (batch_size, hidden_size, sequence_length)
            encoder_hidden_states = encoder_hidden_states.transpose(1, 2)
            encoder_hidden_states = self.norm_cross(encoder_hidden_states)
            encoder_hidden_states = encoder_hidden_states.transpose(1, 2)
        else:
            assert False

        return encoder_hidden_states

    @torch.no_grad()
    def fuse_projections(self, fuse=True):
        # Run without autograd so the in-place `copy_` into the freshly created
        # projection weights does not interfere with gradient tracking.
        is_cross_attention = self.cross_attention_dim != self.query_dim
        device = self.to_q.weight.data.device
        dtype = self.to_q.weight.data.dtype

        if not is_cross_attention:
            # fetch weight matrices.
            concatenated_weights = torch.cat(
                [self.to_q.weight.data, self.to_k.weight.data, self.to_v.weight.data]
            )
            in_features = concatenated_weights.shape[1]
            out_features = concatenated_weights.shape[0]

            # create a new single projection layer and copy over the weights.
            self.to_qkv = self.linear_cls(
                in_features, out_features, bias=False, device=device, dtype=dtype
            )
            self.to_qkv.weight.copy_(concatenated_weights)
        else:
            concatenated_weights = torch.cat(
                [self.to_k.weight.data, self.to_v.weight.data]
            )
            in_features = concatenated_weights.shape[1]
            out_features = concatenated_weights.shape[0]

            self.to_kv = self.linear_cls(
                in_features, out_features, bias=False, device=device, dtype=dtype
            )
            self.to_kv.weight.copy_(concatenated_weights)

        self.fused_projections = fuse
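
    # Note: `fuse_projections` only concatenates the projection weights into `to_qkv` / `to_kv`;
    # the two processors defined in this file still read `to_q` / `to_k` / `to_v`, so a
    # fused-projection-aware processor would be needed to actually benefit from the fusion.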


class AttnProcessor:
    r"""
    Default processor for performing attention-related computations.
    """

    def __call__(
        self,
        attn: Attention,
        hidden_states: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
    ) -> torch.Tensor:
        residual = hidden_states

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(
                batch_size, channel, height * width
            ).transpose(1, 2)

        batch_size, sequence_length, _ = (
            hidden_states.shape
            if encoder_hidden_states is None
            else encoder_hidden_states.shape
        )
        attention_mask = attn.prepare_attention_mask(
            attention_mask, sequence_length, batch_size
        )

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(
                1, 2
            )

        query = attn.to_q(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        elif attn.norm_cross:
            encoder_hidden_states = attn.norm_encoder_hidden_states(
                encoder_hidden_states
            )

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        query = attn.head_to_batch_dim(query)
        key = attn.head_to_batch_dim(key)
        value = attn.head_to_batch_dim(value)

        attention_probs = attn.get_attention_scores(query, key, attention_mask)
        hidden_states = torch.bmm(attention_probs, value)
        hidden_states = attn.batch_to_head_dim(hidden_states)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(
                batch_size, channel, height, width
            )

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states


class AttnProcessor2_0:
    r"""
    Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).
    """

    def __init__(self):
        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError(
                "AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0."
            )

    def __call__(
        self,
        attn: Attention,
        hidden_states: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
    ) -> torch.FloatTensor:
        residual = hidden_states

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(
                batch_size, channel, height * width
            ).transpose(1, 2)

        batch_size, sequence_length, _ = (
            hidden_states.shape
            if encoder_hidden_states is None
            else encoder_hidden_states.shape
        )

        if attention_mask is not None:
            attention_mask = attn.prepare_attention_mask(
                attention_mask, sequence_length, batch_size
            )
            # scaled_dot_product_attention expects attention_mask shape to be
            # (batch, heads, source_length, target_length)
            attention_mask = attention_mask.view(
                batch_size, attn.heads, -1, attention_mask.shape[-1]
            )

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(
                1, 2
            )

        query = attn.to_q(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        elif attn.norm_cross:
            encoder_hidden_states = attn.norm_encoder_hidden_states(
                encoder_hidden_states
            )

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)

        # the output of sdp = (batch, num_heads, seq_len, head_dim)
        # TODO: add support for attn.scale when we move to Torch 2.1
        hidden_states = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )

        hidden_states = hidden_states.transpose(1, 2).reshape(
            batch_size, -1, attn.heads * head_dim
        )
        hidden_states = hidden_states.to(query.dtype)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(
                batch_size, channel, height, width
            )

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states
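

# A minimal, hypothetical smoke test (not part of the original module) illustrating how the
# pieces above fit together: self-attention on a 3D tensor, then cross-attention against encoder
# states with an additive attention mask. The sizes are arbitrary example values.
if __name__ == "__main__":
    torch.manual_seed(0)

    # Self-attention: encoder_hidden_states defaults to hidden_states.
    attn = Attention(query_dim=64, heads=4, dim_head=16)
    hidden_states = torch.randn(2, 77, 64)  # (batch_size, seq_len, query_dim)
    print(attn(hidden_states).shape)  # torch.Size([2, 77, 64])

    # Cross-attention with an additive mask over the encoder sequence (zeros keep all
    # positions; large negative values would mask positions out).
    cross_attn = Attention(query_dim=64, cross_attention_dim=128, heads=4, dim_head=16)
    encoder_hidden_states = torch.randn(2, 10, 128)
    attention_mask = torch.zeros(2, 1, 10)
    out = cross_attn(
        hidden_states,
        encoder_hidden_states=encoder_hidden_states,
        attention_mask=attention_mask,
    )
    print(out.shape)  # torch.Size([2, 77, 64])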