lemonaddie committed · verified
Commit f5889f1 · 1 Parent(s): a7299bc
models/attention.py DELETED
@@ -1,777 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
-
16
- # Some modifications are reimplemented in public environments by Xiao Fu and Mu Hu
17
-
18
-
19
- from typing import Any, Dict, Optional
20
-
21
- import torch
22
- import torch.nn.functional as F
23
- from torch import nn
24
- import xformers
25
-
26
- from diffusers.utils import USE_PEFT_BACKEND
27
- from diffusers.utils.torch_utils import maybe_allow_in_graph
28
- from diffusers.models.activations import GEGLU, GELU, ApproximateGELU
29
- from diffusers.models.attention_processor import Attention
30
- from diffusers.models.embeddings import SinusoidalPositionalEmbedding
31
- from diffusers.models.lora import LoRACompatibleLinear
32
- from diffusers.models.normalization import AdaLayerNorm, AdaLayerNormContinuous, AdaLayerNormZero, RMSNorm
33
-
34
-
35
- def _chunked_feed_forward(
36
- ff: nn.Module, hidden_states: torch.Tensor, chunk_dim: int, chunk_size: int, lora_scale: Optional[float] = None
37
- ):
38
- # "feed_forward_chunk_size" can be used to save memory
39
- if hidden_states.shape[chunk_dim] % chunk_size != 0:
40
- raise ValueError(
41
- f"`hidden_states` dimension to be chunked: {hidden_states.shape[chunk_dim]} has to be divisible by chunk size: {chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
42
- )
43
-
44
- num_chunks = hidden_states.shape[chunk_dim] // chunk_size
45
- if lora_scale is None:
46
- ff_output = torch.cat(
47
- [ff(hid_slice) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)],
48
- dim=chunk_dim,
49
- )
50
- else:
51
- # TODO(Patrick): LoRA scale can be removed once the PEFT refactor is complete
52
- ff_output = torch.cat(
53
- [ff(hid_slice, scale=lora_scale) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)],
54
- dim=chunk_dim,
55
- )
56
-
57
- return ff_output
58
-
59
-
60
- @maybe_allow_in_graph
61
- class GatedSelfAttentionDense(nn.Module):
62
- r"""
63
- A gated self-attention dense layer that combines visual features and object features.
64
-
65
- Parameters:
66
- query_dim (`int`): The number of channels in the query.
67
- context_dim (`int`): The number of channels in the context.
68
- n_heads (`int`): The number of heads to use for attention.
69
- d_head (`int`): The number of channels in each head.
70
- """
71
-
72
- def __init__(self, query_dim: int, context_dim: int, n_heads: int, d_head: int):
73
- super().__init__()
74
-
75
- # we need a linear projection since we concatenate the visual feature and the object feature
76
- self.linear = nn.Linear(context_dim, query_dim)
77
-
78
- self.attn = Attention(query_dim=query_dim, heads=n_heads, dim_head=d_head)
79
- self.ff = FeedForward(query_dim, activation_fn="geglu")
80
-
81
- self.norm1 = nn.LayerNorm(query_dim)
82
- self.norm2 = nn.LayerNorm(query_dim)
83
-
84
- self.register_parameter("alpha_attn", nn.Parameter(torch.tensor(0.0)))
85
- self.register_parameter("alpha_dense", nn.Parameter(torch.tensor(0.0)))
86
-
87
- self.enabled = True
88
-
89
- def forward(self, x: torch.Tensor, objs: torch.Tensor) -> torch.Tensor:
90
- if not self.enabled:
91
- return x
92
-
93
- n_visual = x.shape[1]
94
- objs = self.linear(objs)
95
-
96
- x = x + self.alpha_attn.tanh() * self.attn(self.norm1(torch.cat([x, objs], dim=1)))[:, :n_visual, :]
97
- x = x + self.alpha_dense.tanh() * self.ff(self.norm2(x))
98
-
99
- return x
100
-
101
-
102
- @maybe_allow_in_graph
103
- class BasicTransformerBlock(nn.Module):
104
- r"""
105
- A basic Transformer block.
106
-
107
- Parameters:
108
- dim (`int`): The number of channels in the input and output.
109
- num_attention_heads (`int`): The number of heads to use for multi-head attention.
110
- attention_head_dim (`int`): The number of channels in each head.
111
- dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
112
- cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
113
- activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
114
- num_embeds_ada_norm (:
115
- obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`.
116
- attention_bias (:
117
- obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter.
118
- only_cross_attention (`bool`, *optional*):
119
- Whether to use only cross-attention layers. In this case two cross attention layers are used.
120
- double_self_attention (`bool`, *optional*):
121
- Whether to use two self-attention layers. In this case no cross attention layers are used.
122
- upcast_attention (`bool`, *optional*):
123
- Whether to upcast the attention computation to float32. This is useful for mixed precision training.
124
- norm_elementwise_affine (`bool`, *optional*, defaults to `True`):
125
- Whether to use learnable elementwise affine parameters for normalization.
126
- norm_type (`str`, *optional*, defaults to `"layer_norm"`):
127
- The normalization layer to use. Can be `"layer_norm"`, `"ada_norm"` or `"ada_norm_zero"`.
128
- final_dropout (`bool` *optional*, defaults to False):
129
- Whether to apply a final dropout after the last feed-forward layer.
130
- attention_type (`str`, *optional*, defaults to `"default"`):
131
- The type of attention to use. Can be `"default"` or `"gated"` or `"gated-text-image"`.
132
- positional_embeddings (`str`, *optional*, defaults to `None`):
133
- The type of positional embeddings to apply.
134
- num_positional_embeddings (`int`, *optional*, defaults to `None`):
135
- The maximum number of positional embeddings to apply.
136
- """
137
-
138
- def __init__(
139
- self,
140
- dim: int,
141
- num_attention_heads: int,
142
- attention_head_dim: int,
143
- dropout=0.0,
144
- cross_attention_dim: Optional[int] = None,
145
- activation_fn: str = "geglu",
146
- num_embeds_ada_norm: Optional[int] = None,
147
- attention_bias: bool = False,
148
- only_cross_attention: bool = False,
149
- double_self_attention: bool = False,
150
- upcast_attention: bool = False,
151
- norm_elementwise_affine: bool = True,
152
- norm_type: str = "layer_norm", # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single'
153
- norm_eps: float = 1e-5,
154
- final_dropout: bool = False,
155
- attention_type: str = "default",
156
- positional_embeddings: Optional[str] = None,
157
- num_positional_embeddings: Optional[int] = None,
158
- ada_norm_continous_conditioning_embedding_dim: Optional[int] = None,
159
- ada_norm_bias: Optional[int] = None,
160
- ff_inner_dim: Optional[int] = None,
161
- ff_bias: bool = True,
162
- attention_out_bias: bool = True,
163
- ):
164
- super().__init__()
165
- self.only_cross_attention = only_cross_attention
166
-
167
- self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
168
- self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
169
- self.use_ada_layer_norm_single = norm_type == "ada_norm_single"
170
- self.use_layer_norm = norm_type == "layer_norm"
171
- self.use_ada_layer_norm_continuous = norm_type == "ada_norm_continuous"
172
-
173
- if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
174
- raise ValueError(
175
- f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
176
- f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
177
- )
178
-
179
- if positional_embeddings and (num_positional_embeddings is None):
180
- raise ValueError(
181
- "If `positional_embeddings` is defined, `num_positional_embeddings` must also be defined."
182
- )
183
-
184
- if positional_embeddings == "sinusoidal":
185
- self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings)
186
- else:
187
- self.pos_embed = None
188
-
189
- # Define 3 blocks. Each block has its own normalization layer.
190
- # 1. Self-Attn
191
- if self.use_ada_layer_norm:
192
- self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
193
- elif self.use_ada_layer_norm_zero:
194
- self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
195
- elif self.use_ada_layer_norm_continuous:
196
- self.norm1 = AdaLayerNormContinuous(
197
- dim,
198
- ada_norm_continous_conditioning_embedding_dim,
199
- norm_elementwise_affine,
200
- norm_eps,
201
- ada_norm_bias,
202
- "rms_norm",
203
- )
204
- else:
205
- self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)
206
-
207
-
208
- self.attn1 = CustomJointAttention(
209
- query_dim=dim,
210
- heads=num_attention_heads,
211
- dim_head=attention_head_dim,
212
- dropout=dropout,
213
- bias=attention_bias,
214
- cross_attention_dim=cross_attention_dim if only_cross_attention else None,
215
- upcast_attention=upcast_attention,
216
- out_bias=attention_out_bias
217
- )
218
-
219
- # 2. Cross-Attn
220
- if cross_attention_dim is not None or double_self_attention:
221
- # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
222
- # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
223
- # the second cross attention block.
224
-
225
- if self.use_ada_layer_norm:
226
- self.norm2 = AdaLayerNorm(dim, num_embeds_ada_norm)
227
- elif self.use_ada_layer_norm_continuous:
228
- self.norm2 = AdaLayerNormContinuous(
229
- dim,
230
- ada_norm_continous_conditioning_embedding_dim,
231
- norm_elementwise_affine,
232
- norm_eps,
233
- ada_norm_bias,
234
- "rms_norm",
235
- )
236
- else:
237
- self.norm2 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine)
238
-
239
- self.attn2 = Attention(
240
- query_dim=dim,
241
- cross_attention_dim=cross_attention_dim if not double_self_attention else None,
242
- heads=num_attention_heads,
243
- dim_head=attention_head_dim,
244
- dropout=dropout,
245
- bias=attention_bias,
246
- upcast_attention=upcast_attention,
247
- out_bias=attention_out_bias,
248
- ) # is self-attn if encoder_hidden_states is none
249
- else:
250
- self.norm2 = None
251
- self.attn2 = None
252
-
253
- # 3. Feed-forward
254
- if self.use_ada_layer_norm_continuous:
255
- self.norm3 = AdaLayerNormContinuous(
256
- dim,
257
- ada_norm_continous_conditioning_embedding_dim,
258
- norm_elementwise_affine,
259
- norm_eps,
260
- ada_norm_bias,
261
- "layer_norm",
262
- )
263
- elif not self.use_ada_layer_norm_single:
264
- self.norm3 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine)
265
-
266
- self.ff = FeedForward(
267
- dim,
268
- dropout=dropout,
269
- activation_fn=activation_fn,
270
- final_dropout=final_dropout,
271
- inner_dim=ff_inner_dim,
272
- bias=ff_bias,
273
- )
274
-
275
- # 4. Fuser
276
- if attention_type == "gated" or attention_type == "gated-text-image":
277
- self.fuser = GatedSelfAttentionDense(dim, cross_attention_dim, num_attention_heads, attention_head_dim)
278
-
279
- # 5. Scale-shift for PixArt-Alpha.
280
- if self.use_ada_layer_norm_single:
281
- self.scale_shift_table = nn.Parameter(torch.randn(6, dim) / dim**0.5)
282
-
283
- # let chunk size default to None
284
- self._chunk_size = None
285
- self._chunk_dim = 0
286
-
287
- def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):
288
- # Sets chunk feed-forward
289
- self._chunk_size = chunk_size
290
- self._chunk_dim = dim
291
-
292
- def forward(
293
- self,
294
- hidden_states: torch.FloatTensor,
295
- attention_mask: Optional[torch.FloatTensor] = None,
296
- encoder_hidden_states: Optional[torch.FloatTensor] = None,
297
- encoder_attention_mask: Optional[torch.FloatTensor] = None,
298
- timestep: Optional[torch.LongTensor] = None,
299
- cross_attention_kwargs: Dict[str, Any] = None,
300
- class_labels: Optional[torch.LongTensor] = None,
301
- added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
302
- ) -> torch.FloatTensor:
303
- # Notice that normalization is always applied before the real computation in the following blocks.
304
-
305
- # 0. Self-Attention
306
- batch_size = hidden_states.shape[0]
307
-
308
- if self.use_ada_layer_norm:
309
- norm_hidden_states = self.norm1(hidden_states, timestep)
310
- elif self.use_ada_layer_norm_zero:
311
- norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
312
- hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
313
- )
314
- elif self.use_layer_norm:
315
- norm_hidden_states = self.norm1(hidden_states)
316
- elif self.use_ada_layer_norm_continuous:
317
- norm_hidden_states = self.norm1(hidden_states, added_cond_kwargs["pooled_text_emb"])
318
- elif self.use_ada_layer_norm_single:
319
- shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
320
- self.scale_shift_table[None] + timestep.reshape(batch_size, 6, -1)
321
- ).chunk(6, dim=1)
322
- norm_hidden_states = self.norm1(hidden_states)
323
- norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa
324
- norm_hidden_states = norm_hidden_states.squeeze(1)
325
- else:
326
- raise ValueError("Incorrect norm used")
327
-
328
- if self.pos_embed is not None:
329
- norm_hidden_states = self.pos_embed(norm_hidden_states)
330
-
331
- # 1. Retrieve lora scale.
332
- lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0
333
-
334
- # 2. Prepare GLIGEN inputs
335
- cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
336
- gligen_kwargs = cross_attention_kwargs.pop("gligen", None)
337
-
338
- attn_output = self.attn1(
339
- norm_hidden_states,
340
- encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
341
- attention_mask=attention_mask,
342
- **cross_attention_kwargs,
343
- )
344
- if self.use_ada_layer_norm_zero:
345
- attn_output = gate_msa.unsqueeze(1) * attn_output
346
- elif self.use_ada_layer_norm_single:
347
- attn_output = gate_msa * attn_output
348
-
349
- hidden_states = attn_output + hidden_states
350
- if hidden_states.ndim == 4:
351
- hidden_states = hidden_states.squeeze(1)
352
-
353
- # 2.5 GLIGEN Control
354
- if gligen_kwargs is not None:
355
- hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"])
356
-
357
- # 3. Cross-Attention
358
- if self.attn2 is not None:
359
- if self.use_ada_layer_norm:
360
- norm_hidden_states = self.norm2(hidden_states, timestep)
361
- elif self.use_ada_layer_norm_zero or self.use_layer_norm:
362
- norm_hidden_states = self.norm2(hidden_states)
363
- elif self.use_ada_layer_norm_single:
364
- # For PixArt norm2 isn't applied here:
365
- # https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L70C1-L76C103
366
- norm_hidden_states = hidden_states
367
- elif self.use_ada_layer_norm_continuous:
368
- norm_hidden_states = self.norm2(hidden_states, added_cond_kwargs["pooled_text_emb"])
369
- else:
370
- raise ValueError("Incorrect norm")
371
-
372
- if self.pos_embed is not None and self.use_ada_layer_norm_single is False:
373
- norm_hidden_states = self.pos_embed(norm_hidden_states)
374
-
375
- attn_output = self.attn2(
376
- norm_hidden_states,
377
- encoder_hidden_states=encoder_hidden_states,
378
- attention_mask=encoder_attention_mask,
379
- **cross_attention_kwargs,
380
- )
381
- hidden_states = attn_output + hidden_states
382
-
383
- # 4. Feed-forward
384
- if self.use_ada_layer_norm_continuous:
385
- norm_hidden_states = self.norm3(hidden_states, added_cond_kwargs["pooled_text_emb"])
386
- elif not self.use_ada_layer_norm_single:
387
- norm_hidden_states = self.norm3(hidden_states)
388
-
389
- if self.use_ada_layer_norm_zero:
390
- norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
391
-
392
- if self.use_ada_layer_norm_single:
393
- norm_hidden_states = self.norm2(hidden_states)
394
- norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp
395
-
396
- if self._chunk_size is not None:
397
- # "feed_forward_chunk_size" can be used to save memory
398
- ff_output = _chunked_feed_forward(
399
- self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size, lora_scale=lora_scale
400
- )
401
- else:
402
- ff_output = self.ff(norm_hidden_states, scale=lora_scale)
403
-
404
- if self.use_ada_layer_norm_zero:
405
- ff_output = gate_mlp.unsqueeze(1) * ff_output
406
- elif self.use_ada_layer_norm_single:
407
- ff_output = gate_mlp * ff_output
408
-
409
- hidden_states = ff_output + hidden_states
410
- if hidden_states.ndim == 4:
411
- hidden_states = hidden_states.squeeze(1)
412
-
413
- return hidden_states
414
-
415
-
416
- class CustomJointAttention(Attention):
417
- def set_use_memory_efficient_attention_xformers(
418
- self, use_memory_efficient_attention_xformers: bool, *args, **kwargs
419
- ):
420
- processor = XFormersJointAttnProcessor()
421
- self.set_processor(processor)
422
- # print("using xformers attention processor")
423
-
424
-
425
- class XFormersJointAttnProcessor:
426
- r"""
427
- Default processor for performing attention-related computations.
428
- """
429
-
430
- def __call__(
431
- self,
432
- attn: Attention,
433
- hidden_states,
434
- encoder_hidden_states=None,
435
- attention_mask=None,
436
- temb=None,
437
- num_tasks=2
438
- ):
439
-
440
- residual = hidden_states
441
-
442
- if attn.spatial_norm is not None:
443
- hidden_states = attn.spatial_norm(hidden_states, temb)
444
-
445
- input_ndim = hidden_states.ndim
446
-
447
- if input_ndim == 4:
448
- batch_size, channel, height, width = hidden_states.shape
449
- hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
450
-
451
- batch_size, sequence_length, _ = (
452
- hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
453
- )
454
- attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
455
-
456
- # from yuancheng; here attention_mask is None
457
- if attention_mask is not None:
458
- # expand our mask's singleton query_tokens dimension:
459
- # [batch*heads, 1, key_tokens] ->
460
- # [batch*heads, query_tokens, key_tokens]
461
- # so that it can be added as a bias onto the attention scores that xformers computes:
462
- # [batch*heads, query_tokens, key_tokens]
463
- # we do this explicitly because xformers doesn't broadcast the singleton dimension for us.
464
- _, query_tokens, _ = hidden_states.shape
465
- attention_mask = attention_mask.expand(-1, query_tokens, -1)
466
-
467
- if attn.group_norm is not None:
468
- hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
469
-
470
- query = attn.to_q(hidden_states)
471
-
472
- if encoder_hidden_states is None:
473
- encoder_hidden_states = hidden_states
474
- elif attn.norm_cross:
475
- encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
476
-
477
- key = attn.to_k(encoder_hidden_states)
478
- value = attn.to_v(encoder_hidden_states)
479
-
480
- assert num_tasks == 2 # only support two tasks now
481
-
482
- key_0, key_1 = torch.chunk(key, dim=0, chunks=2) # keys shape (b t) d c
483
- value_0, value_1 = torch.chunk(value, dim=0, chunks=2)
484
-
485
- # key = torch.cat([key_1, key_0], dim=0)
486
- # value = torch.cat([value_1, value_0], dim=0)
487
-
488
- key = torch.cat([key_0, key_1], dim=1) # (b t) 2d c
489
- value = torch.cat([value_0, value_1], dim=1) # (b t) 2d c
490
- key = torch.cat([key]*2, dim=0) # (2 b t) 2d c
491
- value = torch.cat([value]*2, dim=0) # (2 b t) 2d c
492
-
493
- query = attn.head_to_batch_dim(query).contiguous()
494
- key = attn.head_to_batch_dim(key).contiguous()
495
- value = attn.head_to_batch_dim(value).contiguous()
496
-
497
- hidden_states = xformers.ops.memory_efficient_attention(query, key, value, attn_bias=attention_mask)
498
- hidden_states = attn.batch_to_head_dim(hidden_states)
499
-
500
- # linear proj
501
- hidden_states = attn.to_out[0](hidden_states)
502
- # dropout
503
- hidden_states = attn.to_out[1](hidden_states)
504
-
505
- if input_ndim == 4:
506
- hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
507
-
508
- if attn.residual_connection:
509
- hidden_states = hidden_states + residual
510
-
511
- hidden_states = hidden_states / attn.rescale_output_factor
512
-
513
- return hidden_states
514
-
515
-
516
- @maybe_allow_in_graph
517
- class TemporalBasicTransformerBlock(nn.Module):
518
- r"""
519
- A basic Transformer block for video like data.
520
-
521
- Parameters:
522
- dim (`int`): The number of channels in the input and output.
523
- time_mix_inner_dim (`int`): The number of channels for temporal attention.
524
- num_attention_heads (`int`): The number of heads to use for multi-head attention.
525
- attention_head_dim (`int`): The number of channels in each head.
526
- cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
527
- """
528
-
529
- def __init__(
530
- self,
531
- dim: int,
532
- time_mix_inner_dim: int,
533
- num_attention_heads: int,
534
- attention_head_dim: int,
535
- cross_attention_dim: Optional[int] = None,
536
- ):
537
- super().__init__()
538
- self.is_res = dim == time_mix_inner_dim
539
-
540
- self.norm_in = nn.LayerNorm(dim)
541
-
542
- # Define 3 blocks. Each block has its own normalization layer.
543
- # 1. Self-Attn
544
- self.norm_in = nn.LayerNorm(dim)
545
- self.ff_in = FeedForward(
546
- dim,
547
- dim_out=time_mix_inner_dim,
548
- activation_fn="geglu",
549
- )
550
-
551
- self.norm1 = nn.LayerNorm(time_mix_inner_dim)
552
- self.attn1 = Attention(
553
- query_dim=time_mix_inner_dim,
554
- heads=num_attention_heads,
555
- dim_head=attention_head_dim,
556
- cross_attention_dim=None,
557
- )
558
-
559
- # 2. Cross-Attn
560
- if cross_attention_dim is not None:
561
- # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
562
- # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
563
- # the second cross attention block.
564
- self.norm2 = nn.LayerNorm(time_mix_inner_dim)
565
- self.attn2 = Attention(
566
- query_dim=time_mix_inner_dim,
567
- cross_attention_dim=cross_attention_dim,
568
- heads=num_attention_heads,
569
- dim_head=attention_head_dim,
570
- ) # is self-attn if encoder_hidden_states is none
571
- else:
572
- self.norm2 = None
573
- self.attn2 = None
574
-
575
- # 3. Feed-forward
576
- self.norm3 = nn.LayerNorm(time_mix_inner_dim)
577
- self.ff = FeedForward(time_mix_inner_dim, activation_fn="geglu")
578
-
579
- # let chunk size default to None
580
- self._chunk_size = None
581
- self._chunk_dim = None
582
-
583
- def set_chunk_feed_forward(self, chunk_size: Optional[int], **kwargs):
584
- # Sets chunk feed-forward
585
- self._chunk_size = chunk_size
586
- # chunk dim should be hardcoded to 1 to have better speed vs. memory trade-off
587
- self._chunk_dim = 1
588
-
589
- def forward(
590
- self,
591
- hidden_states: torch.FloatTensor,
592
- num_frames: int,
593
- encoder_hidden_states: Optional[torch.FloatTensor] = None,
594
- ) -> torch.FloatTensor:
595
- # Notice that normalization is always applied before the real computation in the following blocks.
596
- # 0. Self-Attention
597
- batch_size = hidden_states.shape[0]
598
-
599
- batch_frames, seq_length, channels = hidden_states.shape
600
- batch_size = batch_frames // num_frames
601
-
602
- hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, seq_length, channels)
603
- hidden_states = hidden_states.permute(0, 2, 1, 3)
604
- hidden_states = hidden_states.reshape(batch_size * seq_length, num_frames, channels)
605
-
606
- residual = hidden_states
607
- hidden_states = self.norm_in(hidden_states)
608
-
609
- if self._chunk_size is not None:
610
- hidden_states = _chunked_feed_forward(self.ff_in, hidden_states, self._chunk_dim, self._chunk_size)
611
- else:
612
- hidden_states = self.ff_in(hidden_states)
613
-
614
- if self.is_res:
615
- hidden_states = hidden_states + residual
616
-
617
- norm_hidden_states = self.norm1(hidden_states)
618
- attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None)
619
- hidden_states = attn_output + hidden_states
620
-
621
- # 3. Cross-Attention
622
- if self.attn2 is not None:
623
- norm_hidden_states = self.norm2(hidden_states)
624
- attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states)
625
- hidden_states = attn_output + hidden_states
626
-
627
- # 4. Feed-forward
628
- norm_hidden_states = self.norm3(hidden_states)
629
-
630
- if self._chunk_size is not None:
631
- ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size)
632
- else:
633
- ff_output = self.ff(norm_hidden_states)
634
-
635
- if self.is_res:
636
- hidden_states = ff_output + hidden_states
637
- else:
638
- hidden_states = ff_output
639
-
640
- hidden_states = hidden_states[None, :].reshape(batch_size, seq_length, num_frames, channels)
641
- hidden_states = hidden_states.permute(0, 2, 1, 3)
642
- hidden_states = hidden_states.reshape(batch_size * num_frames, seq_length, channels)
643
-
644
- return hidden_states
645
-
646
-
647
- class SkipFFTransformerBlock(nn.Module):
648
- def __init__(
649
- self,
650
- dim: int,
651
- num_attention_heads: int,
652
- attention_head_dim: int,
653
- kv_input_dim: int,
654
- kv_input_dim_proj_use_bias: bool,
655
- dropout=0.0,
656
- cross_attention_dim: Optional[int] = None,
657
- attention_bias: bool = False,
658
- attention_out_bias: bool = True,
659
- ):
660
- super().__init__()
661
- if kv_input_dim != dim:
662
- self.kv_mapper = nn.Linear(kv_input_dim, dim, kv_input_dim_proj_use_bias)
663
- else:
664
- self.kv_mapper = None
665
-
666
- self.norm1 = RMSNorm(dim, 1e-06)
667
-
668
- self.attn1 = Attention(
669
- query_dim=dim,
670
- heads=num_attention_heads,
671
- dim_head=attention_head_dim,
672
- dropout=dropout,
673
- bias=attention_bias,
674
- cross_attention_dim=cross_attention_dim,
675
- out_bias=attention_out_bias,
676
- )
677
-
678
- self.norm2 = RMSNorm(dim, 1e-06)
679
-
680
- self.attn2 = Attention(
681
- query_dim=dim,
682
- cross_attention_dim=cross_attention_dim,
683
- heads=num_attention_heads,
684
- dim_head=attention_head_dim,
685
- dropout=dropout,
686
- bias=attention_bias,
687
- out_bias=attention_out_bias,
688
- )
689
-
690
- def forward(self, hidden_states, encoder_hidden_states, cross_attention_kwargs):
691
- cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
692
-
693
- if self.kv_mapper is not None:
694
- encoder_hidden_states = self.kv_mapper(F.silu(encoder_hidden_states))
695
-
696
- norm_hidden_states = self.norm1(hidden_states)
697
-
698
- attn_output = self.attn1(
699
- norm_hidden_states,
700
- encoder_hidden_states=encoder_hidden_states,
701
- **cross_attention_kwargs,
702
- )
703
-
704
- hidden_states = attn_output + hidden_states
705
-
706
- norm_hidden_states = self.norm2(hidden_states)
707
-
708
- attn_output = self.attn2(
709
- norm_hidden_states,
710
- encoder_hidden_states=encoder_hidden_states,
711
- **cross_attention_kwargs,
712
- )
713
-
714
- hidden_states = attn_output + hidden_states
715
-
716
- return hidden_states
717
-
718
-
719
- class FeedForward(nn.Module):
720
- r"""
721
- A feed-forward layer.
722
-
723
- Parameters:
724
- dim (`int`): The number of channels in the input.
725
- dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
726
- mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
727
- dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
728
- activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
729
- final_dropout (`bool` *optional*, defaults to False): Apply a final dropout.
730
- bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
731
- """
732
-
733
- def __init__(
734
- self,
735
- dim: int,
736
- dim_out: Optional[int] = None,
737
- mult: int = 4,
738
- dropout: float = 0.0,
739
- activation_fn: str = "geglu",
740
- final_dropout: bool = False,
741
- inner_dim=None,
742
- bias: bool = True,
743
- ):
744
- super().__init__()
745
- if inner_dim is None:
746
- inner_dim = int(dim * mult)
747
- dim_out = dim_out if dim_out is not None else dim
748
- linear_cls = LoRACompatibleLinear if not USE_PEFT_BACKEND else nn.Linear
749
-
750
- if activation_fn == "gelu":
751
- act_fn = GELU(dim, inner_dim, bias=bias)
752
- if activation_fn == "gelu-approximate":
753
- act_fn = GELU(dim, inner_dim, approximate="tanh", bias=bias)
754
- elif activation_fn == "geglu":
755
- act_fn = GEGLU(dim, inner_dim, bias=bias)
756
- elif activation_fn == "geglu-approximate":
757
- act_fn = ApproximateGELU(dim, inner_dim, bias=bias)
758
-
759
- self.net = nn.ModuleList([])
760
- # project in
761
- self.net.append(act_fn)
762
- # project dropout
763
- self.net.append(nn.Dropout(dropout))
764
- # project out
765
- self.net.append(linear_cls(inner_dim, dim_out, bias=bias))
766
- # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
767
- if final_dropout:
768
- self.net.append(nn.Dropout(dropout))
769
-
770
- def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
771
- compatible_cls = (GEGLU,) if USE_PEFT_BACKEND else (GEGLU, LoRACompatibleLinear)
772
- for module in self.net:
773
- if isinstance(module, compatible_cls):
774
- hidden_states = module(hidden_states, scale)
775
- else:
776
- hidden_states = module(hidden_states)
777
- return hidden_states
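The deleted attention.py above is a lightly modified copy of diffusers' attention module: BasicTransformerBlock is rewired to use CustomJointAttention, whose XFormersJointAttnProcessor concatenates keys and values across two task streams (the depth and normal branches used by the pipelines below) so the two denoising branches attend to each other. A minimal usage sketch follows; the widths and shapes are illustrative assumptions, and it requires xformers to be installed because the module imports it at the top level.

import torch
from models.attention import BasicTransformerBlock  # the deleted module above

# Hypothetical widths: 320-dim tokens, 8 heads of 40 channels, CLIP-sized context.
block = BasicTransformerBlock(
    dim=320,
    num_attention_heads=8,
    attention_head_dim=40,
    cross_attention_dim=768,
)

hidden_states = torch.randn(2, 64, 320)         # (batch, tokens, channels)
encoder_hidden_states = torch.randn(2, 1, 768)   # e.g. a CLIP image embedding

out = block(hidden_states, encoder_hidden_states=encoder_hidden_states)
print(out.shape)  # torch.Size([2, 64, 320])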
models/depth_normal_pipeline_clip.py DELETED
@@ -1,368 +0,0 @@
1
- # A reimplemented version in public environments by Xiao Fu and Mu Hu
2
-
3
- from typing import Any, Dict, Union
4
-
5
- import torch
6
- from torch.utils.data import DataLoader, TensorDataset
7
- import numpy as np
8
- from tqdm.auto import tqdm
9
- from PIL import Image
10
- from diffusers import (
11
- DiffusionPipeline,
12
- DDIMScheduler,
13
- AutoencoderKL,
14
- )
15
- from models.unet_2d_condition import UNet2DConditionModel
16
- from diffusers.utils import BaseOutput
17
- from transformers import CLIPTextModel, CLIPTokenizer
18
- from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
19
- import torchvision.transforms.functional as TF
20
- from torchvision.transforms import InterpolationMode
21
-
22
- from utils.image_util import resize_max_res,chw2hwc,colorize_depth_maps
23
- from utils.colormap import kitti_colormap
24
- from utils.depth_ensemble import ensemble_depths
25
- from utils.normal_ensemble import ensemble_normals
26
- from utils.batch_size import find_batch_size
27
- import cv2
28
-
29
- class DepthNormalPipelineOutput(BaseOutput):
30
- """
31
- Output class for the Marigold-style monocular depth and normal prediction pipeline.
32
-
33
- Args:
34
- depth_np (`np.ndarray`):
35
- Predicted depth map, with depth values in the range of [0, 1].
36
- depth_colored (`PIL.Image.Image`):
37
- Colorized depth map, with the shape of [3, H, W] and values in [0, 1].
38
- normal_np (`np.ndarray`):
39
- Predicted normal map, with values in the range of [-1, 1].
40
- normal_colored (`PIL.Image.Image`):
41
- Colorized normal map, with the shape of [3, H, W] and values in [0, 1].
42
- uncertainty (`None` or `np.ndarray`):
43
- Uncalibrated uncertainty (MAD, median absolute deviation) coming from ensembling.
44
- """
45
- depth_np: np.ndarray
46
- depth_colored: Image.Image
47
- normal_np: np.ndarray
48
- normal_colored: Image.Image
49
- uncertainty: Union[None, np.ndarray]
50
-
51
- class DepthNormalEstimationPipeline(DiffusionPipeline):
52
- # latent scaling hyper-parameter
53
- latent_scale_factor = 0.18215
54
-
55
- def __init__(self,
56
- unet:UNet2DConditionModel,
57
- vae:AutoencoderKL,
58
- scheduler:DDIMScheduler,
59
- image_encoder:CLIPVisionModelWithProjection,
60
- feature_extractor:CLIPImageProcessor,
61
- ):
62
- super().__init__()
63
-
64
- self.register_modules(
65
- unet=unet,
66
- vae=vae,
67
- scheduler=scheduler,
68
- image_encoder=image_encoder,
69
- feature_extractor=feature_extractor,
70
- )
71
- self.img_embed = None
72
-
73
- @torch.no_grad()
74
- def __call__(self,
75
- input_image:Image,
76
- denosing_steps: int = 10,
77
- ensemble_size: int = 10,
78
- processing_res: int = 768,
79
- match_input_res:bool =True,
80
- batch_size:int = 0,
81
- domain: str = "indoor",
82
- color_map: str="Spectral",
83
- show_progress_bar:bool = True,
84
- ensemble_kwargs: Dict = None,
85
- ) -> DepthNormalPipelineOutput:
86
-
87
- # inherited from the DiffusionPipeline base class
88
- device = self.device
89
- input_size = input_image.size
90
-
91
- # adjust the input resolution.
92
- if not match_input_res:
93
- assert (
94
- processing_res is not None
95
- )," Value Error: `resize_output_back` is only valid with "
96
-
97
- assert processing_res >=0
98
- assert denosing_steps >=1
99
- assert ensemble_size >=1
100
-
101
- # --------------- Image Processing ------------------------
102
- # Resize image
103
- if processing_res >0:
104
- input_image = resize_max_res(
105
- input_image, max_edge_resolution=processing_res
106
- )
107
-
108
- # Convert the image to RGB to remove the alpha channel.
109
- input_image = input_image.convert("RGB")
110
- image = np.array(input_image)
111
-
112
- # Normalize RGB Values.
113
- rgb = np.transpose(image,(2,0,1))
114
- rgb_norm = rgb / 255.0 * 2.0 - 1.0 # [0, 255] -> [-1, 1]
115
- rgb_norm = torch.from_numpy(rgb_norm).to(self.dtype)
116
- rgb_norm = rgb_norm.to(device)
117
-
118
- assert rgb_norm.min() >= -1.0 and rgb_norm.max() <= 1.0
119
-
120
- # ----------------- predicting depth -----------------
121
- duplicated_rgb = torch.stack([rgb_norm] * ensemble_size)
122
- single_rgb_dataset = TensorDataset(duplicated_rgb)
123
-
124
- # find the batch size
125
- if batch_size>0:
126
- _bs = batch_size
127
- else:
128
- _bs = 1
129
-
130
- single_rgb_loader = DataLoader(single_rgb_dataset, batch_size=_bs, shuffle=False)
131
-
132
- # predicted the depth
133
- depth_pred_ls = []
134
- normal_pred_ls = []
135
-
136
- if show_progress_bar:
137
- iterable_bar = tqdm(
138
- single_rgb_loader, desc=" " * 2 + "Inference batches", leave=False
139
- )
140
- else:
141
- iterable_bar = single_rgb_loader
142
-
143
- for batch in iterable_bar:
144
- (batched_image, ) = batch # the image is already normalized to [-1, 1] here
145
-
146
- depth_pred_raw, normal_pred_raw = self.single_infer(
147
- input_rgb=batched_image,
148
- num_inference_steps=denosing_steps,
149
- domain=domain,
150
- show_pbar=show_progress_bar,
151
- )
152
- depth_pred_ls.append(depth_pred_raw.detach().clone())
153
- normal_pred_ls.append(normal_pred_raw.detach().clone())
154
-
155
- depth_preds = torch.concat(depth_pred_ls, axis=0).squeeze() #(10,224,768)
156
- normal_preds = torch.concat(normal_pred_ls, axis=0).squeeze()
157
- torch.cuda.empty_cache() # clear vram cache for ensembling
158
-
159
- # ----------------- Test-time ensembling -----------------
160
- if ensemble_size > 1:
161
- depth_pred, pred_uncert = ensemble_depths(
162
- depth_preds, **(ensemble_kwargs or {})
163
- )
164
- normal_pred = ensemble_normals(normal_preds)
165
- else:
166
- depth_pred = depth_preds
167
- normal_pred = normal_preds
168
- pred_uncert = None
169
-
170
- # ----------------- Post processing -----------------
171
- # Scale prediction to [0, 1]
172
- min_d = torch.min(depth_pred)
173
- max_d = torch.max(depth_pred)
174
- depth_pred = (depth_pred - min_d) / (max_d - min_d)
175
-
176
- # Convert to numpy
177
- depth_pred = depth_pred.cpu().numpy().astype(np.float32)
178
- normal_pred = normal_pred.cpu().numpy().astype(np.float32)
179
-
180
- # Resize back to original resolution
181
- if match_input_res:
182
- pred_img = Image.fromarray(depth_pred)
183
- pred_img = pred_img.resize(input_size)
184
- depth_pred = np.asarray(pred_img)
185
- normal_pred = cv2.resize(chw2hwc(normal_pred), input_size, interpolation = cv2.INTER_NEAREST)
186
-
187
- # Clip output range: current size is the original size
188
- depth_pred = depth_pred.clip(0, 1)
189
- normal_pred = normal_pred.clip(-1, 1)
190
-
191
- # Colorize
192
- depth_colored = colorize_depth_maps(
193
- depth_pred, 0, 1, cmap=color_map
194
- ).squeeze() # [3, H, W], value in (0, 1)
195
- depth_colored = (depth_colored * 255).astype(np.uint8)
196
- depth_colored_hwc = chw2hwc(depth_colored)
197
- depth_colored_img = Image.fromarray(depth_colored_hwc)
198
-
199
- normal_colored = ((normal_pred + 1)/2 * 255).astype(np.uint8)
200
- normal_colored_img = Image.fromarray(normal_colored)
201
-
202
- return DepthNormalPipelineOutput(
203
- depth_np = depth_pred,
204
- depth_colored = depth_colored_img,
205
- normal_np = normal_pred,
206
- normal_colored = normal_colored_img,
207
- uncertainty=pred_uncert,
208
- )
209
-
210
- def __encode_img_embed(self, rgb):
211
- """
212
- Encode clip embeddings for img
213
- """
214
- clip_image_mean = torch.as_tensor(self.feature_extractor.image_mean)[:,None,None].to(device=self.device, dtype=self.dtype)
215
- clip_image_std = torch.as_tensor(self.feature_extractor.image_std)[:,None,None].to(device=self.device, dtype=self.dtype)
216
-
217
- img_in_proc = TF.resize((rgb +1)/2,
218
- (self.feature_extractor.crop_size['height'], self.feature_extractor.crop_size['width']),
219
- interpolation=InterpolationMode.BICUBIC,
220
- antialias=True
221
- )
222
- # do the normalization in float32 to preserve precision
223
- img_in_proc = ((img_in_proc.float() - clip_image_mean) / clip_image_std).to(self.dtype)
224
- img_embed = self.image_encoder(img_in_proc).image_embeds.unsqueeze(1).to(self.dtype)
225
-
226
- self.img_embed = img_embed
227
-
228
-
229
- @torch.no_grad()
230
- def single_infer(self,input_rgb:torch.Tensor,
231
- num_inference_steps:int,
232
- domain:str,
233
- show_pbar:bool,):
234
-
235
- device = input_rgb.device
236
-
237
- # Set timesteps: inherited from the diffusion pipeline
238
- self.scheduler.set_timesteps(num_inference_steps, device=device) # here the numbers of the steps is only 10.
239
- timesteps = self.scheduler.timesteps # [T]
240
-
241
- # encode image
242
- rgb_latent = self.encode_RGB(input_rgb)
243
-
244
- # Initial depth map (Gaussian noise)
245
- geo_latent = torch.randn(rgb_latent.shape, device=device, dtype=self.dtype).repeat(2,1,1,1)
246
- rgb_latent = rgb_latent.repeat(2,1,1,1)
247
-
248
- # Batched img embedding
249
- if self.img_embed is None:
250
- self.__encode_img_embed(input_rgb)
251
-
252
- batch_img_embed = self.img_embed.repeat(
253
- (rgb_latent.shape[0], 1, 1)
254
- ) # [B, 1, 768]
255
-
256
- # hybrid hierarchical switcher
257
- geo_class = torch.tensor([[0., 1.], [1, 0]], device=device, dtype=self.dtype)
258
- geo_embedding = torch.cat([torch.sin(geo_class), torch.cos(geo_class)], dim=-1)
259
-
260
- if domain == "indoor":
261
- domain_class = torch.tensor([[1., 0., 0]], device=device, dtype=self.dtype).repeat(2,1)
262
- elif domain == "outdoor":
263
- domain_class = torch.tensor([[0., 1., 0]], device=device, dtype=self.dtype).repeat(2,1)
264
- elif domain == "object":
265
- domain_class = torch.tensor([[0., 0., 1]], device=device, dtype=self.dtype).repeat(2,1)
266
- domain_embedding = torch.cat([torch.sin(domain_class), torch.cos(domain_class)], dim=-1)
267
-
268
- class_embedding = torch.cat((geo_embedding, domain_embedding), dim=-1)
269
-
270
- # Denoising loop
271
- if show_pbar:
272
- iterable = tqdm(
273
- enumerate(timesteps),
274
- total=len(timesteps),
275
- leave=False,
276
- desc=" " * 4 + "Diffusion denoising",
277
- )
278
- else:
279
- iterable = enumerate(timesteps)
280
-
281
- for i, t in iterable:
282
- unet_input = torch.cat([rgb_latent, geo_latent], dim=1)
283
-
284
- # predict the noise residual
285
- noise_pred = self.unet(
286
- unet_input, t.repeat(2), encoder_hidden_states=batch_img_embed, class_labels=class_embedding
287
- ).sample # [B, 4, h, w]
288
-
289
- # compute the previous noisy sample x_t -> x_t-1
290
- geo_latent = self.scheduler.step(noise_pred, t, geo_latent).prev_sample
291
-
292
- geo_latent = geo_latent
293
- torch.cuda.empty_cache()
294
-
295
- depth = self.decode_depth(geo_latent[0][None])
296
- depth = torch.clip(depth, -1.0, 1.0)
297
- depth = (depth + 1.0) / 2.0
298
-
299
- normal = self.decode_normal(geo_latent[1][None])
300
- normal /= (torch.norm(normal, p=2, dim=1, keepdim=True)+1e-5)
301
- normal *= -1.
302
-
303
- return depth, normal
304
-
305
-
306
- def encode_RGB(self, rgb_in: torch.Tensor) -> torch.Tensor:
307
- """
308
- Encode RGB image into latent.
309
-
310
- Args:
311
- rgb_in (`torch.Tensor`):
312
- Input RGB image to be encoded.
313
-
314
- Returns:
315
- `torch.Tensor`: Image latent.
316
- """
317
-
318
- # encode
319
- h = self.vae.encoder(rgb_in)
320
-
321
- moments = self.vae.quant_conv(h)
322
- mean, logvar = torch.chunk(moments, 2, dim=1)
323
- # scale latent
324
- rgb_latent = mean * self.latent_scale_factor
325
-
326
- return rgb_latent
327
-
328
- def decode_depth(self, depth_latent: torch.Tensor) -> torch.Tensor:
329
- """
330
- Decode depth latent into depth map.
331
-
332
- Args:
333
- depth_latent (`torch.Tensor`):
334
- Depth latent to be decoded.
335
-
336
- Returns:
337
- `torch.Tensor`: Decoded depth map.
338
- """
339
-
340
- # scale latent
341
- depth_latent = depth_latent / self.latent_scale_factor
342
- # decode
343
- z = self.vae.post_quant_conv(depth_latent)
344
- stacked = self.vae.decoder(z)
345
- # mean of output channels
346
- depth_mean = stacked.mean(dim=1, keepdim=True)
347
- return depth_mean
348
-
349
- def decode_normal(self, normal_latent: torch.Tensor) -> torch.Tensor:
350
- """
351
- Decode normal latent into normal map.
352
-
353
- Args:
354
- normal_latent (`torch.Tensor`):
355
- Depth latent to be decoded.
356
-
357
- Returns:
358
- `torch.Tensor`: Decoded normal map.
359
- """
360
-
361
- # scale latent
362
- normal_latent = normal_latent / self.latent_scale_factor
363
- # decode
364
- z = self.vae.post_quant_conv(normal_latent)
365
- normal = self.vae.decoder(z)
366
- return normal
367
-
368
-
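The deleted depth_normal_pipeline_clip.py wrapped a modified UNet, a VAE, a DDIM scheduler and a CLIP image encoder into DepthNormalEstimationPipeline, which denoises a joint depth/normal latent conditioned on the input image embedding and a domain switcher. A hypothetical invocation sketch (the checkpoint path and device are assumptions, not part of the deleted code):

from PIL import Image
from models.depth_normal_pipeline_clip import DepthNormalEstimationPipeline

pipe = DepthNormalEstimationPipeline.from_pretrained("path/to/checkpoint").to("cuda")

result = pipe(
    Image.open("example.jpg"),
    denosing_steps=10,       # parameter name as spelled in the deleted code
    ensemble_size=3,         # >1 triggers test-time ensembling
    processing_res=768,
    domain="indoor",         # "indoor" | "outdoor" | "object"
)
result.depth_colored.save("depth_colored.png")
result.normal_colored.save("normal_colored.png")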
models/depth_normal_pipeline_clip_cfg.py DELETED
@@ -1,373 +0,0 @@
1
- # A reimplemented version in public environments by Xiao Fu and Mu Hu
2
-
3
- from typing import Any, Dict, Union
4
-
5
- import torch
6
- from torch.utils.data import DataLoader, TensorDataset
7
- import numpy as np
8
- from tqdm.auto import tqdm
9
- from PIL import Image
10
- from diffusers import (
11
- DiffusionPipeline,
12
- DDIMScheduler,
13
- AutoencoderKL,
14
- )
15
- from models.unet_2d_condition import UNet2DConditionModel
16
- from diffusers.utils import BaseOutput
17
- from transformers import CLIPTextModel, CLIPTokenizer
18
- from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
19
- import torchvision.transforms.functional as TF
20
- from torchvision.transforms import InterpolationMode
21
-
22
- from utils.image_util import resize_max_res,chw2hwc,colorize_depth_maps
23
- from utils.colormap import kitti_colormap
24
- from utils.depth_ensemble import ensemble_depths
25
- from utils.normal_ensemble import ensemble_normals
26
- from utils.batch_size import find_batch_size
27
- import cv2
28
-
29
- class DepthNormalPipelineOutput(BaseOutput):
30
- """
31
- Output class for the Marigold-style monocular depth and normal prediction pipeline.
32
-
33
- Args:
34
- depth_np (`np.ndarray`):
35
- Predicted depth map, with depth values in the range of [0, 1].
36
- depth_colored (`PIL.Image.Image`):
37
- Colorized depth map, with the shape of [3, H, W] and values in [0, 1].
38
- normal_np (`np.ndarray`):
39
- Predicted normal map, with values in the range of [-1, 1].
40
- normal_colored (`PIL.Image.Image`):
41
- Colorized normal map, with the shape of [3, H, W] and values in [0, 1].
42
- uncertainty (`None` or `np.ndarray`):
43
- Uncalibrated uncertainty (MAD, median absolute deviation) coming from ensembling.
44
- """
45
- depth_np: np.ndarray
46
- depth_colored: Image.Image
47
- normal_np: np.ndarray
48
- normal_colored: Image.Image
49
- uncertainty: Union[None, np.ndarray]
50
-
51
- class DepthNormalEstimationPipeline(DiffusionPipeline):
52
- # latent scaling hyper-parameter
53
- latent_scale_factor = 0.18215
54
-
55
- def __init__(self,
56
- unet:UNet2DConditionModel,
57
- vae:AutoencoderKL,
58
- scheduler:DDIMScheduler,
59
- image_encoder:CLIPVisionModelWithProjection,
60
- feature_extractor:CLIPImageProcessor,
61
- ):
62
- super().__init__()
63
-
64
- self.register_modules(
65
- unet=unet,
66
- vae=vae,
67
- scheduler=scheduler,
68
- image_encoder=image_encoder,
69
- feature_extractor=feature_extractor,
70
- )
71
- self.img_embed = None
72
-
73
- @torch.no_grad()
74
- def __call__(self,
75
- input_image:Image,
76
- denosing_steps: int = 10,
77
- ensemble_size: int = 10,
78
- processing_res: int = 768,
79
- match_input_res:bool =True,
80
- batch_size:int = 0,
81
- guidance_scale:int = 1,
82
- domain: str = "indoor",
83
- color_map: str="Spectral",
84
- show_progress_bar:bool = True,
85
- ensemble_kwargs: Dict = None,
86
- ) -> DepthNormalPipelineOutput:
87
-
88
- # inherited from the DiffusionPipeline base class
89
- device = self.device
90
- input_size = input_image.size
91
-
92
- # adjust the input resolution.
93
- if not match_input_res:
94
- assert (
95
- processing_res is not None
96
- )," Value Error: `resize_output_back` is only valid with "
97
-
98
- assert processing_res >=0
99
- assert denosing_steps >=1
100
- assert ensemble_size >=1
101
-
102
- # --------------- Image Processing ------------------------
103
- # Resize image
104
- if processing_res >0:
105
- input_image = resize_max_res(
106
- input_image, max_edge_resolution=processing_res
107
- )
108
-
109
- # Convert the image to RGB to remove the alpha channel.
110
- input_image = input_image.convert("RGB")
111
- image = np.array(input_image)
112
-
113
- # Normalize RGB Values.
114
- rgb = np.transpose(image,(2,0,1))
115
- rgb_norm = rgb / 255.0 * 2.0 - 1.0 # [0, 255] -> [-1, 1]
116
- rgb_norm = torch.from_numpy(rgb_norm).to(self.dtype)
117
- rgb_norm = rgb_norm.to(device)
118
-
119
- assert rgb_norm.min() >= -1.0 and rgb_norm.max() <= 1.0
120
-
121
- # ----------------- predicting depth -----------------
122
- duplicated_rgb = torch.stack([rgb_norm] * ensemble_size)
123
- single_rgb_dataset = TensorDataset(duplicated_rgb)
124
-
125
- # find the batch size
126
- if batch_size>0:
127
- _bs = batch_size
128
- else:
129
- _bs = 1
130
-
131
- single_rgb_loader = DataLoader(single_rgb_dataset, batch_size=_bs, shuffle=False)
132
-
133
- # predicted the depth
134
- depth_pred_ls = []
135
- normal_pred_ls = []
136
-
137
- if show_progress_bar:
138
- iterable_bar = tqdm(
139
- single_rgb_loader, desc=" " * 2 + "Inference batches", leave=False
140
- )
141
- else:
142
- iterable_bar = single_rgb_loader
143
-
144
- for batch in iterable_bar:
145
- (batched_image, ) = batch # the image is already normalized to [-1, 1] here
146
-
147
- depth_pred_raw, normal_pred_raw = self.single_infer(
148
- input_rgb=batched_image,
149
- num_inference_steps=denosing_steps,
150
- guidance_scale=guidance_scale,
151
- domain=domain,
152
- show_pbar=show_progress_bar,
153
- )
154
- depth_pred_ls.append(depth_pred_raw.detach().clone())
155
- normal_pred_ls.append(normal_pred_raw.detach().clone())
156
-
157
- depth_preds = torch.concat(depth_pred_ls, axis=0).squeeze()
158
- normal_preds = torch.concat(normal_pred_ls, axis=0).squeeze()
159
- torch.cuda.empty_cache() # clear vram cache for ensembling
160
-
161
- # ----------------- Test-time ensembling -----------------
162
- if ensemble_size > 1:
163
- depth_pred, pred_uncert = ensemble_depths(
164
- depth_preds, **(ensemble_kwargs or {})
165
- )
166
- normal_pred = ensemble_normals(normal_preds)
167
- else:
168
- depth_pred = depth_preds
169
- normal_pred = normal_preds
170
- pred_uncert = None
171
-
172
- # ----------------- Post processing -----------------
173
- # Scale prediction to [0, 1]
174
- min_d = torch.min(depth_pred)
175
- max_d = torch.max(depth_pred)
176
- depth_pred = (depth_pred - min_d) / (max_d - min_d)
177
-
178
- # Convert to numpy
179
- depth_pred = depth_pred.cpu().numpy().astype(np.float32)
180
- normal_pred = normal_pred.cpu().numpy().astype(np.float32)
181
-
182
- # Resize back to original resolution
183
- if match_input_res:
184
- pred_img = Image.fromarray(depth_pred)
185
- pred_img = pred_img.resize(input_size)
186
- depth_pred = np.asarray(pred_img)
187
- normal_pred = cv2.resize(chw2hwc(normal_pred), input_size, interpolation = cv2.INTER_NEAREST)
188
-
189
- # Clip output range: current size is the original size
190
- depth_pred = depth_pred.clip(0, 1)
191
- normal_pred = normal_pred.clip(-1, 1)
192
-
193
- # Colorize
194
- depth_colored = colorize_depth_maps(
195
- depth_pred, 0, 1, cmap=color_map
196
- ).squeeze() # [3, H, W], value in (0, 1)
197
- depth_colored = (depth_colored * 255).astype(np.uint8)
198
- depth_colored_hwc = chw2hwc(depth_colored)
199
- depth_colored_img = Image.fromarray(depth_colored_hwc)
200
-
201
- normal_colored = ((normal_pred + 1)/2 * 255).astype(np.uint8)
202
- normal_colored_img = Image.fromarray(normal_colored)
203
-
204
- return DepthNormalPipelineOutput(
205
- depth_np = depth_pred,
206
- depth_colored = depth_colored_img,
207
- normal_np = normal_pred,
208
- normal_colored = normal_colored_img,
209
- uncertainty=pred_uncert,
210
- )
211
-
212
- def __encode_img_embed(self, rgb):
213
- """
214
- Encode clip embeddings for img
215
- """
216
- clip_image_mean = torch.as_tensor(self.feature_extractor.image_mean)[:,None,None].to(device=self.device, dtype=self.dtype)
217
- clip_image_std = torch.as_tensor(self.feature_extractor.image_std)[:,None,None].to(device=self.device, dtype=self.dtype)
218
-
219
- img_in_proc = TF.resize((rgb +1)/2,
220
- (self.feature_extractor.crop_size['height'], self.feature_extractor.crop_size['width']),
221
- interpolation=InterpolationMode.BICUBIC,
222
- antialias=True
223
- )
224
- # do the normalization in float32 to preserve precision
225
- img_in_proc = ((img_in_proc.float() - clip_image_mean) / clip_image_std).to(self.dtype)
226
- img_embed = self.image_encoder(img_in_proc).image_embeds.unsqueeze(1).to(self.dtype)
227
-
228
- self.img_embed = img_embed
229
-
230
-
231
- @torch.no_grad()
232
- def single_infer(self,input_rgb:torch.Tensor,
233
- num_inference_steps:int,
234
- guidance_scale:int,
235
- domain:str,
236
- show_pbar:bool,):
237
-
238
- device = input_rgb.device
239
-
240
- # Set timesteps: inherited from the diffusion pipeline
241
- self.scheduler.set_timesteps(num_inference_steps, device=device) # here the numbers of the steps is only 10.
242
- timesteps = self.scheduler.timesteps # [T]
243
-
244
- # encode image
245
- rgb_latent = self.encode_RGB(input_rgb)
246
-
247
- # Initial depth map (Gaussian noise)
248
- geo_latent = torch.randn(rgb_latent.shape, device=device, dtype=self.dtype).repeat(2,1,1,1)
249
- rgb_latent = rgb_latent.repeat(2,1,1,1)
250
-
251
- # Batched img embedding
252
- if self.img_embed is None:
253
- self.__encode_img_embed(input_rgb)
254
-
255
- batch_img_embed = self.img_embed.repeat(
256
- (rgb_latent.shape[0], 1, 1)
257
- ) # [B, 1, 768]
258
-
259
- batch_img_embed = torch.cat((torch.zeros_like(batch_img_embed), batch_img_embed), dim=0)
260
- rgb_latent = torch.cat((rgb_latent, rgb_latent), dim=0)
261
-
262
- # hybrid switcher
263
- geo_class = torch.tensor([[0., 1.], [1, 0]], device=device, dtype=self.dtype)
264
- geo_embedding = torch.cat([torch.sin(geo_class), torch.cos(geo_class)], dim=-1)
265
-
266
- if domain == "indoor":
267
- domain_class = torch.tensor([[1., 0., 0]], device=device, dtype=self.dtype).repeat(2,1)
268
- elif domain == "outdoor":
269
- domain_class = torch.tensor([[0., 1., 0]], device=device, dtype=self.dtype).repeat(2,1)
270
- elif domain == "object":
271
- domain_class = torch.tensor([[0., 0., 1]], device=device, dtype=self.dtype).repeat(2,1)
272
- domain_embedding = torch.cat([torch.sin(domain_class), torch.cos(domain_class)], dim=-1)
273
-
274
- class_embedding = torch.cat((geo_embedding, domain_embedding), dim=-1)
275
-
276
- # Denoising loop
277
- if show_pbar:
278
- iterable = tqdm(
279
- enumerate(timesteps),
280
- total=len(timesteps),
281
- leave=False,
282
- desc=" " * 4 + "Diffusion denoising",
283
- )
284
- else:
285
- iterable = enumerate(timesteps)
286
-
287
- for i, t in iterable:
288
- unet_input = torch.cat((rgb_latent, geo_latent.repeat(2,1,1,1)), dim=1)
289
- # predict the noise residual
290
- noise_pred = self.unet(unet_input, t.repeat(4), encoder_hidden_states=batch_img_embed, class_labels=class_embedding.repeat(2,1)).sample
291
- noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
292
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
293
-
294
- # compute the previous noisy sample x_t -> x_t-1
295
- geo_latent = self.scheduler.step(noise_pred, t, geo_latent).prev_sample
296
-
297
- geo_latent = geo_latent
298
- torch.cuda.empty_cache()
299
-
300
- depth = self.decode_depth(geo_latent[0][None])
301
- depth = torch.clip(depth, -1.0, 1.0)
302
- depth = (depth + 1.0) / 2.0
303
-
304
- normal = self.decode_normal(geo_latent[1][None])
305
- normal /= (torch.norm(normal, p=2, dim=1, keepdim=True)+1e-5)
306
- normal *= -1.
307
-
308
- return depth, normal
309
-
310
-
311
- def encode_RGB(self, rgb_in: torch.Tensor) -> torch.Tensor:
312
- """
313
- Encode RGB image into latent.
314
-
315
- Args:
316
- rgb_in (`torch.Tensor`):
317
- Input RGB image to be encoded.
318
-
319
- Returns:
320
- `torch.Tensor`: Image latent.
321
- """
322
-
323
- # encode
324
- h = self.vae.encoder(rgb_in)
325
-
326
- moments = self.vae.quant_conv(h)
327
- mean, logvar = torch.chunk(moments, 2, dim=1)
328
- # scale latent
329
- rgb_latent = mean * self.latent_scale_factor
330
-
331
- return rgb_latent
332
-
333
- def decode_depth(self, depth_latent: torch.Tensor) -> torch.Tensor:
334
- """
335
- Decode depth latent into depth map.
336
-
337
- Args:
338
- depth_latent (`torch.Tensor`):
339
- Depth latent to be decoded.
340
-
341
- Returns:
342
- `torch.Tensor`: Decoded depth map.
343
- """
344
-
345
- # scale latent
346
- depth_latent = depth_latent / self.latent_scale_factor
347
- # decode
348
- z = self.vae.post_quant_conv(depth_latent)
349
- stacked = self.vae.decoder(z)
350
- # mean of output channels
351
- depth_mean = stacked.mean(dim=1, keepdim=True)
352
- return depth_mean
353
-
354
- def decode_normal(self, normal_latent: torch.Tensor) -> torch.Tensor:
355
- """
356
- Decode normal latent into normal map.
357
-
358
- Args:
359
- normal_latent (`torch.Tensor`):
360
- Normal latent to be decoded.
361
-
362
- Returns:
363
- `torch.Tensor`: Decoded normal map.
364
- """
365
-
366
- # scale latent
367
- normal_latent = normal_latent / self.latent_scale_factor
368
- # decode
369
- z = self.vae.post_quant_conv(normal_latent)
370
- normal = self.vae.decoder(z)
371
- return normal
372
-
373
-
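Note: the classifier-free guidance step inside `single_infer` above can be illustrated in isolation. The following is a minimal sketch with dummy tensors and is not part of the original pipeline; the helper name `apply_cfg` and the tensor shapes are assumptions for illustration only.

import torch

# Minimal sketch of the guidance combination used in `single_infer` above.
# `noise_pred` is assumed to stack the unconditional and conditional UNet
# predictions along the batch dimension (uncond first, cond second).
def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)

# Usage with dummy tensors (hypothetical shapes):
noise_pred = torch.randn(4, 8, 64, 64)              # [uncond | cond] stacked on dim 0
guided = apply_cfg(noise_pred, guidance_scale=3.0)  # shape (2, 8, 64, 64)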
models/transformer_2d.py DELETED
@@ -1,463 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- # Some modifications are reimplemented in public environments by Xiao Fu and Mu Hu
16
-
17
- from dataclasses import dataclass
18
- from typing import Any, Dict, Optional
19
-
20
- import torch
21
- import torch.nn.functional as F
22
- from torch import nn
23
-
24
- from diffusers.configuration_utils import ConfigMixin, register_to_config
25
- from diffusers.models.embeddings import ImagePositionalEmbeddings
26
- from diffusers.utils import USE_PEFT_BACKEND, BaseOutput, deprecate, is_torch_version
27
- from models.attention import BasicTransformerBlock
28
- from diffusers.models.embeddings import PatchEmbed, PixArtAlphaTextProjection
29
- from diffusers.models.lora import LoRACompatibleConv, LoRACompatibleLinear
30
- from diffusers.models.modeling_utils import ModelMixin
31
- from diffusers.models.normalization import AdaLayerNormSingle
32
-
33
-
34
- @dataclass
35
- class Transformer2DModelOutput(BaseOutput):
36
- """
37
- The output of [`Transformer2DModel`].
38
-
39
- Args:
40
- sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or `(batch size, num_vector_embeds - 1, num_latent_pixels)` if [`Transformer2DModel`] is discrete):
41
- The hidden states output conditioned on the `encoder_hidden_states` input. If discrete, returns probability
42
- distributions for the unnoised latent pixels.
43
- """
44
-
45
- sample: torch.FloatTensor
46
-
47
-
48
- class Transformer2DModel(ModelMixin, ConfigMixin):
49
- """
50
- A 2D Transformer model for image-like data.
51
-
52
- Parameters:
53
- num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
54
- attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
55
- in_channels (`int`, *optional*):
56
- The number of channels in the input and output (specify if the input is **continuous**).
57
- num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
58
- dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
59
- cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.
60
- sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**).
61
- This is fixed during training since it is used to learn a number of position embeddings.
62
- num_vector_embeds (`int`, *optional*):
63
- The number of classes of the vector embeddings of the latent pixels (specify if the input is **discrete**).
64
- Includes the class for the masked latent pixel.
65
- activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to use in feed-forward.
66
- num_embeds_ada_norm ( `int`, *optional*):
67
- The number of diffusion steps used during training. Pass if at least one of the norm_layers is
68
- `AdaLayerNorm`. This is fixed during training since it is used to learn a number of embeddings that are
69
- added to the hidden states.
70
-
71
- During inference, you can denoise for up to but not more steps than `num_embeds_ada_norm`.
72
- attention_bias (`bool`, *optional*):
73
- Configure if the `TransformerBlocks` attention should contain a bias parameter.
74
- """
75
-
76
- _supports_gradient_checkpointing = True
77
-
78
- @register_to_config
79
- def __init__(
80
- self,
81
- num_attention_heads: int = 16,
82
- attention_head_dim: int = 88,
83
- in_channels: Optional[int] = None,
84
- out_channels: Optional[int] = None,
85
- num_layers: int = 1,
86
- dropout: float = 0.0,
87
- norm_num_groups: int = 32,
88
- cross_attention_dim: Optional[int] = None,
89
- attention_bias: bool = False,
90
- sample_size: Optional[int] = None,
91
- num_vector_embeds: Optional[int] = None,
92
- patch_size: Optional[int] = None,
93
- activation_fn: str = "geglu",
94
- num_embeds_ada_norm: Optional[int] = None,
95
- use_linear_projection: bool = False,
96
- only_cross_attention: bool = False,
97
- double_self_attention: bool = False,
98
- upcast_attention: bool = False,
99
- norm_type: str = "layer_norm",
100
- norm_elementwise_affine: bool = True,
101
- norm_eps: float = 1e-5,
102
- attention_type: str = "default",
103
- caption_channels: int = None,
104
- ):
105
- super().__init__()
106
- self.use_linear_projection = use_linear_projection
107
- self.num_attention_heads = num_attention_heads
108
- self.attention_head_dim = attention_head_dim
109
- inner_dim = num_attention_heads * attention_head_dim
110
-
111
- conv_cls = nn.Conv2d if USE_PEFT_BACKEND else LoRACompatibleConv
112
- linear_cls = nn.Linear if USE_PEFT_BACKEND else LoRACompatibleLinear
113
-
114
- # 1. Transformer2DModel can process both standard continuous images of shape `(batch_size, num_channels, width, height)` as well as quantized image embeddings of shape `(batch_size, num_image_vectors)`
115
- # Define whether input is continuous or discrete depending on configuration
116
- self.is_input_continuous = (in_channels is not None) and (patch_size is None)
117
- self.is_input_vectorized = num_vector_embeds is not None
118
- self.is_input_patches = in_channels is not None and patch_size is not None
119
-
120
- if norm_type == "layer_norm" and num_embeds_ada_norm is not None:
121
- deprecation_message = (
122
- f"The configuration file of this model: {self.__class__} is outdated. `norm_type` is either not set or"
123
- " incorrectly set to `'layer_norm'`. Make sure to set `norm_type` to `'ada_norm'` in the config."
124
- " Please make sure to update the config accordingly as leaving `norm_type` might lead to incorrect"
125
- " results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it"
126
- " would be very nice if you could open a Pull request for the `transformer/config.json` file"
127
- )
128
- deprecate("norm_type!=num_embeds_ada_norm", "1.0.0", deprecation_message, standard_warn=False)
129
- norm_type = "ada_norm"
130
-
131
- if self.is_input_continuous and self.is_input_vectorized:
132
- raise ValueError(
133
- f"Cannot define both `in_channels`: {in_channels} and `num_vector_embeds`: {num_vector_embeds}. Make"
134
- " sure that either `in_channels` or `num_vector_embeds` is None."
135
- )
136
- elif self.is_input_vectorized and self.is_input_patches:
137
- raise ValueError(
138
- f"Cannot define both `num_vector_embeds`: {num_vector_embeds} and `patch_size`: {patch_size}. Make"
139
- " sure that either `num_vector_embeds` or `num_patches` is None."
140
- )
141
- elif not self.is_input_continuous and not self.is_input_vectorized and not self.is_input_patches:
142
- raise ValueError(
143
- f"Has to define `in_channels`: {in_channels}, `num_vector_embeds`: {num_vector_embeds}, or patch_size:"
144
- f" {patch_size}. Make sure that `in_channels`, `num_vector_embeds` or `num_patches` is not None."
145
- )
146
-
147
- # 2. Define input layers
148
- if self.is_input_continuous:
149
- self.in_channels = in_channels
150
-
151
- self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
152
- if use_linear_projection:
153
- self.proj_in = linear_cls(in_channels, inner_dim)
154
- else:
155
- self.proj_in = conv_cls(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)
156
- elif self.is_input_vectorized:
157
- assert sample_size is not None, "Transformer2DModel over discrete input must provide sample_size"
158
- assert num_vector_embeds is not None, "Transformer2DModel over discrete input must provide num_embed"
159
-
160
- self.height = sample_size
161
- self.width = sample_size
162
- self.num_vector_embeds = num_vector_embeds
163
- self.num_latent_pixels = self.height * self.width
164
-
165
- self.latent_image_embedding = ImagePositionalEmbeddings(
166
- num_embed=num_vector_embeds, embed_dim=inner_dim, height=self.height, width=self.width
167
- )
168
- elif self.is_input_patches:
169
- assert sample_size is not None, "Transformer2DModel over patched input must provide sample_size"
170
-
171
- self.height = sample_size
172
- self.width = sample_size
173
-
174
- self.patch_size = patch_size
175
- interpolation_scale = self.config.sample_size // 64 # => 64 (= 512 pixart) has interpolation scale 1
176
- interpolation_scale = max(interpolation_scale, 1)
177
- self.pos_embed = PatchEmbed(
178
- height=sample_size,
179
- width=sample_size,
180
- patch_size=patch_size,
181
- in_channels=in_channels,
182
- embed_dim=inner_dim,
183
- interpolation_scale=interpolation_scale,
184
- )
185
-
186
- # 3. Define transformers blocks
187
- self.transformer_blocks = nn.ModuleList(
188
- [
189
- BasicTransformerBlock(
190
- inner_dim,
191
- num_attention_heads,
192
- attention_head_dim,
193
- dropout=dropout,
194
- cross_attention_dim=cross_attention_dim,
195
- activation_fn=activation_fn,
196
- num_embeds_ada_norm=num_embeds_ada_norm,
197
- attention_bias=attention_bias,
198
- only_cross_attention=only_cross_attention,
199
- double_self_attention=double_self_attention,
200
- upcast_attention=upcast_attention,
201
- norm_type=norm_type,
202
- norm_elementwise_affine=norm_elementwise_affine,
203
- norm_eps=norm_eps,
204
- attention_type=attention_type,
205
- )
206
- for d in range(num_layers)
207
- ]
208
- )
209
-
210
- # 4. Define output layers
211
- self.out_channels = in_channels if out_channels is None else out_channels
212
- if self.is_input_continuous:
213
- # TODO: should use out_channels for continuous projections
214
- if use_linear_projection:
215
- self.proj_out = linear_cls(inner_dim, in_channels)
216
- else:
217
- self.proj_out = conv_cls(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
218
- elif self.is_input_vectorized:
219
- self.norm_out = nn.LayerNorm(inner_dim)
220
- self.out = nn.Linear(inner_dim, self.num_vector_embeds - 1)
221
- elif self.is_input_patches and norm_type != "ada_norm_single":
222
- self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6)
223
- self.proj_out_1 = nn.Linear(inner_dim, 2 * inner_dim)
224
- self.proj_out_2 = nn.Linear(inner_dim, patch_size * patch_size * self.out_channels)
225
- elif self.is_input_patches and norm_type == "ada_norm_single":
226
- self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6)
227
- self.scale_shift_table = nn.Parameter(torch.randn(2, inner_dim) / inner_dim**0.5)
228
- self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * self.out_channels)
229
-
230
- # 5. PixArt-Alpha blocks.
231
- self.adaln_single = None
232
- self.use_additional_conditions = False
233
- if norm_type == "ada_norm_single":
234
- self.use_additional_conditions = self.config.sample_size == 128
235
- # TODO(Sayak, PVP) clean this, for now we use sample size to determine whether to use
236
- # additional conditions until we find better name
237
- self.adaln_single = AdaLayerNormSingle(inner_dim, use_additional_conditions=self.use_additional_conditions)
238
-
239
- self.caption_projection = None
240
- if caption_channels is not None:
241
- self.caption_projection = PixArtAlphaTextProjection(in_features=caption_channels, hidden_size=inner_dim)
242
-
243
- self.gradient_checkpointing = False
244
-
245
- def _set_gradient_checkpointing(self, module, value=False):
246
- if hasattr(module, "gradient_checkpointing"):
247
- module.gradient_checkpointing = value
248
-
249
- def forward(
250
- self,
251
- hidden_states: torch.Tensor,
252
- encoder_hidden_states: Optional[torch.Tensor] = None,
253
- timestep: Optional[torch.LongTensor] = None,
254
- added_cond_kwargs: Dict[str, torch.Tensor] = None,
255
- class_labels: Optional[torch.LongTensor] = None,
256
- cross_attention_kwargs: Dict[str, Any] = None,
257
- attention_mask: Optional[torch.Tensor] = None,
258
- encoder_attention_mask: Optional[torch.Tensor] = None,
259
- return_dict: bool = True,
260
- ):
261
- """
262
- The [`Transformer2DModel`] forward method.
263
-
264
- Args:
265
- hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous):
266
- Input `hidden_states`.
267
- encoder_hidden_states ( `torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
268
- Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
269
- self-attention.
270
- timestep ( `torch.LongTensor`, *optional*):
271
- Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.
272
- class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
273
- Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in
274
- `AdaLayerNormZero`.
275
- cross_attention_kwargs ( `Dict[str, Any]`, *optional*):
276
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
277
- `self.processor` in
278
- [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
279
- attention_mask ( `torch.Tensor`, *optional*):
280
- An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
281
- is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
282
- negative values to the attention scores corresponding to "discard" tokens.
283
- encoder_attention_mask ( `torch.Tensor`, *optional*):
284
- Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:
285
-
286
- * Mask `(batch, sequence_length)` True = keep, False = discard.
287
- * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.
288
-
289
- If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format
290
- above. This bias will be added to the cross-attention scores.
291
- return_dict (`bool`, *optional*, defaults to `True`):
292
- Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain
293
- tuple.
294
-
295
- Returns:
296
- If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
297
- `tuple` where the first element is the sample tensor.
298
- """
299
- # ensure attention_mask is a bias, and give it a singleton query_tokens dimension.
300
- # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.
301
- # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.
302
- # expects mask of shape:
303
- # [batch, key_tokens]
304
- # adds singleton query_tokens dimension:
305
- # [batch, 1, key_tokens]
306
- # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
307
- # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
308
- # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
309
-
310
- if attention_mask is not None and attention_mask.ndim == 2:
311
- # assume that mask is expressed as:
312
- # (1 = keep, 0 = discard)
313
- # convert mask into a bias that can be added to attention scores:
314
- # (keep = +0, discard = -10000.0)
315
- attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
316
- attention_mask = attention_mask.unsqueeze(1)
317
-
318
- # convert encoder_attention_mask to a bias the same way we do for attention_mask
319
- if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:
320
- encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0
321
- encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
322
-
323
- # Retrieve lora scale.
324
- lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0
325
-
326
- # 1. Input
327
- if self.is_input_continuous:
328
- batch, _, height, width = hidden_states.shape
329
- residual = hidden_states
330
-
331
- hidden_states = self.norm(hidden_states)
332
- if not self.use_linear_projection:
333
- hidden_states = (
334
- self.proj_in(hidden_states, scale=lora_scale)
335
- if not USE_PEFT_BACKEND
336
- else self.proj_in(hidden_states)
337
- )
338
- inner_dim = hidden_states.shape[1]
339
- hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
340
- else:
341
- inner_dim = hidden_states.shape[1]
342
- hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
343
- hidden_states = (
344
- self.proj_in(hidden_states, scale=lora_scale)
345
- if not USE_PEFT_BACKEND
346
- else self.proj_in(hidden_states)
347
- )
348
-
349
- elif self.is_input_vectorized:
350
- hidden_states = self.latent_image_embedding(hidden_states)
351
- elif self.is_input_patches:
352
- height, width = hidden_states.shape[-2] // self.patch_size, hidden_states.shape[-1] // self.patch_size
353
- hidden_states = self.pos_embed(hidden_states)
354
-
355
- if self.adaln_single is not None:
356
- if self.use_additional_conditions and added_cond_kwargs is None:
357
- raise ValueError(
358
- "`added_cond_kwargs` cannot be None when using additional conditions for `adaln_single`."
359
- )
360
- batch_size = hidden_states.shape[0]
361
- timestep, embedded_timestep = self.adaln_single(
362
- timestep, added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_states.dtype
363
- )
364
-
365
- # 2. Blocks
366
- if self.caption_projection is not None:
367
- batch_size = hidden_states.shape[0]
368
- encoder_hidden_states = self.caption_projection(encoder_hidden_states)
369
- encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, hidden_states.shape[-1])
370
-
371
- for block in self.transformer_blocks:
372
- if self.training and self.gradient_checkpointing:
373
-
374
- def create_custom_forward(module, return_dict=None):
375
- def custom_forward(*inputs):
376
- if return_dict is not None:
377
- return module(*inputs, return_dict=return_dict)
378
- else:
379
- return module(*inputs)
380
-
381
- return custom_forward
382
-
383
- ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
384
- hidden_states = torch.utils.checkpoint.checkpoint(
385
- create_custom_forward(block),
386
- hidden_states,
387
- attention_mask,
388
- encoder_hidden_states,
389
- encoder_attention_mask,
390
- timestep,
391
- cross_attention_kwargs,
392
- class_labels,
393
- **ckpt_kwargs,
394
- )
395
- else:
396
- hidden_states = block(
397
- hidden_states,
398
- attention_mask=attention_mask,
399
- encoder_hidden_states=encoder_hidden_states,
400
- encoder_attention_mask=encoder_attention_mask,
401
- timestep=timestep,
402
- cross_attention_kwargs=cross_attention_kwargs,
403
- class_labels=class_labels,
404
- )
405
-
406
- # 3. Output
407
- if self.is_input_continuous:
408
- if not self.use_linear_projection:
409
- hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
410
- hidden_states = (
411
- self.proj_out(hidden_states, scale=lora_scale)
412
- if not USE_PEFT_BACKEND
413
- else self.proj_out(hidden_states)
414
- )
415
- else:
416
- hidden_states = (
417
- self.proj_out(hidden_states, scale=lora_scale)
418
- if not USE_PEFT_BACKEND
419
- else self.proj_out(hidden_states)
420
- )
421
- hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
422
-
423
- output = hidden_states + residual
424
- elif self.is_input_vectorized:
425
- hidden_states = self.norm_out(hidden_states)
426
- logits = self.out(hidden_states)
427
- # (batch, self.num_vector_embeds - 1, self.num_latent_pixels)
428
- logits = logits.permute(0, 2, 1)
429
-
430
- # log(p(x_0))
431
- output = F.log_softmax(logits.double(), dim=1).float()
432
-
433
- if self.is_input_patches:
434
- if self.config.norm_type != "ada_norm_single":
435
- conditioning = self.transformer_blocks[0].norm1.emb(
436
- timestep, class_labels, hidden_dtype=hidden_states.dtype
437
- )
438
- shift, scale = self.proj_out_1(F.silu(conditioning)).chunk(2, dim=1)
439
- hidden_states = self.norm_out(hidden_states) * (1 + scale[:, None]) + shift[:, None]
440
- hidden_states = self.proj_out_2(hidden_states)
441
- elif self.config.norm_type == "ada_norm_single":
442
- shift, scale = (self.scale_shift_table[None] + embedded_timestep[:, None]).chunk(2, dim=1)
443
- hidden_states = self.norm_out(hidden_states)
444
- # Modulation
445
- hidden_states = hidden_states * (1 + scale) + shift
446
- hidden_states = self.proj_out(hidden_states)
447
- hidden_states = hidden_states.squeeze(1)
448
-
449
- # unpatchify
450
- if self.adaln_single is None:
451
- height = width = int(hidden_states.shape[1] ** 0.5)
452
- hidden_states = hidden_states.reshape(
453
- shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)
454
- )
455
- hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
456
- output = hidden_states.reshape(
457
- shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)
458
- )
459
-
460
- if not return_dict:
461
- return (output,)
462
-
463
- return Transformer2DModelOutput(sample=output)
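Note: the 2-D attention-mask handling in `Transformer2DModel.forward` above (converting a keep/discard mask into an additive bias with a singleton query dimension) can be reproduced standalone. The sketch below is illustrative only; the function name `mask_to_bias` is an assumption and not part of the original file.

import torch

# Minimal sketch of the mask-to-bias conversion performed in
# Transformer2DModel.forward above: a (batch, key_tokens) mask with
# 1 = keep and 0 = discard becomes an additive bias of shape
# (batch, 1, key_tokens), roughly 0 for kept tokens and -10000 for discarded ones.
def mask_to_bias(attention_mask: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:
    bias = (1 - attention_mask.to(dtype)) * -10000.0
    return bias.unsqueeze(1)

mask = torch.tensor([[1, 1, 0, 0]])        # hypothetical 4-token example
bias = mask_to_bias(mask, torch.float32)   # tensor of shape (1, 1, 4)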
models/unet_2d_blocks.py DELETED
The diff for this file is too large to render. See raw diff
 
models/unet_2d_condition.py DELETED
@@ -1,1214 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- # Some modifications are reimplemented in public environments by Xiao Fu and Mu Hu
16
-
17
- from dataclasses import dataclass
18
- from typing import Any, Dict, List, Optional, Tuple, Union
19
-
20
- import torch
21
- import torch.nn as nn
22
- import torch.utils.checkpoint
23
-
24
- from diffusers.configuration_utils import ConfigMixin, register_to_config
25
- from diffusers.loaders import UNet2DConditionLoadersMixin
26
- from diffusers.utils import USE_PEFT_BACKEND, BaseOutput, deprecate, logging, scale_lora_layers, unscale_lora_layers
27
- from diffusers.models.activations import get_activation
28
- from diffusers.models.attention_processor import (
29
- ADDED_KV_ATTENTION_PROCESSORS,
30
- CROSS_ATTENTION_PROCESSORS,
31
- Attention,
32
- AttentionProcessor,
33
- AttnAddedKVProcessor,
34
- AttnProcessor,
35
- )
36
- from diffusers.models.embeddings import (
37
- GaussianFourierProjection,
38
- ImageHintTimeEmbedding,
39
- ImageProjection,
40
- ImageTimeEmbedding,
41
- PositionNet,
42
- TextImageProjection,
43
- TextImageTimeEmbedding,
44
- TextTimeEmbedding,
45
- TimestepEmbedding,
46
- Timesteps,
47
- )
48
- from diffusers.models.modeling_utils import ModelMixin
49
-
50
- from models.unet_2d_blocks import (
51
- UNetMidBlock2D,
52
- UNetMidBlock2DCrossAttn,
53
- UNetMidBlock2DSimpleCrossAttn,
54
- get_down_block,
55
- get_up_block,
56
- )
57
-
58
-
59
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
60
-
61
-
62
- @dataclass
63
- class UNet2DConditionOutput(BaseOutput):
64
- """
65
- The output of [`UNet2DConditionModel`].
66
-
67
- Args:
68
- sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
69
- The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model.
70
- """
71
-
72
- sample: torch.FloatTensor = None
73
-
74
-
75
- class UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):
76
- r"""
77
- A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample
78
- shaped output.
79
-
80
- This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
81
- for all models (such as downloading or saving).
82
-
83
- Parameters:
84
- sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
85
- Height and width of input/output sample.
86
- in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample.
87
- out_channels (`int`, *optional*, defaults to 4): Number of channels in the output.
88
- center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
89
- flip_sin_to_cos (`bool`, *optional*, defaults to `False`):
90
- Whether to flip the sin to cos in the time embedding.
91
- freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
92
- down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
93
- The tuple of downsample blocks to use.
94
- mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`):
95
- Block type for middle of UNet, it can be one of `UNetMidBlock2DCrossAttn`, `UNetMidBlock2D`, or
96
- `UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped.
97
- up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`):
98
- The tuple of upsample blocks to use.
99
- only_cross_attention (`bool` or `Tuple[bool]`, *optional*, defaults to `False`):
100
- Whether to include self-attention in the basic transformer blocks, see
101
- [`~models.attention.BasicTransformerBlock`].
102
- block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
103
- The tuple of output channels for each block.
104
- layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
105
- downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
106
- mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
107
- dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
108
- act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
109
- norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
110
- If `None`, normalization and activation layers are skipped in post-processing.
111
- norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
112
- cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280):
113
- The dimension of the cross attention features.
114
- transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple]` , *optional*, defaults to 1):
115
- The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
116
- [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
117
- [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
118
- reverse_transformer_layers_per_block : (`Tuple[Tuple]`, *optional*, defaults to None):
119
- The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`], in the upsampling
120
- blocks of the U-Net. Only relevant if `transformer_layers_per_block` is of type `Tuple[Tuple]` and for
121
- [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
122
- [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
123
- encoder_hid_dim (`int`, *optional*, defaults to None):
124
- If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
125
- dimension to `cross_attention_dim`.
126
- encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
127
- If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
128
- embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`.
129
- attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
130
- num_attention_heads (`int`, *optional*):
131
- The number of attention heads. If not defined, defaults to `attention_head_dim`
132
- resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
133
- for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`.
134
- class_embed_type (`str`, *optional*, defaults to `None`):
135
- The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,
136
- `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
137
- addition_embed_type (`str`, *optional*, defaults to `None`):
138
- Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
139
- "text". "text" will use the `TextTimeEmbedding` layer.
140
- addition_time_embed_dim (`int`, *optional*, defaults to `None`):
141
- Dimension for the timestep embeddings.
142
- num_class_embeds (`int`, *optional*, defaults to `None`):
143
- Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
144
- class conditioning with `class_embed_type` equal to `None`.
145
- time_embedding_type (`str`, *optional*, defaults to `positional`):
146
- The type of position embedding to use for timesteps. Choose from `positional` or `fourier`.
147
- time_embedding_dim (`int`, *optional*, defaults to `None`):
148
- An optional override for the dimension of the projected time embedding.
149
- time_embedding_act_fn (`str`, *optional*, defaults to `None`):
150
- Optional activation function to use only once on the time embeddings before they are passed to the rest of
151
- the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`.
152
- timestep_post_act (`str`, *optional*, defaults to `None`):
153
- The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`.
154
- time_cond_proj_dim (`int`, *optional*, defaults to `None`):
155
- The dimension of `cond_proj` layer in the timestep embedding.
156
- conv_in_kernel (`int`, *optional*, defaults to `3`): The kernel size of the `conv_in` layer.
157
- conv_out_kernel (`int`, *optional*, defaults to `3`): The kernel size of the `conv_out` layer.
158
- projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when
159
- `class_embed_type="projection"`. Required when `class_embed_type="projection"`.
160
- class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time
161
- embeddings with the class embeddings.
162
- mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`):
163
- Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. If
164
- `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the
165
- `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Default to `False`
166
- otherwise.
167
- """
168
-
169
- _supports_gradient_checkpointing = True
170
-
171
- @register_to_config
172
- def __init__(
173
- self,
174
- sample_size: Optional[int] = None,
175
- in_channels: int = 4,
176
- out_channels: int = 4,
177
- center_input_sample: bool = False,
178
- flip_sin_to_cos: bool = True,
179
- freq_shift: int = 0,
180
- down_block_types: Tuple[str] = (
181
- "CrossAttnDownBlock2D",
182
- "CrossAttnDownBlock2D",
183
- "CrossAttnDownBlock2D",
184
- "DownBlock2D",
185
- ),
186
- mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
187
- up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
188
- only_cross_attention: Union[bool, Tuple[bool]] = False,
189
- block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
190
- layers_per_block: Union[int, Tuple[int]] = 2,
191
- downsample_padding: int = 1,
192
- mid_block_scale_factor: float = 1,
193
- dropout: float = 0.0,
194
- act_fn: str = "silu",
195
- norm_num_groups: Optional[int] = 32,
196
- norm_eps: float = 1e-5,
197
- cross_attention_dim: Union[int, Tuple[int]] = 1280,
198
- transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1,
199
- reverse_transformer_layers_per_block: Optional[Tuple[Tuple[int]]] = None,
200
- encoder_hid_dim: Optional[int] = None,
201
- encoder_hid_dim_type: Optional[str] = None,
202
- attention_head_dim: Union[int, Tuple[int]] = 8,
203
- num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
204
- dual_cross_attention: bool = False,
205
- use_linear_projection: bool = False,
206
- class_embed_type: Optional[str] = None,
207
- addition_embed_type: Optional[str] = None,
208
- addition_time_embed_dim: Optional[int] = None,
209
- num_class_embeds: Optional[int] = None,
210
- upcast_attention: bool = False,
211
- resnet_time_scale_shift: str = "default",
212
- resnet_skip_time_act: bool = False,
213
- resnet_out_scale_factor: int = 1.0,
214
- time_embedding_type: str = "positional",
215
- time_embedding_dim: Optional[int] = None,
216
- time_embedding_act_fn: Optional[str] = None,
217
- timestep_post_act: Optional[str] = None,
218
- time_cond_proj_dim: Optional[int] = None,
219
- conv_in_kernel: int = 3,
220
- conv_out_kernel: int = 3,
221
- projection_class_embeddings_input_dim: Optional[int] = None,
222
- attention_type: str = "default",
223
- class_embeddings_concat: bool = False,
224
- mid_block_only_cross_attention: Optional[bool] = None,
225
- cross_attention_norm: Optional[str] = None,
226
- addition_embed_type_num_heads=64,
227
- ):
228
- super().__init__()
229
-
230
- self.sample_size = sample_size
231
-
232
- if num_attention_heads is not None:
233
- raise ValueError(
234
- "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
235
- )
236
-
237
- # If `num_attention_heads` is not defined (which is the case for most models)
238
- # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
239
- # The reason for this behavior is to correct for incorrectly named variables that were introduced
240
- # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
241
- # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
242
- # which is why we correct for the naming here.
243
- num_attention_heads = num_attention_heads or attention_head_dim
244
-
245
- # Check inputs
246
- if len(down_block_types) != len(up_block_types):
247
- raise ValueError(
248
- f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
249
- )
250
-
251
- if len(block_out_channels) != len(down_block_types):
252
- raise ValueError(
253
- f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
254
- )
255
-
256
- if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
257
- raise ValueError(
258
- f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
259
- )
260
-
261
- if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
262
- raise ValueError(
263
- f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
264
- )
265
-
266
- if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
267
- raise ValueError(
268
- f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
269
- )
270
-
271
- if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types):
272
- raise ValueError(
273
- f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}."
274
- )
275
-
276
- if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types):
277
- raise ValueError(
278
- f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}."
279
- )
280
- if isinstance(transformer_layers_per_block, list) and reverse_transformer_layers_per_block is None:
281
- for layer_number_per_block in transformer_layers_per_block:
282
- if isinstance(layer_number_per_block, list):
283
- raise ValueError("Must provide `reverse_transformer_layers_per_block` if using asymmetrical UNet.")
284
-
285
- # input
286
- conv_in_padding = (conv_in_kernel - 1) // 2
287
- self.conv_in = nn.Conv2d(
288
- in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
289
- )
290
-
291
- # time
292
- if time_embedding_type == "fourier":
293
- time_embed_dim = time_embedding_dim or block_out_channels[0] * 2
294
- if time_embed_dim % 2 != 0:
295
- raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.")
296
- self.time_proj = GaussianFourierProjection(
297
- time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
298
- )
299
- timestep_input_dim = time_embed_dim
300
- elif time_embedding_type == "positional":
301
- time_embed_dim = time_embedding_dim or block_out_channels[0] * 4
302
-
303
- self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
304
- timestep_input_dim = block_out_channels[0]
305
- else:
306
- raise ValueError(
307
- f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`."
308
- )
309
-
310
- self.time_embedding = TimestepEmbedding(
311
- timestep_input_dim,
312
- time_embed_dim,
313
- act_fn=act_fn,
314
- post_act_fn=timestep_post_act,
315
- cond_proj_dim=time_cond_proj_dim,
316
- )
317
-
318
- if encoder_hid_dim_type is None and encoder_hid_dim is not None:
319
- encoder_hid_dim_type = "text_proj"
320
- self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
321
- logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.")
322
-
323
- if encoder_hid_dim is None and encoder_hid_dim_type is not None:
324
- raise ValueError(
325
- f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
326
- )
327
-
328
- if encoder_hid_dim_type == "text_proj":
329
- self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
330
- elif encoder_hid_dim_type == "text_image_proj":
331
- # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much
332
- # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
333
- # case when `addition_embed_type == "text_image_proj"` (Kandinsky 2.1)
334
- self.encoder_hid_proj = TextImageProjection(
335
- text_embed_dim=encoder_hid_dim,
336
- image_embed_dim=cross_attention_dim,
337
- cross_attention_dim=cross_attention_dim,
338
- )
339
- elif encoder_hid_dim_type == "image_proj":
340
- # Kandinsky 2.2
341
- self.encoder_hid_proj = ImageProjection(
342
- image_embed_dim=encoder_hid_dim,
343
- cross_attention_dim=cross_attention_dim,
344
- )
345
- elif encoder_hid_dim_type is not None:
346
- raise ValueError(
347
- f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj', 'text_image_proj' or 'image_proj'."
348
- )
349
- else:
350
- self.encoder_hid_proj = None
351
-
352
- # class embedding
353
- if class_embed_type is None and num_class_embeds is not None:
354
- self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
355
- elif class_embed_type == "timestep":
356
- self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn)
357
- elif class_embed_type == "identity":
358
- self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
359
- elif class_embed_type == "projection":
360
- if projection_class_embeddings_input_dim is None:
361
- raise ValueError(
362
- "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
363
- )
364
- # The projection `class_embed_type` is the same as the timestep `class_embed_type` except
365
- # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
366
- # 2. it projects from an arbitrary input dimension.
367
- #
368
- # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
369
- # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
370
- # As a result, `TimestepEmbedding` can be passed arbitrary vectors.
371
- self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
372
- elif class_embed_type == "simple_projection":
373
- if projection_class_embeddings_input_dim is None:
374
- raise ValueError(
375
- "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set"
376
- )
377
- self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim)
378
- else:
379
- self.class_embedding = None
380
-
381
- if addition_embed_type == "text":
382
- if encoder_hid_dim is not None:
383
- text_time_embedding_from_dim = encoder_hid_dim
384
- else:
385
- text_time_embedding_from_dim = cross_attention_dim
386
-
387
- self.add_embedding = TextTimeEmbedding(
388
- text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
389
- )
390
- elif addition_embed_type == "text_image":
391
- # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much
392
- # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
393
- # case when `addition_embed_type == "text_image"` (Kandinsky 2.1)
394
- self.add_embedding = TextImageTimeEmbedding(
395
- text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
396
- )
397
- elif addition_embed_type == "text_time":
398
- self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
399
- self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
400
- elif addition_embed_type == "image":
401
- # Kandinsky 2.2
402
- self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
403
- elif addition_embed_type == "image_hint":
404
- # Kandinsky 2.2 ControlNet
405
- self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
406
- elif addition_embed_type is not None:
407
- raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text', 'text_image', 'text_time', 'image' or 'image_hint'.")
408
-
409
- if time_embedding_act_fn is None:
410
- self.time_embed_act = None
411
- else:
412
- self.time_embed_act = get_activation(time_embedding_act_fn)
413
-
414
- self.down_blocks = nn.ModuleList([])
415
- self.up_blocks = nn.ModuleList([])
416
-
417
- if isinstance(only_cross_attention, bool):
418
- if mid_block_only_cross_attention is None:
419
- mid_block_only_cross_attention = only_cross_attention
420
-
421
- only_cross_attention = [only_cross_attention] * len(down_block_types)
422
-
423
- if mid_block_only_cross_attention is None:
424
- mid_block_only_cross_attention = False
425
-
426
- if isinstance(num_attention_heads, int):
427
- num_attention_heads = (num_attention_heads,) * len(down_block_types)
428
-
429
- if isinstance(attention_head_dim, int):
430
- attention_head_dim = (attention_head_dim,) * len(down_block_types)
431
-
432
- if isinstance(cross_attention_dim, int):
433
- cross_attention_dim = (cross_attention_dim,) * len(down_block_types)
434
-
435
- if isinstance(layers_per_block, int):
436
- layers_per_block = [layers_per_block] * len(down_block_types)
437
-
438
- if isinstance(transformer_layers_per_block, int):
439
- transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
440
-
441
- if class_embeddings_concat:
442
- # The time embeddings are concatenated with the class embeddings. The dimension of the
443
- # time embeddings passed to the down, middle, and up blocks is twice the dimension of the
444
- # regular time embeddings
445
- blocks_time_embed_dim = time_embed_dim * 2
446
- else:
447
- blocks_time_embed_dim = time_embed_dim
448
-
449
- # down
450
- output_channel = block_out_channels[0]
451
- for i, down_block_type in enumerate(down_block_types):
452
- input_channel = output_channel
453
- output_channel = block_out_channels[i]
454
- is_final_block = i == len(block_out_channels) - 1
455
-
456
- down_block = get_down_block(
457
- down_block_type,
458
- num_layers=layers_per_block[i],
459
- transformer_layers_per_block=transformer_layers_per_block[i],
460
- in_channels=input_channel,
461
- out_channels=output_channel,
462
- temb_channels=blocks_time_embed_dim,
463
- add_downsample=not is_final_block,
464
- resnet_eps=norm_eps,
465
- resnet_act_fn=act_fn,
466
- resnet_groups=norm_num_groups,
467
- cross_attention_dim=cross_attention_dim[i],
468
- num_attention_heads=num_attention_heads[i],
469
- downsample_padding=downsample_padding,
470
- dual_cross_attention=dual_cross_attention,
471
- use_linear_projection=use_linear_projection,
472
- only_cross_attention=only_cross_attention[i],
473
- upcast_attention=upcast_attention,
474
- resnet_time_scale_shift=resnet_time_scale_shift,
475
- attention_type=attention_type,
476
- resnet_skip_time_act=resnet_skip_time_act,
477
- resnet_out_scale_factor=resnet_out_scale_factor,
478
- cross_attention_norm=cross_attention_norm,
479
- attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
480
- dropout=dropout,
481
- )
482
- self.down_blocks.append(down_block)
483
-
484
- # mid
485
- if mid_block_type == "UNetMidBlock2DCrossAttn":
486
- self.mid_block = UNetMidBlock2DCrossAttn(
487
- transformer_layers_per_block=transformer_layers_per_block[-1],
488
- in_channels=block_out_channels[-1],
489
- temb_channels=blocks_time_embed_dim,
490
- dropout=dropout,
491
- resnet_eps=norm_eps,
492
- resnet_act_fn=act_fn,
493
- output_scale_factor=mid_block_scale_factor,
494
- resnet_time_scale_shift=resnet_time_scale_shift,
495
- cross_attention_dim=cross_attention_dim[-1],
496
- num_attention_heads=num_attention_heads[-1],
497
- resnet_groups=norm_num_groups,
498
- dual_cross_attention=dual_cross_attention,
499
- use_linear_projection=use_linear_projection,
500
- upcast_attention=upcast_attention,
501
- attention_type=attention_type,
502
- )
503
- elif mid_block_type == "UNetMidBlock2DSimpleCrossAttn":
504
- self.mid_block = UNetMidBlock2DSimpleCrossAttn(
505
- in_channels=block_out_channels[-1],
506
- temb_channels=blocks_time_embed_dim,
507
- dropout=dropout,
508
- resnet_eps=norm_eps,
509
- resnet_act_fn=act_fn,
510
- output_scale_factor=mid_block_scale_factor,
511
- cross_attention_dim=cross_attention_dim[-1],
512
- attention_head_dim=attention_head_dim[-1],
513
- resnet_groups=norm_num_groups,
514
- resnet_time_scale_shift=resnet_time_scale_shift,
515
- skip_time_act=resnet_skip_time_act,
516
- only_cross_attention=mid_block_only_cross_attention,
517
- cross_attention_norm=cross_attention_norm,
518
- )
519
- elif mid_block_type == "UNetMidBlock2D":
520
- self.mid_block = UNetMidBlock2D(
521
- in_channels=block_out_channels[-1],
522
- temb_channels=blocks_time_embed_dim,
523
- dropout=dropout,
524
- num_layers=0,
525
- resnet_eps=norm_eps,
526
- resnet_act_fn=act_fn,
527
- output_scale_factor=mid_block_scale_factor,
528
- resnet_groups=norm_num_groups,
529
- resnet_time_scale_shift=resnet_time_scale_shift,
530
- add_attention=False,
531
- )
532
- elif mid_block_type is None:
533
- self.mid_block = None
534
- else:
535
- raise ValueError(f"unknown mid_block_type: {mid_block_type}")
536
-
537
- # count how many layers upsample the images
538
- self.num_upsamplers = 0
539
-
540
- # up
541
- reversed_block_out_channels = list(reversed(block_out_channels))
542
- reversed_num_attention_heads = list(reversed(num_attention_heads))
543
- reversed_layers_per_block = list(reversed(layers_per_block))
544
- reversed_cross_attention_dim = list(reversed(cross_attention_dim))
545
- reversed_transformer_layers_per_block = (
546
- list(reversed(transformer_layers_per_block))
547
- if reverse_transformer_layers_per_block is None
548
- else reverse_transformer_layers_per_block
549
- )
550
- only_cross_attention = list(reversed(only_cross_attention))
551
-
552
- output_channel = reversed_block_out_channels[0]
553
- for i, up_block_type in enumerate(up_block_types):
554
- is_final_block = i == len(block_out_channels) - 1
555
-
556
- prev_output_channel = output_channel
557
- output_channel = reversed_block_out_channels[i]
558
- input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
559
-
560
- # add upsample block for all BUT final layer
561
- if not is_final_block:
562
- add_upsample = True
563
- self.num_upsamplers += 1
564
- else:
565
- add_upsample = False
566
-
567
- up_block = get_up_block(
568
- up_block_type,
569
- num_layers=reversed_layers_per_block[i] + 1,
570
- transformer_layers_per_block=reversed_transformer_layers_per_block[i],
571
- in_channels=input_channel,
572
- out_channels=output_channel,
573
- prev_output_channel=prev_output_channel,
574
- temb_channels=blocks_time_embed_dim,
575
- add_upsample=add_upsample,
576
- resnet_eps=norm_eps,
577
- resnet_act_fn=act_fn,
578
- resolution_idx=i,
579
- resnet_groups=norm_num_groups,
580
- cross_attention_dim=reversed_cross_attention_dim[i],
581
- num_attention_heads=reversed_num_attention_heads[i],
582
- dual_cross_attention=dual_cross_attention,
583
- use_linear_projection=use_linear_projection,
584
- only_cross_attention=only_cross_attention[i],
585
- upcast_attention=upcast_attention,
586
- resnet_time_scale_shift=resnet_time_scale_shift,
587
- attention_type=attention_type,
588
- resnet_skip_time_act=resnet_skip_time_act,
589
- resnet_out_scale_factor=resnet_out_scale_factor,
590
- cross_attention_norm=cross_attention_norm,
591
- attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
592
- dropout=dropout,
593
- )
594
- self.up_blocks.append(up_block)
595
- prev_output_channel = output_channel
596
-
597
- # out
598
- if norm_num_groups is not None:
599
- self.conv_norm_out = nn.GroupNorm(
600
- num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
601
- )
602
-
603
- self.conv_act = get_activation(act_fn)
604
-
605
- else:
606
- self.conv_norm_out = None
607
- self.conv_act = None
608
-
609
- conv_out_padding = (conv_out_kernel - 1) // 2
610
- self.conv_out = nn.Conv2d(
611
- block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding
612
- )
613
-
614
- if attention_type in ["gated", "gated-text-image"]:
615
- positive_len = 768
616
- if isinstance(cross_attention_dim, int):
617
- positive_len = cross_attention_dim
618
- elif isinstance(cross_attention_dim, tuple) or isinstance(cross_attention_dim, list):
619
- positive_len = cross_attention_dim[0]
620
-
621
- feature_type = "text-only" if attention_type == "gated" else "text-image"
622
- self.position_net = PositionNet(
623
- positive_len=positive_len, out_dim=cross_attention_dim, feature_type=feature_type
624
- )
625
-
626
- @property
627
- def attn_processors(self) -> Dict[str, AttentionProcessor]:
628
- r"""
629
- Returns:
630
- `dict` of attention processors: A dictionary containing all attention processors used in the model,
631
- indexed by its weight name.
632
- """
633
- # set recursively
634
- processors = {}
635
-
636
- def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
637
- if hasattr(module, "get_processor"):
638
- processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
639
-
640
- for sub_name, child in module.named_children():
641
- fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
642
-
643
- return processors
644
-
645
- for name, module in self.named_children():
646
- fn_recursive_add_processors(name, module, processors)
647
-
648
- return processors
649
-
650
- def set_attn_processor(
651
- self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]], _remove_lora=False
652
- ):
653
- r"""
654
- Sets the attention processor to use to compute attention.
655
-
656
- Parameters:
657
- processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
658
- The instantiated processor class or a dictionary of processor classes that will be set as the processor
659
- for **all** `Attention` layers.
660
-
661
- If `processor` is a dict, the key needs to define the path to the corresponding cross attention
662
- processor. This is strongly recommended when setting trainable attention processors.
663
-
664
- """
665
- count = len(self.attn_processors.keys())
666
-
667
- if isinstance(processor, dict) and len(processor) != count:
668
- raise ValueError(
669
- f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
670
- f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
671
- )
672
-
673
- def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
674
- if hasattr(module, "set_processor"):
675
- if not isinstance(processor, dict):
676
- module.set_processor(processor, _remove_lora=_remove_lora)
677
- else:
678
- module.set_processor(processor.pop(f"{name}.processor"), _remove_lora=_remove_lora)
679
-
680
- for sub_name, child in module.named_children():
681
- fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
682
-
683
- for name, module in self.named_children():
684
- fn_recursive_attn_processor(name, module, processor)
685
-
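`set_attn_processor` accepts either a single processor instance (applied to every attention layer) or a dict whose keys mirror `attn_processors`. A hedged sketch using the stock `AttnProcessor` from diffusers; `unet` again stands for an instance of this class:

from diffusers.models.attention_processor import AttnProcessor

# one processor shared by every attention layer
unet.set_attn_processor(AttnProcessor())

# or a per-layer dict; it must contain one entry per key in `unet.attn_processors`
per_layer = {name: AttnProcessor() for name in unet.attn_processors}
unet.set_attn_processor(per_layer)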
686
- def set_default_attn_processor(self):
687
- """
688
- Disables custom attention processors and sets the default attention implementation.
689
- """
690
- if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
691
- processor = AttnAddedKVProcessor()
692
- elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
693
- processor = AttnProcessor()
694
- else:
695
- raise ValueError(
696
- f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
697
- )
698
-
699
- self.set_attn_processor(processor, _remove_lora=True)
700
-
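`set_default_attn_processor` then drops any custom processors and restores the stock implementation (or the added-KV variant when every layer uses added KV projections). A short sketch:

unet.set_default_attn_processor()
print({type(p).__name__ for p in unet.attn_processors.values()})
# expected: {"AttnProcessor"} (or {"AttnAddedKVProcessor"} for added-KV models)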
701
- def set_attention_slice(self, slice_size):
702
- r"""
703
- Enable sliced attention computation.
704
-
705
- When this option is enabled, the attention module splits the input tensor in slices to compute attention in
706
- several steps. This is useful for saving some memory in exchange for a small decrease in speed.
707
-
708
- Args:
709
- slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
710
- When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
711
- `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
712
- provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
713
- must be a multiple of `slice_size`.
714
- """
715
- sliceable_head_dims = []
716
-
717
- def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
718
- if hasattr(module, "set_attention_slice"):
719
- sliceable_head_dims.append(module.sliceable_head_dim)
720
-
721
- for child in module.children():
722
- fn_recursive_retrieve_sliceable_dims(child)
723
-
724
- # retrieve number of attention layers
725
- for module in self.children():
726
- fn_recursive_retrieve_sliceable_dims(module)
727
-
728
- num_sliceable_layers = len(sliceable_head_dims)
729
-
730
- if slice_size == "auto":
731
- # half the attention head size is usually a good trade-off between
732
- # speed and memory
733
- slice_size = [dim // 2 for dim in sliceable_head_dims]
734
- elif slice_size == "max":
735
- # make smallest slice possible
736
- slice_size = num_sliceable_layers * [1]
737
-
738
- slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
739
-
740
- if len(slice_size) != len(sliceable_head_dims):
741
- raise ValueError(
742
- f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
743
- f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
744
- )
745
-
746
- for i in range(len(slice_size)):
747
- size = slice_size[i]
748
- dim = sliceable_head_dims[i]
749
- if size is not None and size > dim:
750
- raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
751
-
752
- # Recursively walk through all the children.
753
- # Any child that exposes the set_attention_slice method
754
- # gets the message
755
- def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
756
- if hasattr(module, "set_attention_slice"):
757
- module.set_attention_slice(slice_size.pop())
758
-
759
- for child in module.children():
760
- fn_recursive_set_attention_slice(child, slice_size)
761
-
762
- reversed_slice_size = list(reversed(slice_size))
763
- for module in self.children():
764
- fn_recursive_set_attention_slice(module, reversed_slice_size)
765
-
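Typical calls into the slicing API above, with `unet` an instance of this class: `"auto"` halves every sliceable head dimension, `"max"` runs one slice at a time, an `int` is broadcast to all layers, and a list must provide one value per sliceable layer.

unet.set_attention_slice("auto")  # two slices per attention layer
unet.set_attention_slice("max")   # slice size 1 everywhere, lowest memory use
unet.set_attention_slice(4)       # same integer slice size for every layer (subject to the divisibility note in the docstring)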
766
- def _set_gradient_checkpointing(self, module, value=False):
767
- if hasattr(module, "gradient_checkpointing"):
768
- module.gradient_checkpointing = value
769
-
770
- def enable_freeu(self, s1, s2, b1, b2):
771
- r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497.
772
-
773
- The suffixes after the scaling factors represent the stage blocks where they are being applied.
774
-
775
- Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that
776
- are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
777
-
778
- Args:
779
- s1 (`float`):
780
- Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
781
- mitigate the "oversmoothing effect" in the enhanced denoising process.
782
- s2 (`float`):
783
- Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
784
- mitigate the "oversmoothing effect" in the enhanced denoising process.
785
- b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
786
- b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
787
- """
788
- for i, upsample_block in enumerate(self.up_blocks):
789
- setattr(upsample_block, "s1", s1)
790
- setattr(upsample_block, "s2", s2)
791
- setattr(upsample_block, "b1", b1)
792
- setattr(upsample_block, "b2", b2)
793
-
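A hedged usage sketch for the FreeU hooks above; the numbers are illustrative rather than tuned values, and the official repository linked in the docstring lists per-pipeline recommendations:

# backbone factors (b1, b2) amplify, skip factors (s1, s2) attenuate
unet.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4)
# ... run denoising with FreeU active ...
unet.disable_freeu()  # resets s1/s2/b1/b2 on every up block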
794
- def disable_freeu(self):
795
- """Disables the FreeU mechanism."""
796
- freeu_keys = {"s1", "s2", "b1", "b2"}
797
- for i, upsample_block in enumerate(self.up_blocks):
798
- for k in freeu_keys:
799
- if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None:
800
- setattr(upsample_block, k, None)
801
-
802
- def fuse_qkv_projections(self):
803
- """
804
- Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
805
- key, value) are fused. For cross-attention modules, key and value projection matrices are fused.
806
-
807
- <Tip warning={true}>
808
-
809
- This API is 🧪 experimental.
810
-
811
- </Tip>
812
- """
813
- self.original_attn_processors = None
814
-
815
- for _, attn_processor in self.attn_processors.items():
816
- if "Added" in str(attn_processor.__class__.__name__):
817
- raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
818
-
819
- self.original_attn_processors = self.attn_processors
820
-
821
- for module in self.modules():
822
- if isinstance(module, Attention):
823
- module.fuse_projections(fuse=True)
824
-
825
- def unfuse_qkv_projections(self):
826
- """Disables the fused QKV projection if enabled.
827
-
828
- <Tip warning={true}>
829
-
830
- This API is 🧪 experimental.
831
-
832
- </Tip>
833
-
834
- """
835
- if self.original_attn_processors is not None:
836
- self.set_attn_processor(self.original_attn_processors)
837
-
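A minimal sketch of the experimental fused-projection toggle defined above, assuming `unet` is an instance of this class with no added-KV processors:

unet.fuse_qkv_projections()    # fuses q/k/v for self-attention, k/v for cross-attention
# ... run inference with fused projections ...
unet.unfuse_qkv_projections()  # restores the processors saved in `original_attn_processors`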
838
- def forward(
839
- self,
840
- sample: torch.FloatTensor,
841
- timestep: Union[torch.Tensor, float, int],
842
- encoder_hidden_states: torch.Tensor,
843
- class_labels: Optional[torch.Tensor] = None,
844
- timestep_cond: Optional[torch.Tensor] = None,
845
- attention_mask: Optional[torch.Tensor] = None,
846
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
847
- added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
848
- down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
849
- mid_block_additional_residual: Optional[torch.Tensor] = None,
850
- down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
851
- encoder_attention_mask: Optional[torch.Tensor] = None,
852
- return_dict: bool = True,
853
- ) -> Union[UNet2DConditionOutput, Tuple]:
854
- r"""
855
- The [`UNet2DConditionModel`] forward method.
856
-
857
- Args:
858
- sample (`torch.FloatTensor`):
859
- The noisy input tensor with the following shape `(batch, channel, height, width)`.
860
- timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.
861
- encoder_hidden_states (`torch.FloatTensor`):
862
- The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
863
- class_labels (`torch.Tensor`, *optional*, defaults to `None`):
864
- Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
865
- timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`):
866
- Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed
867
- through the `self.time_embedding` layer to obtain the timestep embeddings.
868
- attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
869
- An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
870
- is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
871
- negative values to the attention scores corresponding to "discard" tokens.
872
- cross_attention_kwargs (`dict`, *optional*):
873
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
874
- `self.processor` in
875
- [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
876
- added_cond_kwargs: (`dict`, *optional*):
877
- A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
878
- are passed along to the UNet blocks.
879
- down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*):
880
- A tuple of tensors that if specified are added to the residuals of down unet blocks.
881
- mid_block_additional_residual: (`torch.Tensor`, *optional*):
882
- A tensor that if specified is added to the residual of the middle unet block.
883
- encoder_attention_mask (`torch.Tensor`):
884
- A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
885
- `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
886
- which adds large negative values to the attention scores corresponding to "discard" tokens.
887
- return_dict (`bool`, *optional*, defaults to `True`):
888
- Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
889
- tuple.
890
- cross_attention_kwargs (`dict`, *optional*):
891
- A kwargs dictionary that if specified is passed along to the [`AttnProcessor`].
892
- added_cond_kwargs: (`dict`, *optional*):
893
- A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
894
- are passed along to the UNet blocks.
895
- down_block_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
896
- additional residuals to be added to UNet long skip connections from down blocks to up blocks for
897
- example from ControlNet side model(s)
898
- mid_block_additional_residual (`torch.Tensor`, *optional*):
899
- additional residual to be added to UNet mid block output, for example from ControlNet side model
900
- down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
901
- additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s)
902
-
903
- Returns:
904
- [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
905
- If `return_dict` is True, an [`~models.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise
906
- a `tuple` is returned where the first element is the sample tensor.
907
- """
908
-
909
- # By default samples have to be at least a multiple of the overall upsampling factor.
910
- # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
911
- # However, the upsampling interpolation output size can be forced to fit any upsampling size
912
- # on the fly if necessary.
913
- default_overall_up_factor = 2**self.num_upsamplers
914
-
915
- # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
916
- forward_upsample_size = False
917
- upsample_size = None
918
-
919
- for dim in sample.shape[-2:]:
920
- if dim % default_overall_up_factor != 0:
921
- # Forward upsample size to force interpolation output size.
922
- forward_upsample_size = True
923
- break
924
-
925
- # ensure attention_mask is a bias, and give it a singleton query_tokens dimension
926
- # expects mask of shape:
927
- # [batch, key_tokens]
928
- # adds singleton query_tokens dimension:
929
- # [batch, 1, key_tokens]
930
- # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
931
- # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
932
- # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
933
- if attention_mask is not None:
934
- # assume that mask is expressed as:
935
- # (1 = keep, 0 = discard)
936
- # convert mask into a bias that can be added to attention scores:
937
- # (keep = +0, discard = -10000.0)
938
- attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
939
- attention_mask = attention_mask.unsqueeze(1)
940
-
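# Worked example of the conversion above: a key-token mask [1, 1, 0] becomes the
# additive bias [0.0, 0.0, -10000.0], and unsqueeze(1) reshapes [batch, key_tokens]
# to [batch, 1, key_tokens] so the bias broadcasts against attention scores of shape
# [batch, heads, query_tokens, key_tokens] or [batch * heads, query_tokens, key_tokens].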
941
- # convert encoder_attention_mask to a bias the same way we do for attention_mask
942
- if encoder_attention_mask is not None:
943
- encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0
944
- encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
945
-
946
- # 0. center input if necessary
947
- if self.config.center_input_sample:
948
- sample = 2 * sample - 1.0
949
-
950
- # 1. time
951
- timesteps = timestep
952
- if not torch.is_tensor(timesteps):
953
- # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
954
- # This would be a good case for the `match` statement (Python 3.10+)
955
- is_mps = sample.device.type == "mps"
956
- if isinstance(timestep, float):
957
- dtype = torch.float32 if is_mps else torch.float64
958
- else:
959
- dtype = torch.int32 if is_mps else torch.int64
960
- timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
961
- elif len(timesteps.shape) == 0:
962
- timesteps = timesteps[None].to(sample.device)
963
-
964
- # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
965
- timesteps = timesteps.expand(sample.shape[0])
966
-
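# Worked example: a plain Python timestep such as 999 is wrapped into a one-element
# tensor on `sample.device` and then expanded to shape [batch_size], so every sample
# in the batch receives the same value before `time_proj` / `time_embedding` run.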
967
- t_emb = self.time_proj(timesteps)
968
-
969
- # `Timesteps` does not contain any weights and will always return f32 tensors
970
- # but time_embedding might actually be running in fp16. so we need to cast here.
971
- # there might be better ways to encapsulate this.
972
- t_emb = t_emb.to(dtype=sample.dtype)
973
-
974
- emb = self.time_embedding(t_emb, timestep_cond)
975
- aug_emb = None
976
-
977
- if self.class_embedding is not None:
978
- if class_labels is None:
979
- raise ValueError("class_labels should be provided when num_class_embeds > 0")
980
-
981
- if self.config.class_embed_type == "timestep":
982
- class_labels = self.time_proj(class_labels)
983
-
984
- # `Timesteps` does not contain any weights and will always return f32 tensors
985
- # there might be better ways to encapsulate this.
986
- class_labels = class_labels.to(dtype=sample.dtype)
987
-
988
- class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype)
989
-
990
- if self.config.class_embeddings_concat:
991
- emb = torch.cat([emb, class_emb], dim=-1)
992
- else:
993
- emb = emb + class_emb
994
-
995
- if self.config.addition_embed_type == "text":
996
- aug_emb = self.add_embedding(encoder_hidden_states)
997
- elif self.config.addition_embed_type == "text_image":
998
- # Kandinsky 2.1 - style
999
- if "image_embeds" not in added_cond_kwargs:
1000
- raise ValueError(
1001
- f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
1002
- )
1003
-
1004
- image_embs = added_cond_kwargs.get("image_embeds")
1005
- text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states)
1006
- aug_emb = self.add_embedding(text_embs, image_embs)
1007
- elif self.config.addition_embed_type == "text_time":
1008
- # SDXL - style
1009
- if "text_embeds" not in added_cond_kwargs:
1010
- raise ValueError(
1011
- f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
1012
- )
1013
- text_embeds = added_cond_kwargs.get("text_embeds")
1014
- if "time_ids" not in added_cond_kwargs:
1015
- raise ValueError(
1016
- f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
1017
- )
1018
- time_ids = added_cond_kwargs.get("time_ids")
1019
- time_embeds = self.add_time_proj(time_ids.flatten())
1020
- time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
1021
- add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
1022
- add_embeds = add_embeds.to(emb.dtype)
1023
- aug_emb = self.add_embedding(add_embeds)
1024
- elif self.config.addition_embed_type == "image":
1025
- # Kandinsky 2.2 - style
1026
- if "image_embeds" not in added_cond_kwargs:
1027
- raise ValueError(
1028
- f"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
1029
- )
1030
- image_embs = added_cond_kwargs.get("image_embeds")
1031
- aug_emb = self.add_embedding(image_embs)
1032
- elif self.config.addition_embed_type == "image_hint":
1033
- # Kandinsky 2.2 - style
1034
- if "image_embeds" not in added_cond_kwargs or "hint" not in added_cond_kwargs:
1035
- raise ValueError(
1036
- f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`"
1037
- )
1038
- image_embs = added_cond_kwargs.get("image_embeds")
1039
- hint = added_cond_kwargs.get("hint")
1040
- aug_emb, hint = self.add_embedding(image_embs, hint)
1041
- sample = torch.cat([sample, hint], dim=1)
1042
-
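# Note on the "text_time" (SDXL-style) branch above, under the usual SDXL setup where
# `time_ids` packs six size/crop values per sample: the ids are flattened, projected
# individually by `add_time_proj`, reshaped back to [batch, 6 * proj_dim], concatenated
# with `text_embeds`, and mapped through `add_embedding` to produce `aug_emb`.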
1043
- emb = emb + aug_emb if aug_emb is not None else emb
1044
-
1045
- if self.time_embed_act is not None:
1046
- emb = self.time_embed_act(emb)
1047
-
1048
- if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj":
1049
- encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)
1050
- elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj":
1051
- # Kandinsky 2.1 - style
1052
- if "image_embeds" not in added_cond_kwargs:
1053
- raise ValueError(
1054
- f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
1055
- )
1056
-
1057
- image_embeds = added_cond_kwargs.get("image_embeds")
1058
- encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds)
1059
- elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "image_proj":
1060
- # Kandinsky 2.2 - style
1061
- if "image_embeds" not in added_cond_kwargs:
1062
- raise ValueError(
1063
- f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
1064
- )
1065
- image_embeds = added_cond_kwargs.get("image_embeds")
1066
- encoder_hidden_states = self.encoder_hid_proj(image_embeds)
1067
- elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "ip_image_proj":
1068
- if "image_embeds" not in added_cond_kwargs:
1069
- raise ValueError(
1070
- f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'ip_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
1071
- )
1072
- image_embeds = added_cond_kwargs.get("image_embeds")
1073
- image_embeds = self.encoder_hid_proj(image_embeds).to(encoder_hidden_states.dtype)
1074
- encoder_hidden_states = torch.cat([encoder_hidden_states, image_embeds], dim=1)
1075
-
1076
- # 2. pre-process
1077
- sample = self.conv_in(sample)
1078
-
1079
- # 2.5 GLIGEN position net
1080
- if cross_attention_kwargs is not None and cross_attention_kwargs.get("gligen", None) is not None:
1081
- cross_attention_kwargs = cross_attention_kwargs.copy()
1082
- gligen_args = cross_attention_kwargs.pop("gligen")
1083
- cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)}
1084
-
1085
- # 3. down
1086
- lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0
1087
- if USE_PEFT_BACKEND:
1088
- # weight the lora layers by setting `lora_scale` for each PEFT layer
1089
- scale_lora_layers(self, lora_scale)
1090
-
1091
- is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None
1092
- # using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets
1093
- is_adapter = down_intrablock_additional_residuals is not None
1094
- # maintain backward compatibility for legacy usage, where
1095
- # T2I-Adapter and ControlNet both use down_block_additional_residuals arg
1096
- # but can only use one or the other
1097
- if not is_adapter and mid_block_additional_residual is None and down_block_additional_residuals is not None:
1098
- deprecate(
1099
- "T2I should not use down_block_additional_residuals",
1100
- "1.3.0",
1101
- "Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \
1102
- and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \
1103
- for ControlNet. Please make sure to use `down_intrablock_additional_residuals` instead.",
1104
- standard_warn=False,
1105
- )
1106
- down_intrablock_additional_residuals = down_block_additional_residuals
1107
- is_adapter = True
1108
-
1109
- down_block_res_samples = (sample,)
1110
- for downsample_block in self.down_blocks:
1111
- if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
1112
- # For t2i-adapter CrossAttnDownBlock2D
1113
- additional_residuals = {}
1114
- if is_adapter and len(down_intrablock_additional_residuals) > 0:
1115
- additional_residuals["additional_residuals"] = down_intrablock_additional_residuals.pop(0)
1116
-
1117
- sample, res_samples = downsample_block(
1118
- hidden_states=sample,
1119
- temb=emb,
1120
- encoder_hidden_states=encoder_hidden_states,
1121
- attention_mask=attention_mask,
1122
- cross_attention_kwargs=cross_attention_kwargs,
1123
- encoder_attention_mask=encoder_attention_mask,
1124
- **additional_residuals,
1125
- )
1126
- else:
1127
- sample, res_samples = downsample_block(hidden_states=sample, temb=emb, scale=lora_scale)
1128
- if is_adapter and len(down_intrablock_additional_residuals) > 0:
1129
- sample += down_intrablock_additional_residuals.pop(0)
1130
-
1131
- down_block_res_samples += res_samples
1132
-
1133
- if is_controlnet:
1134
- new_down_block_res_samples = ()
1135
-
1136
- for down_block_res_sample, down_block_additional_residual in zip(
1137
- down_block_res_samples, down_block_additional_residuals
1138
- ):
1139
- down_block_res_sample = down_block_res_sample + down_block_additional_residual
1140
- new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)
1141
-
1142
- down_block_res_samples = new_down_block_res_samples
1143
-
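# Note: the zip above pairs one ControlNet residual with each collected skip tensor, so
# `down_block_additional_residuals` is expected to hold exactly len(down_block_res_samples)
# tensors with matching shapes (the conv_in output plus one per down-block output).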
1144
- # 4. mid
1145
- if self.mid_block is not None:
1146
- if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention:
1147
- sample = self.mid_block(
1148
- sample,
1149
- emb,
1150
- encoder_hidden_states=encoder_hidden_states,
1151
- attention_mask=attention_mask,
1152
- cross_attention_kwargs=cross_attention_kwargs,
1153
- encoder_attention_mask=encoder_attention_mask,
1154
- )
1155
- else:
1156
- sample = self.mid_block(sample, emb)
1157
-
1158
- # To support T2I-Adapter-XL
1159
- if (
1160
- is_adapter
1161
- and len(down_intrablock_additional_residuals) > 0
1162
- and sample.shape == down_intrablock_additional_residuals[0].shape
1163
- ):
1164
- sample += down_intrablock_additional_residuals.pop(0)
1165
-
1166
- if is_controlnet:
1167
- sample = sample + mid_block_additional_residual
1168
-
1169
- # 5. up
1170
- for i, upsample_block in enumerate(self.up_blocks):
1171
- is_final_block = i == len(self.up_blocks) - 1
1172
-
1173
- res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
1174
- down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
1175
-
1176
- # if we have not reached the final block and need to forward the
1177
- # upsample size, we do it here
1178
- if not is_final_block and forward_upsample_size:
1179
- upsample_size = down_block_res_samples[-1].shape[2:]
1180
-
1181
- if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
1182
- sample = upsample_block(
1183
- hidden_states=sample,
1184
- temb=emb,
1185
- res_hidden_states_tuple=res_samples,
1186
- encoder_hidden_states=encoder_hidden_states,
1187
- cross_attention_kwargs=cross_attention_kwargs,
1188
- upsample_size=upsample_size,
1189
- attention_mask=attention_mask,
1190
- encoder_attention_mask=encoder_attention_mask,
1191
- )
1192
- else:
1193
- sample = upsample_block(
1194
- hidden_states=sample,
1195
- temb=emb,
1196
- res_hidden_states_tuple=res_samples,
1197
- upsample_size=upsample_size,
1198
- scale=lora_scale,
1199
- )
1200
-
1201
- # 6. post-process
1202
- if self.conv_norm_out:
1203
- sample = self.conv_norm_out(sample)
1204
- sample = self.conv_act(sample)
1205
- sample = self.conv_out(sample)
1206
-
1207
- if USE_PEFT_BACKEND:
1208
- # remove `lora_scale` from each PEFT layer
1209
- unscale_lora_layers(self, lora_scale)
1210
-
1211
- if not return_dict:
1212
- return (sample,)
1213
-
1214
- return UNet2DConditionOutput(sample=sample)
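Finally, a minimal hedged sketch of calling the forward pass defined above. The latent and embedding sizes are illustrative (an SD-1.x-like, 4-channel configuration), not values taken from this file; class-conditioned configurations additionally require `class_labels`:

import torch

# assuming `unet` is a constructed instance of the UNet2DConditionModel in this file
sample = torch.randn(2, 4, 64, 64)               # (batch, in_channels, height, width)
encoder_hidden_states = torch.randn(2, 77, 768)  # (batch, sequence_length, cross_attention_dim)
timestep = torch.tensor([999, 999])

out = unet(sample, timestep, encoder_hidden_states)  # UNet2DConditionOutput
print(out.sample.shape)                              # spatial size matches the input latents

(sample_only,) = unet(sample, timestep, encoder_hidden_states, return_dict=False)  # plain tuple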