Tonic committed
Commit 2783d97 • 1 Parent(s): 2551487

Create sa_handler

Files changed (1)
sa_handler  +279 -0
sa_handler ADDED
@@ -0,0 +1,279 @@
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from __future__ import annotations

from dataclasses import dataclass

import einops
import torch
import torch.nn as nn
from torch.nn import functional as nnf
from diffusers import StableDiffusionXLPipeline
from diffusers.models import attention_processor

T = torch.Tensor

@dataclass(frozen=True)
class StyleAlignedArgs:
    share_group_norm: bool = True
    share_layer_norm: bool = True
    share_attention: bool = True
    adain_queries: bool = True
    adain_keys: bool = True
    adain_values: bool = False
    full_attention_share: bool = False
    shared_score_scale: float = 1.
    shared_score_shift: float = 0.
    only_self_level: float = 0.

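# Note on the assumed batch layout (inferred from expand_first below): the
# batch is split into two equal halves -- presumably the unconditional and
# conditional halves of a classifier-free-guidance batch -- and the first
# sample of each half (indices 0 and b // 2) is the style reference whose
# features are broadcast to the rest of that half.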
def expand_first(feat: T, scale=1.) -> T:
    b = feat.shape[0]
    feat_style = torch.stack((feat[0], feat[b // 2])).unsqueeze(1)
    if scale == 1:
        feat_style = feat_style.expand(2, b // 2, *feat.shape[1:])
    else:
        feat_style = feat_style.repeat(1, b // 2, 1, 1, 1)
        feat_style = torch.cat([feat_style[:, :1], scale * feat_style[:, 1:]], dim=1)
    return feat_style.reshape(*feat.shape)

def concat_first(feat: T, dim=2, scale=1.) -> T:
    feat_style = expand_first(feat, scale=scale)
    return torch.cat((feat, feat_style), dim=dim)

def calc_mean_std(feat, eps: float = 1e-5) -> tuple[T, T]:
    feat_std = (feat.var(dim=-2, keepdim=True) + eps).sqrt()
    feat_mean = feat.mean(dim=-2, keepdim=True)
    return feat_mean, feat_std

def adain(feat: T) -> T:
    # AdaIN: re-normalize each sample's features (statistics taken across
    # tokens) to the reference sample's mean and standard deviation.
    feat_mean, feat_std = calc_mean_std(feat)
    feat_style_mean = expand_first(feat_mean)
    feat_style_std = expand_first(feat_std)
    feat = (feat - feat_mean) / feat_std
    feat = feat * feat_style_std + feat_style_mean
    return feat

class DefaultAttentionProcessor(nn.Module):

    def __init__(self):
        super().__init__()
        self.processor = attention_processor.AttnProcessor2_0()

    def __call__(self, attn: attention_processor.Attention, hidden_states, encoder_hidden_states=None,
                 attention_mask=None, **kwargs):
        return self.processor(attn, hidden_states, encoder_hidden_states, attention_mask)

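# SharedAttentionProcessor implements the shared self-attention used for style
# alignment: queries and keys (and optionally values) are AdaIN-matched to the
# reference sample's statistics, and the reference's keys/values are appended
# to every sample's keys/values so that all images in the batch also attend to
# the reference tokens.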
class SharedAttentionProcessor(DefaultAttentionProcessor):

    def shifted_scaled_dot_product_attention(self, attn: attention_processor.Attention, query: T, key: T, value: T) -> T:
        # Standard scaled dot-product attention, except that shared_score_shift
        # is added to the logits of the appended (shared) keys before softmax.
        logits = torch.einsum('bhqd,bhkd->bhqk', query, key) * attn.scale
        logits[:, :, :, query.shape[2]:] += self.shared_score_shift
        probs = logits.softmax(-1)
        return torch.einsum('bhqk,bhkd->bhqd', probs, value)

    def shared_call(
        self,
        attn: attention_processor.Attention,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
        **kwargs,
    ):
        residual = hidden_states
        input_ndim = hidden_states.ndim
        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )

        if attention_mask is not None:
            attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
            # scaled_dot_product_attention expects attention_mask shape to be
            # (batch, heads, source_length, target_length)
            attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states)
        key = attn.to_k(hidden_states)
        value = attn.to_v(hidden_states)
        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads

        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        # if self.step >= self.start_inject:
        if self.adain_queries:
            query = adain(query)
        if self.adain_keys:
            key = adain(key)
        if self.adain_values:
            value = adain(value)
        if self.share_attention:
            key = concat_first(key, -2, scale=self.shared_score_scale)
            value = concat_first(value, -2)
            if self.shared_score_shift != 0:
                hidden_states = self.shifted_scaled_dot_product_attention(attn, query, key, value)
            else:
                hidden_states = nnf.scaled_dot_product_attention(
                    query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
                )
        else:
            hidden_states = nnf.scaled_dot_product_attention(
                query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
            )
        # hidden_states = adain(hidden_states)
        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.to(query.dtype)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor
        return hidden_states

    def __call__(self, attn: attention_processor.Attention, hidden_states, encoder_hidden_states=None,
                 attention_mask=None, **kwargs):
        if self.full_attention_share:
            b, n, d = hidden_states.shape
            hidden_states = einops.rearrange(hidden_states, '(k b) n d -> k (b n) d', k=2)
            hidden_states = super().__call__(attn, hidden_states, encoder_hidden_states=encoder_hidden_states,
                                             attention_mask=attention_mask, **kwargs)
            hidden_states = einops.rearrange(hidden_states, 'k (b n) d -> (k b) n d', n=n)
        else:
            hidden_states = self.shared_call(attn, hidden_states, hidden_states, attention_mask, **kwargs)

        return hidden_states

    def __init__(self, style_aligned_args: StyleAlignedArgs):
        super().__init__()
        self.share_attention = style_aligned_args.share_attention
        self.adain_queries = style_aligned_args.adain_queries
        self.adain_keys = style_aligned_args.adain_keys
        self.adain_values = style_aligned_args.adain_values
        self.full_attention_share = style_aligned_args.full_attention_share
        self.shared_score_scale = style_aligned_args.shared_score_scale
        self.shared_score_shift = style_aligned_args.shared_score_shift

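# _get_switch_vec maps a level in [0, 1] to a boolean mask over the UNet's
# self-attention layers: 0 selects none, 1 selects all, and fractional values
# mark a roughly proportional, evenly spaced subset. init_attention_processors
# uses the mask to decide which self-attention layers keep the default
# (non-shared) processor.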
def _get_switch_vec(total_num_layers, level):
    if level == 0:
        return torch.zeros(total_num_layers, dtype=torch.bool)
    if level == 1:
        return torch.ones(total_num_layers, dtype=torch.bool)
    to_flip = level > .5
    if to_flip:
        level = 1 - level
    num_switch = int(level * total_num_layers)
    vec = torch.arange(total_num_layers)
    vec = vec % (total_num_layers // num_switch)
    vec = vec == 0
    if to_flip:
        vec = ~vec
    return vec

def init_attention_processors(pipeline: StableDiffusionXLPipeline, style_aligned_args: StyleAlignedArgs | None = None):
    attn_procs = {}
    unet = pipeline.unet
    number_of_self, number_of_cross = 0, 0
    num_self_layers = len([name for name in unet.attn_processors.keys() if 'attn1' in name])
    if style_aligned_args is None:
        only_self_vec = _get_switch_vec(num_self_layers, 1)
    else:
        only_self_vec = _get_switch_vec(num_self_layers, style_aligned_args.only_self_level)
    for i, name in enumerate(unet.attn_processors.keys()):
        is_self_attention = 'attn1' in name
        if is_self_attention:
            number_of_self += 1
            if style_aligned_args is None or only_self_vec[i // 2]:
                attn_procs[name] = DefaultAttentionProcessor()
            else:
                attn_procs[name] = SharedAttentionProcessor(style_aligned_args)
        else:
            number_of_cross += 1
            attn_procs[name] = DefaultAttentionProcessor()

    unet.set_attn_processor(attn_procs)

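# register_shared_norm wraps the UNet's GroupNorm/LayerNorm layers so that
# normalization statistics are computed over each sample's tokens concatenated
# with the reference tokens (via concat_first); the reference part is then
# dropped from the output.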
def register_shared_norm(pipeline: StableDiffusionXLPipeline,
                         share_group_norm: bool = True,
                         share_layer_norm: bool = True):
    def register_norm_forward(norm_layer: nn.GroupNorm | nn.LayerNorm) -> nn.GroupNorm | nn.LayerNorm:
        if not hasattr(norm_layer, 'orig_forward'):
            setattr(norm_layer, 'orig_forward', norm_layer.forward)
        orig_forward = norm_layer.orig_forward

        def forward_(hidden_states: T) -> T:
            n = hidden_states.shape[-2]
            hidden_states = concat_first(hidden_states, dim=-2)
            hidden_states = orig_forward(hidden_states)
            return hidden_states[..., :n, :]

        norm_layer.forward = forward_
        return norm_layer

    def get_norm_layers(pipeline_, norm_layers_: dict[str, list[nn.GroupNorm | nn.LayerNorm]]):
        if isinstance(pipeline_, nn.LayerNorm) and share_layer_norm:
            norm_layers_['layer'].append(pipeline_)
        if isinstance(pipeline_, nn.GroupNorm) and share_group_norm:
            norm_layers_['group'].append(pipeline_)
        else:
            for layer in pipeline_.children():
                get_norm_layers(layer, norm_layers_)

    norm_layers = {'group': [], 'layer': []}
    get_norm_layers(pipeline.unet, norm_layers)
    return ([register_norm_forward(layer) for layer in norm_layers['group']]
            + [register_norm_forward(layer) for layer in norm_layers['layer']])

class Handler:

    def register(self, style_aligned_args: StyleAlignedArgs):
        self.norm_layers = register_shared_norm(self.pipeline, style_aligned_args.share_group_norm,
                                                style_aligned_args.share_layer_norm)
        init_attention_processors(self.pipeline, style_aligned_args)

    def remove(self):
        # Restore the original norm forwards and reset all attention
        # processors to the defaults.
        for layer in self.norm_layers:
            layer.forward = layer.orig_forward
        self.norm_layers = []
        init_attention_processors(self.pipeline, None)

    def __init__(self, pipeline: StableDiffusionXLPipeline):
        self.pipeline = pipeline
        self.norm_layers = []
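
For context, a minimal usage sketch of the handler defined above; it assumes the file is importable as sa_handler, and the SDXL checkpoint and prompts are placeholders.

import torch
from diffusers import StableDiffusionXLPipeline

import sa_handler  # this module

pipeline = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# Share attention across the batch and AdaIN-normalize queries and keys.
sa_args = sa_handler.StyleAlignedArgs(
    share_group_norm=True,
    share_layer_norm=True,
    share_attention=True,
    adain_queries=True,
    adain_keys=True,
    adain_values=False,
)
handler = sa_handler.Handler(pipeline)
handler.register(sa_args)

# The first prompt in the batch serves as the style reference that the other
# prompts attend to through the shared attention processors.
prompts = [
    "a toy train, macro photo",
    "a toy airplane, macro photo",
]
images = pipeline(prompts, num_inference_steps=30).images

handler.remove()  # restore default attention processors and norm forwards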