Files changed (1)
  1. resampler.py +671 -1
resampler.py CHANGED
@@ -3,6 +3,11 @@
 # This source code is licensed under the license found in the
 # LICENSE file in the root directory of this source tree.

+ # Copyright (c) Alibaba Cloud.
+ #
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
 from collections import OrderedDict
 import math
 import requests
@@ -19,6 +24,21 @@ from torch.nn.init import trunc_normal_
 from torchvision import transforms
 from torchvision.transforms import InterpolationMode

+ from functools import partial
+ import numpy as np
+ import warnings
+ from typing import Optional, Tuple
+ import torch
+ from torch import nn
+ from torch import Tensor
+ import deepspeed
+ import torch.nn.functional as F
+ from torch.nn.functional import *
+ from torch.nn.modules.activation import *
+ from torch.nn.init import trunc_normal_
+ from torch.nn.init import constant_, xavier_normal_, xavier_uniform_
+ from transformers import PreTrainedModel
+ from transformers.integrations import is_deepspeed_zero3_enabled
 def get_abs_pos(abs_pos, tgt_size):
     # abs_pos: L, C
     # tgt_size: (H, W)
@@ -126,7 +146,7 @@ class Resampler(nn.Module):
         else:
             self.kv_proj = nn.Identity()

-         self.attn = nn.MultiheadAttention(embed_dim, num_heads)
+         self.attn = MultiheadAttention(embed_dim, num_heads)
         self.ln_q = norm_layer(embed_dim)
         self.ln_kv = norm_layer(embed_dim)

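A minimal sketch, not part of the patch, of what this one-line swap changes in practice: the subclass added later in this diff keeps nn.MultiheadAttention's constructor and call signature, but its output projection is a plain nn.Linear rather than the stock NonDynamicallyQuantizableLinear (see the comment in its __init__ below). The sizes here are arbitrary illustration values, and the import assumes resampler.py is importable with its dependencies (deepspeed, transformers) installed.

    import torch.nn as nn
    from resampler import MultiheadAttention  # assumed import path for this file

    stock = nn.MultiheadAttention(1024, 8)
    patched = MultiheadAttention(1024, 8)
    print(type(stock.out_proj).__name__)    # NonDynamicallyQuantizableLinear
    print(type(patched.out_proj).__name__)  # Linear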
@@ -168,3 +188,653 @@ class Resampler(nn.Module):

     def _repeat(self, query, N: int):
         return query.unsqueeze(1).repeat(1, N, 1)
+
+
+
+ class MultiheadAttention(nn.MultiheadAttention):
+     def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False,
+                  add_zero_attn=False, kdim=None, vdim=None, batch_first=False, device=None, dtype=None):
+         super().__init__(embed_dim, num_heads, dropout, bias, add_bias_kv, add_zero_attn, kdim, vdim, batch_first, device, dtype)
+
+         # rewrite out_proj layer,with nn.Linear
+         self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, device=device, dtype=dtype)
+
+     def forward(
+             self,
+             query: Tensor,
+             key: Tensor,
+             value: Tensor,
+             key_padding_mask: Optional[Tensor] = None,
+             need_weights: bool = True,
+             attn_mask: Optional[Tensor] = None,
+             average_attn_weights: bool = True,
+             is_causal : bool = False) -> Tuple[Tensor, Optional[Tensor]]:
+         why_not_fast_path = ''
+         if ((attn_mask is not None and torch.is_floating_point(attn_mask))
+            or (key_padding_mask is not None) and torch.is_floating_point(key_padding_mask)):
+             why_not_fast_path = "floating-point masks are not supported for fast path."
+
+         is_batched = query.dim() == 3
+
+         key_padding_mask = F._canonical_mask(
+             mask=key_padding_mask,
+             mask_name="key_padding_mask",
+             other_type=F._none_or_dtype(attn_mask),
+             other_name="attn_mask",
+             target_type=query.dtype
+         )
+
+         attn_mask = F._canonical_mask(
+             mask=attn_mask,
+             mask_name="attn_mask",
+             other_type=None,
+             other_name="",
+             target_type=query.dtype,
+             check_other=False,
+         )
+
+
+         if not is_batched:
+             why_not_fast_path = f"input not batched; expected query.dim() of 3 but got {query.dim()}"
+         elif query is not key or key is not value:
+             # When lifting this restriction, don't forget to either
+             # enforce that the dtypes all match or test cases where
+             # they don't!
+             why_not_fast_path = "non-self attention was used (query, key, and value are not the same Tensor)"
+         elif self.in_proj_bias is not None and query.dtype != self.in_proj_bias.dtype:
+             why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_bias ({self.in_proj_bias.dtype}) don't match"
+         elif self.in_proj_weight is None:
+             why_not_fast_path = "in_proj_weight was None"
+         elif query.dtype != self.in_proj_weight.dtype:
+             # this case will fail anyway, but at least they'll get a useful error message.
+             why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_weight ({self.in_proj_weight.dtype}) don't match"
+         elif self.training:
+             why_not_fast_path = "training is enabled"
+         elif (self.num_heads % 2) != 0:
+             why_not_fast_path = "self.num_heads is not even"
+         elif not self.batch_first:
+             why_not_fast_path = "batch_first was not True"
+         elif self.bias_k is not None:
+             why_not_fast_path = "self.bias_k was not None"
+         elif self.bias_v is not None:
+             why_not_fast_path = "self.bias_v was not None"
+         elif self.add_zero_attn:
+             why_not_fast_path = "add_zero_attn was enabled"
+         elif not self._qkv_same_embed_dim:
+             why_not_fast_path = "_qkv_same_embed_dim was not True"
+         elif query.is_nested and (key_padding_mask is not None or attn_mask is not None):
+             why_not_fast_path = "supplying both src_key_padding_mask and src_mask at the same time \
+                                  is not supported with NestedTensor input"
+         elif torch.is_autocast_enabled():
+             why_not_fast_path = "autocast is enabled"
+
+         if not why_not_fast_path:
+             tensor_args = (
+                 query,
+                 key,
+                 value,
+                 self.in_proj_weight,
+                 self.in_proj_bias,
+                 self.out_proj.weight,
+                 self.out_proj.bias,
+             )
+             # We have to use list comprehensions below because TorchScript does not support
+             # generator expressions.
+             if torch.overrides.has_torch_function(tensor_args):
+                 why_not_fast_path = "some Tensor argument has_torch_function"
+             elif _is_make_fx_tracing():
+                 why_not_fast_path = "we are running make_fx tracing"
+             elif not all(_check_arg_device(x) for x in tensor_args):
+                 why_not_fast_path = ("some Tensor argument's device is neither one of "
+                                      f"cpu, cuda or {torch.utils.backend_registration._privateuse1_backend_name}")
+             elif torch.is_grad_enabled() and any(_arg_requires_grad(x) for x in tensor_args):
+                 why_not_fast_path = ("grad is enabled and at least one of query or the "
+                                      "input/output projection weights or biases requires_grad")
+             if not why_not_fast_path:
+                 merged_mask, mask_type = self.merge_masks(attn_mask, key_padding_mask, query)
+
+                 if self.in_proj_bias is not None and self.in_proj_weight is not None:
+                     return torch._native_multi_head_attention(
+                         query,
+                         key,
+                         value,
+                         self.embed_dim,
+                         self.num_heads,
+                         self.in_proj_weight,
+                         self.in_proj_bias,
+                         self.out_proj.weight,
+                         self.out_proj.bias,
+                         merged_mask,
+                         need_weights,
+                         average_attn_weights,
+                         mask_type)
+
+         any_nested = query.is_nested or key.is_nested or value.is_nested
+         assert not any_nested, ("MultiheadAttention does not support NestedTensor outside of its fast path. " +
+                                 f"The fast path was not hit because {why_not_fast_path}")
+
+         if self.batch_first and is_batched:
+             # make sure that the transpose op does not affect the "is" property
+             if key is value:
+                 if query is key:
+                     query = key = value = query.transpose(1, 0)
+                 else:
+                     query, key = (x.transpose(1, 0) for x in (query, key))
+                     value = key
+             else:
+                 query, key, value = (x.transpose(1, 0) for x in (query, key, value))
+
+         if not self._qkv_same_embed_dim:
+             attn_output, attn_output_weights = self.multi_head_attention_forward(
+                 query, key, value, self.embed_dim, self.num_heads,
+                 self.in_proj_weight, self.in_proj_bias,
+                 self.bias_k, self.bias_v, self.add_zero_attn,
+                 self.dropout, self.out_proj.weight, self.out_proj.bias,
+                 training=self.training,
+                 key_padding_mask=key_padding_mask, need_weights=need_weights,
+                 attn_mask=attn_mask,
+                 use_separate_proj_weight=True,
+                 q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
+                 v_proj_weight=self.v_proj_weight,
+                 average_attn_weights=average_attn_weights,
+                 is_causal=is_causal)
+         else:
+             attn_output, attn_output_weights = self.multi_head_attention_forward(
+                 query, key, value, self.embed_dim, self.num_heads,
+                 self.in_proj_weight, self.in_proj_bias,
+                 self.bias_k, self.bias_v, self.add_zero_attn,
+                 self.dropout, self.out_proj.weight, self.out_proj.bias,
+                 training=self.training,
+                 key_padding_mask=key_padding_mask,
+                 need_weights=need_weights,
+                 attn_mask=attn_mask,
+                 average_attn_weights=average_attn_weights,
+                 is_causal=is_causal)
+         if self.batch_first and is_batched:
+             return attn_output.transpose(1, 0), attn_output_weights
+         else:
+             return attn_output, attn_output_weights
+
+     def multi_head_attention_forward(
+         self,
+         query: Tensor,
+         key: Tensor,
+         value: Tensor,
+         embed_dim_to_check: int,
+         num_heads: int,
+         in_proj_weight: Optional[Tensor],
+         in_proj_bias: Optional[Tensor],
+         bias_k: Optional[Tensor],
+         bias_v: Optional[Tensor],
+         add_zero_attn: bool,
+         dropout_p: float,
+         out_proj_weight: Tensor,
+         out_proj_bias: Optional[Tensor],
+         training: bool = True,
+         key_padding_mask: Optional[Tensor] = None,
+         need_weights: bool = True,
+         attn_mask: Optional[Tensor] = None,
+         use_separate_proj_weight: bool = False,
+         q_proj_weight: Optional[Tensor] = None,
+         k_proj_weight: Optional[Tensor] = None,
+         v_proj_weight: Optional[Tensor] = None,
+         static_k: Optional[Tensor] = None,
+         static_v: Optional[Tensor] = None,
+         average_attn_weights: bool = True,
+         is_causal: bool = False,
+     ) -> Tuple[Tensor, Optional[Tensor]]:
+         tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, out_proj_weight, out_proj_bias)
+         if has_torch_function(tens_ops):
+             return handle_torch_function(
+                 multi_head_attention_forward,
+                 tens_ops,
+                 query,
+                 key,
+                 value,
+                 embed_dim_to_check,
+                 num_heads,
+                 in_proj_weight,
+                 in_proj_bias,
+                 bias_k,
+                 bias_v,
+                 add_zero_attn,
+                 dropout_p,
+                 out_proj_weight,
+                 out_proj_bias,
+                 training=training,
+                 key_padding_mask=key_padding_mask,
+                 need_weights=need_weights,
+                 attn_mask=attn_mask,
+                 is_causal=is_causal,
+                 use_separate_proj_weight=use_separate_proj_weight,
+                 q_proj_weight=q_proj_weight,
+                 k_proj_weight=k_proj_weight,
+                 v_proj_weight=v_proj_weight,
+                 static_k=static_k,
+                 static_v=static_v,
+                 average_attn_weights=average_attn_weights,
+             )
+
+         is_batched = _mha_shape_check(query, key, value, key_padding_mask, attn_mask, num_heads)
+
+         # For unbatched input, we unsqueeze at the expected batch-dim to pretend that the input
+         # is batched, run the computation and before returning squeeze the
+         # batch dimension so that the output doesn't carry this temporary batch dimension.
+         if not is_batched:
+             # unsqueeze if the input is unbatched
+             query = query.unsqueeze(1)
+             key = key.unsqueeze(1)
+             value = value.unsqueeze(1)
+             if key_padding_mask is not None:
+                 key_padding_mask = key_padding_mask.unsqueeze(0)
+
+         # set up shape vars
+         tgt_len, bsz, embed_dim = query.shape
+         src_len, _, _ = key.shape
+
+         key_padding_mask = _canonical_mask(
+             mask=key_padding_mask,
+             mask_name="key_padding_mask",
+             other_type=_none_or_dtype(attn_mask),
+             other_name="attn_mask",
+             target_type=query.dtype
+         )
+
+         if is_causal and attn_mask is None:
+             raise RuntimeError(
+                 "Need attn_mask if specifying the is_causal hint. "
+                 "You may use the Transformer module method "
+                 "`generate_square_subsequent_mask` to create this mask."
+             )
+
+         if is_causal and key_padding_mask is None and not need_weights:
+             # when we have a kpm or need weights, we need attn_mask
+             # Otherwise, we use the is_causal hint go as is_causal
+             # indicator to SDPA.
+             attn_mask = None
+         else:
+             attn_mask = _canonical_mask(
+                 mask=attn_mask,
+                 mask_name="attn_mask",
+                 other_type=None,
+                 other_name="",
+                 target_type=query.dtype,
+                 check_other=False,
+             )
+
+             if key_padding_mask is not None:
+                 # We have the attn_mask, and use that to merge kpm into it.
+                 # Turn off use of is_causal hint, as the merged mask is no
+                 # longer causal.
+                 is_causal = False
+
+         assert embed_dim == embed_dim_to_check, \
+             f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
+         if isinstance(embed_dim, torch.Tensor):
+             # embed_dim can be a tensor when JIT tracing
+             head_dim = embed_dim.div(num_heads, rounding_mode='trunc')
+         else:
+             head_dim = embed_dim // num_heads
+         assert head_dim * num_heads == embed_dim, f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
+         if use_separate_proj_weight:
+             # allow MHA to have different embedding dimensions when separate projection weights are used
+             assert key.shape[:2] == value.shape[:2], \
+                 f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
+         else:
+             assert key.shape == value.shape, f"key shape {key.shape} does not match value shape {value.shape}"
+
+         #
+         # compute in-projection
+         #
+         if not use_separate_proj_weight:
+             assert in_proj_weight is not None, "use_separate_proj_weight is False but in_proj_weight is None"
+             q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias)
+         else:
+             assert q_proj_weight is not None, "use_separate_proj_weight is True but q_proj_weight is None"
+             assert k_proj_weight is not None, "use_separate_proj_weight is True but k_proj_weight is None"
+             assert v_proj_weight is not None, "use_separate_proj_weight is True but v_proj_weight is None"
+             if in_proj_bias is None:
+                 b_q = b_k = b_v = None
+             else:
+                 b_q, b_k, b_v = in_proj_bias.chunk(3)
+             q, k, v = _in_projection(query, key, value, q_proj_weight, k_proj_weight, v_proj_weight, b_q, b_k, b_v)
+
+         # prep attention mask
+
+         if attn_mask is not None:
+             # ensure attn_mask's dim is 3
+             if attn_mask.dim() == 2:
+                 correct_2d_size = (tgt_len, src_len)
+                 if attn_mask.shape != correct_2d_size:
+                     raise RuntimeError(f"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}.")
+                 attn_mask = attn_mask.unsqueeze(0)
+             elif attn_mask.dim() == 3:
+                 correct_3d_size = (bsz * num_heads, tgt_len, src_len)
+                 if attn_mask.shape != correct_3d_size:
+                     raise RuntimeError(f"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}.")
+             else:
+                 raise RuntimeError(f"attn_mask's dimension {attn_mask.dim()} is not supported")
+
+         # add bias along batch dimension (currently second)
+         if bias_k is not None and bias_v is not None:
+             assert static_k is None, "bias cannot be added to static key."
+             assert static_v is None, "bias cannot be added to static value."
+             k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
+             v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
+             if attn_mask is not None:
+                 attn_mask = pad(attn_mask, (0, 1))
+             if key_padding_mask is not None:
+                 key_padding_mask = pad(key_padding_mask, (0, 1))
+         else:
+             assert bias_k is None
+             assert bias_v is None
+
+         #
+         # reshape q, k, v for multihead attention and make em batch first
+         #
+         q = q.view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
+         if static_k is None:
+             k = k.view(k.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
+         else:
+             # TODO finish disentangling control flow so we don't do in-projections when statics are passed
+             assert static_k.size(0) == bsz * num_heads, \
+                 f"expecting static_k.size(0) of {bsz * num_heads}, but got {static_k.size(0)}"
+             assert static_k.size(2) == head_dim, \
+                 f"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}"
+             k = static_k
+         if static_v is None:
+             v = v.view(v.shape[0], bsz * num_heads, head_dim).transpose(0, 1)
+         else:
+             # TODO finish disentangling control flow so we don't do in-projections when statics are passed
+             assert static_v.size(0) == bsz * num_heads, \
+                 f"expecting static_v.size(0) of {bsz * num_heads}, but got {static_v.size(0)}"
+             assert static_v.size(2) == head_dim, \
+                 f"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}"
+             v = static_v
+
+         # add zero attention along batch dimension (now first)
+         if add_zero_attn:
+             zero_attn_shape = (bsz * num_heads, 1, head_dim)
+             k = torch.cat([k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1)
+             v = torch.cat([v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1)
+             if attn_mask is not None:
+                 attn_mask = pad(attn_mask, (0, 1))
+             if key_padding_mask is not None:
+                 key_padding_mask = pad(key_padding_mask, (0, 1))
+
+         # update source sequence length after adjustments
+         src_len = k.size(1)
+
+         # merge key padding and attention masks
+         if key_padding_mask is not None:
+             assert key_padding_mask.shape == (bsz, src_len), \
+                 f"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}"
+             key_padding_mask = key_padding_mask.view(bsz, 1, 1, src_len). \
+                 expand(-1, num_heads, -1, -1).reshape(bsz * num_heads, 1, src_len)
+             if attn_mask is None:
+                 attn_mask = key_padding_mask
+             else:
+                 attn_mask = attn_mask + key_padding_mask
+
+         # adjust dropout probability
+         if not training:
+             dropout_p = 0.0
+
+         #
+         # (deep breath) calculate attention and out projection
+         #
+
+         if need_weights:
+             B, Nt, E = q.shape
+             q_scaled = q / math.sqrt(E)
+
+             assert not (is_causal and attn_mask is None), "FIXME: is_causal not implemented for need_weights"
+
+             if attn_mask is not None:
+                 attn_output_weights = torch.baddbmm(attn_mask, q_scaled, k.transpose(-2, -1))
+             else:
+                 attn_output_weights = torch.bmm(q_scaled, k.transpose(-2, -1))
+             attn_output_weights = softmax(attn_output_weights, dim=-1)
+             if dropout_p > 0.0:
+                 attn_output_weights = dropout(attn_output_weights, p=dropout_p)
+
+             attn_output = torch.bmm(attn_output_weights, v)
+
+             attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len * bsz, embed_dim)
+             attn_output = self.out_proj(attn_output)
+             attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
+
+             # optionally average attention weights over heads
+             attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
+             if average_attn_weights:
+                 attn_output_weights = attn_output_weights.mean(dim=1)
+
+             if not is_batched:
+                 # squeeze the output if input was unbatched
+                 attn_output = attn_output.squeeze(1)
+                 attn_output_weights = attn_output_weights.squeeze(0)
+             return attn_output, attn_output_weights
+         else:
+             # attn_mask can be either (L,S) or (N*num_heads, L, S)
+             # if attn_mask's shape is (1, L, S) we need to unsqueeze to (1, 1, L, S)
+             # in order to match the input for SDPA of (N, num_heads, L, S)
+             if attn_mask is not None:
+                 if attn_mask.size(0) == 1 and attn_mask.dim() == 3:
+                     attn_mask = attn_mask.unsqueeze(0)
+                 else:
+                     attn_mask = attn_mask.view(bsz, num_heads, -1, src_len)
+
+             q = q.view(bsz, num_heads, tgt_len, head_dim)
+             k = k.view(bsz, num_heads, src_len, head_dim)
+             v = v.view(bsz, num_heads, src_len, head_dim)
+
+             attn_output = F.scaled_dot_product_attention(q, k, v, attn_mask, dropout_p, is_causal)
+             attn_output = attn_output.permute(2, 0, 1, 3).contiguous().view(bsz * tgt_len, embed_dim)
+
+             attn_output = self.out_proj(attn_output)
+             attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1))
+             if not is_batched:
+                 # squeeze the output if input was unbatched
+                 attn_output = attn_output.squeeze(1)
+             return attn_output, None
+
+
+ def _mha_shape_check(query: Tensor, key: Tensor, value: Tensor,
+                      key_padding_mask: Optional[Tensor], attn_mask: Optional[Tensor], num_heads: int):
+     # Verifies the expected shape for `query, `key`, `value`, `key_padding_mask` and `attn_mask`
+     # and returns if the input is batched or not.
+     # Raises an error if `query` is not 2-D (unbatched) or 3-D (batched) tensor.
+
+     # Shape check.
+     if query.dim() == 3:
+         # Batched Inputs
+         is_batched = True
+         assert key.dim() == 3 and value.dim() == 3, \
+             ("For batched (3-D) `query`, expected `key` and `value` to be 3-D"
+              f" but found {key.dim()}-D and {value.dim()}-D tensors respectively")
+         if key_padding_mask is not None:
+             assert key_padding_mask.dim() == 2, \
+                 ("For batched (3-D) `query`, expected `key_padding_mask` to be `None` or 2-D"
+                  f" but found {key_padding_mask.dim()}-D tensor instead")
+         if attn_mask is not None:
+             assert attn_mask.dim() in (2, 3), \
+                 ("For batched (3-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D"
+                  f" but found {attn_mask.dim()}-D tensor instead")
+     elif query.dim() == 2:
+         # Unbatched Inputs
+         is_batched = False
+         assert key.dim() == 2 and value.dim() == 2, \
+             ("For unbatched (2-D) `query`, expected `key` and `value` to be 2-D"
+              f" but found {key.dim()}-D and {value.dim()}-D tensors respectively")
+
+         if key_padding_mask is not None:
+             assert key_padding_mask.dim() == 1, \
+                 ("For unbatched (2-D) `query`, expected `key_padding_mask` to be `None` or 1-D"
+                  f" but found {key_padding_mask.dim()}-D tensor instead")
+
+         if attn_mask is not None:
+             assert attn_mask.dim() in (2, 3), \
+                 ("For unbatched (2-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D"
+                  f" but found {attn_mask.dim()}-D tensor instead")
+             if attn_mask.dim() == 3:
+                 expected_shape = (num_heads, query.shape[0], key.shape[0])
+                 assert attn_mask.shape == expected_shape, \
+                     (f"Expected `attn_mask` shape to be {expected_shape} but got {attn_mask.shape}")
+     else:
+         raise AssertionError(
+             f"query should be unbatched 2D or batched 3D tensor but received {query.dim()}-D query tensor")
+
+     return is_batched
+
+
+ def _canonical_mask(
+         mask: Optional[Tensor],
+         mask_name: str,
+         other_type: Optional[DType],
+         other_name: str,
+         target_type: DType,
+         check_other: bool = True,
+ ) -> Optional[Tensor]:
+
+     if mask is not None:
+         _mask_dtype = mask.dtype
+         _mask_is_float = torch.is_floating_point(mask)
+         if _mask_dtype != torch.bool and not _mask_is_float:
+             raise AssertionError(
+                 f"only bool and floating types of {mask_name} are supported")
+         if check_other and other_type is not None:
+             if _mask_dtype != other_type:
+                 warnings.warn(
+                     f"Support for mismatched {mask_name} and {other_name} "
+                     "is deprecated. Use same type for both instead."
+                 )
+         if not _mask_is_float:
+             mask = (
+                 torch.zeros_like(mask, dtype=target_type)
+                 .masked_fill_(mask, float("-inf"))
+             )
+     return mask
+
+
+ def _none_or_dtype(input: Optional[Tensor]) -> Optional[DType]:
+     if input is None:
+         return None
+     elif isinstance(input, torch.Tensor):
+         return input.dtype
+     raise RuntimeError("input to _none_or_dtype() must be None or torch.Tensor")
+
+ def _in_projection_packed(
+     q: Tensor,
+     k: Tensor,
+     v: Tensor,
+     w: Tensor,
+     b: Optional[Tensor] = None,
+ ) -> List[Tensor]:
+     r"""
+     Performs the in-projection step of the attention operation, using packed weights.
+     Output is a triple containing projection tensors for query, key and value.
+
+     Args:
+         q, k, v: query, key and value tensors to be projected. For self-attention,
+             these are typically the same tensor; for encoder-decoder attention,
+             k and v are typically the same tensor. (We take advantage of these
+             identities for performance if they are present.) Regardless, q, k and v
+             must share a common embedding dimension; otherwise their shapes may vary.
+         w: projection weights for q, k and v, packed into a single tensor. Weights
+             are packed along dimension 0, in q, k, v order.
+         b: optional projection biases for q, k and v, packed into a single tensor
+             in q, k, v order.
+
+     Shape:
+         Inputs:
+         - q: :math:`(..., E)` where E is the embedding dimension
+         - k: :math:`(..., E)` where E is the embedding dimension
+         - v: :math:`(..., E)` where E is the embedding dimension
+         - w: :math:`(E * 3, E)` where E is the embedding dimension
+         - b: :math:`E * 3` where E is the embedding dimension
+
+         Output:
+         - in output list :math:`[q', k', v']`, each output tensor will have the
+             same shape as the corresponding input tensor.
+     """
+     E = q.size(-1)
+     if k is v:
+         if q is k:
+             # self-attention
+             proj = linear(q, w, b)
+             # reshape to 3, E and not E, 3 is deliberate for better memory coalescing and keeping same order as chunk()
+             proj = proj.unflatten(-1, (3, E)).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()
+             return proj[0], proj[1], proj[2]
+         else:
+             # encoder-decoder attention
+             w_q, w_kv = w.split([E, E * 2])
+             if b is None:
+                 b_q = b_kv = None
+             else:
+                 b_q, b_kv = b.split([E, E * 2])
+             q_proj = linear(q, w_q, b_q)
+             kv_proj = linear(k, w_kv, b_kv)
+             # reshape to 2, E and not E, 2 is deliberate for better memory coalescing and keeping same order as chunk()
+             kv_proj = kv_proj.unflatten(-1, (2, E)).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous()
+             return (q_proj, kv_proj[0], kv_proj[1])
+     else:
+         w_q, w_k, w_v = w.chunk(3)
+         if b is None:
+             b_q = b_k = b_v = None
+         else:
+             b_q, b_k, b_v = b.chunk(3)
+         return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
+
+
+ def _in_projection(
+     q: Tensor,
+     k: Tensor,
+     v: Tensor,
+     w_q: Tensor,
+     w_k: Tensor,
+     w_v: Tensor,
+     b_q: Optional[Tensor] = None,
+     b_k: Optional[Tensor] = None,
+     b_v: Optional[Tensor] = None,
+ ) -> Tuple[Tensor, Tensor, Tensor]:
+     r"""
+     Performs the in-projection step of the attention operation. This is simply
+     a triple of linear projections, with shape constraints on the weights which
+     ensure embedding dimension uniformity in the projected outputs.
+     Output is a triple containing projection tensors for query, key and value.
+
+     Args:
+         q, k, v: query, key and value tensors to be projected.
+         w_q, w_k, w_v: weights for q, k and v, respectively.
+         b_q, b_k, b_v: optional biases for q, k and v, respectively.
+
+     Shape:
+         Inputs:
+         - q: :math:`(Qdims..., Eq)` where Eq is the query embedding dimension and Qdims are any
+             number of leading dimensions.
+         - k: :math:`(Kdims..., Ek)` where Ek is the key embedding dimension and Kdims are any
+             number of leading dimensions.
+         - v: :math:`(Vdims..., Ev)` where Ev is the value embedding dimension and Vdims are any
+             number of leading dimensions.
+         - w_q: :math:`(Eq, Eq)`
+         - w_k: :math:`(Eq, Ek)`
+         - w_v: :math:`(Eq, Ev)`
+         - b_q: :math:`(Eq)`
+         - b_k: :math:`(Eq)`
+         - b_v: :math:`(Eq)`
+
+         Output: in output triple :math:`(q', k', v')`,
+          - q': :math:`[Qdims..., Eq]`
+          - k': :math:`[Kdims..., Eq]`
+          - v': :math:`[Vdims..., Eq]`
+
+     """
+     Eq, Ek, Ev = q.size(-1), k.size(-1), v.size(-1)
+     assert w_q.shape == (Eq, Eq), f"expecting query weights shape of {(Eq, Eq)}, but got {w_q.shape}"
+     assert w_k.shape == (Eq, Ek), f"expecting key weights shape of {(Eq, Ek)}, but got {w_k.shape}"
+     assert w_v.shape == (Eq, Ev), f"expecting value weights shape of {(Eq, Ev)}, but got {w_v.shape}"
+     assert b_q is None or b_q.shape == (Eq,), f"expecting query bias shape of {(Eq,)}, but got {b_q.shape}"
+     assert b_k is None or b_k.shape == (Eq,), f"expecting key bias shape of {(Eq,)}, but got {b_k.shape}"
+     assert b_v is None or b_v.shape == (Eq,), f"expecting value bias shape of {(Eq,)}, but got {b_v.shape}"
+     return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v)
+
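A minimal usage sketch, not part of the patch, showing that the added class is called exactly like nn.MultiheadAttention: inputs stay sequence-first (L, N, E) because batch_first defaults to False, and the call returns (output, attention weights). The tensor sizes below are arbitrary, the import path is an assumption, and a PyTorch 2.x install is required since the code relies on torch.nn.functional._canonical_mask and scaled_dot_product_attention.

    import torch
    from resampler import MultiheadAttention  # assumed import path for this file

    embed_dim, num_heads = 64, 8
    attn = MultiheadAttention(embed_dim, num_heads).eval()

    # sequence-first layout, matching how the Resampler drives its cross-attention
    query = torch.randn(16, 2, embed_dim)         # e.g. 16 learned queries, batch of 2
    key = value = torch.randn(100, 2, embed_dim)  # e.g. 100 vision tokens

    with torch.no_grad():
        out, weights = attn(query, key, value)
    print(out.shape)      # torch.Size([16, 2, 64])
    print(weights.shape)  # torch.Size([2, 16, 100]); per-head weights averaged by default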