swimmiing committed on
Commit
b20af9f
1 Parent(s): 581ac51

Upload model files

Browse files
app.py CHANGED
@@ -1,7 +1,45 @@
1
  import gradio as gr
 
 
 
 
 
 
2
 
3
  def greet(name):
4
  return "Hello " + name + "!!"
5
 
6
- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
7
- iface.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ import torch
3
+ import numpy as np
4
+ from modules.models import *
5
+ from util import get_prompt_template
6
+ from PIL import Image
7
+
8
 
9
  def greet(name):
10
  return "Hello " + name + "!!"
11
 
12
+
13
+ def main():
14
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
15
+
16
+ # Get model
17
+ model_conf_file = f'./config/model/ACL_ViT16.yaml'
18
+ model = ACL(model_conf_file, device)
19
+ model.train(False)
20
+ model.load('./pretrain/Param_best.pth')
21
+
22
+ # Get placeholder text
23
+ prompt_template, text_pos_at_prompt, prompt_length = get_prompt_template()
24
+
25
+ # Input pre processing
26
+
27
+ # Inference
28
+ placeholder_tokens = model.get_placeholder_token(prompt_template.replace('{}', ''))
29
+ # audio_driven_embedding = model.encode_audio(audios.to(model.device), placeholder_tokens, text_pos_at_prompt,
30
+ # prompt_length)
31
+
32
+ # Localization result
33
+ # out_dict = model(images.to(model.device), audio_driven_embedding, 352)
34
+ # seg = out_dict['heatmap'][j:j + 1]
35
+ # seg_image = ((1 - seg.squeeze().detach().cpu().numpy()) * 255).astype(np.uint8)
36
+ # seg_image = Image.fromarray(seg_image)
37
+ heatmap_image = cv2.applyColorMap(np.array(seg_image), cv2.COLORMAP_JET)
38
+ # overlaid_image = cv2.addWeighted(np.array(original_image), 0.5, heatmap_image, 0.5, 0)
39
+
40
+
41
+ if __name__ == "__main__":
42
+ iface = gr.Interface(fn=greet, inputs="text", outputs="text")
43
+ iface.launch()
44
+
45
+ main()
config/model/ACL_ViT16.yaml ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model:
2
+ clip: ViT16
3
+ vision_backbone: null
4
+ audio_backbone: BEATs
5
+ audio_proj: FGA512
6
+
7
+ pretrain:
8
+ vision_backbone: null
9
+ audio_backbone: ./pretrain/BEATs_iter3_plus_AS2M_finetuned_on_AS2M_cpt2.pt
10
+ audio_proj: null
11
+
12
+ fga_conf:
13
+ FGA:
14
+ input_size: 768
15
+ output_size: 768
16
+
17
+ FGA512:
18
+ input_size: 768
19
+ output_size: 512
20
+
21
+ clip_conf:
22
+ RN50:
23
+ name: RN50
24
+ vision:
25
+ image_resolution: 224
26
+ vision_layers: [3, 4, 6, 3]
27
+ vision_width: 64
28
+ heads: 8
29
+ vision_patch_size: null
30
+ text:
31
+ transformer_layers: 12
32
+ transformer_width: 512
33
+ transformer_heads: 8
34
+ vocab_size: 49408
35
+ context_length: 77
36
+ embedding_dim: 1024
37
+
38
+ ViT16:
39
+ name: ViT-B/16
40
+ vision:
41
+ image_resolution: 224
42
+ vision_layers: 12
43
+ vision_width: 768
44
+ heads: 12
45
+ vision_patch_size: 16
46
+ text:
47
+ transformer_layers: 12
48
+ transformer_width: 512
49
+ transformer_heads: 8
50
+ vocab_size: 49408
51
+ context_length: 77
52
+ embedding_dim: 512
53
+
54
+ ViT14:
55
+ name: ViT-L/14
56
+ vision:
57
+ image_resolution: 224
58
+ vision_layers: 24
59
+ vision_width: 1024
60
+ heads: 16
61
+ vision_patch_size: 14
62
+ text:
63
+ transformer_layers: 12
64
+ transformer_width: 768
65
+ transformer_heads: 12
66
+ vocab_size: 49408
67
+ context_length: 77
68
+ embedding_dim: 768
69
+
70
+ vision_backbone_conf:
71
+ maskclip_plus_rn50_512:
72
+ name: maskclip_plus_rn50_512
73
+ image_resolution: 512
74
+ vision_layers: [ 3, 4, 6, 3 ]
75
+ vision_width: 2048
76
+ aspp:
77
+ dilations: [ 6, 12, 18, 24 ]
78
+ in_channels: 2048
79
+ channels: 512
80
+
81
+ maskclip_plus_rn101_512:
82
+ name: maskclip_plus_rn101_512
83
+ image_resolution: 512
84
+ vision_layers: [ 3, 4, 23, 3 ]
85
+ vision_width: 2048
86
+ aspp:
87
+ dilations: [ 6, 12, 18, 24 ]
88
+ in_channels: 2048
89
+ channels: 1024
config/train/Exp_ACL_v1.yaml ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model: ACL
2
+
3
+ common:
4
+ train_data: vggss
5
+ epoch: 20
6
+ batch_size: 8
7
+ input_resolution: 352
8
+ num_workers: 4
9
+ seed: 0
10
+ loss:
11
+ - acl_i
12
+ - acl_f
13
+ - area_reg
14
+ loss_w:
15
+ - 1
16
+ - 1
17
+ - 1
18
+
19
+ optimizer: Adam
20
+ scheduler: null
21
+ amp: True
22
+
23
+ optim_conf:
24
+ Adam:
25
+ module_path: torch.optim
26
+ module_name: Adam
27
+ lr: 0.0001
28
+ weight_decay: 0.0001
29
+
30
+ AdamW:
31
+ module_path: torch.optim
32
+ module_name: AdamW
33
+ lr: 0.001
34
+
35
+ SGDR:
36
+ module_path: torch.optim
37
+ module_name: SGD
38
+ lr: 0.5
39
+ weight_decay: 0.00001
40
+
41
+ sched_conf:
42
+ Cosine:
43
+ module_path: torch.optim.lr_scheduler
44
+ module_name: CosineAnnealingLR
45
+ eta_ratio: 0.0
modules/AudioToken/AudioToken.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from diffusers.loaders import AttnProcsLayers
3
+
4
+ from modules.BEATs.BEATs import BEATs, BEATsConfig
5
+ from modules.AudioToken.embedder import FGAEmbedder
6
+ from diffusers import AutoencoderKL, UNet2DConditionModel
7
+ from diffusers.models.attention_processor import LoRAAttnProcessor
8
+
9
+
10
+ class AudioTokenWrapper(torch.nn.Module):
11
+ """Simple wrapper module for Stable Diffusion that holds all the models together"""
12
+
13
+ def __init__(
14
+ self,
15
+ args,
16
+ accelerator,
17
+ ):
18
+
19
+ super().__init__()
20
+ # Load scheduler and models
21
+ from modules.clip_text_model.modeling_clip import CLIPTextModel
22
+ self.text_encoder = CLIPTextModel.from_pretrained(
23
+ args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
24
+ )
25
+ self.unet = UNet2DConditionModel.from_pretrained(
26
+ args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
27
+ )
28
+ self.vae = AutoencoderKL.from_pretrained(
29
+ args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision
30
+ )
31
+
32
+ checkpoint = torch.load(
33
+ 'models/BEATs/BEATs_iter3_plus_AS2M_finetuned_on_AS2M_cpt2.pt')
34
+ cfg = BEATsConfig(checkpoint['cfg'])
35
+ self.aud_encoder = BEATs(cfg)
36
+ self.aud_encoder.load_state_dict(checkpoint['model'])
37
+ self.aud_encoder.predictor = None
38
+ input_size = 768 * 3
39
+
40
+ if args.pretrained_model_name_or_path == "CompVis/stable-diffusion-v1-4":
41
+ self.embedder = FGAEmbedder(input_size=input_size, output_size=768)
42
+
43
+ else:
44
+ self.embedder = FGAEmbedder(input_size=input_size, output_size=1024)
45
+
46
+ self.vae.eval()
47
+ self.unet.eval()
48
+ self.text_encoder.eval()
49
+ self.aud_encoder.eval()
50
+
51
+ if 'lora' in args and args.lora:
52
+ # Set correct lora layers
53
+ lora_attn_procs = {}
54
+ for name in self.unet.attn_processors.keys():
55
+ cross_attention_dim = None if name.endswith(
56
+ "attn1.processor") else self.unet.config.cross_attention_dim
57
+ if name.startswith("mid_block"):
58
+ hidden_size = self.unet.config.block_out_channels[-1]
59
+ elif name.startswith("up_blocks"):
60
+ block_id = int(name[len("up_blocks.")])
61
+ hidden_size = list(reversed(self.unet.config.block_out_channels))[block_id]
62
+ elif name.startswith("down_blocks"):
63
+ block_id = int(name[len("down_blocks.")])
64
+ hidden_size = self.unet.config.block_out_channels[block_id]
65
+
66
+ lora_attn_procs[name] = LoRAAttnProcessor(hidden_size=hidden_size,
67
+ cross_attention_dim=cross_attention_dim)
68
+
69
+ self.unet.set_attn_processor(lora_attn_procs)
70
+ self.lora_layers = AttnProcsLayers(self.unet.attn_processors)
71
+
72
+ if args.data_set == 'train':
73
+
74
+ # Freeze vae, unet, text_enc and aud_encoder
75
+ self.vae.requires_grad_(False)
76
+ self.unet.requires_grad_(False)
77
+ self.text_encoder.requires_grad_(False)
78
+ self.aud_encoder.requires_grad_(False)
79
+ self.embedder.requires_grad_(True)
80
+ self.embedder.train()
81
+
82
+ if 'lora' in args and args.lora:
83
+ self.unet.train()
84
+
85
+ if args.data_set == 'test':
86
+
87
+ from transformers import CLIPTextModel
88
+ self.text_encoder = CLIPTextModel.from_pretrained(
89
+ args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
90
+ )
91
+
92
+ self.embedder.eval()
93
+ embedder_learned_embeds = args.learned_embeds
94
+ self.embedder.load_state_dict(torch.load(embedder_learned_embeds, map_location=accelerator.device))
95
+
96
+ if 'lora' in args and args.lora:
97
+ self.lora_layers.eval()
98
+ lora_layers_learned_embeds = args.lora_learned_embeds
99
+ self.lora_layers.load_state_dict(torch.load(lora_layers_learned_embeds, map_location=accelerator.device))
100
+ self.unet.load_attn_procs(lora_layers_learned_embeds)
modules/AudioToken/embedder.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch.nn as nn
2
+ from modules.FGA.atten import Atten
3
+
4
+ class FGAEmbedder(nn.Module):
5
+ def __init__(self, input_size=768*3, output_size=768):
6
+ super(FGAEmbedder, self).__init__()
7
+ self.fc1 = nn.Linear(input_size, input_size)
8
+ self.fc2 = nn.Linear(input_size, output_size)
9
+ self.gelu = nn.GELU()
10
+ self.fga = Atten(util_e=[output_size], pairwise_flag=False)
11
+
12
+ def forward(self, audio_embs):
13
+ audio_embs = self.fc1(audio_embs)
14
+ audio_embs = self.gelu(audio_embs)
15
+ audio_embs = self.fc2(audio_embs)
16
+ attend = self.fga([audio_embs])[0]
17
+ return attend
modules/BEATs/BEATs.py ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # --------------------------------------------------------
2
+ # BEATs: Audio Pre-Training with Acoustic Tokenizers (https://arxiv.org/abs/2212.09058)
3
+ # Github source: https://github.com/microsoft/unilm/tree/master/beats
4
+ # Copyright (c) 2022 Microsoft
5
+ # Licensed under The MIT License [see LICENSE for details]
6
+ # Based on fairseq code bases
7
+ # https://github.com/pytorch/fairseq
8
+ # --------------------------------------------------------
9
+
10
+
11
+ import torch
12
+ import torch.nn as nn
13
+ from torch.nn import LayerNorm
14
+ import torchaudio.compliance.kaldi as ta_kaldi
15
+ from torch.cuda.amp import autocast
16
+
17
+ from modules.BEATs.backbone import (
18
+ TransformerEncoder,
19
+ )
20
+
21
+ import logging
22
+ from typing import Optional
23
+
24
+ logger = logging.getLogger(__name__)
25
+
26
+
27
+ class BEATsConfig:
28
+ def __init__(self, cfg=None):
29
+ self.input_patch_size: int = -1 # path size of patch embedding
30
+ self.embed_dim: int = 512 # patch embedding dimension
31
+ self.conv_bias: bool = False # include bias in conv encoder
32
+
33
+ self.encoder_layers: int = 12 # num encoder layers in the transformer
34
+ self.encoder_embed_dim: int = 768 # encoder embedding dimension
35
+ self.encoder_ffn_embed_dim: int = 3072 # encoder embedding dimension for FFN
36
+ self.encoder_attention_heads: int = 12 # num encoder attention heads
37
+ self.activation_fn: str = "gelu" # activation function to use
38
+
39
+ self.layer_wise_gradient_decay_ratio: float = 1.0 # ratio for layer-wise gradient decay
40
+ self.layer_norm_first: bool = False # apply layernorm first in the transformer
41
+ self.deep_norm: bool = False # apply deep_norm first in the transformer
42
+
43
+ # dropouts
44
+ self.dropout: float = 0.1 # dropout probability for the transformer
45
+ self.attention_dropout: float = 0.1 # dropout probability for attention weights
46
+ self.activation_dropout: float = 0.0 # dropout probability after activation in FFN
47
+ self.encoder_layerdrop: float = 0.0 # probability of dropping a tarnsformer layer
48
+ self.dropout_input: float = 0.0 # dropout to apply to the input (after feat extr)
49
+
50
+ # positional embeddings
51
+ self.conv_pos: int = 128 # number of filters for convolutional positional embeddings
52
+ self.conv_pos_groups: int = 16 # number of groups for convolutional positional embedding
53
+
54
+ # relative position embedding
55
+ self.relative_position_embedding: bool = False # apply relative position embedding
56
+ self.num_buckets: int = 320 # number of buckets for relative position embedding
57
+ self.max_distance: int = 1280 # maximum distance for relative position embedding
58
+ self.gru_rel_pos: bool = False # apply gated relative position embedding
59
+
60
+ # label predictor
61
+ self.finetuned_model: bool = False # whether the model is a fine-tuned model.
62
+ self.predictor_dropout: float = 0.1 # dropout probability for the predictor
63
+ self.predictor_class: int = 527 # target class number for the predictor
64
+
65
+ if cfg is not None:
66
+ self.update(cfg)
67
+
68
+ def update(self, cfg: dict):
69
+ self.__dict__.update(cfg)
70
+
71
+
72
+ class BEATs(nn.Module):
73
+ def __init__(
74
+ self,
75
+ cfg: BEATsConfig,
76
+ ) -> None:
77
+ super().__init__()
78
+ logger.info(f"BEATs Config: {cfg.__dict__}")
79
+
80
+ self.cfg = cfg
81
+
82
+ self.embed = cfg.embed_dim
83
+ self.post_extract_proj = (
84
+ nn.Linear(self.embed, cfg.encoder_embed_dim)
85
+ if self.embed != cfg.encoder_embed_dim
86
+ else None
87
+ )
88
+
89
+ self.input_patch_size = cfg.input_patch_size
90
+ self.patch_embedding = nn.Conv2d(1, self.embed, kernel_size=self.input_patch_size, stride=self.input_patch_size,
91
+ bias=cfg.conv_bias)
92
+
93
+ self.dropout_input = nn.Dropout(cfg.dropout_input)
94
+
95
+ assert not cfg.deep_norm or not cfg.layer_norm_first
96
+ self.encoder = TransformerEncoder(cfg)
97
+ self.layer_norm = LayerNorm(self.embed)
98
+
99
+ if cfg.finetuned_model:
100
+ self.predictor_dropout = nn.Dropout(cfg.predictor_dropout)
101
+ self.predictor = nn.Linear(cfg.encoder_embed_dim, cfg.predictor_class)
102
+ else:
103
+ self.predictor = None
104
+
105
+ def forward_padding_mask(
106
+ self,
107
+ features: torch.Tensor,
108
+ padding_mask: torch.Tensor,
109
+ ) -> torch.Tensor:
110
+ extra = padding_mask.size(1) % features.size(1)
111
+ if extra > 0:
112
+ padding_mask = padding_mask[:, :-extra]
113
+ padding_mask = padding_mask.view(
114
+ padding_mask.size(0), features.size(1), -1
115
+ )
116
+ padding_mask = padding_mask.all(-1)
117
+ return padding_mask
118
+
119
+ @autocast(enabled=False)
120
+ def preprocess(
121
+ self,
122
+ source: torch.Tensor,
123
+ fbank_mean: float = 15.41663,
124
+ fbank_std: float = 6.55582,
125
+ ) -> torch.Tensor:
126
+ fbanks = []
127
+ for waveform in source:
128
+ waveform = waveform.unsqueeze(0) * 2 ** 15
129
+ fbank = ta_kaldi.fbank(waveform, num_mel_bins=128, sample_frequency=16000, frame_length=25, frame_shift=10)
130
+ fbanks.append(fbank)
131
+ fbank = torch.stack(fbanks, dim=0)
132
+ fbank = (fbank - fbank_mean) / (2 * fbank_std)
133
+ return fbank
134
+
135
+ def extract_features(
136
+ self,
137
+ source: torch.Tensor,
138
+ padding_mask: Optional[torch.Tensor] = None,
139
+ fbank_mean: float = 15.41663,
140
+ fbank_std: float = 6.55582,
141
+ ):
142
+ fbank = self.preprocess(source, fbank_mean=fbank_mean, fbank_std=fbank_std)
143
+ if padding_mask is not None:
144
+ padding_mask = self.forward_padding_mask(fbank, padding_mask)
145
+ # ToDo Aug here
146
+ fbank = fbank.unsqueeze(1)
147
+ features = self.patch_embedding(fbank)
148
+ features = features.reshape(features.shape[0], features.shape[1], -1)
149
+ features = features.transpose(1, 2)
150
+ features = self.layer_norm(features)
151
+
152
+ if padding_mask is not None:
153
+ padding_mask = self.forward_padding_mask(features, padding_mask)
154
+
155
+ if self.post_extract_proj is not None:
156
+ features = self.post_extract_proj(features)
157
+
158
+ x = self.dropout_input(features)
159
+
160
+ x, layers_sum, layers = self.encoder(
161
+ x,
162
+ padding_mask=padding_mask,
163
+ )
164
+
165
+ if self.predictor is not None:
166
+ x = self.predictor_dropout(x)
167
+ logits = self.predictor(x)
168
+
169
+ if padding_mask is not None and padding_mask.any():
170
+ logits[padding_mask] = 0
171
+ logits = logits.sum(dim=1)
172
+ logits = logits / (~padding_mask).sum(dim=1).unsqueeze(-1).expand_as(logits)
173
+ else:
174
+ logits = logits.mean(dim=1)
175
+
176
+ lprobs = torch.sigmoid(logits)
177
+
178
+ return lprobs, padding_mask
179
+ else:
180
+ return x, layers_sum, layers
modules/BEATs/Tokenizers.py ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # --------------------------------------------------------
2
+ # BEATs: Audio Pre-Training with Acoustic Tokenizers (https://arxiv.org/abs/2212.09058)
3
+ # Github source: https://github.com/microsoft/unilm/tree/master/beats
4
+ # Copyright (c) 2022 Microsoft
5
+ # Licensed under The MIT License [see LICENSE for details]
6
+ # Based on fairseq code bases
7
+ # https://github.com/pytorch/fairseq
8
+ # --------------------------------------------------------
9
+
10
+
11
+ import torch
12
+ import torch.nn as nn
13
+ from torch.nn import LayerNorm
14
+ import torchaudio.compliance.kaldi as ta_kaldi
15
+
16
+ from modules.BEATs.backbone import (
17
+ TransformerEncoder,
18
+ )
19
+ from modules.BEATs.quantizer import (
20
+ NormEMAVectorQuantizer,
21
+ )
22
+
23
+ import logging
24
+ from typing import Optional
25
+
26
+ logger = logging.getLogger(__name__)
27
+
28
+
29
+ class TokenizersConfig:
30
+ def __init__(self, cfg=None):
31
+ self.input_patch_size: int = -1 # path size of patch embedding
32
+ self.embed_dim: int = 512 # patch embedding dimension
33
+ self.conv_bias: bool = False # include bias in conv encoder
34
+
35
+ self.encoder_layers: int = 12 # num encoder layers in the transformer
36
+ self.encoder_embed_dim: int = 768 # encoder embedding dimension
37
+ self.encoder_ffn_embed_dim: int = 3072 # encoder embedding dimension for FFN
38
+ self.encoder_attention_heads: int = 12 # num encoder attention heads
39
+ self.activation_fn: str = "gelu" # activation function to use
40
+
41
+ self.layer_norm_first: bool = False # apply layernorm first in the transformer
42
+ self.deep_norm: bool = False # apply deep_norm first in the transformer
43
+
44
+ # dropouts
45
+ self.dropout: float = 0.1 # dropout probability for the transformer
46
+ self.attention_dropout: float = 0.1 # dropout probability for attention weights
47
+ self.activation_dropout: float = 0.0 # dropout probability after activation in FFN
48
+ self.encoder_layerdrop: float = 0.0 # probability of dropping a tarnsformer layer
49
+ self.dropout_input: float = 0.0 # dropout to apply to the input (after feat extr)
50
+
51
+ # positional embeddings
52
+ self.conv_pos: int = 128 # number of filters for convolutional positional embeddings
53
+ self.conv_pos_groups: int = 16 # number of groups for convolutional positional embedding
54
+
55
+ # relative position embedding
56
+ self.relative_position_embedding: bool = False # apply relative position embedding
57
+ self.num_buckets: int = 320 # number of buckets for relative position embedding
58
+ self.max_distance: int = 1280 # maximum distance for relative position embedding
59
+ self.gru_rel_pos: bool = False # apply gated relative position embedding
60
+
61
+ # quantizer
62
+ self.quant_n: int = 1024 # codebook number in quantizer
63
+ self.quant_dim: int = 256 # codebook dimension in quantizer
64
+
65
+ if cfg is not None:
66
+ self.update(cfg)
67
+
68
+ def update(self, cfg: dict):
69
+ self.__dict__.update(cfg)
70
+
71
+
72
+ class Tokenizers(nn.Module):
73
+ def __init__(
74
+ self,
75
+ cfg: TokenizersConfig,
76
+ ) -> None:
77
+ super().__init__()
78
+ logger.info(f"Tokenizers Config: {cfg.__dict__}")
79
+
80
+ self.cfg = cfg
81
+
82
+ self.embed = cfg.embed_dim
83
+ self.post_extract_proj = (
84
+ nn.Linear(self.embed, cfg.encoder_embed_dim)
85
+ if self.embed != cfg.encoder_embed_dim
86
+ else None
87
+ )
88
+
89
+ self.input_patch_size = cfg.input_patch_size
90
+ self.patch_embedding = nn.Conv2d(1, self.embed, kernel_size=self.input_patch_size, stride=self.input_patch_size,
91
+ bias=cfg.conv_bias)
92
+
93
+ self.dropout_input = nn.Dropout(cfg.dropout_input)
94
+
95
+ assert not cfg.deep_norm or not cfg.layer_norm_first
96
+ self.encoder = TransformerEncoder(cfg)
97
+ self.layer_norm = LayerNorm(self.embed)
98
+
99
+ self.quantize = NormEMAVectorQuantizer(
100
+ n_embed=cfg.quant_n, embedding_dim=cfg.quant_dim, beta=1.0, kmeans_init=True, decay=0.99,
101
+ )
102
+ self.quant_n = cfg.quant_n
103
+ self.quantize_layer = nn.Sequential(
104
+ nn.Linear(cfg.encoder_embed_dim, cfg.encoder_embed_dim),
105
+ nn.Tanh(),
106
+ nn.Linear(cfg.encoder_embed_dim, cfg.quant_dim) # for quantize
107
+ )
108
+
109
+ def forward_padding_mask(
110
+ self,
111
+ features: torch.Tensor,
112
+ padding_mask: torch.Tensor,
113
+ ) -> torch.Tensor:
114
+ extra = padding_mask.size(1) % features.size(1)
115
+ if extra > 0:
116
+ padding_mask = padding_mask[:, :-extra]
117
+ padding_mask = padding_mask.view(
118
+ padding_mask.size(0), features.size(1), -1
119
+ )
120
+ padding_mask = padding_mask.all(-1)
121
+ return padding_mask
122
+
123
+ def preprocess(
124
+ self,
125
+ source: torch.Tensor,
126
+ fbank_mean: float = 15.41663,
127
+ fbank_std: float = 6.55582,
128
+ ) -> torch.Tensor:
129
+ fbanks = []
130
+ for waveform in source:
131
+ waveform = waveform.unsqueeze(0) * 2 ** 15
132
+ fbank = ta_kaldi.fbank(waveform, num_mel_bins=128, sample_frequency=16000, frame_length=25, frame_shift=10)
133
+ fbanks.append(fbank)
134
+ fbank = torch.stack(fbanks, dim=0)
135
+ fbank = (fbank - fbank_mean) / (2 * fbank_std)
136
+ return fbank
137
+
138
+ def extract_labels(
139
+ self,
140
+ source: torch.Tensor,
141
+ padding_mask: Optional[torch.Tensor] = None,
142
+ fbank_mean: float = 15.41663,
143
+ fbank_std: float = 6.55582,
144
+ ):
145
+ fbank = self.preprocess(source, fbank_mean=fbank_mean, fbank_std=fbank_std)
146
+
147
+ if padding_mask is not None:
148
+ padding_mask = self.forward_padding_mask(fbank, padding_mask)
149
+
150
+ fbank = fbank.unsqueeze(1)
151
+ features = self.patch_embedding(fbank)
152
+ features = features.reshape(features.shape[0], features.shape[1], -1)
153
+ features = features.transpose(1, 2)
154
+ features = self.layer_norm(features)
155
+
156
+ if padding_mask is not None:
157
+ padding_mask = self.forward_padding_mask(features, padding_mask)
158
+
159
+ if self.post_extract_proj is not None:
160
+ features = self.post_extract_proj(features)
161
+
162
+ x = self.dropout_input(features)
163
+
164
+ x, layer_results = self.encoder(
165
+ x,
166
+ padding_mask=padding_mask,
167
+ )
168
+
169
+ quantize_input = self.quantize_layer(x)
170
+ quantize_feature, embed_loss, embed_ind = self.quantize(quantize_input)
171
+
172
+ return embed_ind
modules/BEATs/backbone.py ADDED
@@ -0,0 +1,788 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # --------------------------------------------------------
2
+ # BEATs: Audio Pre-Training with Acoustic Tokenizers (https://arxiv.org/abs/2212.09058)
3
+ # Github source: https://github.com/microsoft/unilm/tree/master/beats
4
+ # Copyright (c) 2022 Microsoft
5
+ # Licensed under The MIT License [see LICENSE for details]
6
+ # Based on fairseq code bases
7
+ # https://github.com/pytorch/fairseq
8
+ # --------------------------------------------------------
9
+
10
+ import math
11
+ import numpy as np
12
+ from typing import Dict, Optional, Tuple
13
+ import torch
14
+ from torch import Tensor, nn
15
+ import torch.nn.functional as F
16
+ from torch.nn import LayerNorm, Parameter
17
+ from modules.BEATs.modules import (
18
+ GradMultiply,
19
+ SamePad,
20
+ get_activation_fn,
21
+ GLU_Linear,
22
+ quant_noise,
23
+ )
24
+
25
+
26
+ class TransformerEncoder(nn.Module):
27
+ def __init__(self, args):
28
+ super().__init__()
29
+
30
+ self.dropout = args.dropout
31
+ self.embedding_dim = args.encoder_embed_dim
32
+
33
+ self.pos_conv = nn.Conv1d(
34
+ self.embedding_dim,
35
+ self.embedding_dim,
36
+ kernel_size=args.conv_pos,
37
+ padding=args.conv_pos // 2,
38
+ groups=args.conv_pos_groups,
39
+ )
40
+ dropout = 0
41
+ std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim))
42
+ nn.init.normal_(self.pos_conv.weight, mean=0, std=std)
43
+ nn.init.constant_(self.pos_conv.bias, 0)
44
+
45
+ self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2)
46
+ self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU())
47
+
48
+ if hasattr(args, "relative_position_embedding"):
49
+ self.relative_position_embedding = args.relative_position_embedding
50
+ self.num_buckets = args.num_buckets
51
+ self.max_distance = args.max_distance
52
+ else:
53
+ self.relative_position_embedding = False
54
+ self.num_buckets = 0
55
+ self.max_distance = 0
56
+
57
+ self.layers = nn.ModuleList(
58
+ [
59
+ TransformerSentenceEncoderLayer(
60
+ embedding_dim=self.embedding_dim,
61
+ ffn_embedding_dim=args.encoder_ffn_embed_dim,
62
+ num_attention_heads=args.encoder_attention_heads,
63
+ dropout=self.dropout,
64
+ attention_dropout=args.attention_dropout,
65
+ activation_dropout=args.activation_dropout,
66
+ activation_fn=args.activation_fn,
67
+ layer_norm_first=args.layer_norm_first,
68
+ deep_norm=args.deep_norm,
69
+ has_relative_attention_bias=self.relative_position_embedding,
70
+ num_buckets=self.num_buckets,
71
+ max_distance=self.max_distance,
72
+ gru_rel_pos=args.gru_rel_pos,
73
+ encoder_layers=args.encoder_layers,
74
+ )
75
+ for i in range(args.encoder_layers)
76
+ ]
77
+ )
78
+ if self.relative_position_embedding:
79
+ for i in range(1, args.encoder_layers):
80
+ del self.layers[i].self_attn.relative_attention_bias
81
+ self.layers[i].self_attn.relative_attention_bias = self.layers[0].self_attn.relative_attention_bias
82
+
83
+ self.layer_norm_first = args.layer_norm_first
84
+ self.layer_norm = LayerNorm(self.embedding_dim)
85
+ self.layerdrop = args.encoder_layerdrop
86
+
87
+ self.apply(init_bert_params)
88
+
89
+ if args.deep_norm:
90
+ deep_norm_beta = math.pow(8 * args.encoder_layers, -1 / 4)
91
+ for i in range(args.encoder_layers):
92
+ nn.init.xavier_normal_(self.layers[i].self_attn.k_proj.weight, gain=1)
93
+ nn.init.xavier_normal_(self.layers[i].self_attn.v_proj.weight, gain=deep_norm_beta)
94
+ nn.init.xavier_normal_(self.layers[i].self_attn.q_proj.weight, gain=1)
95
+ nn.init.xavier_normal_(self.layers[i].self_attn.out_proj.weight, gain=deep_norm_beta)
96
+ nn.init.xavier_normal_(self.layers[i].fc1.weight, gain=deep_norm_beta)
97
+ nn.init.xavier_normal_(self.layers[i].fc2.weight, gain=deep_norm_beta)
98
+
99
+ self.layer_wise_gradient_decay_ratio = getattr(args, "layer_wise_gradient_decay_ratio", 1)
100
+
101
+ def forward(self, x, padding_mask=None, layer=None):
102
+ x, layers_sum, layers = self.extract_features(x, padding_mask, layer)
103
+
104
+ if self.layer_norm_first and layer is None:
105
+ x = self.layer_norm(x)
106
+
107
+ return x, layers_sum, layers
108
+
109
+ def extract_features(self, x, padding_mask=None, tgt_layer=None):
110
+
111
+ if padding_mask is not None:
112
+ x[padding_mask] = 0
113
+
114
+ x_conv = self.pos_conv(x.transpose(1, 2))
115
+ x_conv = x_conv.transpose(1, 2)
116
+ x += x_conv
117
+
118
+ if not self.layer_norm_first:
119
+ x = self.layer_norm(x)
120
+
121
+ x = F.dropout(x, p=self.dropout, training=self.training)
122
+
123
+ # B x T x C -> T x B x C
124
+ x = x.transpose(0, 1)
125
+ layers = []
126
+
127
+ layer_results = []
128
+ z = None
129
+ if tgt_layer is not None:
130
+ layer_results.append((x, z))
131
+ r = None
132
+ pos_bias = None
133
+ for i, layer in enumerate(self.layers):
134
+ if self.layer_wise_gradient_decay_ratio != 1.0:
135
+ x = GradMultiply.apply(x, self.layer_wise_gradient_decay_ratio)
136
+ dropout_probability = np.random.random()
137
+ if not self.training or (dropout_probability > self.layerdrop):
138
+ x, z, pos_bias = layer(x, self_attn_padding_mask=padding_mask, need_weights=False, pos_bias=pos_bias)
139
+ if tgt_layer is not None:
140
+ layer_results.append((x, z))
141
+ if i == tgt_layer:
142
+ r = x
143
+ break
144
+ if i in [3, 7, 11]:
145
+ layers.append(x.transpose(0, 1))
146
+
147
+ if r is not None:
148
+ x = r
149
+
150
+ # T x B x C -> B x T x C
151
+ x = x.transpose(0, 1)
152
+ layers_cat = torch.cat(layers, dim=2)
153
+ # layers = layers[0] + layers[1] + layers[2]
154
+
155
+ return x, layers_cat, layers
156
+
157
+
158
class TransformerSentenceEncoderLayer(nn.Module):
    """
    One Transformer encoder block: multi-head self-attention followed by a
    position-wise feed-forward network (FFN), each wrapped in a residual
    connection with LayerNorm.

    Two layouts are supported:
      * pre-LN  (``layer_norm_first=True``):  norm -> sublayer -> residual add
      * post-LN (``layer_norm_first=False``): sublayer -> residual -> norm,
        where the residual branch is scaled by ``deep_norm_alpha`` when
        ``deep_norm`` is enabled.

    The self-attention can carry a relative position bias (optionally gated
    via ``gru_rel_pos``); the bias tensor is returned from ``forward`` so a
    stack of layers can reuse it instead of recomputing it per layer.
    """

    def __init__(
        self,
        embedding_dim: int = 768,
        ffn_embedding_dim: int = 3072,
        num_attention_heads: int = 8,
        dropout: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        activation_fn: str = "relu",
        layer_norm_first: bool = False,
        deep_norm: bool = False,
        has_relative_attention_bias: bool = False,
        num_buckets: int = 0,
        max_distance: int = 0,
        rescale_init: bool = False,
        gru_rel_pos: bool = False,
        encoder_layers: int = 0,
    ) -> None:

        super().__init__()
        self.embedding_dim = embedding_dim
        self.dropout = dropout
        self.activation_dropout = activation_dropout

        # Keep the raw activation name as well: "glu" switches the FFN to a
        # GLU_Linear below instead of Linear + activation.
        self.activation_name = activation_fn
        self.activation_fn = get_activation_fn(activation_fn)
        self.self_attn = MultiheadAttention(
            self.embedding_dim,
            num_attention_heads,
            dropout=attention_dropout,
            self_attention=True,
            has_relative_attention_bias=has_relative_attention_bias,
            num_buckets=num_buckets,
            max_distance=max_distance,
            rescale_init=rescale_init,
            gru_rel_pos=gru_rel_pos,
        )

        # Separate dropouts: after attention, after the FFN activation, and
        # after the FFN output projection.
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(self.activation_dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.layer_norm_first = layer_norm_first

        self.self_attn_layer_norm = LayerNorm(self.embedding_dim)

        if self.activation_name == "glu":
            # GLU feed-forward with swish gating; fc1 then emits the gated value.
            self.fc1 = GLU_Linear(self.embedding_dim, ffn_embedding_dim, "swish")
        else:
            self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
        self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)

        self.final_layer_norm = LayerNorm(self.embedding_dim)

        self.deep_norm = deep_norm
        if self.deep_norm:
            # Residual scaling factor (2 * num_layers)^(1/4); used only in the
            # post-LN branch of forward() to stabilize deep post-LN stacks.
            self.deep_norm_alpha = math.pow(2 * encoder_layers, 1 / 4)
        else:
            self.deep_norm_alpha = 1

    def forward(
        self,
        x: torch.Tensor,
        self_attn_mask: torch.Tensor = None,
        self_attn_padding_mask: torch.Tensor = None,
        need_weights: bool = False,
        pos_bias=None
    ):
        """Run one encoder block on ``x`` (Time x Batch x Channel).

        Returns ``(x, attn, pos_bias)`` so the (possibly gated) relative
        position bias can be threaded through to the next layer.
        """
        residual = x

        if self.layer_norm_first:
            # Pre-LN layout: normalize before each sublayer, plain residual add.
            x = self.self_attn_layer_norm(x)
            # NOTE: attention weights are intentionally not requested here
            # (need_weights hard-coded to False in this branch).
            x, attn, pos_bias = self.self_attn(
                query=x,
                key=x,
                value=x,
                key_padding_mask=self_attn_padding_mask,
                need_weights=False,
                attn_mask=self_attn_mask,
                position_bias=pos_bias
            )
            x = self.dropout1(x)
            x = residual + x

            residual = x
            x = self.final_layer_norm(x)
            if self.activation_name == "glu":
                # GLU_Linear applies its own gating; no extra activation.
                x = self.fc1(x)
            else:
                x = self.activation_fn(self.fc1(x))
            x = self.dropout2(x)
            x = self.fc2(x)
            x = self.dropout3(x)
            x = residual + x
        else:
            # Post-LN layout: sublayer first, residual scaled by
            # deep_norm_alpha (1 when deep_norm is off), then LayerNorm.
            x, attn, pos_bias = self.self_attn(
                query=x,
                key=x,
                value=x,
                key_padding_mask=self_attn_padding_mask,
                need_weights=need_weights,
                attn_mask=self_attn_mask,
                position_bias=pos_bias
            )

            x = self.dropout1(x)
            x = residual * self.deep_norm_alpha + x

            x = self.self_attn_layer_norm(x)

            residual = x
            if self.activation_name == "glu":
                x = self.fc1(x)
            else:
                x = self.activation_fn(self.fc1(x))
            x = self.dropout2(x)
            x = self.fc2(x)
            x = self.dropout3(x)
            x = residual * self.deep_norm_alpha + x
            x = self.final_layer_norm(x)

        return x, attn, pos_bias
281
+
282
+
283
class MultiheadAttention(nn.Module):
    """Multi-headed attention.

    See "Attention Is All You Need" for more details.

    Extensions over the vanilla module: an optional bucketed relative position
    bias (``has_relative_attention_bias``), an optional learned gate on that
    bias (``gru_rel_pos``), and quant-noise-wrapped projection layers.
    """

    def __init__(
        self,
        embed_dim,
        num_heads,
        kdim=None,
        vdim=None,
        dropout=0.0,
        bias=True,
        add_bias_kv=False,
        add_zero_attn=False,
        self_attention=False,
        encoder_decoder_attention=False,
        q_noise=0.0,
        qn_block_size=8,
        has_relative_attention_bias=False,
        num_buckets=32,
        max_distance=128,
        gru_rel_pos=False,
        rescale_init=False,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        # Key/value input dims default to embed_dim (plain self-attention).
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim

        self.num_heads = num_heads
        self.dropout_module = nn.Dropout(dropout)

        # Relative position bias: one learned scalar per (bucket, head).
        self.has_relative_attention_bias = has_relative_attention_bias
        self.num_buckets = num_buckets
        self.max_distance = max_distance
        if self.has_relative_attention_bias:
            self.relative_attention_bias = nn.Embedding(num_buckets, num_heads)

        self.head_dim = embed_dim // num_heads
        self.q_head_dim = self.head_dim
        self.k_head_dim = self.head_dim
        assert (
            self.head_dim * num_heads == self.embed_dim
        ), "embed_dim must be divisible by num_heads"
        # Standard 1/sqrt(head_dim) attention scaling.
        self.scaling = self.head_dim ** -0.5

        self.self_attention = self_attention
        self.encoder_decoder_attention = encoder_decoder_attention

        assert not self.self_attention or self.qkv_same_dim, (
            "Self-attention requires query, key and " "value to be of the same size"
        )

        # rescale_init drops the bias on the key projection only.
        k_bias = True
        if rescale_init:
            k_bias = False

        k_embed_dim = embed_dim
        q_embed_dim = embed_dim

        # Projections, optionally wrapped in quantization noise (active only
        # when q_noise > 0).
        self.k_proj = quant_noise(
            nn.Linear(self.kdim, k_embed_dim, bias=k_bias), q_noise, qn_block_size
        )
        self.v_proj = quant_noise(
            nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size
        )
        self.q_proj = quant_noise(
            nn.Linear(embed_dim, q_embed_dim, bias=bias), q_noise, qn_block_size
        )

        self.out_proj = quant_noise(
            nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
        )

        # Optional learned bias vectors appended to keys/values at every step.
        if add_bias_kv:
            self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None

        self.add_zero_attn = add_zero_attn

        # Gated relative position bias: a small linear head on the queries
        # produces per-position gates applied to the shared position bias.
        self.gru_rel_pos = gru_rel_pos
        if self.gru_rel_pos:
            self.grep_linear = nn.Linear(self.q_head_dim, 8)
            self.grep_a = nn.Parameter(torch.ones(1, num_heads, 1, 1))

        self.reset_parameters()
374
+
375
    def reset_parameters(self):
        """(Re-)initialize projection weights, biases, and the relative
        position bias table.

        NOTE(review): the call order is kept exactly as-is — reordering the
        init calls would change the RNG stream and hence the reproducibility
        of initializations.
        """
        if self.qkv_same_dim:
            # Empirically observed the convergence to be much better with
            # the scaled initialization
            nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
            nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
            nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
        else:
            nn.init.xavier_uniform_(self.k_proj.weight)
            nn.init.xavier_uniform_(self.v_proj.weight)
            nn.init.xavier_uniform_(self.q_proj.weight)

        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.out_proj.bias is not None:
            nn.init.constant_(self.out_proj.bias, 0.0)
        if self.bias_k is not None:
            nn.init.xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            nn.init.xavier_normal_(self.bias_v)
        if self.has_relative_attention_bias:
            nn.init.xavier_normal_(self.relative_attention_bias.weight)
396
+
397
+ def _relative_positions_bucket(self, relative_positions, bidirectional=True):
398
+ num_buckets = self.num_buckets
399
+ max_distance = self.max_distance
400
+ relative_buckets = 0
401
+
402
+ if bidirectional:
403
+ num_buckets = num_buckets // 2
404
+ relative_buckets += (relative_positions > 0).to(torch.long) * num_buckets
405
+ relative_positions = torch.abs(relative_positions)
406
+ else:
407
+ relative_positions = -torch.min(relative_positions, torch.zeros_like(relative_positions))
408
+
409
+ max_exact = num_buckets // 2
410
+ is_small = relative_positions < max_exact
411
+
412
+ relative_postion_if_large = max_exact + (
413
+ torch.log(relative_positions.float() / max_exact)
414
+ / math.log(max_distance / max_exact)
415
+ * (num_buckets - max_exact)
416
+ ).to(torch.long)
417
+ relative_postion_if_large = torch.min(
418
+ relative_postion_if_large, torch.full_like(relative_postion_if_large, num_buckets - 1)
419
+ )
420
+
421
+ relative_buckets += torch.where(is_small, relative_positions, relative_postion_if_large)
422
+ return relative_buckets
423
+
424
+ def compute_bias(self, query_length, key_length):
425
+ context_position = torch.arange(query_length, dtype=torch.long)[:, None]
426
+ memory_position = torch.arange(key_length, dtype=torch.long)[None, :]
427
+ relative_position = memory_position - context_position
428
+ relative_position_bucket = self._relative_positions_bucket(
429
+ relative_position,
430
+ bidirectional=True
431
+ )
432
+ relative_position_bucket = relative_position_bucket.to(self.relative_attention_bias.weight.device)
433
+ values = self.relative_attention_bias(relative_position_bucket)
434
+ values = values.permute([2, 0, 1])
435
+ return values
436
+
437
+ def forward(
438
+ self,
439
+ query,
440
+ key: Optional[Tensor],
441
+ value: Optional[Tensor],
442
+ key_padding_mask: Optional[Tensor] = None,
443
+ incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
444
+ need_weights: bool = True,
445
+ static_kv: bool = False,
446
+ attn_mask: Optional[Tensor] = None,
447
+ before_softmax: bool = False,
448
+ need_head_weights: bool = False,
449
+ position_bias: Optional[Tensor] = None
450
+ ) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]:
451
+ """Input shape: Time x Batch x Channel
452
+
453
+ Args:
454
+ key_padding_mask (ByteTensor, optional): mask to exclude
455
+ keys that are pads, of shape `(batch, src_len)`, where
456
+ padding elements are indicated by 1s.
457
+ need_weights (bool, optional): return the attention weights,
458
+ averaged over heads (default: False).
459
+ attn_mask (ByteTensor, optional): typically used to
460
+ implement causal attention, where the mask prevents the
461
+ attention from looking forward in time (default: None).
462
+ before_softmax (bool, optional): return the raw attention
463
+ weights and values before the attention softmax.
464
+ need_head_weights (bool, optional): return the attention
465
+ weights for each head. Implies *need_weights*. Default:
466
+ return the average attention weights over all heads.
467
+ """
468
+ if need_head_weights:
469
+ need_weights = True
470
+
471
+ is_tpu = query.device.type == "xla"
472
+
473
+ tgt_len, bsz, embed_dim = query.size()
474
+ src_len = tgt_len
475
+ assert embed_dim == self.embed_dim
476
+ assert list(query.size()) == [tgt_len, bsz, embed_dim]
477
+ if key is not None:
478
+ src_len, key_bsz, _ = key.size()
479
+ if not torch.jit.is_scripting():
480
+ assert key_bsz == bsz
481
+ assert value is not None
482
+ assert src_len, bsz == value.shape[:2]
483
+
484
+ if self.has_relative_attention_bias and position_bias is None:
485
+ position_bias = self.compute_bias(tgt_len, src_len)
486
+ position_bias = position_bias.unsqueeze(0).repeat(bsz, 1, 1, 1).view(bsz * self.num_heads, tgt_len, src_len)
487
+
488
+ if incremental_state is not None:
489
+ saved_state = self._get_input_buffer(incremental_state)
490
+ if saved_state is not None and "prev_key" in saved_state:
491
+ # previous time steps are cached - no need to recompute
492
+ # key and value if they are static
493
+ if static_kv:
494
+ assert self.encoder_decoder_attention and not self.self_attention
495
+ key = value = None
496
+ else:
497
+ saved_state = None
498
+
499
+ if self.self_attention:
500
+ q = self.q_proj(query)
501
+ k = self.k_proj(query)
502
+ v = self.v_proj(query)
503
+ elif self.encoder_decoder_attention:
504
+ # encoder-decoder attention
505
+ q = self.q_proj(query)
506
+ if key is None:
507
+ assert value is None
508
+ k = v = None
509
+ else:
510
+ k = self.k_proj(key)
511
+ v = self.v_proj(key)
512
+
513
+ else:
514
+ assert key is not None and value is not None
515
+ q = self.q_proj(query)
516
+ k = self.k_proj(key)
517
+ v = self.v_proj(value)
518
+ q *= self.scaling
519
+ alpha = 32
520
+ q *= 1 / alpha
521
+
522
+ if self.bias_k is not None:
523
+ assert self.bias_v is not None
524
+ k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
525
+ v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
526
+ if attn_mask is not None:
527
+ attn_mask = torch.cat(
528
+ [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
529
+ )
530
+ if key_padding_mask is not None:
531
+ key_padding_mask = torch.cat(
532
+ [
533
+ key_padding_mask,
534
+ key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
535
+ ],
536
+ dim=1,
537
+ )
538
+
539
+ q = (
540
+ q.contiguous()
541
+ .view(tgt_len, bsz * self.num_heads, self.q_head_dim)
542
+ .transpose(0, 1)
543
+ )
544
+ if k is not None:
545
+ k = (
546
+ k.contiguous()
547
+ .view(-1, bsz * self.num_heads, self.k_head_dim)
548
+ .transpose(0, 1)
549
+ )
550
+ if v is not None:
551
+ v = (
552
+ v.contiguous()
553
+ .view(-1, bsz * self.num_heads, self.head_dim)
554
+ .transpose(0, 1)
555
+ )
556
+
557
+ if saved_state is not None:
558
+ # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
559
+ if "prev_key" in saved_state:
560
+ _prev_key = saved_state["prev_key"]
561
+ assert _prev_key is not None
562
+ prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
563
+ if static_kv:
564
+ k = prev_key
565
+ else:
566
+ assert k is not None
567
+ k = torch.cat([prev_key, k], dim=1)
568
+ src_len = k.size(1)
569
+ if "prev_value" in saved_state:
570
+ _prev_value = saved_state["prev_value"]
571
+ assert _prev_value is not None
572
+ prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
573
+ if static_kv:
574
+ v = prev_value
575
+ else:
576
+ assert v is not None
577
+ v = torch.cat([prev_value, v], dim=1)
578
+ prev_key_padding_mask: Optional[Tensor] = None
579
+ if "prev_key_padding_mask" in saved_state:
580
+ prev_key_padding_mask = saved_state["prev_key_padding_mask"]
581
+ assert k is not None and v is not None
582
+ key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
583
+ key_padding_mask=key_padding_mask,
584
+ prev_key_padding_mask=prev_key_padding_mask,
585
+ batch_size=bsz,
586
+ src_len=k.size(1),
587
+ static_kv=static_kv,
588
+ )
589
+
590
+ saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
591
+ saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
592
+ saved_state["prev_key_padding_mask"] = key_padding_mask
593
+ # In this branch incremental_state is never None
594
+ assert incremental_state is not None
595
+ incremental_state = self._set_input_buffer(incremental_state, saved_state)
596
+ assert k is not None
597
+ assert k.size(1) == src_len
598
+
599
+ # This is part of a workaround to get around fork/join parallelism
600
+ # not supporting Optional types.
601
+ if key_padding_mask is not None and key_padding_mask.dim() == 0:
602
+ key_padding_mask = None
603
+
604
+ if key_padding_mask is not None:
605
+ assert key_padding_mask.size(0) == bsz
606
+ assert key_padding_mask.size(1) == src_len
607
+
608
+ if self.add_zero_attn:
609
+ assert v is not None
610
+ src_len += 1
611
+ k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
612
+ v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
613
+ if attn_mask is not None:
614
+ attn_mask = torch.cat(
615
+ [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
616
+ )
617
+ if key_padding_mask is not None:
618
+ key_padding_mask = torch.cat(
619
+ [
620
+ key_padding_mask,
621
+ torch.zeros(key_padding_mask.size(0), 1).type_as(
622
+ key_padding_mask
623
+ ),
624
+ ],
625
+ dim=1,
626
+ )
627
+
628
+ attn_weights = torch.bmm(q, k.transpose(1, 2))
629
+ attn_weights = (attn_weights - attn_weights.max(dim=-1, keepdim=True)[0]) * alpha
630
+ attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
631
+
632
+ assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
633
+
634
+ if attn_mask is not None:
635
+ attn_mask = attn_mask.unsqueeze(0)
636
+ attn_weights += attn_mask
637
+
638
+ if key_padding_mask is not None:
639
+ # don't attend to padding symbols
640
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
641
+ if not is_tpu:
642
+ attn_weights = attn_weights.masked_fill(
643
+ key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
644
+ float("-inf"),
645
+ )
646
+ else:
647
+ attn_weights = attn_weights.transpose(0, 2)
648
+ attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf"))
649
+ attn_weights = attn_weights.transpose(0, 2)
650
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
651
+
652
+ if before_softmax:
653
+ return attn_weights, v, position_bias
654
+
655
+ if position_bias is not None:
656
+ attn_mask_rel_pos = position_bias
657
+ if self.gru_rel_pos == 1:
658
+ query_layer = q.view(bsz, self.num_heads, tgt_len, self.q_head_dim) * alpha / self.scaling
659
+ _B, _H, _L, __ = query_layer.size()
660
+ gate_a, gate_b = torch.sigmoid(self.grep_linear(query_layer).view(
661
+ _B, _H, _L, 2, 4).sum(-1, keepdim=False)).chunk(2, dim=-1)
662
+ gate_a_1 = gate_a * (gate_b * self.grep_a - 1.0) + 2.0
663
+ attn_mask_rel_pos = gate_a_1.view(bsz * self.num_heads, tgt_len, 1) * position_bias
664
+
665
+ attn_mask_rel_pos = attn_mask_rel_pos.view(attn_weights.size())
666
+
667
+ attn_weights = attn_weights + attn_mask_rel_pos
668
+
669
+ attn_weights_float = F.softmax(
670
+ attn_weights, dim=-1
671
+ )
672
+ attn_weights = attn_weights_float.type_as(attn_weights)
673
+ attn_probs = self.dropout_module(attn_weights)
674
+
675
+ assert v is not None
676
+ attn = torch.bmm(attn_probs, v)
677
+ assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
678
+ attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
679
+ attn = self.out_proj(attn)
680
+ attn_weights: Optional[Tensor] = None
681
+ if need_weights:
682
+ attn_weights = attn_weights_float.view(
683
+ bsz, self.num_heads, tgt_len, src_len
684
+ ).transpose(1, 0)
685
+ if not need_head_weights:
686
+ # average attention weights over heads
687
+ attn_weights = attn_weights.mean(dim=0)
688
+
689
+ return attn, attn_weights, position_bias
690
+
691
+ @staticmethod
692
+ def _append_prev_key_padding_mask(
693
+ key_padding_mask: Optional[Tensor],
694
+ prev_key_padding_mask: Optional[Tensor],
695
+ batch_size: int,
696
+ src_len: int,
697
+ static_kv: bool,
698
+ ) -> Optional[Tensor]:
699
+ # saved key padding masks have shape (bsz, seq_len)
700
+ if prev_key_padding_mask is not None and static_kv:
701
+ new_key_padding_mask = prev_key_padding_mask
702
+ elif prev_key_padding_mask is not None and key_padding_mask is not None:
703
+ new_key_padding_mask = torch.cat(
704
+ [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
705
+ )
706
+ # During incremental decoding, as the padding token enters and
707