fdschmidt93 commited on
Commit
65a0eff
0 Parent(s):

initial commit

Browse files
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
config.json ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "auto_map": {
3
+ "AutoConfig": "configuration_seamless_m4t_v2_speech_encoder.SeamlessM4Tv2EncoderConfig",
4
+ "AutoModel": "modeling_seamless_m4t_v2_speech_encoder.SeamlessM4Tv2SpeechEncoder",
5
+ "AutoModelForSequenceClassification": "modeling_seamless_m4t_v2_speech_encoder.SeamlessM4Tv2ForAudioClassification",
6
+ "AutoModelForAudioClassification": "modeling_seamless_m4t_v2_speech_encoder.SeamlessM4Tv2ForAudioClassification"
7
+ },
8
+ "_name_or_path": "seamless-m4t-v2-large",
9
+ "activation_dropout": 0.0,
10
+ "activation_function": "relu",
11
+ "adaptor_dropout": 0.1,
12
+ "adaptor_kernel_size": 8,
13
+ "adaptor_stride": 8,
14
+ "add_adapter": true,
15
+ "architectures": [
16
+ "SeamlessM4TSpeechEncoder"
17
+ ],
18
+ "attention_dropout": 0.1,
19
+ "bos_token_id": 2,
20
+ "char_vocab_size": 10943,
21
+ "conv_depthwise_kernel_size": 31,
22
+ "decoder_attention_heads": 16,
23
+ "decoder_ffn_dim": 8192,
24
+ "decoder_layerdrop": 0.05,
25
+ "decoder_layers": 24,
26
+ "decoder_start_token_id": 3,
27
+ "dropout": 0.1,
28
+ "encoder_attention_heads": 16,
29
+ "encoder_ffn_dim": 8192,
30
+ "encoder_layerdrop": 0.05,
31
+ "encoder_layers": 24,
32
+ "eos_token_id": 3,
33
+ "feature_projection_input_dim": 160,
34
+ "hidden_size": 1024,
35
+ "initializer_range": 0.02,
36
+ "is_encoder_decoder": true,
37
+ "lang_embed_dim": 256,
38
+ "layer_norm_eps": 1e-05,
39
+ "leaky_relu_slope": 0.1,
40
+ "left_max_position_embeddings": 64,
41
+ "max_new_tokens": 256,
42
+ "max_position_embeddings": 4096,
43
+ "model_type": "seamlessm4t-v2-large-speech_encoder",
44
+ "num_adapter_layers": 1,
45
+ "num_attention_heads": 16,
46
+ "num_hidden_layers": 24,
47
+ "pad_token_id": 0,
48
+ "position_embeddings_type": "relative_key",
49
+ "resblock_dilation_sizes": [
50
+ [
51
+ 1,
52
+ 3,
53
+ 5
54
+ ],
55
+ [
56
+ 1,
57
+ 3,
58
+ 5
59
+ ],
60
+ [
61
+ 1,
62
+ 3,
63
+ 5
64
+ ]
65
+ ],
66
+ "resblock_kernel_sizes": [
67
+ 3,
68
+ 7,
69
+ 11
70
+ ],
71
+ "right_max_position_embeddings": 8,
72
+ "sampling_rate": 16000,
73
+ "scale_embedding": true,
74
+ "speech_encoder_attention_heads": 16,
75
+ "speech_encoder_chunk_size": 20000,
76
+ "speech_encoder_dropout": 0.0,
77
+ "speech_encoder_hidden_act": "swish",
78
+ "speech_encoder_intermediate_size": 4096,
79
+ "speech_encoder_layerdrop": 0.1,
80
+ "speech_encoder_layers": 24,
81
+ "speech_encoder_left_chunk_num": 128,
82
+ "spkr_embed_dim": 256,
83
+ "t2u_bos_token_id": 0,
84
+ "t2u_decoder_attention_heads": 16,
85
+ "t2u_decoder_ffn_dim": 8192,
86
+ "t2u_decoder_layers": 6,
87
+ "t2u_encoder_attention_heads": 16,
88
+ "t2u_encoder_ffn_dim": 8192,
89
+ "t2u_encoder_layers": 6,
90
+ "t2u_eos_token_id": 2,
91
+ "t2u_max_position_embeddings": 4096,
92
+ "t2u_pad_token_id": 1,
93
+ "t2u_variance_pred_dropout": 0.5,
94
+ "t2u_variance_predictor_embed_dim": 1024,
95
+ "t2u_variance_predictor_hidden_dim": 256,
96
+ "t2u_variance_predictor_kernel_size": 3,
97
+ "t2u_vocab_size": 10082,
98
+ "torch_dtype": "float32",
99
+ "transformers_version": "4.45.2",
100
+ "unit_embed_dim": 1280,
101
+ "unit_hifi_gan_vocab_size": 10000,
102
+ "upsample_initial_channel": 512,
103
+ "upsample_kernel_sizes": [
104
+ 11,
105
+ 8,
106
+ 8,
107
+ 4,
108
+ 4
109
+ ],
110
+ "upsample_rates": [
111
+ 5,
112
+ 4,
113
+ 4,
114
+ 2,
115
+ 2
116
+ ],
117
+ "use_cache": true,
118
+ "var_pred_dropout": 0.5,
119
+ "variance_predictor_kernel_size": 3,
120
+ "vocab_size": 256102,
121
+ "vocoder_num_langs": 36,
122
+ "vocoder_num_spkrs": 200,
123
+ "vocoder_offset": 4
124
+ }
125
+
configuration_seamless_m4t_v2_speech_encoder.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers.models.seamless_m4t_v2.configuration_seamless_m4t_v2 import (
2
+ SeamlessM4Tv2Config,
3
+ )
4
+ from transformers.models.auto.configuration_auto import AutoConfig
5
+
6
+ MODEL_TYPE = "seamlessm4t-v2-large-speech_encoder"
7
+
8
+
9
class SeamlessM4Tv2EncoderConfig(SeamlessM4Tv2Config):
    """Configuration for the standalone SeamlessM4T-v2 speech encoder.

    Identical to ``SeamlessM4Tv2Config`` except for ``model_type``, which is
    overridden so the checkpoint can be registered and resolved through the
    Auto* machinery under its own model-type string.
    """

    # Distinct model_type keeps this config from clashing with the upstream
    # "seamless_m4t_v2" registration inside transformers.
    model_type = MODEL_TYPE
14
+
15
+
16
# Make the custom model type resolvable via AutoConfig.from_pretrained(...)
# when this module is importable locally.
AutoConfig.register(MODEL_TYPE, SeamlessM4Tv2EncoderConfig)

# Record this class in the checkpoint's auto_map on save_pretrained/push_to_hub,
# so remote loading works with trust_remote_code=True.
SeamlessM4Tv2EncoderConfig.register_for_auto_class()
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ae3e6d6221ade043eaa54f3cafb2f37683617c209ad10d635b4fd73dcee2f591
3
+ size 2540281584
modeling_seamless_m4t_v2_speech_encoder.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch.nn.functional as F
4
+ from transformers.models.seamless_m4t.modeling_seamless_m4t import (
5
+ _compute_new_attention_mask,
6
+ )
7
+ from transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2 import (
8
+ SeamlessM4Tv2SpeechEncoder,
9
+ SeamlessM4Tv2PreTrainedModel,
10
+ )
11
+ from .configuration_seamless_m4t_v2_speech_encoder import (
12
+ MODEL_TYPE,
13
+ SeamlessM4Tv2EncoderConfig,
14
+ )
15
+ from transformers.modeling_outputs import SequenceClassifierOutput
16
+
17
+ from transformers.models.auto import AutoModel, AutoModelForAudioClassification, AutoModelForSequenceClassification
18
+
19
+
20
class SeamlessM4Tv2SpeechEncoder(SeamlessM4Tv2SpeechEncoder):
    """Standalone SeamlessM4T-v2 speech encoder tied to the custom config.

    Subclasses (and intentionally shadows) the upstream encoder of the same
    name, binding it to ``SeamlessM4Tv2EncoderConfig`` so it can be loaded
    through the Auto* classes registered at the bottom of this module.
    """

    model_type = MODEL_TYPE
    config_class = SeamlessM4Tv2EncoderConfig

    # NOTE: the original no-op __init__ (which only forwarded to super()) has
    # been removed — Python inherits the parent constructor automatically.

    def _compute_sub_sample_lengths_from_attention_mask(self, attention_mask):
        """Return per-sample sequence lengths after conv subsampling.

        Applies the standard 1-D convolution output-length formula
        ``floor((len + 2*pad - kernel) / stride) + 1`` to the unpadded length
        of each sample in the batch.

        NOTE(review): relies on ``self.kernel_size`` and ``self.stride`` being
        provided by the parent class / adapter — confirm they match the
        adapter's conv parameters; they are not set anywhere in this file.
        """
        pad = self.kernel_size // 2
        # Unpadded length = total length minus number of masked (0) positions.
        seq_lens = attention_mask.size(1) - (1 - attention_mask.int()).sum(1)
        seq_lens = ((seq_lens + 2 * pad - self.kernel_size) / self.stride) + 1
        return seq_lens.floor()

    @staticmethod
    def mean_pooling(
        hidden_states: torch.Tensor, attention_mask: torch.Tensor
    ) -> torch.Tensor:
        """Masked mean over the time axis.

        Args:
            hidden_states: (batch, seq_len, hidden) encoder outputs.
            attention_mask: (batch, seq_len) mask; 1 = keep, 0 = padding.

        Returns:
            (batch, hidden) mean of unmasked positions per sample.
        """
        input_mask_expanded = (
            attention_mask.unsqueeze(-1).expand(hidden_states.size()).float()
        )
        sum_hidden_states = torch.sum(hidden_states * input_mask_expanded, 1)
        sum_mask = input_mask_expanded.sum(1)
        # Clamp avoids division by zero for fully-masked (all-padding) rows.
        return sum_hidden_states / torch.clamp(sum_mask, min=1e-9)
50
+
51
+
52
class SeamlessM4Tv2ForAudioClassification(SeamlessM4Tv2PreTrainedModel):
    """SeamlessM4T-v2 speech encoder with a linear audio-classification head.

    Mean-pools the encoder's (subsampled) hidden states over time with the
    attention mask, then projects to ``config.num_labels`` logits.
    """

    model_type = MODEL_TYPE
    base_model_prefix = "model"
    config_class = SeamlessM4Tv2EncoderConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.model = SeamlessM4Tv2SpeechEncoder(config)
        # Bias-free linear classifier on top of the pooled representation.
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
        # Fix: standard HF pattern — without post_init() the freshly created
        # head is never passed through _init_weights.
        self.post_init()

    def forward(
        self,
        input_features: torch.Tensor,
        attention_mask: torch.Tensor,
        labels: torch.Tensor | None = None,  # fix: optional so inference-only calls work
        *args,
        **kwargs,
    ):
        """Run the encoder, pool, classify, and optionally compute CE loss.

        Args:
            input_features: batched speech features for the encoder.
            attention_mask: (batch, input_len) mask over input frames; 1 = keep.
            labels: optional (batch,) class indices; when given, cross-entropy
                loss is returned.

        Returns:
            SequenceClassifierOutput with ``logits`` of shape
            (batch, num_labels), optional ``loss``, and ``hidden_states`` when
            requested via ``output_hidden_states``.
        """
        output_hidden_states = kwargs.pop("output_hidden_states", False)
        # Fix: pass *args before keyword arguments (original interleaved them).
        outputs = self.model(
            input_features,
            attention_mask,
            *args,
            output_hidden_states=output_hidden_states,
            **kwargs,
        )
        hidden_states = outputs.last_hidden_state
        if attention_mask is not None:
            # The conv adapter subsamples the time axis, so the input-frame
            # mask must be recomputed at the encoder's output resolution.
            sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(
                attention_mask
            ).to(hidden_states.device)
            attention_mask = _compute_new_attention_mask(
                hidden_states=hidden_states, seq_lens=sub_sampled_lengths
            )
        pooled = self.model.mean_pooling(hidden_states, attention_mask)
        logits = self.score(pooled)
        loss = F.cross_entropy(logits, labels) if labels is not None else None
        return SequenceClassifierOutput(
            loss=loss,  # type: ignore
            logits=logits,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
        )
101
+
102
+
103
# Register the custom classes with the Auto* factories so that
# AutoModel.from_pretrained / AutoModelFor*Classification.from_pretrained
# resolve this config to the implementations defined above.
AutoModel.register(SeamlessM4Tv2EncoderConfig, SeamlessM4Tv2SpeechEncoder)
AutoModelForAudioClassification.register(
    SeamlessM4Tv2EncoderConfig, SeamlessM4Tv2ForAudioClassification
)
# The same classification head is exposed under both Auto classes, matching
# the auto_map entries in config.json.
AutoModelForSequenceClassification.register(
    SeamlessM4Tv2EncoderConfig, SeamlessM4Tv2ForAudioClassification
)
preprocessor_config.json ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "feature_extractor_type": "SeamlessM4TFeatureExtractor",
3
+ "feature_size": 80,
4
+ "language_code": [
5
+ "__afr__",
6
+ "__amh__",
7
+ "__arb__",
8
+ "__ary__",
9
+ "__arz__",
10
+ "__asm__",
11
+ "__azj__",
12
+ "__bel__",
13
+ "__ben__",
14
+ "__bos__",
15
+ "__bul__",
16
+ "__cat__",
17
+ "__ceb__",
18
+ "__ces__",
19
+ "__ckb__",
20
+ "__cmn__",
21
+ "__cmn_Hant__",
22
+ "__cym__",
23
+ "__dan__",
24
+ "__deu__",
25
+ "__ell__",
26
+ "__eng__",
27
+ "__est__",
28
+ "__eus__",
29
+ "__fin__",
30
+ "__fra__",
31
+ "__fuv__",
32
+ "__gaz__",
33
+ "__gle__",
34
+ "__glg__",
35
+ "__guj__",
36
+ "__heb__",
37
+ "__hin__",
38
+ "__hrv__",
39
+ "__hun__",
40
+ "__hye__",
41
+ "__ibo__",
42
+ "__ind__",
43
+ "__isl__",
44
+ "__ita__",
45
+ "__jav__",
46
+ "__jpn__",
47
+ "__kan__",
48
+ "__kat__",
49
+ "__kaz__",
50
+ "__khk__",
51
+ "__khm__",
52
+ "__kir__",
53
+ "__kor__",
54
+ "__lao__",
55
+ "__lit__",
56
+ "__lug__",
57
+ "__luo__",
58
+ "__lvs__",
59
+ "__mai__",
60
+ "__mal__",
61
+ "__mar__",
62
+ "__mkd__",
63
+ "__mlt__",
64
+ "__mni__",
65
+ "__mya__",
66
+ "__nld__",
67
+ "__nno__",
68
+ "__nob__",
69
+ "__npi__",
70
+ "__nya__",
71
+ "__ory__",
72
+ "__pan__",
73
+ "__pbt__",
74
+ "__pes__",
75
+ "__pol__",
76
+ "__por__",
77
+ "__ron__",
78
+ "__rus__",
79
+ "__sat__",
80
+ "__slk__",
81
+ "__slv__",
82
+ "__sna__",
83
+ "__snd__",
84
+ "__som__",
85
+ "__spa__",
86
+ "__srp__",
87
+ "__swe__",
88
+ "__swh__",
89
+ "__tam__",
90
+ "__tel__",
91
+ "__tgk__",
92
+ "__tgl__",
93
+ "__tha__",
94
+ "__tur__",
95
+ "__ukr__",
96
+ "__urd__",
97
+ "__uzn__",
98
+ "__vie__",
99
+ "__yor__",
100
+ "__yue__",
101
+ "__zlm__",
102
+ "__zul__"
103
+ ],
104
+ "num_mel_bins": 80,
105
+ "padding_side": "right",
106
+ "padding_value": 0.0,
107
+ "processor_class": "SeamlessM4TProcessor",
108
+ "return_attention_mask": true,
109
+ "sampling_rate": 16000,
110
+ "stride": 2
111
+ }