amankhandelia committed on
Commit
5ae3a9f
1 Parent(s): 38477d6

Upload Wav2Vec2ForAudioFrameClassification

Files changed (2)
  1. config.json +173 -0
  2. pytorch_model.bin +3 -0
config.json ADDED
@@ -0,0 +1,173 @@
+ {
+   "_name_or_path": "amankhandelia/mms_alignment_model",
+   "activation_dropout": 0.1,
+   "adapter_attn_dim": null,
+   "adapter_kernel_size": 3,
+   "adapter_stride": 2,
+   "add_adapter": false,
+   "apply_spec_augment": false,
+   "architectures": [
+     "Wav2Vec2ForAudioFrameClassification"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "classifier_proj_size": 256,
+   "codevector_dim": 256,
+   "contrastive_logits_temperature": 0.1,
+   "conv_bias": true,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "sum",
+   "ctc_zero_infinity": false,
+   "diversity_loss_weight": 0.1,
+   "do_stable_layer_norm": true,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_norm": "layer",
+   "feat_proj_dropout": 0.0,
+   "feat_quantizer_dropout": 0.0,
+   "final_dropout": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.0,
+   "hidden_size": 1024,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2",
+     "3": "LABEL_3",
+     "4": "LABEL_4",
+     "5": "LABEL_5",
+     "6": "LABEL_6",
+     "7": "LABEL_7",
+     "8": "LABEL_8",
+     "9": "LABEL_9",
+     "10": "LABEL_10",
+     "11": "LABEL_11",
+     "12": "LABEL_12",
+     "13": "LABEL_13",
+     "14": "LABEL_14",
+     "15": "LABEL_15",
+     "16": "LABEL_16",
+     "17": "LABEL_17",
+     "18": "LABEL_18",
+     "19": "LABEL_19",
+     "20": "LABEL_20",
+     "21": "LABEL_21",
+     "22": "LABEL_22",
+     "23": "LABEL_23",
+     "24": "LABEL_24",
+     "25": "LABEL_25",
+     "26": "LABEL_26",
+     "27": "LABEL_27",
+     "28": "LABEL_28",
+     "29": "LABEL_29",
+     "30": "LABEL_30"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_10": 10,
+     "LABEL_11": 11,
+     "LABEL_12": 12,
+     "LABEL_13": 13,
+     "LABEL_14": 14,
+     "LABEL_15": 15,
+     "LABEL_16": 16,
+     "LABEL_17": 17,
+     "LABEL_18": 18,
+     "LABEL_19": 19,
+     "LABEL_2": 2,
+     "LABEL_20": 20,
+     "LABEL_21": 21,
+     "LABEL_22": 22,
+     "LABEL_23": 23,
+     "LABEL_24": 24,
+     "LABEL_25": 25,
+     "LABEL_26": 26,
+     "LABEL_27": 27,
+     "LABEL_28": 28,
+     "LABEL_29": 29,
+     "LABEL_3": 3,
+     "LABEL_30": 30,
+     "LABEL_4": 4,
+     "LABEL_5": 5,
+     "LABEL_6": 6,
+     "LABEL_7": 7,
+     "LABEL_8": 8,
+     "LABEL_9": 9
+   },
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.1,
+   "mask_feature_length": 10,
+   "mask_feature_min_masks": 0,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_min_masks": 2,
+   "mask_time_prob": 0.0,
+   "model_type": "wav2vec2",
+   "num_adapter_layers": 3,
+   "num_attention_heads": 16,
+   "num_codevector_groups": 2,
+   "num_codevectors_per_group": 320,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 24,
+   "num_negatives": 100,
+   "output_hidden_size": 1024,
+   "pad_token_id": 0,
+   "proj_codevector_dim": 256,
+   "tdnn_dilation": [
+     1,
+     2,
+     3,
+     1,
+     1
+   ],
+   "tdnn_dim": [
+     512,
+     512,
+     512,
+     512,
+     1500
+   ],
+   "tdnn_kernel": [
+     5,
+     3,
+     3,
+     1,
+     1
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.31.0",
+   "use_weighted_layer_sum": false,
+   "vocab_size": 32,
+   "xvector_output_dim": 512
+ }
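
The configuration above describes a Wav2Vec2ForAudioFrameClassification checkpoint (24 transformer layers, hidden size 1024, 31 frame-level labels) saved with transformers 4.31.0. As a minimal sketch of how the upload could be used, the snippet below loads the model for frame-level classification. The repo id is taken from "_name_or_path" and is an assumption about where these files live; since this commit ships no preprocessor_config.json, the feature extractor is constructed by hand rather than loaded from the repo.

import torch
from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForAudioFrameClassification

repo_id = "amankhandelia/mms_alignment_model"  # assumed from "_name_or_path" above
model = Wav2Vec2ForAudioFrameClassification.from_pretrained(repo_id)
model.eval()

# No preprocessor_config.json is part of this commit, so build a default
# 16 kHz feature extractor directly instead of fetching it from the repo.
feature_extractor = Wav2Vec2FeatureExtractor(sampling_rate=16000)

waveform = torch.zeros(16000)  # one second of silence as a stand-in input
inputs = feature_extractor(waveform.numpy(), sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape: (batch, frames, 31)
frame_labels = logits.argmax(dim=-1)  # per-frame label ids (LABEL_0 ... LABEL_30)
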
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b530ba577f4cde9e29b04933818132bf397cde1f2f0b5bf056adb387991ce157
+ size 1262018665
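
pytorch_model.bin is committed as a Git LFS pointer; the actual weights (about 1.26 GB) are fetched when the file is resolved. A minimal sketch for downloading the file with huggingface_hub and checking it against the sha256 recorded in the pointer above (the repo id is again an assumption taken from config.json):

import hashlib
from huggingface_hub import hf_hub_download

EXPECTED_SHA256 = "b530ba577f4cde9e29b04933818132bf397cde1f2f0b5bf056adb387991ce157"

# Assumed repo id, taken from "_name_or_path" in config.json.
path = hf_hub_download(repo_id="amankhandelia/mms_alignment_model",
                       filename="pytorch_model.bin")

sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha256.update(chunk)

print(sha256.hexdigest() == EXPECTED_SHA256)  # True if the download matches the LFS pointer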