jialicheng committed
Commit 5666234
1 Parent(s): c12a78d

Upload folder using huggingface_hub
README.md ADDED
@@ -0,0 +1,86 @@
+ ---
+ license: apache-2.0
+ base_model: facebook/wav2vec2-base
+ tags:
+ - audio-classification
+ - generated_from_trainer
+ datasets:
+ - superb
+ metrics:
+ - accuracy
+ model-index:
+ - name: superb_ks_42
+   results:
+   - task:
+       name: Audio Classification
+       type: audio-classification
+     dataset:
+       name: superb
+       type: superb
+       config: ks
+       split: validation
+       args: ks
+     metrics:
+     - name: Accuracy
+       type: accuracy
+       value: 0.9845542806707855
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # superb_ks_42
+
+ This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the superb dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0853
+ - Accuracy: 0.9846
+
+ ## Model description
+
+ A `facebook/wav2vec2-base` encoder with a sequence-classification head (`Wav2Vec2ForSequenceClassification`), fine-tuned for 12-class keyword spotting: yes, no, up, down, left, right, on, off, stop, go, `_silence_`, and `_unknown_`.
+
+ ## Intended uses & limitations
+
+ Intended for keyword spotting on 16 kHz mono speech. Test accuracy (0.8903) is markedly lower than validation accuracy (0.9846), so expect weaker performance outside the validation distribution.
+
+ ## Training and evaluation data
+
+ Fine-tuned and evaluated on the `ks` (keyword spotting) config of the SUPERB benchmark; the metrics above are computed on its validation split.
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 32
+ - eval_batch_size: 4
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 10
+
+ ### Training results
+
+ | Training Loss | Epoch | Step  | Validation Loss | Accuracy |
+ |:-------------:|:-----:|:-----:|:---------------:|:--------:|
+ | 1.25          | 1.0   | 1597  | 0.1755          | 0.9582   |
+ | 0.2502        | 2.0   | 3194  | 0.1402          | 0.9713   |
+ | 0.2218        | 3.0   | 4791  | 0.0956          | 0.9803   |
+ | 0.1746        | 4.0   | 6388  | 0.0917          | 0.9797   |
+ | 0.17          | 5.0   | 7985  | 0.0893          | 0.9807   |
+ | 0.1431        | 6.0   | 9582  | 0.0933          | 0.9810   |
+ | 0.1238        | 7.0   | 11179 | 0.0958          | 0.9831   |
+ | 0.116         | 8.0   | 12776 | 0.0970          | 0.9834   |
+ | 0.0995        | 9.0   | 14373 | 0.0853          | 0.9846   |
+ | 0.0985        | 10.0  | 15970 | 0.0829          | 0.9838   |
+
+
+ ### Framework versions
+
+ - Transformers 4.40.1
+ - Pytorch 2.3.0+cu121
+ - Datasets 2.19.0
+ - Tokenizers 0.19.1
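The card stops at training metadata, so a minimal inference sketch may help. It assumes the checkpoint is hosted as `jialicheng/superb_ks_42` (repo id inferred from the uploader and model name, so treat it as hypothetical) and that input audio is 16 kHz mono, per `preprocessor_config.json` below:

```python
from transformers import pipeline

# Hypothetical repo id, inferred from the uploader and model name.
classifier = pipeline("audio-classification", model="jialicheng/superb_ks_42")

# Accepts a file path, URL, or a raw 16 kHz numpy waveform.
predictions = classifier("speech_command.wav", top_k=3)
print(predictions)  # e.g. [{"score": 0.98, "label": "yes"}, ...]
```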
all_results.json ADDED
@@ -0,0 +1,19 @@
+ {
+     "epoch": 10.0,
+     "eval_accuracy": 0.9845542806707855,
+     "eval_loss": 0.08534605801105499,
+     "eval_runtime": 6.6778,
+     "eval_samples_per_second": 1017.998,
+     "eval_steps_per_second": 4.043,
+     "test_accuracy": 0.890295358649789,
+     "test_loss": 0.5566478967666626,
+     "test_runtime": 4.3735,
+     "test_samples_per_second": 704.473,
+     "test_steps_per_second": 2.972,
+     "total_flos": 4.6387626201984e+18,
+     "train_accuracy": 0.9925235839824637,
+     "train_loss": 0.03144249692559242,
+     "train_runtime": 176.1726,
+     "train_samples_per_second": 290.022,
+     "train_steps_per_second": 72.508
+ }
config.json ADDED
@@ -0,0 +1,147 @@
+ {
+   "_name_or_path": "facebook/wav2vec2-base",
+   "activation_dropout": 0.0,
+   "adapter_attn_dim": null,
+   "adapter_kernel_size": 3,
+   "adapter_stride": 2,
+   "add_adapter": false,
+   "apply_spec_augment": true,
+   "architectures": [
+     "Wav2Vec2ForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 1,
+   "classifier_proj_size": 256,
+   "codevector_dim": 256,
+   "contrastive_logits_temperature": 0.1,
+   "conv_bias": false,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "sum",
+   "ctc_zero_infinity": false,
+   "diversity_loss_weight": 0.1,
+   "do_stable_layer_norm": false,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_norm": "group",
+   "feat_proj_dropout": 0.1,
+   "feat_quantizer_dropout": 0.0,
+   "final_dropout": 0.0,
+   "finetuning_task": "audio-classification",
+   "freeze_feat_extract_train": true,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "yes",
+     "1": "no",
+     "10": "_silence_",
+     "11": "_unknown_",
+     "2": "up",
+     "3": "down",
+     "4": "left",
+     "5": "right",
+     "6": "on",
+     "7": "off",
+     "8": "stop",
+     "9": "go"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "_silence_": "10",
+     "_unknown_": "11",
+     "down": "3",
+     "go": "9",
+     "left": "4",
+     "no": "1",
+     "off": "7",
+     "on": "6",
+     "right": "5",
+     "stop": "8",
+     "up": "2",
+     "yes": "0"
+   },
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.0,
+   "mask_channel_length": 10,
+   "mask_channel_min_space": 1,
+   "mask_channel_other": 0.0,
+   "mask_channel_prob": 0.0,
+   "mask_channel_selection": "static",
+   "mask_feature_length": 10,
+   "mask_feature_min_masks": 0,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_min_masks": 2,
+   "mask_time_min_space": 1,
+   "mask_time_other": 0.0,
+   "mask_time_prob": 0.05,
+   "mask_time_selection": "static",
+   "model_type": "wav2vec2",
+   "no_mask_channel_overlap": false,
+   "no_mask_time_overlap": false,
+   "num_adapter_layers": 3,
+   "num_attention_heads": 12,
+   "num_codevector_groups": 2,
+   "num_codevectors_per_group": 320,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 12,
+   "num_negatives": 100,
+   "output_hidden_size": 768,
+   "pad_token_id": 0,
+   "proj_codevector_dim": 256,
+   "tdnn_dilation": [
+     1,
+     2,
+     3,
+     1,
+     1
+   ],
+   "tdnn_dim": [
+     512,
+     512,
+     512,
+     512,
+     1500
+   ],
+   "tdnn_kernel": [
+     5,
+     3,
+     3,
+     1,
+     1
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.40.1",
+   "use_weighted_layer_sum": false,
+   "vocab_size": 32,
+   "xvector_output_dim": 512
+ }
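The config declares `Wav2Vec2ForSequenceClassification` with a 12-entry `id2label` table, so the checkpoint loads through the audio-classification auto classes. A sketch, again assuming the hypothetical repo id (a local directory containing this `config.json` works the same way):

```python
from transformers import AutoConfig, AutoModelForAudioClassification

config = AutoConfig.from_pretrained("jialicheng/superb_ks_42")  # hypothetical repo id
print(config.model_type)   # "wav2vec2"
print(config.num_labels)   # 12
print(config.id2label[0])  # "yes" (string keys are converted to int on load)

# Builds the Wav2Vec2ForSequenceClassification architecture named in the config.
model = AutoModelForAudioClassification.from_pretrained("jialicheng/superb_ks_42")
```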
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 10.0,
+     "eval_accuracy": 0.9845542806707855,
+     "eval_loss": 0.08534605801105499,
+     "eval_runtime": 6.6778,
+     "eval_samples_per_second": 1017.998,
+     "eval_steps_per_second": 4.043
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0928bcd031d5be3fe523ea73a2ee512b5fd55648a2556f891314b57370d29f5
+ size 378312648
pred_logit_eval.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:666c92228c20c7cc99bbd5a19f99c35c39f088b0598e1c09f37423e4c4d5b627
+ size 326432
pred_logit_test.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f59a8da91884126fd15f9937d12636ef2740f9d5e1ce526ae8d0e681a17dc795
+ size 148016
pred_logit_train.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ec0143d5d04ccb97b2238a9740696b0988609b06f76a0d8612c22d8e352b5419
+ size 2452640
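The three `pred_logit_*.npy` file sizes are consistent with float32 arrays of shape `(num_examples, 12)`: the eval file's 326432 bytes = 6798 clips × 12 classes × 4 bytes plus a 128-byte `.npy` header, and 6798 matches eval_runtime × eval_samples_per_second in the logs. A sketch for turning the saved logits back into keyword predictions (shape assumed as above):

```python
import numpy as np

# Assumed shape: (num_examples, 12) float32, one row of class logits per clip.
logits = np.load("pred_logit_eval.npy")
pred_ids = logits.argmax(axis=-1)

# id2label table copied from config.json above.
id2label = {0: "yes", 1: "no", 2: "up", 3: "down", 4: "left", 5: "right",
            6: "on", 7: "off", 8: "stop", 9: "go", 10: "_silence_", 11: "_unknown_"}
print([id2label[int(i)] for i in pred_ids[:5]])
```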
preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "do_normalize": true,
+   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0.0,
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
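This file pins the preprocessing contract: 16 kHz input, per-utterance normalization, and an attention mask for padded batches. A sketch of the expected preprocessing step, assuming the same hypothetical repo id:

```python
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor.from_pretrained("jialicheng/superb_ks_42")  # hypothetical repo id

waveform = np.random.randn(16000).astype(np.float32)  # 1 s of dummy 16 kHz audio
inputs = extractor(waveform, sampling_rate=16000, return_tensors="pt")
print(inputs.input_values.shape)    # torch.Size([1, 16000])
print(inputs.attention_mask.shape)  # returned because return_attention_mask is true
```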
test_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 10.0,
+     "test_accuracy": 0.890295358649789,
+     "test_loss": 0.5566478967666626,
+     "test_runtime": 4.3735,
+     "test_samples_per_second": 704.473,
+     "test_steps_per_second": 2.972
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 10.0,
+     "train_accuracy": 0.9925235839824637,
+     "train_loss": 0.03144249692559242,
+     "train_runtime": 176.1726,
+     "train_samples_per_second": 290.022,
+     "train_steps_per_second": 72.508
+ }
trainer_state.json ADDED
@@ -0,0 +1,225 @@
+ {
+   "best_metric": 0.9845542806707855,
+   "best_model_checkpoint": "audio/train/checkpoint/wav2vec2-base/superb_ks_42/checkpoint-14373",
+   "epoch": 10.0,
+   "eval_steps": 500,
+   "global_step": 15970,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.6261740763932373,
+       "grad_norm": 12.87492847442627,
+       "learning_rate": 3.1308703819661866e-05,
+       "loss": 1.25,
+       "step": 1000
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.9582230067666961,
+       "eval_loss": 0.17545583844184875,
+       "eval_runtime": 7.033,
+       "eval_samples_per_second": 966.585,
+       "eval_steps_per_second": 3.839,
+       "step": 1597
+     },
+     {
+       "epoch": 1.2523481527864746,
+       "grad_norm": 2.619173765182495,
+       "learning_rate": 4.859806581785292e-05,
+       "loss": 0.3308,
+       "step": 2000
+     },
+     {
+       "epoch": 1.878522229179712,
+       "grad_norm": 6.4030070304870605,
+       "learning_rate": 4.51193209490016e-05,
+       "loss": 0.2502,
+       "step": 3000
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.9713150926743159,
+       "eval_loss": 0.140165776014328,
+       "eval_runtime": 6.9118,
+       "eval_samples_per_second": 983.532,
+       "eval_steps_per_second": 3.906,
+       "step": 3194
+     },
+     {
+       "epoch": 2.504696305572949,
+       "grad_norm": 0.2295457422733307,
+       "learning_rate": 4.1640576080150286e-05,
+       "loss": 0.2218,
+       "step": 4000
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.9802883200941453,
+       "eval_loss": 0.09560667723417282,
+       "eval_runtime": 7.0467,
+       "eval_samples_per_second": 964.712,
+       "eval_steps_per_second": 3.832,
+       "step": 4791
+     },
+     {
+       "epoch": 3.1308703819661865,
+       "grad_norm": 0.9765733480453491,
+       "learning_rate": 3.8161831211298965e-05,
+       "loss": 0.194,
+       "step": 5000
+     },
+     {
+       "epoch": 3.757044458359424,
+       "grad_norm": 0.9296017289161682,
+       "learning_rate": 3.468308634244764e-05,
+       "loss": 0.1746,
+       "step": 6000
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.9796999117387467,
+       "eval_loss": 0.09171486645936966,
+       "eval_runtime": 6.5552,
+       "eval_samples_per_second": 1037.04,
+       "eval_steps_per_second": 4.119,
+       "step": 6388
+     },
+     {
+       "epoch": 4.383218534752661,
+       "grad_norm": 7.867722511291504,
+       "learning_rate": 3.120434147359633e-05,
+       "loss": 0.17,
+       "step": 7000
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.9807296263606943,
+       "eval_loss": 0.08932828158140182,
+       "eval_runtime": 6.9043,
+       "eval_samples_per_second": 984.61,
+       "eval_steps_per_second": 3.911,
+       "step": 7985
+     },
+     {
+       "epoch": 5.009392611145898,
+       "grad_norm": 1.299570083618164,
+       "learning_rate": 2.7725596604745007e-05,
+       "loss": 0.1554,
+       "step": 8000
+     },
+     {
+       "epoch": 5.635566687539136,
+       "grad_norm": 1.2470015287399292,
+       "learning_rate": 2.4246851735893692e-05,
+       "loss": 0.1431,
+       "step": 9000
+     },
+     {
+       "epoch": 6.0,
+       "eval_accuracy": 0.9810238305383936,
+       "eval_loss": 0.09325208514928818,
+       "eval_runtime": 6.9991,
+       "eval_samples_per_second": 971.269,
+       "eval_steps_per_second": 3.858,
+       "step": 9582
+     },
+     {
+       "epoch": 6.261740763932373,
+       "grad_norm": 1.0575257539749146,
+       "learning_rate": 2.076810686704237e-05,
+       "loss": 0.1346,
+       "step": 10000
+     },
+     {
+       "epoch": 6.88791484032561,
+       "grad_norm": 3.034492015838623,
+       "learning_rate": 1.7289361998191053e-05,
+       "loss": 0.1238,
+       "step": 11000
+     },
+     {
+       "epoch": 7.0,
+       "eval_accuracy": 0.9830832597822889,
+       "eval_loss": 0.09578000754117966,
+       "eval_runtime": 6.8981,
+       "eval_samples_per_second": 985.49,
+       "eval_steps_per_second": 3.914,
+       "step": 11179
+     },
+     {
+       "epoch": 7.514088916718848,
+       "grad_norm": 1.000091314315796,
+       "learning_rate": 1.3810617129339735e-05,
+       "loss": 0.116,
+       "step": 12000
+     },
+     {
+       "epoch": 8.0,
+       "eval_accuracy": 0.9833774639599883,
+       "eval_loss": 0.09704861044883728,
+       "eval_runtime": 6.9946,
+       "eval_samples_per_second": 971.892,
+       "eval_steps_per_second": 3.86,
+       "step": 12776
+     },
+     {
+       "epoch": 8.140262993112085,
+       "grad_norm": 2.212705373764038,
+       "learning_rate": 1.0331872260488417e-05,
+       "loss": 0.1113,
+       "step": 13000
+     },
+     {
+       "epoch": 8.766437069505322,
+       "grad_norm": 3.2522332668304443,
+       "learning_rate": 6.853127391637097e-06,
+       "loss": 0.0995,
+       "step": 14000
+     },
+     {
+       "epoch": 9.0,
+       "eval_accuracy": 0.9845542806707855,
+       "eval_loss": 0.08534605801105499,
+       "eval_runtime": 6.7022,
+       "eval_samples_per_second": 1014.3,
+       "eval_steps_per_second": 4.029,
+       "step": 14373
+     },
+     {
+       "epoch": 9.39261114589856,
+       "grad_norm": 6.198681354522705,
+       "learning_rate": 3.3743825227857787e-06,
+       "loss": 0.0985,
+       "step": 15000
+     },
+     {
+       "epoch": 10.0,
+       "eval_accuracy": 0.9838187702265372,
+       "eval_loss": 0.08294928818941116,
+       "eval_runtime": 6.7918,
+       "eval_samples_per_second": 1000.911,
+       "eval_steps_per_second": 3.975,
+       "step": 15970
+     },
+     {
+       "epoch": 10.0,
+       "step": 15970,
+       "total_flos": 4.6387626201984e+18,
+       "train_loss": 0.22964708045190324,
+       "train_runtime": 1401.6538,
+       "train_samples_per_second": 364.527,
+       "train_steps_per_second": 11.394
+     }
+   ],
+   "logging_steps": 1000,
+   "max_steps": 15970,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 500,
+   "total_flos": 4.6387626201984e+18,
+   "train_batch_size": 32,
+   "trial_name": null,
+   "trial_params": null
+ }
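`trainer_state.json` carries the full log history, which makes it easy to recover the best checkpoint and the per-epoch eval curve without re-parsing the README table. A minimal sketch:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

print(state["best_metric"])            # 0.9845542806707855
print(state["best_model_checkpoint"])  # .../checkpoint-14373 (epoch 9)

# Per-epoch evaluation accuracy, filtered from the interleaved training logs.
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f'epoch {entry["epoch"]:>4}: eval_accuracy {entry["eval_accuracy"]:.4f}')
```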
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4163bf7dd5d5be42c1b50a5088f847d384f3a5c385f61bc45011a7e48dc998a0
+ size 5112