ThreeBlessings committed on
Commit
02f258c
1 Parent(s): 18e2547

Upload model

Browse files
Files changed (3) hide show
  1. README.md +97 -0
  2. adapter_config.json +33 -0
  3. adapter_model.safetensors +3 -0
README.md ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ library_name: peft
4
+ tags:
5
+ - generated_from_trainer
6
+ datasets:
7
+ - marsyas/gtzan
8
+ metrics:
9
+ - accuracy
10
+ base_model: ntu-spml/distilhubert
11
+ model-index:
12
+ - name: distilhubert-finetuned-gtzan
13
+ results:
14
+ - task:
15
+ type: audio-classification
16
+ name: Audio Classification
17
+ dataset:
18
+ name: GTZAN
19
+ type: marsyas/gtzan
20
+ split: None
21
+ metrics:
22
+ - type: accuracy
23
+ value: 0.9319319319319319
24
+ name: Accuracy
25
+ ---
26
+
27
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
28
+ should probably proofread and complete it, then remove this comment. -->
29
+
30
+ # distilhubert-finetuned-gtzan
31
+
32
+ This model is a fine-tuned version of [ntu-spml/distilhubert](https://huggingface.co/ntu-spml/distilhubert) on the GTZAN dataset.
33
+ It achieves the following results on the evaluation set:
34
+ - Loss: 0.2387
35
+ - Accuracy: 0.9319
36
+
37
+ ## Model description
38
+
39
+ More information needed
40
+
41
+ ## Intended uses & limitations
42
+
43
+ More information needed
44
+
45
+ ## Training and evaluation data
46
+
47
+ More information needed
48
+
49
+ ## Training procedure
50
+
51
+ ### Training hyperparameters
52
+
53
+ The following hyperparameters were used during training:
54
+ - learning_rate: 0.001
55
+ - train_batch_size: 6
56
+ - eval_batch_size: 6
57
+ - seed: 42
58
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
59
+ - lr_scheduler_type: linear
60
+ - lr_scheduler_warmup_ratio: 0.1
61
+ - num_epochs: 15
62
+ - mixed_precision_training: Native AMP
63
+
64
+ ### Training results
65
+
66
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
67
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
68
+ | 1.7644 | 1.0 | 167 | 1.7832 | 0.3554 |
69
+ | 1.2856 | 2.0 | 334 | 1.4226 | 0.4745 |
70
+ | 1.2123 | 3.0 | 501 | 1.0047 | 0.6737 |
71
+ | 0.6613 | 4.0 | 668 | 0.8091 | 0.6987 |
72
+ | 0.6442 | 5.0 | 835 | 0.6713 | 0.7858 |
73
+ | 0.7172 | 6.0 | 1002 | 0.5749 | 0.8238 |
74
+ | 0.5394 | 7.0 | 1169 | 0.5079 | 0.8408 |
75
+ | 0.3853 | 8.0 | 1336 | 0.4574 | 0.8539 |
76
+ | 0.5441 | 9.0 | 1503 | 0.3729 | 0.8869 |
77
+ | 0.5062 | 10.0 | 1670 | 0.3319 | 0.9009 |
78
+ | 0.3955 | 11.0 | 1837 | 0.3745 | 0.8849 |
79
+ | 0.3112 | 12.0 | 2004 | 0.2752 | 0.9289 |
80
+ | 0.2887 | 13.0 | 2171 | 0.2544 | 0.9289 |
81
+ | 0.2038 | 14.0 | 2338 | 0.2344 | 0.9329 |
82
+ | 0.2374 | 15.0 | 2505 | 0.2387 | 0.9319 |
83
+
84
+
85
+ ### Framework versions
86
+
87
+ - Transformers 4.35.2
88
+ - Pytorch 2.1.1+cu121
89
+ - Datasets 2.15.0
90
+ - Tokenizers 0.15.0
91
+ ## Training procedure
92
+
93
+
94
+ ### Framework versions
95
+
96
+
97
+ - PEFT 0.6.2
adapter_config.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "alpha_pattern": {},
3
+ "auto_mapping": {
4
+ "base_model_class": "HubertForSequenceClassification",
5
+ "parent_library": "transformers.models.hubert.modeling_hubert"
6
+ },
7
+ "base_model_name_or_path": "ntu-spml/distilhubert",
8
+ "bias": "none",
9
+ "fan_in_fan_out": false,
10
+ "inference_mode": true,
11
+ "init_lora_weights": true,
12
+ "layers_pattern": null,
13
+ "layers_to_transform": null,
14
+ "lora_alpha": 1,
15
+ "lora_dropout": 0.0,
16
+ "modules_to_save": [
17
+ "classifier"
18
+ ],
19
+ "peft_type": "LORA",
20
+ "r": 1,
21
+ "rank_pattern": {},
22
+ "revision": null,
23
+ "target_modules": [
24
+ "output_dense",
25
+ "projector",
26
+ "out_proj",
27
+ "q_proj",
28
+ "intermediate_dense",
29
+ "k_proj",
30
+ "v_proj"
31
+ ],
32
+ "task_type": null
33
+ }
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4972889e485c53e67906a9e6e3a78133c5bdf3324e5715ac7f9dd9407a9856ba
3
+ size 128760