MathildeB3 committed (verified)
Commit 9381e31 · 1 Parent(s): 4d74c36

End of training

README.md ADDED
@@ -0,0 +1,72 @@
+ ---
+ library_name: peft
+ language:
+ - fr
+ license: apache-2.0
+ base_model: openai/whisper-small
+ tags:
+ - base_model:adapter:openai/whisper-small
+ - lora
+ - transformers
+ metrics:
+ - wer
+ model-index:
+ - name: Whisper Small Fr - IMT Atlantique X 52 Hertz Full
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # Whisper Small Fr - IMT Atlantique X 52 Hertz Full
+
+ This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the FullDatabase dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.6790
+ - Wer: 0.4187
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training (a sketch of the equivalent `Seq2SeqTrainingArguments` follows this list):
+ - learning_rate: 0.001
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: AdamW (torch fused) with betas=(0.9, 0.999), epsilon=1e-08, and no additional optimizer arguments
+ - lr_scheduler_type: linear
+ - num_epochs: 3
+ - mixed_precision_training: Native AMP
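
As an illustration only, here is a minimal sketch of how these values might map onto `Seq2SeqTrainingArguments`; the output directory is a placeholder, and every argument not listed above is left at its library default:

```python
from transformers import Seq2SeqTrainingArguments

# Hedged reconstruction of the hyperparameters listed above.
# "whisper-small-fr-lora" is a placeholder output directory, not the original run's path.
training_args = Seq2SeqTrainingArguments(
    output_dir="whisper-small-fr-lora",
    learning_rate=1e-3,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    optim="adamw_torch_fused",   # AdamW, fused PyTorch implementation
    lr_scheduler_type="linear",
    num_train_epochs=3,
    fp16=True,                   # "Native AMP" mixed-precision training
)
```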
+
+ ### Training results
+
+ | Training Loss | Epoch  | Step | Validation Loss | Wer    |
+ |:-------------:|:------:|:----:|:---------------:|:------:|
+ | 1.398         | 0.4762 | 20   | 1.5259          | 0.3767 |
+ | 1.211         | 0.9524 | 40   | 1.1977          | 0.3499 |
+ | 1.7856        | 1.4286 | 60   | 1.4056          | 0.5985 |
+ | 0.6241        | 1.9048 | 80   | 0.7924          | 0.4417 |
+ | 0.8817        | 2.3810 | 100  | 0.7137          | 0.4034 |
+ | 0.2795        | 2.8571 | 120  | 0.6790          | 0.4187 |
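
For readers unfamiliar with the metric: Wer is the word error rate, so the final 0.4187 corresponds to roughly 42 errors per 100 reference words. A minimal sketch of computing it, assuming the Hugging Face `evaluate` library:

```python
import evaluate

# Word error rate = (substitutions + deletions + insertions) / reference words.
wer = evaluate.load("wer")
score = wer.compute(
    predictions=["bonjour tout le monde"],
    references=["bonjour à tout le monde"],
)
print(score)  # 0.2: one deleted word out of five reference words
```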
+
+ ### Framework versions
+
+ - PEFT 0.18.0
+ - Transformers 4.57.3
+ - Pytorch 2.9.0+cu126
+ - Datasets 4.4.1
+ - Tokenizers 0.22.1
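
The card does not yet include usage instructions; as a minimal inference sketch (the adapter path is a placeholder for a local clone of this repository, and the silent clip is a stand-in for real 16 kHz mono audio):

```python
import numpy as np
import torch
from peft import PeftModel
from transformers import WhisperForConditionalGeneration, WhisperProcessor

# Load the frozen base model, then attach this LoRA adapter on top of it.
base = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
model = PeftModel.from_pretrained(base, "path/to/adapter")  # placeholder path
processor = WhisperProcessor.from_pretrained("openai/whisper-small")

audio = np.zeros(16000, dtype=np.float32)  # stand-in: 1 s of silence at 16 kHz
inputs = processor(audio, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    ids = model.generate(input_features=inputs.input_features,
                         language="fr", task="transcribe")
print(processor.batch_decode(ids, skip_special_tokens=True)[0])
```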
adapter_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "alora_invocation_tokens": null,
+   "alpha_pattern": {},
+   "arrow_config": null,
+   "auto_mapping": {
+     "base_model_class": "WhisperForConditionalGeneration",
+     "parent_library": "transformers.models.whisper.modeling_whisper"
+   },
+   "base_model_name_or_path": "openai/whisper-small",
+   "bias": "none",
+   "corda_config": null,
+   "ensure_weight_tying": false,
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 64,
+   "lora_bias": false,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "peft_version": "0.18.0",
+   "qalora_group_size": 16,
+   "r": 32,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "target_parameters": null,
+   "task_type": null,
+   "trainable_token_indices": null,
+   "use_dora": false,
+   "use_qalora": false,
+   "use_rslora": false
+ }
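
A minimal sketch of how the key fields above translate into a `peft` training setup; fields left at `null` or their defaults in the JSON are omitted:

```python
from peft import LoraConfig, get_peft_model
from transformers import WhisperForConditionalGeneration

# Mirrors the key fields of adapter_config.json above.
lora_config = LoraConfig(
    r=32,
    lora_alpha=64,
    lora_dropout=0.05,
    target_modules=["q_proj", "v_proj"],
    bias="none",
)
base = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
model = get_peft_model(base, lora_config)
model.print_trainable_parameters()  # only the LoRA matrices on q_proj/v_proj train
```

With rank 32 applied only to the attention query and value projections, the trainable parameters come to a few million, consistent with the ~14 MB adapter file below.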
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3fd3d11721c8775d1673343e68fbd1856b4b2ebe535ccdcd552daac8d764cf4e
+ size 14176064
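
This is a Git LFS pointer, not the weights themselves; the actual ~14 MB safetensors file holds only the LoRA matrices. A sketch for inspecting it after download, assuming the `safetensors` library:

```python
from safetensors import safe_open

# List tensor names and shapes without loading everything into memory.
with safe_open("adapter_model.safetensors", framework="pt") as f:
    for name in f.keys():
        print(name, f.get_slice(name).get_shape())
```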
preprocessor_config.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "chunk_length": 30,
+   "dither": 0.0,
+   "feature_extractor_type": "WhisperFeatureExtractor",
+   "feature_size": 80,
+   "hop_length": 160,
+   "n_fft": 400,
+   "n_samples": 480000,
+   "nb_max_frames": 3000,
+   "padding_side": "right",
+   "padding_value": 0.0,
+   "processor_class": "WhisperProcessor",
+   "return_attention_mask": false,
+   "sampling_rate": 16000
+ }
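
These values are Whisper's standard front end: 80 log-mel bins over 30-second windows (480,000 samples) at 16 kHz, with a 400-point FFT and a hop of 160 samples, i.e. 100 frames per second. A minimal sketch of the resulting feature shape:

```python
import numpy as np
from transformers import WhisperFeatureExtractor

fe = WhisperFeatureExtractor.from_pretrained("openai/whisper-small")
audio = np.zeros(16000 * 5, dtype=np.float32)  # stand-in: 5 s of silence
features = fe(audio, sampling_rate=16000, return_tensors="pt").input_features
print(features.shape)  # torch.Size([1, 80, 3000]): 80 mel bins x 3000 frames (padded to 30 s)
```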
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f173de7dd3fd2ff2e9ca4bc079d5499f0fafb6a6bc2ea08324106721b8b8d664
+ size 5969
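
This file is the pickled `TrainingArguments` object that the `Trainer` saves alongside checkpoints. A sketch for inspecting it, assuming you trust the file's source (unpickling executes code, hence `weights_only=False`):

```python
import torch

# training_args.bin is a pickled transformers TrainingArguments object.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
```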