zuazo committed on
Commit 45de320
1 Parent(s): 6f27190

End of training

README.md CHANGED
@@ -1,21 +1,24 @@
 ---
+language:
+- eu
 license: apache-2.0
 base_model: openai/whisper-large
 tags:
+- whisper-event
 - generated_from_trainer
 datasets:
-- common_voice_16_1
+- mozilla-foundation/common_voice_16_1
 metrics:
 - wer
 model-index:
-- name: openai/whisper-large
+- name: Whisper Large Basque
   results:
   - task:
       name: Automatic Speech Recognition
       type: automatic-speech-recognition
     dataset:
-      name: common_voice_16_1
-      type: common_voice_16_1
+      name: mozilla-foundation/common_voice_16_1 eu
+      type: mozilla-foundation/common_voice_16_1
       config: eu
       split: test
       args: eu
@@ -28,9 +31,9 @@ model-index:
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->
 
-# openai/whisper-large
+# Whisper Large Basque
 
-This model is a fine-tuned version of [openai/whisper-large](https://huggingface.co/openai/whisper-large) on the common_voice_16_1 dataset.
+This model is a fine-tuned version of [openai/whisper-large](https://huggingface.co/openai/whisper-large) on the mozilla-foundation/common_voice_16_1 eu dataset.
 It achieves the following results on the evaluation set:
 - Loss: 0.4111
 - Wer: 8.1444
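
The updated card above describes a Whisper checkpoint fine-tuned for Basque speech recognition. As a usage illustration only, the sketch below shows how such a checkpoint could be loaded with the Hugging Face transformers pipeline; the checkpoint path `./whisper-large-eu` and the audio file `sample_eu.wav` are placeholder assumptions, not files from this commit, and the `language`/`task` generation arguments assume a reasonably recent transformers release.

```python
from transformers import pipeline

# Minimal usage sketch, assuming the fine-tuned checkpoint has been
# downloaded to "./whisper-large-eu" (placeholder path).
asr = pipeline(
    task="automatic-speech-recognition",
    model="./whisper-large-eu",
    chunk_length_s=30,  # split long recordings into 30-second chunks
)

# Transcribe a Basque recording (placeholder filename).
result = asr(
    "sample_eu.wav",
    generate_kwargs={"language": "basque", "task": "transcribe"},
)
print(result["text"])
```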
all_results.json ADDED
@@ -0,0 +1,12 @@
+{
+    "epoch": 401.51,
+    "eval_loss": 0.4111008048057556,
+    "eval_runtime": 2807.472,
+    "eval_samples_per_second": 4.539,
+    "eval_steps_per_second": 0.284,
+    "eval_wer": 8.144442707519149,
+    "train_loss": 0.0036419942842522914,
+    "train_runtime": 986578.8545,
+    "train_samples_per_second": 10.379,
+    "train_steps_per_second": 0.041
+}
eval_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 401.51,
+    "eval_loss": 0.4111008048057556,
+    "eval_runtime": 2807.472,
+    "eval_samples_per_second": 4.539,
+    "eval_steps_per_second": 0.284,
+    "eval_wer": 8.144442707519149
+}
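
The `eval_wer` value above is a word error rate expressed as a percentage. As a hedged illustration of how such a figure is conventionally computed, the sketch below uses the `evaluate` library's `wer` metric on a made-up reference/prediction pair; the example strings are hypothetical and not drawn from Common Voice.

```python
import evaluate

# Load the standard word-error-rate metric (the quantity the eval_wer
# field in eval_results.json refers to).
wer_metric = evaluate.load("wer")

# Hypothetical reference/prediction pair for illustration only.
references = ["kaixo mundua"]
predictions = ["kaixo mundu"]

score = 100 * wer_metric.compute(references=references, predictions=predictions)
print(f"WER: {score:.2f}%")  # reported as a percentage, as in the results files
```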
runs/Apr13_10-23-07_hyperion-256/events.out.tfevents.1713986174.hyperion-256 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f3c2a61c83e4e960c71e239a88f7d66f45c6c73a9a7079169a0e479587f9138
+size 40
train_results.json ADDED
@@ -0,0 +1,7 @@
+{
+    "epoch": 401.51,
+    "train_loss": 0.0036419942842522914,
+    "train_runtime": 986578.8545,
+    "train_samples_per_second": 10.379,
+    "train_steps_per_second": 0.041
+}
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff