SamagraDataGov committed
Commit 1556c6b
1 Parent(s): d21b1c6

Model save

README.md CHANGED
@@ -1,6 +1,4 @@
 ---
-license: apache-2.0
-base_model: openai/whisper-tiny
 tags:
 - generated_from_trainer
 metrics:
@@ -15,10 +13,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # whisper-tiny-hi2_test
 
-This model is a fine-tuned version of [openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny) on the None dataset.
+This model was trained from scratch on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.6051
-- Wer: 70.0398
+- Loss: 0.6505
+- Wer: 69.9890
 
 ## Model description
 
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "openai/whisper-tiny",
+  "_name_or_path": "./whisper-tiny-hi2_test/whisper-tiny-hi-checkpoint-1",
   "activation_dropout": 0.0,
   "activation_function": "gelu",
   "apply_spec_augment": false,
runs/Jun20_15-53-41_bharatsahaiyak-test/events.out.tfevents.1718898822.bharatsahaiyak-test.584004.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0c3bea61afd7059e4bfbdc33b93fec1962be1a41673fd5707f3c3893899f6a4
+size 6041
runs/Jun20_15-53-41_bharatsahaiyak-test/events.out.tfevents.1718899378.bharatsahaiyak-test.584004.2 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66096c03089eb5b6b771e373cfb9672913aa337d5d14d2236b9451e46a12ec5a
+size 406
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8c982fa9327e2e49f4765e9f4a5be54ca2c672121610ce55b8168505a2beb3b7
+oid sha256:57320ccedcf473157fcb44c0152851471c74170811def3aa0279b4a3552d63db
 size 5240
whisper-tiny-hi-checkpoint-1/trainer_state.json CHANGED
@@ -1,6 +1,6 @@
 {
-  "best_metric": 63.60403082394783,
-  "best_model_checkpoint": "./whisper-tiny-hi2_test/checkpoint-200",
+  "best_metric": 69.98899144720129,
+  "best_model_checkpoint": "./whisper-tiny-hi2_test/checkpoint-120",
   "epoch": 6.25,
   "eval_steps": 40,
   "global_step": 200,
@@ -10,127 +10,127 @@
   "log_history": [
     {
       "epoch": 0.625,
-      "grad_norm": 10.62358283996582,
-      "learning_rate": 1e-05,
-      "loss": 2.3669,
+      "grad_norm": 10.95101547241211,
+      "learning_rate": 9.200000000000002e-06,
+      "loss": 2.3747,
       "step": 20
     },
     {
       "epoch": 1.25,
-      "grad_norm": 7.5668721199035645,
-      "learning_rate": 1e-05,
-      "loss": 1.1887,
+      "grad_norm": 7.747081279754639,
+      "learning_rate": 8.2e-06,
+      "loss": 1.2169,
       "step": 40
     },
     {
       "epoch": 1.25,
-      "eval_loss": 0.9953901171684265,
-      "eval_runtime": 571.1605,
-      "eval_samples_per_second": 1.751,
-      "eval_steps_per_second": 1.751,
-      "eval_wer": 87.29782369379288,
+      "eval_loss": 1.0360029935836792,
+      "eval_runtime": 574.5339,
+      "eval_samples_per_second": 1.741,
+      "eval_steps_per_second": 1.741,
+      "eval_wer": 91.60809552036582,
       "step": 40
     },
     {
       "epoch": 1.875,
-      "grad_norm": 6.932765960693359,
-      "learning_rate": 1e-05,
-      "loss": 0.855,
+      "grad_norm": 7.32460355758667,
+      "learning_rate": 7.2000000000000005e-06,
+      "loss": 0.9058,
       "step": 60
     },
     {
       "epoch": 2.5,
-      "grad_norm": 6.589940547943115,
-      "learning_rate": 1e-05,
-      "loss": 0.6663,
+      "grad_norm": 6.835532188415527,
+      "learning_rate": 6.200000000000001e-06,
+      "loss": 0.7302,
       "step": 80
     },
     {
       "epoch": 2.5,
-      "eval_loss": 0.6780964136123657,
-      "eval_runtime": 554.1893,
-      "eval_samples_per_second": 1.804,
-      "eval_steps_per_second": 1.804,
-      "eval_wer": 73.29155728681515,
+      "eval_loss": 0.7368654608726501,
+      "eval_runtime": 549.562,
+      "eval_samples_per_second": 1.82,
+      "eval_steps_per_second": 1.82,
+      "eval_wer": 79.80353967313066,
       "step": 80
     },
     {
       "epoch": 3.125,
-      "grad_norm": 6.102142810821533,
-      "learning_rate": 1e-05,
-      "loss": 0.5761,
+      "grad_norm": 6.408384323120117,
+      "learning_rate": 5.2e-06,
+      "loss": 0.6453,
       "step": 100
     },
     {
       "epoch": 3.75,
-      "grad_norm": 5.4204583168029785,
-      "learning_rate": 1e-05,
-      "loss": 0.5053,
+      "grad_norm": 5.809150695800781,
+      "learning_rate": 4.2000000000000004e-06,
+      "loss": 0.5917,
       "step": 120
     },
     {
       "epoch": 3.75,
-      "eval_loss": 0.5857690572738647,
-      "eval_runtime": 567.8481,
-      "eval_samples_per_second": 1.761,
-      "eval_steps_per_second": 1.761,
-      "eval_wer": 66.22914726056398,
+      "eval_loss": 0.6504533886909485,
+      "eval_runtime": 548.8688,
+      "eval_samples_per_second": 1.822,
+      "eval_steps_per_second": 1.822,
+      "eval_wer": 69.98899144720129,
       "step": 120
     },
     {
       "epoch": 4.375,
-      "grad_norm": 4.851945400238037,
-      "learning_rate": 1e-05,
-      "loss": 0.4369,
+      "grad_norm": 5.724637508392334,
+      "learning_rate": 3.2000000000000003e-06,
+      "loss": 0.5356,
       "step": 140
     },
     {
       "epoch": 5.0,
-      "grad_norm": 9.788898468017578,
-      "learning_rate": 1e-05,
-      "loss": 0.406,
+      "grad_norm": 10.124310493469238,
+      "learning_rate": 2.2e-06,
+      "loss": 0.5156,
       "step": 160
     },
     {
       "epoch": 5.0,
-      "eval_loss": 0.5440137982368469,
-      "eval_runtime": 560.2795,
-      "eval_samples_per_second": 1.785,
-      "eval_steps_per_second": 1.785,
-      "eval_wer": 66.13599796765179,
+      "eval_loss": 0.6157090663909912,
+      "eval_runtime": 549.7895,
+      "eval_samples_per_second": 1.819,
+      "eval_steps_per_second": 1.819,
+      "eval_wer": 70.4293335591498,
       "step": 160
     },
     {
       "epoch": 5.625,
-      "grad_norm": 4.664266109466553,
-      "learning_rate": 1e-05,
-      "loss": 0.356,
+      "grad_norm": 4.947811126708984,
+      "learning_rate": 1.2000000000000002e-06,
+      "loss": 0.4986,
       "step": 180
     },
     {
       "epoch": 6.25,
-      "grad_norm": 4.6183037757873535,
-      "learning_rate": 1e-05,
-      "loss": 0.3236,
+      "grad_norm": 4.541861057281494,
+      "learning_rate": 2.0000000000000002e-07,
+      "loss": 0.481,
       "step": 200
     },
     {
       "epoch": 6.25,
-      "eval_loss": 0.5290657877922058,
-      "eval_runtime": 553.81,
-      "eval_samples_per_second": 1.806,
-      "eval_steps_per_second": 1.806,
-      "eval_wer": 63.60403082394783,
+      "eval_loss": 0.6050636768341064,
+      "eval_runtime": 552.964,
+      "eval_samples_per_second": 1.808,
+      "eval_steps_per_second": 1.808,
+      "eval_wer": 70.03980015242611,
       "step": 200
     },
     {
       "epoch": 6.25,
       "step": 200,
       "total_flos": 1.5401574531072e+17,
-      "train_loss": 0.7680861353874207,
-      "train_runtime": 3626.9505,
-      "train_samples_per_second": 1.765,
-      "train_steps_per_second": 0.055
+      "train_loss": 0.8495243072509766,
+      "train_runtime": 3592.94,
+      "train_samples_per_second": 1.781,
+      "train_steps_per_second": 0.056
     }
   ],
   "logging_steps": 20,
whisper-tiny-hi-checkpoint-2/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:65486a040e9edffe3575e458f27f57a36d8b2427a1a78dbd727b53bf7ff9bc9a
+oid sha256:a59c8982089b3ffb0f35b2a8b93a329827d3603f741f4d47ddb7a17dc5a7e5e2
 size 151061672
whisper-tiny-hi-checkpoint-2/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:96ff286bdab46917ec7269a5c1949a3be1e038a7df16afe7f24c0e4862fa0eb7
+oid sha256:57320ccedcf473157fcb44c0152851471c74170811def3aa0279b4a3552d63db
 size 5240