SamagraDataGov committed
Commit 5fe4b08 (1 parent: 1556c6b)

Model save

config.json CHANGED
@@ -1,5 +1,5 @@
  {
- "_name_or_path": "./whisper-tiny-hi2_test/whisper-tiny-hi-checkpoint-1",
+ "_name_or_path": "./whisper-tiny-hi2_test/whisper-tiny-hi-checkpoint-2",
  "activation_dropout": 0.0,
  "activation_function": "gelu",
  "apply_spec_augment": false,
runs/Jun20_16-03-30_bharatsahaiyak-test/events.out.tfevents.1718899410.bharatsahaiyak-test.584004.3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d736509209f05e11f32fb53771eb5ae9685748b90d5a424b9dee1eb4bb8a12e8
+ size 6041
runs/Jun20_16-03-30_bharatsahaiyak-test/events.out.tfevents.1718899966.bharatsahaiyak-test.584004.4 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6215965a67ec2a2a36bb46a58de7e0014aa9eef5c41a43aa4e8ad00b4a258ee4
+ size 406
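
The tfevents entries added here (like the .bin and .safetensors files below) are stored as Git LFS pointer files: a three-line stub whose oid is the SHA-256 of the actual blob and whose size is its length in bytes. As a minimal sketch (file paths are hypothetical), a pointer can be checked against a downloaded blob like this:

# Minimal sketch: verify a local file against a Git LFS pointer stub.
# Paths are hypothetical; the pointer format is the version/oid/size
# layout shown in the diff above.
import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    fields = dict(
        line.split(" ", 1)
        for line in Path(pointer_path).read_text().splitlines()
        if line.strip()
    )
    expected_oid = fields["oid"].removeprefix("sha256:").strip()
    expected_size = int(fields["size"])
    data = Path(blob_path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

# Example (hypothetical local paths):
# verify_lfs_pointer("events.out.tfevents.pointer", "events.out.tfevents.blob")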
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:57320ccedcf473157fcb44c0152851471c74170811def3aa0279b4a3552d63db
+ oid sha256:cf420e0b38b82d46a55cd7ad07aeb13abc779974b6f703a2306e4deb9f513f5b
  size 5240
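
training_args.bin is the pickled TrainingArguments object the Hugging Face Trainer writes alongside a run; only its LFS hash changed here, which suggests the run was relaunched with different argument values (consistent with the learning-rate change visible in trainer_state.json below). A minimal sketch of inspecting it, assuming the file has been pulled locally and you trust its contents (unpickling arbitrary files is unsafe):

# Minimal sketch: inspect the pickled TrainingArguments.
# Path is hypothetical; weights_only=False is needed because this is a
# pickled Python object, so only load files you trust.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.lr_scheduler_type, args.num_train_epochs)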
whisper-tiny-hi-checkpoint-2/trainer_state.json CHANGED
@@ -1,6 +1,6 @@
  {
- "best_metric": 63.60403082394783,
- "best_model_checkpoint": "./whisper-tiny-hi2_test/checkpoint-200",
+ "best_metric": 69.98899144720129,
+ "best_model_checkpoint": "./whisper-tiny-hi2_test/checkpoint-120",
  "epoch": 6.28125,
  "eval_steps": 40,
  "global_step": 201,
@@ -10,144 +10,144 @@
  "log_history": [
  {
  "epoch": 0.625,
- "grad_norm": 10.62358283996582,
- "learning_rate": 1e-05,
- "loss": 2.3669,
+ "grad_norm": 10.95101547241211,
+ "learning_rate": 9.200000000000002e-06,
+ "loss": 2.3747,
  "step": 20
  },
  {
  "epoch": 1.25,
- "grad_norm": 7.5668721199035645,
- "learning_rate": 1e-05,
- "loss": 1.1887,
+ "grad_norm": 7.747081279754639,
+ "learning_rate": 8.2e-06,
+ "loss": 1.2169,
  "step": 40
  },
  {
  "epoch": 1.25,
- "eval_loss": 0.9953901171684265,
- "eval_runtime": 571.1605,
- "eval_samples_per_second": 1.751,
- "eval_steps_per_second": 1.751,
- "eval_wer": 87.29782369379288,
+ "eval_loss": 1.0360029935836792,
+ "eval_runtime": 574.5339,
+ "eval_samples_per_second": 1.741,
+ "eval_steps_per_second": 1.741,
+ "eval_wer": 91.60809552036582,
  "step": 40
  },
  {
  "epoch": 1.875,
- "grad_norm": 6.932765960693359,
- "learning_rate": 1e-05,
- "loss": 0.855,
+ "grad_norm": 7.32460355758667,
+ "learning_rate": 7.2000000000000005e-06,
+ "loss": 0.9058,
  "step": 60
  },
  {
  "epoch": 2.5,
- "grad_norm": 6.589940547943115,
- "learning_rate": 1e-05,
- "loss": 0.6663,
+ "grad_norm": 6.835532188415527,
+ "learning_rate": 6.200000000000001e-06,
+ "loss": 0.7302,
  "step": 80
  },
  {
  "epoch": 2.5,
- "eval_loss": 0.6780964136123657,
- "eval_runtime": 554.1893,
- "eval_samples_per_second": 1.804,
- "eval_steps_per_second": 1.804,
- "eval_wer": 73.29155728681515,
+ "eval_loss": 0.7368654608726501,
+ "eval_runtime": 549.562,
+ "eval_samples_per_second": 1.82,
+ "eval_steps_per_second": 1.82,
+ "eval_wer": 79.80353967313066,
  "step": 80
  },
  {
  "epoch": 3.125,
- "grad_norm": 6.102142810821533,
- "learning_rate": 1e-05,
- "loss": 0.5761,
+ "grad_norm": 6.408384323120117,
+ "learning_rate": 5.2e-06,
+ "loss": 0.6453,
  "step": 100
  },
  {
  "epoch": 3.75,
- "grad_norm": 5.4204583168029785,
- "learning_rate": 1e-05,
- "loss": 0.5053,
+ "grad_norm": 5.809150695800781,
+ "learning_rate": 4.2000000000000004e-06,
+ "loss": 0.5917,
  "step": 120
  },
  {
  "epoch": 3.75,
- "eval_loss": 0.5857690572738647,
- "eval_runtime": 567.8481,
- "eval_samples_per_second": 1.761,
- "eval_steps_per_second": 1.761,
- "eval_wer": 66.22914726056398,
+ "eval_loss": 0.6504533886909485,
+ "eval_runtime": 548.8688,
+ "eval_samples_per_second": 1.822,
+ "eval_steps_per_second": 1.822,
+ "eval_wer": 69.98899144720129,
  "step": 120
  },
  {
  "epoch": 4.375,
- "grad_norm": 4.851945400238037,
- "learning_rate": 1e-05,
- "loss": 0.4369,
+ "grad_norm": 5.724637508392334,
+ "learning_rate": 3.2000000000000003e-06,
+ "loss": 0.5356,
  "step": 140
  },
  {
  "epoch": 5.0,
- "grad_norm": 9.788898468017578,
- "learning_rate": 1e-05,
- "loss": 0.406,
+ "grad_norm": 10.124310493469238,
+ "learning_rate": 2.2e-06,
+ "loss": 0.5156,
  "step": 160
  },
  {
  "epoch": 5.0,
- "eval_loss": 0.5440137982368469,
- "eval_runtime": 560.2795,
- "eval_samples_per_second": 1.785,
- "eval_steps_per_second": 1.785,
- "eval_wer": 66.13599796765179,
+ "eval_loss": 0.6157090663909912,
+ "eval_runtime": 549.7895,
+ "eval_samples_per_second": 1.819,
+ "eval_steps_per_second": 1.819,
+ "eval_wer": 70.4293335591498,
  "step": 160
  },
  {
  "epoch": 5.625,
- "grad_norm": 4.664266109466553,
- "learning_rate": 1e-05,
- "loss": 0.356,
+ "grad_norm": 4.947811126708984,
+ "learning_rate": 1.2000000000000002e-06,
+ "loss": 0.4986,
  "step": 180
  },
  {
  "epoch": 6.25,
- "grad_norm": 4.6183037757873535,
- "learning_rate": 1e-05,
- "loss": 0.3236,
+ "grad_norm": 4.541861057281494,
+ "learning_rate": 2.0000000000000002e-07,
+ "loss": 0.481,
  "step": 200
  },
  {
  "epoch": 6.25,
- "eval_loss": 0.5290657877922058,
- "eval_runtime": 553.81,
- "eval_samples_per_second": 1.806,
- "eval_steps_per_second": 1.806,
- "eval_wer": 63.60403082394783,
+ "eval_loss": 0.6050636768341064,
+ "eval_runtime": 552.964,
+ "eval_samples_per_second": 1.808,
+ "eval_steps_per_second": 1.808,
+ "eval_wer": 70.03980015242611,
  "step": 200
  },
  {
  "epoch": 6.25,
  "step": 200,
  "total_flos": 1.5401574531072e+17,
- "train_loss": 0.7680861353874207,
- "train_runtime": 3626.9505,
- "train_samples_per_second": 1.765,
- "train_steps_per_second": 0.055
+ "train_loss": 0.8495243072509766,
+ "train_runtime": 3592.94,
+ "train_samples_per_second": 1.781,
+ "train_steps_per_second": 0.056
  },
  {
  "epoch": 6.28125,
  "step": 201,
  "total_flos": 1.5480354963456e+17,
- "train_loss": 0.002653346132876268,
- "train_runtime": 7.7901,
- "train_samples_per_second": 821.558,
- "train_steps_per_second": 25.674
+ "train_loss": 0.003230639654605543,
+ "train_runtime": 7.5043,
+ "train_samples_per_second": 852.844,
+ "train_steps_per_second": 26.651
  },
  {
  "epoch": 6.28125,
- "eval_loss": 0.5290657877922058,
- "eval_runtime": 551.3177,
- "eval_samples_per_second": 1.814,
- "eval_steps_per_second": 1.814,
- "eval_wer": 63.60403082394783,
+ "eval_loss": 0.6504533886909485,
+ "eval_runtime": 548.5863,
+ "eval_samples_per_second": 1.823,
+ "eval_steps_per_second": 1.823,
+ "eval_wer": 69.98899144720129,
  "step": 201
  }
  ],
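
Comparing the two versions of trainer_state.json: the earlier state logged a constant learning rate of 1e-05 and recorded best_metric 63.60 (eval WER) at checkpoint-200, while the new state logs a decaying learning rate (9.2e-06 down to 2e-07) and records best_metric 69.99 at checkpoint-120. A minimal sketch (hypothetical local path) of pulling the eval curve out of such a file for comparison:

# Minimal sketch: list eval WER and loss per step from trainer_state.json.
# The path is hypothetical; the structure (log_history entries with eval_*
# keys) matches the file shown in the diff above.
import json

with open("whisper-tiny-hi-checkpoint-2/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_wer" in entry:
        print(entry["step"], entry["eval_wer"], entry["eval_loss"])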
whisper-tiny-hi-checkpoint-3/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:65486a040e9edffe3575e458f27f57a36d8b2427a1a78dbd727b53bf7ff9bc9a
+ oid sha256:a59c8982089b3ffb0f35b2a8b93a329827d3603f741f4d47ddb7a17dc5a7e5e2
  size 151061672
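
The checkpoint-3 weights changed (new SHA-256) while the blob size stayed at 151061672 bytes, as expected when only tensor values are updated. A minimal sketch (hypothetical local path, requires the safetensors and torch packages) of listing what such a file contains:

# Minimal sketch: enumerate tensors in a safetensors checkpoint.
# The path is hypothetical and assumed to be the pulled LFS blob.
from safetensors import safe_open

with safe_open("whisper-tiny-hi-checkpoint-3/model.safetensors", framework="pt") as f:
    for name in f.keys():
        tensor = f.get_tensor(name)
        print(name, tuple(tensor.shape), tensor.dtype)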
whisper-tiny-hi-checkpoint-3/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8e0770d9b45c8db56eef68e29c4665dbf1340c456a4a2f2cbdf47eb81e643cb7
+ oid sha256:cf420e0b38b82d46a55cd7ad07aeb13abc779974b6f703a2306e4deb9f513f5b
  size 5240
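
Putting the pieces together: once the LFS blobs above are pulled locally, the checkpoint referenced in the config.json diff can be reloaded with transformers. A minimal sketch (the checkpoint path comes from that diff and is assumed to exist locally):

# Minimal sketch; requires the transformers package and assumes the
# checkpoint directory from the config.json diff exists locally with
# its LFS blobs pulled.
from transformers import WhisperForConditionalGeneration

checkpoint_dir = "./whisper-tiny-hi2_test/whisper-tiny-hi-checkpoint-2"
model = WhisperForConditionalGeneration.from_pretrained(checkpoint_dir)
print(model.config._name_or_path, sum(p.numel() for p in model.parameters()))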