besimray committed commit 8cba6fc (verified)
Parent(s): 0f5dbbc

Training in progress, step 20, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ce55eae5143e44bedba21839a287fdaefe81d278047872f7b15f365c553baa33
+oid sha256:c6b7b374bccaa753c8998ccbefe4d5ea4bd82d8ce717af4cbde591e21261f3ff
 size 335604696
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:068337dbbdc24c920314550b4d297f12930a9da549f71535723634dc8925ac56
+oid sha256:22e65d0405190cd72d8b0ecd23e5632449682712f04f3c76f8a82ddbc5518b47
 size 170920084
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fc1ac17452726e5cedfbf9cc2e40377e5a6129f49113fea151b529c4b6d68216
+oid sha256:3ec7fddaa93b8a5104562fd596c16565abf5deb95425dbc4839aa86c8fa34863
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bb578e75c11a81e85dda67a691f96ba4793a02960f1409fd3e1511aac873491a
+oid sha256:321b462a2538632d6d720f0cf198c8f471dee11f51db9b50cc50d1fa7f132bbe
 size 1064
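
Each of the four binary files above (adapter weights, optimizer, RNG and scheduler state) is tracked with Git LFS, so the diff only touches a three-line pointer file (spec version, SHA-256 object id, byte size); the actual blobs, e.g. the ~335 MB adapter, live in LFS storage. Only the oid lines change in this commit; each file keeps the same byte size. As a rough illustration of what such a pointer contains (the helper parse_lfs_pointer is hypothetical, not part of this repo):

# Rough sketch (hypothetical helper): parsing a Git LFS pointer file like the ones diffed above.
# Each pointer is three "key value" lines: version, oid, size.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:c6b7b374bccaa753c8998ccbefe4d5ea4bd82d8ce717af4cbde591e21261f3ff\n"
    "size 335604696\n"
)
print(parse_lfs_pointer(pointer)["size_bytes"])  # 335604696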
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.7966330051422119,
-  "best_model_checkpoint": "miner_id_24/checkpoint-10",
-  "epoch": 0.02386634844868735,
+  "best_metric": 0.7479696273803711,
+  "best_model_checkpoint": "miner_id_24/checkpoint-20",
+  "epoch": 0.0477326968973747,
   "eval_steps": 5,
-  "global_step": 10,
+  "global_step": 20,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -101,6 +101,92 @@
       "eval_samples_per_second": 1.99,
       "eval_steps_per_second": 1.0,
       "step": 10
+    },
+    {
+      "epoch": 0.026252983293556086,
+      "grad_norm": 1.3630088567733765,
+      "learning_rate": 0.0001999979446958366,
+      "loss": 0.8378,
+      "step": 11
+    },
+    {
+      "epoch": 0.028639618138424822,
+      "grad_norm": 0.4510791301727295,
+      "learning_rate": 0.00019999177886783194,
+      "loss": 0.8985,
+      "step": 12
+    },
+    {
+      "epoch": 0.031026252983293555,
+      "grad_norm": 0.5864933729171753,
+      "learning_rate": 0.00019998150276943902,
+      "loss": 0.6781,
+      "step": 13
+    },
+    {
+      "epoch": 0.03341288782816229,
+      "grad_norm": 0.423950731754303,
+      "learning_rate": 0.000199967116823068,
+      "loss": 0.5872,
+      "step": 14
+    },
+    {
+      "epoch": 0.03579952267303103,
+      "grad_norm": 0.5833825469017029,
+      "learning_rate": 0.0001999486216200688,
+      "loss": 0.7589,
+      "step": 15
+    },
+    {
+      "epoch": 0.03579952267303103,
+      "eval_loss": 0.7643417119979858,
+      "eval_runtime": 88.8878,
+      "eval_samples_per_second": 1.991,
+      "eval_steps_per_second": 1.001,
+      "step": 15
+    },
+    {
+      "epoch": 0.03818615751789976,
+      "grad_norm": 0.368539035320282,
+      "learning_rate": 0.00019992601792070679,
+      "loss": 0.684,
+      "step": 16
+    },
+    {
+      "epoch": 0.0405727923627685,
+      "grad_norm": 0.4044187664985657,
+      "learning_rate": 0.00019989930665413147,
+      "loss": 0.7617,
+      "step": 17
+    },
+    {
+      "epoch": 0.04295942720763723,
+      "grad_norm": 0.590376079082489,
+      "learning_rate": 0.00019986848891833845,
+      "loss": 0.7967,
+      "step": 18
+    },
+    {
+      "epoch": 0.045346062052505964,
+      "grad_norm": 0.3635483384132385,
+      "learning_rate": 0.0001998335659801241,
+      "loss": 0.5567,
+      "step": 19
+    },
+    {
+      "epoch": 0.0477326968973747,
+      "grad_norm": 0.5464037656784058,
+      "learning_rate": 0.00019979453927503364,
+      "loss": 0.6186,
+      "step": 20
+    },
+    {
+      "epoch": 0.0477326968973747,
+      "eval_loss": 0.7479696273803711,
+      "eval_runtime": 88.8942,
+      "eval_samples_per_second": 1.991,
+      "eval_steps_per_second": 1.001,
+      "step": 20
     }
   ],
   "logging_steps": 1,
@@ -129,7 +215,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.492020904329216e+16,
+  "total_flos": 2.984041808658432e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null