besimray committed
Commit 9faa8cc · verified · 1 Parent(s): 1bc0c20

Training in progress, step 20, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:63c747cb54076e533c765903acd04e88622a003034c7b31bab3a8303f1954618
+oid sha256:ba896c1a9b2aec35506899b83c844fa6d37c25a53de68bf0ae11832102123721
 size 90207248

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9509e530880c8ca648132cfa13968afbafbdd353dc79758160a4c504660a23ea
+oid sha256:b97e7311cd8459bf41e63a7e256e80ed90a909d8af14bff0d388642269ffcd81
 size 46057082

last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3d06bafe6b7413a64b1b2b7501fafe281fdadc6b8d8a7d2b8d42c504a96de904
+oid sha256:9d8414ba48354825ad20b0a3d80cc23ff1d239366d3e0da53cb9bbe2c2455ccc
 size 14244

last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bb578e75c11a81e85dda67a691f96ba4793a02960f1409fd3e1511aac873491a
+oid sha256:321b462a2538632d6d720f0cf198c8f471dee11f51db9b50cc50d1fa7f132bbe
 size 1064

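Each of the files above is stored with Git LFS, so the diff only records the pointer (spec version, sha256 oid, and byte size) rather than the binary contents; only the oid changes at the new checkpoint while the sizes stay the same. A minimal sketch, assuming the checkpoint has been downloaded locally, for checking a file against the new oid from the adapter_model.safetensors pointer:

import hashlib

# Verify that a downloaded checkpoint file matches the "oid sha256:..."
# recorded in its Git LFS pointer. Path and expected digest are taken from
# the last-checkpoint/adapter_model.safetensors diff above.
def sha256_of(path, chunk_size=1 << 20):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "ba896c1a9b2aec35506899b83c844fa6d37c25a53de68bf0ae11832102123721"
actual = sha256_of("last-checkpoint/adapter_model.safetensors")
assert actual == expected, f"hash mismatch: {actual}"
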
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 2.1240005493164062,
-  "best_model_checkpoint": "miner_id_24/checkpoint-10",
-  "epoch": 0.0028238616307800918,
+  "best_metric": 1.7092158794403076,
+  "best_model_checkpoint": "miner_id_24/checkpoint-20",
+  "epoch": 0.0056477232615601836,
   "eval_steps": 10,
-  "global_step": 10,
+  "global_step": 20,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -93,6 +93,84 @@
       "eval_samples_per_second": 5.579,
       "eval_steps_per_second": 5.579,
       "step": 10
+    },
+    {
+      "epoch": 0.003106247793858101,
+      "grad_norm": 1.0237597227096558,
+      "learning_rate": 0.0001999979446958366,
+      "loss": 1.8844,
+      "step": 11
+    },
+    {
+      "epoch": 0.00338863395693611,
+      "grad_norm": 4.726717472076416,
+      "learning_rate": 0.00019999177886783194,
+      "loss": 3.1172,
+      "step": 12
+    },
+    {
+      "epoch": 0.0036710201200141194,
+      "grad_norm": 1.6403062343597412,
+      "learning_rate": 0.00019998150276943902,
+      "loss": 1.7007,
+      "step": 13
+    },
+    {
+      "epoch": 0.003953406283092128,
+      "grad_norm": 2.1164722442626953,
+      "learning_rate": 0.000199967116823068,
+      "loss": 2.5321,
+      "step": 14
+    },
+    {
+      "epoch": 0.004235792446170138,
+      "grad_norm": 3.5340867042541504,
+      "learning_rate": 0.0001999486216200688,
+      "loss": 2.3608,
+      "step": 15
+    },
+    {
+      "epoch": 0.004518178609248147,
+      "grad_norm": 5.55496072769165,
+      "learning_rate": 0.00019992601792070679,
+      "loss": 1.6141,
+      "step": 16
+    },
+    {
+      "epoch": 0.004800564772326156,
+      "grad_norm": 7.0048136711120605,
+      "learning_rate": 0.00019989930665413147,
+      "loss": 2.2728,
+      "step": 17
+    },
+    {
+      "epoch": 0.005082950935404165,
+      "grad_norm": 1.7807990312576294,
+      "learning_rate": 0.00019986848891833845,
+      "loss": 1.6759,
+      "step": 18
+    },
+    {
+      "epoch": 0.005365337098482174,
+      "grad_norm": 3.1774418354034424,
+      "learning_rate": 0.0001998335659801241,
+      "loss": 0.8362,
+      "step": 19
+    },
+    {
+      "epoch": 0.0056477232615601836,
+      "grad_norm": 5.73573637008667,
+      "learning_rate": 0.00019979453927503364,
+      "loss": 2.7934,
+      "step": 20
+    },
+    {
+      "epoch": 0.0056477232615601836,
+      "eval_loss": 1.7092158794403076,
+      "eval_runtime": 133.3036,
+      "eval_samples_per_second": 5.596,
+      "eval_steps_per_second": 5.596,
+      "step": 20
     }
   ],
   "logging_steps": 1,
@@ -121,7 +199,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 978803585187840.0,
+  "total_flos": 1957607170375680.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
