mikhail-panzo committed on
Commit 9060f4e
1 Parent(s): e0a2156

Training in progress, step 7500, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cce8730ec7977cf6faded9c8a64710dd40c64e82312111fdae82ba0e37b6fe02
+oid sha256:f03919594e15aa04dbb8008cf60a2a2d5b89be3490f892590197689b8410a661
 size 577789320
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7d0bacbf128b4eb958fabc358356f531f65f6bd424a2a9be5add80fa30ab7cff
+oid sha256:77e4c693ccaf9249bd98af9f184a00d5eb2f36b77cb0e433fbc26661d80daa72
 size 1155772233
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a59b056016e351429f719aaf02cc6fa4544a2d92d2a3d69beeeb56674b12a1f2
+oid sha256:d880acef54d4eb61cf44bc2962e1f38bf44896cefc09a0303d9df0b77a1ae5c2
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a48d9034d2ce2771f0a840f8364e39645d6f213d3858f7f5342b741dc49975d2
+oid sha256:b5cdfe5ed4f14bdadfee62402701e9c3c91a7e1b8246c7c7f0be536b67574fb3
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.31611359119415283,
-  "best_model_checkpoint": "mikhail_panzo/zlm_b128_le4_s8000/checkpoint-7000",
-  "epoch": 11.727748691099476,
+  "best_metric": 0.3158954679965973,
+  "best_model_checkpoint": "mikhail_panzo/zlm_b128_le4_s8000/checkpoint-7500",
+  "epoch": 12.565445026178011,
   "eval_steps": 500,
-  "global_step": 7000,
+  "global_step": 7500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1099,6 +1099,84 @@
       "eval_samples_per_second": 31.262,
       "eval_steps_per_second": 3.911,
       "step": 7000
+    },
+    {
+      "epoch": 11.81151832460733,
+      "grad_norm": 0.9007428288459778,
+      "learning_rate": 1.586666666666667e-05,
+      "loss": 0.3461,
+      "step": 7050
+    },
+    {
+      "epoch": 11.895287958115183,
+      "grad_norm": 1.1172393560409546,
+      "learning_rate": 1.5033333333333336e-05,
+      "loss": 0.3489,
+      "step": 7100
+    },
+    {
+      "epoch": 11.979057591623036,
+      "grad_norm": 1.0454591512680054,
+      "learning_rate": 1.42e-05,
+      "loss": 0.3429,
+      "step": 7150
+    },
+    {
+      "epoch": 12.06282722513089,
+      "grad_norm": 1.1494097709655762,
+      "learning_rate": 1.3383333333333335e-05,
+      "loss": 0.3523,
+      "step": 7200
+    },
+    {
+      "epoch": 12.146596858638743,
+      "grad_norm": 0.8459360003471375,
+      "learning_rate": 1.255e-05,
+      "loss": 0.3469,
+      "step": 7250
+    },
+    {
+      "epoch": 12.230366492146597,
+      "grad_norm": 1.034494161605835,
+      "learning_rate": 1.1716666666666667e-05,
+      "loss": 0.3451,
+      "step": 7300
+    },
+    {
+      "epoch": 12.31413612565445,
+      "grad_norm": 1.1340311765670776,
+      "learning_rate": 1.0883333333333335e-05,
+      "loss": 0.3441,
+      "step": 7350
+    },
+    {
+      "epoch": 12.397905759162304,
+      "grad_norm": 1.4063786268234253,
+      "learning_rate": 1.005e-05,
+      "loss": 0.3472,
+      "step": 7400
+    },
+    {
+      "epoch": 12.481675392670157,
+      "grad_norm": 1.271904468536377,
+      "learning_rate": 9.216666666666666e-06,
+      "loss": 0.3458,
+      "step": 7450
+    },
+    {
+      "epoch": 12.565445026178011,
+      "grad_norm": 1.2097103595733643,
+      "learning_rate": 8.383333333333333e-06,
+      "loss": 0.3443,
+      "step": 7500
+    },
+    {
+      "epoch": 12.565445026178011,
+      "eval_loss": 0.3158954679965973,
+      "eval_runtime": 271.3497,
+      "eval_samples_per_second": 31.284,
+      "eval_steps_per_second": 3.914,
+      "step": 7500
     }
   ],
   "logging_steps": 50,
@@ -1118,7 +1196,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.2541009593096864e+17,
+  "total_flos": 1.3437364892618304e+17,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null