nttx committed (verified)
Commit 97de4e7 · Parent: 8b3d07a

Training in progress, step 1500, checkpoint

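The files below form a standard Hugging Face Trainer checkpoint: adapter weights, optimizer and scheduler state, RNG state, and the trainer state JSON. As a minimal sketch, assuming the checkpoint directory has been downloaded locally under `last-checkpoint/`, the committed trainer state can be read like this to confirm the step and metric recorded in this commit:

```python
import json

# Read the checkpoint's trainer state (the values in the comments are
# taken from the trainer_state.json diff in this commit).
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"])            # 1500
print(state["best_metric"])            # 0.7140275239944458
print(state["best_model_checkpoint"])  # miner_id_24/checkpoint-1500
```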
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5dd59cc37c01e371a95eb2d0658162ad360f53603d8c21065f49d74c625c47dd
+ oid sha256:73bfbf2ac983b311095d86ba60d91641513e9d05dde15e49e1c7b5776b2e0edf
  size 608282672
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:02634bdb3e98cef96461ceed468b5470bbe070408556bd4cfec2ab82673ca362
+ oid sha256:fb5d5875cf5c9270eaa55da9668d1ba437ccf55f2a225357857370e8929ff32f
  size 168149074
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d5b95f764d72ed6aa17d7197e22e16e0cc217530d7d3f18f36eafb2b4ceb1e32
+ oid sha256:d52950ec15e966e18aaf7209ff00250eaac59d546ed773da760f76891e528d13
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a866955cff9370cd3957339d6bf23f5ca8494fc491b0c5ef9330a9273b5d4460
+ oid sha256:1c8e87504e3807136188d9860a28acbec106160a24a8674e04169e93f2e42104
  size 1064
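The four binary files above are stored through Git LFS, so the diff shows only their pointer files (spec version, sha256 oid, and byte size). A minimal sketch, assuming the actual adapter_model.safetensors blob has been fetched locally, of checking that it matches the pointer committed here:

```python
import hashlib

# Hash the downloaded file and compare it with the sha256 oid from the
# LFS pointer committed above for adapter_model.safetensors.
def sha256_of(path: str) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "73bfbf2ac983b311095d86ba60d91641513e9d05dde15e49e1c7b5776b2e0edf"
assert sha256_of("last-checkpoint/adapter_model.safetensors") == expected
```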
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
-   "best_metric": 0.714054524898529,
-   "best_model_checkpoint": "miner_id_24/checkpoint-1350",
-   "epoch": 2.4226110363391653,
+   "best_metric": 0.7140275239944458,
+   "best_model_checkpoint": "miner_id_24/checkpoint-1500",
+   "epoch": 2.6917900403768504,
    "eval_steps": 150,
-   "global_step": 1350,
+   "global_step": 1500,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
@@ -1032,6 +1032,119 @@
        "eval_samples_per_second": 9.861,
        "eval_steps_per_second": 1.239,
        "step": 1350
+     },
+     {
+       "epoch": 2.4405563032750113,
+       "grad_norm": 5.729117393493652,
+       "learning_rate": 6.847762393717782e-07,
+       "loss": 2.9529,
+       "step": 1360
+     },
+     {
+       "epoch": 2.458501570210857,
+       "grad_norm": 3.2370126247406006,
+       "learning_rate": 5.910696692505202e-07,
+       "loss": 2.3974,
+       "step": 1370
+     },
+     {
+       "epoch": 2.4764468371467023,
+       "grad_norm": 3.672769546508789,
+       "learning_rate": 5.041267081295648e-07,
+       "loss": 2.4916,
+       "step": 1380
+     },
+     {
+       "epoch": 2.4943921040825483,
+       "grad_norm": 3.912321090698242,
+       "learning_rate": 4.239881673794166e-07,
+       "loss": 2.6173,
+       "step": 1390
+     },
+     {
+       "epoch": 2.512337371018394,
+       "grad_norm": 4.756332874298096,
+       "learning_rate": 3.5069166434870014e-07,
+       "loss": 2.8032,
+       "step": 1400
+     },
+     {
+       "epoch": 2.5302826379542394,
+       "grad_norm": 5.58614444732666,
+       "learning_rate": 2.8427160470641255e-07,
+       "loss": 2.931,
+       "step": 1410
+     },
+     {
+       "epoch": 2.5482279048900853,
+       "grad_norm": 3.3309953212738037,
+       "learning_rate": 2.2475916629177418e-07,
+       "loss": 2.5066,
+       "step": 1420
+     },
+     {
+       "epoch": 2.566173171825931,
+       "grad_norm": 3.625159502029419,
+       "learning_rate": 1.7218228447922869e-07,
+       "loss": 2.3303,
+       "step": 1430
+     },
+     {
+       "epoch": 2.5841184387617764,
+       "grad_norm": 4.057403087615967,
+       "learning_rate": 1.2656563906545903e-07,
+       "loss": 2.6747,
+       "step": 1440
+     },
+     {
+       "epoch": 2.6020637056976224,
+       "grad_norm": 4.8578877449035645,
+       "learning_rate": 8.793064268460605e-08,
+       "loss": 2.7823,
+       "step": 1450
+     },
+     {
+       "epoch": 2.620008972633468,
+       "grad_norm": 5.646084308624268,
+       "learning_rate": 5.629543075708177e-08,
+       "loss": 2.9719,
+       "step": 1460
+     },
+     {
+       "epoch": 2.6379542395693134,
+       "grad_norm": 3.3096609115600586,
+       "learning_rate": 3.1674852976734116e-08,
+       "loss": 2.5002,
+       "step": 1470
+     },
+     {
+       "epoch": 2.6558995065051594,
+       "grad_norm": 3.604172706604004,
+       "learning_rate": 1.4080466340349318e-08,
+       "loss": 2.3697,
+       "step": 1480
+     },
+     {
+       "epoch": 2.673844773441005,
+       "grad_norm": 4.091582298278809,
+       "learning_rate": 3.520529722738086e-09,
+       "loss": 2.7008,
+       "step": 1490
+     },
+     {
+       "epoch": 2.6917900403768504,
+       "grad_norm": 4.5670952796936035,
+       "learning_rate": 0.0,
+       "loss": 2.6918,
+       "step": 1500
+     },
+     {
+       "epoch": 2.6917900403768504,
+       "eval_loss": 0.7140275239944458,
+       "eval_runtime": 95.2168,
+       "eval_samples_per_second": 9.862,
+       "eval_steps_per_second": 1.239,
+       "step": 1500
      }
    ],
    "logging_steps": 10,
@@ -1055,12 +1168,12 @@
          "should_evaluate": false,
          "should_log": false,
          "should_save": true,
-         "should_training_stop": false
+         "should_training_stop": true
        },
        "attributes": {}
      }
    },
-   "total_flos": 2.50754898332536e+18,
+   "total_flos": 2.78587342489387e+18,
    "train_batch_size": 8,
    "trial_name": null,
    "trial_params": null