{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.8125126955108674,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 0.0002,
      "loss": 1.8707,
      "step": 10
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0002,
      "loss": 1.7918,
      "step": 20
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0002,
      "loss": 1.741,
      "step": 30
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0002,
      "loss": 1.6859,
      "step": 40
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0002,
      "loss": 1.7982,
      "step": 50
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0002,
      "loss": 1.7449,
      "step": 60
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0002,
      "loss": 1.735,
      "step": 70
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0002,
      "loss": 1.6497,
      "step": 80
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0002,
      "loss": 1.7169,
      "step": 90
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0002,
      "loss": 1.7922,
      "step": 100
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0002,
      "loss": 1.7233,
      "step": 110
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0002,
      "loss": 1.6464,
      "step": 120
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.0002,
      "loss": 1.7568,
      "step": 130
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.0002,
      "loss": 1.7124,
      "step": 140
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.0002,
      "loss": 1.7768,
      "step": 150
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0002,
      "loss": 1.7485,
      "step": 160
    },
    {
      "epoch": 0.28,
      "learning_rate": 0.0002,
      "loss": 1.6739,
      "step": 170
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.0002,
      "loss": 1.6386,
      "step": 180
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0002,
      "loss": 1.7529,
      "step": 190
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.0002,
      "loss": 1.7624,
      "step": 200
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.0002,
      "loss": 1.7303,
      "step": 210
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.0002,
      "loss": 1.6326,
      "step": 220
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.0002,
      "loss": 1.6637,
      "step": 230
    },
    {
      "epoch": 0.39,
      "learning_rate": 0.0002,
      "loss": 1.6883,
      "step": 240
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.0002,
      "loss": 1.7608,
      "step": 250
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.0002,
      "loss": 1.6986,
      "step": 260
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0002,
      "loss": 1.6054,
      "step": 270
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.0002,
      "loss": 1.6785,
      "step": 280
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.0002,
      "loss": 1.6308,
      "step": 290
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.0002,
      "loss": 1.884,
      "step": 300
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.0002,
      "loss": 1.6899,
      "step": 310
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.0002,
      "loss": 1.7043,
      "step": 320
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.0002,
      "loss": 1.6413,
      "step": 330
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.0002,
      "loss": 1.7365,
      "step": 340
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.0002,
      "loss": 1.676,
      "step": 350
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.0002,
      "loss": 1.6738,
      "step": 360
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.0002,
      "loss": 1.7175,
      "step": 370
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.0002,
      "loss": 1.6143,
      "step": 380
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.0002,
      "loss": 1.6481,
      "step": 390
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.0002,
      "loss": 1.8566,
      "step": 400
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.0002,
      "loss": 1.741,
      "step": 410
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.0002,
      "loss": 1.6964,
      "step": 420
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.0002,
      "loss": 1.6211,
      "step": 430
    },
    {
      "epoch": 0.72,
      "learning_rate": 0.0002,
      "loss": 1.6425,
      "step": 440
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.0002,
      "loss": 1.7595,
      "step": 450
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.0002,
      "loss": 1.6,
      "step": 460
    },
    {
      "epoch": 0.76,
      "learning_rate": 0.0002,
      "loss": 1.606,
      "step": 470
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.0002,
      "loss": 1.6649,
      "step": 480
    },
    {
      "epoch": 0.8,
      "learning_rate": 0.0002,
      "loss": 1.6447,
      "step": 490
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.0002,
      "loss": 1.7634,
      "step": 500
    }
  ],
  "logging_steps": 10,
  "max_steps": 500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 3.293667738832896e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}