{
  "best_metric": 0.6467951536178589,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.007600589045651038,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00015201178091302076,
      "grad_norm": 3.6593680381774902,
      "learning_rate": 5e-05,
      "loss": 10.9362,
      "step": 1
    },
    {
      "epoch": 0.00015201178091302076,
      "eval_loss": 2.270036220550537,
      "eval_runtime": 2217.9223,
      "eval_samples_per_second": 19.982,
      "eval_steps_per_second": 2.498,
      "step": 1
    },
    {
      "epoch": 0.0003040235618260415,
      "grad_norm": 4.245506763458252,
      "learning_rate": 0.0001,
      "loss": 12.8607,
      "step": 2
    },
    {
      "epoch": 0.00045603534273906226,
      "grad_norm": 4.411743640899658,
      "learning_rate": 9.989294616193017e-05,
      "loss": 13.5941,
      "step": 3
    },
    {
      "epoch": 0.000608047123652083,
      "grad_norm": 5.3891096115112305,
      "learning_rate": 9.957224306869053e-05,
      "loss": 14.3746,
      "step": 4
    },
    {
      "epoch": 0.0007600589045651038,
      "grad_norm": 7.611150741577148,
      "learning_rate": 9.903926402016153e-05,
      "loss": 16.8511,
      "step": 5
    },
    {
      "epoch": 0.0009120706854781245,
      "grad_norm": 7.441198348999023,
      "learning_rate": 9.829629131445342e-05,
      "loss": 16.271,
      "step": 6
    },
    {
      "epoch": 0.0010640824663911454,
      "grad_norm": 8.687851905822754,
      "learning_rate": 9.73465064747553e-05,
      "loss": 16.6546,
      "step": 7
    },
    {
      "epoch": 0.001216094247304166,
      "grad_norm": 18.38853645324707,
      "learning_rate": 9.619397662556435e-05,
      "loss": 19.7066,
      "step": 8
    },
    {
      "epoch": 0.0013681060282171868,
      "grad_norm": 55.841705322265625,
      "learning_rate": 9.484363707663442e-05,
      "loss": 27.5424,
      "step": 9
    },
    {
      "epoch": 0.0015201178091302076,
      "grad_norm": 42.36283874511719,
      "learning_rate": 9.330127018922194e-05,
      "loss": 25.9464,
      "step": 10
    },
    {
      "epoch": 0.0016721295900432283,
      "grad_norm": 37.95155715942383,
      "learning_rate": 9.157348061512727e-05,
      "loss": 25.4448,
      "step": 11
    },
    {
      "epoch": 0.001824141370956249,
      "grad_norm": 23.544416427612305,
      "learning_rate": 8.966766701456177e-05,
      "loss": 19.4115,
      "step": 12
    },
    {
      "epoch": 0.00197615315186927,
      "grad_norm": 6.003608226776123,
      "learning_rate": 8.759199037394887e-05,
      "loss": 11.717,
      "step": 13
    },
    {
      "epoch": 0.0021281649327822907,
      "grad_norm": 1.9133445024490356,
      "learning_rate": 8.535533905932738e-05,
      "loss": 8.8507,
      "step": 14
    },
    {
      "epoch": 0.0022801767136953113,
      "grad_norm": 1.9236044883728027,
      "learning_rate": 8.296729075500344e-05,
      "loss": 9.9265,
      "step": 15
    },
    {
      "epoch": 0.002432188494608332,
      "grad_norm": 2.067864179611206,
      "learning_rate": 8.043807145043604e-05,
      "loss": 10.75,
      "step": 16
    },
    {
      "epoch": 0.0025842002755213527,
      "grad_norm": 2.5621371269226074,
      "learning_rate": 7.777851165098012e-05,
      "loss": 11.0612,
      "step": 17
    },
    {
      "epoch": 0.0027362120564343737,
      "grad_norm": 4.215025901794434,
      "learning_rate": 7.500000000000001e-05,
      "loss": 12.8646,
      "step": 18
    },
    {
      "epoch": 0.0028882238373473946,
      "grad_norm": 5.507603168487549,
      "learning_rate": 7.211443451095007e-05,
      "loss": 11.8597,
      "step": 19
    },
    {
      "epoch": 0.003040235618260415,
      "grad_norm": 6.5292134284973145,
      "learning_rate": 6.91341716182545e-05,
      "loss": 11.8724,
      "step": 20
    },
    {
      "epoch": 0.003192247399173436,
      "grad_norm": 12.831018447875977,
      "learning_rate": 6.607197326515808e-05,
      "loss": 11.5659,
      "step": 21
    },
    {
      "epoch": 0.0033442591800864566,
      "grad_norm": 11.646411895751953,
      "learning_rate": 6.294095225512603e-05,
      "loss": 9.7654,
      "step": 22
    },
    {
      "epoch": 0.0034962709609994776,
      "grad_norm": 14.624390602111816,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 11.7632,
      "step": 23
    },
    {
      "epoch": 0.003648282741912498,
      "grad_norm": 17.542835235595703,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 12.8791,
      "step": 24
    },
    {
      "epoch": 0.003800294522825519,
      "grad_norm": 9.677604675292969,
      "learning_rate": 5.327015646150716e-05,
      "loss": 12.1658,
      "step": 25
    },
    {
      "epoch": 0.003800294522825519,
      "eval_loss": 0.6706960797309875,
      "eval_runtime": 2218.0335,
      "eval_samples_per_second": 19.981,
      "eval_steps_per_second": 2.498,
      "step": 25
    },
    {
      "epoch": 0.00395230630373854,
      "grad_norm": 1.5809613466262817,
      "learning_rate": 5e-05,
      "loss": 8.0624,
      "step": 26
    },
    {
      "epoch": 0.00410431808465156,
      "grad_norm": 1.5702935457229614,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 9.5761,
      "step": 27
    },
    {
      "epoch": 0.0042563298655645815,
      "grad_norm": 1.5925304889678955,
      "learning_rate": 4.347369038899744e-05,
      "loss": 9.387,
      "step": 28
    },
    {
      "epoch": 0.004408341646477602,
      "grad_norm": 1.8622325658798218,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 9.7713,
      "step": 29
    },
    {
      "epoch": 0.0045603534273906225,
      "grad_norm": 2.6215012073516846,
      "learning_rate": 3.705904774487396e-05,
      "loss": 10.6637,
      "step": 30
    },
    {
      "epoch": 0.004712365208303644,
      "grad_norm": 2.9679672718048096,
      "learning_rate": 3.392802673484193e-05,
      "loss": 11.3137,
      "step": 31
    },
    {
      "epoch": 0.004864376989216664,
      "grad_norm": 5.380817413330078,
      "learning_rate": 3.086582838174551e-05,
      "loss": 11.4652,
      "step": 32
    },
    {
      "epoch": 0.005016388770129685,
      "grad_norm": 7.637914657592773,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 10.5382,
      "step": 33
    },
    {
      "epoch": 0.0051684005510427055,
      "grad_norm": 18.107852935791016,
      "learning_rate": 2.500000000000001e-05,
      "loss": 11.4912,
      "step": 34
    },
    {
      "epoch": 0.005320412331955727,
      "grad_norm": 15.613152503967285,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 10.7273,
      "step": 35
    },
    {
      "epoch": 0.005472424112868747,
      "grad_norm": 19.3842830657959,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 12.1613,
      "step": 36
    },
    {
      "epoch": 0.005624435893781768,
      "grad_norm": 9.611380577087402,
      "learning_rate": 1.703270924499656e-05,
      "loss": 10.761,
      "step": 37
    },
    {
      "epoch": 0.005776447674694789,
      "grad_norm": 4.896385669708252,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 11.5633,
      "step": 38
    },
    {
      "epoch": 0.00592845945560781,
      "grad_norm": 1.013009786605835,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 8.4005,
      "step": 39
    },
    {
      "epoch": 0.00608047123652083,
      "grad_norm": 1.1705186367034912,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 9.1887,
      "step": 40
    },
    {
      "epoch": 0.006232483017433851,
      "grad_norm": 1.3294516801834106,
      "learning_rate": 8.426519384872733e-06,
      "loss": 9.906,
      "step": 41
    },
    {
      "epoch": 0.006384494798346872,
      "grad_norm": 2.011843681335449,
      "learning_rate": 6.698729810778065e-06,
      "loss": 9.8106,
      "step": 42
    },
    {
      "epoch": 0.006536506579259893,
      "grad_norm": 2.1309995651245117,
      "learning_rate": 5.156362923365588e-06,
      "loss": 10.9305,
      "step": 43
    },
    {
      "epoch": 0.006688518360172913,
      "grad_norm": 2.600017547607422,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 11.1133,
      "step": 44
    },
    {
      "epoch": 0.006840530141085934,
      "grad_norm": 2.881525754928589,
      "learning_rate": 2.653493525244721e-06,
      "loss": 10.9566,
      "step": 45
    },
    {
      "epoch": 0.006992541921998955,
      "grad_norm": 10.933311462402344,
      "learning_rate": 1.70370868554659e-06,
      "loss": 10.6738,
      "step": 46
    },
    {
      "epoch": 0.007144553702911976,
      "grad_norm": 10.83802318572998,
      "learning_rate": 9.607359798384785e-07,
      "loss": 10.5406,
      "step": 47
    },
    {
      "epoch": 0.007296565483824996,
      "grad_norm": 11.648588180541992,
      "learning_rate": 4.277569313094809e-07,
      "loss": 12.5734,
      "step": 48
    },
    {
      "epoch": 0.007448577264738018,
      "grad_norm": 15.444548606872559,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 11.7787,
      "step": 49
    },
    {
      "epoch": 0.007600589045651038,
      "grad_norm": 11.467397689819336,
      "learning_rate": 0.0,
      "loss": 12.5185,
      "step": 50
    },
    {
      "epoch": 0.007600589045651038,
      "eval_loss": 0.6467951536178589,
      "eval_runtime": 2213.672,
      "eval_samples_per_second": 20.02,
      "eval_steps_per_second": 2.503,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.1089660061406986e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}