{
  "best_metric": 0.8535917401313782,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.09740655059052722,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0019481310118105442,
      "grad_norm": 23.000314712524414,
      "learning_rate": 5e-05,
      "loss": 2.7164,
      "step": 1
    },
    {
      "epoch": 0.0019481310118105442,
      "eval_loss": 3.9044392108917236,
      "eval_runtime": 29.8355,
      "eval_samples_per_second": 115.902,
      "eval_steps_per_second": 14.513,
      "step": 1
    },
    {
      "epoch": 0.0038962620236210883,
      "grad_norm": 23.18843650817871,
      "learning_rate": 0.0001,
      "loss": 2.7355,
      "step": 2
    },
    {
      "epoch": 0.005844393035431633,
      "grad_norm": 17.209299087524414,
      "learning_rate": 9.989294616193017e-05,
      "loss": 2.6214,
      "step": 3
    },
    {
      "epoch": 0.007792524047242177,
      "grad_norm": 9.175566673278809,
      "learning_rate": 9.957224306869053e-05,
      "loss": 2.3306,
      "step": 4
    },
    {
      "epoch": 0.009740655059052722,
      "grad_norm": 7.254579544067383,
      "learning_rate": 9.903926402016153e-05,
      "loss": 2.2872,
      "step": 5
    },
    {
      "epoch": 0.011688786070863266,
      "grad_norm": 6.648281574249268,
      "learning_rate": 9.829629131445342e-05,
      "loss": 1.7377,
      "step": 6
    },
    {
      "epoch": 0.01363691708267381,
      "grad_norm": 3.4053423404693604,
      "learning_rate": 9.73465064747553e-05,
      "loss": 1.6466,
      "step": 7
    },
    {
      "epoch": 0.015585048094484353,
      "grad_norm": 2.829256772994995,
      "learning_rate": 9.619397662556435e-05,
      "loss": 1.5147,
      "step": 8
    },
    {
      "epoch": 0.0175331791062949,
      "grad_norm": 4.3303608894348145,
      "learning_rate": 9.484363707663442e-05,
      "loss": 1.629,
      "step": 9
    },
    {
      "epoch": 0.019481310118105444,
      "grad_norm": 4.374320030212402,
      "learning_rate": 9.330127018922194e-05,
      "loss": 1.602,
      "step": 10
    },
    {
      "epoch": 0.021429441129915988,
      "grad_norm": 3.482375144958496,
      "learning_rate": 9.157348061512727e-05,
      "loss": 1.6133,
      "step": 11
    },
    {
      "epoch": 0.023377572141726533,
      "grad_norm": 3.8871004581451416,
      "learning_rate": 8.966766701456177e-05,
      "loss": 1.3356,
      "step": 12
    },
    {
      "epoch": 0.025325703153537077,
      "grad_norm": 2.3258163928985596,
      "learning_rate": 8.759199037394887e-05,
      "loss": 0.7787,
      "step": 13
    },
    {
      "epoch": 0.02727383416534762,
      "grad_norm": 1.4656181335449219,
      "learning_rate": 8.535533905932738e-05,
      "loss": 0.8414,
      "step": 14
    },
    {
      "epoch": 0.029221965177158162,
      "grad_norm": 1.4904340505599976,
      "learning_rate": 8.296729075500344e-05,
      "loss": 0.9771,
      "step": 15
    },
    {
      "epoch": 0.031170096188968707,
      "grad_norm": 1.236912727355957,
      "learning_rate": 8.043807145043604e-05,
      "loss": 1.0273,
      "step": 16
    },
    {
      "epoch": 0.033118227200779254,
      "grad_norm": 1.1031506061553955,
      "learning_rate": 7.777851165098012e-05,
      "loss": 0.9108,
      "step": 17
    },
    {
      "epoch": 0.0350663582125898,
      "grad_norm": 1.278933048248291,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.0385,
      "step": 18
    },
    {
      "epoch": 0.03701448922440034,
      "grad_norm": 1.6012283563613892,
      "learning_rate": 7.211443451095007e-05,
      "loss": 1.0603,
      "step": 19
    },
    {
      "epoch": 0.03896262023621089,
      "grad_norm": 1.4366340637207031,
      "learning_rate": 6.91341716182545e-05,
      "loss": 1.1865,
      "step": 20
    },
    {
      "epoch": 0.04091075124802143,
      "grad_norm": 1.5510519742965698,
      "learning_rate": 6.607197326515808e-05,
      "loss": 1.1492,
      "step": 21
    },
    {
      "epoch": 0.042858882259831976,
      "grad_norm": 1.4250149726867676,
      "learning_rate": 6.294095225512603e-05,
      "loss": 1.2327,
      "step": 22
    },
    {
      "epoch": 0.04480701327164252,
      "grad_norm": 2.2062790393829346,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 1.4777,
      "step": 23
    },
    {
      "epoch": 0.046755144283453065,
      "grad_norm": 2.0388667583465576,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 1.1637,
      "step": 24
    },
    {
      "epoch": 0.04870327529526361,
      "grad_norm": 3.433821201324463,
      "learning_rate": 5.327015646150716e-05,
      "loss": 0.7372,
      "step": 25
    },
    {
      "epoch": 0.04870327529526361,
      "eval_loss": 0.9098588824272156,
      "eval_runtime": 29.8515,
      "eval_samples_per_second": 115.84,
      "eval_steps_per_second": 14.505,
      "step": 25
    },
    {
      "epoch": 0.050651406307074154,
      "grad_norm": 1.0087053775787354,
      "learning_rate": 5e-05,
      "loss": 0.6443,
      "step": 26
    },
    {
      "epoch": 0.0525995373188847,
      "grad_norm": 0.9601799249649048,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 0.6904,
      "step": 27
    },
    {
      "epoch": 0.05454766833069524,
      "grad_norm": 0.9024684429168701,
      "learning_rate": 4.347369038899744e-05,
      "loss": 0.8345,
      "step": 28
    },
    {
      "epoch": 0.05649579934250579,
      "grad_norm": 1.067520260810852,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 0.7891,
      "step": 29
    },
    {
      "epoch": 0.058443930354316324,
      "grad_norm": 1.0981365442276,
      "learning_rate": 3.705904774487396e-05,
      "loss": 0.8215,
      "step": 30
    },
    {
      "epoch": 0.06039206136612687,
      "grad_norm": 1.0717315673828125,
      "learning_rate": 3.392802673484193e-05,
      "loss": 0.8982,
      "step": 31
    },
    {
      "epoch": 0.06234019237793741,
      "grad_norm": 1.1622248888015747,
      "learning_rate": 3.086582838174551e-05,
      "loss": 1.0009,
      "step": 32
    },
    {
      "epoch": 0.06428832338974796,
      "grad_norm": 1.4013619422912598,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 1.0698,
      "step": 33
    },
    {
      "epoch": 0.06623645440155851,
      "grad_norm": 1.499001383781433,
      "learning_rate": 2.500000000000001e-05,
      "loss": 1.2458,
      "step": 34
    },
    {
      "epoch": 0.06818458541336905,
      "grad_norm": 1.7074172496795654,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 1.0946,
      "step": 35
    },
    {
      "epoch": 0.0701327164251796,
      "grad_norm": 1.734402060508728,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 1.1359,
      "step": 36
    },
    {
      "epoch": 0.07208084743699014,
      "grad_norm": 2.261868715286255,
      "learning_rate": 1.703270924499656e-05,
      "loss": 0.7585,
      "step": 37
    },
    {
      "epoch": 0.07402897844880069,
      "grad_norm": 0.9555121660232544,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 0.5668,
      "step": 38
    },
    {
      "epoch": 0.07597710946061123,
      "grad_norm": 0.8737242221832275,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 0.6213,
      "step": 39
    },
    {
      "epoch": 0.07792524047242178,
      "grad_norm": 0.9610376954078674,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 0.6776,
      "step": 40
    },
    {
      "epoch": 0.07987337148423232,
      "grad_norm": 1.0207109451293945,
      "learning_rate": 8.426519384872733e-06,
      "loss": 0.8021,
      "step": 41
    },
    {
      "epoch": 0.08182150249604286,
      "grad_norm": 1.0356788635253906,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.8581,
      "step": 42
    },
    {
      "epoch": 0.08376963350785341,
      "grad_norm": 1.0629724264144897,
      "learning_rate": 5.156362923365588e-06,
      "loss": 0.8796,
      "step": 43
    },
    {
      "epoch": 0.08571776451966395,
      "grad_norm": 1.2652376890182495,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 1.028,
      "step": 44
    },
    {
      "epoch": 0.0876658955314745,
      "grad_norm": 1.1882436275482178,
      "learning_rate": 2.653493525244721e-06,
      "loss": 0.9639,
      "step": 45
    },
    {
      "epoch": 0.08961402654328504,
      "grad_norm": 1.58311128616333,
      "learning_rate": 1.70370868554659e-06,
      "loss": 1.1072,
      "step": 46
    },
    {
      "epoch": 0.09156215755509559,
      "grad_norm": 1.32377290725708,
      "learning_rate": 9.607359798384785e-07,
      "loss": 1.0653,
      "step": 47
    },
    {
      "epoch": 0.09351028856690613,
      "grad_norm": 1.4533188343048096,
      "learning_rate": 4.277569313094809e-07,
      "loss": 1.1439,
      "step": 48
    },
    {
      "epoch": 0.09545841957871667,
      "grad_norm": 1.530142903327942,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 0.9235,
      "step": 49
    },
    {
      "epoch": 0.09740655059052722,
      "grad_norm": 2.5200765132904053,
      "learning_rate": 0.0,
      "loss": 0.6299,
      "step": 50
    },
    {
      "epoch": 0.09740655059052722,
      "eval_loss": 0.8535917401313782,
      "eval_runtime": 29.845,
      "eval_samples_per_second": 115.865,
      "eval_steps_per_second": 14.508,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.33416392081408e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}