{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0631578947368423,
  "eval_steps": 25,
  "global_step": 90,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03368421052631579,
      "grad_norm": 0.39811971783638,
      "learning_rate": 1e-05,
      "loss": 2.1869,
      "step": 1
    },
    {
      "epoch": 0.03368421052631579,
      "eval_loss": 2.1871018409729004,
      "eval_runtime": 51.726,
      "eval_samples_per_second": 1.933,
      "eval_steps_per_second": 0.967,
      "step": 1
    },
    {
      "epoch": 0.06736842105263158,
      "grad_norm": 0.43366214632987976,
      "learning_rate": 2e-05,
      "loss": 2.124,
      "step": 2
    },
    {
      "epoch": 0.10105263157894737,
      "grad_norm": 0.41468173265457153,
      "learning_rate": 3e-05,
      "loss": 2.1397,
      "step": 3
    },
    {
      "epoch": 0.13473684210526315,
      "grad_norm": 0.41099846363067627,
      "learning_rate": 4e-05,
      "loss": 2.1909,
      "step": 4
    },
    {
      "epoch": 0.16842105263157894,
      "grad_norm": 0.35164669156074524,
      "learning_rate": 5e-05,
      "loss": 2.1483,
      "step": 5
    },
    {
      "epoch": 0.20210526315789473,
      "grad_norm": 0.3426206111907959,
      "learning_rate": 6e-05,
      "loss": 2.0835,
      "step": 6
    },
    {
      "epoch": 0.23578947368421052,
      "grad_norm": 0.30055052042007446,
      "learning_rate": 7e-05,
      "loss": 2.1794,
      "step": 7
    },
    {
      "epoch": 0.2694736842105263,
      "grad_norm": 0.3416151702404022,
      "learning_rate": 8e-05,
      "loss": 2.1128,
      "step": 8
    },
    {
      "epoch": 0.3031578947368421,
      "grad_norm": 0.3587937355041504,
      "learning_rate": 9e-05,
      "loss": 2.1879,
      "step": 9
    },
    {
      "epoch": 0.3368421052631579,
      "grad_norm": 0.3512348234653473,
      "learning_rate": 0.0001,
      "loss": 2.1369,
      "step": 10
    },
    {
      "epoch": 0.3705263157894737,
      "grad_norm": 0.3542545437812805,
      "learning_rate": 0.00011000000000000002,
      "loss": 2.0747,
      "step": 11
    },
    {
      "epoch": 0.40421052631578946,
      "grad_norm": 0.3271574079990387,
      "learning_rate": 0.00012,
      "loss": 2.0892,
      "step": 12
    },
    {
      "epoch": 0.4378947368421053,
      "grad_norm": 0.29716193675994873,
      "learning_rate": 0.00013000000000000002,
      "loss": 2.0945,
      "step": 13
    },
    {
      "epoch": 0.47157894736842104,
      "grad_norm": 0.2834334373474121,
      "learning_rate": 0.00014,
      "loss": 2.1163,
      "step": 14
    },
    {
      "epoch": 0.5052631578947369,
      "grad_norm": 0.2676835060119629,
      "learning_rate": 0.00015000000000000001,
      "loss": 2.0774,
      "step": 15
    },
    {
      "epoch": 0.5389473684210526,
      "grad_norm": 0.3038509786128998,
      "learning_rate": 0.00016,
      "loss": 2.0738,
      "step": 16
    },
    {
      "epoch": 0.5726315789473684,
      "grad_norm": 0.30092963576316833,
      "learning_rate": 0.00017,
      "loss": 2.0443,
      "step": 17
    },
    {
      "epoch": 0.6063157894736843,
      "grad_norm": 0.2652921974658966,
      "learning_rate": 0.00018,
      "loss": 2.0226,
      "step": 18
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.23126669228076935,
      "learning_rate": 0.00019,
      "loss": 2.0893,
      "step": 19
    },
    {
      "epoch": 0.6736842105263158,
      "grad_norm": 0.25947996973991394,
      "learning_rate": 0.0002,
      "loss": 2.0797,
      "step": 20
    },
    {
      "epoch": 0.7073684210526315,
      "grad_norm": 0.24937580525875092,
      "learning_rate": 0.00019989930665413147,
      "loss": 2.113,
      "step": 21
    },
    {
      "epoch": 0.7410526315789474,
      "grad_norm": 0.23798011243343353,
      "learning_rate": 0.00019959742939952392,
      "loss": 2.0782,
      "step": 22
    },
    {
      "epoch": 0.7747368421052632,
      "grad_norm": 0.23232997953891754,
      "learning_rate": 0.00019909497617679348,
      "loss": 2.0749,
      "step": 23
    },
    {
      "epoch": 0.8084210526315789,
      "grad_norm": 0.22174346446990967,
      "learning_rate": 0.00019839295885986296,
      "loss": 2.0835,
      "step": 24
    },
    {
      "epoch": 0.8421052631578947,
      "grad_norm": 0.20978115499019623,
      "learning_rate": 0.00019749279121818235,
      "loss": 2.1273,
      "step": 25
    },
    {
      "epoch": 0.8421052631578947,
      "eval_loss": 2.085695743560791,
      "eval_runtime": 51.638,
      "eval_samples_per_second": 1.937,
      "eval_steps_per_second": 0.968,
      "step": 25
    },
    {
      "epoch": 0.8757894736842106,
      "grad_norm": 0.20944544672966003,
      "learning_rate": 0.00019639628606958533,
      "loss": 2.0262,
      "step": 26
    },
    {
      "epoch": 0.9094736842105263,
      "grad_norm": 0.20344075560569763,
      "learning_rate": 0.00019510565162951537,
      "loss": 2.0426,
      "step": 27
    },
    {
      "epoch": 0.9431578947368421,
      "grad_norm": 0.22783611714839935,
      "learning_rate": 0.00019362348706397373,
      "loss": 2.0554,
      "step": 28
    },
    {
      "epoch": 0.9768421052631578,
      "grad_norm": 0.19957782328128815,
      "learning_rate": 0.0001919527772551451,
      "loss": 2.0124,
      "step": 29
    },
    {
      "epoch": 1.0210526315789474,
      "grad_norm": 0.38232752680778503,
      "learning_rate": 0.0001900968867902419,
      "loss": 3.3482,
      "step": 30
    },
    {
      "epoch": 1.0547368421052632,
      "grad_norm": 0.21452440321445465,
      "learning_rate": 0.0001880595531856738,
      "loss": 1.9092,
      "step": 31
    },
    {
      "epoch": 1.088421052631579,
      "grad_norm": 0.20885184407234192,
      "learning_rate": 0.00018584487936018661,
      "loss": 2.0082,
      "step": 32
    },
    {
      "epoch": 1.1221052631578947,
      "grad_norm": 0.23769386112689972,
      "learning_rate": 0.00018345732537213027,
      "loss": 1.9497,
      "step": 33
    },
    {
      "epoch": 1.1557894736842105,
      "grad_norm": 0.22073900699615479,
      "learning_rate": 0.00018090169943749476,
      "loss": 2.0897,
      "step": 34
    },
    {
      "epoch": 1.1894736842105262,
      "grad_norm": 0.20628167688846588,
      "learning_rate": 0.000178183148246803,
      "loss": 1.9091,
      "step": 35
    },
    {
      "epoch": 1.223157894736842,
      "grad_norm": 0.20254100859165192,
      "learning_rate": 0.00017530714660036112,
      "loss": 2.0089,
      "step": 36
    },
    {
      "epoch": 1.256842105263158,
      "grad_norm": 0.21576657891273499,
      "learning_rate": 0.00017227948638273916,
      "loss": 1.9733,
      "step": 37
    },
    {
      "epoch": 1.2905263157894737,
      "grad_norm": 0.21345406770706177,
      "learning_rate": 0.00016910626489868649,
      "loss": 1.9593,
      "step": 38
    },
    {
      "epoch": 1.3242105263157895,
      "grad_norm": 0.2377728521823883,
      "learning_rate": 0.00016579387259397127,
      "loss": 1.9921,
      "step": 39
    },
    {
      "epoch": 1.3578947368421053,
      "grad_norm": 0.22072383761405945,
      "learning_rate": 0.00016234898018587337,
      "loss": 1.9538,
      "step": 40
    },
    {
      "epoch": 1.391578947368421,
      "grad_norm": 0.22244809567928314,
      "learning_rate": 0.00015877852522924732,
      "loss": 1.9346,
      "step": 41
    },
    {
      "epoch": 1.4252631578947368,
      "grad_norm": 0.22625932097434998,
      "learning_rate": 0.00015508969814521025,
      "loss": 2.0541,
      "step": 42
    },
    {
      "epoch": 1.4589473684210525,
      "grad_norm": 0.22077956795692444,
      "learning_rate": 0.00015128992774059063,
      "loss": 1.9145,
      "step": 43
    },
    {
      "epoch": 1.4926315789473685,
      "grad_norm": 0.22373822331428528,
      "learning_rate": 0.00014738686624729986,
      "loss": 2.0576,
      "step": 44
    },
    {
      "epoch": 1.526315789473684,
      "grad_norm": 0.271822988986969,
      "learning_rate": 0.00014338837391175582,
      "loss": 1.9241,
      "step": 45
    },
    {
      "epoch": 1.56,
      "grad_norm": 0.22692303359508514,
      "learning_rate": 0.00013930250316539238,
      "loss": 2.0193,
      "step": 46
    },
    {
      "epoch": 1.5936842105263158,
      "grad_norm": 0.23916593194007874,
      "learning_rate": 0.0001351374824081343,
      "loss": 1.9436,
      "step": 47
    },
    {
      "epoch": 1.6273684210526316,
      "grad_norm": 0.2340150773525238,
      "learning_rate": 0.00013090169943749476,
      "loss": 1.9656,
      "step": 48
    },
    {
      "epoch": 1.6610526315789473,
      "grad_norm": 0.23769573867321014,
      "learning_rate": 0.00012660368455666752,
      "loss": 1.9992,
      "step": 49
    },
    {
      "epoch": 1.694736842105263,
      "grad_norm": 0.227084219455719,
      "learning_rate": 0.00012225209339563145,
      "loss": 2.0096,
      "step": 50
    },
    {
      "epoch": 1.694736842105263,
      "eval_loss": 2.0874733924865723,
      "eval_runtime": 51.8934,
      "eval_samples_per_second": 1.927,
      "eval_steps_per_second": 0.964,
      "step": 50
    },
    {
      "epoch": 1.728421052631579,
      "grad_norm": 0.2218991219997406,
      "learning_rate": 0.00011785568947986367,
      "loss": 1.9305,
      "step": 51
    },
    {
      "epoch": 1.7621052631578946,
      "grad_norm": 0.2461809664964676,
      "learning_rate": 0.00011342332658176555,
      "loss": 1.9155,
      "step": 52
    },
    {
      "epoch": 1.7957894736842106,
      "grad_norm": 0.246916726231575,
      "learning_rate": 0.00010896393089034336,
      "loss": 1.9407,
      "step": 53
    },
    {
      "epoch": 1.8294736842105264,
      "grad_norm": 0.23680198192596436,
      "learning_rate": 0.00010448648303505151,
      "loss": 1.8634,
      "step": 54
    },
    {
      "epoch": 1.8631578947368421,
      "grad_norm": 0.2648875117301941,
      "learning_rate": 0.0001,
      "loss": 2.0646,
      "step": 55
    },
    {
      "epoch": 1.8968421052631579,
      "grad_norm": 0.24859718978405,
      "learning_rate": 9.551351696494854e-05,
      "loss": 1.9494,
      "step": 56
    },
    {
      "epoch": 1.9305263157894736,
      "grad_norm": 0.26295167207717896,
      "learning_rate": 9.103606910965666e-05,
      "loss": 1.9067,
      "step": 57
    },
    {
      "epoch": 1.9642105263157896,
      "grad_norm": 0.24121926724910736,
      "learning_rate": 8.657667341823448e-05,
      "loss": 1.9897,
      "step": 58
    },
    {
      "epoch": 2.008421052631579,
      "grad_norm": 0.4270052909851074,
      "learning_rate": 8.214431052013634e-05,
      "loss": 3.2422,
      "step": 59
    },
    {
      "epoch": 2.042105263157895,
      "grad_norm": 0.2644731402397156,
      "learning_rate": 7.774790660436858e-05,
      "loss": 1.8985,
      "step": 60
    },
    {
      "epoch": 2.0757894736842104,
      "grad_norm": 0.26980847120285034,
      "learning_rate": 7.339631544333249e-05,
      "loss": 1.8777,
      "step": 61
    },
    {
      "epoch": 2.1094736842105264,
      "grad_norm": 0.25031837821006775,
      "learning_rate": 6.909830056250527e-05,
      "loss": 1.8957,
      "step": 62
    },
    {
      "epoch": 2.143157894736842,
      "grad_norm": 0.2601235806941986,
      "learning_rate": 6.486251759186572e-05,
      "loss": 1.8774,
      "step": 63
    },
    {
      "epoch": 2.176842105263158,
      "grad_norm": 0.27925577759742737,
      "learning_rate": 6.069749683460765e-05,
      "loss": 1.8837,
      "step": 64
    },
    {
      "epoch": 2.2105263157894735,
      "grad_norm": 0.2980926036834717,
      "learning_rate": 5.6611626088244194e-05,
      "loss": 1.8308,
      "step": 65
    },
    {
      "epoch": 2.2442105263157894,
      "grad_norm": 0.3060623109340668,
      "learning_rate": 5.261313375270014e-05,
      "loss": 1.8415,
      "step": 66
    },
    {
      "epoch": 2.2778947368421054,
      "grad_norm": 0.2616610825061798,
      "learning_rate": 4.87100722594094e-05,
      "loss": 1.8877,
      "step": 67
    },
    {
      "epoch": 2.311578947368421,
      "grad_norm": 0.26330044865608215,
      "learning_rate": 4.491030185478976e-05,
      "loss": 1.9152,
      "step": 68
    },
    {
      "epoch": 2.345263157894737,
      "grad_norm": 0.2531837224960327,
      "learning_rate": 4.12214747707527e-05,
      "loss": 1.753,
      "step": 69
    },
    {
      "epoch": 2.3789473684210525,
      "grad_norm": 0.29288211464881897,
      "learning_rate": 3.7651019814126654e-05,
      "loss": 1.8325,
      "step": 70
    },
    {
      "epoch": 2.4126315789473685,
      "grad_norm": 0.2796313762664795,
      "learning_rate": 3.4206127406028745e-05,
      "loss": 1.8628,
      "step": 71
    },
    {
      "epoch": 2.446315789473684,
      "grad_norm": 0.2576499283313751,
      "learning_rate": 3.089373510131354e-05,
      "loss": 1.8157,
      "step": 72
    },
    {
      "epoch": 2.48,
      "grad_norm": 0.26932939887046814,
      "learning_rate": 2.7720513617260856e-05,
      "loss": 1.7607,
      "step": 73
    },
    {
      "epoch": 2.513684210526316,
      "grad_norm": 0.2623027563095093,
      "learning_rate": 2.4692853399638917e-05,
      "loss": 1.8798,
      "step": 74
    },
    {
      "epoch": 2.5473684210526315,
      "grad_norm": 0.2754516005516052,
      "learning_rate": 2.181685175319702e-05,
      "loss": 1.8257,
      "step": 75
    },
    {
      "epoch": 2.5473684210526315,
      "eval_loss": 2.1104278564453125,
      "eval_runtime": 51.5518,
      "eval_samples_per_second": 1.94,
      "eval_steps_per_second": 0.97,
      "step": 75
    },
    {
      "epoch": 2.5810526315789475,
      "grad_norm": 0.2684014141559601,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 1.8146,
      "step": 76
    },
    {
      "epoch": 2.614736842105263,
      "grad_norm": 0.27565282583236694,
      "learning_rate": 1.6542674627869737e-05,
      "loss": 1.9054,
      "step": 77
    },
    {
      "epoch": 2.648421052631579,
      "grad_norm": 0.27578240633010864,
      "learning_rate": 1.415512063981339e-05,
      "loss": 1.8384,
      "step": 78
    },
    {
      "epoch": 2.6821052631578945,
      "grad_norm": 0.2741873264312744,
      "learning_rate": 1.19404468143262e-05,
      "loss": 1.7925,
      "step": 79
    },
    {
      "epoch": 2.7157894736842105,
      "grad_norm": 0.28904208540916443,
      "learning_rate": 9.903113209758096e-06,
      "loss": 1.8668,
      "step": 80
    },
    {
      "epoch": 2.7494736842105265,
      "grad_norm": 0.291592538356781,
      "learning_rate": 8.047222744854943e-06,
      "loss": 1.8642,
      "step": 81
    },
    {
      "epoch": 2.783157894736842,
      "grad_norm": 0.287810742855072,
      "learning_rate": 6.37651293602628e-06,
      "loss": 1.8866,
      "step": 82
    },
    {
      "epoch": 2.816842105263158,
      "grad_norm": 0.28119951486587524,
      "learning_rate": 4.8943483704846475e-06,
      "loss": 1.7627,
      "step": 83
    },
    {
      "epoch": 2.8505263157894736,
      "grad_norm": 0.26779597997665405,
      "learning_rate": 3.6037139304146762e-06,
      "loss": 1.9118,
      "step": 84
    },
    {
      "epoch": 2.8842105263157896,
      "grad_norm": 0.2809458374977112,
      "learning_rate": 2.5072087818176382e-06,
      "loss": 1.9202,
      "step": 85
    },
    {
      "epoch": 2.917894736842105,
      "grad_norm": 0.27657240629196167,
      "learning_rate": 1.6070411401370334e-06,
      "loss": 1.7985,
      "step": 86
    },
    {
      "epoch": 2.951578947368421,
      "grad_norm": 0.2845059335231781,
      "learning_rate": 9.0502382320653e-07,
      "loss": 1.7862,
      "step": 87
    },
    {
      "epoch": 2.985263157894737,
      "grad_norm": 0.4435933828353882,
      "learning_rate": 4.025706004760932e-07,
      "loss": 2.6286,
      "step": 88
    },
    {
      "epoch": 3.0294736842105263,
      "grad_norm": 0.3721281886100769,
      "learning_rate": 1.0069334586854107e-07,
      "loss": 2.1659,
      "step": 89
    },
    {
      "epoch": 3.0631578947368423,
      "grad_norm": 0.2612936794757843,
      "learning_rate": 0.0,
      "loss": 1.8654,
      "step": 90
    }
  ],
  "logging_steps": 1,
  "max_steps": 90,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 8,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0742550511170355e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}