{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.05684240443370755, "eval_steps": 500, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.001136848088674151, "grad_norm": 1.4815157651901245, "learning_rate": 0.0001, "loss": 2.1248, "step": 1 }, { "epoch": 0.002273696177348302, "grad_norm": 1.8329018354415894, "learning_rate": 0.0002, "loss": 2.6443, "step": 2 }, { "epoch": 0.003410544266022453, "grad_norm": 1.605793833732605, "learning_rate": 0.00019978589232386035, "loss": 2.2647, "step": 3 }, { "epoch": 0.004547392354696604, "grad_norm": 1.5724087953567505, "learning_rate": 0.00019914448613738106, "loss": 2.1454, "step": 4 }, { "epoch": 0.005684240443370754, "grad_norm": 1.348822832107544, "learning_rate": 0.00019807852804032305, "loss": 2.2881, "step": 5 }, { "epoch": 0.006821088532044906, "grad_norm": 1.3859797716140747, "learning_rate": 0.00019659258262890683, "loss": 2.1213, "step": 6 }, { "epoch": 0.007957936620719057, "grad_norm": 1.4515624046325684, "learning_rate": 0.0001946930129495106, "loss": 2.1561, "step": 7 }, { "epoch": 0.009094784709393207, "grad_norm": 1.3554356098175049, "learning_rate": 0.0001923879532511287, "loss": 1.8992, "step": 8 }, { "epoch": 0.010231632798067358, "grad_norm": 1.0971583127975464, "learning_rate": 0.00018968727415326884, "loss": 1.8558, "step": 9 }, { "epoch": 0.011368480886741509, "grad_norm": 0.9929386377334595, "learning_rate": 0.00018660254037844388, "loss": 1.7249, "step": 10 }, { "epoch": 0.01250532897541566, "grad_norm": 1.1700142621994019, "learning_rate": 0.00018314696123025454, "loss": 1.9864, "step": 11 }, { "epoch": 0.013642177064089812, "grad_norm": 1.3076943159103394, "learning_rate": 0.00017933533402912354, "loss": 1.5089, "step": 12 }, { "epoch": 0.014779025152763962, "grad_norm": 1.39998459815979, "learning_rate": 0.00017518398074789775, "loss": 1.6929, "step": 13 }, { "epoch": 0.015915873241438113, "grad_norm": 1.1641753911972046, "learning_rate": 0.00017071067811865476, "loss": 1.6188, "step": 14 }, { "epoch": 0.017052721330112264, "grad_norm": 1.0474761724472046, "learning_rate": 0.00016593458151000688, "loss": 1.8896, "step": 15 }, { "epoch": 0.018189569418786414, "grad_norm": 0.8149166703224182, "learning_rate": 0.00016087614290087208, "loss": 1.6699, "step": 16 }, { "epoch": 0.019326417507460565, "grad_norm": 1.3811651468276978, "learning_rate": 0.00015555702330196023, "loss": 1.9316, "step": 17 }, { "epoch": 0.020463265596134716, "grad_norm": 1.001374363899231, "learning_rate": 0.00015000000000000001, "loss": 1.8242, "step": 18 }, { "epoch": 0.021600113684808867, "grad_norm": 0.9255009889602661, "learning_rate": 0.00014422886902190014, "loss": 1.7512, "step": 19 }, { "epoch": 0.022736961773483017, "grad_norm": 0.8966016173362732, "learning_rate": 0.000138268343236509, "loss": 1.6891, "step": 20 }, { "epoch": 0.023873809862157168, "grad_norm": 0.9224222302436829, "learning_rate": 0.00013214394653031616, "loss": 1.7677, "step": 21 }, { "epoch": 0.02501065795083132, "grad_norm": 1.3027838468551636, "learning_rate": 0.00012588190451025207, "loss": 1.7951, "step": 22 }, { "epoch": 0.026147506039505473, "grad_norm": 0.9471145272254944, "learning_rate": 0.00011950903220161285, "loss": 1.701, "step": 23 }, { "epoch": 0.027284354128179623, "grad_norm": 0.9963980913162231, "learning_rate": 0.00011305261922200519, "loss": 1.5028, "step": 24 }, { "epoch": 0.028421202216853774, "grad_norm": 1.285988211631775, "learning_rate": 
0.00010654031292301432, "loss": 1.9019, "step": 25 }, { "epoch": 0.029558050305527925, "grad_norm": 0.9514207243919373, "learning_rate": 0.0001, "loss": 1.9525, "step": 26 }, { "epoch": 0.030694898394202075, "grad_norm": 0.8910967111587524, "learning_rate": 9.345968707698569e-05, "loss": 1.7485, "step": 27 }, { "epoch": 0.031831746482876226, "grad_norm": 1.0070226192474365, "learning_rate": 8.694738077799488e-05, "loss": 2.0128, "step": 28 }, { "epoch": 0.03296859457155037, "grad_norm": 0.8559178709983826, "learning_rate": 8.049096779838719e-05, "loss": 1.5326, "step": 29 }, { "epoch": 0.03410544266022453, "grad_norm": 0.8095901012420654, "learning_rate": 7.411809548974792e-05, "loss": 1.6773, "step": 30 }, { "epoch": 0.03524229074889868, "grad_norm": 1.0621888637542725, "learning_rate": 6.785605346968386e-05, "loss": 1.6565, "step": 31 }, { "epoch": 0.03637913883757283, "grad_norm": 1.2069458961486816, "learning_rate": 6.173165676349103e-05, "loss": 1.6746, "step": 32 }, { "epoch": 0.03751598692624698, "grad_norm": 1.1669138669967651, "learning_rate": 5.577113097809989e-05, "loss": 1.9012, "step": 33 }, { "epoch": 0.03865283501492113, "grad_norm": 0.8983680605888367, "learning_rate": 5.000000000000002e-05, "loss": 1.9007, "step": 34 }, { "epoch": 0.039789683103595284, "grad_norm": 1.0233827829360962, "learning_rate": 4.444297669803981e-05, "loss": 1.4926, "step": 35 }, { "epoch": 0.04092653119226943, "grad_norm": 0.7972369194030762, "learning_rate": 3.9123857099127936e-05, "loss": 1.6314, "step": 36 }, { "epoch": 0.042063379280943586, "grad_norm": 1.2082093954086304, "learning_rate": 3.406541848999312e-05, "loss": 1.5206, "step": 37 }, { "epoch": 0.04320022736961773, "grad_norm": 1.1588741540908813, "learning_rate": 2.9289321881345254e-05, "loss": 1.7528, "step": 38 }, { "epoch": 0.04433707545829189, "grad_norm": 1.1021286249160767, "learning_rate": 2.4816019252102273e-05, "loss": 1.8891, "step": 39 }, { "epoch": 0.045473923546966034, "grad_norm": 0.8373359441757202, "learning_rate": 2.0664665970876496e-05, "loss": 1.7719, "step": 40 }, { "epoch": 0.04661077163564019, "grad_norm": 1.1442703008651733, "learning_rate": 1.6853038769745467e-05, "loss": 1.8333, "step": 41 }, { "epoch": 0.047747619724314336, "grad_norm": 0.8961570858955383, "learning_rate": 1.339745962155613e-05, "loss": 1.7535, "step": 42 }, { "epoch": 0.04888446781298849, "grad_norm": 0.8508850932121277, "learning_rate": 1.0312725846731175e-05, "loss": 1.667, "step": 43 }, { "epoch": 0.05002131590166264, "grad_norm": 1.1900713443756104, "learning_rate": 7.612046748871327e-06, "loss": 1.8957, "step": 44 }, { "epoch": 0.05115816399033679, "grad_norm": 0.7741867899894714, "learning_rate": 5.306987050489442e-06, "loss": 1.9832, "step": 45 }, { "epoch": 0.052295012079010945, "grad_norm": 0.8075515031814575, "learning_rate": 3.40741737109318e-06, "loss": 1.8664, "step": 46 }, { "epoch": 0.05343186016768509, "grad_norm": 0.9421095252037048, "learning_rate": 1.921471959676957e-06, "loss": 1.6778, "step": 47 }, { "epoch": 0.05456870825635925, "grad_norm": 0.6307845115661621, "learning_rate": 8.555138626189618e-07, "loss": 1.8083, "step": 48 }, { "epoch": 0.055705556345033394, "grad_norm": 1.0674786567687988, "learning_rate": 2.141076761396521e-07, "loss": 1.5506, "step": 49 }, { "epoch": 0.05684240443370755, "grad_norm": 1.3023602962493896, "learning_rate": 0.0, "loss": 1.6605, "step": 50 } ], "logging_steps": 1, "max_steps": 50, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 25, "stateful_callbacks": { 
"TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 7.455971246447002e+16, "train_batch_size": 2, "trial_name": null, "trial_params": null }