| { |
| "best_metric": null, |
| "best_model_checkpoint": null, |
| "epoch": 2.9904153354632586, |
| "eval_steps": 500, |
| "global_step": 78, |
| "is_hyper_param_search": false, |
| "is_local_process_zero": true, |
| "is_world_process_zero": true, |
| "log_history": [ |
| { |
| "epoch": 0.038338658146964855, |
| "grad_norm": 6.45109748840332, |
| "learning_rate": 1.25e-06, |
| "loss": 1.2212, |
| "step": 1 |
| }, |
| { |
| "epoch": 0.07667731629392971, |
| "grad_norm": 6.449772834777832, |
| "learning_rate": 2.5e-06, |
| "loss": 1.2011, |
| "step": 2 |
| }, |
| { |
| "epoch": 0.11501597444089456, |
| "grad_norm": 6.236454963684082, |
| "learning_rate": 3.7500000000000005e-06, |
| "loss": 1.1949, |
| "step": 3 |
| }, |
| { |
| "epoch": 0.15335463258785942, |
| "grad_norm": 6.070368766784668, |
| "learning_rate": 5e-06, |
| "loss": 1.2172, |
| "step": 4 |
| }, |
| { |
| "epoch": 0.19169329073482427, |
| "grad_norm": 4.3351287841796875, |
| "learning_rate": 6.25e-06, |
| "loss": 1.1609, |
| "step": 5 |
| }, |
| { |
| "epoch": 0.23003194888178913, |
| "grad_norm": 2.6603915691375732, |
| "learning_rate": 7.500000000000001e-06, |
| "loss": 1.0722, |
| "step": 6 |
| }, |
| { |
| "epoch": 0.268370607028754, |
| "grad_norm": 4.560462951660156, |
| "learning_rate": 8.750000000000001e-06, |
| "loss": 1.0798, |
| "step": 7 |
| }, |
| { |
| "epoch": 0.30670926517571884, |
| "grad_norm": 5.290356159210205, |
| "learning_rate": 1e-05, |
| "loss": 1.1219, |
| "step": 8 |
| }, |
| { |
| "epoch": 0.3450479233226837, |
| "grad_norm": 4.381709098815918, |
| "learning_rate": 9.994965332706574e-06, |
| "loss": 1.0392, |
| "step": 9 |
| }, |
| { |
| "epoch": 0.38338658146964855, |
| "grad_norm": 4.178859710693359, |
| "learning_rate": 9.979871469976197e-06, |
| "loss": 1.0663, |
| "step": 10 |
| }, |
| { |
| "epoch": 0.4217252396166134, |
| "grad_norm": 3.180849552154541, |
| "learning_rate": 9.954748808839675e-06, |
| "loss": 1.031, |
| "step": 11 |
| }, |
| { |
| "epoch": 0.46006389776357826, |
| "grad_norm": 2.457411289215088, |
| "learning_rate": 9.91964794299315e-06, |
| "loss": 1.0179, |
| "step": 12 |
| }, |
| { |
| "epoch": 0.4984025559105431, |
| "grad_norm": 2.3789966106414795, |
| "learning_rate": 9.874639560909118e-06, |
| "loss": 0.967, |
| "step": 13 |
| }, |
| { |
| "epoch": 0.536741214057508, |
| "grad_norm": 1.7590055465698242, |
| "learning_rate": 9.819814303479268e-06, |
| "loss": 0.9176, |
| "step": 14 |
| }, |
| { |
| "epoch": 0.5750798722044729, |
| "grad_norm": 1.504982829093933, |
| "learning_rate": 9.755282581475769e-06, |
| "loss": 0.9332, |
| "step": 15 |
| }, |
| { |
| "epoch": 0.6134185303514377, |
| "grad_norm": 1.328847885131836, |
| "learning_rate": 9.681174353198687e-06, |
| "loss": 0.9285, |
| "step": 16 |
| }, |
| { |
| "epoch": 0.6517571884984026, |
| "grad_norm": 1.3487250804901123, |
| "learning_rate": 9.597638862757255e-06, |
| "loss": 0.9548, |
| "step": 17 |
| }, |
| { |
| "epoch": 0.6900958466453674, |
| "grad_norm": 1.309167742729187, |
| "learning_rate": 9.504844339512096e-06, |
| "loss": 0.8749, |
| "step": 18 |
| }, |
| { |
| "epoch": 0.7284345047923323, |
| "grad_norm": 1.07999587059021, |
| "learning_rate": 9.40297765928369e-06, |
| "loss": 0.906, |
| "step": 19 |
| }, |
| { |
| "epoch": 0.7667731629392971, |
| "grad_norm": 0.9898855686187744, |
| "learning_rate": 9.292243968009332e-06, |
| "loss": 0.8826, |
| "step": 20 |
| }, |
| { |
| "epoch": 0.805111821086262, |
| "grad_norm": 1.084998369216919, |
| "learning_rate": 9.172866268606514e-06, |
| "loss": 0.884, |
| "step": 21 |
| }, |
| { |
| "epoch": 0.8434504792332268, |
| "grad_norm": 0.9995360374450684, |
| "learning_rate": 9.045084971874738e-06, |
| "loss": 0.8734, |
| "step": 22 |
| }, |
| { |
| "epoch": 0.8817891373801917, |
| "grad_norm": 0.914259672164917, |
| "learning_rate": 8.90915741234015e-06, |
| "loss": 0.8466, |
| "step": 23 |
| }, |
| { |
| "epoch": 0.9201277955271565, |
| "grad_norm": 0.8691526651382446, |
| "learning_rate": 8.765357330018056e-06, |
| "loss": 0.9043, |
| "step": 24 |
| }, |
| { |
| "epoch": 0.9584664536741214, |
| "grad_norm": 0.8191248774528503, |
| "learning_rate": 8.613974319136959e-06, |
| "loss": 0.9746, |
| "step": 25 |
| }, |
| { |
| "epoch": 0.9968051118210862, |
| "grad_norm": 0.9226800799369812, |
| "learning_rate": 8.455313244934324e-06, |
| "loss": 0.8703, |
| "step": 26 |
| }, |
| { |
| "epoch": 1.035143769968051, |
| "grad_norm": 2.459444046020508, |
| "learning_rate": 8.289693629698564e-06, |
| "loss": 1.3673, |
| "step": 27 |
| }, |
| { |
| "epoch": 1.073482428115016, |
| "grad_norm": 0.8009559512138367, |
| "learning_rate": 8.117449009293668e-06, |
| "loss": 0.8485, |
| "step": 28 |
| }, |
| { |
| "epoch": 1.1118210862619808, |
| "grad_norm": 0.703082799911499, |
| "learning_rate": 7.938926261462366e-06, |
| "loss": 0.8098, |
| "step": 29 |
| }, |
| { |
| "epoch": 1.1501597444089458, |
| "grad_norm": 0.7537962794303894, |
| "learning_rate": 7.754484907260513e-06, |
| "loss": 0.8444, |
| "step": 30 |
| }, |
| { |
| "epoch": 1.1884984025559104, |
| "grad_norm": 0.760931134223938, |
| "learning_rate": 7.564496387029532e-06, |
| "loss": 0.8598, |
| "step": 31 |
| }, |
| { |
| "epoch": 1.2268370607028753, |
| "grad_norm": 0.7525911331176758, |
| "learning_rate": 7.369343312364994e-06, |
| "loss": 0.865, |
| "step": 32 |
| }, |
| { |
| "epoch": 1.2651757188498403, |
| "grad_norm": 0.723669707775116, |
| "learning_rate": 7.169418695587791e-06, |
| "loss": 0.7959, |
| "step": 33 |
| }, |
| { |
| "epoch": 1.3035143769968052, |
| "grad_norm": 0.7163628339767456, |
| "learning_rate": 6.965125158269619e-06, |
| "loss": 0.8051, |
| "step": 34 |
| }, |
| { |
| "epoch": 1.34185303514377, |
| "grad_norm": 0.6843879222869873, |
| "learning_rate": 6.7568741204067145e-06, |
| "loss": 0.828, |
| "step": 35 |
| }, |
| { |
| "epoch": 1.3801916932907348, |
| "grad_norm": 0.6427307724952698, |
| "learning_rate": 6.545084971874738e-06, |
| "loss": 0.854, |
| "step": 36 |
| }, |
| { |
| "epoch": 1.4185303514376997, |
| "grad_norm": 0.7232085466384888, |
| "learning_rate": 6.330184227833376e-06, |
| "loss": 0.8848, |
| "step": 37 |
| }, |
| { |
| "epoch": 1.4568690095846646, |
| "grad_norm": 0.7611010074615479, |
| "learning_rate": 6.112604669781572e-06, |
| "loss": 0.6879, |
| "step": 38 |
| }, |
| { |
| "epoch": 1.4952076677316293, |
| "grad_norm": 0.7250047922134399, |
| "learning_rate": 5.892784473993184e-06, |
| "loss": 0.8684, |
| "step": 39 |
| }, |
| { |
| "epoch": 1.5335463258785942, |
| "grad_norm": 0.633452832698822, |
| "learning_rate": 5.671166329088278e-06, |
| "loss": 0.8257, |
| "step": 40 |
| }, |
| { |
| "epoch": 1.571884984025559, |
| "grad_norm": 0.6049233078956604, |
| "learning_rate": 5.448196544517168e-06, |
| "loss": 0.8012, |
| "step": 41 |
| }, |
| { |
| "epoch": 1.610223642172524, |
| "grad_norm": 0.701932430267334, |
| "learning_rate": 5.224324151752575e-06, |
| "loss": 0.8206, |
| "step": 42 |
| }, |
| { |
| "epoch": 1.648562300319489, |
| "grad_norm": 0.626121997833252, |
| "learning_rate": 5e-06, |
| "loss": 0.8031, |
| "step": 43 |
| }, |
| { |
| "epoch": 1.6869009584664538, |
| "grad_norm": 0.6095514297485352, |
| "learning_rate": 4.775675848247427e-06, |
| "loss": 0.8131, |
| "step": 44 |
| }, |
| { |
| "epoch": 1.7252396166134185, |
| "grad_norm": 0.6313042044639587, |
| "learning_rate": 4.551803455482833e-06, |
| "loss": 0.8061, |
| "step": 45 |
| }, |
| { |
| "epoch": 1.7635782747603834, |
| "grad_norm": 0.6074307560920715, |
| "learning_rate": 4.3288336709117246e-06, |
| "loss": 0.8172, |
| "step": 46 |
| }, |
| { |
| "epoch": 1.8019169329073481, |
| "grad_norm": 0.6239637732505798, |
| "learning_rate": 4.107215526006818e-06, |
| "loss": 0.8394, |
| "step": 47 |
| }, |
| { |
| "epoch": 1.840255591054313, |
| "grad_norm": 0.6018702983856201, |
| "learning_rate": 3.887395330218429e-06, |
| "loss": 0.8024, |
| "step": 48 |
| }, |
| { |
| "epoch": 1.878594249201278, |
| "grad_norm": 0.6898264288902283, |
| "learning_rate": 3.669815772166625e-06, |
| "loss": 0.8907, |
| "step": 49 |
| }, |
| { |
| "epoch": 1.9169329073482428, |
| "grad_norm": 0.5472529530525208, |
| "learning_rate": 3.4549150281252635e-06, |
| "loss": 0.7073, |
| "step": 50 |
| }, |
| { |
| "epoch": 1.9552715654952078, |
| "grad_norm": 0.5887239575386047, |
| "learning_rate": 3.2431258795932863e-06, |
| "loss": 0.7974, |
| "step": 51 |
| }, |
| { |
| "epoch": 1.9936102236421727, |
| "grad_norm": 0.5762373805046082, |
| "learning_rate": 3.0348748417303826e-06, |
| "loss": 0.8499, |
| "step": 52 |
| }, |
| { |
| "epoch": 2.0319488817891376, |
| "grad_norm": 1.788190484046936, |
| "learning_rate": 2.83058130441221e-06, |
| "loss": 1.4672, |
| "step": 53 |
| }, |
| { |
| "epoch": 2.070287539936102, |
| "grad_norm": 0.6082499027252197, |
| "learning_rate": 2.6306566876350072e-06, |
| "loss": 0.7681, |
| "step": 54 |
| }, |
| { |
| "epoch": 2.108626198083067, |
| "grad_norm": 0.5818493962287903, |
| "learning_rate": 2.43550361297047e-06, |
| "loss": 0.7296, |
| "step": 55 |
| }, |
| { |
| "epoch": 2.146964856230032, |
| "grad_norm": 0.4775056540966034, |
| "learning_rate": 2.245515092739488e-06, |
| "loss": 0.7339, |
| "step": 56 |
| }, |
| { |
| "epoch": 2.1853035143769968, |
| "grad_norm": 0.48535844683647156, |
| "learning_rate": 2.061073738537635e-06, |
| "loss": 0.7905, |
| "step": 57 |
| }, |
| { |
| "epoch": 2.2236421725239617, |
| "grad_norm": 0.5611085295677185, |
| "learning_rate": 1.8825509907063328e-06, |
| "loss": 0.7852, |
| "step": 58 |
| }, |
| { |
| "epoch": 2.2619808306709266, |
| "grad_norm": 0.554451048374176, |
| "learning_rate": 1.7103063703014372e-06, |
| "loss": 0.7263, |
| "step": 59 |
| }, |
| { |
| "epoch": 2.3003194888178915, |
| "grad_norm": 0.5591139793395996, |
| "learning_rate": 1.544686755065677e-06, |
| "loss": 0.7394, |
| "step": 60 |
| }, |
| { |
| "epoch": 2.3386581469648564, |
| "grad_norm": 0.5835963487625122, |
| "learning_rate": 1.3860256808630429e-06, |
| "loss": 0.7811, |
| "step": 61 |
| }, |
| { |
| "epoch": 2.376996805111821, |
| "grad_norm": 0.4586201608181, |
| "learning_rate": 1.234642669981946e-06, |
| "loss": 0.6595, |
| "step": 62 |
| }, |
| { |
| "epoch": 2.415335463258786, |
| "grad_norm": 0.6206969618797302, |
| "learning_rate": 1.0908425876598512e-06, |
| "loss": 0.9082, |
| "step": 63 |
| }, |
| { |
| "epoch": 2.4536741214057507, |
| "grad_norm": 0.5300808548927307, |
| "learning_rate": 9.549150281252633e-07, |
| "loss": 0.7043, |
| "step": 64 |
| }, |
| { |
| "epoch": 2.4920127795527156, |
| "grad_norm": 0.5531863570213318, |
| "learning_rate": 8.271337313934869e-07, |
| "loss": 0.7529, |
| "step": 65 |
| }, |
| { |
| "epoch": 2.5303514376996805, |
| "grad_norm": 0.5038548111915588, |
| "learning_rate": 7.077560319906696e-07, |
| "loss": 0.7902, |
| "step": 66 |
| }, |
| { |
| "epoch": 2.5686900958466454, |
| "grad_norm": 0.4608278274536133, |
| "learning_rate": 5.9702234071631e-07, |
| "loss": 0.7202, |
| "step": 67 |
| }, |
| { |
| "epoch": 2.6070287539936103, |
| "grad_norm": 0.5147573351860046, |
| "learning_rate": 4.951556604879049e-07, |
| "loss": 0.8864, |
| "step": 68 |
| }, |
| { |
| "epoch": 2.6453674121405752, |
| "grad_norm": 0.5285633206367493, |
| "learning_rate": 4.0236113724274716e-07, |
| "loss": 0.7205, |
| "step": 69 |
| }, |
| { |
| "epoch": 2.68370607028754, |
| "grad_norm": 0.511046290397644, |
| "learning_rate": 3.18825646801314e-07, |
| "loss": 0.72, |
| "step": 70 |
| }, |
| { |
| "epoch": 2.722044728434505, |
| "grad_norm": 0.5221746563911438, |
| "learning_rate": 2.447174185242324e-07, |
| "loss": 0.7959, |
| "step": 71 |
| }, |
| { |
| "epoch": 2.7603833865814695, |
| "grad_norm": 0.504964292049408, |
| "learning_rate": 1.801856965207338e-07, |
| "loss": 0.7179, |
| "step": 72 |
| }, |
| { |
| "epoch": 2.7987220447284344, |
| "grad_norm": 0.4362109899520874, |
| "learning_rate": 1.253604390908819e-07, |
| "loss": 0.6673, |
| "step": 73 |
| }, |
| { |
| "epoch": 2.8370607028753994, |
| "grad_norm": 0.5560856461524963, |
| "learning_rate": 8.035205700685167e-08, |
| "loss": 0.8126, |
| "step": 74 |
| }, |
| { |
| "epoch": 2.8753993610223643, |
| "grad_norm": 0.5098027586936951, |
| "learning_rate": 4.52511911603265e-08, |
| "loss": 0.7562, |
| "step": 75 |
| }, |
| { |
| "epoch": 2.913738019169329, |
| "grad_norm": 0.494514137506485, |
| "learning_rate": 2.012853002380466e-08, |
| "loss": 0.7857, |
| "step": 76 |
| }, |
| { |
| "epoch": 2.952076677316294, |
| "grad_norm": 0.5156562924385071, |
| "learning_rate": 5.034667293427053e-09, |
| "loss": 0.7503, |
| "step": 77 |
| }, |
| { |
| "epoch": 2.9904153354632586, |
| "grad_norm": 0.4876989722251892, |
| "learning_rate": 0.0, |
| "loss": 0.7684, |
| "step": 78 |
| }, |
| { |
| "epoch": 2.9904153354632586, |
| "step": 78, |
| "total_flos": 53089482080256.0, |
| "train_loss": 0.8778469868195362, |
| "train_runtime": 3457.6659, |
| "train_samples_per_second": 2.169, |
| "train_steps_per_second": 0.023 |
| } |
| ], |
| "logging_steps": 1.0, |
| "max_steps": 78, |
| "num_input_tokens_seen": 0, |
| "num_train_epochs": 3, |
| "save_steps": 500, |
| "stateful_callbacks": { |
| "TrainerControl": { |
| "args": { |
| "should_epoch_stop": false, |
| "should_evaluate": false, |
| "should_log": false, |
| "should_save": true, |
| "should_training_stop": true |
| }, |
| "attributes": {} |
| } |
| }, |
| "total_flos": 53089482080256.0, |
| "train_batch_size": 1, |
| "trial_name": null, |
| "trial_params": null |
| } |