{ "best_metric": null, "best_model_checkpoint": null, "epoch": 2.1509824198552225, "eval_steps": 500, "global_step": 130, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.02, "learning_rate": 4.000000000000001e-06, "loss": 0.643, "step": 1 }, { "epoch": 0.03, "learning_rate": 8.000000000000001e-06, "loss": 0.6409, "step": 2 }, { "epoch": 0.05, "learning_rate": 1.2e-05, "loss": 0.5779, "step": 3 }, { "epoch": 0.07, "learning_rate": 1.6000000000000003e-05, "loss": 0.6641, "step": 4 }, { "epoch": 0.08, "learning_rate": 2e-05, "loss": 0.61, "step": 5 }, { "epoch": 0.1, "learning_rate": 2.4e-05, "loss": 0.6653, "step": 6 }, { "epoch": 0.12, "learning_rate": 2.8000000000000003e-05, "loss": 0.6101, "step": 7 }, { "epoch": 0.13, "learning_rate": 3.2000000000000005e-05, "loss": 0.7479, "step": 8 }, { "epoch": 0.15, "learning_rate": 3.6e-05, "loss": 0.6357, "step": 9 }, { "epoch": 0.17, "learning_rate": 4e-05, "loss": 0.6214, "step": 10 }, { "epoch": 0.18, "learning_rate": 4.4000000000000006e-05, "loss": 0.6257, "step": 11 }, { "epoch": 0.2, "learning_rate": 4.8e-05, "loss": 0.6228, "step": 12 }, { "epoch": 0.22, "learning_rate": 5.2000000000000004e-05, "loss": 0.6295, "step": 13 }, { "epoch": 0.23, "learning_rate": 5.6000000000000006e-05, "loss": 0.6345, "step": 14 }, { "epoch": 0.25, "learning_rate": 6e-05, "loss": 0.6282, "step": 15 }, { "epoch": 0.26, "learning_rate": 6.400000000000001e-05, "loss": 0.6231, "step": 16 }, { "epoch": 0.28, "learning_rate": 6.800000000000001e-05, "loss": 0.6282, "step": 17 }, { "epoch": 0.3, "learning_rate": 7.2e-05, "loss": 0.6423, "step": 18 }, { "epoch": 0.31, "learning_rate": 7.6e-05, "loss": 0.6055, "step": 19 }, { "epoch": 0.33, "learning_rate": 8e-05, "loss": 0.6244, "step": 20 }, { "epoch": 0.35, "learning_rate": 8.4e-05, "loss": 0.5443, "step": 21 }, { "epoch": 0.36, "learning_rate": 8.800000000000001e-05, "loss": 0.6479, "step": 22 }, { "epoch": 0.38, "learning_rate": 9.200000000000001e-05, "loss": 0.5565, "step": 23 }, { "epoch": 0.4, "learning_rate": 9.6e-05, "loss": 0.6131, "step": 24 }, { "epoch": 0.41, "learning_rate": 0.0001, "loss": 0.6696, "step": 25 }, { "epoch": 0.43, "learning_rate": 0.00010400000000000001, "loss": 0.5886, "step": 26 }, { "epoch": 0.45, "learning_rate": 0.00010800000000000001, "loss": 0.5999, "step": 27 }, { "epoch": 0.46, "learning_rate": 0.00011200000000000001, "loss": 0.7083, "step": 28 }, { "epoch": 0.48, "learning_rate": 0.000116, "loss": 0.6178, "step": 29 }, { "epoch": 0.5, "learning_rate": 0.00012, "loss": 0.6466, "step": 30 }, { "epoch": 0.51, "learning_rate": 0.000124, "loss": 0.5462, "step": 31 }, { "epoch": 0.53, "learning_rate": 0.00012800000000000002, "loss": 0.6239, "step": 32 }, { "epoch": 0.55, "learning_rate": 0.000132, "loss": 0.6646, "step": 33 }, { "epoch": 0.56, "learning_rate": 0.00013600000000000003, "loss": 0.6293, "step": 34 }, { "epoch": 0.58, "learning_rate": 0.00014, "loss": 0.5853, "step": 35 }, { "epoch": 0.6, "learning_rate": 0.000144, "loss": 0.6881, "step": 36 }, { "epoch": 0.61, "learning_rate": 0.000148, "loss": 0.6398, "step": 37 }, { "epoch": 0.63, "learning_rate": 0.000152, "loss": 0.6054, "step": 38 }, { "epoch": 0.65, "learning_rate": 0.00015600000000000002, "loss": 0.6332, "step": 39 }, { "epoch": 0.66, "learning_rate": 0.00016, "loss": 0.5941, "step": 40 }, { "epoch": 0.68, "learning_rate": 0.000164, "loss": 0.6127, "step": 41 }, { "epoch": 0.69, "learning_rate": 0.000168, "loss": 0.6475, "step": 42 }, { "epoch": 
0.71, "learning_rate": 0.000172, "loss": 0.5668, "step": 43 }, { "epoch": 0.73, "learning_rate": 0.00017600000000000002, "loss": 0.6348, "step": 44 }, { "epoch": 0.74, "learning_rate": 0.00018, "loss": 0.6379, "step": 45 }, { "epoch": 0.76, "learning_rate": 0.00018400000000000003, "loss": 0.5992, "step": 46 }, { "epoch": 0.78, "learning_rate": 0.000188, "loss": 0.6435, "step": 47 }, { "epoch": 0.79, "learning_rate": 0.000192, "loss": 0.5909, "step": 48 }, { "epoch": 0.81, "learning_rate": 0.000196, "loss": 0.6596, "step": 49 }, { "epoch": 0.83, "learning_rate": 0.0002, "loss": 0.6426, "step": 50 }, { "epoch": 0.84, "learning_rate": 0.00020400000000000003, "loss": 0.5908, "step": 51 }, { "epoch": 0.86, "learning_rate": 0.00020800000000000001, "loss": 0.5486, "step": 52 }, { "epoch": 0.88, "learning_rate": 0.00021200000000000003, "loss": 0.6184, "step": 53 }, { "epoch": 0.89, "learning_rate": 0.00021600000000000002, "loss": 0.6977, "step": 54 }, { "epoch": 0.91, "learning_rate": 0.00022000000000000003, "loss": 0.6308, "step": 55 }, { "epoch": 0.93, "learning_rate": 0.00022400000000000002, "loss": 0.5394, "step": 56 }, { "epoch": 0.94, "learning_rate": 0.00022799999999999999, "loss": 0.5426, "step": 57 }, { "epoch": 0.96, "learning_rate": 0.000232, "loss": 0.5429, "step": 58 }, { "epoch": 0.98, "learning_rate": 0.000236, "loss": 0.5822, "step": 59 }, { "epoch": 0.99, "learning_rate": 0.00024, "loss": 0.5793, "step": 60 }, { "epoch": 1.01, "learning_rate": 0.000244, "loss": 0.5906, "step": 61 }, { "epoch": 1.03, "learning_rate": 0.000248, "loss": 0.4957, "step": 62 }, { "epoch": 1.04, "learning_rate": 0.000252, "loss": 0.5421, "step": 63 }, { "epoch": 1.06, "learning_rate": 0.00025600000000000004, "loss": 0.5444, "step": 64 }, { "epoch": 1.08, "learning_rate": 0.00026000000000000003, "loss": 0.5618, "step": 65 }, { "epoch": 1.09, "learning_rate": 0.000264, "loss": 0.5372, "step": 66 }, { "epoch": 1.11, "learning_rate": 0.000268, "loss": 0.5701, "step": 67 }, { "epoch": 1.13, "learning_rate": 0.00027200000000000005, "loss": 0.5257, "step": 68 }, { "epoch": 1.14, "learning_rate": 0.000276, "loss": 0.5237, "step": 69 }, { "epoch": 1.16, "learning_rate": 0.00028, "loss": 0.4729, "step": 70 }, { "epoch": 1.17, "learning_rate": 0.000284, "loss": 0.5142, "step": 71 }, { "epoch": 1.19, "learning_rate": 0.000288, "loss": 0.5365, "step": 72 }, { "epoch": 1.21, "learning_rate": 0.000292, "loss": 0.5288, "step": 73 }, { "epoch": 1.22, "learning_rate": 0.000296, "loss": 0.5095, "step": 74 }, { "epoch": 1.24, "learning_rate": 0.00030000000000000003, "loss": 0.5229, "step": 75 }, { "epoch": 1.26, "learning_rate": 0.000304, "loss": 0.5355, "step": 76 }, { "epoch": 1.27, "learning_rate": 0.000308, "loss": 0.5626, "step": 77 }, { "epoch": 1.29, "learning_rate": 0.00031200000000000005, "loss": 0.5548, "step": 78 }, { "epoch": 1.31, "learning_rate": 0.00031600000000000004, "loss": 0.5404, "step": 79 }, { "epoch": 1.32, "learning_rate": 0.00032, "loss": 0.5686, "step": 80 }, { "epoch": 1.34, "learning_rate": 0.000324, "loss": 0.5242, "step": 81 }, { "epoch": 1.36, "learning_rate": 0.000328, "loss": 0.52, "step": 82 }, { "epoch": 1.37, "learning_rate": 0.000332, "loss": 0.5481, "step": 83 }, { "epoch": 1.39, "learning_rate": 0.000336, "loss": 0.5321, "step": 84 }, { "epoch": 1.41, "learning_rate": 0.00034, "loss": 0.603, "step": 85 }, { "epoch": 1.42, "learning_rate": 0.000344, "loss": 0.4836, "step": 86 }, { "epoch": 1.44, "learning_rate": 0.000348, "loss": 0.5583, "step": 87 }, { "epoch": 1.46, "learning_rate": 
0.00035200000000000005, "loss": 0.5336, "step": 88 }, { "epoch": 1.47, "learning_rate": 0.00035600000000000003, "loss": 0.4061, "step": 89 }, { "epoch": 1.49, "learning_rate": 0.00036, "loss": 0.5555, "step": 90 }, { "epoch": 1.51, "learning_rate": 0.000364, "loss": 0.5216, "step": 91 }, { "epoch": 1.52, "learning_rate": 0.00036800000000000005, "loss": 0.4991, "step": 92 }, { "epoch": 1.54, "learning_rate": 0.00037200000000000004, "loss": 0.5601, "step": 93 }, { "epoch": 1.56, "learning_rate": 0.000376, "loss": 0.4857, "step": 94 }, { "epoch": 1.57, "learning_rate": 0.00038, "loss": 0.5439, "step": 95 }, { "epoch": 1.59, "learning_rate": 0.000384, "loss": 0.5817, "step": 96 }, { "epoch": 1.6, "learning_rate": 0.000388, "loss": 0.5179, "step": 97 }, { "epoch": 1.62, "learning_rate": 0.000392, "loss": 0.5464, "step": 98 }, { "epoch": 1.64, "learning_rate": 0.00039600000000000003, "loss": 0.513, "step": 99 }, { "epoch": 1.65, "learning_rate": 0.0004, "loss": 0.4899, "step": 100 }, { "epoch": 1.67, "learning_rate": 0.0003998458072481446, "loss": 0.5715, "step": 101 }, { "epoch": 1.69, "learning_rate": 0.0003993834667466256, "loss": 0.5312, "step": 102 }, { "epoch": 1.7, "learning_rate": 0.0003986136913909853, "loss": 0.5066, "step": 103 }, { "epoch": 1.72, "learning_rate": 0.00039753766811902755, "loss": 0.5407, "step": 104 }, { "epoch": 1.74, "learning_rate": 0.0003961570560806461, "loss": 0.5281, "step": 105 }, { "epoch": 1.75, "learning_rate": 0.0003944739840795353, "loss": 0.4873, "step": 106 }, { "epoch": 1.77, "learning_rate": 0.00039249104729072946, "loss": 0.5336, "step": 107 }, { "epoch": 1.79, "learning_rate": 0.00039021130325903074, "loss": 0.4842, "step": 108 }, { "epoch": 1.8, "learning_rate": 0.00038763826718449685, "loss": 0.5136, "step": 109 }, { "epoch": 1.82, "learning_rate": 0.0003847759065022574, "loss": 0.5359, "step": 110 }, { "epoch": 1.84, "learning_rate": 0.0003816286347650163, "loss": 0.4766, "step": 111 }, { "epoch": 1.85, "learning_rate": 0.0003782013048376736, "loss": 0.4896, "step": 112 }, { "epoch": 1.87, "learning_rate": 0.00037449920141455944, "loss": 0.4977, "step": 113 }, { "epoch": 1.89, "learning_rate": 0.00037052803287081844, "loss": 0.506, "step": 114 }, { "epoch": 1.9, "learning_rate": 0.0003662939224605091, "loss": 0.5442, "step": 115 }, { "epoch": 1.92, "learning_rate": 0.0003618033988749895, "loss": 0.5445, "step": 116 }, { "epoch": 1.94, "learning_rate": 0.00035706338617614897, "loss": 0.5348, "step": 117 }, { "epoch": 1.95, "learning_rate": 0.0003520811931200062, "loss": 0.5331, "step": 118 }, { "epoch": 1.97, "learning_rate": 0.0003468645018871371, "loss": 0.4697, "step": 119 }, { "epoch": 1.99, "learning_rate": 0.0003414213562373095, "loss": 0.5119, "step": 120 }, { "epoch": 2.0, "learning_rate": 0.0003357601491065884, "loss": 0.4679, "step": 121 }, { "epoch": 2.02, "learning_rate": 0.0003298896096660367, "loss": 0.3522, "step": 122 }, { "epoch": 2.04, "learning_rate": 0.00032381878986196687, "loss": 0.3267, "step": 123 }, { "epoch": 2.05, "learning_rate": 0.00031755705045849464, "loss": 0.3626, "step": 124 }, { "epoch": 2.07, "learning_rate": 0.00031111404660392046, "loss": 0.3274, "step": 125 }, { "epoch": 2.08, "learning_rate": 0.0003044997129431898, "loss": 0.3159, "step": 126 }, { "epoch": 2.1, "learning_rate": 0.00029772424829939106, "loss": 0.3336, "step": 127 }, { "epoch": 2.12, "learning_rate": 0.00029079809994790937, "loss": 0.2973, "step": 128 }, { "epoch": 2.13, "learning_rate": 0.0002837319475074856, "loss": 0.319, "step": 129 }, { 
"epoch": 2.15, "learning_rate": 0.000276536686473018, "loss": 0.2978, "step": 130 } ], "logging_steps": 1, "max_steps": 180, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 10, "total_flos": 4.50723491463168e+17, "trial_name": null, "trial_params": null }