{ "best_metric": null, "best_model_checkpoint": null, "epoch": 3.5309734513274336, "eval_steps": 50, "global_step": 300, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.11799410029498525, "grad_norm": 2.4879140853881836, "learning_rate": 0.0003, "loss": 1.8567, "step": 10 }, { "epoch": 0.2359882005899705, "grad_norm": 0.8311281204223633, "learning_rate": 0.0002963855421686747, "loss": 1.3245, "step": 20 }, { "epoch": 0.35398230088495575, "grad_norm": 0.6665307879447937, "learning_rate": 0.0002927710843373494, "loss": 1.1834, "step": 30 }, { "epoch": 0.471976401179941, "grad_norm": 0.5632417798042297, "learning_rate": 0.0002891566265060241, "loss": 1.0915, "step": 40 }, { "epoch": 0.5899705014749262, "grad_norm": 0.4381982684135437, "learning_rate": 0.00028554216867469873, "loss": 1.0438, "step": 50 }, { "epoch": 0.5899705014749262, "eval_loss": 1.0292344093322754, "eval_runtime": 20.4608, "eval_samples_per_second": 8.015, "eval_steps_per_second": 0.684, "step": 50 }, { "epoch": 0.7079646017699115, "grad_norm": 0.43256932497024536, "learning_rate": 0.0002819277108433735, "loss": 1.0156, "step": 60 }, { "epoch": 0.8259587020648967, "grad_norm": 0.3582414388656616, "learning_rate": 0.0002783132530120482, "loss": 0.9793, "step": 70 }, { "epoch": 0.943952802359882, "grad_norm": 0.3671708405017853, "learning_rate": 0.00027469879518072284, "loss": 0.9644, "step": 80 }, { "epoch": 1.0589970501474926, "grad_norm": 0.34048739075660706, "learning_rate": 0.0002710843373493976, "loss": 0.9173, "step": 90 }, { "epoch": 1.176991150442478, "grad_norm": 0.29820743203163147, "learning_rate": 0.00026746987951807225, "loss": 0.8913, "step": 100 }, { "epoch": 1.176991150442478, "eval_loss": 0.9288875460624695, "eval_runtime": 20.3929, "eval_samples_per_second": 8.042, "eval_steps_per_second": 0.687, "step": 100 }, { "epoch": 1.294985250737463, "grad_norm": 0.31813672184944153, "learning_rate": 0.00026385542168674695, "loss": 0.8801, "step": 110 }, { "epoch": 1.4129793510324484, "grad_norm": 0.2814970016479492, "learning_rate": 0.00026024096385542165, "loss": 0.875, "step": 120 }, { "epoch": 1.5309734513274336, "grad_norm": 0.2815490663051605, "learning_rate": 0.00025662650602409636, "loss": 0.8642, "step": 130 }, { "epoch": 1.648967551622419, "grad_norm": 0.2590789198875427, "learning_rate": 0.00025301204819277106, "loss": 0.8501, "step": 140 }, { "epoch": 1.7669616519174043, "grad_norm": 0.25773778557777405, "learning_rate": 0.00024939759036144576, "loss": 0.8487, "step": 150 }, { "epoch": 1.7669616519174043, "eval_loss": 0.8774459362030029, "eval_runtime": 20.4395, "eval_samples_per_second": 8.024, "eval_steps_per_second": 0.685, "step": 150 }, { "epoch": 1.8849557522123894, "grad_norm": 0.2629424035549164, "learning_rate": 0.00024578313253012046, "loss": 0.8427, "step": 160 }, { "epoch": 2.0, "grad_norm": 0.2865707278251648, "learning_rate": 0.00024216867469879517, "loss": 0.8258, "step": 170 }, { "epoch": 2.117994100294985, "grad_norm": 0.27073633670806885, "learning_rate": 0.00023855421686746987, "loss": 0.7835, "step": 180 }, { "epoch": 2.2359882005899703, "grad_norm": 0.2692751884460449, "learning_rate": 0.00023493975903614455, "loss": 0.7804, "step": 190 }, { "epoch": 2.353982300884956, "grad_norm": 0.2548248767852783, "learning_rate": 0.00023132530120481928, "loss": 0.7756, "step": 200 }, { "epoch": 2.353982300884956, "eval_loss": 0.8462932109832764, "eval_runtime": 20.4134, "eval_samples_per_second": 8.034, 
"eval_steps_per_second": 0.686, "step": 200 }, { "epoch": 2.471976401179941, "grad_norm": 0.21938325464725494, "learning_rate": 0.00022771084337349395, "loss": 0.7695, "step": 210 }, { "epoch": 2.589970501474926, "grad_norm": 0.23754891753196716, "learning_rate": 0.00022409638554216866, "loss": 0.7771, "step": 220 }, { "epoch": 2.7079646017699117, "grad_norm": 0.2382248044013977, "learning_rate": 0.00022048192771084336, "loss": 0.7672, "step": 230 }, { "epoch": 2.825958702064897, "grad_norm": 0.2536454200744629, "learning_rate": 0.00021686746987951806, "loss": 0.7645, "step": 240 }, { "epoch": 2.943952802359882, "grad_norm": 0.21255475282669067, "learning_rate": 0.00021325301204819274, "loss": 0.7667, "step": 250 }, { "epoch": 2.943952802359882, "eval_loss": 0.8220995664596558, "eval_runtime": 20.3922, "eval_samples_per_second": 8.042, "eval_steps_per_second": 0.687, "step": 250 }, { "epoch": 3.0589970501474926, "grad_norm": 0.2579372227191925, "learning_rate": 0.00020963855421686747, "loss": 0.7418, "step": 260 }, { "epoch": 3.1769911504424777, "grad_norm": 0.2223750501871109, "learning_rate": 0.00020602409638554214, "loss": 0.7198, "step": 270 }, { "epoch": 3.2949852507374633, "grad_norm": 0.26154494285583496, "learning_rate": 0.00020240963855421685, "loss": 0.7188, "step": 280 }, { "epoch": 3.4129793510324484, "grad_norm": 0.2428540140390396, "learning_rate": 0.00019879518072289155, "loss": 0.7158, "step": 290 }, { "epoch": 3.5309734513274336, "grad_norm": 0.2056645303964615, "learning_rate": 0.00019518072289156625, "loss": 0.7103, "step": 300 }, { "epoch": 3.5309734513274336, "eval_loss": 0.8080394864082336, "eval_runtime": 20.4084, "eval_samples_per_second": 8.036, "eval_steps_per_second": 0.686, "step": 300 } ], "logging_steps": 10, "max_steps": 840, "num_input_tokens_seen": 0, "num_train_epochs": 10, "save_steps": 100, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 5.433776809382511e+18, "train_batch_size": 4, "trial_name": null, "trial_params": null }