{ "best_metric": 0.4463141858577728, "best_model_checkpoint": "mikhail-panzo/fil_b32_le3_s4000/checkpoint-500", "epoch": 22.22222222222222, "eval_steps": 500, "global_step": 1000, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 1.1111111111111112, "grad_norm": 3.548590898513794, "learning_rate": 2.5e-05, "loss": 0.7354, "step": 50 }, { "epoch": 2.2222222222222223, "grad_norm": 5.517350196838379, "learning_rate": 4.9500000000000004e-05, "loss": 0.5892, "step": 100 }, { "epoch": 3.3333333333333335, "grad_norm": 4.143002033233643, "learning_rate": 7.45e-05, "loss": 0.532, "step": 150 }, { "epoch": 4.444444444444445, "grad_norm": 6.414477825164795, "learning_rate": 9.95e-05, "loss": 0.5284, "step": 200 }, { "epoch": 5.555555555555555, "grad_norm": 2.265143394470215, "learning_rate": 0.0001245, "loss": 0.517, "step": 250 }, { "epoch": 6.666666666666667, "grad_norm": 4.954311847686768, "learning_rate": 0.0001495, "loss": 0.5295, "step": 300 }, { "epoch": 7.777777777777778, "grad_norm": 2.5993571281433105, "learning_rate": 0.00017449999999999999, "loss": 0.517, "step": 350 }, { "epoch": 8.88888888888889, "grad_norm": 3.27908992767334, "learning_rate": 0.00019950000000000002, "loss": 0.4939, "step": 400 }, { "epoch": 10.0, "grad_norm": 4.22811222076416, "learning_rate": 0.0002245, "loss": 0.5043, "step": 450 }, { "epoch": 11.11111111111111, "grad_norm": 2.7051336765289307, "learning_rate": 0.0002495, "loss": 0.5028, "step": 500 }, { "epoch": 11.11111111111111, "eval_loss": 0.4463141858577728, "eval_runtime": 16.8864, "eval_samples_per_second": 9.416, "eval_steps_per_second": 1.184, "step": 500 }, { "epoch": 12.222222222222221, "grad_norm": 3.0811643600463867, "learning_rate": 0.0002745, "loss": 0.545, "step": 550 }, { "epoch": 13.333333333333334, "grad_norm": 7.707492828369141, "learning_rate": 0.0002995, "loss": 0.4979, "step": 600 }, { "epoch": 14.444444444444445, "grad_norm": 5.85659122467041, "learning_rate": 0.00032450000000000003, "loss": 0.4916, "step": 650 }, { "epoch": 15.555555555555555, "grad_norm": 8.300439834594727, "learning_rate": 0.0003495, "loss": 0.5112, "step": 700 }, { "epoch": 16.666666666666668, "grad_norm": 10.664773941040039, "learning_rate": 0.0003745, "loss": 0.5312, "step": 750 }, { "epoch": 17.77777777777778, "grad_norm": 6.4097161293029785, "learning_rate": 0.0003995, "loss": 0.6087, "step": 800 }, { "epoch": 18.88888888888889, "grad_norm": 5.435739994049072, "learning_rate": 0.0004245, "loss": 0.6127, "step": 850 }, { "epoch": 20.0, "grad_norm": 7.4649834632873535, "learning_rate": 0.00044950000000000003, "loss": 0.6904, "step": 900 }, { "epoch": 21.11111111111111, "grad_norm": 10.81910228729248, "learning_rate": 0.0004745, "loss": 0.6948, "step": 950 }, { "epoch": 22.22222222222222, "grad_norm": 4.4550886154174805, "learning_rate": 0.0004995, "loss": 0.6348, "step": 1000 }, { "epoch": 22.22222222222222, "eval_loss": 0.5990382432937622, "eval_runtime": 9.0191, "eval_samples_per_second": 17.629, "eval_steps_per_second": 2.218, "step": 1000 } ], "logging_steps": 50, "max_steps": 4000, "num_input_tokens_seen": 0, "num_train_epochs": 89, "save_steps": 500, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 6884217255754080.0, "train_batch_size": 16, "trial_name": null, "trial_params": null }