{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 243,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2476780185758514,
      "grad_norm": 0.9520496726036072,
      "learning_rate": 9.5e-05,
      "loss": 5.8329,
      "step": 20
    },
    {
      "epoch": 0.4953560371517028,
      "grad_norm": 0.8770681619644165,
      "learning_rate": 9.147982062780269e-05,
      "loss": 5.5358,
      "step": 40
    },
    {
      "epoch": 0.7430340557275542,
      "grad_norm": 1.0322086811065674,
      "learning_rate": 8.251121076233185e-05,
      "loss": 5.4243,
      "step": 60
    },
    {
      "epoch": 0.9907120743034056,
      "grad_norm": 0.8727726340293884,
      "learning_rate": 7.354260089686099e-05,
      "loss": 5.3845,
      "step": 80
    },
    {
      "epoch": 1.0,
      "eval_loss": 5.324367523193359,
      "eval_runtime": 11.8461,
      "eval_samples_per_second": 6.078,
      "eval_steps_per_second": 3.039,
      "step": 81
    },
    {
      "epoch": 1.2352941176470589,
      "grad_norm": 1.2265089750289917,
      "learning_rate": 6.457399103139014e-05,
      "loss": 5.2119,
      "step": 100
    },
    {
      "epoch": 1.4829721362229102,
      "grad_norm": 0.9408429265022278,
      "learning_rate": 5.560538116591929e-05,
      "loss": 5.1513,
      "step": 120
    },
    {
      "epoch": 1.7306501547987616,
      "grad_norm": 1.1574674844741821,
      "learning_rate": 4.6636771300448435e-05,
      "loss": 5.1168,
      "step": 140
    },
    {
      "epoch": 1.978328173374613,
      "grad_norm": 1.0991945266723633,
      "learning_rate": 3.766816143497758e-05,
      "loss": 5.1545,
      "step": 160
    },
    {
      "epoch": 2.0,
      "eval_loss": 5.29133415222168,
      "eval_runtime": 9.9587,
      "eval_samples_per_second": 7.23,
      "eval_steps_per_second": 3.615,
      "step": 162
    },
    {
      "epoch": 2.222910216718266,
      "grad_norm": 1.2449538707733154,
      "learning_rate": 2.8699551569506727e-05,
      "loss": 4.9941,
      "step": 180
    },
    {
      "epoch": 2.4705882352941178,
      "grad_norm": 1.2352479696273804,
      "learning_rate": 1.9730941704035873e-05,
      "loss": 4.9227,
      "step": 200
    },
    {
      "epoch": 2.718266253869969,
      "grad_norm": 1.1933473348617554,
      "learning_rate": 1.0762331838565023e-05,
      "loss": 4.9397,
      "step": 220
    },
    {
      "epoch": 2.9659442724458205,
      "grad_norm": 1.0653965473175049,
      "learning_rate": 1.7937219730941704e-06,
      "loss": 4.9391,
      "step": 240
    },
    {
      "epoch": 3.0,
      "eval_loss": 5.3089494705200195,
      "eval_runtime": 10.0065,
      "eval_samples_per_second": 7.195,
      "eval_steps_per_second": 3.598,
      "step": 243
    }
  ],
  "logging_steps": 20,
  "max_steps": 243,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1645442986239648.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}