{ "best_metric": null, "best_model_checkpoint": null, "epoch": 7.861635220125786, "eval_steps": 500, "global_step": 2500, "is_hyper_param_search": true, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 1.0, "eval_accuracy": 0.6216129032258064, "eval_loss": 0.230796217918396, "eval_runtime": 2.0292, "eval_samples_per_second": 1527.677, "eval_steps_per_second": 32.032, "step": 318 }, { "epoch": 1.57, "grad_norm": 0.5734981894493103, "learning_rate": 1.606918238993711e-05, "loss": 0.3689, "step": 500 }, { "epoch": 2.0, "eval_accuracy": 0.8338709677419355, "eval_loss": 0.10718880593776703, "eval_runtime": 2.0538, "eval_samples_per_second": 1509.399, "eval_steps_per_second": 31.649, "step": 636 }, { "epoch": 3.0, "eval_accuracy": 0.8806451612903226, "eval_loss": 0.068777896463871, "eval_runtime": 2.2761, "eval_samples_per_second": 1362.004, "eval_steps_per_second": 28.558, "step": 954 }, { "epoch": 3.14, "grad_norm": 0.44710573554039, "learning_rate": 1.2138364779874214e-05, "loss": 0.1243, "step": 1000 }, { "epoch": 4.0, "eval_accuracy": 0.9012903225806451, "eval_loss": 0.051503442227840424, "eval_runtime": 2.2457, "eval_samples_per_second": 1380.387, "eval_steps_per_second": 28.944, "step": 1272 }, { "epoch": 4.72, "grad_norm": 0.3705664277076721, "learning_rate": 8.207547169811321e-06, "loss": 0.0768, "step": 1500 }, { "epoch": 5.0, "eval_accuracy": 0.9135483870967742, "eval_loss": 0.04221797734498978, "eval_runtime": 2.1276, "eval_samples_per_second": 1457.071, "eval_steps_per_second": 30.551, "step": 1590 }, { "epoch": 6.0, "eval_accuracy": 0.9167741935483871, "eval_loss": 0.03726252540946007, "eval_runtime": 2.1432, "eval_samples_per_second": 1446.46, "eval_steps_per_second": 30.329, "step": 1908 }, { "epoch": 6.29, "grad_norm": 0.28841668367385864, "learning_rate": 4.276729559748428e-06, "loss": 0.0606, "step": 2000 }, { "epoch": 7.0, "eval_accuracy": 0.922258064516129, "eval_loss": 0.03493266925215721, "eval_runtime": 2.1129, "eval_samples_per_second": 1467.162, "eval_steps_per_second": 30.763, "step": 2226 }, { "epoch": 7.86, "grad_norm": 0.265544593334198, "learning_rate": 3.459119496855346e-07, "loss": 0.0543, "step": 2500 } ], "logging_steps": 500, "max_steps": 2544, "num_input_tokens_seen": 0, "num_train_epochs": 8, "save_steps": 500, "total_flos": 651155886807636.0, "train_batch_size": 48, "trial_name": null, "trial_params": { "alpha": 0.5475107121282025, "num_train_epochs": 8, "temperature": 5 } }