{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.4149377593360996,
  "eval_steps": 9,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.004149377593360996,
      "grad_norm": null,
      "learning_rate": 1e-05,
      "loss": 0.0,
      "step": 1
    },
    {
      "epoch": 0.004149377593360996,
      "eval_loss": null,
      "eval_runtime": 21.1811,
      "eval_samples_per_second": 9.584,
      "eval_steps_per_second": 1.228,
      "step": 1
    },
    {
      "epoch": 0.008298755186721992,
      "grad_norm": null,
      "learning_rate": 2e-05,
      "loss": 0.0,
      "step": 2
    },
    {
      "epoch": 0.012448132780082987,
      "grad_norm": null,
      "learning_rate": 3e-05,
      "loss": 0.0,
      "step": 3
    },
    {
      "epoch": 0.016597510373443983,
      "grad_norm": null,
      "learning_rate": 4e-05,
      "loss": 0.0,
      "step": 4
    },
    {
      "epoch": 0.02074688796680498,
      "grad_norm": null,
      "learning_rate": 5e-05,
      "loss": 0.0,
      "step": 5
    },
    {
      "epoch": 0.024896265560165973,
      "grad_norm": null,
      "learning_rate": 6e-05,
      "loss": 0.0,
      "step": 6
    },
    {
      "epoch": 0.029045643153526972,
      "grad_norm": null,
      "learning_rate": 7e-05,
      "loss": 0.0,
      "step": 7
    },
    {
      "epoch": 0.03319502074688797,
      "grad_norm": null,
      "learning_rate": 8e-05,
      "loss": 0.0,
      "step": 8
    },
    {
      "epoch": 0.03734439834024896,
      "grad_norm": null,
      "learning_rate": 9e-05,
      "loss": 0.0,
      "step": 9
    },
    {
      "epoch": 0.03734439834024896,
      "eval_loss": null,
      "eval_runtime": 20.3996,
      "eval_samples_per_second": 9.951,
      "eval_steps_per_second": 1.275,
      "step": 9
    },
    {
      "epoch": 0.04149377593360996,
      "grad_norm": null,
      "learning_rate": 0.0001,
      "loss": 0.0,
      "step": 10
    },
    {
      "epoch": 0.04564315352697095,
      "grad_norm": null,
      "learning_rate": 9.99695413509548e-05,
      "loss": 0.0,
      "step": 11
    },
    {
      "epoch": 0.04979253112033195,
      "grad_norm": null,
      "learning_rate": 9.987820251299122e-05,
      "loss": 0.0,
      "step": 12
    },
    {
      "epoch": 0.05394190871369295,
      "grad_norm": null,
      "learning_rate": 9.972609476841367e-05,
      "loss": 0.0,
      "step": 13
    },
    {
      "epoch": 0.058091286307053944,
      "grad_norm": null,
      "learning_rate": 9.951340343707852e-05,
      "loss": 0.0,
      "step": 14
    },
    {
      "epoch": 0.06224066390041494,
      "grad_norm": null,
      "learning_rate": 9.924038765061042e-05,
      "loss": 0.0,
      "step": 15
    },
    {
      "epoch": 0.06639004149377593,
      "grad_norm": null,
      "learning_rate": 9.890738003669029e-05,
      "loss": 0.0,
      "step": 16
    },
    {
      "epoch": 0.07053941908713693,
      "grad_norm": null,
      "learning_rate": 9.851478631379982e-05,
      "loss": 0.0,
      "step": 17
    },
    {
      "epoch": 0.07468879668049792,
      "grad_norm": null,
      "learning_rate": 9.806308479691595e-05,
      "loss": 0.0,
      "step": 18
    },
    {
      "epoch": 0.07468879668049792,
      "eval_loss": null,
      "eval_runtime": 20.3971,
      "eval_samples_per_second": 9.952,
      "eval_steps_per_second": 1.275,
      "step": 18
    },
    {
      "epoch": 0.07883817427385892,
      "grad_norm": null,
      "learning_rate": 9.755282581475769e-05,
      "loss": 0.0,
      "step": 19
    },
    {
      "epoch": 0.08298755186721991,
      "grad_norm": null,
      "learning_rate": 9.698463103929542e-05,
      "loss": 0.0,
      "step": 20
    },
    {
      "epoch": 0.08713692946058091,
      "grad_norm": null,
      "learning_rate": 9.635919272833938e-05,
      "loss": 0.0,
      "step": 21
    },
    {
      "epoch": 0.0912863070539419,
      "grad_norm": null,
      "learning_rate": 9.567727288213005e-05,
      "loss": 0.0,
      "step": 22
    },
    {
      "epoch": 0.0954356846473029,
      "grad_norm": null,
      "learning_rate": 9.493970231495835e-05,
      "loss": 0.0,
      "step": 23
    },
    {
      "epoch": 0.0995850622406639,
      "grad_norm": null,
      "learning_rate": 9.414737964294636e-05,
      "loss": 0.0,
      "step": 24
    },
    {
      "epoch": 0.1037344398340249,
      "grad_norm": null,
      "learning_rate": 9.330127018922194e-05,
      "loss": 0.0,
      "step": 25
    },
    {
      "epoch": 0.1078838174273859,
      "grad_norm": null,
      "learning_rate": 9.24024048078213e-05,
      "loss": 0.0,
      "step": 26
    },
    {
      "epoch": 0.11203319502074689,
      "grad_norm": null,
      "learning_rate": 9.145187862775209e-05,
      "loss": 0.0,
      "step": 27
    },
    {
      "epoch": 0.11203319502074689,
      "eval_loss": null,
      "eval_runtime": 20.4052,
      "eval_samples_per_second": 9.948,
      "eval_steps_per_second": 1.274,
      "step": 27
    },
    {
      "epoch": 0.11618257261410789,
      "grad_norm": null,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.0,
      "step": 28
    },
    {
      "epoch": 0.12033195020746888,
      "grad_norm": null,
      "learning_rate": 8.940053768033609e-05,
      "loss": 0.0,
      "step": 29
    },
    {
      "epoch": 0.12448132780082988,
      "grad_norm": null,
      "learning_rate": 8.83022221559489e-05,
      "loss": 0.0,
      "step": 30
    },
    {
      "epoch": 0.12863070539419086,
      "grad_norm": null,
      "learning_rate": 8.715724127386972e-05,
      "loss": 0.0,
      "step": 31
    },
    {
      "epoch": 0.13278008298755187,
      "grad_norm": null,
      "learning_rate": 8.596699001693255e-05,
      "loss": 0.0,
      "step": 32
    },
    {
      "epoch": 0.13692946058091288,
      "grad_norm": null,
      "learning_rate": 8.473291852294987e-05,
      "loss": 0.0,
      "step": 33
    },
    {
      "epoch": 0.14107883817427386,
      "grad_norm": null,
      "learning_rate": 8.345653031794292e-05,
      "loss": 0.0,
      "step": 34
    },
    {
      "epoch": 0.14522821576763487,
      "grad_norm": null,
      "learning_rate": 8.213938048432697e-05,
      "loss": 0.0,
      "step": 35
    },
    {
      "epoch": 0.14937759336099585,
      "grad_norm": null,
      "learning_rate": 8.07830737662829e-05,
      "loss": 0.0,
      "step": 36
    },
    {
      "epoch": 0.14937759336099585,
      "eval_loss": null,
      "eval_runtime": 20.394,
      "eval_samples_per_second": 9.954,
      "eval_steps_per_second": 1.275,
      "step": 36
    },
    {
      "epoch": 0.15352697095435686,
      "grad_norm": null,
      "learning_rate": 7.938926261462366e-05,
      "loss": 0.0,
      "step": 37
    },
    {
      "epoch": 0.15767634854771784,
      "grad_norm": null,
      "learning_rate": 7.795964517353735e-05,
      "loss": 0.0,
      "step": 38
    },
    {
      "epoch": 0.16182572614107885,
      "grad_norm": null,
      "learning_rate": 7.649596321166024e-05,
      "loss": 0.0,
      "step": 39
    },
    {
      "epoch": 0.16597510373443983,
      "grad_norm": null,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.0,
      "step": 40
    },
    {
      "epoch": 0.17012448132780084,
      "grad_norm": null,
      "learning_rate": 7.347357813929454e-05,
      "loss": 0.0,
      "step": 41
    },
    {
      "epoch": 0.17427385892116182,
      "grad_norm": null,
      "learning_rate": 7.191855733945387e-05,
      "loss": 0.0,
      "step": 42
    },
    {
      "epoch": 0.17842323651452283,
      "grad_norm": null,
      "learning_rate": 7.033683215379002e-05,
      "loss": 0.0,
      "step": 43
    },
    {
      "epoch": 0.1825726141078838,
      "grad_norm": null,
      "learning_rate": 6.873032967079561e-05,
      "loss": 0.0,
      "step": 44
    },
    {
      "epoch": 0.18672199170124482,
      "grad_norm": null,
      "learning_rate": 6.710100716628344e-05,
      "loss": 0.0,
      "step": 45
    },
    {
      "epoch": 0.18672199170124482,
      "eval_loss": null,
      "eval_runtime": 20.3953,
      "eval_samples_per_second": 9.953,
      "eval_steps_per_second": 1.275,
      "step": 45
    },
    {
      "epoch": 0.1908713692946058,
      "grad_norm": null,
      "learning_rate": 6.545084971874738e-05,
      "loss": 0.0,
      "step": 46
    },
    {
      "epoch": 0.1950207468879668,
      "grad_norm": null,
      "learning_rate": 6.378186779084995e-05,
      "loss": 0.0,
      "step": 47
    },
    {
      "epoch": 0.1991701244813278,
      "grad_norm": null,
      "learning_rate": 6.209609477998338e-05,
      "loss": 0.0,
      "step": 48
    },
    {
      "epoch": 0.2033195020746888,
      "grad_norm": null,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 0.0,
      "step": 49
    },
    {
      "epoch": 0.2074688796680498,
      "grad_norm": null,
      "learning_rate": 5.868240888334653e-05,
      "loss": 0.0,
      "step": 50
    },
    {
      "epoch": 0.21161825726141079,
      "grad_norm": null,
      "learning_rate": 5.695865504800327e-05,
      "loss": 0.0,
      "step": 51
    },
    {
      "epoch": 0.2157676348547718,
      "grad_norm": null,
      "learning_rate": 5.522642316338268e-05,
      "loss": 0.0,
      "step": 52
    },
    {
      "epoch": 0.21991701244813278,
      "grad_norm": null,
      "learning_rate": 5.348782368720626e-05,
      "loss": 0.0,
      "step": 53
    },
    {
      "epoch": 0.22406639004149378,
      "grad_norm": null,
      "learning_rate": 5.174497483512506e-05,
      "loss": 0.0,
      "step": 54
    },
    {
      "epoch": 0.22406639004149378,
      "eval_loss": null,
      "eval_runtime": 20.3937,
      "eval_samples_per_second": 9.954,
      "eval_steps_per_second": 1.275,
      "step": 54
    },
    {
      "epoch": 0.22821576763485477,
      "grad_norm": null,
      "learning_rate": 5e-05,
      "loss": 0.0,
      "step": 55
    },
    {
      "epoch": 0.23236514522821577,
      "grad_norm": null,
      "learning_rate": 4.825502516487497e-05,
      "loss": 0.0,
      "step": 56
    },
    {
      "epoch": 0.23651452282157676,
      "grad_norm": null,
      "learning_rate": 4.6512176312793736e-05,
      "loss": 0.0,
      "step": 57
    },
    {
      "epoch": 0.24066390041493776,
      "grad_norm": null,
      "learning_rate": 4.477357683661734e-05,
      "loss": 0.0,
      "step": 58
    },
    {
      "epoch": 0.24481327800829875,
      "grad_norm": null,
      "learning_rate": 4.3041344951996746e-05,
      "loss": 0.0,
      "step": 59
    },
    {
      "epoch": 0.24896265560165975,
      "grad_norm": null,
      "learning_rate": 4.131759111665349e-05,
      "loss": 0.0,
      "step": 60
    },
    {
      "epoch": 0.25311203319502074,
      "grad_norm": null,
      "learning_rate": 3.960441545911204e-05,
      "loss": 0.0,
      "step": 61
    },
    {
      "epoch": 0.2572614107883817,
      "grad_norm": null,
      "learning_rate": 3.790390522001662e-05,
      "loss": 0.0,
      "step": 62
    },
    {
      "epoch": 0.26141078838174275,
      "grad_norm": null,
      "learning_rate": 3.6218132209150045e-05,
      "loss": 0.0,
      "step": 63
    },
    {
      "epoch": 0.26141078838174275,
      "eval_loss": null,
      "eval_runtime": 20.3935,
      "eval_samples_per_second": 9.954,
      "eval_steps_per_second": 1.275,
      "step": 63
    },
    {
      "epoch": 0.26556016597510373,
      "grad_norm": null,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 0.0,
      "step": 64
    },
    {
      "epoch": 0.2697095435684647,
      "grad_norm": null,
      "learning_rate": 3.289899283371657e-05,
      "loss": 0.0,
      "step": 65
    },
    {
      "epoch": 0.27385892116182575,
      "grad_norm": null,
      "learning_rate": 3.12696703292044e-05,
      "loss": 0.0,
      "step": 66
    },
    {
      "epoch": 0.27800829875518673,
      "grad_norm": null,
      "learning_rate": 2.9663167846209998e-05,
      "loss": 0.0,
      "step": 67
    },
    {
      "epoch": 0.2821576763485477,
      "grad_norm": null,
      "learning_rate": 2.8081442660546125e-05,
      "loss": 0.0,
      "step": 68
    },
    {
      "epoch": 0.2863070539419087,
      "grad_norm": null,
      "learning_rate": 2.6526421860705473e-05,
      "loss": 0.0,
      "step": 69
    },
    {
      "epoch": 0.29045643153526973,
      "grad_norm": null,
      "learning_rate": 2.500000000000001e-05,
      "loss": 0.0,
      "step": 70
    },
    {
      "epoch": 0.2946058091286307,
      "grad_norm": null,
      "learning_rate": 2.350403678833976e-05,
      "loss": 0.0,
      "step": 71
    },
    {
      "epoch": 0.2987551867219917,
      "grad_norm": null,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 0.0,
      "step": 72
    },
    {
      "epoch": 0.2987551867219917,
      "eval_loss": null,
      "eval_runtime": 20.393,
      "eval_samples_per_second": 9.954,
      "eval_steps_per_second": 1.275,
      "step": 72
    },
    {
      "epoch": 0.3029045643153527,
      "grad_norm": null,
      "learning_rate": 2.061073738537635e-05,
      "loss": 0.0,
      "step": 73
    },
    {
      "epoch": 0.3070539419087137,
      "grad_norm": null,
      "learning_rate": 1.9216926233717085e-05,
      "loss": 0.0,
      "step": 74
    },
    {
      "epoch": 0.3112033195020747,
      "grad_norm": null,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 0.0,
      "step": 75
    },
    {
      "epoch": 0.3153526970954357,
      "grad_norm": null,
      "learning_rate": 1.6543469682057106e-05,
      "loss": 0.0,
      "step": 76
    },
    {
      "epoch": 0.31950207468879666,
      "grad_norm": null,
      "learning_rate": 1.526708147705013e-05,
      "loss": 0.0,
      "step": 77
    },
    {
      "epoch": 0.3236514522821577,
      "grad_norm": null,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 0.0,
      "step": 78
    },
    {
      "epoch": 0.3278008298755187,
      "grad_norm": null,
      "learning_rate": 1.2842758726130283e-05,
      "loss": 0.0,
      "step": 79
    },
    {
      "epoch": 0.33195020746887965,
      "grad_norm": null,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 0.0,
      "step": 80
    },
    {
      "epoch": 0.3360995850622407,
      "grad_norm": null,
      "learning_rate": 1.0599462319663905e-05,
      "loss": 0.0,
      "step": 81
    },
    {
      "epoch": 0.3360995850622407,
      "eval_loss": null,
      "eval_runtime": 20.394,
      "eval_samples_per_second": 9.954,
      "eval_steps_per_second": 1.275,
      "step": 81
    },
    {
      "epoch": 0.34024896265560167,
      "grad_norm": null,
      "learning_rate": 9.549150281252633e-06,
      "loss": 0.0,
      "step": 82
    },
    {
      "epoch": 0.34439834024896265,
      "grad_norm": null,
      "learning_rate": 8.548121372247918e-06,
      "loss": 0.0,
      "step": 83
    },
    {
      "epoch": 0.34854771784232363,
      "grad_norm": null,
      "learning_rate": 7.597595192178702e-06,
      "loss": 0.0,
      "step": 84
    },
    {
      "epoch": 0.35269709543568467,
      "grad_norm": null,
      "learning_rate": 6.698729810778065e-06,
      "loss": 0.0,
      "step": 85
    },
    {
      "epoch": 0.35684647302904565,
      "grad_norm": null,
      "learning_rate": 5.852620357053651e-06,
      "loss": 0.0,
      "step": 86
    },
    {
      "epoch": 0.36099585062240663,
      "grad_norm": null,
      "learning_rate": 5.060297685041659e-06,
      "loss": 0.0,
      "step": 87
    },
    {
      "epoch": 0.3651452282157676,
      "grad_norm": null,
      "learning_rate": 4.322727117869951e-06,
      "loss": 0.0,
      "step": 88
    },
    {
      "epoch": 0.36929460580912865,
      "grad_norm": null,
      "learning_rate": 3.6408072716606346e-06,
      "loss": 0.0,
      "step": 89
    },
    {
      "epoch": 0.37344398340248963,
      "grad_norm": null,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 0.0,
      "step": 90
    },
    {
      "epoch": 0.37344398340248963,
      "eval_loss": null,
      "eval_runtime": 20.3955,
      "eval_samples_per_second": 9.953,
      "eval_steps_per_second": 1.275,
      "step": 90
    },
    {
      "epoch": 0.3775933609958506,
      "grad_norm": null,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 0.0,
      "step": 91
    },
    {
      "epoch": 0.3817427385892116,
      "grad_norm": null,
      "learning_rate": 1.9369152030840556e-06,
      "loss": 0.0,
      "step": 92
    },
    {
      "epoch": 0.38589211618257263,
      "grad_norm": null,
      "learning_rate": 1.4852136862001764e-06,
      "loss": 0.0,
      "step": 93
    },
    {
      "epoch": 0.3900414937759336,
      "grad_norm": null,
      "learning_rate": 1.0926199633097157e-06,
      "loss": 0.0,
      "step": 94
    },
    {
      "epoch": 0.3941908713692946,
      "grad_norm": null,
      "learning_rate": 7.596123493895991e-07,
      "loss": 0.0,
      "step": 95
    },
    {
      "epoch": 0.3983402489626556,
      "grad_norm": null,
      "learning_rate": 4.865965629214819e-07,
      "loss": 0.0,
      "step": 96
    },
    {
      "epoch": 0.4024896265560166,
      "grad_norm": null,
      "learning_rate": 2.7390523158633554e-07,
      "loss": 0.0,
      "step": 97
    },
    {
      "epoch": 0.4066390041493776,
      "grad_norm": null,
      "learning_rate": 1.2179748700879012e-07,
      "loss": 0.0,
      "step": 98
    },
    {
      "epoch": 0.4107883817427386,
      "grad_norm": null,
      "learning_rate": 3.04586490452119e-08,
      "loss": 0.0,
      "step": 99
    },
    {
      "epoch": 0.4107883817427386,
      "eval_loss": null,
      "eval_runtime": 20.3974,
      "eval_samples_per_second": 9.952,
      "eval_steps_per_second": 1.275,
      "step": 99
    },
    {
      "epoch": 0.4149377593360996,
      "grad_norm": null,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6.59230637948928e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}