Training in progress, step 130, checkpoint (commit 677600b, verified)
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9885057471264367,
"eval_steps": 11,
"global_step": 130,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.022988505747126436,
"eval_loss": 9.30986213684082,
"eval_runtime": 9.0065,
"eval_samples_per_second": 8.105,
"eval_steps_per_second": 1.11,
"step": 1
},
{
"epoch": 0.06896551724137931,
"grad_norm": 37.248802185058594,
"learning_rate": 3e-05,
"loss": 8.9493,
"step": 3
},
{
"epoch": 0.13793103448275862,
"grad_norm": 9.639245986938477,
"learning_rate": 6e-05,
"loss": 6.0587,
"step": 6
},
{
"epoch": 0.20689655172413793,
"grad_norm": 6.129356384277344,
"learning_rate": 9e-05,
"loss": 3.933,
"step": 9
},
{
"epoch": 0.25287356321839083,
"eval_loss": 3.160940408706665,
"eval_runtime": 9.1581,
"eval_samples_per_second": 7.971,
"eval_steps_per_second": 1.092,
"step": 11
},
{
"epoch": 0.27586206896551724,
"grad_norm": 5.822756767272949,
"learning_rate": 9.99314767377287e-05,
"loss": 3.2319,
"step": 12
},
{
"epoch": 0.3448275862068966,
"grad_norm": 4.974664688110352,
"learning_rate": 9.957224306869053e-05,
"loss": 2.9212,
"step": 15
},
{
"epoch": 0.41379310344827586,
"grad_norm": 3.2585740089416504,
"learning_rate": 9.890738003669029e-05,
"loss": 2.2899,
"step": 18
},
{
"epoch": 0.4827586206896552,
"grad_norm": 3.2496554851531982,
"learning_rate": 9.794098674340965e-05,
"loss": 2.1133,
"step": 21
},
{
"epoch": 0.5057471264367817,
"eval_loss": 2.0404434204101562,
"eval_runtime": 9.1631,
"eval_samples_per_second": 7.967,
"eval_steps_per_second": 1.091,
"step": 22
},
{
"epoch": 0.5517241379310345,
"grad_norm": 3.3397107124328613,
"learning_rate": 9.667902132486009e-05,
"loss": 1.8379,
"step": 24
},
{
"epoch": 0.6206896551724138,
"grad_norm": 4.2894768714904785,
"learning_rate": 9.512926421749304e-05,
"loss": 1.9301,
"step": 27
},
{
"epoch": 0.6896551724137931,
"grad_norm": 3.3234455585479736,
"learning_rate": 9.330127018922194e-05,
"loss": 1.6536,
"step": 30
},
{
"epoch": 0.7586206896551724,
"grad_norm": 2.444336414337158,
"learning_rate": 9.120630943110077e-05,
"loss": 1.4659,
"step": 33
},
{
"epoch": 0.7586206896551724,
"eval_loss": 1.616043210029602,
"eval_runtime": 9.1967,
"eval_samples_per_second": 7.938,
"eval_steps_per_second": 1.087,
"step": 33
},
{
"epoch": 0.8275862068965517,
"grad_norm": 4.422275066375732,
"learning_rate": 8.885729807284856e-05,
"loss": 1.4601,
"step": 36
},
{
"epoch": 0.896551724137931,
"grad_norm": 3.7574503421783447,
"learning_rate": 8.626871855061438e-05,
"loss": 1.3037,
"step": 39
},
{
"epoch": 0.9655172413793104,
"grad_norm": 3.0062453746795654,
"learning_rate": 8.345653031794292e-05,
"loss": 1.234,
"step": 42
},
{
"epoch": 1.0114942528735633,
"eval_loss": 1.342581868171692,
"eval_runtime": 9.1668,
"eval_samples_per_second": 7.964,
"eval_steps_per_second": 1.091,
"step": 44
},
{
"epoch": 1.0344827586206897,
"grad_norm": 2.715128183364868,
"learning_rate": 8.043807145043604e-05,
"loss": 1.4425,
"step": 45
},
{
"epoch": 1.103448275862069,
"grad_norm": 2.441864490509033,
"learning_rate": 7.723195175075136e-05,
"loss": 0.9217,
"step": 48
},
{
"epoch": 1.1724137931034484,
"grad_norm": 2.2927913665771484,
"learning_rate": 7.385793801298042e-05,
"loss": 0.8214,
"step": 51
},
{
"epoch": 1.2413793103448276,
"grad_norm": 1.960526466369629,
"learning_rate": 7.033683215379002e-05,
"loss": 0.9555,
"step": 54
},
{
"epoch": 1.264367816091954,
"eval_loss": 1.2750840187072754,
"eval_runtime": 9.1846,
"eval_samples_per_second": 7.948,
"eval_steps_per_second": 1.089,
"step": 55
},
{
"epoch": 1.3103448275862069,
"grad_norm": 3.3633315563201904,
"learning_rate": 6.669034296168855e-05,
"loss": 0.8391,
"step": 57
},
{
"epoch": 1.3793103448275863,
"grad_norm": 1.9623112678527832,
"learning_rate": 6.294095225512603e-05,
"loss": 0.7523,
"step": 60
},
{
"epoch": 1.4482758620689655,
"grad_norm": 2.6259655952453613,
"learning_rate": 5.911177627460739e-05,
"loss": 0.8822,
"step": 63
},
{
"epoch": 1.5172413793103448,
"grad_norm": 3.360589027404785,
"learning_rate": 5.522642316338268e-05,
"loss": 1.0438,
"step": 66
},
{
"epoch": 1.5172413793103448,
"eval_loss": 1.2139718532562256,
"eval_runtime": 9.1834,
"eval_samples_per_second": 7.949,
"eval_steps_per_second": 1.089,
"step": 66
},
{
"epoch": 1.5862068965517242,
"grad_norm": 1.6269245147705078,
"learning_rate": 5.1308847415393666e-05,
"loss": 0.8215,
"step": 69
},
{
"epoch": 1.6551724137931034,
"grad_norm": 2.1838412284851074,
"learning_rate": 4.738320218785281e-05,
"loss": 0.8856,
"step": 72
},
{
"epoch": 1.7241379310344827,
"grad_norm": 2.0462594032287598,
"learning_rate": 4.347369038899744e-05,
"loss": 0.7003,
"step": 75
},
{
"epoch": 1.7701149425287355,
"eval_loss": 1.1193917989730835,
"eval_runtime": 9.1719,
"eval_samples_per_second": 7.959,
"eval_steps_per_second": 1.09,
"step": 77
},
{
"epoch": 1.793103448275862,
"grad_norm": 2.196936845779419,
"learning_rate": 3.960441545911204e-05,
"loss": 0.7238,
"step": 78
},
{
"epoch": 1.8620689655172413,
"grad_norm": 2.042219400405884,
"learning_rate": 3.579923276480387e-05,
"loss": 0.7039,
"step": 81
},
{
"epoch": 1.9310344827586206,
"grad_norm": 2.6126327514648438,
"learning_rate": 3.2081602522734986e-05,
"loss": 0.7641,
"step": 84
},
{
"epoch": 2.0,
"grad_norm": 5.926860332489014,
"learning_rate": 2.8474445159585235e-05,
"loss": 0.9279,
"step": 87
},
{
"epoch": 2.0229885057471266,
"eval_loss": 1.031583547592163,
"eval_runtime": 9.1542,
"eval_samples_per_second": 7.974,
"eval_steps_per_second": 1.092,
"step": 88
},
{
"epoch": 2.0689655172413794,
"grad_norm": 1.4757606983184814,
"learning_rate": 2.500000000000001e-05,
"loss": 0.6411,
"step": 90
},
{
"epoch": 2.1379310344827585,
"grad_norm": 1.4598504304885864,
"learning_rate": 2.167968815375837e-05,
"loss": 0.7081,
"step": 93
},
{
"epoch": 2.206896551724138,
"grad_norm": 1.0860507488250732,
"learning_rate": 1.8533980447508137e-05,
"loss": 0.611,
"step": 96
},
{
"epoch": 2.2758620689655173,
"grad_norm": 1.1976888179779053,
"learning_rate": 1.5582271215312294e-05,
"loss": 0.5523,
"step": 99
},
{
"epoch": 2.2758620689655173,
"eval_loss": 1.016935110092163,
"eval_runtime": 9.1948,
"eval_samples_per_second": 7.939,
"eval_steps_per_second": 1.088,
"step": 99
},
{
"epoch": 2.344827586206897,
"grad_norm": 1.211234211921692,
"learning_rate": 1.2842758726130283e-05,
"loss": 0.6378,
"step": 102
},
{
"epoch": 2.413793103448276,
"grad_norm": 1.2312695980072021,
"learning_rate": 1.0332332985438248e-05,
"loss": 0.5659,
"step": 105
},
{
"epoch": 2.4827586206896552,
"grad_norm": 1.7582778930664062,
"learning_rate": 8.066471602728803e-06,
"loss": 0.6258,
"step": 108
},
{
"epoch": 2.528735632183908,
"eval_loss": 1.0043144226074219,
"eval_runtime": 9.1676,
"eval_samples_per_second": 7.963,
"eval_steps_per_second": 1.091,
"step": 110
},
{
"epoch": 2.5517241379310347,
"grad_norm": 1.0846282243728638,
"learning_rate": 6.059144366901736e-06,
"loss": 0.6884,
"step": 111
},
{
"epoch": 2.6206896551724137,
"grad_norm": 1.0979564189910889,
"learning_rate": 4.322727117869951e-06,
"loss": 0.6791,
"step": 114
},
{
"epoch": 2.689655172413793,
"grad_norm": 1.3624191284179688,
"learning_rate": 2.8679254453910785e-06,
"loss": 0.7076,
"step": 117
},
{
"epoch": 2.7586206896551726,
"grad_norm": 1.714447021484375,
"learning_rate": 1.70370868554659e-06,
"loss": 0.6285,
"step": 120
},
{
"epoch": 2.781609195402299,
"eval_loss": 0.9969158172607422,
"eval_runtime": 9.1743,
"eval_samples_per_second": 7.957,
"eval_steps_per_second": 1.09,
"step": 121
},
{
"epoch": 2.8275862068965516,
"grad_norm": 1.9297926425933838,
"learning_rate": 8.372546218022747e-07,
"loss": 0.7666,
"step": 123
},
{
"epoch": 2.896551724137931,
"grad_norm": 1.666353702545166,
"learning_rate": 2.7390523158633554e-07,
"loss": 0.6233,
"step": 126
},
{
"epoch": 2.9655172413793105,
"grad_norm": 1.1451926231384277,
"learning_rate": 1.7133751222137007e-08,
"loss": 0.5853,
"step": 129
}
],
"logging_steps": 3,
"max_steps": 130,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 11,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.176515519409029e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
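
For reference, the JSON above is the trainer_state.json that the Hugging Face Trainer writes into each checkpoint folder: log_history interleaves training records (loss, grad_norm, learning_rate every logging_steps=3) with evaluation records (eval_loss every eval_steps=11). Below is a minimal sketch of how this file could be inspected offline; the path checkpoint-130/trainer_state.json is an assumption about where this checkpoint lives, not something stated in the repo.

import json

# Hypothetical path: point this at the checkpoint directory that contains the dump above.
STATE_PATH = "checkpoint-130/trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# log_history mixes two kinds of records: training logs carry "loss",
# evaluation logs carry "eval_loss" (see the entries in the dump).
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"global_step: {state['global_step']}, epoch: {state['epoch']:.2f}")
print(f"last training loss: {train_logs[-1]['loss']} at step {train_logs[-1]['step']}")
print(f"best eval loss: {min(e['eval_loss'] for e in eval_logs):.4f}")

Since should_training_stop is true and global_step equals max_steps (130), this is the final checkpoint of the run. If the run were to be continued or reproduced, Trainer.train(resume_from_checkpoint=...) can restore from a saved checkpoint directory, but the exact call depends on how the original model, dataset, and TrainingArguments were set up, which this file alone does not record.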