{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 100.0,
  "eval_steps": 500,
  "global_step": 6500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 134.7136688232422,
      "learning_rate": 9.981600000000001e-06,
      "loss": 5.6198,
      "step": 100
    },
    {
      "epoch": 3.076923076923077,
      "grad_norm": 115.98611450195312,
      "learning_rate": 9.961600000000001e-06,
      "loss": 3.5959,
      "step": 200
    },
    {
      "epoch": 4.615384615384615,
      "grad_norm": 114.23514556884766,
      "learning_rate": 9.941600000000002e-06,
      "loss": 3.3845,
      "step": 300
    },
    {
      "epoch": 6.153846153846154,
      "grad_norm": 140.5648193359375,
      "learning_rate": 9.921600000000002e-06,
      "loss": 3.1494,
      "step": 400
    },
    {
      "epoch": 7.6923076923076925,
      "grad_norm": 117.4485855102539,
      "learning_rate": 9.901600000000002e-06,
      "loss": 3.0773,
      "step": 500
    },
    {
      "epoch": 9.23076923076923,
      "grad_norm": 389.71490478515625,
      "learning_rate": 9.8816e-06,
      "loss": 3.2116,
      "step": 600
    },
    {
      "epoch": 10.76923076923077,
      "grad_norm": 107.11251831054688,
      "learning_rate": 9.8616e-06,
      "loss": 3.0471,
      "step": 700
    },
    {
      "epoch": 12.307692307692308,
      "grad_norm": 85.49571228027344,
      "learning_rate": 9.8416e-06,
      "loss": 3.0201,
      "step": 800
    },
    {
      "epoch": 13.846153846153847,
      "grad_norm": 121.62274932861328,
      "learning_rate": 9.821600000000001e-06,
      "loss": 2.9355,
      "step": 900
    },
    {
      "epoch": 15.384615384615385,
      "grad_norm": 64.66451263427734,
      "learning_rate": 9.801600000000001e-06,
      "loss": 2.9637,
      "step": 1000
    },
    {
      "epoch": 16.923076923076923,
      "grad_norm": 182.4657440185547,
      "learning_rate": 9.781600000000001e-06,
      "loss": 2.9819,
      "step": 1100
    },
    {
      "epoch": 18.46153846153846,
      "grad_norm": 97.80529022216797,
      "learning_rate": 9.761600000000002e-06,
      "loss": 2.9486,
      "step": 1200
    },
    {
      "epoch": 20.0,
      "grad_norm": 220.0562744140625,
      "learning_rate": 9.741600000000002e-06,
      "loss": 2.8608,
      "step": 1300
    },
    {
      "epoch": 21.53846153846154,
      "grad_norm": 95.53397369384766,
      "learning_rate": 9.7216e-06,
      "loss": 2.8322,
      "step": 1400
    },
    {
      "epoch": 23.076923076923077,
      "grad_norm": 67.54853057861328,
      "learning_rate": 9.7016e-06,
      "loss": 2.9429,
      "step": 1500
    },
    {
      "epoch": 24.615384615384617,
      "grad_norm": 214.53131103515625,
      "learning_rate": 9.6816e-06,
      "loss": 2.7927,
      "step": 1600
    },
    {
      "epoch": 26.153846153846153,
      "grad_norm": 293.3318786621094,
      "learning_rate": 9.6616e-06,
      "loss": 2.7665,
      "step": 1700
    },
    {
      "epoch": 27.692307692307693,
      "grad_norm": 216.3682861328125,
      "learning_rate": 9.641600000000001e-06,
      "loss": 2.8309,
      "step": 1800
    },
    {
      "epoch": 29.23076923076923,
      "grad_norm": 168.0605010986328,
      "learning_rate": 9.621600000000001e-06,
      "loss": 2.8433,
      "step": 1900
    },
    {
      "epoch": 30.76923076923077,
      "grad_norm": 103.49143981933594,
      "learning_rate": 9.601600000000001e-06,
      "loss": 2.7606,
      "step": 2000
    },
    {
      "epoch": 32.30769230769231,
      "grad_norm": 116.6761474609375,
      "learning_rate": 9.581600000000002e-06,
      "loss": 2.6595,
      "step": 2100
    },
    {
      "epoch": 33.84615384615385,
      "grad_norm": 176.0087432861328,
      "learning_rate": 9.5616e-06,
      "loss": 2.632,
      "step": 2200
    },
    {
      "epoch": 35.38461538461539,
      "grad_norm": 120.39679718017578,
      "learning_rate": 9.5416e-06,
      "loss": 2.5941,
      "step": 2300
    },
    {
      "epoch": 36.92307692307692,
      "grad_norm": 100.69256591796875,
      "learning_rate": 9.5216e-06,
      "loss": 2.6007,
      "step": 2400
    },
    {
      "epoch": 38.46153846153846,
      "grad_norm": 254.54440307617188,
      "learning_rate": 9.5016e-06,
      "loss": 2.5548,
      "step": 2500
    },
    {
      "epoch": 40.0,
      "grad_norm": 113.618896484375,
      "learning_rate": 9.4816e-06,
      "loss": 2.5157,
      "step": 2600
    },
    {
      "epoch": 41.53846153846154,
      "grad_norm": 825.5616455078125,
      "learning_rate": 9.461600000000001e-06,
      "loss": 2.4832,
      "step": 2700
    },
    {
      "epoch": 43.07692307692308,
      "grad_norm": 58.83940505981445,
      "learning_rate": 9.441600000000001e-06,
      "loss": 2.4393,
      "step": 2800
    },
    {
      "epoch": 44.61538461538461,
      "grad_norm": 89.66182708740234,
      "learning_rate": 9.421600000000001e-06,
      "loss": 2.4381,
      "step": 2900
    },
    {
      "epoch": 46.15384615384615,
      "grad_norm": 242.81410217285156,
      "learning_rate": 9.4016e-06,
      "loss": 2.3978,
      "step": 3000
    },
    {
      "epoch": 47.69230769230769,
      "grad_norm": 70.43364715576172,
      "learning_rate": 9.3816e-06,
      "loss": 2.3782,
      "step": 3100
    },
    {
      "epoch": 49.23076923076923,
      "grad_norm": 244.31356811523438,
      "learning_rate": 9.3616e-06,
      "loss": 2.3319,
      "step": 3200
    },
    {
      "epoch": 50.76923076923077,
      "grad_norm": 59.848697662353516,
      "learning_rate": 9.3416e-06,
      "loss": 2.3205,
      "step": 3300
    },
    {
      "epoch": 52.30769230769231,
      "grad_norm": 109.26374053955078,
      "learning_rate": 9.3216e-06,
      "loss": 2.2943,
      "step": 3400
    },
    {
      "epoch": 53.84615384615385,
      "grad_norm": 307.2849426269531,
      "learning_rate": 9.301600000000001e-06,
      "loss": 2.2913,
      "step": 3500
    },
    {
      "epoch": 55.38461538461539,
      "grad_norm": 69.93729400634766,
      "learning_rate": 9.281600000000001e-06,
      "loss": 2.2166,
      "step": 3600
    },
    {
      "epoch": 56.92307692307692,
      "grad_norm": 83.3631591796875,
      "learning_rate": 9.261600000000001e-06,
      "loss": 2.2718,
      "step": 3700
    },
    {
      "epoch": 58.46153846153846,
      "grad_norm": 48.52922821044922,
      "learning_rate": 9.2416e-06,
      "loss": 2.2645,
      "step": 3800
    },
    {
      "epoch": 60.0,
      "grad_norm": 51.951324462890625,
      "learning_rate": 9.2218e-06,
      "loss": 2.2416,
      "step": 3900
    },
    {
      "epoch": 61.53846153846154,
      "grad_norm": 254.99429321289062,
      "learning_rate": 9.2018e-06,
      "loss": 2.2104,
      "step": 4000
    },
    {
      "epoch": 63.07692307692308,
      "grad_norm": 109.68157958984375,
      "learning_rate": 9.1818e-06,
      "loss": 2.1574,
      "step": 4100
    },
    {
      "epoch": 64.61538461538461,
      "grad_norm": 85.5855941772461,
      "learning_rate": 9.161800000000001e-06,
      "loss": 2.1513,
      "step": 4200
    },
    {
      "epoch": 66.15384615384616,
      "grad_norm": 63.89781951904297,
      "learning_rate": 9.141800000000001e-06,
      "loss": 2.1331,
      "step": 4300
    },
    {
      "epoch": 67.6923076923077,
      "grad_norm": 184.29612731933594,
      "learning_rate": 9.121800000000001e-06,
      "loss": 2.1866,
      "step": 4400
    },
    {
      "epoch": 69.23076923076923,
      "grad_norm": 108.59600830078125,
      "learning_rate": 9.1018e-06,
      "loss": 2.1453,
      "step": 4500
    },
    {
      "epoch": 70.76923076923077,
      "grad_norm": 80.51258087158203,
      "learning_rate": 9.0818e-06,
      "loss": 2.1125,
      "step": 4600
    },
    {
      "epoch": 72.3076923076923,
      "grad_norm": 172.7328643798828,
      "learning_rate": 9.0618e-06,
      "loss": 2.141,
      "step": 4700
    },
    {
      "epoch": 73.84615384615384,
      "grad_norm": 58.20785903930664,
      "learning_rate": 9.0418e-06,
      "loss": 2.0786,
      "step": 4800
    },
    {
      "epoch": 75.38461538461539,
      "grad_norm": 72.46143341064453,
      "learning_rate": 9.0218e-06,
      "loss": 2.0914,
      "step": 4900
    },
    {
      "epoch": 76.92307692307692,
      "grad_norm": 48.18838119506836,
      "learning_rate": 9.0018e-06,
      "loss": 2.0439,
      "step": 5000
    },
    {
      "epoch": 78.46153846153847,
      "grad_norm": 87.95365905761719,
      "learning_rate": 8.981800000000001e-06,
      "loss": 2.0331,
      "step": 5100
    },
    {
      "epoch": 80.0,
      "grad_norm": 99.82858276367188,
      "learning_rate": 8.961800000000001e-06,
      "loss": 2.1268,
      "step": 5200
    },
    {
      "epoch": 81.53846153846153,
      "grad_norm": 85.33229064941406,
      "learning_rate": 8.9418e-06,
      "loss": 2.028,
      "step": 5300
    },
    {
      "epoch": 83.07692307692308,
      "grad_norm": 83.01776885986328,
      "learning_rate": 8.9218e-06,
      "loss": 2.0179,
      "step": 5400
    },
    {
      "epoch": 84.61538461538461,
      "grad_norm": 95.0901107788086,
      "learning_rate": 8.9018e-06,
      "loss": 2.0825,
      "step": 5500
    },
    {
      "epoch": 86.15384615384616,
      "grad_norm": 85.66802978515625,
      "learning_rate": 8.8818e-06,
      "loss": 2.0588,
      "step": 5600
    },
    {
      "epoch": 87.6923076923077,
      "grad_norm": 69.63407897949219,
      "learning_rate": 8.8618e-06,
      "loss": 2.0298,
      "step": 5700
    },
    {
      "epoch": 89.23076923076923,
      "grad_norm": 174.88063049316406,
      "learning_rate": 8.8418e-06,
      "loss": 1.9867,
      "step": 5800
    },
    {
      "epoch": 90.76923076923077,
      "grad_norm": 65.64617156982422,
      "learning_rate": 8.8218e-06,
      "loss": 2.0674,
      "step": 5900
    },
    {
      "epoch": 92.3076923076923,
      "grad_norm": 152.2218780517578,
      "learning_rate": 8.802e-06,
      "loss": 1.992,
      "step": 6000
    },
    {
      "epoch": 93.84615384615384,
      "grad_norm": 74.4027328491211,
      "learning_rate": 8.782e-06,
      "loss": 2.0016,
      "step": 6100
    },
    {
      "epoch": 95.38461538461539,
      "grad_norm": 67.4207992553711,
      "learning_rate": 8.762e-06,
      "loss": 2.0001,
      "step": 6200
    },
    {
      "epoch": 96.92307692307692,
      "grad_norm": 182.99061584472656,
      "learning_rate": 8.742e-06,
      "loss": 1.9897,
      "step": 6300
    },
    {
      "epoch": 98.46153846153847,
      "grad_norm": 70.14286804199219,
      "learning_rate": 8.722e-06,
      "loss": 2.0146,
      "step": 6400
    },
    {
      "epoch": 100.0,
      "grad_norm": 117.57647705078125,
      "learning_rate": 8.702e-06,
      "loss": 1.9298,
      "step": 6500
    }
  ],
  "logging_steps": 100,
  "max_steps": 50000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 770,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.484580580352e+19,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
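
The file above is the trainer_state.json that the Hugging Face transformers Trainer saves alongside each checkpoint; the log_history list holds one record per logging step. A minimal sketch for inspecting it with only the standard library, assuming the file has been downloaded locally as trainer_state.json (the path is illustrative):

import json

# Load the trainer state saved with this checkpoint.
with open("trainer_state.json") as f:
    state = json.load(f)

# Each log_history entry records epoch, grad_norm, learning_rate, loss, and step.
for entry in state["log_history"]:
    print(f"step {entry['step']:>5}  epoch {entry['epoch']:7.2f}  "
          f"loss {entry['loss']:.4f}  lr {entry['learning_rate']:.3e}")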