{
"best_metric": 0.9036697247706422,
"best_model_checkpoint": "distilbert-base-uncased-finetuned-sst2/run-0/checkpoint-4210",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 21050,
"is_hyper_param_search": true,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.1187648456057007,
"grad_norm": 9.763530731201172,
"learning_rate": 2.4130303407723998e-05,
"loss": 0.3459,
"step": 500
},
{
"epoch": 0.2375296912114014,
"grad_norm": 7.90731954574585,
"learning_rate": 2.3543191402669885e-05,
"loss": 0.2557,
"step": 1000
},
{
"epoch": 0.35629453681710216,
"grad_norm": 7.188111305236816,
"learning_rate": 2.2956079397615776e-05,
"loss": 0.2242,
"step": 1500
},
{
"epoch": 0.4750593824228028,
"grad_norm": 9.416199684143066,
"learning_rate": 2.2368967392561663e-05,
"loss": 0.2281,
"step": 2000
},
{
"epoch": 0.5938242280285035,
"grad_norm": 6.982799053192139,
"learning_rate": 2.1781855387507554e-05,
"loss": 0.2196,
"step": 2500
},
{
"epoch": 0.7125890736342043,
"grad_norm": 3.475909948348999,
"learning_rate": 2.1194743382453438e-05,
"loss": 0.199,
"step": 3000
},
{
"epoch": 0.831353919239905,
"grad_norm": 18.930234909057617,
"learning_rate": 2.0607631377399328e-05,
"loss": 0.1988,
"step": 3500
},
{
"epoch": 0.9501187648456056,
"grad_norm": 21.44248390197754,
"learning_rate": 2.0020519372345215e-05,
"loss": 0.1964,
"step": 4000
},
{
"epoch": 1.0,
"eval_accuracy": 0.9036697247706422,
"eval_loss": 0.279630184173584,
"eval_runtime": 1.1981,
"eval_samples_per_second": 727.831,
"eval_steps_per_second": 45.907,
"step": 4210
},
{
"epoch": 1.0688836104513064,
"grad_norm": 6.991998672485352,
"learning_rate": 1.9433407367291103e-05,
"loss": 0.1449,
"step": 4500
},
{
"epoch": 1.187648456057007,
"grad_norm": 1.0682291984558105,
"learning_rate": 1.8846295362236993e-05,
"loss": 0.1307,
"step": 5000
},
{
"epoch": 1.3064133016627077,
"grad_norm": 1.4475390911102295,
"learning_rate": 1.825918335718288e-05,
"loss": 0.1248,
"step": 5500
},
{
"epoch": 1.4251781472684084,
"grad_norm": 0.4122050702571869,
"learning_rate": 1.767207135212877e-05,
"loss": 0.1288,
"step": 6000
},
{
"epoch": 1.5439429928741093,
"grad_norm": 0.35570672154426575,
"learning_rate": 1.7084959347074655e-05,
"loss": 0.1353,
"step": 6500
},
{
"epoch": 1.66270783847981,
"grad_norm": 0.9978006482124329,
"learning_rate": 1.6497847342020545e-05,
"loss": 0.1316,
"step": 7000
},
{
"epoch": 1.7814726840855108,
"grad_norm": 7.568444728851318,
"learning_rate": 1.5910735336966433e-05,
"loss": 0.1363,
"step": 7500
},
{
"epoch": 1.9002375296912115,
"grad_norm": 0.1252310872077942,
"learning_rate": 1.532362333191232e-05,
"loss": 0.1271,
"step": 8000
},
{
"epoch": 2.0,
"eval_accuracy": 0.8887614678899083,
"eval_loss": 0.37789419293403625,
"eval_runtime": 1.2,
"eval_samples_per_second": 726.651,
"eval_steps_per_second": 45.832,
"step": 8420
},
{
"epoch": 2.019002375296912,
"grad_norm": 0.2641623616218567,
"learning_rate": 1.4736511326858209e-05,
"loss": 0.114,
"step": 8500
},
{
"epoch": 2.137767220902613,
"grad_norm": 16.91781234741211,
"learning_rate": 1.4149399321804098e-05,
"loss": 0.0814,
"step": 9000
},
{
"epoch": 2.2565320665083135,
"grad_norm": 0.13691911101341248,
"learning_rate": 1.3562287316749987e-05,
"loss": 0.0807,
"step": 9500
},
{
"epoch": 2.375296912114014,
"grad_norm": 14.652079582214355,
"learning_rate": 1.2975175311695872e-05,
"loss": 0.0909,
"step": 10000
},
{
"epoch": 2.494061757719715,
"grad_norm": 0.23481616377830505,
"learning_rate": 1.2388063306641761e-05,
"loss": 0.0823,
"step": 10500
},
{
"epoch": 2.6128266033254155,
"grad_norm": 0.16778022050857544,
"learning_rate": 1.1800951301587648e-05,
"loss": 0.0866,
"step": 11000
},
{
"epoch": 2.731591448931116,
"grad_norm": 22.94564437866211,
"learning_rate": 1.1213839296533537e-05,
"loss": 0.0894,
"step": 11500
},
{
"epoch": 2.850356294536817,
"grad_norm": 4.668305397033691,
"learning_rate": 1.0626727291479424e-05,
"loss": 0.0969,
"step": 12000
},
{
"epoch": 2.969121140142518,
"grad_norm": 0.056590911000967026,
"learning_rate": 1.0039615286425313e-05,
"loss": 0.0932,
"step": 12500
},
{
"epoch": 3.0,
"eval_accuracy": 0.8990825688073395,
"eval_loss": 0.4282897412776947,
"eval_runtime": 1.1944,
"eval_samples_per_second": 730.051,
"eval_steps_per_second": 46.047,
"step": 12630
},
{
"epoch": 3.0878859857482186,
"grad_norm": 0.023897308856248856,
"learning_rate": 9.452503281371202e-06,
"loss": 0.0537,
"step": 13000
},
{
"epoch": 3.2066508313539193,
"grad_norm": 0.7962842583656311,
"learning_rate": 8.86539127631709e-06,
"loss": 0.0473,
"step": 13500
},
{
"epoch": 3.32541567695962,
"grad_norm": 0.47386619448661804,
"learning_rate": 8.278279271262978e-06,
"loss": 0.0608,
"step": 14000
},
{
"epoch": 3.4441805225653206,
"grad_norm": 0.06599200516939163,
"learning_rate": 7.691167266208865e-06,
"loss": 0.0677,
"step": 14500
},
{
"epoch": 3.5629453681710213,
"grad_norm": 0.18340125679969788,
"learning_rate": 7.104055261154754e-06,
"loss": 0.0579,
"step": 15000
},
{
"epoch": 3.6817102137767224,
"grad_norm": 5.418150901794434,
"learning_rate": 6.5169432561006415e-06,
"loss": 0.0517,
"step": 15500
},
{
"epoch": 3.800475059382423,
"grad_norm": 19.50031852722168,
"learning_rate": 5.92983125104653e-06,
"loss": 0.0617,
"step": 16000
},
{
"epoch": 3.9192399049881237,
"grad_norm": 0.0843653604388237,
"learning_rate": 5.3427192459924185e-06,
"loss": 0.0599,
"step": 16500
},
{
"epoch": 4.0,
"eval_accuracy": 0.9036697247706422,
"eval_loss": 0.466791033744812,
"eval_runtime": 1.2178,
"eval_samples_per_second": 716.054,
"eval_steps_per_second": 45.164,
"step": 16840
},
{
"epoch": 4.038004750593824,
"grad_norm": 0.04484863579273224,
"learning_rate": 4.7556072409383065e-06,
"loss": 0.0424,
"step": 17000
},
{
"epoch": 4.156769596199525,
"grad_norm": 0.0025116826873272657,
"learning_rate": 4.1684952358841946e-06,
"loss": 0.0335,
"step": 17500
},
{
"epoch": 4.275534441805226,
"grad_norm": 0.4570842981338501,
"learning_rate": 3.581383230830083e-06,
"loss": 0.0395,
"step": 18000
},
{
"epoch": 4.394299287410926,
"grad_norm": 0.7941423654556274,
"learning_rate": 2.9942712257759707e-06,
"loss": 0.0334,
"step": 18500
},
{
"epoch": 4.513064133016627,
"grad_norm": 3.7694590091705322,
"learning_rate": 2.4071592207218587e-06,
"loss": 0.035,
"step": 19000
},
{
"epoch": 4.631828978622328,
"grad_norm": 0.018865738064050674,
"learning_rate": 1.8200472156677468e-06,
"loss": 0.0373,
"step": 19500
},
{
"epoch": 4.750593824228028,
"grad_norm": 0.03485831245779991,
"learning_rate": 1.232935210613635e-06,
"loss": 0.0282,
"step": 20000
},
{
"epoch": 4.869358669833729,
"grad_norm": 0.014678980223834515,
"learning_rate": 6.458232055595231e-07,
"loss": 0.0369,
"step": 20500
},
{
"epoch": 4.98812351543943,
"grad_norm": 0.06822077184915543,
"learning_rate": 5.871120050541119e-08,
"loss": 0.0299,
"step": 21000
},
{
"epoch": 5.0,
"eval_accuracy": 0.9025229357798165,
"eval_loss": 0.5252613425254822,
"eval_runtime": 1.204,
"eval_samples_per_second": 724.232,
"eval_steps_per_second": 45.68,
"step": 21050
}
],
"logging_steps": 500,
"max_steps": 21050,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 3061523906193576.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": {
"learning_rate": 2.471741541277811e-05,
"num_train_epochs": 5,
"per_device_train_batch_size": 16,
"seed": 27
}
}
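
A minimal sketch of how this trainer state could be inspected programmatically, assuming the JSON above is saved locally as "trainer_state.json" (the filename is an assumption; the Hugging Face Trainer normally writes it inside each checkpoint directory). It recomputes the best epoch from log_history and compares it with the reported best_metric:

import json

# Load the trainer state shown above (path is an assumption for this sketch).
with open("trainer_state.json") as f:
    state = json.load(f)

# Per-epoch evaluation records are the log_history entries carrying eval_accuracy.
evals = [entry for entry in state["log_history"] if "eval_accuracy" in entry]
best = max(evals, key=lambda entry: entry["eval_accuracy"])

print(f"reported best_metric: {state['best_metric']}")
print(f"best epoch by eval_accuracy: {best['epoch']} "
      f"(accuracy={best['eval_accuracy']:.4f}, step={best['step']})")

For this file the recomputed best matches the header fields: epoch 1.0 (step 4210) with eval_accuracy 0.9037, which is why best_model_checkpoint points at run-0/checkpoint-4210.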