roberta-tiny-4l-10M / trainer_state.json
{
"best_metric": 7.343978404998779,
"best_model_checkpoint": "/data1/attanasiog/babylm/roberta-tiny-4l-10M/checkpoint-200",
"epoch": 29.16441875401413,
"global_step": 350,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.82,
"learning_rate": 0.00014000000000000001,
"loss": 10.2422,
"step": 10
},
{
"epoch": 1.66,
"learning_rate": 0.00028000000000000003,
"loss": 8.631,
"step": 20
},
{
"epoch": 2.49,
"learning_rate": 0.00041999999999999996,
"loss": 7.5098,
"step": 30
},
{
"epoch": 3.33,
"learning_rate": 0.0005600000000000001,
"loss": 9.1052,
"step": 40
},
{
"epoch": 4.16,
"learning_rate": 0.0007,
"loss": 7.4785,
"step": 50
},
{
"epoch": 4.16,
"eval_accuracy": 0.051358309226408695,
"eval_loss": 7.383351802825928,
"eval_runtime": 145.6157,
"eval_samples_per_second": 165.195,
"eval_steps_per_second": 5.164,
"step": 50
},
{
"epoch": 4.99,
"learning_rate": 0.0006998694084416687,
"loss": 7.3182,
"step": 60
},
{
"epoch": 5.82,
"learning_rate": 0.0006994777312189898,
"loss": 7.4319,
"step": 70
},
{
"epoch": 6.66,
"learning_rate": 0.0006988252606161853,
"loss": 7.4297,
"step": 80
},
{
"epoch": 7.49,
"learning_rate": 0.0006979124835312712,
"loss": 7.426,
"step": 90
},
{
"epoch": 8.33,
"learning_rate": 0.0006967400811127157,
"loss": 7.425,
"step": 100
},
{
"epoch": 8.33,
"eval_accuracy": 0.05140231728427503,
"eval_loss": 7.355892181396484,
"eval_runtime": 145.5988,
"eval_samples_per_second": 165.214,
"eval_steps_per_second": 5.165,
"step": 100
},
{
"epoch": 9.16,
"learning_rate": 0.0006953089282511407,
"loss": 7.427,
"step": 110
},
{
"epoch": 9.99,
"learning_rate": 0.0006936200929264454,
"loss": 7.2955,
"step": 120
},
{
"epoch": 10.82,
"learning_rate": 0.0006916748354108406,
"loss": 7.4214,
"step": 130
},
{
"epoch": 11.66,
"learning_rate": 0.0006894746073283849,
"loss": 7.4198,
"step": 140
},
{
"epoch": 12.49,
"learning_rate": 0.0006870210505717298,
"loss": 7.4187,
"step": 150
},
{
"epoch": 12.49,
"eval_accuracy": 0.051204356500776864,
"eval_loss": 7.351686477661133,
"eval_runtime": 309.0562,
"eval_samples_per_second": 77.834,
"eval_steps_per_second": 2.433,
"step": 150
},
{
"epoch": 13.33,
"learning_rate": 0.0006843159960768768,
"loss": 7.4248,
"step": 160
},
{
"epoch": 14.16,
"learning_rate": 0.0006813614624568652,
"loss": 7.418,
"step": 170
},
{
"epoch": 14.99,
"learning_rate": 0.0006781596544954071,
"loss": 7.295,
"step": 180
},
{
"epoch": 15.82,
"learning_rate": 0.0006747129615015945,
"loss": 7.413,
"step": 190
},
{
"epoch": 16.66,
"learning_rate": 0.0006710239555269085,
"loss": 7.4204,
"step": 200
},
{
"epoch": 16.66,
"eval_accuracy": 0.05138556381472711,
"eval_loss": 7.343978404998779,
"eval_runtime": 145.4825,
"eval_samples_per_second": 165.346,
"eval_steps_per_second": 5.169,
"step": 200
},
{
"epoch": 17.49,
"learning_rate": 0.0006670953894458573,
"loss": 7.4064,
"step": 210
},
{
"epoch": 18.33,
"learning_rate": 0.0006629301949016779,
"loss": 7.4197,
"step": 220
},
{
"epoch": 19.16,
"learning_rate": 0.000658531480118635,
"loss": 7.4168,
"step": 230
},
{
"epoch": 19.99,
"learning_rate": 0.0006539025275825468,
"loss": 7.2923,
"step": 240
},
{
"epoch": 20.82,
"learning_rate": 0.000649046791591271,
"loss": 7.4099,
"step": 250
},
{
"epoch": 20.82,
"eval_accuracy": 0.051534592005405604,
"eval_loss": 7.345396041870117,
"eval_runtime": 145.5459,
"eval_samples_per_second": 165.274,
"eval_steps_per_second": 5.167,
"step": 250
},
{
"epoch": 21.66,
"learning_rate": 0.0006439678956769787,
"loss": 7.4172,
"step": 260
},
{
"epoch": 22.49,
"learning_rate": 0.0006386696299021389,
"loss": 7.4133,
"step": 270
},
{
"epoch": 23.33,
"learning_rate": 0.0006331559480312316,
"loss": 7.4126,
"step": 280
},
{
"epoch": 24.16,
"learning_rate": 0.0006274309645803004,
"loss": 7.4111,
"step": 290
},
{
"epoch": 24.99,
"learning_rate": 0.000621498951746547,
"loss": 7.2916,
"step": 300
},
{
"epoch": 24.99,
"eval_accuracy": 0.05147383671825258,
"eval_loss": 7.34423303604126,
"eval_runtime": 145.4168,
"eval_samples_per_second": 165.421,
"eval_steps_per_second": 5.171,
"step": 300
},
{
"epoch": 25.82,
"learning_rate": 0.0006153643362202569,
"loss": 7.408,
"step": 310
},
{
"epoch": 26.66,
"learning_rate": 0.0006090316958814382,
"loss": 7.4066,
"step": 320
},
{
"epoch": 27.49,
"learning_rate": 0.0006025057563836345,
"loss": 7.4151,
"step": 330
},
{
"epoch": 28.33,
"learning_rate": 0.0005957913876274665,
"loss": 7.4088,
"step": 340
},
{
"epoch": 29.16,
"learning_rate": 0.0005888936001265289,
"loss": 7.4117,
"step": 350
},
{
"epoch": 29.16,
"eval_accuracy": 0.05125750394432031,
"eval_loss": 7.344011306762695,
"eval_runtime": 145.4424,
"eval_samples_per_second": 165.392,
"eval_steps_per_second": 5.17,
"step": 350
},
{
"epoch": 29.16,
"step": 350,
"total_flos": 9.618048555527578e+16,
"train_loss": 7.568343920026507,
"train_runtime": 10701.7058,
"train_samples_per_second": 232.767,
"train_steps_per_second": 0.112
}
],
"max_steps": 1200,
"num_train_epochs": 100,
"total_flos": 9.618048555527578e+16,
"trial_name": null,
"trial_params": null
}
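For reference, a minimal Python sketch (assuming this file is saved locally as trainer_state.json) that loads the Hugging Face Trainer state above and recovers the best checkpoint from the logged evaluation entries; the result should match the "best_metric" and "best_model_checkpoint" fields at the top of the file.

import json

# Load the Trainer state saved by transformers.Trainer
# (path is an assumption for this sketch).
with open("trainer_state.json") as f:
    state = json.load(f)

# Evaluation records are the log_history entries that carry "eval_loss".
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

# Pick the entry with the lowest evaluation loss.
best = min(eval_logs, key=lambda e: e["eval_loss"])
print(f"best eval_loss {best['eval_loss']:.4f} at step {best['step']}")
# For this file: best eval_loss 7.3440 at step 200, in agreement with
# "best_metric" and the checkpoint-200 path in "best_model_checkpoint".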