afro-xlmr-base-76L_script / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 49023,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03,
"learning_rate": 5e-05,
"loss": 4.8278,
"step": 500
},
{
"epoch": 0.06,
"learning_rate": 5e-05,
"loss": 3.5561,
"step": 1000
},
{
"epoch": 0.09,
"learning_rate": 5e-05,
"loss": 3.0434,
"step": 1500
},
{
"epoch": 0.12,
"learning_rate": 5e-05,
"loss": 2.7336,
"step": 2000
},
{
"epoch": 0.15,
"learning_rate": 5e-05,
"loss": 2.5149,
"step": 2500
},
{
"epoch": 0.18,
"learning_rate": 5e-05,
"loss": 2.3549,
"step": 3000
},
{
"epoch": 0.21,
"learning_rate": 5e-05,
"loss": 2.2287,
"step": 3500
},
{
"epoch": 0.24,
"learning_rate": 5e-05,
"loss": 2.1257,
"step": 4000
},
{
"epoch": 0.28,
"learning_rate": 5e-05,
"loss": 2.0463,
"step": 4500
},
{
"epoch": 0.31,
"learning_rate": 5e-05,
"loss": 1.9848,
"step": 5000
},
{
"epoch": 0.34,
"learning_rate": 5e-05,
"loss": 1.9228,
"step": 5500
},
{
"epoch": 0.37,
"learning_rate": 5e-05,
"loss": 1.8806,
"step": 6000
},
{
"epoch": 0.4,
"learning_rate": 5e-05,
"loss": 1.8365,
"step": 6500
},
{
"epoch": 0.43,
"learning_rate": 5e-05,
"loss": 1.801,
"step": 7000
},
{
"epoch": 0.46,
"learning_rate": 5e-05,
"loss": 1.7703,
"step": 7500
},
{
"epoch": 0.49,
"learning_rate": 5e-05,
"loss": 1.7434,
"step": 8000
},
{
"epoch": 0.52,
"learning_rate": 5e-05,
"loss": 1.7187,
"step": 8500
},
{
"epoch": 0.55,
"learning_rate": 5e-05,
"loss": 1.697,
"step": 9000
},
{
"epoch": 0.58,
"learning_rate": 5e-05,
"loss": 1.673,
"step": 9500
},
{
"epoch": 0.61,
"learning_rate": 5e-05,
"loss": 1.6555,
"step": 10000
},
{
"epoch": 0.64,
"learning_rate": 5e-05,
"loss": 1.6399,
"step": 10500
},
{
"epoch": 0.67,
"learning_rate": 5e-05,
"loss": 1.6233,
"step": 11000
},
{
"epoch": 0.7,
"learning_rate": 5e-05,
"loss": 1.6087,
"step": 11500
},
{
"epoch": 0.73,
"learning_rate": 5e-05,
"loss": 1.597,
"step": 12000
},
{
"epoch": 0.76,
"learning_rate": 5e-05,
"loss": 1.5816,
"step": 12500
},
{
"epoch": 0.8,
"learning_rate": 5e-05,
"loss": 1.5684,
"step": 13000
},
{
"epoch": 0.83,
"learning_rate": 5e-05,
"loss": 1.5602,
"step": 13500
},
{
"epoch": 0.86,
"learning_rate": 5e-05,
"loss": 1.5453,
"step": 14000
},
{
"epoch": 0.89,
"learning_rate": 5e-05,
"loss": 1.5366,
"step": 14500
},
{
"epoch": 0.92,
"learning_rate": 5e-05,
"loss": 1.5276,
"step": 15000
},
{
"epoch": 0.95,
"learning_rate": 5e-05,
"loss": 1.5156,
"step": 15500
},
{
"epoch": 0.98,
"learning_rate": 5e-05,
"loss": 1.5075,
"step": 16000
},
{
"epoch": 1.01,
"learning_rate": 5e-05,
"loss": 1.4988,
"step": 16500
},
{
"epoch": 1.04,
"learning_rate": 5e-05,
"loss": 1.4935,
"step": 17000
},
{
"epoch": 1.07,
"learning_rate": 5e-05,
"loss": 1.4818,
"step": 17500
},
{
"epoch": 1.1,
"learning_rate": 5e-05,
"loss": 1.4733,
"step": 18000
},
{
"epoch": 1.13,
"learning_rate": 5e-05,
"loss": 1.4661,
"step": 18500
},
{
"epoch": 1.16,
"learning_rate": 5e-05,
"loss": 1.4603,
"step": 19000
},
{
"epoch": 1.19,
"learning_rate": 5e-05,
"loss": 1.4554,
"step": 19500
},
{
"epoch": 1.22,
"learning_rate": 5e-05,
"loss": 1.449,
"step": 20000
},
{
"epoch": 1.25,
"learning_rate": 5e-05,
"loss": 1.4409,
"step": 20500
},
{
"epoch": 1.29,
"learning_rate": 5e-05,
"loss": 1.4382,
"step": 21000
},
{
"epoch": 1.32,
"learning_rate": 5e-05,
"loss": 1.4298,
"step": 21500
},
{
"epoch": 1.35,
"learning_rate": 5e-05,
"loss": 1.4251,
"step": 22000
},
{
"epoch": 1.38,
"learning_rate": 5e-05,
"loss": 1.4225,
"step": 22500
},
{
"epoch": 1.41,
"learning_rate": 5e-05,
"loss": 1.4122,
"step": 23000
},
{
"epoch": 1.44,
"learning_rate": 5e-05,
"loss": 1.4089,
"step": 23500
},
{
"epoch": 1.47,
"learning_rate": 5e-05,
"loss": 1.4041,
"step": 24000
},
{
"epoch": 1.5,
"learning_rate": 5e-05,
"loss": 1.3998,
"step": 24500
},
{
"epoch": 1.53,
"learning_rate": 5e-05,
"loss": 1.3933,
"step": 25000
},
{
"epoch": 1.56,
"learning_rate": 5e-05,
"loss": 1.3897,
"step": 25500
},
{
"epoch": 1.59,
"learning_rate": 5e-05,
"loss": 1.3874,
"step": 26000
},
{
"epoch": 1.62,
"learning_rate": 5e-05,
"loss": 1.3812,
"step": 26500
},
{
"epoch": 1.65,
"learning_rate": 5e-05,
"loss": 1.3745,
"step": 27000
},
{
"epoch": 1.68,
"learning_rate": 5e-05,
"loss": 1.3728,
"step": 27500
},
{
"epoch": 1.71,
"learning_rate": 5e-05,
"loss": 1.3706,
"step": 28000
},
{
"epoch": 1.74,
"learning_rate": 5e-05,
"loss": 1.3673,
"step": 28500
},
{
"epoch": 1.77,
"learning_rate": 5e-05,
"loss": 1.3605,
"step": 29000
},
{
"epoch": 1.81,
"learning_rate": 5e-05,
"loss": 1.3574,
"step": 29500
},
{
"epoch": 1.84,
"learning_rate": 5e-05,
"loss": 1.3514,
"step": 30000
},
{
"epoch": 1.87,
"learning_rate": 5e-05,
"loss": 1.351,
"step": 30500
},
{
"epoch": 1.9,
"learning_rate": 5e-05,
"loss": 1.3457,
"step": 31000
},
{
"epoch": 1.93,
"learning_rate": 5e-05,
"loss": 1.3451,
"step": 31500
},
{
"epoch": 1.96,
"learning_rate": 5e-05,
"loss": 1.3403,
"step": 32000
},
{
"epoch": 1.99,
"learning_rate": 5e-05,
"loss": 1.3389,
"step": 32500
},
{
"epoch": 2.02,
"learning_rate": 5e-05,
"loss": 1.3346,
"step": 33000
},
{
"epoch": 2.05,
"learning_rate": 5e-05,
"loss": 1.3313,
"step": 33500
},
{
"epoch": 2.08,
"learning_rate": 5e-05,
"loss": 1.3278,
"step": 34000
},
{
"epoch": 2.11,
"learning_rate": 5e-05,
"loss": 1.3249,
"step": 34500
},
{
"epoch": 2.14,
"learning_rate": 5e-05,
"loss": 1.3212,
"step": 35000
},
{
"epoch": 2.17,
"learning_rate": 5e-05,
"loss": 1.3191,
"step": 35500
},
{
"epoch": 2.2,
"learning_rate": 5e-05,
"loss": 1.3158,
"step": 36000
},
{
"epoch": 2.23,
"learning_rate": 5e-05,
"loss": 1.3135,
"step": 36500
},
{
"epoch": 2.26,
"learning_rate": 5e-05,
"loss": 1.3086,
"step": 37000
},
{
"epoch": 2.29,
"learning_rate": 5e-05,
"loss": 1.3077,
"step": 37500
},
{
"epoch": 2.33,
"learning_rate": 5e-05,
"loss": 1.3045,
"step": 38000
},
{
"epoch": 2.36,
"learning_rate": 5e-05,
"loss": 1.3013,
"step": 38500
},
{
"epoch": 2.39,
"learning_rate": 5e-05,
"loss": 1.3017,
"step": 39000
},
{
"epoch": 2.42,
"learning_rate": 5e-05,
"loss": 1.2989,
"step": 39500
},
{
"epoch": 2.45,
"learning_rate": 5e-05,
"loss": 1.2963,
"step": 40000
},
{
"epoch": 2.48,
"learning_rate": 5e-05,
"loss": 1.2916,
"step": 40500
},
{
"epoch": 2.51,
"learning_rate": 5e-05,
"loss": 1.2935,
"step": 41000
},
{
"epoch": 2.54,
"learning_rate": 5e-05,
"loss": 1.2922,
"step": 41500
},
{
"epoch": 2.57,
"learning_rate": 5e-05,
"loss": 1.2867,
"step": 42000
},
{
"epoch": 2.6,
"learning_rate": 5e-05,
"loss": 1.2847,
"step": 42500
},
{
"epoch": 2.63,
"learning_rate": 5e-05,
"loss": 1.2831,
"step": 43000
},
{
"epoch": 2.66,
"learning_rate": 5e-05,
"loss": 1.2802,
"step": 43500
},
{
"epoch": 2.69,
"learning_rate": 5e-05,
"loss": 1.2784,
"step": 44000
},
{
"epoch": 2.72,
"learning_rate": 5e-05,
"loss": 1.2748,
"step": 44500
},
{
"epoch": 2.75,
"learning_rate": 5e-05,
"loss": 1.2779,
"step": 45000
},
{
"epoch": 2.78,
"learning_rate": 5e-05,
"loss": 1.2712,
"step": 45500
},
{
"epoch": 2.82,
"learning_rate": 5e-05,
"loss": 1.2695,
"step": 46000
},
{
"epoch": 2.85,
"learning_rate": 5e-05,
"loss": 1.2677,
"step": 46500
},
{
"epoch": 2.88,
"learning_rate": 5e-05,
"loss": 1.2682,
"step": 47000
},
{
"epoch": 2.91,
"learning_rate": 5e-05,
"loss": 1.2651,
"step": 47500
},
{
"epoch": 2.94,
"learning_rate": 5e-05,
"loss": 1.2635,
"step": 48000
},
{
"epoch": 2.97,
"learning_rate": 5e-05,
"loss": 1.2626,
"step": 48500
},
{
"epoch": 3.0,
"learning_rate": 5e-05,
"loss": 1.2583,
"step": 49000
},
{
"epoch": 3.0,
"step": 49023,
"total_flos": 9.937271302396051e+18,
"train_loss": 1.5609709015213173,
"train_runtime": 628686.7825,
"train_samples_per_second": 59.886,
"train_steps_per_second": 0.078
}
],
"logging_steps": 500,
"max_steps": 49023,
"num_train_epochs": 3,
"save_steps": 10000,
"total_flos": 9.937271302396051e+18,
"trial_name": null,
"trial_params": null
}