|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 69.61844761573268,
  "global_step": 250000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14,
      "learning_rate": 1e-05,
      "learning_rate_embeddings": 1e-05,
      "loss": 6.6253,
      "step": 500
    },
    {
      "epoch": 0.28,
      "learning_rate": 2e-05,
      "learning_rate_embeddings": 2e-05,
      "loss": 5.3003,
      "step": 1000
    },
    {
      "epoch": 0.42,
      "learning_rate": 3e-05,
      "learning_rate_embeddings": 3e-05,
      "loss": 4.8291,
      "step": 1500
    },
    {
      "epoch": 0.56,
      "learning_rate": 4e-05,
      "learning_rate_embeddings": 4e-05,
      "loss": 4.5601,
      "step": 2000
    },
    {
      "epoch": 0.7,
      "learning_rate": 5e-05,
      "learning_rate_embeddings": 5e-05,
      "loss": 4.3749,
      "step": 2500
    },
    {
      "epoch": 0.84,
      "learning_rate": 6e-05,
      "learning_rate_embeddings": 6e-05,
      "loss": 4.2365,
      "step": 3000
    },
    {
      "epoch": 0.97,
      "learning_rate": 7.000000000000001e-05,
      "learning_rate_embeddings": 7.000000000000001e-05,
      "loss": 4.114,
      "step": 3500
    },
    {
      "epoch": 1.11,
      "learning_rate": 8e-05,
      "learning_rate_embeddings": 8e-05,
      "loss": 4.0063,
      "step": 4000
    },
    {
      "epoch": 1.25,
      "learning_rate": 8.999999999999999e-05,
      "learning_rate_embeddings": 8.999999999999999e-05,
      "loss": 3.9135,
      "step": 4500
    },
    {
      "epoch": 1.39,
      "learning_rate": 0.0001,
      "learning_rate_embeddings": 0.0001,
      "loss": 3.8331,
      "step": 5000
    },
    {
      "epoch": 1.53,
      "learning_rate": 0.00011,
      "learning_rate_embeddings": 0.00011,
      "loss": 3.7766,
      "step": 5500
    },
    {
      "epoch": 1.67,
      "learning_rate": 0.00012,
      "learning_rate_embeddings": 0.00012,
      "loss": 3.7228,
      "step": 6000
    },
    {
      "epoch": 1.81,
      "learning_rate": 0.00013000000000000002,
      "learning_rate_embeddings": 0.00013000000000000002,
      "loss": 3.6715,
      "step": 6500
    },
    {
      "epoch": 1.95,
      "learning_rate": 0.00014000000000000001,
      "learning_rate_embeddings": 0.00014000000000000001,
      "loss": 3.634,
      "step": 7000
    },
    {
      "epoch": 2.09,
      "learning_rate": 0.00015,
      "learning_rate_embeddings": 0.00015,
      "loss": 3.5923,
      "step": 7500
    },
    {
      "epoch": 2.23,
      "learning_rate": 0.00016,
      "learning_rate_embeddings": 0.00016,
      "loss": 3.5615,
      "step": 8000
    },
    {
      "epoch": 2.37,
      "learning_rate": 0.00017,
      "learning_rate_embeddings": 0.00017,
      "loss": 3.5314,
      "step": 8500
    },
    {
      "epoch": 2.51,
      "learning_rate": 0.00017999999999999998,
      "learning_rate_embeddings": 0.00017999999999999998,
      "loss": 3.506,
      "step": 9000
    },
    {
      "epoch": 2.65,
      "learning_rate": 0.00019,
      "learning_rate_embeddings": 0.00019,
      "loss": 3.4803,
      "step": 9500
    },
    {
      "epoch": 2.78,
      "learning_rate": 0.0002,
      "learning_rate_embeddings": 0.0002,
      "loss": 3.4648,
      "step": 10000
    },
    {
      "epoch": 2.92,
      "learning_rate": 0.00021,
      "learning_rate_embeddings": 0.00021,
      "loss": 3.4497,
      "step": 10500
    },
    {
      "epoch": 3.06,
      "learning_rate": 0.00022,
      "learning_rate_embeddings": 0.00022,
      "loss": 3.4254,
      "step": 11000
    },
    {
      "epoch": 3.2,
      "learning_rate": 0.00023,
      "learning_rate_embeddings": 0.00023,
      "loss": 3.3993,
      "step": 11500
    },
    {
      "epoch": 3.34,
      "learning_rate": 0.00024,
      "learning_rate_embeddings": 0.00024,
      "loss": 3.3893,
      "step": 12000
    },
    {
      "epoch": 3.48,
      "learning_rate": 0.00025,
      "learning_rate_embeddings": 0.00025,
      "loss": 3.3673,
      "step": 12500
    },
    {
      "epoch": 3.48,
      "eval_loss": 3.3232665061950684,
      "eval_runtime": 384.8896,
      "eval_samples_per_second": 479.493,
      "eval_steps_per_second": 3.747,
      "step": 12500
    },
    {
      "epoch": 3.62,
      "learning_rate": 0.00026000000000000003,
      "learning_rate_embeddings": 0.00026000000000000003,
      "loss": 3.3559,
      "step": 13000
    },
    {
      "epoch": 3.76,
      "learning_rate": 0.00027,
      "learning_rate_embeddings": 0.00027,
      "loss": 3.3442,
      "step": 13500
    },
    {
      "epoch": 3.9,
      "learning_rate": 0.00028000000000000003,
      "learning_rate_embeddings": 0.00028000000000000003,
      "loss": 3.3282,
      "step": 14000
    },
    {
      "epoch": 4.04,
      "learning_rate": 0.00029,
      "learning_rate_embeddings": 0.00029,
      "loss": 3.3189,
      "step": 14500
    },
    {
      "epoch": 4.18,
      "learning_rate": 0.0003,
      "learning_rate_embeddings": 0.0003,
      "loss": 3.2965,
      "step": 15000
    },
    {
      "epoch": 4.32,
      "learning_rate": 0.00031,
      "learning_rate_embeddings": 0.00031,
      "loss": 3.2934,
      "step": 15500
    },
    {
      "epoch": 4.46,
      "learning_rate": 0.00032,
      "learning_rate_embeddings": 0.00032,
      "loss": 3.281,
      "step": 16000
    },
    {
      "epoch": 4.59,
      "learning_rate": 0.00033,
      "learning_rate_embeddings": 0.00033,
      "loss": 3.2739,
      "step": 16500
    },
    {
      "epoch": 4.73,
      "learning_rate": 0.00034,
      "learning_rate_embeddings": 0.00034,
      "loss": 3.2687,
      "step": 17000
    },
    {
      "epoch": 4.87,
      "learning_rate": 0.00035,
      "learning_rate_embeddings": 0.00035,
      "loss": 3.2584,
      "step": 17500
    },
    {
      "epoch": 5.01,
      "learning_rate": 0.00035999999999999997,
      "learning_rate_embeddings": 0.00035999999999999997,
      "loss": 3.2436,
      "step": 18000
    },
    {
      "epoch": 5.15,
      "learning_rate": 0.00037,
      "learning_rate_embeddings": 0.00037,
      "loss": 3.2294,
      "step": 18500
    },
    {
      "epoch": 5.29,
      "learning_rate": 0.00038,
      "learning_rate_embeddings": 0.00038,
      "loss": 3.2221,
      "step": 19000
    },
    {
      "epoch": 5.43,
      "learning_rate": 0.00039000000000000005,
      "learning_rate_embeddings": 0.00039000000000000005,
      "loss": 3.2163,
      "step": 19500
    },
    {
      "epoch": 5.57,
      "learning_rate": 0.0004,
      "learning_rate_embeddings": 0.0004,
      "loss": 3.2084,
      "step": 20000
    },
    {
      "epoch": 5.71,
      "learning_rate": 0.00041,
      "learning_rate_embeddings": 0.00041,
      "loss": 3.209,
      "step": 20500
    },
    {
      "epoch": 5.85,
      "learning_rate": 0.00042,
      "learning_rate_embeddings": 0.00042,
      "loss": 3.2081,
      "step": 21000
    },
    {
      "epoch": 5.99,
      "learning_rate": 0.00043,
      "learning_rate_embeddings": 0.00043,
      "loss": 3.1962,
      "step": 21500
    },
    {
      "epoch": 6.13,
      "learning_rate": 0.00044,
      "learning_rate_embeddings": 0.00044,
      "loss": 3.1895,
      "step": 22000
    },
    {
      "epoch": 6.27,
      "learning_rate": 0.00045000000000000004,
      "learning_rate_embeddings": 0.00045000000000000004,
      "loss": 3.1731,
      "step": 22500
    },
    {
      "epoch": 6.4,
      "learning_rate": 0.00046,
      "learning_rate_embeddings": 0.00046,
      "loss": 3.1771,
      "step": 23000
    },
    {
      "epoch": 6.54,
      "learning_rate": 0.00047,
      "learning_rate_embeddings": 0.00047,
      "loss": 3.1732,
      "step": 23500
    },
    {
      "epoch": 6.68,
      "learning_rate": 0.00048,
      "learning_rate_embeddings": 0.00048,
      "loss": 3.1668,
      "step": 24000
    },
    {
      "epoch": 6.82,
      "learning_rate": 0.00049,
      "learning_rate_embeddings": 0.00049,
      "loss": 3.1598,
      "step": 24500
    },
    {
      "epoch": 6.96,
      "learning_rate": 0.0005,
      "learning_rate_embeddings": 0.0005,
      "loss": 3.1646,
      "step": 25000
    },
    {
      "epoch": 6.96,
      "eval_loss": 3.1556143760681152,
      "eval_runtime": 371.5552,
      "eval_samples_per_second": 496.701,
      "eval_steps_per_second": 3.881,
      "step": 25000
    },
    {
      "epoch": 7.1,
      "learning_rate": 0.0004988888888888889,
      "learning_rate_embeddings": 0.0004988888888888889,
      "loss": 3.1431,
      "step": 25500
    },
    {
      "epoch": 7.24,
      "learning_rate": 0.0004977777777777778,
      "learning_rate_embeddings": 0.0004977777777777778,
      "loss": 3.1398,
      "step": 26000
    },
    {
      "epoch": 7.38,
      "learning_rate": 0.0004966666666666666,
      "learning_rate_embeddings": 0.0004966666666666666,
      "loss": 3.1355,
      "step": 26500
    },
    {
      "epoch": 7.52,
      "learning_rate": 0.0004955555555555556,
      "learning_rate_embeddings": 0.0004955555555555556,
      "loss": 3.1349,
      "step": 27000
    },
    {
      "epoch": 7.66,
      "learning_rate": 0.0004944444444444445,
      "learning_rate_embeddings": 0.0004944444444444445,
      "loss": 3.1308,
      "step": 27500
    },
    {
      "epoch": 7.8,
      "learning_rate": 0.0004933333333333334,
      "learning_rate_embeddings": 0.0004933333333333334,
      "loss": 3.1305,
      "step": 28000
    },
    {
      "epoch": 7.94,
      "learning_rate": 0.0004922222222222222,
      "learning_rate_embeddings": 0.0004922222222222222,
      "loss": 3.1323,
      "step": 28500
    },
    {
      "epoch": 8.08,
      "learning_rate": 0.0004911111111111111,
      "learning_rate_embeddings": 0.0004911111111111111,
      "loss": 3.1179,
      "step": 29000
    },
    {
      "epoch": 8.21,
      "learning_rate": 0.00049,
      "learning_rate_embeddings": 0.00049,
      "loss": 3.1083,
      "step": 29500
    },
    {
      "epoch": 8.35,
      "learning_rate": 0.0004888888888888889,
      "learning_rate_embeddings": 0.0004888888888888889,
      "loss": 3.1094,
      "step": 30000
    },
    {
      "epoch": 8.49,
      "learning_rate": 0.0004877777777777778,
      "learning_rate_embeddings": 0.0004877777777777778,
      "loss": 3.1131,
      "step": 30500
    },
    {
      "epoch": 8.63,
      "learning_rate": 0.0004866666666666667,
      "learning_rate_embeddings": 0.0004866666666666667,
      "loss": 3.1127,
      "step": 31000
    },
    {
      "epoch": 8.77,
      "learning_rate": 0.0004855555555555556,
      "learning_rate_embeddings": 0.0004855555555555556,
      "loss": 3.1104,
      "step": 31500
    },
    {
      "epoch": 8.91,
      "learning_rate": 0.00048444444444444446,
      "learning_rate_embeddings": 0.00048444444444444446,
      "loss": 3.1043,
      "step": 32000
    },
    {
      "epoch": 9.05,
      "learning_rate": 0.00048333333333333334,
      "learning_rate_embeddings": 0.00048333333333333334,
      "loss": 3.0959,
      "step": 32500
    },
    {
      "epoch": 9.19,
      "learning_rate": 0.0004822222222222222,
      "learning_rate_embeddings": 0.0004822222222222222,
      "loss": 3.0801,
      "step": 33000
    },
    {
      "epoch": 9.33,
      "learning_rate": 0.0004811111111111111,
      "learning_rate_embeddings": 0.0004811111111111111,
      "loss": 3.0935,
      "step": 33500
    },
    {
      "epoch": 9.47,
      "learning_rate": 0.00048,
      "learning_rate_embeddings": 0.00048,
      "loss": 3.084,
      "step": 34000
    },
    {
      "epoch": 9.61,
      "learning_rate": 0.0004788888888888889,
      "learning_rate_embeddings": 0.0004788888888888889,
      "loss": 3.0918,
      "step": 34500
    },
    {
      "epoch": 9.75,
      "learning_rate": 0.0004777777777777778,
      "learning_rate_embeddings": 0.0004777777777777778,
      "loss": 3.0901,
      "step": 35000
    },
    {
      "epoch": 9.89,
      "learning_rate": 0.0004766666666666667,
      "learning_rate_embeddings": 0.0004766666666666667,
      "loss": 3.084,
      "step": 35500
    },
    {
      "epoch": 10.03,
      "learning_rate": 0.00047555555555555556,
      "learning_rate_embeddings": 0.00047555555555555556,
      "loss": 3.0878,
      "step": 36000
    },
    {
      "epoch": 10.16,
      "learning_rate": 0.00047444444444444444,
      "learning_rate_embeddings": 0.00047444444444444444,
      "loss": 3.0629,
      "step": 36500
    },
    {
      "epoch": 10.3,
      "learning_rate": 0.00047333333333333336,
      "learning_rate_embeddings": 0.00047333333333333336,
      "loss": 3.0741,
      "step": 37000
    },
    {
      "epoch": 10.44,
      "learning_rate": 0.00047222222222222224,
      "learning_rate_embeddings": 0.00047222222222222224,
      "loss": 3.0791,
      "step": 37500
    },
    {
      "epoch": 10.44,
      "eval_loss": 3.100255250930786,
      "eval_runtime": 371.6431,
      "eval_samples_per_second": 496.584,
      "eval_steps_per_second": 3.88,
      "step": 37500
    },
    {
      "epoch": 10.58,
      "learning_rate": 0.0004711111111111111,
      "learning_rate_embeddings": 0.0004711111111111111,
      "loss": 3.0781,
      "step": 38000
    },
    {
      "epoch": 10.72,
      "learning_rate": 0.00047,
      "learning_rate_embeddings": 0.00047,
      "loss": 3.0746,
      "step": 38500
    },
    {
      "epoch": 10.86,
      "learning_rate": 0.0004688888888888889,
      "learning_rate_embeddings": 0.0004688888888888889,
      "loss": 3.0734,
      "step": 39000
    },
    {
      "epoch": 11.0,
      "learning_rate": 0.0004677777777777778,
      "learning_rate_embeddings": 0.0004677777777777778,
      "loss": 3.0779,
      "step": 39500
    },
    {
      "epoch": 11.14,
      "learning_rate": 0.00046666666666666666,
      "learning_rate_embeddings": 0.00046666666666666666,
      "loss": 3.0547,
      "step": 40000
    },
    {
      "epoch": 11.28,
      "learning_rate": 0.0004655555555555556,
      "learning_rate_embeddings": 0.0004655555555555556,
      "loss": 3.0579,
      "step": 40500
    },
    {
      "epoch": 11.42,
      "learning_rate": 0.00046444444444444446,
      "learning_rate_embeddings": 0.00046444444444444446,
      "loss": 3.0598,
      "step": 41000
    },
    {
      "epoch": 11.56,
      "learning_rate": 0.00046333333333333334,
      "learning_rate_embeddings": 0.00046333333333333334,
      "loss": 3.0549,
      "step": 41500
    },
    {
      "epoch": 11.7,
      "learning_rate": 0.0004622222222222222,
      "learning_rate_embeddings": 0.0004622222222222222,
      "loss": 3.0635,
      "step": 42000
    },
    {
      "epoch": 11.84,
      "learning_rate": 0.00046111111111111114,
      "learning_rate_embeddings": 0.00046111111111111114,
      "loss": 3.0648,
      "step": 42500
    },
    {
      "epoch": 11.97,
      "learning_rate": 0.00046,
      "learning_rate_embeddings": 0.00046,
      "loss": 3.0614,
      "step": 43000
    },
    {
      "epoch": 12.11,
      "learning_rate": 0.0004588888888888889,
      "learning_rate_embeddings": 0.0004588888888888889,
      "loss": 3.046,
      "step": 43500
    },
    {
      "epoch": 12.25,
      "learning_rate": 0.0004577777777777778,
      "learning_rate_embeddings": 0.0004577777777777778,
      "loss": 3.0464,
      "step": 44000
    },
    {
      "epoch": 12.39,
      "learning_rate": 0.0004566666666666667,
      "learning_rate_embeddings": 0.0004566666666666667,
      "loss": 3.0481,
      "step": 44500
    },
    {
      "epoch": 12.53,
      "learning_rate": 0.00045555555555555556,
      "learning_rate_embeddings": 0.00045555555555555556,
      "loss": 3.0445,
      "step": 45000
    },
    {
      "epoch": 12.67,
      "learning_rate": 0.00045444444444444444,
      "learning_rate_embeddings": 0.00045444444444444444,
      "loss": 3.0547,
      "step": 45500
    },
    {
      "epoch": 12.81,
      "learning_rate": 0.0004533333333333333,
      "learning_rate_embeddings": 0.0004533333333333333,
      "loss": 3.0611,
      "step": 46000
    },
    {
      "epoch": 12.95,
      "learning_rate": 0.00045222222222222224,
      "learning_rate_embeddings": 0.00045222222222222224,
      "loss": 3.055,
      "step": 46500
    },
    {
      "epoch": 13.09,
      "learning_rate": 0.0004511111111111111,
      "learning_rate_embeddings": 0.0004511111111111111,
      "loss": 3.0476,
      "step": 47000
    },
    {
      "epoch": 13.23,
      "learning_rate": 0.00045000000000000004,
      "learning_rate_embeddings": 0.00045000000000000004,
      "loss": 3.0308,
      "step": 47500
    },
    {
      "epoch": 13.37,
      "learning_rate": 0.0004488888888888889,
      "learning_rate_embeddings": 0.0004488888888888889,
      "loss": 3.0415,
      "step": 48000
    },
    {
      "epoch": 13.51,
      "learning_rate": 0.0004477777777777778,
      "learning_rate_embeddings": 0.0004477777777777778,
      "loss": 3.0434,
      "step": 48500
    },
    {
      "epoch": 13.65,
      "learning_rate": 0.00044666666666666666,
      "learning_rate_embeddings": 0.00044666666666666666,
      "loss": 3.0444,
      "step": 49000
    },
    {
      "epoch": 13.78,
      "learning_rate": 0.00044555555555555554,
      "learning_rate_embeddings": 0.00044555555555555554,
      "loss": 3.0389,
      "step": 49500
    },
    {
      "epoch": 13.92,
      "learning_rate": 0.0004444444444444444,
      "learning_rate_embeddings": 0.0004444444444444444,
      "loss": 3.0458,
      "step": 50000
    },
    {
      "epoch": 13.92,
      "eval_loss": 3.0721282958984375,
      "eval_runtime": 371.4664,
      "eval_samples_per_second": 496.82,
      "eval_steps_per_second": 3.882,
      "step": 50000
    },
    {
      "epoch": 14.06,
      "learning_rate": 0.00044333333333333334,
      "learning_rate_embeddings": 0.00044333333333333334,
      "loss": 3.0367,
      "step": 50500
    },
    {
      "epoch": 14.2,
      "learning_rate": 0.00044222222222222227,
      "learning_rate_embeddings": 0.00044222222222222227,
      "loss": 3.0245,
      "step": 51000
    },
    {
      "epoch": 14.34,
      "learning_rate": 0.00044111111111111114,
      "learning_rate_embeddings": 0.00044111111111111114,
      "loss": 3.0234,
      "step": 51500
    },
    {
      "epoch": 14.48,
      "learning_rate": 0.00044,
      "learning_rate_embeddings": 0.00044,
      "loss": 3.0328,
      "step": 52000
    },
    {
      "epoch": 14.62,
      "learning_rate": 0.0004388888888888889,
      "learning_rate_embeddings": 0.0004388888888888889,
      "loss": 3.0301,
      "step": 52500
    },
    {
      "epoch": 14.76,
      "learning_rate": 0.00043777777777777776,
      "learning_rate_embeddings": 0.00043777777777777776,
      "loss": 3.0275,
      "step": 53000
    },
    {
      "epoch": 14.9,
      "learning_rate": 0.00043666666666666664,
      "learning_rate_embeddings": 0.00043666666666666664,
      "loss": 3.0303,
      "step": 53500
    },
    {
      "epoch": 15.04,
      "learning_rate": 0.0004355555555555555,
      "learning_rate_embeddings": 0.0004355555555555555,
      "loss": 3.0339,
      "step": 54000
    },
    {
      "epoch": 15.18,
      "learning_rate": 0.0004344444444444445,
      "learning_rate_embeddings": 0.0004344444444444445,
      "loss": 3.0195,
      "step": 54500
    },
    {
      "epoch": 15.32,
      "learning_rate": 0.00043333333333333337,
      "learning_rate_embeddings": 0.00043333333333333337,
      "loss": 3.0201,
      "step": 55000
    },
    {
      "epoch": 15.46,
      "learning_rate": 0.00043222222222222224,
      "learning_rate_embeddings": 0.00043222222222222224,
      "loss": 3.0305,
      "step": 55500
    },
    {
      "epoch": 15.59,
      "learning_rate": 0.0004311111111111111,
      "learning_rate_embeddings": 0.0004311111111111111,
      "loss": 3.0345,
      "step": 56000
    },
    {
      "epoch": 15.73,
      "learning_rate": 0.00043,
      "learning_rate_embeddings": 0.00043,
      "loss": 3.0209,
      "step": 56500
    },
    {
      "epoch": 15.87,
      "learning_rate": 0.00042888888888888886,
      "learning_rate_embeddings": 0.00042888888888888886,
      "loss": 3.0264,
      "step": 57000
    },
    {
      "epoch": 16.01,
      "learning_rate": 0.0004277777777777778,
      "learning_rate_embeddings": 0.0004277777777777778,
      "loss": 3.0239,
      "step": 57500
    },
    {
      "epoch": 16.15,
      "learning_rate": 0.0004266666666666667,
      "learning_rate_embeddings": 0.0004266666666666667,
      "loss": 3.0056,
      "step": 58000
    },
    {
      "epoch": 16.29,
      "learning_rate": 0.0004255555555555556,
      "learning_rate_embeddings": 0.0004255555555555556,
      "loss": 3.0169,
      "step": 58500
    },
    {
      "epoch": 16.43,
      "learning_rate": 0.00042444444444444447,
      "learning_rate_embeddings": 0.00042444444444444447,
      "loss": 3.0115,
      "step": 59000
    },
    {
      "epoch": 16.57,
      "learning_rate": 0.00042333333333333334,
      "learning_rate_embeddings": 0.00042333333333333334,
      "loss": 3.022,
      "step": 59500
    },
    {
      "epoch": 16.71,
      "learning_rate": 0.0004222222222222222,
      "learning_rate_embeddings": 0.0004222222222222222,
      "loss": 3.0212,
      "step": 60000
    },
    {
      "epoch": 16.85,
      "learning_rate": 0.0004211111111111111,
      "learning_rate_embeddings": 0.0004211111111111111,
      "loss": 3.0194,
      "step": 60500
    },
    {
      "epoch": 16.99,
      "learning_rate": 0.00042,
      "learning_rate_embeddings": 0.00042,
      "loss": 3.0207,
      "step": 61000
    },
    {
      "epoch": 17.13,
      "learning_rate": 0.0004188888888888889,
      "learning_rate_embeddings": 0.0004188888888888889,
      "loss": 3.0067,
      "step": 61500
    },
    {
      "epoch": 17.27,
      "learning_rate": 0.0004177777777777778,
      "learning_rate_embeddings": 0.0004177777777777778,
      "loss": 3.0035,
      "step": 62000
    },
    {
      "epoch": 17.4,
      "learning_rate": 0.0004166666666666667,
      "learning_rate_embeddings": 0.0004166666666666667,
      "loss": 3.0143,
      "step": 62500
    },
    {
      "epoch": 17.4,
      "eval_loss": 3.0584957599639893,
      "eval_runtime": 372.4044,
      "eval_samples_per_second": 495.569,
      "eval_steps_per_second": 3.872,
      "step": 62500
    },
    {
      "epoch": 17.54,
      "learning_rate": 0.00041555555555555557,
      "learning_rate_embeddings": 0.00041555555555555557,
      "loss": 3.0167,
      "step": 63000
    },
    {
      "epoch": 17.68,
      "learning_rate": 0.00041444444444444444,
      "learning_rate_embeddings": 0.00041444444444444444,
      "loss": 3.0174,
      "step": 63500
    },
    {
      "epoch": 17.82,
      "learning_rate": 0.0004133333333333333,
      "learning_rate_embeddings": 0.0004133333333333333,
      "loss": 3.009,
      "step": 64000
    },
    {
      "epoch": 17.96,
      "learning_rate": 0.00041222222222222224,
      "learning_rate_embeddings": 0.00041222222222222224,
      "loss": 3.0174,
      "step": 64500
    },
    {
      "epoch": 18.1,
      "learning_rate": 0.0004111111111111111,
      "learning_rate_embeddings": 0.0004111111111111111,
      "loss": 2.9896,
      "step": 65000
    },
    {
      "epoch": 18.24,
      "learning_rate": 0.00041,
      "learning_rate_embeddings": 0.00041,
      "loss": 2.9986,
      "step": 65500
    },
    {
      "epoch": 18.38,
      "learning_rate": 0.0004088888888888889,
      "learning_rate_embeddings": 0.0004088888888888889,
      "loss": 2.9994,
      "step": 66000
    },
    {
      "epoch": 18.52,
      "learning_rate": 0.0004077777777777778,
      "learning_rate_embeddings": 0.0004077777777777778,
      "loss": 3.0095,
      "step": 66500
    },
    {
      "epoch": 18.66,
      "learning_rate": 0.00040666666666666667,
      "learning_rate_embeddings": 0.00040666666666666667,
      "loss": 3.0088,
      "step": 67000
    },
    {
      "epoch": 18.8,
      "learning_rate": 0.00040555555555555554,
      "learning_rate_embeddings": 0.00040555555555555554,
      "loss": 3.0075,
      "step": 67500
    },
    {
      "epoch": 18.94,
      "learning_rate": 0.00040444444444444447,
      "learning_rate_embeddings": 0.00040444444444444447,
      "loss": 3.0176,
      "step": 68000
    },
    {
      "epoch": 19.08,
      "learning_rate": 0.00040333333333333334,
      "learning_rate_embeddings": 0.00040333333333333334,
      "loss": 2.9948,
      "step": 68500
    },
    {
      "epoch": 19.21,
      "learning_rate": 0.0004022222222222222,
      "learning_rate_embeddings": 0.0004022222222222222,
      "loss": 2.9915,
      "step": 69000
    },
    {
      "epoch": 19.35,
      "learning_rate": 0.0004011111111111111,
      "learning_rate_embeddings": 0.0004011111111111111,
      "loss": 2.9945,
      "step": 69500
    },
    {
      "epoch": 19.49,
      "learning_rate": 0.0004,
      "learning_rate_embeddings": 0.0004,
      "loss": 3.0017,
      "step": 70000
    },
    {
      "epoch": 19.63,
      "learning_rate": 0.0003988888888888889,
      "learning_rate_embeddings": 0.0003988888888888889,
      "loss": 3.003,
      "step": 70500
    },
    {
      "epoch": 19.77,
      "learning_rate": 0.00039777777777777777,
      "learning_rate_embeddings": 0.00039777777777777777,
      "loss": 2.9999,
      "step": 71000
    },
    {
      "epoch": 19.91,
      "learning_rate": 0.0003966666666666667,
      "learning_rate_embeddings": 0.0003966666666666667,
      "loss": 3.0018,
      "step": 71500
    },
    {
      "epoch": 20.05,
      "learning_rate": 0.00039555555555555557,
      "learning_rate_embeddings": 0.00039555555555555557,
      "loss": 2.9957,
      "step": 72000
    },
    {
      "epoch": 20.19,
      "learning_rate": 0.00039444444444444444,
      "learning_rate_embeddings": 0.00039444444444444444,
      "loss": 2.9824,
      "step": 72500
    },
    {
      "epoch": 20.33,
      "learning_rate": 0.0003933333333333333,
      "learning_rate_embeddings": 0.0003933333333333333,
      "loss": 2.9868,
      "step": 73000
    },
    {
      "epoch": 20.47,
      "learning_rate": 0.00039222222222222225,
      "learning_rate_embeddings": 0.00039222222222222225,
      "loss": 2.9939,
      "step": 73500
    },
    {
      "epoch": 20.61,
      "learning_rate": 0.0003911111111111111,
      "learning_rate_embeddings": 0.0003911111111111111,
      "loss": 3.0016,
      "step": 74000
    },
    {
      "epoch": 20.75,
      "learning_rate": 0.00039000000000000005,
      "learning_rate_embeddings": 0.00039000000000000005,
      "loss": 2.9937,
      "step": 74500
    },
    {
      "epoch": 20.89,
      "learning_rate": 0.0003888888888888889,
      "learning_rate_embeddings": 0.0003888888888888889,
      "loss": 3.0017,
      "step": 75000
    },
    {
      "epoch": 20.89,
      "eval_loss": 3.0433623790740967,
      "eval_runtime": 372.4329,
      "eval_samples_per_second": 495.531,
      "eval_steps_per_second": 3.872,
      "step": 75000
    },
    {
      "epoch": 21.02,
      "learning_rate": 0.0003877777777777778,
      "learning_rate_embeddings": 0.0003877777777777778,
      "loss": 2.9915,
      "step": 75500
    },
    {
      "epoch": 21.16,
      "learning_rate": 0.00038666666666666667,
      "learning_rate_embeddings": 0.00038666666666666667,
      "loss": 2.9781,
      "step": 76000
    },
    {
      "epoch": 21.3,
      "learning_rate": 0.00038555555555555554,
      "learning_rate_embeddings": 0.00038555555555555554,
      "loss": 2.9834,
      "step": 76500
    },
    {
      "epoch": 21.44,
      "learning_rate": 0.0003844444444444444,
      "learning_rate_embeddings": 0.0003844444444444444,
      "loss": 2.9903,
      "step": 77000
    },
    {
      "epoch": 21.58,
      "learning_rate": 0.00038333333333333334,
      "learning_rate_embeddings": 0.00038333333333333334,
      "loss": 2.9895,
      "step": 77500
    },
    {
      "epoch": 21.72,
      "learning_rate": 0.0003822222222222223,
      "learning_rate_embeddings": 0.0003822222222222223,
      "loss": 2.9909,
      "step": 78000
    },
    {
      "epoch": 21.86,
      "learning_rate": 0.00038111111111111115,
      "learning_rate_embeddings": 0.00038111111111111115,
      "loss": 2.9985,
      "step": 78500
    },
    {
      "epoch": 22.0,
      "learning_rate": 0.00038,
      "learning_rate_embeddings": 0.00038,
      "loss": 2.994,
      "step": 79000
    },
    {
      "epoch": 22.14,
      "learning_rate": 0.0003788888888888889,
      "learning_rate_embeddings": 0.0003788888888888889,
      "loss": 2.9787,
      "step": 79500
    },
    {
      "epoch": 22.28,
      "learning_rate": 0.00037777777777777777,
      "learning_rate_embeddings": 0.00037777777777777777,
      "loss": 2.9791,
      "step": 80000
    },
    {
      "epoch": 22.42,
      "learning_rate": 0.00037666666666666664,
      "learning_rate_embeddings": 0.00037666666666666664,
      "loss": 2.9859,
      "step": 80500
    },
    {
      "epoch": 22.56,
      "learning_rate": 0.0003755555555555555,
      "learning_rate_embeddings": 0.0003755555555555555,
      "loss": 2.9891,
      "step": 81000
    },
    {
      "epoch": 22.7,
      "learning_rate": 0.0003744444444444445,
      "learning_rate_embeddings": 0.0003744444444444445,
      "loss": 2.9873,
      "step": 81500
    },
    {
      "epoch": 22.83,
      "learning_rate": 0.0003733333333333334,
      "learning_rate_embeddings": 0.0003733333333333334,
      "loss": 2.9902,
      "step": 82000
    },
    {
      "epoch": 22.97,
      "learning_rate": 0.00037222222222222225,
      "learning_rate_embeddings": 0.00037222222222222225,
      "loss": 2.9869,
      "step": 82500
    },
    {
      "epoch": 23.11,
      "learning_rate": 0.0003711111111111111,
      "learning_rate_embeddings": 0.0003711111111111111,
      "loss": 2.9785,
      "step": 83000
    },
    {
      "epoch": 23.25,
      "learning_rate": 0.00037,
      "learning_rate_embeddings": 0.00037,
      "loss": 2.9751,
      "step": 83500
    },
    {
      "epoch": 23.39,
      "learning_rate": 0.00036888888888888887,
      "learning_rate_embeddings": 0.00036888888888888887,
      "loss": 2.9821,
      "step": 84000
    },
    {
      "epoch": 23.53,
      "learning_rate": 0.00036777777777777774,
      "learning_rate_embeddings": 0.00036777777777777774,
      "loss": 2.9825,
      "step": 84500
    },
    {
      "epoch": 23.67,
      "learning_rate": 0.00036666666666666667,
      "learning_rate_embeddings": 0.00036666666666666667,
      "loss": 2.9846,
      "step": 85000
    },
    {
      "epoch": 23.81,
      "learning_rate": 0.0003655555555555556,
      "learning_rate_embeddings": 0.0003655555555555556,
      "loss": 2.9916,
      "step": 85500
    },
    {
      "epoch": 23.95,
      "learning_rate": 0.00036444444444444447,
      "learning_rate_embeddings": 0.00036444444444444447,
      "loss": 2.9819,
      "step": 86000
    },
    {
      "epoch": 24.09,
      "learning_rate": 0.00036333333333333335,
      "learning_rate_embeddings": 0.00036333333333333335,
      "loss": 2.9715,
      "step": 86500
    },
    {
      "epoch": 24.23,
      "learning_rate": 0.0003622222222222222,
      "learning_rate_embeddings": 0.0003622222222222222,
      "loss": 2.981,
      "step": 87000
    },
    {
      "epoch": 24.37,
      "learning_rate": 0.0003611111111111111,
      "learning_rate_embeddings": 0.0003611111111111111,
      "loss": 2.9809,
      "step": 87500
    },
    {
      "epoch": 24.37,
      "eval_loss": 3.0395069122314453,
      "eval_runtime": 372.3364,
      "eval_samples_per_second": 495.659,
      "eval_steps_per_second": 3.873,
      "step": 87500
    },
    {
      "epoch": 24.51,
      "learning_rate": 0.00035999999999999997,
      "learning_rate_embeddings": 0.00035999999999999997,
      "loss": 2.9762,
      "step": 88000
    },
    {
      "epoch": 24.64,
      "learning_rate": 0.0003588888888888889,
      "learning_rate_embeddings": 0.0003588888888888889,
      "loss": 2.9833,
      "step": 88500
    },
    {
      "epoch": 24.78,
      "learning_rate": 0.00035777777777777777,
      "learning_rate_embeddings": 0.00035777777777777777,
      "loss": 2.9825,
      "step": 89000
    },
    {
      "epoch": 24.92,
      "learning_rate": 0.0003566666666666667,
      "learning_rate_embeddings": 0.0003566666666666667,
      "loss": 2.9818,
      "step": 89500
    },
    {
      "epoch": 25.06,
      "learning_rate": 0.00035555555555555557,
      "learning_rate_embeddings": 0.00035555555555555557,
      "loss": 2.9766,
      "step": 90000
    },
    {
      "epoch": 25.2,
      "learning_rate": 0.00035444444444444445,
      "learning_rate_embeddings": 0.00035444444444444445,
      "loss": 2.9756,
      "step": 90500
    },
    {
      "epoch": 25.34,
      "learning_rate": 0.0003533333333333333,
      "learning_rate_embeddings": 0.0003533333333333333,
      "loss": 2.9714,
      "step": 91000
    },
    {
      "epoch": 25.48,
      "learning_rate": 0.00035222222222222225,
      "learning_rate_embeddings": 0.00035222222222222225,
      "loss": 2.9687,
      "step": 91500
    },
    {
      "epoch": 25.62,
      "learning_rate": 0.0003511111111111111,
      "learning_rate_embeddings": 0.0003511111111111111,
      "loss": 2.9801,
      "step": 92000
    },
    {
      "epoch": 25.76,
      "learning_rate": 0.00035,
      "learning_rate_embeddings": 0.00035,
      "loss": 2.9767,
      "step": 92500
    },
    {
      "epoch": 25.9,
      "learning_rate": 0.0003488888888888889,
      "learning_rate_embeddings": 0.0003488888888888889,
      "loss": 2.9796,
      "step": 93000
    },
    {
      "epoch": 26.04,
      "learning_rate": 0.0003477777777777778,
      "learning_rate_embeddings": 0.0003477777777777778,
      "loss": 2.9668,
      "step": 93500
    },
    {
      "epoch": 26.18,
      "learning_rate": 0.00034666666666666667,
      "learning_rate_embeddings": 0.00034666666666666667,
      "loss": 2.9654,
      "step": 94000
    },
    {
      "epoch": 26.32,
      "learning_rate": 0.00034555555555555555,
      "learning_rate_embeddings": 0.00034555555555555555,
      "loss": 2.9637,
      "step": 94500
    },
    {
      "epoch": 26.45,
      "learning_rate": 0.0003444444444444445,
      "learning_rate_embeddings": 0.0003444444444444445,
      "loss": 2.9736,
      "step": 95000
    },
    {
      "epoch": 26.59,
      "learning_rate": 0.00034333333333333335,
      "learning_rate_embeddings": 0.00034333333333333335,
      "loss": 2.9742,
      "step": 95500
    },
    {
      "epoch": 26.73,
      "learning_rate": 0.0003422222222222222,
      "learning_rate_embeddings": 0.0003422222222222222,
      "loss": 2.9767,
      "step": 96000
    },
    {
      "epoch": 26.87,
      "learning_rate": 0.0003411111111111111,
      "learning_rate_embeddings": 0.0003411111111111111,
      "loss": 2.9831,
      "step": 96500
    },
    {
      "epoch": 27.01,
      "learning_rate": 0.00034,
      "learning_rate_embeddings": 0.00034,
      "loss": 2.975,
      "step": 97000
    },
    {
      "epoch": 27.15,
      "learning_rate": 0.0003388888888888889,
      "learning_rate_embeddings": 0.0003388888888888889,
      "loss": 2.9581,
      "step": 97500
    },
    {
      "epoch": 27.29,
      "learning_rate": 0.00033777777777777777,
      "learning_rate_embeddings": 0.00033777777777777777,
      "loss": 2.9659,
      "step": 98000
    },
    {
      "epoch": 27.43,
      "learning_rate": 0.0003366666666666667,
      "learning_rate_embeddings": 0.0003366666666666667,
      "loss": 2.9714,
      "step": 98500
    },
    {
      "epoch": 27.57,
      "learning_rate": 0.0003355555555555556,
      "learning_rate_embeddings": 0.0003355555555555556,
      "loss": 2.9697,
      "step": 99000
    },
    {
      "epoch": 27.71,
      "learning_rate": 0.00033444444444444445,
      "learning_rate_embeddings": 0.00033444444444444445,
      "loss": 2.9753,
      "step": 99500
    },
    {
      "epoch": 27.85,
      "learning_rate": 0.0003333333333333333,
      "learning_rate_embeddings": 0.0003333333333333333,
      "loss": 2.9737,
      "step": 100000
    },
    {
      "epoch": 27.85,
      "eval_loss": 3.029412031173706,
      "eval_runtime": 371.951,
      "eval_samples_per_second": 496.173,
      "eval_steps_per_second": 3.877,
      "step": 100000
    },
    {
      "epoch": 27.99,
      "learning_rate": 0.0003322222222222222,
      "learning_rate_embeddings": 0.0003322222222222222,
      "loss": 2.9716,
      "step": 100500
    },
    {
      "epoch": 28.13,
      "learning_rate": 0.0003311111111111111,
      "learning_rate_embeddings": 0.0003311111111111111,
      "loss": 2.9588,
      "step": 101000
    },
    {
      "epoch": 28.27,
      "learning_rate": 0.00033,
      "learning_rate_embeddings": 0.00033,
      "loss": 2.963,
      "step": 101500
    },
    {
      "epoch": 28.4,
      "learning_rate": 0.0003288888888888889,
      "learning_rate_embeddings": 0.0003288888888888889,
      "loss": 2.9687,
      "step": 102000
    },
    {
      "epoch": 28.54,
      "learning_rate": 0.0003277777777777778,
      "learning_rate_embeddings": 0.0003277777777777778,
      "loss": 2.9656,
      "step": 102500
    },
    {
      "epoch": 28.68,
      "learning_rate": 0.0003266666666666667,
      "learning_rate_embeddings": 0.0003266666666666667,
      "loss": 2.9737,
      "step": 103000
    },
    {
      "epoch": 28.82,
      "learning_rate": 0.00032555555555555555,
      "learning_rate_embeddings": 0.00032555555555555555,
      "loss": 2.9756,
      "step": 103500
    },
    {
      "epoch": 28.96,
      "learning_rate": 0.0003244444444444444,
      "learning_rate_embeddings": 0.0003244444444444444,
      "loss": 2.9722,
      "step": 104000
    },
    {
      "epoch": 29.1,
      "learning_rate": 0.0003233333333333333,
      "learning_rate_embeddings": 0.0003233333333333333,
      "loss": 2.9641,
      "step": 104500
    },
    {
      "epoch": 29.24,
      "learning_rate": 0.0003222222222222222,
      "learning_rate_embeddings": 0.0003222222222222222,
      "loss": 2.9624,
      "step": 105000
    },
    {
      "epoch": 29.38,
      "learning_rate": 0.00032111111111111115,
      "learning_rate_embeddings": 0.00032111111111111115,
      "loss": 2.9622,
      "step": 105500
    },
    {
      "epoch": 29.52,
      "learning_rate": 0.00032,
      "learning_rate_embeddings": 0.00032,
      "loss": 2.9678,
      "step": 106000
    },
    {
      "epoch": 29.66,
      "learning_rate": 0.0003188888888888889,
      "learning_rate_embeddings": 0.0003188888888888889,
      "loss": 2.9656,
      "step": 106500
    },
    {
      "epoch": 29.8,
      "learning_rate": 0.0003177777777777778,
      "learning_rate_embeddings": 0.0003177777777777778,
      "loss": 2.9703,
      "step": 107000
    },
    {
      "epoch": 29.94,
      "learning_rate": 0.00031666666666666665,
      "learning_rate_embeddings": 0.00031666666666666665,
      "loss": 2.9671,
      "step": 107500
    },
    {
      "epoch": 30.08,
      "learning_rate": 0.0003155555555555555,
      "learning_rate_embeddings": 0.0003155555555555555,
      "loss": 2.9646,
      "step": 108000
    },
    {
      "epoch": 30.21,
      "learning_rate": 0.0003144444444444445,
      "learning_rate_embeddings": 0.0003144444444444445,
      "loss": 2.9542,
      "step": 108500
    },
    {
      "epoch": 30.35,
      "learning_rate": 0.0003133333333333334,
      "learning_rate_embeddings": 0.0003133333333333334,
      "loss": 2.9569,
      "step": 109000
    },
    {
      "epoch": 30.49,
      "learning_rate": 0.00031222222222222225,
      "learning_rate_embeddings": 0.00031222222222222225,
      "loss": 2.9601,
      "step": 109500
    },
    {
      "epoch": 30.63,
      "learning_rate": 0.0003111111111111111,
      "learning_rate_embeddings": 0.0003111111111111111,
      "loss": 2.9666,
      "step": 110000
    },
    {
      "epoch": 30.77,
      "learning_rate": 0.00031,
      "learning_rate_embeddings": 0.00031,
      "loss": 2.9671,
      "step": 110500
    },
    {
      "epoch": 30.91,
      "learning_rate": 0.0003088888888888889,
      "learning_rate_embeddings": 0.0003088888888888889,
      "loss": 2.9688,
      "step": 111000
    },
    {
      "epoch": 31.05,
      "learning_rate": 0.00030777777777777775,
      "learning_rate_embeddings": 0.00030777777777777775,
      "loss": 2.9593,
      "step": 111500
    },
    {
      "epoch": 31.19,
      "learning_rate": 0.0003066666666666667,
      "learning_rate_embeddings": 0.0003066666666666667,
      "loss": 2.9461,
      "step": 112000
    },
    {
      "epoch": 31.33,
      "learning_rate": 0.0003055555555555556,
      "learning_rate_embeddings": 0.0003055555555555556,
      "loss": 2.9599,
      "step": 112500
    },
    {
      "epoch": 31.33,
      "eval_loss": 3.028226375579834,
      "eval_runtime": 371.9034,
      "eval_samples_per_second": 496.236,
      "eval_steps_per_second": 3.877,
      "step": 112500
    },
    {
      "epoch": 31.47,
      "learning_rate": 0.0003044444444444445,
      "learning_rate_embeddings": 0.0003044444444444445,
      "loss": 2.9567,
      "step": 113000
    },
    {
      "epoch": 31.61,
      "learning_rate": 0.00030333333333333335,
      "learning_rate_embeddings": 0.00030333333333333335,
      "loss": 2.9622,
      "step": 113500
    },
    {
      "epoch": 31.75,
      "learning_rate": 0.0003022222222222222,
      "learning_rate_embeddings": 0.0003022222222222222,
      "loss": 2.966,
      "step": 114000
    },
    {
      "epoch": 31.89,
      "learning_rate": 0.0003011111111111111,
      "learning_rate_embeddings": 0.0003011111111111111,
      "loss": 2.9648,
      "step": 114500
    },
    {
      "epoch": 32.02,
      "learning_rate": 0.0003,
      "learning_rate_embeddings": 0.0003,
      "loss": 2.9599,
      "step": 115000
    },
    {
      "epoch": 32.16,
      "learning_rate": 0.0002988888888888889,
      "learning_rate_embeddings": 0.0002988888888888889,
      "loss": 2.9498,
      "step": 115500
    },
    {
      "epoch": 32.3,
      "learning_rate": 0.0002977777777777778,
      "learning_rate_embeddings": 0.0002977777777777778,
      "loss": 2.9547,
      "step": 116000
    },
    {
      "epoch": 32.44,
      "learning_rate": 0.0002966666666666667,
      "learning_rate_embeddings": 0.0002966666666666667,
      "loss": 2.9573,
      "step": 116500
    },
    {
      "epoch": 32.58,
      "learning_rate": 0.0002955555555555556,
      "learning_rate_embeddings": 0.0002955555555555556,
      "loss": 2.9509,
      "step": 117000
    },
    {
      "epoch": 32.72,
      "learning_rate": 0.00029444444444444445,
      "learning_rate_embeddings": 0.00029444444444444445,
      "loss": 2.9599,
      "step": 117500
    },
    {
      "epoch": 32.86,
      "learning_rate": 0.0002933333333333333,
      "learning_rate_embeddings": 0.0002933333333333333,
      "loss": 2.9641,
      "step": 118000
    },
    {
      "epoch": 33.0,
      "learning_rate": 0.0002922222222222222,
      "learning_rate_embeddings": 0.0002922222222222222,
      "loss": 2.9676,
      "step": 118500
    },
    {
      "epoch": 33.14,
      "learning_rate": 0.00029111111111111113,
      "learning_rate_embeddings": 0.00029111111111111113,
      "loss": 2.9484,
      "step": 119000
    },
    {
      "epoch": 33.28,
      "learning_rate": 0.00029,
      "learning_rate_embeddings": 0.00029,
      "loss": 2.9464,
      "step": 119500
    },
    {
      "epoch": 33.42,
      "learning_rate": 0.0002888888888888889,
      "learning_rate_embeddings": 0.0002888888888888889,
      "loss": 2.9512,
      "step": 120000
    },
    {
      "epoch": 33.56,
      "learning_rate": 0.0002877777777777778,
      "learning_rate_embeddings": 0.0002877777777777778,
      "loss": 2.9515,
      "step": 120500
    },
    {
      "epoch": 33.7,
      "learning_rate": 0.0002866666666666667,
      "learning_rate_embeddings": 0.0002866666666666667,
      "loss": 2.9572,
      "step": 121000
    },
    {
      "epoch": 33.83,
      "learning_rate": 0.00028555555555555555,
      "learning_rate_embeddings": 0.00028555555555555555,
      "loss": 2.9568,
      "step": 121500
    },
    {
      "epoch": 33.97,
      "learning_rate": 0.0002844444444444444,
      "learning_rate_embeddings": 0.0002844444444444444,
      "loss": 2.9584,
      "step": 122000
    },
    {
      "epoch": 34.11,
      "learning_rate": 0.00028333333333333335,
      "learning_rate_embeddings": 0.00028333333333333335,
      "loss": 2.9487,
      "step": 122500
    },
    {
      "epoch": 34.25,
      "learning_rate": 0.00028222222222222223,
      "learning_rate_embeddings": 0.00028222222222222223,
      "loss": 2.9521,
      "step": 123000
    },
    {
      "epoch": 34.39,
      "learning_rate": 0.0002811111111111111,
      "learning_rate_embeddings": 0.0002811111111111111,
      "loss": 2.9571,
      "step": 123500
    },
    {
      "epoch": 34.53,
      "learning_rate": 0.00028000000000000003,
      "learning_rate_embeddings": 0.00028000000000000003,
      "loss": 2.9541,
      "step": 124000
    },
    {
      "epoch": 34.67,
      "learning_rate": 0.0002788888888888889,
      "learning_rate_embeddings": 0.0002788888888888889,
      "loss": 2.9535,
      "step": 124500
    },
    {
      "epoch": 34.81,
      "learning_rate": 0.0002777777777777778,
      "learning_rate_embeddings": 0.0002777777777777778,
      "loss": 2.956,
      "step": 125000
    },
    {
      "epoch": 34.81,
      "eval_loss": 3.0202476978302,
      "eval_runtime": 371.6194,
      "eval_samples_per_second": 496.616,
      "eval_steps_per_second": 3.88,
      "step": 125000
    },
    {
      "epoch": 34.95,
      "learning_rate": 0.00027666666666666665,
      "learning_rate_embeddings": 0.00027666666666666665,
      "loss": 2.9584,
      "step": 125500
    },
    {
      "epoch": 35.09,
      "learning_rate": 0.0002755555555555556,
      "learning_rate_embeddings": 0.0002755555555555556,
      "loss": 2.9457,
      "step": 126000
    },
    {
      "epoch": 35.23,
      "learning_rate": 0.00027444444444444445,
      "learning_rate_embeddings": 0.00027444444444444445,
      "loss": 2.953,
      "step": 126500
    },
    {
      "epoch": 35.37,
      "learning_rate": 0.00027333333333333333,
      "learning_rate_embeddings": 0.00027333333333333333,
      "loss": 2.9454,
      "step": 127000
    },
    {
      "epoch": 35.51,
      "learning_rate": 0.0002722222222222222,
      "learning_rate_embeddings": 0.0002722222222222222,
      "loss": 2.951,
      "step": 127500
    },
    {
      "epoch": 35.64,
      "learning_rate": 0.00027111111111111113,
      "learning_rate_embeddings": 0.00027111111111111113,
      "loss": 2.9565,
      "step": 128000
    },
    {
      "epoch": 35.78,
      "learning_rate": 0.00027,
      "learning_rate_embeddings": 0.00027,
      "loss": 2.9517,
      "step": 128500
    },
    {
      "epoch": 35.92,
      "learning_rate": 0.00026888888888888893,
      "learning_rate_embeddings": 0.00026888888888888893,
      "loss": 2.9531,
      "step": 129000
    },
    {
      "epoch": 36.06,
      "learning_rate": 0.0002677777777777778,
      "learning_rate_embeddings": 0.0002677777777777778,
      "loss": 2.9478,
      "step": 129500
    },
    {
      "epoch": 36.2,
      "learning_rate": 0.0002666666666666667,
      "learning_rate_embeddings": 0.0002666666666666667,
      "loss": 2.941,
      "step": 130000
    },
    {
      "epoch": 36.34,
      "learning_rate": 0.00026555555555555555,
      "learning_rate_embeddings": 0.00026555555555555555,
      "loss": 2.9475,
      "step": 130500
    },
    {
      "epoch": 36.48,
      "learning_rate": 0.00026444444444444443,
      "learning_rate_embeddings": 0.00026444444444444443,
      "loss": 2.944,
      "step": 131000
    },
    {
      "epoch": 36.62,
      "learning_rate": 0.0002633333333333333,
      "learning_rate_embeddings": 0.0002633333333333333,
      "loss": 2.9524,
      "step": 131500
    },
    {
      "epoch": 36.76,
      "learning_rate": 0.00026222222222222223,
      "learning_rate_embeddings": 0.00026222222222222223,
      "loss": 2.9598,
      "step": 132000
    },
    {
      "epoch": 36.9,
      "learning_rate": 0.00026111111111111116,
      "learning_rate_embeddings": 0.00026111111111111116,
      "loss": 2.9563,
      "step": 132500
    },
    {
      "epoch": 37.04,
      "learning_rate": 0.00026000000000000003,
      "learning_rate_embeddings": 0.00026000000000000003,
      "loss": 2.9514,
      "step": 133000
    },
    {
      "epoch": 37.18,
      "learning_rate": 0.0002588888888888889,
      "learning_rate_embeddings": 0.0002588888888888889,
      "loss": 2.9466,
      "step": 133500
    },
    {
      "epoch": 37.32,
      "learning_rate": 0.0002577777777777778,
      "learning_rate_embeddings": 0.0002577777777777778,
      "loss": 2.9474,
      "step": 134000
    },
    {
      "epoch": 37.45,
      "learning_rate": 0.00025666666666666665,
      "learning_rate_embeddings": 0.00025666666666666665,
      "loss": 2.9447,
      "step": 134500
    },
    {
      "epoch": 37.59,
      "learning_rate": 0.00025555555555555553,
      "learning_rate_embeddings": 0.00025555555555555553,
      "loss": 2.9452,
      "step": 135000
    },
    {
      "epoch": 37.73,
      "learning_rate": 0.0002544444444444444,
      "learning_rate_embeddings": 0.0002544444444444444,
      "loss": 2.9533,
      "step": 135500
    },
    {
      "epoch": 37.87,
      "learning_rate": 0.0002533333333333334,
      "learning_rate_embeddings": 0.0002533333333333334,
      "loss": 2.9492,
      "step": 136000
    },
    {
      "epoch": 38.01,
      "learning_rate": 0.00025222222222222226,
      "learning_rate_embeddings": 0.00025222222222222226,
      "loss": 2.9486,
      "step": 136500
    },
    {
      "epoch": 38.15,
      "learning_rate": 0.00025111111111111113,
      "learning_rate_embeddings": 0.00025111111111111113,
      "loss": 2.94,
      "step": 137000
    },
    {
      "epoch": 38.29,
      "learning_rate": 0.00025,
      "learning_rate_embeddings": 0.00025,
      "loss": 2.9434,
      "step": 137500
    },
    {
      "epoch": 38.29,
      "eval_loss": 3.0181665420532227,
      "eval_runtime": 371.7979,
      "eval_samples_per_second": 496.377,
      "eval_steps_per_second": 3.878,
      "step": 137500
    },
    {
      "epoch": 38.43,
      "learning_rate": 0.0002488888888888889,
      "learning_rate_embeddings": 0.0002488888888888889,
      "loss": 2.9442,
      "step": 138000
    },
    {
      "epoch": 38.57,
      "learning_rate": 0.0002477777777777778,
      "learning_rate_embeddings": 0.0002477777777777778,
      "loss": 2.9447,
      "step": 138500
    },
    {
      "epoch": 38.71,
      "learning_rate": 0.0002466666666666667,
      "learning_rate_embeddings": 0.0002466666666666667,
      "loss": 2.9478,
      "step": 139000
    },
    {
      "epoch": 38.85,
      "learning_rate": 0.00024555555555555556,
      "learning_rate_embeddings": 0.00024555555555555556,
      "loss": 2.9467,
      "step": 139500
    },
    {
      "epoch": 38.99,
      "learning_rate": 0.00024444444444444443,
      "learning_rate_embeddings": 0.00024444444444444443,
      "loss": 2.9508,
      "step": 140000
    },
    {
      "epoch": 39.13,
      "learning_rate": 0.00024333333333333336,
      "learning_rate_embeddings": 0.00024333333333333336,
      "loss": 2.9367,
      "step": 140500
    },
    {
      "epoch": 39.26,
      "learning_rate": 0.00024222222222222223,
      "learning_rate_embeddings": 0.00024222222222222223,
      "loss": 2.9416,
      "step": 141000
    },
    {
      "epoch": 39.4,
      "learning_rate": 0.0002411111111111111,
      "learning_rate_embeddings": 0.0002411111111111111,
      "loss": 2.9424,
      "step": 141500
    },
    {
      "epoch": 39.54,
      "learning_rate": 0.00024,
      "learning_rate_embeddings": 0.00024,
      "loss": 2.9483,
      "step": 142000
    },
    {
      "epoch": 39.68,
      "learning_rate": 0.0002388888888888889,
      "learning_rate_embeddings": 0.0002388888888888889,
      "loss": 2.9475,
      "step": 142500
    },
    {
      "epoch": 39.82,
      "learning_rate": 0.00023777777777777778,
      "learning_rate_embeddings": 0.00023777777777777778,
      "loss": 2.9446,
      "step": 143000
    },
    {
      "epoch": 39.96,
      "learning_rate": 0.00023666666666666668,
      "learning_rate_embeddings": 0.00023666666666666668,
      "loss": 2.9511,
      "step": 143500
    },
    {
      "epoch": 40.1,
      "learning_rate": 0.00023555555555555556,
      "learning_rate_embeddings": 0.00023555555555555556,
      "loss": 2.9359,
      "step": 144000
    },
    {
      "epoch": 40.24,
      "learning_rate": 0.00023444444444444446,
      "learning_rate_embeddings": 0.00023444444444444446,
      "loss": 2.9381,
      "step": 144500
    },
    {
      "epoch": 40.38,
      "learning_rate": 0.00023333333333333333,
      "learning_rate_embeddings": 0.00023333333333333333,
      "loss": 2.9348,
      "step": 145000
    },
    {
      "epoch": 40.52,
      "learning_rate": 0.00023222222222222223,
      "learning_rate_embeddings": 0.00023222222222222223,
      "loss": 2.9352,
      "step": 145500
    },
    {
      "epoch": 40.66,
      "learning_rate": 0.0002311111111111111,
      "learning_rate_embeddings": 0.0002311111111111111,
      "loss": 2.9368,
      "step": 146000
    },
    {
      "epoch": 40.8,
      "learning_rate": 0.00023,
      "learning_rate_embeddings": 0.00023,
      "loss": 2.9408,
      "step": 146500
    },
    {
      "epoch": 40.94,
      "learning_rate": 0.0002288888888888889,
      "learning_rate_embeddings": 0.0002288888888888889,
      "loss": 2.9437,
      "step": 147000
    },
    {
      "epoch": 41.07,
      "learning_rate": 0.00022777777777777778,
      "learning_rate_embeddings": 0.00022777777777777778,
      "loss": 2.9407,
      "step": 147500
    },
    {
      "epoch": 41.21,
      "learning_rate": 0.00022666666666666666,
      "learning_rate_embeddings": 0.00022666666666666666,
      "loss": 2.9401,
      "step": 148000
    },
    {
      "epoch": 41.35,
      "learning_rate": 0.00022555555555555556,
      "learning_rate_embeddings": 0.00022555555555555556,
      "loss": 2.9404,
      "step": 148500
    },
    {
      "epoch": 41.49,
      "learning_rate": 0.00022444444444444446,
      "learning_rate_embeddings": 0.00022444444444444446,
      "loss": 2.9385,
      "step": 149000
    },
    {
      "epoch": 41.63,
      "learning_rate": 0.00022333333333333333,
      "learning_rate_embeddings": 0.00022333333333333333,
      "loss": 2.9441,
      "step": 149500
    },
    {
      "epoch": 41.77,
      "learning_rate": 0.0002222222222222222,
      "learning_rate_embeddings": 0.0002222222222222222,
      "loss": 2.9449,
      "step": 150000
    },
    {
      "epoch": 41.77,
      "eval_loss": 3.011936902999878,
      "eval_runtime": 371.5953,
      "eval_samples_per_second": 496.648,
      "eval_steps_per_second": 3.881,
      "step": 150000
    },
    {
      "epoch": 41.91,
      "learning_rate": 0.00022111111111111113,
      "learning_rate_embeddings": 0.00022111111111111113,
      "loss": 2.9414,
      "step": 150500
    },
    {
      "epoch": 42.05,
      "learning_rate": 0.00022,
      "learning_rate_embeddings": 0.00022,
      "loss": 2.9422,
      "step": 151000
    },
    {
      "epoch": 42.19,
      "learning_rate": 0.00021888888888888888,
      "learning_rate_embeddings": 0.00021888888888888888,
      "loss": 2.9298,
      "step": 151500
    },
    {
      "epoch": 42.33,
      "learning_rate": 0.00021777777777777776,
      "learning_rate_embeddings": 0.00021777777777777776,
      "loss": 2.9378,
      "step": 152000
    },
    {
      "epoch": 42.47,
      "learning_rate": 0.00021666666666666668,
      "learning_rate_embeddings": 0.00021666666666666668,
      "loss": 2.9386,
      "step": 152500
    },
    {
      "epoch": 42.61,
      "learning_rate": 0.00021555555555555556,
      "learning_rate_embeddings": 0.00021555555555555556,
      "loss": 2.9385,
      "step": 153000
    },
    {
      "epoch": 42.75,
      "learning_rate": 0.00021444444444444443,
      "learning_rate_embeddings": 0.00021444444444444443,
      "loss": 2.9334,
      "step": 153500
    },
    {
      "epoch": 42.88,
      "learning_rate": 0.00021333333333333336,
      "learning_rate_embeddings": 0.00021333333333333336,
      "loss": 2.9427,
      "step": 154000
    },
    {
      "epoch": 43.02,
      "learning_rate": 0.00021222222222222223,
      "learning_rate_embeddings": 0.00021222222222222223,
      "loss": 2.9475,
      "step": 154500
    },
    {
      "epoch": 43.16,
      "learning_rate": 0.0002111111111111111,
      "learning_rate_embeddings": 0.0002111111111111111,
      "loss": 2.9279,
      "step": 155000
    },
    {
      "epoch": 43.3,
      "learning_rate": 0.00021,
      "learning_rate_embeddings": 0.00021,
      "loss": 2.9308,
      "step": 155500
    },
    {
      "epoch": 43.44,
      "learning_rate": 0.0002088888888888889,
      "learning_rate_embeddings": 0.0002088888888888889,
      "loss": 2.9331,
      "step": 156000
    },
    {
      "epoch": 43.58,
      "learning_rate": 0.00020777777777777778,
      "learning_rate_embeddings": 0.00020777777777777778,
      "loss": 2.9312,
      "step": 156500
    },
    {
      "epoch": 43.72,
      "learning_rate": 0.00020666666666666666,
      "learning_rate_embeddings": 0.00020666666666666666,
      "loss": 2.9393,
      "step": 157000
    },
    {
      "epoch": 43.86,
      "learning_rate": 0.00020555555555555556,
      "learning_rate_embeddings": 0.00020555555555555556,
      "loss": 2.9401,
      "step": 157500
    },
    {
      "epoch": 44.0,
      "learning_rate": 0.00020444444444444446,
      "learning_rate_embeddings": 0.00020444444444444446,
      "loss": 2.9441,
      "step": 158000
    },
    {
      "epoch": 44.14,
      "learning_rate": 0.00020333333333333333,
      "learning_rate_embeddings": 0.00020333333333333333,
      "loss": 2.9241,
      "step": 158500
    },
    {
      "epoch": 44.28,
      "learning_rate": 0.00020222222222222223,
      "learning_rate_embeddings": 0.00020222222222222223,
      "loss": 2.9266,
      "step": 159000
    },
    {
      "epoch": 44.42,
      "learning_rate": 0.0002011111111111111,
      "learning_rate_embeddings": 0.0002011111111111111,
      "loss": 2.9326,
      "step": 159500
    },
    {
      "epoch": 44.56,
      "learning_rate": 0.0002,
      "learning_rate_embeddings": 0.0002,
      "loss": 2.9406,
      "step": 160000
    },
    {
      "epoch": 44.7,
      "learning_rate": 0.00019888888888888888,
      "learning_rate_embeddings": 0.00019888888888888888,
      "loss": 2.9357,
      "step": 160500
    },
    {
      "epoch": 44.83,
      "learning_rate": 0.00019777777777777778,
      "learning_rate_embeddings": 0.00019777777777777778,
      "loss": 2.9355,
      "step": 161000
    },
    {
      "epoch": 44.97,
      "learning_rate": 0.00019666666666666666,
      "learning_rate_embeddings": 0.00019666666666666666,
      "loss": 2.945,
      "step": 161500
    },
    {
      "epoch": 45.11,
      "learning_rate": 0.00019555555555555556,
      "learning_rate_embeddings": 0.00019555555555555556,
      "loss": 2.9329,
      "step": 162000
    },
    {
      "epoch": 45.25,
      "learning_rate": 0.00019444444444444446,
      "learning_rate_embeddings": 0.00019444444444444446,
      "loss": 2.9273,
      "step": 162500
    },
    {
      "epoch": 45.25,
      "eval_loss": 3.0122811794281006,
      "eval_runtime": 372.1511,
      "eval_samples_per_second": 495.906,
      "eval_steps_per_second": 3.875,
      "step": 162500
    },
    {
      "epoch": 45.39,
      "learning_rate": 0.00019333333333333333,
      "learning_rate_embeddings": 0.00019333333333333333,
      "loss": 2.9298,
      "step": 163000
    },
    {
      "epoch": 45.53,
      "learning_rate": 0.0001922222222222222,
      "learning_rate_embeddings": 0.0001922222222222222,
      "loss": 2.9366,
      "step": 163500
    },
    {
      "epoch": 45.67,
      "learning_rate": 0.00019111111111111114,
      "learning_rate_embeddings": 0.00019111111111111114,
      "loss": 2.9322,
      "step": 164000
    },
    {
      "epoch": 45.81,
      "learning_rate": 0.00019,
      "learning_rate_embeddings": 0.00019,
      "loss": 2.9347,
      "step": 164500
    },
    {
      "epoch": 45.95,
      "learning_rate": 0.00018888888888888888,
      "learning_rate_embeddings": 0.00018888888888888888,
      "loss": 2.9404,
      "step": 165000
    },
    {
      "epoch": 46.09,
      "learning_rate": 0.00018777777777777776,
      "learning_rate_embeddings": 0.00018777777777777776,
      "loss": 2.9306,
      "step": 165500
    },
    {
      "epoch": 46.23,
      "learning_rate": 0.0001866666666666667,
      "learning_rate_embeddings": 0.0001866666666666667,
      "loss": 2.9237,
      "step": 166000
    },
    {
      "epoch": 46.37,
      "learning_rate": 0.00018555555555555556,
      "learning_rate_embeddings": 0.00018555555555555556,
      "loss": 2.9236,
      "step": 166500
    },
    {
      "epoch": 46.51,
      "learning_rate": 0.00018444444444444443,
      "learning_rate_embeddings": 0.00018444444444444443,
      "loss": 2.9265,
      "step": 167000
    },
    {
      "epoch": 46.64,
      "learning_rate": 0.00018333333333333334,
      "learning_rate_embeddings": 0.00018333333333333334,
      "loss": 2.9376,
      "step": 167500
    },
    {
      "epoch": 46.78,
      "learning_rate": 0.00018222222222222224,
      "learning_rate_embeddings": 0.00018222222222222224,
      "loss": 2.9279,
      "step": 168000
    },
    {
      "epoch": 46.92,
      "learning_rate": 0.0001811111111111111,
      "learning_rate_embeddings": 0.0001811111111111111,
      "loss": 2.9361,
      "step": 168500
    },
    {
      "epoch": 47.06,
      "learning_rate": 0.00017999999999999998,
      "learning_rate_embeddings": 0.00017999999999999998,
      "loss": 2.935,
      "step": 169000
    },
    {
      "epoch": 47.2,
      "learning_rate": 0.00017888888888888889,
      "learning_rate_embeddings": 0.00017888888888888889,
      "loss": 2.922,
      "step": 169500
    },
    {
      "epoch": 47.34,
      "learning_rate": 0.00017777777777777779,
      "learning_rate_embeddings": 0.00017777777777777779,
      "loss": 2.9212,
      "step": 170000
    },
    {
      "epoch": 47.48,
      "learning_rate": 0.00017666666666666666,
      "learning_rate_embeddings": 0.00017666666666666666,
      "loss": 2.9281,
      "step": 170500
    },
    {
      "epoch": 47.62,
      "learning_rate": 0.00017555555555555556,
      "learning_rate_embeddings": 0.00017555555555555556,
      "loss": 2.9334,
      "step": 171000
    },
    {
      "epoch": 47.76,
      "learning_rate": 0.00017444444444444446,
      "learning_rate_embeddings": 0.00017444444444444446,
      "loss": 2.9288,
      "step": 171500
    },
    {
      "epoch": 47.9,
      "learning_rate": 0.00017333333333333334,
      "learning_rate_embeddings": 0.00017333333333333334,
      "loss": 2.9359,
      "step": 172000
    },
    {
      "epoch": 48.04,
      "learning_rate": 0.00017222222222222224,
      "learning_rate_embeddings": 0.00017222222222222224,
      "loss": 2.9363,
      "step": 172500
    },
    {
      "epoch": 48.18,
      "learning_rate": 0.0001711111111111111,
      "learning_rate_embeddings": 0.0001711111111111111,
      "loss": 2.9184,
      "step": 173000
    },
    {
      "epoch": 48.32,
      "learning_rate": 0.00017,
      "learning_rate_embeddings": 0.00017,
      "loss": 2.93,
      "step": 173500
    },
    {
      "epoch": 48.45,
      "learning_rate": 0.00016888888888888889,
      "learning_rate_embeddings": 0.00016888888888888889,
      "loss": 2.9244,
      "step": 174000
    },
    {
      "epoch": 48.59,
      "learning_rate": 0.0001677777777777778,
      "learning_rate_embeddings": 0.0001677777777777778,
      "loss": 2.9349,
      "step": 174500
|
}, |
|
{ |
|
"epoch": 48.73, |
|
"learning_rate": 0.00016666666666666666, |
|
"learning_rate_embeddings": 0.00016666666666666666, |
|
"loss": 2.9239, |
|
"step": 175000 |
|
}, |
|
{ |
|
"epoch": 48.73, |
|
"eval_loss": 3.005641460418701, |
|
"eval_runtime": 371.5929, |
|
"eval_samples_per_second": 496.651, |
|
"eval_steps_per_second": 3.881, |
|
"step": 175000 |
|
}, |
|
{ |
|
"epoch": 48.87, |
|
"learning_rate": 0.00016555555555555556, |
|
"learning_rate_embeddings": 0.00016555555555555556, |
|
"loss": 2.9306, |
|
"step": 175500 |
|
}, |
|
{ |
|
"epoch": 49.01, |
|
"learning_rate": 0.00016444444444444446, |
|
"learning_rate_embeddings": 0.00016444444444444446, |
|
"loss": 2.9388, |
|
"step": 176000 |
|
}, |
|
{ |
|
"epoch": 49.15, |
|
"learning_rate": 0.00016333333333333334, |
|
"learning_rate_embeddings": 0.00016333333333333334, |
|
"loss": 2.9193, |
|
"step": 176500 |
|
}, |
|
{ |
|
"epoch": 49.29, |
|
"learning_rate": 0.0001622222222222222, |
|
"learning_rate_embeddings": 0.0001622222222222222, |
|
"loss": 2.9203, |
|
"step": 177000 |
|
}, |
|
{ |
|
"epoch": 49.43, |
|
"learning_rate": 0.0001611111111111111, |
|
"learning_rate_embeddings": 0.0001611111111111111, |
|
"loss": 2.9198, |
|
"step": 177500 |
|
}, |
|
{ |
|
"epoch": 49.57, |
|
"learning_rate": 0.00016, |
|
"learning_rate_embeddings": 0.00016, |
|
"loss": 2.9254, |
|
"step": 178000 |
|
}, |
|
{ |
|
"epoch": 49.71, |
|
"learning_rate": 0.0001588888888888889, |
|
"learning_rate_embeddings": 0.0001588888888888889, |
|
"loss": 2.9323, |
|
"step": 178500 |
|
}, |
|
{ |
|
"epoch": 49.85, |
|
"learning_rate": 0.00015777777777777776, |
|
"learning_rate_embeddings": 0.00015777777777777776, |
|
"loss": 2.9322, |
|
"step": 179000 |
|
}, |
|
{ |
|
"epoch": 49.99, |
|
"learning_rate": 0.0001566666666666667, |
|
"learning_rate_embeddings": 0.0001566666666666667, |
|
"loss": 2.9324, |
|
"step": 179500 |
|
}, |
|
{ |
|
"epoch": 50.13, |
|
"learning_rate": 0.00015555555555555556, |
|
"learning_rate_embeddings": 0.00015555555555555556, |
|
"loss": 2.9181, |
|
"step": 180000 |
|
}, |
|
{ |
|
"epoch": 50.26, |
|
"learning_rate": 0.00015444444444444444, |
|
"learning_rate_embeddings": 0.00015444444444444444, |
|
"loss": 2.9278, |
|
"step": 180500 |
|
}, |
|
{ |
|
"epoch": 50.4, |
|
"learning_rate": 0.00015333333333333334, |
|
"learning_rate_embeddings": 0.00015333333333333334, |
|
"loss": 2.9298, |
|
"step": 181000 |
|
}, |
|
{ |
|
"epoch": 50.54, |
|
"learning_rate": 0.00015222222222222224, |
|
"learning_rate_embeddings": 0.00015222222222222224, |
|
"loss": 2.9277, |
|
"step": 181500 |
|
}, |
|
{ |
|
"epoch": 50.68, |
|
"learning_rate": 0.0001511111111111111, |
|
"learning_rate_embeddings": 0.0001511111111111111, |
|
"loss": 2.9312, |
|
"step": 182000 |
|
}, |
|
{ |
|
"epoch": 50.82, |
|
"learning_rate": 0.00015, |
|
"learning_rate_embeddings": 0.00015, |
|
"loss": 2.9307, |
|
"step": 182500 |
|
}, |
|
{ |
|
"epoch": 50.96, |
|
"learning_rate": 0.0001488888888888889, |
|
"learning_rate_embeddings": 0.0001488888888888889, |
|
"loss": 2.9271, |
|
"step": 183000 |
|
}, |
|
{ |
|
"epoch": 51.1, |
|
"learning_rate": 0.0001477777777777778, |
|
"learning_rate_embeddings": 0.0001477777777777778, |
|
"loss": 2.9173, |
|
"step": 183500 |
|
}, |
|
{ |
|
"epoch": 51.24, |
|
"learning_rate": 0.00014666666666666666, |
|
"learning_rate_embeddings": 0.00014666666666666666, |
|
"loss": 2.9242, |
|
"step": 184000 |
|
}, |
|
{ |
|
"epoch": 51.38, |
|
"learning_rate": 0.00014555555555555556, |
|
"learning_rate_embeddings": 0.00014555555555555556, |
|
"loss": 2.931, |
|
"step": 184500 |
|
}, |
|
{ |
|
"epoch": 51.52, |
|
"learning_rate": 0.00014444444444444444, |
|
"learning_rate_embeddings": 0.00014444444444444444, |
|
"loss": 2.9222, |
|
"step": 185000 |
|
}, |
|
{ |
|
"epoch": 51.66, |
|
"learning_rate": 0.00014333333333333334, |
|
"learning_rate_embeddings": 0.00014333333333333334, |
|
"loss": 2.9242, |
|
"step": 185500 |
|
}, |
|
{ |
|
"epoch": 51.8, |
|
"learning_rate": 0.0001422222222222222, |
|
"learning_rate_embeddings": 0.0001422222222222222, |
|
"loss": 2.9271, |
|
"step": 186000 |
|
}, |
|
{ |
|
"epoch": 51.94, |
|
"learning_rate": 0.00014111111111111111, |
|
"learning_rate_embeddings": 0.00014111111111111111, |
|
"loss": 2.9257, |
|
"step": 186500 |
|
}, |
|
{ |
|
"epoch": 52.07, |
|
"learning_rate": 0.00014000000000000001, |
|
"learning_rate_embeddings": 0.00014000000000000001, |
|
"loss": 2.925, |
|
"step": 187000 |
|
}, |
|
{ |
|
"epoch": 52.21, |
|
"learning_rate": 0.0001388888888888889, |
|
"learning_rate_embeddings": 0.0001388888888888889, |
|
"loss": 2.9166, |
|
"step": 187500 |
|
}, |
|
{ |
|
"epoch": 52.21, |
|
"eval_loss": 3.0037143230438232, |
|
"eval_runtime": 371.8941, |
|
"eval_samples_per_second": 496.249, |
|
"eval_steps_per_second": 3.877, |
|
"step": 187500 |
|
}, |
|
{ |
|
"epoch": 52.35, |
|
"learning_rate": 0.0001377777777777778, |
|
"learning_rate_embeddings": 0.0001377777777777778, |
|
"loss": 2.9189, |
|
"step": 188000 |
|
}, |
|
{ |
|
"epoch": 52.49, |
|
"learning_rate": 0.00013666666666666666, |
|
"learning_rate_embeddings": 0.00013666666666666666, |
|
"loss": 2.9241, |
|
"step": 188500 |
|
}, |
|
{ |
|
"epoch": 52.63, |
|
"learning_rate": 0.00013555555555555556, |
|
"learning_rate_embeddings": 0.00013555555555555556, |
|
"loss": 2.9269, |
|
"step": 189000 |
|
}, |
|
{ |
|
"epoch": 52.77, |
|
"learning_rate": 0.00013444444444444447, |
|
"learning_rate_embeddings": 0.00013444444444444447, |
|
"loss": 2.9249, |
|
"step": 189500 |
|
}, |
|
{ |
|
"epoch": 52.91, |
|
"learning_rate": 0.00013333333333333334, |
|
"learning_rate_embeddings": 0.00013333333333333334, |
|
"loss": 2.9311, |
|
"step": 190000 |
|
}, |
|
{ |
|
"epoch": 53.05, |
|
"learning_rate": 0.00013222222222222221, |
|
"learning_rate_embeddings": 0.00013222222222222221, |
|
"loss": 2.9271, |
|
"step": 190500 |
|
}, |
|
{ |
|
"epoch": 53.19, |
|
"learning_rate": 0.00013111111111111111, |
|
"learning_rate_embeddings": 0.00013111111111111111, |
|
"loss": 2.9154, |
|
"step": 191000 |
|
}, |
|
{ |
|
"epoch": 53.33, |
|
"learning_rate": 0.00013000000000000002, |
|
"learning_rate_embeddings": 0.00013000000000000002, |
|
"loss": 2.9217, |
|
"step": 191500 |
|
}, |
|
{ |
|
"epoch": 53.47, |
|
"learning_rate": 0.0001288888888888889, |
|
"learning_rate_embeddings": 0.0001288888888888889, |
|
"loss": 2.9171, |
|
"step": 192000 |
|
}, |
|
{ |
|
"epoch": 53.61, |
|
"learning_rate": 0.00012777777777777776, |
|
"learning_rate_embeddings": 0.00012777777777777776, |
|
"loss": 2.9267, |
|
"step": 192500 |
|
}, |
|
{ |
|
"epoch": 53.75, |
|
"learning_rate": 0.0001266666666666667, |
|
"learning_rate_embeddings": 0.0001266666666666667, |
|
"loss": 2.9191, |
|
"step": 193000 |
|
}, |
|
{ |
|
"epoch": 53.88, |
|
"learning_rate": 0.00012555555555555557, |
|
"learning_rate_embeddings": 0.00012555555555555557, |
|
"loss": 2.9272, |
|
"step": 193500 |
|
}, |
|
{ |
|
"epoch": 54.02, |
|
"learning_rate": 0.00012444444444444444, |
|
"learning_rate_embeddings": 0.00012444444444444444, |
|
"loss": 2.9261, |
|
"step": 194000 |
|
}, |
|
{ |
|
"epoch": 54.16, |
|
"learning_rate": 0.00012333333333333334, |
|
"learning_rate_embeddings": 0.00012333333333333334, |
|
"loss": 2.9099, |
|
"step": 194500 |
|
}, |
|
{ |
|
"epoch": 54.3, |
|
"learning_rate": 0.00012222222222222221, |
|
"learning_rate_embeddings": 0.00012222222222222221, |
|
"loss": 2.9197, |
|
"step": 195000 |
|
}, |
|
{ |
|
"epoch": 54.44, |
|
"learning_rate": 0.00012111111111111112, |
|
"learning_rate_embeddings": 0.00012111111111111112, |
|
"loss": 2.9187, |
|
"step": 195500 |
|
}, |
|
{ |
|
"epoch": 54.58, |
|
"learning_rate": 0.00012, |
|
"learning_rate_embeddings": 0.00012, |
|
"loss": 2.9207, |
|
"step": 196000 |
|
}, |
|
{ |
|
"epoch": 54.72, |
|
"learning_rate": 0.00011888888888888889, |
|
"learning_rate_embeddings": 0.00011888888888888889, |
|
"loss": 2.927, |
|
"step": 196500 |
|
}, |
|
{ |
|
"epoch": 54.86, |
|
"learning_rate": 0.00011777777777777778, |
|
"learning_rate_embeddings": 0.00011777777777777778, |
|
"loss": 2.9222, |
|
"step": 197000 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"learning_rate": 0.00011666666666666667, |
|
"learning_rate_embeddings": 0.00011666666666666667, |
|
"loss": 2.925, |
|
"step": 197500 |
|
}, |
|
{ |
|
"epoch": 55.14, |
|
"learning_rate": 0.00011555555555555555, |
|
"learning_rate_embeddings": 0.00011555555555555555, |
|
"loss": 2.9116, |
|
"step": 198000 |
|
}, |
|
{ |
|
"epoch": 55.28, |
|
"learning_rate": 0.00011444444444444445, |
|
"learning_rate_embeddings": 0.00011444444444444445, |
|
"loss": 2.9173, |
|
"step": 198500 |
|
}, |
|
{ |
|
"epoch": 55.42, |
|
"learning_rate": 0.00011333333333333333, |
|
"learning_rate_embeddings": 0.00011333333333333333, |
|
"loss": 2.9176, |
|
"step": 199000 |
|
}, |
|
{ |
|
"epoch": 55.56, |
|
"learning_rate": 0.00011222222222222223, |
|
"learning_rate_embeddings": 0.00011222222222222223, |
|
"loss": 2.9137, |
|
"step": 199500 |
|
}, |
|
{ |
|
"epoch": 55.69, |
|
"learning_rate": 0.0001111111111111111, |
|
"learning_rate_embeddings": 0.0001111111111111111, |
|
"loss": 2.9225, |
|
"step": 200000 |
|
}, |
|
{ |
|
"epoch": 55.69, |
|
"eval_loss": 3.000563621520996, |
|
"eval_runtime": 371.9938, |
|
"eval_samples_per_second": 496.116, |
|
"eval_steps_per_second": 3.876, |
|
"step": 200000 |
|
}, |
|
{ |
|
"epoch": 55.83, |
|
"learning_rate": 0.00011, |
|
"learning_rate_embeddings": 0.00011, |
|
"loss": 2.9211, |
|
"step": 200500 |
|
}, |
|
{ |
|
"epoch": 55.97, |
|
"learning_rate": 0.00010888888888888888, |
|
"learning_rate_embeddings": 0.00010888888888888888, |
|
"loss": 2.9202, |
|
"step": 201000 |
|
}, |
|
{ |
|
"epoch": 56.11, |
|
"learning_rate": 0.00010777777777777778, |
|
"learning_rate_embeddings": 0.00010777777777777778, |
|
"loss": 2.916, |
|
"step": 201500 |
|
}, |
|
{ |
|
"epoch": 56.25, |
|
"learning_rate": 0.00010666666666666668, |
|
"learning_rate_embeddings": 0.00010666666666666668, |
|
"loss": 2.9217, |
|
"step": 202000 |
|
}, |
|
{ |
|
"epoch": 56.39, |
|
"learning_rate": 0.00010555555555555555, |
|
"learning_rate_embeddings": 0.00010555555555555555, |
|
"loss": 2.9111, |
|
"step": 202500 |
|
}, |
|
{ |
|
"epoch": 56.53, |
|
"learning_rate": 0.00010444444444444445, |
|
"learning_rate_embeddings": 0.00010444444444444445, |
|
"loss": 2.916, |
|
"step": 203000 |
|
}, |
|
{ |
|
"epoch": 56.67, |
|
"learning_rate": 0.00010333333333333333, |
|
"learning_rate_embeddings": 0.00010333333333333333, |
|
"loss": 2.9217, |
|
"step": 203500 |
|
}, |
|
{ |
|
"epoch": 56.81, |
|
"learning_rate": 0.00010222222222222223, |
|
"learning_rate_embeddings": 0.00010222222222222223, |
|
"loss": 2.9174, |
|
"step": 204000 |
|
}, |
|
{ |
|
"epoch": 56.95, |
|
"learning_rate": 0.00010111111111111112, |
|
"learning_rate_embeddings": 0.00010111111111111112, |
|
"loss": 2.9179, |
|
"step": 204500 |
|
}, |
|
{ |
|
"epoch": 57.09, |
|
"learning_rate": 0.0001, |
|
"learning_rate_embeddings": 0.0001, |
|
"loss": 2.9108, |
|
"step": 205000 |
|
}, |
|
{ |
|
"epoch": 57.23, |
|
"learning_rate": 9.888888888888889e-05, |
|
"learning_rate_embeddings": 9.888888888888889e-05, |
|
"loss": 2.9113, |
|
"step": 205500 |
|
}, |
|
{ |
|
"epoch": 57.37, |
|
"learning_rate": 9.777777777777778e-05, |
|
"learning_rate_embeddings": 9.777777777777778e-05, |
|
"loss": 2.9194, |
|
"step": 206000 |
|
}, |
|
{ |
|
"epoch": 57.5, |
|
"learning_rate": 9.666666666666667e-05, |
|
"learning_rate_embeddings": 9.666666666666667e-05, |
|
"loss": 2.9101, |
|
"step": 206500 |
|
}, |
|
{ |
|
"epoch": 57.64, |
|
"learning_rate": 9.555555555555557e-05, |
|
"learning_rate_embeddings": 9.555555555555557e-05, |
|
"loss": 2.9138, |
|
"step": 207000 |
|
}, |
|
{ |
|
"epoch": 57.78, |
|
"learning_rate": 9.444444444444444e-05, |
|
"learning_rate_embeddings": 9.444444444444444e-05, |
|
"loss": 2.9161, |
|
"step": 207500 |
|
}, |
|
{ |
|
"epoch": 57.92, |
|
"learning_rate": 9.333333333333334e-05, |
|
"learning_rate_embeddings": 9.333333333333334e-05, |
|
"loss": 2.9162, |
|
"step": 208000 |
|
}, |
|
{ |
|
"epoch": 58.06, |
|
"learning_rate": 9.222222222222222e-05, |
|
"learning_rate_embeddings": 9.222222222222222e-05, |
|
"loss": 2.9065, |
|
"step": 208500 |
|
}, |
|
{ |
|
"epoch": 58.2, |
|
"learning_rate": 9.111111111111112e-05, |
|
"learning_rate_embeddings": 9.111111111111112e-05, |
|
"loss": 2.9155, |
|
"step": 209000 |
|
}, |
|
{ |
|
"epoch": 58.34, |
|
"learning_rate": 8.999999999999999e-05, |
|
"learning_rate_embeddings": 8.999999999999999e-05, |
|
"loss": 2.9155, |
|
"step": 209500 |
|
}, |
|
{ |
|
"epoch": 58.48, |
|
"learning_rate": 8.888888888888889e-05, |
|
"learning_rate_embeddings": 8.888888888888889e-05, |
|
"loss": 2.9149, |
|
"step": 210000 |
|
}, |
|
{ |
|
"epoch": 58.62, |
|
"learning_rate": 8.777777777777778e-05, |
|
"learning_rate_embeddings": 8.777777777777778e-05, |
|
"loss": 2.913, |
|
"step": 210500 |
|
}, |
|
{ |
|
"epoch": 58.76, |
|
"learning_rate": 8.666666666666667e-05, |
|
"learning_rate_embeddings": 8.666666666666667e-05, |
|
"loss": 2.9152, |
|
"step": 211000 |
|
}, |
|
{ |
|
"epoch": 58.9, |
|
"learning_rate": 8.555555555555556e-05, |
|
"learning_rate_embeddings": 8.555555555555556e-05, |
|
"loss": 2.9119, |
|
"step": 211500 |
|
}, |
|
{ |
|
"epoch": 59.04, |
|
"learning_rate": 8.444444444444444e-05, |
|
"learning_rate_embeddings": 8.444444444444444e-05, |
|
"loss": 2.9173, |
|
"step": 212000 |
|
}, |
|
{ |
|
"epoch": 59.18, |
|
"learning_rate": 8.333333333333333e-05, |
|
"learning_rate_embeddings": 8.333333333333333e-05, |
|
"loss": 2.9085, |
|
"step": 212500 |
|
}, |
|
{ |
|
"epoch": 59.18, |
|
"eval_loss": 2.9969003200531006, |
|
"eval_runtime": 371.9315, |
|
"eval_samples_per_second": 496.199, |
|
"eval_steps_per_second": 3.877, |
|
"step": 212500 |
|
}, |
|
{ |
|
"epoch": 59.31, |
|
"learning_rate": 8.222222222222223e-05, |
|
"learning_rate_embeddings": 8.222222222222223e-05, |
|
"loss": 2.9039, |
|
"step": 213000 |
|
}, |
|
{ |
|
"epoch": 59.45, |
|
"learning_rate": 8.11111111111111e-05, |
|
"learning_rate_embeddings": 8.11111111111111e-05, |
|
"loss": 2.9117, |
|
"step": 213500 |
|
}, |
|
{ |
|
"epoch": 59.59, |
|
"learning_rate": 8e-05, |
|
"learning_rate_embeddings": 8e-05, |
|
"loss": 2.9157, |
|
"step": 214000 |
|
}, |
|
{ |
|
"epoch": 59.73, |
|
"learning_rate": 7.888888888888888e-05, |
|
"learning_rate_embeddings": 7.888888888888888e-05, |
|
"loss": 2.9101, |
|
"step": 214500 |
|
}, |
|
{ |
|
"epoch": 59.87, |
|
"learning_rate": 7.777777777777778e-05, |
|
"learning_rate_embeddings": 7.777777777777778e-05, |
|
"loss": 2.9161, |
|
"step": 215000 |
|
}, |
|
{ |
|
"epoch": 60.01, |
|
"learning_rate": 7.666666666666667e-05, |
|
"learning_rate_embeddings": 7.666666666666667e-05, |
|
"loss": 2.9101, |
|
"step": 215500 |
|
}, |
|
{ |
|
"epoch": 60.15, |
|
"learning_rate": 7.555555555555556e-05, |
|
"learning_rate_embeddings": 7.555555555555556e-05, |
|
"loss": 2.9074, |
|
"step": 216000 |
|
}, |
|
{ |
|
"epoch": 60.29, |
|
"learning_rate": 7.444444444444444e-05, |
|
"learning_rate_embeddings": 7.444444444444444e-05, |
|
"loss": 2.9081, |
|
"step": 216500 |
|
}, |
|
{ |
|
"epoch": 60.43, |
|
"learning_rate": 7.333333333333333e-05, |
|
"learning_rate_embeddings": 7.333333333333333e-05, |
|
"loss": 2.9121, |
|
"step": 217000 |
|
}, |
|
{ |
|
"epoch": 60.57, |
|
"learning_rate": 7.222222222222222e-05, |
|
"learning_rate_embeddings": 7.222222222222222e-05, |
|
"loss": 2.907, |
|
"step": 217500 |
|
}, |
|
{ |
|
"epoch": 60.71, |
|
"learning_rate": 7.11111111111111e-05, |
|
"learning_rate_embeddings": 7.11111111111111e-05, |
|
"loss": 2.9098, |
|
"step": 218000 |
|
}, |
|
{ |
|
"epoch": 60.85, |
|
"learning_rate": 7.000000000000001e-05, |
|
"learning_rate_embeddings": 7.000000000000001e-05, |
|
"loss": 2.9116, |
|
"step": 218500 |
|
}, |
|
{ |
|
"epoch": 60.99, |
|
"learning_rate": 6.88888888888889e-05, |
|
"learning_rate_embeddings": 6.88888888888889e-05, |
|
"loss": 2.9078, |
|
"step": 219000 |
|
}, |
|
{ |
|
"epoch": 61.13, |
|
"learning_rate": 6.777777777777778e-05, |
|
"learning_rate_embeddings": 6.777777777777778e-05, |
|
"loss": 2.9076, |
|
"step": 219500 |
|
}, |
|
{ |
|
"epoch": 61.26, |
|
"learning_rate": 6.666666666666667e-05, |
|
"learning_rate_embeddings": 6.666666666666667e-05, |
|
"loss": 2.9083, |
|
"step": 220000 |
|
}, |
|
{ |
|
"epoch": 61.4, |
|
"learning_rate": 6.555555555555556e-05, |
|
"learning_rate_embeddings": 6.555555555555556e-05, |
|
"loss": 2.909, |
|
"step": 220500 |
|
}, |
|
{ |
|
"epoch": 61.54, |
|
"learning_rate": 6.444444444444444e-05, |
|
"learning_rate_embeddings": 6.444444444444444e-05, |
|
"loss": 2.9109, |
|
"step": 221000 |
|
}, |
|
{ |
|
"epoch": 61.68, |
|
"learning_rate": 6.333333333333335e-05, |
|
"learning_rate_embeddings": 6.333333333333335e-05, |
|
"loss": 2.9115, |
|
"step": 221500 |
|
}, |
|
{ |
|
"epoch": 61.82, |
|
"learning_rate": 6.222222222222222e-05, |
|
"learning_rate_embeddings": 6.222222222222222e-05, |
|
"loss": 2.9083, |
|
"step": 222000 |
|
}, |
|
{ |
|
"epoch": 61.96, |
|
"learning_rate": 6.111111111111111e-05, |
|
"learning_rate_embeddings": 6.111111111111111e-05, |
|
"loss": 2.9076, |
|
"step": 222500 |
|
}, |
|
{ |
|
"epoch": 62.1, |
|
"learning_rate": 6e-05, |
|
"learning_rate_embeddings": 6e-05, |
|
"loss": 2.9018, |
|
"step": 223000 |
|
}, |
|
{ |
|
"epoch": 62.24, |
|
"learning_rate": 5.888888888888889e-05, |
|
"learning_rate_embeddings": 5.888888888888889e-05, |
|
"loss": 2.9016, |
|
"step": 223500 |
|
}, |
|
{ |
|
"epoch": 62.38, |
|
"learning_rate": 5.7777777777777776e-05, |
|
"learning_rate_embeddings": 5.7777777777777776e-05, |
|
"loss": 2.9047, |
|
"step": 224000 |
|
}, |
|
{ |
|
"epoch": 62.52, |
|
"learning_rate": 5.6666666666666664e-05, |
|
"learning_rate_embeddings": 5.6666666666666664e-05, |
|
"loss": 2.9047, |
|
"step": 224500 |
|
}, |
|
{ |
|
"epoch": 62.66, |
|
"learning_rate": 5.555555555555555e-05, |
|
"learning_rate_embeddings": 5.555555555555555e-05, |
|
"loss": 2.9084, |
|
"step": 225000 |
|
}, |
|
{ |
|
"epoch": 62.66, |
|
"eval_loss": 2.994610548019409, |
|
"eval_runtime": 371.8603, |
|
"eval_samples_per_second": 496.294, |
|
"eval_steps_per_second": 3.878, |
|
"step": 225000 |
|
}, |
|
{ |
|
"epoch": 62.8, |
|
"learning_rate": 5.444444444444444e-05, |
|
"learning_rate_embeddings": 5.444444444444444e-05, |
|
"loss": 2.9048, |
|
"step": 225500 |
|
}, |
|
{ |
|
"epoch": 62.94, |
|
"learning_rate": 5.333333333333334e-05, |
|
"learning_rate_embeddings": 5.333333333333334e-05, |
|
"loss": 2.9025, |
|
"step": 226000 |
|
}, |
|
{ |
|
"epoch": 63.07, |
|
"learning_rate": 5.222222222222223e-05, |
|
"learning_rate_embeddings": 5.222222222222223e-05, |
|
"loss": 2.9048, |
|
"step": 226500 |
|
}, |
|
{ |
|
"epoch": 63.21, |
|
"learning_rate": 5.1111111111111115e-05, |
|
"learning_rate_embeddings": 5.1111111111111115e-05, |
|
"loss": 2.906, |
|
"step": 227000 |
|
}, |
|
{ |
|
"epoch": 63.35, |
|
"learning_rate": 5e-05, |
|
"learning_rate_embeddings": 5e-05, |
|
"loss": 2.9042, |
|
"step": 227500 |
|
}, |
|
{ |
|
"epoch": 63.49, |
|
"learning_rate": 4.888888888888889e-05, |
|
"learning_rate_embeddings": 4.888888888888889e-05, |
|
"loss": 2.9018, |
|
"step": 228000 |
|
}, |
|
{ |
|
"epoch": 63.63, |
|
"learning_rate": 4.7777777777777784e-05, |
|
"learning_rate_embeddings": 4.7777777777777784e-05, |
|
"loss": 2.9103, |
|
"step": 228500 |
|
}, |
|
{ |
|
"epoch": 63.77, |
|
"learning_rate": 4.666666666666667e-05, |
|
"learning_rate_embeddings": 4.666666666666667e-05, |
|
"loss": 2.9079, |
|
"step": 229000 |
|
}, |
|
{ |
|
"epoch": 63.91, |
|
"learning_rate": 4.555555555555556e-05, |
|
"learning_rate_embeddings": 4.555555555555556e-05, |
|
"loss": 2.9017, |
|
"step": 229500 |
|
}, |
|
{ |
|
"epoch": 64.05, |
|
"learning_rate": 4.4444444444444447e-05, |
|
"learning_rate_embeddings": 4.4444444444444447e-05, |
|
"loss": 2.9066, |
|
"step": 230000 |
|
}, |
|
{ |
|
"epoch": 64.19, |
|
"learning_rate": 4.3333333333333334e-05, |
|
"learning_rate_embeddings": 4.3333333333333334e-05, |
|
"loss": 2.9032, |
|
"step": 230500 |
|
}, |
|
{ |
|
"epoch": 64.33, |
|
"learning_rate": 4.222222222222222e-05, |
|
"learning_rate_embeddings": 4.222222222222222e-05, |
|
"loss": 2.8985, |
|
"step": 231000 |
|
}, |
|
{ |
|
"epoch": 64.47, |
|
"learning_rate": 4.1111111111111116e-05, |
|
"learning_rate_embeddings": 4.1111111111111116e-05, |
|
"loss": 2.8984, |
|
"step": 231500 |
|
}, |
|
{ |
|
"epoch": 64.61, |
|
"learning_rate": 4e-05, |
|
"learning_rate_embeddings": 4e-05, |
|
"loss": 2.8997, |
|
"step": 232000 |
|
}, |
|
{ |
|
"epoch": 64.75, |
|
"learning_rate": 3.888888888888889e-05, |
|
"learning_rate_embeddings": 3.888888888888889e-05, |
|
"loss": 2.9084, |
|
"step": 232500 |
|
}, |
|
{ |
|
"epoch": 64.88, |
|
"learning_rate": 3.777777777777778e-05, |
|
"learning_rate_embeddings": 3.777777777777778e-05, |
|
"loss": 2.9057, |
|
"step": 233000 |
|
}, |
|
{ |
|
"epoch": 65.02, |
|
"learning_rate": 3.6666666666666666e-05, |
|
"learning_rate_embeddings": 3.6666666666666666e-05, |
|
"loss": 2.9019, |
|
"step": 233500 |
|
}, |
|
{ |
|
"epoch": 65.16, |
|
"learning_rate": 3.555555555555555e-05, |
|
"learning_rate_embeddings": 3.555555555555555e-05, |
|
"loss": 2.9003, |
|
"step": 234000 |
|
}, |
|
{ |
|
"epoch": 65.3, |
|
"learning_rate": 3.444444444444445e-05, |
|
"learning_rate_embeddings": 3.444444444444445e-05, |
|
"loss": 2.8984, |
|
"step": 234500 |
|
}, |
|
{ |
|
"epoch": 65.44, |
|
"learning_rate": 3.3333333333333335e-05, |
|
"learning_rate_embeddings": 3.3333333333333335e-05, |
|
"loss": 2.8975, |
|
"step": 235000 |
|
}, |
|
{ |
|
"epoch": 65.58, |
|
"learning_rate": 3.222222222222222e-05, |
|
"learning_rate_embeddings": 3.222222222222222e-05, |
|
"loss": 2.9005, |
|
"step": 235500 |
|
}, |
|
{ |
|
"epoch": 65.72, |
|
"learning_rate": 3.111111111111111e-05, |
|
"learning_rate_embeddings": 3.111111111111111e-05, |
|
"loss": 2.901, |
|
"step": 236000 |
|
}, |
|
{ |
|
"epoch": 65.86, |
|
"learning_rate": 3e-05, |
|
"learning_rate_embeddings": 3e-05, |
|
"loss": 2.9041, |
|
"step": 236500 |
|
}, |
|
{ |
|
"epoch": 66.0, |
|
"learning_rate": 2.8888888888888888e-05, |
|
"learning_rate_embeddings": 2.8888888888888888e-05, |
|
"loss": 2.8981, |
|
"step": 237000 |
|
}, |
|
{ |
|
"epoch": 66.14, |
|
"learning_rate": 2.7777777777777776e-05, |
|
"learning_rate_embeddings": 2.7777777777777776e-05, |
|
"loss": 2.9033, |
|
"step": 237500 |
|
}, |
|
{ |
|
"epoch": 66.14, |
|
"eval_loss": 2.990591287612915, |
|
"eval_runtime": 371.7717, |
|
"eval_samples_per_second": 496.412, |
|
"eval_steps_per_second": 3.879, |
|
"step": 237500 |
|
}, |
|
{ |
|
"epoch": 66.28, |
|
"learning_rate": 2.666666666666667e-05, |
|
"learning_rate_embeddings": 2.666666666666667e-05, |
|
"loss": 2.898, |
|
"step": 238000 |
|
}, |
|
{ |
|
"epoch": 66.42, |
|
"learning_rate": 2.5555555555555557e-05, |
|
"learning_rate_embeddings": 2.5555555555555557e-05, |
|
"loss": 2.9022, |
|
"step": 238500 |
|
}, |
|
{ |
|
"epoch": 66.56, |
|
"learning_rate": 2.4444444444444445e-05, |
|
"learning_rate_embeddings": 2.4444444444444445e-05, |
|
"loss": 2.8963, |
|
"step": 239000 |
|
}, |
|
{ |
|
"epoch": 66.69, |
|
"learning_rate": 2.3333333333333336e-05, |
|
"learning_rate_embeddings": 2.3333333333333336e-05, |
|
"loss": 2.9019, |
|
"step": 239500 |
|
}, |
|
{ |
|
"epoch": 66.83, |
|
"learning_rate": 2.2222222222222223e-05, |
|
"learning_rate_embeddings": 2.2222222222222223e-05, |
|
"loss": 2.8966, |
|
"step": 240000 |
|
}, |
|
{ |
|
"epoch": 66.97, |
|
"learning_rate": 2.111111111111111e-05, |
|
"learning_rate_embeddings": 2.111111111111111e-05, |
|
"loss": 2.8934, |
|
"step": 240500 |
|
}, |
|
{ |
|
"epoch": 67.11, |
|
"learning_rate": 2e-05, |
|
"learning_rate_embeddings": 2e-05, |
|
"loss": 2.8972, |
|
"step": 241000 |
|
}, |
|
{ |
|
"epoch": 67.25, |
|
"learning_rate": 1.888888888888889e-05, |
|
"learning_rate_embeddings": 1.888888888888889e-05, |
|
"loss": 2.8937, |
|
"step": 241500 |
|
}, |
|
{ |
|
"epoch": 67.39, |
|
"learning_rate": 1.7777777777777777e-05, |
|
"learning_rate_embeddings": 1.7777777777777777e-05, |
|
"loss": 2.8924, |
|
"step": 242000 |
|
}, |
|
{ |
|
"epoch": 67.53, |
|
"learning_rate": 1.6666666666666667e-05, |
|
"learning_rate_embeddings": 1.6666666666666667e-05, |
|
"loss": 2.8954, |
|
"step": 242500 |
|
}, |
|
{ |
|
"epoch": 67.67, |
|
"learning_rate": 1.5555555555555555e-05, |
|
"learning_rate_embeddings": 1.5555555555555555e-05, |
|
"loss": 2.8983, |
|
"step": 243000 |
|
}, |
|
{ |
|
"epoch": 67.81, |
|
"learning_rate": 1.4444444444444444e-05, |
|
"learning_rate_embeddings": 1.4444444444444444e-05, |
|
"loss": 2.9004, |
|
"step": 243500 |
|
}, |
|
{ |
|
"epoch": 67.95, |
|
"learning_rate": 1.3333333333333335e-05, |
|
"learning_rate_embeddings": 1.3333333333333335e-05, |
|
"loss": 2.9029, |
|
"step": 244000 |
|
}, |
|
{ |
|
"epoch": 68.09, |
|
"learning_rate": 1.2222222222222222e-05, |
|
"learning_rate_embeddings": 1.2222222222222222e-05, |
|
"loss": 2.8969, |
|
"step": 244500 |
|
}, |
|
{ |
|
"epoch": 68.23, |
|
"learning_rate": 1.1111111111111112e-05, |
|
"learning_rate_embeddings": 1.1111111111111112e-05, |
|
"loss": 2.8966, |
|
"step": 245000 |
|
}, |
|
{ |
|
"epoch": 68.37, |
|
"learning_rate": 1e-05, |
|
"learning_rate_embeddings": 1e-05, |
|
"loss": 2.895, |
|
"step": 245500 |
|
}, |
|
{ |
|
"epoch": 68.5, |
|
"learning_rate": 8.888888888888888e-06, |
|
"learning_rate_embeddings": 8.888888888888888e-06, |
|
"loss": 2.8982, |
|
"step": 246000 |
|
}, |
|
{ |
|
"epoch": 68.64, |
|
"learning_rate": 7.777777777777777e-06, |
|
"learning_rate_embeddings": 7.777777777777777e-06, |
|
"loss": 2.8934, |
|
"step": 246500 |
|
}, |
|
{ |
|
"epoch": 68.78, |
|
"learning_rate": 6.6666666666666675e-06, |
|
"learning_rate_embeddings": 6.6666666666666675e-06, |
|
"loss": 2.8999, |
|
"step": 247000 |
|
}, |
|
{ |
|
"epoch": 68.92, |
|
"learning_rate": 5.555555555555556e-06, |
|
"learning_rate_embeddings": 5.555555555555556e-06, |
|
"loss": 2.9024, |
|
"step": 247500 |
|
}, |
|
{ |
|
"epoch": 69.06, |
|
"learning_rate": 4.444444444444444e-06, |
|
"learning_rate_embeddings": 4.444444444444444e-06, |
|
"loss": 2.8982, |
|
"step": 248000 |
|
}, |
|
{ |
|
"epoch": 69.2, |
|
"learning_rate": 3.3333333333333337e-06, |
|
"learning_rate_embeddings": 3.3333333333333337e-06, |
|
"loss": 2.8987, |
|
"step": 248500 |
|
}, |
|
{ |
|
"epoch": 69.34, |
|
"learning_rate": 2.222222222222222e-06, |
|
"learning_rate_embeddings": 2.222222222222222e-06, |
|
"loss": 2.8941, |
|
"step": 249000 |
|
}, |
|
{ |
|
"epoch": 69.48, |
|
"learning_rate": 1.111111111111111e-06, |
|
"learning_rate_embeddings": 1.111111111111111e-06, |
|
"loss": 2.8917, |
|
"step": 249500 |
|
}, |
|
{ |
|
"epoch": 69.62, |
|
"learning_rate": 0.0, |
|
"learning_rate_embeddings": 0.0, |
|
"loss": 2.8921, |
|
"step": 250000 |
|
}, |
|
{ |
|
"epoch": 69.62, |
|
"eval_loss": 2.9811859130859375, |
|
"eval_runtime": 371.5712, |
|
"eval_samples_per_second": 496.68, |
|
"eval_steps_per_second": 3.881, |
|
"step": 250000 |
|
} |
|
], |
|
"max_steps": 250000, |
|
"num_train_epochs": 70, |
|
"total_flos": 4.180960978403328e+18, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|