{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 35370,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 4.929318631608708e-05,
      "loss": 0.5887,
      "step": 500
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.858637263217416e-05,
      "loss": 0.6212,
      "step": 1000
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.787955894826124e-05,
      "loss": 0.6428,
      "step": 1500
    },
    {
      "epoch": 0.28,
      "learning_rate": 4.717274526434832e-05,
      "loss": 0.6492,
      "step": 2000
    },
    {
      "epoch": 0.35,
      "learning_rate": 4.64659315804354e-05,
      "loss": 0.643,
      "step": 2500
    },
    {
      "epoch": 0.42,
      "learning_rate": 4.575911789652248e-05,
      "loss": 0.6446,
      "step": 3000
    },
    {
      "epoch": 0.49,
      "learning_rate": 4.505230421260956e-05,
      "loss": 0.6578,
      "step": 3500
    },
    {
      "epoch": 0.57,
      "learning_rate": 4.434549052869664e-05,
      "loss": 0.6545,
      "step": 4000
    },
    {
      "epoch": 0.64,
      "learning_rate": 4.3638676844783716e-05,
      "loss": 0.6706,
      "step": 4500
    },
    {
      "epoch": 0.71,
      "learning_rate": 4.2931863160870795e-05,
      "loss": 0.673,
      "step": 5000
    },
    {
      "epoch": 0.78,
      "learning_rate": 4.222504947695788e-05,
      "loss": 0.6592,
      "step": 5500
    },
    {
      "epoch": 0.85,
      "learning_rate": 4.151823579304496e-05,
      "loss": 0.6771,
      "step": 6000
    },
    {
      "epoch": 0.92,
      "learning_rate": 4.081142210913204e-05,
      "loss": 0.6889,
      "step": 6500
    },
    {
      "epoch": 0.99,
      "learning_rate": 4.0104608425219116e-05,
      "loss": 0.6697,
      "step": 7000
    },
    {
      "epoch": 1.0,
      "eval_bleu": 30.7024,
      "eval_chrf++": 57.2458,
      "eval_gen_len": 27.1368,
      "eval_loss": 1.2473633289337158,
      "eval_runtime": 1116.0922,
      "eval_samples_per_second": 4.387,
      "eval_spbleu": 42.8416,
      "eval_steps_per_second": 0.627,
      "eval_ter": 59.9898,
      "step": 7074
    },
    {
      "epoch": 1.06,
      "learning_rate": 3.9397794741306194e-05,
      "loss": 0.4754,
      "step": 7500
    },
    {
      "epoch": 1.13,
      "learning_rate": 3.869098105739327e-05,
      "loss": 0.4397,
      "step": 8000
    },
    {
      "epoch": 1.2,
      "learning_rate": 3.798416737348035e-05,
      "loss": 0.4587,
      "step": 8500
    },
    {
      "epoch": 1.27,
      "learning_rate": 3.727735368956743e-05,
      "loss": 0.4694,
      "step": 9000
    },
    {
      "epoch": 1.34,
      "learning_rate": 3.6570540005654515e-05,
      "loss": 0.458,
      "step": 9500
    },
    {
      "epoch": 1.41,
      "learning_rate": 3.5863726321741594e-05,
      "loss": 0.4628,
      "step": 10000
    },
    {
      "epoch": 1.48,
      "learning_rate": 3.515691263782867e-05,
      "loss": 0.4549,
      "step": 10500
    },
    {
      "epoch": 1.55,
      "learning_rate": 3.445009895391575e-05,
      "loss": 0.4579,
      "step": 11000
    },
    {
      "epoch": 1.63,
      "learning_rate": 3.374328527000283e-05,
      "loss": 0.4775,
      "step": 11500
    },
    {
      "epoch": 1.7,
      "learning_rate": 3.303647158608991e-05,
      "loss": 0.4674,
      "step": 12000
    },
    {
      "epoch": 1.77,
      "learning_rate": 3.2329657902176986e-05,
      "loss": 0.4659,
      "step": 12500
    },
    {
      "epoch": 1.84,
      "learning_rate": 3.162284421826407e-05,
      "loss": 0.4693,
      "step": 13000
    },
    {
      "epoch": 1.91,
      "learning_rate": 3.091603053435115e-05,
      "loss": 0.4554,
      "step": 13500
    },
    {
      "epoch": 1.98,
      "learning_rate": 3.0209216850438226e-05,
      "loss": 0.4671,
      "step": 14000
    },
    {
      "epoch": 2.0,
      "eval_bleu": 31.3008,
      "eval_chrf++": 57.3245,
      "eval_gen_len": 26.9416,
      "eval_loss": 1.328170895576477,
      "eval_runtime": 1121.14,
      "eval_samples_per_second": 4.367,
      "eval_spbleu": 43.1798,
      "eval_steps_per_second": 0.624,
      "eval_ter": 59.0032,
      "step": 14148
    },
    {
      "epoch": 2.05,
      "learning_rate": 2.9502403166525304e-05,
      "loss": 0.3415,
      "step": 14500
    },
    {
      "epoch": 2.12,
      "learning_rate": 2.8795589482612383e-05,
      "loss": 0.3063,
      "step": 15000
    },
    {
      "epoch": 2.19,
      "learning_rate": 2.808877579869946e-05,
      "loss": 0.3058,
      "step": 15500
    },
    {
      "epoch": 2.26,
      "learning_rate": 2.738196211478654e-05,
      "loss": 0.3089,
      "step": 16000
    },
    {
      "epoch": 2.33,
      "learning_rate": 2.6675148430873625e-05,
      "loss": 0.3205,
      "step": 16500
    },
    {
      "epoch": 2.4,
      "learning_rate": 2.5968334746960704e-05,
      "loss": 0.3082,
      "step": 17000
    },
    {
      "epoch": 2.47,
      "learning_rate": 2.5261521063047782e-05,
      "loss": 0.3425,
      "step": 17500
    },
    {
      "epoch": 2.54,
      "learning_rate": 2.455470737913486e-05,
      "loss": 0.3189,
      "step": 18000
    },
    {
      "epoch": 2.62,
      "learning_rate": 2.384789369522194e-05,
      "loss": 0.3257,
      "step": 18500
    },
    {
      "epoch": 2.69,
      "learning_rate": 2.3141080011309018e-05,
      "loss": 0.3249,
      "step": 19000
    },
    {
      "epoch": 2.76,
      "learning_rate": 2.24342663273961e-05,
      "loss": 0.3271,
      "step": 19500
    },
    {
      "epoch": 2.83,
      "learning_rate": 2.1727452643483178e-05,
      "loss": 0.3362,
      "step": 20000
    },
    {
      "epoch": 2.9,
      "learning_rate": 2.1020638959570257e-05,
      "loss": 0.3334,
      "step": 20500
    },
    {
      "epoch": 2.97,
      "learning_rate": 2.031382527565734e-05,
      "loss": 0.3229,
      "step": 21000
    },
    {
      "epoch": 3.0,
      "eval_bleu": 30.103,
      "eval_chrf++": 56.7595,
      "eval_gen_len": 27.2512,
      "eval_loss": 1.4371376037597656,
      "eval_runtime": 1144.2163,
      "eval_samples_per_second": 4.279,
      "eval_spbleu": 42.008,
      "eval_steps_per_second": 0.612,
      "eval_ter": 60.4163,
      "step": 21222
    },
    {
      "epoch": 3.04,
      "learning_rate": 1.9607011591744417e-05,
      "loss": 0.2754,
      "step": 21500
    },
    {
      "epoch": 3.11,
      "learning_rate": 1.8900197907831496e-05,
      "loss": 0.2294,
      "step": 22000
    },
    {
      "epoch": 3.18,
      "learning_rate": 1.8193384223918574e-05,
      "loss": 0.2256,
      "step": 22500
    },
    {
      "epoch": 3.25,
      "learning_rate": 1.7486570540005656e-05,
      "loss": 0.2248,
      "step": 23000
    },
    {
      "epoch": 3.32,
      "learning_rate": 1.6779756856092735e-05,
      "loss": 0.2268,
      "step": 23500
    },
    {
      "epoch": 3.39,
      "learning_rate": 1.6072943172179813e-05,
      "loss": 0.2215,
      "step": 24000
    },
    {
      "epoch": 3.46,
      "learning_rate": 1.5366129488266895e-05,
      "loss": 0.2233,
      "step": 24500
    },
    {
      "epoch": 3.53,
      "learning_rate": 1.4659315804353974e-05,
      "loss": 0.2305,
      "step": 25000
    },
    {
      "epoch": 3.6,
      "learning_rate": 1.3952502120441052e-05,
      "loss": 0.2212,
      "step": 25500
    },
    {
      "epoch": 3.68,
      "learning_rate": 1.3245688436528131e-05,
      "loss": 0.2248,
      "step": 26000
    },
    {
      "epoch": 3.75,
      "learning_rate": 1.2538874752615213e-05,
      "loss": 0.2161,
      "step": 26500
    },
    {
      "epoch": 3.82,
      "learning_rate": 1.1832061068702292e-05,
      "loss": 0.2367,
      "step": 27000
    },
    {
      "epoch": 3.89,
      "learning_rate": 1.112524738478937e-05,
      "loss": 0.2326,
      "step": 27500
    },
    {
      "epoch": 3.96,
      "learning_rate": 1.041843370087645e-05,
      "loss": 0.2257,
      "step": 28000
    },
    {
      "epoch": 4.0,
      "eval_bleu": 29.9341,
      "eval_chrf++": 56.6787,
      "eval_gen_len": 27.2226,
      "eval_loss": 1.529828429222107,
      "eval_runtime": 1139.5818,
      "eval_samples_per_second": 4.296,
      "eval_spbleu": 42.0167,
      "eval_steps_per_second": 0.614,
      "eval_ter": 60.6963,
      "step": 28296
    },
    {
      "epoch": 4.03,
      "learning_rate": 9.711620016963529e-06,
      "loss": 0.2022,
      "step": 28500
    },
    {
      "epoch": 4.1,
      "learning_rate": 9.004806333050609e-06,
      "loss": 0.1638,
      "step": 29000
    },
    {
      "epoch": 4.17,
      "learning_rate": 8.297992649137688e-06,
      "loss": 0.1778,
      "step": 29500
    },
    {
      "epoch": 4.24,
      "learning_rate": 7.591178965224767e-06,
      "loss": 0.1752,
      "step": 30000
    },
    {
      "epoch": 4.31,
      "learning_rate": 6.884365281311847e-06,
      "loss": 0.1678,
      "step": 30500
    },
    {
      "epoch": 4.38,
      "learning_rate": 6.177551597398926e-06,
      "loss": 0.1723,
      "step": 31000
    },
    {
      "epoch": 4.45,
      "learning_rate": 5.470737913486006e-06,
      "loss": 0.1655,
      "step": 31500
    },
    {
      "epoch": 4.52,
      "learning_rate": 4.763924229573085e-06,
      "loss": 0.1658,
      "step": 32000
    },
    {
      "epoch": 4.59,
      "learning_rate": 4.057110545660164e-06,
      "loss": 0.1776,
      "step": 32500
    },
    {
      "epoch": 4.66,
      "learning_rate": 3.350296861747244e-06,
      "loss": 0.1632,
      "step": 33000
    },
    {
      "epoch": 4.74,
      "learning_rate": 2.643483177834323e-06,
      "loss": 0.1653,
      "step": 33500
    },
    {
      "epoch": 4.81,
      "learning_rate": 1.9366694939214022e-06,
      "loss": 0.1639,
      "step": 34000
    },
    {
      "epoch": 4.88,
      "learning_rate": 1.2298558100084818e-06,
      "loss": 0.1655,
      "step": 34500
    },
    {
      "epoch": 4.95,
      "learning_rate": 5.230421260955612e-07,
      "loss": 0.1722,
      "step": 35000
    },
    {
      "epoch": 5.0,
      "eval_bleu": 30.6094,
      "eval_chrf++": 56.9952,
      "eval_gen_len": 27.0588,
      "eval_loss": 1.5824342966079712,
      "eval_runtime": 1129.1112,
      "eval_samples_per_second": 4.336,
      "eval_spbleu": 42.6439,
      "eval_steps_per_second": 0.62,
      "eval_ter": 59.923,
      "step": 35370
    }
  ],
  "logging_steps": 500,
  "max_steps": 35370,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 2.107220927048581e+17,
  "trial_name": null,
  "trial_params": null
}