{
  "best_metric": 1.5939216613769531,
  "best_model_checkpoint": "opus-mt-zh-en-finetuned-zhen-checkpoints/checkpoint-25985",
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 25985,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "grad_norm": 2.777989149093628,
      "learning_rate": 1.9961593226861654e-05,
      "loss": 0.608,
      "step": 500
    },
    {
      "epoch": 0.04,
      "grad_norm": 2.571350336074829,
      "learning_rate": 1.9923109486242066e-05,
      "loss": 0.6031,
      "step": 1000
    },
    {
      "epoch": 0.06,
      "grad_norm": 6.669718265533447,
      "learning_rate": 1.9884625745622478e-05,
      "loss": 0.5823,
      "step": 1500
    },
    {
      "epoch": 0.08,
      "grad_norm": 3.766803503036499,
      "learning_rate": 1.984614200500289e-05,
      "loss": 0.625,
      "step": 2000
    },
    {
      "epoch": 0.1,
      "grad_norm": 2.617687225341797,
      "learning_rate": 1.9807735231864538e-05,
      "loss": 0.6324,
      "step": 2500
    },
    {
      "epoch": 0.12,
      "grad_norm": 4.26535177230835,
      "learning_rate": 1.976932845872619e-05,
      "loss": 0.6184,
      "step": 3000
    },
    {
      "epoch": 0.13,
      "grad_norm": 5.035365581512451,
      "learning_rate": 1.9730844718106602e-05,
      "loss": 0.6277,
      "step": 3500
    },
    {
      "epoch": 0.15,
      "grad_norm": 6.626140594482422,
      "learning_rate": 1.9692360977487014e-05,
      "loss": 0.6557,
      "step": 4000
    },
    {
      "epoch": 0.17,
      "grad_norm": 3.718008279800415,
      "learning_rate": 1.9653877236867426e-05,
      "loss": 0.6623,
      "step": 4500
    },
    {
      "epoch": 0.19,
      "grad_norm": 4.26861047744751,
      "learning_rate": 1.9615470463729075e-05,
      "loss": 0.6719,
      "step": 5000
    },
    {
      "epoch": 0.21,
      "grad_norm": 5.266055107116699,
      "learning_rate": 1.9576986723109487e-05,
      "loss": 0.636,
      "step": 5500
    },
    {
      "epoch": 0.23,
      "grad_norm": 6.086512565612793,
      "learning_rate": 1.95385029824899e-05,
      "loss": 0.6585,
      "step": 6000
    },
    {
      "epoch": 0.25,
      "grad_norm": 4.0940327644348145,
      "learning_rate": 1.950001924187031e-05,
      "loss": 0.6341,
      "step": 6500
    },
    {
      "epoch": 0.27,
      "grad_norm": 7.52503776550293,
      "learning_rate": 1.9461535501250722e-05,
      "loss": 0.6589,
      "step": 7000
    },
    {
      "epoch": 0.29,
      "grad_norm": 3.5400032997131348,
      "learning_rate": 1.9423051760631134e-05,
      "loss": 0.6627,
      "step": 7500
    },
    {
      "epoch": 0.31,
      "grad_norm": 3.525592565536499,
      "learning_rate": 1.9384568020011546e-05,
      "loss": 0.639,
      "step": 8000
    },
    {
      "epoch": 0.33,
      "grad_norm": 3.1940386295318604,
      "learning_rate": 1.9346084279391958e-05,
      "loss": 0.6433,
      "step": 8500
    },
    {
      "epoch": 0.35,
      "grad_norm": 5.362303733825684,
      "learning_rate": 1.930767750625361e-05,
      "loss": 0.6714,
      "step": 9000
    },
    {
      "epoch": 0.37,
      "grad_norm": 4.885318279266357,
      "learning_rate": 1.9269193765634022e-05,
      "loss": 0.6334,
      "step": 9500
    },
    {
      "epoch": 0.38,
      "grad_norm": 7.104074001312256,
      "learning_rate": 1.9230710025014434e-05,
      "loss": 0.6496,
      "step": 10000
    },
    {
      "epoch": 0.4,
      "grad_norm": 5.086849689483643,
      "learning_rate": 1.9192226284394843e-05,
      "loss": 0.6647,
      "step": 10500
    },
    {
      "epoch": 0.42,
      "grad_norm": 10.493572235107422,
      "learning_rate": 1.9153819511256495e-05,
      "loss": 0.6609,
      "step": 11000
    },
    {
      "epoch": 0.44,
      "grad_norm": 10.206048011779785,
      "learning_rate": 1.9115335770636907e-05,
      "loss": 0.6806,
      "step": 11500
    },
    {
      "epoch": 0.46,
      "grad_norm": 9.489506721496582,
      "learning_rate": 1.907685203001732e-05,
      "loss": 0.641,
      "step": 12000
    },
    {
      "epoch": 0.48,
      "grad_norm": 4.525002479553223,
      "learning_rate": 1.903844525687897e-05,
      "loss": 0.6863,
      "step": 12500
    },
    {
      "epoch": 0.5,
      "grad_norm": 7.507730007171631,
      "learning_rate": 1.9000038483740623e-05,
      "loss": 0.6829,
      "step": 13000
    },
    {
      "epoch": 0.52,
      "grad_norm": 4.827548980712891,
      "learning_rate": 1.8961554743121035e-05,
      "loss": 0.6597,
      "step": 13500
    },
    {
      "epoch": 0.54,
      "grad_norm": 6.409604072570801,
      "learning_rate": 1.8923071002501447e-05,
      "loss": 0.6445,
      "step": 14000
    },
    {
      "epoch": 0.56,
      "grad_norm": 9.57291316986084,
      "learning_rate": 1.8884664229363095e-05,
      "loss": 0.6882,
      "step": 14500
    },
    {
      "epoch": 0.58,
      "grad_norm": 9.129105567932129,
      "learning_rate": 1.8846180488743507e-05,
      "loss": 0.6719,
      "step": 15000
    },
    {
      "epoch": 0.6,
      "grad_norm": 8.071757316589355,
      "learning_rate": 1.880769674812392e-05,
      "loss": 0.6668,
      "step": 15500
    },
    {
      "epoch": 0.62,
      "grad_norm": 3.636784791946411,
      "learning_rate": 1.876921300750433e-05,
      "loss": 0.6773,
      "step": 16000
    },
    {
      "epoch": 0.63,
      "grad_norm": 6.6433515548706055,
      "learning_rate": 1.8730729266884743e-05,
      "loss": 0.6776,
      "step": 16500
    },
    {
      "epoch": 0.65,
      "grad_norm": 3.0176546573638916,
      "learning_rate": 1.8692245526265155e-05,
      "loss": 0.6587,
      "step": 17000
    },
    {
      "epoch": 0.67,
      "grad_norm": 7.913615703582764,
      "learning_rate": 1.8653761785645567e-05,
      "loss": 0.6665,
      "step": 17500
    },
    {
      "epoch": 0.69,
      "grad_norm": 5.335203647613525,
      "learning_rate": 1.861527804502598e-05,
      "loss": 0.6684,
      "step": 18000
    },
    {
      "epoch": 0.71,
      "grad_norm": 5.51749324798584,
      "learning_rate": 1.857679430440639e-05,
      "loss": 0.6577,
      "step": 18500
    },
    {
      "epoch": 0.73,
      "grad_norm": 4.254025936126709,
      "learning_rate": 1.8538310563786802e-05,
      "loss": 0.6776,
      "step": 19000
    },
    {
      "epoch": 0.75,
      "grad_norm": 3.3869874477386475,
      "learning_rate": 1.8499826823167214e-05,
      "loss": 0.6351,
      "step": 19500
    },
    {
      "epoch": 0.77,
      "grad_norm": 12.704727172851562,
      "learning_rate": 1.8461343082547626e-05,
      "loss": 0.6904,
      "step": 20000
    },
    {
      "epoch": 0.79,
      "grad_norm": 3.478996515274048,
      "learning_rate": 1.8422859341928038e-05,
      "loss": 0.6829,
      "step": 20500
    },
    {
      "epoch": 0.81,
      "grad_norm": 6.513260841369629,
      "learning_rate": 1.838437560130845e-05,
      "loss": 0.6615,
      "step": 21000
    },
    {
      "epoch": 0.83,
      "grad_norm": 5.242546081542969,
      "learning_rate": 1.8345891860688862e-05,
      "loss": 0.6859,
      "step": 21500
    },
    {
      "epoch": 0.85,
      "grad_norm": 6.638239860534668,
      "learning_rate": 1.8307408120069274e-05,
      "loss": 0.6485,
      "step": 22000
    },
    {
      "epoch": 0.87,
      "grad_norm": 8.32187557220459,
      "learning_rate": 1.8269001346930923e-05,
      "loss": 0.6567,
      "step": 22500
    },
    {
      "epoch": 0.89,
      "grad_norm": 3.9929823875427246,
      "learning_rate": 1.8230517606311334e-05,
      "loss": 0.6996,
      "step": 23000
    },
    {
      "epoch": 0.9,
      "grad_norm": 5.426717281341553,
      "learning_rate": 1.8192110833172987e-05,
      "loss": 0.6529,
      "step": 23500
    },
    {
      "epoch": 0.92,
      "grad_norm": 7.328232288360596,
      "learning_rate": 1.81536270925534e-05,
      "loss": 0.6711,
      "step": 24000
    },
    {
      "epoch": 0.94,
      "grad_norm": 6.648090362548828,
      "learning_rate": 1.811514335193381e-05,
      "loss": 0.7026,
      "step": 24500
    },
    {
      "epoch": 0.96,
      "grad_norm": 4.047011375427246,
      "learning_rate": 1.8076659611314222e-05,
      "loss": 0.6867,
      "step": 25000
    },
    {
      "epoch": 0.98,
      "grad_norm": 10.354930877685547,
      "learning_rate": 1.803825283817587e-05,
      "loss": 0.6718,
      "step": 25500
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.5939216613769531,
      "eval_runtime": 11.3331,
      "eval_samples_per_second": 351.271,
      "eval_steps_per_second": 87.884,
      "step": 25985
    }
  ],
  "logging_steps": 500,
  "max_steps": 259850,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 1029890059665408.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}