{
"best_metric": 4.681754112243652,
"best_model_checkpoint": "car_brands_image_detection/checkpoint-21765",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 21765,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11,
"grad_norm": 1.245316505432129,
"learning_rate": 4.8963849873359435e-06,
"loss": 5.7646,
"step": 500
},
{
"epoch": 0.23,
"grad_norm": 1.3855398893356323,
"learning_rate": 4.781257195486991e-06,
"loss": 5.7198,
"step": 1000
},
{
"epoch": 0.34,
"grad_norm": 1.5914552211761475,
"learning_rate": 4.666129403638039e-06,
"loss": 5.6722,
"step": 1500
},
{
"epoch": 0.46,
"grad_norm": 1.574438214302063,
"learning_rate": 4.551001611789087e-06,
"loss": 5.6256,
"step": 2000
},
{
"epoch": 0.57,
"grad_norm": 1.6384121179580688,
"learning_rate": 4.435873819940134e-06,
"loss": 5.5753,
"step": 2500
},
{
"epoch": 0.69,
"grad_norm": 2.455289363861084,
"learning_rate": 4.320746028091182e-06,
"loss": 5.5277,
"step": 3000
},
{
"epoch": 0.8,
"grad_norm": 1.637087345123291,
"learning_rate": 4.205618236242229e-06,
"loss": 5.4828,
"step": 3500
},
{
"epoch": 0.92,
"grad_norm": 1.887794017791748,
"learning_rate": 4.090490444393277e-06,
"loss": 5.4315,
"step": 4000
},
{
"epoch": 1.0,
"eval_accuracy": 0.16143021252153936,
"eval_loss": 5.4130473136901855,
"eval_runtime": 373.3699,
"eval_samples_per_second": 93.259,
"eval_steps_per_second": 11.659,
"step": 4353
},
{
"epoch": 1.03,
"grad_norm": 2.1020355224609375,
"learning_rate": 3.975362652544324e-06,
"loss": 5.3915,
"step": 4500
},
{
"epoch": 1.15,
"grad_norm": 1.868577241897583,
"learning_rate": 3.860234860695372e-06,
"loss": 5.34,
"step": 5000
},
{
"epoch": 1.26,
"grad_norm": 1.8365103006362915,
"learning_rate": 3.74510706884642e-06,
"loss": 5.2992,
"step": 5500
},
{
"epoch": 1.38,
"grad_norm": 1.9328190088272095,
"learning_rate": 3.6299792769974675e-06,
"loss": 5.2557,
"step": 6000
},
{
"epoch": 1.49,
"grad_norm": 1.9776891469955444,
"learning_rate": 3.514851485148515e-06,
"loss": 5.2221,
"step": 6500
},
{
"epoch": 1.61,
"grad_norm": 1.8611873388290405,
"learning_rate": 3.3997236932995626e-06,
"loss": 5.1845,
"step": 7000
},
{
"epoch": 1.72,
"grad_norm": 2.0713303089141846,
"learning_rate": 3.2845959014506106e-06,
"loss": 5.1466,
"step": 7500
},
{
"epoch": 1.84,
"grad_norm": 2.1336920261383057,
"learning_rate": 3.1694681096016582e-06,
"loss": 5.1209,
"step": 8000
},
{
"epoch": 1.95,
"grad_norm": 2.0790388584136963,
"learning_rate": 3.054340317752706e-06,
"loss": 5.0821,
"step": 8500
},
{
"epoch": 2.0,
"eval_accuracy": 0.2534175761056864,
"eval_loss": 5.101416110992432,
"eval_runtime": 379.9834,
"eval_samples_per_second": 91.636,
"eval_steps_per_second": 11.456,
"step": 8706
},
{
"epoch": 2.07,
"grad_norm": 2.2780089378356934,
"learning_rate": 2.9392125259037534e-06,
"loss": 5.0448,
"step": 9000
},
{
"epoch": 2.18,
"grad_norm": 2.555509090423584,
"learning_rate": 2.824084734054801e-06,
"loss": 5.005,
"step": 9500
},
{
"epoch": 2.3,
"grad_norm": 2.188175916671753,
"learning_rate": 2.7089569422058486e-06,
"loss": 4.9819,
"step": 10000
},
{
"epoch": 2.41,
"grad_norm": 2.2957634925842285,
"learning_rate": 2.593829150356896e-06,
"loss": 4.9499,
"step": 10500
},
{
"epoch": 2.53,
"grad_norm": 2.6697535514831543,
"learning_rate": 2.4787013585079438e-06,
"loss": 4.9236,
"step": 11000
},
{
"epoch": 2.64,
"grad_norm": 2.6424810886383057,
"learning_rate": 2.363573566658992e-06,
"loss": 4.8961,
"step": 11500
},
{
"epoch": 2.76,
"grad_norm": 2.5476348400115967,
"learning_rate": 2.2484457748100394e-06,
"loss": 4.8669,
"step": 12000
},
{
"epoch": 2.87,
"grad_norm": 2.3084092140197754,
"learning_rate": 2.133317982961087e-06,
"loss": 4.8464,
"step": 12500
},
{
"epoch": 2.99,
"grad_norm": 2.4545116424560547,
"learning_rate": 2.0181901911121346e-06,
"loss": 4.825,
"step": 13000
},
{
"epoch": 3.0,
"eval_accuracy": 0.3051120045950603,
"eval_loss": 4.873249530792236,
"eval_runtime": 383.4043,
"eval_samples_per_second": 90.818,
"eval_steps_per_second": 11.354,
"step": 13059
},
{
"epoch": 3.1,
"grad_norm": 2.7930803298950195,
"learning_rate": 1.9030623992631822e-06,
"loss": 4.7866,
"step": 13500
},
{
"epoch": 3.22,
"grad_norm": 3.1879501342773438,
"learning_rate": 1.7879346074142297e-06,
"loss": 4.7743,
"step": 14000
},
{
"epoch": 3.33,
"grad_norm": 2.784158945083618,
"learning_rate": 1.6728068155652778e-06,
"loss": 4.7578,
"step": 14500
},
{
"epoch": 3.45,
"grad_norm": 2.792235851287842,
"learning_rate": 1.5576790237163253e-06,
"loss": 4.7372,
"step": 15000
},
{
"epoch": 3.56,
"grad_norm": 2.644819974899292,
"learning_rate": 1.442551231867373e-06,
"loss": 4.7102,
"step": 15500
},
{
"epoch": 3.68,
"grad_norm": 2.5567691326141357,
"learning_rate": 1.3274234400184205e-06,
"loss": 4.705,
"step": 16000
},
{
"epoch": 3.79,
"grad_norm": 2.979523181915283,
"learning_rate": 1.2122956481694683e-06,
"loss": 4.6817,
"step": 16500
},
{
"epoch": 3.91,
"grad_norm": 2.797534227371216,
"learning_rate": 1.097167856320516e-06,
"loss": 4.6712,
"step": 17000
},
{
"epoch": 4.0,
"eval_accuracy": 0.3327110855829983,
"eval_loss": 4.730710029602051,
"eval_runtime": 371.2204,
"eval_samples_per_second": 93.799,
"eval_steps_per_second": 11.726,
"step": 17412
},
{
"epoch": 4.02,
"grad_norm": 2.7830564975738525,
"learning_rate": 9.820400644715635e-07,
"loss": 4.6613,
"step": 17500
},
{
"epoch": 4.14,
"grad_norm": 2.9348843097686768,
"learning_rate": 8.669122726226112e-07,
"loss": 4.6467,
"step": 18000
},
{
"epoch": 4.25,
"grad_norm": 2.592219829559326,
"learning_rate": 7.517844807736588e-07,
"loss": 4.6333,
"step": 18500
},
{
"epoch": 4.36,
"grad_norm": 4.5050129890441895,
"learning_rate": 6.366566889247065e-07,
"loss": 4.6238,
"step": 19000
},
{
"epoch": 4.48,
"grad_norm": 3.7189841270446777,
"learning_rate": 5.215288970757541e-07,
"loss": 4.6177,
"step": 19500
},
{
"epoch": 4.59,
"grad_norm": 2.521855115890503,
"learning_rate": 4.064011052268018e-07,
"loss": 4.6131,
"step": 20000
},
{
"epoch": 4.71,
"grad_norm": 2.892822027206421,
"learning_rate": 2.912733133778494e-07,
"loss": 4.6138,
"step": 20500
},
{
"epoch": 4.82,
"grad_norm": 3.660858631134033,
"learning_rate": 1.761455215288971e-07,
"loss": 4.5955,
"step": 21000
},
{
"epoch": 4.94,
"grad_norm": 2.4211411476135254,
"learning_rate": 6.101772967994475e-08,
"loss": 4.592,
"step": 21500
},
{
"epoch": 5.0,
"eval_accuracy": 0.3435956346927053,
"eval_loss": 4.681754112243652,
"eval_runtime": 375.04,
"eval_samples_per_second": 92.843,
"eval_steps_per_second": 11.607,
"step": 21765
}
],
"logging_steps": 500,
"max_steps": 21765,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 5.411951551988482e+19,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}