{
"best_metric": 3.9286768436431885,
"best_model_checkpoint": "car_brands_image_detection/checkpoint-21765",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 21765,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11,
"grad_norm": 3.6166505813598633,
"learning_rate": 2.937830992401566e-06,
"loss": 4.5799,
"step": 500
},
{
"epoch": 0.23,
"grad_norm": 2.645850419998169,
"learning_rate": 2.8687543172921943e-06,
"loss": 4.5716,
"step": 1000
},
{
"epoch": 0.34,
"grad_norm": 2.978064775466919,
"learning_rate": 2.799677642182823e-06,
"loss": 4.5286,
"step": 1500
},
{
"epoch": 0.46,
"grad_norm": 2.759573459625244,
"learning_rate": 2.7306009670734517e-06,
"loss": 4.4846,
"step": 2000
},
{
"epoch": 0.57,
"grad_norm": 2.4581897258758545,
"learning_rate": 2.66152429196408e-06,
"loss": 4.4705,
"step": 2500
},
{
"epoch": 0.69,
"grad_norm": 2.911447286605835,
"learning_rate": 2.592447616854709e-06,
"loss": 4.4394,
"step": 3000
},
{
"epoch": 0.8,
"grad_norm": 3.8367297649383545,
"learning_rate": 2.5233709417453375e-06,
"loss": 4.41,
"step": 3500
},
{
"epoch": 0.92,
"grad_norm": 2.930182456970215,
"learning_rate": 2.454294266635966e-06,
"loss": 4.385,
"step": 4000
},
{
"epoch": 1.0,
"eval_accuracy": 0.40094773118897187,
"eval_loss": 4.410472869873047,
"eval_runtime": 357.321,
"eval_samples_per_second": 97.447,
"eval_steps_per_second": 12.182,
"step": 4353
},
{
"epoch": 1.03,
"grad_norm": 3.3039791584014893,
"learning_rate": 2.3852175915265945e-06,
"loss": 4.3545,
"step": 4500
},
{
"epoch": 1.15,
"grad_norm": 3.3890039920806885,
"learning_rate": 2.316140916417223e-06,
"loss": 4.3126,
"step": 5000
},
{
"epoch": 1.26,
"grad_norm": 4.098603248596191,
"learning_rate": 2.247064241307852e-06,
"loss": 4.2967,
"step": 5500
},
{
"epoch": 1.38,
"grad_norm": 3.3784146308898926,
"learning_rate": 2.1779875661984803e-06,
"loss": 4.2693,
"step": 6000
},
{
"epoch": 1.49,
"grad_norm": 3.1738123893737793,
"learning_rate": 2.1089108910891088e-06,
"loss": 4.2466,
"step": 6500
},
{
"epoch": 1.61,
"grad_norm": 3.7148706912994385,
"learning_rate": 2.0398342159797372e-06,
"loss": 4.2021,
"step": 7000
},
{
"epoch": 1.72,
"grad_norm": 3.290153741836548,
"learning_rate": 1.970757540870366e-06,
"loss": 4.1814,
"step": 7500
},
{
"epoch": 1.84,
"grad_norm": 3.142911672592163,
"learning_rate": 1.9016808657609948e-06,
"loss": 4.1626,
"step": 8000
},
{
"epoch": 1.95,
"grad_norm": 4.143848896026611,
"learning_rate": 1.8326041906516235e-06,
"loss": 4.1376,
"step": 8500
},
{
"epoch": 2.0,
"eval_accuracy": 0.43512349224583574,
"eval_loss": 4.2049126625061035,
"eval_runtime": 371.7808,
"eval_samples_per_second": 93.657,
"eval_steps_per_second": 11.709,
"step": 8706
},
{
"epoch": 2.07,
"grad_norm": 3.016541004180908,
"learning_rate": 1.763527515542252e-06,
"loss": 4.1126,
"step": 9000
},
{
"epoch": 2.18,
"grad_norm": 3.141395092010498,
"learning_rate": 1.6944508404328806e-06,
"loss": 4.0881,
"step": 9500
},
{
"epoch": 2.3,
"grad_norm": 4.218649864196777,
"learning_rate": 1.6253741653235091e-06,
"loss": 4.0854,
"step": 10000
},
{
"epoch": 2.41,
"grad_norm": 3.368730306625366,
"learning_rate": 1.5562974902141378e-06,
"loss": 4.0432,
"step": 10500
},
{
"epoch": 2.53,
"grad_norm": 3.461085557937622,
"learning_rate": 1.4872208151047663e-06,
"loss": 4.0324,
"step": 11000
},
{
"epoch": 2.64,
"grad_norm": 3.5780344009399414,
"learning_rate": 1.418144139995395e-06,
"loss": 4.0083,
"step": 11500
},
{
"epoch": 2.76,
"grad_norm": 3.727916955947876,
"learning_rate": 1.3490674648860236e-06,
"loss": 4.0215,
"step": 12000
},
{
"epoch": 2.87,
"grad_norm": 3.84293532371521,
"learning_rate": 1.279990789776652e-06,
"loss": 3.9905,
"step": 12500
},
{
"epoch": 2.99,
"grad_norm": 4.721088409423828,
"learning_rate": 1.2109141146672808e-06,
"loss": 3.9723,
"step": 13000
},
{
"epoch": 3.0,
"eval_accuracy": 0.4607696726019529,
"eval_loss": 4.052371025085449,
"eval_runtime": 385.9466,
"eval_samples_per_second": 90.22,
"eval_steps_per_second": 11.279,
"step": 13059
},
{
"epoch": 3.1,
"grad_norm": 4.341360092163086,
"learning_rate": 1.1418374395579092e-06,
"loss": 3.9604,
"step": 13500
},
{
"epoch": 3.22,
"grad_norm": 4.207143306732178,
"learning_rate": 1.0727607644485377e-06,
"loss": 3.9293,
"step": 14000
},
{
"epoch": 3.33,
"grad_norm": 4.3804850578308105,
"learning_rate": 1.0036840893391666e-06,
"loss": 3.9193,
"step": 14500
},
{
"epoch": 3.45,
"grad_norm": 3.7782533168792725,
"learning_rate": 9.346074142297951e-07,
"loss": 3.9125,
"step": 15000
},
{
"epoch": 3.56,
"grad_norm": 3.5943124294281006,
"learning_rate": 8.655307391204237e-07,
"loss": 3.8896,
"step": 15500
},
{
"epoch": 3.68,
"grad_norm": 3.702357053756714,
"learning_rate": 7.964540640110522e-07,
"loss": 3.9014,
"step": 16000
},
{
"epoch": 3.79,
"grad_norm": 3.796862840652466,
"learning_rate": 7.273773889016809e-07,
"loss": 3.891,
"step": 16500
},
{
"epoch": 3.91,
"grad_norm": 4.275099277496338,
"learning_rate": 6.583007137923095e-07,
"loss": 3.8704,
"step": 17000
},
{
"epoch": 4.0,
"eval_accuracy": 0.4705054566341183,
"eval_loss": 3.9602584838867188,
"eval_runtime": 391.9958,
"eval_samples_per_second": 88.827,
"eval_steps_per_second": 11.105,
"step": 17412
},
{
"epoch": 4.02,
"grad_norm": 3.892676591873169,
"learning_rate": 5.892240386829381e-07,
"loss": 3.8528,
"step": 17500
},
{
"epoch": 4.14,
"grad_norm": 3.8556807041168213,
"learning_rate": 5.201473635735666e-07,
"loss": 3.8603,
"step": 18000
},
{
"epoch": 4.25,
"grad_norm": 4.647615909576416,
"learning_rate": 4.5107068846419527e-07,
"loss": 3.8373,
"step": 18500
},
{
"epoch": 4.36,
"grad_norm": 5.085917949676514,
"learning_rate": 3.819940133548239e-07,
"loss": 3.8558,
"step": 19000
},
{
"epoch": 4.48,
"grad_norm": 3.4918742179870605,
"learning_rate": 3.1291733824545247e-07,
"loss": 3.8305,
"step": 19500
},
{
"epoch": 4.59,
"grad_norm": 5.628491401672363,
"learning_rate": 2.4384066313608105e-07,
"loss": 3.8442,
"step": 20000
},
{
"epoch": 4.71,
"grad_norm": 6.177685737609863,
"learning_rate": 1.7476398802670965e-07,
"loss": 3.821,
"step": 20500
},
{
"epoch": 4.82,
"grad_norm": 3.602433681488037,
"learning_rate": 1.0568731291733824e-07,
"loss": 3.8144,
"step": 21000
},
{
"epoch": 4.94,
"grad_norm": 3.561356782913208,
"learning_rate": 3.6610637807966844e-08,
"loss": 3.8221,
"step": 21500
},
{
"epoch": 5.0,
"eval_accuracy": 0.4763928776565192,
"eval_loss": 3.9286768436431885,
"eval_runtime": 356.658,
"eval_samples_per_second": 97.629,
"eval_steps_per_second": 12.205,
"step": 21765
}
],
"logging_steps": 500,
"max_steps": 21765,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 5.411951551988482e+19,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}