dinov2-base-1k_1L-boulderspot / trainer_state.json
{
"best_metric": 0.9808994233422476,
"best_model_checkpoint": "./outputs/dinov2-base-imagenet1k-1-layer-boulderspot-vN/checkpoint-1015",
"epoch": 4.993849938499385,
"eval_steps": 500,
"global_step": 1015,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"grad_norm": 21.444149017333984,
"learning_rate": 3.92156862745098e-06,
"loss": 0.3077,
"step": 10
},
{
"epoch": 0.1,
"grad_norm": 20.83109474182129,
"learning_rate": 7.84313725490196e-06,
"loss": 0.0953,
"step": 20
},
{
"epoch": 0.15,
"grad_norm": 8.993614196777344,
"learning_rate": 1.1764705882352942e-05,
"loss": 0.1968,
"step": 30
},
{
"epoch": 0.2,
"grad_norm": 32.97685241699219,
"learning_rate": 1.568627450980392e-05,
"loss": 0.1265,
"step": 40
},
{
"epoch": 0.25,
"grad_norm": 10.921149253845215,
"learning_rate": 1.9607843137254903e-05,
"loss": 0.166,
"step": 50
},
{
"epoch": 0.3,
"grad_norm": 30.291000366210938,
"learning_rate": 1.9995698998770955e-05,
"loss": 0.1484,
"step": 60
},
{
"epoch": 0.34,
"grad_norm": 7.144667625427246,
"learning_rate": 1.998083609002402e-05,
"loss": 0.1529,
"step": 70
},
{
"epoch": 0.39,
"grad_norm": 72.38031005859375,
"learning_rate": 1.995537395500004e-05,
"loss": 0.1588,
"step": 80
},
{
"epoch": 0.44,
"grad_norm": 9.280795097351074,
"learning_rate": 1.9919339633410737e-05,
"loss": 0.1575,
"step": 90
},
{
"epoch": 0.49,
"grad_norm": 8.867383003234863,
"learning_rate": 1.9872771392183334e-05,
"loss": 0.1345,
"step": 100
},
{
"epoch": 0.54,
"grad_norm": 11.048301696777344,
"learning_rate": 1.981571868482269e-05,
"loss": 0.1832,
"step": 110
},
{
"epoch": 0.59,
"grad_norm": 18.908288955688477,
"learning_rate": 1.974824209889377e-05,
"loss": 0.1392,
"step": 120
},
{
"epoch": 0.64,
"grad_norm": 6.0579047203063965,
"learning_rate": 1.9670413291680223e-05,
"loss": 0.1179,
"step": 130
},
{
"epoch": 0.69,
"grad_norm": 6.6037774085998535,
"learning_rate": 1.9582314914087344e-05,
"loss": 0.1258,
"step": 140
},
{
"epoch": 0.74,
"grad_norm": 17.16813087463379,
"learning_rate": 1.9484040522870333e-05,
"loss": 0.1914,
"step": 150
},
{
"epoch": 0.79,
"grad_norm": 15.085983276367188,
"learning_rate": 1.9375694481280965e-05,
"loss": 0.1747,
"step": 160
},
{
"epoch": 0.84,
"grad_norm": 6.54490852355957,
"learning_rate": 1.9257391848238212e-05,
"loss": 0.0749,
"step": 170
},
{
"epoch": 0.89,
"grad_norm": 0.9332025051116943,
"learning_rate": 1.9129258256140556e-05,
"loss": 0.0706,
"step": 180
},
{
"epoch": 0.93,
"grad_norm": 26.89777946472168,
"learning_rate": 1.8991429777449674e-05,
"loss": 0.0852,
"step": 190
},
{
"epoch": 0.98,
"grad_norm": 4.798658847808838,
"learning_rate": 1.884405278018722e-05,
"loss": 0.1596,
"step": 200
},
{
"epoch": 1.0,
"eval_accuracy": 0.9766081871345029,
"eval_f1": 0.9758655635300373,
"eval_loss": 0.0732586681842804,
"eval_matthews_correlation": 0.8078903073020254,
"eval_precision": 0.9756885037132272,
"eval_recall": 0.9766081871345029,
"eval_runtime": 3.9698,
"eval_samples_per_second": 172.303,
"eval_steps_per_second": 10.832,
"step": 203
},
{
"epoch": 1.03,
"grad_norm": 13.787192344665527,
"learning_rate": 1.8687283772498205e-05,
"loss": 0.1367,
"step": 210
},
{
"epoch": 1.08,
"grad_norm": 5.066114902496338,
"learning_rate": 1.852128923644593e-05,
"loss": 0.1308,
"step": 220
},
{
"epoch": 1.13,
"grad_norm": 2.300729990005493,
"learning_rate": 1.8346245451215068e-05,
"loss": 0.1011,
"step": 230
},
{
"epoch": 1.18,
"grad_norm": 6.617341995239258,
"learning_rate": 1.8162338305910636e-05,
"loss": 0.0879,
"step": 240
},
{
"epoch": 1.23,
"grad_norm": 5.020946025848389,
"learning_rate": 1.79697631021516e-05,
"loss": 0.0969,
"step": 250
},
{
"epoch": 1.28,
"grad_norm": 7.580730438232422,
"learning_rate": 1.776872434666882e-05,
"loss": 0.0942,
"step": 260
},
{
"epoch": 1.33,
"grad_norm": 1.9427905082702637,
"learning_rate": 1.7559435534127534e-05,
"loss": 0.0745,
"step": 270
},
{
"epoch": 1.38,
"grad_norm": 10.397661209106445,
"learning_rate": 1.7342118920405035e-05,
"loss": 0.1028,
"step": 280
},
{
"epoch": 1.43,
"grad_norm": 8.107540130615234,
"learning_rate": 1.7117005286564344e-05,
"loss": 0.0941,
"step": 290
},
{
"epoch": 1.48,
"grad_norm": 13.503528594970703,
"learning_rate": 1.688433369377444e-05,
"loss": 0.1162,
"step": 300
},
{
"epoch": 1.53,
"grad_norm": 15.07189655303955,
"learning_rate": 1.6644351229437416e-05,
"loss": 0.1301,
"step": 310
},
{
"epoch": 1.57,
"grad_norm": 3.9105443954467773,
"learning_rate": 1.63973127447921e-05,
"loss": 0.1441,
"step": 320
},
{
"epoch": 1.62,
"grad_norm": 7.494752883911133,
"learning_rate": 1.6143480584272794e-05,
"loss": 0.1002,
"step": 330
},
{
"epoch": 1.67,
"grad_norm": 2.2176589965820312,
"learning_rate": 1.5883124306910563e-05,
"loss": 0.0731,
"step": 340
},
{
"epoch": 1.72,
"grad_norm": 5.566643714904785,
"learning_rate": 1.5616520400072963e-05,
"loss": 0.093,
"step": 350
},
{
"epoch": 1.77,
"grad_norm": 4.072403907775879,
"learning_rate": 1.5343951985846096e-05,
"loss": 0.0899,
"step": 360
},
{
"epoch": 1.82,
"grad_norm": 4.870898246765137,
"learning_rate": 1.5065708520370943e-05,
"loss": 0.0781,
"step": 370
},
{
"epoch": 1.87,
"grad_norm": 21.179044723510742,
"learning_rate": 1.4782085486453155e-05,
"loss": 0.0807,
"step": 380
},
{
"epoch": 1.92,
"grad_norm": 4.826471328735352,
"learning_rate": 1.4493384079772815e-05,
"loss": 0.0852,
"step": 390
},
{
"epoch": 1.97,
"grad_norm": 3.290867328643799,
"learning_rate": 1.4199910889027335e-05,
"loss": 0.0635,
"step": 400
},
{
"epoch": 2.0,
"eval_accuracy": 0.9473684210526315,
"eval_f1": 0.9522155218554862,
"eval_loss": 0.12761278450489044,
"eval_matthews_correlation": 0.6844503635019795,
"eval_precision": 0.9618507818612543,
"eval_recall": 0.9473684210526315,
"eval_runtime": 4.0112,
"eval_samples_per_second": 170.521,
"eval_steps_per_second": 10.72,
"step": 406
},
{
"epoch": 2.02,
"grad_norm": 16.522815704345703,
"learning_rate": 1.390197757034721e-05,
"loss": 0.0853,
"step": 410
},
{
"epoch": 2.07,
"grad_norm": 5.749844074249268,
"learning_rate": 1.3599900516330382e-05,
"loss": 0.0685,
"step": 420
},
{
"epoch": 2.12,
"grad_norm": 4.761476993560791,
"learning_rate": 1.3294000520046666e-05,
"loss": 0.086,
"step": 430
},
{
"epoch": 2.16,
"grad_norm": 5.712699890136719,
"learning_rate": 1.2984602434369058e-05,
"loss": 0.0927,
"step": 440
},
{
"epoch": 2.21,
"grad_norm": 7.019559860229492,
"learning_rate": 1.2672034826993716e-05,
"loss": 0.0771,
"step": 450
},
{
"epoch": 2.26,
"grad_norm": 5.033693313598633,
"learning_rate": 1.235662963151493e-05,
"loss": 0.065,
"step": 460
},
{
"epoch": 2.31,
"grad_norm": 7.356439590454102,
"learning_rate": 1.2038721794925689e-05,
"loss": 0.0782,
"step": 470
},
{
"epoch": 2.36,
"grad_norm": 13.046978950500488,
"learning_rate": 1.1718648921918112e-05,
"loss": 0.1074,
"step": 480
},
{
"epoch": 2.41,
"grad_norm": 2.489901542663574,
"learning_rate": 1.1396750916361526e-05,
"loss": 0.0891,
"step": 490
},
{
"epoch": 2.46,
"grad_norm": 8.636452674865723,
"learning_rate": 1.1073369620338928e-05,
"loss": 0.0922,
"step": 500
},
{
"epoch": 2.51,
"grad_norm": 3.626593589782715,
"learning_rate": 1.074884845112512e-05,
"loss": 0.0832,
"step": 510
},
{
"epoch": 2.56,
"grad_norm": 2.680762529373169,
"learning_rate": 1.0423532036492077e-05,
"loss": 0.0659,
"step": 520
},
{
"epoch": 2.61,
"grad_norm": 9.335197448730469,
"learning_rate": 1.0097765848728825e-05,
"loss": 0.0718,
"step": 530
},
{
"epoch": 2.66,
"grad_norm": 2.929506301879883,
"learning_rate": 9.771895837764438e-06,
"loss": 0.0975,
"step": 540
},
{
"epoch": 2.71,
"grad_norm": 1.251734733581543,
"learning_rate": 9.446268063783853e-06,
"loss": 0.0343,
"step": 550
},
{
"epoch": 2.76,
"grad_norm": 11.404159545898438,
"learning_rate": 9.121228329726563e-06,
"loss": 0.0488,
"step": 560
},
{
"epoch": 2.8,
"grad_norm": 3.3042876720428467,
"learning_rate": 8.797121814058502e-06,
"loss": 0.0641,
"step": 570
},
{
"epoch": 2.85,
"grad_norm": 10.385183334350586,
"learning_rate": 8.474292704207095e-06,
"loss": 0.0951,
"step": 580
},
{
"epoch": 2.9,
"grad_norm": 7.184572219848633,
"learning_rate": 8.153083831048772e-06,
"loss": 0.0591,
"step": 590
},
{
"epoch": 2.95,
"grad_norm": 4.226497650146484,
"learning_rate": 7.833836304837022e-06,
"loss": 0.1031,
"step": 600
},
{
"epoch": 3.0,
"eval_accuracy": 0.9751461988304093,
"eval_f1": 0.9755012041745224,
"eval_loss": 0.06017656996846199,
"eval_matthews_correlation": 0.8118305972172924,
"eval_precision": 0.9759749663327615,
"eval_recall": 0.9751461988304093,
"eval_runtime": 4.1933,
"eval_samples_per_second": 163.119,
"eval_steps_per_second": 10.255,
"step": 609
},
{
"epoch": 3.0,
"grad_norm": 4.557692527770996,
"learning_rate": 7.516889152957744e-06,
"loss": 0.0533,
"step": 610
},
{
"epoch": 3.05,
"grad_norm": 3.751300096511841,
"learning_rate": 7.202578959896491e-06,
"loss": 0.0782,
"step": 620
},
{
"epoch": 3.1,
"grad_norm": 4.593023777008057,
"learning_rate": 6.891239509799932e-06,
"loss": 0.0627,
"step": 630
},
{
"epoch": 3.15,
"grad_norm": 4.492953777313232,
"learning_rate": 6.583201432011217e-06,
"loss": 0.0564,
"step": 640
},
{
"epoch": 3.2,
"grad_norm": 3.022425889968872,
"learning_rate": 6.278791849955583e-06,
"loss": 0.0719,
"step": 650
},
{
"epoch": 3.25,
"grad_norm": 4.909082889556885,
"learning_rate": 5.978334033749076e-06,
"loss": 0.0531,
"step": 660
},
{
"epoch": 3.3,
"grad_norm": 2.2064766883850098,
"learning_rate": 5.682147056899361e-06,
"loss": 0.0628,
"step": 670
},
{
"epoch": 3.35,
"grad_norm": 5.478167533874512,
"learning_rate": 5.390545457463134e-06,
"loss": 0.0705,
"step": 680
},
{
"epoch": 3.39,
"grad_norm": 5.376189708709717,
"learning_rate": 5.103838904019993e-06,
"loss": 0.0888,
"step": 690
},
{
"epoch": 3.44,
"grad_norm": 3.5109596252441406,
"learning_rate": 4.822331866817478e-06,
"loss": 0.0577,
"step": 700
},
{
"epoch": 3.49,
"grad_norm": 0.5529822707176208,
"learning_rate": 4.546323294436556e-06,
"loss": 0.0421,
"step": 710
},
{
"epoch": 3.54,
"grad_norm": 10.782859802246094,
"learning_rate": 4.276106296320828e-06,
"loss": 0.0579,
"step": 720
},
{
"epoch": 3.59,
"grad_norm": 0.36797773838043213,
"learning_rate": 4.0119678315067025e-06,
"loss": 0.0381,
"step": 730
},
{
"epoch": 3.64,
"grad_norm": 3.724026679992676,
"learning_rate": 3.754188403885013e-06,
"loss": 0.057,
"step": 740
},
{
"epoch": 3.69,
"grad_norm": 6.234977722167969,
"learning_rate": 3.5030417643177416e-06,
"loss": 0.0556,
"step": 750
},
{
"epoch": 3.74,
"grad_norm": 6.538670063018799,
"learning_rate": 3.258794619926159e-06,
"loss": 0.0624,
"step": 760
},
{
"epoch": 3.79,
"grad_norm": 1.8592917919158936,
"learning_rate": 3.021706350859147e-06,
"loss": 0.0544,
"step": 770
},
{
"epoch": 3.84,
"grad_norm": 1.5543180704116821,
"learning_rate": 2.792028734842418e-06,
"loss": 0.0321,
"step": 780
},
{
"epoch": 3.89,
"grad_norm": 6.113970756530762,
"learning_rate": 2.5700056798012164e-06,
"loss": 0.052,
"step": 790
},
{
"epoch": 3.94,
"grad_norm": 6.493689060211182,
"learning_rate": 2.3558729648404065e-06,
"loss": 0.046,
"step": 800
},
{
"epoch": 3.99,
"grad_norm": 3.7167036533355713,
"learning_rate": 2.1498579898570228e-06,
"loss": 0.0587,
"step": 810
},
{
"epoch": 4.0,
"eval_accuracy": 0.9736842105263158,
"eval_f1": 0.9734187929958467,
"eval_loss": 0.05119941756129265,
"eval_matthews_correlation": 0.7904630921052955,
"eval_precision": 0.9732101510950039,
"eval_recall": 0.9736842105263158,
"eval_runtime": 4.2261,
"eval_samples_per_second": 161.853,
"eval_steps_per_second": 10.175,
"step": 813
},
{
"epoch": 4.03,
"grad_norm": 6.378363132476807,
"learning_rate": 1.952179534051183e-06,
"loss": 0.0389,
"step": 820
},
{
"epoch": 4.08,
"grad_norm": 5.403367042541504,
"learning_rate": 1.763047523591831e-06,
"loss": 0.0727,
"step": 830
},
{
"epoch": 4.13,
"grad_norm": 5.556785583496094,
"learning_rate": 1.5826628086839968e-06,
"loss": 0.0365,
"step": 840
},
{
"epoch": 4.18,
"grad_norm": 10.821802139282227,
"learning_rate": 1.41121695027438e-06,
"loss": 0.0419,
"step": 850
},
{
"epoch": 4.23,
"grad_norm": 2.722320795059204,
"learning_rate": 1.2488920166217034e-06,
"loss": 0.0404,
"step": 860
},
{
"epoch": 4.28,
"grad_norm": 11.169462203979492,
"learning_rate": 1.095860389947928e-06,
"loss": 0.0522,
"step": 870
},
{
"epoch": 4.33,
"grad_norm": 4.065892696380615,
"learning_rate": 9.522845833756001e-07,
"loss": 0.0497,
"step": 880
},
{
"epoch": 4.38,
"grad_norm": 11.717745780944824,
"learning_rate": 8.183170683457986e-07,
"loss": 0.0543,
"step": 890
},
{
"epoch": 4.43,
"grad_norm": 3.715181589126587,
"learning_rate": 6.941001126998892e-07,
"loss": 0.065,
"step": 900
},
{
"epoch": 4.48,
"grad_norm": 8.879691123962402,
"learning_rate": 5.797656295970955e-07,
"loss": 0.0546,
"step": 910
},
{
"epoch": 4.53,
"grad_norm": 6.542267799377441,
"learning_rate": 4.754350374283001e-07,
"loss": 0.0491,
"step": 920
},
{
"epoch": 4.58,
"grad_norm": 5.4677557945251465,
"learning_rate": 3.8121913087483033e-07,
"loss": 0.0434,
"step": 930
},
{
"epoch": 4.62,
"grad_norm": 5.965972900390625,
"learning_rate": 2.972179632491989e-07,
"loss": 0.0594,
"step": 940
},
{
"epoch": 4.67,
"grad_norm": 5.097690105438232,
"learning_rate": 2.23520740242712e-07,
"loss": 0.0632,
"step": 950
},
{
"epoch": 4.72,
"grad_norm": 5.078763961791992,
"learning_rate": 1.602057251927891e-07,
"loss": 0.0448,
"step": 960
},
{
"epoch": 4.77,
"grad_norm": 4.476650714874268,
"learning_rate": 1.0734015597060222e-07,
"loss": 0.0481,
"step": 970
},
{
"epoch": 4.82,
"grad_norm": 3.210946798324585,
"learning_rate": 6.498017357731035e-08,
"loss": 0.0604,
"step": 980
},
{
"epoch": 4.87,
"grad_norm": 6.374156951904297,
"learning_rate": 3.317076252467133e-08,
"loss": 0.0406,
"step": 990
},
{
"epoch": 4.92,
"grad_norm": 2.580352306365967,
"learning_rate": 1.1945703063402925e-08,
"loss": 0.0343,
"step": 1000
},
{
"epoch": 4.97,
"grad_norm": 1.094759464263916,
"learning_rate": 1.327535309979533e-09,
"loss": 0.038,
"step": 1010
},
{
"epoch": 4.99,
"eval_accuracy": 0.9809941520467836,
"eval_f1": 0.9808994233422476,
"eval_loss": 0.051854074001312256,
"eval_matthews_correlation": 0.8500768147494288,
"eval_precision": 0.9808194985441887,
"eval_recall": 0.9809941520467836,
"eval_runtime": 4.1997,
"eval_samples_per_second": 162.87,
"eval_steps_per_second": 10.239,
"step": 1015
},
{
"epoch": 4.99,
"step": 1015,
"total_flos": 6.629580853384053e+18,
"train_loss": 0.08605182834446724,
"train_runtime": 583.274,
"train_samples_per_second": 111.397,
"train_steps_per_second": 1.74
}
],
"logging_steps": 10,
"max_steps": 1015,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 6.629580853384053e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
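
For quick inspection, the state above can be consumed directly with the standard json module. The snippet below is a minimal sketch, not part of the repository: it assumes the file has been saved locally as "trainer_state.json" and that matplotlib is installed, prints the best checkpoint and metric, and plots the per-10-step train loss against the per-epoch eval F1.

# Minimal sketch (assumed local filename and matplotlib dependency).
# Loads the Trainer state, reports the best checkpoint, and plots
# train loss vs. eval F1 over training steps.
import json

import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    state = json.load(f)

print(f"best checkpoint: {state['best_model_checkpoint']}")
print(f"best metric (eval_f1): {state['best_metric']:.4f}")

# log_history mixes training logs (key "loss", every 10 steps) and
# evaluation logs (key "eval_f1", once per epoch); split them apart.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_f1" in e]

fig, ax1 = plt.subplots()
ax1.plot([e["step"] for e in train_logs],
         [e["loss"] for e in train_logs],
         label="train loss")
ax1.set_xlabel("step")
ax1.set_ylabel("train loss")

ax2 = ax1.twinx()
ax2.plot([e["step"] for e in eval_logs],
         [e["eval_f1"] for e in eval_logs],
         color="tab:orange", marker="o", label="eval F1")
ax2.set_ylabel("eval F1")

fig.legend(loc="upper right")
fig.tight_layout()
plt.show()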