|
{ |
|
"best_metric": 0.5627052187919617, |
|
"best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/safety-utcustom-train-SF-RGB-b5/checkpoint-240", |
|
"epoch": 21.818181818181817, |
|
"global_step": 240, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.09, |
|
"learning_rate": 1.764705882352941e-07, |
|
"loss": 1.2293, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.18, |
|
"learning_rate": 3.529411764705882e-07, |
|
"loss": 1.2349, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.27, |
|
"learning_rate": 5.294117647058824e-07, |
|
"loss": 1.2357, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.36, |
|
"learning_rate": 7.058823529411765e-07, |
|
"loss": 1.2242, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.45, |
|
"learning_rate": 8.823529411764706e-07, |
|
"loss": 1.2247, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.55, |
|
"learning_rate": 1.0588235294117648e-06, |
|
"loss": 1.227, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.64, |
|
"learning_rate": 1.2352941176470588e-06, |
|
"loss": 1.2292, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.73, |
|
"learning_rate": 1.411764705882353e-06, |
|
"loss": 1.2257, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.82, |
|
"learning_rate": 1.5882352941176472e-06, |
|
"loss": 1.2173, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"learning_rate": 1.7647058823529412e-06, |
|
"loss": 1.2239, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.91, |
|
"eval_accuracy_safe": 0.39920432072351303, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.29510386893492613, |
|
"eval_iou_safe": 0.031418797454432365, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.29392505413655395, |
|
"eval_loss": 1.1102662086486816, |
|
"eval_mean_accuracy": 0.34715409482921955, |
|
"eval_mean_iou": 0.10844795053032878, |
|
"eval_overall_accuracy": 0.2981788293639226, |
|
"eval_runtime": 9.3061, |
|
"eval_samples_per_second": 7.2, |
|
"eval_steps_per_second": 0.537, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 1.9411764705882353e-06, |
|
"loss": 1.221, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 1.09, |
|
"learning_rate": 2.1176470588235296e-06, |
|
"loss": 1.2149, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 1.18, |
|
"learning_rate": 2.2941176470588234e-06, |
|
"loss": 1.2197, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 1.27, |
|
"learning_rate": 2.4705882352941177e-06, |
|
"loss": 1.2064, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 1.36, |
|
"learning_rate": 2.647058823529412e-06, |
|
"loss": 1.2042, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 1.45, |
|
"learning_rate": 2.823529411764706e-06, |
|
"loss": 1.2046, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 1.55, |
|
"learning_rate": 3e-06, |
|
"loss": 1.2054, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 1.64, |
|
"learning_rate": 2.990415335463259e-06, |
|
"loss": 1.2002, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 1.73, |
|
"learning_rate": 2.9808306709265177e-06, |
|
"loss": 1.1982, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"learning_rate": 2.9712460063897764e-06, |
|
"loss": 1.1948, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 1.82, |
|
"eval_accuracy_safe": 0.5218599774094934, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.3705393407485172, |
|
"eval_iou_safe": 0.0439841259320199, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.36893471456827454, |
|
"eval_loss": 1.0963102579116821, |
|
"eval_mean_accuracy": 0.4461996590790053, |
|
"eval_mean_iou": 0.13763961350009815, |
|
"eval_overall_accuracy": 0.37500910972481344, |
|
"eval_runtime": 8.3942, |
|
"eval_samples_per_second": 7.982, |
|
"eval_steps_per_second": 0.596, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 1.91, |
|
"learning_rate": 2.9616613418530352e-06, |
|
"loss": 1.1776, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 2.952076677316294e-06, |
|
"loss": 1.1936, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 2.09, |
|
"learning_rate": 2.942492012779553e-06, |
|
"loss": 1.1664, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 2.18, |
|
"learning_rate": 2.9329073482428116e-06, |
|
"loss": 1.1809, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 2.27, |
|
"learning_rate": 2.9233226837060704e-06, |
|
"loss": 1.1731, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 2.36, |
|
"learning_rate": 2.913738019169329e-06, |
|
"loss": 1.1794, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 2.45, |
|
"learning_rate": 2.904153354632588e-06, |
|
"loss": 1.1684, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 2.55, |
|
"learning_rate": 2.8945686900958468e-06, |
|
"loss": 1.1594, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 2.64, |
|
"learning_rate": 2.884984025559105e-06, |
|
"loss": 1.146, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"learning_rate": 2.8753993610223644e-06, |
|
"loss": 1.1661, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 2.73, |
|
"eval_accuracy_safe": 0.5863200218966003, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.49882181393718666, |
|
"eval_iou_safe": 0.06472096392925002, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.49613792944888824, |
|
"eval_loss": 1.0516184568405151, |
|
"eval_mean_accuracy": 0.5425709179168935, |
|
"eval_mean_iou": 0.1869529644593794, |
|
"eval_overall_accuracy": 0.5014063707038537, |
|
"eval_runtime": 9.3615, |
|
"eval_samples_per_second": 7.157, |
|
"eval_steps_per_second": 0.534, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 2.82, |
|
"learning_rate": 2.865814696485623e-06, |
|
"loss": 1.153, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 2.91, |
|
"learning_rate": 2.856230031948882e-06, |
|
"loss": 1.149, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 2.8466453674121407e-06, |
|
"loss": 1.164, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 3.09, |
|
"learning_rate": 2.8370607028753995e-06, |
|
"loss": 1.1479, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 3.18, |
|
"learning_rate": 2.8274760383386583e-06, |
|
"loss": 1.1428, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 3.27, |
|
"learning_rate": 2.817891373801917e-06, |
|
"loss": 1.1398, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 3.36, |
|
"learning_rate": 2.8083067092651755e-06, |
|
"loss": 1.1415, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 3.45, |
|
"learning_rate": 2.7987220447284347e-06, |
|
"loss": 1.1146, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 3.55, |
|
"learning_rate": 2.7891373801916935e-06, |
|
"loss": 1.1234, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"learning_rate": 2.7795527156549523e-06, |
|
"loss": 1.1112, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 3.64, |
|
"eval_accuracy_safe": 0.5458517893146133, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.5794390867479824, |
|
"eval_iou_safe": 0.0899865428032596, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.5753581085678546, |
|
"eval_loss": 1.0047743320465088, |
|
"eval_mean_accuracy": 0.5626454380312979, |
|
"eval_mean_iou": 0.22178155045703807, |
|
"eval_overall_accuracy": 0.5784469718363747, |
|
"eval_runtime": 9.38, |
|
"eval_samples_per_second": 7.143, |
|
"eval_steps_per_second": 0.533, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 3.73, |
|
"learning_rate": 2.769968051118211e-06, |
|
"loss": 1.1212, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 3.82, |
|
"learning_rate": 2.7603833865814694e-06, |
|
"loss": 1.1117, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 3.91, |
|
"learning_rate": 2.7507987220447286e-06, |
|
"loss": 1.1051, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 2.7412140575079874e-06, |
|
"loss": 1.1382, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 4.09, |
|
"learning_rate": 2.731629392971246e-06, |
|
"loss": 1.1075, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 4.18, |
|
"learning_rate": 2.722044728434505e-06, |
|
"loss": 1.1096, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 4.27, |
|
"learning_rate": 2.7124600638977634e-06, |
|
"loss": 1.0974, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 4.36, |
|
"learning_rate": 2.7028753993610226e-06, |
|
"loss": 1.0851, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 4.45, |
|
"learning_rate": 2.6932907348242814e-06, |
|
"loss": 1.0851, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 4.55, |
|
"learning_rate": 2.6837060702875398e-06, |
|
"loss": 1.0907, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 4.55, |
|
"eval_accuracy_safe": 0.5993018531154467, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.6366664738420048, |
|
"eval_iou_safe": 0.10939424133801141, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.6320960572783577, |
|
"eval_loss": 0.9689871668815613, |
|
"eval_mean_accuracy": 0.6179841634787258, |
|
"eval_mean_iou": 0.24716343287212306, |
|
"eval_overall_accuracy": 0.6355627828569554, |
|
"eval_runtime": 9.8119, |
|
"eval_samples_per_second": 6.828, |
|
"eval_steps_per_second": 0.51, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 4.64, |
|
"learning_rate": 2.674121405750799e-06, |
|
"loss": 1.0789, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 4.73, |
|
"learning_rate": 2.6645367412140573e-06, |
|
"loss": 1.0566, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 4.82, |
|
"learning_rate": 2.654952076677316e-06, |
|
"loss": 1.0718, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 4.91, |
|
"learning_rate": 2.6453674121405753e-06, |
|
"loss": 1.0808, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 2.6357827476038337e-06, |
|
"loss": 1.0808, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 5.09, |
|
"learning_rate": 2.626198083067093e-06, |
|
"loss": 1.0625, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 5.18, |
|
"learning_rate": 2.6166134185303517e-06, |
|
"loss": 1.0508, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 5.27, |
|
"learning_rate": 2.60702875399361e-06, |
|
"loss": 1.0521, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 5.36, |
|
"learning_rate": 2.5974440894568693e-06, |
|
"loss": 1.052, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 5.45, |
|
"learning_rate": 2.5878594249201277e-06, |
|
"loss": 1.047, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 5.45, |
|
"eval_accuracy_safe": 0.6691570194409427, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.669918812994849, |
|
"eval_iou_safe": 0.11592410370525706, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.6655783206367077, |
|
"eval_loss": 0.9437160491943359, |
|
"eval_mean_accuracy": 0.6695379162178958, |
|
"eval_mean_iou": 0.2605008081139883, |
|
"eval_overall_accuracy": 0.6698963108347423, |
|
"eval_runtime": 9.3506, |
|
"eval_samples_per_second": 7.165, |
|
"eval_steps_per_second": 0.535, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 5.55, |
|
"learning_rate": 2.5782747603833865e-06, |
|
"loss": 1.0456, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 5.64, |
|
"learning_rate": 2.5686900958466457e-06, |
|
"loss": 1.0296, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 5.73, |
|
"learning_rate": 2.559105431309904e-06, |
|
"loss": 1.0334, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 5.82, |
|
"learning_rate": 2.5495207667731633e-06, |
|
"loss": 1.0525, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 5.91, |
|
"learning_rate": 2.5399361022364216e-06, |
|
"loss": 1.0397, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 2.5303514376996804e-06, |
|
"loss": 1.0575, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 6.09, |
|
"learning_rate": 2.5207667731629396e-06, |
|
"loss": 1.0245, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 6.18, |
|
"learning_rate": 2.511182108626198e-06, |
|
"loss": 1.0284, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 6.27, |
|
"learning_rate": 2.501597444089457e-06, |
|
"loss": 1.0127, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 6.36, |
|
"learning_rate": 2.4920127795527156e-06, |
|
"loss": 1.0112, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 6.36, |
|
"eval_accuracy_safe": 0.6673046750012529, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.718865515124044, |
|
"eval_iou_safe": 0.13494764234639625, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.7136723036690169, |
|
"eval_loss": 0.9084014892578125, |
|
"eval_mean_accuracy": 0.6930850950626484, |
|
"eval_mean_iou": 0.28287331533847104, |
|
"eval_overall_accuracy": 0.7173424905805446, |
|
"eval_runtime": 9.0845, |
|
"eval_samples_per_second": 7.375, |
|
"eval_steps_per_second": 0.55, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 6.45, |
|
"learning_rate": 2.4824281150159744e-06, |
|
"loss": 1.0107, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 6.55, |
|
"learning_rate": 2.4728434504792336e-06, |
|
"loss": 1.0122, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 6.64, |
|
"learning_rate": 2.463258785942492e-06, |
|
"loss": 1.0087, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 6.73, |
|
"learning_rate": 2.4536741214057507e-06, |
|
"loss": 0.991, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 6.82, |
|
"learning_rate": 2.44408945686901e-06, |
|
"loss": 1.0042, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 6.91, |
|
"learning_rate": 2.4345047923322683e-06, |
|
"loss": 0.9788, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 2.424920127795527e-06, |
|
"loss": 1.0371, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 7.09, |
|
"learning_rate": 2.415335463258786e-06, |
|
"loss": 0.9884, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 7.18, |
|
"learning_rate": 2.4057507987220447e-06, |
|
"loss": 0.9759, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 7.27, |
|
"learning_rate": 2.396166134185304e-06, |
|
"loss": 0.9925, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 7.27, |
|
"eval_accuracy_safe": 0.6842282797676185, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.7664939888573942, |
|
"eval_iou_safe": 0.14521835177647324, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.7605469001045981, |
|
"eval_loss": 0.8647122979164124, |
|
"eval_mean_accuracy": 0.7253611343125064, |
|
"eval_mean_iou": 0.3019217506270238, |
|
"eval_overall_accuracy": 0.7640639917174382, |
|
"eval_runtime": 9.9787, |
|
"eval_samples_per_second": 6.714, |
|
"eval_steps_per_second": 0.501, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 7.36, |
|
"learning_rate": 2.3865814696485623e-06, |
|
"loss": 0.9784, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 7.45, |
|
"learning_rate": 2.376996805111821e-06, |
|
"loss": 0.9806, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 7.55, |
|
"learning_rate": 2.36741214057508e-06, |
|
"loss": 0.973, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 7.64, |
|
"learning_rate": 2.3578274760383387e-06, |
|
"loss": 0.9551, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 7.73, |
|
"learning_rate": 2.3482428115015974e-06, |
|
"loss": 0.9484, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 7.82, |
|
"learning_rate": 2.3386581469648562e-06, |
|
"loss": 0.939, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 7.91, |
|
"learning_rate": 2.329073482428115e-06, |
|
"loss": 0.9533, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 2.319488817891374e-06, |
|
"loss": 0.9533, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 8.09, |
|
"learning_rate": 2.3099041533546326e-06, |
|
"loss": 0.9469, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 8.18, |
|
"learning_rate": 2.3003194888178914e-06, |
|
"loss": 0.9395, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 8.18, |
|
"eval_accuracy_safe": 0.6817822598987667, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.7920860651953089, |
|
"eval_iou_safe": 0.16203325137990066, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.785611270080723, |
|
"eval_loss": 0.8319001197814941, |
|
"eval_mean_accuracy": 0.7369341625470378, |
|
"eval_mean_iou": 0.3158815071535412, |
|
"eval_overall_accuracy": 0.788827867650274, |
|
"eval_runtime": 9.682, |
|
"eval_samples_per_second": 6.92, |
|
"eval_steps_per_second": 0.516, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 8.27, |
|
"learning_rate": 2.29073482428115e-06, |
|
"loss": 0.9422, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 8.36, |
|
"learning_rate": 2.281150159744409e-06, |
|
"loss": 0.9012, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 8.45, |
|
"learning_rate": 2.2715654952076678e-06, |
|
"loss": 0.937, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 8.55, |
|
"learning_rate": 2.2619808306709266e-06, |
|
"loss": 0.9122, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 8.64, |
|
"learning_rate": 2.2523961661341854e-06, |
|
"loss": 0.9272, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 8.73, |
|
"learning_rate": 2.242811501597444e-06, |
|
"loss": 0.9122, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 8.82, |
|
"learning_rate": 2.233226837060703e-06, |
|
"loss": 0.9261, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 8.91, |
|
"learning_rate": 2.2236421725239617e-06, |
|
"loss": 0.9004, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 2.2140575079872205e-06, |
|
"loss": 0.8981, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 9.09, |
|
"learning_rate": 2.2044728434504793e-06, |
|
"loss": 0.8902, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 9.09, |
|
"eval_accuracy_safe": 0.6805698513112902, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.8141590718977455, |
|
"eval_iou_safe": 0.1769983948474704, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.8072069422555583, |
|
"eval_loss": 0.8013909459114075, |
|
"eval_mean_accuracy": 0.7473644616045179, |
|
"eval_mean_iou": 0.32806844570100957, |
|
"eval_overall_accuracy": 0.8102130605213678, |
|
"eval_runtime": 9.6492, |
|
"eval_samples_per_second": 6.944, |
|
"eval_steps_per_second": 0.518, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 9.18, |
|
"learning_rate": 2.194888178913738e-06, |
|
"loss": 0.9085, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 9.27, |
|
"learning_rate": 2.185303514376997e-06, |
|
"loss": 0.8976, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 9.36, |
|
"learning_rate": 2.1757188498402557e-06, |
|
"loss": 0.8909, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 9.45, |
|
"learning_rate": 2.1661341853035145e-06, |
|
"loss": 0.8878, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 9.55, |
|
"learning_rate": 2.1565495207667733e-06, |
|
"loss": 0.8909, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 9.64, |
|
"learning_rate": 2.146964856230032e-06, |
|
"loss": 0.8529, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 9.73, |
|
"learning_rate": 2.137380191693291e-06, |
|
"loss": 0.9097, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 9.82, |
|
"learning_rate": 2.1277955271565496e-06, |
|
"loss": 0.8778, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 9.91, |
|
"learning_rate": 2.1182108626198084e-06, |
|
"loss": 0.8819, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 2.1086261980830672e-06, |
|
"loss": 0.9057, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_accuracy_safe": 0.6984244470915687, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.8178638868312451, |
|
"eval_iou_safe": 0.17330681673606707, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.8109252159162895, |
|
"eval_loss": 0.7867299914360046, |
|
"eval_mean_accuracy": 0.7581441669614069, |
|
"eval_mean_iou": 0.3280773442174522, |
|
"eval_overall_accuracy": 0.814335837293027, |
|
"eval_runtime": 9.8362, |
|
"eval_samples_per_second": 6.812, |
|
"eval_steps_per_second": 0.508, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 10.09, |
|
"learning_rate": 2.099041533546326e-06, |
|
"loss": 0.8509, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 10.18, |
|
"learning_rate": 2.089456869009585e-06, |
|
"loss": 0.8746, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 10.27, |
|
"learning_rate": 2.0798722044728436e-06, |
|
"loss": 0.8532, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 10.36, |
|
"learning_rate": 2.0702875399361024e-06, |
|
"loss": 0.8509, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 10.45, |
|
"learning_rate": 2.060702875399361e-06, |
|
"loss": 0.8559, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 10.55, |
|
"learning_rate": 2.05111821086262e-06, |
|
"loss": 0.8435, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 10.64, |
|
"learning_rate": 2.0415335463258783e-06, |
|
"loss": 0.8465, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 10.73, |
|
"learning_rate": 2.0319488817891376e-06, |
|
"loss": 0.8345, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 10.82, |
|
"learning_rate": 2.0223642172523963e-06, |
|
"loss": 0.8403, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 10.91, |
|
"learning_rate": 2.012779552715655e-06, |
|
"loss": 0.8321, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 10.91, |
|
"eval_accuracy_safe": 0.6743748096576343, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.8493713583566551, |
|
"eval_iou_safe": 0.18617264978095113, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.8413265356526651, |
|
"eval_loss": 0.7439588308334351, |
|
"eval_mean_accuracy": 0.7618730840071447, |
|
"eval_mean_iou": 0.3424997284778721, |
|
"eval_overall_accuracy": 0.8442022409012069, |
|
"eval_runtime": 9.4999, |
|
"eval_samples_per_second": 7.053, |
|
"eval_steps_per_second": 0.526, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"learning_rate": 2.003194888178914e-06, |
|
"loss": 0.8343, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 11.09, |
|
"learning_rate": 1.9936102236421723e-06, |
|
"loss": 0.8413, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 11.18, |
|
"learning_rate": 1.9840255591054315e-06, |
|
"loss": 0.8037, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 11.27, |
|
"learning_rate": 1.9744408945686903e-06, |
|
"loss": 0.8284, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 11.36, |
|
"learning_rate": 1.9648562300319487e-06, |
|
"loss": 0.8023, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 11.45, |
|
"learning_rate": 1.955271565495208e-06, |
|
"loss": 0.8176, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 11.55, |
|
"learning_rate": 1.9456869009584667e-06, |
|
"loss": 0.8078, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 11.64, |
|
"learning_rate": 1.936102236421725e-06, |
|
"loss": 0.8074, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 11.73, |
|
"learning_rate": 1.9265175718849843e-06, |
|
"loss": 0.8139, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 11.82, |
|
"learning_rate": 1.9169329073482426e-06, |
|
"loss": 0.8152, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 11.82, |
|
"eval_accuracy_safe": 0.6688004286799203, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.8589712690862681, |
|
"eval_iou_safe": 0.20063653201077156, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.8506617582762998, |
|
"eval_loss": 0.7270171046257019, |
|
"eval_mean_accuracy": 0.7638858488830942, |
|
"eval_mean_iou": 0.3504327634290238, |
|
"eval_overall_accuracy": 0.8533539273845616, |
|
"eval_runtime": 11.0671, |
|
"eval_samples_per_second": 6.054, |
|
"eval_steps_per_second": 0.452, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 11.91, |
|
"learning_rate": 1.9073482428115018e-06, |
|
"loss": 0.8102, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"learning_rate": 1.8977635782747604e-06, |
|
"loss": 0.8192, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 12.09, |
|
"learning_rate": 1.888178913738019e-06, |
|
"loss": 0.8246, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 12.18, |
|
"learning_rate": 1.878594249201278e-06, |
|
"loss": 0.7864, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 12.27, |
|
"learning_rate": 1.8690095846645368e-06, |
|
"loss": 0.7718, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 12.36, |
|
"learning_rate": 1.8594249201277954e-06, |
|
"loss": 0.8074, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 12.45, |
|
"learning_rate": 1.8498402555910544e-06, |
|
"loss": 0.7569, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 12.55, |
|
"learning_rate": 1.8402555910543132e-06, |
|
"loss": 0.7989, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 12.64, |
|
"learning_rate": 1.8306709265175722e-06, |
|
"loss": 0.7987, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 12.73, |
|
"learning_rate": 1.8210862619808307e-06, |
|
"loss": 0.7929, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 12.73, |
|
"eval_accuracy_safe": 0.6659920354971646, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.8656634973410731, |
|
"eval_iou_safe": 0.20852726465106847, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.8572303764577973, |
|
"eval_loss": 0.7045122981071472, |
|
"eval_mean_accuracy": 0.7658277664191189, |
|
"eval_mean_iou": 0.3552525470362886, |
|
"eval_overall_accuracy": 0.8597655225155959, |
|
"eval_runtime": 9.4502, |
|
"eval_samples_per_second": 7.09, |
|
"eval_steps_per_second": 0.529, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 12.82, |
|
"learning_rate": 1.8115015974440893e-06, |
|
"loss": 0.7703, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 12.91, |
|
"learning_rate": 1.8019169329073483e-06, |
|
"loss": 0.768, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"learning_rate": 1.7923322683706071e-06, |
|
"loss": 0.8397, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 13.09, |
|
"learning_rate": 1.7827476038338657e-06, |
|
"loss": 0.775, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 13.18, |
|
"learning_rate": 1.7731629392971247e-06, |
|
"loss": 0.7718, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 13.27, |
|
"learning_rate": 1.7635782747603833e-06, |
|
"loss": 0.7544, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 13.36, |
|
"learning_rate": 1.7539936102236423e-06, |
|
"loss": 0.7585, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 13.45, |
|
"learning_rate": 1.744408945686901e-06, |
|
"loss": 0.742, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 13.55, |
|
"learning_rate": 1.7348242811501597e-06, |
|
"loss": 0.7629, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 13.64, |
|
"learning_rate": 1.7252396166134187e-06, |
|
"loss": 0.7568, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 13.64, |
|
"eval_accuracy_safe": 0.657079193989229, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.8838084544735693, |
|
"eval_iou_safe": 0.21846087789622765, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.874847055157359, |
|
"eval_loss": 0.6744459867477417, |
|
"eval_mean_accuracy": 0.7704438242313991, |
|
"eval_mean_iou": 0.3644359776845289, |
|
"eval_overall_accuracy": 0.8771112356612931, |
|
"eval_runtime": 10.8117, |
|
"eval_samples_per_second": 6.197, |
|
"eval_steps_per_second": 0.462, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 13.73, |
|
"learning_rate": 1.7156549520766772e-06, |
|
"loss": 0.7311, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 13.82, |
|
"learning_rate": 1.706070287539936e-06, |
|
"loss": 0.7352, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 13.91, |
|
"learning_rate": 1.696485623003195e-06, |
|
"loss": 0.7661, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"learning_rate": 1.6869009584664536e-06, |
|
"loss": 0.7335, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 14.09, |
|
"learning_rate": 1.6773162939297126e-06, |
|
"loss": 0.7491, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 14.18, |
|
"learning_rate": 1.6677316293929714e-06, |
|
"loss": 0.715, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 14.27, |
|
"learning_rate": 1.65814696485623e-06, |
|
"loss": 0.7349, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 14.36, |
|
"learning_rate": 1.648562300319489e-06, |
|
"loss": 0.7127, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 14.45, |
|
"learning_rate": 1.6389776357827476e-06, |
|
"loss": 0.718, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 14.55, |
|
"learning_rate": 1.6293929712460064e-06, |
|
"loss": 0.7085, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 14.55, |
|
"eval_accuracy_safe": 0.6519423595128777, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.8934174588611713, |
|
"eval_iou_safe": 0.2259934305828019, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.8842020655315154, |
|
"eval_loss": 0.65559983253479, |
|
"eval_mean_accuracy": 0.7726799091870244, |
|
"eval_mean_iou": 0.3700651653714391, |
|
"eval_overall_accuracy": 0.8862846716126399, |
|
"eval_runtime": 9.325, |
|
"eval_samples_per_second": 7.185, |
|
"eval_steps_per_second": 0.536, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 14.64, |
|
"learning_rate": 1.6198083067092654e-06, |
|
"loss": 0.753, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 14.73, |
|
"learning_rate": 1.610223642172524e-06, |
|
"loss": 0.7376, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 14.82, |
|
"learning_rate": 1.600638977635783e-06, |
|
"loss": 0.7256, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 14.91, |
|
"learning_rate": 1.5910543130990415e-06, |
|
"loss": 0.7132, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"learning_rate": 1.5814696485623003e-06, |
|
"loss": 0.722, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 15.09, |
|
"learning_rate": 1.5718849840255593e-06, |
|
"loss": 0.7015, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 15.18, |
|
"learning_rate": 1.562300319488818e-06, |
|
"loss": 0.7176, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 15.27, |
|
"learning_rate": 1.5527156549520765e-06, |
|
"loss": 0.7147, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 15.36, |
|
"learning_rate": 1.5431309904153355e-06, |
|
"loss": 0.7217, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 15.45, |
|
"learning_rate": 1.5335463258785943e-06, |
|
"loss": 0.7147, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 15.45, |
|
"eval_accuracy_safe": 0.6561000150346374, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.8963761831582403, |
|
"eval_iou_safe": 0.22827906452703248, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.8872234626234047, |
|
"eval_loss": 0.6508952975273132, |
|
"eval_mean_accuracy": 0.7762380990964388, |
|
"eval_mean_iou": 0.3718341757168124, |
|
"eval_overall_accuracy": 0.889278810415695, |
|
"eval_runtime": 10.5537, |
|
"eval_samples_per_second": 6.348, |
|
"eval_steps_per_second": 0.474, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 15.55, |
|
"learning_rate": 1.5239616613418533e-06, |
|
"loss": 0.7212, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 15.64, |
|
"learning_rate": 1.5143769968051119e-06, |
|
"loss": 0.7196, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 15.73, |
|
"learning_rate": 1.5047923322683706e-06, |
|
"loss": 0.6797, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 15.82, |
|
"learning_rate": 1.4952076677316294e-06, |
|
"loss": 0.6626, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 15.91, |
|
"learning_rate": 1.4856230031948882e-06, |
|
"loss": 0.6783, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"learning_rate": 1.476038338658147e-06, |
|
"loss": 0.7436, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 16.09, |
|
"learning_rate": 1.4664536741214058e-06, |
|
"loss": 0.7123, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 16.18, |
|
"learning_rate": 1.4568690095846646e-06, |
|
"loss": 0.7241, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 16.27, |
|
"learning_rate": 1.4472843450479234e-06, |
|
"loss": 0.6914, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 16.36, |
|
"learning_rate": 1.4376996805111822e-06, |
|
"loss": 0.6991, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 16.36, |
|
"eval_accuracy_safe": 0.6619847263503225, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.8964291023808605, |
|
"eval_iou_safe": 0.22672413565462626, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.8874142561414083, |
|
"eval_loss": 0.6502141952514648, |
|
"eval_mean_accuracy": 0.7792069143655915, |
|
"eval_mean_iou": 0.37137946393201154, |
|
"eval_overall_accuracy": 0.889503991425927, |
|
"eval_runtime": 9.3648, |
|
"eval_samples_per_second": 7.154, |
|
"eval_steps_per_second": 0.534, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 16.45, |
|
"learning_rate": 1.428115015974441e-06, |
|
"loss": 0.6552, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 16.55, |
|
"learning_rate": 1.4185303514376998e-06, |
|
"loss": 0.6621, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 16.64, |
|
"learning_rate": 1.4089456869009586e-06, |
|
"loss": 0.6742, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 16.73, |
|
"learning_rate": 1.3993610223642173e-06, |
|
"loss": 0.6462, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 16.82, |
|
"learning_rate": 1.3897763578274761e-06, |
|
"loss": 0.6759, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 16.91, |
|
"learning_rate": 1.3801916932907347e-06, |
|
"loss": 0.6773, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"learning_rate": 1.3706070287539937e-06, |
|
"loss": 0.7158, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 17.09, |
|
"learning_rate": 1.3610223642172525e-06, |
|
"loss": 0.6958, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 17.18, |
|
"learning_rate": 1.3514376996805113e-06, |
|
"loss": 0.6747, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 17.27, |
|
"learning_rate": 1.3418530351437699e-06, |
|
"loss": 0.6357, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 17.27, |
|
"eval_accuracy_safe": 0.6611674588764114, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9051011079830231, |
|
"eval_iou_safe": 0.24110413837507133, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.895975219611748, |
|
"eval_loss": 0.6229549646377563, |
|
"eval_mean_accuracy": 0.7831342834297172, |
|
"eval_mean_iou": 0.3790264526622731, |
|
"eval_overall_accuracy": 0.8978956991167211, |
|
"eval_runtime": 10.2719, |
|
"eval_samples_per_second": 6.523, |
|
"eval_steps_per_second": 0.487, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 17.36, |
|
"learning_rate": 1.3322683706070287e-06, |
|
"loss": 0.6769, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 17.45, |
|
"learning_rate": 1.3226837060702877e-06, |
|
"loss": 0.6396, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 17.55, |
|
"learning_rate": 1.3130990415335465e-06, |
|
"loss": 0.6567, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 17.64, |
|
"learning_rate": 1.303514376996805e-06, |
|
"loss": 0.6538, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 17.73, |
|
"learning_rate": 1.2939297124600638e-06, |
|
"loss": 0.6654, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 17.82, |
|
"learning_rate": 1.2843450479233228e-06, |
|
"loss": 0.6637, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 17.91, |
|
"learning_rate": 1.2747603833865816e-06, |
|
"loss": 0.6417, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"learning_rate": 1.2651757188498402e-06, |
|
"loss": 0.6707, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 18.09, |
|
"learning_rate": 1.255591054313099e-06, |
|
"loss": 0.6229, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 18.18, |
|
"learning_rate": 1.2460063897763578e-06, |
|
"loss": 0.6815, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 18.18, |
|
"eval_accuracy_safe": 0.6484207848080771, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9178010760554833, |
|
"eval_iou_safe": 0.2593923131195133, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.908185465668198, |
|
"eval_loss": 0.5992904305458069, |
|
"eval_mean_accuracy": 0.7831109304317803, |
|
"eval_mean_iou": 0.3891925929292371, |
|
"eval_overall_accuracy": 0.9098440141820195, |
|
"eval_runtime": 9.7351, |
|
"eval_samples_per_second": 6.882, |
|
"eval_steps_per_second": 0.514, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 18.27, |
|
"learning_rate": 1.2364217252396168e-06, |
|
"loss": 0.6677, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 18.36, |
|
"learning_rate": 1.2268370607028754e-06, |
|
"loss": 0.6658, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 18.45, |
|
"learning_rate": 1.2172523961661342e-06, |
|
"loss": 0.649, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 18.55, |
|
"learning_rate": 1.207667731629393e-06, |
|
"loss": 0.6342, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 18.64, |
|
"learning_rate": 1.198083067092652e-06, |
|
"loss": 0.6441, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 18.73, |
|
"learning_rate": 1.1884984025559105e-06, |
|
"loss": 0.6429, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 18.82, |
|
"learning_rate": 1.1789137380191693e-06, |
|
"loss": 0.6155, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 18.91, |
|
"learning_rate": 1.1693290734824281e-06, |
|
"loss": 0.6347, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"learning_rate": 1.159744408945687e-06, |
|
"loss": 0.6611, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 19.09, |
|
"learning_rate": 1.1501597444089457e-06, |
|
"loss": 0.6398, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 19.09, |
|
"eval_accuracy_safe": 0.6413602877398313, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9258418644556835, |
|
"eval_iou_safe": 0.26823363821482293, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9159351131912487, |
|
"eval_loss": 0.5784907341003418, |
|
"eval_mean_accuracy": 0.7836010760977574, |
|
"eval_mean_iou": 0.39472291713535723, |
|
"eval_overall_accuracy": 0.9174387348231985, |
|
"eval_runtime": 11.2723, |
|
"eval_samples_per_second": 5.944, |
|
"eval_steps_per_second": 0.444, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 19.18, |
|
"learning_rate": 1.1405750798722045e-06, |
|
"loss": 0.6403, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 19.27, |
|
"learning_rate": 1.1309904153354633e-06, |
|
"loss": 0.6271, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 19.36, |
|
"learning_rate": 1.121405750798722e-06, |
|
"loss": 0.679, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 19.45, |
|
"learning_rate": 1.1118210862619809e-06, |
|
"loss": 0.6239, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 19.55, |
|
"learning_rate": 1.1022364217252397e-06, |
|
"loss": 0.6527, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 19.64, |
|
"learning_rate": 1.0926517571884984e-06, |
|
"loss": 0.6228, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 19.73, |
|
"learning_rate": 1.0830670926517572e-06, |
|
"loss": 0.6227, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 19.82, |
|
"learning_rate": 1.073482428115016e-06, |
|
"loss": 0.6347, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 19.91, |
|
"learning_rate": 1.0638977635782748e-06, |
|
"loss": 0.6374, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"learning_rate": 1.0543130990415336e-06, |
|
"loss": 0.5845, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_accuracy_safe": 0.6426151017151052, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9286315640516787, |
|
"eval_iou_safe": 0.26982054857603477, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9187221572756564, |
|
"eval_loss": 0.5641139149665833, |
|
"eval_mean_accuracy": 0.7856233328833919, |
|
"eval_mean_iou": 0.39618090195056377, |
|
"eval_overall_accuracy": 0.9201830963590252, |
|
"eval_runtime": 9.4874, |
|
"eval_samples_per_second": 7.062, |
|
"eval_steps_per_second": 0.527, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 20.09, |
|
"learning_rate": 1.0447284345047924e-06, |
|
"loss": 0.606, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 20.18, |
|
"learning_rate": 1.0351437699680512e-06, |
|
"loss": 0.6491, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 20.27, |
|
"learning_rate": 1.02555910543131e-06, |
|
"loss": 0.6396, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 20.36, |
|
"learning_rate": 1.0159744408945688e-06, |
|
"loss": 0.617, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 20.45, |
|
"learning_rate": 1.0063897763578276e-06, |
|
"loss": 0.614, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 20.55, |
|
"learning_rate": 9.968051118210861e-07, |
|
"loss": 0.6281, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 20.64, |
|
"learning_rate": 9.872204472843452e-07, |
|
"loss": 0.6374, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 20.73, |
|
"learning_rate": 9.77635782747604e-07, |
|
"loss": 0.6394, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 20.82, |
|
"learning_rate": 9.680511182108625e-07, |
|
"loss": 0.6351, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 20.91, |
|
"learning_rate": 9.584664536741213e-07, |
|
"loss": 0.6062, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 20.91, |
|
"eval_accuracy_safe": 0.6519963300064379, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9252195649054266, |
|
"eval_iou_safe": 0.2640576051310034, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9156001170929594, |
|
"eval_loss": 0.5693235397338867, |
|
"eval_mean_accuracy": 0.7886079474559322, |
|
"eval_mean_iou": 0.3932192407413209, |
|
"eval_overall_accuracy": 0.9171489886383513, |
|
"eval_runtime": 11.0762, |
|
"eval_samples_per_second": 6.049, |
|
"eval_steps_per_second": 0.451, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"learning_rate": 9.488817891373802e-07, |
|
"loss": 0.6456, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 21.09, |
|
"learning_rate": 9.39297124600639e-07, |
|
"loss": 0.6055, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 21.18, |
|
"learning_rate": 9.297124600638977e-07, |
|
"loss": 0.6134, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 21.27, |
|
"learning_rate": 9.201277955271566e-07, |
|
"loss": 0.5718, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 21.36, |
|
"learning_rate": 9.105431309904154e-07, |
|
"loss": 0.5939, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 21.45, |
|
"learning_rate": 9.009584664536742e-07, |
|
"loss": 0.5917, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 21.55, |
|
"learning_rate": 8.913738019169329e-07, |
|
"loss": 0.599, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 21.64, |
|
"learning_rate": 8.817891373801916e-07, |
|
"loss": 0.6239, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 21.73, |
|
"learning_rate": 8.722044728434505e-07, |
|
"loss": 0.5946, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 21.82, |
|
"learning_rate": 8.626198083067093e-07, |
|
"loss": 0.6071, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 21.82, |
|
"eval_accuracy_safe": 0.6591840432380754, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_accuracy_unsafe": 0.9282916959179331, |
|
"eval_iou_safe": 0.267531459702011, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_iou_unsafe": 0.9188227497665283, |
|
"eval_loss": 0.5627052187919617, |
|
"eval_mean_accuracy": 0.7937378695780043, |
|
"eval_mean_iou": 0.39545140315617977, |
|
"eval_overall_accuracy": 0.9203426873506005, |
|
"eval_runtime": 9.7383, |
|
"eval_samples_per_second": 6.88, |
|
"eval_steps_per_second": 0.513, |
|
"step": 240 |
|
} |
|
], |
|
"max_steps": 330, |
|
"num_train_epochs": 30, |
|
"total_flos": 1.3448116877241876e+18, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |