|
{ |
|
"best_metric": 0.18488384783267975, |
|
"best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/dropoff-utcustom-train-SF-RGB-b0_6/checkpoint-320", |
|
"epoch": 106.66666666666667, |
|
"global_step": 320, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.33, |
|
"learning_rate": 3.888888888888889e-06, |
|
"loss": 1.1755, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.67, |
|
"learning_rate": 7.777777777777777e-06, |
|
"loss": 1.1749, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 1.0, |
|
"learning_rate": 1.1666666666666665e-05, |
|
"loss": 1.183, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 1.33, |
|
"learning_rate": 1.5555555555555555e-05, |
|
"loss": 1.1718, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 1.67, |
|
"learning_rate": 1.9444444444444442e-05, |
|
"loss": 1.1706, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 2.0, |
|
"learning_rate": 2.333333333333333e-05, |
|
"loss": 1.1696, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 2.33, |
|
"learning_rate": 2.722222222222222e-05, |
|
"loss": 1.1558, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 2.67, |
|
"learning_rate": 3.111111111111111e-05, |
|
"loss": 1.1489, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 3.0, |
|
"learning_rate": 3.5e-05, |
|
"loss": 1.135, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"learning_rate": 3.8888888888888884e-05, |
|
"loss": 1.1234, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 3.33, |
|
"eval_accuracy_dropoff": 0.772015071105291, |
|
"eval_accuracy_undropoff": 0.353825865102452, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.1801195043477445, |
|
"eval_iou_undropoff": 0.3536296682807036, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 1.097259283065796, |
|
"eval_mean_accuracy": 0.5629204681038715, |
|
"eval_mean_iou": 0.17791639087614938, |
|
"eval_overall_accuracy": 0.37234242757161456, |
|
"eval_runtime": 1.873, |
|
"eval_samples_per_second": 8.008, |
|
"eval_steps_per_second": 0.534, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 3.67, |
|
"learning_rate": 4.277777777777778e-05, |
|
"loss": 1.1168, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 4.0, |
|
"learning_rate": 4.666666666666666e-05, |
|
"loss": 1.0895, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 4.33, |
|
"learning_rate": 5.055555555555555e-05, |
|
"loss": 1.1027, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 4.67, |
|
"learning_rate": 5.444444444444444e-05, |
|
"loss": 1.0795, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 5.0, |
|
"learning_rate": 5.833333333333333e-05, |
|
"loss": 1.0598, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 5.33, |
|
"learning_rate": 6.222222222222222e-05, |
|
"loss": 1.0449, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 5.67, |
|
"learning_rate": 6.611111111111111e-05, |
|
"loss": 1.0268, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 6.0, |
|
"learning_rate": 7e-05, |
|
"loss": 1.0843, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 6.33, |
|
"learning_rate": 6.97953216374269e-05, |
|
"loss": 1.0029, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 6.67, |
|
"learning_rate": 6.95906432748538e-05, |
|
"loss": 0.975, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 6.67, |
|
"eval_accuracy_dropoff": 0.8258609598639924, |
|
"eval_accuracy_undropoff": 0.8102080545984994, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.24279987572018127, |
|
"eval_iou_undropoff": 0.8068537963656117, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 1.0260422229766846, |
|
"eval_mean_accuracy": 0.8180345072312459, |
|
"eval_mean_iou": 0.34988455736193097, |
|
"eval_overall_accuracy": 0.810901133219401, |
|
"eval_runtime": 1.6566, |
|
"eval_samples_per_second": 9.055, |
|
"eval_steps_per_second": 0.604, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 7.0, |
|
"learning_rate": 6.938596491228069e-05, |
|
"loss": 1.0358, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 7.33, |
|
"learning_rate": 6.91812865497076e-05, |
|
"loss": 0.9361, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 7.67, |
|
"learning_rate": 6.89766081871345e-05, |
|
"loss": 0.9311, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 8.0, |
|
"learning_rate": 6.877192982456139e-05, |
|
"loss": 0.9149, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 8.33, |
|
"learning_rate": 6.85672514619883e-05, |
|
"loss": 0.9056, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 8.67, |
|
"learning_rate": 6.83625730994152e-05, |
|
"loss": 0.8637, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 9.0, |
|
"learning_rate": 6.81578947368421e-05, |
|
"loss": 0.7991, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 9.33, |
|
"learning_rate": 6.7953216374269e-05, |
|
"loss": 0.8143, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 9.67, |
|
"learning_rate": 6.774853801169589e-05, |
|
"loss": 0.7999, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"learning_rate": 6.75438596491228e-05, |
|
"loss": 0.9464, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 10.0, |
|
"eval_accuracy_dropoff": 0.5212052289383601, |
|
"eval_accuracy_undropoff": 0.9700488444545206, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3384996437670423, |
|
"eval_iou_undropoff": 0.9507046257182781, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.8130465149879456, |
|
"eval_mean_accuracy": 0.7456270366964404, |
|
"eval_mean_iou": 0.4297347564951068, |
|
"eval_overall_accuracy": 0.9501749674479166, |
|
"eval_runtime": 1.7097, |
|
"eval_samples_per_second": 8.773, |
|
"eval_steps_per_second": 0.585, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 10.33, |
|
"learning_rate": 6.733918128654971e-05, |
|
"loss": 0.763, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 10.67, |
|
"learning_rate": 6.71345029239766e-05, |
|
"loss": 0.7591, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 11.0, |
|
"learning_rate": 6.69298245614035e-05, |
|
"loss": 0.6623, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 11.33, |
|
"learning_rate": 6.672514619883041e-05, |
|
"loss": 0.7138, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 11.67, |
|
"learning_rate": 6.65204678362573e-05, |
|
"loss": 0.6695, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 12.0, |
|
"learning_rate": 6.63157894736842e-05, |
|
"loss": 0.6987, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 12.33, |
|
"learning_rate": 6.611111111111111e-05, |
|
"loss": 0.6494, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 12.67, |
|
"learning_rate": 6.5906432748538e-05, |
|
"loss": 0.6317, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 13.0, |
|
"learning_rate": 6.570175438596491e-05, |
|
"loss": 0.834, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 13.33, |
|
"learning_rate": 6.54970760233918e-05, |
|
"loss": 0.6167, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 13.33, |
|
"eval_accuracy_dropoff": 0.5047671560181037, |
|
"eval_accuracy_undropoff": 0.9829004494881923, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3743041743152479, |
|
"eval_iou_undropoff": 0.961015278770039, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.6001297235488892, |
|
"eval_mean_accuracy": 0.743833802753148, |
|
"eval_mean_iou": 0.44510648436176226, |
|
"eval_overall_accuracy": 0.9617296854654948, |
|
"eval_runtime": 1.7423, |
|
"eval_samples_per_second": 8.609, |
|
"eval_steps_per_second": 0.574, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 13.67, |
|
"learning_rate": 6.52923976608187e-05, |
|
"loss": 0.6201, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 14.0, |
|
"learning_rate": 6.508771929824561e-05, |
|
"loss": 0.6009, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 14.33, |
|
"learning_rate": 6.488304093567252e-05, |
|
"loss": 0.5716, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 14.67, |
|
"learning_rate": 6.467836257309941e-05, |
|
"loss": 0.5779, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 15.0, |
|
"learning_rate": 6.447368421052631e-05, |
|
"loss": 0.5397, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 15.33, |
|
"learning_rate": 6.426900584795322e-05, |
|
"loss": 0.5569, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 15.67, |
|
"learning_rate": 6.406432748538011e-05, |
|
"loss": 0.5152, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 16.0, |
|
"learning_rate": 6.3859649122807e-05, |
|
"loss": 0.5266, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 16.33, |
|
"learning_rate": 6.365497076023391e-05, |
|
"loss": 0.5389, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 16.67, |
|
"learning_rate": 6.345029239766081e-05, |
|
"loss": 0.4818, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 16.67, |
|
"eval_accuracy_dropoff": 0.4572736462425621, |
|
"eval_accuracy_undropoff": 0.9901589972677334, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.38151532722193204, |
|
"eval_iou_undropoff": 0.9659037872797944, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.46292775869369507, |
|
"eval_mean_accuracy": 0.7237163217551478, |
|
"eval_mean_iou": 0.4491397048339088, |
|
"eval_overall_accuracy": 0.9665639241536458, |
|
"eval_runtime": 1.7358, |
|
"eval_samples_per_second": 8.641, |
|
"eval_steps_per_second": 0.576, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 17.0, |
|
"learning_rate": 6.324561403508772e-05, |
|
"loss": 0.5289, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 17.33, |
|
"learning_rate": 6.304093567251461e-05, |
|
"loss": 0.4991, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 17.67, |
|
"learning_rate": 6.283625730994151e-05, |
|
"loss": 0.4643, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 18.0, |
|
"learning_rate": 6.263157894736842e-05, |
|
"loss": 0.4359, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 18.33, |
|
"learning_rate": 6.242690058479532e-05, |
|
"loss": 0.4679, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 18.67, |
|
"learning_rate": 6.222222222222222e-05, |
|
"loss": 0.4619, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 19.0, |
|
"learning_rate": 6.201754385964911e-05, |
|
"loss": 0.6641, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 19.33, |
|
"learning_rate": 6.181286549707602e-05, |
|
"loss": 0.4316, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 19.67, |
|
"learning_rate": 6.160818713450292e-05, |
|
"loss": 0.4475, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"learning_rate": 6.140350877192981e-05, |
|
"loss": 0.4733, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 20.0, |
|
"eval_accuracy_dropoff": 0.42558641762584143, |
|
"eval_accuracy_undropoff": 0.987872440296196, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.33826825716385683, |
|
"eval_iou_undropoff": 0.9622649725509712, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.43790048360824585, |
|
"eval_mean_accuracy": 0.7067294289610188, |
|
"eval_mean_iou": 0.4335110765716093, |
|
"eval_overall_accuracy": 0.9629755655924479, |
|
"eval_runtime": 1.7742, |
|
"eval_samples_per_second": 8.455, |
|
"eval_steps_per_second": 0.564, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 20.33, |
|
"learning_rate": 6.119883040935672e-05, |
|
"loss": 0.423, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 20.67, |
|
"learning_rate": 6.0994152046783624e-05, |
|
"loss": 0.4219, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 21.0, |
|
"learning_rate": 6.0789473684210525e-05, |
|
"loss": 0.6326, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 21.33, |
|
"learning_rate": 6.058479532163742e-05, |
|
"loss": 0.4299, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 21.67, |
|
"learning_rate": 6.038011695906432e-05, |
|
"loss": 0.4147, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 22.0, |
|
"learning_rate": 6.0175438596491224e-05, |
|
"loss": 0.4815, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 22.33, |
|
"learning_rate": 5.9970760233918126e-05, |
|
"loss": 0.4477, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 22.67, |
|
"learning_rate": 5.976608187134502e-05, |
|
"loss": 0.4031, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 23.0, |
|
"learning_rate": 5.956140350877192e-05, |
|
"loss": 0.6048, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 23.33, |
|
"learning_rate": 5.9356725146198824e-05, |
|
"loss": 0.3843, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 23.33, |
|
"eval_accuracy_dropoff": 0.3821076573161486, |
|
"eval_accuracy_undropoff": 0.992239862567096, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.328346864743479, |
|
"eval_iou_undropoff": 0.9646259654307497, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.4072829484939575, |
|
"eval_mean_accuracy": 0.6871737599416223, |
|
"eval_mean_iou": 0.4309909433914096, |
|
"eval_overall_accuracy": 0.9652244567871093, |
|
"eval_runtime": 1.8122, |
|
"eval_samples_per_second": 8.277, |
|
"eval_steps_per_second": 0.552, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 23.67, |
|
"learning_rate": 5.9152046783625726e-05, |
|
"loss": 0.3967, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 24.0, |
|
"learning_rate": 5.894736842105262e-05, |
|
"loss": 0.4051, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 24.33, |
|
"learning_rate": 5.874269005847952e-05, |
|
"loss": 0.3836, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 24.67, |
|
"learning_rate": 5.853801169590643e-05, |
|
"loss": 0.3896, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 25.0, |
|
"learning_rate": 5.833333333333333e-05, |
|
"loss": 0.5942, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 25.33, |
|
"learning_rate": 5.812865497076023e-05, |
|
"loss": 0.3688, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 25.67, |
|
"learning_rate": 5.792397660818713e-05, |
|
"loss": 0.3721, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 26.0, |
|
"learning_rate": 5.771929824561403e-05, |
|
"loss": 0.5951, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 26.33, |
|
"learning_rate": 5.751461988304093e-05, |
|
"loss": 0.3973, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 26.67, |
|
"learning_rate": 5.730994152046783e-05, |
|
"loss": 0.3579, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 26.67, |
|
"eval_accuracy_dropoff": 0.40901049923036276, |
|
"eval_accuracy_undropoff": 0.9908359437282933, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.34181486548107615, |
|
"eval_iou_undropoff": 0.9644297203691943, |
|
"eval_iou_unlabeled": 0.0, |
|
"eval_loss": 0.3731442093849182, |
|
"eval_mean_accuracy": 0.6999232214793281, |
|
"eval_mean_iou": 0.43541486195009016, |
|
"eval_overall_accuracy": 0.9650739034016927, |
|
"eval_runtime": 1.7608, |
|
"eval_samples_per_second": 8.519, |
|
"eval_steps_per_second": 0.568, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 27.0, |
|
"learning_rate": 5.710526315789473e-05, |
|
"loss": 0.416, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 27.33, |
|
"learning_rate": 5.690058479532163e-05, |
|
"loss": 0.3931, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 27.67, |
|
"learning_rate": 5.669590643274853e-05, |
|
"loss": 0.3753, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 28.0, |
|
"learning_rate": 5.649122807017543e-05, |
|
"loss": 0.3469, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 28.33, |
|
"learning_rate": 5.628654970760233e-05, |
|
"loss": 0.3548, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 28.67, |
|
"learning_rate": 5.608187134502924e-05, |
|
"loss": 0.3347, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 29.0, |
|
"learning_rate": 5.587719298245614e-05, |
|
"loss": 0.5499, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 29.33, |
|
"learning_rate": 5.5672514619883035e-05, |
|
"loss": 0.3353, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 29.67, |
|
"learning_rate": 5.546783625730994e-05, |
|
"loss": 0.3393, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"learning_rate": 5.526315789473684e-05, |
|
"loss": 0.3212, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 30.0, |
|
"eval_accuracy_dropoff": 0.43656236359041517, |
|
"eval_accuracy_undropoff": 0.9891638008202122, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3538083423714455, |
|
"eval_iou_undropoff": 0.9639998537401673, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.3654923737049103, |
|
"eval_mean_accuracy": 0.7128630822053137, |
|
"eval_mean_iou": 0.6589040980558064, |
|
"eval_overall_accuracy": 0.9646957397460938, |
|
"eval_runtime": 1.7507, |
|
"eval_samples_per_second": 8.568, |
|
"eval_steps_per_second": 0.571, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 30.33, |
|
"learning_rate": 5.505847953216374e-05, |
|
"loss": 0.3282, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 30.67, |
|
"learning_rate": 5.4853801169590635e-05, |
|
"loss": 0.341, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 31.0, |
|
"learning_rate": 5.464912280701754e-05, |
|
"loss": 0.5022, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 31.33, |
|
"learning_rate": 5.444444444444444e-05, |
|
"loss": 0.3242, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 31.67, |
|
"learning_rate": 5.423976608187134e-05, |
|
"loss": 0.3237, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 32.0, |
|
"learning_rate": 5.4035087719298236e-05, |
|
"loss": 0.3377, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 32.33, |
|
"learning_rate": 5.383040935672514e-05, |
|
"loss": 0.3189, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 32.67, |
|
"learning_rate": 5.3625730994152046e-05, |
|
"loss": 0.31, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 33.0, |
|
"learning_rate": 5.342105263157895e-05, |
|
"loss": 0.4947, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 33.33, |
|
"learning_rate": 5.321637426900584e-05, |
|
"loss": 0.3088, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 33.33, |
|
"eval_accuracy_dropoff": 0.34505019872722675, |
|
"eval_accuracy_undropoff": 0.9927792377540279, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.2985231857844209, |
|
"eval_iou_undropoff": 0.9635421132392789, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.3305789530277252, |
|
"eval_mean_accuracy": 0.6689147182406273, |
|
"eval_mean_iou": 0.6310326495118499, |
|
"eval_overall_accuracy": 0.96409912109375, |
|
"eval_runtime": 1.6777, |
|
"eval_samples_per_second": 8.941, |
|
"eval_steps_per_second": 0.596, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 33.67, |
|
"learning_rate": 5.3011695906432744e-05, |
|
"loss": 0.3057, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 34.0, |
|
"learning_rate": 5.2807017543859646e-05, |
|
"loss": 0.4575, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 34.33, |
|
"learning_rate": 5.260233918128655e-05, |
|
"loss": 0.3011, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 34.67, |
|
"learning_rate": 5.239766081871344e-05, |
|
"loss": 0.3063, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 35.0, |
|
"learning_rate": 5.2192982456140345e-05, |
|
"loss": 0.2768, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 35.33, |
|
"learning_rate": 5.1988304093567246e-05, |
|
"loss": 0.3065, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 35.67, |
|
"learning_rate": 5.178362573099415e-05, |
|
"loss": 0.2841, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 36.0, |
|
"learning_rate": 5.157894736842104e-05, |
|
"loss": 0.4367, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 36.33, |
|
"learning_rate": 5.1374269005847945e-05, |
|
"loss": 0.2944, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 36.67, |
|
"learning_rate": 5.1169590643274853e-05, |
|
"loss": 0.2825, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 36.67, |
|
"eval_accuracy_dropoff": 0.42931398901831047, |
|
"eval_accuracy_undropoff": 0.9912175243982787, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3608996055293487, |
|
"eval_iou_undropoff": 0.9656852921893507, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.3253430724143982, |
|
"eval_mean_accuracy": 0.7102657567082946, |
|
"eval_mean_iou": 0.6632924488593497, |
|
"eval_overall_accuracy": 0.9663375854492188, |
|
"eval_runtime": 1.7624, |
|
"eval_samples_per_second": 8.511, |
|
"eval_steps_per_second": 0.567, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 37.0, |
|
"learning_rate": 5.0964912280701755e-05, |
|
"loss": 0.292, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 37.33, |
|
"learning_rate": 5.076023391812865e-05, |
|
"loss": 0.2868, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 37.67, |
|
"learning_rate": 5.055555555555555e-05, |
|
"loss": 0.2897, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 38.0, |
|
"learning_rate": 5.0350877192982454e-05, |
|
"loss": 0.4326, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 38.33, |
|
"learning_rate": 5.0146198830409355e-05, |
|
"loss": 0.2754, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 38.67, |
|
"learning_rate": 4.994152046783625e-05, |
|
"loss": 0.3155, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 39.0, |
|
"learning_rate": 4.973684210526315e-05, |
|
"loss": 0.2995, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 39.33, |
|
"learning_rate": 4.9532163742690054e-05, |
|
"loss": 0.2727, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 39.67, |
|
"learning_rate": 4.9327485380116956e-05, |
|
"loss": 0.2645, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"learning_rate": 4.912280701754385e-05, |
|
"loss": 0.3029, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 40.0, |
|
"eval_accuracy_dropoff": 0.42643646472304547, |
|
"eval_accuracy_undropoff": 0.9894596988014003, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.34740009077339873, |
|
"eval_iou_undropoff": 0.9638475611431463, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.3130444884300232, |
|
"eval_mean_accuracy": 0.7079480817622229, |
|
"eval_mean_iou": 0.6556238259582725, |
|
"eval_overall_accuracy": 0.9645301818847656, |
|
"eval_runtime": 1.7686, |
|
"eval_samples_per_second": 8.481, |
|
"eval_steps_per_second": 0.565, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 40.33, |
|
"learning_rate": 4.891812865497075e-05, |
|
"loss": 0.2717, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 40.67, |
|
"learning_rate": 4.871345029239766e-05, |
|
"loss": 0.2581, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 41.0, |
|
"learning_rate": 4.850877192982456e-05, |
|
"loss": 0.4004, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 41.33, |
|
"learning_rate": 4.830409356725146e-05, |
|
"loss": 0.2551, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 41.67, |
|
"learning_rate": 4.809941520467836e-05, |
|
"loss": 0.2635, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 42.0, |
|
"learning_rate": 4.789473684210526e-05, |
|
"loss": 0.2444, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 42.33, |
|
"learning_rate": 4.769005847953216e-05, |
|
"loss": 0.2535, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 42.67, |
|
"learning_rate": 4.748538011695906e-05, |
|
"loss": 0.2552, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 43.0, |
|
"learning_rate": 4.728070175438596e-05, |
|
"loss": 0.3605, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 43.33, |
|
"learning_rate": 4.707602339181286e-05, |
|
"loss": 0.252, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 43.33, |
|
"eval_accuracy_dropoff": 0.4739644358673927, |
|
"eval_accuracy_undropoff": 0.9879597195568343, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.37619668484108026, |
|
"eval_iou_undropoff": 0.9644551086478499, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.28977030515670776, |
|
"eval_mean_accuracy": 0.7309620777121135, |
|
"eval_mean_iou": 0.6703258967444651, |
|
"eval_overall_accuracy": 0.9652010599772135, |
|
"eval_runtime": 1.7093, |
|
"eval_samples_per_second": 8.775, |
|
"eval_steps_per_second": 0.585, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 43.67, |
|
"learning_rate": 4.687134502923976e-05, |
|
"loss": 0.2414, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 44.0, |
|
"learning_rate": 4.666666666666666e-05, |
|
"loss": 0.3671, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 44.33, |
|
"learning_rate": 4.646198830409356e-05, |
|
"loss": 0.245, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 44.67, |
|
"learning_rate": 4.625730994152047e-05, |
|
"loss": 0.2705, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 45.0, |
|
"learning_rate": 4.605263157894737e-05, |
|
"loss": 0.2488, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 45.33, |
|
"learning_rate": 4.5847953216374265e-05, |
|
"loss": 0.2472, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 45.67, |
|
"learning_rate": 4.564327485380117e-05, |
|
"loss": 0.2355, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 46.0, |
|
"learning_rate": 4.543859649122807e-05, |
|
"loss": 0.2335, |
|
"step": 138 |
|
}, |
|
{ |
|
"epoch": 46.33, |
|
"learning_rate": 4.523391812865497e-05, |
|
"loss": 0.2386, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 46.67, |
|
"learning_rate": 4.5029239766081865e-05, |
|
"loss": 0.2395, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 46.67, |
|
"eval_accuracy_dropoff": 0.4275105107174857, |
|
"eval_accuracy_undropoff": 0.990174696890836, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.35270954168088253, |
|
"eval_iou_undropoff": 0.9645908069700111, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.2842903435230255, |
|
"eval_mean_accuracy": 0.7088426038041609, |
|
"eval_mean_iou": 0.6586501743254468, |
|
"eval_overall_accuracy": 0.9652610778808594, |
|
"eval_runtime": 1.7483, |
|
"eval_samples_per_second": 8.58, |
|
"eval_steps_per_second": 0.572, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 47.0, |
|
"learning_rate": 4.482456140350877e-05, |
|
"loss": 0.2558, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 47.33, |
|
"learning_rate": 4.461988304093567e-05, |
|
"loss": 0.2299, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 47.67, |
|
"learning_rate": 4.441520467836257e-05, |
|
"loss": 0.2289, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 48.0, |
|
"learning_rate": 4.4210526315789466e-05, |
|
"loss": 0.3324, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 48.33, |
|
"learning_rate": 4.400584795321637e-05, |
|
"loss": 0.2302, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 48.67, |
|
"learning_rate": 4.3801169590643276e-05, |
|
"loss": 0.2306, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 49.0, |
|
"learning_rate": 4.359649122807018e-05, |
|
"loss": 0.2666, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 49.33, |
|
"learning_rate": 4.339181286549707e-05, |
|
"loss": 0.2229, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 49.67, |
|
"learning_rate": 4.3187134502923974e-05, |
|
"loss": 0.2251, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"learning_rate": 4.2982456140350876e-05, |
|
"loss": 0.2308, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 50.0, |
|
"eval_accuracy_dropoff": 0.3811197647437223, |
|
"eval_accuracy_undropoff": 0.9929697619937138, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.33090640708529484, |
|
"eval_iou_undropoff": 0.9652926014872973, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.27435046434402466, |
|
"eval_mean_accuracy": 0.687044763368718, |
|
"eval_mean_iou": 0.648099504286296, |
|
"eval_overall_accuracy": 0.9658782958984375, |
|
"eval_runtime": 1.6671, |
|
"eval_samples_per_second": 8.998, |
|
"eval_steps_per_second": 0.6, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 50.33, |
|
"learning_rate": 4.277777777777778e-05, |
|
"loss": 0.2302, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 50.67, |
|
"learning_rate": 4.257309941520467e-05, |
|
"loss": 0.2153, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 51.0, |
|
"learning_rate": 4.2368421052631575e-05, |
|
"loss": 0.2086, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 51.33, |
|
"learning_rate": 4.2163742690058476e-05, |
|
"loss": 0.2267, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 51.67, |
|
"learning_rate": 4.195906432748538e-05, |
|
"loss": 0.2051, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 52.0, |
|
"learning_rate": 4.175438596491227e-05, |
|
"loss": 0.3191, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 52.33, |
|
"learning_rate": 4.1549707602339175e-05, |
|
"loss": 0.2112, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 52.67, |
|
"learning_rate": 4.1345029239766077e-05, |
|
"loss": 0.2164, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 53.0, |
|
"learning_rate": 4.1140350877192985e-05, |
|
"loss": 0.2079, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 53.33, |
|
"learning_rate": 4.093567251461988e-05, |
|
"loss": 0.2125, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 53.33, |
|
"eval_accuracy_dropoff": 0.41470811220621684, |
|
"eval_accuracy_undropoff": 0.9908596262106006, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3463719310364678, |
|
"eval_iou_undropoff": 0.9647006338932361, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.2579088807106018, |
|
"eval_mean_accuracy": 0.7027838692084087, |
|
"eval_mean_iou": 0.655536282464852, |
|
"eval_overall_accuracy": 0.9653488159179687, |
|
"eval_runtime": 1.6718, |
|
"eval_samples_per_second": 8.973, |
|
"eval_steps_per_second": 0.598, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 53.67, |
|
"learning_rate": 4.073099415204678e-05, |
|
"loss": 0.2052, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 54.0, |
|
"learning_rate": 4.0526315789473684e-05, |
|
"loss": 0.2796, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 54.33, |
|
"learning_rate": 4.0321637426900585e-05, |
|
"loss": 0.2035, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 54.67, |
|
"learning_rate": 4.011695906432748e-05, |
|
"loss": 0.2021, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 55.0, |
|
"learning_rate": 3.991228070175438e-05, |
|
"loss": 0.2092, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 55.33, |
|
"learning_rate": 3.9707602339181284e-05, |
|
"loss": 0.196, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 55.67, |
|
"learning_rate": 3.9502923976608185e-05, |
|
"loss": 0.2008, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 56.0, |
|
"learning_rate": 3.929824561403508e-05, |
|
"loss": 0.2223, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 56.33, |
|
"learning_rate": 3.909356725146198e-05, |
|
"loss": 0.1942, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 56.67, |
|
"learning_rate": 3.8888888888888884e-05, |
|
"loss": 0.1953, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 56.67, |
|
"eval_accuracy_dropoff": 0.42093413283708964, |
|
"eval_accuracy_undropoff": 0.9899346789240809, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3458057697207621, |
|
"eval_iou_undropoff": 0.9640708476466698, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.25514769554138184, |
|
"eval_mean_accuracy": 0.7054344058805853, |
|
"eval_mean_iou": 0.6549383086837159, |
|
"eval_overall_accuracy": 0.964740498860677, |
|
"eval_runtime": 1.832, |
|
"eval_samples_per_second": 8.188, |
|
"eval_steps_per_second": 0.546, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 57.0, |
|
"learning_rate": 3.868421052631579e-05, |
|
"loss": 0.275, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 57.33, |
|
"learning_rate": 3.847953216374269e-05, |
|
"loss": 0.1901, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 57.67, |
|
"learning_rate": 3.827485380116959e-05, |
|
"loss": 0.1929, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 58.0, |
|
"learning_rate": 3.807017543859649e-05, |
|
"loss": 0.2671, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 58.33, |
|
"learning_rate": 3.786549707602339e-05, |
|
"loss": 0.1832, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 58.67, |
|
"learning_rate": 3.766081871345029e-05, |
|
"loss": 0.1962, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 59.0, |
|
"learning_rate": 3.745614035087719e-05, |
|
"loss": 0.1992, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 59.33, |
|
"learning_rate": 3.725146198830409e-05, |
|
"loss": 0.1891, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 59.67, |
|
"learning_rate": 3.704678362573099e-05, |
|
"loss": 0.1831, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"learning_rate": 3.684210526315789e-05, |
|
"loss": 0.1743, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 60.0, |
|
"eval_accuracy_dropoff": 0.3604544305833161, |
|
"eval_accuracy_undropoff": 0.9931485780398994, |
|
"eval_accuracy_unlabeled": NaN, |
|
"eval_iou_dropoff": 0.3140160915859419, |
|
"eval_iou_undropoff": 0.9645686852903885, |
|
"eval_iou_unlabeled": NaN, |
|
"eval_loss": 0.23770421743392944, |
|
"eval_mean_accuracy": 0.6768015043116078, |
|
"eval_mean_iou": 0.6392923884381652, |
|
"eval_overall_accuracy": 0.9651341756184896, |
|
"eval_runtime": 1.7553, |
|
"eval_samples_per_second": 8.545, |
|
"eval_steps_per_second": 0.57, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 60.33, |
|
"learning_rate": 3.663742690058479e-05, |
|
"loss": 0.1899, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 60.67, |
|
"learning_rate": 3.643274853801169e-05, |
|
"loss": 0.1845, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 61.0, |
|
"learning_rate": 3.62280701754386e-05, |
|
"loss": 0.1884, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 61.33, |
|
"learning_rate": 3.6023391812865495e-05, |
|
"loss": 0.1829, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 61.67, |
|
"learning_rate": 3.58187134502924e-05, |
|
"loss": 0.1854, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 62.0, |
|
"learning_rate": 3.56140350877193e-05, |
|
"loss": 0.1663, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 62.33, |
|
"learning_rate": 3.54093567251462e-05, |
|
"loss": 0.1773, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 62.67, |
|
"learning_rate": 3.5204678362573095e-05, |
|
"loss": 0.1761, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 63.0, |
|
"learning_rate": 3.5e-05, |
|
"loss": 0.2612, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 63.33, |
|
"learning_rate": 3.47953216374269e-05, |
|
"loss": 0.17, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 63.33, |
|
"eval_accuracy_dropoff": 0.40857398855882554, |
|
"eval_accuracy_undropoff": 0.9918479041801443, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.34743875278396436, |
|
"eval_iou_undropoff": 0.9653957084959844, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.23416396975517273, |
|
"eval_mean_accuracy": 0.700210946369485, |
|
"eval_mean_iou": 0.6564172306399744, |
|
"eval_overall_accuracy": 0.966021728515625, |
|
"eval_runtime": 1.7511, |
|
"eval_samples_per_second": 8.566, |
|
"eval_steps_per_second": 0.571, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 63.67, |
|
"learning_rate": 3.45906432748538e-05, |
|
"loss": 0.1831, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 64.0, |
|
"learning_rate": 3.4385964912280695e-05, |
|
"loss": 0.2357, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 64.33, |
|
"learning_rate": 3.41812865497076e-05, |
|
"loss": 0.1722, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 64.67, |
|
"learning_rate": 3.39766081871345e-05, |
|
"loss": 0.1712, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 65.0, |
|
"learning_rate": 3.37719298245614e-05, |
|
"loss": 0.1619, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 65.33, |
|
"learning_rate": 3.35672514619883e-05, |
|
"loss": 0.1684, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 65.67, |
|
"learning_rate": 3.3362573099415204e-05, |
|
"loss": 0.1647, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 66.0, |
|
"learning_rate": 3.31578947368421e-05, |
|
"loss": 0.1673, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 66.33, |
|
"learning_rate": 3.2953216374269e-05, |
|
"loss": 0.1545, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 66.67, |
|
"learning_rate": 3.27485380116959e-05, |
|
"loss": 0.173, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 66.67, |
|
"eval_accuracy_dropoff": 0.42767707400004595, |
|
"eval_accuracy_undropoff": 0.9913141169946558, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.3601547762998791, |
|
"eval_iou_undropoff": 0.9657080466122543, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.22958947718143463, |
|
"eval_mean_accuracy": 0.7094955954973509, |
|
"eval_mean_iou": 0.6629314114560667, |
|
"eval_overall_accuracy": 0.966357421875, |
|
"eval_runtime": 1.7374, |
|
"eval_samples_per_second": 8.633, |
|
"eval_steps_per_second": 0.576, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 67.0, |
|
"learning_rate": 3.2543859649122804e-05, |
|
"loss": 0.2388, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 67.33, |
|
"learning_rate": 3.2339181286549706e-05, |
|
"loss": 0.1593, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 67.67, |
|
"learning_rate": 3.213450292397661e-05, |
|
"loss": 0.1636, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 68.0, |
|
"learning_rate": 3.19298245614035e-05, |
|
"loss": 0.1466, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 68.33, |
|
"learning_rate": 3.1725146198830405e-05, |
|
"loss": 0.164, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 68.67, |
|
"learning_rate": 3.1520467836257306e-05, |
|
"loss": 0.1593, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 69.0, |
|
"learning_rate": 3.131578947368421e-05, |
|
"loss": 0.2254, |
|
"step": 207 |
|
}, |
|
{ |
|
"epoch": 69.33, |
|
"learning_rate": 3.111111111111111e-05, |
|
"loss": 0.1608, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 69.67, |
|
"learning_rate": 3.090643274853801e-05, |
|
"loss": 0.1621, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"learning_rate": 3.070175438596491e-05, |
|
"loss": 0.1487, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 70.0, |
|
"eval_accuracy_dropoff": 0.377650653617295, |
|
"eval_accuracy_undropoff": 0.9946097073696691, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.3382913590409796, |
|
"eval_iou_undropoff": 0.966735791980567, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.2151830494403839, |
|
"eval_mean_accuracy": 0.6861301804934821, |
|
"eval_mean_iou": 0.6525135755107734, |
|
"eval_overall_accuracy": 0.9672920227050781, |
|
"eval_runtime": 1.8067, |
|
"eval_samples_per_second": 8.303, |
|
"eval_steps_per_second": 0.554, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 70.33, |
|
"learning_rate": 3.0497076023391812e-05, |
|
"loss": 0.1505, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 70.67, |
|
"learning_rate": 3.029239766081871e-05, |
|
"loss": 0.1584, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 71.0, |
|
"learning_rate": 3.0087719298245612e-05, |
|
"loss": 0.2104, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 71.33, |
|
"learning_rate": 2.988304093567251e-05, |
|
"loss": 0.149, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 71.67, |
|
"learning_rate": 2.9678362573099412e-05, |
|
"loss": 0.1536, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 72.0, |
|
"learning_rate": 2.947368421052631e-05, |
|
"loss": 0.2299, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 72.33, |
|
"learning_rate": 2.9269005847953215e-05, |
|
"loss": 0.1492, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 72.67, |
|
"learning_rate": 2.9064327485380114e-05, |
|
"loss": 0.152, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 73.0, |
|
"learning_rate": 2.8859649122807016e-05, |
|
"loss": 0.1923, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 73.33, |
|
"learning_rate": 2.8654970760233914e-05, |
|
"loss": 0.1501, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 73.33, |
|
"eval_accuracy_dropoff": 0.4116180761366508, |
|
"eval_accuracy_undropoff": 0.9922568926667327, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.35267485864168063, |
|
"eval_iou_undropoff": 0.965926381442375, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.21790172159671783, |
|
"eval_mean_accuracy": 0.7019374844016917, |
|
"eval_mean_iou": 0.6593006200420278, |
|
"eval_overall_accuracy": 0.9665473937988281, |
|
"eval_runtime": 1.7336, |
|
"eval_samples_per_second": 8.653, |
|
"eval_steps_per_second": 0.577, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 73.67, |
|
"learning_rate": 2.8450292397660816e-05, |
|
"loss": 0.1449, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 74.0, |
|
"learning_rate": 2.8245614035087714e-05, |
|
"loss": 0.1505, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 74.33, |
|
"learning_rate": 2.804093567251462e-05, |
|
"loss": 0.1507, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 74.67, |
|
"learning_rate": 2.7836257309941518e-05, |
|
"loss": 0.1443, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 75.0, |
|
"learning_rate": 2.763157894736842e-05, |
|
"loss": 0.1347, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 75.33, |
|
"learning_rate": 2.7426900584795318e-05, |
|
"loss": 0.1519, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 75.67, |
|
"learning_rate": 2.722222222222222e-05, |
|
"loss": 0.1389, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 76.0, |
|
"learning_rate": 2.7017543859649118e-05, |
|
"loss": 0.1469, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 76.33, |
|
"learning_rate": 2.6812865497076023e-05, |
|
"loss": 0.1439, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 76.67, |
|
"learning_rate": 2.660818713450292e-05, |
|
"loss": 0.1419, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 76.67, |
|
"eval_accuracy_dropoff": 0.4198543432811818, |
|
"eval_accuracy_undropoff": 0.9915812766827069, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.3552924479698269, |
|
"eval_iou_undropoff": 0.965627380761118, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.20552903413772583, |
|
"eval_mean_accuracy": 0.7057178099819443, |
|
"eval_mean_iou": 0.6604599143654725, |
|
"eval_overall_accuracy": 0.966266377766927, |
|
"eval_runtime": 1.8087, |
|
"eval_samples_per_second": 8.293, |
|
"eval_steps_per_second": 0.553, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 77.0, |
|
"learning_rate": 2.6403508771929823e-05, |
|
"loss": 0.1345, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 77.33, |
|
"learning_rate": 2.619883040935672e-05, |
|
"loss": 0.1407, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 77.67, |
|
"learning_rate": 2.5994152046783623e-05, |
|
"loss": 0.1371, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 78.0, |
|
"learning_rate": 2.578947368421052e-05, |
|
"loss": 0.1822, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 78.33, |
|
"learning_rate": 2.5584795321637427e-05, |
|
"loss": 0.1341, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 78.67, |
|
"learning_rate": 2.5380116959064325e-05, |
|
"loss": 0.1475, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 79.0, |
|
"learning_rate": 2.5175438596491227e-05, |
|
"loss": 0.1383, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 79.33, |
|
"learning_rate": 2.4970760233918125e-05, |
|
"loss": 0.1365, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 79.67, |
|
"learning_rate": 2.4766081871345027e-05, |
|
"loss": 0.1363, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"learning_rate": 2.4561403508771925e-05, |
|
"loss": 0.2049, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 80.0, |
|
"eval_accuracy_dropoff": 0.40911388333677945, |
|
"eval_accuracy_undropoff": 0.9917459896776308, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.3472482900837042, |
|
"eval_iou_undropoff": 0.9653200135718596, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.20602351427078247, |
|
"eval_mean_accuracy": 0.7004299365072051, |
|
"eval_mean_iou": 0.6562841518277819, |
|
"eval_overall_accuracy": 0.9659482320149739, |
|
"eval_runtime": 1.7223, |
|
"eval_samples_per_second": 8.709, |
|
"eval_steps_per_second": 0.581, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 80.33, |
|
"learning_rate": 2.435672514619883e-05, |
|
"loss": 0.137, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 80.67, |
|
"learning_rate": 2.415204678362573e-05, |
|
"loss": 0.1319, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 81.0, |
|
"learning_rate": 2.394736842105263e-05, |
|
"loss": 0.1365, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 81.33, |
|
"learning_rate": 2.374269005847953e-05, |
|
"loss": 0.133, |
|
"step": 244 |
|
}, |
|
{ |
|
"epoch": 81.67, |
|
"learning_rate": 2.353801169590643e-05, |
|
"loss": 0.1286, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 82.0, |
|
"learning_rate": 2.333333333333333e-05, |
|
"loss": 0.1856, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 82.33, |
|
"learning_rate": 2.3128654970760234e-05, |
|
"loss": 0.1287, |
|
"step": 247 |
|
}, |
|
{ |
|
"epoch": 82.67, |
|
"learning_rate": 2.2923976608187133e-05, |
|
"loss": 0.1333, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 83.0, |
|
"learning_rate": 2.2719298245614034e-05, |
|
"loss": 0.1353, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 83.33, |
|
"learning_rate": 2.2514619883040933e-05, |
|
"loss": 0.1339, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 83.33, |
|
"eval_accuracy_dropoff": 0.39157304661474485, |
|
"eval_accuracy_undropoff": 0.9925711512240917, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.3374615148545237, |
|
"eval_iou_undropoff": 0.9653595874102998, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.20059725642204285, |
|
"eval_mean_accuracy": 0.6920720989194182, |
|
"eval_mean_iou": 0.6514105511324118, |
|
"eval_overall_accuracy": 0.9659601847330729, |
|
"eval_runtime": 1.7746, |
|
"eval_samples_per_second": 8.453, |
|
"eval_steps_per_second": 0.564, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 83.67, |
|
"learning_rate": 2.2309941520467834e-05, |
|
"loss": 0.1327, |
|
"step": 251 |
|
}, |
|
{ |
|
"epoch": 84.0, |
|
"learning_rate": 2.2105263157894733e-05, |
|
"loss": 0.1214, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 84.33, |
|
"learning_rate": 2.1900584795321638e-05, |
|
"loss": 0.1276, |
|
"step": 253 |
|
}, |
|
{ |
|
"epoch": 84.67, |
|
"learning_rate": 2.1695906432748536e-05, |
|
"loss": 0.1326, |
|
"step": 254 |
|
}, |
|
{ |
|
"epoch": 85.0, |
|
"learning_rate": 2.1491228070175438e-05, |
|
"loss": 0.1294, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 85.33, |
|
"learning_rate": 2.1286549707602336e-05, |
|
"loss": 0.1316, |
|
"step": 256 |
|
}, |
|
{ |
|
"epoch": 85.67, |
|
"learning_rate": 2.1081871345029238e-05, |
|
"loss": 0.1321, |
|
"step": 257 |
|
}, |
|
{ |
|
"epoch": 86.0, |
|
"learning_rate": 2.0877192982456137e-05, |
|
"loss": 0.1203, |
|
"step": 258 |
|
}, |
|
{ |
|
"epoch": 86.33, |
|
"learning_rate": 2.0672514619883038e-05, |
|
"loss": 0.1231, |
|
"step": 259 |
|
}, |
|
{ |
|
"epoch": 86.67, |
|
"learning_rate": 2.046783625730994e-05, |
|
"loss": 0.1262, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 86.67, |
|
"eval_accuracy_dropoff": 0.41579364532359225, |
|
"eval_accuracy_undropoff": 0.9908175831521224, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.34701556927560684, |
|
"eval_iou_undropoff": 0.9647069369731386, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.19629599153995514, |
|
"eval_mean_accuracy": 0.7033056142378573, |
|
"eval_mean_iou": 0.6558612531243727, |
|
"eval_overall_accuracy": 0.9653566996256511, |
|
"eval_runtime": 1.7129, |
|
"eval_samples_per_second": 8.757, |
|
"eval_steps_per_second": 0.584, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 87.0, |
|
"learning_rate": 2.0263157894736842e-05, |
|
"loss": 0.1769, |
|
"step": 261 |
|
}, |
|
{ |
|
"epoch": 87.33, |
|
"learning_rate": 2.005847953216374e-05, |
|
"loss": 0.1265, |
|
"step": 262 |
|
}, |
|
{ |
|
"epoch": 87.67, |
|
"learning_rate": 1.9853801169590642e-05, |
|
"loss": 0.1283, |
|
"step": 263 |
|
}, |
|
{ |
|
"epoch": 88.0, |
|
"learning_rate": 1.964912280701754e-05, |
|
"loss": 0.1253, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 88.33, |
|
"learning_rate": 1.9444444444444442e-05, |
|
"loss": 0.1248, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 88.67, |
|
"learning_rate": 1.9239766081871344e-05, |
|
"loss": 0.1278, |
|
"step": 266 |
|
}, |
|
{ |
|
"epoch": 89.0, |
|
"learning_rate": 1.9035087719298245e-05, |
|
"loss": 0.114, |
|
"step": 267 |
|
}, |
|
{ |
|
"epoch": 89.33, |
|
"learning_rate": 1.8830409356725144e-05, |
|
"loss": 0.1258, |
|
"step": 268 |
|
}, |
|
{ |
|
"epoch": 89.67, |
|
"learning_rate": 1.8625730994152046e-05, |
|
"loss": 0.126, |
|
"step": 269 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"learning_rate": 1.8421052631578944e-05, |
|
"loss": 0.179, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 90.0, |
|
"eval_accuracy_dropoff": 0.403180784340754, |
|
"eval_accuracy_undropoff": 0.9921025573887748, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.34446260065656786, |
|
"eval_iou_undropoff": 0.9654087830463419, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.1906900405883789, |
|
"eval_mean_accuracy": 0.6976416708647644, |
|
"eval_mean_iou": 0.6549356918514548, |
|
"eval_overall_accuracy": 0.9660263061523438, |
|
"eval_runtime": 1.6554, |
|
"eval_samples_per_second": 9.061, |
|
"eval_steps_per_second": 0.604, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 90.33, |
|
"learning_rate": 1.8216374269005846e-05, |
|
"loss": 0.1217, |
|
"step": 271 |
|
}, |
|
{ |
|
"epoch": 90.67, |
|
"learning_rate": 1.8011695906432747e-05, |
|
"loss": 0.1248, |
|
"step": 272 |
|
}, |
|
{ |
|
"epoch": 91.0, |
|
"learning_rate": 1.780701754385965e-05, |
|
"loss": 0.1784, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 91.33, |
|
"learning_rate": 1.7602339181286548e-05, |
|
"loss": 0.124, |
|
"step": 274 |
|
}, |
|
{ |
|
"epoch": 91.67, |
|
"learning_rate": 1.739766081871345e-05, |
|
"loss": 0.1208, |
|
"step": 275 |
|
}, |
|
{ |
|
"epoch": 92.0, |
|
"learning_rate": 1.7192982456140348e-05, |
|
"loss": 0.1274, |
|
"step": 276 |
|
}, |
|
{ |
|
"epoch": 92.33, |
|
"learning_rate": 1.698830409356725e-05, |
|
"loss": 0.122, |
|
"step": 277 |
|
}, |
|
{ |
|
"epoch": 92.67, |
|
"learning_rate": 1.678362573099415e-05, |
|
"loss": 0.1178, |
|
"step": 278 |
|
}, |
|
{ |
|
"epoch": 93.0, |
|
"learning_rate": 1.657894736842105e-05, |
|
"loss": 0.1181, |
|
"step": 279 |
|
}, |
|
{ |
|
"epoch": 93.33, |
|
"learning_rate": 1.637426900584795e-05, |
|
"loss": 0.1216, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 93.33, |
|
"eval_accuracy_dropoff": 0.40678199738093596, |
|
"eval_accuracy_undropoff": 0.9919910634552156, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.3468262440868535, |
|
"eval_iou_undropoff": 0.9654570335241481, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.19010943174362183, |
|
"eval_mean_accuracy": 0.6993865304180757, |
|
"eval_mean_iou": 0.6561416388055008, |
|
"eval_overall_accuracy": 0.9660792032877604, |
|
"eval_runtime": 1.6449, |
|
"eval_samples_per_second": 9.119, |
|
"eval_steps_per_second": 0.608, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 93.67, |
|
"learning_rate": 1.6169590643274853e-05, |
|
"loss": 0.1182, |
|
"step": 281 |
|
}, |
|
{ |
|
"epoch": 94.0, |
|
"learning_rate": 1.596491228070175e-05, |
|
"loss": 0.1625, |
|
"step": 282 |
|
}, |
|
{ |
|
"epoch": 94.33, |
|
"learning_rate": 1.5760233918128653e-05, |
|
"loss": 0.1229, |
|
"step": 283 |
|
}, |
|
{ |
|
"epoch": 94.67, |
|
"learning_rate": 1.5555555555555555e-05, |
|
"loss": 0.119, |
|
"step": 284 |
|
}, |
|
{ |
|
"epoch": 95.0, |
|
"learning_rate": 1.5350877192982453e-05, |
|
"loss": 0.1223, |
|
"step": 285 |
|
}, |
|
{ |
|
"epoch": 95.33, |
|
"learning_rate": 1.5146198830409355e-05, |
|
"loss": 0.1198, |
|
"step": 286 |
|
}, |
|
{ |
|
"epoch": 95.67, |
|
"learning_rate": 1.4941520467836255e-05, |
|
"loss": 0.1189, |
|
"step": 287 |
|
}, |
|
{ |
|
"epoch": 96.0, |
|
"learning_rate": 1.4736842105263155e-05, |
|
"loss": 0.1105, |
|
"step": 288 |
|
}, |
|
{ |
|
"epoch": 96.33, |
|
"learning_rate": 1.4532163742690057e-05, |
|
"loss": 0.1179, |
|
"step": 289 |
|
}, |
|
{ |
|
"epoch": 96.67, |
|
"learning_rate": 1.4327485380116957e-05, |
|
"loss": 0.1144, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 96.67, |
|
"eval_accuracy_dropoff": 0.41187653640269256, |
|
"eval_accuracy_undropoff": 0.9914586067462611, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.3477622001192976, |
|
"eval_iou_undropoff": 0.9651605292544987, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.19165104627609253, |
|
"eval_mean_accuracy": 0.7016675715744768, |
|
"eval_mean_iou": 0.6564613646868982, |
|
"eval_overall_accuracy": 0.9657958984375, |
|
"eval_runtime": 1.7446, |
|
"eval_samples_per_second": 8.598, |
|
"eval_steps_per_second": 0.573, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 97.0, |
|
"learning_rate": 1.4122807017543857e-05, |
|
"loss": 0.1237, |
|
"step": 291 |
|
}, |
|
{ |
|
"epoch": 97.33, |
|
"learning_rate": 1.3918128654970759e-05, |
|
"loss": 0.1151, |
|
"step": 292 |
|
}, |
|
{ |
|
"epoch": 97.67, |
|
"learning_rate": 1.3713450292397659e-05, |
|
"loss": 0.1274, |
|
"step": 293 |
|
}, |
|
{ |
|
"epoch": 98.0, |
|
"learning_rate": 1.3508771929824559e-05, |
|
"loss": 0.1225, |
|
"step": 294 |
|
}, |
|
{ |
|
"epoch": 98.33, |
|
"learning_rate": 1.330409356725146e-05, |
|
"loss": 0.1193, |
|
"step": 295 |
|
}, |
|
{ |
|
"epoch": 98.67, |
|
"learning_rate": 1.309941520467836e-05, |
|
"loss": 0.1099, |
|
"step": 296 |
|
}, |
|
{ |
|
"epoch": 99.0, |
|
"learning_rate": 1.289473684210526e-05, |
|
"loss": 0.1671, |
|
"step": 297 |
|
}, |
|
{ |
|
"epoch": 99.33, |
|
"learning_rate": 1.2690058479532163e-05, |
|
"loss": 0.1211, |
|
"step": 298 |
|
}, |
|
{ |
|
"epoch": 99.67, |
|
"learning_rate": 1.2485380116959063e-05, |
|
"loss": 0.1126, |
|
"step": 299 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"learning_rate": 1.2280701754385963e-05, |
|
"loss": 0.1095, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 100.0, |
|
"eval_accuracy_dropoff": 0.4308704941760287, |
|
"eval_accuracy_undropoff": 0.9907225871275863, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.35898417498911345, |
|
"eval_iou_undropoff": 0.9652709182083522, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.19000361859798431, |
|
"eval_mean_accuracy": 0.7107965406518075, |
|
"eval_mean_iou": 0.6621275465987329, |
|
"eval_overall_accuracy": 0.9659334818522135, |
|
"eval_runtime": 1.7433, |
|
"eval_samples_per_second": 8.604, |
|
"eval_steps_per_second": 0.574, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 100.33, |
|
"learning_rate": 1.2076023391812864e-05, |
|
"loss": 0.1215, |
|
"step": 301 |
|
}, |
|
{ |
|
"epoch": 100.67, |
|
"learning_rate": 1.1871345029239764e-05, |
|
"loss": 0.1158, |
|
"step": 302 |
|
}, |
|
{ |
|
"epoch": 101.0, |
|
"learning_rate": 1.1666666666666665e-05, |
|
"loss": 0.1064, |
|
"step": 303 |
|
}, |
|
{ |
|
"epoch": 101.33, |
|
"learning_rate": 1.1461988304093566e-05, |
|
"loss": 0.1166, |
|
"step": 304 |
|
}, |
|
{ |
|
"epoch": 101.67, |
|
"learning_rate": 1.1257309941520466e-05, |
|
"loss": 0.116, |
|
"step": 305 |
|
}, |
|
{ |
|
"epoch": 102.0, |
|
"learning_rate": 1.1052631578947366e-05, |
|
"loss": 0.1121, |
|
"step": 306 |
|
}, |
|
{ |
|
"epoch": 102.33, |
|
"learning_rate": 1.0847953216374268e-05, |
|
"loss": 0.1195, |
|
"step": 307 |
|
}, |
|
{ |
|
"epoch": 102.67, |
|
"learning_rate": 1.0643274853801168e-05, |
|
"loss": 0.1171, |
|
"step": 308 |
|
}, |
|
{ |
|
"epoch": 103.0, |
|
"learning_rate": 1.0438596491228068e-05, |
|
"loss": 0.1115, |
|
"step": 309 |
|
}, |
|
{ |
|
"epoch": 103.33, |
|
"learning_rate": 1.023391812865497e-05, |
|
"loss": 0.1144, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 103.33, |
|
"eval_accuracy_dropoff": 0.4058285661773152, |
|
"eval_accuracy_undropoff": 0.9930000968586917, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.35256020038520264, |
|
"eval_iou_undropoff": 0.9663975313233037, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.18475459516048431, |
|
"eval_mean_accuracy": 0.6994143315180035, |
|
"eval_mean_iou": 0.6594788658542532, |
|
"eval_overall_accuracy": 0.9670013427734375, |
|
"eval_runtime": 1.7272, |
|
"eval_samples_per_second": 8.685, |
|
"eval_steps_per_second": 0.579, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 103.67, |
|
"learning_rate": 1.002923976608187e-05, |
|
"loss": 0.111, |
|
"step": 311 |
|
}, |
|
{ |
|
"epoch": 104.0, |
|
"learning_rate": 9.82456140350877e-06, |
|
"loss": 0.1465, |
|
"step": 312 |
|
}, |
|
{ |
|
"epoch": 104.33, |
|
"learning_rate": 9.619883040935672e-06, |
|
"loss": 0.1109, |
|
"step": 313 |
|
}, |
|
{ |
|
"epoch": 104.67, |
|
"learning_rate": 9.415204678362572e-06, |
|
"loss": 0.114, |
|
"step": 314 |
|
}, |
|
{ |
|
"epoch": 105.0, |
|
"learning_rate": 9.210526315789472e-06, |
|
"loss": 0.1056, |
|
"step": 315 |
|
}, |
|
{ |
|
"epoch": 105.33, |
|
"learning_rate": 9.005847953216374e-06, |
|
"loss": 0.1147, |
|
"step": 316 |
|
}, |
|
{ |
|
"epoch": 105.67, |
|
"learning_rate": 8.801169590643274e-06, |
|
"loss": 0.11, |
|
"step": 317 |
|
}, |
|
{ |
|
"epoch": 106.0, |
|
"learning_rate": 8.596491228070174e-06, |
|
"loss": 0.14, |
|
"step": 318 |
|
}, |
|
{ |
|
"epoch": 106.33, |
|
"learning_rate": 8.391812865497076e-06, |
|
"loss": 0.1075, |
|
"step": 319 |
|
}, |
|
{ |
|
"epoch": 106.67, |
|
"learning_rate": 8.187134502923976e-06, |
|
"loss": 0.1144, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 106.67, |
|
"eval_accuracy_dropoff": 0.4099754175569187, |
|
"eval_accuracy_undropoff": 0.9922393303764823, |
|
"eval_accuracy_unlabeled": null,
|
"eval_iou_dropoff": 0.35115337501783317, |
|
"eval_iou_undropoff": 0.9658377322717171, |
|
"eval_iou_unlabeled": null,
|
"eval_loss": 0.18488384783267975, |
|
"eval_mean_accuracy": 0.7011073739667004, |
|
"eval_mean_iou": 0.6584955536447752, |
|
"eval_overall_accuracy": 0.9664578755696615, |
|
"eval_runtime": 1.7037, |
|
"eval_samples_per_second": 8.804, |
|
"eval_steps_per_second": 0.587, |
|
"step": 320 |
|
} |
|
], |
|
"max_steps": 360, |
|
"num_train_epochs": 120, |
|
"total_flos": 6.373605557403648e+16, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|