|
{
  "best_metric": 0.41156673431396484,
  "best_model_checkpoint": "/robodata/smodak/Projects/nspl/scripts/terrainseg/training/models/dropoff-utcustom-train-SF-RGB-b0_3/checkpoint-240",
  "epoch": 80.0,
  "global_step": 240,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.33,
      "learning_rate": 1.111111111111111e-06,
      "loss": 1.1153,
      "step": 1
    },
    {
      "epoch": 0.67,
      "learning_rate": 2.222222222222222e-06,
      "loss": 1.1199,
      "step": 2
    },
    {
      "epoch": 1.0,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 1.1156,
      "step": 3
    },
    {
      "epoch": 1.33,
      "learning_rate": 4.444444444444444e-06,
      "loss": 1.1182,
      "step": 4
    },
    {
      "epoch": 1.67,
      "learning_rate": 5.555555555555557e-06,
      "loss": 1.1161,
      "step": 5
    },
    {
      "epoch": 2.0,
      "learning_rate": 6.666666666666667e-06,
      "loss": 1.1106,
      "step": 6
    },
    {
      "epoch": 2.33,
      "learning_rate": 7.77777777777778e-06,
      "loss": 1.1141,
      "step": 7
    },
    {
      "epoch": 2.67,
      "learning_rate": 8.888888888888888e-06,
      "loss": 1.1085,
      "step": 8
    },
    {
      "epoch": 3.0,
      "learning_rate": 1e-05,
      "loss": 1.0992,
      "step": 9
    },
    {
      "epoch": 3.33,
      "learning_rate": 1.1111111111111113e-05,
      "loss": 1.1015,
      "step": 10
    },
    {
      "epoch": 3.33,
      "eval_accuracy_dropoff": 0.5975256737197602,
      "eval_accuracy_undropoff": 0.31695782815139334,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.04272275097162494,
      "eval_iou_undropoff": 0.3123742688218148,
      "eval_iou_unlabeled": 0.0,
      "eval_loss": 1.0990227460861206,
      "eval_mean_accuracy": 0.45724175093557673,
      "eval_mean_iou": 0.1183656732644799,
      "eval_overall_accuracy": 0.32938079833984374,
      "eval_runtime": 1.6795,
      "eval_samples_per_second": 8.931,
      "eval_steps_per_second": 0.595,
      "step": 10
    },
    {
      "epoch": 3.67,
      "learning_rate": 1.2222222222222224e-05,
      "loss": 1.102,
      "step": 11
    },
    {
      "epoch": 4.0,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 1.0912,
      "step": 12
    },
    {
      "epoch": 4.33,
      "learning_rate": 1.4444444444444446e-05,
      "loss": 1.0939,
      "step": 13
    },
    {
      "epoch": 4.67,
      "learning_rate": 1.555555555555556e-05,
      "loss": 1.0846,
      "step": 14
    },
    {
      "epoch": 5.0,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 1.0804,
      "step": 15
    },
    {
      "epoch": 5.33,
      "learning_rate": 1.7777777777777777e-05,
      "loss": 1.0734,
      "step": 16
    },
    {
      "epoch": 5.67,
      "learning_rate": 1.888888888888889e-05,
      "loss": 1.0642,
      "step": 17
    },
    {
      "epoch": 6.0,
      "learning_rate": 2e-05,
      "loss": 1.0945,
      "step": 18
    },
    {
      "epoch": 6.33,
      "learning_rate": 1.994152046783626e-05,
      "loss": 1.0588,
      "step": 19
    },
    {
      "epoch": 6.67,
      "learning_rate": 1.9883040935672515e-05,
      "loss": 1.0478,
      "step": 20
    },
    {
      "epoch": 6.67,
      "eval_accuracy_dropoff": 0.8648482551060261,
      "eval_accuracy_undropoff": 0.5515245664509166,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.08793587759511783,
      "eval_iou_undropoff": 0.5482206998703943,
      "eval_iou_unlabeled": 0.0,
      "eval_loss": 1.0755951404571533,
      "eval_mean_accuracy": 0.7081864107784714,
      "eval_mean_iou": 0.21205219248850404,
      "eval_overall_accuracy": 0.5653978983561198,
      "eval_runtime": 1.8074,
      "eval_samples_per_second": 8.299,
      "eval_steps_per_second": 0.553,
      "step": 20
    },
    {
      "epoch": 7.0,
      "learning_rate": 1.9824561403508773e-05,
      "loss": 1.0748,
      "step": 21
    },
    {
      "epoch": 7.33,
      "learning_rate": 1.976608187134503e-05,
      "loss": 1.0343,
      "step": 22
    },
    {
      "epoch": 7.67,
      "learning_rate": 1.970760233918129e-05,
      "loss": 1.0348,
      "step": 23
    },
    {
      "epoch": 8.0,
      "learning_rate": 1.9649122807017544e-05,
      "loss": 1.0366,
      "step": 24
    },
    {
      "epoch": 8.33,
      "learning_rate": 1.9590643274853802e-05,
      "loss": 1.0351,
      "step": 25
    },
    {
      "epoch": 8.67,
      "learning_rate": 1.953216374269006e-05,
      "loss": 1.0085,
      "step": 26
    },
    {
      "epoch": 9.0,
      "learning_rate": 1.9473684210526318e-05,
      "loss": 1.0048,
      "step": 27
    },
    {
      "epoch": 9.33,
      "learning_rate": 1.9415204678362573e-05,
      "loss": 1.0001,
      "step": 28
    },
    {
      "epoch": 9.67,
      "learning_rate": 1.935672514619883e-05,
      "loss": 0.9877,
      "step": 29
    },
    {
      "epoch": 10.0,
      "learning_rate": 1.929824561403509e-05,
      "loss": 1.0451,
      "step": 30
    },
    {
      "epoch": 10.0,
      "eval_accuracy_dropoff": 0.8842327750591586,
      "eval_accuracy_undropoff": 0.7264455095352592,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.1312558881078339,
      "eval_iou_undropoff": 0.7226098464796189,
      "eval_iou_unlabeled": 0.0,
      "eval_loss": 1.026917576789856,
      "eval_mean_accuracy": 0.8053391422972089,
      "eval_mean_iou": 0.2846219115291509,
      "eval_overall_accuracy": 0.7334320068359375,
      "eval_runtime": 1.7831,
      "eval_samples_per_second": 8.412,
      "eval_steps_per_second": 0.561,
      "step": 30
    },
    {
      "epoch": 10.33,
      "learning_rate": 1.9239766081871347e-05,
      "loss": 0.9774,
      "step": 31
    },
    {
      "epoch": 10.67,
      "learning_rate": 1.9181286549707602e-05,
      "loss": 0.9797,
      "step": 32
    },
    {
      "epoch": 11.0,
      "learning_rate": 1.912280701754386e-05,
      "loss": 0.9325,
      "step": 33
    },
    {
      "epoch": 11.33,
      "learning_rate": 1.9064327485380118e-05,
      "loss": 0.9775,
      "step": 34
    },
    {
      "epoch": 11.67,
      "learning_rate": 1.9005847953216376e-05,
      "loss": 0.9393,
      "step": 35
    },
    {
      "epoch": 12.0,
      "learning_rate": 1.894736842105263e-05,
      "loss": 0.9431,
      "step": 36
    },
    {
      "epoch": 12.33,
      "learning_rate": 1.888888888888889e-05,
      "loss": 0.9369,
      "step": 37
    },
    {
      "epoch": 12.67,
      "learning_rate": 1.8830409356725147e-05,
      "loss": 0.9234,
      "step": 38
    },
    {
      "epoch": 13.0,
      "learning_rate": 1.8771929824561405e-05,
      "loss": 1.0184,
      "step": 39
    },
    {
      "epoch": 13.33,
      "learning_rate": 1.871345029239766e-05,
      "loss": 0.9095,
      "step": 40
    },
    {
      "epoch": 13.33,
      "eval_accuracy_dropoff": 0.7349346382704988,
      "eval_accuracy_undropoff": 0.8459984055569215,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.17233702408257653,
      "eval_iou_undropoff": 0.835792668708584,
      "eval_iou_unlabeled": 0.0,
      "eval_loss": 0.947559118270874,
      "eval_mean_accuracy": 0.7904665219137101,
      "eval_mean_iou": 0.33604323093038685,
      "eval_overall_accuracy": 0.8410807291666667,
      "eval_runtime": 1.7664,
      "eval_samples_per_second": 8.492,
      "eval_steps_per_second": 0.566,
      "step": 40
    },
    {
      "epoch": 13.67,
      "learning_rate": 1.8654970760233918e-05,
      "loss": 0.9068,
      "step": 41
    },
    {
      "epoch": 14.0,
      "learning_rate": 1.8596491228070176e-05,
      "loss": 0.9478,
      "step": 42
    },
    {
      "epoch": 14.33,
      "learning_rate": 1.8538011695906434e-05,
      "loss": 0.9028,
      "step": 43
    },
    {
      "epoch": 14.67,
      "learning_rate": 1.847953216374269e-05,
      "loss": 0.8937,
      "step": 44
    },
    {
      "epoch": 15.0,
      "learning_rate": 1.8421052631578947e-05,
      "loss": 0.7988,
      "step": 45
    },
    {
      "epoch": 15.33,
      "learning_rate": 1.8362573099415205e-05,
      "loss": 0.8873,
      "step": 46
    },
    {
      "epoch": 15.67,
      "learning_rate": 1.8304093567251464e-05,
      "loss": 0.8282,
      "step": 47
    },
    {
      "epoch": 16.0,
      "learning_rate": 1.824561403508772e-05,
      "loss": 0.868,
      "step": 48
    },
    {
      "epoch": 16.33,
      "learning_rate": 1.8187134502923976e-05,
      "loss": 0.8684,
      "step": 49
    },
    {
      "epoch": 16.67,
      "learning_rate": 1.8128654970760235e-05,
      "loss": 0.8091,
      "step": 50
    },
    {
      "epoch": 16.67,
      "eval_accuracy_dropoff": 0.5974624945436166,
      "eval_accuracy_undropoff": 0.9315254285997107,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.2429297387681515,
      "eval_iou_undropoff": 0.914512280023459,
      "eval_iou_unlabeled": 0.0,
      "eval_loss": 0.8424645662307739,
      "eval_mean_accuracy": 0.7644939615716636,
      "eval_mean_iou": 0.3858140062638702,
      "eval_overall_accuracy": 0.9167338053385417,
      "eval_runtime": 1.7428,
      "eval_samples_per_second": 8.607,
      "eval_steps_per_second": 0.574,
      "step": 50
    },
    {
      "epoch": 17.0,
      "learning_rate": 1.8070175438596493e-05,
      "loss": 0.7826,
      "step": 51
    },
    {
      "epoch": 17.33,
      "learning_rate": 1.8011695906432747e-05,
      "loss": 0.8106,
      "step": 52
    },
    {
      "epoch": 17.67,
      "learning_rate": 1.7953216374269006e-05,
      "loss": 0.7961,
      "step": 53
    },
    {
      "epoch": 18.0,
      "learning_rate": 1.7894736842105264e-05,
      "loss": 0.7195,
      "step": 54
    },
    {
      "epoch": 18.33,
      "learning_rate": 1.7836257309941522e-05,
      "loss": 0.7777,
      "step": 55
    },
    {
      "epoch": 18.67,
      "learning_rate": 1.7777777777777777e-05,
      "loss": 0.7961,
      "step": 56
    },
    {
      "epoch": 19.0,
      "learning_rate": 1.7719298245614035e-05,
      "loss": 0.9039,
      "step": 57
    },
    {
      "epoch": 19.33,
      "learning_rate": 1.7660818713450293e-05,
      "loss": 0.7125,
      "step": 58
    },
    {
      "epoch": 19.67,
      "learning_rate": 1.760233918128655e-05,
      "loss": 0.7704,
      "step": 59
    },
    {
      "epoch": 20.0,
      "learning_rate": 1.754385964912281e-05,
      "loss": 0.8094,
      "step": 60
    },
    {
      "epoch": 20.0,
      "eval_accuracy_dropoff": 0.5280687848921359,
      "eval_accuracy_undropoff": 0.9608414146478016,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.2866027007818053,
      "eval_iou_undropoff": 0.9402828328280236,
      "eval_iou_unlabeled": 0.0,
      "eval_loss": 0.7488501071929932,
      "eval_mean_accuracy": 0.7444550997699688,
      "eval_mean_iou": 0.40896184453660966,
      "eval_overall_accuracy": 0.9416791280110677,
      "eval_runtime": 1.8293,
      "eval_samples_per_second": 8.2,
      "eval_steps_per_second": 0.547,
      "step": 60
    },
    {
      "epoch": 20.33,
      "learning_rate": 1.7485380116959064e-05,
      "loss": 0.7233,
      "step": 61
    },
    {
      "epoch": 20.67,
      "learning_rate": 1.7426900584795322e-05,
      "loss": 0.7181,
      "step": 62
    },
    {
      "epoch": 21.0,
      "learning_rate": 1.736842105263158e-05,
      "loss": 0.9374,
      "step": 63
    },
    {
      "epoch": 21.33,
      "learning_rate": 1.7309941520467838e-05,
      "loss": 0.7476,
      "step": 64
    },
    {
      "epoch": 21.67,
      "learning_rate": 1.7251461988304093e-05,
      "loss": 0.7718,
      "step": 65
    },
    {
      "epoch": 22.0,
      "learning_rate": 1.719298245614035e-05,
      "loss": 0.7803,
      "step": 66
    },
    {
      "epoch": 22.33,
      "learning_rate": 1.713450292397661e-05,
      "loss": 0.761,
      "step": 67
    },
    {
      "epoch": 22.67,
      "learning_rate": 1.7076023391812867e-05,
      "loss": 0.6901,
      "step": 68
    },
    {
      "epoch": 23.0,
      "learning_rate": 1.7017543859649125e-05,
      "loss": 0.9088,
      "step": 69
    },
    {
      "epoch": 23.33,
      "learning_rate": 1.695906432748538e-05,
      "loss": 0.6945,
      "step": 70
    },
    {
      "epoch": 23.33,
      "eval_accuracy_dropoff": 0.5298435453856227,
      "eval_accuracy_undropoff": 0.9645688777057901,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.30036630036630035,
      "eval_iou_undropoff": 0.9440065001523473,
      "eval_iou_unlabeled": 0.0,
      "eval_loss": 0.7005425095558167,
      "eval_mean_accuracy": 0.7472062115457064,
      "eval_mean_iou": 0.4147909335062159,
      "eval_overall_accuracy": 0.9453201293945312,
      "eval_runtime": 1.7223,
      "eval_samples_per_second": 8.71,
      "eval_steps_per_second": 0.581,
      "step": 70
    },
    {
      "epoch": 23.67,
      "learning_rate": 1.690058479532164e-05,
      "loss": 0.6711,
      "step": 71
    },
    {
      "epoch": 24.0,
      "learning_rate": 1.6842105263157896e-05,
      "loss": 0.7554,
      "step": 72
    },
    {
      "epoch": 24.33,
      "learning_rate": 1.6783625730994155e-05,
      "loss": 0.6708,
      "step": 73
    },
    {
      "epoch": 24.67,
      "learning_rate": 1.672514619883041e-05,
      "loss": 0.6926,
      "step": 74
    },
    {
      "epoch": 25.0,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.9247,
      "step": 75
    },
    {
      "epoch": 25.33,
      "learning_rate": 1.6608187134502926e-05,
      "loss": 0.6425,
      "step": 76
    },
    {
      "epoch": 25.67,
      "learning_rate": 1.6549707602339184e-05,
      "loss": 0.6567,
      "step": 77
    },
    {
      "epoch": 26.0,
      "learning_rate": 1.649122807017544e-05,
      "loss": 0.8971,
      "step": 78
    },
    {
      "epoch": 26.33,
      "learning_rate": 1.6432748538011697e-05,
      "loss": 0.6993,
      "step": 79
    },
    {
      "epoch": 26.67,
      "learning_rate": 1.6374269005847955e-05,
      "loss": 0.6337,
      "step": 80
    },
    {
      "epoch": 26.67,
      "eval_accuracy_dropoff": 0.49581294369012335,
      "eval_accuracy_undropoff": 0.9709405298276873,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.3046961862238145,
      "eval_iou_undropoff": 0.9487783537255239,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.6331071257591248,
      "eval_mean_accuracy": 0.7333767367589054,
      "eval_mean_iou": 0.6267372699746692,
      "eval_overall_accuracy": 0.9499028523763021,
      "eval_runtime": 1.7079,
      "eval_samples_per_second": 8.783,
      "eval_steps_per_second": 0.586,
      "step": 80
    },
    {
      "epoch": 27.0,
      "learning_rate": 1.6315789473684213e-05,
      "loss": 0.7322,
      "step": 81
    },
    {
      "epoch": 27.33,
      "learning_rate": 1.625730994152047e-05,
      "loss": 0.6814,
      "step": 82
    },
    {
      "epoch": 27.67,
      "learning_rate": 1.6198830409356726e-05,
      "loss": 0.6695,
      "step": 83
    },
    {
      "epoch": 28.0,
      "learning_rate": 1.6140350877192984e-05,
      "loss": 0.6004,
      "step": 84
    },
    {
      "epoch": 28.33,
      "learning_rate": 1.6081871345029242e-05,
      "loss": 0.647,
      "step": 85
    },
    {
      "epoch": 28.67,
      "learning_rate": 1.60233918128655e-05,
      "loss": 0.5803,
      "step": 86
    },
    {
      "epoch": 29.0,
      "learning_rate": 1.5964912280701755e-05,
      "loss": 0.8716,
      "step": 87
    },
    {
      "epoch": 29.33,
      "learning_rate": 1.5906432748538013e-05,
      "loss": 0.6239,
      "step": 88
    },
    {
      "epoch": 29.67,
      "learning_rate": 1.584795321637427e-05,
      "loss": 0.597,
      "step": 89
    },
    {
      "epoch": 30.0,
      "learning_rate": 1.578947368421053e-05,
      "loss": 0.603,
      "step": 90
    },
    {
      "epoch": 30.0,
      "eval_accuracy_dropoff": 0.40565625933328736,
      "eval_accuracy_undropoff": 0.9813906247172738,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.28940801416143874,
      "eval_iou_undropoff": 0.9550916579222961,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.5726452469825745,
      "eval_mean_accuracy": 0.6935234420252806,
      "eval_mean_iou": 0.6222498360418675,
      "eval_overall_accuracy": 0.9558982849121094,
      "eval_runtime": 1.7573,
      "eval_samples_per_second": 8.536,
      "eval_steps_per_second": 0.569,
      "step": 90
    },
    {
      "epoch": 30.33,
      "learning_rate": 1.5730994152046787e-05,
      "loss": 0.5913,
      "step": 91
    },
    {
      "epoch": 30.67,
      "learning_rate": 1.5672514619883042e-05,
      "loss": 0.6683,
      "step": 92
    },
    {
      "epoch": 31.0,
      "learning_rate": 1.56140350877193e-05,
      "loss": 0.8263,
      "step": 93
    },
    {
      "epoch": 31.33,
      "learning_rate": 1.555555555555556e-05,
      "loss": 0.5791,
      "step": 94
    },
    {
      "epoch": 31.67,
      "learning_rate": 1.5497076023391816e-05,
      "loss": 0.6118,
      "step": 95
    },
    {
      "epoch": 32.0,
      "learning_rate": 1.543859649122807e-05,
      "loss": 0.6546,
      "step": 96
    },
    {
      "epoch": 32.33,
      "learning_rate": 1.538011695906433e-05,
      "loss": 0.6008,
      "step": 97
    },
    {
      "epoch": 32.67,
      "learning_rate": 1.5321637426900587e-05,
      "loss": 0.5931,
      "step": 98
    },
    {
      "epoch": 33.0,
      "learning_rate": 1.5263157894736846e-05,
      "loss": 0.8761,
      "step": 99
    },
    {
      "epoch": 33.33,
      "learning_rate": 1.52046783625731e-05,
      "loss": 0.5903,
      "step": 100
    },
    {
      "epoch": 33.33,
      "eval_accuracy_dropoff": 0.4546373515289361,
      "eval_accuracy_undropoff": 0.9756530777115378,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.2980214980892679,
      "eval_iou_undropoff": 0.9516094455131633,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.5840517282485962,
      "eval_mean_accuracy": 0.715145214620237,
      "eval_mean_iou": 0.6248154718012155,
      "eval_overall_accuracy": 0.9525835673014323,
      "eval_runtime": 1.6835,
      "eval_samples_per_second": 8.91,
      "eval_steps_per_second": 0.594,
      "step": 100
    },
    {
      "epoch": 33.67,
      "learning_rate": 1.5146198830409358e-05,
      "loss": 0.5691,
      "step": 101
    },
    {
      "epoch": 34.0,
      "learning_rate": 1.5087719298245615e-05,
      "loss": 0.8078,
      "step": 102
    },
    {
      "epoch": 34.33,
      "learning_rate": 1.5029239766081873e-05,
      "loss": 0.5583,
      "step": 103
    },
    {
      "epoch": 34.67,
      "learning_rate": 1.497076023391813e-05,
      "loss": 0.5857,
      "step": 104
    },
    {
      "epoch": 35.0,
      "learning_rate": 1.4912280701754388e-05,
      "loss": 0.5148,
      "step": 105
    },
    {
      "epoch": 35.33,
      "learning_rate": 1.4853801169590644e-05,
      "loss": 0.5866,
      "step": 106
    },
    {
      "epoch": 35.67,
      "learning_rate": 1.4795321637426902e-05,
      "loss": 0.5276,
      "step": 107
    },
    {
      "epoch": 36.0,
      "learning_rate": 1.4736842105263159e-05,
      "loss": 0.8399,
      "step": 108
    },
    {
      "epoch": 36.33,
      "learning_rate": 1.4678362573099417e-05,
      "loss": 0.5761,
      "step": 109
    },
    {
      "epoch": 36.67,
      "learning_rate": 1.4619883040935675e-05,
      "loss": 0.5514,
      "step": 110
    },
    {
      "epoch": 36.67,
      "eval_accuracy_dropoff": 0.3781273692191054,
      "eval_accuracy_undropoff": 0.9853985522286546,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.28751293775466086,
      "eval_iou_undropoff": 0.9578033403813699,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.5156698822975159,
      "eval_mean_accuracy": 0.68176296072388,
      "eval_mean_iou": 0.6226581390680154,
      "eval_overall_accuracy": 0.9585098266601563,
      "eval_runtime": 1.8267,
      "eval_samples_per_second": 8.212,
      "eval_steps_per_second": 0.547,
      "step": 110
    },
    {
      "epoch": 37.0,
      "learning_rate": 1.4561403508771931e-05,
      "loss": 0.6281,
      "step": 111
    },
    {
      "epoch": 37.33,
      "learning_rate": 1.4502923976608188e-05,
      "loss": 0.5795,
      "step": 112
    },
    {
      "epoch": 37.67,
      "learning_rate": 1.4444444444444446e-05,
      "loss": 0.5427,
      "step": 113
    },
    {
      "epoch": 38.0,
      "learning_rate": 1.4385964912280704e-05,
      "loss": 0.8123,
      "step": 114
    },
    {
      "epoch": 38.33,
      "learning_rate": 1.432748538011696e-05,
      "loss": 0.5282,
      "step": 115
    },
    {
      "epoch": 38.67,
      "learning_rate": 1.4269005847953217e-05,
      "loss": 0.6357,
      "step": 116
    },
    {
      "epoch": 39.0,
      "learning_rate": 1.4210526315789475e-05,
      "loss": 0.5648,
      "step": 117
    },
    {
      "epoch": 39.33,
      "learning_rate": 1.4152046783625733e-05,
      "loss": 0.5177,
      "step": 118
    },
    {
      "epoch": 39.67,
      "learning_rate": 1.409356725146199e-05,
      "loss": 0.5163,
      "step": 119
    },
    {
      "epoch": 40.0,
      "learning_rate": 1.4035087719298246e-05,
      "loss": 0.6464,
      "step": 120
    },
    {
      "epoch": 40.0,
      "eval_accuracy_dropoff": 0.3941002136604866,
      "eval_accuracy_undropoff": 0.983633542058492,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.29122208357737656,
      "eval_iou_undropoff": 0.9567759549263578,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.5141220092773438,
      "eval_mean_accuracy": 0.6888668778594893,
      "eval_mean_iou": 0.6239990192518672,
      "eval_overall_accuracy": 0.9575302124023437,
      "eval_runtime": 1.7894,
      "eval_samples_per_second": 8.383,
      "eval_steps_per_second": 0.559,
      "step": 120
    },
    {
      "epoch": 40.33,
      "learning_rate": 1.3976608187134504e-05,
      "loss": 0.5223,
      "step": 121
    },
    {
      "epoch": 40.67,
      "learning_rate": 1.3918128654970762e-05,
      "loss": 0.5158,
      "step": 122
    },
    {
      "epoch": 41.0,
      "learning_rate": 1.385964912280702e-05,
      "loss": 0.7741,
      "step": 123
    },
    {
      "epoch": 41.33,
      "learning_rate": 1.3801169590643275e-05,
      "loss": 0.5087,
      "step": 124
    },
    {
      "epoch": 41.67,
      "learning_rate": 1.3742690058479533e-05,
      "loss": 0.5203,
      "step": 125
    },
    {
      "epoch": 42.0,
      "learning_rate": 1.3684210526315791e-05,
      "loss": 0.5061,
      "step": 126
    },
    {
      "epoch": 42.33,
      "learning_rate": 1.362573099415205e-05,
      "loss": 0.4992,
      "step": 127
    },
    {
      "epoch": 42.67,
      "learning_rate": 1.3567251461988304e-05,
      "loss": 0.5293,
      "step": 128
    },
    {
      "epoch": 43.0,
      "learning_rate": 1.3508771929824562e-05,
      "loss": 0.7778,
      "step": 129
    },
    {
      "epoch": 43.33,
      "learning_rate": 1.345029239766082e-05,
      "loss": 0.5198,
      "step": 130
    },
    {
      "epoch": 43.33,
      "eval_accuracy_dropoff": 0.3656810715188274,
      "eval_accuracy_undropoff": 0.9866212601635103,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.28375204453179664,
      "eval_iou_undropoff": 0.9584546122881992,
      "eval_iou_unlabeled": 0.0,
      "eval_loss": 0.48896968364715576,
      "eval_mean_accuracy": 0.6761511658411689,
      "eval_mean_iou": 0.41406888560666527,
      "eval_overall_accuracy": 0.9591272989908854,
      "eval_runtime": 1.7404,
      "eval_samples_per_second": 8.619,
      "eval_steps_per_second": 0.575,
      "step": 130
    },
    {
      "epoch": 43.67,
      "learning_rate": 1.3391812865497079e-05,
      "loss": 0.4739,
      "step": 131
    },
    {
      "epoch": 44.0,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 0.7511,
      "step": 132
    },
    {
      "epoch": 44.33,
      "learning_rate": 1.3274853801169591e-05,
      "loss": 0.4826,
      "step": 133
    },
    {
      "epoch": 44.67,
      "learning_rate": 1.321637426900585e-05,
      "loss": 0.5534,
      "step": 134
    },
    {
      "epoch": 45.0,
      "learning_rate": 1.3157894736842108e-05,
      "loss": 0.5636,
      "step": 135
    },
    {
      "epoch": 45.33,
      "learning_rate": 1.3099415204678362e-05,
      "loss": 0.5102,
      "step": 136
    },
    {
      "epoch": 45.67,
      "learning_rate": 1.304093567251462e-05,
      "loss": 0.485,
      "step": 137
    },
    {
      "epoch": 46.0,
      "learning_rate": 1.2982456140350879e-05,
      "loss": 0.486,
      "step": 138
    },
    {
      "epoch": 46.33,
      "learning_rate": 1.2923976608187137e-05,
      "loss": 0.4934,
      "step": 139
    },
    {
      "epoch": 46.67,
      "learning_rate": 1.2865497076023392e-05,
      "loss": 0.5077,
      "step": 140
    },
    {
      "epoch": 46.67,
      "eval_accuracy_dropoff": 0.3572322925999954,
      "eval_accuracy_undropoff": 0.986647337503579,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.27731853040841803,
      "eval_iou_undropoff": 0.95811562022686,
      "eval_iou_unlabeled": 0.0,
      "eval_loss": 0.48549026250839233,
      "eval_mean_accuracy": 0.6719398150517872,
      "eval_mean_iou": 0.4118113835450927,
      "eval_overall_accuracy": 0.9587781270345052,
      "eval_runtime": 1.7229,
      "eval_samples_per_second": 8.706,
      "eval_steps_per_second": 0.58,
      "step": 140
    },
    {
      "epoch": 47.0,
      "learning_rate": 1.280701754385965e-05,
      "loss": 0.647,
      "step": 141
    },
    {
      "epoch": 47.33,
      "learning_rate": 1.2748538011695908e-05,
      "loss": 0.4891,
      "step": 142
    },
    {
      "epoch": 47.67,
      "learning_rate": 1.2690058479532166e-05,
      "loss": 0.4703,
      "step": 143
    },
    {
      "epoch": 48.0,
      "learning_rate": 1.263157894736842e-05,
      "loss": 0.7596,
      "step": 144
    },
    {
      "epoch": 48.33,
      "learning_rate": 1.2573099415204679e-05,
      "loss": 0.4825,
      "step": 145
    },
    {
      "epoch": 48.67,
      "learning_rate": 1.2514619883040937e-05,
      "loss": 0.4702,
      "step": 146
    },
    {
      "epoch": 49.0,
      "learning_rate": 1.2456140350877195e-05,
      "loss": 0.6684,
      "step": 147
    },
    {
      "epoch": 49.33,
      "learning_rate": 1.239766081871345e-05,
      "loss": 0.4681,
      "step": 148
    },
    {
      "epoch": 49.67,
      "learning_rate": 1.2339181286549708e-05,
      "loss": 0.497,
      "step": 149
    },
    {
      "epoch": 50.0,
      "learning_rate": 1.2280701754385966e-05,
      "loss": 0.4817,
      "step": 150
    },
    {
      "epoch": 50.0,
      "eval_accuracy_dropoff": 0.36020745744020954,
      "eval_accuracy_undropoff": 0.9863945469620963,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.27843880695086975,
      "eval_iou_undropoff": 0.9579983692751803,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.47096526622772217,
      "eval_mean_accuracy": 0.673301002201153,
      "eval_mean_iou": 0.6182185881130251,
      "eval_overall_accuracy": 0.9586682637532552,
      "eval_runtime": 1.7512,
      "eval_samples_per_second": 8.565,
      "eval_steps_per_second": 0.571,
      "step": 150
    },
    {
      "epoch": 50.33,
      "learning_rate": 1.2222222222222224e-05,
      "loss": 0.4865,
      "step": 151
    },
    {
      "epoch": 50.67,
      "learning_rate": 1.216374269005848e-05,
      "loss": 0.4611,
      "step": 152
    },
    {
      "epoch": 51.0,
      "learning_rate": 1.2105263157894737e-05,
      "loss": 0.5011,
      "step": 153
    },
    {
      "epoch": 51.33,
      "learning_rate": 1.2046783625730995e-05,
      "loss": 0.4967,
      "step": 154
    },
    {
      "epoch": 51.67,
      "learning_rate": 1.1988304093567253e-05,
      "loss": 0.4445,
      "step": 155
    },
    {
      "epoch": 52.0,
      "learning_rate": 1.192982456140351e-05,
      "loss": 0.7343,
      "step": 156
    },
    {
      "epoch": 52.33,
      "learning_rate": 1.1871345029239766e-05,
      "loss": 0.4597,
      "step": 157
    },
    {
      "epoch": 52.67,
      "learning_rate": 1.1812865497076024e-05,
      "loss": 0.5019,
      "step": 158
    },
    {
      "epoch": 53.0,
      "learning_rate": 1.1754385964912282e-05,
      "loss": 0.4556,
      "step": 159
    },
    {
      "epoch": 53.33,
      "learning_rate": 1.1695906432748539e-05,
      "loss": 0.4713,
      "step": 160
    },
    {
      "epoch": 53.33,
      "eval_accuracy_dropoff": 0.3479047487766214,
      "eval_accuracy_undropoff": 0.9886552926888718,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.27947052255677257,
      "eval_iou_undropoff": 0.9596627946110987,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.46686360239982605,
      "eval_mean_accuracy": 0.6682800207327466,
      "eval_mean_iou": 0.6195666585839357,
      "eval_overall_accuracy": 0.960284169514974,
      "eval_runtime": 1.8139,
      "eval_samples_per_second": 8.27,
      "eval_steps_per_second": 0.551,
      "step": 160
    },
    {
      "epoch": 53.67,
      "learning_rate": 1.1637426900584797e-05,
      "loss": 0.4486,
      "step": 161
    },
    {
      "epoch": 54.0,
      "learning_rate": 1.1578947368421053e-05,
      "loss": 0.7127,
      "step": 162
    },
    {
      "epoch": 54.33,
      "learning_rate": 1.1520467836257312e-05,
      "loss": 0.4508,
      "step": 163
    },
    {
      "epoch": 54.67,
      "learning_rate": 1.1461988304093568e-05,
      "loss": 0.4438,
      "step": 164
    },
    {
      "epoch": 55.0,
      "learning_rate": 1.1403508771929826e-05,
      "loss": 0.464,
      "step": 165
    },
    {
      "epoch": 55.33,
      "learning_rate": 1.1345029239766083e-05,
      "loss": 0.4426,
      "step": 166
    },
    {
      "epoch": 55.67,
      "learning_rate": 1.128654970760234e-05,
      "loss": 0.4411,
      "step": 167
    },
    {
      "epoch": 56.0,
      "learning_rate": 1.1228070175438597e-05,
      "loss": 0.71,
      "step": 168
    },
    {
      "epoch": 56.33,
      "learning_rate": 1.1169590643274855e-05,
      "loss": 0.4387,
      "step": 169
    },
    {
      "epoch": 56.67,
      "learning_rate": 1.1111111111111113e-05,
      "loss": 0.4516,
      "step": 170
    },
    {
      "epoch": 56.67,
      "eval_accuracy_dropoff": 0.3265214694327659,
      "eval_accuracy_undropoff": 0.9906150846236295,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.2715210912425493,
      "eval_iou_undropoff": 0.9606413422410078,
      "eval_iou_unlabeled": 0.0,
      "eval_loss": 0.4485587179660797,
      "eval_mean_accuracy": 0.6585682770281976,
      "eval_mean_iou": 0.41072081116118575,
      "eval_overall_accuracy": 0.9612103780110677,
      "eval_runtime": 1.7967,
      "eval_samples_per_second": 8.349,
      "eval_steps_per_second": 0.557,
      "step": 170
    },
    {
      "epoch": 57.0,
      "learning_rate": 1.105263157894737e-05,
      "loss": 0.6996,
      "step": 171
    },
    {
      "epoch": 57.33,
      "learning_rate": 1.0994152046783626e-05,
      "loss": 0.456,
      "step": 172
    },
    {
      "epoch": 57.67,
      "learning_rate": 1.0935672514619884e-05,
      "loss": 0.4341,
      "step": 173
    },
    {
      "epoch": 58.0,
      "learning_rate": 1.0877192982456142e-05,
      "loss": 0.7256,
      "step": 174
    },
    {
      "epoch": 58.33,
      "learning_rate": 1.0818713450292399e-05,
      "loss": 0.4225,
      "step": 175
    },
    {
      "epoch": 58.67,
      "learning_rate": 1.0760233918128655e-05,
      "loss": 0.471,
      "step": 176
    },
    {
      "epoch": 59.0,
      "learning_rate": 1.0701754385964913e-05,
      "loss": 0.4369,
      "step": 177
    },
    {
      "epoch": 59.33,
      "learning_rate": 1.0643274853801172e-05,
      "loss": 0.4481,
      "step": 178
    },
    {
      "epoch": 59.67,
      "learning_rate": 1.0584795321637428e-05,
      "loss": 0.4331,
      "step": 179
    },
    {
      "epoch": 60.0,
      "learning_rate": 1.0526315789473684e-05,
      "loss": 0.4059,
      "step": 180
    },
    {
      "epoch": 60.0,
      "eval_accuracy_dropoff": 0.31866427734509617,
      "eval_accuracy_undropoff": 0.9909349311824317,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.26651615227572956,
      "eval_iou_undropoff": 0.9606124109353421,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.4360799789428711,
      "eval_mean_accuracy": 0.6547996042637639,
      "eval_mean_iou": 0.6135642816055358,
      "eval_overall_accuracy": 0.9611681620279948,
      "eval_runtime": 1.686,
      "eval_samples_per_second": 8.897,
      "eval_steps_per_second": 0.593,
      "step": 180
    },
    {
      "epoch": 60.33,
      "learning_rate": 1.0467836257309943e-05,
      "loss": 0.4479,
      "step": 181
    },
    {
      "epoch": 60.67,
      "learning_rate": 1.04093567251462e-05,
      "loss": 0.4304,
      "step": 182
    },
    {
      "epoch": 61.0,
      "learning_rate": 1.0350877192982459e-05,
      "loss": 0.4971,
      "step": 183
    },
    {
      "epoch": 61.33,
      "learning_rate": 1.0292397660818714e-05,
      "loss": 0.4302,
      "step": 184
    },
    {
      "epoch": 61.67,
      "learning_rate": 1.0233918128654972e-05,
      "loss": 0.4735,
      "step": 185
    },
    {
      "epoch": 62.0,
      "learning_rate": 1.017543859649123e-05,
      "loss": 0.3974,
      "step": 186
    },
    {
      "epoch": 62.33,
      "learning_rate": 1.0116959064327488e-05,
      "loss": 0.4172,
      "step": 187
    },
    {
      "epoch": 62.67,
      "learning_rate": 1.0058479532163743e-05,
      "loss": 0.4458,
      "step": 188
    },
    {
      "epoch": 63.0,
      "learning_rate": 1e-05,
      "loss": 0.6849,
      "step": 189
    },
    {
      "epoch": 63.33,
      "learning_rate": 9.941520467836257e-06,
      "loss": 0.4142,
      "step": 190
    },
    {
      "epoch": 63.33,
      "eval_accuracy_dropoff": 0.3088657614813794,
      "eval_accuracy_undropoff": 0.9917302900545283,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.2620841577885431,
      "eval_iou_undropoff": 0.9609605443916848,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.42670688033103943,
      "eval_mean_accuracy": 0.6502980257679538,
      "eval_mean_iou": 0.611522351090114,
      "eval_overall_accuracy": 0.9614944458007812,
      "eval_runtime": 1.7659,
      "eval_samples_per_second": 8.494,
      "eval_steps_per_second": 0.566,
      "step": 190
    },
    {
      "epoch": 63.67,
      "learning_rate": 9.883040935672515e-06,
      "loss": 0.4708,
      "step": 191
    },
    {
      "epoch": 64.0,
      "learning_rate": 9.824561403508772e-06,
      "loss": 0.6601,
      "step": 192
    },
    {
      "epoch": 64.33,
      "learning_rate": 9.76608187134503e-06,
      "loss": 0.4291,
      "step": 193
    },
    {
      "epoch": 64.67,
      "learning_rate": 9.707602339181286e-06,
      "loss": 0.4169,
      "step": 194
    },
    {
      "epoch": 65.0,
      "learning_rate": 9.649122807017545e-06,
      "loss": 0.4025,
      "step": 195
    },
    {
      "epoch": 65.33,
      "learning_rate": 9.590643274853801e-06,
      "loss": 0.415,
      "step": 196
    },
    {
      "epoch": 65.67,
      "learning_rate": 9.532163742690059e-06,
      "loss": 0.4125,
      "step": 197
    },
    {
      "epoch": 66.0,
      "learning_rate": 9.473684210526315e-06,
      "loss": 0.4011,
      "step": 198
    },
    {
      "epoch": 66.33,
      "learning_rate": 9.415204678362574e-06,
      "loss": 0.3921,
      "step": 199
    },
    {
      "epoch": 66.67,
      "learning_rate": 9.35672514619883e-06,
      "loss": 0.4393,
      "step": 200
    },
    {
      "epoch": 66.67,
      "eval_accuracy_dropoff": 0.2767822271233947,
      "eval_accuracy_undropoff": 0.9940290874101795,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.24518308597943494,
      "eval_iou_undropoff": 0.9618027430695911,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.4188476800918579,
      "eval_mean_accuracy": 0.6354056572667871,
      "eval_mean_iou": 0.603492914524513,
      "eval_overall_accuracy": 0.9622708638509114,
      "eval_runtime": 1.8501,
      "eval_samples_per_second": 8.108,
      "eval_steps_per_second": 0.541,
      "step": 200
    },
    {
      "epoch": 67.0,
      "learning_rate": 9.298245614035088e-06,
      "loss": 0.669,
      "step": 201
    },
    {
      "epoch": 67.33,
      "learning_rate": 9.239766081871345e-06,
      "loss": 0.4048,
      "step": 202
    },
    {
      "epoch": 67.67,
      "learning_rate": 9.181286549707603e-06,
      "loss": 0.4138,
      "step": 203
    },
    {
      "epoch": 68.0,
      "learning_rate": 9.12280701754386e-06,
      "loss": 0.3792,
      "step": 204
    },
    {
      "epoch": 68.33,
      "learning_rate": 9.064327485380117e-06,
      "loss": 0.4212,
      "step": 205
    },
    {
      "epoch": 68.67,
      "learning_rate": 9.005847953216374e-06,
      "loss": 0.4216,
      "step": 206
    },
    {
      "epoch": 69.0,
      "learning_rate": 8.947368421052632e-06,
      "loss": 0.6528,
      "step": 207
    },
    {
      "epoch": 69.33,
      "learning_rate": 8.888888888888888e-06,
      "loss": 0.4081,
      "step": 208
    },
    {
      "epoch": 69.67,
      "learning_rate": 8.830409356725146e-06,
      "loss": 0.4353,
      "step": 209
    },
    {
      "epoch": 70.0,
      "learning_rate": 8.771929824561405e-06,
      "loss": 0.4071,
      "step": 210
    },
    {
      "epoch": 70.0,
      "eval_accuracy_dropoff": 0.31381096790497853,
      "eval_accuracy_undropoff": 0.9917361441512784,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.2663089040962352,
      "eval_iou_undropoff": 0.9611795984188876,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.422432005405426,
      "eval_mean_accuracy": 0.6527735560281285,
      "eval_mean_iou": 0.6137442512575614,
      "eval_overall_accuracy": 0.961719004313151,
      "eval_runtime": 1.7566,
      "eval_samples_per_second": 8.539,
      "eval_steps_per_second": 0.569,
      "step": 210
    },
    {
      "epoch": 70.33,
      "learning_rate": 8.713450292397661e-06,
      "loss": 0.3994,
      "step": 211
    },
    {
      "epoch": 70.67,
      "learning_rate": 8.654970760233919e-06,
      "loss": 0.4181,
      "step": 212
    },
    {
      "epoch": 71.0,
      "learning_rate": 8.596491228070176e-06,
      "loss": 0.6651,
      "step": 213
    },
    {
      "epoch": 71.33,
      "learning_rate": 8.538011695906434e-06,
      "loss": 0.4053,
      "step": 214
    },
    {
      "epoch": 71.67,
      "learning_rate": 8.47953216374269e-06,
      "loss": 0.4051,
      "step": 215
    },
    {
      "epoch": 72.0,
      "learning_rate": 8.421052631578948e-06,
      "loss": 0.6544,
      "step": 216
    },
    {
      "epoch": 72.33,
      "learning_rate": 8.362573099415205e-06,
      "loss": 0.3965,
      "step": 217
    },
    {
      "epoch": 72.67,
      "learning_rate": 8.304093567251463e-06,
      "loss": 0.4129,
      "step": 218
    },
    {
      "epoch": 73.0,
      "learning_rate": 8.24561403508772e-06,
      "loss": 0.629,
      "step": 219
    },
    {
      "epoch": 73.33,
      "learning_rate": 8.187134502923977e-06,
      "loss": 0.4009,
      "step": 220
    },
    {
      "epoch": 73.33,
      "eval_accuracy_dropoff": 0.3167344406919843,
      "eval_accuracy_undropoff": 0.991247327072643,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.2664045101231395,
      "eval_iou_undropoff": 0.9608319701708583,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.42051607370376587,
      "eval_mean_accuracy": 0.6539908838823136,
      "eval_mean_iou": 0.613618240146999,
      "eval_overall_accuracy": 0.9613812764485677,
      "eval_runtime": 1.867,
      "eval_samples_per_second": 8.034,
      "eval_steps_per_second": 0.536,
      "step": 220
    },
    {
      "epoch": 73.67,
      "learning_rate": 8.128654970760235e-06,
      "loss": 0.3928,
      "step": 221
    },
    {
      "epoch": 74.0,
      "learning_rate": 8.070175438596492e-06,
      "loss": 0.4003,
      "step": 222
    },
    {
      "epoch": 74.33,
      "learning_rate": 8.01169590643275e-06,
      "loss": 0.4068,
      "step": 223
    },
    {
      "epoch": 74.67,
      "learning_rate": 7.953216374269006e-06,
      "loss": 0.3951,
      "step": 224
    },
    {
      "epoch": 75.0,
      "learning_rate": 7.894736842105265e-06,
      "loss": 0.3757,
      "step": 225
    },
    {
      "epoch": 75.33,
      "learning_rate": 7.836257309941521e-06,
      "loss": 0.4151,
      "step": 226
    },
    {
      "epoch": 75.67,
      "learning_rate": 7.77777777777778e-06,
      "loss": 0.3908,
      "step": 227
    },
    {
      "epoch": 76.0,
      "learning_rate": 7.719298245614036e-06,
      "loss": 0.3951,
      "step": 228
    },
    {
      "epoch": 76.33,
      "learning_rate": 7.660818713450294e-06,
      "loss": 0.3945,
      "step": 229
    },
    {
      "epoch": 76.67,
      "learning_rate": 7.60233918128655e-06,
      "loss": 0.4043,
      "step": 230
    },
    {
      "epoch": 76.67,
      "eval_accuracy_dropoff": 0.3107553932042181,
      "eval_accuracy_undropoff": 0.9920240592732618,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.265114022794759,
      "eval_iou_undropoff": 0.961326747557732,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.4147787392139435,
      "eval_mean_accuracy": 0.6513897262387399,
      "eval_mean_iou": 0.6132203851762454,
      "eval_overall_accuracy": 0.9618588765462239,
      "eval_runtime": 1.7618,
      "eval_samples_per_second": 8.514,
      "eval_steps_per_second": 0.568,
      "step": 230
    },
    {
      "epoch": 77.0,
      "learning_rate": 7.5438596491228074e-06,
      "loss": 0.3996,
      "step": 231
    },
    {
      "epoch": 77.33,
      "learning_rate": 7.485380116959065e-06,
      "loss": 0.3915,
      "step": 232
    },
    {
      "epoch": 77.67,
      "learning_rate": 7.426900584795322e-06,
      "loss": 0.3897,
      "step": 233
    },
    {
      "epoch": 78.0,
      "learning_rate": 7.368421052631579e-06,
      "loss": 0.6263,
      "step": 234
    },
    {
      "epoch": 78.33,
      "learning_rate": 7.309941520467837e-06,
      "loss": 0.3799,
      "step": 235
    },
    {
      "epoch": 78.67,
      "learning_rate": 7.251461988304094e-06,
      "loss": 0.426,
      "step": 236
    },
    {
      "epoch": 79.0,
      "learning_rate": 7.192982456140352e-06,
      "loss": 0.3634,
      "step": 237
    },
    {
      "epoch": 79.33,
      "learning_rate": 7.134502923976608e-06,
      "loss": 0.3855,
      "step": 238
    },
    {
      "epoch": 79.67,
      "learning_rate": 7.0760233918128665e-06,
      "loss": 0.3971,
      "step": 239
    },
    {
      "epoch": 80.0,
      "learning_rate": 7.017543859649123e-06,
      "loss": 0.6302,
      "step": 240
    },
    {
      "epoch": 80.0,
      "eval_accuracy_dropoff": 0.3104911893767087,
      "eval_accuracy_undropoff": 0.9920998964357066,
      "eval_accuracy_unlabeled": NaN,
      "eval_iou_dropoff": 0.26525905680652806,
      "eval_iou_undropoff": 0.9613888343805383,
      "eval_iou_unlabeled": NaN,
      "eval_loss": 0.41156673431396484,
      "eval_mean_accuracy": 0.6512955429062077,
      "eval_mean_iou": 0.6133239455935332,
      "eval_overall_accuracy": 0.961919657389323,
      "eval_runtime": 1.7273,
      "eval_samples_per_second": 8.684,
      "eval_steps_per_second": 0.579,
      "step": 240
    }
  ],
  "max_steps": 360,
  "num_train_epochs": 120,
  "total_flos": 4.76793375031296e+16,
  "trial_name": null,
  "trial_params": null
}
|
|