MexicanVanGogh commited on
Commit
5c493ea
1 Parent(s): 8e9fd14

End of training

Browse files
Files changed (4)
  1. README.md +93 -0
  2. config.json +96 -0
  3. pytorch_model.bin +3 -0
  4. training_args.bin +3 -0
README.md ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: other
3
+ base_model: nvidia/mit-b0
4
+ tags:
5
+ - vision
6
+ - image-segmentation
7
+ - generated_from_trainer
8
+ model-index:
9
+ - name: segformer-b0-finetuned-segments-greenhouse-oct-23
10
+ results: []
11
+ ---
12
+
13
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
14
+ should probably proofread and complete it, then remove this comment. -->
15
+
16
+ # segformer-b0-finetuned-segments-greenhouse-oct-23
17
+
18
+ This model is a fine-tuned version of [nvidia/mit-b0](https://huggingface.co/nvidia/mit-b0) on the MexicanVanGogh/greenhouse dataset.
19
+ It achieves the following results on the evaluation set:
20
+ - Loss: 0.7058
21
+ - Mean Iou: 0.2227
22
+ - Mean Accuracy: 0.2804
23
+ - Overall Accuracy: 0.9101
24
+ - Accuracy Unlabeled: nan
25
+ - Accuracy Object: nan
26
+ - Accuracy Road: 0.9378
27
+ - Accuracy Plant: 0.9667
28
+ - Accuracy Iron: 0.0
29
+ - Accuracy Wood: 0.0
30
+ - Accuracy Wall: 0.1932
31
+ - Accuracy Raw Road: nan
32
+ - Accuracy Bottom Wall: 0.0
33
+ - Accuracy Roof: 0.1457
34
+ - Accuracy Grass: 0.0
35
+ - Iou Unlabeled: nan
36
+ - Iou Object: nan
37
+ - Iou Road: 0.9039
38
+ - Iou Plant: 0.8421
39
+ - Iou Iron: 0.0
40
+ - Iou Wood: 0.0
41
+ - Iou Wall: 0.1521
42
+ - Iou Raw Road: 0.0
43
+ - Iou Bottom Wall: 0.0
44
+ - Iou Roof: 0.1061
45
+ - Iou Grass: 0.0
46
+
47
+ ## Model description
48
+
49
+ More information needed
50
+
51
+ ## Intended uses & limitations
52
+
53
+ More information needed
54
+
55
+ ## Training and evaluation data
56
+
57
+ More information needed
58
+
59
+ ## Training procedure
60
+
61
+ ### Training hyperparameters
62
+
63
+ The following hyperparameters were used during training:
64
+ - learning_rate: 6e-05
65
+ - train_batch_size: 2
66
+ - eval_batch_size: 2
67
+ - seed: 42
68
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
69
+ - lr_scheduler_type: linear
70
+ - num_epochs: 30
71
+
72
+ ### Training results
73
+
74
+ | Training Loss | Epoch | Step | Validation Loss | Mean Iou | Mean Accuracy | Overall Accuracy | Accuracy Unlabeled | Accuracy Object | Accuracy Road | Accuracy Plant | Accuracy Iron | Accuracy Wood | Accuracy Wall | Accuracy Raw Road | Accuracy Bottom Wall | Accuracy Roof | Accuracy Grass | Iou Unlabeled | Iou Object | Iou Road | Iou Plant | Iou Iron | Iou Wood | Iou Wall | Iou Raw Road | Iou Bottom Wall | Iou Roof | Iou Grass |
75
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|:-------------:|:----------------:|:------------------:|:---------------:|:-------------:|:--------------:|:-------------:|:-------------:|:-------------:|:-----------------:|:--------------------:|:-------------:|:--------------:|:-------------:|:----------:|:--------:|:---------:|:--------:|:--------:|:--------:|:------------:|:---------------:|:--------:|:---------:|
76
+ | 1.8756 | 2.86 | 20 | 2.0063 | 0.1415 | 0.2269 | 0.8216 | nan | nan | 0.7882 | 0.9674 | 0.0 | 0.0594 | 0.0 | nan | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.7760 | 0.7552 | 0.0 | 0.0256 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 |
77
+ | 1.3624 | 5.71 | 40 | 1.0910 | 0.1715 | 0.2380 | 0.8991 | nan | nan | 0.9206 | 0.9757 | 0.0 | 0.0077 | 0.0 | nan | 0.0 | 0.0 | 0.0 | 0.0 | nan | 0.8888 | 0.8220 | 0.0 | 0.0045 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 |
78
+ | 1.4095 | 8.57 | 60 | 0.9033 | 0.1734 | 0.2392 | 0.9068 | nan | nan | 0.9264 | 0.9873 | 0.0 | 0.0 | 0.0 | nan | 0.0 | 0.0 | 0.0 | 0.0 | nan | 0.9000 | 0.8338 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 |
79
+ | 0.8802 | 11.43 | 80 | 0.7784 | 0.1764 | 0.2414 | 0.9165 | nan | nan | 0.9470 | 0.9823 | 0.0 | 0.0022 | 0.0 | nan | 0.0 | 0.0 | 0.0 | 0.0 | nan | 0.9155 | 0.8463 | 0.0 | 0.0021 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 |
80
+ | 1.0936 | 14.29 | 100 | 0.8060 | 0.1946 | 0.2405 | 0.9132 | nan | nan | 0.9400 | 0.9839 | 0.0 | 0.0 | 0.0 | nan | 0.0 | 0.0 | 0.0 | nan | nan | 0.9100 | 0.8418 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 |
81
+ | 0.8086 | 17.14 | 120 | 0.7786 | 0.1940 | 0.2402 | 0.9115 | nan | nan | 0.9361 | 0.9852 | 0.0 | 0.0 | 0.0 | nan | 0.0 | 0.0006 | 0.0 | nan | nan | 0.9071 | 0.8380 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | 0.0006 | 0.0 |
82
+ | 1.0669 | 20.0 | 140 | 0.7462 | 0.2072 | 0.2562 | 0.9088 | nan | nan | 0.9282 | 0.9853 | 0.0 | 0.0 | 0.0113 | nan | 0.0 | 0.1246 | 0.0 | nan | nan | 0.9010 | 0.8385 | 0.0 | 0.0 | 0.0102 | 0.0 | 0.0 | 0.1155 | 0.0 |
83
+ | 0.7399 | 22.86 | 160 | 0.7328 | 0.2137 | 0.2662 | 0.9080 | nan | nan | 0.9290 | 0.9788 | 0.0 | 0.0 | 0.0814 | nan | 0.0 | 0.1405 | 0.0 | nan | nan | 0.8997 | 0.8389 | 0.0 | 0.0 | 0.0663 | 0.0 | 0.0 | 0.1181 | 0.0 |
84
+ | 0.808 | 25.71 | 180 | 0.7296 | 0.2218 | 0.2797 | 0.9072 | nan | nan | 0.9277 | 0.9742 | 0.0 | 0.0 | 0.1840 | nan | 0.0 | 0.1515 | 0.0 | nan | nan | 0.8981 | 0.8404 | 0.0 | 0.0 | 0.1423 | 0.0 | 0.0 | 0.1155 | 0.0 |
85
+ | 0.8494 | 28.57 | 200 | 0.7058 | 0.2227 | 0.2804 | 0.9101 | nan | nan | 0.9378 | 0.9667 | 0.0 | 0.0 | 0.1932 | nan | 0.0 | 0.1457 | 0.0 | nan | nan | 0.9039 | 0.8421 | 0.0 | 0.0 | 0.1521 | 0.0 | 0.0 | 0.1061 | 0.0 |
86
+
87
+
88
+ ### Framework versions
89
+
90
+ - Transformers 4.34.1
91
+ - Pytorch 2.1.0+cu118
92
+ - Datasets 2.14.5
93
+ - Tokenizers 0.14.1
config.json ADDED
@@ -0,0 +1,96 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "nvidia/mit-b0",
3
+ "architectures": [
4
+ "SegformerForSemanticSegmentation"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.0,
7
+ "classifier_dropout_prob": 0.1,
8
+ "decoder_hidden_size": 256,
9
+ "depths": [
10
+ 2,
11
+ 2,
12
+ 2,
13
+ 2
14
+ ],
15
+ "downsampling_rates": [
16
+ 1,
17
+ 4,
18
+ 8,
19
+ 16
20
+ ],
21
+ "drop_path_rate": 0.1,
22
+ "hidden_act": "gelu",
23
+ "hidden_dropout_prob": 0.0,
24
+ "hidden_sizes": [
25
+ 32,
26
+ 64,
27
+ 160,
28
+ 256
29
+ ],
30
+ "id2label": {
31
+ "0": "unlabeled",
32
+ "1": "object",
33
+ "2": "road",
34
+ "3": "plant",
35
+ "4": "iron",
36
+ "5": "wood",
37
+ "6": "wall",
38
+ "7": "raw_road",
39
+ "8": "bottom_wall",
40
+ "9": "roof",
41
+ "10": "grass"
42
+ },
43
+ "image_size": 224,
44
+ "initializer_range": 0.02,
45
+ "label2id": {
46
+ "bottom_wall": 8,
47
+ "grass": 10,
48
+ "iron": 4,
49
+ "object": 1,
50
+ "plant": 3,
51
+ "raw_road": 7,
52
+ "road": 2,
53
+ "roof": 9,
54
+ "unlabeled": 0,
55
+ "wall": 6,
56
+ "wood": 5
57
+ },
58
+ "layer_norm_eps": 1e-06,
59
+ "mlp_ratios": [
60
+ 4,
61
+ 4,
62
+ 4,
63
+ 4
64
+ ],
65
+ "model_type": "segformer",
66
+ "num_attention_heads": [
67
+ 1,
68
+ 2,
69
+ 5,
70
+ 8
71
+ ],
72
+ "num_channels": 3,
73
+ "num_encoder_blocks": 4,
74
+ "patch_sizes": [
75
+ 7,
76
+ 3,
77
+ 3,
78
+ 3
79
+ ],
80
+ "reshape_last_stage": true,
81
+ "semantic_loss_ignore_index": 255,
82
+ "sr_ratios": [
83
+ 8,
84
+ 4,
85
+ 2,
86
+ 1
87
+ ],
88
+ "strides": [
89
+ 4,
90
+ 2,
91
+ 2,
92
+ 2
93
+ ],
94
+ "torch_dtype": "float32",
95
+ "transformers_version": "4.34.1"
96
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:74d1af5a8b23f99550cbec7e4330581d8b1d277774f695511b3fc4d9869f900c
3
+ size 14941450
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fe2c0b2f54e769816a18a99e4d3995ef0617d9463c8b2d20f861102d1a7e85b3
3
+ size 4600