aliFrancis committed on
Commit
1919121
·
1 Parent(s): 256952d

Add models

Browse files
full-models/SEnSeIv2-DeepLabv3-S2-unambiguous/config.yaml ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ NAME: SEnSeIv2-DeepLabv3-S2-unambiguous
2
+
3
+ #-------------
4
+ # Model options
5
+ #-------------
6
+ PATCH_SIZE: 512
7
+ SEnSeIv2: 'models/sensei-configs/senseiv2-medium.yaml'
8
+ MODEL_TYPE: 'DeepLabv3+'
9
+ DEEPLAB_CONFIG: 'resnet50'
10
+ RECOVERY_MODULE: false
11
+ CLASSES: 4
12
+ MULTIMODAL: false
13
+ NUM_CHANNELS: null # Set to null for sensor independent models
14
+
15
+ #----------------
16
+ #training options (not needed for inference)
17
+ #----------------
18
+ EPOCHS: 70
19
+ BATCH_SIZE: 8
20
+ PHASES: [0, 1, 2, 45, 60, 65]
21
+ ACCUMULATE_STEPS: [1, 1, 1, 1, 4, 8]
22
+ LR: [0.000005, 0.00002, 0.0001, 0.00001, 0.00001, 0.000002]
23
+ EPSILON: 0.000001
24
+ WEIGHT_DECAY: 0.0001
25
+ L1_REG: 0
26
+ RECOVERY_WARMUP_STEPS: 5000
27
+ RECOVERY_LOSS_FACTOR: 1
28
+ LOSS: 'ambiguous_crossentropy_loss'
29
+
30
+
31
+ #Data options
32
+ TRAIN_DIRS:
33
+ - '/home/ali/data/cloud-masking/processed/S2CS12/train'
34
+ VALID_DIRS:
35
+ - '/home/ali/data/cloud-masking/processed/S2CS12/valid'
36
+ MIN_BANDS: 3
37
+ MAX_BANDS: 13
full-models/SEnSeIv2-DeepLabv3-S2-unambiguous/weights.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:96c36be3b6dc2da504dc3f96cdc4006cd8ee29bff48a8a50c783730404388ecd
3
+ size 109219821
full-models/SEnSeIv2-SegFormerB2-S2-ambiguous/config.yaml ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ NAME: SEnSeIv2-SegFormerB2-S2-ambiguous
2
+
3
+ #-------------
4
+ #Model options
5
+ #-------------
6
+ PATCH_SIZE: 512
7
+ SEnSeIv2: 'models/sensei-configs/senseiv2-medium.yaml'
8
+ MODEL_TYPE: 'Segformer'
9
+ SEGFORMER_CONFIG: 'nvidia/mit-b2'
10
+ RECOVERY_MODULE: false
11
+ CLASSES: 7
12
+ MULTIMODAL: false
13
+ NUM_CHANNELS: null # Set to null for sensor independent models
14
+
15
+ #----------------
16
+ #training options (not needed for inference)
17
+ #----------------
18
+ EPOCHS: 105
19
+ BATCH_SIZE: 8
20
+ PHASES: [0, 1, 2, 75, 95, 110]
21
+ ACCUMULATE_STEPS: [1, 1, 1, 1, 1, 1]
22
+ LR: [0.000005, 0.00002, 0.0001, 0.00002, 0.00001, 0.000002]
23
+ EPSILON: 0.000001
24
+ WEIGHT_DECAY: 0.0001
25
+ L1_REG: 0
26
+ RECOVERY_WARMUP_STEPS: 10000
27
+ RECOVERY_LOSS_FACTOR: 1
28
+ LOSS: 'ambiguous_crossentropy_loss'
29
+
30
+ # Data options
31
+ # Note: L7Irish and L8CCA are very large, so repeat other datasets to make up for it
32
+ # Training epochs will max out at 4000 steps, so repeating datasets beyond that just
33
+ # changes relative frequency of each dataset appearing.
34
+ TRAIN_DIRS:
35
+ - '/home/ali/data/cloud-masking/processed/S2CS12/train'
36
+ VALID_DIRS:
37
+ - '/home/ali/data/cloud-masking/processed/S2CS12/valid'
38
+ MIN_BANDS: 3
39
+ MAX_BANDS: 13
full-models/SEnSeIv2-SegFormerB2-S2-ambiguous/weights.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:215580a6f516714705cf26319eba3da824cab6a67a5972cc5ae5816aa5148ceb
3
+ size 111716650
full-models/SEnSeIv2-SegFormerB2-alldata-ambiguous/config.yaml ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ NAME: SEnSeIv2-SegFormerB2-alldata-ambiguous
2
+
3
+ #-------------
4
+ # Model options
5
+ #-------------
6
+ PATCH_SIZE: 512
7
+ SEnSeIv2: 'models/sensei-configs/senseiv2-medium.yaml'
8
+ MODEL_TYPE: 'Segformer'
9
+ SEGFORMER_CONFIG: 'nvidia/mit-b2'
10
+ RECOVERY_MODULE: false
11
+ CLASSES: 7
12
+ MULTIMODAL: false
13
+ NUM_CHANNELS: null # Set to null for sensor independent models
14
+
15
+ #----------------
16
+ # Training options (not needed for inference)
17
+ #----------------
18
+ EPOCHS: 105
19
+ BATCH_SIZE: 8
20
+ PHASES: [0, 1, 2, 75, 95, 110]
21
+ ACCUMULATE_STEPS: [1, 1, 1, 1, 1, 1]
22
+ LR: [0.000005, 0.00002, 0.0001, 0.00002, 0.00001, 0.000002]
23
+ EPSILON: 0.000001
24
+ WEIGHT_DECAY: 0.0001
25
+ L1_REG: 0
26
+ RECOVERY_WARMUP_STEPS: 10000
27
+ RECOVERY_LOSS_FACTOR: 1
28
+ LOSS: 'ambiguous_crossentropy_loss'
29
+
30
+ #-------------
31
+ # Data options
32
+ # Note: L7Irish and L8CCA are very large, so repeat other datasets to make up for it
33
+ # Training epochs will max out at 4000 steps, so repeating datasets beyond that just
34
+ # changes relative frequency of each dataset appearing.
35
+ #-------------
36
+ TRAIN_DIRS:
37
+ - '/home/ali/data/cloud-masking/processed/S2CS12/train'
38
+ - '/home/ali/data/cloud-masking/processed/S2CS12/train' # Repeat to boost sampling frequency
39
+ - '/home/ali/data/cloud-masking/processed/S2CS12/train'
40
+ - '/home/ali/data/cloud-masking/processed/S2CS12/train'
41
+ - '/home/ali/data/cloud-masking/processed/S2CS12/train'
42
+ - '/home/ali/data/cloud-masking/processed/S2IRIS'
43
+ - '/home/ali/data/cloud-masking/processed/S2IRIS'
44
+ - '/home/ali/data/cloud-masking/processed/S2IRIS'
45
+ - '/home/ali/data/cloud-masking/processed/S2IRIS'
46
+ - '/home/ali/data/cloud-masking/processed/S2IRIS'
47
+ - '/home/ali/data/cloud-masking/processed/S2KZ/train'
48
+ - '/home/ali/data/cloud-masking/processed/L8SPARCS'
49
+ - '/home/ali/data/cloud-masking/processed/L8SPARCS'
50
+ - '/home/ali/data/cloud-masking/processed/L8SPARCS'
51
+ - '/home/ali/data/cloud-masking/processed/L8SPARCS'
52
+ - '/home/ali/data/cloud-masking/processed/L8SPARCS'
53
+ - '/home/ali/data/cloud-masking/processed/L8SPARCS'
54
+ - '/home/ali/data/cloud-masking/processed/L8SPARCS'
55
+ - '/home/ali/data/cloud-masking/processed/L8SPARCS-60m'
56
+ - '/home/ali/data/cloud-masking/processed/L8SPARCS-60m'
57
+ - '/home/ali/data/cloud-masking/processed/L8SPARCS-60m'
58
+ - '/home/ali/data/cloud-masking/processed/L8SPARCS-60m'
59
+ - '/home/ali/data/cloud-masking/processed/L8SPARCS-60m'
60
+ - '/home/ali/data/cloud-masking/processed/L8CCA'
61
+ - '/home/ali/data/cloud-masking/processed/L7Irish'
62
+ - '/home/ali/data/cloud-masking/processed/CP2'
63
+ VALID_DIRS:
64
+ - '/home/ali/data/cloud-masking/processed/S2CS12/valid'
65
+ MIN_BANDS: 3
66
+ MAX_BANDS: 13
full-models/SEnSeIv2-SegFormerB2-alldata-ambiguous/weights.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f43a0c23eb4b22cf09c1c0d59fc3d2f050f164f7c1a43f18e4974458926c57d9
3
+ size 111716650
full-models/SegFormerB2-S2-unambiguous/config.yaml ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ NAME: SegformerB2
2
+
3
+ #-------------
4
+ # Model options
5
+ #-------------
6
+ PATCH_SIZE: 512
7
+ SEnSeIv2: false
8
+ MODEL_TYPE: 'Segformer'
9
+ SEGFORMER_CONFIG: 'nvidia/mit-b2'
10
+ RECOVERY_MODULE: null
11
+ MULTIMODAL: false
12
+ NUM_CHANNELS: 13 # Set to null for sensor independent models
13
+ CLASSES: 4
14
+
15
+ #----------------
16
+ # Training options (not needed for inference)
17
+ #----------------
18
+ EPOCHS: 60
19
+ BATCH_SIZE: 8
20
+ PHASES: [0, 1, 2, 40, 50, 55]
21
+ ACCUMULATE_STEPS: [1, 1, 1, 1, 4, 8]
22
+ LR: [0.000005, 0.00002, 0.0001, 0.00001, 0.00001, 0.000002]
23
+ EPSILON: 0.000001
24
+ WEIGHT_DECAY: 0.0001
25
+ L1_REG: 0
26
+ RECOVERY_WARMUP_STEPS: 0
27
+ RECOVERY_LOSS_FACTOR: 0
28
+ LOSS: 'ambiguous_crossentropy_loss' # Not actually ambiguous, because labels are not ambiguous
29
+
30
+ #------------
31
+ #Data options
32
+ #------------
33
+ TRAIN_DIRS:
34
+ - '/home/ali/data/cloud-masking/processed/S2CS12/train'
35
+ VALID_DIRS:
36
+ - '/home/ali/data/cloud-masking/processed/S2CS12/valid'
37
+ MIN_BANDS: null
38
+ MAX_BANDS: null
full-models/SegFormerB2-S2-unambiguous/weights.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a8378575b89c7dddf237edddb9981fa6c3cccc63490976884bf527eaaf9c191e
3
+ size 109685259
sensei-configs/recovery-module.yaml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ preconcatenation_layer_sizes:
2
+ - 256
3
+ - 32
4
+ postconcatenation_layer_sizes:
5
+ - 32
6
+ sampling_rate: 10
sensei-configs/senseiv2-medium.yaml ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ descriptors:
3
+ style: SEnSeIv2
4
+ N_embeddings: 32
5
+ final_size: 74
6
+ blocks:
7
+ -
8
+ type: GLOBAL_STATS
9
+ percentiles:
10
+ - 0.01
11
+ - 0.1
12
+ - 0.5
13
+ - 0.9
14
+ - 0.99
15
+ -
16
+ type: FCL
17
+ layer_sizes:
18
+ - 128
19
+ - 128
20
+ - 128
21
+ skips: add
22
+ -
23
+ type: ATTENTION
24
+ num_transformerencoders: 2
25
+ intermediate_size: 256
26
+ num_heads: 4
27
+ dims_per_head: 32
28
+ skips: true
29
+ dropout: 0.2
30
+ -
31
+ type: BAND_EMBEDDING
32
+ embedding_dims: 32 # number of output channels
33
+ head_layer_sizes:
34
+ - 128
35
+ skips_heads: false
36
+ normalize: true