gonzmg88 committed
Commit 515adb7
1 Parent(s): 8624a86

Upload 4 files
models/hyperstarcop_mag1c_only/config.yaml ADDED
@@ -0,0 +1,52 @@
+experiment_name: f5b_hyper_unetsempos1all_all_15ep_JustMag1c_R1
+seed: None
+resume_from_checkpoint: false
+wandb:
+  wandb_project: starcop-aviris-seg-vitek
+  wandb_entity: dtacs
+  images_logging: wandb
+dataloader:
+  batch_size: 32
+  num_workers: 4
+products_plot:
+- mag1c
+- label
+- pred
+- differences
+plot_samples: 8
+dataset:
+  input_products:
+  - mag1c
+  output_products:
+  - labelbinary
+  use_weight_loss: true
+  weight_loss: weight_mag1c
+  training_size:
+  - 128
+  - 128
+  training_size_overlap:
+  - 64
+  - 64
+  weight_sampling: true
+  root_folder: /home/previtus/Permian/dataset
+  train_csv: train.csv
+model:
+  train: true
+  test: true
+  model_mode: segmentation_output
+  model_type: unet_semseg
+  semseg_backbone: mobilenet_v2
+  num_classes: 1
+  optimizer: adam
+  lr: 0.0001
+  lr_decay: 0.5
+  lr_patience: 4
+  loss: BCEWithLogitsLoss
+  pos_weight: 1
+  early_stopping_patience: 8
+training:
+  accelerator: gpu
+  devices: 1
+  max_epochs: 15
+  val_check_interval: 0.5
+  train_log_every_n_steps: 10
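
Note on this config: in YAML, `seed: None` parses as the literal string "None" rather than a null, since YAML's null keywords are `~`/`null`, not Python's `None`. Below is a minimal, illustrative sketch of how a config like this could be consumed to build a matching model. It is not the repository's own loader; the use of segmentation_models_pytorch is an assumption based on the `unet_semseg` / `mobilenet_v2` fields.

import yaml
import segmentation_models_pytorch as smp  # assumed library, inferred from the field names

with open("models/hyperstarcop_mag1c_only/config.yaml") as f:
    cfg = yaml.safe_load(f)

# Build a U-Net whose input width follows the config: 1 band (mag1c only)
# for this model, 4 bands (mag1c + RGB) for the sibling model further below.
model = smp.Unet(
    encoder_name=cfg["model"]["semseg_backbone"],       # mobilenet_v2
    in_channels=len(cfg["dataset"]["input_products"]),  # 1
    classes=cfg["model"]["num_classes"],                # 1 logit, paired with BCEWithLogitsLoss
)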
models/hyperstarcop_mag1c_only/final_checkpoint_model.ckpt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d4391d5b05f90c411fd459db5bbe4e88650e5ff30ec2eb10d36c66ed0a43137
+size 79987359
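
The `.ckpt` entries in this commit are Git LFS pointer files, not the weights themselves: `version` names the pointer spec, `oid` is the SHA-256 of the real blob, and `size` is its byte count (about 80 MB here). Running `git lfs pull` replaces the pointer with the actual file, after which the checkpoint can be inspected. A sketch, assuming a standard PyTorch Lightning checkpoint layout (the key names are not verified against this particular file):

import torch

# Load on CPU; until LFS materializes the file, only the 3-line pointer above exists.
ckpt = torch.load("models/hyperstarcop_mag1c_only/final_checkpoint_model.ckpt",
                  map_location="cpu")
print(sorted(ckpt.keys()))       # typically 'state_dict', 'epoch', 'optimizer_states', ...
state_dict = ckpt["state_dict"]  # assumed key; standard for Lightning checkpoints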
models/hyperstarcop_mag1c_rgb/config.yaml ADDED
@@ -0,0 +1,344 @@
+wandb_version: 1
+
+_content:
+  desc: null
+  value:
+    dataloader: '{''batch_size'': 32, ''num_workers'': 4}'
+    dataset: '{''input_products'': [''mag1c'', ''TOA_AVIRIS_640nm'', ''TOA_AVIRIS_550nm'',
+      ''TOA_AVIRIS_460nm''], ''output_products'': [''labelbinary''], ''use_weight_loss'':
+      True, ''weight_loss'': ''weight_mag1c'', ''training_size'': [128, 128], ''training_size_overlap'':
+      [64, 64], ''weight_sampling'': True, ''root_folder'': ''/Permian/dataset'',
+      ''train_csv'': ''train.csv''}'
+    experiment_name: f4_hyper_unetsempos1all_all_15ep_R3
+    experiment_path: gs://starcop/experiments/f4_hyper_unetsempos1all_all_15ep_R3/2022-11-10_19-03/
+    model: '{''train'': True, ''test'': True, ''model_mode'': ''segmentation_output'',
+      ''model_type'': ''unet_semseg'', ''semseg_backbone'': ''mobilenet_v2'', ''num_classes'':
+      1, ''optimizer'': ''adam'', ''lr'': 0.0001, ''lr_decay'': 0.5, ''lr_patience'':
+      4, ''loss'': ''BCEWithLogitsLoss'', ''pos_weight'': 1, ''early_stopping_patience'':
+      8}'
+    plot_samples: '8'
+    products_plot:
+    - rgb_aviris
+    - mag1c
+    - label
+    - pred
+    - differences
+    resume_from_checkpoint: 'False'
+    seed: None
+    training: '{''accelerator'': ''gpu'', ''devices'': 1, ''max_epochs'': 15, ''val_check_interval'':
+      0.5, ''train_log_every_n_steps'': 10}'
+    wandb: '{''wandb_project'': ''starcop-aviris-seg-vitek'', ''wandb_entity'': ''dtacs'',
+      ''images_logging'': ''wandb''}'
+_flags_cache:
+  desc: null
+  value:
+    allow_objects: null
+    convert: null
+    readonly: null
+    struct: false
+_metadata:
+  desc: null
+  value: 'ContainerMetadata(ref_type=typing.Any, object_type=<class ''dict''>, optional=True,
+    key=None, flags={''struct'': False}, flags_root=False, resolver_cache=defaultdict(<class
+    ''dict''>, {''now'': {(''%Y-%m-%d'',): ''2022-11-10'', (''%H-%M-%S'',): ''19-03-19'',
+    (''%Y-%m-%d_%H-%M'',): ''2022-11-10_19-03''}}), key_type=typing.Any, element_type=typing.Any)'
+_parent:
+  desc: null
+  value: null
+_wandb:
+  desc: null
+  value:
+    cli_version: 0.13.3
+    framework: lightning
+    is_jupyter_run: false
+    is_kaggle_kernel: false
+    m:
+    - 1: trainer/global_step
+      6:
+      - 3
+    - 1: val_batch._type
+      5: 1
+      6:
+      - 1
+    - 1: val_batch.sha256
+      5: 1
+      6:
+      - 1
+    - 1: val_batch.size
+      5: 1
+      6:
+      - 1
+    - 1: val_batch.path
+      5: 1
+      6:
+      - 1
+    - 1: val_batch.format
+      5: 1
+      6:
+      - 1
+    - 1: val_batch.width
+      5: 1
+      6:
+      - 1
+    - 1: val_batch.height
+      5: 1
+      6:
+      - 1
+    - 1: train_BCEWithLogitsLoss
+      5: 1
+      6:
+      - 1
+    - 1: epoch
+      5: 1
+      6:
+      - 1
+    - 1: val_loss
+      5: 1
+      6:
+      - 1
+    - 1: val_precision
+      5: 1
+      6:
+      - 1
+    - 1: val_recall
+      5: 1
+      6:
+      - 1
+    - 1: val_f1score
+      5: 1
+      6:
+      - 1
+    - 1: val_iou
+      5: 1
+      6:
+      - 1
+    - 1: val_accuracy
+      5: 1
+      6:
+      - 1
+    - 1: val_cohen_kappa
+      5: 1
+      6:
+      - 1
+    - 1: val_balanced_accuracy
+      5: 1
+      6:
+      - 1
+    - 1: val_classification_precision
+      5: 1
+      6:
+      - 1
+    - 1: val_classification_recall
+      5: 1
+      6:
+      - 1
+    - 1: val_classification_f1score
+      5: 1
+      6:
+      - 1
+    - 1: val_classification_iou
+      5: 1
+      6:
+      - 1
+    - 1: val_classification_accuracy
+      5: 1
+      6:
+      - 1
+    - 1: val_classification_cohen_kappa
+      5: 1
+      6:
+      - 1
+    - 1: val_classification_balanced_accuracy
+      5: 1
+      6:
+      - 1
+    - 1: train_batch._type
+      5: 1
+      6:
+      - 1
+    - 1: train_batch.sha256
+      5: 1
+      6:
+      - 1
+    - 1: train_batch.size
+      5: 1
+      6:
+      - 1
+    - 1: train_batch.path
+      5: 1
+      6:
+      - 1
+    - 1: train_batch.format
+      5: 1
+      6:
+      - 1
+    - 1: train_batch.width
+      5: 1
+      6:
+      - 1
+    - 1: train_batch.height
+      5: 1
+      6:
+      - 1
+    python_version: 3.10.6
+    start_time: 1668107000.728245
+    t:
+      1:
+      - 1
+      - 5
+      - 9
+      - 41
+      - 50
+      - 53
+      - 55
+      - 79
+      2:
+      - 1
+      - 5
+      - 9
+      - 41
+      - 50
+      - 53
+      - 55
+      - 63
+      - 74
+      - 79
+      3:
+      - 7
+      - 13
+      - 23
+      4: 3.10.6
+      5: 0.13.3
+      8:
+      - 5
+settings/dataloader/batch_size:
+  desc: null
+  value: 32
+settings/dataloader/num_workers:
+  desc: null
+  value: 4
+settings/dataset/input_products:
+  desc: null
+  value:
+  - mag1c
+  - TOA_AVIRIS_640nm
+  - TOA_AVIRIS_550nm
+  - TOA_AVIRIS_460nm
+settings/dataset/output_products:
+  desc: null
+  value:
+  - labelbinary
+settings/dataset/root_folder:
+  desc: null
+  value: /Permian/dataset
+settings/dataset/train_csv:
+  desc: null
+  value: train.csv
+settings/dataset/training_size:
+  desc: null
+  value:
+  - 128
+  - 128
+settings/dataset/training_size_overlap:
+  desc: null
+  value:
+  - 64
+  - 64
+settings/dataset/use_weight_loss:
+  desc: null
+  value: true
+settings/dataset/weight_loss:
+  desc: null
+  value: weight_mag1c
+settings/dataset/weight_sampling:
+  desc: null
+  value: true
+settings/experiment_name:
+  desc: null
+  value: f4_hyper_unetsempos1all_all_15ep_R3
+settings/experiment_path:
+  desc: null
+  value: gs://starcop/experiments/f4_hyper_unetsempos1all_all_15ep_R3/2022-11-10_19-03/
+settings/model/early_stopping_patience:
+  desc: null
+  value: 8
+settings/model/loss:
+  desc: null
+  value: BCEWithLogitsLoss
+settings/model/lr:
+  desc: null
+  value: 0.0001
+settings/model/lr_decay:
+  desc: null
+  value: 0.5
+settings/model/lr_patience:
+  desc: null
+  value: 4
+settings/model/model_mode:
+  desc: null
+  value: segmentation_output
+settings/model/model_type:
+  desc: null
+  value: unet_semseg
+settings/model/num_classes:
+  desc: null
+  value: 1
+settings/model/optimizer:
+  desc: null
+  value: adam
+settings/model/pos_weight:
+  desc: null
+  value: 1
+settings/model/semseg_backbone:
+  desc: null
+  value: mobilenet_v2
+settings/model/test:
+  desc: null
+  value: false
+settings/model/train:
+  desc: null
+  value: true
+settings/plot_samples:
+  desc: null
+  value: 8
+settings/products_plot:
+  desc: null
+  value:
+  - rgb_aviris
+  - mag1c
+  - label
+  - pred
+  - differences
+settings/resume_from_checkpoint:
+  desc: null
+  value: false
+settings/seed:
+  desc: null
+  value: None
+settings/training/accelerator:
+  desc: null
+  value: gpu
+settings/training/devices:
+  desc: null
+  value: 1
+settings/training/max_epochs:
+  desc: null
+  value: 15
+settings/training/train_log_every_n_steps:
+  desc: null
+  value: 10
+settings/training/val_check_interval:
+  desc: null
+  value: 0.5
+settings/wandb/images_logging:
+  desc: null
+  value: wandb
+settings/wandb/wandb_entity:
+  desc: null
+  value: dtacs
+settings/wandb/wandb_project:
+  desc: null
+  value: starcop-aviris-seg-vitek
+settings/wandb_logger_version:
+  desc: null
+  value: 1envh3p6
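
Unlike the first file, this config.yaml is a wandb run-config dump: the nested sections (`dataloader`, `dataset`, `model`, `training`, `wandb`) are stored as stringified Python dicts (the doubled single quotes are YAML's escape for a quote inside a '...' scalar), alongside flattened `settings/...` keys. Because those strings contain Python literals like `True` and `None`, `json.loads` would fail on them; `ast.literal_eval` recovers the dicts. A minimal sketch, assuming the file parses as ordinary YAML:

import ast
import yaml

with open("models/hyperstarcop_mag1c_rgb/config.yaml") as f:
    run_cfg = yaml.safe_load(f)

# The nested sections live under _content.value as strings.
model_cfg = ast.literal_eval(run_cfg["_content"]["value"]["model"])
print(model_cfg["semseg_backbone"])  # mobilenet_v2

# The same values are also available pre-flattened under settings/... keys.
print(run_cfg["settings/model/semseg_backbone"]["value"])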
models/hyperstarcop_mag1c_rgb/final_checkpoint_model.ckpt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96e274be943f64e028faded3bac3d1ee325ee7a79d6de2ee7f5deeaea1ef188d
+size 79998303