cssprad1 committed
Commit
ab687e7
1 Parent(s): 6186b89

initial commit

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete set of changes.
Files changed (50)
  1. app.py +105 -0
  2. ckpt_epoch_800.pth +3 -0
  3. data/images/sv-demo-mod09ga-11.npy +3 -0
  4. data/images/sv-demo-mod09ga-12.npy +3 -0
  5. data/images/sv-demo-mod09ga-6.npy +3 -0
  6. data/images/sv-demo-mod09ga-7.npy +3 -0
  7. data/thumbnails/sv-demo-mod09ga-11.png +0 -0
  8. data/thumbnails/sv-demo-mod09ga-12.png +0 -0
  9. data/thumbnails/sv-demo-mod09ga-6.png +0 -0
  10. data/thumbnails/sv-demo-mod09ga-7.png +0 -0
  11. inference.py +227 -0
  12. model.sav +3 -0
  13. pytorch-caney/.readthedocs.yaml +9 -0
  14. pytorch-caney/CODE_OF_CONDUCT.md +128 -0
  15. pytorch-caney/CONTRIBUTING.md +173 -0
  16. pytorch-caney/LICENSE +201 -0
  17. pytorch-caney/LICENSE.md +201 -0
  18. pytorch-caney/README.md +132 -0
  19. pytorch-caney/README.rst +164 -0
  20. pytorch-caney/docs/Makefile +20 -0
  21. pytorch-caney/docs/conf.py +52 -0
  22. pytorch-caney/docs/examples.rst +3 -0
  23. pytorch-caney/docs/index.rst +22 -0
  24. pytorch-caney/docs/make.bat +35 -0
  25. pytorch-caney/docs/modules.rst +284 -0
  26. pytorch-caney/docs/pytorch_caney.rst +31 -0
  27. pytorch-caney/docs/readme.rst +1 -0
  28. pytorch-caney/docs/requirements.txt +4 -0
  29. pytorch-caney/docs/source/index.rst +20 -0
  30. pytorch-caney/docs/static/DSG_LOGO_REDESIGN.png +0 -0
  31. pytorch-caney/examples/satvision/finetune_satvision_base_landcover5class_192_window12_100ep.yaml +33 -0
  32. pytorch-caney/examples/satvision/finetune_satvision_base_landcover9class_192_window12_100ep.yaml +33 -0
  33. pytorch-caney/examples/satvision/mim_pretrain_swinv2_satvision_base_192_window12_800ep.yaml +27 -0
  34. pytorch-caney/examples/satvision/run_satvision_finetune_lc_fiveclass.sh +20 -0
  35. pytorch-caney/examples/satvision/run_satvision_finetune_lc_nineclass.sh +20 -0
  36. pytorch-caney/examples/satvision/run_satvision_pretrain.sh +19 -0
  37. pytorch-caney/pyproject.toml +6 -0
  38. pytorch-caney/pytorch_caney/__init__.py +1 -0
  39. pytorch-caney/pytorch_caney/__pycache__/__init__.cpython-310.pyc +0 -0
  40. pytorch-caney/pytorch_caney/config.py +226 -0
  41. pytorch-caney/pytorch_caney/console/__init__.py +0 -0
  42. pytorch-caney/pytorch_caney/console/cli.py +62 -0
  43. pytorch-caney/pytorch_caney/console/dl_pipeline.py +62 -0
  44. pytorch-caney/pytorch_caney/data/__pycache__/utils.cpython-310.pyc +0 -0
  45. pytorch-caney/pytorch_caney/data/datamodules/__init__.py +0 -0
  46. pytorch-caney/pytorch_caney/data/datamodules/finetune_datamodule.py +114 -0
  47. pytorch-caney/pytorch_caney/data/datamodules/mim_datamodule.py +80 -0
  48. pytorch-caney/pytorch_caney/data/datamodules/segmentation_datamodule.py +164 -0
  49. pytorch-caney/pytorch_caney/data/datamodules/simmim_datamodule.py +80 -0
  50. pytorch-caney/pytorch_caney/data/datasets/__init__.py +0 -0
app.py ADDED
@@ -0,0 +1,105 @@
1
+ import streamlit as st
2
+ import numpy as np
3
+ import os
4
+ import pathlib
5
+ from inference import infer, InferenceModel
6
+
7
+ # -----------------------------------------------------------------------------
8
+ # class SatvisionDemoApp
9
+ #
10
+ # Directory Structure: base-directory/MOD09GA/year
11
+ # MOD09GQ/year
12
+ # MYD09GA/year
13
+ # MYD09GQ/year
14
+ #
15
+ # -----------------------------------------------------------------------------
16
+ class SatvisionDemoApp:
17
+
18
+ # -------------------------------------------------------------------------
19
+ # __init__
20
+ # -------------------------------------------------------------------------
21
+ def __init__(self):
22
+
23
+ self.thumbnail_dir = pathlib.Path('data/thumbnails')
24
+ self.image_dir = pathlib.Path('data/images')
25
+ print(self.thumbnail_dir)
26
+ self.thumbnail_files = sorted(list(self.thumbnail_dir.glob('sv-*.png')))
27
+ self.image_files = sorted(list(self.image_dir.glob('sv-*.npy')))
28
+ print(list(self.image_files))
29
+ self.thumbnail_names = [str(tn_path.name) for tn_path in self.thumbnail_files]
30
+ print(self.thumbnail_names)
31
+
32
+ self.inferenceModel = InferenceModel()
33
+
34
+ # -------------------------------------------------------------------------
35
+ # render_sidebar
36
+ # -------------------------------------------------------------------------
37
+ def render_sidebar(self):
38
+
39
+ st.sidebar.header("Select an Image")
40
+
41
+ for index, thumbnail in enumerate(self.thumbnail_names):
42
+
43
+ thumbnail_path = self.thumbnail_dir / thumbnail
44
+
45
+ # thumbnail_arr = np.load(thumbnail_path)
46
+ print(str(thumbnail_path))
47
+
48
+ st.sidebar.image(str(thumbnail_path), use_column_width=True, caption=thumbnail)
49
+
50
+ # -------------------------------------------------------------------------
51
+ # render_main_app
52
+ # -------------------------------------------------------------------------
53
+ def render_main_app(self):
54
+
55
+ st.title("Satvision-Base Demo")
56
+
57
+ st.header("Image Reconstruction Process")
58
+ selected_image_index = st.sidebar.selectbox(
59
+ "Select an Image",
60
+ self.thumbnail_names)
61
+ print(selected_image_index)
62
+
63
+ selected_image = self.load_selected_image(selected_image_index)
64
+
65
+ image, masked_input, output = self.inferenceModel.infer(selected_image)
66
+
67
+ col1, col2, col3 = st.columns(3, gap="large")
68
+
69
+ # Display the selected image with a title three times side-by-side
70
+
71
+ with col1:
72
+ st.image(image, use_column_width=True, caption="Input")
73
+
74
+ with col2:
75
+ st.image(masked_input, use_column_width=True, caption="Input Masked")
76
+
77
+ with col3:
78
+ st.image(output, use_column_width=True, caption="Reconstruction")
79
+
80
+ # -------------------------------------------------------------------------
81
+ # load_selected_image
82
+ # -------------------------------------------------------------------------
83
+ def load_selected_image(self, image_name):
84
+
85
+ # Load the selected .npy image (stored channels-first) with NumPy
86
+ image_name = image_name.replace('.png', '.npy')
87
+
88
+ image = np.load(self.image_dir / image_name)
89
+ image = np.moveaxis(image, 0, 2)
90
+ return image
91
+
92
+ # -----------------------------------------------------------------------------
93
+ # main
94
+ # -----------------------------------------------------------------------------
95
+ def main():
96
+
97
+ app = SatvisionDemoApp()
98
+
99
+ app.render_main_app()
100
+
101
+ app.render_sidebar()
102
+
103
+ if __name__ == "__main__":
104
+
105
+ main()
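The demo ships no requirements file among the files listed above, so the exact dependency pins are not recorded in this commit. As a minimal sketch, assuming the package set inferred from the imports in app.py and inference.py, and that the Git LFS artifacts (the checkpoint, model.sav, and the .npy demo chips) have been fetched, the app can be launched locally with:

```bash
# Fetch the LFS-tracked artifacts, then start the Streamlit demo.
# The package list below is inferred from the imports and is not an
# official requirements file for this commit.
git lfs install
git lfs pull
pip install streamlit torch torchvision numpy joblib
streamlit run app.py
```

Streamlit serves the app on http://localhost:8501 by default.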
ckpt_epoch_800.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:56efed01b695ccccea2a30bf435978ffd867bc08d618b1e9be73d4849bfc007f
3
+ size 1136978755
data/images/sv-demo-mod09ga-11.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:357b9efeda497e81aa0f825c056f2c58b9066ec7208da0e28ba22f57217a165c
3
+ size 1032320
data/images/sv-demo-mod09ga-12.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1e4eb11effc171010ad5397121da3ae546aa5c9167a75627bd34820b08447d64
3
+ size 1032320
data/images/sv-demo-mod09ga-6.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:814c059fbcae6269c79b477c758fa00cfaad23fde73f2b4efd97f6c4a940d40a
3
+ size 1032320
data/images/sv-demo-mod09ga-7.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f4159969faa0b6123f5918e39e28ce24f8fc735c28fecfb9943dd1bc03b74146
3
+ size 1032320
data/thumbnails/sv-demo-mod09ga-11.png ADDED
data/thumbnails/sv-demo-mod09ga-12.png ADDED
data/thumbnails/sv-demo-mod09ga-6.png ADDED
data/thumbnails/sv-demo-mod09ga-7.png ADDED
inference.py ADDED
@@ -0,0 +1,227 @@
1
+ import numpy as np
2
+
3
+ import torch
4
+ import joblib
5
+
6
+
7
+ import torchvision.transforms as T
8
+ import sys
9
+
10
+ sys.path.append('pytorch-caney')
11
+ # from pytorch_caney.models.mim.mim import build_mim_model
12
+
13
+
14
+ class Transform:
15
+ """
16
+ torchvision transform which preprocesses the input imagery in
17
+ addition to generating a MiM mask
18
+ """
19
+
20
+ def __init__(self, config):
21
+
22
+ self.transform_img = \
23
+ T.Compose([
24
+ T.ToTensor(),
25
+ T.Resize((config.DATA.IMG_SIZE, config.DATA.IMG_SIZE)),
26
+ ])
27
+
28
+ model_patch_size = config.MODEL.SWINV2.PATCH_SIZE
29
+
30
+ self.mask_generator = SimmimMaskGenerator(
31
+ input_size=config.DATA.IMG_SIZE,
32
+ mask_patch_size=config.DATA.MASK_PATCH_SIZE,
33
+ model_patch_size=model_patch_size,
34
+ mask_ratio=config.DATA.MASK_RATIO,
35
+ )
36
+
37
+ def __call__(self, img):
38
+
39
+ img = self.transform_img(img)
40
+ mask = self.mask_generator()
41
+
42
+ return img, mask
43
+
44
+
45
+ class SimmimMaskGenerator:
46
+ """
47
+ Generates the masks for masked-image-modeling
48
+ """
49
+ def __init__(self,
50
+ input_size=192,
51
+ mask_patch_size=32,
52
+ model_patch_size=4,
53
+ mask_ratio=0.6):
54
+ self.input_size = input_size
55
+ self.mask_patch_size = mask_patch_size
56
+ self.model_patch_size = model_patch_size
57
+ self.mask_ratio = mask_ratio
58
+
59
+ assert self.input_size % self.mask_patch_size == 0
60
+ assert self.mask_patch_size % self.model_patch_size == 0
61
+
62
+ self.rand_size = self.input_size // self.mask_patch_size
63
+ self.scale = self.mask_patch_size // self.model_patch_size
64
+
65
+ self.token_count = self.rand_size ** 2
66
+ self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))
67
+
68
+ def __call__(self):
69
+ mask = self.make_simmim_mask(self.token_count, self.mask_count,
70
+ self.rand_size, self.scale)
71
+ mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)
72
+ return mask
73
+
74
+ @staticmethod
75
+ def make_simmim_mask(token_count, mask_count, rand_size, scale):
76
+ """JIT-compiled random mask generation
77
+
78
+ Args:
79
+ token_count
80
+ mask_count
81
+ rand_size
82
+ scale
83
+
84
+ Returns:
85
+ mask
86
+ """
87
+ mask_idx = np.random.permutation(token_count)[:mask_count]
88
+ mask = np.zeros(token_count, dtype=np.int64)
89
+ mask[mask_idx] = 1
90
+ mask = mask.reshape((rand_size, rand_size))
91
+ return mask
92
+
93
+
94
+ class InferenceModel(object):
95
+
96
+ def __init__(self):
97
+ self.checkpoint_path = 'ckpt_epoch_800.pth'
98
+ self.config_path = 'simmim_pretrain__satnet_swinv2_base__img192_window12__800ep_v3_no_norm.config.sav'
99
+ self.architecture_path = 'model.sav'
100
+
101
+ self.config = joblib.load(self.config_path)
102
+ self.model = joblib.load(self.architecture_path)
103
+ self.load_checkpoint()
104
+
105
+ self.transform = Transform(self.config)
106
+
107
+
108
+ def load_checkpoint(self):
109
+
110
+
111
+ checkpoint = torch.load(self.checkpoint_path, map_location='cpu')
112
+
113
+ # re-map keys due to name change (only for loading provided models)
114
+ rpe_mlp_keys = [k for k in checkpoint['model'].keys() if "rpe_mlp" in k]
115
+
116
+ for k in rpe_mlp_keys:
117
+
118
+ checkpoint['model'][k.replace(
119
+ 'rpe_mlp', 'cpb_mlp')] = checkpoint['model'].pop(k)
120
+
121
+ msg = self.model.load_state_dict(checkpoint['model'], strict=False)
122
+
123
+ print(msg)
124
+
125
+ del checkpoint
126
+
127
+ torch.cuda.empty_cache()
128
+
129
+ @staticmethod
130
+ def minmax_norm(img_arr):
131
+ arr_min = img_arr.min()
132
+ arr_max = img_arr.max()
133
+ img_arr_scaled = (img_arr - arr_min) / (arr_max - arr_min)
134
+ img_arr_scaled = img_arr_scaled * 255
135
+ img_arr_scaled = img_arr_scaled.astype(np.uint8)
136
+ return img_arr_scaled
137
+
138
+ # -------------------------------------------------------------------------
139
+ # preprocess
140
+ # -------------------------------------------------------------------------
141
+ def preprocess(self, image):
142
+
143
+ image, mask = self.transform(image)
144
+
145
+ image = image.unsqueeze(0)
146
+
147
+ mask = torch.tensor(mask).unsqueeze(0)
148
+
149
+ print(image.size())
150
+ print(mask.shape)
151
+
152
+ return image, mask
153
+
154
+ # -------------------------------------------------------------------------
155
+ # predict
156
+ # -------------------------------------------------------------------------
157
+ def predict(self, image, mask):
158
+
159
+ with torch.no_grad():
160
+
161
+ logits = self.model.encoder(image, mask)
162
+
163
+ image_recon = self.model.decoder(logits)
164
+
165
+ image_recon = image_recon.numpy()[0, :, :, :]
166
+
167
+ return image_recon
168
+
169
+ # -------------------------------------------------------------------------
170
+ # process_mask
171
+ # -------------------------------------------------------------------------
172
+ @staticmethod
173
+ def process_mask(mask):
174
+ mask = mask.repeat_interleave(4, 1).repeat_interleave(4, 2).unsqueeze(1).contiguous()
175
+ mask = mask[0, 0, :, :]
176
+ mask = np.stack([mask, mask, mask], axis=-1)
177
+ return mask
178
+
179
+ # -------------------------------------------------------------------------
180
+ # infer
181
+ # -------------------------------------------------------------------------
182
+ def infer(self, image):
183
+
184
+ image, mask = self.preprocess(image)
185
+
186
+ img_recon = self.predict(image, mask)
187
+
188
+ mask = self.process_mask(mask)
189
+
190
+ img_normed = self.minmax_norm(image.numpy()[0, :, :, :])
191
+
192
+ print(img_normed.shape)
193
+ rgb_image = np.stack((img_normed[0, :, :],
194
+ img_normed[3, :, :],
195
+ img_normed[2, :, :]),
196
+ axis=-1)
197
+
198
+ img_recon = self.minmax_norm(img_recon)
199
+ rgb_image_recon = np.stack((img_recon[0, :, :],
200
+ img_recon[3, :, :],
201
+ img_recon[2, :, :]),
202
+ axis=-1)
203
+
204
+ rgb_masked = np.where(mask == 0, rgb_image, rgb_image_recon)
205
+ rgb_image_masked = np.where(mask == 1, 0, rgb_image)
206
+ rgb_recon_masked = rgb_masked  # self.minmax_norm(rgb_masked)
207
+
208
+ return rgb_image, rgb_image_masked, rgb_recon_masked
209
+
210
+
211
+ def infer(array_input: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
212
+
213
+ masked_input = np.random.rand(256, 256, 3)
214
+
215
+ output = np.random.rand(256, 256, 3)
216
+
217
+ return masked_input, output
218
+
219
+ if __name__ == '__main__':
220
+ inferenceModel = InferenceModel()
221
+
222
+ image = np.load('data/images/sv-demo-mod09ga-11.npy')
223
+ print(image.shape)
224
+ image = np.moveaxis(image, 0, 2)
225
+ print(image.shape)
226
+
227
+ inference = inferenceModel.infer(image)
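The `__main__` block above doubles as a smoke test that loads one of the demo MOD09GA chips and runs the masked-reconstruction pipeline without the UI. Note that `InferenceModel.__init__` also expects a `simmim_pretrain__satnet_swinv2_base__img192_window12__800ep_v3_no_norm.config.sav` file that does not appear in the (truncated) 50-file list above, so the sketch below assumes that config artifact is present in the repository root along with the LFS-tracked checkpoint and model.sav:

```bash
# Run the reconstruction pipeline end to end without Streamlit.
# Assumes ckpt_epoch_800.pth, model.sav and the *.config.sav referenced
# in InferenceModel.__init__ are all present in the working directory.
git lfs pull
python inference.py
```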
model.sav ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:884d0f2d5dabc20e541b43a5d69a815a63339631b4ed9a0121bf0da7cd8450ff
3
+ size 382621982
pytorch-caney/.readthedocs.yaml ADDED
@@ -0,0 +1,9 @@
1
+ version: 2
2
+
3
+ build:
4
+ os: "ubuntu-20.04"
5
+ tools:
6
+ python: "3.8"
7
+
8
+ sphinx:
9
+ fail_on_warning: true
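This Read the Docs configuration builds the Sphinx docs on Ubuntu 20.04 with Python 3.8 and fails the build on any Sphinx warning. To reproduce the build locally, a minimal sketch (assuming pytorch-caney/docs is the Sphinx source directory, since conf.py lives there in this commit) would be:

```bash
# Install the docs requirements added in this commit and build the HTML
# documentation; output lands in pytorch-caney/docs/_build/html.
pip install -r pytorch-caney/docs/requirements.txt
sphinx-build -M html pytorch-caney/docs pytorch-caney/docs/_build
```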
pytorch-caney/CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,128 @@
1
+ # Contributor Covenant Code of Conduct
2
+
3
+ ## Our Pledge
4
+
5
+ We as members, contributors, and leaders pledge to make participation in our
6
+ community a harassment-free experience for everyone, regardless of age, body
7
+ size, visible or invisible disability, ethnicity, sex characteristics, gender
8
+ identity and expression, level of experience, education, socio-economic status,
9
+ nationality, personal appearance, race, religion, or sexual identity
10
+ and orientation.
11
+
12
+ We pledge to act and interact in ways that contribute to an open, welcoming,
13
+ diverse, inclusive, and healthy community.
14
+
15
+ ## Our Standards
16
+
17
+ Examples of behavior that contributes to a positive environment for our
18
+ community include:
19
+
20
+ * Demonstrating empathy and kindness toward other people
21
+ * Being respectful of differing opinions, viewpoints, and experiences
22
+ * Giving and gracefully accepting constructive feedback
23
+ * Accepting responsibility and apologizing to those affected by our mistakes,
24
+ and learning from the experience
25
+ * Focusing on what is best not just for us as individuals, but for the
26
+ overall community
27
+
28
+ Examples of unacceptable behavior include:
29
+
30
+ * The use of sexualized language or imagery, and sexual attention or
31
+ advances of any kind
32
+ * Trolling, insulting or derogatory comments, and personal or political attacks
33
+ * Public or private harassment
34
+ * Publishing others' private information, such as a physical or email
35
+ address, without their explicit permission
36
+ * Other conduct which could reasonably be considered inappropriate in a
37
+ professional setting
38
+
39
+ ## Enforcement Responsibilities
40
+
41
+ Community leaders are responsible for clarifying and enforcing our standards of
42
+ acceptable behavior and will take appropriate and fair corrective action in
43
+ response to any behavior that they deem inappropriate, threatening, offensive,
44
+ or harmful.
45
+
46
+ Community leaders have the right and responsibility to remove, edit, or reject
47
+ comments, commits, code, wiki edits, issues, and other contributions that are
48
+ not aligned to this Code of Conduct, and will communicate reasons for moderation
49
+ decisions when appropriate.
50
+
51
+ ## Scope
52
+
53
+ This Code of Conduct applies within all community spaces, and also applies when
54
+ an individual is officially representing the community in public spaces.
55
+ Examples of representing our community include using an official e-mail address,
56
+ posting via an official social media account, or acting as an appointed
57
+ representative at an online or offline event.
58
+
59
+ ## Enforcement
60
+
61
+ Instances of abusive, harassing, or otherwise unacceptable behavior may be
62
+ reported to the community leaders responsible for enforcement at
63
+ support@nccs.nasa.gov.
64
+ All complaints will be reviewed and investigated promptly and fairly.
65
+
66
+ All community leaders are obligated to respect the privacy and security of the
67
+ reporter of any incident.
68
+
69
+ ## Enforcement Guidelines
70
+
71
+ Community leaders will follow these Community Impact Guidelines in determining
72
+ the consequences for any action they deem in violation of this Code of Conduct:
73
+
74
+ ### 1. Correction
75
+
76
+ **Community Impact**: Use of inappropriate language or other behavior deemed
77
+ unprofessional or unwelcome in the community.
78
+
79
+ **Consequence**: A private, written warning from community leaders, providing
80
+ clarity around the nature of the violation and an explanation of why the
81
+ behavior was inappropriate. A public apology may be requested.
82
+
83
+ ### 2. Warning
84
+
85
+ **Community Impact**: A violation through a single incident or series
86
+ of actions.
87
+
88
+ **Consequence**: A warning with consequences for continued behavior. No
89
+ interaction with the people involved, including unsolicited interaction with
90
+ those enforcing the Code of Conduct, for a specified period of time. This
91
+ includes avoiding interactions in community spaces as well as external channels
92
+ like social media. Violating these terms may lead to a temporary or
93
+ permanent ban.
94
+
95
+ ### 3. Temporary Ban
96
+
97
+ **Community Impact**: A serious violation of community standards, including
98
+ sustained inappropriate behavior.
99
+
100
+ **Consequence**: A temporary ban from any sort of interaction or public
101
+ communication with the community for a specified period of time. No public or
102
+ private interaction with the people involved, including unsolicited interaction
103
+ with those enforcing the Code of Conduct, is allowed during this period.
104
+ Violating these terms may lead to a permanent ban.
105
+
106
+ ### 4. Permanent Ban
107
+
108
+ **Community Impact**: Demonstrating a pattern of violation of community
109
+ standards, including sustained inappropriate behavior, harassment of an
110
+ individual, or aggression toward or disparagement of classes of individuals.
111
+
112
+ **Consequence**: A permanent ban from any sort of public interaction within
113
+ the community.
114
+
115
+ ## Attribution
116
+
117
+ This Code of Conduct is adapted from the [Contributor Covenant][homepage],
118
+ version 2.0, available at
119
+ https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
120
+
121
+ Community Impact Guidelines were inspired by [Mozilla's code of conduct
122
+ enforcement ladder](https://github.com/mozilla/diversity).
123
+
124
+ [homepage]: https://www.contributor-covenant.org
125
+
126
+ For answers to common questions about this code of conduct, see the FAQ at
127
+ https://www.contributor-covenant.org/faq. Translations are available at
128
+ https://www.contributor-covenant.org/translations.
pytorch-caney/CONTRIBUTING.md ADDED
@@ -0,0 +1,173 @@
1
+ # Contributing
2
+
3
+ When contributing to this repository, please first discuss the change you wish to make via issue,
4
+ email, or any other method with the owners of this repository before making a change.
5
+
6
+ Please note we have a code of conduct, please follow it in all your interactions with the project.
7
+
8
+ ## Pull Request Process
9
+
10
+ 1. Ensure any install or build dependencies are removed before the end of the layer when doing a
11
+ build.
12
+ 2. Update the README.md with details of changes to the interface, this includes new environment
13
+ variables, exposed ports, useful file locations and container parameters.
14
+ 3. Increase the version numbers in any examples files and the README.md to the new version that this
15
+ Pull Request would represent. The versioning scheme we use is [SemVer](http://semver.org/).
16
+ 4. Regenerate any additional documentation using PDOC (usage details listed below).
17
+ 5. Document the proposed changes in the CHANGELOG.md file.
18
+ 6. You may then submit your merge request, and the change will be reviewed.
19
+
20
+ ## Code of Conduct
21
+
22
+ ### Our Pledge
23
+
24
+ In the interest of fostering an open and welcoming environment, we as
25
+ contributors and maintainers pledge to making participation in our project and
26
+ our community a harassment-free experience for everyone, regardless of age, body
27
+ size, disability, ethnicity, gender identity and expression, level of experience,
28
+ nationality, personal appearance, race, religion, or sexual identity and
29
+ orientation.
30
+
31
+ ### Our Standards
32
+
33
+ Examples of behavior that contributes to creating a positive environment
34
+ include:
35
+
36
+ * Using welcoming and inclusive language
37
+ * Being respectful of differing viewpoints and experiences
38
+ * Gracefully accepting constructive criticism
39
+ * Focusing on what is best for the community
40
+ * Showing empathy towards other community members
41
+
42
+ Examples of unacceptable behavior by participants include:
43
+
44
+ * The use of sexualized language or imagery and unwelcome sexual attention or
45
+ advances
46
+ * Trolling, insulting/derogatory comments, and personal or political attacks
47
+ * Public or private harassment
48
+ * Publishing others' private information, such as a physical or electronic
49
+ address, without explicit permission
50
+ * Other conduct which could reasonably be considered inappropriate in a
51
+ professional setting
52
+
53
+ ### Our Responsibilities
54
+
55
+ Project maintainers are responsible for clarifying the standards of acceptable
56
+ behavior and are expected to take appropriate and fair corrective action in
57
+ response to any instances of unacceptable behavior.
58
+
59
+ Project maintainers have the right and responsibility to remove, edit, or
60
+ reject comments, commits, code, wiki edits, issues, and other contributions
61
+ that are not aligned to this Code of Conduct, or to ban temporarily or
62
+ permanently any contributor for other behaviors that they deem inappropriate,
63
+ threatening, offensive, or harmful.
64
+
65
+ ### Scope
66
+
67
+ This Code of Conduct applies both within project spaces and in public spaces
68
+ when an individual is representing the project or its community. Examples of
69
+ representing a project or community include using an official project e-mail
70
+ address, posting via an official social media account, or acting as an appointed
71
+ representative at an online or offline event. Representation of a project may be
72
+ further defined and clarified by project maintainers.
73
+
74
+ ### Enforcement
75
+
76
+ Instances of abusive, harassing, or otherwise unacceptable behavior may be
77
+ reported by contacting the project team at [INSERT EMAIL ADDRESS]. All
78
+ complaints will be reviewed and investigated and will result in a response that
79
+ is deemed necessary and appropriate to the circumstances. The project team is
80
+ obligated to maintain confidentiality with regard to the reporter of an incident.
81
+ Further details of specific enforcement policies may be posted separately.
82
+
83
+ Project maintainers who do not follow or enforce the Code of Conduct in good
84
+ faith may face temporary or permanent repercussions as determined by other
85
+ members of the project's leadership.
86
+
87
+ ### Attribution
88
+
89
+ This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
90
+ available at [http://contributor-covenant.org/version/1/4][version]
91
+
92
+ [homepage]: http://contributor-covenant.org
93
+ [version]: http://contributor-covenant.org/version/1/4/
94
+
95
+ ## Appendix
96
+
97
+ ### Generating Documentation
98
+
99
+ This repository follows semi-automatic documentation generation. The following
100
+ is an example of how to generate documentation for a single module.
101
+
102
+ ```bash
103
+ conda activate tensorflow-caney
104
+ pdoc --html tensorflow-caney/raster.py --force
105
+ ```
106
+
107
+ ### Linting
108
+
109
+ This project uses flake8 for PEP8 linting and formatting. Every submodule should include
110
+ a test section in the tests directory. Refer to the tests directory for more examples.
111
+ The Python unittest library is used for these purposes.
112
+
113
+ ### Documenting Methods
114
+
115
+ The following documentation format should be followed below each method to allow for
116
+ explicit semi-automatic documentation generation.
117
+
118
+ ```python
119
+ """
120
+ Read raster and append data to existing Raster object
121
+ Args:
122
+ filename (str): raster filename to read from
123
+ bands (str list): list of bands to append to object, e.g ['Red']
124
+ chunks_band (int): integer to map object to memory, z
125
+ chunks_x (int): integer to map object to memory, x
126
+ chunks_y (int): integer to map object to memory, y
127
+ Return:
128
+ raster (raster object): raster object to manipulate rasters
129
+ ----------
130
+ Example
131
+ ----------
132
+ raster.readraster(filename, bands)
133
+ """
134
+ ```
135
+
136
+ ### Format of CHANGELOG
137
+
138
+ The following describes the format for each CHANGELOG release. If there are no contributions
139
+ in any of the sections, they are removed from the description.
140
+
141
+ ```markdown
142
+ ## [0.0.3] - 2020-12-14
143
+
144
+ ### Added
145
+ - Short description
146
+
147
+ ### Fixed
148
+ - Short description
149
+
150
+ ### Changed
151
+ - Short description
152
+
153
+ ### Removed
154
+ - Short description
155
+
156
+ ### Approved
157
+ Approver Name, Email
158
+ ```
159
+
160
+ ### Example Using Container in ADAPT
161
+
162
+ ```bash
163
+ module load singularity
164
+ singularity shell -B $your_mounts --nv tensorflow-caney
165
+ ```
166
+
167
+ ### Current Workflow
168
+
169
+ ```bash
170
+ module load singularity
171
+ singularity shell --nv -B /lscratch,/css,/explore/nobackup/projects/ilab,/explore/nobackup/people /explore/nobackup/projects/ilab/containers/tensorflow-caney-2022.12
172
+ export PYTHONPATH="/explore/nobackup/people/jacaraba/development/tensorflow-caney:/adapt/nobackup/people/jacaraba/development/vhr-cnn-chm"
173
+ ```
pytorch-caney/LICENSE ADDED
@@ -0,0 +1,201 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
pytorch-caney/LICENSE.md ADDED
@@ -0,0 +1,201 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
pytorch-caney/README.md ADDED
@@ -0,0 +1,132 @@
1
+ # pytorch-caney
2
+
3
+ Python package providing PyTorch tools for geospatial science problems.
4
+
5
+ [![DOI](https://zenodo.org/badge/472450059.svg)](https://zenodo.org/badge/latestdoi/472450059)
6
+ ![CI Workflow](https://github.com/nasa-nccs-hpda/pytorch-caney/actions/workflows/ci.yml/badge.svg)
7
+ ![CI to DockerHub ](https://github.com/nasa-nccs-hpda/pytorch-caney/actions/workflows/dockerhub.yml/badge.svg)
8
+ ![Code style: PEP8](https://github.com/nasa-nccs-hpda/pytorch-caney/actions/workflows/lint.yml/badge.svg)
9
+ [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
10
+ [![Coverage Status](https://coveralls.io/repos/github/nasa-nccs-hpda/pytorch-caney/badge.svg?branch=main)](https://coveralls.io/github/nasa-nccs-hpda/pytorch-caney?branch=main)
11
+
12
+ ## Documentation
13
+
14
+ - Latest: https://nasa-nccs-hpda.github.io/pytorch-caney/latest
15
+
16
+ ## Objectives
17
+
18
+ - Library to process remote sensing imagery using GPU and CPU parallelization.
19
+ - Machine Learning and Deep Learning image classification and regression.
20
+ - Agnostic array and vector-like data structures.
21
+ - User interface environments via Notebooks for easy-to-use AI/ML projects.
22
+ - Example notebooks for quick AI/ML start with your own data.
23
+
24
+ ## Installation
25
+
26
+ The following library is intended to be used to accelerate the development of data science products
27
+ for remote sensing satellite imagery, or any other applications. pytorch-caney can be installed
28
+ by itself, but instructions for installing the full environments are listed under the requirements
29
+ directory so projects, examples, and notebooks can be run.
30
+
31
+ Note: PIP installations do not include CUDA libraries for GPU support. Make sure NVIDIA libraries
32
+ are installed locally in the system if not using conda/mamba.
33
+
34
+ ```bash
35
+ module load singularity # if a module needs to be loaded
36
+ singularity build --sandbox pytorch-caney-container docker://nasanccs/pytorch-caney:latest
37
+ ```
38
+
39
+ ## Why Caney?
40
+
41
+ "Caney" means longhouse in Taíno.
42
+
43
+ ## Contributors
44
+
45
+ - Jordan Alexis Caraballo-Vega, jordan.a.caraballo-vega@nasa.gov
46
+ - Caleb Spradlin, caleb.s.spradlin@nasa.gov
47
+
48
+ ## Contributing
49
+
50
+ Please see our [guide for contributing to pytorch-caney](CONTRIBUTING.md).
51
+
52
+ ## SatVision
53
+
54
+ | name | pretrain | resolution | #params |
55
+ | :---: | :---: | :---: | :---: |
56
+ | SatVision-B | MODIS-1.9-M | 192x192 | 84.5M |
57
+
58
+ ## SatVision Datasets
59
+
60
+ | name | bands | resolution | #chips |
61
+ | :---: | :---: | :---: | :---: |
62
+ | MODIS-Small | 7 | 128x128 | 1,994,131 |
63
+
64
+ ## MODIS Surface Reflectance (MOD09GA) Band Details
65
+
66
+ | Band Name | Bandwidth |
67
+ | :------------: | :-----------: |
68
+ | sur_refl_b01_1 | 0.620 - 0.670 |
69
+ | sur_refl_b02_1 | 0.841 - 0.876 |
70
+ | sur_refl_b03_1 | 0.459 - 0.479 |
71
+ | sur_refl_b04_1 | 0.545 - 0.565 |
72
+ | sur_refl_b05_1 | 1.230 - 1.250 |
73
+ | sur_refl_b06_1 | 1.628 - 1.652 |
74
+ | sur_refl_b07_1 | 2.105 - 2.155 |
75
+
76
+ ## Pre-training with Masked Image Modeling
77
+
78
+ To pre-train the swinv2 base model with masked image modeling pre-training, run:
79
+ ```bash
80
+ torchrun --nproc_per_node <NGPUS> pytorch-caney/pytorch_caney/pipelines/pretraining/mim.py --cfg <config-file> --dataset <dataset-name> --data-paths <path-to-data-subfolder-1> --batch-size <batch-size> --output <output-dir> --enable-amp
81
+ ```
82
+
83
+ For example to run on a compute node with 4 GPUs and a batch size of 128 on the MODIS SatVision pre-training dataset with a base swinv2 model, run:
84
+
85
+ ```bash
86
+ singularity shell --nv -B <mounts> /path/to/container/pytorch-caney-container
87
+ Singularity> export PYTHONPATH=$PWD:$PWD/pytorch-caney
88
+ Singularity> torchrun --nproc_per_node 4 pytorch-caney/pytorch_caney/pipelines/pretraining/mim.py --cfg pytorch-caney/examples/satvision/mim_pretrain_swinv2_satvision_base_192_window12_800ep.yaml --dataset MODIS --data-paths /explore/nobackup/projects/ilab/data/satvision/pretraining/training_* --batch-size 128 --output . --enable-amp
89
+ ```
90
+
91
+ The following example script runs the exact configuration used to pre-train the SatVision-base model with masked image modeling on the MODIS pre-training dataset:
92
+ ```bash
93
+ singularity shell --nv -B <mounts> /path/to/container/pytorch-caney-container
94
+ Singularity> cd pytorch-caney/examples/satvision
95
+ Singularity> ./run_satvision_pretrain.sh
96
+ ```
97
+
98
+ ## Fine-tuning SatVision-base
99
+ To fine-tune the SatVision-base pre-trained model, run:
100
+ ```bash
101
+ torchrun --nproc_per_node <NGPUS> pytorch-caney/pytorch_caney/pipelines/finetuning/finetune.py --cfg <config-file> --pretrained <path-to-pretrained> --dataset <dataset-name> --data-paths <path-to-data-subfolder-1> --batch-size <batch-size> --output <output-dir> --enable-amp
102
+ ```
103
+
104
+ See the example config files pytorch-caney/examples/satvision/finetune_satvision_base_*.yaml for how to structure your config file for fine-tuning.
105
+
106
+
107
+ ## Testing
108
+ To run linting and the unit tests, run the bash command below. This executes the unit tests and linting in a temporary venv used only for testing.
109
+ ```bash
110
+ git clone git@github.com:nasa-nccs-hpda/pytorch-caney.git
111
+ cd pytorch-caney; bash test.sh
112
+ ```
113
+ Or run the unit tests directly with the container or an anaconda environment:
114
+
115
+ ```bash
116
+ git clone git@github.com:nasa-nccs-hpda/pytorch-caney.git
117
+ singularity build --sandbox pytorch-caney-container docker://nasanccs/pytorch-caney:latest
118
+ singularity shell --nv -B <mounts> /path/to/container/pytorch-caney-container
119
+ cd pytorch-caney; python -m unittest discover pytorch_caney/tests
120
+ ```
121
+
122
+ ```bash
123
+ git clone git@github.com:nasa-nccs-hpda/pytorch-caney.git
124
+ cd pytorch-caney; conda env create -f requirements/environment_gpu.yml;
125
+ conda activate pytorch-caney
126
+ python -m unittest discover pytorch_caney/tests
127
+ ```
128
+ ## References
129
+
130
+ - [Pytorch Lightning](https://github.com/Lightning-AI/lightning)
131
+ - [Swin Transformer](https://github.com/microsoft/Swin-Transformer)
132
+ - [SimMIM](https://github.com/microsoft/SimMIM)
pytorch-caney/README.rst ADDED
@@ -0,0 +1,164 @@
1
+ ================
2
+ pytorch-caney
3
+ ================
4
+
5
+ Python package providing a variety of PyTorch tools for geospatial science problems.
6
+
7
+ .. image:: https://zenodo.org/badge/472450059.svg
8
+ :target: https://zenodo.org/badge/latestdoi/472450059
9
+
10
+ Objectives
11
+ ------------
12
+
13
+ - Library to process remote sensing imagery using GPU and CPU parallelization.
14
+ - Machine Learning and Deep Learning image classification and regression.
15
+ - Agnostic array and vector-like data structures.
16
+ - User interface environments via Notebooks for easy-to-use AI/ML projects.
16
+ - Example notebooks for a quick AI/ML start with your own data.
18
+
19
+ Installation
20
+ ----------------
21
+
22
+ This library is intended to accelerate the development of data science products
23
+ for remote sensing satellite imagery and related applications. pytorch-caney can be installed
24
+ by itself, but instructions for installing the full environments are listed under the requirements
25
+ directory so that projects, examples, and notebooks can be run.
26
+
27
+ Note: PIP installations do not include CUDA libraries for GPU support. Make sure NVIDIA libraries
28
+ are installed locally in the system if not using conda/mamba.
29
+
30
+ .. code-block:: bash
31
+
32
+ module load singularity # if a module needs to be loaded
33
+ singularity build --sandbox pytorch-caney-container docker://nasanccs/pytorch-caney:latest
34
+
35
+
36
+ Why Caney?
37
+ ---------------
38
+
39
+ "Caney" means longhouse in Taíno.
40
+
41
+ Contributors
42
+ -------------
43
+
44
+ - Jordan Alexis Caraballo-Vega, jordan.a.caraballo-vega@nasa.gov
45
+ - Caleb Spradlin, caleb.s.spradlin@nasa.gov
46
+ - Jian Li, jian.li@nasa.gov
47
+
48
+ Contributing
49
+ -------------
50
+
51
+ Please see our `guide for contributing to pytorch-caney <CONTRIBUTING.md>`_.
52
+
53
+ SatVision
54
+ ------------
55
+
56
+ +---------------+--------------+------------+------------+
57
+ | Name | Pretrain | Resolution | Parameters |
58
+ +===============+==============+============+============+
59
+ | SatVision-B | MODIS-1.9-M | 192x192 | 84.5M |
60
+ +---------------+--------------+------------+------------+
61
+
62
+ SatVision Datasets
63
+ -----------------------
64
+
65
+ +---------------+-----------+------------+-------------+
66
+ | Name | Bands | Resolution | Image Chips |
67
+ +===============+===========+============+=============+
68
+ | MODIS-Small | 7 | 128x128 | 1,994,131 |
69
+ +---------------+-----------+------------+-------------+
70
+
71
+ MODIS Surface Reflectance (MOD09GA) Band Details
72
+ ------------------------------------------------------
73
+
74
+ +-----------------+---------------+
75
+ | Band Name | Bandwidth |
76
+ +=================+===============+
77
+ | sur_refl_b01_1 | 0.620 - 0.670 |
78
+ +-----------------+---------------+
79
+ | sur_refl_b02_1 | 0.841 - 0.876 |
80
+ +-----------------+---------------+
81
+ | sur_refl_b03_1 | 0.459 - 0.479 |
82
+ +-----------------+---------------+
83
+ | sur_refl_b04_1 | 0.545 - 0.565 |
84
+ +-----------------+---------------+
85
+ | sur_refl_b05_1 | 1.230 - 1.250 |
86
+ +-----------------+---------------+
87
+ | sur_refl_b06_1 | 1.628 - 1.652 |
88
+ +-----------------+---------------+
89
+ | sur_refl_b07_1 | 2.105 - 2.155 |
90
+ +-----------------+---------------+
91
+
92
+ Pre-training with Masked Image Modeling
93
+ -----------------------------------------
94
+
95
+ To pre-train the swinv2 base model with masked image modeling, run:
96
+
97
+ .. code-block:: bash
98
+
99
+ torchrun --nproc_per_node <NGPUS> pytorch-caney/pytorch_caney/pipelines/pretraining/mim.py --cfg <config-file> --dataset <dataset-name> --data-paths <path-to-data-subfolder-1> --batch-size <batch-size> --output <output-dir> --enable-amp
100
+
101
+ For example, to run on a compute node with 4 GPUs and a batch size of 128 on the MODIS SatVision pre-training dataset with a base swinv2 model, run:
102
+
103
+ .. code-block:: bash
104
+
105
+ singularity shell --nv -B <mounts> /path/to/container/pytorch-caney-container
106
+ Singularity> export PYTHONPATH=$PWD:$PWD/pytorch-caney
107
+ Singularity> torchrun --nproc_per_node 4 pytorch-caney/pytorch_caney/pipelines/pretraining/mim.py --cfg pytorch-caney/examples/satvision/mim_pretrain_swinv2_satvision_base_192_window12_800ep.yaml --dataset MODIS --data-paths /explore/nobackup/projects/ilab/data/satvision/pretraining/training_* --batch-size 128 --output . --enable-amp
108
+
109
+
110
+ The following example script runs the exact configuration used to pre-train the SatVision-base model with masked image modeling on the MODIS pre-training dataset:
111
+
112
+ .. code-block:: bash
113
+
114
+ singularity shell --nv -B <mounts> /path/to/container/pytorch-caney-container
115
+ Singularity> cd pytorch-caney/examples/satvision
116
+ Singularity> ./run_satvision_pretrain.sh
117
+
118
+
119
+ Fine-tuning SatVision-base
120
+ -----------------------------
121
+
122
+ To fine-tune the SatVision-base pre-trained model, run:
123
+
124
+ .. code-block:: bash
125
+
126
+ torchrun --nproc_per_node <NGPUS> pytorch-caney/pytorch_caney/pipelines/finetuning/finetune.py --cfg <config-file> --pretrained <path-to-pretrained> --dataset <dataset-name> --data-paths <path-to-data-subfolder-1> --batch-size <batch-size> --output <output-dir> --enable-amp
127
+
128
+ See the example config files pytorch-caney/examples/satvision/finetune_satvision_base_*.yaml for how to structure your config file for fine-tuning.
129
+
130
+
131
+ Testing
132
+ ------------
133
+
134
+ To run linting and the unit tests, run the bash command below. This executes the unit tests and linting in a temporary venv used only for testing.
135
+
136
+ .. code-block:: bash
137
+
138
+ git clone git@github.com:nasa-nccs-hpda/pytorch-caney.git
139
+ cd pytorch-caney; bash test.sh
140
+
141
+
142
+ Or run the unit tests directly with the container or an anaconda environment:
143
+
144
+ .. code-block:: bash
145
+
146
+ git clone git@github.com:nasa-nccs-hpda/pytorch-caney.git
147
+ singularity build --sandbox pytorch-caney-container docker://nasanccs/pytorch-caney:latest
148
+ singularity shell --nv -B <mounts> /path/to/container/pytorch-caney-container
149
+ cd pytorch-caney; python -m unittest discover pytorch_caney/tests
150
+
151
+ .. code-block:: bash
152
+
153
+ git clone git@github.com:nasa-nccs-hpda/pytorch-caney.git
154
+ cd pytorch-caney; conda env create -f requirements/environment_gpu.yml;
155
+ conda activate pytorch-caney
156
+ python -m unittest discover pytorch_caney/tests
157
+
158
+
159
+ References
160
+ ------------
161
+
162
+ - `Pytorch Lightning <https://github.com/Lightning-AI/lightning>`_
163
+ - `Swin Transformer <https://github.com/microsoft/Swin-Transformer>`_
164
+ - `SimMIM <https://github.com/microsoft/SimMIM>`_
pytorch-caney/docs/Makefile ADDED
@@ -0,0 +1,20 @@
1
+ # Minimal makefile for Sphinx documentation
2
+ #
3
+
4
+ # You can set these variables from the command line, and also
5
+ # from the environment for the first two.
6
+ SPHINXOPTS ?=
7
+ SPHINXBUILD ?= sphinx-build
8
+ SOURCEDIR = .
9
+ BUILDDIR = _build
10
+
11
+ # Put it first so that "make" without argument is like "make help".
12
+ help:
13
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14
+
15
+ .PHONY: help Makefile
16
+
17
+ # Catch-all target: route all unknown targets to Sphinx using the new
18
+ # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19
+ %: Makefile
20
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
pytorch-caney/docs/conf.py ADDED
@@ -0,0 +1,52 @@
1
+ import os
2
+ import sys
3
+
4
+ sys.path.insert(0, os.path.abspath('..'))
5
+
6
+ import pytorch_caney # noqa: E402
7
+
8
+ project = 'pytorch-caney'
9
+ copyright = '2023, Jordan A. Caraballo-Vega'
10
+ author = 'Jordan A. Caraballo-Vega'
11
+
12
+ extensions = [
13
+ 'sphinx.ext.autodoc',
14
+ 'sphinx_autodoc_typehints',
15
+ 'jupyter_sphinx.execute',
16
+ "sphinx.ext.intersphinx",
17
+ "sphinx.ext.viewcode",
18
+ "sphinx.ext.napoleon",
19
+ "sphinx_click.ext",
20
+ "sphinx.ext.githubpages",
21
+ "nbsphinx",
22
+ ]
23
+
24
+ intersphinx_mapping = {
25
+ "pyproj": ("https://pyproj4.github.io/pyproj/stable/", None),
26
+ "rasterio": ("https://rasterio.readthedocs.io/en/stable/", None),
27
+ "xarray": ("http://xarray.pydata.org/en/stable/", None),
28
+ }
29
+
30
+ templates_path = ['_templates']
31
+ exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
32
+
33
+ master_doc = "index"
34
+
35
+ version = release = pytorch_caney.__version__
36
+
37
+ pygments_style = "sphinx"
38
+
39
+ todo_include_todos = False
40
+
41
+ html_theme = 'sphinx_rtd_theme'
42
+ html_logo = 'static/DSG_LOGO_REDESIGN.png'
43
+
44
+ myst_enable_extensions = [
45
+ "amsmath",
46
+ "colon_fence",
47
+ "deflist",
48
+ "dollarmath",
49
+ "html_image",
50
+ ]
51
+
52
+ myst_url_schemes = ("http", "https", "mailto")
pytorch-caney/docs/examples.rst ADDED
@@ -0,0 +1,3 @@
1
+ .. toctree::
2
+ :maxdepth: 2
3
+ :caption: Contents:
pytorch-caney/docs/index.rst ADDED
@@ -0,0 +1,22 @@
1
+ .. pytorch-caney's documentation master file, created by
2
+ sphinx-quickstart on Fri Jun 23 11:32:18 2023.
3
+ You can adapt this file completely to your liking, but it should at least
4
+ contain the root `toctree` directive.
5
+
6
+ Welcome to pytorch-caney's documentation!
7
+ =========================================
8
+
9
+ .. toctree::
10
+ :maxdepth: 2
11
+ :caption: Contents:
12
+
13
+ readme
14
+ examples
15
+ modules
16
+
17
+ Indices and tables
18
+ ==================
19
+
20
+ * :ref:`genindex`
21
+ * :ref:`modindex`
22
+ * :ref:`search`
pytorch-caney/docs/make.bat ADDED
@@ -0,0 +1,35 @@
1
+ @ECHO OFF
2
+
3
+ pushd %~dp0
4
+
5
+ REM Command file for Sphinx documentation
6
+
7
+ if "%SPHINXBUILD%" == "" (
8
+ set SPHINXBUILD=sphinx-build
9
+ )
10
+ set SOURCEDIR=.
11
+ set BUILDDIR=_build
12
+
13
+ %SPHINXBUILD% >NUL 2>NUL
14
+ if errorlevel 9009 (
15
+ echo.
16
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
17
+ echo.installed, then set the SPHINXBUILD environment variable to point
18
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
19
+ echo.may add the Sphinx directory to PATH.
20
+ echo.
21
+ echo.If you don't have Sphinx installed, grab it from
22
+ echo.https://www.sphinx-doc.org/
23
+ exit /b 1
24
+ )
25
+
26
+ if "%1" == "" goto help
27
+
28
+ %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29
+ goto end
30
+
31
+ :help
32
+ %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33
+
34
+ :end
35
+ popd
pytorch-caney/docs/modules.rst ADDED
@@ -0,0 +1,284 @@
1
+ pytorch-caney package
2
+ ========================
3
+
4
+ pytorch_caney.config
5
+ ----------------------
6
+
7
+ .. automodule:: pytorch_caney.config
8
+ :members:
9
+ :undoc-members:
10
+ :show-inheritance:
11
+
12
+ pytorch_caney.data.datamodules.finetune_datamodule
13
+ ----------------------
14
+
15
+ .. automodule:: pytorch_caney.data.datamodules.finetune_datamodule
16
+ :members:
17
+ :undoc-members:
18
+ :show-inheritance:
19
+
20
+ pytorch_caney.data.datamodules.mim_datamodule
21
+ ----------------------
22
+
23
+ .. automodule:: pytorch_caney.data.datamodules.mim_datamodule
24
+ :members:
25
+ :undoc-members:
26
+ :show-inheritance:
27
+
28
+ pytorch_caney.data.datamodules.segmentation_datamodule
29
+ ----------------------
30
+
31
+ .. automodule:: pytorch_caney.data.datamodules.segmentation_datamodule
32
+ :members:
33
+ :undoc-members:
34
+ :show-inheritance:
35
+
36
+ pytorch_caney.data.datamodules.simmim_datamodule
37
+ ----------------------
38
+
39
+ .. automodule:: pytorch_caney.data.datamodules.simmim_datamodule
40
+ :members:
41
+ :undoc-members:
42
+ :show-inheritance:
43
+
44
+ pytorch_caney.data.datasets.classification_dataset
45
+ ----------------------
46
+
47
+ .. automodule:: pytorch_caney.data.datasets.classification_dataset
48
+ :members:
49
+ :undoc-members:
50
+ :show-inheritance:
51
+
52
+ pytorch_caney.data.datasets.modis_dataset
53
+ ----------------------
54
+
55
+ .. automodule:: pytorch_caney.data.datasets.modis_dataset
56
+ :members:
57
+ :undoc-members:
58
+ :show-inheritance:
59
+
60
+ pytorch_caney.data.transforms
61
+ ----------------------
62
+
63
+ .. automodule:: pytorch_caney.data.transforms
64
+ :members:
65
+ :undoc-members:
66
+ :show-inheritance:
67
+
68
+ pytorch_caney.data.utils
69
+ ----------------------
70
+
71
+ .. automodule:: pytorch_caney.data.utils
72
+ :members:
73
+ :undoc-members:
74
+ :show-inheritance:
75
+
76
+ pytorch_caney.inference
77
+ ----------------------
78
+
79
+ .. automodule:: pytorch_caney.inference
80
+ :members:
81
+ :undoc-members:
82
+ :show-inheritance:
83
+
84
+ pytorch_caney.loss.build
85
+ ----------------------
86
+
87
+ .. automodule:: pytorch_caney.loss.build
88
+ :members:
89
+ :undoc-members:
90
+ :show-inheritance:
91
+
92
+ pytorch_caney.loss.utils
93
+ ----------------------
94
+
95
+ .. automodule:: pytorch_caney.loss.utils
96
+ :members:
97
+ :undoc-members:
98
+ :show-inheritance:
99
+
100
+ pytorch_caney.lr_scheduler
101
+ ----------------------
102
+
103
+ .. automodule:: pytorch_caney.lr_scheduler
104
+ :members:
105
+ :undoc-members:
106
+ :show-inheritance:
107
+
108
+ pytorch_caney.metrics
109
+ ----------------------
110
+
111
+ .. automodule:: pytorch_caney.metrics
112
+ :members:
113
+ :undoc-members:
114
+ :show-inheritance:
115
+
116
+ pytorch_caney.models.decoders.unet_decoder
117
+ ----------------------
118
+
119
+ .. automodule:: pytorch_caney.models.decoders.unet_decoder
120
+ :members:
121
+ :undoc-members:
122
+ :show-inheritance:
123
+
124
+ pytorch_caney.models.mim.mim
125
+ ----------------------
126
+
127
+ .. automodule:: pytorch_caney.models.mim.mim
128
+ :members:
129
+ :undoc-members:
130
+ :show-inheritance:
131
+
132
+ pytorch_caney.models.simmim.simmim
133
+ ----------------------
134
+
135
+ .. automodule:: pytorch_caney.models.simmim.simmim
136
+ :members:
137
+ :undoc-members:
138
+ :show-inheritance:
139
+
140
+ pytorch_caney.models.build
141
+ ----------------------
142
+
143
+ .. automodule:: pytorch_caney.models.build
144
+ :members:
145
+ :undoc-members:
146
+ :show-inheritance:
147
+
148
+ pytorch_caney.models.maskrcnn_model
149
+ ----------------------
150
+
151
+ .. automodule:: pytorch_caney.models.maskrcnn_model
152
+ :members:
153
+ :undoc-members:
154
+ :show-inheritance:
155
+
156
+ pytorch_caney.models.swinv2_model
157
+ ----------------------
158
+
159
+ .. automodule:: pytorch_caney.models.swinv2_model
160
+ :members:
161
+ :undoc-members:
162
+ :show-inheritance:
163
+
164
+ pytorch_caney.models.unet_model
165
+ ----------------------
166
+
167
+ .. automodule:: pytorch_caney.models.unet_model
168
+ :members:
169
+ :undoc-members:
170
+ :show-inheritance:
171
+
172
+ pytorch_caney.models.unet_swin_model
173
+ ----------------------
174
+
175
+ .. automodule:: pytorch_caney.models.unet_swin_model
176
+ :members:
177
+ :undoc-members:
178
+ :show-inheritance:
179
+
180
+ pytorch_caney.network.attention
181
+ ----------------------
182
+
183
+ .. automodule:: pytorch_caney.network.attention
184
+ :members:
185
+ :undoc-members:
186
+ :show-inheritance:
187
+
188
+ pytorch_caney.network.mlp
189
+ ----------------------
190
+
191
+ .. automodule:: pytorch_caney.network.mlp
192
+ :members:
193
+ :undoc-members:
194
+ :show-inheritance:
195
+
196
+ pytorch_caney.pipelines.finetuning.finetune
197
+ ----------------------
198
+
199
+ .. automodule:: pytorch_caney.pipelines.finetuning.finetune
200
+ :members:
201
+ :undoc-members:
202
+ :show-inheritance:
203
+
204
+ pytorch_caney.pipelines.pretraining.mim
205
+ ----------------------
206
+
207
+ .. automodule:: pytorch_caney.pipelines.pretraining.mim
208
+ :members:
209
+ :undoc-members:
210
+ :show-inheritance:
211
+
212
+ pytorch_caney.pipelines.modis_segmentation
213
+ ----------------------
214
+
215
+ .. automodule:: pytorch_caney.pipelines.modis_segmentation
216
+ :members:
217
+ :undoc-members:
218
+ :show-inheritance:
219
+
220
+ pytorch_caney.processing
221
+ ----------------------
222
+
223
+ .. automodule:: pytorch_caney.processing
224
+ :members:
225
+ :undoc-members:
226
+ :show-inheritance:
227
+
228
+ pytorch_caney.ptc_logging
229
+ ----------------------
230
+
231
+ .. automodule:: pytorch_caney.ptc_logging
232
+ :members:
233
+ :undoc-members:
234
+ :show-inheritance:
235
+
236
+ pytorch_caney.training.fine_tuning
237
+ ----------------------
238
+
239
+ .. automodule:: pytorch_caney.training.fine_tuning
240
+ :members:
241
+ :undoc-members:
242
+ :show-inheritance:
243
+
244
+ pytorch_caney.training.mim_utils
245
+ ----------------------
246
+
247
+ .. automodule:: pytorch_caney.training.mim_utils
248
+ :members:
249
+ :undoc-members:
250
+ :show-inheritance:
251
+
252
+ pytorch_caney.training.pre_training
253
+ ----------------------
254
+
255
+ .. automodule:: pytorch_caney.training.pre_training
256
+ :members:
257
+ :undoc-members:
258
+ :show-inheritance:
259
+
260
+ pytorch_caney.training.simmim_utils
261
+ ----------------------
262
+
263
+ .. automodule:: pytorch_caney.training.simmim_utils
264
+ :members:
265
+ :undoc-members:
266
+ :show-inheritance:
267
+
268
+ pytorch_caney.training.utils
269
+ ----------------------
270
+
271
+ .. automodule:: pytorch_caney.training.utils
272
+ :members:
273
+ :undoc-members:
274
+ :show-inheritance:
275
+
276
+ pytorch_caney.utils
277
+ ----------------------
278
+
279
+ .. automodule:: pytorch_caney.utils
280
+ :members:
281
+ :undoc-members:
282
+ :show-inheritance:
283
+
284
+
pytorch-caney/docs/pytorch_caney.rst ADDED
@@ -0,0 +1,31 @@
1
+ pytorch-caney
2
+ ------------------
3
+
4
+ Python package providing a variety of PyTorch tools for geospatial science problems.
5
+
6
+ Installation
7
+ -----------------
8
+
9
+ Install with pip
10
+ ::
11
+
12
+ pip install pytorch-caney
13
+
14
+ API
15
+ ----
16
+
17
+ .. automodule:: pytorch_caney
18
+ :members:
19
+
20
+ Authors
21
+ ----------
22
+
23
+ Jordan A. Caraballo-Vega, jordan.a.caraballo-vega@nasa.gov
24
+ Caleb S. Spradlin, caleb.s.spradlin@nasa.gov
25
+ Jian Li, jian.li@nasa.gov
26
+
27
+ License
28
+ ---------------
29
+
30
+ The package is released under the `MIT
31
+ License <https://opensource.org/licenses/MIT>`__.
pytorch-caney/docs/readme.rst ADDED
@@ -0,0 +1 @@
1
+ .. include:: ../README.rst
pytorch-caney/docs/requirements.txt ADDED
@@ -0,0 +1,4 @@
1
+ sphinx
2
+ sphinx-autodoc-typehints
3
+ jupyter-sphinx
4
+ sphinx-rtd-theme
pytorch-caney/docs/source/index.rst ADDED
@@ -0,0 +1,20 @@
1
+ .. pytorch-caney documentation master file, created by
2
+ sphinx-quickstart on Fri Jan 13 06:59:19 2023.
3
+ You can adapt this file completely to your liking, but it should at least
4
+ contain the root `toctree` directive.
5
+
6
+ Welcome to pytorch-caney's documentation!
7
+ ============================================
8
+
9
+ .. toctree::
10
+ :maxdepth: 2
11
+ :caption: Contents:
12
+
13
+
14
+
15
+ Indices and tables
16
+ ==================
17
+
18
+ * :ref:`genindex`
19
+ * :ref:`modindex`
20
+ * :ref:`search`
pytorch-caney/docs/static/DSG_LOGO_REDESIGN.png ADDED
pytorch-caney/examples/satvision/finetune_satvision_base_landcover5class_192_window12_100ep.yaml ADDED
@@ -0,0 +1,33 @@
1
+ MODEL:
2
+ TYPE: swinv2
3
+ DECODER: unet
4
+ NAME: satvision_finetune_lc5class
5
+ DROP_PATH_RATE: 0.1
6
+ NUM_CLASSES: 5
7
+ SWINV2:
8
+ IN_CHANS: 7
9
+ EMBED_DIM: 128
10
+ DEPTHS: [ 2, 2, 18, 2 ]
11
+ NUM_HEADS: [ 4, 8, 16, 32 ]
12
+ WINDOW_SIZE: 14
13
+ PRETRAINED_WINDOW_SIZES: [ 12, 12, 12, 6 ]
14
+ DATA:
15
+ IMG_SIZE: 224
16
+ DATASET: MODISLC5
17
+ MASK_PATCH_SIZE: 32
18
+ MASK_RATIO: 0.6
19
+ LOSS:
20
+ NAME: 'tversky'
21
+ MODE: 'multiclass'
22
+ ALPHA: 0.4
23
+ BETA: 0.6
24
+ TRAIN:
25
+ EPOCHS: 100
26
+ WARMUP_EPOCHS: 10
27
+ BASE_LR: 1e-4
28
+ WARMUP_LR: 5e-7
29
+ WEIGHT_DECAY: 0.01
30
+ LAYER_DECAY: 0.8
31
+ PRINT_FREQ: 100
32
+ SAVE_FREQ: 5
33
+ TAG: satvision_finetune_land_cover_5class_swinv2_satvision_192_window12__800ep
pytorch-caney/examples/satvision/finetune_satvision_base_landcover9class_192_window12_100ep.yaml ADDED
@@ -0,0 +1,33 @@
1
+ MODEL:
2
+ TYPE: swinv2
3
+ DECODER: unet
4
+ NAME: satvision_finetune_lc9class
5
+ DROP_PATH_RATE: 0.1
6
+ NUM_CLASSES: 9
7
+ SWINV2:
8
+ IN_CHANS: 7
9
+ EMBED_DIM: 128
10
+ DEPTHS: [ 2, 2, 18, 2 ]
11
+ NUM_HEADS: [ 4, 8, 16, 32 ]
12
+ WINDOW_SIZE: 14
13
+ PRETRAINED_WINDOW_SIZES: [ 12, 12, 12, 6 ]
14
+ DATA:
15
+ IMG_SIZE: 224
16
+ DATASET: MODISLC9
17
+ MASK_PATCH_SIZE: 32
18
+ MASK_RATIO: 0.6
19
+ LOSS:
20
+ NAME: 'tversky'
21
+ MODE: 'multiclass'
22
+ ALPHA: 0.4
23
+ BETA: 0.6
24
+ TRAIN:
25
+ EPOCHS: 100
26
+ WARMUP_EPOCHS: 10
27
+ BASE_LR: 1e-4
28
+ WARMUP_LR: 5e-7
29
+ WEIGHT_DECAY: 0.01
30
+ LAYER_DECAY: 0.8
31
+ PRINT_FREQ: 100
32
+ SAVE_FREQ: 5
33
+ TAG: satvision_finetune_land_cover_9class_swinv2_satvision_192_window12__800ep
pytorch-caney/examples/satvision/mim_pretrain_swinv2_satvision_base_192_window12_800ep.yaml ADDED
@@ -0,0 +1,27 @@
1
+ MODEL:
2
+ TYPE: swinv2
3
+ NAME: mim_satvision_pretrain
4
+ DROP_PATH_RATE: 0.1
5
+ SWINV2:
6
+ IN_CHANS: 7
7
+ EMBED_DIM: 128
8
+ DEPTHS: [ 2, 2, 18, 2 ]
9
+ NUM_HEADS: [ 4, 8, 16, 32 ]
10
+ WINDOW_SIZE: 12
11
+ DATA:
12
+ IMG_SIZE: 192
13
+ MASK_PATCH_SIZE: 32
14
+ MASK_RATIO: 0.6
15
+ TRAIN:
16
+ EPOCHS: 800
17
+ WARMUP_EPOCHS: 10
18
+ BASE_LR: 1e-4
19
+ WARMUP_LR: 5e-7
20
+ WEIGHT_DECAY: 0.05
21
+ LR_SCHEDULER:
22
+ NAME: 'multistep'
23
+ GAMMA: 0.1
24
+ MULTISTEPS: [700,]
25
+ PRINT_FREQ: 100
26
+ SAVE_FREQ: 5
27
+ TAG: mim_pretrain_swinv2_satvision_192_window12__800ep
pytorch-caney/examples/satvision/run_satvision_finetune_lc_fiveclass.sh ADDED
@@ -0,0 +1,20 @@
1
+ #!/bin/bash
2
+
3
+ #SBATCH -J finetune_satvision_lc5
4
+ #SBATCH -t 3-00:00:00
5
+ #SBATCH -G 4
6
+ #SBATCH -N 1
7
+
8
+
9
+ export PYTHONPATH=$PWD:../../../:../../../pytorch-caney
10
+ export NGPUS=4  # match the 4 GPUs requested via #SBATCH -G 4
11
+
12
+ torchrun --nproc_per_node $NGPUS \
13
+ ../../../pytorch-caney/pytorch_caney/pipelines/finetuning/finetune.py \
14
+ --cfg finetune_satvision_base_landcover5class_192_window12_100ep.yaml \
15
+ --pretrained /explore/nobackup/people/cssprad1/projects/satnet/code/development/masked_image_modeling/development/models/simmim_satnet_pretrain_pretrain/simmim_pretrain__satnet_swinv2_base__img192_window12__800ep_v3_no_norm/ckpt_epoch_800.pth \
16
+ --dataset MODISLC5 \
17
+ --data-paths /explore/nobackup/projects/ilab/data/satvision/finetuning/h18v04/labels_5classes_224 \
18
+ --batch-size 4 \
19
+ --output /explore/nobackup/people/cssprad1/projects/satnet/code/development/cleanup/finetune/models \
20
+ --enable-amp
pytorch-caney/examples/satvision/run_satvision_finetune_lc_nineclass.sh ADDED
@@ -0,0 +1,20 @@
1
+ #!/bin/bash
2
+
3
+ #SBATCH -J finetune_satvision_lc9
4
+ #SBATCH -t 3-00:00:00
5
+ #SBATCH -G 4
6
+ #SBATCH -N 1
7
+
8
+
9
+ export PYTHONPATH=$PWD:../../../:../../../pytorch-caney
10
+ export NGPUS=4  # match the 4 GPUs requested via #SBATCH -G 4
11
+
12
+ torchrun --nproc_per_node $NGPUS \
13
+ ../../../pytorch-caney/pytorch_caney/pipelines/finetuning/finetune.py \
14
+ --cfg finetune_satvision_base_landcover9class_192_window12_100ep.yaml \
15
+ --pretrained /explore/nobackup/people/cssprad1/projects/satnet/code/development/masked_image_modeling/development/models/simmim_satnet_pretrain_pretrain/simmim_pretrain__satnet_swinv2_base__img192_window12__800ep_v3_no_norm/ckpt_epoch_800.pth \
16
+ --dataset MODISLC9 \
17
+ --data-paths /explore/nobackup/projects/ilab/data/satvision/finetuning/h18v04/labels_9classes_224 \
18
+ --batch-size 4 \
19
+ --output /explore/nobackup/people/cssprad1/projects/satnet/code/development/cleanup/finetune/models \
20
+ --enable-amp
pytorch-caney/examples/satvision/run_satvision_pretrain.sh ADDED
@@ -0,0 +1,19 @@
1
+ #!/bin/bash
2
+
3
+ #SBATCH -J pretrain_satvision_swinv2
4
+ #SBATCH -t 3-00:00:00
5
+ #SBATCH -G 4
6
+ #SBATCH -N 1
7
+
8
+
9
+ export PYTHONPATH=$PWD:../../../:../../../pytorch-caney
10
+ export NGPUS=4
11
+
12
+ torchrun --nproc_per_node $NGPUS \
13
+ ../../../pytorch-caney/pytorch_caney/pipelines/pretraining/mim.py \
14
+ --cfg mim_pretrain_swinv2_satvision_base_192_window12_800ep.yaml \
15
+ --dataset MODIS \
16
+ --data-paths /explore/nobackup/projects/ilab/data/satvision/pretraining/training_* \
17
+ --batch-size 128 \
18
+ --output /explore/nobackup/people/cssprad1/projects/satnet/code/development/cleanup/trf/transformer/models \
19
+ --enable-amp
pytorch-caney/pyproject.toml ADDED
@@ -0,0 +1,6 @@
1
+ [build-system]
2
+ # Minimum requirements for the build system to execute.
3
+ requires = ["setuptools", "wheel"]
4
+
5
+ [tool.black]
6
+ target_version = ['py39']
pytorch-caney/pytorch_caney/__init__.py ADDED
@@ -0,0 +1 @@
1
+ __version__ = "0.1.0"
pytorch-caney/pytorch_caney/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (228 Bytes). View file
 
pytorch-caney/pytorch_caney/config.py ADDED
@@ -0,0 +1,226 @@
1
+ import os
2
+ import yaml
3
+ from yacs.config import CfgNode as CN
4
+
5
+ _C = CN()
6
+
7
+ # Base config files
8
+ _C.BASE = ['']
9
+
10
+ # -----------------------------------------------------------------------------
11
+ # Data settings
12
+ # -----------------------------------------------------------------------------
13
+ _C.DATA = CN()
14
+ # Batch size for a single GPU, could be overwritten by command line argument
15
+ _C.DATA.BATCH_SIZE = 128
16
+ # Path(s) to dataset, could be overwritten by command line argument
17
+ _C.DATA.DATA_PATHS = ['']
18
+ # Dataset name
19
+ _C.DATA.DATASET = 'MODIS'
20
+ # Input image size
21
+ _C.DATA.IMG_SIZE = 224
22
+ # Interpolation to resize image (random, bilinear, bicubic)
23
+ _C.DATA.INTERPOLATION = 'bicubic'
24
+ # Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.
25
+ _C.DATA.PIN_MEMORY = True
26
+ # Number of data loading threads
27
+ _C.DATA.NUM_WORKERS = 8
28
+ # [SimMIM] Mask patch size for MaskGenerator
29
+ _C.DATA.MASK_PATCH_SIZE = 32
30
+ # [SimMIM] Mask ratio for MaskGenerator
31
+ _C.DATA.MASK_RATIO = 0.6
32
+
33
+ # -----------------------------------------------------------------------------
34
+ # Model settings
35
+ # -----------------------------------------------------------------------------
36
+ _C.MODEL = CN()
37
+ # Model type
38
+ _C.MODEL.TYPE = 'swinv2'
39
+ # Decoder type
40
+ _C.MODEL.DECODER = None
41
+ # Model name
42
+ _C.MODEL.NAME = 'swinv2_base_patch4_window7_224'
43
+ # Pretrained weight from checkpoint, could be from previous pre-training
44
+ # could be overwritten by command line argument
45
+ _C.MODEL.PRETRAINED = ''
46
+ # Checkpoint to resume, could be overwritten by command line argument
47
+ _C.MODEL.RESUME = ''
48
+ # Number of classes, overwritten in data preparation
49
+ _C.MODEL.NUM_CLASSES = 17
50
+ # Dropout rate
51
+ _C.MODEL.DROP_RATE = 0.0
52
+ # Drop path rate
53
+ _C.MODEL.DROP_PATH_RATE = 0.1
54
+
55
+ # Swin Transformer V2 parameters
56
+ _C.MODEL.SWINV2 = CN()
57
+ _C.MODEL.SWINV2.PATCH_SIZE = 4
58
+ _C.MODEL.SWINV2.IN_CHANS = 3
59
+ _C.MODEL.SWINV2.EMBED_DIM = 96
60
+ _C.MODEL.SWINV2.DEPTHS = [2, 2, 6, 2]
61
+ _C.MODEL.SWINV2.NUM_HEADS = [3, 6, 12, 24]
62
+ _C.MODEL.SWINV2.WINDOW_SIZE = 7
63
+ _C.MODEL.SWINV2.MLP_RATIO = 4.
64
+ _C.MODEL.SWINV2.QKV_BIAS = True
65
+ _C.MODEL.SWINV2.APE = False
66
+ _C.MODEL.SWINV2.PATCH_NORM = True
67
+ _C.MODEL.SWINV2.PRETRAINED_WINDOW_SIZES = [0, 0, 0, 0]
68
+
69
+ # -----------------------------------------------------------------------------
70
+ # Training settings
71
+ # -----------------------------------------------------------------------------
72
+ _C.LOSS = CN()
73
+ _C.LOSS.NAME = 'tversky'
74
+ _C.LOSS.MODE = 'multiclass'
75
+ _C.LOSS.CLASSES = None
76
+ _C.LOSS.LOG = False
77
+ _C.LOSS.LOGITS = True
78
+ _C.LOSS.SMOOTH = 0.0
79
+ _C.LOSS.IGNORE_INDEX = None
80
+ _C.LOSS.EPS = 1e-7
81
+ _C.LOSS.ALPHA = 0.5
82
+ _C.LOSS.BETA = 0.5
83
+ _C.LOSS.GAMMA = 1.0
84
+
85
+ # -----------------------------------------------------------------------------
86
+ # Training settings
87
+ # -----------------------------------------------------------------------------
88
+ _C.TRAIN = CN()
89
+ _C.TRAIN.START_EPOCH = 0
90
+ _C.TRAIN.EPOCHS = 300
91
+ _C.TRAIN.WARMUP_EPOCHS = 20
92
+ _C.TRAIN.WEIGHT_DECAY = 0.05
93
+ _C.TRAIN.BASE_LR = 5e-4
94
+ _C.TRAIN.WARMUP_LR = 5e-7
95
+ _C.TRAIN.MIN_LR = 5e-6
96
+ # Clip gradient norm
97
+ _C.TRAIN.CLIP_GRAD = 5.0
98
+ # Auto resume from latest checkpoint
99
+ _C.TRAIN.AUTO_RESUME = True
100
+ # Gradient accumulation steps
101
+ # could be overwritten by command line argument
102
+ _C.TRAIN.ACCUMULATION_STEPS = 0
103
+ # Whether to use gradient checkpointing to save memory
104
+ # could be overwritten by command line argument
105
+ _C.TRAIN.USE_CHECKPOINT = False
106
+
107
+ # LR scheduler
108
+ _C.TRAIN.LR_SCHEDULER = CN()
109
+ _C.TRAIN.LR_SCHEDULER.NAME = 'cosine'
110
+ # Epoch interval to decay LR, used in StepLRScheduler
111
+ _C.TRAIN.LR_SCHEDULER.DECAY_EPOCHS = 30
112
+ # LR decay rate, used in StepLRScheduler
113
+ _C.TRAIN.LR_SCHEDULER.DECAY_RATE = 0.1
114
+ # Gamma / Multi steps value, used in MultiStepLRScheduler
115
+ _C.TRAIN.LR_SCHEDULER.GAMMA = 0.1
116
+ _C.TRAIN.LR_SCHEDULER.MULTISTEPS = []
117
+
118
+ # Optimizer
119
+ _C.TRAIN.OPTIMIZER = CN()
120
+ _C.TRAIN.OPTIMIZER.NAME = 'adamw'
121
+ # Optimizer Epsilon
122
+ _C.TRAIN.OPTIMIZER.EPS = 1e-8
123
+ # Optimizer Betas
124
+ _C.TRAIN.OPTIMIZER.BETAS = (0.9, 0.999)
125
+ # SGD momentum
126
+ _C.TRAIN.OPTIMIZER.MOMENTUM = 0.9
127
+
128
+ # [SimMIM] Layer decay for fine-tuning
129
+ _C.TRAIN.LAYER_DECAY = 1.0
130
+
131
+
132
+ # -----------------------------------------------------------------------------
133
+ # Testing settings
134
+ # -----------------------------------------------------------------------------
135
+ _C.TEST = CN()
136
+ # Whether to use center crop when testing
137
+ _C.TEST.CROP = True
138
+
139
+ # -----------------------------------------------------------------------------
140
+ # Misc
141
+ # -----------------------------------------------------------------------------
142
+ # Whether to enable pytorch amp, overwritten by command line argument
143
+ _C.ENABLE_AMP = False
144
+ # Enable Pytorch automatic mixed precision (amp).
145
+ _C.AMP_ENABLE = True
146
+ # Path to output folder, overwritten by command line argument
147
+ _C.OUTPUT = ''
148
+ # Tag of experiment, overwritten by command line argument
149
+ _C.TAG = 'pt-caney-default-tag'
150
+ # Frequency to save checkpoint
151
+ _C.SAVE_FREQ = 1
152
+ # Frequency to logging info
153
+ _C.PRINT_FREQ = 10
154
+ # Fixed random seed
155
+ _C.SEED = 42
156
+ # Perform evaluation only, overwritten by command line argument
157
+ _C.EVAL_MODE = False
158
+
159
+
160
+ def _update_config_from_file(config, cfg_file):
161
+ config.defrost()
162
+ with open(cfg_file, 'r') as f:
163
+ yaml_cfg = yaml.load(f, Loader=yaml.FullLoader)
164
+
165
+ for cfg in yaml_cfg.setdefault('BASE', ['']):
166
+ if cfg:
167
+ _update_config_from_file(
168
+ config, os.path.join(os.path.dirname(cfg_file), cfg)
169
+ )
170
+ print('=> merge config from {}'.format(cfg_file))
171
+ config.merge_from_file(cfg_file)
172
+ config.freeze()
173
+
174
+
175
+ def update_config(config, args):
176
+ _update_config_from_file(config, args.cfg)
177
+
178
+ config.defrost()
179
+
180
+ def _check_args(name):
181
+ if hasattr(args, name) and eval(f'args.{name}'):
182
+ return True
183
+ return False
184
+
185
+ # merge from specific arguments
186
+ if _check_args('batch_size'):
187
+ config.DATA.BATCH_SIZE = args.batch_size
188
+ if _check_args('data_paths'):
189
+ config.DATA.DATA_PATHS = args.data_paths
190
+ if _check_args('dataset'):
191
+ config.DATA.DATASET = args.dataset
192
+ if _check_args('resume'):
193
+ config.MODEL.RESUME = args.resume
194
+ if _check_args('pretrained'):
195
+ config.MODEL.PRETRAINED = args.pretrained
196
+ if _check_args('resume'):
197
+ config.MODEL.RESUME = args.resume
198
+ if _check_args('accumulation_steps'):
199
+ config.TRAIN.ACCUMULATION_STEPS = args.accumulation_steps
200
+ if _check_args('use_checkpoint'):
201
+ config.TRAIN.USE_CHECKPOINT = True
202
+ if _check_args('disable_amp'):
203
+ config.AMP_ENABLE = False
204
+ if _check_args('output'):
205
+ config.OUTPUT = args.output
206
+ if _check_args('tag'):
207
+ config.TAG = args.tag
208
+ if _check_args('eval'):
209
+ config.EVAL_MODE = True
210
+ if _check_args('enable_amp'):
211
+ config.ENABLE_AMP = args.enable_amp
212
+
213
+ # output folder
214
+ config.OUTPUT = os.path.join(config.OUTPUT, config.MODEL.NAME, config.TAG)
215
+
216
+ config.freeze()
217
+
218
+
219
+ def get_config(args):
220
+ """Get a yacs CfgNode object with default values."""
221
+ # Return a clone so that the defaults will not be altered
222
+ # This is for the "local variable" use pattern
223
+ config = _C.clone()
224
+ update_config(config, args)
225
+
226
+ return config
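
As a minimal usage sketch (assuming `pytorch_caney` is importable and reusing the MIM pre-training YAML shipped alongside this module), `get_config` merges the yacs defaults above with a YAML file and any command-line overrides whose names match the keys probed in `update_config`:

```python
import argparse

from pytorch_caney.config import get_config

# Flag names mirror the attributes checked by _check_args in update_config.
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, required=True)
parser.add_argument('--dataset', type=str, default=None)
parser.add_argument('--data-paths', dest='data_paths', nargs='+', default=None)
parser.add_argument('--batch-size', dest='batch_size', type=int, default=None)
parser.add_argument('--output', type=str, default=None)
parser.add_argument('--enable-amp', dest='enable_amp', action='store_true')

args = parser.parse_args([
    '--cfg',
    'pytorch-caney/examples/satvision/'
    'mim_pretrain_swinv2_satvision_base_192_window12_800ep.yaml',
])

config = get_config(args)  # frozen CfgNode: defaults + YAML + CLI overrides
print(config.MODEL.NAME, config.DATA.IMG_SIZE)  # mim_satvision_pretrain 192
```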
pytorch-caney/pytorch_caney/console/__init__.py ADDED
File without changes
pytorch-caney/pytorch_caney/console/cli.py ADDED
@@ -0,0 +1,62 @@
1
+ from pytorch_lightning.utilities.cli import LightningCLI
2
+
3
+ import torch
4
+
5
+
6
+ class TerraGPULightningCLI(LightningCLI):
7
+
8
+ def add_arguments_to_parser(self, parser):
9
+
10
+ # Trainer - performance
11
+ parser.set_defaults({"trainer.accelerator": "auto"})
12
+ parser.set_defaults({"trainer.devices": "auto"})
13
+ parser.set_defaults({"trainer.auto_select_gpus": True})
14
+ parser.set_defaults({"trainer.precision": 32})
15
+
16
+ # Trainer - training
17
+ parser.set_defaults({"trainer.max_epochs": 500})
18
+ parser.set_defaults({"trainer.min_epochs": 1})
19
+ parser.set_defaults({"trainer.detect_anomaly": True})
20
+ parser.set_defaults({"trainer.logger": True})
21
+ parser.set_defaults({"trainer.default_root_dir": "output_model"})
22
+
23
+ # Trainer - optimizer - TODO
24
+ _ = {
25
+ "class_path": torch.optim.Adam,
26
+ "init_args": {
27
+ "lr": 0.01
28
+ }
29
+ }
30
+
31
+ # Trainer - callbacks
32
+ default_callbacks = [
33
+ {"class_path": "pytorch_lightning.callbacks.DeviceStatsMonitor"},
34
+ {
35
+ "class_path": "pytorch_lightning.callbacks.EarlyStopping",
36
+ "init_args": {
37
+ "monitor": "val_loss",
38
+ "patience": 5,
39
+ "mode": "min"
40
+ }
41
+ },
42
+ # {
43
+ # "class_path": "pytorch_lightning.callbacks.ModelCheckpoint",
44
+ # "init_args": {
45
+ # "dirpath": "output_model",
46
+ # "monitor": "val_loss",
47
+ # "auto_insert_metric_name": True
48
+ # }
49
+ # },
50
+ ]
51
+ parser.set_defaults({"trainer.callbacks": default_callbacks})
52
+
53
+ # {
54
+ # "class_path": "pytorch_lightning.callbacks.ModelCheckpoint",
55
+ # "init_args": {
56
+ # "dirpath": "output_model",
57
+ # "monitor": "val_loss",
58
+ # "auto_insert_metric_name": True
59
+ # }
60
+ # },
61
+ # ]
62
+ # parser.set_defaults({"trainer.callbacks": default_callbacks})
pytorch-caney/pytorch_caney/console/dl_pipeline.py ADDED
@@ -0,0 +1,62 @@
1
+ # -*- coding: utf-8 -*-
2
+ # DL pipeline: preprocess, train, and predict.
3
+
4
+ import sys
5
+ import logging
6
+
7
+ # from terragpu import unet_model
8
+ # from terragpu.decorators import DuplicateFilter
9
+ # from terragpu.ai.deep_learning.datamodules.segmentation_datamodule \
10
+ # import SegmentationDataModule
11
+
12
+ from pytorch_lightning import seed_everything # , trainer
13
+ # from pytorch_lightning import LightningModule, LightningDataModule
14
+ from pytorch_caney.console.cli import TerraGPULightningCLI
15
+
16
+
17
+ # -----------------------------------------------------------------------------
18
+ # main
19
+ #
20
+ # python dl_pipeline.py options here
21
+ # -----------------------------------------------------------------------------
22
+ def main():
23
+
24
+ # -------------------------------------------------------------------------
25
+ # Set logging
26
+ # -------------------------------------------------------------------------
27
+ logger = logging.getLogger()
28
+ logger.setLevel(logging.INFO)
29
+ ch = logging.StreamHandler(sys.stdout)
30
+ ch.setLevel(logging.INFO)
31
+
32
+ # Set formatter and handlers
33
+ formatter = logging.Formatter(
34
+ "%(asctime)s; %(levelname)s; %(message)s", "%Y-%m-%d %H:%M:%S")
35
+ ch.setFormatter(formatter)
36
+ logger.addHandler(ch)
37
+
38
+ # -------------------------------------------------------------------------
39
+ # Execute pipeline step
40
+ # -------------------------------------------------------------------------
41
+ # Seed every library
42
+ seed_everything(1234, workers=True)
43
+ _ = TerraGPULightningCLI(save_config_callback=None)
44
+ # unet_model.UNetSegmentation, SegmentationDataModule)
45
+
46
+ # train
47
+ # trainer = pl.Trainer()
48
+ # trainer.fit(model, datamodule=dm)
49
+ # validate
50
+ # trainer.validate(datamodule=dm)
51
+ # test
52
+ # trainer.test(datamodule=dm)
53
+ # predict
54
+ # predictions = trainer.predict(datamodule=dm)
55
+ return
56
+
57
+
58
+ # -----------------------------------------------------------------------------
59
+ # Invoke the main
60
+ # -----------------------------------------------------------------------------
61
+ if __name__ == "__main__":
62
+ sys.exit(main())
pytorch-caney/pytorch_caney/data/__pycache__/utils.cpython-310.pyc ADDED
Binary file (3.1 kB). View file
 
pytorch-caney/pytorch_caney/data/datamodules/__init__.py ADDED
File without changes
pytorch-caney/pytorch_caney/data/datamodules/finetune_datamodule.py ADDED
@@ -0,0 +1,114 @@
1
+ from ..datasets.modis_dataset import MODISDataset
2
+ from ..datasets.modis_lc_five_dataset import MODISLCFiveDataset
3
+ from ..datasets.modis_lc_nine_dataset import MODISLCNineDataset
4
+
5
+ from ..transforms import TensorResizeTransform
6
+
7
+ import torch.distributed as dist
8
+ from torch.utils.data import DataLoader, DistributedSampler
9
+
10
+
11
+ DATASETS = {
12
+ 'modis': MODISDataset,
13
+ 'modislc9': MODISLCNineDataset,
14
+ 'modislc5': MODISLCFiveDataset,
15
+ # 'modis tree': MODISTree,
16
+ }
17
+
18
+
19
+ def get_dataset_from_dict(dataset_name: str):
20
+ """Gets the proper dataset given a dataset name.
21
+
22
+ Args:
23
+ dataset_name (str): name of the dataset
24
+
25
+ Raises:
26
+ KeyError: thrown if dataset key is not present in dict
27
+
28
+ Returns:
29
+ dataset: pytorch dataset
30
+ """
31
+
32
+ dataset_name = dataset_name.lower()
33
+
34
+ try:
35
+
36
+ dataset_to_use = DATASETS[dataset_name]
37
+
38
+ except KeyError:
39
+
40
+ error_msg = f"{dataset_name} is not an existing dataset"
41
+
42
+ error_msg = f"{error_msg}. Available datasets: {DATASETS.keys()}"
43
+
44
+ raise KeyError(error_msg)
45
+
46
+ return dataset_to_use
47
+
48
+
49
+ def build_finetune_dataloaders(config, logger):
50
+ """Builds the dataloaders and datasets for a fine-tuning task.
51
+
52
+ Args:
53
+ config: config object
54
+ logger: logging logger
55
+
56
+ Returns:
57
+ dataloader_train: training dataloader
58
+ dataloader_val: validation dataloader
59
+ """
60
+
61
+ transform = TensorResizeTransform(config)
62
+
63
+ logger.info(f'Finetuning data transform:\n{transform}')
64
+
65
+ dataset_name = config.DATA.DATASET
66
+
67
+ logger.info(f'Dataset: {dataset_name}')
68
+ logger.info(f'Data Paths: {config.DATA.DATA_PATHS}')
69
+
70
+ dataset_to_use = get_dataset_from_dict(dataset_name)
71
+
72
+ logger.info(f'Dataset obj: {dataset_to_use}')
73
+
74
+ dataset_train = dataset_to_use(data_paths=config.DATA.DATA_PATHS,
75
+ split="train",
76
+ img_size=config.DATA.IMG_SIZE,
77
+ transform=transform)
78
+
79
+ dataset_val = dataset_to_use(data_paths=config.DATA.DATA_PATHS,
80
+ split="val",
81
+ img_size=config.DATA.IMG_SIZE,
82
+ transform=transform)
83
+
84
+ logger.info(f'Build dataset: train images = {len(dataset_train)}')
85
+
86
+ logger.info(f'Build dataset: val images = {len(dataset_val)}')
87
+
88
+ sampler_train = DistributedSampler(
89
+ dataset_train,
90
+ num_replicas=dist.get_world_size(),
91
+ rank=dist.get_rank(),
92
+ shuffle=True)
93
+
94
+ sampler_val = DistributedSampler(
95
+ dataset_val,
96
+ num_replicas=dist.get_world_size(),
97
+ rank=dist.get_rank(),
98
+ shuffle=False)
99
+
100
+ dataloader_train = DataLoader(dataset_train,
101
+ config.DATA.BATCH_SIZE,
102
+ sampler=sampler_train,
103
+ num_workers=config.DATA.NUM_WORKERS,
104
+ pin_memory=True,
105
+ drop_last=True)
106
+
107
+ dataloader_val = DataLoader(dataset_val,
108
+ config.DATA.BATCH_SIZE,
109
+ sampler=sampler_val,
110
+ num_workers=config.DATA.NUM_WORKERS,
111
+ pin_memory=True,
112
+ drop_last=False)
113
+
114
+ return dataloader_train, dataloader_val
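
A short sketch of the dataset lookup above (assuming the package and its dataset modules are importable): the name is lower-cased before the lookup, and unknown names raise a `KeyError` that lists the registered datasets.

```python
from pytorch_caney.data.datamodules.finetune_datamodule import \
    get_dataset_from_dict

dataset_cls = get_dataset_from_dict('MODISLC9')  # lookup is case-insensitive
print(dataset_cls)                               # MODISLCNineDataset

try:
    get_dataset_from_dict('landsat')             # not a registered dataset
except KeyError as error:
    print(error)                                 # message names the valid keys
```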
pytorch-caney/pytorch_caney/data/datamodules/mim_datamodule.py ADDED
@@ -0,0 +1,80 @@
1
+ from ..datasets.simmim_modis_dataset import MODISDataset
2
+
3
+ from ..transforms import SimmimTransform
4
+
5
+ import torch.distributed as dist
6
+ from torch.utils.data import DataLoader, DistributedSampler
7
+ from torch.utils.data._utils.collate import default_collate
8
+
9
+
10
+ DATASETS = {
11
+ 'MODIS': MODISDataset,
12
+ }
13
+
14
+
15
+ def collate_fn(batch):
16
+ if not isinstance(batch[0][0], tuple):
17
+ return default_collate(batch)
18
+ else:
19
+ batch_num = len(batch)
20
+ ret = []
21
+ for item_idx in range(len(batch[0][0])):
22
+ if batch[0][0][item_idx] is None:
23
+ ret.append(None)
24
+ else:
25
+ ret.append(default_collate(
26
+ [batch[i][0][item_idx] for i in range(batch_num)]))
27
+ ret.append(default_collate([batch[i][1] for i in range(batch_num)]))
28
+ return ret
29
+
30
+
31
+ def get_dataset_from_dict(dataset_name):
32
+
33
+ try:
34
+
35
+ dataset_to_use = DATASETS[dataset_name]
36
+
37
+ except KeyError:
38
+
39
+ error_msg = f"{dataset_name} is not an existing dataset"
40
+
41
+ error_msg = f"{error_msg}. Available datasets: {DATASETS.keys()}"
42
+
43
+ raise KeyError(error_msg)
44
+
45
+ return dataset_to_use
46
+
47
+
48
+ def build_mim_dataloader(config, logger):
49
+
50
+ transform = SimmimTransform(config)
51
+
52
+ logger.info(f'Pre-train data transform:\n{transform}')
53
+
54
+ dataset_name = config.DATA.DATASET
55
+
56
+ dataset_to_use = get_dataset_from_dict(dataset_name)
57
+
58
+ dataset = dataset_to_use(config,
59
+ config.DATA.DATA_PATHS,
60
+ split="train",
61
+ img_size=config.DATA.IMG_SIZE,
62
+ transform=transform)
63
+
64
+ logger.info(f'Build dataset: train images = {len(dataset)}')
65
+
66
+ sampler = DistributedSampler(
67
+ dataset,
68
+ num_replicas=dist.get_world_size(),
69
+ rank=dist.get_rank(),
70
+ shuffle=True)
71
+
72
+ dataloader = DataLoader(dataset,
73
+ config.DATA.BATCH_SIZE,
74
+ sampler=sampler,
75
+ num_workers=config.DATA.NUM_WORKERS,
76
+ pin_memory=True,
77
+ drop_last=True,
78
+ collate_fn=collate_fn)
79
+
80
+ return dataloader
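
A small sketch of what `collate_fn` does, assuming each SimMIM sample is structured as `((image, mask), target)`, which is what the tuple handling above implies; the tensor shapes are illustrative only.

```python
import torch

from pytorch_caney.data.datamodules.mim_datamodule import collate_fn


def fake_sample():
    # (image, mask) pair plus a dummy target, mimicking a SimMIM-style sample
    return (torch.rand(7, 192, 192), torch.zeros(6, 6)), 0


images, masks, targets = collate_fn([fake_sample(), fake_sample()])
print(images.shape)   # torch.Size([2, 7, 192, 192])
print(masks.shape)    # torch.Size([2, 6, 6])
print(targets)        # tensor([0, 0])
```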
pytorch-caney/pytorch_caney/data/datamodules/segmentation_datamodule.py ADDED
@@ -0,0 +1,164 @@
1
+ import os
2
+ import logging
3
+ from typing import Any, Union, Optional
4
+
5
+ import torch
6
+ from torch.utils.data import DataLoader
7
+ from torch.utils.data.dataset import random_split
8
+ from pytorch_lightning import LightningDataModule
9
+ from pytorch_lightning.utilities.cli import DATAMODULE_REGISTRY
10
+
11
+ from terragpu.ai.deep_learning.datasets.segmentation_dataset \
12
+ import SegmentationDataset
13
+
14
+
15
+ @DATAMODULE_REGISTRY
16
+ class SegmentationDataModule(LightningDataModule):
17
+
18
+ def __init__(
19
+ self,
20
+
21
+ # Dataset parameters
22
+ dataset_dir: str = 'dataset/',
23
+ images_regex: str = 'dataset/images/*.tif',
24
+ labels_regex: str = 'dataset/labels/*.tif',
25
+ generate_dataset: bool = True,
26
+ tile_size: int = 256,
27
+ max_patches: Union[float, int] = 100,
28
+ augment: bool = True,
29
+ chunks: dict = {'band': 1, 'x': 2048, 'y': 2048},
30
+ input_bands: list = ['CB', 'B', 'G', 'Y', 'R', 'RE', 'N1', 'N2'],
31
+ output_bands: list = ['B', 'G', 'R'],
32
+ seed: int = 24,
33
+ normalize: bool = True,
34
+ pytorch: bool = True,
35
+
36
+ # Datamodule parameters
37
+ val_split: float = 0.2,
38
+ test_split: float = 0.1,
39
+ num_workers: int = os.cpu_count(),
40
+ batch_size: int = 32,
41
+ shuffle: bool = True,
42
+ pin_memory: bool = False,
43
+ drop_last: bool = False,
44
+
45
+ # Inference parameters
46
+ raster_regex: str = 'rasters/*.tif',
47
+
48
+ *args: Any,
49
+ **kwargs: Any,
50
+
51
+ ) -> None:
52
+
53
+ super().__init__(*args, **kwargs)
54
+
55
+ # Dataset parameters
56
+ self.images_regex = images_regex
57
+ self.labels_regex = labels_regex
58
+ self.dataset_dir = dataset_dir
59
+ self.generate_dataset = generate_dataset
60
+ self.tile_size = tile_size
61
+ self.max_patches = max_patches
62
+ self.augment = augment
63
+ self.chunks = chunks
64
+ self.input_bands = input_bands
65
+ self.output_bands = output_bands
66
+ self.seed = seed
67
+ self.normalize = normalize
68
+ self.pytorch = pytorch
69
+
70
+ self.val_split = val_split
71
+ self.test_split = test_split
72
+ self.raster_regex = raster_regex
73
+
74
+ # Performance parameters
75
+ self.batch_size = batch_size
76
+ self.num_workers = num_workers
77
+ self.shuffle = shuffle
78
+ self.pin_memory = pin_memory
79
+ self.drop_last = drop_last
80
+
81
+ def prepare_data(self):
82
+ if self.generate_dataset:
83
+ SegmentationDataset(
84
+ images_regex=self.images_regex,
85
+ labels_regex=self.labels_regex,
86
+ dataset_dir=self.dataset_dir,
87
+ generate_dataset=self.generate_dataset,
88
+ tile_size=self.tile_size,
89
+ max_patches=self.max_patches,
90
+ augment=self.augment,
91
+ chunks=self.chunks,
92
+ input_bands=self.input_bands,
93
+ output_bands=self.output_bands,
94
+ seed=self.seed,
95
+ normalize=self.normalize,
96
+ pytorch=self.pytorch,
97
+ )
98
+
99
+ def setup(self, stage: Optional[str] = None):
100
+
101
+ # Split into train, val, test
102
+ segmentation_dataset = SegmentationDataset(
103
+ images_regex=self.images_regex,
104
+ labels_regex=self.labels_regex,
105
+ dataset_dir=self.dataset_dir,
106
+ generate_dataset=False,
107
+ tile_size=self.tile_size,
108
+ max_patches=self.max_patches,
109
+ augment=self.augment,
110
+ chunks=self.chunks,
111
+ input_bands=self.input_bands,
112
+ output_bands=self.output_bands,
113
+ seed=self.seed,
114
+ normalize=self.normalize,
115
+ pytorch=self.pytorch,
116
+ )
117
+
118
+ # Split datasets into train, val, and test sets
119
+ val_len = round(self.val_split * len(segmentation_dataset))
120
+ test_len = round(self.test_split * len(segmentation_dataset))
121
+ train_len = len(segmentation_dataset) - val_len - test_len
122
+
123
+ # Initialize datasets
124
+ self.train_set, self.val_set, self.test_set = random_split(
125
+ segmentation_dataset, lengths=[train_len, val_len, test_len],
126
+ generator=torch.Generator().manual_seed(self.seed)
127
+ )
128
+ logging.info("Initialized datasets...")
129
+
130
+ def train_dataloader(self) -> DataLoader:
131
+ loader = DataLoader(
132
+ self.train_set,
133
+ batch_size=self.batch_size,
134
+ shuffle=self.shuffle,
135
+ num_workers=self.num_workers,
136
+ drop_last=self.drop_last,
137
+ pin_memory=self.pin_memory,
138
+ )
139
+ return loader
140
+
141
+ def val_dataloader(self) -> DataLoader:
142
+ loader = DataLoader(
143
+ self.val_set,
144
+ batch_size=self.batch_size,
145
+ shuffle=False,
146
+ num_workers=self.num_workers,
147
+ drop_last=self.drop_last,
148
+ pin_memory=self.pin_memory,
149
+ )
150
+ return loader
151
+
152
+ def test_dataloader(self) -> DataLoader:
153
+ loader = DataLoader(
154
+ self.test_set,
155
+ batch_size=self.batch_size,
156
+ shuffle=False,
157
+ num_workers=self.num_workers,
158
+ drop_last=self.drop_last,
159
+ pin_memory=self.pin_memory,
160
+ )
161
+ return loader
162
+
163
+ def predict_dataloader(self) -> DataLoader:
164
+ raise NotImplementedError
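
The `setup` split above can be illustrated in isolation; this is a toy sketch using the datamodule defaults (`val_split=0.2`, `test_split=0.1`, `seed=24`) on a stand-in dataset rather than `SegmentationDataset`.

```python
import torch
from torch.utils.data import TensorDataset, random_split

dataset = TensorDataset(torch.arange(1000))  # stand-in for SegmentationDataset

val_len = round(0.2 * len(dataset))            # 200
test_len = round(0.1 * len(dataset))           # 100
train_len = len(dataset) - val_len - test_len  # 700

train_set, val_set, test_set = random_split(
    dataset,
    lengths=[train_len, val_len, test_len],
    generator=torch.Generator().manual_seed(24),
)
print(len(train_set), len(val_set), len(test_set))  # 700 200 100
```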
pytorch-caney/pytorch_caney/data/datamodules/simmim_datamodule.py ADDED
@@ -0,0 +1,80 @@
1
+ from ..datasets.simmim_modis_dataset import MODISDataset
2
+
3
+ from ..transforms import SimmimTransform
4
+
5
+ import torch.distributed as dist
6
+ from torch.utils.data import DataLoader, DistributedSampler
7
+ from torch.utils.data._utils.collate import default_collate
8
+
9
+
10
+ DATASETS = {
11
+ 'MODIS': MODISDataset,
12
+ }
13
+
14
+
15
+ def collate_fn(batch):
16
+ if not isinstance(batch[0][0], tuple):
17
+ return default_collate(batch)
18
+ else:
19
+ batch_num = len(batch)
20
+ ret = []
21
+ for item_idx in range(len(batch[0][0])):
22
+ if batch[0][0][item_idx] is None:
23
+ ret.append(None)
24
+ else:
25
+ ret.append(default_collate(
26
+ [batch[i][0][item_idx] for i in range(batch_num)]))
27
+ ret.append(default_collate([batch[i][1] for i in range(batch_num)]))
28
+ return ret
29
+
30
+
31
+ def get_dataset_from_dict(dataset_name):
32
+
33
+ try:
34
+
35
+ dataset_to_use = DATASETS[dataset_name]
36
+
37
+ except KeyError:
38
+
39
+ error_msg = f"{dataset_name} is not an existing dataset"
40
+
41
+ error_msg = f"{error_msg}. Available datasets: {DATASETS.keys()}"
42
+
43
+ raise KeyError(error_msg)
44
+
45
+ return dataset_to_use
46
+
47
+
48
+ def build_mim_dataloader(config, logger):
49
+
50
+ transform = SimmimTransform(config)
51
+
52
+ logger.info(f'Pre-train data transform:\n{transform}')
53
+
54
+ dataset_name = config.DATA.DATASET
55
+
56
+ dataset_to_use = get_dataset_from_dict(dataset_name)
57
+
58
+ dataset = dataset_to_use(config,
59
+ config.DATA.DATA_PATHS,
60
+ split="train",
61
+ img_size=config.DATA.IMG_SIZE,
62
+ transform=transform)
63
+
64
+ logger.info(f'Build dataset: train images = {len(dataset)}')
65
+
66
+ sampler = DistributedSampler(
67
+ dataset,
68
+ num_replicas=dist.get_world_size(),
69
+ rank=dist.get_rank(),
70
+ shuffle=True)
71
+
72
+ dataloader = DataLoader(dataset,
73
+ config.DATA.BATCH_SIZE,
74
+ sampler=sampler,
75
+ num_workers=config.DATA.NUM_WORKERS,
76
+ pin_memory=True,
77
+ drop_last=True,
78
+ collate_fn=collate_fn)
79
+
80
+ return dataloader
pytorch-caney/pytorch_caney/data/datasets/__init__.py ADDED
File without changes