csrijac committed · Commit b7a6232 · verified · 1 Parent(s): f9047f9

Data preprocessing & generation


Data generation to derive HLS chips and the MERRA-2/flux CSV: make_chips.py extracts HLS band chips around flux-tower sites, prep_input.py aggregates MERRA-2 and flux-station values for each chip into a training CSV, and fluxconfig.yaml holds the downstream model and normalization settings.

Files changed (3)
  1. fluxconfig.yaml +78 -0
  2. make_chips.py +283 -0
  3. prep_input.py +169 -0
fluxconfig.yaml ADDED
@@ -0,0 +1,78 @@
+ model:
+   name: "Base_Flux"
+   n_channel: 6
+   n_class: 1
+   embed_dim: 1024
+   dropout_rate: 0.5
+
+ device_name: "cuda"
+
+ n_iteration: 50
+
+ prithvi_model_new_weight: "/vol/cephfs/impact/srija/Prithvi-Global-downstream_v0/new_flood/checkpoint.pt"
+
+ training:
+   train_batch_size: 16
+   shuffle: True
+
+   optimizer:
+     name: "AdamW"
+     params:
+       lr: 5e-5
+
+   scheduler:
+     use: 1
+     name: "ReduceLROnPlateau"
+
+   dropout:
+     use: 1
+     val: 0.2
+
+   bn: 1
+
+ testing:
+   test_batch_size: 16
+   shuffle: False
+
+   normalization: "z-score-std"
+   test_year: 2021
+
+ data:
+   n_frame: 1
+   chips: "/vol/cephfs/impact/srija/Prithvi-Global-downstream_v0/new_flood_v2/chips/"
+   input_size: [6, 50, 50]
+   means_for2018test: [0.07286696773903256, 0.10036772476940378, 0.11363777043869523, 0.2720510638470194, 0.2201167122609674, 0.1484162876040495]
+   stds_for2018test: [0.13271414936598172, 0.13268933338964875, 0.1384673725283858, 0.12089142598551804, 0.10977084890500641, 0.0978705241034744]
+   merra_means_for2018test: [282.011721, 295.823746, 288.291530, 278.243071, 0.552373, 55.363476, 48.984387, 202.461732, 22.907336, 0.000004]
+   merra_stds_for2018test: [9.141752, 11.374619, 10.224494, 7.912334, 0.178115, 50.069111, 48.238661, 74.897672, 9.277971, 0.000014]
+   gpp_means_for2018test: [3.455948]
+   gpp_stds_for2018test: [3.754123]
+   means_for2019test: [0.07287311832611834, 0.10025904848484847, 0.1122947444733045, 0.27563822551226563, 0.21583184092352084, 0.14331408109668098]
+   stds_for2019test: [0.13511944688809177, 0.1349403534769768, 0.14037014996437144, 0.12365673294486092, 0.10852189245620811, 0.09485890083382985]
+   merra_means_for2019test: [281.960274, 295.974675, 288.330014, 278.306133, 0.548831, 55.167287, 48.381169, 202.003449, 23.097742, 0.000004]
+   merra_stds_for2019test: [9.077508, 11.436697, 10.178588, 7.750465, 0.175302, 50.656796, 49.182061, 73.949519, 9.422290, 0.000016]
+   gpp_means_for2019test: [3.581604]
+   gpp_stds_for2019test: [3.889343]
+   means_for2020test: [0.07372144372093026, 0.10117611215116282, 0.11269885680232558, 0.2775572554069766, 0.21387001372093037, 0.14144541145348838]
+   stds_for2020test: [0.13324302628303733, 0.13308921403475235, 0.13829909331863693, 0.12039809083338567, 0.1088096350639653, 0.09366368859284444]
+   merra_means_for2020test: [282.373169, 296.706468, 288.852922, 278.612209, 0.540145, 53.830276, 53.827718, 206.817980, 23.077581, 0.000003]
+   merra_stds_for2020test: [9.296960, 11.402008, 10.311107, 8.064209, 0.171909, 49.945953, 48.907351, 74.591578, 8.746668, 0.000014]
+   gpp_means_for2020test: [3.668982]
+   gpp_stds_for2020test: [3.804261]
+   means_for2021test: [0.06743080268702287, 0.09420638137404584, 0.10626692164885497, 0.2692502415877864, 0.21780909367938925, 0.1468194037862596]
+   stds_for2021test: [0.12261400510468322, 0.12276593355350174, 0.12836180894665594, 0.11639597942158948, 0.10570861595781685, 0.09646322486302224]
+   merra_means_for2021test: [281.762443, 294.883832, 287.753053, 279.168366, 0.569313, 61.064687, 45.930611, 200.842519, 23.072735, 0.000003]
+   merra_stds_for2021test: [9.040586, 11.143439, 10.063070, 8.121612, 0.172953, 52.172274, 47.056911, 76.875468, 9.553304, 0.000010]
+   gpp_means_for2021test: [3.787582]
+   gpp_stds_for2021test: [3.862494]
+
+ logging:
+   checkpoint_dir: "/vol/cephfs/impact/srija/Prithvi-Global-downstream_v0/Prithvi-global-v1/flux_base_pred_logs/"
+   metrics_dir: "/vol/cephfs/impact/srija/Prithvi-Global-downstream_v0/Prithvi-global-v1/flux_base_pred_logs/metrics/"
+   plots_dir: "/vol/cephfs/impact/srija/Prithvi-Global-downstream_v0/Prithvi-global-v1/flux_base_pred_logs/plots/"
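For reference, a minimal sketch of how downstream training code might consume this config, assuming PyYAML and the layout above (the `load_test_stats` helper is illustrative, not part of this commit):

```python
import numpy as np
import yaml

def load_test_stats(cfg_path="fluxconfig.yaml"):
    # Read the config and select the HLS band statistics for the configured test year.
    with open(cfg_path) as f:
        cfg = yaml.safe_load(f)
    year = cfg["testing"]["test_year"]
    means = np.array(cfg["data"][f"means_for{year}test"])
    stds = np.array(cfg["data"][f"stds_for{year}test"])
    return cfg, means, stds

cfg, means, stds = load_test_stats()
# "z-score-std" normalization of a (6, 50, 50) chip would then be:
#   chip_norm = (chip - means[:, None, None]) / stds[:, None, None]
```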
make_chips.py ADDED
@@ -0,0 +1,283 @@
+ """
+ make_chips.py
+
+ This script reads HLS S30/L30 data and extracts band values for a
+ chip_size x chip_size subset of the original raster grid centered on
+ each flux station. Chips with cloud or snow cover beyond a threshold
+ are discarded.
+
+ Author: Besart Mujeci, Srija Chakraborty, Christopher Phillips
+
+ Usage:
+     python make_chips.py
+ """
+ import os
+ import shutil
+ from collections import Counter
+ from pathlib import Path
+
+ import numpy as np
+ import pandas as pd
+ import rasterio
+ import rclone
+ from rasterio.warp import transform
+ from rasterio.windows import Window
+
+
+ # --- --- ---
+ def point_to_index(dataset, long, lat):
+     """
+     Converts a longitude/latitude point to a row, col position on the raster grid.
+
+     Args:
+         dataset (rasterio DatasetReader): open rasterio dataset
+         long (float): longitude
+         lat (float): latitude
+
+     Returns:
+         tuple: (row, col) position of the point on the grid
+     """
+     from_crs = rasterio.crs.CRS.from_epsg(4326)  # WGS84 lon/lat
+     to_crs = dataset.crs
+     new_x, new_y = transform(from_crs, to_crs, [long], [lat])
+     new_x = new_x[0]
+     new_y = new_y[0]
+
+     # get row and col
+     row, col = dataset.index(new_x, new_y)
+     return row, col
+ # --- --- ---
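As a quick illustration of point_to_index, mapping a station's coordinates onto an HLS tile (the file name and coordinates below are hypothetical):

```python
import rasterio

# Hypothetical HLS band file; any georeferenced GeoTIFF behaves the same way.
with rasterio.open("T10SEG/HLS.L30.T10SEG.2018123T185121.v2.0.B02.tif") as src:
    row, col = point_to_index(src, -120.95, 38.43)  # longitude, latitude
    # row/col are the pixel indices used to center the chip_size x chip_size window
```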
+
+
+ # --- --- --- Citation for this function: Christopher Phillips
+ def check_qc_bit(data, bit):
+     """
+     Function to check QC (Fmask) flags
+
+     Args:
+         data (numpy array): Fmask values read from the raster
+         bit (int): bit to test, e.g. 1 (cloud) or 4 (snow/ice)
+
+     Returns:
+         float: fraction of pixels with the given flag set
+     """
+     # Isolate the requested bit (Fmask is bit-packed, so divide by powers of 2)
+     qc = np.array(data // (2**bit), dtype='int')
+     qc = qc - ((qc // 2) * 2)  # equivalent to qc % 2
+
+     return np.sum(qc) / qc.size
+ # --- --- ---
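For example, an Fmask value of 18 is binary 10010, so bits 1 (cloud) and 4 (snow/ice) are both set; on a 2x2 array with one such pixel the function returns a 25% flagged fraction:

```python
import numpy as np

fmask = np.array([[18, 0], [0, 0]])  # one flagged pixel out of four
print(check_qc_bit(fmask, 1))        # 0.25 -> compared against cthresh
print(check_qc_bit(fmask, 4))        # 0.25 -> compared against sthresh
```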
+
+
+ # --- --- --- Options
+ hls_type = 'L30' # Switch between 'L30' and 'S30' manually.
+ idir = "" # Raw images dir
+ odir = "" # Output chips dir
+ chip_size = 50 # Chip dimensions
+ scale = 0.0001 # Scale value for HLS bands
+ cthresh = 0.05 # Cloud threshold
+ sthresh = 0.02 # Snow/ice threshold
+ # --- --- ---
+
+
+ # --- --- --- rclone configuration, file collection
+ # (idir must be set above before listing the remote directory)
+ cfg = ""
+ result = rclone.with_config(cfg).run_cmd("ls", extra_args=[f"{idir}/"])
+ output_lines = result['out'].decode('utf-8').splitlines()
+ file_list = [line.split(maxsplit=1)[1] for line in output_lines if line]
+ # --- --- ---
+
+
+ # --- --- --- Read station site data
+ df = pd.read_csv("./TILED_filtered_flux_sites_2018_2021.csv")
+ stations = df['SITE_ID'].tolist()
+ tiles = [tile.split(";")[0] for tile in df['tiles'].tolist()]
+ sYear = df['start_year'].tolist()
+ eYear = df['end_year'].tolist()
+ longs = df['LOCATION_LONG'].tolist()
+ lats = df['LOCATION_LAT'].tolist()
+ all_years = [str(sYear[i]) + "-" + str(eYear[i]) for i in range(len(df))]
+ coords = [str(lat) + ";" + str(long) for lat, long in zip(lats, longs)]
+ # --- --- ---
+
+
+ # Helper to pull this tile/year's files for one band from the remote listing
+ def band_files(band):
+     return sorted(fp for fp in file_list
+                   if tile in fp and band in fp and hls_type in fp
+                   and str(year) == fp.split(".")[3][:4])
+
+ for i in range(len(tiles)):
+     station_data = [stations[i], coords[i].split(";")[0], coords[i].split(";")[1],
+                     all_years[i].split("-")[0], all_years[i].split("-")[1], "filler", tiles[i]]
+     tile = station_data[-1].strip()
+     print(f"Working on {tile}")
+
+     # Determine years for this station
+     years = range(int(station_data[3]), int(station_data[4]) + 1)
+     for year in years:
+         print(year)
+
+         # Build the per-band file lists for this tile and year, sorted by date
+         tifs1 = band_files("B01")
+         tifs2 = band_files("B02")
+         tifs3 = band_files("B03")
+         tifs4 = band_files("B04")
+         tifs5 = band_files("B05")
+         tifs6 = band_files("B06")
+         tifs7 = band_files("B07")
+         tifs8 = band_files("B08")
+         tifs8A = band_files("B8A")
+         tifs9 = band_files("B09")
+         tifs10 = band_files("B10")
+         tifs11 = band_files("B11")
+         tifs12 = band_files("B12")
+         tifsF = band_files("Fmask")
+
+         # Six reflectance bands (plus Fmask) per HLS product
+         if hls_type == 'L30':
+             band_lists = [tifs2, tifs3, tifs4, tifs5, tifs6, tifs7, tifsF]
+         elif hls_type == 'S30':
+             band_lists = [tifs2, tifs3, tifs4, tifs8A, tifs11, tifs12, tifsF]
+         else:
+             raise ValueError(f'HLS product type must be "L30" or "S30" not "{hls_type}".')
+
+         # Loop over each tif date
+         first = True
+         chip_flag = False  # Flag for detecting chip size errors
+         for j in range(len(tifs2)):
+
+             # Ensure the sorted files are aligned correctly.
+             # If a band is missing then things can go out of order.
+             # Push 'filler' into a list missing a band to maintain sorting.
+             skip_file_iteration = False
+             checkList = ['.'.join(lst[j].split(".")[2:4]) for lst in band_lists]
+             counts = Counter(checkList)
+             common_value, _ = counts.most_common(1)[0]
+             for z, value in enumerate(checkList):
+                 if value != common_value:
+                     band_lists[z].insert(j, "filler")  # Push
+                     skip_file_iteration = True
+                     print(f"Misaligned - {checkList}")
+                     break
+             if skip_file_iteration:
+                 continue
+
+             # Download all bands for this date into ./{tile}/
+             try:
+                 os.makedirs(f"./{tile}", exist_ok=True)
+                 for lst in band_lists:
+                     rclone.with_config(cfg).copy(f"{idir}/{lst[j]}", f"./{tile}")
+             except Exception:
+                 print(f"MISALIGNED FOR - {tifs2[j]} check if all bands exist")
+                 continue
+
+             # Open the downloaded tifs (file names already carry the tile prefix)
+             srcs = [rasterio.open(f"./{lst[j]}") for lst in band_lists]
+             src2, src3, src4, src5, src6, src7, srcF = srcs
+
+             # Station remains in the same spot/tile so only gather information once.
+             if first:
+                 row, col = point_to_index(src2, float(station_data[2]), float(station_data[1]))
+
+                 y_offset = row - (chip_size // 2)
+                 x_offset = col - (chip_size // 2)
+
+                 # rasterio windows take (col_off, row_off, width, height)
+                 window = Window(x_offset, y_offset, chip_size, chip_size)
+                 window_transform = src2.window_transform(window)
+
+                 first = False
+
+             # Subset tif
+             bands = []
+             for src in (src2, src3, src4, src5, src6, src7):  # Set the tuple to match desired bands
+                 # Scale and clip reflectances
+                 band = np.clip(src.read(1)[y_offset:y_offset + chip_size, x_offset:x_offset + chip_size] * scale, 0, 1)
+                 bands.append(band)
+             bands = np.array(bands)
+
+             # Check chip size and break out if wrong shape
+             if (bands.shape[1] != chip_size) or (bands.shape[2] != chip_size):
+                 print(f'ERROR: Chip for tile {tile} is wrong size!\n Size is {bands.shape[1:]} and not ({chip_size},{chip_size}).\nSkipping to next tile.')
+                 chip_flag = True
+                 break
+
+             # Subset Fmask to get imperfections
+             cbands = np.array(srcF.read(1)[y_offset:y_offset + chip_size, x_offset:x_offset + chip_size], dtype='int')
+             cloud_frac = check_qc_bit(cbands, 1)
+             snow_frac = check_qc_bit(cbands, 4)
+
+             # Check cloud fraction
+             if cloud_frac > cthresh:
+                 print("CLOUDY")
+                 continue
+
+             # Check snow/ice fraction
+             if snow_frac > sthresh:
+                 print("SNOWY")
+                 continue
+
+             # Save chip with new metadata
+             out_meta = src2.meta
+             out_meta.update({'driver': 'GTiff', 'height': bands.shape[1],
+                              'width': bands.shape[2], 'count': bands.shape[0],
+                              'dtype': bands.dtype, 'transform': window_transform})
+             os.makedirs(f"./chips/{tile}", exist_ok=True)
+             save_name = f'./chips/{tifs2[j].replace("B02", f"{station_data[0]}_merged.{chip_size}x{chip_size}pixels")}'
+             with rasterio.open(save_name, 'w', **out_meta) as dest:
+                 dest.write(bands)
+
+             rclone.with_config(cfg).copy(f"./chips/{tile}", f"{odir}/{tile}/")
+             shutil.rmtree(Path("./chips/"))
+
+         # If a chip came out the wrong size, move on to the next station
+         if chip_flag:
+             print("Breaking to next station -- wrong size")
+             break
+         shutil.rmtree(Path(f"./{tile}"))
+
+ print('Done chipping.')
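A saved chip can then be read back and normalized with the statistics from fluxconfig.yaml; a minimal sketch, with a hypothetical chip name and the 2018 band statistics (rounded):

```python
import numpy as np
import rasterio

with rasterio.open("chips/T10SEG/HLS.L30.T10SEG.2018123T185121.v2.0.US-Var_merged.50x50pixels.tif") as src:
    chip = src.read()  # shape (6, 50, 50): B02-B07 for L30

means = np.array([0.0729, 0.1004, 0.1136, 0.2721, 0.2201, 0.1484])  # means_for2018test
stds = np.array([0.1327, 0.1327, 0.1385, 0.1209, 0.1098, 0.0979])   # stds_for2018test
chip_norm = (chip - means[:, None, None]) / stds[:, None, None]     # per-band z-score
```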
prep_input.py ADDED
@@ -0,0 +1,169 @@
+ """
+ prep_input.py
+
+ This script reads MERRA-2 SLV and LND data and combines them with
+ flux station data and HLS chips to produce a CSV of aggregate values
+ that can be used to train for GPP flux prediction.
+
+ Author: Besart Mujeci, Srija Chakraborty, Christopher Phillips
+
+ Usage:
+     python prep_input.py
+ """
+ import os
+ import shutil
+ from datetime import datetime, timedelta
+ from pathlib import Path
+
+ import netCDF4 as nc
+ import numpy as np
+ import pandas as pd
+ import rclone
+
+
+ # --- --- ---
+ def convert_HLS_date(chip_name):
+     """
+     Extracts the date string from an HLS tile name and returns a date object
+
+     Args:
+         chip_name (string): name of the HLS file
+
+     Returns:
+         datetime: datetime object parsed from the name's year and day-of-year
+     """
+     hls_date = chip_name.split('.')[3][:7]  # e.g. '2018123' = year 2018, day 123
+     year = int(hls_date[:4])
+     day = int(hls_date[4:])
+     date = datetime(year, 1, 1) + timedelta(days=day - 1)
+
+     return date
+ # --- --- ---
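For example, the fourth dot-separated field of an HLS name carries the year and day-of-year, so day 123 of 2018 resolves to May 3 (the chip name below is hypothetical):

```python
chip_name = "HLS.L30.T10SEG.2018123T185121.v2.0.US-Var_merged.50x50pixels.tif"
print(convert_HLS_date(chip_name))  # 2018-05-03 00:00:00
```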
+
+
+ # --- --- --- Set up rclone and get chips and MERRA-2 files
+ rawdir = ''
+ merradir = ''
+ cfg = ""
+ result = rclone.with_config(cfg).run_cmd("ls", extra_args=[f"{rawdir}/"])
+ output_lines = result['out'].decode('utf-8').splitlines()
+ file_list = [line.split(maxsplit=1)[1] for line in output_lines if line]
+ result = rclone.with_config(cfg).run_cmd("ls", extra_args=[f"{merradir}/"])
+ output_lines = result['out'].decode('utf-8').splitlines()
+ merras = [line.split(maxsplit=1)[1] for line in output_lines if line]
+ # --- --- ---
+
+
+ # --- --- --- Set up paths
+ # Location of station tile list
+ station_file = './TILED_filtered_flux_sites_2018_2021.csv'
+ # Location to save the input file
+ spath = './all_inputs.csv'
+ odir = ''
+ # --- --- ---
+
+
+ # --- --- --- Get station information
+ # Map SITE_ID -> (SITE_ID, longitude, latitude, start year, end year)
+ stations = {}
+ fn = open(station_file, 'r')
+ for line in list(fn)[1:]:
+     dummy = line.split(',')
+     stations[dummy[1].strip()] = (dummy[1], float(dummy[9]), float(dummy[8]), dummy[3], dummy[4])
+ fn.close()
+ flux_nets = os.listdir("./fluxnets/flux_sites_2018_2021/")
+ # --- --- ---
+
+ # Locate all HLS chips
+ chips = sorted(file_list)
+ skipped = []
+
+ # Make the input file to which to save the data
+ out_fn = open(spath, 'w')
+ out_fn.write('Chip,Station,T2MIN,T2MAX,T2MEAN,T2MDEWMEAN,GWETROOT,LHLAND,SHLAND,SWLAND,PARDFLAND,PRECTOTLAND,GPP')
+
+ # And loop over them
+ for chip in chips:
+     rclone.with_config(cfg).copy(f"{rawdir}/{chip}", f"./{chip}")
+
+     # Match to an Ameriflux station
+     chip_name = chip.split('/')[-1]
+     tile = chip_name.split('.')[2][1:]
+     station_name = chip_name.split('.')[6].split("_")[0]
+     try:  # Skip tiles for which no station exists
+         station = stations[station_name]
+     except KeyError:
+         print(f"exception - {('station dict indexing', station_name, tile)}")
+         continue
+     date = convert_HLS_date(chip_name)
+
+     # Locate station from tile and pull in the daily reference value
+     try:  # Skip tiles for which no station data is available
+         flux_file = [fluxnet for fluxnet in flux_nets if station[0] in fluxnet][0]
+         flux_df = pd.read_csv(f"./fluxnets/flux_sites_2018_2021/{flux_file}")
+     except Exception:
+         print(f"exception - {('station exception', station_name, tile)}")
+         continue
+
+     flux_times = np.array(flux_df.TIMESTAMP, dtype='str')
+     flux_gpp = np.array(flux_df.GPP_NT_VUT_REF)
+     try:  # Skip if cannot find CO2 data
+         quality_flag = np.array(flux_df.NEE_VUT_REF_QC)
+         if quality_flag[flux_times == date.strftime("%Y%m%d")][0] >= 0.6:
+             co2 = flux_gpp[flux_times == date.strftime("%Y%m%d")][0]
+         else:  # Quality not met, skip
+             print(f"co2 quality not met for - {('co2', station_name, tile)}")
+             continue
+     except Exception:
+         print(f"co2 quality not met for - {('co2 exception', station_name, tile)}")
+         skipped.append(('co2 exception', station_name, tile))
+         continue
+
+     # Pull MERRA-2 data for temperature and dew point
+     merra_file = [file for file in merras if "slv" in file and str(date.strftime("%Y%m%d")) in file][0]
+     rclone.with_config(cfg).copy(f"{merradir}/{merra_file}", f"./merra/")
+     merra_fn = nc.Dataset(f'./merra/{merra_file}')
+
+     # Pull in the MERRA-2 grid and find the closest point
+     mlons = merra_fn.variables['lon'][:]
+     mlats = merra_fn.variables['lat'][:]
+     xind = np.argmin((mlons - station[1])**2)
+     yind = np.argmin((mlats - station[2])**2)
+
+     # Read the variables and collect stats along the time dimension
+     t2m = merra_fn.variables['T2M'][:, yind, xind]
+     tmax = np.max(t2m)
+     tmin = np.min(t2m)
+     tmean = np.nanmean(t2m)
+     tdewmean = np.nanmean(merra_fn.variables['T2MDEW'][:, yind, xind])
+
+     merra_fn.close()
+     shutil.rmtree(Path("./merra"))
+
+     # Pull MERRA-2 data for the land surface
+     merra_file = [file for file in merras if "lnd" in file and str(date.strftime("%Y%m%d")) in file][0]
+     rclone.with_config(cfg).copy(f"{merradir}/{merra_file}", f"./merra/")
+     merra_fn = nc.Dataset(f'./merra/{merra_file}')
+
+     # Pull in the MERRA-2 grid and find the closest point
+     mlons = merra_fn.variables['lon'][:]
+     mlats = merra_fn.variables['lat'][:]
+     xind = np.argmin((mlons - station[1])**2)
+     yind = np.argmin((mlats - station[2])**2)
+
+     # Read the variables and collect stats along the time dimension
+     GWETROOT = np.nanmean(merra_fn.variables['GWETROOT'][:, yind, xind])
+     LHLAND = np.nanmean(merra_fn.variables['LHLAND'][:, yind, xind])
+     SHLAND = np.nanmean(merra_fn.variables['SHLAND'][:, yind, xind])
+     PARDFLAND = np.nanmean(merra_fn.variables['PARDFLAND'][:, yind, xind])
+     PRECTOTLAND = np.nanmean(merra_fn.variables['PRECTOTLAND'][:, yind, xind])
+     SWLAND = np.nanmean(merra_fn.variables['SWLAND'][:, yind, xind])
+
+     merra_fn.close()
+     shutil.rmtree(Path("./merra"))
+     shutil.rmtree(Path(f"./{tile}"), ignore_errors=True)
+     # Save chip name, MERRA-2 values, and Ameriflux measurement to data file
+     out_fn.write(f'\n{chip_name},{station[0]},{tmin:.2f},{tmax:.2f},{tmean:.2f},{tdewmean:.2f},{GWETROOT:.2f},{LHLAND:.2f},{SHLAND:.2f},{SWLAND:.2f},{PARDFLAND:.2f},{PRECTOTLAND:.2f},{co2}')
+
+ # Close the file and upload it
+ out_fn.close()
+ rclone.with_config(cfg).copy(f"{spath}", f"{odir}")
+ print("DONE")
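The resulting all_inputs.csv pairs each chip with its MERRA-2 predictors and the GPP target. A minimal sketch of z-scoring it with the 2021 statistics from fluxconfig.yaml, assuming the config's merra arrays follow the same column order as the CSV header written above:

```python
import numpy as np
import pandas as pd

df = pd.read_csv("all_inputs.csv")
merra_cols = ["T2MIN", "T2MAX", "T2MEAN", "T2MDEWMEAN", "GWETROOT",
              "LHLAND", "SHLAND", "SWLAND", "PARDFLAND", "PRECTOTLAND"]

# merra_means_for2021test / merra_stds_for2021test from fluxconfig.yaml
merra_means = np.array([281.762443, 294.883832, 287.753053, 279.168366, 0.569313,
                        61.064687, 45.930611, 200.842519, 23.072735, 0.000003])
merra_stds = np.array([9.040586, 11.143439, 10.063070, 8.121612, 0.172953,
                       52.172274, 47.056911, 76.875468, 9.553304, 0.000010])

X = (df[merra_cols].to_numpy() - merra_means) / merra_stds  # z-score-std inputs
y = (df["GPP"].to_numpy() - 3.787582) / 3.862494            # gpp_*_for2021test stats
```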