jacobbieker committed
Commit: db8068a
Parent: 5e9558f

Better processing

gfs-reforecast.py CHANGED (+9 -22)
```diff
@@ -158,33 +158,20 @@ class GFEReforecastDataset(datasets.GeneratorBasedBuilder):
             filepaths = json.load(f)
         filepaths = ['zip:///::https://huggingface.co/datasets/openclimatefix/gfs-reforecast/resolve/main/' + f for f in filepaths]
         if "v16" in self.config.name:
-
-            for
+            idx = 0
+            for f in filepaths:
+                dataset = xr.open_dataset(f, engine='zarr', chunks={})
                 try:
-
-
-
-
-
-                             "next_state": state_values[1],
+                    for t in range(len(dataset["time"].values)-1):
+                        data_t = dataset.isel(time=t)
+                        data_t1 = dataset.isel(time=(t+1))
+                        value = {"current_state": np.concatenate([data_t[v].values for v in sorted(data_t.data_vars)], axis=0),
+                                 "next_state": np.concatenate([data_t1[v].values for v in sorted(data_t.data_vars)], axis=0),
                                  "timestamp": data_t["time"].values,
                                  "latitude": data_t["latitude"].values,
                                  "longitude": data_t["longitude"].values}
+                        idx += 1
                         yield idx, value
                 except:
                     # Some of the zarrs potentially have corrupted data at the end, and might fail, so this avoids that
                     continue
-        else:
-            for f in filepaths:
-                dataset = xr.open_dataset('zip:///::'+f, engine='zarr', chunks={}).sortby("time").drop_duplicates("time")
-                for key, row in enumerate(dataset["time"].values):
-                    try:
-                        data = dataset.sel(time=row)
-                        value = {"precipitation_rate": data["unknown"].values,
-                                 "timestamp": data["time"].values,
-                                 "latitude": data["latitude"].values,
-                                 "longitude": data["longitude"].values}
-                        yield key, value
-                    except:
-                        # Some of the zarrs potentially have corrupted data at the end, and might fail, so this avoids that
-                        continue
```
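The new v16 branch pairs each timestep with the next one and flattens all data variables into a single array in sorted-name order. Below is a minimal standalone sketch of that pairing logic, run against a tiny synthetic xarray Dataset instead of the remote zarr zips; the variable names t2m/u10, the 2x2 grid, and the 6-hour step are invented for illustration.

```python
import numpy as np
import xarray as xr

# Synthetic stand-in for one of the GFS zarrs: 3 timesteps, two variables
# (hypothetical names), on a tiny 2x2 lat/lon grid.
times = np.array(["2020-01-01T00", "2020-01-01T06", "2020-01-01T12"],
                 dtype="datetime64[ns]")
rng = np.random.default_rng(0)
dataset = xr.Dataset(
    {name: (("time", "latitude", "longitude"), rng.random((3, 2, 2), dtype=np.float32))
     for name in ("t2m", "u10")},
    coords={"time": times, "latitude": [0.0, 0.25], "longitude": [0.0, 0.25]},
)

# Same pairing logic as the new v16 branch: each example holds the full state
# at step t ("current_state") and step t+1 ("next_state"), with all variables
# concatenated along axis 0 in sorted-name order.
idx = 0
for t in range(len(dataset["time"].values) - 1):
    data_t = dataset.isel(time=t)
    data_t1 = dataset.isel(time=t + 1)
    value = {"current_state": np.concatenate([data_t[v].values for v in sorted(data_t.data_vars)], axis=0),
             "next_state": np.concatenate([data_t1[v].values for v in sorted(data_t1.data_vars)], axis=0),
             "timestamp": data_t["time"].values}
    idx += 1
    print(idx, value["timestamp"], value["current_state"].shape)  # (4, 2): two 2x2 vars stacked
```

Sorting `data_vars` is what makes this deterministic: zarr stores do not guarantee variable ordering, so sorting keeps the channel layout of `current_state`/`next_state` stable across files.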
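To consume the yielded examples without cloning the repo, the stock `datasets` streaming API should work. A sketch, assuming a config whose name contains "v16" (the script only checks `"v16" in self.config.name`; see the dataset card for the real config names, and note that recent `datasets` releases require `trust_remote_code=True` for script-backed datasets):

```python
from datasets import load_dataset

# "v16" is an assumed config name; the loader only tests whether the chosen
# config's name contains "v16". Check the dataset card for the real configs.
ds = load_dataset(
    "openclimatefix/gfs-reforecast",
    "v16",
    split="train",
    streaming=True,  # avoids downloading every zarr zip up front
)
example = next(iter(ds))
print(sorted(example.keys()))  # expect current_state, next_state, timestamp, latitude, longitude
```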