rithwiks committed on
Commit
a6a92a9
1 Parent(s): 1dd6ec4

utils folder

Files changed (3)
  1. SBI-16-3D.py +1 -93
  2. utils/__init__.py +0 -0
  3. utils/create_splits.py +105 -0
SBI-16-3D.py CHANGED
@@ -129,96 +129,4 @@ class SBI_16_4D(datasets.GeneratorBasedBuilder):
                 # the first axis is integrations one, so we take the first element
                 # the second axis is the groups (time) axis and varies between images
                 image_data = hdul["SCI"].data[0,:,:,:].tolist()
-                yield task_instance_key, {**{"image": image_data}, **item}
-
-
-def get_fits_footprint(fits_path):
-    """
-    Process a FITS file to extract WCS information and calculate the footprint.
-
-    Parameters:
-    fits_path (str): Path to the FITS file.
-
-    Returns:
-    tuple: A tuple containing the WCS footprint coordinates.
-    """
-    with fits.open(fits_path) as hdul:
-        hdul[1].data = hdul[1].data[0, 0]
-        wcs = WCS(hdul[1].header)
-        shape = sorted(tuple(wcs.pixel_shape))[:2]
-        footprint = wcs.calc_footprint(axes=shape)
-        coords = list(footprint.flatten())
-    return coords
-
-
-def calculate_pixel_scale(header):
-    """
-    Calculate the pixel scale in arcseconds per pixel from a FITS header.
-
-    Parameters:
-    header (astropy.io.fits.header.Header): The FITS header containing WCS information.
-
-    Returns:
-    Mean of the pixel scales in x and y.
-    """
-    # Calculate the pixel scales in arcseconds per pixel
-    pixscale_x = header.get('CDELT1', np.nan)
-    pixscale_y = header.get('CDELT2', np.nan)
-
-    return np.mean([pixscale_x, pixscale_y])
-
-
-def make_split_jsonl_files(config_type="tiny", data_dir="./data",
-                           outdir="./splits", seed=42):
-    """
-    Create jsonl files for the SBI-16-3D dataset.
-
-    config_type: str, default="tiny"
-        The type of split to create. Options are "tiny" and "full".
-    data_dir: str, default="./data"
-        The directory where the FITS files are located.
-    outdir: str, default="./splits"
-        The directory where the jsonl files will be created.
-    seed: int, default=42
-        The seed for the random split.
-    """
-    random.seed(seed)
-    os.makedirs(outdir, exist_ok=True)
-
-    fits_files = glob(os.path.join(data_dir, "*.fits"))
-    random.shuffle(fits_files)
-    if config_type == "tiny":
-        train_files = fits_files[:2]
-        test_files = fits_files[2:3]
-    elif config_type == "full":
-        split_idx = int(0.8 * len(fits_files))
-        train_files = fits_files[:split_idx]
-        test_files = fits_files[split_idx:]
-    else:
-        raise ValueError("Unsupported config_type. Use 'tiny' or 'full'.")
-
-    def create_jsonl(files, split_name):
-        output_file = os.path.join(outdir, f"{config_type}_{split_name}.jsonl")
-        with open(output_file, "w") as out_f:
-            for file in tqdm(files):
-                #print(file, flush=True, end="...")
-                with fits.open(file, memmap=False) as hdul:
-                    image_id = os.path.basename(file).split(".fits")[0]
-                    ra = hdul["SCI"].header.get('CRVAL1', 0)
-                    dec = hdul["SCI"].header.get('CRVAL2', 0)
-                    pixscale = calculate_pixel_scale(hdul["SCI"].header)
-                    footprint = get_fits_footprint(file)
-                    # get the number of groups per integration
-                    ntimes = hdul["SCI"].data.shape[1]
-                    item = {"image_id": image_id, "image": file, "ra": ra, "dec": dec,
-                            "pixscale": pixscale, "ntimes": ntimes, "footprint": footprint}
-                    out_f.write(json.dumps(item) + "\n")
-
-    create_jsonl(train_files, "train")
-    create_jsonl(test_files, "test")
-
-
-if __name__ == "__main__":
-    make_split_jsonl_files("tiny")
-    make_split_jsonl_files("full")
+                yield task_instance_key, {**{"image": image_data}, **item}
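For context on the lines that remain in SBI-16-3D.py: the loader slices a 4D SCI cube of shape (integrations, groups, y, x) down to a single integration before yielding. A minimal sketch with synthetic data (the axis layout and the "SCI" extension name come from the code above; the sizes are invented):

import numpy as np
from astropy.io import fits

# Synthetic stand-in for a 4D cube: (nints, ngroups, ny, nx).
cube = np.zeros((1, 5, 32, 32), dtype=np.float32)
hdul = fits.HDUList([fits.PrimaryHDU(), fits.ImageHDU(data=cube, name="SCI")])

# Mirrors the loader: take the first integration, keep (ngroups, ny, nx).
image_data = hdul["SCI"].data[0, :, :, :]
print(image_data.shape)  # (5, 32, 32)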
utils/__init__.py ADDED
File without changes
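(The empty __init__.py is what marks utils as a regular Python package, so the relocated helpers become importable as utils.create_splits.)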
utils/create_splits.py ADDED
@@ -0,0 +1,105 @@
+import os
+import random
+from glob import glob
+import json
+from huggingface_hub import hf_hub_download
+from tqdm import tqdm
+import numpy as np
+
+from astropy.io import fits
+from astropy.wcs import WCS
+import datasets
+from datasets import DownloadManager
+from fsspec.core import url_to_fs
+
+
+def get_fits_footprint(fits_path):
+    """
+    Process a FITS file to extract WCS information and calculate the footprint.
+
+    Parameters:
+    fits_path (str): Path to the FITS file.
+
+    Returns:
+    tuple: A tuple containing the WCS footprint coordinates.
+    """
+    with fits.open(fits_path) as hdul:
+        hdul[1].data = hdul[1].data[0, 0]
+        wcs = WCS(hdul[1].header)
+        shape = sorted(tuple(wcs.pixel_shape))[:2]
+        footprint = wcs.calc_footprint(axes=shape)
+        coords = list(footprint.flatten())
+    return coords
+
+
+def calculate_pixel_scale(header):
+    """
+    Calculate the pixel scale in arcseconds per pixel from a FITS header.
+
+    Parameters:
+    header (astropy.io.fits.header.Header): The FITS header containing WCS information.
+
+    Returns:
+    Mean of the pixel scales in x and y.
+    """
+    # Calculate the pixel scales in arcseconds per pixel
+    pixscale_x = header.get('CDELT1', np.nan)
+    pixscale_y = header.get('CDELT2', np.nan)
+
+    return np.mean([pixscale_x, pixscale_y])
+
+
+def make_split_jsonl_files(config_type="tiny", data_dir="./data",
+                           outdir="./splits", seed=42):
+    """
+    Create jsonl files for the SBI-16-3D dataset.
+
+    config_type: str, default="tiny"
+        The type of split to create. Options are "tiny" and "full".
+    data_dir: str, default="./data"
+        The directory where the FITS files are located.
+    outdir: str, default="./splits"
+        The directory where the jsonl files will be created.
+    seed: int, default=42
+        The seed for the random split.
+    """
+    random.seed(seed)
+    os.makedirs(outdir, exist_ok=True)
+
+    fits_files = glob(os.path.join(data_dir, "*.fits"))
+    random.shuffle(fits_files)
+    if config_type == "tiny":
+        train_files = fits_files[:2]
+        test_files = fits_files[2:3]
+    elif config_type == "full":
+        split_idx = int(0.8 * len(fits_files))
+        train_files = fits_files[:split_idx]
+        test_files = fits_files[split_idx:]
+    else:
+        raise ValueError("Unsupported config_type. Use 'tiny' or 'full'.")
+
+    def create_jsonl(files, split_name):
+        output_file = os.path.join(outdir, f"{config_type}_{split_name}.jsonl")
+        with open(output_file, "w") as out_f:
+            for file in tqdm(files):
+                #print(file, flush=True, end="...")
+                with fits.open(file, memmap=False) as hdul:
+                    image_id = os.path.basename(file).split(".fits")[0]
+                    ra = hdul["SCI"].header.get('CRVAL1', 0)
+                    dec = hdul["SCI"].header.get('CRVAL2', 0)
+                    pixscale = calculate_pixel_scale(hdul["SCI"].header)
+                    footprint = get_fits_footprint(file)
+                    # get the number of groups per integration
+                    ntimes = hdul["SCI"].data.shape[1]
+                    item = {"image_id": image_id, "image": file, "ra": ra, "dec": dec,
+                            "pixscale": pixscale, "ntimes": ntimes, "footprint": footprint}
+                    out_f.write(json.dumps(item) + "\n")
+
+    create_jsonl(train_files, "train")
+    create_jsonl(test_files, "test")
+
+
+if __name__ == "__main__":
+    make_split_jsonl_files("tiny")
+    make_split_jsonl_files("full")
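A minimal sketch of how the relocated module might be driven after this commit (the paths are the defaults from the function signature; FITS files under ./data are assumed to exist):

from utils.create_splits import make_split_jsonl_files

# "tiny" keeps 2 train files and 1 test file; "full" is an 80/20 random split.
# Writes e.g. ./splits/tiny_train.jsonl and ./splits/tiny_test.jsonl.
make_split_jsonl_files(config_type="tiny", data_dir="./data", outdir="./splits")
make_split_jsonl_files(config_type="full", data_dir="./data", outdir="./splits")

Each output line is one JSON record with the keys image_id, image, ra, dec, pixscale, ntimes, and footprint, matching the item dict built in create_jsonl above.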