import os
import random
from glob import glob
import json
from huggingface_hub import hf_hub_download
from tqdm import tqdm
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
import datasets
from datasets import DownloadManager
from fsspec.core import url_to_fs
_DESCRIPTION = (
    """SBI-16-3D is a dataset which is part of the AstroCompress project. """
    """It contains data assembled from the James Webb Space Telescope (JWST). """
    """Each example is a 3D cube of uint16 counts: a stack of 2048 x 2048 """
    """frames along a variable-length groups (time) axis, taken from the """
    """first integration of a JWST exposure."""
)
_HOMEPAGE = "https://google.github.io/AstroCompress"
_LICENSE = "CC BY 4.0"
_URL = "https://huggingface.co/datasets/AstroCompress/SBI-16-3D/resolve/main/"
_URLS = {
"tiny": {
"train": "./splits/tiny_train.jsonl",
"test": "./splits/tiny_test.jsonl",
},
"full": {
"train": "./splits/full_train.jsonl",
"test": "./splits/full_test.jsonl",
},
}
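
# Each line of a split file is a JSON object describing one FITS image. A
# schematic example (keys match make_split_jsonl_files below; values are
# illustrative, not taken from a real file):
#   {"image_id": "jw01234_nrca1_uncal", "image": "./data/jw01234_nrca1_uncal.fits",
#    "ra": 53.1, "dec": -27.8, "pixscale": 8.7e-06, "ntimes": 5,
#    "footprint": [53.2, -27.9, ...]}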
_REPO_ID = "AstroCompress/SBI-16-3D"
class SBI_16_3D(datasets.GeneratorBasedBuilder):
"""SBI-16-3D Dataset"""
VERSION = datasets.Version("1.0.3")
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="tiny",
version=VERSION,
description="A small subset of the data, to test downsteam workflows.",
),
datasets.BuilderConfig(
name="full",
version=VERSION,
description="The full dataset",
),
]
DEFAULT_CONFIG_NAME = "tiny"
def __init__(self, **kwargs):
super().__init__(version=self.VERSION, **kwargs)
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
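                # the leading (groups/time) axis length varies per image, hence None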
"image": datasets.Array3D(shape=(None, 2048, 2048), dtype="uint16"),
"ra": datasets.Value("float64"),
"dec": datasets.Value("float64"),
"pixscale": datasets.Value("float64"),
"ntimes": datasets.Value("int64"),
"image_id": datasets.Value("string"),
}
),
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation="TBD",
)
def _split_generators(self, dl_manager: DownloadManager):
ret = []
base_path = dl_manager._base_path
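        # When run from a local checkout of the repo, resolve the split files
        # and FITS paths relative to the repo directory; otherwise fetch them
        # from the Hugging Face Hub.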
locally_run = not base_path.startswith(datasets.config.HF_ENDPOINT)
_, path = url_to_fs(base_path)
for split in ["train", "test"]:
if locally_run:
split_file_location = os.path.normpath(
os.path.join(path, _URLS[self.config.name][split])
)
split_file = dl_manager.download_and_extract(split_file_location)
else:
split_file = hf_hub_download(
repo_id=_REPO_ID,
filename=_URLS[self.config.name][split],
repo_type="dataset",
)
with open(split_file, encoding="utf-8") as f:
data_filenames = []
data_metadata = []
for line in f:
item = json.loads(line)
data_filenames.append(item["image"])
data_metadata.append(
{
"ra": item["ra"],
"dec": item["dec"],
"pixscale": item["pixscale"],
"ntimes": item["ntimes"],
"image_id": item["image_id"],
}
)
if locally_run:
data_urls = [
os.path.normpath(os.path.join(path, data_filename))
for data_filename in data_filenames
]
data_files = [
dl_manager.download(data_url) for data_url in data_urls
]
else:
data_urls = data_filenames
data_files = [
hf_hub_download(
repo_id=_REPO_ID, filename=data_url, repo_type="dataset"
)
for data_url in data_urls
]
ret.append(
datasets.SplitGenerator(
name=(
datasets.Split.TRAIN
if split == "train"
else datasets.Split.TEST
),
gen_kwargs={
"filepaths": data_files,
"split_file": split_file,
"split": split,
"data_metadata": data_metadata,
},
),
)
return ret
def _generate_examples(self, filepaths, split_file, split, data_metadata):
"""Generate GBI-16-4D examples"""
for idx, (filepath, item) in enumerate(zip(filepaths, data_metadata)):
task_instance_key = f"{self.config.name}-{split}-{idx}"
with fits.open(filepath, memmap=False) as hdul:
            # axis 0 indexes integrations; keep only the first integration.
            # axis 1 is the groups (time) axis, whose length varies between images.
            image_data = hdul["SCI"].data[0, :, :, :]
yield task_instance_key, {**{"image": image_data}, **item}
def get_fits_footprint(fits_path):
"""
Process a FITS file to extract WCS information and calculate the footprint.
Parameters:
fits_path (str): Path to the FITS file.
Returns:
tuple: A tuple containing the WCS footprint coordinates.
"""
with fits.open(fits_path) as hdul:
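        # collapse the integration and group axes to a single 2D frame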
hdul[1].data = hdul[1].data[0, 0]
wcs = WCS(hdul[1].header)
shape = sorted(tuple(wcs.pixel_shape))[:2]
footprint = wcs.calc_footprint(axes=shape)
coords = list(footprint.flatten())
return coords
def calculate_pixel_scale(header):
    """
    Calculate the pixel scale from a FITS header.

    Parameters:
        header (astropy.io.fits.header.Header): The FITS header containing WCS information.

    Returns:
        float: Mean of the CDELT1/CDELT2 pixel scales, in the units used by the
        header (conventionally degrees per pixel); NaN if either keyword is missing.
    """
    # CDELT1/CDELT2 give the pixel scale along each celestial axis
    pixscale_x = header.get("CDELT1", np.nan)
    pixscale_y = header.get("CDELT2", np.nan)
    return np.mean([pixscale_x, pixscale_y])
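
# A minimal sanity check for calculate_pixel_scale (values are illustrative,
# not taken from a real JWST header):
#
#   >>> hdr = fits.Header()
#   >>> hdr["CDELT1"] = hdr["CDELT2"] = 8.69e-06  # degrees per pixel
#   >>> calculate_pixel_scale(hdr)  # ~0.031 arcsec/px after multiplying by 3600
#   8.69e-06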
def make_split_jsonl_files(
config_type="tiny", data_dir="./data", outdir="./splits", seed=42
):
"""
Create jsonl files for the SBI-16-3D dataset.
config_type: str, default="tiny"
The type of split to create. Options are "tiny" and "full".
data_dir: str, default="./data"
The directory where the FITS files are located.
outdir: str, default="./splits"
The directory where the jsonl files will be created.
seed: int, default=42
The seed for the random split.
"""
random.seed(seed)
os.makedirs(outdir, exist_ok=True)
fits_files = glob(os.path.join(data_dir, "*.fits"))
random.shuffle(fits_files)
if config_type == "tiny":
train_files = fits_files[:2]
test_files = fits_files[2:3]
elif config_type == "full":
split_idx = int(0.8 * len(fits_files))
train_files = fits_files[:split_idx]
test_files = fits_files[split_idx:]
else:
raise ValueError("Unsupported config_type. Use 'tiny' or 'full'.")
def create_jsonl(files, split_name):
output_file = os.path.join(outdir, f"{config_type}_{split_name}.jsonl")
with open(output_file, "w") as out_f:
for file in tqdm(files):
with fits.open(file, memmap=False) as hdul:
image_id = os.path.basename(file).split(".fits")[0]
ra = hdul["SCI"].header.get("CRVAL1", 0)
dec = hdul["SCI"].header.get("CRVAL2", 0)
pixscale = calculate_pixel_scale(hdul["SCI"].header)
footprint = get_fits_footprint(file)
                    # number of groups per integration (the time axis)
ntimes = hdul["SCI"].data.shape[1]
item = {
"image_id": image_id,
"image": file,
"ra": ra,
"dec": dec,
"pixscale": pixscale,
"ntimes": ntimes,
"footprint": footprint,
}
out_f.write(json.dumps(item) + "\n")
create_jsonl(train_files, "train")
create_jsonl(test_files, "test")
if __name__ == "__main__":
make_split_jsonl_files("tiny")
make_split_jsonl_files("full")