jbloom committed on
Commit 0b4f8df
1 Parent(s): 0f41df2

adding train/test files and dataset script

.gitattributes CHANGED
@@ -49,6 +49,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.gif filter=lfs diff=lfs merge=lfs -text
  *.png filter=lfs diff=lfs merge=lfs -text
  *.tiff filter=lfs diff=lfs merge=lfs -text
+ *.fits filter=lfs diff=lfs merge=lfs -text
+ *.fit filter=lfs diff=lfs merge=lfs -text
  # Image files - compressed
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,70 @@
- ---
- license: cc-by-4.0
- ---
+ ---
+ license: cc-by-4.0
+ pretty_name: Space-based (JWST) 3D data cubes
+ tags:
+ - astronomy
+ - compression
+ - images
+ ---
+
+ # SBI-16-3D Dataset
+
+ SBI-16-3D is a dataset that is part of the AstroCompress project. It contains data assembled from the James Webb Space Telescope (JWST). <TODO>Describe data format</TODO>
+
+ # Usage
+
+ You first need to install the `datasets` and `astropy` packages:
+
+ ```bash
+ pip install datasets astropy
+ ```
+
+ There are two dataset configurations: `tiny` and `full`, each with `train` and `test` splits. The `tiny` configuration has two 4D images in `train` and one in `test`; the `full` configuration contains all the images in the `data/` directory.
+
+ ## Use from Hugging Face Directly
+
+ To use this data directly from Hugging Face, you'll want to log in on the command line before starting Python:
+
+ ```bash
+ huggingface-cli login
+ ```
+
+ or, from within Python:
+
+ ```python
+ import huggingface_hub
+ huggingface_hub.login(token=token)
+ ```
+
+ Then in your Python script:
+
+ ```python
+ from datasets import load_dataset
+ dataset = load_dataset("AstroCompress/SBI-16-3D", "tiny")
+ ds = dataset.with_format("np")
+ ```
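+
+ Each record carries the image cube plus per-file metadata (`ra`, `dec`, `pixscale`, `ntimes`, `image_id`, per the loading script). A minimal sketch of accessing one record (the exact cube shape depends on the file):
+
+ ```python
+ sample = ds["train"][0]
+ print(sample["image"].shape)        # (ntimes, 2048, 2048) per the feature spec
+ print(sample["ra"], sample["dec"])  # CRVAL1/CRVAL2 pulled from the FITS header
+ print(sample["image_id"])           # stem of the source FITS filename
+ ```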
+
+ ## Local Use
+
+ Alternatively, you can clone this repo and use it directly, without connecting to hf:
+
+ ```bash
+ git clone https://huggingface.co/datasets/AstroCompress/SBI-16-3D
+ ```
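+
+ Note that the FITS files under `data/` are tracked with Git LFS (see `.gitattributes`), so you may need `git-lfs` installed; if the clone leaves pointer files behind, running `git lfs install` and then `git lfs pull` inside the repo should fetch the actual data.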
+
+ Then `cd SBI-16-3D` and start Python like:
+
+ ```python
+ from datasets import load_dataset
+ dataset = load_dataset("./SBI-16-3D.py", "tiny", data_dir="./data/")
+ ds = dataset.with_format("np")
+ ```
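+
+ If you want to inspect a raw file without the `datasets` wrapper, the loading script reads the `SCI` HDU with astropy; a minimal sketch (using one of the files committed under `data/`):
+
+ ```python
+ from astropy.io import fits
+
+ with fits.open("data/jw01208002001_09101_00001_nrca1_uncal.fits") as hdul:
+     cube = hdul["SCI"].data[0]  # drop the length-one leading axis, as the script does
+     print(cube.shape, cube.dtype)
+ ```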
+
+ Now you should be able to use the `ds` variable like:
+
+ ```python
+ ds["test"][0]["image"].shape # -> (TBD)
+ ```
+
+ Note that downloading and converting the images into the local cache will take a long time for the `full` dataset. Afterward, usage should be quick, since the files are memory-mapped from disk.
+
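+ If you'd rather keep that cache somewhere specific, `load_dataset` accepts a `cache_dir` argument (the path below is illustrative):
+
+ ```python
+ dataset = load_dataset("AstroCompress/SBI-16-3D", "full", cache_dir="/scratch/astrocompress_cache")
+ ```
+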
SBI-16-3D.py ADDED
@@ -0,0 +1,177 @@
+ import os
+ import random
+ from glob import glob
+ import json
+ from huggingface_hub import hf_hub_download
+
+ from astropy.io import fits
+ import datasets
+ from datasets import DownloadManager
+ from fsspec.core import url_to_fs
+
+ _DESCRIPTION = (
+     """SBI-16-3D is a dataset that is part of the AstroCompress project. """
+     """It contains data assembled from the James Webb Space Telescope (JWST). """
+     """<TODO>Describe data format</TODO>"""
+ )
+
+ _HOMEPAGE = "https://google.github.io/AstroCompress"
+
+ _LICENSE = "CC BY 4.0"
+
+ _URL = "https://huggingface.co/datasets/AstroCompress/SBI-16-3D/resolve/main/"
+
+ _URLS = {
+     "tiny": {
+         "train": "./splits/tiny_train.jsonl",
+         "test": "./splits/tiny_test.jsonl",
+     },
+     "full": {
+         "train": "./splits/full_train.jsonl",
+         "test": "./splits/full_test.jsonl",
+     },
+ }
+
+ _REPO_ID = "AstroCompress/SBI-16-3D"
+
+ class SBI_16_3D(datasets.GeneratorBasedBuilder):
+     """SBI-16-3D Dataset"""
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name="tiny",
+             version=VERSION,
+             description="A small subset of the data, to test downstream workflows.",
+         ),
+         datasets.BuilderConfig(
+             name="full",
+             version=VERSION,
+             description="The full dataset",
+         ),
+     ]
+
+     DEFAULT_CONFIG_NAME = "tiny"
+
+     def __init__(self, **kwargs):
+         super().__init__(version=self.VERSION, **kwargs)
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     # one cube per file: (ntimes, 2048, 2048)
+                     "image": datasets.Array3D(shape=(None, 2048, 2048), dtype="uint16"),
+                     "ra": datasets.Value("float64"),
+                     "dec": datasets.Value("float64"),
+                     "pixscale": datasets.Value("float64"),
+                     "ntimes": datasets.Value("int64"),
+                     "image_id": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation="TBD",
+         )
+
+     def _split_generators(self, dl_manager: DownloadManager):
+
+         ret = []
+         base_path = dl_manager._base_path
+         # When run from a local clone the base path is a filesystem path;
+         # when loaded via the Hub it starts with the HF endpoint URL.
+         locally_run = not base_path.startswith(datasets.config.HF_ENDPOINT)
+         _, path = url_to_fs(base_path)
+
+         for split in ["train", "test"]:
+             if locally_run:
+                 split_file_location = os.path.normpath(os.path.join(path, _URLS[self.config.name][split]))
+                 split_file = dl_manager.download_and_extract(split_file_location)
+             else:
+                 split_file = hf_hub_download(repo_id=_REPO_ID, filename=_URLS[self.config.name][split], repo_type="dataset")
+             with open(split_file, encoding="utf-8") as f:
+                 data_filenames = []
+                 data_metadata = []
+                 for line in f:
+                     item = json.loads(line)
+                     data_filenames.append(item["image"])
+                     data_metadata.append({"ra": item["ra"],
+                                           "dec": item["dec"],
+                                           "pixscale": item["pixscale"],
+                                           "ntimes": item["ntimes"],
+                                           "image_id": item["image_id"]})
+             if locally_run:
+                 data_urls = [os.path.normpath(os.path.join(path, data_filename)) for data_filename in data_filenames]
+                 data_files = [dl_manager.download(data_url) for data_url in data_urls]
+             else:
+                 data_urls = data_filenames
+                 data_files = [hf_hub_download(repo_id=_REPO_ID, filename=data_url, repo_type="dataset") for data_url in data_urls]
+             ret.append(
+                 datasets.SplitGenerator(
+                     name=datasets.Split.TRAIN if split == "train" else datasets.Split.TEST,
+                     gen_kwargs={"filepaths": data_files,
+                                 "split_file": split_file,
+                                 "split": split,
+                                 "data_metadata": data_metadata},
+                 ),
+             )
+         return ret
+
+     def _generate_examples(self, filepaths, split_file, split, data_metadata):
+         """Generate SBI-16-3D examples"""
+
+         for idx, (filepath, item) in enumerate(zip(filepaths, data_metadata)):
+             task_instance_key = f"{self.config.name}-{split}-{idx}"
+             with fits.open(filepath, memmap=False) as hdul:
+                 # the first axis is length one, so we take the first element;
+                 # the second axis is the time axis and varies between images
+                 image_data = hdul["SCI"].data[0, :, :, :].tolist()
+             yield task_instance_key, {**{"image": image_data}, **item}
+
+
+ def make_split_jsonl_files(config_type="tiny", data_dir="./data",
+                            outdir="./splits", seed=42):
+     """
+     Create jsonl files for the SBI-16-3D dataset.
+
+     config_type: str, default="tiny"
+         The type of split to create. Options are "tiny" and "full".
+     data_dir: str, default="./data"
+         The directory where the FITS files are located.
+     outdir: str, default="./splits"
+         The directory where the jsonl files will be created.
+     seed: int, default=42
+         The seed for the random split.
+     """
+     random.seed(seed)
+     os.makedirs(outdir, exist_ok=True)
+
+     fits_files = glob(os.path.join(data_dir, "*.fits"))
+     random.shuffle(fits_files)
+     if config_type == "tiny":
+         train_files = fits_files[:2]
+         test_files = fits_files[2:3]
+     elif config_type == "full":
+         split_idx = int(0.8 * len(fits_files))
+         train_files = fits_files[:split_idx]
+         test_files = fits_files[split_idx:]
+     else:
+         raise ValueError("Unsupported config_type. Use 'tiny' or 'full'.")
+
+     def create_jsonl(files, split_name):
+         output_file = os.path.join(outdir, f"{config_type}_{split_name}.jsonl")
+         with open(output_file, "w") as out_f:
+             for file in files:
+                 print(file, flush=True, end="...")
+                 with fits.open(file, memmap=False) as hdul:
+                     image_id = os.path.basename(file).split(".fits")[0]
+                     ra = hdul["SCI"].header.get('CRVAL1', 0)
+                     dec = hdul["SCI"].header.get('CRVAL2', 0)
+                     pixscale = hdul["SCI"].header.get('CD1_2', 0.396)
+                     ntimes = hdul["SCI"].data.shape[0]
+                 item = {"image_id": image_id, "image": file, "ra": ra, "dec": dec,
+                         "pixscale": pixscale, "ntimes": ntimes}
+                 out_f.write(json.dumps(item) + "\n")
+
+     create_jsonl(train_files, "train")
+     create_jsonl(test_files, "test")
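+
+ # A hedged usage sketch (not an entry point of the committed script): the split
+ # jsonl files under ./splits/ could be regenerated from the FITS files like
+ #
+ #     make_split_jsonl_files("tiny", data_dir="./data", outdir="./splits")
+ #     make_split_jsonl_files("full", data_dir="./data", outdir="./splits")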
data/jw01208002001_09101_00001_nrca1_uncal.fits ADDED

Git LFS Details

  • SHA256: d27a608d5e2ef3cfe3e1653d06ccf94169585f101f60a20f8bbe10d7b919f2f7
  • Pointer size: 133 Bytes
  • Size of remote file: 50.4 MB
data/jw01783904008_02101_00004_nrca1_uncal.fits ADDED

Git LFS Details

  • SHA256: d3843655223909d87e1c80e0e8503c5d1e41cfaed184da3cc848c6ae23340581
  • Pointer size: 133 Bytes
  • Size of remote file: 67.2 MB
data/jw02078006001_02101_00006_nrca1_uncal.fits ADDED

Git LFS Details

  • SHA256: 108324d54e9089d46793e53bfab51fb8e039ed3e5fb9cd39c421acc773dd7005
  • Pointer size: 133 Bytes
  • Size of remote file: 83.9 MB
splits/tiny_test.jsonl ADDED
@@ -0,0 +1 @@
+ {"image_id": "jw02078006001_02101_00006_nrca1_uncal", "image": "./data/jw02078006001_02101_00006_nrca1_uncal.fits", "ra": 41.01955415796331, "dec": -50.1293030738652, "pixscale": 0.396, "ntimes": 1}
splits/tiny_train.jsonl ADDED
@@ -0,0 +1,2 @@
+ {"image_id": "jw01783904008_02101_00004_nrca1_uncal", "image": "./data/jw01783904008_02101_00004_nrca1_uncal.fits", "ra": 24.180739733402092, "dec": 15.742045567000984, "pixscale": 0.396, "ntimes": 1}
+ {"image_id": "jw01208002001_09101_00001_nrca1_uncal", "image": "./data/jw01208002001_09101_00001_nrca1_uncal.fits", "ra": 39.9893664900413, "dec": -1.6303009359988683, "pixscale": 0.396, "ntimes": 1}