# AstroM3Dataset / preprocess.py
# Script to preprocess the AstroM3 dataset (spectra, photometry, metadata)
# and push the processed splits to the Hugging Face Hub.
from collections import defaultdict
import datasets
from datasets import load_dataset
import numpy as np
from scipy import stats
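# Mapping from transform name to the metadata columns it applies to:
#   "abs": apparent magnitudes are converted to absolute magnitudes (uses parallax),
#   "cos"/"sin": the Galactic coordinates l and b are projected onto the unit circle,
#   "log": log10 is applied to the period.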
METADATA_FUNC = {
"abs": [
"mean_vmag",
"phot_g_mean_mag",
"phot_bp_mean_mag",
"phot_rp_mean_mag",
"j_mag",
"h_mag",
"k_mag",
"w1_mag",
"w2_mag",
"w3_mag",
"w4_mag",
],
"cos": ["l"],
"sin": ["b"],
"log": ["period"]
}
def preprocess_spectra(example):
"""
Preprocess spectral data. Steps:
- Interpolate flux and flux error to a fixed wavelength grid (3850 to 9000 Å).
- Normalize flux using mean and median absolute deviation (MAD).
- Append MAD as an auxiliary feature.
"""
spectra = example['spectra']
wavelengths = spectra[:, 0]
flux = spectra[:, 1]
flux_err = spectra[:, 2]
# Interpolate flux and flux error onto a fixed grid
new_wavelengths = np.arange(3850, 9000, 2)
flux = np.interp(new_wavelengths, wavelengths, flux)
flux_err = np.interp(new_wavelengths, wavelengths, flux_err)
# Normalize flux and flux error
mean = np.mean(flux)
    mad = stats.median_abs_deviation(flux[flux != 0])  # exclude exact zeros (e.g. masked pixels) from the MAD
flux = (flux - mean) / mad
flux_err = flux_err / mad
    aux_values = np.full_like(flux, np.log10(mad))  # store log10(MAD) as an auxiliary feature row
# Stack processed data into a single array
spectra = np.vstack([flux, flux_err, aux_values])
example['spectra'] = spectra
return example
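# Illustrative sketch (not in the original script): exercise preprocess_spectra
# on synthetic data. The (wavelength, flux, flux_err) column order is assumed
# from the indexing above; the asserted shape follows from the 2 Å grid.
def _demo_preprocess_spectra():
    rng = np.random.default_rng(0)
    wavelengths = np.linspace(3800.0, 9100.0, 4000)  # covers the target grid
    flux = rng.normal(1.0, 0.1, wavelengths.size)
    flux_err = np.full(wavelengths.size, 0.05)
    example = {'spectra': np.stack([wavelengths, flux, flux_err], axis=1)}
    out = preprocess_spectra(example)
    assert out['spectra'].shape == (3, 2575)  # rows: flux, flux_err, log10(MAD)
# _demo_preprocess_spectra()  # uncomment to run the check locally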
def preprocess_lc(example):
"""
Preprocess photometry (light curve) data. Steps:
- Remove duplicate time entries.
- Sort by Heliocentric Julian Date (HJD).
- Normalize flux and flux error using mean and median absolute deviation (MAD).
- Scale time values between 0 and 1.
    - Append auxiliary features (photo_cols metadata values, log10(MAD), and the time span delta_t in years).
"""
X = example['photometry']
aux_values = np.stack(list(example['metadata']['photo_cols'].values()))
# Remove duplicate entries
X = np.unique(X, axis=0)
# Sort based on HJD
sorted_indices = np.argsort(X[:, 0])
X = X[sorted_indices]
# Normalize flux and flux error
mean = X[:, 1].mean()
mad = stats.median_abs_deviation(X[:, 1])
X[:, 1] = (X[:, 1] - mean) / mad
X[:, 2] = X[:, 2] / mad
# Compute delta_t (time span of the light curve in years)
delta_t = (X[:, 0].max() - X[:, 0].min()) / 365
# Scale time from 0 to 1
X[:, 0] = (X[:, 0] - X[:, 0].min()) / (X[:, 0].max() - X[:, 0].min())
# Add MAD and delta_t to auxiliary metadata features
aux_values = np.concatenate((aux_values, [np.log10(mad), delta_t]))
# Add auxiliary features to the sequence
aux_values = np.tile(aux_values, (X.shape[0], 1))
X = np.concatenate((X, aux_values), axis=-1)
example['photometry'] = X
return example
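# Illustrative sketch (not in the original script): exercise preprocess_lc on a
# synthetic light curve. The (HJD, flux, flux_err) column order and the two
# photo_cols names below are assumptions for this demo only; the 9-column cast
# in main() implies four photo_cols in the real dataset.
def _demo_preprocess_lc():
    rng = np.random.default_rng(0)
    n = 200
    hjd = np.sort(rng.uniform(0.0, 1500.0, n))
    flux = rng.normal(12.0, 0.3, n)
    flux_err = np.full(n, 0.05)
    example = {
        'photometry': np.stack([hjd, flux, flux_err], axis=1),
        'metadata': {'photo_cols': {'amplitude': 0.4, 'period': 1.2}},
    }
    out = preprocess_lc(example)
    assert out['photometry'].shape == (n, 7)  # 3 raw + 2 photo_cols + log10(MAD) + delta_t
# _demo_preprocess_lc()  # uncomment to run the check locally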
def transform_metadata(example):
"""
Transforms the metadata of an example based on METADATA_FUNC.
"""
metadata = example["metadata"]
    # Apply the 'abs' transform: convert apparent magnitude m to absolute
    # magnitude M = m + 5 * log10(parallax_mas) - 10, i.e. the distance
    # modulus with d_pc = 1000 / parallax_mas. Uses 'parallax' from meta_cols.
for col in METADATA_FUNC["abs"]:
if col in metadata["meta_cols"]:
            # Clamp non-positive parallaxes to 1 mas so the log term is zero
            # (the magnitude then falls back to m - 10).
metadata["meta_cols"][col] = (
metadata["meta_cols"][col]
- 10
+ 5 * np.log10(np.where(metadata["meta_cols"]["parallax"] <= 0, 1, metadata["meta_cols"]["parallax"]))
)
# Process 'cos' transformation on meta_cols:
for col in METADATA_FUNC["cos"]:
if col in metadata["meta_cols"]:
metadata["meta_cols"][col] = np.cos(np.radians(metadata["meta_cols"][col]))
# Process 'sin' transformation on meta_cols:
for col in METADATA_FUNC["sin"]:
if col in metadata["meta_cols"]:
metadata["meta_cols"][col] = np.sin(np.radians(metadata["meta_cols"][col]))
# Process 'log' transformation on photo_cols:
for col in METADATA_FUNC["log"]:
if col in metadata["photo_cols"]:
metadata["photo_cols"][col] = np.log10(metadata["photo_cols"][col])
# Update the example with the transformed metadata.
example["metadata"] = metadata
return example
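# Illustrative check (not in the original script): the 'abs' branch above
# implements M = m + 5 * log10(parallax_mas) - 10; the column values here are
# made up to verify each transform on a toy example.
def _demo_transform_metadata():
    example = {
        'metadata': {
            'meta_cols': {'mean_vmag': 10.0, 'parallax': 10.0, 'l': 90.0, 'b': 30.0},
            'photo_cols': {'period': 2.0},
        }
    }
    out = transform_metadata(example)
    # parallax = 10 mas -> d = 100 pc -> M = 10 + 5 * 1 - 10 = 5
    assert np.isclose(out['metadata']['meta_cols']['mean_vmag'], 5.0)
    assert np.isclose(out['metadata']['meta_cols']['b'], 0.5)  # sin(30 deg)
    assert np.isclose(out['metadata']['photo_cols']['period'], np.log10(2.0))
# _demo_transform_metadata()  # uncomment to run the check locally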
def compute_metadata_stats(ds):
"""
Compute the mean and standard deviation for each column in meta_cols and photo_cols.
"""
meta_vals = defaultdict(list)
photo_vals = defaultdict(list)
# Accumulate values for each column
for example in ds:
meta = example["metadata"]["meta_cols"]
photo = example["metadata"]["photo_cols"]
for col, value in meta.items():
meta_vals[col].append(value)
for col, value in photo.items():
photo_vals[col].append(value)
# Compute mean and standard deviation for each column
    # Compute mean and standard deviation for each column (named col_stats to
    # avoid shadowing the imported scipy `stats` module)
    col_stats = {"meta_cols": {}, "photo_cols": {}}
    for col, values in meta_vals.items():
        arr = np.stack(values)
        col_stats["meta_cols"][col] = {"mean": arr.mean(), "std": arr.std()}
    for col, values in photo_vals.items():
        arr = np.stack(values)
        col_stats["photo_cols"][col] = {"mean": arr.mean(), "std": arr.std()}
    return col_stats
def normalize_metadata(example, info):
"""
Normalize metadata values using z-score normalization:
(value - mean) / std.
The 'stats' parameter should be a dictionary with computed means and stds for both meta_cols and photo_cols.
"""
metadata = example["metadata"]
# Normalize meta_cols
    for col, value in metadata["meta_cols"].items():
        mean = info["meta_cols"][col]["mean"]
        std = info["meta_cols"][col]["std"]
        metadata["meta_cols"][col] = (value - mean) / std
    # Normalize photo_cols
    for col, value in metadata["photo_cols"].items():
        mean = info["photo_cols"][col]["mean"]
        std = info["photo_cols"][col]["std"]
        metadata["photo_cols"][col] = (value - mean) / std
example["metadata"] = metadata
return example
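# Illustrative sketch (not in the original script): compute_metadata_stats only
# needs an iterable of examples, so a small in-memory list is enough to show
# the stats + z-score round trip. Column names here are placeholders.
def _demo_normalize_metadata():
    examples = [
        {'metadata': {'meta_cols': {'j_mag': v}, 'photo_cols': {'period': p}}}
        for v, p in [(10.0, 0.1), (12.0, 0.3), (14.0, 0.5)]
    ]
    info = compute_metadata_stats(examples)
    out = normalize_metadata(examples[1], info)
    assert np.isclose(out['metadata']['meta_cols']['j_mag'], 0.0)  # the mean maps to 0
# _demo_normalize_metadata()  # uncomment to run the check locally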
def preprocess_metadata(example):
"""
Extract the values from 'meta_cols' and stack them into a numpy array.
"""
example["metadata"] = np.stack(list(example["metadata"]["meta_cols"].values()))
return example
def main():
"""
Main function for processing and uploading datasets.
- Loads each dataset based on subset and random seed.
- Applies preprocessing for spectra, photometry, and metadata.
- Casts columns to appropriate feature types.
- Pushes the processed dataset to Hugging Face Hub.
"""
for sub in ["sub10", "sub25", "sub50", "full"]:
for seed in [42, 66, 0, 12, 123]:
name = f"{sub}_{seed}"
print(f"Processing: {name}")
# Load dataset from Hugging Face Hub
ds = load_dataset('MeriDK/AstroM3Dataset', name=name, trust_remote_code=True, num_proc=16)
ds = ds.with_format('numpy')
# Transform and normalize metadata
ds = ds.map(transform_metadata, num_proc=16)
info = compute_metadata_stats(ds['train'])
ds = ds.map(lambda example: normalize_metadata(example, info))
# Transform spectra
ds = ds.map(preprocess_spectra, num_proc=16)
ds = ds.cast_column('spectra', datasets.Array2D(shape=(3, 2575), dtype='float32'))
# Transform photometry
ds = ds.map(preprocess_lc, num_proc=16)
ds = ds.cast_column('photometry', datasets.Array2D(shape=(None, 9), dtype='float32'))
# Stack metadata into one numpy array
ds = ds.map(preprocess_metadata, num_proc=16)
ds = ds.cast_column('metadata', datasets.Sequence(feature=datasets.Value('float32'), length=34))
# Change label type
ds = ds.cast_column('label', datasets.ClassLabel(
names=['DSCT', 'EA', 'EB', 'EW', 'HADS', 'M', 'ROT', 'RRAB', 'RRC', 'SR']))
# Upload processed dataset to Hugging Face Hub
ds.push_to_hub('MeriDK/AstroM3Processed', config_name=name)
if __name__ == '__main__':
main()
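# Note (assumption, not stated in the original script): push_to_hub needs an
# authenticated Hugging Face session with write access to the target repo,
# e.g. `huggingface-cli login` before running `python preprocess.py`.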