import os
import numpy as np
import torch
from torch.utils.data import Dataset

class PPG2BPDataset(Dataset):
    """
    PyTorch Dataset for PPG2BP-Net with in-memory caching.

    Each subject lives in ``<data_root>/<sid>/signals_with_metadata.npz`` with
    arrays ``ppg_windows`` (N, 500), ``sbp_values`` (N,) and ``dbp_values``
    (N,).  One fixed window per subject — the 6th (index 5) per the
    PPG2BP-Net calibration rule — serves as the calibration reference; every
    other window becomes one sample paired with that calibration window.
    All arrays are loaded once at construction so ``__getitem__`` is pure
    in-memory indexing.
    """

    def __init__(self,
                 split_txt: str,
                 data_root: str = "data/final2",
                 min_windows: int = 6,
                 calib_idx: int = 5):
        """
        Args:
            split_txt:   text file with one subject ID per line (blank lines
                         ignored).
            data_root:   directory containing one sub-directory per subject.
            min_windows: subjects with fewer windows than this are skipped.
            calib_idx:   index of the calibration window (default 5, i.e. the
                         6th window).  Subjects without this window are
                         skipped — previously a hard-coded 5 combined with a
                         small ``min_windows`` could raise IndexError later in
                         ``__getitem__``.
        """
        super().__init__()
        self.data_root   = data_root
        self.min_windows = min_windows
        self.calib_idx   = calib_idx

        # 1) Read subject IDs.
        with open(split_txt, 'r') as f:
            subject_ids = [line.strip() for line in f if line.strip()]

        # 2) Preload each subject's arrays once.  A subject is kept only if it
        #    has at least min_windows windows AND the calibration index exists.
        self.data_cache = {}   # sid -> {"ppg", "sbp", "dbp"} float32 arrays
        for sid in subject_ids:
            path = os.path.join(data_root, sid, "signals_with_metadata.npz")
            if not os.path.isfile(path):
                continue
            # np.load on an .npz keeps the archive's file handle open; the
            # context manager closes it instead of leaking one per subject.
            # .astype() below copies, so the data outlives the handle.
            with np.load(path) as arrs:
                n_windows = arrs["ppg_windows"].shape[0]
                if n_windows < min_windows or n_windows <= calib_idx:
                    continue
                self.data_cache[sid] = {
                    "ppg": arrs["ppg_windows"].astype(np.float32),  # (N, 500)
                    "sbp": arrs["sbp_values"].astype(np.float32),   # (N,)
                    "dbp": arrs["dbp_values"].astype(np.float32),   # (N,)
                }

        # 3) Flattened (subject, window) index over all non-calibration
        #    windows.  Every cached subject already passed the size checks
        #    above, so no re-filtering is needed here.
        self.index_map = [
            (sid, wi)
            for sid, arrs in self.data_cache.items()
            for wi in range(arrs["ppg"].shape[0])
            if wi != calib_idx
        ]

    def __len__(self):
        """Total number of (subject, target-window) samples."""
        return len(self.index_map)

    def __getitem__(self, idx):
        """
        Return one sample: a target window plus the subject's fixed
        calibration window and its reference pressures.

        Returns:
            dict of float32 tensors — ``ppg_target`` (1, 500), ``ppg_calib``
            (1, 500), ``sbp_calib`` (1,), ``dbp_calib`` (1,), ``sbp_true``
            (1,), ``dbp_true`` (1,).
        """
        sid, win_idx = self.index_map[idx]
        arrs = self.data_cache[sid]
        cal = self.calib_idx

        return {
            "ppg_target": torch.from_numpy(arrs["ppg"][win_idx]).unsqueeze(0),
            "ppg_calib":  torch.from_numpy(arrs["ppg"][cal]).unsqueeze(0),
            "sbp_calib":  torch.tensor([arrs["sbp"][cal]], dtype=torch.float32),
            "dbp_calib":  torch.tensor([arrs["dbp"][cal]], dtype=torch.float32),
            "sbp_true":   torch.tensor([arrs["sbp"][win_idx]], dtype=torch.float32),
            "dbp_true":   torch.tensor([arrs["dbp"][win_idx]], dtype=torch.float32),
        }


# ----------------------------
# Quick sanity check when run as script
# ----------------------------
if __name__ == "__main__":
    from torch.utils.data import DataLoader

    # Build the training split and pull a single batch to eyeball shapes.
    dataset = PPG2BPDataset("data/data_split/split_train.txt", data_root="data/final2")
    loader = DataLoader(
        dataset,
        batch_size=64,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )

    print(f"# samples in train: {len(dataset)}")
    batch = next(iter(loader))
    print({name: tensor.shape for name, tensor in batch.items()})
    # Shapes should come out as (64, 1, 500) for the two PPG tensors
    # ('ppg_target', 'ppg_calib') and (64, 1) for each of the four
    # pressure tensors ('sbp_calib', 'dbp_calib', 'sbp_true', 'dbp_true').