# from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data import DataLoader, TensorDataset
from src.utils.data import Dataset
import torch
from torch import Tensor
import logging
import numpy as np
import os
import sys
import pickle
import random

from src.utils.scaler import StandardScaler
from src.utils.augment import apply_augmentations
import zipfile
from typing import List, Optional
from src.utils.dq import build_dq_transforms, DQTransform


class MemmapDataset(torch.utils.data.Dataset):
    """Dataset over (possibly memory-mapped) x/y arrays with per-channel
    standardization and optional DQ transforms applied per sample.

    Args:
        x_memmap / y_memmap: Array-likes of shape (N, ...) indexable per sample
            (np.memmap or plain ndarray).
        scalers: One scaler per channel; ``scalers[i].transform`` is applied to
            channel ``i`` (last axis) of both x and y.
        output_dim: Number of last-axis channels to standardize.
        limit_samples: Optional cap on the reported dataset length (used to
            shrink val/test for quick evaluation).
        dq_transforms: Optional list of callables ``(x, y) -> (x, y)``.
    """

    def __init__(self, x_memmap, y_memmap, scalers, output_dim, limit_samples=None, dq_transforms: "Optional[List[DQTransform]]" = None):
        self.x = x_memmap
        self.y = y_memmap
        self.limit = None if limit_samples is None else int(limit_samples)
        self.output_dim = int(output_dim)
        self.scalers = scalers or []
        self.dq_transforms = dq_transforms or []

    def __len__(self):
        n = int(self.x.shape[0])
        return n if self.limit is None else min(n, self.limit)

    def __getitem__(self, idx):
        # Copy into fresh, writable float32 arrays. ``copy=True`` matters twice:
        # (1) slices of a read-only memmap cannot be standardized in place (the
        # previous ``ascontiguousarray`` round-trip returned the read-only view
        # unchanged, so the in-place transform below raised and was silently
        # swallowed — samples were served unstandardized); (2) for writable
        # in-memory arrays, copying prevents mutating the caller's backing
        # arrays on every epoch.
        x = np.array(self.x[idx], dtype=np.float32, copy=True)
        y = np.array(self.y[idx], dtype=np.float32, copy=True)
        # Per-channel standardization; skipped when scalers were never fitted.
        if len(self.scalers) >= self.output_dim:
            for i in range(self.output_dim):
                x[..., i] = self.scalers[i].transform(x[..., i])
                y[..., i] = self.scalers[i].transform(y[..., i])
        # DQ transforms are best-effort: on failure fall back to the
        # standardized-but-untransformed sample.
        if self.dq_transforms:
            try:
                _x, _y = x, y
                for t in self.dq_transforms:
                    _x, _y = t(_x, _y)
                x, y = _x, _y
            except Exception:
                pass
        # from_numpy shares memory (no extra copy) so the DataLoader can pin
        # memory efficiently; ascontiguousarray normalizes whatever the DQ
        # transforms returned.
        return (torch.from_numpy(np.ascontiguousarray(x, dtype=np.float32)),
                torch.from_numpy(np.ascontiguousarray(y, dtype=np.float32)))


def _ensure_memmap_paths(npz_path: str, out_dir: str, prefix: str):
    os.makedirs(out_dir, exist_ok=True)
    x_path = os.path.join(out_dir, f"{prefix}_x.npy")
    y_path = os.path.join(out_dir, f"{prefix}_y.npy")
    if (not os.path.exists(x_path)) or (not os.path.exists(y_path)):
        try:
            with zipfile.ZipFile(npz_path, 'r') as zf:
                names = zf.namelist()
                x_name = next((n for n in names if n.endswith('x.npy')), None)
                y_name = next((n for n in names if n.endswith('y.npy')), None)
                if x_name:
                    with zf.open(x_name) as src, open(x_path, 'wb') as dst:
                        dst.write(src.read())
                if y_name:
                    with zf.open(y_name) as src, open(y_path, 'wb') as dst:
                        dst.write(src.read())
        except Exception as e:
            print(f"failed to extract memmap arrays from {npz_path}: {e}")
    return x_path, y_path

def get_dataloader(datapath, batch_size, output_dim, mode="train", num_workers=0, pin_memory=False, prefetch_factor=2, persistent_workers=False, aug_enable=False, aug_noise_std=0.0, aug_mask_prob=0.0, memmap_eval=False, limit_eval_samples=None, dq_enable=False, dq_config=None, memmap_dir: Optional[str] = None):
    """Load data, apply optional DQ and augmentation, and build DataLoaders.

    Args:
        datapath: Directory containing "train.npz" / "val.npz" / "test.npz".
        batch_size: Batch size used for every DataLoader built.
        output_dim: Number of last-axis channels to standardize (one
            StandardScaler per channel).
        mode: "train" loads train/val/test; "val" or "test" loads only that split.
        num_workers, pin_memory, prefetch_factor, persistent_workers: Forwarded
            to DataLoader (worker-related options only when num_workers > 0).
        aug_enable, aug_noise_std, aug_mask_prob: Optional augmentation applied
            to the training split.
        memmap_eval: Prefer memory-mapped ".npy" arrays (extracted from the npz
            archives on demand) over loading whole arrays into RAM.
        limit_eval_samples: Optional cap on val/test length (memmap path only).
        dq_enable, dq_config: Optional data-quality transform pipeline.
        memmap_dir: Memmap cache directory; defaults to $MEMMAP_DIR or
            "<datapath>/_memmap".

    Returns:
        dict with "<split>_loader" entries for the loaded splits, the fitted
        "scalers", and the augmentation/DQ settings for downstream logging.
    """
    data = {}
    processed = {}
    results = {}

    mode_lc = str(mode).lower()
    if mode_lc == "train":
        categories = ["train", "val", "test"]
    elif mode_lc == "val":
        categories = ["val"]
    else:
        categories = ["test"]

    # Prepare scalers from train if available
    scalers = []
    x_train_for_scaler = None
    train_npz = os.path.join(datapath, "train.npz")
    if os.path.exists(train_npz):
        if memmap_eval:
            _mm_dir = memmap_dir or os.environ.get("MEMMAP_DIR", os.path.join(datapath, "_memmap"))
            if not os.path.isdir(_mm_dir):
                _mm_dir = os.path.join(datapath, "_memmap")
            tx_path, _ = _ensure_memmap_paths(train_npz, _mm_dir, "train")
            try:
                x_train_for_scaler = np.load(tx_path, mmap_mode='r')
            except Exception:
                x_train_for_scaler = None
        if x_train_for_scaler is None:
            try:
                cat_data = np.load(train_npz, allow_pickle=True)
                x_train_for_scaler = cat_data["x"]
            except Exception:
                x_train_for_scaler = None

    # Optional DQ pipeline
    dq_transforms: List[DQTransform] = build_dq_transforms(dq_config) if dq_enable else []

    # Build datasets per category
    for category in categories:
        cat_npz = os.path.join(datapath, category + ".npz")
        print(cat_npz)
        # Prefer memmap: if _memmap/{category}_x.npy and _memmap/{category}_y.npy
        # already exist, load them directly; otherwise extract them from the npz
        # on demand. If both fail, fall back to in-memory npz loading below.
        memmap_loaded = False
        if memmap_eval:
            _mm_dir = memmap_dir or os.environ.get("MEMMAP_DIR", os.path.join(datapath, "_memmap"))
            if not os.path.isdir(_mm_dir):
                _mm_dir = os.path.join(datapath, "_memmap")
            print(f"memmap_dir={_mm_dir}")
            direct_x = os.path.join(_mm_dir, f"{category}_x.npy")
            direct_y = os.path.join(_mm_dir, f"{category}_y.npy")
            try:
                if os.path.exists(direct_x) and os.path.exists(direct_y):
                    x_mem = np.load(direct_x, mmap_mode='r')
                    y_mem = np.load(direct_y, mmap_mode='r')
                    memmap_loaded = True
                elif os.path.exists(cat_npz):
                    x_path, y_path = _ensure_memmap_paths(cat_npz, _mm_dir, category)
                    x_mem = np.load(x_path, mmap_mode='r')
                    y_mem = np.load(y_path, mmap_mode='r')
                    memmap_loaded = True
            except Exception as e:
                print(f"memmap load failed: {e}")
        if memmap_loaded:
            # Initialize scalers lazily from the first available source; prefer
            # train statistics when they could be loaded above.
            if not scalers:
                src = x_train_for_scaler if x_train_for_scaler is not None else x_mem
                for i in range(int(output_dim)):
                    scalers.append(StandardScaler(mean=src[..., i].mean(), std=src[..., i].std()))
            ds = MemmapDataset(x_mem, y_mem, scalers, output_dim, limit_samples=(None if category == "train" else limit_eval_samples), dq_transforms=dq_transforms)
            processed[category] = ds
            # logging shapes
            try:
                print(category)
                print(x_mem.shape)
                print(y_mem.shape)
            except Exception:
                pass
        else:
            # Regular in-memory load
            cat_data = np.load(cat_npz, allow_pickle=True)
            X = cat_data["x"]
            Y = cat_data["y"]
            # apply DQ pipeline in-memory (best-effort; falls back to raw arrays)
            if dq_transforms:
                try:
                    x_list = []
                    y_list = []
                    for i in range(X.shape[0]):
                        _x, _y = X[i].astype(np.float32, copy=True), Y[i].astype(np.float32, copy=True)
                        for t in dq_transforms:
                            _x, _y = t(_x, _y)
                        x_list.append(_x)
                        y_list.append(_y)
                    X = np.stack(x_list, axis=0)
                    Y = np.stack(y_list, axis=0)
                except Exception:
                    pass
            # Fit scalers on the first in-memory split if none were built yet
            try:
                if not scalers and X is not None:
                    for i in range(int(output_dim)):
                        scalers.append(StandardScaler(mean=X[..., i].mean(), std=X[..., i].std()))
            except Exception:
                pass
            # Standardize per-channel (in place) using the fitted scalers
            try:
                for i in range(int(output_dim)):
                    X[..., i] = scalers[i].transform(X[..., i])
                    Y[..., i] = scalers[i].transform(Y[..., i])
            except Exception:
                pass
            # Stash standardized arrays; they are wrapped into TensorDatasets below.
            if category == "train":
                data["x_train"] = X
                data["y_train"] = Y
            elif category == "val":
                data["x_val"] = X
                data["y_val"] = Y
            else:
                data["x_test"] = X
                data["y_test"] = Y

        # Optional augmentation on training data (support memmap-backed training)
        try:
            if aug_enable and category == "train":
                if "train" in processed:
                    # NOTE(review): this materializes memmap-backed arrays in RAM.
                    ds = processed["train"]
                    x_aug, y_aug = apply_augmentations(ds.x, ds.y, noise_std=aug_noise_std, mask_prob=aug_mask_prob)
                    ds.x = x_aug
                    ds.y = y_aug
                else:
                    x_aug, y_aug = apply_augmentations(X, Y, noise_std=aug_noise_std, mask_prob=aug_mask_prob)
                    data["x_train"] = x_aug
                    data["y_train"] = y_aug
        except Exception as e:
            print(f"augmentation failed or skipped: {e}")

    # Wrap non-memmap in-memory arrays as TensorDatasets so DataLoader
    # construction below is uniform across memmap and in-memory splits.
    try:
        for c in categories:
            x_key = "x_" + c
            y_key = "y_" + c
            if (c not in processed) and (x_key in data) and (y_key in data):
                X = data[x_key]
                Y = data[y_key]
                # Only ensure float32/contiguity here. The arrays were already
                # standardized in the per-category loop above; re-applying the
                # scalers at this point (as an earlier revision did) would
                # standardize the data twice.
                try:
                    X = np.ascontiguousarray(X, dtype=np.float32)
                    Y = np.ascontiguousarray(Y, dtype=np.float32)
                except Exception:
                    pass
                processed[c] = TensorDataset(torch.from_numpy(X), torch.from_numpy(Y))
    except Exception as e:
        print(f"wrap in-memory datasets failed or skipped: {e}")

    # Construct DataLoaders for only the loaded categories
    dl_kwargs = {}
    if int(num_workers) > 0:
        dl_kwargs.update(dict(num_workers=int(num_workers), prefetch_factor=int(prefetch_factor), persistent_workers=bool(persistent_workers)))
    if bool(pin_memory):
        dl_kwargs.update(dict(pin_memory=True))

    try:
        if "train" in processed:
            results["train_loader"] = DataLoader(processed["train"], batch_size=int(batch_size), shuffle=True, drop_last=True, **dl_kwargs)
        if "val" in processed:
            results["val_loader"] = DataLoader(processed["val"], batch_size=int(batch_size), shuffle=False, drop_last=False, **dl_kwargs)
        if "test" in processed:
            results["test_loader"] = DataLoader(processed["test"], batch_size=int(batch_size), shuffle=False, drop_last=False, **dl_kwargs)
    except Exception as e:
        print(f"creating DataLoaders failed or skipped: {e}")

    results["scalers"] = scalers
    # also persist augmentation config for downstream logging
    results["aug_enable"] = bool(aug_enable)
    results["aug_noise_std"] = float(aug_noise_std)
    results["aug_mask_prob"] = float(aug_mask_prob)
    results["dq_enable"] = bool(dq_enable)
    results["dq_config"] = dq_config
    return results


def check_device(device=None):
    """
    Resolve the device used for training and evaluation.

    Args:
        device (torch.device or str, optional): Desired device. When omitted,
            CUDA is used if available, otherwise CPU.

    Returns:
        torch.device: The resolved device. A torch.device argument is returned
        as-is; a string is converted via torch.device(...).
    """
    if device is not None:
        # Normalize strings; pass torch.device instances straight through.
        return device if isinstance(device, torch.device) else torch.device(device)
    print("`device` is missing, try to train and evaluate the model on default device.")
    if torch.cuda.is_available():
        print("cuda device is available, place the model on the device.")
        return torch.device("cuda")
    print("cuda device is not available, place the model on cpu.")
    return torch.device("cpu")


def get_num_nodes(dataset, memmap_dir: Optional[str] = None):
    """
    Get the number of nodes for a given dataset.

    Resolution order (designed to avoid loading large npz archives into RAM):
    existing memmap '<split>_x.npy' files, then on-demand extraction from
    '<split>.npz', then a direct np.load of the npz. The node count is read
    from axis 2 of the 'x' array (assumes a layout like
    (samples, time, nodes, ...) -- TODO confirm against the dataset builder).

    Parameters:
    dataset (str): The name of the dataset, resolved under './data/<dataset>'.
    memmap_dir (str, optional): Memmap cache directory; defaults to
        $MEMMAP_DIR or './data/<dataset>/_memmap'.

    Returns:
    int: The number of nodes (maximum observed across the available splits).

    Raises:
    FileNotFoundError: If no split file can be found and no hard-coded
        fallback applies.

    """
    base = os.path.join("./data", str(dataset))
    _mm_dir = memmap_dir or os.environ.get("MEMMAP_DIR", os.path.join(base, "_memmap"))
    if not os.path.isdir(_mm_dir):
        _mm_dir = os.path.join(base, "_memmap")

    def _shape_from_memmap(prefix: str):
        # Read the node axis from an existing memmap file without loading data.
        p = os.path.join(_mm_dir, f"{prefix}_x.npy")
        if os.path.exists(p):
            try:
                a = np.load(p, mmap_mode='r')
                return int(a.shape[2])
            except Exception:
                return None
        return None

    # Try memmap files first to avoid loading large npz into RAM
    node_counts = []
    for split in ["train", "val", "test"]:
        n = _shape_from_memmap(split)
        if n is not None:
            node_counts.append(n)

    # If no memmap available, peek inside npz by extracting just x.npy to disk
    if len(node_counts) == 0:
        for split in ["train", "val", "test"]:
            candidate = os.path.join(base, f"{split}.npz")
            if os.path.exists(candidate):
                try:
                    x_path, _ = _ensure_memmap_paths(candidate, _mm_dir, split)
                    a = np.load(x_path, mmap_mode='r')
                    node_counts.append(int(a.shape[2]))
                except Exception:
                    pass

    # Fallback: direct np.load on npz to infer shape
    if len(node_counts) == 0:
        for split in ["train", "val", "test"]:
            candidate = os.path.join(base, f"{split}.npz")
            if os.path.exists(candidate):
                try:
                    cat = np.load(candidate, allow_pickle=True)
                    node_counts.append(int(cat["x"].shape[2]))
                except Exception:
                    pass

    if len(node_counts) > 0:
        # Choose the maximum nodes observed across splits to satisfy eval shapes
        return int(max(node_counts))

    # Fallbacks
    if str(dataset).upper() == "SINPA":
        return 1687
    raise FileNotFoundError(
        f"Cannot infer num_nodes for dataset '{dataset}'. Place './data/{dataset}/train.npz' or add a fallback."
    )


def seed_everything(seed: int = 42):
    """Seed the random, numpy and torch RNGs for reproducible runs.

    A ``None`` seed is a no-op; a non-integer seed falls back to 42. cudnn is
    switched to deterministic mode when available.
    """
    if seed is None:
        return
    try:
        seed_value = int(seed)
    except Exception:
        seed_value = 42
    # Seed every RNG family in the same order as before.
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed_value)
    try:
        import torch.backends.cudnn as cudnn
        cudnn.deterministic = True
        cudnn.benchmark = False
    except Exception:
        pass


def get_dataframe(datapath, batch_size, output_dim, mode="train"):
    """
    Load the 'train' and 'test' npz splits and return their first-step targets.

    For each split this loads '<split>.npz', prints the path and the x/y array
    shapes, and extracts ``np.squeeze(y[:, 0:1, :, :])`` — the targets at the
    first prediction step (y is assumed 4-D, e.g.
    (samples, horizon, nodes, features) — TODO confirm).

    Args:
        datapath (str): The path to the data directory.
        batch_size (int): Unused; kept for interface compatibility.
        output_dim (int): Unused; kept for interface compatibility.
        mode (str, optional): Unused; kept for interface compatibility.
            Defaults to "train".

    Returns:
        list: [train_targets, test_targets] as numpy arrays.
    """
    frames = []
    for category in ["train", "test"]:
        path = os.path.join(datapath, category + ".npz")
        print(path)
        cat_data = np.load(path, allow_pickle=True)
        # keep the original shape logging
        print(cat_data["x"].shape)
        print(cat_data["y"].shape)
        # first prediction step only; squeeze drops the singleton horizon axis
        frames.append(np.squeeze(cat_data["y"][0:, 0:1, 0:, 0:]))
    return frames


def build_node_groups_from_csv(csv_path: str, num_nodes: int, node_col_candidates=None, group_col_candidates=None):
    """Auto-build node groups from a CSV file (e.g. './aux_data/lots_location.csv').

    Detects the node-id and group columns from common header names and returns
    ``{group_name: [sorted node indices]}``. Returns ``None`` when the file is
    missing or unreadable, when no usable columns are detected, or when no
    group ends up with at least one valid node index in ``[0, num_nodes)``.
    """
    try:
        import pandas as pd
    except Exception:
        print("pandas is required to build node groups from csv")
        return None
    if not os.path.exists(csv_path):
        return None
    try:
        table = pd.read_csv(csv_path)
    except Exception as e:
        print(f"failed to read csv {csv_path}: {e}")
        return None

    node_names = node_col_candidates or ["node_id", "sensor_id", "id", "lot_id"]
    group_names = group_col_candidates or ["group", "region", "district", "location", "area"]
    node_col = None
    for candidate in node_names:
        if candidate in table.columns:
            node_col = candidate
            break
    group_col = None
    for candidate in group_names:
        if candidate in table.columns:
            group_col = candidate
            break
    if node_col is None or group_col is None:
        print(f"could not detect node/group columns in {csv_path}; columns: {list(table.columns)}")
        return None

    def _as_int(value):
        # Non-integer node ids become None and are dropped below.
        try:
            return int(value)
        except Exception:
            return None

    pairs = table[[node_col, group_col]].copy()
    pairs[node_col] = pairs[node_col].apply(_as_int)
    pairs = pairs.dropna()
    upper = max(0, int(num_nodes) - 1)
    pairs = pairs[pairs[node_col].between(0, upper)]

    groups = {}
    for gname, members in pairs.groupby(group_col):
        # dedupe and sort the surviving indices for each group
        indices = sorted({int(v) for v in members[node_col].tolist() if v is not None})
        if indices:
            groups[str(gname)] = indices
    return groups if groups else None


# Build groups from coordinates-only CSV via grid binning
# Supports files like './aux_data/lots_location.csv' without headers.
def build_geo_groups_from_csv(csv_path: str, num_nodes: int, num_lat_bins: int = 4, num_lon_bins: int = 4):
    """Build node groups by binning coordinates from a CSV into a lat/lon grid.

    Supports headerless two-column files like './aux_data/lots_location.csv'
    (row order is assumed to correspond to node index -- TODO confirm).
    Returns {"lat<i>_lon<j>": [sorted node indices]} or None on any failure.
    """
    try:
        import pandas as pd
        import numpy as np  # NOTE(review): shadows the module-level numpy import; redundant but harmless
    except Exception:
        print("pandas/numpy are required to build geo groups from csv")
        return None
    if not os.path.exists(csv_path):
        return None
    df = None
    # Try reading with no header first (common for coords-only files)
    try:
        df = pd.read_csv(csv_path, header=None)
    except Exception:
        pass
    if df is None:
        try:
            df = pd.read_csv(csv_path)
        except Exception as e:
            print(f"failed to read csv {csv_path}: {e}")
            return None
    # Detect lat/lon columns
    lat, lon = None, None
    colnames = [str(c).lower() for c in df.columns]
    # case: two unnamed columns (header=None yields integer column labels "0", "1", ...)
    if len(df.columns) >= 2 and set(colnames).issubset({"0", "1", "2"}):
        lat = pd.to_numeric(df.iloc[:, 0], errors="coerce")
        lon = pd.to_numeric(df.iloc[:, 1], errors="coerce")
    else:
        # try common names
        lat_candidates = ["lat", "latitude", "y", "coord_y"]
        lon_candidates = ["lon", "lng", "longitude", "x", "coord_x"]
        lat_col = next((c for c in df.columns if str(c).lower() in lat_candidates), None)
        lon_col = next((c for c in df.columns if str(c).lower() in lon_candidates), None)
        if lat_col is None or lon_col is None:
            # If only two columns exist, assume lat/lon
            if len(df.columns) == 2:
                lat = pd.to_numeric(df.iloc[:, 0], errors="coerce")
                lon = pd.to_numeric(df.iloc[:, 1], errors="coerce")
            else:
                print(f"could not detect lat/lon columns in {csv_path}; columns: {list(df.columns)}")
                return None
        else:
            lat = pd.to_numeric(df[lat_col], errors="coerce")
            lon = pd.to_numeric(df[lon_col], errors="coerce")
    # keep first num_nodes entries (rows beyond the node count are ignored)
    lat = lat.iloc[: int(num_nodes)].reset_index(drop=True)
    lon = lon.iloc[: int(num_nodes)].reset_index(drop=True)
    mask = lat.notna() & lon.notna()
    if mask.sum() == 0:
        return None
    lat = lat[mask]
    lon = lon[mask]
    # node indices of the rows that survived the NaN filter
    idxs = np.arange(len(mask))[mask.values]
    # compute quantile-based bins (roughly equal-population grid cells)
    try:
        lat_edges = np.quantile(lat.values, np.linspace(0, 1, num_lat_bins + 1))
        lon_edges = np.quantile(lon.values, np.linspace(0, 1, num_lon_bins + 1))
    except Exception:
        # fallback to uniform bins
        lat_edges = np.linspace(float(lat.min()), float(lat.max()), num_lat_bins + 1)
        lon_edges = np.linspace(float(lon.min()), float(lon.max()), num_lon_bins + 1)
    # digitize against the interior edges only, so every point lands in one of the N bins
    lat_bins = np.digitize(lat.values, lat_edges[1:-1], right=False)
    lon_bins = np.digitize(lon.values, lon_edges[1:-1], right=False)
    groups = {}
    for i, (lb, gb) in enumerate(zip(lat_bins, lon_bins)):
        name = f"lat{int(lb)}_lon{int(gb)}"
        groups.setdefault(name, []).append(int(idxs[i]))
    if len(groups) == 0:
        return None
    # sort indices in each group
    for k in list(groups.keys()):
        groups[k] = sorted(set(groups[k]))
    return groups
