import os
import sys
import json
import random
import types
import numpy as np
import torch
import scipy.signal as sps
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split

# Make LibEER importable (match training scripts' top-level imports).
# Both the inner package dir and the repo root are prepended to sys.path so
# that `config.*` and `data_utils.*` resolve exactly as they do when running
# LibEER's own training scripts. Package dir is inserted first so it wins.
PROJECT_ROOT = os.path.dirname(os.path.dirname(__file__))
LIBEER_REPO_ROOT = os.path.join(PROJECT_ROOT, 'LibEER')
LIBEER_PKG_ROOT = os.path.join(LIBEER_REPO_ROOT, 'LibEER')
for p in [LIBEER_PKG_ROOT, LIBEER_REPO_ROOT]:
    if p not in sys.path:
        sys.path.insert(0, p)

from config.setting import Setting, preset_setting
from data_utils.load_data import get_data
from data_utils.split import merge_to_part, get_split_index, index_to_data


def parse_list(arg):
    """Coerce *arg* into a list of ints, or return None.

    Accepts a list/tuple of int-like values or a comma-separated string
    such as "1,2,3" (blank tokens are skipped). None, the empty string,
    and any unsupported type all yield None.
    """
    if arg is None or arg == '':
        return None
    if isinstance(arg, (list, tuple)):
        return [int(item) for item in arg]
    if isinstance(arg, str):
        tokens = (tok.strip() for tok in arg.split(','))
        return [int(tok) for tok in tokens if tok]
    return None


def parse_float_pair(arg):
    """Parse *arg* into a two-element list of floats, or None.

    None passes through unchanged. A length-2 list/tuple is converted
    element-wise; a string must contain exactly two comma-separated
    numbers. Everything else raises ValueError.
    """
    if arg is None:
        return None
    if isinstance(arg, (list, tuple)) and len(arg) == 2:
        lo, hi = arg
        return [float(lo), float(hi)]
    if isinstance(arg, str):
        pieces = [piece.strip() for piece in arg.split(',') if piece.strip()]
        if len(pieces) != 2:
            raise ValueError('bounds 必须是两个数值，用逗号分隔，如 4.5,5.5')
        return [float(pieces[0]), float(pieces[1])]
    raise ValueError('bounds 应为长度为2的数组或形如 "a,b" 的字符串')


def labels_to_indices(y):
    """Convert labels to a 1-D int64 class-index array.

    Multi-dimensional input is treated as one-hot/score rows and reduced
    with argmax over the last axis; 0-D/1-D input is cast directly.
    """
    arr = np.array(y)
    if arr.ndim <= 1:
        return arr.astype(np.int64)
    return np.argmax(arr, axis=-1).astype(np.int64)


def compute_stats(train):
    """Return (mean, std) of the training split as float32, keepdims on.

    Rank 3 input (N, C, T) is reduced over samples and time; rank 4 input
    (N, S, C, B) over samples and segments. A 1e-8 epsilon is folded into
    std so later division never hits zero. Other ranks raise ValueError.
    """
    if train.ndim == 3:        # (N, C, T) -> per-channel stats
        reduce_axes = (0, 2)
    elif train.ndim == 4:      # (N, S, C, B) -> per-channel/band stats
        reduce_axes = (0, 1)
    else:
        raise ValueError(f'不支持的数据维度: {train.shape}')
    mean = train.mean(axis=reduce_axes, keepdims=True).astype(np.float32)
    std = (train.std(axis=reduce_axes, keepdims=True) + 1e-8).astype(np.float32)
    return mean, std


def apply_norm(x, mean, std):
    """Z-score *x* with precomputed stats; float32 output.

    None and empty ndarrays map to None so callers can skip absent splits.
    """
    if x is None:
        return None
    if isinstance(x, np.ndarray) and x.size == 0:
        return None
    standardized = (x - mean) / std
    return standardized.astype(np.float32)

def _deap_original_order():
    """Return the 32 DEAP electrode names in the dataset's native order."""
    return ('Fp1 AF3 F3 F7 FC5 FC1 C3 T7 CP5 CP1 P3 P7 PO3 O1 Oz Pz '
            'Fp2 AF4 Fz F4 F8 FC6 FC2 Cz C4 T8 CP6 CP2 P4 P8 PO4 O2').split()

def _deap_ts_order():
    """Return the 28-channel TS layout (native order minus Oz/Pz/Fz/Cz)."""
    return ('Fp1 AF3 F3 F7 FC5 FC1 C3 T7 CP5 CP1 P3 P7 PO3 O1 '
            'Fp2 AF4 F4 F8 FC6 FC2 C4 T8 CP6 CP2 P4 P8 PO4 O2').split()

def _reorder_deap_channels(x, order):
    """Permute the channel axis of *x* out of DEAP's native layout.

    order == 'TS' selects the 28-channel TS layout; any other value keeps
    the native 32-channel order (still returning a fancy-indexed copy).
    None, empty arrays, and unexpected ranks pass through unchanged.
    Channel axis is 1 for (N, C, T) input and 2 for (N, S, C, B) input.
    """
    if x is None:
        return x
    if isinstance(x, np.ndarray) and x.size == 0:
        return x
    source = _deap_original_order()
    target = _deap_ts_order() if order == 'TS' else source
    perm = [source.index(name) for name in target]
    if x.ndim == 3:
        return x[:, perm, :]
    if x.ndim == 4:
        return x[:, :, perm, :]
    return x


def ensure_dir(path):
    """Create directory *path* (including parents); no-op if it exists."""
    os.makedirs(path, exist_ok=True)


def save_split(save_dir, name, samples, labels, mean, std, trial_ids=None):
    """Serialize one split to ``<save_dir>/<name>.pt`` via torch.save.

    Stores float32 samples, int64 labels, and the normalization stats;
    a trial-id tensor is added only when *trial_ids* is provided.
    Silently returns when *samples* is None (absent split).
    """
    if samples is None:
        return
    payload = {
        'samples': torch.from_numpy(samples).float(),
        'labels': torch.from_numpy(labels).long(),
        'mean_vals': torch.from_numpy(np.array(mean)),
        'stdev_vals': torch.from_numpy(np.array(std)),
    }
    if trial_ids is not None:
        payload['trial_ids'] = torch.from_numpy(np.array(trial_ids)).long()
    torch.save(payload, os.path.join(save_dir, f'{name}.pt'))

def build_trial_ids(indexes, parts_data):
    """Expand trial indexes into one id per segment.

    Each index in *indexes* is repeated once per segment in the matching
    ``parts_data`` entry (a None entry counts as zero segments). Returns
    an int64 array, or None when *indexes* is None or empty.
    """
    if indexes is None or len(indexes) == 0:
        return None
    ids = []
    for trial_idx in indexes:
        segments = parts_data[trial_idx]
        seg_count = 0 if segments is None else len(segments)
        ids.extend([trial_idx] * int(seg_count))
    return np.array(ids, dtype=np.int64)


def main():
    """Run the full preprocessing pipeline driven by a fixed JSON config.

    Steps: load config -> build a LibEER ``Setting`` (via preset or
    directly) -> load + segment data inside LibEER -> split into
    train/val/test -> optional DEAP channel reorder -> z-score with
    training-set statistics -> save ``.pt`` splits under ``output_dir``.

    Raises:
        FileNotFoundError: if the config JSON is missing.
        ValueError: if a required config key is absent or empty.
    """
    # Load configuration from a fixed JSON path (edit that file directly
    # to change settings; there is no CLI parsing here).
    default_cfg = os.path.join(PROJECT_ROOT, 'Preprocess_data', 'configs', 'deap_config.json')
    if not os.path.isfile(default_cfg):
        raise FileNotFoundError(f'未找到配置文件: {default_cfg}，请创建后再运行')
    with open(default_cfg, 'r', encoding='utf-8') as f:
        cfg = json.load(f)

    # Compatibility shim: downstream code expects fold_shuffle as the
    # strings 'True'/'False', so coerce a JSON boolean.
    if 'fold_shuffle' in cfg and isinstance(cfg['fold_shuffle'], bool):
        cfg['fold_shuffle'] = 'True' if cfg['fold_shuffle'] else 'False'

    # Validate required config keys.
    for req_key in ['dataset', 'dataset_path', 'output_dir']:
        if req_key not in cfg or cfg[req_key] in (None, ''):
            raise ValueError(f'缺少必要配置项: {req_key}')

    # Wrap the config dict in an attribute-style namespace for reuse below.
    args = types.SimpleNamespace(**cfg)

    sessions = parse_list(getattr(args, 'sessions', None))
    pr = parse_list(getattr(args, 'pr', None))
    sr = parse_list(getattr(args, 'sr', None))
    bounds = parse_float_pair(getattr(args, 'bounds', None)) if getattr(args, 'bounds', None) is not None else None
    label_used = None if getattr(args, 'label_used', None) is None else [s.strip() for s in str(args.label_used).split(',') if s.strip()]

    if getattr(args, 'preset', None) is not None and args.preset in preset_setting:
        # Reuse a LibEER preset to construct the Setting.
        preset_args = types.SimpleNamespace(
            dataset=args.dataset,
            dataset_path=args.dataset_path,
            low_pass=getattr(args, 'low_pass', 0.3),
            high_pass=getattr(args, 'high_pass', 50.0),
            time_window=getattr(args, 'time_window', 1.0),
            overlap=getattr(args, 'overlap', 0.0),
            sample_length=getattr(args, 'sample_length', 1),
            stride=getattr(args, 'stride', 1),
            seed=getattr(args, 'seed', 2024),
            feature_type=getattr(args, 'feature_type', 'de_lds'),
            only_seg=getattr(args, 'only_seg', False),
            cross_trail='true',
            experiment_mode=getattr(args, 'experiment_mode', 'subject-independent'),
            metrics=None,
            normalize=False,
            artifact_removal=getattr(args, 'artifact_removal', 'none'),
            split_type=getattr(args, 'split_type', 'train-val-test'),
            fold_num=getattr(args, 'fold_num', 5),
            fold_shuffle=getattr(args, 'fold_shuffle', 'True'),
            front=getattr(args, 'front', 9),
            test_size=getattr(args, 'test_size', 0.2),
            val_size=getattr(args, 'val_size', 0.2),
            sessions=sessions,
            pr=pr,
            sr=sr,
            bounds=bounds,
            onehot=getattr(args, 'onehot', True),
            label_used=label_used,
        )
        setting = preset_setting[args.preset](preset_args)
    else:
        # No preset: build the Setting directly from config values.
        # normalize=False on purpose — z-scoring is done here, after split.
        setting = Setting(dataset=args.dataset,
                          dataset_path=args.dataset_path,
                          pass_band=[getattr(args, 'low_pass', 0.3), getattr(args, 'high_pass', 50.0)],
                          extract_bands=None,
                          time_window=getattr(args, 'time_window', 1.0),
                          overlap=getattr(args, 'overlap', 0.0),
                          sample_length=getattr(args, 'sample_length', 1),
                          stride=getattr(args, 'stride', 1),
                          seed=getattr(args, 'seed', 2024),
                          feature_type=getattr(args, 'feature_type', 'de_lds'),
                          only_seg=getattr(args, 'only_seg', False),
                          cross_trail='true',
                          experiment_mode=getattr(args, 'experiment_mode', 'subject-independent'),
                          eog_clean=getattr(args, 'eog_clean', True),
                          normalize=False,
                          artifact_removal=getattr(args, 'artifact_removal', 'none'),
                          split_type=getattr(args, 'split_type', 'train-val-test'),
                          fold_num=getattr(args, 'fold_num', 5),
                          fold_shuffle=getattr(args, 'fold_shuffle', 'True'),
                          front=getattr(args, 'front', 9),
                          test_size=getattr(args, 'test_size', 0.2),
                          val_size=getattr(args, 'val_size', 0.2),
                          sessions=sessions,
                          pr=pr,
                          sr=sr,
                          bounds=bounds,
                          onehot=getattr(args, 'onehot', True),
                          label_used=label_used)

    # Seed python's RNG to stabilize split shuffling inside LibEER.
    try:
        random.seed(getattr(setting, 'seed', 2024))
    except Exception:
        random.seed(2024)

    # Load and preprocess (filtering / feature extraction / segmentation
    # are all performed inside LibEER).
    all_data, all_label, channels, feature_dim, num_classes = get_data(setting)

    # Merge into split units (subjects / sessions, etc.).
    m_data, m_label = merge_to_part(all_data, all_label, setting)

    # subject-dependent: split per subject and then aggregate;
    # all other modes: split once across the whole pool.
    if getattr(setting, 'experiment_mode', 'subject-independent') == 'subject-dependent':
        agg_train_x, agg_train_y = [], []
        agg_val_x, agg_val_y = [], []
        agg_test_x, agg_test_y = [], []

        for sub_idx in range(len(m_data)):
            parts_data = m_data[sub_idx]
            parts_label = m_label[sub_idx]

            tts = get_split_index(parts_data, parts_label, setting)
            # Clamp 1-based round_index into [1, n_rounds], then make 0-based.
            ri = max(1, min(getattr(args, 'round_index', 1), len(tts['train']))) - 1
            tr_x, tr_y, va_x, va_y, te_x, te_y = index_to_data(
                parts_data, parts_label, tts['train'][ri], tts['test'][ri], tts['val'][ri], keep_dim=False
            )
            # kfold splits carry no val set; carve a stratified one out of train.
            if (getattr(setting, 'split_type', 'train-val-test') == 'kfold') and (len(va_x) == 0):
                if len(tr_x) > 0:
                    X_tr, X_va, y_tr, y_va = train_test_split(np.array(tr_x), np.array(tr_y), test_size=getattr(args, 'val_size', 0.2), random_state=getattr(setting, 'seed', 2024), stratify=np.array(tr_y))
                    tr_x, tr_y, va_x, va_y = X_tr, y_tr, X_va, y_va

            # optional per-subject saving with trial-wise ids (test only)
            if bool(getattr(args, 'save_per_subject', False)):
                sub_base = os.path.join(
                    args.output_dir,
                    args.dataset,
                    f"{args.feature_type}-tw-{args.time_window}ol-{args.overlap}",
                    args.experiment_mode,
                    args.split_type,
                    f"round_{ri+1}",
                    "by_subject",
                    f"sub{sub_idx+1:02d}"
                )
                ensure_dir(sub_base)

                # numpy arrays (empty val collapses to None so it is skipped)
                tr_x_np = np.array(tr_x); va_x_np = np.array(va_x) if len(va_x) else None; te_x_np = np.array(te_x)
                # channel reorder if needed (raw DEAP data only)
                if getattr(args, 'feature_type', 'raw') == 'raw' and str(getattr(args, 'ts_channel_order', 'O')).upper() in ['TS', 'O'] and str(getattr(args, 'dataset', '')).startswith('deap'):
                    order = str(getattr(args, 'ts_channel_order', 'O')).upper()
                    tr_x_np = _reorder_deap_channels(tr_x_np, order)
                    if va_x_np is not None and va_x_np.size != 0:
                        va_x_np = _reorder_deap_channels(va_x_np, order)
                    te_x_np = _reorder_deap_channels(te_x_np, order)

                # subject-wise z-score using training stats (no leakage from val/test)
                s_mean, s_std = compute_stats(tr_x_np)
                tr_n = apply_norm(tr_x_np, s_mean, s_std)
                va_n = apply_norm(va_x_np, s_mean, s_std) if va_x_np is not None and va_x_np.size != 0 else None
                te_n = apply_norm(te_x_np, s_mean, s_std)

                # trial ids for test only (val/train may be segment-level split when kfold)
                test_trial_ids = build_trial_ids(tts['test'][ri], parts_data)

                save_split(sub_base, 'train', tr_n, np.array(tr_y), s_mean, s_std)
                if va_n is not None:
                    save_split(sub_base, 'val', va_n, np.array(va_y), s_mean, s_std)
                save_split(sub_base, 'test', te_n, np.array(te_y), s_mean, s_std, trial_ids=test_trial_ids)

            # Accumulate non-empty per-subject splits for the pooled dataset.
            if len(tr_x) > 0:
                agg_train_x.append(np.array(tr_x))
                agg_train_y.append(np.array(tr_y))
            if len(va_x) > 0:
                agg_val_x.append(np.array(va_x))
                agg_val_y.append(np.array(va_y))
            if len(te_x) > 0:
                agg_test_x.append(np.array(te_x))
                agg_test_y.append(np.array(te_y))

        # NOTE(review): val_label falls back to None (not an empty array) when
        # no subject contributed a val split; downstream guards on
        # `val_data.size` so the asymmetry is benign — confirm intended.
        train_data = np.concatenate(agg_train_x, axis=0) if len(agg_train_x) else np.empty((0,))
        train_label = np.concatenate(agg_train_y, axis=0) if len(agg_train_y) else np.empty((0,))
        val_data = np.concatenate(agg_val_x, axis=0) if len(agg_val_x) else np.empty((0,))
        val_label = np.concatenate(agg_val_y, axis=0) if len(agg_val_y) else None
        test_data = np.concatenate(agg_test_x, axis=0) if len(agg_test_x) else np.empty((0,))
        test_label = np.concatenate(agg_test_y, axis=0) if len(agg_test_y) else np.empty((0,))
    else:
        # Normalize the split dimension into a flat 1-D parts list.
        if isinstance(m_data, list) and len(m_data) > 0 and isinstance(m_data[0], list):
            parts_data = m_data[0]
            parts_label = m_label[0]
        else:
            parts_data = m_data
            parts_label = m_label

        # Generate split indices.
        tts = get_split_index(parts_data, parts_label, setting)
        # Clamp 1-based round_index into [1, n_rounds], then make 0-based.
        ri = max(1, min(getattr(args, 'round_index', 1), len(tts['train']))) - 1

        # Derive a display name per part from its labels; fall back to
        # subXX/partXX numbering when labels carry no string names.
        def _to_names(n, labels):
            try:
                out = []
                for i in range(n):
                    v = labels[i]
                    s = None
                    if isinstance(v, (list, tuple)) and len(v) > 0 and isinstance(v[0], str):
                        s = str(v[0])
                    elif isinstance(v, str):
                        s = v
                    out.append(s)
                if any(x is None for x in out):
                    raise ValueError()
                return out
            except Exception:
                if str(getattr(args, 'dataset', '')).lower() == 'deap':
                    return [f"sub{i+1:02d}" for i in range(n)]
                return [f"part{i+1:02d}" for i in range(n)]

        # Map a list of part indexes to their names, ignoring out-of-range
        # or non-integer entries.
        def _names_from_idxs(idxs, names):
            if idxs is None:
                return []
            return [names[i] for i in idxs if isinstance(i, (int, np.integer)) and 0 <= int(i) < len(names)]

        # Log which parts landed in each split (test wins over val over train).
        names = _to_names(len(parts_data), parts_label)
        tr_idx = tts.get('train', [[]])[ri] if 'train' in tts else []
        va_idx = tts.get('val', [[]])[ri] if 'val' in tts else []
        te_idx = tts.get('test', [[]])[ri] if 'test' in tts else []
        te_set = set(_names_from_idxs(te_idx, names))
        va_set = set(_names_from_idxs(va_idx, names)) - te_set
        tr_set = set(_names_from_idxs(tr_idx, names)) - te_set - va_set
        def _fmt(xs):
            return ', '.join(sorted(xs))
        print(f"[SI split] round={ri+1} | TRAIN: {_fmt(tr_set)}", flush=True)
        if len(va_set) > 0:
            print(f"[SI split] round={ri+1} | VAL: {_fmt(va_set)}", flush=True)
        print(f"[SI split] round={ri+1} | TEST: {_fmt(te_set)}", flush=True)

        train_data, train_label, val_data, val_label, test_data, test_label = index_to_data(
            parts_data, parts_label, tts['train'][ri], tts['test'][ri], tts['val'][ri], keep_dim=False
        )
        # kfold splits carry no val set; carve a stratified one out of train.
        if (getattr(setting, 'split_type', 'train-val-test') == 'kfold') and (val_data.size == 0):
            if train_data.size != 0:
                X_tr, X_va, y_tr, y_va = train_test_split(np.array(train_data), np.array(train_label), test_size=getattr(args, 'val_size', 0.2), random_state=getattr(setting, 'seed', 2024), stratify=np.array(train_label))
                train_data, train_label, val_data, val_label = X_tr, y_tr, X_va, y_va

    # Convert labels to integer class indices (argmax over one-hot rows).
    train_label = labels_to_indices(train_label)
    val_label = labels_to_indices(val_label) if val_data.size != 0 else None
    test_label = labels_to_indices(test_label)

    # Normalize everything to numpy arrays.
    train_data = np.array(train_data)
    val_data = np.array(val_data) if val_label is not None else None
    test_data = np.array(test_data)

    # Channel reorder for raw DEAP data (TS layout or identity copy).
    if getattr(args, 'feature_type', 'raw') == 'raw' and str(getattr(args, 'ts_channel_order', 'O')).upper() in ['TS', 'O'] and str(getattr(args, 'dataset', '')).startswith('deap'):
        order = str(getattr(args, 'ts_channel_order', 'O')).upper()
        train_data = _reorder_deap_channels(train_data, order)
        if val_data is not None and val_data.size != 0:
            val_data = _reorder_deap_channels(val_data, order)
        test_data = _reorder_deap_channels(test_data, order)

    # NOTE: filtering and artifact removal were already applied inside
    # LibEER's preprocess step (before segmentation). The intended EEG
    # pipeline is: filter -> artifact removal -> segment -> normalize,
    # so no filtering or EOG cleaning is repeated here after segmentation.

    mean, std = compute_stats(train_data)
    if getattr(args, 'zscore', True):
        # Per-channel z-score using training-set statistics only.
        train_data_n = apply_norm(train_data, mean, std)
        val_data_n = apply_norm(val_data, mean, std) if val_data is not None else None
        test_data_n = apply_norm(test_data, mean, std)
    else:
        train_data_n = train_data.astype(np.float32)
        val_data_n = val_data.astype(np.float32) if val_data is not None else None
        test_data_n = test_data.astype(np.float32)

    # Save the pooled splits.
    # NOTE(review): in subject-dependent mode `ri` here is whatever the last
    # subject's loop iteration computed — confirm it is constant across
    # subjects (it is, unless per-subject round counts differ).
    save_base = os.path.join(
        args.output_dir,
        args.dataset,
        f"{args.feature_type}-tw-{args.time_window}ol-{args.overlap}",
        args.experiment_mode,
        args.split_type,
        f"round_{ri+1}"
    )
    ensure_dir(save_base)

    save_split(save_base, 'train', train_data_n, train_label, mean, std)
    if val_data_n is not None:
        save_split(save_base, 'val', val_data_n, val_label, mean, std)
    save_split(save_base, 'test', test_data_n, test_label, mean, std)


# Script entry point: run the full preprocessing pipeline.
if __name__ == '__main__':
    main()
