import os
import glob
import warnings
import numpy as np
import mne
import scipy.io as sio
from mne.decoding import CSP
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.metrics import accuracy_score, precision_recall_fscore_support, confusion_matrix, cohen_kappa_score

# Show errors only, suppressing MNE's verbose informational log output.
mne.set_log_level('ERROR')

def read_concat_raws(gdf_paths):
    """Read several .gdf files and concatenate them into a single Raw.

    Some GDF headers store the highpass/lowpass cutoffs swapped
    (highpass > lowpass).  The resulting RuntimeWarning is suppressed while
    reading, and the two values are swapped back afterwards.

    Parameters
    ----------
    gdf_paths : list of str
        Paths of the .gdf files to read.

    Returns
    -------
    mne.io.Raw
        The concatenated, preloaded Raw object.

    Raises
    ------
    FileNotFoundError
        If ``gdf_paths`` is empty.
    """
    if not gdf_paths:
        raise FileNotFoundError("未找到任何.gdf文件用于读取")
    raw_list = []
    for p in gdf_paths:
        # Silence the RuntimeWarning caused by the reversed header fields.
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore",
                message=r"Highpass cutoff frequency .* greater than lowpass cutoff frequency .*",
                category=RuntimeWarning,
            )
            r = mne.io.read_raw_gdf(p, preload=True, verbose=False)
        # Correct a reversed highpass/lowpass pair right after reading.
        hp = r.info.get('highpass', None)
        lp = r.info.get('lowpass', None)
        if hp is not None and lp is not None and hp > lp:
            try:
                r.info['highpass'], r.info['lowpass'] = lp, hp
            except RuntimeError:
                # Recent MNE versions forbid setting these Info keys
                # directly; fall back to the (private) unlock context.
                with r.info._unlocked():
                    r.info['highpass'], r.info['lowpass'] = lp, hp
        raw_list.append(r)
    raw = mne.io.concatenate_raws(raw_list)
    return raw

def extract_epochs_from_events(raw, events, event_id_map, tmin=0.0, tmax=9.0, crop=None):
    """Cut Epochs out of ``raw`` at the given events and return ``(X, y)``.

    The Raw object is modified in place: it is reduced to the C3/Cz/C4
    channels, average-referenced (projection) and band-pass filtered 8-30 Hz
    before epoching.

    Parameters
    ----------
    raw : mne.io.Raw
        Continuous recording; mutated in place as described above.
    events : ndarray
        MNE events array whose third column holds 769/770 codes.
    event_id_map : dict
        Mapping passed to ``mne.Epochs`` (e.g. {'left': 769, 'right': 770}).
    tmin, tmax : float
        Epoch window relative to each event onset, in seconds.
    crop : tuple of (float, float) or None
        Optional sub-window each epoch is cropped to, e.g. (4.0, 5.0).

    Returns
    -------
    X : ndarray
        Epoch data of shape (n_epochs, n_channels, n_times).
    y : ndarray
        Binary labels: 0 where the event code is 769, 1 otherwise.
    """
    # Keep only the C3, Cz and C4 channels (tolerant of case and naming variants).
    def _resolve_c3_cz_c4_channel_names(ch_names):
        lower_to_orig = {c.lower(): c for c in ch_names}
        targets = ['c3', 'cz', 'c4']
        resolved = []
        # Exact lower-case match first.
        for t in targets:
            resolved.append(lower_to_orig.get(t))
        # Fuzzy substring match for the remaining ones (e.g. 'EEG C3', 'C3-REF').
        for i, t in enumerate(targets):
            if resolved[i] is not None:
                continue
            cand = [c for c in ch_names if t in c.lower()]
            if len(cand) > 0:
                resolved[i] = cand[0]
        if any(r is None for r in resolved):
            missing = [targets[i].upper() for i, r in enumerate(resolved) if r is None]
            raise ValueError(f"未找到所需通道: {missing}；可用通道: {ch_names}")
        return [resolved[0], resolved[1], resolved[2]]

    picks = _resolve_c3_cz_c4_channel_names(raw.info['ch_names'])
    # NOTE(review): pick_channels is a legacy API in recent MNE (raw.pick is
    # the modern replacement) — verify against the installed MNE version.
    raw.pick_channels(picks, ordered=True, verbose=False)
    # NOTE(review): an average reference over only these 3 channels is
    # unusual — confirm this is intended rather than a full-montage reference.
    raw.set_eeg_reference('average', projection=True, verbose=False)
    # Band-pass 8-30 Hz before cutting epochs.
    raw.filter(8, 30, fir_design='firwin', verbose=False)
    epochs = mne.Epochs(
        raw, events, event_id=event_id_map, tmin=tmin, tmax=tmax,
        baseline=(0, 0), preload=True, verbose=False
    )
    if crop is not None:
        crop_tmin, crop_tmax = crop
        # Crop every epoch to the requested sub-window, e.g. (4.0, 5.0).
        epochs = epochs.copy().crop(tmin=crop_tmin, tmax=crop_tmax)
    X = epochs.get_data()
    y_raw = epochs.events[:, -1]
    # 769 -> 0 (left), anything else (i.e. 770) -> 1 (right).
    y = np.where(y_raw == 769, 0, 1)
    return X, y

def load_mat_labels(mat_path):
    """Load trial labels from a .mat file and return them as a 0/1 array.

    Known label encodings are converted: {1, 2} class indices become {0, 1},
    and {769, 770} GDF event codes become {0, 1} (770 -> 1).

    Parameters
    ----------
    mat_path : str
        Path of the .mat file.

    Returns
    -------
    numpy.ndarray
        1-D int array of 0/1 labels.

    Raises
    ------
    ValueError
        If no label vector can be found, or its values are unsupported.
    """
    md = sio.loadmat(mat_path, squeeze_me=True, struct_as_record=False)
    # Preferred key names, checked in priority order.
    lab = None
    for key in ('labels', 'y', 'Y', 'classlabel', 'label'):
        if key in md:
            lab = np.asarray(md[key]).ravel()
            break
    if lab is None:
        # Fallback: pick the first suitable 1-D numeric array with few
        # distinct values.  MATLAB stores numbers as double by default, so
        # also accept float arrays whose entries are all integral, not only
        # integer dtypes.
        for v in md.values():
            if not (isinstance(v, np.ndarray) and v.ndim == 1):
                continue
            if v.dtype.kind not in ('i', 'u', 'f'):
                continue
            if v.dtype.kind == 'f' and not np.all(np.mod(v, 1) == 0):
                continue
            uniq = np.unique(v)
            if 1 <= uniq.size <= 3:
                lab = v
                break
        if lab is None:
            raise ValueError(f"无法在{os.path.basename(mat_path)}中推断标签向量")

    uniq = np.unique(lab)
    vals = set(uniq.tolist())
    if vals <= {0, 1}:
        y = lab.astype(int)
    elif vals <= {1, 2}:
        y = (lab.astype(int) - 1)
    elif vals <= {769, 770}:
        y = (lab.astype(int) == 770).astype(int)
    else:
        raise ValueError(f"不支持的标签取值：{uniq}")
    return y

def build_events_from_onsets_and_labels(raw, labels, prefer_codes=("768", "781")):
    """Build an MNE events array pairing trial-onset annotations with labels.

    Onsets are taken from annotation code 768 or 781 when available; the
    third event column is rewritten to 769 (label 0) / 770 (label 1).
    """
    all_events, code_to_id = mne.events_from_annotations(raw, verbose=False)
    n_trials = len(labels)

    # First preferred annotation code that exists in this recording, if any.
    onset_code = next((c for c in prefer_codes if c in code_to_id), None)

    if onset_code is None:
        # Neither code present: fall back to the first n_trials events in
        # temporal order (best-effort alignment).
        trial_onsets = all_events[:n_trials]
    else:
        mask = all_events[:, 2] == code_to_id[onset_code]
        trial_onsets = all_events[mask]
        if trial_onsets.shape[0] < n_trials:
            # Too few matches: same temporal-order fallback.
            trial_onsets = all_events[:n_trials]

    if trial_onsets.shape[0] < n_trials:
        raise ValueError(f"试次起点数量({trial_onsets.shape[0]})少于标签数量({len(labels)})，无法对齐")

    # Keep exactly one onset per label and encode the class in column 3.
    ev = trial_onsets[:n_trials].copy()
    ev[:, 2] = np.where(labels == 0, 769, 770)
    event_id_map = {'left': 769, 'right': 770}
    return ev, event_id_map

def main():
    """Train and evaluate a CSP + RBF-SVM pipeline on BCI Competition IV 2b."""
    # --- 1. Locate the data (BCI Competition IV 2b) ---
    data_dir = os.path.join(os.path.dirname(__file__), '2b')
    all_gdf = sorted(glob.glob(os.path.join(data_dir, '**', '*.gdf'), recursive=True))
    if len(all_gdf) == 0:
        raise FileNotFoundError(f"未在目录中找到.gdf文件: {data_dir}")

    # The four expected sub-directories.
    dir_test = os.path.join(data_dir, 'test')
    dir_test_label = os.path.join(data_dir, 'test_label')
    dir_train = os.path.join(data_dir, 'train')
    dir_train_label = os.path.join(data_dir, 'train_label')

    test_gdf = sorted(glob.glob(os.path.join(dir_test, '*.gdf')))
    test_mat = {os.path.splitext(os.path.basename(p))[0]: p for p in sorted(glob.glob(os.path.join(dir_test_label, '*.mat')))}
    train_gdf = sorted(glob.glob(os.path.join(dir_train, '*.gdf')))
    train_mat = {os.path.splitext(os.path.basename(p))[0]: p for p in sorted(glob.glob(os.path.join(dir_train_label, '*.mat')))}

    if len(train_gdf) == 0 or len(test_gdf) == 0:
        raise FileNotFoundError("四个文件夹中未找到足够的.gdf文件")

    print("\n--- 数据文件 ---")
    print(f"训练集文件数：{len(train_gdf)}（gdf），标签文件数：{len(train_mat)}（mat）")
    print(f"测试集文件数：{len(test_gdf)}（gdf），标签文件数：{len(test_mat)}（mat）")

    # --- 2. Load train/test runs; align annotation trial onsets with MAT labels ---
    X_train_list, y_train_list = [], []
    for gdf_path in train_gdf:
        stem = os.path.splitext(os.path.basename(gdf_path))[0]
        # NOTE(review): stem already comes from splitext, so these replace()
        # calls should be no-ops — confirm the label-file naming scheme.
        mat_path = train_mat.get(stem.replace('.gdf', '').replace('.GDF', ''), None)
        if mat_path is None:
            # Fall back to an exact same-stem match.
            mat_path = train_mat.get(stem, None)
        if mat_path is None:
            raise FileNotFoundError(f"未找到与{stem}匹配的训练标签.mat")

        raw = read_concat_raws([gdf_path])
        labels = load_mat_labels(mat_path)
        events, event_id_map = build_events_from_onsets_and_labels(raw, labels)
        # Epochs span 0-9 s per trial; features use only the 4-5 s window.
        X, y = extract_epochs_from_events(raw, events, event_id_map, tmin=0.0, tmax=9.0, crop=(4.0, 5.0))
        X_train_list.append(X)
        y_train_list.append(y)

    X_train = np.concatenate(X_train_list, axis=0)
    y_train = np.concatenate(y_train_list, axis=0)

    # Same procedure for the test runs.
    X_test_list, y_test_list = [], []
    for gdf_path in test_gdf:
        stem = os.path.splitext(os.path.basename(gdf_path))[0]
        mat_path = test_mat.get(stem.replace('.gdf', '').replace('.GDF', ''), None)
        if mat_path is None:
            mat_path = test_mat.get(stem, None)
        if mat_path is None:
            raise FileNotFoundError(f"未找到与{stem}匹配的测试标签.mat")

        raw = read_concat_raws([gdf_path])
        labels = load_mat_labels(mat_path)
        events, event_id_map = build_events_from_onsets_and_labels(raw, labels)
        X, y = extract_epochs_from_events(raw, events, event_id_map, tmin=0.0, tmax=9.0, crop=(4.0, 5.0))
        X_test_list.append(X)
        y_test_list.append(y)

    X_test = np.concatenate(X_test_list, axis=0)
    y_test = np.concatenate(y_test_list, axis=0)

    print("\n--- 数据准备完成 ---")
    print(f"训练集 Epochs 形状：{X_train.shape}，标签分布：{np.bincount(y_train)}")
    print(f"测试集 Epochs 形状：{X_test.shape}，标签分布：{np.bincount(y_test)}")

    # --- 3. CSP features (fitted on the training set only) ---
    # Only 3 channels are kept upstream, so n_components must not exceed 3.
    csp = CSP(n_components=3, reg='ledoit_wolf', log=True, rank='full', transform_into='average_power')
    X_train_csp = csp.fit_transform(X_train, y_train)
    X_test_csp = csp.transform(X_test)

    # Standardize features (scaler fitted on the training set only).
    scaler = StandardScaler()
    X_train_feat = scaler.fit_transform(X_train_csp)
    X_test_feat = scaler.transform(X_test_csp)

    # --- 4. Two-stage grid search for RBF-kernel SVM: C = 2^n, gamma = 2^n ---
    print("\n--- 网格搜索（RBF核）阶段1：粗网格（Δn=1） ---")
    # Coarse grid: integer exponents; range chosen empirically.
    coarse_n = np.arange(-10, 11, 1, dtype=float)
    coarse_C = (2.0 ** coarse_n)
    coarse_gamma = (2.0 ** coarse_n)
    param_grid_coarse = {
        'C': coarse_C,
        'gamma': coarse_gamma,
        'kernel': ['rbf']
    }
    cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
    gs_coarse = GridSearchCV(
        estimator=SVC(random_state=42),
        param_grid=param_grid_coarse,
        scoring='accuracy',
        cv=cv,
        n_jobs=-1,
        refit=True,
        verbose=0
    )
    gs_coarse.fit(X_train_feat, y_train)
    best_params_coarse = gs_coarse.best_params_
    best_score_coarse = gs_coarse.best_score_
    print(f"粗网格最优：C={best_params_coarse['C']:.6g}, gamma={best_params_coarse['gamma']:.6g}，CV准确率={best_score_coarse:.4f}")

    # Fine grid in exponent space: best coarse exponent ±1, step 0.1.
    best_c_exp = np.log2(best_params_coarse['C'])
    best_g_exp = np.log2(best_params_coarse['gamma'])
    fine_c_exp = np.arange(best_c_exp - 1.0, best_c_exp + 1.0 + 1e-9, 0.1)
    fine_g_exp = np.arange(best_g_exp - 1.0, best_g_exp + 1.0 + 1e-9, 0.1)
    fine_C = (2.0 ** fine_c_exp)
    fine_gamma = (2.0 ** fine_g_exp)

    print("--- 网格搜索（RBF核）阶段2：细网格（Δn=0.1） ---")
    param_grid_fine = {
        'C': fine_C,
        'gamma': fine_gamma,
        'kernel': ['rbf']
    }
    gs_fine = GridSearchCV(
        estimator=SVC(random_state=42),
        param_grid=param_grid_fine,
        scoring='accuracy',
        cv=cv,
        n_jobs=-1,
        refit=True,
        verbose=0
    )
    gs_fine.fit(X_train_feat, y_train)
    best_params = gs_fine.best_params_
    best_score = gs_fine.best_score_
    print(f"细网格最优：C={best_params['C']:.6g}, gamma={best_params['gamma']:.6g}，CV准确率={best_score:.4f}")

    # Retrain on the whole training set with the fine-grid optimum,
    # then evaluate on the held-out test set.
    svm = SVC(kernel='rbf', C=best_params['C'], gamma=best_params['gamma'], random_state=42)
    svm.fit(X_train_feat, y_train)
    # Training-set accuracy (sanity check for over/under-fitting).
    y_pred_train = svm.predict(X_train_feat)
    acc_train = accuracy_score(y_train, y_pred_train)
    # Test-set metrics.
    y_pred_svm = svm.predict(X_test_feat)
    acc_svm = accuracy_score(y_test, y_pred_svm)
    prec_svm, rec_svm, f1_svm, _ = precision_recall_fscore_support(y_test, y_pred_svm, labels=[0, 1], zero_division=0)
    cm_svm = confusion_matrix(y_test, y_pred_svm, labels=[0, 1])
    # Per-class accuracy (= recall/sensitivity):
    # left = class 0: fraction of true 0s predicted 0; right = class 1 likewise.
    # The max(..., 1) guards against division by zero for an absent class.
    acc_left = cm_svm[0, 0] / max(cm_svm[0].sum(), 1)
    acc_right = cm_svm[1, 1] / max(cm_svm[1].sum(), 1)
    # Cohen's kappa (chance-corrected agreement).
    kappa = cohen_kappa_score(y_test, y_pred_svm)

    print("\n--- 评估（CSP + SVM, RBF核） ---")
    print(f"最优超参：C={best_params['C']:.6g}, gamma={best_params['gamma']:.6g}")
    print(f"训练集准确率：{acc_train:.4f}")
    print(f"测试集准确率：{acc_svm:.4f}")
    print(f"左右手准确率（左/右）：{acc_left:.4f} / {acc_right:.4f}")
    print(f"Cohen's kappa：{kappa:.4f}")
    print("精确率（0/1）：", np.round(prec_svm, 4))
    print("召回率（0/1）：", np.round(rec_svm, 4))
    print("F1（0/1）：", np.round(f1_svm, 4))
    print("混淆矩阵：\n", cm_svm)

# Script entry point.
if __name__ == '__main__':
    main()


