import os
import numpy as np
import mne
import pandas as pd
from scipy import signal
from mne.decoding import CSP
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.metrics import accuracy_score, confusion_matrix, cohen_kappa_score

# Show only errors so MNE's verbose INFO logging doesn't flood the console.
mne.set_log_level('ERROR')

class EEGDataAugmentation:
    """Stochastic augmentations for EEG trial arrays.

    Two independent augmentations are provided: additive Gaussian noise and
    a circular shift along the time (last) axis.
    """

    def __init__(self, noise_factor=0.02, shift_limit=0.05):
        # noise_factor: standard deviation of the additive Gaussian noise
        # shift_limit: maximum shift as a fraction of the trial length
        self.noise_factor = noise_factor
        self.shift_limit = shift_limit

    def add_noise(self, data):
        """Return `data` plus zero-mean Gaussian noise (std = noise_factor)."""
        noise = np.random.normal(0, self.noise_factor, data.shape)
        return data + noise

    def time_shift(self, data):
        """Circularly shift `data` along the last axis by a random amount.

        The shift magnitude is drawn uniformly from [1, max_shift] and the
        direction is chosen at random; data wraps around (np.roll).
        """
        max_shift = int(data.shape[-1] * self.shift_limit)
        if max_shift > 0:
            direction = np.random.choice([-1, 1])
            # BUGFIX: np.random.randint's upper bound is exclusive. The
            # original randint(1, max_shift) raised ValueError whenever
            # max_shift == 1 and could never draw the maximum shift.
            shift_value = np.random.randint(1, max_shift + 1)
            return np.roll(data, direction * shift_value, axis=-1)
        return data

    def apply_augmentation(self, data, augment_prob=0.5):
        """Apply each augmentation independently with probability augment_prob."""
        augmented_data = data.copy()
        if np.random.random() < augment_prob:
            augmented_data = self.add_noise(augmented_data)
        if np.random.random() < augment_prob:
            augmented_data = self.time_shift(augmented_data)
        return augmented_data

# Data preprocessing: per-trial band-pass, 50 Hz notch, per-channel z-score,
# optional augmentation.
def process_eeg_data(data, sfreq=250, augment=False):
    """Filter, notch, standardize, and optionally augment each EEG trial.

    Parameters
    ----------
    data : ndarray, shape (n_trials, n_channels, n_times)
        Raw trial data. NOTE(review): output array is created with
        np.zeros_like, so an integer input dtype would truncate the
        standardized values — assumes float input; confirm at call sites.
    sfreq : float
        Sampling frequency in Hz (default 250).
    augment : bool
        When True, apply EEGDataAugmentation stochastically per trial.

    Returns
    -------
    ndarray
        Same shape/dtype as `data`. Trials that raise during processing are
        left as zeros (see except branch below).
    """
    processed_data = np.zeros_like(data)
    augmenter = EEGDataAugmentation() if augment else None
    
    for trial in range(data.shape[0]):
        # Wrap the trial in an MNE Raw object so MNE's filtering can be used.
        ch_names = [f'EEG{i+1}' for i in range(data.shape[1])]
        ch_types = ['eeg'] * data.shape[1]
        info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
        raw = mne.io.RawArray(data[trial], info)

        # Band-pass filter 4-40 Hz (IIR), in place on the Raw object
        raw.filter(l_freq=4, h_freq=40, method='iir')
        
        try:
            # 50 Hz notch (power-line interference). NOTE(review): 50 Hz lies
            # outside the 4-40 Hz pass band above, so this is likely
            # redundant — confirm before removing.
            nyq = sfreq / 2
            freq = 50 / nyq  # normalized frequency for scipy.signal.iirnotch
            Q = 30
            b, a = signal.iirnotch(freq, Q)
            
            # Apply the notch filter (filtfilt: zero-phase, forward-backward)
            processed = raw.get_data()
            for ch in range(processed.shape[0]):
                processed[ch] = signal.filtfilt(b, a, processed[ch])
            
            # Z-score standardization (each channel independently)
            for ch in range(processed.shape[0]):
                ch_data = processed[ch]
                ch_mean = np.mean(ch_data)
                ch_std = np.std(ch_data)
                # small epsilon guards against division by zero on flat channels
                processed[ch] = (ch_data - ch_mean) / (ch_std + 1e-10)
            
            # Apply data augmentation (if enabled)
            if augment and augmenter is not None:
                processed = augmenter.apply_augmentation(processed)
      
            processed_data[trial] = processed
            
        except Exception as e:
            # Best effort: report the failing trial, leave it as zeros, move on.
            print(f"Error in trial {trial}: {e}")
            continue

    return processed_data

# Extract the subject ID from a 2a file name.
def get_subject_id(filename):
    """Parse the two-digit subject number from a name like '...-A03T.csv'."""
    # Take the segment after the last '-', skip the leading letter,
    # and read the two digits that follow.
    tail = filename.rsplit('-', 1)[-1]
    return int(tail[1:3])

# Read/preprocess the 2a CSV data; returns train/test data and labels.
def _reshape_2a_trials(data_array, n_trials, n_channels=22, time_points=1000):
    """Reshape flat CSV rows (one trial per row, channels concatenated in
    channel-major order) into (n_trials, n_channels, time_points).

    Equivalent to the per-channel slicing loop it replaces: column block
    [ch*time_points, (ch+1)*time_points) is channel `ch`, which is exactly a
    contiguous reshape of the first n_channels*time_points columns.
    """
    flat = data_array[:n_trials, :n_channels * time_points]
    return flat.reshape(n_trials, n_channels, time_points).astype(float)


def _load_2a_split(data_folder, label_folder, suffix, augment):
    """Load one split ('T.csv' train / 'E.csv' test) of the 2a dataset.

    Returns (data_list, labels_list, pid_list) where each data entry is the
    preprocessed, baseline-corrected trial array of one subject file.
    """
    data_list, labels_list, pid_list = [], [], []
    files = sorted(f for f in os.listdir(data_folder) if f.endswith(suffix))
    for file in files:
        subject_id = get_subject_id(file)
        data = pd.read_csv(os.path.join(data_folder, file))
        labels = pd.read_csv(os.path.join(label_folder, f"Etiquetas{file.split('-')[-1]}"))
        labels = labels.values.flatten() - 1  # shift labels to start at 0

        # Reshape to (trials, channels=22, time_points=1000)
        reshaped_data = _reshape_2a_trials(data.values, len(labels))

        processed_data = process_eeg_data(reshaped_data, augment=augment)

        # Baseline correction (first 100 samples)
        baseline_mean = np.mean(processed_data[..., 0:100], axis=-1, keepdims=True)
        processed_data = processed_data - baseline_mean

        data_list.append(processed_data)
        labels_list.append(labels)
        pid_list.extend([subject_id] * len(labels))
    return data_list, labels_list, pid_list


def load_and_process_from_csv_2a(base_dir):
    """Load and preprocess the 2a CSV dataset.

    Expects `base_dir` to contain 'train', 'train_label', 'test',
    'test_label' folders. Augmentation is applied to the training split only.

    Returns
    -------
    (X_train, y_train, pid_train, X_test, y_test, pid_test)

    Raises
    ------
    RuntimeError
        If either split ends up empty.
    """
    train_data_list, train_labels_list, train_pid_list = _load_2a_split(
        os.path.join(base_dir, 'train'), os.path.join(base_dir, 'train_label'),
        'T.csv', augment=True)
    test_data_list, test_labels_list, test_pid_list = _load_2a_split(
        os.path.join(base_dir, 'test'), os.path.join(base_dir, 'test_label'),
        'E.csv', augment=False)

    X_train = np.vstack(train_data_list) if train_data_list else np.array([])
    y_train = np.concatenate(train_labels_list) if train_labels_list else np.array([])
    pid_train = np.array(train_pid_list)
    X_test = np.vstack(test_data_list) if test_data_list else np.array([])
    y_test = np.concatenate(test_labels_list) if test_labels_list else np.array([])
    pid_test = np.array(test_pid_list)

    if X_train.size == 0 or X_test.size == 0:
        raise RuntimeError("CSV数据处理失败：训练或测试数据为空")

    # Per-channel normalization over the full dataset.
    # NOTE(review): statistics are computed on train+test combined, which
    # leaks test-set information into preprocessing. Kept for backward
    # compatibility — consider fitting mean/std on the training split only.
    X = np.concatenate((X_train, X_test))
    for ch in range(X.shape[1]):
        ch_mean = np.mean(X[:, ch, :])
        ch_std = np.std(X[:, ch, :])
        X[:, ch, :] = (X[:, ch, :] - ch_mean) / (ch_std + 1e-10)

    # Split back into train/test (train rows come first in X)
    X_train = X[:len(y_train)]
    X_test = X[len(y_train):]
    return X_train, y_train, pid_train, X_test, y_test, pid_test

def _tune_rbf_svm(X_feat, y, cv):
    """Two-stage grid search over C/gamma for an RBF SVM.

    Stage 1 scans exponents 2^-10 .. 2^10 in steps of 1; stage 2 refines
    around the stage-1 optimum in log2 steps of 0.1. Returns the best
    estimator already refit on all of (X_feat, y).
    """
    coarse_exp = np.arange(-10, 11, 1, dtype=float)
    param_grid_coarse = {'C': 2.0 ** coarse_exp, 'gamma': 2.0 ** coarse_exp,
                         'kernel': ['rbf']}
    gs_coarse = GridSearchCV(
        estimator=SVC(random_state=42),
        param_grid=param_grid_coarse,
        scoring='accuracy',
        cv=cv,
        n_jobs=-1,
        refit=True,
        verbose=0
    )
    gs_coarse.fit(X_feat, y)

    best_c_exp = np.log2(gs_coarse.best_params_['C'])
    best_g_exp = np.log2(gs_coarse.best_params_['gamma'])
    # +1e-9 keeps the upper endpoint inside the arange despite float rounding
    param_grid_fine = {
        'C': 2.0 ** np.arange(best_c_exp - 1.0, best_c_exp + 1.0 + 1e-9, 0.1),
        'gamma': 2.0 ** np.arange(best_g_exp - 1.0, best_g_exp + 1.0 + 1e-9, 0.1),
        'kernel': ['rbf'],
    }
    gs_fine = GridSearchCV(
        estimator=SVC(random_state=42),
        param_grid=param_grid_fine,
        scoring='accuracy',
        cv=cv,
        n_jobs=-1,
        refit=True,
        verbose=0
    )
    gs_fine.fit(X_feat, y)
    # With refit=True the search already trained an SVC with the best params
    # on the full training features — reuse it instead of fitting a duplicate.
    return gs_fine.best_estimator_


def _binary_metrics(y_true, y_pred):
    """Return (accuracy, left recall, right recall, Cohen's kappa) for a
    binary Left(0)/Right(1) prediction."""
    acc = accuracy_score(y_true, y_pred)
    cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
    left = cm[0, 0] / max(cm[0].sum(), 1)   # recall of class 0; max(...,1) avoids 0/0
    right = cm[1, 1] / max(cm[1].sum(), 1)  # recall of class 1
    kappa = cohen_kappa_score(y_true, y_pred)
    return acc, left, right, kappa


def main():
    """Subject-dependent CSP+SVM (RBF) evaluation on the 2a CSV dataset,
    restricted to left/right-hand motor imagery."""
    # Load/preprocess the 2a data from the '2a' folder next to this script
    data_dir = os.path.join(os.path.dirname(__file__), '2a')
    X_train_all, y_train_all, pid_train, X_test_all, y_test_all, pid_test = load_and_process_from_csv_2a(data_dir)

    # Keep only left/right hand trials (0: Left, 1: Right)
    train_mask = (y_train_all == 0) | (y_train_all == 1)
    test_mask = (y_test_all == 0) | (y_test_all == 1)
    X_train = X_train_all[train_mask]
    y_train = y_train_all[train_mask]
    X_test = X_test_all[test_mask]
    y_test = y_test_all[test_mask]

    print("\n--- 数据准备完成（CSV流程，使用完整数据集） ---")
    print(f"训练集形状：{X_train.shape}，标签分布：{np.bincount(y_train)}")
    print(f"测试集形状：{X_test.shape}，标签分布：{np.bincount(y_test)}")

    # --- Subject-dependent: train/evaluate one model per subject ---
    # Masked pid arrays and the CV splitter are loop-invariant: compute once.
    pid_tr = pid_train[train_mask]
    pid_te = pid_test[test_mask]
    cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
    subjects = sorted(np.unique(np.concatenate([pid_tr, pid_te])))
    print("\n=== 受试者依赖评估（CSP+SVM, RBF核） ===")
    for sid in subjects:
        tr_idx = (pid_tr == sid)
        te_idx = (pid_te == sid)
        if not (np.any(tr_idx) and np.any(te_idx)):
            print(f"被试 {sid}: 训练或测试样本为空，跳过")
            continue

        X_tr_s = X_train[tr_idx]
        y_tr_s = y_train[tr_idx]
        X_te_s = X_test[te_idx]
        y_te_s = y_test[te_idx]

        # CSP features: log average power of up to 6 spatial components
        n_channels = X_tr_s.shape[1]
        n_comp = int(min(6, n_channels))
        csp = CSP(n_components=n_comp, reg='ledoit_wolf', log=True, rank='full', transform_into='average_power')
        X_tr_csp = csp.fit_transform(X_tr_s, y_tr_s)
        X_te_csp = csp.transform(X_te_s)

        # Standardize features (fit on the training split only)
        scaler = StandardScaler()
        X_tr_feat = scaler.fit_transform(X_tr_csp)
        X_te_feat = scaler.transform(X_te_csp)

        # Two-stage grid search; returns the refit best model
        svm = _tune_rbf_svm(X_tr_feat, y_tr_s, cv)

        # Train-set evaluation
        acc_tr, left_tr, right_tr, kappa_tr = _binary_metrics(y_tr_s, svm.predict(X_tr_feat))
        # Test-set evaluation
        acc_te, left_te, right_te, kappa_te = _binary_metrics(y_te_s, svm.predict(X_te_feat))

        print(f"\n--- 被试 {sid} ---")
        print(f"训练: Acc={acc_tr:.4f}, Left={left_tr:.4f}, Right={right_tr:.4f}, Kappa={kappa_tr:.4f}")
        print(f"测试: Acc={acc_te:.4f}, Left={left_te:.4f}, Right={right_te:.4f}, Kappa={kappa_te:.4f}")

# Script entry point: run the full load/train/evaluate pipeline.
if __name__ == '__main__':
    main()