import warnings
import os
import numpy as np
import mne
from scipy import signal
from mne.decoding import CSP
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, StratifiedKFold, train_test_split
from sklearn.metrics import accuracy_score, confusion_matrix, cohen_kappa_score
from pathlib import Path
warnings.filterwarnings("ignore", message="Online software filter detected")
mne.set_log_level('ERROR') # 仅显示错误，避免英文INFO日志

# EEG data reader
class GZMI1Recording:
    """Reader for one BrainVision motor-imagery recording (.vhdr/.vmrk/.eeg).

    Thin wrapper around :func:`mne.io.read_raw_brainvision` with helpers for
    the data matrix, channel names, sampling rate and annotation-based events.
    """

    def __init__(self, root=r"D:\code\Python\library_BCI\data\zqw", vhdr_name="zqw_mi_1.vhdr"):
        # Raw string for the Windows default path: the original non-raw literal
        # contained invalid escape sequences (\c, \l, \d, \z) that trigger
        # SyntaxWarning on modern Python; the resulting value is unchanged.
        self.root = Path(root)
        self.vhdr_path = self.root / vhdr_name
        if not self.vhdr_path.exists():
            raise FileNotFoundError(f"未找到 {self.vhdr_path}")
        self.raw = None  # populated by load()

    def _require_loaded(self):
        """Raise RuntimeError if load() has not been called yet."""
        if self.raw is None:
            raise RuntimeError("请先调用 load() 方法")

    def load(self, preload=True):
        """Read the BrainVision header file into an MNE Raw object."""
        self.raw = mne.io.read_raw_brainvision(
            str(self.vhdr_path), preload=preload, verbose=False)

    def get_data_matrix(self, channels=None):
        """Return the EEG data matrix, shape (n_channels, n_times)."""
        self._require_loaded()
        return self.raw.get_data(picks=channels)

    def get_channel_names(self):
        """Return the list of channel names."""
        self._require_loaded()
        return self.raw.ch_names

    def get_sampling_rate(self):
        """Return the sampling rate in Hz."""
        self._require_loaded()
        return self.raw.info["sfreq"]

    def get_events(self):
        """Extract event information from the recording's annotations.

        Returns
        -------
        events : ndarray
            Event array from :func:`mne.events_from_annotations`.
        event_id : dict
            Mapping of annotation description -> integer event code.
        records : list of dict
            One record per annotation whose description has an event code,
            with onset sample, onset time (s), description and code.
        """
        self._require_loaded()

        events, event_id = mne.events_from_annotations(self.raw, verbose=False)
        annotations = self.raw.annotations
        records = []
        for onset, duration, desc in zip(annotations.onset, annotations.duration, annotations.description):
            if desc in event_id:
                onset_sample = int(self.raw.time_as_index(onset)[0])
                records.append({
                    "onset_sample": onset_sample,
                    "onset_time_sec": onset,
                    "description": desc,
                    "code": event_id[desc]})
        return events, event_id, records

# EEG data augmentation class
class EEGDataAugmentation:
    """Simple stochastic augmentations for EEG trials (additive noise and
    circular time shifts).
    """

    def __init__(self, noise_factor=0.02, shift_limit=0.05):
        self.noise_factor = noise_factor  # std-dev of the additive Gaussian noise
        self.shift_limit = shift_limit    # max shift as a fraction of trial length

    def add_noise(self, data):
        """Return *data* plus zero-mean Gaussian noise (std = noise_factor)."""
        noise = np.random.normal(0, self.noise_factor, data.shape)
        return data + noise

    def time_shift(self, data):
        """Circularly shift *data* along the last (time) axis by a random amount."""
        max_shift = int(data.shape[-1] * self.shift_limit)
        if max_shift > 0:
            direction = np.random.choice([-1, 1])
            # randint's upper bound is exclusive: +1 makes max_shift reachable
            # and fixes the ValueError the old randint(1, shift) raised when
            # shift == 1 (low >= high).
            shift_value = np.random.randint(1, max_shift + 1)
            return np.roll(data, direction * shift_value, axis=-1)
        return data

    def apply_augmentation(self, data, augment_prob=0.5):
        """Apply each augmentation independently with probability *augment_prob*.

        The input array is copied, never modified in place.
        """
        augmented_data = data.copy()
        if np.random.random() < augment_prob:
            augmented_data = self.add_noise(augmented_data)
        if np.random.random() < augment_prob:
            augmented_data = self.time_shift(augmented_data)
        return augmented_data

# Data preprocessing
def process_eeg_data(data, sfreq=250, augment=False, baseline_samples=100):
    """Band-pass, notch-filter, standardise and baseline-correct epoched EEG.

    Parameters
    ----------
    data : ndarray, shape (n_trials, n_channels, n_times)
        Epoched EEG trials (layout matching ``epochs.get_data()``).
    sfreq : float
        Sampling rate in Hz.
    augment : bool
        If True, apply random noise/shift augmentation to each trial
        (intended for training data only).
    baseline_samples : int
        Number of leading samples used for baseline correction.
        Default 100 preserves the previously hard-coded window.

    Returns
    -------
    ndarray
        Same shape as *data*. Trials that fail to process are reported and
        left as all zeros.
    """
    processed_data = np.zeros_like(data)
    augmenter = EEGDataAugmentation() if augment else None

    # Channel metadata is identical for every trial, so build it once
    # instead of recreating it inside the loop.
    ch_names = [f'EEG{i+1}' for i in range(data.shape[1])]
    ch_types = ['eeg'] * data.shape[1]
    info = mne.create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)

    for trial in range(data.shape[0]):
        # RawArray stores `info` by reference and filter() mutates its
        # highpass/lowpass entries, so each trial gets its own copy.
        raw = mne.io.RawArray(data[trial], info.copy())
        # Band-pass 4-40 Hz (IIR), covering the motor-imagery mu/beta bands.
        raw.filter(l_freq=4, h_freq=40, method='iir')

        try:
            # 50 Hz power-line notch filter, applied zero-phase per channel.
            nyq = sfreq / 2
            freq = 50 / nyq
            Q = 30
            b, a = signal.iirnotch(freq, Q)

            processed = raw.get_data()
            for ch in range(processed.shape[0]):
                processed[ch] = signal.filtfilt(b, a, processed[ch])

            # Per-channel z-scoring; the epsilon guards against flat channels.
            for ch in range(processed.shape[0]):
                ch_data = processed[ch]
                ch_mean = np.mean(ch_data)
                ch_std = np.std(ch_data)
                processed[ch] = (ch_data - ch_mean) / (ch_std + 1e-10)

            # Optional stochastic augmentation (training data only).
            if augment and augmenter is not None:
                processed = augmenter.apply_augmentation(processed)

            # Baseline correction over the first `baseline_samples` samples.
            baseline_period = slice(0, baseline_samples)
            baseline_mean = np.mean(processed[..., baseline_period], axis=-1, keepdims=True)
            processed = processed - baseline_mean

            processed_data[trial] = processed

        except Exception as e:
            # Failed trials are reported and remain all-zero in the output.
            print(f"Error in trial {trial}: {e}")
            continue

    return processed_data

# Extract the subject ID from a file name
def get_subject_id(filename):
    """Extract a numeric subject/recording ID from a BrainVision file name.

    Tries the ``name_<id>.vhdr`` convention first (e.g. ``zqw_mi_1.vhdr`` -> 1),
    then falls back to the two digits after the first character of the last
    ``-``-separated part (e.g. ``sub-A05.vhdr`` -> 5).

    Raises
    ------
    ValueError
        If no numeric ID can be extracted, with the filename in the message
        (the old code raised a bare "invalid literal" error).
    """
    parts = filename.split('_')
    if len(parts) > 1:
        num_str = parts[-1].split('.')[0]
        try:
            return int(num_str)
        except ValueError:
            pass
    # Fallback: digits 2-3 of the last '-' part.
    subject_num = filename.split('-')[-1][1:3]
    try:
        return int(subject_num)
    except ValueError as err:
        raise ValueError(f"无法从文件名提取被试ID: {filename}") from err

# Load/preprocess BrainVision data and return train/test data with labels.
def load_and_process_from_brainvision(base_dir):
    """Load every .vhdr recording under *base_dir*, preprocess it and build
    stratified per-file train/test splits.

    Returns
    -------
    (X_train, y_train, pid_train, X_test, y_test, pid_test)
        Arrays of preprocessed trials, labels (0 = left, 1 = right) and the
        subject ID of every trial.
    """
    vhdr_files = sorted(name for name in os.listdir(base_dir) if name.endswith('.vhdr'))

    train_chunks, train_labels, train_pids = [], [], []
    test_chunks, test_labels, test_pids = [], [], []

    for vhdr in vhdr_files:
        sid = get_subject_id(vhdr)
        recording = GZMI1Recording(root=base_dir, vhdr_name=vhdr)
        recording.load(preload=True)
        fs = recording.get_sampling_rate()
        events, event_id, _records = recording.get_events()

        # Keep only the left/right-hand markers (codes 4 = left, 5 = right).
        wanted = {name: code for name, code in event_id.items() if code in [4, 5]}
        if not wanted:
            print(f"  警告: 无有效事件ID，跳过文件 {vhdr}")
            continue

        epochs = mne.Epochs(
            recording.raw, events, event_id=wanted,
            tmin=-0.5, tmax=4.0, preload=True, baseline=None, verbose=False)
        if len(epochs) == 0:
            print(f"  警告: 无有效 epochs，跳过文件 {vhdr}")
            continue

        trials = epochs.get_data()  # (n_epochs, n_ch, n_times)
        # Labels: code 4 -> 0 (left), code 5 -> 1 (right).
        y = np.array([0 if code == 4 else 1 for code in epochs.events[:, -1]])

        idx_train, idx_test = train_test_split(
            range(len(trials)), test_size=0.2, stratify=y, random_state=42)

        # Training portion gets augmentation; the held-out portion does not.
        train_chunks.append(process_eeg_data(trials[idx_train], sfreq=fs, augment=True))
        train_labels.append(y[idx_train])
        train_pids.extend([sid] * len(y[idx_train]))

        test_chunks.append(process_eeg_data(trials[idx_test], sfreq=fs, augment=False))
        test_labels.append(y[idx_test])
        test_pids.extend([sid] * len(y[idx_test]))

    # Concatenate across files; each file was preprocessed independently,
    # so no global normalisation is needed here.
    X_train = np.vstack(train_chunks) if train_chunks else np.array([])
    y_train = np.concatenate(train_labels) if train_labels else np.array([])
    pid_train = np.array(train_pids)
    X_test = np.vstack(test_chunks) if test_chunks else np.array([])
    y_test = np.concatenate(test_labels) if test_labels else np.array([])
    pid_test = np.array(test_pids)

    if X_train.size == 0 or X_test.size == 0:
        raise RuntimeError("BrainVision数据处理失败：训练或测试数据为空")

    return X_train, y_train, pid_train, X_test, y_test, pid_test

def _csp_features(X_train, y_train, X_test):
    """Fit CSP + standard scaling on the training trials; transform both splits."""
    n_channels = X_train.shape[1]
    n_comp = int(min(6, n_channels))
    csp = CSP(n_components=n_comp, reg='ledoit_wolf', log=True, rank='full',
              transform_into='average_power')
    feats_train = csp.fit_transform(X_train, y_train)
    feats_test = csp.transform(X_test)
    scaler = StandardScaler()
    return scaler.fit_transform(feats_train), scaler.transform(feats_test)


def _search_svm_params(X, y):
    """Two-stage (coarse 2^-10..2^10, then fine ±1 exponent at 0.1 steps)
    grid search for RBF-SVM C/gamma; returns the best parameter dict."""
    cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)

    def _best(param_grid):
        # Shared search driver for both stages (this duplication was inline before).
        gs = GridSearchCV(estimator=SVC(random_state=42), param_grid=param_grid,
                          scoring='accuracy', cv=cv, n_jobs=-1, refit=True, verbose=0)
        gs.fit(X, y)
        return gs.best_params_

    exps = np.arange(-10, 11, 1, dtype=float)
    coarse = _best({'C': 2.0 ** exps, 'gamma': 2.0 ** exps, 'kernel': ['rbf']})

    c_exp = np.log2(coarse['C'])
    g_exp = np.log2(coarse['gamma'])
    # +1e-9 keeps the upper endpoint inside the arange despite float rounding.
    fine_c = 2.0 ** np.arange(c_exp - 1.0, c_exp + 1.0 + 1e-9, 0.1)
    fine_g = 2.0 ** np.arange(g_exp - 1.0, g_exp + 1.0 + 1e-9, 0.1)
    return _best({'C': fine_c, 'gamma': fine_g, 'kernel': ['rbf']})


def _split_metrics(model, X, y):
    """Return (accuracy, left recall, right recall, Cohen's kappa) for one split."""
    y_pred = model.predict(X)
    cm = confusion_matrix(y, y_pred, labels=[0, 1])
    # max(..., 1) avoids division by zero when a class is absent in this split.
    left = cm[0, 0] / max(cm[0].sum(), 1)
    right = cm[1, 1] / max(cm[1].sum(), 1)
    return accuracy_score(y, y_pred), left, right, cohen_kappa_score(y, y_pred)


def main():
    """Subject-dependent CSP+SVM (RBF) evaluation over all recordings."""
    data_dir = r"D:\code\Python\library_BCI\data\zqw"
    X_train, y_train, pid_train, X_test, y_test, pid_test = load_and_process_from_brainvision(data_dir)

    print("\n--- 数据准备完成---")
    print(f"训练集形状：{X_train.shape}，标签分布：{np.bincount(y_train)}")
    print(f"测试集形状：{X_test.shape}，标签分布：{np.bincount(y_test)}")

    subjects = sorted(np.unique(np.concatenate([pid_train, pid_test])))
    print("\n=== 受试者依赖评估（CSP+SVM, RBF核） ===")
    for sid in subjects:
        tr_idx = (pid_train == sid)
        te_idx = (pid_test == sid)
        if not (np.any(tr_idx) and np.any(te_idx)):
            print(f"被试 {sid}: 训练或测试样本为空，跳过")
            continue

        y_tr_s = y_train[tr_idx]
        y_te_s = y_test[te_idx]

        # CSP features + standardisation, fitted on this subject's training data.
        X_tr_feat, X_te_feat = _csp_features(X_train[tr_idx], y_tr_s, X_test[te_idx])

        # Hyperparameter tuning, then final fit with the selected parameters.
        best_params = _search_svm_params(X_tr_feat, y_tr_s)
        svm = SVC(kernel='rbf', C=best_params['C'], gamma=best_params['gamma'], random_state=42)
        svm.fit(X_tr_feat, y_tr_s)

        acc_tr, left_tr, right_tr, kappa_tr = _split_metrics(svm, X_tr_feat, y_tr_s)
        acc_te, left_te, right_te, kappa_te = _split_metrics(svm, X_te_feat, y_te_s)

        print(f"\n--- 数据 {sid} ---")
        print(f"训练: Acc={acc_tr:.4f}, Left={left_tr:.4f}, Right={right_tr:.4f}, Kappa={kappa_tr:.4f}")
        print(f"测试: Acc={acc_te:.4f}, Left={left_te:.4f}, Right={right_te:.4f}, Kappa={kappa_te:.4f}")

# Script entry point: run the full load/preprocess/evaluate pipeline.
if __name__ == '__main__':
    main()