#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
简化版EEG模型训练脚本
用于训练EEGCNT、EEGGNN和EEGMixedAttentionModel模型
"""

import os
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader, Dataset, random_split
from torch import nn, optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
import argparse

from torchviz import make_dot
from tqdm import tqdm
import random
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix, classification_report, roc_curve, auc, precision_recall_curve, average_precision_score, balanced_accuracy_score
import scipy.io
import sys
import copy
from sklearn.model_selection import KFold
import time
import torch.nn.functional as F
import math

# Allowlist numpy scalar types so torch.load can unpickle checkpoints
# that embed them (required by PyTorch's safe-loading machinery).
torch.serialization.add_safe_globals([np.uint8, np.uint16, np.uint32, np.uint64,
                                      np.int8, np.int16, np.int32, np.int64,
                                      np.float16, np.float32, np.float64,
                                      np.complex64, np.complex128, np.bool_])

# Add the project root to the Python path BEFORE importing any `test.*`
# module (the original imported EEGGraphNet first, which only worked when
# the working directory already happened to be the project root).
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

# Import models and the dataset
from test.model.EEGGraphNet import EEGGraphNet
from test.model.EEGCNT import EEGCNT
from test.model.EEGGNN import EEGGNN
from test.model.EEGMixedAttentionModel import EEGMixedAttentionModel
from test.load.EEGDataset import EEGDataset

def set_seed(seed):
    """Seed every RNG used by this script so runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Trade cuDNN autotuning for bitwise-reproducible kernels.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

def create_directory(directory):
    """Create *directory* (including parents) if it does not exist.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of the original
    exists-then-create pair, which had a race window: a second process
    creating the same path between the check and the call would make
    ``makedirs`` raise ``FileExistsError``.
    """
    os.makedirs(directory, exist_ok=True)

def calculate_metrics(y_true, y_pred):
    """
    Compute evaluation metrics for binary classification.

    Args:
        y_true: ground-truth labels
        y_pred: predicted labels

    Returns:
        metrics: dict with accuracy, balanced accuracy, precision,
        recall, F1, per-class label counts and the confusion matrix.
    """
    # Accuracy
    accuracy = accuracy_score(y_true, y_pred)

    # Precision / recall / F1 (binary averaging; zero_division=0 guards
    # against batches where a class is never predicted)
    precision = precision_score(y_true, y_pred, average='binary', zero_division=0)
    recall = recall_score(y_true, y_pred, average='binary', zero_division=0)
    f1 = f1_score(y_true, y_pred, average='binary', zero_division=0)

    # Per-class label counts in a single pass. (The original re-scanned
    # all of y_true once per distinct label, i.e. O(n * n_classes).)
    class_counts = {}
    for label in y_true:
        class_counts[label] = class_counts.get(label, 0) + 1

    # Class-balanced accuracy (mean recall over classes)
    balanced_accuracy = balanced_accuracy_score(y_true, y_pred)

    # Confusion matrix
    cm = confusion_matrix(y_true, y_pred)

    # Bundle everything in one dict
    metrics = {
        'accuracy': accuracy,
        'balanced_accuracy': balanced_accuracy,
        'precision': precision,
        'recall': recall,
        'f1': f1,
        'class_counts': class_counts,
        'confusion_matrix': cm
    }

    return metrics

# 添加数据预处理和特征提取函数
def preprocess_batch(batch_data, batch_labels=None, normalize=True, add_features=False):
    """
    Preprocess a batch of EEG windows while staying compatible with the
    original models: per-channel z-score normalization followed by a
    0.5-50 Hz windowed-sinc FIR band-pass filter (fs = 250 Hz).

    Args:
        batch_data: tensor of shape [batch, channels, 1, time]
        batch_labels: optional labels, returned unchanged when given
        normalize: apply per-sample, per-channel z-scoring
        add_features: accepted for interface compatibility; the extra
            spectral feature maps are intentionally disabled (the
            original guarded that branch with ``and False``) so the
            channel count seen by the models never changes

    Returns:
        processed_data, or (processed_data, batch_labels) when labels
        are provided
    """
    # Work on a copy so the caller's tensor is never mutated.
    processed_data = batch_data.clone()

    # 1. Z-score normalization, vectorized over the whole batch. The
    # original looped over every (sample, channel) pair in Python.
    if normalize:
        mean = processed_data.mean(dim=-1, keepdim=True)
        std = processed_data.std(dim=-1, keepdim=True)
        # Guard near-constant channels against division by ~0.
        std = torch.where(std < 1e-6, torch.ones_like(std), std)
        processed_data = (processed_data - mean) / std

    # 2. FIR band-pass covering the delta..gamma EEG bands (0.5-50 Hz).
    batch_size, channels, _, time_points = processed_data.shape
    fs = 250           # sampling rate (Hz)
    low_cutoff = 0.5   # Hz - include the lowest delta frequencies
    high_cutoff = 50   # Hz - include beta and part of gamma
    filter_size = 101  # longer kernel -> sharper transition bands
    nyquist = fs / 2

    # Band-pass kernel as the difference of two windowed-sinc low-pass
    # kernels: h_bp = h_lp(f_high) - h_lp(f_low). The original summed a
    # low-pass and a high-pass kernel (including a unit impulse) and
    # then normalized the sum to unit DC gain, which degenerates into an
    # all-pass filter that rejects neither DC drift nor high frequencies.
    n = torch.arange(-(filter_size // 2), filter_size // 2 + 1,
                     device=processed_data.device).float()
    h_high = (high_cutoff / nyquist) * torch.sinc(high_cutoff / nyquist * n)
    h_low = (low_cutoff / nyquist) * torch.sinc(low_cutoff / nyquist * n)
    h_bandpass = h_high - h_low

    # Hann window to reduce spectral leakage.
    k = torch.arange(filter_size, device=processed_data.device)
    window = 0.5 * (1 - torch.cos(2 * math.pi * k / (filter_size - 1)))
    h_bandpass = h_bandpass * window

    # Normalize to unit gain at the passband center frequency (a DC-sum
    # normalization would be meaningless for a band-pass kernel).
    center = (low_cutoff + high_cutoff) / 2
    gain = torch.sum(h_bandpass * torch.cos(2 * math.pi * center / fs * n))
    h_bandpass = h_bandpass / gain

    # Apply the filter only when the window is longer than the kernel.
    if time_points > filter_size:
        reshaped = processed_data.view(batch_size * channels, 1, time_points)
        # F.conv1d with a constant kernel: unlike the original per-batch
        # nn.Conv1d module, this adds no trainable parameters and keeps
        # the filter out of the autograd parameter graph.
        filtered = F.conv1d(reshaped, h_bandpass.view(1, 1, -1),
                            padding=filter_size // 2)
        processed_data = filtered.view(batch_size, channels, 1, time_points)

    # NOTE: the original one-shot "filter visualization" built a
    # matplotlib figure that was never saved or closed (both calls were
    # commented out) -- a figure leak; that dead debug code was removed.
    # The extra-feature branch (diff + band-energy maps) was guarded by
    # `and False` and therefore unreachable; it was removed as well.

    if batch_labels is not None:
        return processed_data, batch_labels
    else:
        return processed_data

def train(model, train_loader, val_loader, criterion, optimizer, scheduler, device, epochs, save_dir, model_name, patience=15):
    """
    Train the model.

    Combines MixUp augmentation, additive noise injection (after epoch
    5), a bidirectional-KL consistency regularizer on perturbed inputs
    (after epoch 3), gradient clipping, and two coupled early-stopping
    criteria: classic patience on a weighted validation metric, plus a
    stop when the train/validation accuracy gap stays too large.

    Args:
        model: model to train; its forward may return either plain
            logits or a (main_logits, aux_logits) tuple
        train_loader: training data loader
        val_loader: validation data loader
        criterion: loss function
        optimizer: optimizer
        scheduler: LR scheduler; ReduceLROnPlateau is stepped with the
            validation loss, any other scheduler with no argument
        device: torch device
        epochs: number of training epochs
        save_dir: directory for checkpoints and the history plot
        model_name: base name for checkpoint files
        patience: early-stopping patience (epochs without improvement)

    Returns:
        dict with loss/accuracy histories, the best weighted validation
        metric, and the path of the best checkpoint
    """
    # Create the checkpoint directory
    create_directory(save_dir)

    # Track the best model
    best_model_path = os.path.join(save_dir, f"{model_name}_best.pth")
    last_model_path = os.path.join(save_dir, f"{model_name}_last.pth")
    # NOTE(review): despite its name, this variable tracks the *weighted*
    # selection metric computed below, not the raw balanced accuracy.
    best_val_balanced_acc = 0
    best_val_loss = float('inf')
    patience_counter = 0

    # History buffers
    train_losses = []
    val_losses = []
    train_accuracies = []
    val_accuracies = []

    # Augmented-training hyperparameters
    mixup_alpha = 0.5  # larger alpha makes the MixUp blend more balanced
    consistency_weight = 0.5  # weight of the consistency term
    grad_clip_value = 0.5  # small clipping threshold for stability
    use_consistency = True  # enable the consistency constraint
    temperature = 1.0  # KL-divergence temperature

    # Gap-based early-stopping parameters
    val_acc_threshold = 0.7  # above this val accuracy the gap may grow
    gap_tolerance = 0.3  # tolerated train-val accuracy gap (fraction)
    gap_patience = 5  # patience while the gap stays too large
    gap_patience_counter = 0  # counter for oversized-gap epochs

    print(f"开始训练...")

    for epoch in range(1, epochs + 1):
        # Training mode
        model.train()
        total_loss = 0
        total_cls_loss = 0  # classification loss
        total_consist_loss = 0  # consistency loss
        correct = 0
        total = 0

        print(f"轮次 {epoch}/{epochs}")

        # tqdm progress bar over the training batches
        progress_bar = tqdm(train_loader, desc=f"轮次 {epoch}/{epochs} [训练]")

        for batch_idx, (data, targets) in enumerate(progress_bar):
            # Move the batch to the target device
            data, targets = data.to(device), targets.to(device)

            # MixUp augmentation to improve generalization
            if mixup_alpha > 0:
                # Mixing coefficient drawn from a Beta distribution
                lam = np.random.beta(mixup_alpha, mixup_alpha)
                # Random permutation chooses the partner samples
                index = torch.randperm(data.size(0)).to(device)
                mixed_data = lam * data + (1 - lam) * data[index]
                # Keep both label sets for the mixed loss
                targets_a, targets_b = targets, targets[index]
                # Train on the mixed batch
                data_for_train = mixed_data
            else:
                data_for_train = data
                targets_a = targets
                targets_b = None

            # Extra noise perturbation to make training harder
            if epoch > 5:  # start noise early, but keep it weak
                noise_scale = 0.05 * torch.rand(1).item()  # small random scale
                noise = noise_scale * torch.randn_like(data_for_train)
                data_for_train = data_for_train + noise

            # Normalize / filter the inputs
            batch_data, batch_targets = preprocess_batch(data_for_train, targets)

            # Two forward passes implement the consistency constraint
            # First forward pass
            optimizer.zero_grad()
            outputs = model(batch_data, apply_activation=False)

            # Some models return a (main, aux) tuple
            if isinstance(outputs, tuple):
                main_outputs, aux_outputs = outputs

                # Main and auxiliary losses (MixUp-weighted when active)
                if mixup_alpha > 0 and targets_b is not None:
                    main_loss = lam * criterion(main_outputs, targets_a) + (1 - lam) * criterion(main_outputs, targets_b)
                    aux_loss = lam * criterion(aux_outputs, targets_a) + (1 - lam) * criterion(aux_outputs, targets_b)
                else:
                    main_loss = criterion(main_outputs, batch_targets)
                    aux_loss = criterion(aux_outputs, batch_targets)

                # Total = main loss + 0.3 * auxiliary loss
                cls_loss = main_loss + 0.3 * aux_loss

                # Accuracy is computed from the main head only
                outputs_for_acc = main_outputs
            else:
                # Single-output model: one classification loss
                if mixup_alpha > 0 and targets_b is not None:
                    cls_loss = lam * criterion(outputs, targets_a) + (1 - lam) * criterion(outputs, targets_b)
                else:
                    cls_loss = criterion(outputs, batch_targets)

                outputs_for_acc = outputs

            # Consistency constraint: outputs under random perturbation
            # should match the clean outputs
            if use_consistency and epoch > 3:  # enabled early in training
                # Several perturbation types
                # 1. Gaussian noise
                perturb_data = batch_data + 0.05 * torch.randn_like(batch_data)

                # 2. Random channel masking (30% of the time)
                if batch_data.size(1) > 1 and torch.rand(1).item() > 0.7:
                    mask = torch.bernoulli(torch.ones(batch_data.size(0), batch_data.size(1), 1, 1).to(device) * 0.9)
                    perturb_data = perturb_data * mask

                # Second forward pass on the perturbed batch
                perturb_outputs = model(perturb_data, apply_activation=False)

                # Keep only the main head when a tuple is returned
                if isinstance(perturb_outputs, tuple):
                    perturb_outputs = perturb_outputs[0]

                # Consistency loss: KL divergence between the two passes.
                # Bidirectional KL stabilizes learning; each direction
                # detaches its target so gradients flow one way at a time.
                consistency_loss_1 = F.kl_div(
                    F.log_softmax(perturb_outputs / temperature, dim=1),
                    F.softmax(outputs_for_acc.detach() / temperature, dim=1),
                    reduction='batchmean'
                )

                consistency_loss_2 = F.kl_div(
                    F.log_softmax(outputs_for_acc / temperature, dim=1),
                    F.softmax(perturb_outputs.detach() / temperature, dim=1),
                    reduction='batchmean'
                )

                # Average of the two KL directions
                consistency_loss = (consistency_loss_1 + consistency_loss_2) / 2

                # Ramp the consistency weight up smoothly over the first
                # 20 epochs
                current_consistency_weight = consistency_weight * min(1.0, epoch / 20.0)
                loss = cls_loss + current_consistency_weight * consistency_loss
                total_consist_loss += consistency_loss.item()
            else:
                loss = cls_loss
                consistency_loss = torch.tensor(0.0)

            # Backpropagation
            loss.backward()

            # Gradient clipping prevents exploding gradients and
            # stabilizes training (small threshold on purpose)
            torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip_value)

            # Parameter update
            optimizer.step()

            # Accumulate losses
            total_loss += loss.item()
            total_cls_loss += cls_loss.item()

            # Accuracy bookkeeping (against the un-mixed targets)
            _, predicted = outputs_for_acc.max(1)
            total += batch_targets.size(0)
            correct += predicted.eq(batch_targets).sum().item()

            # Progress-bar postfix with running averages
            progress_bar.set_postfix({
                'loss': total_loss / (batch_idx + 1),
                'cls_loss': total_cls_loss / (batch_idx + 1),
                'consist_loss': total_consist_loss / (batch_idx + 1) if use_consistency else 0,
                'acc': 100. * correct / total
            })

        # Epoch-level training loss and accuracy
        train_loss = total_loss / len(train_loader)
        train_accuracy = 100. * correct / total

        # Validation mode
        model.eval()
        val_loss = 0
        val_correct = 0
        val_total = 0

        # Collect all predictions/targets for the detailed metrics
        all_predictions = []
        all_targets = []

        progress_bar = tqdm(val_loader, desc=f"轮次 {epoch}/{epochs} [验证]")

        with torch.no_grad():
            for batch_idx, (data, targets) in enumerate(progress_bar):
                # Move the batch to the target device
                data, targets = data.to(device), targets.to(device)

                # Same preprocessing as in training (no MixUp/noise)
                batch_data, batch_targets = preprocess_batch(data, targets)

                # Forward pass
                outputs = model(batch_data, apply_activation=False)

                # Keep only the main head when a tuple is returned
                if isinstance(outputs, tuple):
                    main_outputs, _ = outputs
                    outputs = main_outputs

                # Validation loss
                loss = criterion(outputs, batch_targets)

                # Accumulate loss
                val_loss += loss.item()

                # Accuracy bookkeeping
                _, predicted = outputs.max(1)
                val_total += batch_targets.size(0)
                val_correct += predicted.eq(batch_targets).sum().item()

                # Store predictions and targets
                all_predictions.extend(predicted.cpu().numpy())
                all_targets.extend(batch_targets.cpu().numpy())

                # Progress-bar postfix
                progress_bar.set_postfix({
                    'loss': val_loss / (batch_idx + 1),
                    'acc': 100. * val_correct / val_total
                })

        # Epoch-level validation loss and accuracy
        val_loss = val_loss / len(val_loader)
        val_accuracy = 100. * val_correct / val_total

        # Balanced accuracy and F1 on the full validation set
        val_metrics = calculate_metrics(np.array(all_targets), np.array(all_predictions))
        val_balanced_acc = val_metrics['balanced_accuracy']
        val_f1 = val_metrics['f1']

        # Learning-rate scheduling
        if scheduler is not None:
            if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
                scheduler.step(val_loss)
            else:
                scheduler.step()

        # Current learning rate (first param group)
        current_lr = optimizer.param_groups[0]['lr']

        # Epoch summary
        print(f"轮次 {epoch}, 训练损失: {train_loss:.4f}, 训练准确率: {train_accuracy:.2f}%, "
              f"验证损失: {val_loss:.4f}, 验证准确率: {val_accuracy:.2f}%, "
              f"验证平衡准确率: {val_balanced_acc*100:.2f}%, 验证F1: {val_f1:.4f}, "
              f"学习率: {current_lr:.6f}")

        # Record history
        train_losses.append(train_loss)
        val_losses.append(val_loss)
        train_accuracies.append(train_accuracy)
        val_accuracies.append(val_accuracy)

        # Checkpoint-selection metric: weighted blend of validation
        # balanced accuracy, F1, and a penalty on the train/val gap; the
        # validation weight ramps up as training progresses so the model
        # converges toward a better generalization point
        epoch_progress = min(1.0, epoch / epochs)
        val_weight = 0.5 + 0.3 * epoch_progress  # raise val weight over time
        gap_penalty = max(0, abs(train_accuracy/100 - val_accuracy/100) - 0.15)  # penalize gaps above 15%

        weighted_metric = val_balanced_acc * val_weight + val_f1 * 0.2 + (1.0 - gap_penalty) * (1.0 - val_weight)

        # Train/validation accuracy-gap early stopping
        acc_gap = train_accuracy - val_accuracy
        if acc_gap > gap_tolerance * 100 and val_accuracy < val_acc_threshold * 100:
            gap_patience_counter += 1
            print(f"警告: 训练-验证准确率差距 ({acc_gap:.2f}%) 过大! 计数: {gap_patience_counter}/{gap_patience}")
            if gap_patience_counter >= gap_patience:
                print(f"训练-验证准确率差距持续过大，可能发生过拟合，提前停止训练。")
                break
        else:
            # Gap acceptable (or val accuracy high): decay the counter
            gap_patience_counter = max(0, gap_patience_counter - 1)

        # Save the best checkpoint (by the weighted metric above)
        if weighted_metric > best_val_balanced_acc:
            print(f"发现更好的模型! 加权指标: {weighted_metric:.4f} > {best_val_balanced_acc:.4f}")
            print(f"训练和验证准确率差距: {abs(train_accuracy - val_accuracy):.2f}%")
            best_val_balanced_acc = weighted_metric
            patience_counter = 0

            # Persist the best checkpoint
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'train_loss': train_loss,
                'val_loss': val_loss,
                'val_balanced_acc': val_balanced_acc,
                'val_f1': val_f1,
                'train_accuracy': train_accuracy,
                'val_accuracy': val_accuracy
            }, best_model_path, pickle_protocol=4)  # protocol 4 for wider compatibility

            print(f"最佳模型已保存: {best_model_path}")
        else:
            patience_counter += 1
            print(f"模型没有改进，耐心: {patience_counter}/{patience}")

        # Always persist the latest checkpoint
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'train_loss': train_loss,
            'val_loss': val_loss
        }, last_model_path, pickle_protocol=4)  # protocol 4 for wider compatibility

        # Classic patience-based early stopping
        if patience_counter >= patience:
            print(f"早停! {patience} 轮没有改进。")
            break

    # Plot the training history
    history_path = os.path.join(save_dir, f"{model_name}_history.png")
    plot_history(train_losses, val_losses, train_accuracies, val_accuracies, history_path)

    # Return the training history
    return {
        'train_losses': train_losses,
        'val_losses': val_losses,
        'train_accuracies': train_accuracies,
        'val_accuracies': val_accuracies,
        'best_val_balanced_acc': best_val_balanced_acc,
        'best_model_path': best_model_path
    }

def test(model, test_loader, device, best_model_path):
    """
    Evaluate the best checkpoint on the test set.

    Args:
        model: model instance (its weights are overwritten from the
            checkpoint before evaluation)
        test_loader: test data loader
        device: torch device
        best_model_path: path of the checkpoint saved during training

    Returns:
        dict of test metrics including loss, accuracy and the per-class
        softmax probabilities (for ROC / PR curves)
    """
    # weights_only=False: the checkpoint stores plain Python/numpy
    # objects next to the state dict (avoids UnpicklingError).
    checkpoint = torch.load(best_model_path, weights_only=False)
    model.load_state_dict(checkpoint['model_state_dict'])
    model.eval()

    # Cross-entropy, same objective as training
    criterion = nn.CrossEntropyLoss()

    running_loss = 0.0
    n_correct = 0
    n_seen = 0
    predictions = []
    labels = []
    probability_chunks = []  # softmax outputs, kept for ROC/PR analysis

    with torch.no_grad():
        for data, targets in test_loader:
            data = data.to(device)
            targets = targets.to(device)

            # Same preprocessing as during training/validation
            inputs, refs = preprocess_batch(data, targets)

            logits = model(inputs, apply_activation=False)

            # Some models (e.g. EEGConformer-style) return (main, aux)
            if isinstance(logits, tuple):
                main_logits, _ = logits
                logits = main_logits

            running_loss += criterion(logits, refs).item()

            # Class probabilities for curve plotting later
            probability_chunks.append(F.softmax(logits, dim=1).cpu().numpy())

            # Accuracy bookkeeping
            predicted = logits.max(1)[1]
            n_seen += refs.size(0)
            n_correct += predicted.eq(refs).sum().item()

            predictions.extend(predicted.cpu().numpy())
            labels.extend(refs.cpu().numpy())

    # Averages over the whole test set
    test_loss = running_loss / len(test_loader)
    test_accuracy = n_correct / n_seen

    # Detailed metrics from the collected predictions
    test_metrics = calculate_metrics(np.array(labels), np.array(predictions))
    test_metrics['loss'] = test_loss
    test_metrics['accuracy'] = test_accuracy

    # Stack the per-batch probability arrays into one matrix
    if probability_chunks:
        test_metrics['probabilities'] = np.vstack(probability_chunks)

    print(f"测试损失: {test_loss:.4f}")
    print(f"测试准确率: {test_accuracy:.4f}")
    print(f"测试平衡准确率: {test_metrics['balanced_accuracy']:.4f}")
    print(f"测试F1分数: {test_metrics['f1']:.4f}")

    return test_metrics

def plot_history(train_losses, val_losses, train_accuracies, val_accuracies, save_path):
    """Save a two-panel figure (loss and accuracy curves) to *save_path*."""
    plt.figure(figsize=(12, 5))

    panels = [
        (1, 'Training and Validation Loss', 'Loss',
         [(train_losses, 'Train Loss'), (val_losses, 'Val Loss')]),
        (2, 'Training and Validation Accuracy', 'Accuracy',
         [(train_accuracies, 'Train Accuracy'), (val_accuracies, 'Val Accuracy')]),
    ]
    for position, title, ylabel, series in panels:
        plt.subplot(1, 2, position)
        for values, label in series:
            plt.plot(values, label=label)
        plt.title(title)
        plt.xlabel('Epochs')
        plt.ylabel(ylabel)
        plt.legend()

    plt.tight_layout()
    plt.savefig(save_path)
    plt.close()

def plot_confusion_matrix(cm, save_dir, model_name):
    """Render the binary confusion matrix as an image under *save_dir*."""
    plt.figure(figsize=(8, 6))
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title(f'Confusion Matrix - {model_name}')
    plt.colorbar()

    classes = ['Non-Depression', 'Depression']
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)

    # Annotate every cell; switch to white text on dark cells.
    threshold = cm.max() / 2.
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            plt.text(col, row, format(cm[row, col], 'd'),
                     ha="center", va="center",
                     color="white" if cm[row, col] > threshold else "black")

    plt.ylabel('True Label')
    plt.xlabel('Predicted Label')
    plt.tight_layout()

    # Write the image next to the model's other artifacts
    save_path = os.path.join(save_dir, f"{model_name}_confusion_matrix.png")
    plt.savefig(save_path)
    plt.close()

    print(f"混淆矩阵已保存到 {save_path}")

def main():
    """Entry point: cross-subject EEG depression detection.

    Parses CLI arguments, inspects the training .mat files to report the
    subject/label distribution, then either runs k-fold cross-validation
    or a single train/val/test split via ``get_loaders`` and
    ``train_and_evaluate_fold`` (both defined elsewhere in this project).
    """
    # CLI arguments
    parser = argparse.ArgumentParser(description='训练EEG抑郁症检测模型')
    parser.add_argument('--model', type=str, default='EEGCNT', help='模型名称: EEGGNN，EEGCNT, EEGGraphNet, EEGMixedAttentionModel')
    parser.add_argument('--window_size', type=int, default=1000, help='窗口大小')
    parser.add_argument('--step_size', type=int, default=750, help='步长 - 减小步长增加数据量并捕获更多变化')
    parser.add_argument('--noise_level', type=float, default=0.05, help='高斯噪声水平')
    parser.add_argument('--cross_record_noise', type=float, default=0.05, help='跨记录验证时的噪声水平')
    parser.add_argument('--data_dir', type=str, default='test/Data4/Data4', help='数据目录')
    parser.add_argument('--train_dir', type=str, default='train', help='训练数据子目录')
    parser.add_argument('--save_dir', type=str, default='models_simple', help='模型保存目录')
    parser.add_argument('--lr', type=float, default=3e-4, help='学习率 - 降低以提高稳定性')
    parser.add_argument('--batch_size', type=int, default=64, help='批量大小')
    parser.add_argument('--epochs', type=int, default=150, help='训练轮数')
    parser.add_argument('--n_folds', type=int, default=5, help='交叉验证折数')
    parser.add_argument('--patience', type=int, default=20, help='早停耐心值')
    parser.add_argument('--seed', type=int, default=42, help='随机种子')
    parser.add_argument('--use_cross_validation', action='store_true', help='是否使用交叉验证')
    parser.add_argument('--use_augmentation', action='store_true', help='是否使用数据增强')
    parser.add_argument('--aug_minority_ratio', type=float, default=1.2, help='少数类增强到多数类的比例')
    parser.add_argument('--time_shift_max', type=float, default=0.1, help='最大时间偏移比例')
    parser.add_argument('--dropout', type=float, default=0.5, help='Dropout率')
    parser.add_argument('--weight_decay', type=float, default=1e-2, help='权重衰减率 - 增强正则化')
    parser.add_argument('--label_smoothing', type=float, default=0.15, help='标签平滑系数')
    parser.add_argument('--gradient_clip', type=float, default=0.5, help='梯度裁剪值 - 降低以提高稳定性')
    parser.add_argument('--lr_patience', type=int, default=5, help='学习率调度器耐心值')
    parser.add_argument('--lr_factor', type=float, default=0.7, help='学习率衰减因子')
    args = parser.parse_args()

    # Seed all RNGs for reproducibility
    seed = args.seed
    set_seed(seed)

    print("=============== 改进版EEG模型训练 ===============")
    print(f"- 启用0.5-50Hz带通滤波 (采样率250Hz)")
    print(f"- 训练/验证噪声差异化: {args.noise_level*1.5:.3f}/{args.noise_level*0.5:.3f}")
    print(f"- 使用标签平滑: {args.label_smoothing}")
    print(f"- Dropout率: {args.dropout}")
    print(f"- 权重衰减: {args.weight_decay}")
    print(f"- 学习率: {args.lr}")
    print(f"- 梯度裁剪: {args.gradient_clip}")
    print("================================================")

    # Pick GPU when available
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")

    # Full path to the training data
    train_data_dir = os.path.join(args.data_dir, args.train_dir)

    print(f"加载数据集 - 窗口大小: {args.window_size}, 步长: {args.step_size}, 噪声水平: {args.noise_level}")
    print(f"训练数据目录: {train_data_dir}")

    # Collect all .mat training files (e.g. test/DATA4/DATA4/train/<name>.mat)
    train_files = [os.path.join(train_data_dir, f) for f in os.listdir(train_data_dir) if f.endswith('.mat')]

    # Determine the per-subject label distribution
    subject_labels = {}
    for file_path in train_files:
        # Use the filename (without extension) as the subject id
        filename = os.path.basename(file_path)
        subject_name = filename.split('.')[0]

        # Load the MAT file to read its label
        try:
            mat_data = scipy.io.loadmat(file_path)
            # A MATLAB scalar arrives as a (1, 1) 2-D array in Python
            label = mat_data['label'][0, 0]
            # Map subject id -> label
            subject_labels[subject_name] = label
        except Exception as e:
            print(f"警告: 无法读取文件 (unknown) 的标签: {str(e)}")

    # Count the subjects per class
    class_counts = {}
    for subject, label in subject_labels.items():
        if label not in class_counts:
            class_counts[label] = 0
        class_counts[label] += 1
    # e.g. 49 subjects in total
    print(f"找到 {len(subject_labels)} 个训练被试者数据文件")
    # e.g. class 0: 22, class 12: 7
    print(f"被试者类别分布: " + ", ".join([f"类别{label}: {count}名" for label, count in class_counts.items()]))

    # Cross-subject validation settings:
    # 1. Force advanced augmentation on to help generalize across subjects
    args.use_augmentation = True
    # 2. Multiple augmentation methods simulate per-subject signal variation
    augmentation_methods = ['noise', 'shift', 'scale', 'hf_conv', 'lf_modulation']
    # 3. Learning parameters are chosen to avoid overfitting a single subject

    # Model configuration passed along for bookkeeping
    model_config = {
        'model_name': args.model,
        'window_size': args.window_size,
        'step_size': args.step_size,
        'noise_level': args.noise_level,
        # whether augmentation is enabled
        'use_augmentation': args.use_augmentation,
        # augmentation method list
        'augmentation_methods': augmentation_methods
    }

    # Checkpoint directory layout: <save_dir>/<model>/w<win>_s<step>_n<noise>
    model_save_dir = os.path.join(args.save_dir, args.model,
                                 f"w{args.window_size}_s{args.step_size}_n{args.noise_level}")

    # Result accumulators across folds
    all_results = {
        'val_accuracy': [],
        'val_balanced_accuracy': [],
        'val_f1': [],
        'test_accuracy': [],
        'test_balanced_accuracy': [],
        'test_f1': [],
        'best_epochs': []
    }

    # Cross-validation branch
    if args.use_cross_validation:
        print(f"执行{args.n_folds}折交叉验证...")
        # Iterate over the folds
        for fold in range(1, args.n_folds + 1):
            print(f"\n开始第 {fold}/{args.n_folds} 折...")

            # Cross-segment validation: train and validation sets share
            # subjects but use different time segments
            print(f"使用跨片段验证: 训练集和验证集包含相同的被试者，但使用不同的时间片段")

            # Build the loaders for this fold
            fold_train_loader, fold_val_loader, fold_test_loader = get_loaders(
                args, train_data_dir, None, fold, cross_subject=False  # no separate test_data_dir
            )

            # Per-fold checkpoint directory
            fold_save_dir = os.path.join(model_save_dir, f'fold_{fold}')
            create_directory(fold_save_dir)

            # Train and evaluate this fold
            results = train_and_evaluate_fold(
                fold, args.n_folds, fold_train_loader, fold_val_loader,
                fold_test_loader, args, device, fold_save_dir
            )

            # Collect per-fold results
            for key, value in results.items():
                if key in all_results:
                    all_results[key].append(value)

            # Collect the best epoch of this fold
            all_results['best_epochs'].append(results.get('best_epoch', 0))

        # Mean +/- std over all folds
        print("\n交叉验证结果汇总:")
        for key in ['val_accuracy', 'val_balanced_accuracy', 'val_f1',
                   'test_accuracy', 'test_balanced_accuracy', 'test_f1']:
            if key in all_results and all_results[key]:
                mean_value = np.mean(all_results[key])
                std_value = np.std(all_results[key])
                print(f"平均 {key}: {mean_value:.4f} ± {std_value:.4f}")

        # Average best epoch
        if all_results['best_epochs']:
            mean_epoch = np.mean(all_results['best_epochs'])
            std_epoch = np.std(all_results['best_epochs'])
            print(f"平均最佳轮次: {mean_epoch:.1f} ± {std_epoch:.1f}")

    # Single-split branch: 80/20 train/validation split plus a test pass
    else:
        print("使用单次训练-验证-测试分割...")

        train_loader, val_loader, test_loader = get_loaders(
            args, train_data_dir, None, fold=0, cross_subject=False  # no separate test_data_dir
        )

        train_and_evaluate_fold(
            0, 1, train_loader, val_loader, test_loader, args, device, model_save_dir
        )

    print("\n训练完成!")

def get_model(model_name, input_shape, num_classes, device, args=None):
    """
    Create a model instance by name and move it to the target device.

    Args:
        model_name: one of "EEGCNT", "EEGGraphNet", "EEGMixedAttentionModel",
            "EEGConformer", "EEGGNN".
        input_shape: 4-element shape [batch, channels, electrodes, time_points]
            (e.g. a ``torch.Size`` taken from one training batch).
        num_classes: number of output classes.
        device: torch device the model is constructed for and moved to.
        args: optional argparse namespace; only read here for informational
            prints (``weight_decay`` / ``lr``).

    Returns:
        The instantiated model on ``device``.

    Raises:
        ValueError: if ``model_name`` is not one of the supported names.
    """
    # Unpack the expected 4-D input shape: [batch, channels, electrodes, time].
    batch_size, channels, num_electrodes, num_time_points = input_shape
    print(f"输入形状: {input_shape} (batch={batch_size}, channels={channels}, "
          f"electrodes={num_electrodes}, time={num_time_points})")

    if model_name == "EEGCNT":
        from test.model.EEGCNT import EEGCNT
        # Stronger regularization / structure tuned for cross-subject generalization.
        model = EEGCNT(
            input_channels=channels,
            num_electrodes=num_electrodes,
            num_time_points=num_time_points,
            output_dim=num_classes,
            hidden_channels=128,        # wider hidden representation
            num_temporal_layers=4,      # deeper temporal convolution stack
            num_graph_layers=3,         # deeper graph convolution stack
            dropout=0.5,
            weight_decay=1e-3,          # stronger weight decay
            graph_type='learned',
            use_domain_adaptation=True, # enable domain adaptation
            consistency_weight=0.3,     # stronger consistency regularization
            device=device
        )
    elif model_name == "EEGGraphNet":
        from test.model.EEGGraphNet import EEGGraphNet
        # Echo the input shape for debugging.
        print(f"EEGGraphNet输入形状: [batch={batch_size}, channels={channels}, electrodes={num_electrodes}, time={num_time_points}]")

        # Scale feature / hidden dimensions down for high-channel inputs to
        # limit parameter count (and with it, overfitting).
        feature_dim = 128 if channels <= 8 else 32
        hidden_dim = 256 if channels <= 16 else 64
        # Bind the remaining hyperparameters to locals so the constructor call
        # and the configuration report below cannot drift apart (the previous
        # report printed hard-coded values of 3 layers / 0.6 dropout that no
        # longer matched the actual configuration).
        num_layers = 4
        dropout = 0.5

        model = EEGGraphNet(
            num_electrodes=num_electrodes,
            feature_dim=feature_dim,
            hidden_dim=hidden_dim,
            num_classes=num_classes,
            num_layers=num_layers,
            dropout=dropout,
            device=device
        )

        print(f"EEGGraphNet模型配置:")
        print(f"- 特征维度: {feature_dim}")
        print(f"- 隐藏层维度: {hidden_dim}")
        print(f"- 层数: {num_layers}")
        print(f"- Dropout率: {dropout}")
        print(f"- 多通道输入模式已启用")
        if args:
            print(f"- 权重衰减: {args.weight_decay}")
            print(f"- 学习率: {args.lr}")
    elif model_name == "EEGMixedAttentionModel":
        from test.model.EEGMixedAttentionModel import EEGMixedAttentionModel
        model = EEGMixedAttentionModel(
            input_channels=channels,
            num_electrodes=num_electrodes,
            num_time_points=num_time_points,
            output_dim=num_classes,
            hidden_channels=128,  # wider hidden representation
            num_layers=5,         # more attention layers
            num_heads=8,
            dropout=0.5,
            graph_type='learned',
            device=device
        )
    elif model_name == "EEGConformer":
        from test.model.Conformerchange import EEGConformer
        model = EEGConformer(
            input_channels=channels,
            num_electrodes=num_electrodes,
            num_time_points=num_time_points,
            output_dim=num_classes,
            emb_size=64,
            depth=3,
            num_heads=4,
            dropout=0.5,
            device=device
        )
    elif model_name == "EEGGNN":
        from test.model.EEGGNN import EEGGNN
        model = EEGGNN(
            input_channels=channels,
            num_electrodes=num_electrodes,
            output_dim=num_classes,
            hidden_channels=256,
            num_time_points=num_time_points,
            num_layers=5,
            dropout=0.6,  # raised dropout rate
            device=device
        )

        # NOTE(review): the Dropout2d modules registered below are attached as
        # extra attributes only -- unless EEGGNN.forward explicitly looks them
        # up by name, they are never invoked and have no effect.  Confirm
        # against the EEGGNN implementation.
        def add_dropout_to_conv(module):
            """Recursively attach a Dropout2d next to matching Conv2d layers."""
            for name, child in module.named_children():
                if isinstance(child, nn.Conv2d) and 'feature_branch' in name:
                    setattr(module, name + '_dropout', nn.Dropout2d(0.3))
                elif len(list(child.children())) > 0:
                    add_dropout_to_conv(child)

        add_dropout_to_conv(model)

        # Report the architecture choices made above.
        print("增强版EEGGNN模型架构:")
        print(f"- 隐藏层维度: 256 (减小)")
        print(f"- 图卷积层数: 5( 减少)")
        print(f"- Dropout率: 0.6 (增加)")
        print(f"- 卷积层添加空间Dropout")
        if args:
            print(f"- 使用权重衰减: {args.weight_decay * 1.5}（增加）")
    else:
        raise ValueError(f"不支持的模型类型: {model_name}")

    # Report total trainable + non-trainable parameter count.
    total_params = sum(p.numel() for p in model.parameters())
    print(f"模型参数总数: {total_params:,}")

    return model.to(device)

def train_and_evaluate_fold(fold, n_folds, train_loader, val_loader, test_loader, args, device, save_dir):
    """
    Train and evaluate one cross-validation fold.

    Builds the model from one sample batch's shape, trains it with AdamW +
    warmup/cosine scheduling and early stopping, evaluates the best checkpoint
    on the test loader, and saves history/confusion-matrix plots to save_dir.

    Args:
        fold: index of the current fold (used in file names).
        n_folds: total number of folds (informational).
        train_loader / val_loader / test_loader: DataLoaders for each split.
        args: parsed CLI arguments (model, lr, weight_decay, epochs, patience).
        device: torch device to train on.
        save_dir: directory for checkpoints and plots.

    Returns:
        dict with accuracy / balanced accuracy / F1 for 'val' and 'test' keys,
        the test confusion matrix, and the best epoch.
    """
    # Infer the input shape from one training batch.
    batch_data, _ = next(iter(train_loader))
    input_shape = batch_data.shape
    num_classes = 2  # binary classification

    print(f"创建模型: {args.model}, 输入形状: {input_shape}")
    print(f"带通滤波已启用: 0.5-50Hz (采样率: 250Hz)")

    # Build the model and move it to the device.
    model = get_model(args.model, input_shape, num_classes, device, args)
    model = model.to(device)

    # Loss with label smoothing to reduce overfitting.
    criterion = nn.CrossEntropyLoss(label_smoothing=0.15)

    # Use separate parameter groups with different weight decay:
    # convolutional layers typically need less regularization.
    if hasattr(model, 'get_parameter_groups'):
        param_groups = model.get_parameter_groups(args.weight_decay)
    else:
        # Split parameters by name: anything containing 'conv' gets the
        # lighter decay, everything else the heavier one.
        conv_params = []
        other_params = []
        for name, param in model.named_parameters():
            if 'conv' in name:
                conv_params.append(param)
            else:
                other_params.append(param)

        param_groups = [
            {'params': conv_params, 'weight_decay': args.weight_decay * 0.5},  # lighter decay for conv layers
            {'params': other_params, 'weight_decay': args.weight_decay * 1.5}   # heavier decay for the rest
        ]

    # AdamW optimizer.
    # NOTE(review): per-group 'weight_decay' values above override this global
    # weight_decay=1e-2 for every parameter in those groups, so the 1e-2 only
    # applies to groups without their own setting.
    optimizer = torch.optim.AdamW(
        param_groups,
        lr=args.lr,
        weight_decay=1e-2,
        betas=(0.9, 0.999),
        eps=1e-8
    )

    # Cosine-annealing schedule with a linear warmup phase.
    T_max = args.epochs  # full schedule length in epochs
    eta_min = 1e-6  # NOTE(review): used below as a floor on the LR *multiplier*,
                    # i.e. minimum lr = args.lr * eta_min, not an absolute lr.
    warmup_epochs = int(args.epochs * 0.1)  # 10% of epochs for warmup

    def lr_lambda(epoch):
        # Multiplicative LR factor for LambdaLR.
        if epoch < warmup_epochs:
            # Linear warmup from 0 to 1.
            return float(epoch) / float(max(1, warmup_epochs))
        else:
            # Cosine annealing from 1 down toward eta_min.
            progress = float(epoch - warmup_epochs) / float(max(1, T_max - warmup_epochs))
            return max(eta_min, 0.5 * (1.0 + math.cos(math.pi * progress)))

    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)

    # Train with early stopping; returns loss/accuracy history and the
    # checkpoint path of the best model.
    history = train(
        model, train_loader, val_loader, criterion, optimizer, scheduler,
        device, args.epochs, save_dir, f"{args.model}_fold{fold}",
        patience=args.patience
    )

    # Evaluate the best checkpoint on the held-out test split.
    best_model_path = history['best_model_path']

    print(f"\n使用最佳模型进行测试: {best_model_path}")
    test_metrics = test(model, test_loader, device, best_model_path)

    # Print scalar test metrics only.
    print(f"测试结果:")
    for key, value in test_metrics.items():
        if isinstance(value, (int, float)):
            print(f"  {key}: {value:.4f}")

    # Plot and save the training-history curves.
    history_path = os.path.join(save_dir, f"{args.model}_fold{fold}_history.png")
    plt.figure(figsize=(12, 5))

    # Loss curves.
    plt.subplot(1, 2, 1)
    plt.plot(history['train_losses'], label='Train Loss')
    plt.plot(history['val_losses'], label='Val Loss')
    plt.title('Training and Validation Loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()

    # Accuracy curves.
    plt.subplot(1, 2, 2)
    plt.plot(history['train_accuracies'], label='Train Accuracy')
    plt.plot(history['val_accuracies'], label='Val Accuracy')
    plt.title('Training and Validation Accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.legend()

    plt.tight_layout()
    plt.savefig(history_path)
    plt.close()
    print(f"训练历史曲线已保存到: {history_path}")

    # Save the confusion matrix plot.
    # NOTE(review): cm_path is computed but never used -- plot_confusion_matrix
    # derives its own file name from save_dir and the prefix.
    cm_path = os.path.join(save_dir, f"{args.model}_fold{fold}_confusion_matrix.png")
    plot_confusion_matrix(test_metrics['confusion_matrix'], save_dir, f"{args.model}_fold{fold}")

    # Recover the epoch at which the best checkpoint was saved.
    # weights_only=False is required because the checkpoint stores numpy
    # scalars (see add_safe_globals at the top of the file) -- only load
    # checkpoints this script itself produced.
    checkpoint = torch.load(best_model_path, weights_only=False)
    best_epoch = checkpoint.get('epoch', 0)

    # Assemble the fold summary.
    # NOTE(review): the 'val_*' keys are filled with TEST metrics, not the
    # validation metrics from history -- the cross-validation summary in
    # main() therefore reports identical val/test numbers.  Confirm whether
    # this is intentional.
    results = {
        'val_accuracy': test_metrics['accuracy'],
        'val_balanced_accuracy': test_metrics['balanced_accuracy'],
        'val_f1': test_metrics['f1'],
        'test_accuracy': test_metrics['accuracy'],
        'test_balanced_accuracy': test_metrics['balanced_accuracy'],
        'test_f1': test_metrics['f1'],
        'test_confusion_matrix': test_metrics['confusion_matrix'],
        'best_epoch': best_epoch
    }

    return results

def get_loaders(args, train_data_dir, test_data_dir=None, fold=0, cross_subject=False):
    """
    Build the train / validation / test DataLoaders.

    Args:
        args: parsed CLI arguments (batch_size, window_size, step_size,
            noise_level, cross_record_noise, use_augmentation, time_shift_max).
        train_data_dir: directory containing the .mat recordings.
        test_data_dir: optional directory with separate test recordings; when
            None (or missing), all three splits are carved out of
            train_data_dir by time segment.
        fold: current fold index (kept for API compatibility with the
            cross-validation driver; not used for splitting here).
        cross_subject: True -> split whole subjects between train and val
            (cross-subject validation); False -> same subjects in every split,
            different time segments (cross-record validation).

    Returns:
        (train_loader, val_loader, test_loader)

    Raises:
        ValueError: if any of the three datasets ends up empty.
    """
    # All candidate recordings in the training directory.
    train_files = [f for f in os.listdir(train_data_dir) if f.endswith('.mat')]

    # Optional held-out recordings from a separate directory.
    test_files = []
    if test_data_dir is not None and os.path.exists(test_data_dir):
        test_files = [f for f in os.listdir(test_data_dir) if f.endswith('.mat')]

    if cross_subject:
        # ---- Cross-subject mode: each subject goes entirely to one split. ----
        # Read the class label of every recording so the split can be stratified.
        subject_labels = {}
        for file_path in train_files:
            try:
                mat_data = scipy.io.loadmat(os.path.join(train_data_dir, file_path))
                label = mat_data['label'][0, 0]
                subject_labels[file_path] = label
            except Exception as e:
                print(f"警告: 无法读取文件 {file_path} 的标签: {str(e)}")

        # Group files by class label.
        files_by_class = {}
        for file, label in subject_labels.items():
            files_by_class.setdefault(label, []).append(file)

        # Shuffle within each class (reproducible via the global seed).
        for label in files_by_class:
            random.shuffle(files_by_class[label])

        train_files_list = []
        val_files_list = []

        # Stratified split: ~25% of each class's subjects go to validation.
        for label, files in files_by_class.items():
            val_count = max(1, int(len(files) * 0.25))
            val_files_list.extend(files[:val_count])
            train_files_list.extend(files[val_count:])

        # BUGFIX: test_files_list was never assigned on this branch, which
        # raised a NameError below.  Use the external test directory when it
        # provided files; otherwise fall back to the validation subjects
        # (their 'test' time segments are still unseen during training).
        test_files_list = list(test_files) if test_files else list(val_files_list)
    else:
        # ---- Cross-record mode: every subject appears in all three splits,
        # each split using a different time slice of the recording (6:2:2,
        # enforced inside EEGDataset via record_split_ratio). ----
        print("使用跨记录验证模式 - 按照6:2:2比例划分每个人的不同记录片段")

        all_files = train_files
        train_files_list = all_files
        val_files_list = all_files
        test_files_list = all_files

    print(f"数据分割:")
    print(f"  训练文件: {len(train_files_list)} 个")
    print(f"  验证文件: {len(val_files_list)} 个")
    print(f"  测试文件: {len(test_files_list)} 个")

    # Asymmetric noise: make training harder and validation easier to narrow
    # the train-val accuracy gap.
    train_noise_level = args.noise_level * 1.5
    val_noise_level = args.noise_level * 0.5

    # Same asymmetry for the cross-record noise.
    train_cross_record_noise = args.cross_record_noise * 1.5
    val_cross_record_noise = args.cross_record_noise * 0.5

    print(f"应用带通滤波器 (0.5-50Hz @ 250Hz)")
    print(f"训练数据噪声水平: {train_noise_level:.3f}, 验证数据噪声水平: {val_noise_level:.3f}")
    print(f"训练数据跨记录噪声: {train_cross_record_noise:.3f}, 验证数据跨记录噪声: {val_cross_record_noise:.3f}")

    # Training split: overlapping windows with augmentation and class balancing.
    train_dataset = EEGDataset(
        folder_path=train_data_dir,
        dataset_type='train',
        window_size=args.window_size,
        step_size=args.step_size,
        balance_classes=True,                  # always balance the training split
        augmentation=args.use_augmentation,
        minority_ratio=1.2,                    # oversample the minority class
        noise_level=train_noise_level,
        time_shift_max=args.time_shift_max,
        scaling_range=(0.6, 1.4),              # wider perturbation for training
        file_list=train_files_list,
        cross_record_validation=True,
        cross_record_noise=train_cross_record_noise,
        record_split_ratio=(0.6, 0.2, 0.2)     # 6:2:2 time split per recording
    )

    # Validation split: half-overlapping windows, no augmentation.
    val_dataset = EEGDataset(
        folder_path=train_data_dir,
        dataset_type='val',
        window_size=args.window_size,
        step_size=args.window_size // 2,       # 50% overlap for more samples
        balance_classes=True,
        augmentation=False,
        noise_level=val_noise_level,
        file_list=val_files_list,
        cross_record_validation=True,
        cross_record_noise=val_cross_record_noise,
        record_split_ratio=(0.6, 0.2, 0.2)
    )

    # Test split: non-overlapping windows, no augmentation, no added noise.
    # Load from the external directory only when its files are actually used.
    test_folder = test_data_dir if test_files else train_data_dir
    test_dataset = EEGDataset(
        folder_path=test_folder,
        dataset_type='test',
        window_size=args.window_size,
        step_size=args.window_size,            # non-overlapping windows
        balance_classes=True,
        augmentation=False,
        noise_level=0.0,
        file_list=test_files_list,
        cross_record_validation=True,
        record_split_ratio=(0.6, 0.2, 0.2)
    )

    # Fail fast on empty splits.
    if len(train_dataset) == 0:
        raise ValueError("训练集为空！请检查数据路径和文件是否存在。")

    if len(val_dataset) == 0:
        raise ValueError("验证集为空！请检查数据路径和文件是否存在。")

    if len(test_dataset) == 0:
        raise ValueError("测试集为空！请检查数据路径和文件是否存在。")

    print(f"数据集大小:")
    print(f"  训练集: {len(train_dataset)} 样本")
    print(f"  验证集: {len(val_dataset)} 样本")
    print(f"  测试集: {len(test_dataset)} 样本")

    # Wrap the datasets in DataLoaders; only training is shuffled.
    train_loader = DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=0,
        pin_memory=True
    )

    val_loader = DataLoader(
        val_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=0,
        pin_memory=True
    )

    test_loader = DataLoader(
        test_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=0,
        pin_memory=True
    )

    return train_loader, val_loader, test_loader

class EEGDataset(torch.utils.data.Dataset):
    """
    EEG dataset that loads .mat recordings, slices them into fixed-size
    windows, and optionally augments and class-balances the result.

    NOTE(review): this module-level class shadows the ``EEGDataset`` imported
    from ``test.load.EEGDataset`` at the top of the file -- any code executed
    after this definition (e.g. ``get_loaders``) resolves the name to THIS
    class. Confirm which implementation is intended.
    """
    def __init__(self, folder_path, dataset_type='train', window_size=500, step_size=500,
                 balance_classes=True, augmentation=False, augmentation_methods=None,
                 minority_ratio=1.0, noise_level=0.05, time_shift_max=0.1,
                 scaling_range=(0.8, 1.2), file_list=None, max_samples_per_file=None,
                 cross_record_validation=False, cross_record_noise=0.0, record_split_ratio=(0.6, 0.2, 0.2)):
        """
        Initialize the EEG dataset.

        Args:
            folder_path: directory containing the .mat files.
            dataset_type: which split this instance represents
                ('train', 'val', 'test').
            window_size: number of time points per window.
            step_size: stride between consecutive window starts.
            balance_classes: whether to balance class counts
                (skipped for the 'test' split).
            augmentation: whether to apply augmentation when oversampling
                the minority class.
            augmentation_methods: list of augmentation method names; defaults
                to ['noise', 'shift', 'scale'].
            minority_ratio: target minority-class count as a fraction of the
                majority-class count.
            noise_level: relative std of the gaussian noise used by the
                'noise' augmentation.
            time_shift_max: maximum time shift as a fraction of the window.
            scaling_range: (low, high) multiplier range for 'scale'.
            file_list: explicit list of file names to load; None loads all
                .mat files in folder_path.
            max_samples_per_file: cap on windows per file (only enforced in
                the non-cross-record branch of _segment_data).
            cross_record_validation: if True, split each recording in time
                according to record_split_ratio.
            cross_record_noise: noise level applied in cross-record mode.
            record_split_ratio: (train, val, test) time fractions per recording.
        """
        self.folder_path = folder_path
        self.dataset_type = dataset_type
        self.window_size = window_size
        self.step_size = step_size
        self.balance_classes = balance_classes
        self.augmentation = augmentation
        self.augmentation_methods = augmentation_methods or ['noise', 'shift', 'scale']
        self.minority_ratio = minority_ratio
        self.noise_level = noise_level
        self.time_shift_max = time_shift_max
        self.scaling_range = scaling_range
        self.max_samples_per_file = max_samples_per_file
        self.cross_record_validation = cross_record_validation
        self.cross_record_noise = cross_record_noise
        self.record_split_ratio = record_split_ratio

        # Parallel lists, filled by _load_files; converted to np.ndarray below.
        self.data = []
        self.labels = []
        self.subject_ids = []  # subject id (file stem) for each sample

        # Load either the given file list or every .mat file in the folder.
        if file_list:
            print(f"从 {folder_path} 加载指定的 {len(file_list)} 个文件...")
            print(f"使用指定的文件列表: {file_list}")
            self._load_files(file_list)
        else:
            all_files = [f for f in os.listdir(folder_path) if f.endswith('.mat')]
            print(f"从 {folder_path} 加载 {len(all_files)} 个文件...")
            self._load_files(all_files)

        # Bail out early if nothing was loaded.
        if not self.data:
            print(f"警告: 没有加载到任何数据!")
            return

        # Report the raw class distribution.
        unique, counts = np.unique(self.labels, return_counts=True)
        class_distribution = dict(zip(unique, counts))
        print(f"原始类别分布: {class_distribution}")

        # Balance classes for train/val splits only; the test split keeps
        # its natural distribution... except that get_loaders passes
        # balance_classes=True for test as well -- this guard still skips it.
        if balance_classes and dataset_type != 'test':
            self._balance_classes()

            unique, counts = np.unique(self.labels, return_counts=True)
            new_class_distribution = dict(zip(unique, counts))
            print(f"处理后类别分布: {new_class_distribution}")

        # Convert to arrays for fast indexing in __getitem__.
        self.data = np.array(self.data)
        self.labels = np.array(self.labels)

        print(f"数据集大小: {len(self.data)}")

    def _load_files(self, file_list):
        """Load each .mat file, window it, and append data/labels/subject ids."""
        for file_name in file_list:
            file_path = os.path.join(self.folder_path, file_name)

            try:
                mat_data = scipy.io.loadmat(file_path)

                # The .mat file is expected to store the EEG under 'data' and
                # the class label under 'label'; files missing either key are
                # silently skipped.
                if 'data' in mat_data and 'label' in mat_data:
                    eeg_data = mat_data['data']
                    # Coerce the label to a scalar int.
                    # NOTE(review): .item() assumes a single-element array --
                    # confirm the .mat label layout.
                    if isinstance(mat_data['label'], np.ndarray):
                        label = int(mat_data['label'].item())
                    else:
                        label = int(mat_data['label'])

                    # Subject id = file name without the .mat extension.
                    subject_id = file_name.replace('.mat', '')

                    # Slice the recording into windows.
                    segments, segment_times = self._segment_data(eeg_data, self.window_size, self.step_size)

                    # One (data, label, subject) entry per window.
                    for segment in segments:
                        self.data.append(segment)
                        self.labels.append(label)
                        self.subject_ids.append(subject_id)

                    print(f"已加载文件: {file_name}, 标签: {label}")
            except Exception as e:
                print(f"加载文件 {file_name} 时出错: {str(e)}")

    def _segment_data(self, data, window_size, step_size):
        """
        Slice an EEG recording into fixed-size windows.

        Args:
            data: EEG array, [channels, time] (or [1, channels, time],
                which is squeezed).
            window_size: window length in time points.
            step_size: stride between window starts.

        Returns:
            segments: list of windows shaped [channels, 1, window_size].
            segment_times: start index of each window.

        Notes:
            In cross-record-validation mode the noise level is driven by
            cross_record_noise:
            - train uses a higher level to make training harder (anti-overfit),
            - val a lower level to narrow the train/val accuracy gap,
            - test a medium level to mimic real-world conditions.
        """
        segments = []
        segment_times = []

        # Squeeze a leading singleton dimension: [1, channels, time] -> [channels, time].
        if len(data.shape) == 3 and data.shape[0] == 1:
            data = data[0]

        # Require a 2-D [channels, time] layout from here on.
        if len(data.shape) == 2:
            channels, time_points = data.shape
        else:
            raise ValueError(f"数据格式错误: {data.shape}")

        # Choose window start positions according to the validation mode.
        if self.cross_record_validation:
            # Cross-record mode: split the recording in time per record_split_ratio.
            train_ratio, val_ratio, test_ratio = self.record_split_ratio

            # Boundaries of the train and validation time ranges.
            train_end = int(time_points * train_ratio)
            val_end = int(time_points * (train_ratio + val_ratio))

            if self.dataset_type == 'train':
                # Training windows come from the first train_ratio of the recording.
                start_indices = list(range(0, train_end - window_size + 1, step_size))

                # Randomly subsample start positions to add variety.
                if len(start_indices) > 10:
                    # Keep ~50% of the candidates (at least 3).
                    chosen_count = max(3, len(start_indices) // 2)
                    start_indices = sorted(random.sample(start_indices, chosen_count))

            elif self.dataset_type == 'val':
                # Validation windows come from the middle slice.
                start_indices = list(range(train_end, val_end - window_size + 1, step_size))

            elif self.dataset_type == 'test':
                # Test windows come from the final slice.
                start_indices = list(range(val_end, time_points - window_size + 1, step_size))
            else:
                raise ValueError(f"不支持的数据集类型: {self.dataset_type}")

            # Inject some noise so that near-identical segments from the same
            # recording cannot yield a perfect accuracy.
            if self.dataset_type == 'train' or self.dataset_type == 'val':
                # Use the caller-supplied level when positive, else a default.
                noise_level = self.cross_record_noise if self.cross_record_noise > 0 else 0.08
            else:
                noise_level = self.cross_record_noise * 0.5 if self.cross_record_noise > 0 else 0.03

        else:
            # Original cross-subject mode: fixed percentage windows.
            if self.dataset_type == 'train':
                # Sliding windows over the first 70% of the recording.
                train_end_point = int(time_points * 0.7)
                start_indices = list(range(0, train_end_point - window_size + 1, step_size))

                # Cap windows per file so long recordings do not dominate.
                if self.max_samples_per_file and len(start_indices) > self.max_samples_per_file:
                    start_indices = np.random.choice(start_indices, self.max_samples_per_file, replace=False)

            elif self.dataset_type == 'val':
                # Validation uses the 65%-90% region with half-window stride.
                val_start_point = int(time_points * 0.65)
                val_end_point = int(time_points * 0.9)

                # Half-window stride to increase the sample count.
                val_step_size = window_size // 2

                # NOTE(review): the 65% start overlaps the 70% training region
                # by 5% -- confirm whether that leakage is intended.
                start_indices = list(range(val_start_point, val_end_point - window_size + 1, val_step_size))

                # Cap validation windows per file.
                if self.max_samples_per_file and len(start_indices) > self.max_samples_per_file:
                    start_indices = np.random.choice(start_indices, self.max_samples_per_file, replace=False)

            elif self.dataset_type == 'test':
                # Test uses non-overlapping windows over the final 20%.
                test_start_point = int(time_points * 0.8)

                start_indices = list(range(test_start_point, time_points - window_size + 1, window_size))

                # Cap test windows per file.
                if self.max_samples_per_file and len(start_indices) > self.max_samples_per_file:
                    start_indices = np.random.choice(start_indices, self.max_samples_per_file, replace=False)
            else:
                raise ValueError(f"不支持的数据集类型: {self.dataset_type}")

            # Lower default noise level in this mode.
            noise_level = 0.05

        # Fallback: guarantee at least one window per recording.
        if not start_indices:
            if self.dataset_type == 'train':
                # First quarter of the feasible range.
                midpoint = (time_points - window_size) // 2
                start_indices = [midpoint // 2]
            elif self.dataset_type == 'val':
                # Middle of the feasible range.
                midpoint = (time_points - window_size) // 2
                start_indices = [midpoint]
            else:  # 'test'
                # Third quarter of the feasible range.
                midpoint = (time_points - window_size) // 2
                start_indices = [midpoint + midpoint // 2]

        # Cut the windows.
        for start_idx in start_indices:
            end_idx = start_idx + window_size
            if end_idx <= time_points:
                segment = data[:, start_idx:end_idx].copy()  # copy so noise does not touch the source

                # Add gaussian noise scaled by the segment's own std -- this
                # matters most in cross-record mode, where segments from the
                # same subject can be nearly identical.
                if noise_level > 0:
                    signal_std = np.std(segment)
                    noise = np.random.normal(0, signal_std * noise_level, segment.shape)
                    segment = segment + noise

                # Quality check via z-scores: center by the mean, scale by the
                # std (+1e-8 guards against division by zero), then drop any
                # segment containing an extreme outlier (|z| > 10).
                z_scores = (segment - np.mean(segment)) / (np.std(segment) + 1e-8)
                if np.max(np.abs(z_scores)) > 10.0:
                    continue  # skip this segment

                # Reshape to [channels, 1, time].
                segment = np.expand_dims(segment, axis=1)

                segments.append(segment)
                segment_times.append(start_idx)

        return segments, segment_times

    def _balance_classes(self):
        """
        Balance class sample counts by oversampling the minority class
        (optionally with augmentation).  Must run while self.data/labels
        are still lists (before the np.array conversion in __init__).
        """
        # Count samples per class.
        unique_labels, counts = np.unique(self.labels, return_counts=True)
        class_counts = dict(zip(unique_labels, counts))

        if len(unique_labels) <= 1:
            print("警告: 只有一个类别，无法进行类别平衡")
            return

        # Identify the majority and minority class labels.
        majority_class = max(class_counts, key=class_counts.get)
        minority_class = min(class_counts, key=class_counts.get)

        # Target minority count relative to the majority count.
        target_count = int(class_counts[majority_class] * self.minority_ratio)

        # Nothing to do if the minority class already meets the target.
        if class_counts[minority_class] >= target_count:
            return

        # Indices of minority-class samples to draw from.
        minority_indices = [i for i, label in enumerate(self.labels) if label == minority_class]

        # How many new samples to synthesize.
        num_to_add = target_count - class_counts[minority_class]

        added_data = []
        added_labels = []
        added_subject_ids = []

        # Draw minority samples at random (with replacement) and duplicate
        # or augment them.
        for _ in range(num_to_add):
            idx = np.random.choice(minority_indices)
            sample = self.data[idx]
            label = self.labels[idx]
            subject_id = self.subject_ids[idx]

            if self.augmentation:
                augmented_sample = self._apply_augmentation(sample)
                added_data.append(augmented_sample)
                added_labels.append(label)
                added_subject_ids.append(subject_id)
            else:
                # Plain duplication when augmentation is disabled.
                added_data.append(sample)
                added_labels.append(label)
                added_subject_ids.append(subject_id)

        # Append the synthesized samples.
        self.data.extend(added_data)
        self.labels.extend(added_labels)
        self.subject_ids.extend(added_subject_ids)

    def _apply_augmentation(self, sample):
        """
        Apply a random subset of the configured augmentation methods.

        Args:
            sample: input window shaped [channels, 1, time].

        Returns:
            The augmented copy of the sample (the original is untouched).
        """
        # Work on a copy so the original sample is preserved.
        augmented = sample.copy()

        # Pick a random non-empty subset of the configured methods.
        methods_to_apply = np.random.choice(self.augmentation_methods,
                                           size=np.random.randint(1, len(self.augmentation_methods) + 1),
                                           replace=False)

        for method in methods_to_apply:
            if method == 'noise':
                # Additive gaussian noise.
                noise = np.random.normal(0, self.noise_level, augmented.shape)
                augmented = augmented + noise
            elif method == 'shift':
                # Circular-ish time shift via zero padding + crop.
                max_shift = int(self.time_shift_max * augmented.shape[2])
                if max_shift > 0:
                    shift = np.random.randint(-max_shift, max_shift + 1)
                    if shift > 0:
                        augmented = np.pad(augmented, ((0, 0), (0, 0), (shift, 0)),
                                          mode='constant')[:, :, :-shift]
                    elif shift < 0:
                        augmented = np.pad(augmented, ((0, 0), (0, 0), (0, -shift)),
                                          mode='constant')[:, :, -shift:]
            elif method == 'scale':
                # Uniform amplitude scaling.
                scale = np.random.uniform(self.scaling_range[0], self.scaling_range[1])
                augmented = augmented * scale
            elif method == 'hf_conv':
                # High-frequency emphasis via a simple 1-D high-pass kernel,
                # applied per channel (1-D to avoid indexing errors).
                kernel_1d = np.array([-0.25, 0.5, -0.25])
                result = np.zeros_like(augmented)

                for c in range(augmented.shape[0]):
                    # Reflect-pad and convolve each channel by hand.
                    signal = augmented[c, 0]
                    padded_signal = np.pad(signal, 1, mode='reflect')
                    for i in range(1, len(signal) + 1):
                        result[c, 0, i-1] = np.sum(padded_signal[i-1:i+2] * kernel_1d)

                # Mix the high-pass response back into the original.
                augmented = augmented + 0.2 * result
            elif method == 'lf_modulation':
                # Low-frequency drift: add one sine period scaled to the
                # signal's mean amplitude.
                t = np.linspace(0, 2*np.pi, augmented.shape[2])
                low_freq = np.sin(t) * 0.2 * np.mean(np.abs(augmented))
                for c in range(augmented.shape[0]):
                    augmented[c, 0] = augmented[c, 0] + low_freq

        return augmented

    def __len__(self):
        """Return the number of windows in the dataset."""
        return len(self.data)

    def __getitem__(self, idx):
        """Return (window tensor [channels, 1, time], label tensor) at idx."""
        data = torch.tensor(self.data[idx], dtype=torch.float)
        label = torch.tensor(self.labels[idx], dtype=torch.long)

        return data, label

if __name__ == "__main__":
    # Script entry point: run the full training pipeline defined in main().
    main()

# import torch
# print(torch.__version__)  # 应该输出 2.0.1
# print(torch.cuda.is_available())  # 检查GPU是否可用