import pandas as pd
import torch
import numpy as np
import os
import time
import wandb
from tqdm import tqdm
from scipy import stats
from training.utils.metrics_util import calculate_metrics
from training.utils.data_util import optimize_tensor_transfer, create_prefetcher
from training.utils.checkpoint_util import load_checkpoint, save_checkpoint
from training.utils.config_util import get_lr_scheduler
import torch.distributed as dist


class Trainer:
    def __init__(self, model, criterion, optimizer, config, fold_id, load_checkpoint=False, scheduler=None):
        """Set up training state: device placement (DDP / DataParallel / CPU),
        domain-adaptation options, mixed precision, and metric bookkeeping.

        Args:
            model: network to train.
            criterion: task loss module.
            optimizer: torch optimizer over the model parameters.
            config: nested configuration dict (expects 'trainer' and 'n_gpu'
                keys; optional 'domain_adaptation' and 'use_amp').
            fold_id: cross-validation fold index (used in save paths/logs).
            load_checkpoint: when True, resume from the fold's checkpoint.
            scheduler: optional pre-built LR scheduler manager; when None one
                is created from the config.
        """
        self.model = model
        self.criterion = criterion
        self.optimizer = optimizer
        self.config = config
        self.fold_id = fold_id

        # Use the supplied scheduler manager, or build one from the config.
        self.scheduler_manager = scheduler if scheduler is not None else get_lr_scheduler(optimizer, config)

        # Per-fold save directory.
        self.save_dir = os.path.join(config['trainer']['save_dir'], f'fold_{fold_id}')
        os.makedirs(self.save_dir, exist_ok=True)

        # When not resuming, initialise the scheduler state to avoid a
        # warning on the very first training epoch.
        if not load_checkpoint and self.scheduler_manager is not None:
            self._adjust_scheduler_state(0)

        # Domain-adaptation parameters.
        da_cfg = config.get('domain_adaptation', {})
        self.use_domain_adaptation = da_cfg.get('enabled', False)
        self.domain_loss_weight = da_cfg.get('loss_weight', 0.1)
        self.domain_criterion = torch.nn.CrossEntropyLoss()
        self.lambda_scheduler = da_cfg.get('lambda_scheduler', 'constant')
        # Domain-classifier accuracy, consumed by the 'dynamic' lambda
        # strategy; None when domain adaptation is disabled.
        self.domain_accuracy = 0 if self.use_domain_adaptation else None

        # Dynamic domain-loss-weight adjustment parameters.
        self.weight_adjustment_strategy = da_cfg.get('weight_adjustment', 'constant')
        self.initial_weight = self.domain_loss_weight
        self.min_weight = da_cfg.get('min_weight', 0.01)
        self.max_weight = da_cfg.get('max_weight', 1.0)

        # Global checkpoint directory (shared across folds).
        self.global_checkpoint_dir = config['trainer']['save_dir']

        # Number of batches to accumulate before each optimizer step.
        self.gradient_accumulation_steps = config.get('trainer', {}).get('gradient_accumulation_steps', 1)

        # Device configuration.
        self.is_distributed = False
        self.local_rank = 0

        if 'LOCAL_RANK' in os.environ:
            # Distributed environment (one process per GPU, e.g. torchrun).
            self.is_distributed = True
            self.local_rank = int(os.environ.get('LOCAL_RANK', 0))
            print(f"分布式训练环境检测到，local_rank={self.local_rank}")
            # Pin this process to its GPU before moving the model.
            torch.cuda.set_device(self.local_rank)
            self.device = f"cuda:{self.local_rank}"
            self.model = self.model.to(self.device)
            # Wrap the model with DDP after it is on the right device.
            from torch.nn.parallel import DistributedDataParallel as DDP
            self.model = DDP(
                self.model,
                device_ids=[self.local_rank],
                output_device=self.local_rank,
                find_unused_parameters=False
            )
            # Total number of participating GPUs in the process group.
            self.num_gpus = dist.get_world_size() if dist.is_initialized() else 1
            print(f"模型已使用DDP包装，使用GPU {self.local_rank}/{self.num_gpus}")
        elif torch.cuda.is_available():
            # Single-process mode; optionally multi-GPU via DataParallel.
            self.device = torch.device("cuda")
            gpu_count = torch.cuda.device_count()
            # BUG FIX: num_gpus used to stay undefined when config['n_gpu']
            # was 0; default to the single current device in that case.
            self.num_gpus = 1
            if config['n_gpu'] == -1 or config['n_gpu'] > gpu_count:
                # Use every available GPU.
                self.num_gpus = gpu_count
                print(f"使用所有可用GPU ({gpu_count}个)")
                if gpu_count > 1:
                    # Explicit device_ids so every GPU participates.
                    device_ids = list(range(self.num_gpus))
                    self.model = torch.nn.DataParallel(self.model, device_ids=device_ids)
                    print(f"DataParallel已配置，使用GPU设备: {device_ids}")
            elif config['n_gpu'] > 0:
                # Use exactly the requested number of GPUs.
                self.num_gpus = config['n_gpu']
                print(f"使用指定数量的GPU: {self.num_gpus}个")
                if self.num_gpus > 1:
                    device_ids = list(range(self.num_gpus))
                    self.model = torch.nn.DataParallel(self.model, device_ids=device_ids)
                    print(f"DataParallel已配置，使用GPU设备: {device_ids}")
        else:
            # No CUDA available: fall back to CPU.
            self.device = torch.device("cpu")
            self.num_gpus = 0
            print("使用CPU进行训练")

        # Move the model and loss functions to the selected device.
        self.model = self.model.to(self.device)
        self.criterion = self.criterion.to(self.device)
        if self.use_domain_adaptation:
            self.domain_criterion = self.domain_criterion.to(self.device)

        # BUG FIX: this device report (and its CPU fallback) was previously
        # gated on use_domain_adaptation, which silently forced CPU mode and
        # num_gpus=0 whenever domain adaptation was disabled.
        if torch.cuda.is_available() and self.num_gpus > 0:
            current_gpu = torch.cuda.current_device()
            gpu_name = torch.cuda.get_device_name(current_gpu)
            print(f"使用设备: CUDA (当前GPU: {gpu_name})")
            print(f"CUDA 版本: {torch.version.cuda}")
        else:
            print("使用设备: CPU")

        # Mixed-precision (AMP) configuration.
        self.use_amp = config.get('use_amp', False)
        # BUG FIX: scaler must always be defined; the old code left it unset
        # when GradScaler was unavailable, crashing later checkpoint logic.
        self.scaler = None
        if self.use_amp and torch.cuda.is_available():
            if hasattr(torch.cuda.amp, 'GradScaler'):
                self.scaler = torch.cuda.amp.GradScaler()
                print(f"启用混合精度训练 (AMP)")
            else:
                self.use_amp = False
                print("当前PyTorch版本不支持混合精度训练，已禁用")

        # Training state / best-so-far metrics.
        self.best_acc = 0.0
        self.best_f1 = 0.0
        self.best_kappa = 0.0
        self.best_loss = float('inf')  # lower is better, so start at +inf
        self.best_cm = None
        self.best_per_class = None
        self.epoch = 0
        self.train_history = []
        # Best metrics observed on the target domain.
        self.best_target_acc = 0.0
        self.best_target_f1 = 0.0
        self.best_target_kappa = 0.0
        self.best_target_cm = None

        # Early-stopping configuration (0 disables early stopping).
        self.early_stop = self.config['trainer'].get('early_stop', 0)
        self.no_improvement_count = 0  # epochs without improvement

        # Resume only when explicitly requested.
        if load_checkpoint:
            self.load_checkpoint()

    def load_checkpoint(self):
        """Restore this fold's training state from the global checkpoint.

        Returns:
            bool: True when a checkpoint existed and was restored.
        """
        (epoch, best_acc, best_f1, best_kappa, best_cm, best_per_class,
         no_improvement_count, history, scaler_state, success) = load_checkpoint(
            self.model, self.optimizer, self.config, self.fold_id, self.scaler
        )

        if not success:
            # Fresh fold: put the LR scheduler manager in its initial state.
            self._adjust_scheduler_state(0)
            return success

        self.epoch = epoch
        self.best_acc = best_acc
        self.best_f1 = best_f1
        self.best_kappa = best_kappa
        self.best_cm = best_cm
        self.best_per_class = best_per_class
        self.no_improvement_count = no_improvement_count
        self.train_history = history
        # Restore AMP scaler state when both sides have one.
        if scaler_state is not None and self.scaler is not None:
            self.scaler.load_state_dict(scaler_state)

        # Fast-forward the LR scheduler manager to the restored epoch.
        self._adjust_scheduler_state(epoch)
        return success

    def save_checkpoint(self, is_best=False):
        """Persist the current training state through checkpoint_util.

        Args:
            is_best: when True, also mark this checkpoint as the best model.
        """
        # Capture the AMP scaler state only when mixed precision is active.
        scaler_state = None if self.scaler is None else self.scaler.state_dict()

        save_checkpoint(
            self.model, self.optimizer, self.config, self.fold_id,
            self.epoch, self.best_acc, self.best_f1, self.best_kappa,
            self.best_cm, self.best_per_class, self.no_improvement_count,
            self.train_history, is_best, scaler_state,
        )

    def _get_domain_lambda(self, epoch, max_epochs):
        """Compute the domain-adaptation weight (lambda) for the current epoch.

        Supported schedules: 'constant', 'linear', 'exp', 'sigmoid', and
        'dynamic' (driven by the domain-classifier accuracy). Unknown
        schedule names fall back to 1.0.

        Args:
            epoch: current epoch index.
            max_epochs: total number of training epochs.

        Returns:
            float: lambda in [0, 1].
        """
        if self.lambda_scheduler == 'constant':
            return 1.0
        elif self.lambda_scheduler == 'linear':
            return epoch / max_epochs
        elif self.lambda_scheduler == 'exp':
            return 1 - np.exp(-epoch / max_epochs * 10)
        elif self.lambda_scheduler == 'sigmoid':
            return 2.0 / (1 + np.exp(-10 * epoch / max_epochs)) - 1
        elif self.lambda_scheduler == 'dynamic':
            # Dynamic strategy: push lambda up as the domain classifier
            # approaches 50% accuracy (features becoming domain-invariant).
            # BUG FIX: domain_accuracy is None whenever domain adaptation is
            # disabled, and the old hasattr() check (always True, the
            # attribute is set in __init__) did not guard against that,
            # raising TypeError on abs(None - 0.5).
            if getattr(self, 'domain_accuracy', None) is not None:
                return min(1.0, 0.1 + (0.5 - abs(self.domain_accuracy - 0.5)) * 2)
            return 0.5
        else:
            return 1.0

    def _adjust_loss_weight(self, epoch, total_epochs, task_performance=None):
        """Dynamically adjust self.domain_loss_weight.

        Strategies:
            'constant'          — keep the initial weight.
            'scheduled'         — decay to 0.5x over the first half of
                                  training, then ramp back to 1.0x.
            'performance_based' — scale the weight up when the task metric
                                  is strong (>0.7) and down when it is weak.

        Args:
            epoch: current epoch index.
            total_epochs: total number of epochs.
            task_performance: optional task metric in [0, 1] (e.g. accuracy).
        """
        if self.weight_adjustment_strategy == 'constant':
            return  # keep the initial weight unchanged

        if self.weight_adjustment_strategy == 'scheduled':
            half = total_epochs / 2
            if epoch < half:
                factor = 1 - (epoch / half) * 0.5
            else:
                factor = 0.5 + ((epoch - half) / half) * 0.5
        elif self.weight_adjustment_strategy == 'performance_based':
            if task_performance is None:
                return  # nothing to base the adjustment on
            # 0.7 is the break-even point between boosting and shrinking.
            if task_performance > 0.7:
                # Strong task performance: allow more domain adaptation.
                factor = 1.0 + min(0.5, (task_performance - 0.7) * 2)
            else:
                # Weak task performance: back off the domain loss.
                factor = max(0.5, task_performance / 0.7)
        else:
            # BUG FIX: unknown strategies previously fell through every
            # branch and raised UnboundLocalError on 'factor'; treat them
            # as 'constant' instead.
            return

        # Apply the factor, clamped to the configured [min, max] range.
        new_weight = self.initial_weight * factor
        self.domain_loss_weight = max(self.min_weight, min(self.max_weight, new_weight))
        
    def compute_mmd(self, source_features, target_features, kernel_type='rbf'):
        """计算Maximum Mean Discrepancy (MMD)来衡量两个分布之间的差异
        
        参数:
            source_features (torch.Tensor): 源域特征
            target_features (torch.Tensor): 目标域特征
            kernel_type (str): 核函数类型，可选 'rbf' 或 'linear'
        
        返回:
            torch.Tensor: MMD值
        """
        n = source_features.size(0)
        m = target_features.size(0)
        
        # 合并所有特征
        features = torch.cat([source_features, target_features], dim=0)
        
        # 计算核矩阵
        if kernel_type == 'rbf':
            # RBF核: exp(-||x-y||^2 / (2*sigma^2))
            sigma = 1.0  # 可以根据实际情况调整
            dist_matrix = torch.cdist(features, features, p=2) ** 2
            kernel_matrix = torch.exp(-dist_matrix / (2 * sigma ** 2))
        else:
            # Linear核: x^T y
            kernel_matrix = features @ features.t()
        
        # 提取源域-源域、目标域-目标域和源域-目标域的核值
        k_ss = kernel_matrix[:n, :n]
        k_tt = kernel_matrix[n:, n:]
        k_st = kernel_matrix[:n, n:]
        
        # 计算MMD
        mmd = k_ss.sum() / (n * n) + k_tt.sum() / (m * m) - 2 * k_st.sum() / (n * m)
        
        return mmd
        
    def _process_batch_data(self, batch, is_domain_data=False):
        """
        统一处理批次数据，处理不同数据格式并移至设备
        假设数据已经在DataLoader中转换为正确的张量类型
        
        参数:
            batch: 数据批次，可以是字典或元组
            is_domain_data: 是否为领域数据（不需要标签）
            
        返回:
            处理后的数据和标签（如果不是领域数据）
        """
        # 从不同格式的batch中提取数据
        if isinstance(batch, dict):
            data = batch['data']
            target = batch['label'] if not is_domain_data else None
        else:
            # 处理元组格式
            if is_domain_data:
                data = batch[0] if len(batch) > 0 else batch
                target = None
            else:
                data = batch[0]
                target = batch[1]
        
        # 简化的类型检查，主要用于调试
        if not isinstance(data, torch.Tensor):
            raise TypeError(f"Expected torch.Tensor data, got {type(data).__name__}")
        
        if target is not None and not isinstance(target, torch.Tensor):
            raise TypeError(f"Expected torch.Tensor target, got {type(target).__name__}")
        
        # 将数据移至设备
        data, target = self._move_to_device(data, target)
        
        return data, target
    
    def train_epoch(self, data_loader, domain_loader=None):
        """Run one training epoch, optionally with adversarial domain adaptation.

        Args:
            data_loader: source-domain (labelled) training loader.
            domain_loader: optional target-domain (unlabelled) loader used
                for the domain-adversarial loss.

        Returns:
            dict: epoch-level training metrics (loss, accuracy, f1_score,
            kappa, per_class_f1, time), plus 'domain_loss' and 'lambda'
            when domain adaptation is active.
        """
        self.model.train()
        total_task_loss = 0
        total_domain_loss = 0 if self.use_domain_adaptation else None
        # NOTE(review): total_mmd_loss is never incremented anywhere in this
        # method, so the 'mmd_loss' entry below is effectively dead code —
        # confirm whether compute_mmd was meant to be wired in here.
        total_mmd_loss = 0
        correct = 0
        total = 0
        all_preds = []
        all_targets = []

        start_time = time.time()
        
        # Iterator over target-domain batches (restarted when exhausted).
        domain_iter = None
        if self.use_domain_adaptation and domain_loader is not None:
            domain_iter = iter(domain_loader)
        
        # Domain-adaptation weight for this epoch (see _get_domain_lambda).
        lambda_val = self._get_domain_lambda(self.epoch, self.config['trainer']['epochs'])
        
        # Shared per-batch forward/loss logic for both the AMP and the
        # regular training loops below.
        def process_and_train_batch(batch, i):
            nonlocal correct, total, total_task_loss, total_domain_loss, total_mmd_loss
            nonlocal all_preds, all_targets
            nonlocal domain_iter
            
            # Gradient accumulation: clear grads only at the start of a cycle.
            if i % self.gradient_accumulation_steps == 0:
                self.optimizer.zero_grad()
            
            # Unpack the source batch and move it to the device.
            data, target = self._process_batch_data(batch)
            
            if self.use_domain_adaptation and domain_iter is not None:
                try:
                    # Fetch and unpack the next target-domain batch.
                    domain_batch = next(domain_iter)
                    domain_data, _ = self._process_batch_data(domain_batch, is_domain_data=True)
                except (StopIteration, TypeError):
                    # Target loader exhausted: restart the iterator.
                    domain_iter = iter(domain_loader)
                    domain_batch = next(domain_iter)
                    domain_data, _ = self._process_batch_data(domain_batch, is_domain_data=True)
                
                # Domain labels: 0 for source samples, 1 for target samples.
                source_domain_labels = torch.zeros(len(data) if isinstance(data, torch.Tensor) else len(data[0]), 
                                                  device=self.device, dtype=torch.long)
                target_domain_labels = torch.ones(len(domain_data) if isinstance(domain_data, torch.Tensor) else len(domain_data[0]), 
                                                 device=self.device, dtype=torch.long)
                
                # Reorder dims from [batch, seq_len, channels] to
                # [batch, channels, seq_len] for single-channel 3D inputs.
                if isinstance(data, torch.Tensor) and data.dim() == 3 and data.shape[2] == 1:
                    data = data.permute(0, 2, 1)
                if isinstance(domain_data, torch.Tensor) and domain_data.dim() == 3 and domain_data.shape[2] == 1:
                    domain_data = domain_data.permute(0, 2, 1)
                
                # Single forward pass per domain to avoid gradient-version
                # mismatches; the model returns (task_pred, domain_pred).
                task_pred, source_domain_pred = self.model(data, lambda_val)
                task_loss = self.criterion(task_pred, target)
                
                # One forward pass on the target batch (task head unused).
                _, target_domain_pred = self.model(domain_data, lambda_val)
                
                # Adversarial domain loss, averaged over both domains.
                source_d_loss = self.domain_criterion(source_domain_pred, source_domain_labels)
                target_d_loss = self.domain_criterion(target_domain_pred, target_domain_labels)
                domain_loss = (source_d_loss + target_d_loss) / 2
                
                # Track domain-classifier accuracy; this feeds the 'dynamic'
                # lambda strategy in _get_domain_lambda.
                source_correct = (source_domain_pred.argmax(dim=1) == source_domain_labels).sum().item()
                target_correct = (target_domain_pred.argmax(dim=1) == target_domain_labels).sum().item()
                domain_total = len(source_domain_labels) + len(target_domain_labels)
                self.domain_accuracy = (source_correct + target_correct) / domain_total
                
                # Joint objective: task loss plus the weighted domain loss.
                loss = task_loss + self.domain_loss_weight * lambda_val * domain_loss
                
                total_domain_loss += domain_loss.item()
            else:
                # Plain supervised training (no domain adaptation).
                # NOTE(review): the model is still expected to return a
                # 2-tuple here — confirm non-DA models honour this contract.
                task_pred, _ = self.model(data)
                task_loss = self.criterion(task_pred, target)
                loss = task_loss
            
            total_task_loss += task_loss.item()
            
            # Classification statistics for the epoch metrics.
            _, predicted = task_pred.max(1)
            total += target.size(0)
            correct += predicted.eq(target).sum().item()
            
            all_preds.extend(predicted.cpu().numpy())
            all_targets.extend(target.cpu().numpy())
            
            return loss
        
        # AMP and non-AMP paths share the same accumulation/step structure.
        if self.use_amp and self.scaler is not None:
            # Prefetcher overlaps host-to-device copies with compute.
            prefetcher = create_prefetcher(data_loader, self.device)
            batch = prefetcher.next()
            i = 0
            
            with tqdm(total=len(data_loader), desc=f"Fold {self.fold_id} Epoch {self.epoch+1} Train") as pbar:
                while batch is not None:
                    with torch.cuda.amp.autocast():
                        loss = process_and_train_batch(batch, i)
                    
                    # Scale the loss down for gradient accumulation.
                    scaled_loss = loss / self.gradient_accumulation_steps
                    
                    # DDP: skip the gradient all-reduce on intermediate
                    # accumulation steps; synchronise only on the last one.
                    if self.is_distributed and (i + 1) % self.gradient_accumulation_steps != 0:
                        with self.model.no_sync():
                            self.scaler.scale(scaled_loss).backward()
                    else:
                        self.scaler.scale(scaled_loss).backward()
                    
                    # Step the optimizer only after a full accumulation cycle
                    # (or on the final batch of the epoch).
                    if (i + 1) % self.gradient_accumulation_steps == 0 or (i + 1) == len(data_loader):
                        self.scaler.step(self.optimizer)
                        self.scaler.update()
                        # Per-batch LR schedulers advance here.
                        if self.scheduler_manager is not None and self.scheduler_manager.needs_batch_update():
                            self.scheduler_manager.step_batch()
                    
                    pbar.update(1)
                    i += 1
                    batch = prefetcher.next()
        else:
            # Regular (full-precision) training loop.
            for i, batch in enumerate(tqdm(data_loader, desc=f"Fold {self.fold_id} Epoch {self.epoch+1} Train")):
                loss = process_and_train_batch(batch, i)
                
                # Scale the loss down for gradient accumulation.
                scaled_loss = loss / self.gradient_accumulation_steps
                
                # DDP: skip the gradient all-reduce on intermediate
                # accumulation steps; synchronise only on the last one.
                if self.is_distributed and (i + 1) % self.gradient_accumulation_steps != 0:
                    with self.model.no_sync():
                        scaled_loss.backward()
                else:
                    scaled_loss.backward()
                
                # Step the optimizer only after a full accumulation cycle
                # (or on the final batch of the epoch).
                if (i + 1) % self.gradient_accumulation_steps == 0 or (i + 1) == len(data_loader):
                    self.optimizer.step()
                    # Per-batch LR schedulers advance here.
                    if self.scheduler_manager is not None and self.scheduler_manager.needs_batch_update():
                        self.scheduler_manager.step_batch()

        epoch_time = time.time() - start_time
        metrics = calculate_metrics(all_targets, all_preds)

        result = {
            "loss": total_task_loss / len(data_loader),
            "accuracy": correct / total,
            "f1_score": metrics['overall']['f1_score'],
            "kappa": metrics['overall']['kappa'],
            "per_class_f1": metrics['per_class'],
            "time": epoch_time
        }
        
        # Domain-adaptation extras for logging.
        if self.use_domain_adaptation:
            result['domain_loss'] = total_domain_loss / len(data_loader)
            result['lambda'] = lambda_val
            # Only reported when MMD was actually accumulated (see NOTE at
            # the top of this method — currently never).
            if total_mmd_loss > 0:
                result['mmd_loss'] = total_mmd_loss / len(data_loader)

        return result

    def validate(self, data_loader, domain_loader=None):
        """Evaluate the model; optionally also on the target domain.

        Args:
            data_loader: source-domain validation loader.
            domain_loader: optional target-domain validation loader.

        Returns:
            dict: source-domain metrics, plus the same metric keys prefixed
            with 'target_' when a target-domain loader is supplied.
        """
        self.model.eval()

        # Source-domain performance.
        results = {}
        results.update(self._validate_domain(data_loader, "source"))

        # Target-domain performance, keys prefixed with 'target_'.
        # BUG FIX: the previous if/else on key names was redundant — both
        # branches applied the identical prefix; collapsed to one statement.
        if domain_loader is not None:
            target_results = self._validate_domain(domain_loader, "target")
            for key, value in target_results.items():
                results[f"target_{key}"] = value

        return results
    
    def _validate_domain(self, data_loader, domain_name="source"):
        """Evaluate the model on one domain's validation loader.

        Args:
            data_loader: loader yielding (data, target) batches.
            domain_name: label shown in the progress bar ("source"/"target").

        Returns:
            dict: loss/accuracy/f1_score/kappa/per-class metrics, confusion
            matrix, wall time, and the raw y_true / y_pred lists.
        """
        total_loss = 0
        correct = 0
        total = 0
        all_preds = []
        all_targets = []

        start_time = time.time()
        
        if self.use_amp and self.scaler is not None:
            # Mixed-precision validation path.
            with torch.no_grad():
                prefetcher = create_prefetcher(data_loader, self.device)
                batch = prefetcher.next()
                desc = f"Fold {self.fold_id} Epoch {self.epoch+1} {domain_name.capitalize()} Val"
                with tqdm(total=len(data_loader), desc=desc) as pbar:
                    while batch is not None:
                        # Batches are accessed uniformly as (data, target).
                        data = batch[0]
                        target = batch[1]
                        # Tensors come out of the DataLoader with the right
                        # dtype; only a device move may still be required.
                        data = data.to(self.device) if not data.is_cuda else data
                        target = target.to(self.device) if not target.is_cuda else target
                        with torch.cuda.amp.autocast():
                            # Already on-device via the prefetcher; this only
                            # applies transfer optimizations.
                            data = optimize_tensor_transfer(data)
                            target = optimize_tensor_transfer(target)
                            
                            # Domain-adaptation models return a tuple; take
                            # the task head only.
                            # NOTE(review): hasattr(self.model, 'grl') is
                            # False when the model is wrapped in DataParallel
                            # or DDP — confirm the wrapped case is handled.
                            if self.use_domain_adaptation:
                                if hasattr(self.model, 'grl'):
                                    output, _ = self.model(data)
                                else:
                                    output = self.model(data)
                            else:
                                output = self.model(data)
                            loss = self.criterion(output, target)
                        
                        total_loss += loss.item()
                        _, predicted = output.max(1)
                        total += target.size(0)
                        correct += predicted.eq(target).sum().item()

                        all_preds.extend(predicted.cpu().numpy())
                        all_targets.extend(target.cpu().numpy())
                        
                        pbar.update(1)
                        batch = prefetcher.next()
        else:
            # Full-precision validation path.
            with torch.no_grad():
                # The prefetcher overlaps data loading with compute.
                prefetcher = create_prefetcher(data_loader, self.device)
                batch = prefetcher.next()
                # Progress bar shows fold, epoch and domain.
                desc = f"Fold {self.fold_id} Epoch {self.epoch+1} {domain_name.capitalize()} Val"
                with tqdm(total=len(data_loader), desc=desc) as pbar:
                    while batch is not None:
                        # Batches are accessed uniformly as (data, target).
                        data = batch[0]
                        target = batch[1]
                        # Defensive coercion for non-tensor batches (normally
                        # the DataLoader already yields tensors).
                        if not isinstance(data, torch.Tensor):
                            data = torch.tensor(data, dtype=torch.float32, device=self.device)
                        if not isinstance(target, torch.Tensor):
                            target = torch.tensor(target, dtype=torch.long, device=self.device)
                        # Already on-device via the prefetcher; this only
                        # applies transfer optimizations.
                        data = optimize_tensor_transfer(data)
                        target = optimize_tensor_transfer(target)
                        
                        # Domain-adaptation models return a tuple; take the
                        # task head only (see NOTE in the AMP branch above
                        # about DataParallel/DDP-wrapped models).
                        if self.use_domain_adaptation:
                            if hasattr(self.model, 'grl'):
                                output, _ = self.model(data)
                            else:
                                output = self.model(data)
                        else:
                            output = self.model(data)
                        
                        loss = self.criterion(output, target)

                        # Accumulate statistics across all batches.
                        total_loss += loss.item()
                        _, predicted = output.max(1)
                        total += target.size(0)
                        correct += predicted.eq(target).sum().item()

                        all_preds.extend(predicted.cpu().numpy())
                        all_targets.extend(target.cpu().numpy())
                        
                        pbar.update(1)
                        batch = prefetcher.next()

        epoch_time = time.time() - start_time
        metrics = calculate_metrics(all_targets, all_preds)

        return {
            "loss": total_loss / len(data_loader),
            "accuracy": correct / total,
            "f1_score": metrics['overall']['f1_score'],
            "kappa": metrics['overall']['kappa'],
            "per_class_f1": metrics['per_class'],
            "confusion_matrix": metrics['confusion_matrix'],
            "time": epoch_time,
            "y_true": all_targets,
            "y_pred": all_preds
        }

    def train(self, train_loader, val_loader, domain_train_loader=None, domain_val_loader=None):
        """完整训练过程，支持领域适应"""

        for epoch in range(self.epoch, self.config['trainer']['epochs']):
            self.epoch = epoch
            print(f"\nEpoch {self.epoch + 1}/{self.config['trainer']['epochs']}")

            # 训练和验证
            train_metrics = self.train_epoch(train_loader, domain_train_loader)
            val_metrics = self.validate(val_loader, domain_val_loader)

            # 记录历史
            self.train_history.append({
                'train': train_metrics,
                'val': val_metrics
            })

            # 打印指标
            print(f"Train Loss: {train_metrics['loss']:.4f}, Acc: {train_metrics['accuracy']:.4f}, "
                  f"F1: {train_metrics['f1_score']:.4f}, Kappa: {train_metrics['kappa']:.4f}")
            print(f"Source Val Loss: {val_metrics['loss']:.4f}, Acc: {val_metrics['accuracy']:.4f}, "
                  f"F1: {val_metrics['f1_score']:.4f}, Kappa: {val_metrics['kappa']:.4f}")
            
            # 如果有目标域指标，也打印出来
            if 'target_accuracy' in val_metrics:
                print(f"Target Val Loss: {val_metrics['target_loss']:.4f}, Acc: {val_metrics['target_accuracy']:.4f}, "
                      f"F1: {val_metrics['target_f1_score']:.4f}, Kappa: {val_metrics['target_kappa']:.4f}")

            # 记录到wandb - 创建一个完整的日志字典
            log_dict = {
                # 每个折的训练指标
                f"fold_{self.fold_id}/train_loss": train_metrics['loss'],
                f"fold_{self.fold_id}/train_acc": train_metrics['accuracy'],
                f"fold_{self.fold_id}/train_f1": train_metrics['f1_score'],
                f"fold_{self.fold_id}/train_kappa": train_metrics['kappa'],
                
                # 每个折的源域验证指标
                f"fold_{self.fold_id}/source_val_acc": val_metrics['accuracy'],
                f"fold_{self.fold_id}/source_val_loss": val_metrics['loss'],
                f"fold_{self.fold_id}/source_val_f1": val_metrics['f1_score'],
                f"fold_{self.fold_id}/source_val_kappa": val_metrics['kappa'],
                
                # 每个折的目标域验证指标（如果存在）
                f"fold_{self.fold_id}/target_val_acc": val_metrics.get('target_accuracy', 0),
                f"fold_{self.fold_id}/target_val_loss": val_metrics.get('target_loss', 0),
                f"fold_{self.fold_id}/target_val_f1": val_metrics.get('target_f1_score', 0),
                f"fold_{self.fold_id}/target_val_kappa": val_metrics.get('target_kappa', 0),
                
                # 全局指标 - 当前折的轮次
                "final/epoch": epoch,
            }
            
            # 如果使用领域适应，添加领域损失和lambda值
            if self.use_domain_adaptation and 'domain_loss' in train_metrics:
                log_dict[f"fold_{self.fold_id}/domain_loss"] = train_metrics['domain_loss']
                log_dict[f"fold_{self.fold_id}/lambda"] = train_metrics['lambda']
                # 添加MMD损失
                if 'mmd_loss' in train_metrics:
                    log_dict[f"fold_{self.fold_id}/mmd_loss"] = train_metrics['mmd_loss']
            
            # 确定是否是最佳模型（同时考虑源域和目标域性能）
            if 'target_accuracy' in val_metrics:
                # 综合考虑源域损失和目标域准确率
                is_best = (val_metrics['loss'] < self.best_loss and 
                          val_metrics['target_accuracy'] > self.best_target_acc)
            else:
                is_best = val_metrics['loss'] < self.best_loss
            
            # 在验证阶段结束后再保存检查点，确保验证过程完成
            self.save_checkpoint(is_best)

            # 记录到wandb
            # 添加当前学习率到日志
            if self.scheduler_manager is not None:
                current_lr = self.optimizer.param_groups[0]['lr']
                log_dict[f"fold_{self.fold_id}/learning_rate"] = current_lr
            
            # 添加领域损失权重到日志
            if self.use_domain_adaptation:
                log_dict[f"fold_{self.fold_id}/domain_loss_weight"] = self.domain_loss_weight
            
            # 动态调整损失权重
            if self.use_domain_adaptation and self.weight_adjustment_strategy != 'constant':
                self._adjust_loss_weight(epoch, self.config['trainer']['epochs'], val_metrics['accuracy'])
            
            # 记录到wandb
            wandb.log(log_dict)

            # 保存最佳模型参数
            if is_best:
                self.best_acc = val_metrics['accuracy']
                self.best_f1 = val_metrics['f1_score']
                self.best_kappa = val_metrics['kappa']
                self.best_loss = val_metrics['loss']
                self.best_cm = val_metrics['confusion_matrix']
                self.best_per_class = val_metrics['per_class_f1']
                # 保存目标域的最佳指标
                if 'target_accuracy' in val_metrics:
                    self.best_target_acc = val_metrics['target_accuracy']
                    self.best_target_f1 = val_metrics['target_f1_score']
                    self.best_target_kappa = val_metrics['target_kappa']
                    self.best_target_cm = val_metrics['target_confusion_matrix']
                print(f"New best model found! Loss: {self.best_loss:.4f}")
                # 重置早停计数器
                self.no_improvement_count = 0
            else:
                # 增加早停计数器
                self.no_improvement_count += 1
                if self.early_stop > 0:
                    print(f"No improvement for {self.no_improvement_count} epochs. Early stop at {self.early_stop}.")

            
            # 在epoch结束后更新学习率调度器
            if self.scheduler_manager is not None:
                # 验证后更新依赖验证指标的学习率调度器
                if self.scheduler_manager.needs_val_update():
                    # 检查是否使用ReduceLROnPlateau且mode为max
                    if (hasattr(self.scheduler_manager.scheduler, 'mode') and 
                        self.scheduler_manager.scheduler.mode == 'max'):
                        # 使用accuracy作为监控指标
                        self.scheduler_manager.step_val(val_metrics['accuracy'])
                        print(f"调度器监控指标: accuracy = {val_metrics['accuracy']:.4f}")
                    else:
                        # 默认使用loss作为监控指标
                        self.scheduler_manager.step_val(val_metrics['loss'])
                        print(f"调度器监控指标: loss = {val_metrics['loss']:.4f}")
                # 更新需要在epoch后更新的调度器
                elif self.scheduler_manager.needs_epoch_update():
                    self.scheduler_manager.step_epoch()
            
            # 检查早停条件
            if self.early_stop > 0 and self.no_improvement_count >= self.early_stop:
                print(f"Early stopping triggered after {self.early_stop} epochs without improvement.")
                break

        # 保存最终的评估结果
        self.save_results()
        result = {
            'accuracy': self.best_acc,
            'f1_score': self.best_f1,
            'kappa': self.best_kappa,
            'per_class': self.best_per_class,
            'confusion_matrix': self.best_cm
        }
        
        # 添加目标域的最佳指标
        if self.best_target_acc > 0:
            result['target_accuracy'] = self.best_target_acc
            result['target_f1_score'] = self.best_target_f1
            result['target_kappa'] = self.best_target_kappa
            result['target_confusion_matrix'] = self.best_target_cm
        
        return result




    def _optimize_tensor_transfer(self, data):
        # 优化张量传输的函数
        # 此函数现在只接收已移至设备的数据
        return data

    def _move_to_device(self, data, target):
        # 优化数据传输到设备的函数
        # 处理数据
        if isinstance(data, list):
            data = [item.to(self.device, non_blocking=True) for item in data]
        else:
            data = data.to(self.device, non_blocking=True)
        
        # 处理标签
        if target is not None:
            if isinstance(target, list):
                target = [item.to(self.device, non_blocking=True) for item in target]
            else:
                target = target.to(self.device, non_blocking=True)
                
        # 应用额外优化 - 使用正确的方法名
        data = self._optimize_tensor_transfer(data)
        if target is not None:
            target = self._optimize_tensor_transfer(target)
        
        return data, target
    
    def _adjust_scheduler_state(self, epoch):
        """调整学习率调度器状态，避免直接调用scheduler.step()导致的警告"""
        if self.scheduler_manager is not None:
            # 使用调度器管理器的方法来调整状态
            self.scheduler_manager.adjust_state(epoch)
            print(f"学习率调度器步数已设置到: {epoch}")

    def save_results(self) -> None:
        """Persist this fold's final evaluation artifacts to disk and wandb.

        Writes ``history.csv`` (per-epoch train/val metrics),
        ``results.txt`` (best overall and per-class metrics),
        ``confusion_matrix.csv`` — plus ``target_confusion_matrix.csv``
        when a target-domain confusion matrix was recorded — into
        ``self.save_dir``, and logs the final best metrics to wandb.
        """
        # Save the per-epoch training history as a CSV table.
        history_df = pd.DataFrame([
            {
                'epoch': i,
                'train_loss': h['train']['loss'],
                'train_acc': h['train']['accuracy'],
                'train_f1': h['train']['f1_score'],
                'train_kappa': h['train']['kappa'],
                'val_loss': h['val']['loss'],
                'val_acc': h['val']['accuracy'],
                'val_f1': h['val']['f1_score'],
                'val_kappa': h['val']['kappa']
            }
            for i, h in enumerate(self.train_history)
        ])
        history_df.to_csv(os.path.join(self.save_dir, 'history.csv'), index=False)

        # Collect the best metrics for the text report written below.
        results = {
            'overall': {
                'accuracy': self.best_acc,
                'f1_score': self.best_f1,
                'kappa': self.best_kappa
            },
            'per_class': self.best_per_class
        }

        # Record the final best ACC / MF1 / kappa to wandb scalar charts.
        final_log_dict = {
            f"fold_{self.fold_id}/final/accuracy": self.best_acc,
            f"fold_{self.fold_id}/final/f1_score": self.best_f1,
            f"fold_{self.fold_id}/final/kappa": self.best_kappa,
            # Duplicated under "final/" so the final results of every fold in
            # a multi-fold experiment can be compared side by side.
            f"final/{self.fold_id}_accuracy": self.best_acc,
            f"final/{self.fold_id}_f1_score": self.best_f1,
            f"final/{self.fold_id}_kappa": self.best_kappa
        }
        
        # Add the target-domain best metrics, if any were recorded
        # (guarded by best_target_acc > 0).
        if self.best_target_acc > 0:
            final_log_dict[f"fold_{self.fold_id}/final/target_accuracy"] = self.best_target_acc
            final_log_dict[f"fold_{self.fold_id}/final/target_f1_score"] = self.best_target_f1
            final_log_dict[f"fold_{self.fold_id}/final/target_kappa"] = self.best_target_kappa
            final_log_dict[f"final/{self.fold_id}_target_accuracy"] = self.best_target_acc
            final_log_dict[f"final/{self.fold_id}_target_f1_score"] = self.best_target_f1
            final_log_dict[f"final/{self.fold_id}_target_kappa"] = self.best_target_kappa
        # Log the final metrics directly with wandb.log.
        wandb.log(final_log_dict)

        # Create an aggregated final-results chart.
        if self.fold_id == 0:  # could instead be done on the last (or a specific) fold
            # More chart-customization code can be added here.
            pass

        with open(os.path.join(self.save_dir, 'results.txt'), 'w') as f:
            f.write("Best Metrics:\n")
            f.write(f"Overall Accuracy: {results['overall']['accuracy']:.4f}\n")
            f.write(f"Overall F1 Score: {results['overall']['f1_score']:.4f}\n")
            f.write(f"Kappa: {results['overall']['kappa']:.4f}\n\n")
            f.write("Per Class F1 Scores:\n")
            for cls, score in results['per_class'].items():
                f.write(f"{cls}: {score:.4f}\n")

        # Save the best confusion matrix as integer CSV.
        np.savetxt(os.path.join(self.save_dir, 'confusion_matrix.csv'),
                    self.best_cm, fmt='%d', delimiter=',')
        
        # Save the target-domain confusion matrix, when one exists.
        if self.best_target_cm is not None:
            np.savetxt(os.path.join(self.save_dir, 'target_confusion_matrix.csv'),
                        self.best_target_cm, fmt='%d', delimiter=',')