# src/trainer.py
import torch
import torch.nn as nn
import numpy as np
import time
import os
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
from sklearn.metrics import roc_auc_score, accuracy_score, log_loss


class Trainer:
    """Training harness with numerical-stability safeguards.

    Wraps a model with AdamW, LR warmup + plateau scheduling, optional
    mixed-precision (AMP) training, NaN-batch tolerance, TensorBoard
    logging and best-checkpoint tracking.
    """

    def __init__(self, model, train_loader, val_loader, test_loader=None,
                 learning_rate=0.0001, weight_decay=0.001,
                 log_dir='./logs', save_dir='./models', device=None):
        """Initialize optimizer, scheduler, logging and AMP state.

        Args:
            model: module whose forward(int_features, cat_features) returns
                a dict with a 'prediction' logits tensor (as used by
                _train_epoch / evaluate).
            train_loader / val_loader / test_loader: iterables yielding
                dicts with 'int_features', 'cat_features', 'label' tensors.
            learning_rate: initial AdamW learning rate.
            weight_decay: AdamW weight decay.
            log_dir: TensorBoard log directory (created if missing).
            save_dir: checkpoint directory (created if missing).
            device: torch.device; defaults to CUDA when available.
        """
        self.model = model
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.test_loader = test_loader

        # Device selection: explicit argument wins, else CUDA if available.
        self.device = device if device else torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.model.to(self.device)

        # Per-sample losses (reduction='none') so non-finite entries can be
        # masked out before averaging in _train_epoch / evaluate.
        self.criterion = nn.BCEWithLogitsLoss(reduction='none')

        # Optimizer
        self.optimizer = torch.optim.AdamW(
            model.parameters(),
            lr=learning_rate,
            weight_decay=weight_decay,
            betas=(0.9, 0.999),
            eps=1e-8
        )

        # Halve the LR after 3 epochs without validation-loss improvement.
        # NOTE(review): the `verbose` kwarg is deprecated (and removed in
        # recent PyTorch releases) — confirm against the pinned torch version.
        self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer, mode='min', factor=0.5, patience=3, verbose=True,
            min_lr=1e-6
        )

        # TensorBoard logging / checkpoint directories
        os.makedirs(log_dir, exist_ok=True)
        os.makedirs(save_dir, exist_ok=True)
        self.log_dir = log_dir
        self.save_dir = save_dir
        self.writer = SummaryWriter(log_dir)

        # Mixed-precision training: disabled initially; train() enables it
        # after the warmup epochs. NOTE(review): torch.cuda.amp.GradScaler is
        # deprecated in newer torch in favor of torch.amp.GradScaler('cuda').
        self.use_amp = False
        self.scaler = torch.cuda.amp.GradScaler(enabled=self.use_amp)

        # Per-epoch metric history
        self.history = {
            'train_loss': [],
            'val_loss': [],
            'val_auc': [],
            'val_acc': []
        }

        # Best-model tracking (by validation AUC)
        self.best_val_auc = 0.0
        self.best_epoch = 0

        # Gradient-clipping max norm
        self.grad_clip_norm = 0.5

        # Maximum tolerated percentage of NaN-loss batches per epoch
        self.max_nan_batch_percentage = 50.0

    def train(self, num_epochs=20, early_stopping_patience=5):
        """Full training loop with warmup, AMP, early stopping and checkpointing.

        Per epoch: (1) linear LR warmup for the first `warmup_epochs` epochs
        at 10% of the base LR with AMP disabled, then restore the LR and
        enable AMP; (2) train one epoch, and if the share of non-finite-loss
        batches is extreme, cut the LR 10x and disable AMP; (3) validate,
        step the plateau scheduler on validation loss, log to TensorBoard,
        checkpoint when validation AUC improves (ties broken by accuracy),
        and early-stop after `early_stopping_patience` epochs without
        improvement (never during warmup).

        Args:
            num_epochs: maximum number of epochs to run.
            early_stopping_patience: post-warmup epochs without val-AUC
                improvement tolerated before stopping.

        Returns:
            (model, history): the trained model and the per-epoch metrics.

        NOTE(review): self.train_loader is deleted at the end, so a Trainer
        instance can only be trained once; a second train() call would raise
        AttributeError. Also, with num_epochs == 0 the final save below
        would reference undefined locals (epoch / val_auc) — confirm callers
        always pass num_epochs >= 1.
        """
        print(f"开始训练，共 {num_epochs} 轮...")
        print("跳过模型图可视化，仅记录训练指标")

        warmup_epochs = 3
        print(f"执行 {warmup_epochs} 轮预热训练 (使用较小学习率)")

        no_improve_epochs = 0
        original_lr = self.optimizer.param_groups[0]['lr']

        for epoch in range(num_epochs):
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

            # Warmup: ramp LR linearly at 10% of base, keep AMP off.
            if epoch < warmup_epochs:
                warmup_factor = (epoch + 1) / warmup_epochs
                for param_group in self.optimizer.param_groups:
                    param_group['lr'] = original_lr * warmup_factor * 0.1
                self.use_amp = False
            else:
                # First post-warmup epoch: restore base LR and turn AMP on.
                if epoch == warmup_epochs:
                    for param_group in self.optimizer.param_groups:
                        param_group['lr'] = original_lr
                    self.use_amp = True
                    self.scaler = torch.cuda.amp.GradScaler(enabled=self.use_amp)
                    print("预热完成，启用混合精度训练")

            train_loss, nan_percentage = self._train_epoch(epoch)

            # Emergency brake when almost every batch yields a non-finite
            # loss: shrink the LR and drop back to full precision.
            if nan_percentage > self.max_nan_batch_percentage:
                print(f"警告: NaN批次比例 ({nan_percentage:.1f}%) 过高")
                if nan_percentage > 90.0:
                    for param_group in self.optimizer.param_groups:
                        param_group['lr'] *= 0.1
                    self.use_amp = False
                    self.scaler = torch.cuda.amp.GradScaler(enabled=self.use_amp)
                    print(f"NaN比例极高，降低学习率至 {self.optimizer.param_groups[0]['lr']:.6f} 并禁用混合精度")

            val_metrics = self._validate()
            # Treat a NaN validation loss as +inf so the plateau scheduler
            # never sees NaN.
            val_loss = val_metrics['loss'] if not np.isnan(val_metrics['loss']) else float('inf')
            val_auc = val_metrics['auc']
            val_acc = val_metrics['accuracy']

            self.scheduler.step(val_loss)

            # Optional model hyperparameter; only logged when the model
            # exposes a `collaboration_alpha` tensor.
            alpha_str = ""
            if hasattr(self.model, 'collaboration_alpha'):
                alpha_val = self.model.collaboration_alpha.item()
                alpha_str = f", Alpha={alpha_val:.4f}"
                self.writer.add_scalar('Hyperparameters/collaboration_alpha', alpha_val, epoch)

            self.writer.add_scalar('Loss/train', train_loss, epoch)
            self.writer.add_scalar('Loss/val', val_loss, epoch)
            self.writer.add_scalar('Metrics/AUC', val_auc, epoch)
            self.writer.add_scalar('Metrics/Accuracy', val_acc, epoch)
            self.writer.add_scalar('Learning_Rate', self.optimizer.param_groups[0]['lr'], epoch)
            self.writer.add_scalar('NaN_Percentage', nan_percentage, epoch)

            self.history['train_loss'].append(train_loss)
            self.history['val_loss'].append(val_loss)
            self.history['val_auc'].append(val_auc)
            self.history['val_acc'].append(val_acc)

            print(f"Epoch {epoch + 1}/{num_epochs}: "
                  f"Loss={train_loss:.4f}, Val Loss={val_loss:.4f}, "
                  f"Val AUC={val_auc:.4f}, Val Acc={val_acc:.4f}, "
                  f"NaN={nan_percentage:.1f}%, LR={self.optimizer.param_groups[0]['lr']:.6f}"
                  f"{alpha_str}")

            # New best: strictly higher AUC, or equal AUC with higher
            # accuracy than the previous best epoch.
            current_score = val_auc
            if current_score > self.best_val_auc or \
               (current_score == self.best_val_auc and val_acc > self.history['val_acc'][self.best_epoch]):
                self.best_val_auc = current_score
                self.best_epoch = epoch
                no_improve_epochs = 0

                model_path = os.path.join(self.save_dir, 'best_model.pth')
                torch.save({
                    'epoch': epoch,
                    'model_state_dict': self.model.state_dict(),
                    'optimizer_state_dict': self.optimizer.state_dict(),
                    'val_auc': val_auc,
                    'val_acc': val_acc,
                    'val_loss': val_loss
                }, model_path)
                print(f"保存最佳模型到 {model_path}，验证AUC: {val_auc:.4f}")
            else:
                no_improve_epochs += 1

            # Early stopping only counts after warmup has finished.
            if epoch >= warmup_epochs and no_improve_epochs >= early_stopping_patience:
                print(f"连续 {early_stopping_patience} 轮无改善，提前停止训练。")
                break

        model_path = os.path.join(self.save_dir, 'final_model.pth')
        torch.save({
            'epoch': epoch,
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'val_metrics': {'auc': val_auc, 'accuracy': val_acc, 'loss': val_loss}
        }, model_path)
        print(f"训练完成，保存最终模型到 {model_path}")
        self.writer.close()

        # ==================== [code modification] start ====================
        # Proactively release the large memory held by the training data
        # loader before the final evaluation.
        print("正在释放训练资源以进行最终评估...")
        # Explicitly drop the training loader object (makes this Trainer
        # instance single-use — see the NOTE in the docstring).
        del self.train_loader
        # Suggest a garbage-collection pass to Python.
        import gc
        gc.collect()
        # Clear the CUDA allocator cache.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        print("资源释放完毕。")
        # ==================== [code modification] end ====================

        if self.test_loader:
            self.load_best_model()
            test_metrics = self.evaluate(self.test_loader)
            print("\n测试集结果:")
            print(f"Loss: {test_metrics['loss']:.4f}")
            print(f"AUC: {test_metrics['auc']:.4f}")
            print(f"Accuracy: {test_metrics['accuracy']:.4f}")
            print(f"Log Loss: {test_metrics['log_loss']:.4f}")

        return self.model, self.history

    def _train_epoch(self, epoch):
        """Run one training epoch.

        Batches whose loss is non-finite (NaN/Inf) are counted and skipped
        instead of corrupting the optimizer state; gradients are clipped to
        self.grad_clip_norm; AMP is used when self.use_amp is set.

        Args:
            epoch: zero-based epoch index (display only).

        Returns:
            (avg_loss, nan_percentage): mean loss over the valid batches
            (inf if no batch was valid) and the percentage of batches
            skipped because of non-finite losses.
        """
        self.model.train()
        total_loss = 0
        batch_count = 0
        nan_loss_count = 0
        start_time = time.time()
        progress_bar = tqdm(total=len(self.train_loader), desc=f"Epoch {epoch + 1}")

        for batch_idx, batch in enumerate(self.train_loader):
            int_features = batch['int_features'].to(self.device)
            cat_features = batch['cat_features'].to(self.device)
            labels = batch['label'].to(self.device)

            self.optimizer.zero_grad()

            with torch.cuda.amp.autocast(enabled=self.use_amp):
                outputs = self.model(int_features, cat_features)
                # Per-sample losses so non-finite entries can be masked out.
                losses = self.criterion(outputs['prediction'], labels)
                valid_mask = torch.isfinite(losses)
                if valid_mask.sum() == 0:
                    nan_loss_count += 1
                    if nan_loss_count <= 3:
                        tqdm.write(f"批次 {batch_idx} 检测到无效损失值，跳过")
                    progress_bar.update(1)
                    continue
                loss = losses[valid_mask].mean()

            # Single isfinite check (one device sync) instead of separate
            # isnan/isinf .item() calls.
            if not torch.isfinite(loss):
                nan_loss_count += 1
                if nan_loss_count <= 3:
                    tqdm.write(f"批次 {batch_idx} 检测到无效损失值，跳过")
                progress_bar.update(1)
                continue

            if self.use_amp:
                # Unscale before clipping so the clip threshold applies to
                # the true gradient magnitudes.
                self.scaler.scale(loss).backward()
                self.scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.grad_clip_norm)
                self.scaler.step(self.optimizer)
                self.scaler.update()
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.grad_clip_norm)
                self.optimizer.step()

            total_loss += loss.item()
            batch_count += 1
            progress_bar.update(1)
            if batch_idx % 10 == 0:
                progress_bar.set_postfix({'loss': loss.item()})

        progress_bar.close()

        if batch_count == 0:
            print(f"警告：Epoch {epoch + 1} 未处理任何有效批次。")
            return float('inf'), 100.0

        nan_percentage = (nan_loss_count / len(self.train_loader)) * 100
        if nan_loss_count > 0:
            print(f"Epoch {epoch + 1}: 检测到 {nan_loss_count} 个批次出现无效损失 ({nan_percentage:.1f}%)，已跳过")

        avg_loss = total_loss / batch_count
        elapsed_time = time.time() - start_time
        # DataLoader.batch_size is None when the loader was built with a
        # batch_sampler; fall back to 1 so the throughput report never
        # crashes the epoch. Also guard against a zero elapsed time.
        batch_size = getattr(self.train_loader, 'batch_size', None) or 1
        samples_per_second = batch_count * batch_size / max(elapsed_time, 1e-9)

        print(f"训练用时: {elapsed_time:.2f}秒, "
              f"每秒样本: {samples_per_second:.1f}, "
              f"平均损失: {avg_loss:.4f}")

        return avg_loss, nan_percentage

    def _validate(self):
        """在验证集上验证模型"""
        return self.evaluate(self.val_loader)

    def evaluate(self, data_loader):
        """评估模型"""
        self.model.eval()
        total_loss = 0
        valid_batches = 0
        all_preds = []
        all_labels = []

        with torch.no_grad():
            for batch in data_loader:
                int_features = batch['int_features'].to(self.device)
                cat_features = batch['cat_features'].to(self.device)
                labels = batch['label'].to(self.device)

                outputs = self.model(int_features, cat_features)
                logits = outputs['prediction']

                try:
                    losses = self.criterion(logits, labels)
                    valid_mask = ~(torch.isnan(losses) | torch.isinf(losses))
                    if valid_mask.sum() > 0:
                        loss = losses[valid_mask].mean()
                        total_loss += loss.item()
                        valid_batches += 1
                except Exception as e:
                    print(f"验证损失计算错误: {e}")
                    continue

                probs = torch.sigmoid(logits)
                probs = torch.nan_to_num(probs, nan=0.5)
                probs = torch.clamp(probs, 1e-6, 1 - 1e-6)
                all_preds.append(probs.cpu().numpy())
                all_labels.append(labels.cpu().numpy())

        if len(all_preds) == 0 or valid_batches == 0:
            print("警告：评估数据为空！")
            return {'loss': float('nan'), 'auc': 0.5, 'accuracy': 0.5, 'log_loss': float('nan')}

        all_preds = np.concatenate(all_preds)
        all_labels = np.concatenate(all_labels)

        valid_indices = ~np.isnan(all_preds)
        if np.sum(valid_indices) == 0:
            print("警告: 所有预测值都是NaN！")
            return {'loss': float('nan'), 'auc': 0.5, 'accuracy': 0.5, 'log_loss': float('nan')}

        all_preds = all_preds[valid_indices]
        all_labels = all_labels[valid_indices]

        avg_loss = total_loss / valid_batches if valid_batches > 0 else float('nan')

        try:
            auc = roc_auc_score(all_labels, all_preds)
        except Exception as e:
            print(f"AUC计算错误: {e}")
            auc = 0.5

        accuracy = accuracy_score(all_labels, all_preds > 0.5)

        try:
            all_preds_safe = np.clip(all_preds, 1e-15, 1 - 1e-15)
            logloss = log_loss(all_labels, all_preds_safe)
        except Exception as e:
            print(f"对数损失计算错误: {e}")
            logloss = float('nan')

        return {'loss': avg_loss, 'auc': auc, 'accuracy': accuracy, 'log_loss': logloss}

    def load_best_model(self):
        """加载最佳模型"""
        best_model_path = os.path.join(self.save_dir, 'best_model.pth')
        if os.path.exists(best_model_path):
            checkpoint = torch.load(best_model_path)
            self.model.load_state_dict(checkpoint['model_state_dict'])
            print(f"加载最佳模型，来自第 {checkpoint['epoch'] + 1} 轮，验证AUC: {checkpoint['val_auc']:.4f}")
        else:
            print(f"找不到最佳模型文件: {best_model_path}")