"""
训练器实现
"""

import torch
import torch.nn as nn
import numpy as np

from tqdm import tqdm
from sklearn.metrics import roc_auc_score

from ood_detectors import MahalanobisDetector
from model import IncrementalLearner
from config import cfg

class Trainer:
    """Base trainer.

    Runs supervised training with mixed precision, gradient accumulation and
    gradient clipping, and evaluates both classification accuracy and
    Mahalanobis-based OOD detection (AUROC).
    """

    def __init__(self, model, optimizer, scheduler, device):
        """
        Args:
            model: network exposing `feat_dim` and a standard forward pass.
            optimizer: optimizer over `model.parameters()`.
            scheduler: per-update LR scheduler, or None.
            device: torch device for inputs/targets.
        """
        self.model = model
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.device = device
        self.criterion = nn.CrossEntropyLoss()

        # Mahalanobis detector used for OOD scoring in evaluate().
        self.mahalanobis_detector = MahalanobisDetector(
            cfg.NUM_KNOWN_CLASSES,
            model.feat_dim,
            use_relative=cfg.USE_RELATIVE_MAHALANOBIS,
            normalize_features=cfg.NORMALIZE_FEATURES,
        )

        # Gradient accumulation: parameters are updated once every
        # GRAD_ACCUMULATION_STEPS batches, emulating a larger batch size.
        self.grad_accumulation_steps = cfg.GRAD_ACCUMULATION_STEPS
        self.effective_batch_size = cfg.BATCH_SIZE * cfg.GRAD_ACCUMULATION_STEPS
        print(
            f"Effective batch size: {self.effective_batch_size} (batch_size={cfg.BATCH_SIZE} x grad_accum={cfg.GRAD_ACCUMULATION_STEPS})"
        )

    def _optimizer_update(self):
        """Clip gradients, apply one optimizer step, reset grads, step the LR schedule."""
        torch.nn.utils.clip_grad_norm_(
            self.model.parameters(), max_norm=cfg.GRAD_NORM_CLIP
        )
        self.optimizer.step()
        self.optimizer.zero_grad()
        if self.scheduler is not None:
            self.scheduler.step()

    def train_epoch(self, train_loader, epoch):
        """Train for one epoch.

        Args:
            train_loader: iterable of (inputs, targets) batches.
            epoch: zero-based epoch index (display only).

        Returns:
            (avg_loss, accuracy): mean loss per optimizer update and training
            accuracy in percent (0.0 for an empty loader).
        """
        self.model.train()

        total_loss = 0.0
        correct = 0
        total = 0
        accumulation_loss = 0.0
        num_updates = 0  # optimizer steps actually taken
        batch_idx = -1  # guards the tail flush below when the loader is empty

        # Start the epoch with clean gradients.
        self.optimizer.zero_grad()

        pbar = tqdm(train_loader, desc=f"Training Epoch {epoch + 1}")
        for batch_idx, (inputs, targets) in enumerate(pbar):
            inputs, targets = inputs.to(self.device), targets.to(self.device)

            # Mixed-precision forward pass.
            with torch.cuda.amp.autocast(enabled=True):
                outputs = self.model(inputs)
                loss = self.criterion(outputs, targets)
                # Scale so accumulated gradients average over the window.
                loss = loss / self.grad_accumulation_steps

            loss.backward()
            accumulation_loss += loss.item()

            # Update parameters once per accumulation window.
            if (batch_idx + 1) % self.grad_accumulation_steps == 0:
                self._optimizer_update()
                num_updates += 1
                total_loss += accumulation_loss
                accumulation_loss = 0.0

            # Running accuracy statistics.
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

            # Refresh the progress bar every 10 batches.
            if batch_idx % 10 == 0:
                current_lr = self.get_learning_rate()
                pbar.set_postfix(
                    {
                        "Loss": f"{loss.item() * self.grad_accumulation_steps:.4f}",
                        "Acc": f"{100.0 * correct / total:.2f}%",
                        "LR": f"{current_lr:.6f}",
                    }
                )

        # Flush a final partial accumulation window, if any.
        if (batch_idx + 1) % self.grad_accumulation_steps != 0:
            self._optimizer_update()
            num_updates += 1
            total_loss += accumulation_loss

        # Average over the optimizer updates actually taken; max(1, ...)
        # avoids division by zero when the loader is empty or shorter than
        # one accumulation window.
        avg_loss = total_loss / max(1, num_updates)
        accuracy = 100.0 * correct / total if total > 0 else 0.0

        return avg_loss, accuracy

    def evaluate(self, test_loader, train_loader):
        """Evaluate classification accuracy and OOD detection performance.

        Args:
            test_loader: batches with known-class targets >= 0; negative
                targets mark OOD samples and are skipped for accuracy.
            train_loader: in-distribution data used to fit the detector.

        Returns:
            (auroc, accuracy): OOD AUROC and known-class accuracy, in percent.
        """
        self.model.eval()

        # Classification accuracy over known classes only.
        correct = 0
        total = 0

        with torch.no_grad():
            for inputs, targets in test_loader:
                # Targets < 0 denote unknown/OOD samples — exclude them.
                known_mask = targets >= 0
                if not known_mask.any():
                    continue

                inputs = inputs[known_mask].to(self.device)
                targets = targets[known_mask].to(self.device)

                with torch.cuda.amp.autocast(enabled=True):
                    outputs = self.model(inputs)

                _, predicted = outputs.max(1)

                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()

        accuracy = 100.0 * correct / total if total > 0 else 0

        # Refit the Mahalanobis detector on current features.
        print("Fitting Mahalanobis detector...")
        self.mahalanobis_detector.fit(self.model, train_loader, self.device)

        # Score the test set and compute AUROC.
        print("Computing OOD scores...")
        scores, labels = self.mahalanobis_detector.get_ood_scores(
            self.model, test_loader, self.device
        )

        auroc = roc_auc_score(labels, scores) * 100

        return auroc, accuracy

    def get_learning_rate(self):
        """Return the learning rate of the first optimizer param group."""
        for param_group in self.optimizer.param_groups:
            return param_group["lr"]


class IncrementalTrainer(Trainer):
    """Trainer for class-incremental learning.

    Extends the base trainer with new-class sample re-weighting and optional
    knowledge distillation from a frozen copy of the previous-task model.
    """

    def __init__(self, model, optimizer, scheduler, device, old_model=None):
        """
        Args:
            model: current-task model being trained.
            optimizer / scheduler / device: as in Trainer.
            old_model: frozen previous-task model used as a distillation
                teacher, or None to disable distillation.
        """
        super().__init__(model, optimizer, scheduler, device)
        self.old_model = old_model
        self.incremental_learner = IncrementalLearner(model, old_model)

        # Keep the frozen teacher on the same device and in eval mode.
        if self.old_model is not None:
            self.old_model = self.old_model.to(device)
            self.old_model.eval()

    def _step_parameters(self):
        """Clip gradients, step optimizer and scheduler, reset gradients."""
        torch.nn.utils.clip_grad_norm_(
            self.model.parameters(), max_norm=cfg.GRAD_NORM_CLIP
        )
        self.optimizer.step()
        self.optimizer.zero_grad()
        if self.scheduler is not None:
            self.scheduler.step()

    @staticmethod
    def _new_class_weight(epoch):
        """Loss weight for new-class samples; anneals as training progresses."""
        if epoch < 2:
            return 5.0  # first 2 epochs: strongly emphasize new classes
        if epoch < 4:
            return 3.0
        if epoch < 6:
            return 2.0
        return 1.5

    @staticmethod
    def _loss_weights(epoch):
        """Return (classification, distillation) weights for this epoch.

        Early epochs favor classification; later epochs fall back to the
        configured weights.
        """
        if epoch < 2:
            return 0.95, 0.05
        if epoch < 4:
            return 0.85, 0.15
        if epoch < 6:
            return 0.75, 0.25
        return cfg.CLASSIFICATION_WEIGHT, cfg.DISTILLATION_WEIGHT

    def train_incremental_epoch(self, train_loader, epoch, num_old_classes):
        """Train one epoch with new-class re-weighting and distillation.

        Args:
            train_loader: iterable of (inputs, targets) batches; targets in
                [0, num_old_classes) are "old" classes, the rest are "new".
            epoch: zero-based epoch index; drives the weighting schedules.
            num_old_classes: number of classes from previous tasks.

        Returns:
            (avg_loss, accuracy): mean loss per optimizer update and training
            accuracy in percent (0.0 for an empty loader).
        """
        self.model.train()
        if self.old_model is not None:
            self.old_model.eval()

        total_loss = 0.0
        total_cls_loss = 0.0
        total_dist_loss = 0.0
        correct = 0
        total = 0
        accumulation_loss = 0.0
        accumulation_cls_loss = 0.0
        accumulation_dist_loss = 0.0
        num_updates = 0  # optimizer steps actually taken
        batch_idx = -1  # guards the tail flush when the loader is empty

        # Start the epoch with clean gradients.
        self.optimizer.zero_grad()

        # Per-sample loss so new-class samples can be up-weighted;
        # constructed once outside the batch loop.
        criterion = nn.CrossEntropyLoss(reduction="none")

        pbar = tqdm(train_loader, desc=f"Incremental Training Epoch {epoch + 1}")
        for batch_idx, (inputs, targets) in enumerate(pbar):
            inputs, targets = inputs.to(self.device), targets.to(self.device)

            # Mixed-precision forward pass.
            with torch.cuda.amp.autocast(enabled=True):
                outputs = self.model(inputs)

                # Up-weight new-class samples; the factor decays with epoch.
                sample_weights = torch.ones_like(targets, dtype=torch.float)
                new_class_mask = targets >= num_old_classes
                sample_weights[new_class_mask] = self._new_class_weight(epoch)

                # Weighted classification loss.
                cls_loss_per_sample = criterion(outputs, targets)
                cls_loss = (cls_loss_per_sample * sample_weights).mean()

                # Knowledge distillation from the frozen teacher, applied
                # only to samples belonging to old classes.
                dist_loss = 0
                if self.old_model is not None and cfg.USE_KNOWLEDGE_DISTILLATION:
                    old_classes_mask = targets < num_old_classes
                    if old_classes_mask.any():
                        with torch.no_grad():
                            # Run the teacher under autocast as well.
                            with torch.cuda.amp.autocast(enabled=True):
                                old_outputs = self.old_model(inputs[old_classes_mask])

                        dist_loss = self.incremental_learner.compute_distillation_loss(
                            outputs[old_classes_mask],
                            old_outputs,
                            cfg.TEMPERATURE,
                        )

                classification_weight, distillation_weight = self._loss_weights(epoch)

                # Combined loss (distillation term only when it is active).
                if isinstance(dist_loss, torch.Tensor) and dist_loss > 0:
                    loss = (
                        classification_weight * cls_loss
                        + distillation_weight * dist_loss
                    )
                else:
                    loss = cls_loss

                # Scale for gradient accumulation.
                loss = loss / self.grad_accumulation_steps
                cls_loss = cls_loss / self.grad_accumulation_steps
                if isinstance(dist_loss, torch.Tensor):
                    dist_loss = dist_loss / self.grad_accumulation_steps

            loss.backward()
            accumulation_loss += loss.item()
            accumulation_cls_loss += cls_loss.item()
            if isinstance(dist_loss, torch.Tensor):
                accumulation_dist_loss += dist_loss.item()

            # Update parameters once per accumulation window.
            if (batch_idx + 1) % self.grad_accumulation_steps == 0:
                self._step_parameters()
                num_updates += 1
                total_loss += accumulation_loss
                total_cls_loss += accumulation_cls_loss
                total_dist_loss += accumulation_dist_loss
                accumulation_loss = 0.0
                accumulation_cls_loss = 0.0
                accumulation_dist_loss = 0.0

            # Running accuracy statistics.
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

            # Print the old/new class mix for the first few batches of the
            # first epoch as a sanity check.
            if epoch == 0 and batch_idx < 3:
                old_count = (targets < num_old_classes).sum().item()
                new_count = (targets >= num_old_classes).sum().item()
                print(f"  Batch {batch_idx}: {old_count} old, {new_count} new samples")

            # Refresh the progress bar every 10 batches.
            if batch_idx % 10 == 0:
                current_lr = self.get_learning_rate()
                postfix = {
                    "Loss": f"{loss.item() * self.grad_accumulation_steps:.4f}",
                    "Acc": f"{100.0 * correct / total:.2f}%",
                    "LR": f"{current_lr:.6f}",
                }
                if self.old_model is not None and accumulation_dist_loss > 0:
                    postfix["Dist"] = f"{accumulation_dist_loss:.4f}"
                pbar.set_postfix(postfix)

        # Flush a final partial accumulation window, if any.
        if (batch_idx + 1) % self.grad_accumulation_steps != 0:
            self._step_parameters()
            num_updates += 1
            total_loss += accumulation_loss
            total_cls_loss += accumulation_cls_loss
            total_dist_loss += accumulation_dist_loss

        # Average over optimizer updates actually taken; max(1, ...) avoids
        # division by zero on an empty or very short loader.
        avg_loss = total_loss / max(1, num_updates)
        accuracy = 100.0 * correct / total if total > 0 else 0.0

        return avg_loss, accuracy

    def evaluate_incremental(self, test_loader, num_old_classes):
        """Evaluate accuracy overall and separately on old and new classes.

        Args:
            test_loader: iterable of (inputs, targets) batches.
            num_old_classes: boundary between old (< it) and new (>= it) labels.

        Returns:
            (all_accuracy, old_accuracy, new_accuracy) in percent; a group
            with no samples reports 0.
        """
        self.model.eval()

        old_correct = 0
        old_total = 0
        new_correct = 0
        new_total = 0
        all_correct = 0
        all_total = 0

        with torch.no_grad():
            for inputs, targets in test_loader:
                inputs, targets = inputs.to(self.device), targets.to(self.device)

                with torch.cuda.amp.autocast(enabled=True):
                    outputs = self.model(inputs)

                _, predicted = outputs.max(1)

                # Overall statistics.
                all_total += targets.size(0)
                all_correct += predicted.eq(targets).sum().item()

                # Old-class statistics.
                old_mask = targets < num_old_classes
                if old_mask.any():
                    old_total += old_mask.sum().item()
                    old_correct += (
                        predicted[old_mask].eq(targets[old_mask]).sum().item()
                    )

                # New-class statistics.
                new_mask = targets >= num_old_classes
                if new_mask.any():
                    new_total += new_mask.sum().item()
                    new_correct += (
                        predicted[new_mask].eq(targets[new_mask]).sum().item()
                    )

        all_accuracy = 100.0 * all_correct / all_total if all_total > 0 else 0
        old_accuracy = 100.0 * old_correct / old_total if old_total > 0 else 0
        new_accuracy = 100.0 * new_correct / new_total if new_total > 0 else 0

        return all_accuracy, old_accuracy, new_accuracy


def create_optimizer(model: nn.Module, incremental=False):
    """Build an SGD optimizer for the given model.

    In incremental mode the backbone gets a 10x smaller learning rate than
    the classifier head; otherwise a single learning rate is used for all
    parameters.
    """
    if not incremental:
        # Standard training: one learning rate for everything.
        return torch.optim.SGD(
            model.parameters(),
            lr=cfg.LEARNING_RATE,
            momentum=cfg.MOMENTUM,
            weight_decay=cfg.WEIGHT_DECAY,
        )

    # Incremental training: split parameters into classifier vs. backbone.
    backbone_params = [
        param for name, param in model.named_parameters() if "classifier" not in name
    ]
    classifier_params = [
        param for name, param in model.named_parameters() if "classifier" in name
    ]

    param_groups = [
        # Backbone: reduced learning rate.
        {"params": backbone_params, "lr": cfg.INCREMENTAL_LR * 0.1},
        # Classifier: standard learning rate (new vs. old classes are handled
        # via loss weighting during training).
        {"params": classifier_params, "lr": cfg.INCREMENTAL_LR},
    ]

    return torch.optim.SGD(
        param_groups, momentum=cfg.MOMENTUM, weight_decay=cfg.WEIGHT_DECAY
    )


def create_scheduler(optimizer, total_steps, warmup_steps=None):
    """Create a linear-warmup + cosine-decay LR scheduler (paper-style).

    Args:
        optimizer: optimizer whose learning rate(s) will be scaled.
        total_steps: total number of scheduler steps over training.
        warmup_steps: length of the linear warmup in steps; defaults to
            cfg.WARMUP_STEPS (backward compatible with the old signature).

    Returns:
        A LambdaLR scheduler whose multiplicative factor rises linearly from
        0 to 1 over the warmup, then follows a cosine decay from 1 to 0.
    """
    from torch.optim.lr_scheduler import LambdaLR

    if warmup_steps is None:
        warmup_steps = cfg.WARMUP_STEPS

    def lr_schedule(step):
        """Linear warmup followed by cosine decay."""
        if step < warmup_steps:
            # Linear warmup; max(1, ...) guards a zero-length warmup.
            return float(step) / float(max(1, warmup_steps))
        # Cosine decay over the remaining steps.
        progress = float(step - warmup_steps) / float(
            max(1, total_steps - warmup_steps)
        )
        return 0.5 * (1.0 + np.cos(np.pi * progress))

    return LambdaLR(optimizer, lr_lambda=lr_schedule)
