import os
import time
from typing import Dict, Any

import torch
from torch import amp
from tqdm import tqdm

from checkpoints_logger.checkpoint_manager import CheckpointManager
from data_manager import DataManager

from config_manager import TrainConfig
from display_manager import VisualizationManager
from models_manager import ModelManager
from optimizer_manager import OptimizerManager
from loss_manager import LossManager
from metrics_manager import MetricsManager


class TrainerManager:
    """Orchestrates the full training lifecycle.

    Wires together the per-concern managers (model, data, optimizer, loss,
    metrics, visualization, checkpointing) and drives the epoch loop:
    train -> validate -> log -> checkpoint.
    """

    def __init__(self, train_config: "TrainConfig"):
        self.config = train_config

        # === Instantiate the per-concern managers ===
        self.model_manager = ModelManager(self.config.model, self.config.device)
        self.model = self.model_manager.get_model()

        self.data_manager = DataManager(self.config.dataset)
        self.train_loader, self.val_loader, self.test_loader = self.data_manager.get_dataloader()

        self.optimizer_manager = OptimizerManager(self.model, self.config.optimizer)
        self.optimizer = self.optimizer_manager.optimizer
        self.scheduler = self.optimizer_manager.scheduler

        self.loss_manager = LossManager(self.config.loss)

        self.metrics_manager = MetricsManager(self.config.metrics)

        self.visualizer = VisualizationManager(self.config.visualization, model_name=self.config.model.model_name)

        self.checkpoint_manager = CheckpointManager(self, self.config.checkpoint)

        # === Training parameters ===
        self.device = self.model_manager.device
        self.num_epochs = self.config.num_epochs

        # === Automatic mixed precision (AMP) ===
        # Enabled by default; an explicit `use_amp` attribute on the config
        # overrides it.
        self.scaler = amp.GradScaler(enabled=getattr(train_config, "use_amp", True))

        # Epoch to resume from; checkpoint restoration may overwrite this.
        self.start_epoch = 0

    def _move_to_device(self, batch):
        """Move every tensor in *batch* to ``self.device``.

        Supports tuple/list, dict, and bare-tensor batches. The result is
        always normalized to a 3-tuple ``(images, targets, extras)``, with
        ``targets`` and/or ``extras`` set to ``None`` when absent, so callers
        can unpack unconditionally.

        Raises:
            TypeError: if the batch is not a tuple/list/dict/tensor.
        """

        def to_device(x):
            if torch.is_tensor(x):
                return x.to(self.device, non_blocking=True)
            return x

        # tuple or list: positional convention (images, targets, *extras)
        if isinstance(batch, (tuple, list)):
            moved = [to_device(x) for x in batch]
            images = moved[0] if moved else None
            targets = moved[1] if len(moved) > 1 else None
            extras = moved[2:] if len(moved) > 2 else None
            return images, targets, extras
        # dict: heuristically match image / target keys by name
        if isinstance(batch, dict):
            moved = {k: to_device(v) for k, v in batch.items()}
            image_key = next((k for k in moved if "image" in k.lower() or "img" in k.lower()), None)
            target_key = next((k for k in moved if "target" in k.lower() or "label" in k.lower()), None)
            images = moved.get(image_key, None)
            targets = moved.get(target_key, None)
            extras = {k: v for k, v in moved.items() if k not in (image_key, target_key)}
            return images, targets, extras
        # bare tensor: images only
        if torch.is_tensor(batch):
            return batch.to(self.device, non_blocking=True), None, None
        raise TypeError(f"Unsupported batch type: {type(batch)}")

    def train_one_epoch(self) -> float:
        """Run a single training epoch.

        Returns:
            The average training loss over the epoch (0.0 for an empty loader).

        Raises:
            KeyError: if the loss dict contains neither 'total_loss' nor 'loss'.
        """
        self.model.train()
        total_loss = 0.0
        num_batches = len(self.train_loader)

        progress_bar = tqdm(self.train_loader, desc="Training", leave=False)
        for batch in progress_bar:
            images, targets, _ = self._move_to_device(batch)

            # Reset gradients before the forward/backward pass.
            self.optimizer.zero_grad()

            # str() so this also works when self.device is a torch.device,
            # not just a plain string.
            device_type = "cuda" if "cuda" in str(self.device) else "cpu"
            try:
                with amp.autocast(device_type=device_type, enabled=self.scaler.is_enabled()):
                    outputs = self.model(images)
                    loss_dict = self.loss_manager.compute_loss(outputs, targets)
            except TypeError:  # older PyTorch: autocast has no device_type argument
                with amp.autocast(enabled=self.scaler.is_enabled()):
                    outputs = self.model(images)
                    loss_dict = self.loss_manager.compute_loss(outputs, targets)

            loss = loss_dict.get("total_loss", loss_dict.get("loss", None))
            if loss is None:
                # Fail loudly instead of crashing inside scaler.scale(None).
                raise KeyError("LossManager.compute_loss must return 'total_loss' or 'loss'")

            # Backward pass with gradient scaling.
            self.scaler.scale(loss).backward()

            # Optional gradient clipping. Unscale first so the norm is
            # measured on true (unscaled) gradients.
            # NOTE(review): OptimizerManager is constructed with
            # self.config.optimizer, so the clip setting lives directly on
            # that sub-config (the old double-nested
            # optimizer_manager.config.optimizer path looked like a bug).
            clip_norm = getattr(self.config.optimizer, "clip_grad_norm", None)
            if clip_norm is not None:
                self.scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), clip_norm)

            self.scaler.step(self.optimizer)
            self.scaler.update()

            # Per-batch scheduler stepping (opt-in via config flag).
            if self.scheduler and getattr(self.config, "scheduler_step_per_batch", False):
                self.scheduler.step()

            total_loss += loss.item()
            progress_bar.set_postfix({"loss": f"{loss.item():.4f}"})

        # Guard against an empty loader to avoid ZeroDivisionError.
        avg_loss = total_loss / max(num_batches, 1)

        # Per-epoch scheduler stepping (the default).
        if self.scheduler and not getattr(self.config, "scheduler_step_per_batch", False):
            if self.config.optimizer.scheduler.scheduler_type == "plateau":
                # ReduceLROnPlateau requires the monitored metric.
                self.scheduler.step(avg_loss)
            else:
                self.scheduler.step()

        return avg_loss

    # Full training loop
    def train(self):
        """Entry point: train for ``num_epochs`` epochs, validating, logging
        and checkpointing after every epoch."""
        print(f"🚀 Start Training: {self.num_epochs} epochs on {self.device}")
        for epoch in range(self.start_epoch, self.num_epochs):
            start_time = time.time()
            train_loss = self.train_one_epoch()
            train_time = time.time() - start_time
            val_metrics = self.validate(epoch)
            val_time = time.time() - start_time - train_time
            # NOTE: "loss" reported alongside val metrics is the *training*
            # loss for this epoch.
            val_metrics["loss"] = train_loss
            # === Assemble log record ===
            log_info = {
                "total_epoches": self.num_epochs,
                "epoch": epoch,
                "train_time": f"{round(train_time, 2)}s",
                "val_time": f"{round(val_time, 2)}s",
                "metrics": val_metrics,
            }
            # Print and persist the log.
            self.visualizer.update(**log_info)
            # Save model checkpoint.
            self.checkpoint_manager.save_checkpoint(epoch, val_metrics)

        self.visualizer.finalize()

    # Validation pass
    def validate(self, epoch) -> Dict[str, float]:
        """Run one validation pass and return metrics averaged over batches.

        Args:
            epoch: current epoch index (accepted for interface symmetry).
        """
        self.model.eval()
        total_metrics: Dict[str, float] = {}
        num_batches = len(self.val_loader)

        with torch.no_grad():
            for batch in tqdm(self.val_loader, desc="Validating", leave=False):
                # FIX: _move_to_device returns (images, targets, extras); the
                # old 2-way unpack raised ValueError on dict batches or
                # tuples with 3+ elements.
                images, targets, _ = self._move_to_device(batch)
                outputs = self.model(images)

                # Loss is computed but not aggregated here; kept in case
                # LossManager tracks state internally — TODO confirm.
                self.loss_manager.compute_loss(outputs, targets)

                # === Accumulate metrics ===
                metrics = self.metrics_manager.compute_metrics(outputs, targets)
                for k, v in metrics.items():
                    total_metrics[k] = total_metrics.get(k, 0.0) + v

        # === Average metrics (guard against an empty loader) ===
        return {k: v / max(num_batches, 1) for k, v in total_metrics.items()}
