import torch
from torch import nn
from torch.optim import SGD, Adam, AdamW, RMSprop
from torch.optim.lr_scheduler import (
    StepLR, MultiStepLR, CosineAnnealingLR, OneCycleLR, ReduceLROnPlateau
)
from config_manager.optimizer_config import OptimizerManagerConfig


class OptimizerManager:
    """Builds and drives a torch optimizer plus an optional LR scheduler from config."""

    def __init__(self, model: nn.Module, config: "OptimizerManagerConfig"):
        """
        Initialize the OptimizerManager.

        Args:
            model: torch.nn.Module whose parameters will be optimized.
            config: OptimizerManagerConfig carrying optimizer and scheduler settings.
        """
        self.model = model
        self.config = config
        # Build order matters: the scheduler wraps the freshly built optimizer.
        self.optimizer = self._build_optimizer()
        self.scheduler = self._build_scheduler()

    def _build_optimizer(self):
        """Create the optimizer named by ``config.optimizer.optimizer_type``.

        Returns:
            A torch.optim.Optimizer over all model parameters.

        Raises:
            ValueError: if the configured optimizer type is not supported.
        """
        cfg = self.config.optimizer
        params = self.model.parameters()

        opt_type = cfg.optimizer_type.lower()
        if opt_type == "sgd":
            return SGD(
                params,
                lr=cfg.learning_rate,
                momentum=cfg.momentum,
                weight_decay=cfg.weight_decay,
            )
        if opt_type in ("adam", "adamw"):
            # Adam and AdamW take an identical keyword signature; only the
            # class differs (AdamW decouples weight decay from the gradient),
            # so the two former duplicate branches are merged here.
            opt_cls = Adam if opt_type == "adam" else AdamW
            return opt_cls(
                params,
                lr=cfg.learning_rate,
                betas=cfg.betas,
                eps=cfg.eps,
                weight_decay=cfg.weight_decay,
                amsgrad=cfg.amsgrad,
            )
        if opt_type == "rmsprop":
            return RMSprop(
                params,
                lr=cfg.learning_rate,
                alpha=cfg.alpha,
                momentum=cfg.momentum,
                weight_decay=cfg.weight_decay,
                centered=cfg.centered,
            )
        raise ValueError(f"不支持的优化器类型: {cfg.optimizer_type}")

    def _build_scheduler(self):
        """Create the LR scheduler named by ``config.scheduler.scheduler_type``.

        Returns:
            A torch LR scheduler instance, or None when no scheduler is configured.

        Raises:
            ValueError: if the configured scheduler type is not supported.
        """
        sched_cfg = self.config.scheduler
        if not sched_cfg or not sched_cfg.scheduler_type:
            return None

        stype = sched_cfg.scheduler_type.lower()
        if stype == "steplr":
            return StepLR(
                self.optimizer,
                step_size=sched_cfg.step_size,
                gamma=sched_cfg.gamma,
                last_epoch=sched_cfg.last_epoch,
            )
        if stype == "multisteplr":
            # Fall back to a common ImageNet-style schedule when no milestones
            # are provided in the config.
            milestones = sched_cfg.milestones or [30, 60, 90]
            return MultiStepLR(
                self.optimizer,
                milestones=milestones,
                gamma=sched_cfg.gamma,
                last_epoch=sched_cfg.last_epoch,
            )
        if stype == "cosine":
            # NOTE(review): step_size is reused as T_max (the cosine period)
            # here — confirm the config intends that double duty.
            return CosineAnnealingLR(
                self.optimizer,
                T_max=sched_cfg.step_size,
                eta_min=sched_cfg.min_lr,
                last_epoch=sched_cfg.last_epoch,
            )
        if stype == "onecycle":
            max_lr = sched_cfg.max_lr or self.config.optimizer.learning_rate
            # TODO: steps_per_epoch is hard-coded to 1, so the cycle spans
            # `step_size` scheduler steps; wire in the real batches-per-epoch
            # from the training loop.
            return OneCycleLR(
                self.optimizer,
                max_lr=max_lr,
                total_steps=None,
                epochs=sched_cfg.step_size,
                steps_per_epoch=1,
            )
        if stype == "plateau":
            return ReduceLROnPlateau(
                self.optimizer,
                mode=sched_cfg.mode,
                factor=sched_cfg.factor,
                patience=sched_cfg.patience,
                min_lr=sched_cfg.min_lr,
            )
        raise ValueError(f"不支持的调度器类型: {sched_cfg.scheduler_type}")

    def step(self):
        """Apply one optimizer step, clipping the gradient norm first if configured."""
        cfg = self.config.optimizer
        # A falsy clip_grad_norm (None or 0) disables clipping.
        if cfg.clip_grad_norm:
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), cfg.clip_grad_norm)
        self.optimizer.step()

    def zero_grad(self):
        """Reset the gradients of all optimized parameters."""
        self.optimizer.zero_grad()

    def step_scheduler(self, metrics=None):
        """Advance the LR scheduler; no-op when none is configured.

        Args:
            metrics: validation metric required by ReduceLROnPlateau;
                ignored by every other scheduler type.
        """
        if self.scheduler is None:
            return
        if isinstance(self.scheduler, ReduceLROnPlateau):
            # Plateau scheduling reacts to a monitored metric, not an epoch count.
            self.scheduler.step(metrics)
        else:
            self.scheduler.step()

    def summary(self):
        """Print optimizer and scheduler information via the config object."""
        self.config.summary()