import torch


class FP32Optimizer:
    """Thin wrapper around a native PyTorch optimizer for pure-FP32 training.

    Exposes the same interface as mixed-precision optimizer wrappers
    (a loss scale, a ``step`` that returns ``(success, grad_norm,
    num_zeros_in_grad)``) so callers can treat the FP32 and
    mixed-precision paths uniformly.
    """

    def __init__(self,
                 optimizer: torch.optim.Optimizer,
                 params_have_main_grad: bool,
                 use_contiguous_buffers_in_local_ddp: bool = True):
        """
        Args:
            optimizer: a native PyTorch optimizer instance (e.g. Adam).
            params_have_main_grad: whether parameters carry a separate
                ``main_grad`` attribute that should be used as the
                gradient during ``step`` (presumably filled by a DDP
                grad reducer — confirm against the caller).
            use_contiguous_buffers_in_local_ddp: whether contiguous
                gradient buffers are used by local DDP; kept for
                interface compatibility.
        """
        self.optimizer = optimizer
        self.params_have_main_grad = params_have_main_grad
        self.use_contiguous_buffers_in_local_ddp = use_contiguous_buffers_in_local_ddp

        # Dummy scale for interface compatibility: FP32 training never
        # scales the loss.  NOTE: the legacy `torch.cuda.FloatTensor`
        # constructor is deprecated and crashes on CPU-only builds, so
        # allocate on whichever device is actually available.
        self._scale = torch.tensor(
            [1.0], device="cuda" if torch.cuda.is_available() else "cpu")

    def zero_grad(self, set_to_none: bool = True):
        """Clear gradients of all managed parameters.

        Args:
            set_to_none: if True, drop the grad tensors entirely
                (saves memory and lets the next backward allocate
                fresh ones); otherwise zero them in place.
        """
        for group in self.optimizer.param_groups:
            for param in group['params']:
                if param.grad is None:
                    continue
                if set_to_none:
                    param.grad = None
                else:
                    # Detach from the autograd graph before zeroing so the
                    # in-place op cannot affect any recorded history.
                    if param.grad.grad_fn is not None:
                        param.grad.detach_()
                    else:
                        param.grad.requires_grad_(False)
                    param.grad.zero_()

    def get_loss_scale(self):
        """Return the (constant 1.0) loss scale; FP32 never scales the loss."""
        return self._scale

    @torch.no_grad()
    def step(self, args=None, timers=None):
        """Run one optimizer update (no grad clipping, no zero counting).

        Args:
            args, timers: unused; accepted for interface compatibility.

        Returns:
            Tuple ``(update_successful, grad_norm, num_zeros_in_grad)``;
            the last two are always ``None`` for the FP32 path.
        """
        # 1. Point param.grad at main_grad (when the DDP reducer
        #    accumulates into a separate main_grad attribute).
        if self.params_have_main_grad:
            for param_group in self.optimizer.param_groups:
                for param in param_group['params']:
                    if hasattr(param, 'main_grad'):
                        param.grad = param.main_grad

        # 2. Plain optimizer update — no gradient clipping.
        self.optimizer.step()

        # 3. Fixed return format (grad_norm and num_zeros_in_grad are None).
        return True, None, None

    def state_dict(self):
        """Return the wrapped optimizer's state dict."""
        return self.optimizer.state_dict()

    def load_state_dict(self, state_dict):
        """Load a state dict into the wrapped optimizer."""
        self.optimizer.load_state_dict(state_dict)

    def reload_model_params(self):
        """No-op; exists for interface compatibility."""
        pass