from pathlib import Path
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Generator, Union
import numpy as np
import re
from utils.overwatch import initialize_overwatch
from quantize.utils.tools import cleanup_memory

from quantize.utils.grads import _check_grad

logger = initialize_overwatch("vla_qat")

# --- Global QAT knobs ---
# When True, get_grad_scale() returns the flat CLIP_GRAD_RATE for every module
# (no per-layer decay).
CONST_GRAD_CLIP = True
# Referenced only in commented-out code paths below; kept for experiments.
USE_ONLINE_ACT_QUANT = False
# Fraction of the scale gradient that is let through in _fake_quant.
CLIP_GRAD_RATE = 0.05
# CLIPMIN = torch.finfo(torch.bfloat16).min * 1e
# Numerical bounds keeping quantization scales strictly positive and finite.
CLIPMIN = 1e-7
# CLIPMIN = 1e-8
CLIPMAX = 1e7
# CLIPMAX = torch.finfo(torch.bfloat16).max

# Extracts the numeric layer index from module names like "...layers.12.mlp...".
layer_regex = re.compile(r"\.(\d+?)\.")


@torch.compiler.disable
def grad_hook(x, name):
    """Attach a backward hook to *x* that routes its gradient through
    _check_grad for inspection; returns the removable hook handle."""

    def _inspect(grad, name=name, model=None, param=x):
        # model is intentionally always None here; _check_grad tolerates it.
        return _check_grad(grad, name, param, model)

    return x.register_hook(_inspect)


def get_quant_range(n_bits, symmetic=False, dtype="INT") -> tuple[int, int]:
    """Return the inclusive integer range (qmin, qmax) for *n_bits*.

    Signed ("INT", or any symmetric request) gives [-2^(b-1), 2^(b-1)-1];
    unsigned ("UINT") gives [0, 2^b - 1]. The parameter name `symmetic`
    [sic] is kept for caller compatibility.
    """
    if symmetic or dtype == "INT":
        half = 1 << (n_bits - 1)
        return -half, half - 1
    if dtype == "UINT":
        return 0, (1 << n_bits) - 1
    raise ValueError(f"Not support dtype {dtype}")


class RoundSTE(torch.autograd.Function):
    """Nearest-integer rounding with a straight-through estimator (STE):
    the backward pass treats round() as the identity."""

    @staticmethod
    def forward(ctx, x):
        # Forward: plain rounding (round-half-to-even, as torch.round does).
        return x.round()

    @staticmethod
    def backward(ctx, grad_output):
        # Backward: pass the gradient through unchanged.
        return grad_output


def round_ste(x: torch.Tensor) -> torch.Tensor:
    """Round *x* to integer values, passing gradients straight through."""
    return RoundSTE.apply(x)


class ClampSTE(torch.autograd.Function):
    """clamp(x, min, max) whose backward is a straight-through estimator:
    the gradient for x is passed through untouched, and the min/max
    boundaries receive no gradient."""

    @staticmethod
    def forward(ctx, x, min, max):
        return x.clamp(min, max)

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through gradient; min/max get None.
        return grad_output, None, None


def clamp_ste(x: torch.Tensor, min=None, max=None) -> torch.Tensor:
    """Clamp *x* into [min, max] with straight-through gradients for x."""
    return ClampSTE.apply(x, min, max)


# def round_ste(x: torch.Tensor) -> torch.Tensor:
#     """
#     Implement Straight-Through Estimator for rounding operation.
#     """
#     return (x.round() - x).detach() + x


# def clamp_ste(
#     x: torch.Tensor,
#     min: int | torch.Tensor | None = None,
#     max: int | torch.Tensor | None = None,
# ) -> torch.Tensor:
#     """
#     Implement Straight-Through Estimator for clamp operation.
#     """
#     return (x.clamp(min, max) - x).detach() + x


class FakeQuantizeSTEForSpin(torch.autograd.Function):
    """One-shot fake quantization with a pure straight-through backward:
    only x receives a gradient; scale/zero/qmin/qmax get none."""

    @staticmethod
    def forward(ctx, x, scale, zero, qmin, qmax):
        # Quantize: scale, shift by the zero point, clamp to the int range.
        q = (x / scale).round().add(zero).clamp(qmin, qmax)
        # Dequantize back to the real domain.
        return (q - zero) * scale

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output, None, None, None, None


class FakeQuantSTE(torch.autograd.Function):
    """
    A simplified, torch.compile-friendly fake-quantization autograd function.
    - Uses (damped) straight-through estimators for the x and s gradients.
    - Precomputes the scale-gradient factor in forward so backward stays cheap.
    """

    @staticmethod
    def forward(ctx, x, s, z, qmin, qmax):
        # Scale the input into the integer domain.
        x_scaled = x / s

        # Precompute the gradient factor for s, the only non-trivial term in
        # backward: grad_s_factor = round(x/s) - x/s.
        s_grad_factor = torch.round(x_scaled) - x_scaled

        x_to_clamp = torch.round(x_scaled) + z

        # True where the quantized value lies inside [qmin, qmax] (unclamped).
        z_mask = (x_to_clamp <= qmax) & (x_to_clamp >= qmin)

        assert not torch.isnan(x_scaled).any(), f"s:{s}"
        # assert not (x_scaled == 0).any(), f"s_grad_factor:{s_grad_factor}"
        # Stash what backward needs.
        ctx.save_for_backward(s, s_grad_factor, z_mask)

        # --- Standard fake-quantization forward ---
        xq = torch.clamp(x_to_clamp, qmin, qmax)

        x_d = (xq - z) * s

        return x_d

    @staticmethod
    def backward(ctx, grad_output):
        """
        Backward: multiply the upstream gradient by precomputed factors.
        NOTE(review): x and s gradients are blended 0.8 / 0.2 rather than the
        textbook STE factor of 1 — confirm this weighting is intentional.
        """
        (s, s_grad_factor, z_mask) = ctx.saved_tensors

        # Gradient w.r.t. x (damped straight-through).
        grad_x = grad_output * 0.8

        # Gradient w.r.t. s: grad_output * (round(x/s) - x/s), damped.
        grad_s = grad_output * s_grad_factor * 0.2

        # Gradient w.r.t. z: zero where unclamped, -s where clamped.
        # NOTE(review): this is the opposite of the usual LSQ+ convention
        # (z-grad flowing only in the clamped region is intended here?).
        grad_z = grad_output * torch.where(z_mask, 0.0, -1) * s

        # qmin and qmax are constants with no gradient.
        return grad_x, grad_s, grad_z, None, None


# 为了方便调用，我们可以将其包装成一个普通的函数
def fake_quant_fn(x, s, z, qmin, qmax):
    """Plain-function wrapper over FakeQuantSTE for convenient calling."""
    return FakeQuantSTE.apply(x, s, z, qmin, qmax)


def get_grad_scale(name):
    """Gradient clip rate for module *name*, or None when not clipped.

    With CONST_GRAD_CLIP set (the current default) every module gets the
    flat CLIP_GRAD_RATE. Otherwise only language-model layers are clipped:
    layers above index 21 use the base rate, shallower layers shrink it by
    a factor of 10 per layer below 22.
    """
    if CONST_GRAD_CLIP or "language_model.model.norm" in name:
        return CLIP_GRAD_RATE

    m = layer_regex.search(name)
    if m is None or "lang" not in name:
        return None
    idx = int(m.group(1))

    if idx > 21:
        return CLIP_GRAD_RATE
    return CLIP_GRAD_RATE * (10 ** (idx - 22))


class Quantizer(nn.Module):
    """Base class for quantizer

    Args:
        nn (_type_): _description_
    """

    @property
    def enable(self):
        return self._enable

    @enable.setter
    def enable(self, value):
        self._enable = value
        if not value and not hasattr(self, "_enable_hook"):
            self._enable_hook = self.register_forward_hook(
                lambda m, args, output: args[0]
            )
        elif value and hasattr(self, "_enable_hook"):
            self._enable_hook.remove()

    @torch.no_grad
    def fix_zero_point(self):
        if hasattr(self, "zero_point"):
            self.zero_point = (
                self.zero_point.clamp_(min=-1e4, max=1e4)
                .round_()
                .clamp_(self.qmin, self.qmax)
            )

    @torch.no_grad
    def save(self, tensor: torch.Tensor, name: str, dtype=torch.float16):
        if self.save_tensors:
            dir: Path = self.save_path
            dir.mkdir(exist_ok=True, parents=True)
            torch.save(
                (tensor.to(dtype) if torch.is_tensor(tensor) else tensor), dir / name
            )

    @torch.no_grad
    def save_quantizer(self):
        if hasattr(self, "save_path") and self.save_tensors:
            if hasattr(self, "scale"):
                self.save(self.scale.view(-1), "scale.pth")
            if hasattr(self, "zero_point"):
                self.save(
                    self.zero_point.round()
                    .clamp(self.qmin, self.qmax)
                    .view(-1)
                    .squeeze(),
                    "zeropoint.pth",
                )

    def qat_init_params(self, activate: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def get_reduced_dims(x: torch.Tensor, arg: str):
        dims = tuple(range(x.dim()))
        match arg:
            case "per_tensor":
                return dims
            case "per_channel_weight":
                return 1
            case "per_channel_activation":
                return dims[:-1]
            case "per_token":
                return -1
            case "per_channel_embedding":
                return 0
            case "per_head_softmax":
                return (0, 2, 3)

    @torch.no_grad
    def cal_quant_range(self):
        info = dict(
            scale_min=self.scale.double().min(),
            scale_max=self.scale.double().max(),
            scale_mean=self.scale.double().mean(),
            scale_abs_min=self.scale.double().abs().min(),
            scale_abs_max=self.scale.double().abs().max(),
            scale_abs_mean=self.scale.double().abs().mean(),
        )
        return info

    @staticmethod
    @torch.no_grad
    def qat_calibration_quantizer(model: torch.nn.Module, cal_dataset):
        raise NotImplementedError


class LSQPlusQuantizer(Quantizer):
    def __init__(
        self,
        n_bits: int = 8,
        symmetric: bool = False,
        zeropoint="per_tensor",
        scale="per_tensor",
        grad_factor=0.5,  # LSQ gradient rescaling factor
        dtype="INT",
        enable=True,
        no_clamp_init=True,
        quant_shape=None,
        compile=True,
        is_nonlinear=False,
        **kwargs,
    ):
        """LSQ+-style learnable fake quantizer.

        Args:
            n_bits: bit-width, must be in [2, 16].
            symmetric: use the symmetric scheme (zero point fixed at 0).
            zeropoint: zero-point granularity ("per_tensor", "per_token", ...).
            scale: scale granularity ("per_tensor", "per_channel_weight", ...).
            grad_factor: LSQ gradient rescaling factor.
            dtype: "INT" (signed) or "UINT" (unsigned) target range.
            enable: when False a bypass hook makes this an identity module.
            no_clamp_init: skip clamping during initialization.
            quant_shape: optional (reshape_fn, restore_fn) pair applied
                around the quantization op.
            compile: request torch.compile of the quantizer (currently a
                no-op; see qat_init_params).
            is_nonlinear: marks quantizers placed after nonlinearities.
        """
        super().__init__()
        assert zeropoint in [
            "per_tensor",
            "per_channel_weight",
            "per_channel_activation",
            "per_token",
            "per_channel_embedding",
        ], zeropoint

        assert scale in [
            "per_tensor",
            "per_channel_weight",
            "per_channel_activation",
            "per_token",
            "per_channel_embedding",
            "per_head_softmax",
        ], scale

        self.name: str = "LSQPlusQuantizer"
        self.symmetric = symmetric
        assert 2 <= n_bits <= 16, "bitwidth not supported"
        self.n_bits = n_bits
        self.qmin, self.qmax = get_quant_range(n_bits, symmetric, dtype=dtype)

        self.scale_arg: str = scale
        self.zeropoint_arg: str = zeropoint

        self.grad_factor = grad_factor
        self.no_clamp_init = no_clamp_init

        self.round_zero_point = None
        self.save_tensors = False
        # NOTE(review): `self.enable = enable` already sets `_enable` through
        # the property setter; the explicit assignment below is redundant.
        self.enable = enable
        self._enable = enable

        # Real quant params are installed later by qat_init_params.
        self.uninitialized = True
        self.quant_shape = quant_shape

        self.scale: torch.nn.Parameter | torch.Tensor = None
        self.zero_point: torch.nn.Parameter | torch.Tensor = None

        self.compile_quantizer = compile

        # Identity forward until calibration binds a real variant.
        self.forward = self.forward_fake
        self.is_weight = "weight" in self.scale_arg
        self.is_nonlinear = is_nonlinear

    def extra_repr(self) -> str:
        """Summarize the quantizer configuration for module printouts."""
        parts = [
            f"n_bits={self.n_bits}",
            f"symmetric={self.symmetric}",
            f"scale={self.scale_arg}",
            f"zeropoint={self.zeropoint_arg}",
            f"grad_factor={self.grad_factor}",
            f"enable={self.enable}",
        ]

        # Non-default settings are appended only when they deviate.
        if self.quant_shape is not None:
            parts.append(f"quant_shape={tuple(self.quant_shape)}")
        if not self.no_clamp_init:
            parts.append(f"no_clamp_init={self.no_clamp_init}")
        if not self.compile_quantizer:
            parts.append(f"compile={self.compile_quantizer}")

        return ", ".join(parts)

    @torch.no_grad()
    def find_quant_params(
        self, activate: torch.Tensor
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Compute (scale, zero_point) for *activate* from its min/max range.

        Statistics are taken in bfloat16. For the symmetric scheme the zero
        point is fixed at 0; otherwise it is derived from the min over the
        zero point's own reduction dims.
        """
        activate_bf16 = activate.to(torch.bfloat16)

        scale_reduced_dims = Quantizer.get_reduced_dims(
            activate_bf16, self.scale_arg
        )  # per_tensor reduces over every dim (full flatten)
        zero_point_reduced_dims = Quantizer.get_reduced_dims(
            activate_bf16, self.zeropoint_arg
        )  # per_tensor -> all dims; per_channel_activation -> all but the last dim

        xmin = activate_bf16.amin(dim=scale_reduced_dims, keepdim=True)  # [Do,1]
        xmax = activate_bf16.amax(dim=scale_reduced_dims, keepdim=True)

        if self.symmetric:
            abs_max = torch.max(xmax.abs(), xmin.abs())
            # NOTE(review): divides by (qmax - 1), not qmax — confirm intended
            # (the same convention is used in init_activate_quant).
            scale = abs_max / (self.qmax - 1)
            scale: torch.Tensor = clamp_ste(scale, min=CLIPMIN, max=CLIPMAX)
            zero_point: torch.Tensor = torch.tensor(0.0)
        else:
            range_ = xmax - xmin
            scale = range_ / (self.qmax - self.qmin)
            scale: torch.Tensor = clamp_ste(scale, min=CLIPMIN, max=CLIPMAX)

            # When the scale is per-tensor but the zero point is finer-grained,
            # recompute the min over the zero point's reduction dims.
            if self.scale_arg == "per_tensor" and self.zeropoint_arg != "per_tensor":
                xmin = activate_bf16.amin(zero_point_reduced_dims, keepdim=True)

            zero_point: torch.Tensor = round_ste(
                clamp_ste((self.qmin - (xmin).div(scale)), min=self.qmin, max=self.qmax)
            )
        return scale, zero_point

    def fast_int_time_quant(self, activate: torch.Tensor) -> torch.Tensor:
        """Fake-quantize *activate* with freshly computed (dynamic) params."""
        scale, zero_point = self.find_quant_params(activate)
        scale = scale.to(activate)
        zero_point = zero_point.to(activate)
        return FakeQuantizeSTEForSpin.apply(
            activate, scale, zero_point, self.qmin, self.qmax
        )

    def cal_minmax(
        self, activate: torch.Tensor
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor | None]:
        """Return (xmin, xmax, xmin_zp) reduction statistics for *activate*.

        ``xmin_zp`` is populated only when the scale is per-tensor but the
        zero point uses a finer granularity; otherwise it is None.
        (Return annotation fixed: this has always returned a 3-tuple.)
        """
        if self.quant_shape is not None:
            # Apply the configured reshape before computing statistics.
            activate = self.quant_shape[0](activate)
        scale_reduced_dims = Quantizer.get_reduced_dims(
            activate, self.scale_arg
        )  # per_tensor reduces over every dim (full flatten)
        if self.scale_arg == "per_tensor" and self.zeropoint_arg != "per_tensor":
            zero_point_reduced_dims = Quantizer.get_reduced_dims(
                activate, self.zeropoint_arg
            )  # per_tensor -> all dims; per_channel_activation -> all but the last dim
            xmin_zp = activate.amin(zero_point_reduced_dims, keepdim=True)
        else:
            xmin_zp = None

        xmin = activate.amin(dim=scale_reduced_dims, keepdim=True)  # [Do,1]
        xmax = activate.amax(dim=scale_reduced_dims, keepdim=True)
        return xmin, xmax, xmin_zp

    def init_activate_quant(
        self, xmin, xmax, xmin_zp
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Derive initial (log2-scale, zero-point) for an activation range."""
        # Slightly shrink the observed range to soften outliers.
        xmin = xmin * 0.95
        xmax = xmax * 0.95
        if self.symmetric:
            abs_max = torch.max(xmax.abs(), xmin.abs())
            scale = (abs_max / (self.qmax - 1)).clamp_(min=CLIPMIN, max=CLIPMAX)
            zero_point = torch.tensor(0.0, requires_grad=False)
            # Empirical shrink for layernorm outputs with very wide ranges.
            if "layernorm" in self.name and (scale > 3).all():
                scale /= 3.125
        else:
            scale = ((xmax - xmin) / (self.qmax - self.qmin)).clamp_(
                min=CLIPMIN, max=CLIPMAX
            )

            # The separate zero-point min only applies when the scale is
            # per-tensor but the zero point is finer-grained.
            if not (
                self.scale_arg == "per_tensor" and self.zeropoint_arg != "per_tensor"
            ):
                xmin_zp = xmin

            zero_point = (self.qmin - xmin_zp / scale).round_().clamp_(
                self.qmin, self.qmax
            )
        # Scale is optimized in log2 space for numerical stability.
        return torch.log2(scale.double()), zero_point

    def init_weight_quant(
        self, xmin, xmax, xmin_zp, activate
    ) -> tuple[torch.Tensor, torch.Tensor]:
        """Grid-search a shrink factor for the best weight quant params.

        Tries shrink factors p in {1.0, 0.901, ..., 0.109} on [xmin, xmax],
        fake-quantizes *activate* with each candidate and keeps, per row,
        the parameters with the lowest summed squared error. Returns
        (log2(scale), zero_point).

        NOTE(review): tensors are created on "cuda" unconditionally and
        ``xmin_zp`` is unused here — confirm both are intentional.
        """
        assert len(xmin.shape) == len(xmax.shape) == 2
        best = torch.full(
            xmax.shape,
            torch.finfo(torch.bfloat16).max,
            dtype=torch.bfloat16,
            device="cuda",
        )
        scale = torch.rand(xmax.shape, dtype=torch.bfloat16, device="cuda")
        zero_point = torch.zeros(xmax.shape, dtype=scale.dtype, device="cuda")

        for i in range(0, 990, 99):
            p = 1 - i / 1000
            cur_xmin = xmin * p
            cur_xmax = xmax * p
            if self.symmetric:
                abs_max = torch.max(cur_xmin.abs(), cur_xmax.abs())
                cur_scale = abs_max.div_(self.qmax - 1)
                cur_scale = cur_scale.clamp_(min=CLIPMIN, max=CLIPMAX)
                cur_zero_point = zero_point

            else:
                range_ = cur_xmax - cur_xmin
                cur_scale = range_.div_(self.qmax - self.qmin)
                cur_scale = cur_scale.clamp_(min=CLIPMIN, max=CLIPMAX)
                # NOTE(review): uses the *unshrunk* xmin (not cur_xmin) for
                # the zero point while the scale uses the shrunk range —
                # confirm this asymmetry is intentional.
                cur_zero_point = (
                    (self.qmin - (xmin) / cur_scale)
                    .round_()
                    .clamp_(self.qmin, self.qmax)
                )

            q = self._fake_quant(
                activate, cur_scale, cur_zero_point, qmin=self.qmin, qmax=self.qmax
            )
            # Per-row reconstruction error (summed squared error).
            err = torch.nn.functional.mse_loss(q, activate, reduction="none").sum(
                dim=1, keepdim=True
            )
            tmp = err < best
            if torch.any(tmp):
                best[tmp] = err[tmp]
                scale[tmp] = cur_scale[tmp]
                zero_point[tmp] = cur_zero_point[tmp]
        assert not torch.any(scale <= 0.0), self.name
        return torch.log2(scale.double()), zero_point

    def qat_init_params(self, scale, zero_point, x_dtype=torch.bfloat16) -> None:
        """Install calibrated (log2-)scale / zero-point as trainable state.

        The scale is stored in double precision as a trainable parameter.
        For the symmetric scheme the zero point becomes a fixed buffer;
        otherwise it is a trainable parameter in *x_dtype*. Finally the
        matching forward variant is bound, replacing forward_fake.
        """
        self.scale = nn.Parameter(scale.to(torch.double), requires_grad=True)
        if not self.symmetric:
            self.zero_point = nn.Parameter(zero_point.to(x_dtype), requires_grad=True)
        else:
            # Re-register as a non-trainable buffer so it moves with the module.
            delattr(self, "zero_point")
            self.register_buffer("zero_point", zero_point.to(x_dtype))

        if self.compile_quantizer:
            # torch.compile of the forward variants is currently disabled.
            pass

        if self.enable:
            if self.symmetric:
                self.forward = (
                    lambda x, *args, **kwargs: self.forward_without_reshape_symmetric(x)
                )
            elif self.quant_shape is not None:
                self.forward = lambda x, *args, **kwargs: self.forward_with_reshape(x)
            else:
                self.forward = lambda x, *args, **kwargs: self.forward_without_reshape(
                    x
                )

    def forward_fake(self, x: torch.Tensor, *args, **kwargs) -> torch.FloatTensor:
        """Identity pass-through used before the quantizer is initialized."""
        return x

    def forward_for_save(self, x: torch.Tensor, *args, **kwargs) -> torch.FloatTensor:
        """Fake-quantize *x* via the save variant (also produces x_int)."""
        x_shape = x.shape
        if self.quant_shape is not None:
            x = self.quant_shape[0](x)

        # Scale is learned in log2 space; materialize it in bf16.
        scale = torch.pow(2, self.scale).bfloat16()

        zero_point = clamp_ste(torch.round(self.zero_point), self.qmin, self.qmax)

        x_dequant, x_int = self._fake_quant_for_save(
            x, scale, zero_point, self.qmin, self.qmax
        )
        if self.quant_shape is not None:
            # Restore the caller's original shape.
            x_dequant = self.quant_shape[1](x_dequant, x_shape)
        assert torch.is_tensor(x_dequant), f"{self.name} {x_dequant} "
        return x_dequant

    # @torch.compile(fullgraph=True, mode="max-autotune-no-cudagraphs")
    def forward_with_reshape(
        self, x: torch.Tensor, *args, **kwargs
    ) -> torch.FloatTensor:
        """Fake-quantize *x*, applying quant_shape reshape around the op."""
        # Fixed gradient-damping rate for the scale (see _fake_quant).
        grad_clip_rate = CLIP_GRAD_RATE

        x_shape = x.shape
        x_reshape = self.quant_shape[0](x)
        # Scale is learned in log2 space; zero point is rounded with STE.
        scale = torch.pow(2.0, self.scale).bfloat16()
        zero_point = clamp_ste(round_ste(self.zero_point), self.qmin, self.qmax)

        x_dequant = self._fake_quant(
            x_reshape, scale, zero_point, self.qmin, self.qmax, grad_clip_rate
        )

        # Restore the caller's original shape.
        x_dequant = self.quant_shape[1](x_dequant, x_shape)
        return x_dequant

    # @torch.compile(fullgraph=True, mode="max-autotune-no-cudagraphs")
    def forward_without_reshape(
        self, x: torch.Tensor, *args, **kwargs
    ) -> torch.FloatTensor:
        """Fake-quantize *x* directly (asymmetric scheme, no reshape)."""
        # Fixed gradient-damping rate for the scale (see _fake_quant).
        grad_clip_rate = CLIP_GRAD_RATE

        # Scale is learned in log2 space; zero point is rounded with STE.
        scale = torch.pow(2.0, self.scale).bfloat16()
        zero_point = clamp_ste(round_ste(self.zero_point), self.qmin, self.qmax)

        x_dequant = self._fake_quant(
            x, scale, zero_point, self.qmin, self.qmax, grad_clip_rate
        )

        return x_dequant

    # @torch.compile(fullgraph=True, mode="max-autotune-no-cudagraphs")
    def forward_without_reshape_symmetric(
        self, x: torch.Tensor, *args, **kwargs
    ) -> torch.FloatTensor:
        """Fake-quantize *x* with the symmetric scheme (fixed zero point)."""
        # The damping rate can be overridden per-call via kwargs["grad_clip"].
        grad_clip_rate = kwargs.get("grad_clip", CLIP_GRAD_RATE)
        # Scale is learned in log2 space; the zero-point buffer stays fixed.
        scale = torch.pow(2.0, self.scale).bfloat16()
        zero_point = self.zero_point

        x_dequant = self._fake_quant(
            x, scale, zero_point, self.qmin, self.qmax, grad_clip_rate
        )

        return x_dequant

    # @torch.compile(fullgraph=True, mode="reduce-overhead")
    @staticmethod
    def _fake_quant(x, scale, zero_point, qmin, qmax, grad_clip_rate=CLIP_GRAD_RATE):
        """Core fake-quantization with a damped gradient for *scale*.

        The reparameterization below leaves the forward value of ``scale``
        unchanged but multiplies its gradient by ``grad_clip_rate``:
        forward: (scale - r*scale).detach() + r*scale == scale;
        backward: only the r*scale term carries gradient.
        """
        scale_grad = (
            torch.tensor(grad_clip_rate, device=scale.device, dtype=torch.bfloat16)
            * scale
        )
        scale = (scale - scale_grad).detach() + scale_grad

        # QAT fake-quantization: round (STE), shift, clamp, dequantize.
        x_int = round_ste(x / scale)  # integer-valued, still a float tensor

        x_int_zp = x_int + zero_point

        # Hard clamp (no STE): out-of-range positions get no x-gradient.
        x_int_clamp = torch.clamp(x_int_zp, qmin, qmax)

        x_dequant: torch.Tensor = (x_int_clamp - zero_point) * scale

        return x_dequant

    @staticmethod
    def _fake_quant_for_save(x, scale, zero_point, qmin, qmax):
        """Fake-quantize *x* and also return the clamped integer tensor
        (used when dumping quantizer state to disk)."""
        q = round_ste(x / scale) + zero_point
        q = torch.clamp(q, qmin, qmax)
        dequant: torch.Tensor = (q - zero_point) * scale
        return dequant, q

    @staticmethod
    @torch.no_grad()
    def qat_calibration_quantizer(
        model: torch.nn.Module,
        cal_datasets,
        len_cal_dataset: int,
        subfix: str,
        num_bins: int = 8192,
        quantiles=0.95,
    ):
        """Calibrate every uninitialized LSQPlusQuantizer in *model*.

        Rank 0 runs the calibration set through the model with forward hooks
        recording per-quantizer min/max (and, for weight quantizers, the
        quantized tensor itself), then caches the results under ./cache.
        All ranks wait at a barrier, reload the cache and initialize their
        quantizers from it.

        NOTE(review): ``num_bins`` / ``quantiles`` are currently unused
        (leftovers of a removed histogram-based second stage).
        NOTE(review): torch.distributed.barrier documents ``device_ids`` as a
        list of ints; a bare int is passed here — confirm against the torch
        version in use.
        """
        nn_modules = [
            m
            for m in model.modules()
            if isinstance(m, LSQPlusQuantizer) and m.uninitialized
        ]
        cache_dir = Path.cwd() / "cache"
        minmax_cache_path = cache_dir / f"minmax{subfix}.pt"
        act_cache_path = cache_dir / f"act{subfix}.pt"
        if minmax_cache_path.exists() and act_cache_path.exists():
            logger.info(f"ptq cache detected in {cache_dir}")
            minmax_dict = torch.load(minmax_cache_path, weights_only=False)
            act_dict = torch.load((act_cache_path), weights_only=False)
        elif torch.distributed.get_rank() == 0:
            logger.info("starting ptq")

            minmax_dict = dict()
            act_dict = dict()
            bin_width_dict = dict()

            def _hook(module: LSQPlusQuantizer, args, kwargs, output):
                # Record running min/max (and raw input for weight quantizers).
                x: torch.Tensor = args[0]
                xmin, xmax, xmin_zp = module.cal_minmax(x)
                if module.is_weight and module.name not in act_dict:
                    act_dict[module.name] = x.detach().cpu()

                if module.name not in minmax_dict:
                    minmax_dict[module.name] = (
                        xmin.detach().cpu(),
                        xmax.detach().cpu(),
                        (xmin_zp.detach().cpu() if xmin_zp is not None else None),
                    )
                elif not module.is_weight:
                    # Activations: widen the running stats across batches.
                    minmax_dict[module.name] = (
                        torch.min(minmax_dict[module.name][0], xmin.detach().cpu()),
                        torch.max(minmax_dict[module.name][1], xmax.detach().cpu()),
                        (
                            torch.min(
                                minmax_dict[module.name][2], xmin_zp.detach().cpu()
                            )
                            if xmin_zp is not None
                            else None
                        ),
                    )
                return x

            hooks = []
            for m in nn_modules:
                hooks.append(m.register_forward_hook(_hook, with_kwargs=True))
            cal_dataset = cal_datasets[0]
            for i in logger.tqdm(cal_dataset, total=len_cal_dataset, desc="PTQ cal"):
                i.pop("labels")
                model(**(i))

            for hook in hooks:
                hook.remove()

            # (A histogram-based "Stage2" refinement used to live here; it is
            # currently disabled.)

            cache_dir.mkdir(exist_ok=True, parents=True)
            torch.save(minmax_dict, minmax_cache_path)
            torch.save(act_dict, act_cache_path)
        torch.distributed.barrier(device_ids=torch.distributed.get_rank())
        # Every rank (including rank 0) reloads from the shared cache.
        minmax_dict = torch.load(minmax_cache_path, weights_only=False)
        act_dict = torch.load((act_cache_path), weights_only=False)
        for m in logger.tqdm(nn_modules, desc="initialing quantizer"):
            m: LSQPlusQuantizer = m
            name = m.name
            if (minmax := minmax_dict.get(name)) is not None:
                if m.is_weight:
                    s, z = m.init_weight_quant(
                        minmax[0].cuda(),
                        minmax[1].cuda(),
                        minmax[2].cuda() if minmax[2] is not None else None,
                        act_dict[name].cuda(),
                    )
                else:
                    s, z = m.init_activate_quant(
                        minmax[0].cuda(),
                        minmax[1].cuda(),
                        minmax[2].cuda() if minmax[2] is not None else None,
                    )
                m.qat_init_params(s, z)
                m.uninitialized = False

    @staticmethod
    @torch.no_grad()
    def stat_quantizer(model: torch.nn.Module, data_batch):
        """Run one batch through *model* and dump per-quantizer statistics
        (input, fake-quantized output, MSE, scale, zero point) to ./stat."""
        quantizers = [
            m
            for m in model.modules()
            if isinstance(m, LSQPlusQuantizer) and not m.uninitialized
        ]
        out_dir = Path.cwd() / "stat"
        out_dir.mkdir(exist_ok=True, parents=True)
        stats = {}

        def _hook(module: LSQPlusQuantizer, args, kwargs, output):
            x: torch.Tensor = args[0]
            name = module.name
            assert module.scale is not None, name
            logger.info(f"stat {name}...")
            stats[name] = dict(
                act=x.detach().cpu(),
                act_fq=output.detach().cpu(),
                loss=torch.nn.functional.mse_loss(output, x).detach().cpu(),
                scale=module.scale.detach().cpu(),
                zp=module.zero_point.detach().cpu(),
            )
            return x

        handles = [
            m.register_forward_hook(_hook, with_kwargs=True) for m in quantizers
        ]
        logger.info("collect quantizer statics...")
        cleanup_memory()
        model(**data_batch)

        for handle in handles:
            handle.remove()
        save_path = out_dir / f"stat_{str(str(time.time())[:8])}.pt"
        torch.save(stats, save_path)
        logger.info(f"save statics to {save_path}")


def create_quantizer(**kwargs):
    """Factory: build an LSQPlusQuantizer from keyword configuration."""
    return LSQPlusQuantizer(**kwargs)
