from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from quantize.quantizer import create_quantizer, CLIP_GRAD_RATE
from quantize.base import QuantModuleNoLinear

try:
    raise ImportError
    from liger_kernel.transformers.functional import liger_rms_norm

    # @torch.compiler.disable
    def rms_norm(x, w, eps):
        return liger_rms_norm(x, w, eps)
except ImportError:

    def rms_norm(x, w, eps):
        return F.rms_norm(x, x.shape[-1:], w, eps)


class QuantRMSNorm(QuantModuleNoLinear):
    """
    RMSNorm module with optional activation quantization.

    Wraps an ``nn.RMSNorm`` and can run either the floating-point path
    (optionally fake-quantizing input and/or output activations) or a fully
    integer-only simulation (:meth:`rmsnorm_integer`).  Both quantized paths
    are disabled by default; they are driven by the ``use_act_quant`` and
    ``integer_only`` flags.
    """

    def __init__(
        self,
        org_module: nn.RMSNorm,
        input_quant_params: dict | None = None,
        output_quant_params: dict | None = None,
    ):
        """
        Args:
            org_module: the floating-point RMSNorm being wrapped; its weight
                and epsilon are reused here.
            input_quant_params: kwargs forwarded to ``create_quantizer`` for
                the input quantizer (``None`` means no extra kwargs).
                Previously a mutable ``{}`` default; ``None`` is equivalent
                and safe.
            output_quant_params: kwargs for an optional output quantizer;
                ``None`` disables output quantization entirely.
        """
        super().__init__(org_module)
        self.weight = org_module.weight

        # Number of normalized channels (length of the gain vector).
        self.channel_dim = self.weight.shape[0]  # type: ignore

        # NOTE(review): assumes org_module exposes ``variance_epsilon``
        # (HF-style RMSNorm) rather than ``eps`` — confirm against callers.
        self.eps: float = org_module.variance_epsilon  # type: ignore

        # Quantized forwards are off by default; enabled via quant-state
        # setters on the base module.
        self.integer_only = False
        self.use_act_quant = False

        # Input quantizer is always created; output quantizer only on demand.
        self.in_quantizer = create_quantizer(
            **(input_quant_params or {}),
            channel_dim=self.channel_dim,
            is_nonlinear=True,
        )
        if output_quant_params is not None:
            self.out_quantizer = create_quantizer(
                **output_quant_params, channel_dim=self.channel_dim, is_nonlinear=True
            )
        else:
            self.out_quantizer = None

        self.save_tensors = False
        # M: number of fractional bits carried through the integer-only path.
        self.M = 8
        # Gradient-clipping rates, scaled by the hidden dimension.
        self.grad_clip = 1.0 / int(self.weight.shape[-1])
        self.grad_clip_ch = 1e-1 / int(self.weight.shape[-1])
        self.grad_clip_const = CLIP_GRAD_RATE * 1e-1

    @torch.compiler.disable
    def rmsnorm_integer(
        self,
        x_fp: torch.Tensor,
        scaling_factor: torch.Tensor,
        weight: torch.Tensor,
        eps=1e-6,
        **kwargs,
    ):
        """
        Integer-only simulation of RMSNorm.

        The input is quantized to INT8 using ``scaling_factor``, the mean of
        squares is accumulated in (simulated) INT32, its square root is taken
        with an integer Newton iteration, and the result is rescaled by
        ``weight / 2**M``.  All arithmetic runs in float64 so every integer
        value is represented exactly.

        Args:
            x_fp: floating-point input activations.
            scaling_factor: input quantization scale; the input zero-point
                must be 0 (symmetric quantization) — asserted below.
            weight: RMSNorm gain vector.
            eps: floor used to avoid division by zero in the integer sqrt.
            **kwargs: ignored; accepted for call-site compatibility
                (e.g. ``output_int``).

        Returns:
            float32 tensor approximating ``rms_norm(x_fp, weight, eps)``.
        """
        M = self.M
        # The integer math below assumes symmetric quantization.
        assert torch.all(self.in_quantizer.zero_point == 0), (
            self.in_quantizer.zero_point
        )

        # float64 keeps integers below 2**53 exact.
        x_fp, scaling_factor = x_fp.double(), scaling_factor.double()

        def int_quant(x, bits=8):
            # Round-to-nearest, then saturate to the signed `bits`-bit range.
            x_int = torch.round(x)
            qmin = -(2 ** (bits - 1))
            qmax = 2 ** (bits - 1) - 1
            return torch.clamp_(x_int, qmin, qmax)  # type: ignore

        def fixed(x, frac_bit, total_bit):
            # Fixed-point quantization with `frac_bit` fractional bits.
            return int_quant(x * (2**frac_bit), total_bit) / (2**frac_bit)

        # INT8 representation of the input.
        x_int = torch.round(x_fp / scaling_factor).clamp_(-128, 127).double()

        # 1/dim as a 16-bit fixed-point constant.
        inv_dim = fixed(1 / torch.tensor(x_fp.shape[-1]), 16, 16)

        # Mean of squares, pre-scaled by 2**8, saturated to INT32.
        var_int = int_quant(
            torch.sum(x_int**2, dim=-1, keepdim=True) * inv_dim * 2**8, 32
        )

        def int_sqrt(var_int):
            # Integer Newton iteration approximating floor(sqrt(var_int)).
            # Positions with ~zero variance are forced to `eps` afterwards to
            # avoid a later division by zero.
            zero_pos = var_int < eps
            # Start from a power-of-two estimate of sqrt(var_int).
            k = 2 ** torch.ceil(torch.log2(var_int) / 2)
            for _ in range(10):
                k = torch.clamp(k, 1e-4, None)
                k = torch.floor((k + torch.floor(var_int / k)) / 2)
            k[zero_pos] = eps
            return k

        # sqrt(var * 2**8) / 2**4 == sqrt(var): undo the 2**8 pre-scaling.
        std_int = int_sqrt(var_int).double() / 2**4

        # x / std carried with M fractional bits.
        y_int = torch.round(x_int * (2**M) / (std_int))

        # Fold the 2**M factor into the gain.
        w_div_2M = weight / (2**M)

        output = y_int * w_div_2M
        return output.float()

    def quant_input(self, x):
        """Fake-quantize the input activation when activation quant is on."""
        if self.use_act_quant:
            x: torch.FloatTensor = self.in_quantizer(x, grad_clip=self.grad_clip)
        return x

    def quant_output(self, x):
        """Fake-quantize the output activation when activation quant is on.

        Callers must ensure ``self.out_quantizer`` is not ``None``.
        """
        if self.use_act_quant:
            x: torch.FloatTensor = self.out_quantizer(x)
        return x

    def rms_norm_fc(self, x):
        """Floating-point RMSNorm using this module's weight and epsilon."""
        return rms_norm(x, self.weight, self.eps)

    def f(
        self,
        input: torch.Tensor,
        quant_input=True,
        quant_output=False,
        output_int=True,
        output_residual=False,
        **kwargs,
    ):
        """
        Forward pass.

        Dispatches to the integer-only simulation when ``self.integer_only``
        is set, otherwise runs the (optionally fake-quantized) float path.

        Args:
            input: activations to normalize.
            quant_input / quant_output / output_int: retained for call-site
                compatibility; actual behavior is driven by
                ``self.use_act_quant`` / ``self.integer_only`` and by whether
                ``self.out_quantizer`` exists.
            output_residual: when True, also return the (possibly quantized)
                input so callers can reuse it on a residual branch.

        Returns:
            The normalized output — and the residual input when requested —
            cast back to the input dtype.
        """
        org_dtype = input.dtype

        if self.integer_only:
            # The input quantizer stores its scale as log2, hence 2**scale.
            out = self.rmsnorm_integer(
                input,
                torch.pow(2.0, self.in_quantizer.scale).bfloat16(),
                self.weight,
                self.eps,
                output_int=output_int,
            )
            if getattr(self, "out_quantizer", None) is not None:
                out = self.quant_output(out)
        else:
            # Output quantization only applies when an output quantizer was
            # actually constructed.
            quant_output = (
                hasattr(self, "out_quantizer") and self.out_quantizer is not None
            )
            input = self.quant_input(input)
            out = self.rms_norm_fc(input)
            if quant_output:
                out = self.quant_output(out)

        if output_residual:
            return out.to(org_dtype), input.to(org_dtype)
        return out.to(org_dtype)
