from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from quantize.quantizer import (
    create_quantizer,
    round_ste,
    CLIPMIN,
    CLIPMAX,
    CLIP_GRAD_RATE,
)
from quantize.base import QuantModuleNoLinear

try:
    # Prefer the fused liger kernel when available; fall back to the
    # standard PyTorch implementation otherwise.
    # NOTE(review): the original code had a debug `raise ImportError("xxxx....")`
    # here that unconditionally disabled the liger path — removed.
    from liger_kernel.transformers.functional import liger_layer_norm

    def layer_norm(x, w, b, eps):
        """Fused LayerNorm via liger kernel (same contract as F.layer_norm)."""
        return liger_layer_norm(x, w, b, eps)
except ImportError:

    def layer_norm(x, w, b, eps):
        """LayerNorm over the last dimension of ``x`` with weight/bias."""
        return F.layer_norm(x, x.shape[-1:], weight=w, bias=b, eps=eps)


class QuantLayerNorm(QuantModuleNoLinear):
    """
    LayerNorm module supporting quantized and integer-only execution.

    Wraps an ``nn.LayerNorm`` and can run three forward variants:
    the plain float LayerNorm, LayerNorm with a quantized input and/or
    output (``use_act_quant``), or a fully integer-simulated LayerNorm
    (``integer_only``). Both quantized paths are disabled by default;
    activate them via the module's quant-state flags.
    """

    def __init__(
        self,
        org_module: nn.LayerNorm,
        input_quant_params: dict | None = None,
        output_quant_params: dict | None = None,
    ):
        """
        Args:
            org_module: float LayerNorm whose weight/bias/eps are reused.
            input_quant_params: kwargs forwarded to ``create_quantizer`` for
                the input quantizer. ``None`` means no extra kwargs.
            output_quant_params: kwargs for the optional output quantizer;
                ``None`` disables output quantization entirely.
        """
        super().__init__(org_module)
        # Fix: the original used a mutable default argument (`dict = {}`);
        # use None as the sentinel instead.
        if input_quant_params is None:
            input_quant_params = {}

        # Share (not copy) the original parameters so external updates to
        # org_module's weights remain visible here.
        self.weight = org_module.weight
        self.bias = org_module.bias

        self.channel_dim = self.weight.shape[0]
        self.norm_shape = org_module.normalized_shape
        self.eps: float = org_module.eps

        # De-activate the quantized forward paths by default.
        self.integer_only = False
        self.use_act_quant = False

        # Input quantizer is always created; output quantizer only on request.
        self.in_quantizer = create_quantizer(
            **input_quant_params, channel_dim=self.channel_dim
        )
        if output_quant_params is not None:
            self.out_quantizer = create_quantizer(
                **output_quant_params, channel_dim=self.channel_dim
            )
        else:
            self.out_quantizer = None

        # Fixed-point shift for the integer path: normalized values are kept
        # scaled by 2**M and the weight is pre-divided by 2**M to compensate.
        self.M = 8
        # Gradient-clip magnitudes derived from the normalized dim size.
        # NOTE(review): only assigned here; presumably consumed by training
        # code outside this file — confirm before removing.
        self.grad_clip = 1 / int(self.weight.shape[-1])
        self.grad_clip_ch = 1e-10 / int(self.weight.shape[-1])
        self.grad_clip_const = CLIP_GRAD_RATE * 1e-8

    @torch.compiler.disable
    def layernorm_integer(
        self,
        x_fp: torch.Tensor,
        scaling_factor: torch.Tensor,
        weight: torch.Tensor,
        bias: torch.Tensor,
        eps: float = 1e-6,
        **kwargs,
    ) -> torch.Tensor:
        """
        Simulate an integer-only LayerNorm in float arithmetic.

        The input is quantized to INT8 with ``scaling_factor``; mean,
        variance and an integer Newton square root are then computed with
        explicit rounding/saturation so the result matches what fixed-point
        hardware would produce.

        Args:
            x_fp: floating-point input; normalization runs over the last dim.
            scaling_factor: quantization step for the input (broadcastable).
            weight, bias: affine LayerNorm parameters.
            eps: numerical floor used both in the variance and as the
                replacement value for near-zero variances.

        Returns:
            The normalized, affine-transformed tensor (float32).
        """
        M = self.M
        x_fp, scaling_factor = x_fp.float(), scaling_factor.float()

        def int_quant(x, bits=8):
            # Round to nearest and saturate to a signed `bits`-bit range.
            x_int = torch.round(x)
            qmin = -(2 ** (bits - 1))
            qmax = 2 ** (bits - 1) - 1
            return torch.clamp(x_int, qmin, qmax)  # type: ignore

        def fixed(x, frac_bit, total_bit):
            # Fixed-point representation with `frac_bit` fractional bits out
            # of `total_bit` total bits.
            return int_quant(x * (2**frac_bit), total_bit) / (2**frac_bit)

        # Quantize the input to INT8 integer levels.
        x_int = torch.round(x_fp / scaling_factor).clamp(-128, 127).float()

        # 1/dim as a 16-bit fixed-point constant.
        inv_dim = fixed(1 / torch.tensor(x_fp.shape[-1]), 16, 16)

        # Per-row integer mean (16-bit) and zero-centered values (32-bit).
        mean_int = int_quant(x_int.sum(dim=-1, keepdim=True) * inv_dim, 16)
        x_int_sub_mean = int_quant(x_int - mean_int, 32)

        # Variance pre-scaled by 2**8 for extra precision in the sqrt; INT32.
        var_int = int_quant(
            torch.sum(x_int_sub_mean**2, dim=-1, keepdim=True) * inv_dim * 2**8, 32
        )

        floor = torch.floor
        fp_min = torch.finfo(torch.float).min

        def int_sqrt(var_int):
            # Integer Newton iteration for sqrt: k <- (k + var//k) // 2,
            # seeded at 2**ceil(log2(var)/2). Entries with variance below
            # eps are forced to eps afterwards (their iterates are garbage
            # because log2(0) = -inf).
            zero_pos = var_int < eps
            k = 2 ** torch.ceil(torch.log2(var_int) / 2)
            for _ in range(30):
                k = torch.clamp(k, fp_min, None)
                k_1 = floor((k + floor(var_int / k)) / 2)
                k = k_1

            k[zero_pos] = eps

            return k

        # sqrt halves the 2**8 variance pre-scaling, so divide by 2**4 to
        # recover the true integer std.
        std_int = int_sqrt(var_int + eps).float() / 2**4

        # Normalized value kept scaled by 2**M to preserve integer precision.
        y_int = torch.round(x_int_sub_mean * (2**M) / (std_int))

        # Fold the 2**M scale into the weight before the affine transform.
        w_div_2M = weight / (2**M)

        # Fix: removed dead locals from the original (`scale` and unused
        # `.view(...)` reassignments of var_int/std_int) — never read.
        return y_int * w_div_2M + bias

    def f(
        self,
        input: torch.Tensor,
        output_residual: bool = False,
    ):
        """
        Forward pass dispatching between float, act-quantized and
        integer-only LayerNorm.

        Args:
            input: tensor normalized over its last dimension.
            output_residual: when True, also return the (possibly
                input-quantized) residual input.

        Returns:
            The output tensor in the input's original dtype, or the tuple
            ``(out, input)`` when ``output_residual`` is True.
        """
        org_dtype = input.dtype

        # Input/output quantization only applies when a quantizer exists AND
        # activation quantization is globally enabled.
        quant_input = self.use_act_quant and (
            hasattr(self, "in_quantizer") and self.in_quantizer is not None
        )
        quant_output = self.use_act_quant and (
            hasattr(self, "out_quantizer") and self.out_quantizer is not None
        )

        if self.integer_only:
            # The quantizer's `scale` is exponentiated here (2**scale), i.e.
            # it is stored in log2 domain — presumably a power-of-two scale;
            # confirm against the quantizer implementation.
            out = self.layernorm_integer(
                input,
                torch.pow(2.0, self.in_quantizer.scale).bfloat16(),
                self.weight,
                self.bias,
                self.eps,
            )
            out = out.to(org_dtype)
        else:
            if quant_input:
                input = self.in_quantizer(input)

            out = layer_norm(input, w=self.weight, b=self.bias, eps=self.eps)

        if quant_output:
            out = self.out_quantizer(out)

        out = out.to(org_dtype)

        if output_residual:
            # Residual is the post-quantization input, cast back to the
            # caller's dtype.
            out = (out, input.to(org_dtype))

        return out
