import torch
import torch.nn as nn
import torch.nn.functional as F
from quantize.quantizer import create_quantizer
from quantize.base import QuantModuleNoLinear
import math


class Softmax(nn.Softmax):
    """Softmax over the last dimension, accumulated in float32.

    Ignores ``self.dim`` from ``nn.Softmax`` and always reduces over the
    last axis; the result is cast back to the input's dtype so callers in
    reduced precision (e.g. bfloat16) keep their dtype.
    """

    def forward(self, input: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        # Upcast the reduction to float32 for numerical stability, then
        # restore the original dtype.
        out = F.softmax(input, dim=-1, _stacklevel=5, dtype=torch.float32)
        return out.to(input.dtype)


class QuantSoftmax(QuantModuleNoLinear):
    """Softmax module with optional activation quantization.

    Wraps a float ``Softmax`` and supports three execution modes, selected
    via the module flags (``set_quant_state``-style helpers on the base
    class are expected to toggle them):

    * plain floating-point softmax (default),
    * fake-quantized input followed by floating-point softmax
      (``use_act_quant`` set, ``integer_only`` clear),
    * integer-only softmax approximation emulating fixed-point hardware
      arithmetic (``integer_only`` set).
    """

    def __init__(
        self,
        input_quant_params: dict = None,
        disable_input_quant=False,
    ):
        """
        Args:
            input_quant_params: kwargs forwarded to ``create_quantizer`` for
                the input (activation) quantizer. ``None`` means defaults
                (avoids the shared mutable-default-argument pitfall of the
                previous ``{}`` default; passing ``{}`` still works).
            disable_input_quant: stored as ``disable_output_quant``.
                NOTE(review): the input/output name mismatch looks
                deliberate for the surrounding framework — confirm before
                renaming the attribute.
        """
        super().__init__(Softmax())

        self.integer_only = False
        self.use_act_quant = False
        # Initialize the activation quantizer for the softmax input.
        self.in_quantizer = create_quantizer(**(input_quant_params or {}))

        self.disable_output_quant = disable_input_quant

        self.save_tensors = False
        # Number of fractional bits of the final normalized probabilities
        # in the integer softmax (probabilities are multiples of 2**-M).
        self.M = 16

    @torch.compiler.disable
    def softmax_integer(
        self,
        x_fp: torch.Tensor,
        scaling_factor: torch.Tensor,
        attn_mask=None,
    ):
        """Integer-only softmax approximation over the last dimension.

        Quantizes ``x_fp`` to int8 with ``scaling_factor``, computes
        ``exp(s*x) ~= p * 2**(-z)`` entirely in emulated fixed-point
        (carried in float64 tensors), and normalizes with ``self.M``
        fractional bits.

        Args:
            x_fp: pre-softmax logits (attention scores).
            scaling_factor: scalar-like quantization step for the input;
                NOTE(review): assumed positive and effectively a single
                value (see the asserts in ``softmax_integer_old``) —
                confirm for new callers.
            attn_mask: optional additive attention mask; positions whose
                mask value is <= bfloat16 min are zeroed out (the mask is
                NOT added to the logits here).

        Returns:
            Softmax probabilities in ``x_fp``'s original dtype.
        """
        M = self.M
        scaling_factor = scaling_factor.double()

        ori_dtype = x_fp.dtype
        x_fp = x_fp.double()
        # Quantize the input to the signed 8-bit range.
        x_int = (x_fp / scaling_factor).round().clamp(-128, 127)
        # Values above 2**23 are handled via a coarse truncate-then-restore
        # step (see int_trunc) to model limited intermediate bit width.
        split_bit_exp = 23
        split_bit = math.pow(2, split_bit_exp)

        def int_quant(x, bits=8):
            # Round to nearest, then saturate to a signed `bits`-bit range.
            x_int = torch.round(x)
            qmin = -(2 ** (bits - 1))
            qmax = 2 ** (bits - 1) - 1
            return torch.clamp(x_int, qmin, qmax)

        def fixed_i(x: torch.Tensor, frac_bit, total_bit):
            # Fixed-point encode: integer representation of x with
            # `frac_bit` fractional bits, saturated to `total_bit` bits.
            return int_quant(x * (2**frac_bit), total_bit)

        def int_trunc(x: torch.Tensor):
            # Emulate limited dynamic range: magnitudes above 2**23 are
            # divided down by 2**23 and clamped; the mask records which
            # entries were truncated so the scale can be restored later.
            shift_mask = x.abs() > split_bit
            large_32 = torch.floor(x / split_bit)
            fixed_int = torch.where(
                shift_mask,
                large_32.clamp(-(split_bit), split_bit - 1),
                x,
            )

            return fixed_int, shift_mask

        floor = torch.floor

        # Subtract the row max for numerical stability (softmax is
        # shift-invariant).
        x_int_max, _ = x_int.max(dim=-1, keepdim=True)
        x_int = x_int - x_int_max

        ln2 = math.log(2)
        # Fixed-point constants for the base-2 exp decomposition:
        #   t1 ~ s/ln2 (12 frac bits), t2 ~ ln2/s (8), t3 ~ 2/s (8).
        t1_i = fixed_i(scaling_factor / ln2, 12, 32)
        t2_i = fixed_i(ln2 / scaling_factor, 8, 32)
        t3_i = fixed_i(2 / scaling_factor, 8, 32)

        # exp(s*x) = 2**(-z) * poly(p): z is the integer power-of-two part,
        # p_i a linear polynomial remainder in i32 with 8 fractional bits.
        z = floor(-x_int * t1_i / 2**12)
        p_i = x_int * 2**8 + t2_i * z + t3_i  # i32 8
        v_t_i = floor(p_i * 2**32 / torch.pow(2, z))  # i32 16
        v_t_i_tr, v_i_mask = int_trunc(v_t_i)

        # Restore the scale of entries that were range-truncated above.
        v_t_i_al = torch.where(v_i_mask, v_t_i_tr * split_bit, v_t_i_tr)
        if attn_mask is not None:
            # Zero fully-masked positions (mask values at/below bf16 min).
            v_t_i_al = torch.where(
                attn_mask > torch.finfo(torch.bfloat16).min, v_t_i_al, 0
            )

        v_sum_i = v_t_i_al.sum(dim=-1, keepdim=True)

        # Normalize with M fractional bits: result values are k / 2**M.
        m_pow_2 = math.pow(2, M)
        v_fp = floor(v_t_i_al * m_pow_2 / (v_sum_i))
        v = v_fp / m_pow_2

        return v.to(ori_dtype)

    @torch.compiler.disable
    def softmax_integer_old(
        self,
        x_int: torch.Tensor,
        scaling_factor: torch.Tensor,
        attn_mask=None,
    ):
        """Legacy integer softmax approximation (kept for comparison).

        Same exp(s*x) ~= p * 2**(-z) scheme as ``softmax_integer`` but with
        different fixed-point bit allocations and a float reference
        normalization. Expects a scalar, strictly positive scaling factor
        and a symmetric (zero zero-point) input quantizer.
        """
        ori_dtype = x_int.dtype

        x_int = x_int.double()
        scaling_factor = scaling_factor.double()
        assert not torch.any(scaling_factor < 1e-6), f"{self.name} {scaling_factor}"

        assert not torch.any(scaling_factor < 0), f"{self.name} {scaling_factor}"
        assert torch.all(self.in_quantizer.zero_point == 0), (
            self.in_quantizer.zero_point
        )

        if attn_mask is not None:
            attn_mask = attn_mask.to(torch.float32)

        assert scaling_factor.numel() == 1

        def int_quant(x, bits=8):
            # Round to nearest, then saturate to a signed `bits`-bit range.
            x_int = torch.round(x)
            qmin = -(2 ** (bits - 1))
            qmax = 2 ** (bits - 1) - 1
            return torch.clamp(x_int, qmin, qmax)

        def fixed(x, frac_bit, total_bit):
            # Fixed-point quantize to `frac_bit` fractional bits within a
            # signed `total_bit`-bit range, returned as a real value.
            return int_quant(x * (2**frac_bit), total_bit) / (2**frac_bit)

        # When no mask is supplied, derive one from extremely negative
        # logits (pre-masked inputs) and floor them at -1e10.
        if attn_mask is None:
            attn_mask = x_int < -1e10
            x_int[attn_mask] = -1e10

        assert not torch.any(torch.isnan(x_int)), self.name
        assert not torch.any(torch.isinf(x_int)), self.name

        x_int_max, _ = x_int.max(dim=-1, keepdim=True)

        # Subtract the row max for numerical stability.
        x_int = x_int - x_int_max  # int, int32
        ln2 = math.log(2)

        t1 = fixed(scaling_factor / ln2, 30, 32)

        # Integer power-of-two exponent of the base-2 decomposition.
        z = torch.floor(-x_int * t1)

        t2 = fixed(ln2 / scaling_factor, 8, 16)
        t3 = fixed(2 / scaling_factor, 8, 32)

        # Polynomial remainder p in fixed point (8 fractional bits).
        p = fixed(x_int + fixed(t2 * z, 8, 32) + t3, 8, 32)

        # Sanity probe on the intermediate product (kept: the asserts are
        # live validation behavior).
        test = fixed(ln2 / scaling_factor, 8, 16) * z

        assert not torch.any(torch.isnan(test)), self.name
        assert not torch.any(torch.isinf(test)), self.name
        assert not torch.any(torch.isnan(p)), self.name

        v = fixed(p / torch.pow(2, z), 16, 32)

        if attn_mask is not None:
            # NOTE(review): when a mask was passed in, it was cast to
            # float32 above, and boolean indexing with a float tensor
            # raises in PyTorch — this branch appears to only be exercised
            # with attn_mask=None on entry (then attn_mask is the bool
            # mask built above). Confirm against callers before relying on
            # an externally supplied mask here.
            v[attn_mask] = 0

        v_sum = torch.sum(v.to(torch.float32), dim=-1, keepdim=True)
        v_sum = fixed(v_sum, 16, 32)

        assert not torch.any(torch.isnan(v_sum)), self.name

        M = self.M

        # Normalize with M fractional bits, saturating to 32 bits.
        v = v * (2**M)
        v = int_quant(v / v_sum, bits=32) / (2**M)

        assert not torch.any(torch.isnan(v)), self.name

        return v.to(ori_dtype)

    def quant_input(self, x):
        """Fake-quantize the input when activation quantization is on.

        No-op when ``use_act_quant`` is off or when running in
        ``integer_only`` mode (integer mode quantizes inside
        ``softmax_integer`` instead).
        """
        if self.use_act_quant and not self.integer_only:
            x = self.in_quantizer(x)
        return x

    def f(
        self,
        input: torch.Tensor,
        attn_mask: torch.Tensor,
        **kwargs,
    ):
        """Apply softmax to ``input``, dispatching on the quantization mode.

        Args:
            input: pre-softmax logits.
            attn_mask: optional additive attention mask (or ``None``).
            **kwargs: ignored; accepted for call-site compatibility.

        Returns:
            Softmax probabilities in ``input``'s dtype.
        """
        if self.integer_only:
            # The mask is handled inside softmax_integer (it zeroes
            # fully-masked positions); it is NOT added to the logits here.
            # The quantizer stores a log2 scale, hence 2**scale.
            out = self.softmax_integer(
                input,
                scaling_factor=torch.pow(2.0, self.in_quantizer.scale).bfloat16(),
                attn_mask=attn_mask,
            )
        else:
            # No-op unless use_act_quant is enabled.
            input = self.quant_input(input)

            if attn_mask is not None:
                input = input + attn_mask

            out = F.softmax(input, dim=-1, dtype=torch.float32).to(input.dtype)

        return out
