import torch
import torch.nn as nn
import torch.nn.functional as F
from quantize.quantizer import create_quantizer, round_ste
from quantize.base import QuantModuleNoLinear
from quantize.utils.recorder import variable_recorder as vr
import math


def get_scale(obj):
    """Return a quantizer's scale, accepting either attribute spelling.

    Some quantizer implementations expose ``scale`` and others ``scales``;
    prefer ``scale`` when present.
    """
    return obj.scale if hasattr(obj, "scale") else obj.scales


# @torch.compiler.disable
# def sigmoid_integer(x_fp: torch.Tensor, M: int, scaling_factor: torch.Tensor, **kwargs):
#     # assert not torch.any(scaling_factor < 1e-6), f"{self.name} {scaling_factor}"
#     x_fp, scaling_factor = x_fp.double(), scaling_factor.double()
#     # self.save(scaling_factor, "scale.pth")
#     # self.save(x_fp, "x_fp.pth")
#     scale = scaling_factor

#     # floor = torch.floor
#     floor = torch.round_
#     # floor = lambda x : x

#     def int_quant(x: torch.Tensor, bits=8):
#         ## 将浮点数 x 量化到整数范围 [-2^(bits-1), 2^(bits-1)-1]
#         # return x
#         x_int = torch.round_(x)
#         qmin = -(2 ** (bits - 1))
#         qmax = (2 ** (bits - 1)) - 1
#         return torch.clamp_(x_int, qmin, qmax)

#     def fixed(x: torch.Tensor, frac_bit, total_bit):
#         ## 固定点量化函数，通过 frac_bit 表示小数位数，total_bit 表示总位数。
#         return x
#         # fix = int_quant(x * (2**frac_bit), total_bit) / (2**frac_bit)
#         # return fix
#         # return x

#     x_int = torch.round_(x_fp / scaling_factor).clamp_(-128, 127)
#     # self.save(x_int, "x_int.pth")
#     x_int_s = x_int

#     x_int_max, _ = x_int.max(dim=-1, keepdim=True)
#     # self.save(x_int_max, "x_int_max.pth")
#     x_int_max_s = x_int_max

#     x_int = x_int - x_int_max
#     # self.save(x_int, "x_int_sub_max.pth")
#     x_int_sub_max_s = x_int

#     ln2 = math.log(2)

#     t1 = fixed(scaling_factor / ln2, 24, 32)

#     t2 = fixed(ln2 / scaling_factor, 16, 32)

#     t3 = fixed(2 / scaling_factor, 16, 32)

#     z = floor(-x_int * t1)

#     p = fixed(x_int + t2 * z + t3, 24, 32)

#     v = fixed(p / torch.pow(2, z), 16, 32)

#     z0 = floor(x_int_max * t1)

#     p0 = fixed((-x_int_max + t2 * z0 + t3), 24, 32)

#     l = fixed(p0 / torch.pow(2, z0), 24, 32)  # e^-x_max

#     v_s = fixed((v + l), 24, 32)  # 分母

#     y_i = torch.round(fixed(v * (2**M), 16, 32) / (v_s + 1e-6))

#     y_f = y_i / 2**M
#     # y_f = v / (v_s + 1e-8)

#     # self.save(t1, "t1.pth")
#     # self.save(t2, "t2.pth")
#     # self.save(t3, "t3.pth")

#     # self.save(l, "l.pth")

#     # self.save(z, "z.pth")
#     # self.save(z0, "z0.pth")
#     # self.save(p, "p.pth")
#     # self.save(p0, "p0.pth")
#     # self.save(v, "v.pth")

#     # self.save(v_s, "v_s.pth")
#     # self.save(v_s, "y_f.pth")
#     # self.save(y_i, "y_i.pth")
#     # if "23" in self.name:
#     #     exit(0)

#     return y_f.float()
try:
    # Prefer the fused Liger SwiGLU kernel when the package is available.
    from liger_kernel.transformers.functional import liger_swiglu as silu

except ImportError:
    # Reference fallback: SwiGLU computed with plain PyTorch ops,
    # i.e. ``b * SiLU(a)`` (element-wise, broadcastable).
    def silu(a, b):
        gated = nn.functional.silu(a)
        return b * gated


class FusedSiLU(nn.Module):
    """Reference SwiGLU module: computes ``a1 * SiLU(a2)``."""

    def __init__(self):
        super().__init__()

    def forward(self, a1, a2):
        # Gate a1 with the SiLU of a2 (standard SwiGLU ordering here).
        gate = nn.functional.silu(a2)
        return a1 * gate


class HadamardProduct(nn.Module):
    """Element-wise (Hadamard) product of two tensors."""

    def __init__(self):
        super().__init__()

    def forward(self, x1, x2):
        # Plain element-wise multiply; broadcasting follows torch semantics.
        return torch.mul(x1, x2)


class QuantFusedSiLU(QuantModuleNoLinear):
    """SwiGLU (``a1 * a2 * sigmoid(a2)``) decomposed into quantized stages.

    The gate is built from three sub-modules so each stage can carry its own
    activation quantizer: ``sigmoid`` computes ``sigmoid(a2)``, ``hadamard1``
    forms the gate ``sigmoid(a2) * a2``, and ``hadamard2`` applies the gate
    to ``a1``.

    Each ``*_quant_params`` dict is forwarded to ``create_quantizer``;
    ``None`` means "use the quantizer defaults".
    """

    def __init__(
        self,
        sigmoid_quant_params: dict | None = None,
        hadamard1_a2_quant_params: dict | None = None,
        hadamard2_act_quant_params: dict | None = None,
        hadamard2_a1_quant_params: dict | None = None,
    ):
        # ``None`` defaults instead of mutable ``{}`` defaults: a default
        # dict literal is created once and aliased across every instance.
        super().__init__(FusedSiLU())

        self.save_tensors = False
        self.integer_only = False
        self.use_act_quant = False
        self.use_old = False

        self.sigmoid = QuantSigmoid(sigmoid_quant_params or {})
        # hadamard1 reuses the sigmoid's activation quantizer for its second
        # operand, so both stages share the same quantization grid.
        self.hadamard1 = QuantHadamardProduct(
            hadamard1_a2_quant_params or {},
            act2_quantizer=self.sigmoid.act_quantizer,
        )
        self.hadamard2 = QuantHadamardProduct(
            hadamard2_a1_quant_params or {},
            hadamard2_act_quant_params or {},
        )

    @torch.compiler.disable
    def integer_silu(
        self,
        a1_int: torch.Tensor,
        a2_sigma_int: torch.Tensor,
        a2_hadamard_int: torch.Tensor,
        next_layer_scale: torch.Tensor,
        next_layer_zp: torch.Tensor,
    ):
        """Integer-only pipeline: each stage requantizes its output to the
        scale/zero-point the next stage expects, ending on the caller's
        ``next_layer_scale`` / ``next_layer_zp`` grid."""
        sigma_int = self.sigmoid(
            a2_sigma_int,
            next_layer_scale=self.hadamard1.act1_quantizer.scale,
            next_layer_zp=self.hadamard1.act1_quantizer.zero_point,
        )

        gate_int = self.hadamard1(
            sigma_int,
            a2_hadamard_int,
            next_layer_scale=self.hadamard2.act2_quantizer.scale,
            next_layer_zp=self.hadamard2.act2_quantizer.zero_point,
        )

        output_int: torch.Tensor = self.hadamard2(
            a1_int,
            gate_int,
            next_layer_scale=next_layer_scale,
            next_layer_zp=next_layer_zp,
        )

        return output_int

    def f(
        self,
        a1_fp: torch.Tensor,
        a2_fp: torch.Tensor,
        a2_hadamard: torch.Tensor | None = None,
        next_layer_scale: torch.Tensor | None = None,
        next_layer_zp: torch.Tensor | None = None,
    ) -> torch.Tensor:
        """Float / fake-quant forward: ``a1 * (sigmoid(a2) * a2)``.

        The extra parameters exist only for interface parity with
        ``integer_silu`` and are unused on this path.
        """
        sigma = self.sigmoid(a2_fp)
        gate = self.hadamard1(sigma, a2_fp)
        output: torch.Tensor = self.hadamard2(a1_fp, gate)
        # Sub-modules may upcast internally; match the caller's dtype.
        return output.type_as(a1_fp)

    def f_linear(self, up, gate):
        """Unquantized fast path using the (possibly fused) ``silu`` kernel."""
        return silu(gate, up)


class QuantSigmoid(QuantModuleNoLinear):
    """Sigmoid with an optional fake-quantized input and an integer-only path.

    ``f`` either runs a plain float sigmoid (optionally through
    ``act_quantizer``) or, when ``integer_only`` is set, dispatches to
    ``sigmoid_integer``, which emulates a fixed-point hardware pipeline.
    """

    def __init__(self, act_quant_params: dict = {}):
        # NOTE(review): mutable default argument; harmless only as long as
        # the dict is never mutated (it is only unpacked below) — consider
        # a ``None`` default.
        super().__init__(nn.Sigmoid())

        self.save_tensors = False
        self.integer_only = False
        self.use_act_quant = False

        self.act_quantizer = create_quantizer(**act_quant_params)
        # Output fixed-point fraction bits: the integer result is y_i / 2**M.
        self.M = 8

    def f(
        self,
        x_fp: torch.Tensor,
    ) -> torch.Tensor:
        """Sigmoid forward; integer pipeline when ``integer_only`` is set."""
        if self.integer_only:
            # NOTE(review): the integer path is handed 2**scale, which
            # implies act_quantizer.scale stores a log2 scale here — confirm
            # against the quantizer implementation.
            output = self.sigmoid_integer(
                x_fp,
                torch.pow(2.0, self.act_quantizer.scale).bfloat16(),
            )

        else:
            if self.use_act_quant:
                x_fake_q = self.act_quantizer(x_fp)
            else:
                x_fake_q = x_fp
            output = torch.nn.functional.sigmoid(x_fake_q)

        return output

    @torch.compiler.disable
    def sigmoid_integer(
        self,
        x_fp: torch.Tensor,
        scaling_factor: torch.Tensor,
        input_int=False,
        **kwargs,
    ):
        """Integer-only sigmoid via a shift-based exp approximation.

        The input is quantized to int8 (unless ``input_int`` says it already
        is), ``exp(x - x_max)`` is approximated in fixed point using
        power-of-two shifts, and the sigmoid is formed as numerator /
        (numerator + exp(-x_max)) with explicit 32-bit overflow handling.
        Returns a float tensor quantized to a 1/2**M grid.

        The large commented-out blocks are the author's cross-checks against
        a float reference and are kept as a debugging trail.
        """
        M = self.M
        # scaling_factor = scaling_factor.double().clamp_(min=1e-4)
        x_fp, scaling_factor = x_fp.double(), scaling_factor
        scale = scaling_factor  # alias; not used further below
        # Threshold for the simulated 32-bit overflow handling in int_trunc.
        split_bit = math.pow(2, 32)

        floor = torch.floor
        # floor = lambda x : x

        def int_quant(x: torch.Tensor, bits=8):
            ## Quantize float x to the integer range [-2^(bits-1), 2^(bits-1)-1].
            x_int = torch.round(x)
            qmin = -(math.pow(2, (bits - 1)))
            qmax = (math.pow(2, (bits - 1))) - 1
            return torch.clamp(x_int, qmin, qmax)

        def fixed(x: torch.Tensor, frac_bit, total_bit):
            # Fixed-point emulation currently disabled: pass-through.
            return x
            return int_quant(x * (2**frac_bit), total_bit) / (2**frac_bit)

        def fixed_i(x: torch.Tensor, frac_bit, total_bit):
            # Fixed-point with frac_bit fractional bits, kept as a scaled integer.
            # return x
            return int_quant(x * (2**frac_bit), total_bit)

        def fixed_trunc(x: torch.Tensor, frac_bit, total_bit):
            # frac_bit = 48
            ## Fixed-point quantization: frac_bit fractional bits out of
            ## total_bit total bits (currently unused; superseded by int_trunc).
            assert not torch.any(torch.isnan(x)), self.name
            # NOTE(review): `x.min` below is the bound method, not its value;
            # the assertion message would print the method object.
            assert not torch.any(x == 0), (x.min, x.max(), self.name)
            frac_bias = torch.pow(
                2, torch.tensor(frac_bit, device=x.device, dtype=torch.float64)
            )
            fixed_int = int_quant(x * (frac_bias), total_bit)
            # fixed_int = torch.where(
            #     fixed_int.abs() < 1e-1, torch.sign(x) + fixed_int, fixed_int
            # )
            shift_mask = torch.zeros_like(x, dtype=torch.bool)
            # shift_mask = fixed_int > split_bit

            # large_32 = torch.floor(fixed_int / split_bit)

            # fixed_int = torch.where(
            #     shift_mask,
            #     large_32,
            #     fixed_int,
            # )

            fix = fixed_int / frac_bias
            # assert not torch.any(fix == 0), (fixed_int.min(), (x * (frac_bias)).min())
            # assert not torch.any(torch.isnan(frac_bias)), frac_bias
            # assert not torch.any(torch.isnan(fix)), fixed_int.min()
            # try:
            #     torch.testing.assert_close(fix, x)
            # except:
            #     print(x.min(), x.max())
            #     print(fix.min(), fix.max())
            #     raise
            return fix, shift_mask

        def int_trunc(x: torch.Tensor):
            # Emulate 32-bit overflow: magnitudes beyond 2**32 are shifted
            # right by 32 bits (shift_mask records which lanes), the rest
            # wrap modulo 2**32.
            shift_mask = x.abs() > split_bit
            large_32 = torch.floor(x / split_bit)
            fixed_int = torch.where(
                shift_mask,
                large_32,
                x % math.pow(2, 32),
            )

            return fixed_int, shift_mask

        # Quantize the input to int8 unless the caller already did.
        if not input_int:
            x_int = torch.round_(x_fp / scaling_factor).clamp_(-128, 127)
        else:
            x_int = x_fp
        # self.save(x_int, "x_int.pth")
        x_int_s = x_int

        # Subtract the row maximum for numerical range control (softmax trick).
        x_int_max, _ = x_int.max(dim=-1, keepdim=True)
        # self.save(x_int_max, "x_int_max.pth")
        x_int_max_s = x_int_max

        x_int = x_int - x_int_max
        # self.save(x_int, "x_int_sub_max.pth")
        x_int_sub_max_s = x_int

        ln2 = math.log(2)
        # x_int_f = Fixed.from_float(x_int,0,16)

        # t1 = fixed(scaling_factor / ln2, 16, 32)

        # t2 = fixed(ln2 / scaling_factor, 16, 32)

        # t3 = fixed(2 / scaling_factor, 8, 32)
        # t1 = fixed(scaling_factor / ln2, 12, 16)
        # Precomputed fixed-point constants: t1 = s/ln2, t2 = ln2/s, t3 = 2/s.
        t1_i = fixed_i(scaling_factor / ln2, 12, 16)
        # t1_f = Fixed.from_float(scaling_factor / ln2, 12, 16)

        # t2 = fixed(ln2 / scaling_factor, 8, 16)
        t2_i = fixed_i(ln2 / scaling_factor, 8, 16)
        # t2_f = Fixed.from_float(ln2 / scaling_factor, 8, 16)

        # t3 = fixed(2 / scaling_factor, 8, 32)
        t3_i = fixed_i(2 / scaling_factor, 8, 32)
        # t3_f = Fixed.from_float(2 / scaling_factor, 8, 32)

        z = floor(-x_int * t1_i / 2**12)  # i16 0
        # assert not torch.any(torch.isnan(x_int)), self.name
        # assert not torch.any(torch.isnan(t1)), self.name
        # assert not torch.any(torch.isnan(t1_i)), self.name
        # assert not torch.any(torch.isnan(z)), self.name
        # torch.testing.assert_close(z, -x_int * t1, rtol=1.1, atol=1.1, msg=self.name)
        # z = floor(-x_int * t1)

        # vr.record("z", z)

        # p = fixed(x_int + fixed(t2 * z, 8, 32) + t3, 8, 32)
        p_i = x_int * 2**8 + t2_i * z + t3_i  # i32 8
        # torch.testing.assert_close(
        #     p_i / 2**8, p, msg=lambda s: f"{self.name} {s}", atol=1, rtol=1e-1
        # )

        # v_t = p / torch.pow(2, z)
        # Numerator ~ exp(x - x_max), scaled up to 48 fractional bits.
        v_t_i = floor(p_i / 2**8 / torch.pow(2, z) * 2**48)  # i32 16
        v_t_i_tr, v_i_mask = int_trunc(v_t_i)

        # vr.record("vt", v_t)

        # v, v_shift_mask = fixed_trunc(v_t, 48, 64)
        # torch.testing.assert_close(
        #     v_t_i / 2**48, v, msg=lambda s: f"{self.name} {s}", atol=1, rtol=1e-1
        # )

        # assert not torch.any(v == 0), (v.min(), v_t.min(), v_t.max())

        # v = fixed(v_t, 48, 55)
        # v = v_t

        # Same approximation for exp(-x_max) (the "+1" term of the sigmoid).
        z0 = floor(x_int_max * t1_i / 2**12)
        # z0 = floor(x_int_max * t1)

        # vr.record("z0", z0)

        # p0 = fixed((-x_int_max + t2 * z0 + t3), 8, 32)
        p_0_i = -x_int_max * 2**8 + t2_i * z0 + t3_i  # i32 8 + 8
        # torch.testing.assert_close(
        #     p_0_i / 2**8, p0, msg=lambda s: f"{self.name} {s}", atol=1, rtol=1e-1
        # )

        # l_t = p0 / torch.pow(2, z0)
        l_t_i = floor(p_0_i / 2**8 / torch.pow(2, z0) * 2**48)  # i32 16
        l_t_i_tr, l_i_mask = int_trunc(l_t_i)

        # l, l_shift_mask = fixed_trunc(l_t, 48, 64)  # e^-x_max
        # assert not torch.any(l == 0), (l.min(),)
        # torch.testing.assert_close(
        #     l_t_i / 2**48, l, msg=lambda s: f"{self.name} {s}", atol=1, rtol=1e-1
        # )

        # l = p0 / torch.pow(2, z0)

        # v_s = fixed((v + l), 16, 32)  # denominator
        # v_s = v + l  # denominator
        # eq_mask = v_shift_mask == l_shift_mask
        # The two addends may only be summed directly when they sit at the
        # same (shifted / unshifted) magnitude; otherwise keep the larger.
        eq_mask_i = v_i_mask == l_i_mask

        # v_s = fixed(v + l, 16, 32) if ()  else(fixed(v, 16, 32) if v_shift  else fixed(l, 16, 32))
        # v_s = torch.where(
        #     eq_mask,
        #     v + l,
        #     torch.where(v_shift_mask, v, l),
        # )
        v_s_i_tr_m = torch.where(
            eq_mask_i,
            v_t_i_tr + l_t_i_tr,
            torch.where(v_i_mask, v_t_i_tr, l_t_i_tr),
        )
        # Guard the division below against a zero denominator.
        v_s_i_tr = torch.where(v_s_i_tr_m == 0, 1, v_s_i_tr_m)
        # v_s = v + l
        # v_s = fixed(
        #     torch.where((v_s) != 0, v_s, 2**-48 * torch.sign(v_t + l_t)), 48, 64
        # )

        # assert not torch.any(v_s == 0), (v.min(), l.min())

        # v_s = fixed(v + l, 48, 64)

        # y_i = torch.round(fixed(v * (2**M), 16, 32) / (v_s + 1e-6))

        m_pow_2 = math.pow(2, M)
        # y_i_no_scale = torch.round(v * m_pow_2 / (v_s))
        # y_i_both_scale = torch.round(v * m_pow_2 / (v_s))
        # y_i_vs_scale = torch.round(v * m_pow_2 / (v_s * split_bit))

        # Three candidate quotients depending on which operands were shifted
        # by int_trunc; the matching one is selected per-element below.
        y_i_no_scale_tr = torch.round(v_t_i_tr * m_pow_2 / (v_s_i_tr)).clamp_(
            -(2**31), 2**31 - 1
        )
        # y_i_both_scale_tr = torch.round(v_t_i_tr * m_pow_2 / (v_s_i_tr)).clamp_(
        #     -(2**31), 2**31 - 1
        # )
        y_i_both_scale_tr = y_i_no_scale_tr
        y_i_vs_scale_tr = torch.round(
            v_t_i_tr * m_pow_2 / (v_s_i_tr * split_bit)
        ).clamp_(-(2**31), 2**31 - 1)
        # print(self.name, y_i_vs_scale_tr.abs().mean())
        # y_i_vs_scale_tr = torch.round(v_t_i_tr * m_pow_2 / (v_s_i_tr * split_bit))

        # assert not torch.any(torch.isnan(y_i_no_scale)), (v_s.min(), v.max())
        # assert not torch.any(torch.isnan(y_i_both_scale))
        # assert not torch.any(torch.isnan(y_i_vs_scale))

        # y_i = torch.where(
        #     ~(l_shift_mask | v_shift_mask),
        #     y_i_no_scale,
        #     torch.where(
        #         v_shift_mask,
        #         y_i_both_scale,
        #         y_i_vs_scale,
        #     ),
        # )
        y_i = torch.where(
            ~(l_i_mask | v_i_mask),
            y_i_no_scale_tr,
            torch.where(
                v_i_mask,
                y_i_both_scale_tr,
                y_i_vs_scale_tr,
            ),
        )
        # v_s_i = torch.where(
        #     (v_t_i + l_t_i).abs() == 0, torch.sign(v_t + l_t), v_t_i + l_t_i
        # )
        # y_i = floor(v_t_i * m_pow_2 / (v_s_i))
        # assert not torch.any(v_s_i == 0), (
        #     v_t_i.min(),
        #     l_t_i.min(),
        #     torch.any((v_t + l_t) == 0),
        #     torch.any((p) == 0),
        #     torch.any((p0) == 0),
        # )
        # torch.testing.assert_close(
        #     (v_t_i + l_t_i) / 2**48,
        #     (v + l),
        #     msg=lambda s: f"{self.name} {s}",
        #     atol=1,
        #     rtol=1e-1,
        # )
        # torch.testing.assert_close(
        #     y_i, y_i_no_scale, msg=lambda s: f"{self.name} {s}", atol=1, rtol=1e-1
        # )
        # y_i = y_i_no_scale

        # assert not torch.any((v_s_i) == 0), self.name

        # y_i = floor(v * m_pow_2 / (v_s_i))

        # Dequantize from the 1/2**M grid back to float.
        y_f = y_i / m_pow_2

        return y_f.float()


class QuantHadamardProduct(QuantModuleNoLinear):
    """Element-wise product of two activations, each with its own quantizer.

    The quantizer for the second operand is either built from
    ``act2_quant_params`` or shared with another module by passing an
    existing ``act2_quantizer``. At least one of the two must be supplied;
    ``act2_quant_params`` takes precedence when both are given.
    """

    def __init__(
        self,
        act1_quant_params: dict | None = None,
        act2_quant_params: dict | None = None,
        act2_quantizer=None,
    ):
        super().__init__(HadamardProduct())

        self.save_tensors = False
        self.integer_only = False
        self.use_act_quant = False

        # ``None`` default instead of a mutable ``{}`` default (a default
        # dict literal would be shared across every instance).
        self.act1_quantizer = create_quantizer(**(act1_quant_params or {}))
        if act2_quant_params is not None:
            self.act2_quantizer = create_quantizer(**act2_quant_params)
        elif act2_quantizer is not None:
            self.act2_quantizer = act2_quantizer
        else:
            raise ValueError(f"{self.name}: no quantizer for activate 2!")

    @torch.compiler.disable
    def integer_hadamard(
        self,
        x1_int: torch.Tensor,
        x2_int: torch.Tensor,
        next_layer_scale: torch.Tensor,
        next_layer_zp: torch.Tensor,
    ):
        """Product of two zero-point-corrected integer inputs, requantized
        onto the next layer's (scale, zero-point) grid and clamped to the
        int8 range."""
        return (
            (
                self.act1_quantizer.scale
                * self.act2_quantizer.scale
                / next_layer_scale
                * (x1_int - self.act1_quantizer.zero_point)
                * (x2_int - self.act2_quantizer.zero_point)
                + next_layer_zp
            )
            .round()
            .clamp(-128, 127)
        )

    def f(
        self,
        x1_fp: torch.Tensor,
        x2_fp: torch.Tensor,
        **kwargs,
    ) -> torch.Tensor:
        """Fake-quantized (or plain) element-wise product.

        Extra keyword arguments (e.g. ``next_layer_scale``) are accepted for
        interface parity with the integer path and ignored here.
        """
        if self.use_act_quant:
            x1_fake_q = self.act1_quantizer(x1_fp)
            x2_fake_q = self.act2_quantizer(x2_fp)
        else:
            x1_fake_q = x1_fp
            x2_fake_q = x2_fp
        # Note: the original version had an unreachable second return and a
        # dead tensor-dumping branch after this point; both removed.
        return x1_fake_q * x2_fake_q


class QuantSiLUGate(nn.Module):
    """Quantized SiLU gate computing (an approximation of) ``a1 * SiLU(a2)``
    with optional integer-only arithmetic.

    Use ``set_quant_state`` to toggle activation quantization and the
    integer-only path.

    NOTE(review): ``forward`` reads ``self.a2_out_quantizer``, which is never
    created in ``__init__`` — it must be attached externally before the first
    call, otherwise ``forward`` raises AttributeError. Likewise ``self.name``
    and ``self.save_dir`` are referenced but not set here.
    """

    def __init__(
        self,
        a1_in_quant_params: dict = {},
        a2_in_quant_params: dict = {},
    ):
        # NOTE(review): mutable default arguments; harmless while the dicts
        # are only unpacked, but a ``None`` default would be safer.
        super().__init__()

        self.integer_only = False
        self.use_act_quant = False

        self.a1_in_quantizer = create_quantizer(**a1_in_quant_params)  # up_proj
        self.a2_in_quantizer = create_quantizer(**a2_in_quant_params)  # gate_project

        self.save_tensors = False

    def sigmoid_integer(self, x: torch.Tensor, scaling_factor: torch.Tensor):
        """Integer-emulated sigmoid of ``x`` given a per-tensor scale.

        Implements a shift-based exp approximation in simulated fixed point
        (round/clamp via ``round_ste`` so gradients pass through) and returns
        a float tensor on the 1/2**M grid (M = 10 below).
        """
        # print(self.name)

        x = x.to(torch.float32)
        scaling_factor = scaling_factor.to(torch.float32)

        # Per-tensor quantization only: exactly one scale value.
        assert scaling_factor.numel() == 1

        def int_quant(x, bits=8):
            ## Quantize float x to the integer range [-2^(bits-1), 2^(bits-1)-1].
            x_int = round_ste.apply(x)
            qmin = -(2 ** (bits - 1))
            qmax = 2 ** (bits - 1) - 1
            return torch.clamp(x_int, qmin, qmax)

        def fixed(x, frac_bit, total_bit):
            ## Fixed-point quantization: frac_bit fractional bits out of
            ## total_bit total bits.
            return int_quant(x * (2**frac_bit), total_bit) / (2**frac_bit)

        # if self.save_tensors:
        #     dir = self.save_dir
        #     torch.save(scaling_factor, dir+"scale.pth")

        x_int = x / scaling_factor

        ## Ensure the scaled x_int contains no NaN or infinite values.
        assert not torch.any(torch.isnan(x_int))
        assert not torch.any(torch.isinf(x_int))

        x_int_max, _ = x_int.max(dim=-1, keepdim=True)

        if self.save_tensors:
            dir = self.save_dir
            torch.save(x_int, dir + "x_int.pth")
            torch.save(x_int_max, dir + "x_int_max.pth")
        # Subtract the row maximum for range control (softmax trick).
        x_int = x_int - x_int_max  # int, int32

        assert not torch.any(torch.isinf(x_int))
        assert not torch.any(scaling_factor == 0)
        assert not torch.any(torch.isinf(x_int))

        # torch.save(x_int, self.name)
        if self.save_tensors:
            dir = self.save_dir
            torch.save(x_int, dir + "x_int_sub_max.pth")

        ## The following implements the formulas from the design slides:
        # t1, t2, t3 are fixed-point constants of the exp formula.
        # z is the exponent part of the exp approximation.
        # p is the quantized polynomial term.
        # x_int = x_int + floor_ste.apply(x_int / 2) - floor_ste.apply(x_int / 2 ** 4)
        ln2 = math.log(2)

        t1 = fixed(scaling_factor / ln2, 30, 32)
        # t1 = scaling_factor/ ln2
        # print("t1", torch.nn.functional.mse_loss(t1, scaling_factor/ ln2  ))
        # print(scaling_factor/ ln2, t1)

        z = torch.floor(-x_int * t1)

        t2 = fixed(ln2 / scaling_factor, 8, 16)
        # t2 = ln2/ scaling_factor
        # print("t2", torch.nn.functional.mse_loss(t2, ln2/ scaling_factor))

        t3 = fixed(2 / scaling_factor, 8, 32)
        # t3 = 2/ scaling_factor
        # print("t3", torch.nn.functional.mse_loss(t3, 2/ scaling_factor))

        p = fixed(x_int + fixed(t2 * z, 8, 32) + t3, 8, 32)
        # p = x_int + t2 * z  + t3
        # print("p", torch.nn.functional.mse_loss(p, x_int + fixed ( t2 * z , 8, 32)  + t3))

        # p = x_int + fixed(ln2/ scaling_factor, 8, 16 ) * z  + fixed( 2/ scaling_factor, 8, 16)
        # float_p =  x_int + ln2 / scaling_factor * z + 2 /scaling_factor
        # test = fixed(ln2/ scaling_factor, 8, 16 ) * z
        # test = (ln2/ scaling_factor) * z
        # print("test", torch.nn.functional.mse_loss((ln2/ scaling_factor)*z, test))

        # assert not  torch.any(torch.isnan(test))
        # assert not torch.any(torch.isinf(test))
        assert not torch.any(torch.isnan(p))

        # v = p.to(torch.int32) >> z.to(torch.int32)
        ## fix 1.10
        # Numerator ~ exp(x - x_max), realized as p * 2^-z in fixed point.
        v = fixed(p / torch.pow(2, z), 16, 32)
        # v = p/torch.pow(2, z)
        # print("v", torch.nn.functional.mse_loss(v, p/torch.pow(2, z)))

        # if torch.any(torch.isnan(v)):
        #     print("v produce nan")
        #     exit(-1)

        if self.save_tensors:
            dir = self.save_dir
            torch.save(t1, dir + "t1.pth")
            torch.save(z, dir + "z.pth")
            torch.save(p, dir + "p.pth")
            torch.save(v, dir + "v.pth")
            torch.save(t2, dir + "t2.pth")
            torch.save(t3, dir + "t3.pth")

        # Output fixed-point fraction bits for the final division.
        M = 10

        #### fix 1.10
        # v_s =  v + fixed((1 / scaling_factor), 16, 32 )
        # print("v_s", torch.nn.functional.mse_loss(v_s, v + (1 / scaling_factor)))
        # Same approximation for exp(-x_max): z0/p0/l mirror z/p/v above.
        z0 = x_int_max * t1
        p0 = fixed((-x_int_max + t2 * z0 + t3), 24, 32)
        l = fixed(p0 / torch.pow(2, z0), 8, 32)
        # Denominator; 2**-M added to avoid division by zero.
        v_s = fixed((v + l), 16, 32) + (2 ** (-M))

        assert not torch.any(torch.isnan(v_s))
        assert not torch.any(torch.isinf(v_s))
        assert not torch.any(v_s == 0)

        # v =  floor_ste.apply( (v * (2 ** M) ) /  v_s ) / (2**M)

        # update on 5.6
        v = v * (2**M)
        v = (int_quant(v / v_s, bits=32)) / (2**M)
        # v = v / v_s

        assert not torch.any(torch.isnan(v))

        if self.save_tensors:
            dir = self.save_dir
            torch.save(v_s, dir + "v_s.pth")
            torch.save(v, dir + "result.pth")

        return v

    def silu_mul_integer(
        self,
        a1: torch.Tensor,
        scaling_factor1: torch.Tensor,
        a2: torch.Tensor,
        scaling_factor2: torch.Tensor,
        scaling_factor3: torch.Tensor,
    ):
        """Integer-emulated gate using ``sigmoid_integer``.

        NOTE(review): the returned ``w`` is ``(a2 * sigmoid(a2))`` in
        dequantized form and does NOT include ``a1`` — while the unused
        ``w2`` below (and the commented full formula) do. Confirm whether
        the multiplication by ``a1`` is intentionally deferred to a later
        layer or missing here.
        """
        ori_dtype = a1.dtype

        a1 = a1.to(torch.float32)
        a2 = a2.to(torch.float32)

        scaling_factor1 = scaling_factor1.to(torch.float32)
        scaling_factor2 = scaling_factor2.to(torch.float32)
        scaling_factor3 = scaling_factor3.to(torch.float32)

        # Per-tensor quantization only.
        assert scaling_factor1.numel() == 1
        assert scaling_factor2.numel() == 1
        assert scaling_factor3.numel() == 1

        def int_quant(x: torch.Tensor, bits: int = 8) -> torch.Tensor:
            """
            Quantize float x to the integer range [-2^(bits-1), 2^(bits-1)-1]:
            truncate to the range representable in `bits` bits, using a
            gradient-preserving round + clamp.
            """
            x_int = round_ste.apply(x)
            qmin = -(2 ** (bits - 1))
            qmax = 2 ** (bits - 1) - 1
            return torch.clamp(x_int, qmin, qmax)

        def fixed(x, frac_bit, total_bit):
            ## Fixed-point quantization: frac_bit fractional bits out of
            ## total_bit total bits.
            return int_quant(x * (2**frac_bit), total_bit) / (2**frac_bit)

        if self.save_tensors:
            dir = self.save_dir
            torch.save(scaling_factor1, dir + "/scale1.pth")
            torch.save(scaling_factor2, dir + "/scale2.pth")
            torch.save(scaling_factor3, dir + "/scale3.pth")

        # a1_int is only saved for debugging; it does not feed the result.
        a1_int = a1 / scaling_factor1
        a2_int = a2 / scaling_factor2

        ## Ensure the scaled integers contain no NaN or infinite values.
        assert not torch.any(torch.isnan(a1_int))
        assert not torch.any(torch.isinf(a1_int))
        assert not torch.any(torch.isnan(a2_int))
        assert not torch.any(torch.isinf(a2_int))

        if self.save_tensors:
            dir = self.save_dir
            torch.save(a1_int, dir + "/a1_int.pth")
            torch.save(a2_int, dir + "/a2_int.pth")

        y_q = self.sigmoid_integer(a2, scaling_factor2)  ## fp
        y_q_int = y_q / scaling_factor3  ## int8
        w = (a2_int * y_q_int) * scaling_factor2 * scaling_factor3  # kept as a separate stage
        # w = scaling_factor1 * scaling_factor2 * scaling_factor3 * (a1_int * a2_int * y_q_int)
        # NOTE(review): w2 is computed but never used (float reference value).
        w2 = a1 * (a2 * y_q)

        if self.save_tensors:
            dir = self.save_dir
            torch.save(y_q_int, dir + "y_q_int.pth")
            torch.save(
                scaling_factor1 * scaling_factor3 * scaling_factor2, dir + "mul.pth"
            )
            torch.save(y_q * a2_int * a1_int, dir + "tmp.pth")
            torch.save(w, dir + "w.pth")

        return w.to(ori_dtype)

    def forward(self, a1: torch.Tensor, a2: torch.Tensor):
        """Gate forward. Expects ``use_act_quant`` to be enabled in practice."""
        if self.use_act_quant:  # normal configuration
            a1 = self.a1_in_quantizer(a1)
            a2 = self.a2_in_quantizer(a2)

            if not self.integer_only:
                # NOTE(review): relies on self.a2_out_quantizer, which is not
                # created in __init__ — must be attached externally.
                out = a1 * (a2 * self.a2_out_quantizer(torch.sigmoid(a2)))
            else:
                out = self.silu_mul_integer(
                    a1,
                    self.a1_in_quantizer.scales,
                    a2,
                    self.a2_in_quantizer.scales,
                    self.a2_out_quantizer.scales,
                )
                # out = a1 * self.a2_out_quantizer((a2 / self.a2_in_quantizer.scales) * self.a2_in_quantizer.scales * self.sigmoid_integer(a2, self.a2_in_quantizer.scales))
                # x3=self.sigmoid_integer(a2, self.a2_in_quantizer.scales)
                # x3_q, s3= new_quantizer(x3)#new_quantizer per tesnor
                # y= x3_q *

                # out = a1 * self.a2_out_quantizer(a2 * torch.sigmoid(a2))
                # out2 =  a1 * self.a2_out_quantizer(F.silu(a2))
                # print(self.name, torch.nn.functional.mse_loss(self.sigmoid_integer(a2, self.a2_in_quantizer.scales), torch.sigmoid(a2)))
                # print(self.name, torch.nn.functional.mse_loss(out, out2))
        else:  # never run
            print("silu is not quantized!!!!!!!")
            if not self.integer_only:
                out = a1 * F.silu(a2)
            else:
                # NOTE(review): ``silu_integer`` does not exist on this class
                # (only ``sigmoid_integer``); this branch would raise
                # AttributeError if ever reached.
                out = a1 * self.silu_integer(a2, self.a2_in_quantizer.scales)

        return out

    def set_quant_state(self, integer_only: bool = False, act_quant: bool = False):
        """Toggle the integer-only path and activation quantization."""
        self.integer_only = integer_only
        self.use_act_quant = act_quant
