from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from quantize.quantizer import create_quantizer, round_ste
from quantize.base import QuantModuleNoLinear
from quantize.silu_ewm import QuantSigmoid
import math


# Module-level alias for PyTorch's built-in GELU activation.
gelu = F.gelu


def gelu_exact(x):
    """Exact GELU: x * Phi(x), with Phi the standard-normal CDF via erf."""
    phi = 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
    return x * phi


def gelu_tanh_approx(x):
    """Tanh-based GELU approximation (more compute, good accuracy)."""
    inner = math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))
    return 0.5 * x * (1 + torch.tanh(inner))


def gelu_sigmoid_approx(x, alpha=1.704):
    """Sigmoid-based GELU approximation (moderate compute).

    The sigmoid gate is evaluated in float32 and then cast to bfloat16.
    """
    gate = torch.sigmoid(alpha * x.float()).bfloat16()
    return x * gate


def gelu_sigmoid_2nd_order(x, alpha=1.704, beta=0.018):
    """Sigmoid GELU approximation with a second-order correction term."""
    s = torch.sigmoid(alpha * x)
    correction = beta * x * s * (1 - s)
    return x * (s + correction)


def gelu_hybrid(x, threshold=2.0):
    """Hybrid GELU approximation.

    Uses the sigmoid form where |x| < threshold and the tanh form elsewhere.
    """
    use_sigmoid = (x.abs() < threshold).float()
    by_sigmoid = x * torch.sigmoid(1.704 * x)
    by_tanh = (
        0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * x**3)))
    )
    return use_sigmoid * by_sigmoid + (1 - use_sigmoid) * by_tanh


def gelu_fast_approx(x):
    """Fast GELU approximation — cheapest to compute, lowest accuracy."""
    inner = x * 0.7978845608 * (1 + 0.0356774 * x * x)
    return 0.5 * x * (1 + torch.tanh(inner))


def gelu_poly_approx(x):
    """Polynomial GELU approximation, suited to hardware implementations.

    Approximates the tanh in the tanh-GELU form by a cubic polynomial
    saturated to [-1, 1], so GELU(x) ~= 0.5 * x * (1 + clamp(poly(x))).

    Bug fix: the original called ``torch.minimum(1.0, torch.maximum(-1.0, t))``
    with Python floats as the first arguments, which raises ``TypeError``
    because ``torch.minimum``/``torch.maximum`` require tensor operands.
    ``Tensor.clamp`` accepts scalar bounds and expresses the same saturation.
    """
    poly = x * 0.7978845608 + 0.0356774 * x * x * x
    return 0.5 * x * (1.0 + poly.clamp(-1.0, 1.0))


# gelu_alpha = torch.nn.Parameter(torch.tensor([1.743], dtype=torch.double))
# Fixed scalar gain for the sigmoid-based GELU approximation
# GELU(x) ~= x * sigmoid(gelu_alpha * x); used by QuantGELU.gelu_integer
# (the learnable per-module parameter of the same name is currently unused
# on that path).
gelu_alpha = 1.743


class QuantGELU(QuantModuleNoLinear):
    """GELU module quantized via the sigmoid approximation x * sigmoid(alpha * x).

    Three execution modes, selected by flags set after construction:
      * default (both flags False): float x * sigmoid(alpha * x);
      * ``use_act_quant``: same formula with the input and the sigmoid output
        passed through their fake quantizers;
      * ``integer_only``: dispatches to ``gelu_integer``, which reuses the
        integer sigmoid kernel from ``QuantSigmoid``.
    """

    def __init__(
        self,
        org_module: nn.GELU,
        input_quant_params: dict = {},
        sigmoid_quant_params: dict = {},
        sigma_quant_params: dict = {},
    ):
        """Wrap ``org_module`` with quantizers for the input and sigmoid output.

        Args:
            org_module: the float GELU module being replaced.
            input_quant_params: config forwarded to the input quantizer.
            sigmoid_quant_params: config for a sigmoid-argument quantizer that
                is currently disabled (see commented-out lines below); kept in
                the signature for interface compatibility.
            sigma_quant_params: config for quantizing the sigmoid output.
        """
        super().__init__(org_module)
        self.integer_only = False
        self.use_act_quant = False

        # Learnable gain for the sigmoid GELU approximation. Note the integer
        # path (``gelu_integer``) currently uses the module-level constant
        # ``gelu_alpha`` instead of this parameter.
        self.gelu_alpha = torch.nn.Parameter(
            torch.tensor([1.743], dtype=torch.bfloat16)
        )

        self.in_quantizer = create_quantizer(**input_quant_params)
        # self.sigmoid_quantizer = create_quantizer(**sigmoid_quant_params)
        self.sigma_quantizer = create_quantizer(**sigma_quant_params)
        self.sigma_quant_params = sigma_quant_params
        self.sigmoid = QuantSigmoid(input_quant_params)
        # The shared sigmoid kernel receives its scaling factor explicitly in
        # ``gelu_integer``, so its own quantizer state is removed here.
        del self.sigmoid.act_quantizer.scale
        del self.sigmoid.act_quantizer.zero_point

    @torch.compiler.disable
    def gelu_integer(self, x: torch.Tensor, threshold=7) -> torch.Tensor:
        """Integer-domain GELU: quantize x to int8 and gate it with an
        integer sigmoid evaluated at alpha * x.

        Args:
            x: input activations (any float dtype; computed in float64).
            threshold: currently unused (kept from an experimental hybrid
                scheme, see the commented-out ``mask`` below).

        Returns:
            Approximate GELU(x), cast back to ``x``'s original dtype.
        """
        org_type = x.dtype
        x = x.double()
        # The quantizer stores its scale in log2 domain, hence the pow(2, .).
        scale = torch.pow(2.0, self.in_quantizer.scale.data)

        # mask = (x.abs() < threshold).double()
        alpha = gelu_alpha
        # alpha = self.gelu_alpha
        # Fake-quantize x to the signed int8 grid (in-place round on a temp).
        x_int = torch.round_(x / scale).clamp_(-128, 127)
        # fused_s = scale * alpha  # fix ?
        # alpha_x_q = self.sigmoid_quantizer(alpha * x)
        # alpha_x_q = scale * x_int * alpha
        alpha_x_q = alpha * x
        # NOTE(review): the kernel is given the unquantized product alpha * x
        # with ``input_int=False``; presumably it re-quantizes internally
        # using ``scaling_factor`` — confirm against QuantSigmoid.
        sigma = self.sigmoid.sigmoid_integer(
            alpha_x_q,
            scaling_factor=alpha * scale,
            input_int=False,
        )
        # sigma = torch.sigmoid(self.gelu_alpha * self.in_quantizer(x))
        # alpha_x_q = self.sigmoid_quantizer(alpha * x)
        # sigma = self.sigmoid.sigmoid_integer(
        #     alpha_x_q,
        #     scaling_factor=self.sigmoid_quantizer.scale,
        # )
        # sigma = torch.sigmoid(alpha_x_q)
        sigma_q = self.sigma_quantizer(sigma)
        gelu_out = (
            scale
            * x_int
            # * F.sigmoid(fused_s * x_int)
            * sigma_q
        )  # reuses the SiLU sigmoid kernel from the MLP

        # relu_out = torch.nn.functional.gelu(x)  # torch.nn.functional.gelu / tanh_gelu
        # relu_out = torch.nn.functional.relu(x)  # torch.nn.functional.gelu / tanh_gelu
        # fix_out = gelu(x)  # torch.nn.functional.gelu / tanh_gelu

        # out = mask * gelu_out + (1 - mask) * relu_out
        out = gelu_out

        # x = self.in_quantizer(x)
        # if x.abs().max() > 3:
        #     print(self.name, x.min(), x.max())
        # out = gelu_poly_approx(x)
        # out = gelu_sigmoid_2nd_order(x)
        # out = gelu_hybrid(x)
        # torch.testing.assert_close(out, gelu(x))
        return out.to(org_type)

    def f(
        self,
        input: torch.Tensor,
    ):
        """Forward body: GELU(input) ~= input * sigmoid(gelu_alpha * input).

        Routing: ``integer_only`` -> ``gelu_integer``; otherwise the float
        formula, with quantizers applied when ``use_act_quant`` is set.
        """
        # Local name shadows the module-level constant on purpose: the float
        # paths use the learnable parameter.
        gelu_alpha = self.gelu_alpha
        if self.integer_only:
            output = self.gelu_integer(input)
        else:
            if self.use_act_quant:
                input_q = self.in_quantizer(input)
                sigma = self.sigma_quantizer(
                    torch.sigmoid((gelu_alpha * input_q).to(input.dtype))
                )

            else:
                input_q = input
                sigma = torch.sigmoid((gelu_alpha * input_q).to(input.dtype))
            output = input_q * sigma

            # output = torch.nn.functional.relu(input)
            # output = gelu_sigmoid_approx(input, gelu_alpha)
            # output = torch.nn.functional.gelu(input, approximate="tanh")
            # output = torch.nn.functional.gelu(input, approximate="none")
            # output = self.gelu_integer(input)

        # mask = input.abs() < 2.55
        # input_q = self.in_quantizer(input)
        # # relu_out = torch.nn.functional.relu(input_q)

        # gelu_output = input_q * self.sigma_quantizer(
        #     torch.sigmoid(self.sigmoid_quantizer(gelu_alpha * input).float()).bfloat16()
        # )
        # output = gelu_output

        # return mask * gelu_output + (~mask) * relu_out
        return output
