import math
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

from function import FakeQuantize, interp

def calcScaleZeroPoint(min_val, max_val, num_bits=8):
    """Compute asymmetric linear-quantization parameters.

    Maps the float range [min_val, max_val] onto the unsigned integer grid
    [0, 2**num_bits - 1] and returns the scale factor together with the
    rounded zero point.

    Parameters:
    min_val (torch.Tensor): minimum of the tensor to be quantized.
    max_val (torch.Tensor): maximum of the tensor to be quantized.
    num_bits (int): quantization bit width. Defaults to 8.

    Returns:
    tuple: (scale, zero_point) tensors.
    """
    qmin, qmax = 0.0, 2.0 ** num_bits - 1.0

    scale = (max_val - min_val) / (qmax - qmin)
    zp = qmax - max_val / scale

    # Clamp the zero point into the representable integer range.
    if zp < qmin:
        zp = torch.tensor([qmin], dtype=torch.float32).to(min_val.device)
    elif zp > qmax:
        zp = torch.tensor([qmax], dtype=torch.float32).to(max_val.device)

    zp.round_()
    return scale, zp

def quantize_tensor(x, scale, zero_point, num_bits=8, signed=False):
    """Map a float tensor onto the integer quantization grid.

    Parameters:
    x (torch.Tensor): float input tensor.
    scale (torch.Tensor): quantization scale factor.
    zero_point (torch.Tensor): quantization zero point.
    num_bits (int): quantization bit width. Defaults to 8.
    signed (bool): use a signed integer range if True. Defaults to False.

    Returns:
    torch.Tensor: quantized tensor, clamped to the integer range and rounded.
    """
    # Pick the representable integer range for the requested format.
    if signed:
        lo = -(2.0 ** (num_bits - 1))
        hi = 2.0 ** (num_bits - 1) - 1
    else:
        lo, hi = 0.0, 2.0 ** num_bits - 1.0

    q = zero_point + x / scale
    return q.clamp_(lo, hi).round_()
def dequantize_tensor(q_x, scale, zero_point):
    """Recover the approximate real value from a quantized tensor.

    Parameters:
    q_x (torch.Tensor): quantized tensor.
    scale (torch.Tensor): quantization scale factor.
    zero_point (torch.Tensor): quantization zero point.

    Returns:
    torch.Tensor: dequantized (float) tensor.
    """
    return (q_x - zero_point) * scale
def search(M):
    """Find an integer multiplier and bit shift approximating M ~ Mo / 2**n.

    Tries n = 1..22 and returns the first (Mo, n) whose fixed-point product
    against a probe constant matches the exact rounded result; if none is
    exact, the pair for n = 22 is returned.

    Parameters:
    M (float): real multiplier to approximate.

    Returns:
    tuple: (Mo, n) where Mo is an int multiplier and n the right shift.
    """
    P = 7000  # probe value used to measure the approximation error
    for n in range(1, 23):
        Mo = int(round(2 ** n * M))
        approx_result = Mo * P >> n
        result = int(round(M * P))
        error = approx_result - result

        print(
            "n=%d, Mo=%f, approx=%d, result=%d, error=%f"
            % (n, Mo, approx_result, result, error)
        )

        # Exact match (integer error of 0) or shift budget exhausted.
        if math.fabs(error) < 1e-9 or n >= 22:
            return Mo, n



class QParam(nn.Module):
    """Holds the quantization parameters (scale, zero_point, min, max) for
    one tensor; all four are registered as buffers so they are saved and
    restored with the module's state_dict."""

    def __init__(self, num_bits=8):
        """
        Initialize empty quantization-parameter buffers.

        Parameters:
        num_bits (int): quantization bit width. Defaults to 8.
        """
        super(QParam, self).__init__()
        self.num_bits = num_bits
        # Empty tensors mean "not observed yet"; update() checks nelement().
        scale = torch.tensor([], requires_grad=False)
        zero_point = torch.tensor([], requires_grad=False)
        min = torch.tensor([], requires_grad=False)  # shadows builtin min (local only)
        max = torch.tensor([], requires_grad=False)  # shadows builtin max (local only)
        # Buffers (not Parameters): persisted with the model, never optimized.
        self.register_buffer("scale", scale)
        self.register_buffer("zero_point", zero_point)
        self.register_buffer("min", min)
        self.register_buffer("max", max)
    def update(self, tensor):
        """
        Update the running min/max with a newly observed tensor and
        recompute scale / zero_point from the widened range.

        Parameters:
        tensor (torch.Tensor): observed data tensor.
        """
        # nelement() == 0 means this is the first observation.
        if self.max.nelement() == 0 or self.max.data < tensor.max().data:
            self.max.data = tensor.max().data
        self.max.clamp_(min=0)  # force max >= 0 so zero stays representable

        if self.min.nelement() == 0 or self.min.data > tensor.min().data:
            self.min.data = tensor.min().data
        self.min.clamp_(max=0)  # force min <= 0 for the same reason

        self.scale, self.zero_point = calcScaleZeroPoint(
            self.min, self.max, self.num_bits
        )

    def quantize_tensor(self, tensor):
        """
        Quantize a tensor with this object's current scale / zero_point.

        Parameters:
        tensor (torch.Tensor): float input tensor.

        Returns:
        torch.Tensor: quantized tensor.
        """
        return quantize_tensor(
            tensor, self.scale, self.zero_point, num_bits=self.num_bits
        )
    def dequantize_tensor(self, q_x):
        """
        Dequantize a tensor with this object's current scale / zero_point.

        Parameters:
        q_x (torch.Tensor): quantized tensor.

        Returns:
        torch.Tensor: dequantized (float) tensor.
        """
        return dequantize_tensor(q_x, self.scale, self.zero_point)
    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        """
        Custom state_dict loading hook for the four quantization buffers.

        Copies each buffer value directly and pops the handled keys so the
        default nn.Module loader does not process (or complain about) them.

        Parameters:
        state_dict (dict): source state dictionary (mutated: keys are popped).
        prefix (str): key prefix for this module.
        local_metadata (dict): local metadata (unused here).
        strict (bool): strict key matching flag (unused here).
        missing_keys (list): accumulator for missing keys (unused here).
        unexpected_keys (list): accumulator for unexpected keys (unused here).
        error_msgs (list): accumulator for error messages (unused here).
        """
        key_names = ["scale", "zero_point", "min", "max"]
        for key in key_names:
            value = getattr(self, key)
            value.data = state_dict[prefix + key].data
            state_dict.pop(prefix + key)

    def __str__(self):
        """Return a compact human-readable summary of the parameters."""
        info = "scale: %.10f " % self.scale
        info += "zp: %d " % self.zero_point
        info += "min: %.6f " % self.min
        info += "max: %.6f" % self.max
        return info


class QModule(nn.Module):
    """Common base for quantization-aware layers.

    Optionally owns input (``qi``) and output (``qo``) quantization
    parameters; subclasses implement ``freeze`` and ``quantize_inference``.
    """

    def __init__(self, qi=True, qo=True, num_bits=8):
        """Create the optional input/output quantization parameter holders.

        :param bool qi: create input quant params, defaults to True
        :param bool qo: create output quant params, defaults to True
        :param int num_bits: quantization bit width, defaults to 8
        """
        super(QModule, self).__init__()
        # Only attach the QParam holders that were requested; their mere
        # presence (hasattr) is what subclasses test at runtime.
        for wanted, attr in ((qi, "qi"), (qo, "qo")):
            if wanted:
                setattr(self, attr, QParam(num_bits=num_bits))

    def freeze(self):
        """Fix quantization parameters after calibration (no-op by default)."""
        pass

    def quantize_inference(self, x):
        """Run integer-only inference; must be overridden by subclasses.

        Parameters:
        x (torch.Tensor): input tensor.

        Returns:
        torch.Tensor: output tensor of the quantized computation.
        """
        raise NotImplementedError("quantize_inference should be implemented.")


class QConv2d(QModule):
    """Quantization-aware wrapper around a full-precision ``nn.Conv2d``.

    During calibration (``forward``) it tracks min/max statistics for the
    input (``qi``), the weight (``qw``) and the output (``qo``).  After
    calibration, ``freeze`` converts the wrapped convolution's weights and
    bias to fixed-point integers so ``quantize_inference`` can run with
    integer arithmetic plus one float requantization multiplier ``M``.
    """

    def __init__(self, conv_module, qi=True, qo=True, num_bits=8):
        """
        Parameters:
        conv_module (nn.Conv2d): the full-precision convolution to wrap.
        qi (bool): whether this layer owns input quant params. Defaults to True.
        qo (bool): whether this layer owns output quant params. Defaults to True.
        num_bits (int): quantization bit width. Defaults to 8.
        """
        super(QConv2d, self).__init__(qi=qi, qo=qo, num_bits=num_bits)
        self.num_bits = num_bits
        self.conv_module = conv_module
        self.qw = QParam(num_bits=num_bits)
        # M = S_w * S_i / S_o, the requantization multiplier; registered as
        # a buffer so it is saved/restored with the state dict.
        self.register_buffer("M", torch.tensor([], requires_grad=False))

    def freeze(self, qi=None, qo=None):
        """Fix quantization parameters and convert weights/bias to integers.

        Called once min/max statistics have been collected; precomputes the
        requantization multiplier M and rewrites the wrapped conv's weight
        (and bias, when present) as fixed-point integer tensors.

        Parameters:
        qi (QParam): input quant params; only valid when not created in __init__.
        qo (QParam): output quant params; only valid when not created in __init__.

        Raises:
        ValueError: if qi/qo are both provided here and owned by this module,
            or neither provided nor owned.
        """
        if hasattr(self, "qi") and qi is not None:
            raise ValueError("qi has been provided in init function.")
        if not hasattr(self, "qi") and qi is None:
            raise ValueError("qi is not existed, should be provided.")

        if hasattr(self, "qo") and qo is not None:
            raise ValueError("qo has been provided in init function.")
        if not hasattr(self, "qo") and qo is None:
            raise ValueError("qo is not existed, should be provided.")

        if qi is not None:
            self.qi = qi
        if qo is not None:
            self.qo = qo
        self.M.data = (self.qw.scale * self.qi.scale / self.qo.scale).data

        self.conv_module.weight.data = self.qw.quantize_tensor(
            self.conv_module.weight.data
        )
        # Store zero-centred integer weights so inference only needs M and
        # the zero points.
        self.conv_module.weight.data = self.conv_module.weight.data - self.qw.zero_point

        # Bug fix: a Conv2d built with bias=False has bias is None; only
        # quantize the bias when it exists (forward already handles None).
        if self.conv_module.bias is not None:
            self.conv_module.bias.data = quantize_tensor(
                self.conv_module.bias.data,
                scale=self.qi.scale * self.qw.scale,
                zero_point=0,
                num_bits=32,
                signed=True,
            )

    def forward(self, x):
        """Calibration / QAT forward pass.

        Updates the min/max statistics from the training data and simulates
        quantization noise with FakeQuantize (quantize + dequantize back to
        float) on input, weight and output.

        Parameters:
        x (torch.Tensor): input tensor.

        Returns:
        torch.Tensor: fake-quantized convolution output.
        """
        if hasattr(self, "qi"):
            self.qi.update(x)
            x = FakeQuantize.apply(x, self.qi)

        self.qw.update(self.conv_module.weight.data)

        x = F.conv2d(
            x,
            FakeQuantize.apply(self.conv_module.weight, self.qw),
            self.conv_module.bias,
            stride=self.conv_module.stride,
            padding=self.conv_module.padding,
            dilation=self.conv_module.dilation,
            groups=self.conv_module.groups,
        )

        if hasattr(self, "qo"):
            self.qo.update(x)
            x = FakeQuantize.apply(x, self.qo)

        return x

    def quantize_inference(self, x):
        """Integer inference using the frozen parameters.

        Parameters:
        x (torch.Tensor): quantized input tensor (integer values).

        Returns:
        torch.Tensor: quantized output tensor in [0, 2**num_bits - 1].
        """
        x = x - self.qi.zero_point
        x = self.conv_module(x)
        # Requantize: scale the int32 accumulator back to the output grid.
        x = self.M * x
        x.round_()
        x = x + self.qo.zero_point
        x.clamp_(0.0, 2.0 ** self.num_bits - 1.0).round_()
        return x


class QLinear(QModule):
    """Quantization-aware wrapper around a full-precision ``nn.Linear``.

    Mirrors QConv2d: tracks input/weight/output statistics during
    calibration, then ``freeze`` converts the layer to fixed-point integers
    for ``quantize_inference``.
    """

    def __init__(self, fc_module, qi=True, qo=True, num_bits=8):
        """
        Parameters:
        fc_module (nn.Linear): the full-precision linear layer to wrap.
        qi (bool): whether this layer owns input quant params. Defaults to True.
        qo (bool): whether this layer owns output quant params. Defaults to True.
        num_bits (int): quantization bit width. Defaults to 8.
        """
        super(QLinear, self).__init__(qi=qi, qo=qo, num_bits=num_bits)
        self.num_bits = num_bits
        self.fc_module = fc_module
        self.qw = QParam(num_bits=num_bits)
        # M = S_w * S_i / S_o, the requantization multiplier (buffer so it
        # is saved/restored with the state dict).
        self.register_buffer("M", torch.tensor([], requires_grad=False))

    def freeze(self, qi=None, qo=None):
        """Fix quantization parameters and convert weights/bias to integers.

        Parameters:
        qi (QParam): input quant params; only valid when not created in __init__.
        qo (QParam): output quant params; only valid when not created in __init__.

        Raises:
        ValueError: if qi/qo are both provided here and owned by this module,
            or neither provided nor owned.
        """
        if hasattr(self, "qi") and qi is not None:
            raise ValueError("qi has been provided in init function.")
        if not hasattr(self, "qi") and qi is None:
            raise ValueError("qi is not existed, should be provided.")

        if hasattr(self, "qo") and qo is not None:
            raise ValueError("qo has been provided in init function.")
        if not hasattr(self, "qo") and qo is None:
            raise ValueError("qo is not existed, should be provided.")

        if qi is not None:
            self.qi = qi
        if qo is not None:
            self.qo = qo
        self.M.data = (self.qw.scale * self.qi.scale / self.qo.scale).data

        self.fc_module.weight.data = self.qw.quantize_tensor(self.fc_module.weight.data)
        # Store zero-centred integer weights.
        self.fc_module.weight.data = self.fc_module.weight.data - self.qw.zero_point

        # Bug fix: a Linear built with bias=False has bias is None; only
        # quantize the bias when it exists (forward already handles None).
        if self.fc_module.bias is not None:
            self.fc_module.bias.data = quantize_tensor(
                self.fc_module.bias.data,
                scale=self.qi.scale * self.qw.scale,
                zero_point=0,
                num_bits=32,
                signed=True,
            )

    def forward(self, x):
        """Calibration / QAT forward pass with fake quantization.

        Parameters:
        x (torch.Tensor): input tensor.

        Returns:
        torch.Tensor: fake-quantized linear output.
        """
        if hasattr(self, "qi"):
            self.qi.update(x)
            x = FakeQuantize.apply(x, self.qi)

        self.qw.update(self.fc_module.weight.data)

        x = F.linear(
            x, FakeQuantize.apply(self.fc_module.weight, self.qw), self.fc_module.bias
        )

        if hasattr(self, "qo"):
            self.qo.update(x)
            x = FakeQuantize.apply(x, self.qo)

        return x

    def quantize_inference(self, x):
        """Integer inference using the frozen parameters.

        Parameters:
        x (torch.Tensor): quantized input tensor (integer values).

        Returns:
        torch.Tensor: quantized output tensor in [0, 2**num_bits - 1].
        """
        x = x - self.qi.zero_point
        x = self.fc_module(x)
        # Requantize: scale the int32 accumulator back to the output grid.
        x = self.M * x
        x.round_()
        x = x + self.qo.zero_point
        x.clamp_(0.0, 2.0 ** self.num_bits - 1.0).round_()
        return x


class QReLU(QModule):
    """Quantized ReLU.

    In integer inference, ReLU becomes a clamp at the input zero point:
    every quantized value below ``qi.zero_point`` maps to the zero point.
    """

    def __init__(self, qi=False, num_bits=None):
        super(QReLU, self).__init__(qi=qi, num_bits=num_bits)

    def freeze(self, qi=None):
        """Adopt (or validate ownership of) the input quant params."""
        owns_qi = hasattr(self, "qi")
        if owns_qi and qi is not None:
            raise ValueError("qi has been provided in init function.")
        if not owns_qi and qi is None:
            raise ValueError("qi is not existed, should be provided.")
        if qi is not None:
            self.qi = qi

    def forward(self, x):
        """Calibration forward: track input range, then apply float ReLU."""
        if hasattr(self, "qi"):
            self.qi.update(x)
            x = FakeQuantize.apply(x, self.qi)
        return F.relu(x)

    def quantize_inference(self, x):
        """Integer ReLU: clamp quantized values below the zero point."""
        out = x.clone()
        zp = self.qi.zero_point
        out[out < zp] = zp
        return out


class QMaxPooling2d(QModule):
    """Quantized max pooling.

    Max pooling only selects among existing values, so in integer inference
    it is applied directly to the quantized tensor with no requantization.
    """

    def __init__(self, kernel_size=3, stride=1, padding=0, qi=False, num_bits=None):
        super(QMaxPooling2d, self).__init__(qi=qi, num_bits=num_bits)
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding

    def freeze(self, qi=None):
        """Adopt (or validate ownership of) the input quant params."""
        owns_qi = hasattr(self, "qi")
        if owns_qi and qi is not None:
            raise ValueError("qi has been provided in init function.")
        if not owns_qi and qi is None:
            raise ValueError("qi is not existed, should be provided.")
        if qi is not None:
            self.qi = qi

    def forward(self, x):
        """Calibration forward: track input range, then pool in float."""
        if hasattr(self, "qi"):
            self.qi.update(x)
            x = FakeQuantize.apply(x, self.qi)
        return F.max_pool2d(x, self.kernel_size, self.stride, self.padding)

    def quantize_inference(self, x):
        """Integer inference: pool the quantized values directly."""
        return F.max_pool2d(x, self.kernel_size, self.stride, self.padding)


class QConvBNReLU(QModule):
    """Conv2d + BatchNorm + ReLU block with BN folded into the convolution,
    for quantization-aware training and integer-only inference."""

    def __init__(self, conv_module, bn_module, qi=True, qo=True, num_bits=8):
        """
        Parameters:
        conv_module (nn.Conv2d): full-precision convolution.
        bn_module (nn.BatchNorm2d): batch-norm layer to fold into the conv.
        qi (bool): whether to own input quant params. Defaults to True.
        qo (bool): whether to own output quant params. Defaults to True.
        num_bits (int): quantization bit width. Defaults to 8.
        """
        super(QConvBNReLU, self).__init__(qi=qi, qo=qo, num_bits=num_bits)
        self.num_bits = num_bits
        self.conv_module = conv_module
        self.bn_module = bn_module
        self.qw = QParam(num_bits=num_bits)
        # NOTE(review): qb is never referenced below — possibly dead; confirm.
        self.qb = QParam(num_bits=32)
        self.register_buffer(
            "M", torch.tensor([], requires_grad=False)
        )  # register the requantization multiplier M as a buffer

    def fold_bn(self, mean, std):
        """Fold batch-norm statistics into the convolution weight and bias.

        Effective transform: W' = W * gamma / std,
        b' = gamma * (b - mean) / std + beta
        (gamma = 1, beta = 0 when the BN layer is not affine).

        Parameters:
        mean (torch.Tensor): per-channel mean used for folding.
        std (torch.Tensor): per-channel std, i.e. sqrt(var + eps).

        Returns:
        tuple: folded (weight, bias).
        """
        if self.bn_module.affine:
            gamma_ = self.bn_module.weight / std
            # Reshape to (C_out, 1, 1, 1) so gamma_ scales each output channel.
            weight = self.conv_module.weight * gamma_.view(
                self.conv_module.out_channels, 1, 1, 1
            )
            if self.conv_module.bias is not None:
                bias = (
                    gamma_ * self.conv_module.bias - gamma_ * mean + self.bn_module.bias
                )
            else:
                bias = self.bn_module.bias - gamma_ * mean
        else:
            gamma_ = 1 / std
            # NOTE(review): unlike the affine branch, gamma_ is NOT reshaped
            # to (C_out, 1, 1, 1) here, so it broadcasts over the last weight
            # axis instead of the channel axis — looks like a bug; confirm.
            weight = self.conv_module.weight * gamma_
            if self.conv_module.bias is not None:
                bias = gamma_ * self.conv_module.bias - gamma_ * mean
            else:
                bias = -gamma_ * mean

        return weight, bias

    def forward(self, x):
        """Calibration / QAT forward pass with BN folded on the fly.

        In training mode, batch statistics are computed from a plain conv
        pass and the BN running stats are updated manually; in eval mode the
        stored running stats are used.

        Parameters:
        x (torch.Tensor): input tensor (NCHW).

        Returns:
        torch.Tensor: fake-quantized conv+BN+ReLU output.
        """
        if hasattr(self, "qi"):
            self.qi.update(x)
            x = FakeQuantize.apply(x, self.qi)

        if self.training:
            # First conv pass exists only to obtain the batch statistics
            # that BN would normally compute.
            y = F.conv2d(
                x,
                self.conv_module.weight,
                self.conv_module.bias,
                stride=self.conv_module.stride,
                padding=self.conv_module.padding,
                dilation=self.conv_module.dilation,
                groups=self.conv_module.groups,
            )
            y = y.permute(1, 0, 2, 3)  # NCHW -> CNHW
            y = y.contiguous().view(self.conv_module.out_channels, -1)  # CNHW -> C,NHW
            # detach: running statistics must not receive gradients
            mean = y.mean(1).detach()
            var = y.var(1).detach()
            # Same exponential-moving-average convention as nn.BatchNorm2d.
            self.bn_module.running_mean = (
                1 - self.bn_module.momentum
            ) * self.bn_module.running_mean + self.bn_module.momentum * mean
            self.bn_module.running_var = (
                1 - self.bn_module.momentum
            ) * self.bn_module.running_var + self.bn_module.momentum * var
        else:
            # Variable is a legacy wrapper; in modern PyTorch it returns the
            # tensor unchanged.
            mean = Variable(self.bn_module.running_mean)
            var = Variable(self.bn_module.running_var)

        std = torch.sqrt(var + self.bn_module.eps)

        weight, bias = self.fold_bn(mean, std)

        self.qw.update(weight.data)

        x = F.conv2d(
            x,
            FakeQuantize.apply(weight, self.qw),
            bias,
            stride=self.conv_module.stride,
            padding=self.conv_module.padding,
            dilation=self.conv_module.dilation,
            groups=self.conv_module.groups,
        )

        x = F.relu(x)

        if hasattr(self, "qo"):
            self.qo.update(x)
            x = FakeQuantize.apply(x, self.qo)

        return x

    def freeze(self, qi=None, qo=None):
        """Fix quantization parameters; fold BN and convert conv to integers.

        Parameters:
        qi (QParam): input quant params; only valid when not created in __init__.
        qo (QParam): output quant params; only valid when not created in __init__.

        Raises:
        ValueError: if qi/qo are both provided here and owned by this module,
            or neither provided nor owned.
        """
        if hasattr(self, "qi") and qi is not None:
            raise ValueError("qi has been provided in init function.")
        if not hasattr(self, "qi") and qi is None:
            raise ValueError("qi is not existed, should be provided.")

        if hasattr(self, "qo") and qo is not None:
            raise ValueError("qo has been provided in init function.")
        if not hasattr(self, "qo") and qo is None:
            raise ValueError("qo is not existed, should be provided.")

        if qi is not None:
            self.qi = qi
        if qo is not None:
            self.qo = qo
        self.M.data = (self.qw.scale * self.qi.scale / self.qo.scale).data

        std = torch.sqrt(self.bn_module.running_var + self.bn_module.eps)

        # Fold with the running statistics, then store zero-centred integer
        # weights and an int32 bias in the wrapped conv module.
        weight, bias = self.fold_bn(self.bn_module.running_mean, std)
        self.conv_module.weight.data = self.qw.quantize_tensor(weight.data)
        self.conv_module.weight.data = self.conv_module.weight.data - self.qw.zero_point

        self.conv_module.bias.data = quantize_tensor(
            bias,
            scale=self.qi.scale * self.qw.scale,
            zero_point=0,
            num_bits=32,
            signed=True,
        )

    def quantize_inference(self, x):
        """Integer inference with the frozen, BN-folded convolution.

        Parameters:
        x (torch.Tensor): quantized input tensor (integer values).

        Returns:
        torch.Tensor: quantized output tensor in [0, 2**num_bits - 1].
        """
        x = x - self.qi.zero_point
        x = self.conv_module(x)
        # Requantize the int32 accumulator back onto the output grid.
        x = self.M * x
        x.round_()
        x = x + self.qo.zero_point
        x.clamp_(0.0, 2.0**self.num_bits - 1.0).round_()
        return x


class QSigmoid(QModule):
    """Quantized sigmoid implemented with a lookup table.

    During calibration the float sigmoid runs with fake quantization; at
    freeze time a LUT mapping quantized inputs to quantized outputs is
    built, and integer inference interpolates into that table.
    """

    def __init__(self, qi=True, qo=True, num_bits=8, lut_size=64):
        super(QSigmoid, self).__init__(qi=qi, qo=qo, num_bits=num_bits)
        self.num_bits = num_bits
        self.lut_size = lut_size

    def forward(self, x):
        """Calibration forward: float sigmoid with range tracking."""
        if hasattr(self, "qi"):
            self.qi.update(x)
            x = FakeQuantize.apply(x, self.qi)

        out = torch.sigmoid(x)

        if hasattr(self, "qo"):
            self.qo.update(out)
            out = FakeQuantize.apply(out, self.qo)

        return out

    def freeze(self, qi=None, qo=None):
        """Adopt/validate quant params and build the sigmoid lookup table."""
        if hasattr(self, "qi") and qi is not None:
            raise ValueError("qi has been provided in init function.")
        if not hasattr(self, "qi") and qi is None:
            raise ValueError("qi is not existed, should be provided.")

        if hasattr(self, "qo") and qo is not None:
            raise ValueError("qo has been provided in init function.")
        if not hasattr(self, "qo") and qo is None:
            raise ValueError("qo is not existed, should be provided.")

        if qi is not None:
            self.qi = qi
        if qo is not None:
            self.qo = qo

        # Sample lut_size points across the full quantized input range,
        # evaluate the float sigmoid at their dequantized values, and
        # re-quantize the results with the output parameters.
        sample_q = torch.tensor(
            np.linspace(0, 2 ** self.num_bits - 1, self.lut_size), dtype=torch.uint8
        )
        sample_y = torch.sigmoid(self.qi.dequantize_tensor(sample_q))
        table_qy = self.qo.quantize_tensor(sample_y)

        self.register_buffer("lut_qy", table_qy)
        self.register_buffer("lut_qx", sample_q)

    def quantize_inference(self, x):
        """Integer inference: interpolate into the quantized LUT."""
        out = interp(x, self.lut_qx, self.lut_qy)
        return out.round_().clamp_(0.0, 2.0 ** self.num_bits - 1.0)
