import torch
from torch.ao.quantization import quantize


class QuantizationUtils:
    """Symmetric fake-quantization helpers.

    Quantizes real values with round-half-to-even (banker's rounding) and a
    custom saturation to the symmetric integer range [-127, 127].
    """

    def __init__(self, scale=2 / 255, zero_point=0):
        """
        :param scale: quantization step size (real value represented by one
            quantized unit)
        :param zero_point: offset added in the quantized domain after scaling
        """
        self.scale = scale
        self.zero_point = zero_point
        # Real-valued saturation thresholds corresponding to the quantized
        # clip boundaries used by custom_clip (+/-127.5 in quantized units).
        self.clip_max = 127.5 * scale
        self.clip_min = -127.5 * scale

    @staticmethod
    def round_half_to_even(tensor):
        """
        Round half to even (banker's rounding).

        :param tensor: input floating-point tensor
        :return: rounded tensor, cast back to the input dtype
        """
        original_dtype = tensor.dtype

        rounded = torch.floor(tensor + 0.5)
        # Exact .5 ties that landed on an odd integer are pulled back by one
        # to the nearest even integer. torch's `%` follows Python semantics
        # for negative operands, so this also handles negative ties.
        adjustment = ((rounded - tensor) == 0.5) & ((rounded % 2) != 0)

        return (rounded - adjustment.to(rounded.dtype)).to(original_dtype)

    def custom_clip(self, tensor):
        """
        Custom saturation: values above 127.5 clip to 127, values below
        -127.5 clip to -127; everything in between passes through unchanged.

        :param tensor: input tensor
        :return: clipped tensor
        """
        # Build the clip constants on the input's device and dtype so this
        # also works for tensors living on GPU.
        hi = torch.tensor(127.0, dtype=tensor.dtype, device=tensor.device)
        lo = torch.tensor(-127.0, dtype=tensor.dtype, device=tensor.device)
        tensor = torch.where(tensor > 127.5, hi, tensor)
        tensor = torch.where(tensor < -127.5, lo, tensor)
        return tensor

    def quantize(self, tensor):
        """
        Quantize a tensor using this instance's ``scale`` and ``zero_point``.

        :param tensor: real-valued input tensor
        :return: quantized tensor (still floating point), saturated to
            [-127, 127]
        """
        quantized = tensor / self.scale
        quantized = self.round_half_to_even(quantized) + self.zero_point
        quantized = self.custom_clip(quantized)  # apply custom saturation
        return quantized

    def dequantize(self, tensor):
        """
        Map a quantized tensor back to real values using this instance's
        ``scale`` and ``zero_point``.

        :param tensor: quantized tensor
        :return: dequantized (real-valued) tensor
        """
        return (tensor - self.zero_point) * self.scale


# Example usage
if __name__ == "__main__":
    # Random values in [-1, 1); push the first entry out of range so the
    # saturation path is exercised.
    sample = torch.rand(10, dtype=torch.float32) * 2 - 1
    sample[0] += 2

    print("Original Tensor:", sample)

    # Quantization parameters
    scale = 2 / 255
    zero_point = 0
    quant_utils = QuantizationUtils(scale, zero_point)

    # Quantize, then map back to real values.
    quantized_tensor = quant_utils.quantize(sample)
    print("Quantized Tensor:", quantized_tensor)

    dequantized_tensor = quant_utils.dequantize(quantized_tensor)
    print("Dequantized Tensor:", dequantized_tensor)




