import torch
import torch.nn as nn
# Manual padding helper: unlike Conv2d's built-in padding (which pads with
# zeros), this pads with an arbitrary value — needed because integer-domain
# feature maps must be padded with the quantization zero-point.
def Pad(ifm, pad_h, pad_w, pad_val):
    """Pad an NCHW feature map with `pad_val` on both spatial dims.

    Args:
        ifm: input tensor of shape (batch, channels, height, width).
        pad_h: rows added to both the top and the bottom.
        pad_w: columns added to both the left and the right.
        pad_val: border fill value (python scalar or 0-dim tensor).

    Returns:
        A new default-dtype (float32) tensor of shape
        (batch, channels, height + 2*pad_h, width + 2*pad_w).
    """
    batch, in_channel, in_height, in_width = ifm.shape
    # Fill the whole output with pad_val first; `ones * pad_val` also
    # accepts a 0-dim tensor pad_val (torch.full's fill_value would not).
    ofm = torch.ones((batch, in_channel,
                      in_height + 2 * pad_h,
                      in_width + 2 * pad_w))
    ofm *= pad_val
    # Copy the original map into the interior with one slice assignment
    # instead of the original O(b*c*h*w) Python loop — same result.
    ofm[:, :, pad_h:pad_h + in_height, pad_w:pad_w + in_width] = ifm
    return ofm
# ---- Convolution parameters (randomized each run) ----
batch = torch.randint(1, 5, (1,)).item()
out_channel = torch.randint(1, 64, (1,)).item()
in_channel = torch.randint(1, 64, (1,)).item()
kernel = torch.randint(1, 5, (1,)).item()
pad = torch.randint(1, 5, (1,)).item()
pad_h = pad_w = pad
conv_layer = nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=kernel, stride=1, padding=pad, dilation=1)
ifm = torch.randn(batch, in_channel, 6, 6)
# Float reference output; used below to measure the quantization error.
ofm = conv_layer(ifm)

# ---- Quantization ----
n = 8  # bit width
weight = conv_layer.weight
bias = conv_layer.bias
# Input feature map uses asymmetric quantization, so a zero-point is needed.
scale_ifm = (2**n - 1) / (torch.max(ifm) - torch.min(ifm))
offset_ifm = 0 - torch.min(ifm)
# Quantize the feature map into [0, 2^n - 1].
quant_ifm = torch.round(scale_ifm*(ifm + offset_ifm))
# Zero-point expressed in the integer domain.
zero_point = torch.round(scale_ifm*offset_ifm)
zero_ifm = torch.ones(ifm.shape, dtype=torch.float32) * zero_point

# Weights use symmetric per-output-channel quantization (no zero-point).
scale_weight = torch.zeros(out_channel, dtype=torch.float32)
quant_weight = torch.zeros(weight.shape, dtype=torch.float32)
scale = torch.zeros(out_channel, dtype=torch.float32)
for i in range(out_channel):
    scale_weight[i] = (2**(n-1) - 1) / torch.max(torch.abs(weight[i]))
    quant_weight[i] = torch.round(scale_weight[i] * weight[i])
    # Combined per-channel scale used to dequantize the integer output.
    scale[i] = scale_ifm * scale_weight[i]

# ---- Integer-domain convolution ----
# Reuse conv_layer with the quantized weights.
conv_layer.weight = nn.Parameter(quant_weight)
# Clear the bias; the quantized bias is added back manually afterwards.
conv_layer.bias = nn.Parameter(torch.zeros(bias.shape, dtype=torch.float32))
# In the integer domain the border must be the zero_point, not 0, so disable
# the layer's built-in zero padding and pad by hand.
conv_layer.padding = 0
# Pad the quantized input; note pad_val is zero_point.
quant_ifm_pad = Pad(quant_ifm, pad_h, pad_w, zero_point)
# After padding, every element of zero_ifm_pad equals zero_point.
zero_ifm_pad = Pad(zero_ifm, pad_h, pad_w, zero_point)
# Convolution in the integer domain.
quant_ofm = conv_layer(quant_ifm_pad)
# Convolving the constant zero-point map yields the Zx * W8 correction term
# needed to undo the asymmetric input offset.
zero_ofm = conv_layer(zero_ifm_pad)
# Every output channel of zero_ofm should be constant, so the per-channel
# mean recovers that constant.
zero_bias = torch.mean(zero_ofm , dim=(0, 2, 3))

# Fold zero_bias into the quantized bias.
quant_bias = torch.zeros(out_channel)
for i in range(out_channel):
    quant_bias[i] = torch.round(bias[i]*(scale_ifm * scale_weight[i]) - zero_bias[i])

# Dequantize quant_ofm back to the float domain.
antiquant_ofm = torch.zeros(ofm.shape, dtype=torch.float32)
for b in range(batch):
    for i in range(out_channel):
        antiquant_ofm[b][i] = (quant_ofm[b][i] + quant_bias[i]) / scale[i]

# Mean relative error introduced by quantization.
# NOTE(review): reference values of ofm near zero inflate this ratio;
# consider adding a small epsilon to the denominator if it looks unstable.
diff = antiquant_ofm - ofm
error_rate = diff.abs() / ofm.abs()
# Bug fix: the original used the malformed spec "{:2f}" (width 2, not 2
# decimals) and printed the raw ratio with a "%" sign; scale by 100 and
# use "{:.2f}" so the printed value really is a percentage.
print("error_rate: {:.2f}%".format(error_rate.mean() * 100))