from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from torch.nn.parameter import Parameter
from torch.autograd import Function


# ********************* range trackers (record the pre-quantization value range) *********************
class RangeTracker(nn.Module):
    """Base class for trackers that record the pre-quantization min/max range.

    q_level: 'L' = layer-wise (activations, min/max shape (1, 1, 1, 1)),
             'C' = channel-wise (weights, min/max shape (N, 1, 1, 1)).
    Subclasses implement ``update_range`` to fold each observation into state.
    """

    def __init__(self, q_level):
        super().__init__()
        self.q_level = q_level

    def update_range(self, min_val, max_val):
        """Fold the observed (min_val, max_val) into the tracked state."""
        raise NotImplementedError

    @torch.no_grad()
    def forward(self, input):
        if self.q_level == 'L':  # layer-wise: scalar min/max over the whole tensor
            min_val = torch.min(input)
            max_val = torch.max(input)
        elif self.q_level == 'C':  # channel-wise: reduce dims 3, 2, 1, keep dim 0
            min_val = torch.min(torch.min(torch.min(input, 3, keepdim=True)[0], 2, keepdim=True)[0], 1, keepdim=True)[0]
            max_val = torch.max(torch.max(torch.max(input, 3, keepdim=True)[0], 2, keepdim=True)[0], 1, keepdim=True)[0]
        else:
            # Fail fast: the original fell through to an UnboundLocalError below.
            raise ValueError(f"unknown q_level: {self.q_level!r} (expected 'L' or 'C')")

        self.update_range(min_val, max_val)


class GlobalRangeTracker(RangeTracker):  # W: channel-wise, min/max shape (N, 1, 1, 1)
    """Tracks the all-time min/max per output channel across every update.

    Used for weights: each call folds the new per-channel range into the
    global range seen so far.
    """

    def __init__(self, q_level, out_channels):
        super().__init__(q_level)
        self.register_buffer('min_val', torch.zeros(out_channels, 1, 1, 1))
        self.register_buffer('max_val', torch.zeros(out_channels, 1, 1, 1))
        # 0 until the first observation has seeded the buffers.
        self.register_buffer('first_w', torch.zeros(1))

    def update_range(self, min_val, max_val):
        if self.first_w == 0:
            self.first_w.add_(1)
            self.min_val.add_(min_val)
            self.max_val.add_(max_val)
        else:
            # BUG FIX: the original saved `temp_minval = self.min_val` (an
            # alias, not a copy), then zeroed the buffer in-place with
            # add_(-temp_minval) before taking torch.min(temp_minval, min_val)
            # — so the history was compared against 0 and lost. Fold the new
            # range into the stored one directly instead.
            self.min_val.copy_(torch.min(self.min_val, min_val))
            self.max_val.copy_(torch.max(self.max_val, max_val))


class AveragedRangeTracker(RangeTracker):  # A: layer-wise, min/max shape (1, 1, 1, 1)
    """Tracks a running (exponential moving average) min/max for activations."""

    def __init__(self, q_level, momentum=0.1):
        super().__init__(q_level)
        self.momentum = momentum
        self.register_buffer('min_val', torch.zeros(1))
        self.register_buffer('max_val', torch.zeros(1))
        # 0 until the first batch has seeded the running stats.
        self.register_buffer('first_a', torch.zeros(1))

    def update_range(self, min_val, max_val):
        if self.first_a == 0:
            # First observation: seed the running stats directly.
            self.first_a.add_(1)
            self.min_val.add_(min_val)
            self.max_val.add_(max_val)
        else:
            # Exponential moving average of the observed range.
            m = self.momentum
            self.min_val.copy_((1 - m) * self.min_val + m * min_val)
            self.max_val.copy_((1 - m) * self.max_val + m * max_val)


# ********************* quantizers (perform the fake quantization) *********************
class Round(Function):
    """Rounding with a straight-through estimator: identity gradient."""

    @staticmethod
    def forward(ctx, input):
        return torch.round(input)

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through: pretend round() was the identity for autograd.
        return grad_output.clone()


class Quantizer(nn.Module):
    """Fake-quantization base: track range, scale, round, clamp, de-scale.

    Subclasses define ``update_params`` plus the integer target range
    (``min_val``/``max_val`` buffers). bits == 32 is a pass-through and
    bits == 1 is rejected.
    """

    def __init__(self, bits, range_tracker):
        super().__init__()
        self.bits = bits
        self.range_tracker = range_tracker
        self.register_buffer('scale', None)  # quantization scale factor
        self.register_buffer('zero_point', None)  # quantization zero point

    def update_params(self):
        """Recompute scale / zero_point from the tracked range (subclass hook)."""
        raise NotImplementedError

    def quantize(self, input):
        # Map floats onto the integer grid.
        return input * self.scale - self.zero_point

    def round(self, input):
        # Round with a straight-through gradient.
        return Round.apply(input)

    def clamp(self, input):
        # Clip to the representable integer range [min_val, max_val].
        return torch.clamp(input, self.min_val, self.max_val)

    def dequantize(self, input):
        # Map the integer grid back to floats.
        return (input + self.zero_point) / self.scale

    def forward(self, input):
        if self.bits == 32:
            output = input  # full precision: pass through untouched
        elif self.bits == 1:
            print('！Binary quantization is not supported ！')
            assert self.bits != 1
        else:
            self.range_tracker(input)
            self.update_params()
            output = self.dequantize(self.clamp(self.round(self.quantize(input))))
        return output


class SignedQuantizer(Quantizer):
    """Quantizer targeting the signed range [-2^(bits-1), 2^(bits-1) - 1]."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        half = 1 << (self.bits - 1)
        self.register_buffer('min_val', torch.tensor(-half))
        self.register_buffer('max_val', torch.tensor(half - 1))


class UnsignedQuantizer(Quantizer):
    """Quantizer targeting the unsigned range [0, 2^bits - 1]."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        top = (1 << self.bits) - 1
        self.register_buffer('min_val', torch.tensor(0))
        self.register_buffer('max_val', torch.tensor(top))


# Symmetric quantization
class SymmetricQuantizer(SignedQuantizer):
    """Symmetric quantization: zero point pinned at 0, scale from magnitudes."""

    def update_params(self):
        # Smallest representable magnitude on the integer side.
        q_range = torch.min(torch.abs(self.min_val), torch.abs(self.max_val))
        # Largest observed magnitude on the float side.
        f_range = torch.max(torch.abs(self.range_tracker.min_val), torch.abs(self.range_tracker.max_val))
        self.scale = q_range / f_range  # quantization scale factor
        self.zero_point = torch.zeros_like(self.scale)  # symmetric -> zero offset


# Asymmetric quantization
class AsymmetricQuantizer(UnsignedQuantizer):
    """Asymmetric quantization: map [min, max] onto [0, 2^bits - 1]."""

    def update_params(self):
        q_range = self.max_val - self.min_val  # integer-side span
        f_range = self.range_tracker.max_val - self.range_tracker.min_val  # float-side span
        self.scale = q_range / f_range  # quantization scale factor
        self.zero_point = torch.round(self.range_tracker.min_val * self.scale)  # quantization zero point


# ********************* quantized convolution (quantize A and W, then convolve) *********************
class Conv2d_Q(nn.Conv2d):
    """Conv2d that fake-quantizes activations (layer-wise) and weights (channel-wise).

    ``first_layer`` skips activation quantization (network input stays float).
    ``q_type`` 0 selects symmetric quantizers, anything else asymmetric.
    """

    def __init__(
            self,
            in_channels,
            out_channels,
            kernel_size,
            stride=1,
            padding=0,
            dilation=1,
            groups=1,
            bias=True,
            a_bits=8,
            w_bits=8,
            q_type=1,
            first_layer=0,
    ):
        super().__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias
        )
        # Instantiate the quantizers (A: layer-level, W: channel-level).
        quantizer_cls = SymmetricQuantizer if q_type == 0 else AsymmetricQuantizer
        self.activation_quantizer = quantizer_cls(
            bits=a_bits, range_tracker=AveragedRangeTracker(q_level='L'))
        self.weight_quantizer = quantizer_cls(
            bits=w_bits,
            range_tracker=GlobalRangeTracker(q_level='C', out_channels=out_channels))
        self.first_layer = first_layer

    def forward(self, input):
        # Quantize A (except for the first layer) and W, then convolve.
        if not self.first_layer:
            input = self.activation_quantizer(input)
        q_weight = self.weight_quantizer(self.weight)
        return F.conv2d(
            input=input,
            weight=q_weight,
            bias=self.bias,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.groups
        )


def reshape_to_activation(input):
    """Reshape a per-channel vector so it broadcasts over NCHW activations."""
    return torch.reshape(input, (1, -1, 1, 1))


def reshape_to_weight(input):
    """Reshape a per-channel vector so it broadcasts over (N, C, H, W) weights."""
    return torch.reshape(input, (-1, 1, 1, 1))


def reshape_to_bias(input):
    """Flatten to the 1-D shape expected for a convolution bias."""
    return torch.reshape(input, (-1,))


# ********************* BN-folded quantized convolution (fold BN, then quantize A/W and convolve) *********************
class BNFold_Conv2d_Q(Conv2d_Q):
    """Conv2d that folds BatchNorm into the weight/bias *before* quantizing.

    Training: runs an auxiliary full-precision convolution to obtain the BN
    batch statistics, folds them into bias (batch stats) and weight (running
    stats), quantizes the folded weight, then corrects the output so the net
    effect matches folding the batch stats.
    Inference: folds the running statistics directly.
    """

    def __init__(
            self,
            in_channels,
            out_channels,
            kernel_size,
            stride=1,
            padding=0,
            dilation=1,
            groups=1,
            bias=False,
            eps=1e-5,
            momentum=0.01,  # momentum lowered (0.1 -> 0.01) to damp the jitter quantization adds to the BN batch statistics; empirically ~1% accuracy gain during quantized training
            a_bits=8,
            w_bits=8,
            q_type=1,
            first_layer=0,
    ):
        super().__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias
        )
        self.eps = eps
        self.momentum = momentum
        # BN affine parameters, kept here because BN is folded into the conv.
        self.gamma = Parameter(torch.Tensor(out_channels))
        self.beta = Parameter(torch.Tensor(out_channels))
        self.register_buffer('running_mean', torch.zeros(out_channels))
        self.register_buffer('running_var', torch.ones(out_channels))
        # 0 until the first batch has seeded the running statistics.
        self.register_buffer('first_bn', torch.zeros(1))
        init.uniform_(self.gamma)
        init.zeros_(self.beta)

        # Instantiate the quantizers (A: layer-level, W: channel-level).
        if q_type == 0:
            self.activation_quantizer = SymmetricQuantizer(bits=a_bits, range_tracker=AveragedRangeTracker(q_level='L'))
            self.weight_quantizer = SymmetricQuantizer(bits=w_bits, range_tracker=GlobalRangeTracker(q_level='C',
                                                                                                     out_channels=out_channels))
        else:
            self.activation_quantizer = AsymmetricQuantizer(bits=a_bits,
                                                            range_tracker=AveragedRangeTracker(q_level='L'))
            self.weight_quantizer = AsymmetricQuantizer(bits=w_bits, range_tracker=GlobalRangeTracker(q_level='C',
                                                                                                      out_channels=out_channels))
        self.first_layer = first_layer

    def forward(self, input):
        # Training mode
        if self.training:
            # Plain full-precision convolution first, to obtain the activations
            # the BN statistics are computed from.
            output = F.conv2d(
                input=input,
                weight=self.weight,
                bias=self.bias,
                stride=self.stride,
                padding=self.padding,
                dilation=self.dilation,
                groups=self.groups
            )
            # Update BN statistics (batch and running); reduce over N, H, W.
            dims = [dim for dim in range(4) if dim != 1]
            batch_mean = torch.mean(output, dim=dims)
            batch_var = torch.var(output, dim=dims)
            with torch.no_grad():
                if self.first_bn == 0:
                    # First batch seeds the running stats directly.
                    self.first_bn.add_(1)
                    self.running_mean.add_(batch_mean)
                    self.running_var.add_(batch_var)
                else:
                    # Exponential moving average of the batch statistics.
                    self.running_mean.mul_(1 - self.momentum).add_(batch_mean * self.momentum)
                    self.running_var.mul_(1 - self.momentum).add_(batch_var * self.momentum)
            # Fold BN into bias/weight.
            if self.bias is not None:
                bias = reshape_to_bias(
                    self.beta + (self.bias - batch_mean) * (self.gamma / torch.sqrt(batch_var + self.eps)))
            else:
                bias = reshape_to_bias(
                    self.beta - batch_mean * (self.gamma / torch.sqrt(batch_var + self.eps)))  # bias folds *batch* stats
            weight = self.weight * reshape_to_weight(self.gamma / torch.sqrt(self.running_var + self.eps))  # weight folds *running* stats
        # Inference mode
        else:
            # Fold BN using the running statistics only.
            if self.bias is not None:
                bias = reshape_to_bias(self.beta + (self.bias - self.running_mean) * (
                        self.gamma / torch.sqrt(self.running_var + self.eps)))
            else:
                bias = reshape_to_bias(
                    self.beta - self.running_mean * (self.gamma / torch.sqrt(self.running_var + self.eps)))  # bias folds running stats
            weight = self.weight * reshape_to_weight(self.gamma / torch.sqrt(self.running_var + self.eps))  # weight folds running stats

        # Quantize A and the BN-folded W.
        if not self.first_layer:
            input = self.activation_quantizer(input)
        q_input = input
        q_weight = self.weight_quantizer(weight)
        # Quantized convolution.
        if self.training:  # training mode
            output = F.conv2d(
                input=q_input,
                weight=q_weight,
                bias=self.bias,  # NOTE: folded bias is NOT added here (self.bias is None with the default bias=False)
                stride=self.stride,
                padding=self.padding,
                dilation=self.dilation,
                groups=self.groups
            )
            # Convert the weight's running-stat folding into batch-stat folding:
            # rescale by sqrt(running_var)/sqrt(batch_var), then add the folded bias.
            output *= reshape_to_activation(torch.sqrt(self.running_var + self.eps) / torch.sqrt(batch_var + self.eps))
            output += reshape_to_activation(bias)
        else:  # inference mode
            output = F.conv2d(
                input=q_input,
                weight=q_weight,
                bias=bias,  # folded bias included here: full conv+BN in one op
                stride=self.stride,
                padding=self.padding,
                dilation=self.dilation,
                groups=self.groups
            )
        return output


class QuanConv2d(nn.Module):
    """Quantized conv block: (ReLU) -> conv [+ BN folded or separate] -> (ReLU).

    ``bn_fold == 1`` folds BN into the quantized conv; otherwise a separate
    BatchNorm2d follows a plain quantized conv. ``first_layer`` skips the
    leading ReLU, ``last_relu`` enables the trailing one.
    """

    def __init__(self, input_channels, output_channels,
                 kernel_size=-1, stride=-1, padding=-1, groups=1, last_relu=0, abits=8, wbits=8, bn_fold=0, q_type=1,
                 first_layer=0):
        super(QuanConv2d, self).__init__()
        self.last_relu = last_relu
        self.bn_fold = bn_fold
        self.first_layer = first_layer

        if self.bn_fold == 1:
            self.bn_q_conv = BNFold_Conv2d_Q(input_channels, output_channels,
                                             kernel_size=kernel_size, stride=stride, padding=padding, groups=groups,
                                             a_bits=abits, w_bits=wbits, q_type=q_type, first_layer=first_layer)
        else:
            self.q_conv = Conv2d_Q(input_channels, output_channels,
                                   kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, a_bits=abits,
                                   w_bits=wbits, q_type=q_type, first_layer=first_layer)
            # momentum lowered (0.1 -> 0.01) to damp the jitter quantization adds
            # to the BN batch statistics; empirically ~1% accuracy gain.
            self.bn = nn.BatchNorm2d(output_channels, momentum=0.01)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        if not self.first_layer:
            x = self.relu(x)
        if self.bn_fold == 1:
            x = self.bn_q_conv(x)
        else:
            x = self.bn(self.q_conv(x))
        return self.relu(x) if self.last_relu else x


class Net(nn.Module):
    """Small MNIST CNN with every conv fake-quantized (network I/O stays float)."""

    def __init__(self, cfg=None, abits=8, wbits=8, bn_fold=0, q_type=1):
        super(Net, self).__init__()

        # Shared settings for every quantized conv block.
        common = dict(kernel_size=3, stride=1, padding=1, abits=abits, wbits=wbits,
                      bn_fold=bn_fold, q_type=q_type)
        self.quan_model = nn.Sequential(
            QuanConv2d(1, 8, first_layer=1, **common),
            nn.MaxPool2d(kernel_size=2, stride=2),

            QuanConv2d(8, 16, **common),
            QuanConv2d(16, 32, **common),
            nn.MaxPool2d(kernel_size=2, stride=2),

            QuanConv2d(32, 10, last_relu=1, **common),
            nn.AvgPool2d(kernel_size=7, stride=1, padding=0),
        )

    def forward(self, x):
        x = self.quan_model(x)
        return x.view(x.size(0), -1)


import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms

# Fall back to CPU when CUDA is unavailable instead of crashing at .to(device).
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')


def setup_seed(seed):
    """Seed torch (CPU + all GPUs) and numpy, and make cuDNN deterministic."""
    for seeder in (torch.manual_seed, torch.cuda.manual_seed_all, np.random.seed):
        seeder(seed)
    torch.backends.cudnn.deterministic = True


def adjust_learning_rate(optimizer, epoch):
    """Decay every param group's learning rate by 10x at epochs 15, 17 and 20."""
    if epoch not in (15, 17, 20):
        return
    for group in optimizer.param_groups:
        group['lr'] = group['lr'] * 0.1


def train(epoch):
    """Run one training epoch over `train_loader`.

    Relies on module-level globals: model, train_loader, criterion,
    optimizer, device. Prints progress every 100 batches.
    """
    model.train()

    for batch_idx, (data, target) in enumerate(train_loader):
        # Deprecated Variable() wrappers removed: tensors carry autograd
        # state directly since PyTorch 0.4.
        data, target = data.to(device), target.to(device)
        output = model(data)
        loss = criterion(output, target)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLR: {}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                       100. * batch_idx / len(train_loader), loss.item(),
                optimizer.param_groups[0]['lr']))
    return


def test():
    """Evaluate on `test_loader` and print accuracy.

    Relies on module-level globals: model, test_loader, criterion, device.
    """
    model.eval()
    test_loss = 0
    correct = 0

    # no_grad: evaluation needs no autograd graph (saves memory/time);
    # deprecated Variable() wrappers and .data access removed.
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += criterion(output, target).item()
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).cpu().sum()
    acc = 100. * float(correct) / len(test_loader.dataset)

    print('acc is {}'.format(acc))


def quant(x, scale):
    """Quantize to the int8 grid: round(x * scale), clipped to [-128, 127]."""
    scaled = torch.round(x * scale)
    return torch.clamp(scaled, min=-128, max=127)


def dequant(x, scale):
    """Invert `quant` (up to rounding/clipping): divide by the scale."""
    return torch.div(x, scale)


def param_fold(weight, bias, mean, var, gamma, beta, eps):
    """Fold BatchNorm statistics into a conv's weight and bias (inference fusion).

    weight: (out_channels, in_channels, kH, kW) conv weight.
    bias:   per-channel conv bias, or None when the conv has no bias.
    mean, var, gamma, beta: per-channel BN statistics / affine parameters.
    Returns the folded (weight, bias) pair; bias is flattened to 1-D.
    """
    # Per-channel BN scaling factor gamma / sqrt(var + eps).
    scale = gamma / torch.sqrt(var + eps)
    # Fixed: `bias == None` elementwise-compares when bias is a tensor;
    # identity test is the correct (and idiomatic) check.
    if bias is None:
        bias = (beta - mean * scale).reshape(-1)
    else:
        bias = (beta + (bias - mean) * scale).reshape(-1)
    weight = weight * scale.reshape(-1, 1, 1, 1)  # fold running stats into W
    return weight, bias


if __name__ == '__main__':
    # ---- Quantization-aware training, then export + integer inference check ----
    setup_seed(int(time.time()))

    print('==> Preparing data..')
    train_dataset = torchvision.datasets.MNIST(root='../../data', train=True, transform=transforms.ToTensor(),
                                               download=True)
    test_dataset = torchvision.datasets.MNIST(root='../../data', train=False, transform=transforms.ToTensor())
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=128, shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=128, shuffle=False)

    print('******Initializing model******')
    # BN-folded (bn_fold=1), symmetric (q_type=0), 8-bit A/W quantized model.
    model = Net(abits=8, wbits=8, bn_fold=1, q_type=0)
    model.to(device)
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.xavier_uniform_(m.weight.data)
            if m.bias is not None:
                m.bias.data.zero_()
        elif isinstance(m, nn.Linear):
            m.weight.data.normal_(0, 0.01)
            m.bias.data.zero_()

    base_lr = float(0.0005)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=0.00001)

    # Quantization-aware training loop (LR decays at epochs 15/17/20).
    for epoch in range(1, 30+1):
        adjust_learning_rate(optimizer, epoch)
        train(epoch)
        test()

    param = model.state_dict()
    torch.save(model.state_dict(),"param.pth")

    # ---- Pull per-layer weights, BN stats and quantizer scales from the state dict ----
    # First conv layer (network input stays float, so no activation scale here)
    Wc0 = param['quan_model.0.bn_q_conv.weight']

    bn0_mean = param['quan_model.0.bn_q_conv.running_mean']
    bn0_var = param['quan_model.0.bn_q_conv.running_var']
    bn0_gamma = param['quan_model.0.bn_q_conv.gamma']
    bn0_beta = param['quan_model.0.bn_q_conv.beta']
    Wscale0 = param['quan_model.0.bn_q_conv.weight_quantizer.scale']
    # Second conv layer
    Wq1 = param['quan_model.2.bn_q_conv.weight']

    bn1_mean = param['quan_model.2.bn_q_conv.running_mean']
    bn1_var = param['quan_model.2.bn_q_conv.running_var']
    bn1_gamma = param['quan_model.2.bn_q_conv.gamma']
    bn1_beta = param['quan_model.2.bn_q_conv.beta']
    Wscale1 = param['quan_model.2.bn_q_conv.weight_quantizer.scale']
    Ascale1 = param['quan_model.2.bn_q_conv.activation_quantizer.scale']
    # Third conv layer
    Wq2 = param['quan_model.3.bn_q_conv.weight']

    bn2_mean = param['quan_model.3.bn_q_conv.running_mean']
    bn2_var = param['quan_model.3.bn_q_conv.running_var']
    bn2_gamma = param['quan_model.3.bn_q_conv.gamma']
    bn2_beta = param['quan_model.3.bn_q_conv.beta']
    Wscale2 = param['quan_model.3.bn_q_conv.weight_quantizer.scale']
    Ascale2 = param['quan_model.3.bn_q_conv.activation_quantizer.scale']
    # Fourth conv layer
    Wq3 = param['quan_model.5.bn_q_conv.weight']

    bn3_mean = param['quan_model.5.bn_q_conv.running_mean']
    bn3_var = param['quan_model.5.bn_q_conv.running_var']
    bn3_gamma = param['quan_model.5.bn_q_conv.gamma']
    bn3_beta = param['quan_model.5.bn_q_conv.beta']
    Wscale3 = param['quan_model.5.bn_q_conv.weight_quantizer.scale']
    Ascale3 = param['quan_model.5.bn_q_conv.activation_quantizer.scale']

    # ---- Fold BN into each layer, then quantize the folded weights/biases ----
    # Layer 0 keeps a float (dequantized) weight because its input is float.
    w0, b0 = param_fold(weight=Wc0, bias=None, mean=bn0_mean, var=bn0_var, gamma=bn0_gamma, beta=bn0_beta,eps=0.00001)
    w0 = dequant(quant(w0, Wscale0), Wscale0)

    w1, b1 = param_fold(weight=Wq1, bias=None, mean=bn1_mean, var=bn1_var, gamma=bn1_gamma, beta=bn1_beta,eps=0.00001)
    wq1 = quant(w1, Wscale1)
    bq1 = torch.round(Wscale1.view(-1) * Ascale1 * b1)  # (16,1,1,1) (1,) (16,)

    w2, b2 = param_fold(weight=Wq2, bias=None, mean=bn2_mean, var=bn2_var, gamma=bn2_gamma, beta=bn2_beta,eps=0.00001)
    wq2 = quant(w2, Wscale2)
    bq2 = torch.round(Wscale2.view(-1) * Ascale2 * b2)

    w3, b3 = param_fold(weight=Wq3, bias=None, mean=bn3_mean, var=bn3_var, gamma=bn3_gamma, beta=bn3_beta,eps=0.00001)
    wq3 = quant(w3, Wscale3)
    bq3 = torch.round(Wscale3.view(-1) * Ascale3 * b3)

    # ---- Export quantized parameters and scales as raw binary files ----
    w0.cpu().numpy().tofile("w0.bin")    #(8,1,3,3)
    b0.cpu().numpy().tofile("b0.bin")    #(8,)
    wq1.cpu().numpy().tofile("w1.bin")    #(16,8,3,3)
    bq1.cpu().numpy().tofile("b1.bin")    #(16,)
    wq2.cpu().numpy().tofile("w2.bin")    #(32,16,3,3)
    bq2.cpu().numpy().tofile("b2.bin")    #(32,)
    wq3.cpu().numpy().tofile("w3.bin")    #(10,32,3,3)
    bq3.cpu().numpy().tofile("b3.bin")    #(10,)
    Ascale1.cpu().numpy().tofile("As1.bin")
    Ascale2.cpu().numpy().tofile("As2.bin")
    Ascale3.cpu().numpy().tofile("As3.bin")
    Wscale1.cpu().numpy().tofile("Ws1.bin")
    Wscale2.cpu().numpy().tofile("Ws2.bin")
    Wscale3.cpu().numpy().tofile("Ws3.bin")

    # Round-trip the exported files to validate the binary format.
    w0  = torch.from_numpy(np.fromfile("w0.bin",dtype=np.float32)).to(device).view(8,1,3,3)
    b0  = torch.from_numpy(np.fromfile("b0.bin",dtype=np.float32)).to(device)
    wq1 = torch.from_numpy(np.fromfile("w1.bin",dtype=np.float32)).to(device).view(16,8,3,3)
    bq1 = torch.from_numpy(np.fromfile("b1.bin",dtype=np.float32)).to(device)
    wq2 = torch.from_numpy(np.fromfile("w2.bin",dtype=np.float32)).to(device).view(32,16,3,3)
    bq2 = torch.from_numpy(np.fromfile("b2.bin",dtype=np.float32)).to(device)
    wq3 = torch.from_numpy(np.fromfile("w3.bin",dtype=np.float32)).to(device).view(10,32,3,3)
    bq3 = torch.from_numpy(np.fromfile("b3.bin",dtype=np.float32)).to(device)

    # ---- Integer-arithmetic inference check: quantize activations, convolve
    # with quantized weights, then rescale by 1/(Ascale*Wscale) per layer ----
    cnt = 0
    for data, target in test_loader:
        data,target=data.to(device),target.to(device)
        x = torch.nn.functional.conv2d(data, weight=w0 ,stride=1, bias=b0, padding=1)  # 128,8,28,28
        x = torch.nn.functional.max_pool2d(x, kernel_size=2, stride=2)  # 128,8,14,14
        x = torch.relu(x)

        xq = quant(x, Ascale1)
        x = torch.nn.functional.conv2d(xq, weight=wq1, stride=1, bias=bq1, padding=1)  # (128,16,14,14) uint8 convolution
        x = x / (Ascale1.view(1, -1, 1, 1) * Wscale1.view(1, -1, 1, 1))  # x(128,16,14,14) As(1,) Ws(16,1,1,1) bq(16,)

        x = torch.relu(x)

        xq = quant(x, Ascale2)
        x = torch.nn.functional.conv2d(xq, weight=wq2, stride=1, bias=bq2, padding=1)  # (128,32,14,14)  uint8 convolution
        x = x / (Ascale2.view(1, -1, 1, 1) * Wscale2.view(1, -1, 1, 1))  # x(128,32,14,14) As(1,) Ws(32,1,1,1) bq(32,)

        x = torch.nn.functional.max_pool2d(x, kernel_size=2, stride=2)  # (128,32,7,7)
        x = torch.relu(x)

        xq = quant(x, Ascale3)
        x = torch.nn.functional.conv2d(xq, weight=wq3, stride=1, bias=bq3, padding=1)  # 128,10,7,7   uint8 convolution
        x = x / (Ascale3.view(1, -1, 1, 1) * Wscale3.view(1, -1, 1, 1))  # (128,10,7,7) As(1,) Ws(10,1,1,1) bq(10,)

        x = torch.relu(x)
        x = torch.nn.functional.avg_pool2d(x, kernel_size=7)  # (128,10,1,1)
        x = x.view(x.size(0), -1)  # (128,10)
        output = torch.argmax(x, dim=1)

        for i in range(output.size(0)):
            if output[i] == target[i]:
                cnt += 1
    # NOTE: assumes the MNIST test set size of exactly 10000 samples.
    print(cnt / 10000)

    # ---- Dump the test images/labels for an offline (e.g. C/FPGA) implementation ----
    data = np.zeros((10000, 1, 28, 28), dtype=np.float32)
    label = np.zeros((10000,), dtype=np.float32)
    n = 0
    for d, l in test_loader:
        data[128 * n:128 * n + 128, :, :, :] = d.numpy()  # 128,1,28,28
        label[128 * n:128 * n + 128] = l.numpy().astype(np.float32)  # 128
        n += 1

    data.tofile("img.bin")
    label.tofile("label.bin")




