#*
# @file MobileNetV2 TAIT quantization evaluation script
# Copyright (c) Yaohui Cai, Zhewei Yao, Zhen Dong, Amir Gholami
# All rights reserved.
# This file is part of ZeroQ repository.
#
# ZeroQ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ZeroQ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ZeroQ repository.  If not, see <http://www.gnu.org/licenses/>.
#*

import argparse
import torch
import numpy as np
import torch.nn as nn
import math
from utils import *
from mobilenetv2 import *
from quantize import *
# model settings

# Command-line options. --nw/--na/--nb/--nm look like bit-width settings and
# --t the TAIT transfer exponent — confirm against quantize.py before relying
# on the names.
parser = argparse.ArgumentParser(
    description="MobileNetv2 Tunable Activation Imbalance Transfer Quantization")
parser.add_argument('--dataset', type=str, default='imagenet',
                    choices=['imagenet', 'cifar10'],
                    help='type of dataset')
parser.add_argument('--batch_size', type=int, default=32,
                    help='batch size of distilled data')
parser.add_argument('--test_batch_size', type=int, default=128,
                    help='batch size of test data')
parser.add_argument("--nw", type=int, default=8)
parser.add_argument("--na", type=int, default=8)
parser.add_argument("--nb", type=int, default=24)
parser.add_argument("--nm", type=int, default=24)
parser.add_argument("--t", type=float, default=0.2)


def inference(x, conv_layers, QReLUs, x_shape):
    """Forward pass of MobileNetV2 rebuilt from its flat, BN-fused conv list.

    Args:
        x: input image batch (NCHW tensor).
        conv_layers: the 54 fused Conv2d layers in execution order
            (0 = init block, 1..51 = seventeen 3-conv inverted-residual
            units, 52 = final 1x1 block, 53 = 1x1 classifier conv).
        QReLUs: per-conv clipping thresholds consumed by QReLU; entry i
            belongs to conv_layers[i] (only convs followed by an
            activation actually use theirs).
        x_shape: output list, mutated in place; after the call x_shape[i]
            is the output shape of conv_layers[i].

    Returns:
        Flattened logits tensor of shape (batch, num_classes).
    """

    def _conv(idx, t, relu):
        # Apply conv `idx`, record its output shape, optionally clip via QReLU.
        t = conv_layers[idx](t)
        x_shape.append(t.shape)
        if relu:
            t = QReLU(t, QReLUs[idx])
        return t

    # init_block
    x = _conv(0, x, relu=True)

    # Seventeen inverted-residual units of three convs each: expansion
    # (activated), depthwise (activated), linear projection (no activation).
    # Units starting at the bases below had no skip connection in the
    # original unrolled code (they change resolution/width), so none here.
    #
    # Fix vs. the unrolled original: the first unit appended conv 2's
    # post-ReLU shape a second time and never appended conv 3's output, so
    # x_shape was misaligned with conv_layers from index 3 onward. The loop
    # records every conv's output exactly once.
    no_skip = {1, 4, 10, 19, 31, 40, 49}
    for base in range(1, 52, 3):
        identity = x
        x = _conv(base, x, relu=True)
        x = _conv(base + 1, x, relu=True)
        x = _conv(base + 2, x, relu=False)
        if base not in no_skip:
            x = x + identity

    # final_block, global average pool, then the 1x1 classifier conv
    x = _conv(52, x, relu=True)
    x = nn.AvgPool2d(kernel_size=7, stride=1, padding=0)(x)
    x = _conv(53, x, relu=False)
    return x.view(x.size(0), -1)

def tait(layer1, layer2, r, t):
    """Tunable Activation Imbalance Transfer between two adjacent convs.

    For each output channel i of `layer1`, multiplies its weight and bias
    by (6 / r[i][0])**t and divides the matching input channel of `layer2`
    by the same factor, so the composed function is unchanged while the
    activation range of channel i is pushed toward 6 (the default QReLU
    clip used elsewhere in this file).

    Args:
        layer1: conv whose output channels are rescaled in place.
        layer2: following conv whose input channels are inversely rescaled.
        r: per-channel ranges; r[i][0] is the observed range of channel i.
        t: transfer strength exponent (0 = no-op).
    """
    assert (layer1.weight.size(0) == layer2.weight.size(1))
    for i in range(layer1.weight.size(0)):
        # Clamp the range away from zero, and compute the per-channel scale
        # once instead of three times.
        scale = math.pow(6 / max(r[i][0], 1e-04), t)
        layer1.weight[i] = layer1.weight[i] * scale
        layer1.bias[i] = layer1.bias[i] * scale
        layer2.weight[:, i] = layer2.weight[:, i] / scale

def tait_residual(layer1, layer2, layer3, layer4, r4, t):
    """Apply the TAIT rescaling to both producer/consumer pairs of a residual.

    For every channel i, divides the output channels of `layer1` and
    `layer3` (weights and biases) by r4[i][0]**t and multiplies the matching
    input channels of `layer2` and `layer4` by the same factor, keeping both
    composed paths numerically equivalent while balancing the pre-add
    activation ranges.

    Args:
        layer1, layer2: first producer/consumer conv pair, rescaled in place.
        layer3, layer4: second producer/consumer conv pair, rescaled in place.
        r4: per-channel ranges; r4[i][0] is the observed range of channel i.
        t: transfer strength exponent.
    """
    assert (layer3.weight.size(0) == layer4.weight.size(1))
    for i in range(layer4.weight.size(1)):
        # Bug fix: the body previously referenced an undefined name `r`
        # (NameError at first call); the ranges arrive via parameter `r4`.
        scale = math.pow(r4[i][0], t)
        layer1.weight[i] = layer1.weight[i] / scale
        layer1.bias[i] = layer1.bias[i] / scale
        layer2.weight[:, i] = layer2.weight[:, i] * scale
        layer3.weight[i] = layer3.weight[i] / scale
        layer3.bias[i] = layer3.bias[i] / scale
        layer4.weight[:, i] = layer4.weight[:, i] * scale

# Parse CLI options and echo them for the run log.
args = parser.parse_args()
print(args)

# Favor speed over bit-exact reproducibility: let cuDNN auto-tune kernels.
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True

# Load pretrained model (MobileNetV2 width-1.0, moved to GPU, inference mode)
model = mobilenetv2_w1(pretrained=True).cuda()
model.eval()

# Load validation data
# NOTE(review): path and for_inception are hard-coded for ImageNet even
# though --dataset accepts 'cifar10' — confirm getTestData handles that.
test_loader = getTestData(args.dataset,
                        batch_size=args.test_batch_size,
                        path='./data/imagenet/',
                        for_inception=False)

# Calibration split — presumably used to collect activation statistics;
# note it is not consumed anywhere in this chunk.
calibration_loader = getCalibrationData(args.dataset,
                        batch_size=args.test_batch_size,
                        path='./data/imagenet/',
                        for_inception=False)

# Flatten the model into ordered per-type layer lists, then fold every
# BatchNorm into its preceding convolution so the graph is conv-only.
conv_layers = []
bn_layers = []
for _, layer in model.named_modules():
    if isinstance(layer, nn.Conv2d):
        conv_layers.append(layer)
    elif isinstance(layer, nn.BatchNorm2d):
        bn_layers.append(layer)
    elif isinstance(layer, nn.Linear):
        fc_layer = layer
fuse_bn(conv_layers, bn_layers)

# Per-conv calibration state, one entry per conv except the last (output)
# layer: Rs[i] holds two range slots per output channel of conv i
# (presumably min/max — confirm in the collection code), QReLUs[i] the
# per-channel clip thresholds, initialized to the ReLU6 value of 6.
Rs = [torch.zeros((layer.out_channels, 2)) for layer in conv_layers[:-1]]
QReLUs = [torch.full((layer.out_channels,), 6.0) for layer in conv_layers[:-1]]

# Trace a single image through the network to record every conv's output
# shape, then use those shapes to count multiply-accumulates per conv.
total, correct = 0, 0
xshape = []
with torch.no_grad():
    for inputs, targets in test_loader:
        inputs, targets = inputs.cuda(), targets.cuda()
        outputs = inference(inputs[0].unsqueeze(0), conv_layers, QReLUs, xshape)
        break

gops = 0
for idx, layer in enumerate(conv_layers):
    shape = xshape[idx]
    print(layer)
    print(shape)
    print(shape[2])
    # MACs = out_H * out_W * Cin * Cout * kH * kW / groups
    gops += (shape[2] * shape[3] * layer.in_channels * layer.out_channels
             * layer.kernel_size[0] * layer.kernel_size[1] / layer.groups)
print(gops)
print(gops / 1000000)