#*
# @file Different utility functions
# Copyright (c) Yaohui Cai, Zhewei Yao, Zhen Dong, Amir Gholami
# All rights reserved.
# This file is part of ZeroQ repository.
#
# ZeroQ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ZeroQ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ZeroQ repository.  If not, see <http://www.gnu.org/licenses/>.
#*

import argparse
import torch
import numpy as np
import torch.nn as nn
import math
from utils import *
from mobilenetv2 import *
from quantize import *
# model settings

# Command-line configuration for the quantized-MobileNetV2 evaluation run.
parser = argparse.ArgumentParser(
	description="MobileNetv2 Tunable Activation Imbalance Transfer Quantization")
parser.add_argument('--dataset', type=str, default='imagenet',
	choices=['imagenet', 'cifar10'], help='type of dataset')
parser.add_argument('--batch_size', type=int, default=32,
	help='batch size of distilled data')
parser.add_argument('--test_batch_size', type=int, default=128,
	help='batch size of test data')

# Integer quantization knobs (presumably bit-widths for weights, activations,
# biases and multipliers — confirm against quantize.py) plus the transfer
# strength t.
for _flag, _default in (("--nw", 8), ("--na", 8), ("--nb", 24), ("--nm", 24)):
	parser.add_argument(_flag, type=int, default=_default)
parser.add_argument("--t", type=float, default=0.2)

# Layer-name prefixes for feature-map dumps, in execution order: the stem conv
# ("conv0"), 17 bottleneck nodes of 3 convs each across stages b1..b7, and the
# two head convs ("conv8", "conv9") — 54 names in total.
_STAGE_NODES = (1, 2, 3, 4, 3, 3, 1)  # bottleneck nodes per stage b1..b7

prefixs = ["conv0"]
for _stage, _nodes in enumerate(_STAGE_NODES, start=1):
	for _node in range(1, _nodes + 1):
		prefixs.extend("b%dn%d_c%d" % (_stage, _node, _conv) for _conv in range(1, 4))
prefixs += ["conv8", "conv9"]

# feature map to binary (H,W,C)
def fm2bin(fm, prefix):
	"""Dump a feature-map tensor to ./blob/<prefix>.fm as raw float32 bytes."""
	# NOTE(review): permute(0, 3, 1, 2) does not obviously produce the (H,W,C)
	# layout the header comment promises — confirm the intended axis order.
	reordered = fm.permute(0, 3, 1, 2)
	out_path = "./blob/" + prefix + ".fm"
	reordered.detach().cpu().numpy().astype(np.float32).tofile(out_path)

def qinference(x, conv_layers, qws, qbs, qms):
	"""Integer-only forward pass of the fused, quantized MobileNetV2.

	x           -- quantized input batch, NCHW (as produced by dequantize(inputs, SA[0]))
	conv_layers -- fused conv layers in execution order
	qws/qbs/qms -- per-layer quantized weights, biases and output multipliers
	Returns the flattened (batch, classes) logits tensor.
	"""

	def bottleneck(inp, i):
		# One inverted-residual bottleneck node: three conv+activation steps;
		# the final (projection) activation stays linear (relu=False).
		out = inp
		for step in range(3):
			out = qconv(out, conv_layers[i], qws[i])
			if step < 2:
				out = qact(out, qbs[i], qms[i], nm=args.nm)
			else:
				out = qact(out, qbs[i], qms[i], nm=args.nm, relu=False)
			i += 1
		return out, i

	# conv0 stem
	i = 0
	x = qconv(x, conv_layers[i], qws[i])
	x = qact(x, qbs[i], qms[i], nm=args.nm)
	i += 1

	# stage 1 (single node, no skip reuse)
	x, i = bottleneck(x, i)

	# stage 2
	b2n1, i = bottleneck(x, i)
	b2n2, i = bottleneck(b2n1, i)

	# Stages 3-7: each node consumes the running sum of the stage's previous
	# node outputs, saturated to the int8 range [-128, 127].
	# stage 3
	b3n1, i = bottleneck((b2n1 + b2n2).clamp(-128, 127), i)
	b3n2, i = bottleneck(b3n1, i)
	b3n3, i = bottleneck((b3n1 + b3n2).clamp(-128, 127), i)

	# stage 4
	b4n1, i = bottleneck((b3n1 + b3n2 + b3n3).clamp(-128, 127), i)
	b4n2, i = bottleneck(b4n1, i)
	b4n3, i = bottleneck((b4n1 + b4n2).clamp(-128, 127), i)
	b4n4, i = bottleneck((b4n1 + b4n2 + b4n3).clamp(-128, 127), i)

	# stage 5
	b5n1, i = bottleneck((b4n1 + b4n2 + b4n3 + b4n4).clamp(-128, 127), i)
	b5n2, i = bottleneck(b5n1, i)
	b5n3, i = bottleneck((b5n1 + b5n2).clamp(-128, 127), i)

	# stage 6
	b6n1, i = bottleneck((b5n1 + b5n2 + b5n3).clamp(-128, 127), i)
	b6n2, i = bottleneck(b6n1, i)
	b6n3, i = bottleneck((b6n1 + b6n2).clamp(-128, 127), i)

	# stage 7 (single node)
	b7n1, i = bottleneck((b6n1 + b6n2 + b6n3).clamp(-128, 127), i)

	# conv8 (head conv before pooling)
	x = qconv(b7n1, conv_layers[i], qws[i])
	x = qact(x, qbs[i], qms[i], nm=args.nm)
	i += 1

	# Global pooling as a plain spatial sum.
	# NOTE(review): sum, not mean — presumably the 1/(H*W) factor is folded
	# into the following quantization multiplier; confirm.
	x = x.reshape(x.shape[0], x.shape[1], -1)
	x = torch.sum(x, dim=2).reshape(x.shape[0], x.shape[1], 1, 1)

	# conv9 classifier; note qact_last takes the conv layer itself, not a qb.
	x = qconv(x, conv_layers[i], qws[i])
	x = qact_last(x, conv_layers[i], qms[i], nm=args.nm)
	return x.view(x.size(0), -1)

def tait(layer1, layer2, r, t):
	"""Tunable Activation Imbalance Transfer between two consecutive layers.

	Scales each output channel i of layer1 (weight row and bias) up by
	(6 / max(r[i][0], 1e-4)) ** t and divides the matching input channel of
	layer2 by the same factor, so the composed function is unchanged while
	the intermediate activation range is rebalanced. Mutates both layers
	in place.

	layer1, layer2 -- modules with .weight (and .bias on layer1); layer1's
	                  output channels must equal layer2's input channels
	r              -- per-channel range tensor; r[i][0] is channel i's max
	t              -- transfer strength exponent (0 disables the transfer)
	"""
	assert layer1.weight.size(0) == layer2.weight.size(1)
	for i in range(layer1.weight.size(0)):
		# Compute the per-channel factor once (was recomputed three times);
		# the 1e-4 floor guards against division by a zero range.
		s = math.pow(6 / max(r[i][0], 1e-04), t)
		layer1.weight[i] = layer1.weight[i] * s
		layer1.bias[i] = layer1.bias[i] * s
		layer2.weight[:, i] = layer2.weight[:, i] / s

def tait_residual(layer1, layer2, layer3, layer4, r4, t):
	"""Imbalance transfer across two conv pairs that feed one residual sum.

	For each shared channel i, divides the producing layers (layer1, layer3:
	weight row and bias) by r4[i][0] ** t and multiplies the consuming
	layers' input channel (layer2, layer4) by the same factor, keeping both
	composed paths functionally unchanged. Mutates all four layers in place.

	r4 -- per-channel range tensor for the residual output; r4[i][0] is used
	t  -- transfer strength exponent
	"""
	assert layer3.weight.size(0) == layer4.weight.size(1)
	for i in range(layer4.weight.size(1)):
		# BUG FIX: the body previously read the module-level global `r`
		# instead of the `r4` parameter, so the Rs[9] argument passed by the
		# caller was silently ignored. Factor is also hoisted (was recomputed
		# six times per channel).
		s = math.pow(r4[i][0], t)
		layer1.weight[i] = layer1.weight[i] / s
		layer1.bias[i] = layer1.bias[i] / s
		layer2.weight[:, i] = layer2.weight[:, i] * s
		layer3.weight[i] = layer3.weight[i] / s
		layer3.bias[i] = layer3.bias[i] / s
		layer4.weight[:, i] = layer4.weight[:, i] * s

args = parser.parse_args()
print(args)

# Favor speed over reproducibility: cudnn autotunes the fastest kernels,
# so repeated runs may differ slightly.
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True

# Load pretrained model (from mobilenetv2.py) and freeze batch-norm/dropout.
model = mobilenetv2_w1(pretrained=True).cuda()
model.eval()

# Load validation data
test_loader = getTestData(args.dataset,
						batch_size=args.test_batch_size,
						path='./data/imagenet/',
						for_inception=False)

# Separate split used below to measure per-channel activation ranges (Rs).
calibration_loader = getCalibrationData(args.dataset,
						batch_size=args.test_batch_size,
						path='./data/imagenet/',
						for_inception=False)

# Test the final quantized model
# Collect the conv and batch-norm layers in traversal order, then fold each
# batch norm into its preceding conv so inference uses conv-only arithmetic.
conv_layers = []
bn_layers = []
for _name, layer in model.named_modules():
	if isinstance(layer, nn.Conv2d):
		conv_layers.append(layer)
	elif isinstance(layer, nn.BatchNorm2d):
		bn_layers.append(layer)
	elif isinstance(layer, nn.Linear):
		fc_layer = layer
fuse_bn(conv_layers, bn_layers)

# One range tensor per conv output (filled in by calibration): column 0/1 per
# channel, initialized to zero. QReLUs starts at the ReLU6 clip value of 6.
Rs = []
QReLUs = []
for layer in conv_layers[:-1]:
	Rs.append(torch.zeros((layer.out_channels, 2)))
	QReLUs.append(torch.ones(layer.out_channels) * 6)

# First calibration pass: run the float model over the calibration split;
# calibration() presumably accumulates per-channel activation ranges into Rs.
bar = Bar('Calibration', max=len(calibration_loader))
correct = 0
total = 0
with torch.no_grad():
	for batch_idx, (inputs, targets) in enumerate(calibration_loader):
		inputs = inputs.cuda()
		targets = targets.cuda()
		outputs = calibration(inputs, conv_layers, Rs)
		predicted = outputs.max(1)[1]
		total += targets.size(0)
		correct += predicted.eq(targets).sum().item()
		acc = correct / total
		bar.suffix = f'({batch_idx + 1}/{len(calibration_loader)}) | ETA: {bar.eta_td} | top1: {acc}'
		bar.next()
print()

# Apply the tunable activation imbalance transfer (strength args.t) to
# hand-picked pairs of consecutive fused conv layers. At this point Rs[i] is
# presumably the calibrated output range of conv_layers[i] (the input-image
# range row is prepended to Rs only later, before quantization).
tait(conv_layers[0], conv_layers[1], Rs[0], args.t)
tait(conv_layers[2], conv_layers[3], Rs[2], args.t)
tait(conv_layers[3], conv_layers[4], Rs[3], args.t)
tait(conv_layers[5], conv_layers[6], Rs[5], args.t)
tait(conv_layers[8], conv_layers[9], Rs[8], args.t)
# NOTE(review): verify tait_residual actually consumes this Rs[9] argument —
# its parameter is named r4 while its body refers to a name `r`.
tait_residual(conv_layers[6], conv_layers[7], conv_layers[9], conv_layers[10], Rs[9], args.t)
tait(conv_layers[11], conv_layers[12], Rs[11], args.t)
tait(conv_layers[14], conv_layers[15], Rs[14], args.t)
tait(conv_layers[17], conv_layers[18], Rs[17], args.t)
tait(conv_layers[20], conv_layers[21], Rs[20], args.t)
tait(conv_layers[23], conv_layers[24], Rs[23], args.t)
tait(conv_layers[26], conv_layers[27], Rs[26], args.t)
tait(conv_layers[29], conv_layers[30], Rs[29], args.t)
tait(conv_layers[32], conv_layers[33], Rs[32], args.t)
tait(conv_layers[35], conv_layers[36], Rs[35], args.t)
tait(conv_layers[38], conv_layers[39], Rs[38], args.t)
tait(conv_layers[41], conv_layers[42], Rs[41], args.t)
tait(conv_layers[44], conv_layers[45], Rs[44], args.t)
tait(conv_layers[47], conv_layers[48], Rs[47], args.t)
tait(conv_layers[50], conv_layers[51], Rs[50], args.t)

# Second calibration pass: re-measure the activation ranges now that the
# imbalance transfer has rescaled the weights.
bar = Bar('Calibration', max=len(calibration_loader))
correct = 0
total = 0
with torch.no_grad():
	for batch_idx, (inputs, targets) in enumerate(calibration_loader):
		inputs = inputs.cuda()
		targets = targets.cuda()
		outputs = calibration(inputs, conv_layers, Rs)
		predicted = outputs.max(1)[1]
		total += targets.size(0)
		correct += predicted.eq(targets).sum().item()
		acc = correct / total
		bar.suffix = f'({batch_idx + 1}/{len(calibration_loader)}) | ETA: {bar.eta_td} | top1: {acc}'
		bar.next()
print()

# Prepend the range of the normalized input image itself so Rs[i] lines up
# with the input of conv_layers[i]. Column 0 holds the channel max
# ((1 - mean) / std), column 1 the channel min ((0 - mean) / std), using the
# standard ImageNet per-channel normalization constants.
r = torch.zeros((3, 2))
for _ch, (_mean, _std) in enumerate(zip((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))):
	r[_ch, 0] = (1 - _mean) / _std
	r[_ch, 1] = (0 - _mean) / _std
Rs.insert(0, r)

# Per-layer quantization scales: SW[i] for conv i's weights, SA[i] for its
# input activations (after the insert above, Rs[i] is the range of the tensor
# entering conv i).
SW, SA = [], []
for i in range(len(conv_layers)-1):
	sw = quantize_weight(conv_layers[i].weight, args.nw)
	SW.append(sw)
	# The activation scale depends on whether the *next* layer is depthwise
	# (groups > 1) — presumably quantize_activation picks the scale
	# differently in that case; see quantize.py.
	if(conv_layers[i+1].groups>1):
		next_dw = True
	else:
		next_dw = False
	sa = quantize_activation(Rs[i], next_dw, args.na)
	SA.append(sa)

# Scales for the final conv layer.
# NOTE(review): `i` here is the leftover loop index (len(conv_layers) - 2),
# so this reuses Rs[len-2]; Rs[-1] may have been intended — confirm.
sw = quantize_weight(conv_layers[-1].weight, args.nw)
SW.append(sw)
sa = quantize_activation(Rs[i], False, args.na)
SA.append(sa)

# Activation-scale merge groups: tensors that are summed together in
# qinference's residual connections must share one input scale, so every
# member of a group takes the scale of the group's last member. Previously
# merge_list was dead data and the assignments were hand-unrolled duplicates
# of it; now the list drives the merge directly.
merge_list = [
(7,10),
(13,16,19),
(22,25,28,31),
(34,37,40),
(43,46,49)
]

for group in merge_list:
	shared = SA[group[-1]]
	for idx in group[:-1]:
		SA[idx] = shared

# Materialize the integer inference tables: quantized weights for every conv,
# quantized biases and output multipliers for all but the last.
qws = [gen_qw(layer, sw) for layer, sw in zip(conv_layers, SW)]

qbs = [gen_qb(conv_layers[i], SW[i], SA[i]) for i in range(len(conv_layers) - 1)]

qms = [gen_qm(SW[i], SA[i], SA[i + 1], nm=args.nm) for i in range(len(conv_layers) - 1)]
# The last conv has no following activation scale; a unit scale is used.
qms.append(gen_qm(SW[-1], SA[-1], 1, nm=args.nm))

# Evaluate the fully quantized model on the test split and report top-1.
bar = Bar('Testing', max=len(test_loader))
correct = 0
total = 0
with torch.no_grad():
	for batch_idx, (inputs, targets) in enumerate(test_loader):
		inputs = inputs.cuda()
		targets = targets.cuda()
		# Map the float input onto the integer grid of the first layer
		# (SA[0]) before the integer-only forward pass.
		qinputs = dequantize(inputs, SA[0])
		outputs = qinference(qinputs, conv_layers, qws, qbs, qms)
		predicted = outputs.max(1)[1]
		total += targets.size(0)
		correct += predicted.eq(targets).sum().item()
		acc = correct / total
		bar.suffix = f'({batch_idx + 1}/{len(test_loader)}) | ETA: {bar.eta_td} | top1: {acc}'
		bar.next()
print('\nFinal acc: %.2f%% (%d/%d)' % (acc*100, correct, total))
bar.finish()