import torch
import torch.nn as nn
import numpy as np

def quantize_activation(R, next_dw, na, relu=True):
	"""Compute a single activation scale from a per-channel calibration range.

	Args:
		R: (C, 2) tensor of calibration ranges; column 0 holds the observed
			per-channel maxima, column 1 the observed minima.
		next_dw: unused; kept for API compatibility (flags whether the next
			layer is depthwise).
		na: activation bit width.
		relu: accepted for API compatibility — `quantize` marks the final
			(non-ReLU) layer with relu=False. Signedness is still determined
			from the data in R, which subsumes the flag.

	Returns:
		A 1-element float tensor holding the scale factor (>= 1e-8).
	"""
	S = torch.zeros(1)
	if R[:, 1].sum() == 0.:
		# All channel minima are zero (post-ReLU data): use the unsigned range.
		amax = 2.**na - 1
		S[0] = max(R.max() / amax, 1e-08)
	else:
		# Signed range: scale by whichever side of zero extends further.
		amin = -2.**(na - 1)
		amax = 2.**(na - 1) - 1
		if abs(R.max()) > abs(R.min()):
			S[0] = max(R.max() / amax, 1e-08)
		else:
			S[0] = max(R.min() / amin, 1e-08)
	return S

def quantize_weight(W, nw):
	"""Return per-output-channel scales for nw-bit symmetric weight quantization.

	For each output channel, the scale maps the dominant side of the weight
	range onto the corresponding end of the signed nw-bit grid.
	"""
	qmax = 2.**(nw - 1) - 1
	scales = torch.zeros(W.shape[0])
	for idx, w in enumerate(W):
		neg_mag = abs(w.min())
		pos_mag = abs(w.max())
		# The negative end of the grid holds one extra code (qmax + 1 == |qmin|).
		raw = pos_mag / qmax if pos_mag > neg_mag else neg_mag / (qmax + 1)
		scales[idx] = max(raw, 1e-8)
	return scales

def quantize_weight_fake(W, nw):
	"""Fake-quantize W per output channel: scale down, round, clamp, scale back.

	Returns a float tensor of W's shape whose values lie exactly on the
	nw-bit symmetric quantization grid of each channel.
	"""
	qmin = -2.**(nw - 1)
	qmax = 2.**(nw - 1) - 1
	fq = torch.zeros(W.shape)
	for idx in range(W.shape[0]):
		neg_mag = abs(W[idx].min())
		pos_mag = abs(W[idx].max())
		# Pick the scale from the dominant side (qmax + 1 == |qmin|).
		raw = pos_mag / qmax if pos_mag > neg_mag else neg_mag / (qmax + 1)
		scale = max(raw, 1e-8)
		# Quantize and immediately dequantize with the same scale.
		fq[idx] = W[idx].div(scale).round().clamp(qmin, qmax).mul(scale)
	return fq

def quantize(conv_layers, Rs, nw, na):
	"""Compute weight and activation scales for every conv layer.

	Args:
		conv_layers: list of nn.Conv2d layers in forward order.
		Rs: list of (C, 2) calibration-range tensors, one per layer output.
		nw: weight bit width.
		na: activation bit width.

	Returns:
		(SW, SA): per-layer lists of weight scales and activation scales.
	"""
	SW, SA = [], []
	for i in range(len(conv_layers) - 1):
		# groups > 1 on the following layer means this one feeds a depthwise conv.
		next_dw = conv_layers[i + 1].groups > 1
		SW.append(quantize_weight(conv_layers[i].weight, nw))
		SA.append(quantize_activation(Rs[i], next_dw, na))
	# Final layer: no successor, output is not ReLU-activated.
	# BUG FIX: the original passed relu=False, a keyword quantize_activation
	# does not accept (TypeError); signedness is derived from Rs[-1] instead.
	SW.append(quantize_weight(conv_layers[-1].weight, nw))
	SA.append(quantize_activation(Rs[-1], False, na))
	return SW, SA

def get_range(R, A):
	"""Update per-channel running (max, min) ranges in R from activation A.

	Args:
		R: (C, 2) tensor mutated in place; column 0 is the running max,
			column 1 the running min.
		A: (N, C, H, W) activation tensor.
	"""
	assert R.size(0) == A.size(1)
	# Reduce over batch and spatial dims in one vectorized pass instead of a
	# per-channel Python loop that recomputed each max/min twice.
	R[:, 0] = torch.maximum(R[:, 0], A.amax(dim=(0, 2, 3)))
	R[:, 1] = torch.minimum(R[:, 1], A.amin(dim=(0, 2, 3)))

def fuse_bn(conv_layers, bn_layers):
	"""Fold each BatchNorm's affine transform into the preceding conv.

	After fusion, conv(x) alone reproduces bn(conv(x)) evaluated with the BN
	running statistics (i.e. eval mode). conv_layers[i] is updated in place;
	bn_layers[i] becomes redundant.

	Fixes vs. the original:
	  * runs under torch.no_grad() — in-place edits of grad-requiring
	    Parameters otherwise raise a RuntimeError;
	  * an existing conv bias is folded into the fused bias instead of
	    being silently dropped (identical result when conv.bias is None).
	"""
	with torch.no_grad():
		for conv, bn in zip(conv_layers, bn_layers):
			std = torch.sqrt(bn.running_var + bn.eps)
			scale = bn.weight.div(std)
			# Fused bias: beta + (b - mu) * gamma / std.
			if conv.bias is not None:
				fused_bias = bn.bias + (conv.bias - bn.running_mean).mul(scale)
			else:
				fused_bias = bn.bias - bn.running_mean.mul(scale)
			conv.bias = torch.nn.Parameter(fused_bias)
			# Scale each output channel's filter; broadcast over (in, kH, kW).
			conv.weight = torch.nn.Parameter(conv.weight * scale.view(-1, 1, 1, 1))

def calibration(x, conv_layers, R):
	"""Run one forward pass through the network, recording activation ranges.

	Each conv is followed (where the architecture has one) by ReLU6, and
	after every tensor that will later be quantized, get_range updates the
	matching range table R[i] in place. `x_` holds the shortcut input inside
	residual units; note that some units accumulate into `x_` (the next
	unit's shortcut) and others into `x`, exactly mirroring the model.

	NOTE(review): the layer indices and residual wiring are hard-coded to a
	54-conv inverted-residual network (init block + 5 stages + final 1x1
	block + 7x7 average pool) — presumably MobileNetV2; confirm against the
	model definition before reuse.

	Args:
		x: input image batch sized so the final feature map is 7x7.
		conv_layers: flat list of the network's 54 conv layers.
		R: list of (C, 2) range tensors, one per recorded activation,
			updated in place by get_range.

	Returns:
		Flattened network output of shape (N, num_classes).
	"""
	# init_block
	x = conv_layers[0](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[0], x)
	# stage1 unit1
	x = conv_layers[1](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[1], x)
	x = conv_layers[2](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[2], x)
	x = conv_layers[3](x)
	get_range(R[3], x)

	# stage2 unit1
	x = conv_layers[4](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[4], x)
	x = conv_layers[5](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[5], x)
	x = conv_layers[6](x)
	get_range(R[6], x)

	# stage2 unit2 (residual: shortcut kept in x_)
	x_ = x
	x = conv_layers[7](x_)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[7], x)
	x = conv_layers[8](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[8], x)
	x = conv_layers[9](x)
	x = x + x_

	# stage3 unit1
	get_range(R[9], x)
	x = conv_layers[10](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[10], x)
	x = conv_layers[11](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[11], x)
	x = conv_layers[12](x)
	get_range(R[12], x)

	# stage3 unit2
	x_ = x
	x = conv_layers[13](x_)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[13], x)
	x = conv_layers[14](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[14], x)
	x = conv_layers[15](x)
	x_ = x + x_

	# stage3 unit3
	get_range(R[15], x_)
	x = conv_layers[16](x_)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[16], x)
	x = conv_layers[17](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[17], x)
	x = conv_layers[18](x)
	x = x + x_

	# stage4 unit1
	get_range(R[18], x)
	x = conv_layers[19](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[19], x)
	x = conv_layers[20](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[20], x)
	x = conv_layers[21](x)
	get_range(R[21], x)
	x_ = x

	# stage4 unit2
	x = conv_layers[22](x_)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[22], x)
	x = conv_layers[23](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[23], x)
	x = conv_layers[24](x)
	x_ = x + x_

	# stage4 unit3
	get_range(R[24], x_)
	x = conv_layers[25](x_)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[25], x)
	x = conv_layers[26](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[26], x)
	x = conv_layers[27](x)
	x_ = x + x_

	# stage4 unit4
	get_range(R[27], x_)
	x = conv_layers[28](x_)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[28], x)
	x = conv_layers[29](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[29], x)
	x = conv_layers[30](x)
	x = x + x_

	# stage4 unit5
	get_range(R[30], x)
	x = conv_layers[31](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[31], x)
	x = conv_layers[32](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[32], x)
	x = conv_layers[33](x)
	get_range(R[33], x)

	# stage4 unit6
	x_ = x
	x = conv_layers[34](x_)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[34], x)
	x = conv_layers[35](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[35], x)
	x = conv_layers[36](x)
	x_ = x + x_

	# stage4 unit7
	get_range(R[36], x_)
	x = conv_layers[37](x_)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[37], x)
	x = conv_layers[38](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[38], x)
	x = conv_layers[39](x)
	x = x + x_

	# stage5 unit1
	get_range(R[39], x)
	x = conv_layers[40](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[40], x)
	x = conv_layers[41](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[41], x)
	x = conv_layers[42](x)
	get_range(R[42], x)

	# stage5 unit2
	x_ = x
	x = conv_layers[43](x_)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[43], x)
	x = conv_layers[44](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[44], x)
	x = conv_layers[45](x)
	x_ = x + x_

	# stage5 unit3
	get_range(R[45], x_)
	x = conv_layers[46](x_)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[46], x)
	x = conv_layers[47](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[47], x)
	x = conv_layers[48](x)
	x = x + x_

	# stage5 unit4
	get_range(R[48], x)
	x = conv_layers[49](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[49], x)
	x = conv_layers[50](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[50], x)
	x = conv_layers[51](x)
	get_range(R[51], x)

	# final_block
	x = conv_layers[52](x)
	x = nn.ReLU6(inplace=True)(x)
	get_range(R[52], x)
	# final_pool
	x = nn.AvgPool2d(kernel_size=7, stride=1, padding=0)(x)
	# output (1x1 conv acting as the classifier)
	x = conv_layers[53](x)
	x = x.view(x.size(0), -1)

	return x

def QReLU(x, R):
	"""Clamp x to [0, R[c]] per channel c, in place (quantized ReLU analogue).

	Args:
		x: (N, C, H, W) activation tensor, modified in place.
		R: (C,) per-channel upper bounds.

	Returns:
		The clamped tensor (the same object as x).
	"""
	assert x.size(1) == R.size(0)
	# One vectorized clamp replaces the per-channel Python loop.
	x[:] = x.clamp(min=0).minimum(R.view(1, -1, 1, 1))
	return x

def qconv(x, conv_layer, qw, nw=8, na=8, nb=24, nm=24, ns=24):
	"""Apply conv_layer's convolution with quantized weights qw and no bias.

	Uses the functional form so the layer's full geometry is honored —
	including dilation, which the original's rebuilt Conv2d silently reset
	to 1 (identical result for dilation-1 layers).

	Args:
		x: input activation tensor.
		conv_layer: nn.Conv2d supplying stride/padding/dilation/groups.
		qw: quantized weight tensor used in place of conv_layer.weight.
		nw, na, nb, nm, ns: bit widths, unused here; kept for API symmetry.

	Returns:
		The convolution output.
	"""
	l = conv_layer
	return nn.functional.conv2d(x, qw, None, l.stride, l.padding, l.dilation, l.groups)

def gen_qb(l, SW, SA, nb=24):
	"""Quantize layer l's bias to nb-bit integer codes in units of SW*SA.

	Args:
		l: conv layer providing the float bias.
		SW: per-output-channel weight scales.
		SA: input-activation scale (broadcastable against SW).
		nb: bias bit width.

	Returns:
		Clamped, rounded integer-valued bias tensor on l.bias's device.
	"""
	bmin = -2.**(nb - 1)
	bmax = 2.**(nb - 1) - 1
	# Follow the layer's device instead of hard-coding .cuda(): identical on
	# CUDA models, and it no longer crashes on CPU-only machines.
	scale = SW.mul(SA).to(l.bias.device)
	return l.bias.div(scale).clamp(bmin, bmax).round()

def gen_qw(l, SW, nw=8):
	"""Quantize layer l's weights to nw-bit integer codes, one scale per channel.

	Args:
		l: conv layer providing the float weight of shape (OC, IC, kH, kW).
		SW: (OC,) per-output-channel scales.
		nw: weight bit width.

	Returns:
		Clamped, rounded integer-valued weight tensor on l.weight's device.
	"""
	wmin = -2.**(nw - 1)
	wmax = 2.**(nw - 1) - 1
	# Broadcasting handles any kernel shape (the old expand(OC, nI, K, K)
	# assumed square kernels), and following the weight's device removes the
	# hard .cuda() dependency — identical results on CUDA models.
	scale = SW.view(-1, 1, 1, 1).to(l.weight.device)
	return l.weight.div(scale).clamp(wmin, wmax).round()

def gen_qm(SW0, SA0, SA1, nw=8, na=8, nm=24, ns=24):
	"""Compute the fixed-point rescale multiplier between two quantized layers.

	qm = round(clamp((SW0 * SA0 / SA1) * 2**nm)) in the signed nm-bit range.

	Args:
		SW0: weight scales of the producing layer.
		SA0: activation scale at the producing layer's input.
		SA1: activation scale at the consuming layer's input.
		nm: multiplier bit width / fixed-point shift.
		nw, na, ns: unused; kept for API symmetry.

	Returns:
		Integer-valued multiplier tensor on the inputs' device. The hard
		.cuda() was removed: both consumers (qact, qact_last) move qm to
		CUDA themselves, and it broke CPU-only use.
	"""
	mmin = -2.**(nm - 1)
	mmax = 2.**(nm - 1) - 1
	qm = SW0.mul(SA0).div(SA1).mul(2**nm)
	return qm.clamp(mmin, mmax).round()

def dequantize(x, SA1, na=8):
	"""Quantize float activations x to na-bit integer codes using scale SA1.

	NOTE(review): despite its name this function *quantizes* (divides by the
	scale); the name is kept because callers depend on it.

	Args:
		x: (N, C, H, W) float activation tensor.
		SA1: per-channel (or 1-element) activation scale tensor.
		na: activation bit width.

	Returns:
		Clamped, rounded integer-valued tensor on x's device.
	"""
	# Signed range only when negative values are present (non-ReLU output).
	if x.min() < 0:
		amin = -2.**(na - 1)
		amax = 2.**(na - 1) - 1
	else:
		amin = 0
		amax = 2.**na - 1
	# Broadcasting replaces the explicit expand; follow x's device rather
	# than forcing CUDA (identical result when x is already on CUDA).
	scale = SA1.view(1, -1, 1, 1).to(x.device)
	return x.div(scale).clamp(amin, amax).round()

def qact(qx, qb, qm, nw=8, na=8, nb=24, nm=24, ns=24, relu=True):
	"""Integer requantization: add bias, optional ReLU, rescale by qm and shift.

	Emulates fixed-point hardware: the product y = (qx + qb) * qm carries nm
	fractional bits (qm was pre-scaled by 2**nm in gen_qm), which are removed
	by arithmetic right shifts plus a round-half-up step driven by the last
	discarded bit.

	Args:
		qx: integer-valued conv output, shape (N, C, H, W).
		qb: per-channel integer bias, shape (C,).
		qm: per-channel fixed-point multiplier, shape (C,), scaled by 2**nm.
		na: activation bit width — sets the output clamp range.
		nm: number of fractional bits carried by qm.
		relu: if True, apply ReLU and clamp to the unsigned na-bit range;
			otherwise clamp to the signed range.
		nw, nb, ns: unused in the body; kept for API symmetry.

	Returns:
		Requantized float tensor with integer values in [amin, amax].
	"""
	nB = qx.size(0)
	IC = qx.size(1)
	IH = qx.size(2)
	IW = qx.size(3)
	# bmin/bmax are computed but never used below; kept from the original.
	bmin = -2.**(2*nb - 1)
	bmax = 2.**(2*nb - 1) - 1
	if(relu):
		amin = 0
		amax = 2.**na - 1
	else:
		amin = -2.**(na - 1)
		amax = 2.**(na - 1) - 1

	# Broadcast the per-channel bias and multiplier over batch and space.
	qy = qx + qb.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand(nB,IC,IH,IW)
	qm = qm.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand(nB,IC,IH,IW).cuda()
	if(relu):
		qy = nn.ReLU()(qy)
	y = qy.mul(qm)
	y = y.long()
	# Shift away all but one fractional bit...
	y = y >> (nm-1)
	# ...capture that last (half) bit...
	ylast = y & 0x01
	#print("ylast = 1 length: {}".format(len(ylast[ylast>0])))
	#print("ylast = 0 length: {}".format(len(ylast[ylast==0])))
	# ...then drop it and round half up by adding it back where it was set.
	y = y >> 1
	y[ylast>0] += 1
	y = y.clamp(amin, amax).float()

	return y

def qact_last(qx, conv_layer, qm, nw=8, na=8, nb=24, nm=24, ns=24, relu=True):
	"""Rescale the final layer's integer output by its per-channel multiplier.

	Args:
		qx: integer-valued conv output, shape (N, C, H, W).
		conv_layer: unused in the body; kept for API compatibility.
		qm: per-channel multiplier, shape (C,).
		nw, na, nb, nm, ns, relu: unused; kept for API symmetry.

	Returns:
		qx scaled per channel by qm (no shift/clamp — raw scaled output).
	"""
	# Broadcasting replaces the explicit expand; following qx's device removes
	# the hard .cuda() (identical result when qx is already on CUDA).
	return qx.mul(qm.view(1, -1, 1, 1).to(qx.device))