import torch
import copy
import numpy as np
import sys
from SkyNet import *
from utils import *


def calculate_p(conv_layer, signed = False):
    """Sum over layers of the mean per-channel precision ratio r/R.

    For each conv layer, every output channel's weight range r is compared
    against the whole layer's range R (max-abs when signed, max-min
    otherwise); the per-layer mean of r/R is accumulated over all layers
    and returned as a float.
    """
    total = 0.0
    for layer in conv_layer:
        W = layer.weight
        n_out = W.shape[0]
        ratios = torch.zeros(n_out)
        if signed:
            # Whole-layer range measured as the largest absolute weight.
            full_range = torch.max(torch.abs(W))
            for ch in range(n_out):
                ratios[ch] = torch.max(torch.abs(W[ch])) / full_range
        else:
            # Whole-layer range measured as (max - min).
            full_range = torch.max(W) - torch.min(W)
            for ch in range(n_out):
                ratios[ch] = (torch.max(W[ch]) - torch.min(W[ch])) / full_range
        total = total + torch.mean(ratios).item()
    return total

def cross_p(W1, W2, signed = False):
    """Mean cross-layer precision product between two consecutive weights.

    For each output channel i of W1 the ratio (r1*r2)/(R1*R2) is computed,
    where r1/r2 are the per-channel ranges of W1[i] and the matching slice
    of W2, and R1/R2 are the full-tensor ranges.  Returns the mean as a
    float.

    When W1.shape[0] != W2.shape[1] the pair is treated as grouped /
    depthwise and W2 is matched channel-by-channel (W2[i]); otherwise W2
    is matched along its input-channel axis (W2[:, i]).
    """
    if signed:
        R1 = torch.max(torch.abs(W1))
        R2 = torch.max(torch.abs(W2))
    else:
        R1 = torch.max(W1) - torch.min(W1)
        R2 = torch.max(W2) - torch.min(W2)

    # Grouped/depthwise pairs are matched per output channel of W2.
    depthwise = W1.shape[0] != W2.shape[1]

    P = torch.zeros(W1.shape[0])
    for i in range(W1.shape[0]):
        # BUGFIX: the signed grouped branch previously read W2[:, i], which
        # is out of range when W2's input-channel dim is 1 (the depthwise
        # case); use the per-channel slice, consistent with the unsigned
        # branch and with layer_equalization.
        W2_slice = W2[i] if depthwise else W2[:, i]
        if signed:
            r1 = torch.max(torch.abs(W1[i]))
            r2 = torch.max(torch.abs(W2_slice))
        else:
            r1 = torch.max(W1[i]) - torch.min(W1[i])
            r2 = torch.max(W2_slice) - torch.min(W2_slice)
        P[i] = r1*r2/(R1*R2)

    return torch.mean(P).item()

def calculate_cross_p(conv_layer, signed = False):
    """Sum of cross_p over consecutive layer pairs of the SkyNet topology.

    The indices below hard-code the SkyNet layer graph: layer 5 feeds both
    layer 6 and the first 768 output channels of layer 10 (a concat-style
    bypass), and layer 9 feeds channels 768:1280 of layer 10.
    TODO confirm these indices against the SkyNet definition.
    NOTE(review): `signed` is accepted but never forwarded to cross_p —
    confirm whether that is intentional.
    """
    AP = 0
    for i in range(5):
        P = cross_p(conv_layer[i].weight, conv_layer[i+1].weight)
        AP = AP + P

    # Layer 5 fans out to layer 6 (viewed as 1x192x3x3) and to the first
    # four 192-channel groups of layer 10; concatenate them so cross_p sees
    # every consumer of layer 5's output channels.
    temp = torch.cat((conv_layer[6].weight.view(1,192,3,3).clone(), conv_layer[10].weight[0:768].view(4,192,3,3).clone()))
    P = cross_p(conv_layer[5].weight, temp)
    AP = AP + P

    for i in range(6,9):
        P = cross_p(conv_layer[i].weight, conv_layer[i+1].weight)
        AP = AP + P

    # Layer 9 feeds only channels 768:1280 of layer 10.
    P = cross_p(conv_layer[9].weight, conv_layer[10].weight[768:1280])
    AP = AP + P

    for i in range(10, 12):
        P = cross_p(conv_layer[i].weight, conv_layer[i+1].weight)
        AP = AP + P
    
    return AP

def layer_equalization(W1, W2, BN_M, BN_B, signed=False, eps=0):
    """Equalize per-channel weight ranges between two consecutive layers.

    For each output channel i of W1 a scale s = sqrt(r1*r2)/r1 is chosen so
    that the channel's range in W1 and in the matching slice of W2 become
    balanced.  W1, W2 and the BN running-mean/bias entries are modified
    IN PLACE; the per-channel scales are returned.

    Args:
        W1:   weight of the producing conv layer, shape (C, ...).
        W2:   weight of the consuming conv layer.
        BN_M: running mean of the BN following W1 (scaled along with W1).
        BN_B: bias of the BN following W1 (scaled along with W1).
        signed: use max-abs range instead of (max - min).
        eps:  stabilizer inside the scale formula.  NOTE(review): the
              default of 0 divides by zero if a channel has r1 == 0.

    Returns:
        1-D tensor S holding the applied per-channel scales.

    Note: the original computed whole-tensor ranges R1/R2 here and never
    used them; that dead work has been removed.
    """
    # W1.shape[0] != W2.shape[1] marks a grouped/depthwise consumer: W2 is
    # then matched channel-by-channel (W2[i]) instead of along its
    # input-channel axis (W2[:, i]).
    depthwise = W1.shape[0] != W2.shape[1]

    S = torch.zeros(W1.shape[0])
    for i in range(W1.shape[0]):
        W2_slice = W2[i] if depthwise else W2[:, i]
        if signed:
            r1 = torch.max(torch.abs(W1[i]))
            r2 = torch.max(torch.abs(W2_slice))
        else:
            r1 = torch.max(W1[i]) - torch.min(W1[i])
            r2 = torch.max(W2_slice) - torch.min(W2_slice)
        s = (1/(r1 + eps))*torch.sqrt(r1*r2 + eps)
        # Scale the producer (and its BN statistics) up, the consumer down,
        # so the product of the two layers is unchanged.
        W1[i] = W1[i]*s
        BN_M[i] = BN_M[i]*s
        BN_B[i] = BN_B[i]*s
        if depthwise:
            W2[i] = W2[i]/s
        else:
            W2[:,i] = W2[:,i]/s
        S[i] = s
    return S
    
def cross_layer_equalization(conv_layer, bn_layer, converge_thres=2e-4, iteration=10):
    """Repeatedly equalize weight ranges across SkyNet's consecutive layers.

    Runs layer_equalization over the hard-coded SkyNet layer graph until
    the change of calculate_cross_p drops below converge_thres.
    NOTE(review): the `or` in the loop condition means it always runs at
    least `iteration` passes and then keeps going while diff > thres — if
    the metric never converges this loops forever; confirm `and` was not
    intended.
    NOTE(review): weights are modified in place, which assumes gradients
    are disabled (e.g. torch.no_grad()) — confirm at the call site.
    """
    diff = 10

    PP = calculate_cross_p(conv_layer)
    while (diff>converge_thres)or(iteration>0):
        for i in range(5):
            S = layer_equalization(conv_layer[i].weight, conv_layer[i+1].weight, bn_layer[i].running_mean, bn_layer[i].bias)
        # Layer 5 feeds both layer 6 and the first 768 channels of layer 10.
        # `temp` is a cloned concat, so the down-scaling layer_equalization
        # applies to it is discarded; the returned S is re-applied to the
        # real consumers in the loop below.
        temp = torch.cat((conv_layer[6].weight.view(1,192,3,3).clone(), conv_layer[10].weight[0:768].view(4,192,3,3).clone()))
        S= layer_equalization(conv_layer[5].weight, temp, bn_layer[5].running_mean, bn_layer[5].bias)
        for i in range(192):
            conv_layer[6].weight[i] = conv_layer[6].weight[i]/S[i]
            conv_layer[10].weight[i] = conv_layer[10].weight[i]/S[i]
            conv_layer[10].weight[i+192*1] = conv_layer[10].weight[i+192*1]/S[i]
            conv_layer[10].weight[i+192*2] = conv_layer[10].weight[i+192*2]/S[i]
            conv_layer[10].weight[i+192*3] = conv_layer[10].weight[i+192*3]/S[i]
        for i in range(6,9):
            S = layer_equalization(conv_layer[i].weight, conv_layer[i+1].weight, bn_layer[i].running_mean, bn_layer[i].bias)
        # Layer 9 feeds only channels 768:1280 of layer 10.
        S = layer_equalization(conv_layer[9].weight, conv_layer[10].weight[768:1280], bn_layer[9].running_mean, bn_layer[9].bias)
        for i in range(10, 12):
            S = layer_equalization(conv_layer[i].weight, conv_layer[i+1].weight, bn_layer[i].running_mean, bn_layer[i].bias)
        
        AP = calculate_cross_p(conv_layer)
        diff = AP - PP
        print(PP, AP, diff)
        PP = AP 
        iteration = iteration - 1

def fuse_bn(conv_layer, bn_layer):
    """Fold each BatchNorm into the preceding convolution, in place.

    After fusing, every BN is reset to the identity (gamma=1, beta=0,
    mean=0, var=1) so the conv alone reproduces conv+BN.  Returns deep
    copies of the original BN betas and gammas (needed later for bias
    absorption on the fused model).

    NOTE(review): weights are modified in place, which assumes gradients
    are disabled (e.g. torch.no_grad()) — confirm at the call site.
    """
    betas = []
    gammas = []
    for i in range(len(conv_layer)):
        eps = bn_layer[i].eps
        beta = bn_layer[i].bias
        gamma = bn_layer[i].weight
        mu = bn_layer[i].running_mean
        var = bn_layer[i].running_var
        weight = conv_layer[i].weight
        bias = conv_layer[i].bias

        # Preserve the pre-fusion BN affine parameters for the caller.
        betas.append(copy.deepcopy(beta))
        gammas.append(copy.deepcopy(gamma))

        # Standard BN folding: y = gamma*(w*x + b - mu)/sqrt(var+eps) + beta.
        scale = gamma.div(torch.sqrt(var + eps))
        fused_bias = beta - gamma.mul(mu).div(torch.sqrt(var + eps))
        if bias is not None:
            # BUGFIX: the original read the conv bias but silently dropped
            # it from the fused bias; fold it in when present (identical
            # behavior for bias-free convs).
            fused_bias = fused_bias + scale * bias
        conv_layer[i].bias = torch.nn.Parameter(fused_bias)
        for j in range(weight.shape[0]):
            conv_layer[i].weight[j] = weight[j]*scale[j]

        # Neutralize the BN so it becomes an identity transform.
        bn_layer[i].running_var = torch.nn.Parameter(torch.ones(bn_layer[i].running_var.shape))
        bn_layer[i].running_mean = torch.nn.Parameter(torch.zeros(bn_layer[i].running_mean.shape))
        bn_layer[i].weight = torch.nn.Parameter(torch.ones(bn_layer[i].weight.shape))
        bn_layer[i].bias = torch.nn.Parameter(torch.zeros(bn_layer[i].bias.shape))
    
    return betas, gammas

def bias_absorption(W1, W2, B1, B2, V2, G1, G2, N, eps=1e-05): # W:weight, B:beta, V:variance, G:gamma
    """Shift the positive part of (B1 - N*G1) from one layer into the next.

    The absorbable amount c is subtracted from B1 and propagated through
    W2 (scaled by G2/sqrt(V2+eps)) into B2.  All tensors are updated in
    place; c is returned.
    """
    # Amount of bias that can be absorbed: the part of beta exceeding
    # N*gamma, clamped at zero from below.
    c = (B1 - N*G1).clamp(0)

    pixelwise = W1.shape[0] == W2.shape[1]
    Wc = torch.zeros(W2.shape[0])
    for ch in range(W2.shape[0]):
        if pixelwise:
            # Project c through the consumer's input channels.
            Wc[ch] = torch.sum(W2[ch].squeeze()*c)
        else:
            # Depthwise: one producing channel per consuming channel.
            Wc[ch] = torch.sum(W2[ch])*c[ch]

    for ch in range(len(c)):
        B1[ch] = B1[ch] - c[ch]
    for ch in range(len(Wc)):
        B2[ch] = B2[ch] + Wc[ch]*G2[ch]/torch.sqrt(V2[ch] + eps)
    return c

def cross_bias_absorption(conv_layer, bn_layer, N=3):
    """Run bias_absorption across the hard-coded SkyNet layer graph.

    Layer 5 feeds the first four 192-channel groups of layer 10 and
    layer 9 feeds channels 768:1280 of layer 10; those consumers are
    handled as explicit slices.
    """
    print("absorbing bias")
    for i in range(9):
        c = bias_absorption(conv_layer[i].weight, conv_layer[i+1].weight, bn_layer[i].bias, bn_layer[i+1].bias, \
                        bn_layer[i+1].running_var, bn_layer[i].weight, bn_layer[i+1].weight, N)

    # Layer 5's bias is absorbed once per consuming 192-channel group.
    for i in range(4):
        c = bias_absorption(conv_layer[5].weight, conv_layer[10].weight[i*192:i*192+192], bn_layer[5].bias, bn_layer[10].bias[i*192:i*192+192], \
                        bn_layer[10].running_var[i*192:i*192+192], bn_layer[5].weight, bn_layer[10].weight[i*192:i*192+192], N)
    # NOTE(review): this restores 4*c, but c is only the value from the
    # LAST group iteration — likely all-zero after the first absorption,
    # since bias_absorption clamps at beta - N*gamma.  Confirm the intent
    # was not to restore the sum of all four c's.
    for i in range(len(c)):
        bn_layer[5].bias[i] = bn_layer[5].bias[i] + 4*c[i]
    c = bias_absorption(conv_layer[9].weight, conv_layer[10].weight[768:1280], bn_layer[9].bias, bn_layer[10].bias[768:1280], \
                    bn_layer[10].running_var[768:1280], bn_layer[9].weight, bn_layer[10].weight[768:1280], N)

    for i in range(10,12):
        c = bias_absorption(conv_layer[i].weight, conv_layer[i+1].weight, bn_layer[i].bias, bn_layer[i+1].bias, \
                        bn_layer[i+1].running_var, bn_layer[i].weight, bn_layer[i+1].weight, N)

def bias_absorption_bn(W1, W2, B1, B2, beta, gamma, N=3): # W:weight, B:bias
    """bias_absorption variant for a model whose BNs are already fused.

    The absorbable amount c comes from the pre-fusion BN parameters
    (beta, gamma); it is subtracted from B1 and, propagated through W2,
    added directly onto B2.  Returns c.
    """
    # Positive part of beta - N*gamma is the absorbable bias.
    c = (beta - N*gamma).clamp(0)

    pixelwise = W1.shape[0] == W2.shape[1]
    Wc = torch.zeros(W2.shape[0])
    for ch in range(W2.shape[0]):
        if pixelwise:
            # Project c through the consumer's input channels.
            Wc[ch] = torch.sum(W2[ch].squeeze()*c)
        else:
            # Depthwise: one producing channel per consuming channel.
            Wc[ch] = torch.sum(W2[ch])*c[ch]

    for ch in range(len(c)):
        B1[ch] = B1[ch] - c[ch]
    for ch in range(len(Wc)):
        B2[ch] = B2[ch] + Wc[ch]
    return c

def cross_bias_absorption_bn(conv_layer, bn_layers, betas, gammas, N=3):
    """Run bias_absorption_bn across the hard-coded SkyNet layer graph.

    betas/gammas are the pre-fusion BN parameters returned by fuse_bn.
    NOTE(review): the first loops move bias out of bn_layers[i].bias while
    the last loop uses conv_layer[i].bias — presumably because after
    fusion the bias lives on the conv; confirm against fuse_bn usage.
    """
    print("absorbing bias bn")
    for i in range(9):
        c = bias_absorption_bn(conv_layer[i].weight, conv_layer[i+1].weight, bn_layers[i].bias, conv_layer[i+1].bias, \
                        betas[i], gammas[i], N)
                    
    # Layer 5's bias is absorbed once per consuming 192-channel group.
    for i in range(4):
        c = bias_absorption_bn(conv_layer[5].weight, conv_layer[10].weight[i*192:i*192+192], bn_layers[5].bias, conv_layer[10].bias[i*192:i*192+192], \
                        betas[5], gammas[5], N)
    # BUGFIX: this loop referenced the global `bn_layer` instead of the
    # `bn_layers` parameter (it only worked by accident via the script's
    # module-level variable).
    # NOTE(review): c is only the LAST group's value (likely zero after
    # the first absorption) — confirm the intended restore amount.
    for i in range(len(c)):
        bn_layers[5].bias[i] = bn_layers[5].bias[i] + 4*c[i]
    
    c = bias_absorption_bn(conv_layer[9].weight, conv_layer[10].weight[768:1280], bn_layers[9].bias,\
                            conv_layer[10].bias[768:1280], betas[9], gammas[9], N)
    for i in range(10,12):
        c = bias_absorption_bn(conv_layer[i].weight, conv_layer[i+1].weight, conv_layer[i].bias, conv_layer[i+1].bias, \
                        betas[i], gammas[i], N)


def quantization(W, num_bits): # adopt symmetric to improve inference time
    """Per-channel symmetric fake-quantization of W to num_bits.

    Each output channel gets its own scale, chosen from whichever side of
    its value range is larger in magnitude; the weights are divided by the
    scale, clamped to the signed integer grid, rounded, and rescaled.

    Returns:
        (QW, S): the fake-quantized weights and the per-channel scales.
    """
    qmin = -2.**(num_bits - 1)
    qmax = 2.**(num_bits - 1) - 1

    channels = W.shape[0]
    S = torch.zeros(channels)
    for ch in range(channels):
        lo = abs(W[ch].min())
        hi = abs(W[ch].max())
        # Map the dominant side of the range onto the integer grid;
        # 1e-8 floors the scale so all-zero channels don't divide by zero.
        if hi > lo:
            S[ch] = max(hi / qmax, 1e-8)
        else:
            S[ch] = max(lo / (qmax + 1), 1e-8)

    QW = torch.zeros(W.shape)
    for ch in range(channels):
        QW[ch] = (W[ch].div(S[ch])).clamp(qmin, qmax).round().mul(S[ch])
    
    return QW, S

def bias_correction(W, B, G, V, PB, PG, num_bits=8, eps=1e-05): # weight, beta, gamma, var, previous beta, previous gamma
    """Correct for the bias introduced by quantizing W (DFQ-style).

    The expected activation of the PREVIOUS layer — the mean of a clipped
    normal derived from its BN beta/gamma — is pushed through the
    quantization error of W, yielding a per-channel correction Dy scaled
    by this layer's BN.  W is overwritten with its quantized values.
    Returns Dy.
    """
    from scipy.stats import norm
    pdf = lambda x: norm(0, 1).pdf(x)
    cdf = lambda x: norm.cdf(x)
    # E[relu(N(bias, weight^2))] for a BN output channel.
    clipped_mean = lambda weight, bias: weight*pdf(-bias/weight) + bias*(1 - cdf(-bias/weight))

    QW, S = quantization(W, num_bits)
    quant_err = QW - W

    Ex = torch.zeros(PB.shape)
    Dy = torch.zeros(B.shape)

    # Expected input activations, channel by channel, from the previous
    # layer's BN parameters.
    for ch in range(len(Ex)):
        Ex[ch] = clipped_mean(PG.detach().numpy()[ch], PB.detach().numpy()[ch])
    
    pixelwise = W.shape[1] == PB.shape[0]
    for ch in range(W.shape[0]):
        bn_scale = G[ch]/torch.sqrt(V[ch]+eps)
        if pixelwise:
            Dy[ch] = torch.sum(quant_err[ch].squeeze()*Ex)*bn_scale
        else:
            # Depthwise: a single input channel feeds this output channel.
            Dy[ch] = torch.sum(quant_err[ch])*Ex[ch]*bn_scale
    
    # Commit the quantized weights.
    for ch in range(W.shape[0]):
        W[ch] = QW[ch]

    return Dy

def cross_bias_correction(conv_layer, bn_layer):
    """Apply bias_correction across the hard-coded SkyNet layer graph.

    Layer 10 is handled slice by slice because its input concatenates the
    outputs of layer 5 (channels 0:768, four 192-channel groups) and
    layer 9 (channels 768:1280).  Corrections Dy are collected first and
    subtracted from the BN biases at the end.  Note bias_correction also
    overwrites each layer's weights with their quantized values.
    NOTE(review): the in-place writes assume gradients are disabled
    (e.g. torch.no_grad()) — confirm at the call site.
    """

    Dy = []
    for i in range(1, 10):
        print("correct bias of " + str(conv_layer[i]))
        dy = bias_correction(conv_layer[i].weight, bn_layer[i].bias, bn_layer[i].weight, \
                            bn_layer[i].running_var, bn_layer[i-1].bias,  bn_layer[i-1].weight)
        Dy.append(dy)

    # Layer 10: correct the four groups fed by layer 5, then the slice fed
    # by layer 9.
    dy = torch.zeros(1280)
    print("correct bias of " + str(conv_layer[10]))
    for i in range(4):
        dy[i*192:(i+1)*192] = bias_correction(conv_layer[10].weight[i*192:(i+1)*192], bn_layer[10].bias[i*192:(i+1)*192], \
        bn_layer[10].weight[i*192:(i+1)*192], bn_layer[10].running_var[i*192:(i+1)*192], bn_layer[5].bias, bn_layer[5].weight)
    dy[768:1280] = bias_correction(conv_layer[10].weight[768:1280], bn_layer[10].bias[768:1280], \
    bn_layer[10].weight[768:1280], bn_layer[10].running_var[768:1280], bn_layer[9].bias, bn_layer[9].weight)
    Dy.append(dy)

    print("correct bias of " + str(conv_layer[11]))
    dy = bias_correction(conv_layer[11].weight, bn_layer[11].bias, bn_layer[11].weight, \
                         bn_layer[11].running_var, bn_layer[10].bias, bn_layer[10].weight)
    Dy.append(dy)

    # Dy[i-1] holds the correction for layer i (layer 0 is uncorrected).
    for i in range(1,12):
        print(bn_layer[i])
        for j in range(len(bn_layer[i].bias)):
            bn_layer[i].bias[j] = bn_layer[i].bias[j] - Dy[i-1][j]


def Quantize(conv_layer):
    """Replace a conv layer's weights with their 8-bit fake-quantized values.

    NOTE(review): the in-place weight write assumes gradients are disabled
    (e.g. torch.no_grad()) — confirm at the call site.
    """
    QW, _ = quantization(conv_layer.weight, 8)
    for i in range(conv_layer.weight.shape[0]):
        # BUGFIX: wrapping each row in nn.Parameter was pointless — item
        # assignment into a tensor copies values, not the Parameter wrapper
        # (and `nn` was only in scope via a wildcard import).
        conv_layer.weight[i] = QW[i]

# ---- data-free quantization driver -----------------------------------------
net = SkyNet()
net.load_state_dict(torch.load('./SkyNet.pth'))
net.to("cpu")

# Collect the conv and BN layers in network order.
conv_layer = []
bn_layer = []
for module in net.named_modules():
    if isinstance(module[1], nn.Conv2d):
        conv_layer.append(module[1])
    if isinstance(module[1], nn.BatchNorm2d):
        bn_layer.append(module[1])

# Bias-absorption coefficient N; defaults to 3 (matching the functions'
# own default) when no CLI argument is given.  `sys` is already imported
# at the top of the file — the duplicate import was removed.
N = int(sys.argv[1]) if len(sys.argv) > 1 else 3
print("bias absorption coefficient: %d"%N)

# BUGFIX: all of these mutate Parameters in place via item assignment,
# which raises "a leaf Variable that requires grad is being used in an
# in-place operation" unless gradients are disabled.
with torch.no_grad():
    cross_layer_equalization(conv_layer, bn_layer)
    cross_bias_absorption(conv_layer, bn_layer, N)
    cross_bias_correction(conv_layer, bn_layer)
    # betas, gammas = fuse_bn(conv_layer, bn_layer)
torch.save(net.state_dict(),'./SkyNet_dfq.pth')

