import time, sys, os, argparse
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms

from SkyNet import *

def convert2cpu(gpu_matrix):
    """Return a host-resident float32 copy of *gpu_matrix* with the same shape."""
    host_copy = torch.FloatTensor(gpu_matrix.size())
    host_copy.copy_(gpu_matrix)
    return host_copy

class DACDataset(Dataset):
    """Images from a flat directory, loaded as RGB with optional resize/transform.

    `imageNames` keeps each file's stem (text before the first dot) so a
    result row can be matched back to its source frame. `batch_size` and
    `num_workers` are stored but not used by the dataset itself.
    """

    def __init__(self, root, shape=None, transform=None, batch_size=32, num_workers=4):
        entries = [name for name in os.listdir(root)
                   if os.path.isfile(os.path.join(root, name))]
        self.imageNames = [name.split('.')[0] for name in entries]
        self.files = [os.path.join(root, name) for name in entries]
        self.nSamples = len(self.files)
        self.transform = transform
        self.shape = shape
        self.batch_size = batch_size
        self.num_workers = num_workers

    def __len__(self):
        return self.nSamples

    def __getitem__(self, index):
        image = Image.open(self.files[index]).convert('RGB')
        if self.shape:
            image = image.resize(self.shape)
        if self.transform is not None:
            image = self.transform(image)
        return image

def fuse_bn(conv_layers, bn_layers):
    """Fold each BatchNorm2d into its preceding Conv2d, in place.

    For y = BN(conv(x)): scale = gamma / sqrt(var + eps), so
    fused_weight = weight * scale (per output channel) and
    fused_bias = beta + (conv_bias - mu) * scale, with conv_bias taken as 0
    when the conv has no bias.

    Fixes vs. the original: runs under torch.no_grad() (in-place writes to a
    leaf Parameter raise a RuntimeError otherwise), honours an existing conv
    bias instead of silently dropping it (the original read it but never used
    it), and scales all channels with one broadcast multiply instead of a
    Python loop over output channels.
    """
    with torch.no_grad():
        for conv, bn in zip(conv_layers, bn_layers):
            scale = bn.weight / torch.sqrt(bn.running_var + bn.eps)
            if conv.bias is not None:
                fused_bias = bn.bias + (conv.bias - bn.running_mean) * scale
            else:
                fused_bias = bn.bias - bn.running_mean * scale
            conv.bias = torch.nn.Parameter(fused_bias)
            # Per-output-channel scaling, broadcast over (in, kH, kW).
            conv.weight.mul_(scale.reshape(-1, 1, 1, 1))

def quantize_weight(W, nw):
    """Per-output-channel symmetric weight scales for nw-bit quantization.

    For each output channel (row 0 of W's shape), the scale maps the
    dominant-magnitude side of that channel's value range onto the signed
    nw-bit grid: max/qmax when the positive extreme dominates, |min|/2**(nw-1)
    otherwise. Scales are floored at 1e-8 so later divisions never blow up.

    Returns a CPU float32 tensor of shape (W.shape[0],) — same contract as
    the original per-channel loop, but computed with one vectorized pass
    (the original also defined an unused `qmin`, dropped here).
    """
    qmax = 2.**(nw - 1) - 1

    flat = W.detach().reshape(W.shape[0], -1)
    pos = flat.max(dim=1).values.abs()
    neg = flat.min(dim=1).values.abs()
    # qmax + 1 == 2**(nw-1) == |qmin|
    S = torch.where(pos > neg, pos / qmax, neg / (qmax + 1))
    return S.clamp_min(1e-8).float().cpu()

def quantize_activation(R, next_dw, na, relu = True):
    """Derive activation scale(s) from recorded per-channel [max, min] ranges.

    R is a (nC, 2) array of per-channel [max, min]. When the NEXT layer is
    depthwise (`next_dw`), per-channel scales are kept; otherwise a single
    tensor-wide scale is returned. With `relu` the range is unsigned
    [0, 2**na - 1]; otherwise signed [-2**(na-1), 2**(na-1) - 1] and the
    dominant-magnitude side picks the scale.
    """
    nC = len(R)
    if relu:
        amax = 2.**na - 1
        if next_dw:
            S = torch.zeros(nC)
            for c in range(nC):
                S[c] = R[c][0] / amax
        else:
            S = torch.zeros(1)
            S[0] = R.max() / amax
        return S

    amin = -2.**(na - 1)
    amax = 2.**(na - 1) - 1
    if next_dw:
        S = torch.zeros(nC)
        for c in range(nC):
            hi, lo = R[c][0], R[c][1]
            S[c] = hi / amax if abs(hi) > abs(lo) else lo / amin
    else:
        S = torch.zeros(1)
        if abs(R.max()) > abs(R.min()):
            S[0] = R.max() / amax
        else:
            S[0] = R.min() / amin
    return S

def quantize(conv_layers, R, nw, na):
    """Compute weight scales (SW) and activation scales (SA) for every layer.

    Each layer's activation scale depends on whether the NEXT layer is
    depthwise (groups > 1), which requires per-channel scales. The final
    layer has no successor and no ReLU, so it uses the signed-range path.
    """
    SW, SA = [], []
    last = len(conv_layers) - 1
    for i, layer in enumerate(conv_layers):
        SW.append(quantize_weight(layer.weight, nw))
        if i == last:
            SA.append(quantize_activation(R[i], False, na, relu = False))
        else:
            next_dw = conv_layers[i + 1].groups > 1
            SA.append(quantize_activation(R[i], next_dw, na))
    return SW, SA

def qconv_layer(conv_layer, SW, SA0, SA1, nw, nb, nm):
    """Quantize one BN-fused conv layer and derive its requant multiplier.

    Args:
        conv_layer: float nn.Conv2d (after fuse_bn) to quantize.
        SW: per-output-channel weight scales from quantize_weight.
        SA0: input-activation scale(s); SA1: output-activation scale(s).
        nw, nb, nm: bit widths for weights, biases, and the fixed-point
            multiplier respectively.

    Returns:
        (conv2d, qm): a Conv2d holding integer-valued weights/bias and the
        per-channel multiplier qm, used downstream as (acc * qm) >> nm.
    """
    l = conv_layer

    nO = l.weight.size(0)
    nI = l.weight.size(1)
    nK = l.weight.size(2)
    
    wmin = -2.**(nw - 1)
    wmax = 2.**(nw - 1) - 1
    bmin = -2.**(nb - 1)
    bmax = 2.**(nb - 1) - 1
    # mmin = -2.**(nm - 1)
    mmin = 0
    mmax = 2.**(nm - 1) - 1

    # Broadcast the per-output-channel weight scale over (out, in, kH, kW).
    QSW = SW.unsqueeze(1).unsqueeze(2).unsqueeze(3).expand(nO, nI, nK, nK).cuda()
    # qm = (SW * SA_in) / SA_out, encoded as an integer scaled by 2**nm.
    # NOTE(review): qm is scaled by 2**nm but clamped to 2**(nm-1)-1 — confirm
    # the intended one-bit headroom.
    qm = SW.mul(SA0)
    QSA1 = SA1.expand(nO)
    qm = qm.div(QSA1)
    qm = qm.mul(2**nm).clamp(mmin, mmax).round()

    qw = l.weight.div(QSW).clamp(wmin, wmax).round()
    # Bias is quantized at the accumulator scale SW * SA_in.
    qb = l.bias.div(SW.mul(SA0).cuda()).clamp(bmin, bmax).round()

    conv2d = nn.Conv2d(l.in_channels, l.out_channels, l.kernel_size, l.stride, l.padding, groups=l.groups, bias=False)
    conv2d.weight = nn.Parameter(qw)
    conv2d.bias = nn.Parameter(qb)

    return conv2d, qm

def qconv_layers(conv_layers, SW, SA, nw, nb, nm):
    """Quantize all 13 SkyNet conv layers; returns (layers, multipliers).

    SA[i] is the input scale of layer i (SA[0] being the image scale).
    Layer 10 consumes the channel-concat of the reorg branch and layer 9's
    output, so its input scale concatenates SA[6] four times (reorg's 4x
    channel expansion — presumably; confirm against ReorgLayer) with SA[10].
    The last layer's output is left unscaled (scale 1).
    """
    qlayers, qms = [], []

    def add(layer, sw, sa_in, sa_out):
        qlayer, qm = qconv_layer(layer, sw, sa_in, sa_out, nw, nb, nm)
        qlayers.append(qlayer)
        qms.append(qm)

    for i in range(10):
        add(conv_layers[i], SW[i], SA[i], SA[i + 1])
    add(conv_layers[10], SW[10], torch.cat((SA[6], SA[6], SA[6], SA[6], SA[10])), SA[11])
    add(conv_layers[11], SW[11], SA[11], SA[12])
    add(conv_layers[12], SW[12], SA[12], torch.ones(1))
    return qlayers, qms

def qdata(x, SA, na):
    """Quantize an input batch to the signed na-bit grid using per-channel scales SA."""
    nB, nC, nH, nW = x.size(0), x.size(1), x.size(2), x.size(3)

    amin = -2.**(na - 1)
    amax = 2.**(na - 1) - 1

    # Broadcast the per-channel scale over (batch, channel, H, W).
    scale = SA.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand(nB, nC, nH, nW).cuda()
    return x.div(scale).clamp(amin, amax).round().cuda()

def qconv(x, conv_layer, qm, nw, na, nb, nm, relu = True, last = False):
    """Run one integer-emulated conv layer and requantize its output.

    x: quantized input batch; conv_layer: integer-weight Conv2d built by
    qconv_layer; qm: its per-channel fixed-point multiplier. nb bounds the
    accumulator, na the requantized activation, nm is the right-shift applied
    after multiplying by qm. nw is unused but kept so call sites are unchanged.

    Bug fix vs. the original: the bias was added once before the branch and
    then a SECOND time on the (relu=False, last=False) path, and it was also
    baked into the last layer's raw output even though compute_bbox adds the
    head layer's bias itself. The bias is now applied exactly once on the
    requantizing paths and omitted when last=True.
    """
    l = conv_layer
    nB = x.size(0)
    nO = l.weight.size(0)
    nH = x.size(2)
    nW = x.size(3)

    bmin = -2.**(nb - 1)
    bmax = 2.**(nb - 1) - 1

    # Re-wrap the integer weights in a bias-free conv; bias is handled manually.
    conv2d = nn.Conv2d(l.in_channels, l.out_channels, l.kernel_size, l.stride, l.padding, groups=l.groups, bias=False)
    conv2d.weight = nn.Parameter(l.weight)
    qb = l.bias.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand(nB, nO, nH, nW)
    qm = qm.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand(nB, nO, nH, nW).cuda()

    qy = conv2d(x).clamp(bmin, bmax)  # emulate the nb-bit accumulator
    if relu:
        qy = nn.ReLU()(qy + qb)
        # Requantize: (qy * qm) >> nm, saturated to the unsigned na-bit range.
        y = qy.mul(qm).detach().cpu().numpy().astype(np.int32)
        y = y >> nm
        y = torch.from_numpy(y).float().cuda()
        y = y.clamp(0, 2.**na - 1).round()
    elif not last:
        qy = qy + qb
        y = qy.mul(qm).detach().cpu().numpy().astype(np.int32)
        y = y >> nm
        y = torch.from_numpy(y).float().cuda()
        y = y.clamp(-2.**(na - 1), 2.**(na - 1) - 1)
    else:
        # Head layer: return the raw accumulator; compute_bbox applies the
        # bias and multiplier per selected cell.
        y = qy
    return y

def find_max(x):
    """Return (col, row, value) of the maximum element of a 2-D tensor.

    Replaces the original O(H*W) Python double loop with torch.argmax.
    Ties resolve to the first element in row-major order, matching the
    original strict `>` scan (torch.argmax returns the first maximal index).
    The value is returned as a 0-d tensor, as before, so callers can keep
    chaining tensor ops on it.
    """
    flat_idx = int(torch.argmax(x))
    h, w = divmod(flat_idx, x.size(1))
    return w, h, x[h, w]

def compute_bbox(data, anchors, bias, qm):# x.shape = (10,20,40)
    """Decode the single best box from the (10, 20, 40) head output map.

    Judging by the indexing, channels 0-4 belong to anchor 0 and 5-9 to
    anchor 1 (x, y, w, h, score per anchor). The head layer's `bias` is added
    and its multiplier `qm` applied here for the chosen cell, then the box is
    decoded YOLO-style (sigmoid cell offsets, exp-scaled anchors) into
    normalized [0, 1] corner coordinates.

    NOTE(review): reads module-level globals `w`, `h` (grid size, set to
    40/20 in the __main__ block) and `args.nm` — only valid when run as this
    script, not on import.
    """
    x1, y1, max1 = find_max(data[4])
    x2, y2, max2 = find_max(data[9])
    # Pick the anchor whose rescaled peak score is higher.
    if(max2.mul(qm[9])>max1.mul(qm[4])):
        x = x2
        y = y2
        z = 1
        max_ = max2
    else:
        x = x1
        y = y1
        z = 0
        max_ = max1

    # Undo the fixed-point encoding for the chosen cell's four box terms.
    xs = (data[z*5+0][y][x] + bias[z*5+0]).mul(qm[z*5+0]).div(2**args.nm)
    ys = (data[z*5+1][y][x] + bias[z*5+1]).mul(qm[z*5+1]).div(2**args.nm)
    ws = (data[z*5+2][y][x] + bias[z*5+2]).mul(qm[z*5+2]).div(2**args.nm)
    hs = (data[z*5+3][y][x] + bias[z*5+3]).mul(qm[z*5+3]).div(2**args.nm)
    xs_inb = torch.sigmoid(xs) + x
    ys_inb = torch.sigmoid(ys) + y
    ws_inb = torch.exp(ws)*anchors[z*2+0]
    hs_inb = torch.exp(hs)*anchors[z*2+1]

    # Normalize by the grid dimensions (module globals).
    bcx = xs_inb / w
    bcy = ys_inb / h
    bw = ws_inb / w
    bh = hs_inb / h

    xmin = bcx - bw / 2.0
    ymin = bcy - bh / 2.0
    xmax = xmin + bw
    ymax = ymin + bh

    return xmin, ymin, xmax, ymax

def qinference(x, qconv_layers, qms, nw, na, nb, nm):
    """Quantized forward pass; the layer order mirrors `calibration`.

    The reorg skip branch is taken from layer 5's output (before the third
    max-pool) and concatenated with layer 9's output ahead of the final
    layers. The last layer runs with relu=False/last=True so its output is
    not requantized here; compute_bbox finishes the bias/scale handling.

    NOTE(review): reads the module-level global `SA` for the input scale —
    set in the __main__ block, so this cannot be called from an import.
    """
    x = qdata(x, SA[0], na)
    x = qconv(x, qconv_layers[0], qms[0], nw, na, nb, nm)
    x = qconv(x, qconv_layers[1], qms[1], nw, na, nb, nm)
    x = nn.MaxPool2d(kernel_size=2, stride=2)(x)
    x = qconv(x, qconv_layers[2], qms[2], nw, na, nb, nm)
    x = qconv(x, qconv_layers[3], qms[3], nw, na, nb, nm)
    x = nn.MaxPool2d(kernel_size=2, stride=2)(x)
    x = qconv(x, qconv_layers[4], qms[4], nw, na, nb, nm)
    x = qconv(x, qconv_layers[5], qms[5], nw, na, nb, nm)

    # Skip branch captured before the third pool.
    reorg = ReorgLayer()(x)
    
    x = nn.MaxPool2d(kernel_size=2, stride=2)(x)
    x = qconv(x, qconv_layers[6], qms[6], nw, na, nb, nm)
    x = qconv(x, qconv_layers[7], qms[7], nw, na, nb, nm)
    x = qconv(x, qconv_layers[8], qms[8], nw, na, nb, nm)
    x = qconv(x, qconv_layers[9], qms[9], nw, na, nb, nm)

    x = torch.cat([reorg,x],1)

    x = qconv(x, qconv_layers[10], qms[10], nw, na, nb, nm)
    x = qconv(x, qconv_layers[11], qms[11], nw, na, nb, nm)
    x = qconv(x, qconv_layers[12], qms[12], nw, na, nb, nm, relu = False, last = True)

    return x

def calculate_bias_offset(conv_layer, value):
    """Per-output-channel offset a constant input `value` contributes through the conv.

    Depthwise (groups > 1): value * sum of the channel's filter.
    Pointwise: sum of value (possibly per-input-channel) times the squeezed
    1x1 filter. Returned as a CPU tensor of shape (out_channels,).
    """
    layer = conv_layer
    n_out = layer.weight.size(0)
    offsets = torch.zeros(n_out)
    depthwise = layer.groups > 1
    for c in range(n_out):
        if depthwise:
            offsets[c] = value * torch.sum(layer.weight[c])
        else:
            offsets[c] = torch.sum(value * layer.weight[c].squeeze())
    return offsets

def get_range(A, R):
    """Record per-channel max/min of activation tensor A into R (shape nC x 2)."""
    nC = A.size(1)
    assert R.shape[0] == nC

    for c in range(nC):
        channel = A[:, c, :, :]
        R[c, 0] = channel.max().item()
        R[c, 1] = channel.min().item()

def calibration(x, conv_layers, R):
    """One float forward pass, recording each conv's pre-ReLU output range into R."""
    pool = nn.MaxPool2d(kernel_size=2, stride=2)

    def conv_relu(t, i):
        # conv -> record pre-activation range -> ReLU
        t = conv_layers[i](t)
        get_range(t, R[i])
        return nn.ReLU()(t)

    x = conv_relu(x, 0)
    x = conv_relu(x, 1)
    x = pool(x)

    x = conv_relu(x, 2)
    x = conv_relu(x, 3)
    x = pool(x)

    x = conv_relu(x, 4)
    x = conv_relu(x, 5)

    # Skip branch captured before the third pool.
    reorg = ReorgLayer()(x)

    x = pool(x)
    for i in (6, 7, 8, 9):
        x = conv_relu(x, i)

    x = torch.cat([reorg, x], 1)

    x = conv_relu(x, 10)
    x = conv_relu(x, 11)

    # Final layer: range recorded, but no ReLU applied.
    x = conv_layers[12](x)
    get_range(x, R[12])
    return x

# Directory of calibration/evaluation images and the compute device.
imgDir = '/media/Workspace/DAC/val/jpg'
DEVICE = torch.device("cuda:0")

# Bit-width configuration: nw = weight bits, na = activation bits,
# nb = bias/accumulator bits, nm = fixed-point multiplier (shift) bits.
# NOTE(review): args are parsed at import time (outside the __main__ guard)
# and compute_bbox reads `args` as a global, so importing this module
# requires CLI-compatible sys.argv — confirm intended.
parser = argparse.ArgumentParser(description="SkyNet Data Free Quantization on DAC dataset.")
parser.add_argument("--nw", type=int, default=6)
parser.add_argument("--na", type=int, default=8)
parser.add_argument("--nb", type=int, default=16)
parser.add_argument("--nm", type=int, default=17)
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument('--workers', type=int, default=16, metavar='N')
args = parser.parse_args()
print(args)

if __name__ == '__main__':
    # Load the pretrained float SkyNet model.
    net = SkyNet()
    net.load_state_dict(torch.load('./SkyNet.pth'))
    net = net.to(DEVICE)
    # Calibration/eval dataset: frames resized to the network's input size.
    dataset = DACDataset(imgDir, shape=(net.width, net.height),
                        transform=transforms.Compose([
                                transforms.ToTensor(),
                                transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.25, 0.25, 0.25]),]))
    test_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers, pin_memory=True)

    net = net.cuda()
    net.eval()
    anchors = net.anchors
    num_anchors = net.num_anchors
    anchor_step = len(anchors) // num_anchors
    # Output grid size; read as globals by compute_bbox.
    h = 20
    w = 40
    total = 0
    imageNum = dataset.__len__()
    # results[i] = [image_id, xmin, xmax, ymin, ymax] (pixels).
    results = np.zeros((imageNum, 5))

    # Collect conv/BN pairs in network order and fold BN into the convs.
    conv_layers = []
    bn_layers = []
    for module in net.named_modules():
        if isinstance(module[1], nn.Conv2d):
            conv_layers.append(module[1])
        if isinstance(module[1], nn.BatchNorm2d):
            bn_layers.append(module[1])
    fuse_bn(conv_layers, bn_layers)

    # R[i] holds per-channel [max, min] of layer i's pre-ReLU output.
    R = []
    for i in range(len(conv_layers)):
        r = np.zeros((conv_layers[i].out_channels, 2))
        R.append(r)

    # Calibration pass over the dataset.
    # NOTE(review): get_range overwrites R on every batch, so only the LAST
    # batch's ranges survive — confirm whether a running max/min was intended.
    for batch_idx, data in enumerate(test_loader):
        data = data.cuda()
        output = calibration(data, conv_layers, R)

    SW, SA = quantize(conv_layers, R, nw = args.nw, na = args.na)
    SA0 = torch.Tensor([0.015625,0.015625,0.015625])
    SA.insert(0, SA0) # input image quantization scale

    # NOTE(review): this rebinds the name `qconv_layers` from the function to
    # its return value, shadowing the function for the rest of the module.
    qconv_layers, qms = qconv_layers(conv_layers, SW, SA, args.nw, args.nb, args.nm)

    # Quantized inference, decoding one box per image.
    stime = time.time()
    for batch_idx, data in enumerate(test_loader):
        data = data.cuda()
        output = qinference(data, qconv_layers, qms, args.nw, args.na, args.nb, args.nm)
        for b in range(output.size(0)):
            xmin, ymin, xmax, ymax = compute_bbox(output[b], anchors, qconv_layers[12].bias, qms[12])
            results[total + b, 1:] = np.asarray([xmin * 640, xmax * 640, ymin * 360, ymax * 360])
        total += output.size(0)
    etime = time.time()

    # Image names are numeric strings, so numpy coerces them into the float column.
    results[:, 0] = dataset.imageNames
    index = np.argsort(results[:, 0])

    # Write results sorted by image id as "NNN.jpg [xmin, xmax, ymin, ymax]".
    file = open('./result.txt','w+')
    for i in range(len(index)):
        name = '%03d'%int(results[index[i]][0])+'.jpg '
        bbox = '['+str(int(round(results[index[i]][1])))+', '+str(int(round(results[index[i]][2])))+', '+str(int(round(results[index[i]][3])))+', '+str(int(round(results[index[i]][4])))+']\n'
        file.write(name+bbox)
    file.close()

# Export: build a fresh SkyNet, load the quantized weights/biases into its
# convs, and stash each layer's requant multiplier qm in the matching BN
# layer's weight parameter.
# NOTE(review): this block runs at import time but depends on `qconv_layers`
# and `qms` created inside the __main__ guard above — importing this module
# (rather than running it) will fail with a NameError.
qnet = SkyNet()
qconvs = []
qbns = []
for module in qnet.named_modules():
    if isinstance(module[1], nn.Conv2d):
        qconvs.append(module[1])
    if isinstance(module[1], nn.BatchNorm2d):
        qbns.append(module[1])


for i in range(len(qconvs)):
    qconvs[i].weight = qconv_layers[i].weight
    qconvs[i].bias = qconv_layers[i].bias
    qbns[i].weight = nn.Parameter(qms[i])

# Saves the whole module object via pickle, not just a state_dict.
torch.save(qnet, './FQSkyNet.pth')
