import time, sys, os, argparse
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms

from SkyNet import *

def convert2cpu(gpu_matrix):
    """Return a fresh CPU FloatTensor holding a copy of `gpu_matrix`.

    Allocates a new float32 tensor of the same size on the CPU and copies
    the (possibly GPU-resident) source into it; the source is untouched.
    """
    cpu_copy = torch.FloatTensor(gpu_matrix.size())
    cpu_copy.copy_(gpu_matrix)  # copy_ returns `cpu_copy` itself
    return cpu_copy

class DACDataset(Dataset):
    """Flat-directory image dataset for DAC evaluation.

    Lists every regular file in `root`, keeps the extension-less names in
    `imageNames` (used later to label detection results), and serves each
    image as an RGB PIL image, optionally resized and transformed.

    batch_size / num_workers are stored for interface compatibility but are
    not used by the Dataset itself (the DataLoader receives its own values).
    """

    def __init__(self, root, shape=None, transform=None, batch_size=32, num_workers=4):
        # sorted(): os.listdir order is filesystem-dependent; a deterministic
        # order makes runs reproducible.
        names = sorted(f for f in os.listdir(root)
                       if os.path.isfile(os.path.join(root, f)))
        # BUGFIX: the original `f.split('.')[0]` truncated any filename
        # containing an extra dot; splitext strips only the extension.
        self.imageNames = [os.path.splitext(f)[0] for f in names]
        self.files = [os.path.join(root, f) for f in names]
        self.nSamples = len(self.files)
        self.transform = transform
        self.shape = shape
        self.batch_size = batch_size
        self.num_workers = num_workers

    def __len__(self):
        return self.nSamples

    def __getitem__(self, index):
        imgpath = self.files[index]
        img = Image.open(imgpath).convert('RGB')
        if self.shape:
            img = img.resize(self.shape)

        if self.transform is not None:
            img = self.transform(img)

        return img

def qdata(x, SA, na):
    """Quantize float activations to an unsigned na-bit integer grid.

    Args:
        x: float activations, shape (B, C, H, W).
        SA: per-channel scale, length C; x is divided by it channel-wise.
        na: activation bit-width; the result is clamped to [0, 2**na - 1].

    The +128 offset re-centers signed normalized input into the unsigned
    range (the first conv layer pads with the matching constant 128, see
    qconv's `first=True` branch).

    Generalized: the scale tensor now follows x's device instead of being
    forced onto CUDA, so this also works on CPU; behavior for CUDA inputs
    is unchanged.
    """
    nB = x.size(0)
    nC = x.size(1)
    nH = x.size(2)
    nW = x.size(3)

    amin = 0
    amax = 2.**na - 1

    # Broadcast the per-channel scale over batch and spatial dimensions.
    QSA = SA.to(x.device).unsqueeze(0).unsqueeze(2).unsqueeze(3).expand(nB, nC, nH, nW)
    qx = (x.div(QSA) + 128).clamp(amin, amax).round()
    return qx

def qconv(x, w, b ,m, na, nb, nm, relu = True, last = False, first = False):
    """One quantized convolution stage: conv -> accumulator clamp -> bias ->
    (optional) ReLU -> multiplier/shift rescale -> activation clamp.

    Args:
        x: input activations, shape (B, I, H, W).
        w: convolution weights, shape (O, I/groups, K, K); installed into a
           bias-free nn.Conv2d.
        b: per-output-channel bias (length O), added after the conv.
        m: per-output-channel rescale multiplier (length O).
           NOTE(review): in the integer path below, y.long() * qm must stay
           integral for `>>` to be valid -- presumably m carries an integer
           dtype in the quantized model; confirm.
        na: activation bit-width; outputs are clamped to an na-bit range.
        nb: bias bit-width.  NOTE(review): accepted but never used here.
        nm: shift amount of the rescale, i.e. y = (y * m) >> nm.
        relu: apply ReLU after the bias add (True for all hidden layers).
        last: final layer -- use float mul/div instead of the integer shift,
              or (with relu=False) return the raw biased-free conv output.
        first: first layer -- pad explicitly with the constant 128 so the
               border matches the +128 offset applied by qdata().
    """
    nB = x.size(0)
    nI = x.size(1)
    nH = x.size(2)
    nW = x.size(3)
    nO = w.size(0)
    nG = int(x.size(1)/w.size(1))   # group count inferred from weight shape
    nK = w.size(2)
    nP = int((nK-1)/2)              # "same" padding for odd kernel sizes
    
    if(first):
        # Explicit constant pad (value 128 = quantized zero-point), so the
        # conv itself runs with padding 0.
        pad2d = nn.ConstantPad2d(padding=(1, 1, 1, 1), value=128)
        conv2d = nn.Conv2d(nI, nO, nK, 1, 0, groups=nG, bias=False)
        x = pad2d(x)
    else:
        conv2d = nn.Conv2d(nI, nO, nK, 1, nP, groups=nG, bias=False)
    
    conv2d.weight = nn.Parameter(w)
    # Broadcast bias and multiplier over batch and spatial dims (stride-1,
    # same-padding conv keeps the spatial size at nH x nW).
    qb = b.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand(nB,nO,nH,nW)
    qm = m.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand(nB,nO,nH,nW)
    
    qy = conv2d(x) 
    # Clamp the accumulator to a signed 19-bit range.
    qy = qy.clamp(-2.**(19-1), 2.**(19-1)-1)
    if(relu):
        qy = qy + qb
        qy = nn.ReLU()(qy)
        if(not last):
            # Integer rescale path: multiply then arithmetic right-shift,
            # result clamped to the unsigned na-bit range.
            y = qy.long()
            y = (y*qm)>>nm
            y = y.float().clamp(0, 2.**na-1)
        else:
            # Final-layer float path: same rescale done in floating point.
            y = qy.mul(qm).div(2**nm)
            y = y.clamp(0, 2.**na-1)
    else:
        if(not last):
            qy = qy + qb
            y = qy.long()
            y = (y*qm)>>nm
            # No ReLU: clamp to the SIGNED na-bit range instead.
            y = y.float().clamp(-2.**(na-1), 2.**(na-1)-1)
        else:
            # relu=False, last=True: raw conv output; bias/rescale are
            # applied later by compute_bbox().
            y = qy
    return y

def find_max(x):
    """Return (col, row, value) of the maximum element of a 2-D tensor.

    Vectorized replacement for the original O(H*W) Python double loop.
    torch.argmax returns the first maximum in row-major (flattened) order,
    which matches the original strict-greater scan's tie-breaking exactly.
    The value is returned as a 0-d tensor element, as before.
    """
    flat = int(torch.argmax(x))
    h, w = divmod(flat, x.size(1))
    return w, h, x[h, w]

def compute_bbox(data, anchors, bias, qm):# x.shape = (10,20,40)
    """Decode one bounding box (YOLO-style) from a single image's raw output.

    Args:
        data: (10, 20, 40) detector map; channels 0-4 belong to anchor 0 and
              5-9 to anchor 1, each ordered (x, y, w, h, objectness).
        anchors: flat anchor list; entries 2z and 2z+1 are the (w, h) priors
                 for anchor z.
        bias: final conv's bias vector (the last qconv ran with last=True and
              returned the raw output, so bias is applied here instead).
        qm: per-channel multiplier; box channels are rescaled by
            qm / 2**args.nm before decoding.

    Returns (xmin, ymin, xmax, ymax), normalized to [0, 1].

    NOTE(review): reads module-level globals `w`, `h` (grid width/height,
    set to 40/20 in the __main__ block) and `args` -- only callable after
    those are defined.
    """
    # Pick the anchor whose rescaled objectness peak is larger.
    x1, y1, max1 = find_max(data[4])
    x2, y2, max2 = find_max(data[9])
    if(max2.mul(qm[9])>max1.mul(qm[4])):
    #if(max2>max1):
        x = x2
        y = y2
        z = 1
        max_ = max2
    else:
        x = x1
        y = y1
        z = 0
        max_ = max1

    # Apply bias + multiplier/shift rescale to the four box channels at the
    # winning grid cell.
    xs = (data[z*5+0][y][x] + bias[z*5+0]).mul(qm[z*5+0]).div(2**args.nm)
    ys = (data[z*5+1][y][x] + bias[z*5+1]).mul(qm[z*5+1]).div(2**args.nm)
    ws = (data[z*5+2][y][x] + bias[z*5+2]).mul(qm[z*5+2]).div(2**args.nm)
    hs = (data[z*5+3][y][x] + bias[z*5+3]).mul(qm[z*5+3]).div(2**args.nm)
    # Standard YOLO decode: sigmoid offsets within the cell, exp-scaled
    # anchor priors for the size.
    xs_inb = torch.sigmoid(xs) + x
    ys_inb = torch.sigmoid(ys) + y
    ws_inb = torch.exp(ws)*anchors[z*2+0]
    hs_inb = torch.exp(hs)*anchors[z*2+1]

    # Normalize by the grid dimensions (globals: w=40, h=20).
    bcx = xs_inb / w
    bcy = ys_inb / h
    bw = ws_inb / w
    bh = hs_inb / h

    # Center/size -> corner coordinates.
    xmin = bcx - bw / 2.0
    ymin = bcy - bh / 2.0
    xmax = xmin + bw
    ymax = ymin + bh

    return xmin, ymin, xmax, ymax

def qinference(x, qconv_layers, qbn_layers, na, nb, nm):
    """Full quantized SkyNet forward pass.

    Quantizes the input batch, zero-pads it to 32 channels, then runs the 13
    quantized conv stages with three 2x2 max-pools.  A ReorgLayer branch is
    taken after stage 5 (before the third pool) and concatenated back in
    before the last three stages.  The final stage runs with
    relu=False/last=True, so the returned map is the RAW detector output
    (bias and rescale are applied later by compute_bbox()).

    NOTE(review): the hard-coded 0.015625 (= 1/64) per-channel input scale
    and the 32-channel zero pad presumably match the hardware input layout
    -- confirm (the debug dump section below pads to 4 channels instead).
    """
    x = qdata(x, torch.Tensor([0.015625,0.015625,0.015625]), na)
    # Zero-pad the 3 RGB channels up to 32.
    x = torch.cat([x,torch.zeros(x.size(0),32-x.size(1),x.size(2),x.size(3)).cuda()],1)
    x = qconv(x, qconv_layers[0].weight, qconv_layers[0].bias, qbn_layers[0].weight, na, nb, nm, first=True )
    x = qconv(x, qconv_layers[1].weight, qconv_layers[1].bias, qbn_layers[1].weight, na, nb, nm)
    x = nn.MaxPool2d(kernel_size=2, stride=2)(x)
    x = qconv(x, qconv_layers[2].weight, qconv_layers[2].bias, qbn_layers[2].weight, na, nb, nm)
    x = qconv(x, qconv_layers[3].weight, qconv_layers[3].bias, qbn_layers[3].weight, na, nb, nm)
    x = nn.MaxPool2d(kernel_size=2, stride=2)(x)
    x = qconv(x, qconv_layers[4].weight, qconv_layers[4].bias, qbn_layers[4].weight, na, nb, nm)
    x = qconv(x, qconv_layers[5].weight, qconv_layers[5].bias, qbn_layers[5].weight, na, nb, nm)

    # Skip connection: reorg the pre-pool feature map for later concat.
    reorg = ReorgLayer()(x)
    
    x = nn.MaxPool2d(kernel_size=2, stride=2)(x)
    x = qconv(x, qconv_layers[6].weight, qconv_layers[6].bias, qbn_layers[6].weight, na, nb, nm)
    x = qconv(x, qconv_layers[7].weight, qconv_layers[7].bias, qbn_layers[7].weight, na, nb, nm)
    x = qconv(x, qconv_layers[8].weight, qconv_layers[8].bias, qbn_layers[8].weight, na, nb, nm)
    x = qconv(x, qconv_layers[9].weight, qconv_layers[9].bias, qbn_layers[9].weight, na, nb, nm)

    x = torch.cat([reorg,x],1)

    x = qconv(x, qconv_layers[10].weight, qconv_layers[10].bias, qbn_layers[10].weight, na, nb, nm)
    x = qconv(x, qconv_layers[11].weight, qconv_layers[11].bias, qbn_layers[11].weight, na, nb, nm)
    # Final detection head: raw output, no ReLU / no rescale here.
    x = qconv(x, qconv_layers[12].weight, qconv_layers[12].bias, qbn_layers[12].weight, na, nb, nm, relu = False, last = True)
    return x

# Machine-specific path to the DAC validation JPEG directory.
imgDir = '/media/Workspace/DAC/val/jpg'
DEVICE = torch.device("cuda:0")

# NOTE(review): argument parsing happens at import time (outside the
# __main__ guard); `args` is read as a module global by compute_bbox() and
# the blob-dump section, so moving it under the guard would break them.
parser = argparse.ArgumentParser(description="SkyNet Data Free Quantization on DAC dataset.")
parser.add_argument("--nw", type=int, default=6)   # presumably weight bit-width; not referenced in this file
parser.add_argument("--na", type=int, default=8)   # activation bit-width (clamp range in qdata/qconv)
parser.add_argument("--nb", type=int, default=16)  # bias bit-width; passed through but unused in qconv
parser.add_argument("--nm", type=int, default=17)  # right-shift amount of the multiplier rescale
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument('--workers', type=int, default=16, metavar='N')
args = parser.parse_args()
print(args)

if __name__ == '__main__':
    # Load the pre-quantized SkyNet model (whole-module pickle, not a
    # state_dict) and build the evaluation pipeline.
    net = torch.load('./EQSkyNet.pth')
    net = net.to(DEVICE)
    dataset = DACDataset(imgDir, shape=(net.width, net.height),
                        transform=transforms.Compose([
                                transforms.ToTensor(),
                                transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.25, 0.25, 0.25]),]))
    test_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=False,                 # keep loader order == dataset order; results[] indexing depends on it
        num_workers=args.workers, pin_memory=True)

    net = net.cuda()
    net.eval()
    anchors = net.anchors
    num_anchors = net.num_anchors
    anchor_step = len(anchors) // num_anchors  # computed but unused below
    # Output grid size; read as module globals by compute_bbox().
    h = 20
    w = 40
    total = 0
    imageNum = dataset.__len__()
    # results[i] = [image_id, xmin, xmax, ymin, ymax] in pixel coordinates.
    results = np.zeros((imageNum, 5))

    # Collect the model's conv/BN layers in traversal order; qinference()
    # indexes them 0..12 positionally.
    conv_layers = []
    bn_layers = []
    for module in net.named_modules():
        if isinstance(module[1], nn.Conv2d):
            conv_layers.append(module[1])
        if isinstance(module[1], nn.BatchNorm2d):
            bn_layers.append(module[1])

    stime = time.time()
    for batch_idx, data in enumerate(test_loader):
        data = data.cuda()
        output = qinference(data, conv_layers, bn_layers, args.na, args.nb, args.nm)
        for b in range(output.size(0)):
            xmin, ymin, xmax, ymax = compute_bbox(output[b], anchors, conv_layers[12].bias, bn_layers[12].weight)
            # Scale normalized coords to the 640x360 frame; note the column
            # order is [xmin, xmax, ymin, ymax].
            results[total + b, 1:] = np.asarray([xmin * 640, xmax * 640, ymin * 360, ymax * 360])
        total += output.size(0)
    etime = time.time()

    # NOTE(review): assigning name strings into a float array only works
    # because the image names are purely numeric (e.g. "003") -- confirm.
    results[:, 0] = dataset.imageNames
    index = np.argsort(results[:, 0])

    # Emit one "NNN.jpg [xmin, xmax, ymin, ymax]" line per image, sorted by id.
    file = open('./result.txt','w+')
    for i in range(len(index)):
        name = '%03d'%int(results[index[i]][0])+'.jpg '
        bbox = '['+str(int(round(results[index[i]][1])))+', '+str(int(round(results[index[i]][2])))+', '+str(int(round(results[index[i]][3])))+', '+str(int(round(results[index[i]][4])))+']\n'
        file.write(name+bbox)
    file.close()

# ---------------------------------------------------------------------------
# Per-layer blob dump: re-run the quantized pipeline layer by layer on one
# image and serialize every intermediate activation to blob/*.bb, presumably
# for bit-exact comparison against a hardware implementation -- confirm.
# BUGFIX: this section previously executed at module import time and raised
# NameError when the file was imported (test_loader/conv_layers/... only
# exist inside the __main__ guard above); it is now guarded the same way.
# File names and dtypes are identical to the original dumps.
# ---------------------------------------------------------------------------

def _dump(tensor, name, dtype=None):
    """Write `tensor` to blob/<name>.bb, optionally cast to a numpy dtype."""
    arr = tensor.detach().cpu().numpy()
    if dtype is not None:
        arr = arr.astype(dtype)
    arr.tofile('blob/%s.bb' % name)

if __name__ == '__main__':
    # Exhaust the loader; `data` ends up holding the LAST batch (kept as-is
    # to preserve the original dump contents).
    for data in test_loader:
        data = data.cuda()
    _dump(data, 'batch0')  # raw float batch, no integer cast

    x = data[0].unsqueeze(0).cuda()
    x = qdata(x, torch.Tensor([0.015625, 0.015625, 0.015625]), args.na)
    # Zero-pad RGB up to 4 channels.  NOTE(review): qinference() pads to 32
    # channels instead -- confirm which layout the consumer expects.
    x = torch.cat([x, torch.zeros(x.size(0), 4 - x.size(1), x.size(2), x.size(3)).cuda()], 1)
    _dump(x, 'conv0', np.uint8)

    x = qconv(x, conv_layers[0].weight, conv_layers[0].bias, bn_layers[0].weight, args.na, args.nb, args.nm, first=True)
    _dump(x, 'conv1', np.uint8)

    x = qconv(x, conv_layers[1].weight, conv_layers[1].bias, bn_layers[1].weight, args.na, args.nb, args.nm)
    _dump(x, 'conv2', np.uint8)

    x = nn.MaxPool2d(kernel_size=2, stride=2)(x)
    _dump(x, 'pool1', np.uint8)

    x = qconv(x, conv_layers[2].weight, conv_layers[2].bias, bn_layers[2].weight, args.na, args.nb, args.nm)
    _dump(x, 'conv3', np.uint8)

    x = qconv(x, conv_layers[3].weight, conv_layers[3].bias, bn_layers[3].weight, args.na, args.nb, args.nm)
    _dump(x, 'conv4', np.uint8)

    x = nn.MaxPool2d(kernel_size=2, stride=2)(x)
    _dump(x, 'pool2', np.uint8)

    x = qconv(x, conv_layers[4].weight, conv_layers[4].bias, bn_layers[4].weight, args.na, args.nb, args.nm)
    _dump(x, 'conv5', np.uint8)

    x = qconv(x, conv_layers[5].weight, conv_layers[5].bias, bn_layers[5].weight, args.na, args.nb, args.nm)
    _dump(x, 'conv6', np.uint8)

    # Skip connection taken BEFORE the third pool, as in qinference().
    reorg = ReorgLayer()(x)
    _dump(reorg, 'reorg', np.uint8)

    x = nn.MaxPool2d(kernel_size=2, stride=2)(x)
    _dump(x, 'pool3', np.uint8)

    x = qconv(x, conv_layers[6].weight, conv_layers[6].bias, bn_layers[6].weight, args.na, args.nb, args.nm)
    _dump(x, 'conv7', np.uint8)

    x = qconv(x, conv_layers[7].weight, conv_layers[7].bias, bn_layers[7].weight, args.na, args.nb, args.nm)
    _dump(x, 'conv8', np.uint8)

    x = qconv(x, conv_layers[8].weight, conv_layers[8].bias, bn_layers[8].weight, args.na, args.nb, args.nm)
    _dump(x, 'conv9', np.uint8)

    x = qconv(x, conv_layers[9].weight, conv_layers[9].bias, bn_layers[9].weight, args.na, args.nb, args.nm)
    _dump(x, 'conv10', np.uint8)

    x = torch.cat([reorg, x], 1)
    _dump(x, 'cat', np.uint8)

    x = qconv(x, conv_layers[10].weight, conv_layers[10].bias, bn_layers[10].weight, args.na, args.nb, args.nm)
    _dump(x, 'conv11', np.uint8)

    x = qconv(x, conv_layers[11].weight, conv_layers[11].bias, bn_layers[11].weight, args.na, args.nb, args.nm)
    _dump(x, 'conv12', np.uint8)

    # Final detection layer: raw pre-rescale output, dumped as int16.
    x = qconv(x, conv_layers[12].weight, conv_layers[12].bias, bn_layers[12].weight, args.na, args.nb, args.nm, relu=False, last=True)
    _dump(x, 'conv13', np.int16)

    xmin, ymin, xmax, ymax = compute_bbox(x[0], anchors, conv_layers[12].bias, bn_layers[12].weight)
    print(xmin.item(), ymin.item(), xmax.item(), ymax.item())