import time, sys, os, argparse
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms

from SkyNet import *

def convert2cpu(gpu_matrix):
    """Copy *gpu_matrix* into a freshly allocated CPU float32 tensor of the same shape."""
    host = torch.empty(gpu_matrix.size(), dtype=torch.float32)
    host.copy_(gpu_matrix)
    return host

class DACDataset(Dataset):
    """Flat-directory image dataset for the DAC validation set.

    Each item is one RGB image (optionally resized and transformed).
    The file base names are kept in ``imageNames`` so callers can map a
    global sample index back to its source file.
    """

    def __init__(self, root, shape=None, transform=None, batch_size=32, num_workers=4):
        # BUG FIX: os.listdir order is filesystem-dependent.  The consumer
        # of this dataset indexes imageNames by global sample position
        # (batch_idx * batch_size + i), so a non-deterministic listing would
        # silently attach quantized data to the wrong image name.  Sorting
        # makes the mapping reproducible.
        names = sorted(f for f in os.listdir(root)
                       if os.path.isfile(os.path.join(root, f)))
        # Base name before the first dot (original naming scheme preserved).
        self.imageNames = [f.split('.')[0] for f in names]
        self.files = [os.path.join(root, f) for f in names]
        self.nSamples = len(self.files)
        self.transform = transform  # optional torchvision-style transform
        self.shape = shape          # (width, height) for PIL resize, or None
        # Kept only for interface compatibility; batching and worker count
        # are owned by the DataLoader, not by the Dataset itself.
        self.batch_size = batch_size
        self.num_workers = num_workers

    def __len__(self):
        return self.nSamples

    def __getitem__(self, index):
        """Load image ``index`` as RGB, resize if requested, then transform."""
        img = Image.open(self.files[index]).convert('RGB')
        if self.shape:
            img = img.resize(self.shape)
        if self.transform is not None:
            img = self.transform(img)
        return img

def qdata(x, SA, na):
    """Quantize a (B, C, H, W) tensor *x* into na-bit integer codes.

    Each channel is divided by its entry in the 1-D per-channel scale
    vector *SA*, shifted by +128, clamped to [0, 2**na - 1] and rounded.
    Returns a float tensor holding the integer code values.
    """
    hi = 2.0 ** na - 1
    lo = 0
    # Reshape SA to (1, C, 1, 1) and broadcast it across batch and spatial
    # dims — equivalent to chained unsqueeze + expand.
    scale = SA.view(1, -1, 1, 1).expand_as(x)
    return (x / scale + 128).clamp(lo, hi).round()

# Validation-image directory and target device for any GPU work.
imgDir = '/media/Workspace/DAC/val/jpg'
DEVICE = torch.device("cuda:0")

parser = argparse.ArgumentParser(description="SkyNet Data Free Quantization on DAC dataset.")
parser.add_argument("--nw", type=int, default=8)
parser.add_argument("--na", type=int, default=8)   # activation bit-width used by qdata
parser.add_argument("--nb", type=int, default=18)
parser.add_argument("--nm", type=int, default=18)
parser.add_argument("--ns", type=int, default=22)
parser.add_argument("--i", type=int, default=0)
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument('--workers', type=int, default=16, metavar='N')
args = parser.parse_args()
print(args)


# Resize to 320x160 and normalize; the quantization scale below assumes
# this transform's output range.
dataset = DACDataset(imgDir, shape=(320, 160),
                        transform=transforms.Compose([
                                transforms.ToTensor(),
                                transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.25, 0.25, 0.25]),]))
test_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=False,  # must stay False: sample order maps to dataset.imageNames below
        num_workers=args.workers, pin_memory=True)

for batch_idx, data in enumerate(test_loader):
    print(batch_idx)
    for i in range(data.size(0)):
        # Quantize one image with per-channel scale 1/64 (= 0.015625).
        x = qdata(data[i].unsqueeze(0), torch.Tensor([0.015625, 0.015625, 0.015625]), args.na)
        # Pad channels 3 -> 4 with zeros; presumably the deployment format
        # expects 4-channel pixels — TODO confirm against the consumer.
        x = torch.cat([x, torch.zeros(x.size(0), 4 - x.size(1), x.size(2), x.size(3))], 1)
        # Drop the batch dim and emit HWC layout.
        x = x.squeeze().permute(1, 2, 0)
        # BUG FIX: the global sample index must use args.batch_size, not a
        # hard-coded 16 — with any other --batch_size every file after the
        # first batch was written under the wrong image name.
        name = dataset.imageNames[batch_idx * args.batch_size + i]
        x.detach().numpy().astype(np.uint8).tofile('deploy/%s.bb' % name)
print(x.shape)