import os
import torchvision.transforms as transforms
from torch.utils.data import Dataset
from PIL import Image, ImageOps
import numpy as np
import torch
from PIL import ImageFile
# Let PIL load partially-written/truncated image files instead of raising
# an error mid-epoch (common with large scraped datasets such as COCO dumps).
ImageFile.LOAD_TRUNCATED_IMAGES = True

class CocoDataset(Dataset):
    """YOLO-style COCO dataset.

    Reads images from ``img_path`` and matching whitespace-separated label
    files (``<stem>.txt``) from ``label_path``.  Each label row is
    ``class, x_center, y_center, width, height`` with all coordinates
    normalized to [0, 1] relative to the (unpadded) image size.

    Args:
        img_path: directory containing the images.
        label_path: directory containing one ``.txt`` label file per image,
            named after the image's stem.
        to_square: if True, pad each image to a square with black borders
            and rescale the bbox coordinates accordingly.
        transform: torchvision transform applied to the PIL image; defaults
            to ``transforms.ToTensor()``.
    """

    def __init__(self, img_path, label_path, to_square=False, transform=None):
        self.img_list = os.listdir(img_path)
        self.img_path = img_path
        self.label_path = label_path
        self.to_square = to_square
        self.transform = transforms.ToTensor() if transform is None else transform

    def __getitem__(self, index):
        """Return ``(img, label)``.

        ``img`` is a (3, H, W) float tensor.  ``label`` is an (N, 6) tensor
        of ``[batch_idx, class, x, y, w, h]`` rows; ``batch_idx`` is a
        placeholder column filled in by :meth:`collate_fn`.
        """
        name = self.img_list[index]
        image_path = os.path.join(self.img_path, name)
        # splitext handles any extension length (".jpg", ".jpeg", ...);
        # the previous "[:-3]" slice silently broke non-3-char extensions.
        label_path = os.path.join(self.label_path, os.path.splitext(name)[0] + ".txt")

        img = Image.open(image_path)
        w, h = img.size
        pad_before = 0  # padding added on the top (w > h) or left (h > w)
        if self.to_square:
            diff = abs(w - h)
            # Split an odd difference as (diff//2, diff - diff//2) so the
            # padded image is exactly square (the old symmetric split
            # dropped one pixel when diff was odd).
            pad_before = diff // 2
            pad_after = diff - pad_before
            if w >= h:
                border = (0, pad_before, 0, pad_after)  # (left, top, right, bottom)
            else:
                border = (pad_before, 0, pad_after, 0)
            img = ImageOps.expand(img, border, fill="black")
        if img.mode != "RGB":
            # Normalize grayscale/palette/RGBA inputs so every sample has
            # exactly 3 channels after ToTensor(); the old code only
            # handled the 1-channel case (via repeat) and crashed on RGBA.
            img = img.convert("RGB")
        img = self.transform(img)
        if img.shape[0] == 1:  # defensive: a custom transform may still emit 1 channel
            img = img.repeat([3, 1, 1])

        label = np.loadtxt(label_path).reshape(-1, 5)
        if self.to_square and w != h:
            # Bboxes must be re-normalized against the padded square side.
            side = max(w, h)
            if w > h:
                # Padded top/bottom: shift + rescale y-center and box height.
                label[:, 2] = (label[:, 2] * h + pad_before) / side
                label[:, 4] = label[:, 4] * h / side
            else:
                # Padded left/right: shift + rescale x-center and box width.
                label[:, 1] = (label[:, 1] * w + pad_before) / side
                label[:, 3] = label[:, 3] * w / side

        # Prepend a column that collate_fn fills with the sample's index
        # within its batch.
        label = np.concatenate((np.zeros((label.shape[0], 1)), label), axis=1)

        return img, torch.Tensor(label)

    def collate_fn(self, batch):
        """Stack images into one tensor and concatenate label rows,
        writing each sample's batch index into column 0 of its labels."""
        imgs, labels = zip(*batch)
        for idx, lab in enumerate(labels):
            lab[:, 0] = idx
        return torch.stack(list(imgs)), torch.cat(list(labels))

    def __len__(self):
        return len(self.img_list)

# import matplotlib.pyplot as plt
# import cv2
# transform = transforms.Compose([
#     transforms.Resize(size=(416,416)),
#     transforms.ToTensor(),
# ])
# dataset =  CocoDataset("./sub_set/train2014","./sub_set/labels/train2014",to_square=True, transform=transform)
# img, label = dataset.__getitem__(3)
# label = label[:,1:]
# img_arr = (255*img.squeeze().permute(1,2,0).contiguous().numpy()).astype(np.uint8)
# for i in range(label.shape[0]):
#     x1, y1 = int(label[i,1]*416 - label[i,3]*416/2), int(label[i,2]*416 - label[i,4]*416/2)
#     x2, y2 = int(label[i,1]*416 + label[i,3]*416/2), int(label[i,2]*416 + label[i,4]*416/2)
#     cv2.rectangle(img_arr,(x1,y1),(x2,y2),(255,0,0),3)
#     print(x1,y1,x2,y2)
#
# plt.imshow(img_arr)
# plt.show()
