from torch.utils.data import Dataset
import torch.nn.functional as F
import torch
import glob
import random
import os
import numpy as np
from PIL import Image
from PIL import ImageFile
from torchvision import transforms
from .transforms import ToTensor, PadSquare, RelativeLabels, AbsoluteLabels, ImgAug
import imgaug.augmenters as iaa
ImageFile.LOAD_TRUNCATED_IMAGES = True
class DefaultAug(ImgAug):
    """Default augmentation pipeline: sharpen, affine jitter, brightness/hue shift, horizontal flip."""

    def __init__(self):
        pipeline = [
            iaa.Sharpen((0.0, 0.1)),
            # NOTE(review): rotate=(-0, 0) effectively disables rotation;
            # translation and scale jitter remain active — confirm intended.
            iaa.Affine(rotate=(-0, 0), translate_percent=(-0.1, 0.1), scale=(0.8, 1.5)),
            iaa.AddToBrightness((-60, 40)),
            iaa.AddToHue((-10, 10)),
            iaa.Fliplr(0.5),
        ]
        self.augmentations = iaa.Sequential(pipeline)

def pad_to_square(img, pad_value):
    """Pad a CHW image tensor with a constant so height equals width.

    Returns the padded tensor and the pad tuple handed to ``F.pad``
    (left, right, top, bottom).
    """
    _, height, width = img.shape
    diff = abs(height - width)
    # Split the gap across both sides; the extra pixel (odd diff) goes second.
    first, second = diff // 2, diff - diff // 2
    if height <= width:
        padding = (0, 0, first, second)  # grow along the height axis
    else:
        padding = (first, second, 0, 0)  # grow along the width axis
    padded = F.pad(img, padding, "constant", value=pad_value)
    return padded, padding


def resize(image, size):
    """Nearest-neighbor resize of a CHW tensor to ``size`` x ``size``."""
    batched = image.unsqueeze(0)
    scaled = F.interpolate(batched, size=size, mode="nearest")
    return scaled.squeeze(0)


class ImageFolder(Dataset):
    """Inference dataset: yields ``(path, transformed image)`` for every file in a folder."""

    def __init__(self, folder_path, transform=None):
        """
        Args:
            folder_path: directory scanned (non-recursively) for image files.
            transform: optional callable applied to ``(img, boxes)``; when
                None, the default augmentation pipeline is used.
        """
        self.files = sorted(glob.glob("%s/*.*" % folder_path))
        # Bug fix: the ``transform`` argument used to be ignored — it was
        # unconditionally overwritten. Honor it, falling back to the default
        # pipeline only when the caller passes None (previous behavior).
        self.transform = transform if transform is not None else transforms.Compose([
            AbsoluteLabels(),
            DefaultAug(),
            PadSquare(),
            RelativeLabels(),
            ToTensor(),
        ])

    def __getitem__(self, index):
        """Return ``(image path, transformed image)``; the index wraps around."""
        img_path = self.files[index % len(self.files)]
        img = np.array(
            Image.open(img_path).convert('RGB'),
            dtype=np.uint8)
        # Dummy label row so label-aware transforms can run on unlabeled data.
        boxes = np.zeros((1, 5))

        if self.transform:
            img, _ = self.transform((img, boxes))

        return img_path, img

    def __len__(self):
        return len(self.files)


class ListDataset(Dataset):
    """COCO-style training dataset pairing ``images/train2014`` with ``labels/train2014``.

    Only ``.jpg`` images with a matching ``.txt`` label file are kept.
    With ``multiscale`` enabled, every tenth batch picks a new square input
    size in ``[img_size - 96, img_size + 96]`` (step 32) inside ``collate_fn``.
    """

    def __init__(self, data_path, img_size=416, multiscale=True, transform=None):
        """
        Args:
            data_path: dataset root containing ``images/`` and ``labels/``.
            img_size: base square input size (pixels).
            multiscale: if True, vary the input size during training.
            transform: optional callable applied to ``(img, boxes)``; when
                None, the default augmentation pipeline is used.
        """
        img_dir = os.path.join(data_path, "images", "train2014")
        label_dir = os.path.join(data_path, "labels", "train2014")
        self.img_files = []
        self.label_files = []
        for fn in os.listdir(img_dir):
            if not fn.endswith(".jpg"):
                continue
            label_path = os.path.join(label_dir, fn.replace(".jpg", ".txt"))
            # Skip images that have no annotation file.
            if not os.path.exists(label_path):
                continue
            self.img_files.append(os.path.join(img_dir, fn))
            self.label_files.append(label_path)

        self.img_size = img_size
        self.max_objects = 100
        self.multiscale = multiscale
        self.min_size = self.img_size - 3 * 32
        self.max_size = self.img_size + 3 * 32
        self.batch_count = 0
        # Bug fix: the ``transform`` argument used to be ignored — it was
        # unconditionally overwritten. Honor it, falling back to the default
        # pipeline only when the caller passes None (previous behavior).
        self.transform = transform if transform is not None else transforms.Compose([
            AbsoluteLabels(),
            DefaultAug(),
            PadSquare(),
            RelativeLabels(),
            ToTensor(),
        ])

    def __getitem__(self, index):
        """Return ``(img_path, image tensor, bounding-box targets)`` for ``index``."""
        img_path = self.img_files[index % len(self.img_files)].rstrip()
        img = np.array(Image.open(img_path).convert('RGB'), dtype=np.uint8)
        label_path = self.label_files[index % len(self.label_files)].rstrip()
        # One row per object; reshape handles the single-annotation case
        # where ``loadtxt`` yields a 1-D array.
        boxes = np.loadtxt(label_path).reshape(-1, 5)

        # Transform image and boxes together.
        img, bb_targets = self.transform((img, boxes))

        return img_path, img, bb_targets

    def collate_fn(self, batch):
        """Collate samples into a batch; resizes images and tags targets with their sample index."""
        self.batch_count += 1

        # Drop invalid images
        batch = [data for data in batch if data is not None]

        paths, imgs, bb_targets = list(zip(*batch))

        # Selects a new image size every tenth batch
        if self.multiscale and self.batch_count % 10 == 0:
            self.img_size = random.choice(
                range(self.min_size, self.max_size + 1, 32))

        # Resize images to the current input shape
        imgs = torch.stack([resize(img, self.img_size) for img in imgs])

        # First target column holds the sample index within the batch
        for i, boxes in enumerate(bb_targets):
            boxes[:, 0] = i
        bb_targets = torch.cat(bb_targets, 0)

        return paths, imgs, bb_targets

    def __len__(self):
        return len(self.img_files)
