import json
import os
import random
import torch
import torch.utils.data as Data
import torchvision.transforms.functional as FT
from PIL import Image


def photometric_distort(image):
    """
    Randomly alter brightness, contrast, saturation and hue, each applied
    with 50% probability and in a random order.

    :param image: a PIL Image
    :return: the distorted PIL Image
    """
    # Each op is paired with the range its adjustment factor is drawn from:
    # hue shifts in a narrow band around 0, the others scale around 1.
    ops = [
        (FT.adjust_brightness, 0.5, 1.5),
        (FT.adjust_contrast, 0.5, 1.5),
        (FT.adjust_saturation, 0.5, 1.5),
        (FT.adjust_hue, -18 / 255., 18 / 255),
    ]
    random.shuffle(ops)

    distorted = image
    for op, low, high in ops:
        if random.random() < 0.5:
            distorted = op(distorted, random.uniform(low, high))
    return distorted

def expand(image, boxes, filler):
    """
    Perform a zooming-out operation by placing the image in a larger canvas
    of filler material. Helps to learn to detect smaller objects.

    :param image: image, a tensor of dimensions (3, original_h, original_w)
    :param boxes: bounding boxes in boundary coordinates (xmin, ymin, xmax, ymax),
                  a tensor of dimensions (n_objects, 4)
    :param filler: RGB values of the filler material, a list like [R, G, B]
    :return: expanded image, updated bounding box coordinates
    """
    # Calculate dimensions of proposed expanded (zoomed-out) image
    original_h = image.size(1)
    original_w = image.size(2)
    max_scale = 4
    scale = random.uniform(1, max_scale)
    new_h = int(scale * original_h)
    new_w = int(scale * original_w)

    # Create such an image with the filler
    filler = torch.FloatTensor(filler)  # (3)
    new_image = torch.ones((3, new_h, new_w), dtype=torch.float) * filler.unsqueeze(1).unsqueeze(1)  # (3, new_h, new_w)
    # Note - do not use expand() like new_image = filler.unsqueeze(1).unsqueeze(1).expand(3, new_h, new_w)
    # because all expanded values will share the same memory, so changing one pixel will change all

    # Place the original image at random coordinates in this new image (origin at top-left of image)
    left = random.randint(0, new_w - original_w)
    right = left + original_w
    top = random.randint(0, new_h - original_h)
    bottom = top + original_h
    new_image[:, top:bottom, left:right] = image

    # Shift the boxes by the placement offset. Both corners move by the same
    # (left, top) translation -- the previous [left, top, right, bottom]
    # offset was a bug that stretched every box by the image's width/height.
    new_boxes = boxes + torch.FloatTensor([left, top, left, top]).unsqueeze(0)  # (n_objects, 4)

    return new_image, new_boxes


# def expand(image, boxes, filler):
#     """
#     将图像放在一个更大的填充材料画布上，来执行方法操作。有助于学习小目标
#     image: tensor,(3,original_h,original_w)
#     boxes: 边界坐标形式的Bbox，tensor,(n_objects,4)
#     filler: 填充材料的RGB值，[R,G,B]列表
#     return: 扩充后的图像，更新bbox坐标
#
#     该图像有50%的概率，进行方法，并将其中对应的object ground-truth进行更改(只要图像一更改，对应的Bbox就必须更改)
#     """
#
#     original_h = image.size(1)
#     original_w = image.size(2)
#
#     max_scale = 4  # 设置最大放大尺度
#     scale = random.uniform(1, max_scale)  # 生成随机数
#     new_h = int(scale * original_h)
#     new_w = int(scale * original_w)
#
#     filler = torch.FloatTensor(filler)  # filler=torch.FloatTensor([0.485,0.456,0.406]) #mean=[0.485,0.456,0.406]
#     new_image = torch.ones((3, new_h, new_w), dtype=torch.float) * filler.unsqueeze(1).unsqueeze(
#         1)
#     left = random.randint(0, new_w - original_w)
#     right = left + original_w
#     top = random.randint(0, new_h - original_h)
#     bottom = top + original_h
#     new_image[:, top:bottom, left:right] = image  # (3,h,w)
#
#     # 根据新图像调整bbox坐标
#     new_boxes = boxes + torch.FloatTensor([left, top, left, top]).unsqueeze(0)
#         # bbox原始边界形式为[xmin,ymin,xmax,ymax]，最终得到new_boxes还是(n_objects,4)
#
#     return new_image, new_boxes


def random_crop(image, boxes, labels, difficulties):
    """
    Randomly crop the image, SSD-style. Helps learn to detect larger and
    partial objects. Some objects may be cut out entirely.

    :param image: image, a tensor of dimensions (3, original_h, original_w)
    :param boxes: bounding boxes in boundary coordinates, tensor (n_objects, 4)
    :param labels: object labels, tensor (n_objects)
    :param difficulties: detection difficulties, tensor (n_objects)
    :return: cropped image, updated box coordinates, labels and difficulties
    """
    original_h = image.size(1)
    original_w = image.size(2)
    while True:
        # Draw the minimum required IoU between the crop and any box;
        # None means "do not crop at all" for this sample.
        min_overlap = random.choice([0., .1, .3, .5, .7, .9, None])
        if min_overlap is None:
            return image, boxes, labels, difficulties  # leave the while loop

        max_trials = 50
        for _ in range(max_trials):

            # Crop size: each side between 30% and 100% of the original.
            min_scale = 0.3
            scale_h = random.uniform(min_scale, 1)
            scale_w = random.uniform(min_scale, 1)
            new_h = int(scale_h * original_h)
            new_w = int(scale_w * original_w)

            # Aspect ratio (height / width here) must stay within [0.5, 2].
            aspect_ratio = new_h / new_w
            if not 0.5 < aspect_ratio < 2:
                continue  # try another crop

            # Crop position: pick where the crop sits inside the original image.
            left = random.randint(0, original_w - new_w)
            right = left + new_w
            top = random.randint(0, original_h - new_h)
            bottom = top + new_h
            crop = torch.FloatTensor([left, top, right, bottom])
            # These [left, top, right, bottom] coordinates are still relative
            # to the original image.

            # IoU between the crop rectangle and every ground-truth box.
            overlop = find_jaccard_overlop(crop.unsqueeze(0),
                                           boxes)
            overlop = overlop.squeeze(0)  # drop the singleton dim -> (n_objects) IoUs

            # Reject the crop if no box overlaps it by at least min_overlap.
            if overlop.max().item() < min_overlap:
                continue

            # Actually crop the image.
            new_image = image[:, top:bottom, left:right]  # directly index the crop, (3, new_h, new_w)

            # Centers of the original boxes.
            bb_centers = (boxes[:, :2] + boxes[:, 2:]) / 2  # (n_objects, 2)

            # Keep only boxes whose center falls inside the crop.
            certer_in_crop = (bb_centers[:, 0] > left) * (bb_centers[:, 0] < right) * (bb_centers[:, 1] > top) * (
                        bb_centers[:, 1] < bottom)  # (n_objects)
            if not certer_in_crop.any():
                continue

            # Discard boxes (and their labels/difficulties) whose centers lie outside.
            new_boxes = boxes[certer_in_crop, :]  # boolean-mask indexing over the n_objects dimension
            new_labels = labels[certer_in_crop]
            new_difficulties = difficulties[certer_in_crop]

            # Re-express box coordinates relative to the crop origin, clipping
            # any box edges that extend beyond the crop boundary.
            new_boxes[:, :2] = torch.max(new_boxes[:, :2], crop[:2])
            new_boxes[:, :2] -= crop[:2]
            new_boxes[:, 2:] = torch.min(new_boxes[:, 2:], crop[2:])
            new_boxes[:, 2:] -= crop[:2]

            return new_image, new_boxes, new_labels, new_difficulties


def find_intersection(set_1, set_2):
    """
    Compute the intersection area of every box in set_1 with every box in set_2.

    :param set_1: boxes in boundary coordinates, tensor (n1, 4)
    :param set_2: boxes in boundary coordinates, tensor (n2, 4)
    :return: intersection areas, tensor (n1, n2)
    """
    # Pairwise max of the top-left corners and min of the bottom-right corners
    # via broadcasting: (n1, 1, 2) against (1, n2, 2).
    top_left = torch.max(set_1[:, :2].unsqueeze(1), set_2[:, :2].unsqueeze(0))  # (n1, n2, 2)
    bottom_right = torch.min(set_1[:, 2:].unsqueeze(1), set_2[:, 2:].unsqueeze(0))  # (n1, n2, 2)

    # Clamp at zero so disjoint box pairs contribute no area.
    wh = (bottom_right - top_left).clamp(min=0)  # (n1, n2, 2)
    return wh[..., 0] * wh[..., 1]  # (n1, n2)


def find_jaccard_overlop(set_1, set_2):
    """
    Compute the Jaccard overlap (IoU) of every box in set_1 with every box in set_2.

    :param set_1: boxes in boundary coordinates, tensor (n1, 4)
    :param set_2: boxes in boundary coordinates, tensor (n2, 4)
    :return: IoU values, tensor (n1, n2)
    """
    # Pairwise intersection, computed inline: the overlap rectangle of each
    # box pair, clamped to zero width/height when the boxes are disjoint.
    lower = torch.max(set_1[:, :2].unsqueeze(1), set_2[:, :2].unsqueeze(0))  # (n1, n2, 2)
    upper = torch.min(set_1[:, 2:].unsqueeze(1), set_2[:, 2:].unsqueeze(0))  # (n1, n2, 2)
    dims = torch.clamp(upper - lower, min=0)  # (n1, n2, 2)
    intersection = dims[:, :, 0] * dims[:, :, 1]  # (n1, n2)

    # Individual box areas.
    areas_1 = (set_1[:, 2] - set_1[:, 0]) * (set_1[:, 3] - set_1[:, 1])  # (n1)
    areas_2 = (set_2[:, 2] - set_2[:, 0]) * (set_2[:, 3] - set_2[:, 1])  # (n2)

    # Union by inclusion-exclusion: area1 + area2 - intersection.
    union = areas_1.unsqueeze(1) + areas_2.unsqueeze(0) - intersection  # (n1, n2)

    return intersection / union


def flip(image, boxes):
    """
    Horizontally flip the image and its bounding boxes.

    :param image: a PIL Image
    :param boxes: bounding boxes in boundary coordinates, tensor (n_objects, 4)
    :return: flipped image, updated bounding box coordinates
    """
    # Flip the image
    new_image = FT.hflip(image)

    # Mirror the x-coordinates (the -1 maps pixel index w-1 to index 0).
    # Clone first so the caller's boxes tensor is not mutated in place.
    new_boxes = boxes.clone()
    new_boxes[:, 0] = image.width - boxes[:, 0] - 1
    # Bug fix: x-max must also mirror about the image WIDTH -- the previous
    # code used image.height here, which is wrong for non-square images.
    new_boxes[:, 2] = image.width - boxes[:, 2] - 1
    # After mirroring, the old xmax becomes the new xmin and vice versa.
    new_boxes = new_boxes[:, [2, 1, 0, 3]]

    return new_boxes if False else (new_image, new_boxes)


def resize(image, boxes, dim=(300, 300), return_percent_coords=True):
    """
    Resize the image to `dim` (default 300x300) and rescale its boxes.

    :param image: a PIL Image
    :param boxes: bounding boxes in boundary coordinates, tensor (n_objects, 4)
    :param dim: target (height, width)
    :param return_percent_coords: if True, return boxes as fractional
        coordinates in [0, 1]; otherwise in pixels of the resized image
    :return: resized image, updated bounding box coordinates
    """
    new_image = FT.resize(image, dim)

    # Express the boxes as fractions of the original image size.
    w, h = image.width, image.height
    old_dims = torch.FloatTensor([w, h, w, h]).unsqueeze(0)  # (1, 4)
    new_boxes = boxes / old_dims

    if not return_percent_coords:
        # Scale the fractional coordinates up to the new pixel dimensions.
        new_dims = torch.FloatTensor([dim[1], dim[0], dim[1], dim[0]]).unsqueeze(0)
        new_boxes = new_boxes * new_dims

    return new_image, new_boxes


def transform(image, boxes, labels, difficulties, split):
    """
    Apply the full augmentation / preprocessing pipeline to one sample.

    :param image: a PIL Image
    :param boxes: bounding boxes in boundary coordinates, tensor (n_objects, 4)
    :param labels: object labels, tensor (n_objects)
    :param difficulties: detection difficulties, tensor (n_objects)
    :param split: one of 'TRAIN' or 'TEST' -- only training applies random augmentation
    :return: transformed image, box coordinates, labels and difficulties
    """
    assert split in {'TRAIN', 'TEST'}

    # ImageNet statistics, used both as expand-filler and for normalization.
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]

    img, bxs, lbls, diffs = image, boxes, labels, difficulties

    if split == 'TRAIN':
        # Photometric jitter on the PIL image, then tensor-space geometry.
        img = FT.to_tensor(photometric_distort(img))
        if random.random() < 0.5:
            img, bxs = expand(img, bxs, filler=mean)
        img, bxs, lbls, diffs = random_crop(img, bxs, lbls, diffs)
        # Back to PIL for the flip, which operates on PIL images.
        img = FT.to_pil_image(img)
        if random.random() < 0.5:
            img, bxs = flip(img, bxs)

    # Common path: resize to the SSD input size and normalize.
    img, bxs = resize(img, bxs, dim=(300, 300))
    img = FT.normalize(FT.to_tensor(img), mean=mean, std=std)
    return img, bxs, lbls, diffs


class PascalVOCDataset(Data.Dataset):
    """
    PASCAL VOC detection dataset backed by pre-parsed JSON files.

    The data folder is expected to contain:
        TRAIN_images.json / TEST_images.json   -- lists of image file paths
        TRAIN_objects.json / TEST_objects.json -- per-image boxes/labels/difficulties
        label_map.json                         -- class-name to index mapping
    """

    def __init__(self, data_folder, split, keep_difficult=False):
        """
        :param data_folder: folder where the JSON data files are stored
        :param split: one of 'TRAIN' or 'TEST'
        :param keep_difficult: keep or discard objects marked as difficult to detect
        """
        self.split = split.upper()
        assert self.split in {'TRAIN', 'TEST'}

        self.data_folder = data_folder
        self.keep_difficult = keep_difficult

        with open(os.path.join(data_folder, self.split + '_images.json'), 'r') as j:
            self.images = json.load(j)
        with open(os.path.join(data_folder, self.split + '_objects.json'), 'r') as j:
            self.objects = json.load(j)

        assert len(self.images) == len(self.objects)

    def __getitem__(self, i):
        """Load, filter and transform sample i; returns (image, boxes, labels, difficulties)."""
        image = Image.open(self.images[i], mode='r')
        image = image.convert('RGB')

        objects = self.objects[i]
        boxes = torch.FloatTensor(objects['boxes'])
        labels = torch.LongTensor(objects['labels'])
        difficulties = torch.ByteTensor(objects['difficulties'])

        # Discard difficult objects if requested.
        # Bug fix: index with a bool mask instead of the uint8 tensor
        # `1 - difficulties`, whose use as an index is deprecated/removed
        # in modern PyTorch versions.
        if not self.keep_difficult:
            keep = difficulties == 0
            boxes = boxes[keep]
            labels = labels[keep]
            difficulties = difficulties[keep]

        image, boxes, labels, difficulties = transform(image, boxes, labels, difficulties, split=self.split)

        return image, boxes, labels, difficulties

    def __len__(self):
        return len(self.images)

    def collate_fn(self, batch):
        """
        Collate samples with varying numbers of objects: stack the images into
        one tensor, keep boxes/labels/difficulties as per-image lists.
        """
        images = list()
        boxes = list()
        labels = list()
        difficulties = list()

        for b in batch:
            images.append(b[0])
            boxes.append(b[1])
            labels.append(b[2])
            difficulties.append(b[3])

        images = torch.stack(images, dim=0)  # (N, 3, H, W)

        return images, boxes, labels, difficulties
