import math
import random

import albumentations as albu
import cv2
import numpy as np
import torch
import torchvision.transforms as transforms
from albumentations.pytorch.transforms import ToTensor


def get_augumentation(phase, width=512, height=512, min_area=0., min_visibility=0.):
    """Assemble the albumentations pipeline for one dataset phase.

    Args:
        phase: 'train' adds geometric/photometric augmentations;
            'test'/'valid' only resize. Every phase ends with
            Normalize (ImageNet stats) + ToTensor.
        width, height: target image size in pixels.
        min_area, min_visibility: bbox filtering thresholds forwarded to
            ``albu.BboxParams`` (not used for the 'test' pipeline).

    Returns:
        An ``albu.Compose``; for every phase except 'test' it carries
        pascal_voc bbox parameters with a 'category_id' label field.
    """
    steps = []
    if phase == 'train':
        steps += [
            albu.augmentations.transforms.LongestMaxSize(
                max_size=width, always_apply=True),
            albu.PadIfNeeded(min_height=height, min_width=width,
                             always_apply=True, border_mode=0, value=[0, 0, 0]),
            albu.augmentations.transforms.RandomResizedCrop(
                height=height, width=width, p=0.3),
            albu.augmentations.transforms.Flip(),
            albu.augmentations.transforms.Transpose(),
            albu.OneOf([
                albu.RandomBrightnessContrast(brightness_limit=0.5,
                                              contrast_limit=0.4),
                albu.RandomGamma(gamma_limit=(50, 150)),
                albu.NoOp(),
            ]),
            albu.OneOf([
                albu.RGBShift(r_shift_limit=20, b_shift_limit=15,
                              g_shift_limit=15),
                albu.HueSaturationValue(hue_shift_limit=5,
                                        sat_shift_limit=5),
                albu.NoOp(),
            ]),
            albu.CLAHE(p=0.8),
            albu.HorizontalFlip(p=0.5),
            albu.VerticalFlip(p=0.5),
        ]
    elif phase in ('test', 'valid'):
        steps.append(albu.Resize(height=height, width=width))

    steps += [
        albu.Normalize(mean=(0.485, 0.456, 0.406),
                       std=(0.229, 0.224, 0.225), p=1),
        ToTensor(),
    ]

    # 'test' images carry no ground-truth boxes, so skip bbox params.
    if phase == 'test':
        return albu.Compose(steps)
    bbox_params = albu.BboxParams(format='pascal_voc', min_area=min_area,
                                  min_visibility=min_visibility,
                                  label_fields=['category_id'])
    return albu.Compose(steps, bbox_params=bbox_params)


def detection_collate(batch):
    """Collate albumentations-style samples into batched tensors.

    Each sample is {'image': CHW tensor, 'bboxes': list of [x1,y1,x2,y2],
    'category_id': list of labels}. Annotations are padded with -1 rows
    up to the widest sample in the batch; column 4 holds the label.
    """
    imgs, boxes, cats = [], [], []
    for sample in batch:
        imgs.append(sample['image'])
        boxes.append(sample['bboxes'])
        cats.append(sample['category_id'])

    widest = max(len(b) for b in boxes)
    padded = np.full((len(boxes), widest, 5), -1.0)

    if widest > 0:
        for row, (bbs, labels) in enumerate(zip(boxes, cats)):
            if bbs:
                padded[row, :len(bbs), :4] = bbs
                padded[row, :len(bbs), 4] = labels

    return (torch.stack(imgs, 0), torch.FloatTensor(padded))


def collater(data):
    """Collate Resizer-style samples into batched (images, annotations).

    Each sample is {'image': HxWx3 array, 'annot': (N, 5) tensor,
    'scale': float}. Annotations are padded with -1 rows to the widest
    sample (at least one row), and images are stacked then converted
    from NHWC to NCHW.

    Fixes: removed a duplicated ``max_num_annots > 0`` check that was
    nested inside itself, and a redundant ``torch.FloatTensor`` re-wrap
    of the already-float32 padded tensor.
    """
    images = [s['image'] for s in data]
    annots = [s['annot'] for s in data]
    scales = [s['scale'] for s in data]  # NOTE(review): collected but never used

    images = torch.from_numpy(np.stack(images, axis=0))

    max_num_annots = max(annot.shape[0] for annot in annots)
    # Always at least one (all -1) row so downstream code gets a (B, K, 5) tensor.
    annot_padded = torch.ones((len(annots), max(max_num_annots, 1), 5)) * -1

    for idx, annot in enumerate(annots):
        if annot.shape[0] > 0:
            annot_padded[idx, :annot.shape[0], :] = annot

    images = images.permute(0, 3, 1, 2)

    return (images, annot_padded)


class Resizer(object):
    """Resize the image so its longest side equals ``img_size``, then pad
    with zeros (bottom/right) to a square canvas, scaling boxes to match.

    Fixes: replaced the copy-pasted docstring ("Convert ndarrays ... to
    Tensors", which described Normalizer-like behavior, not resizing) and
    removed large blocks of commented-out debug code.
    """

    def __init__(self, img_size=512):
        # Target side length of the square output image.
        self.img_size = img_size

    def __call__(self, sample):
        """Resize one sample.

        Args:
            sample: {'image': HxWx3 ndarray, 'annot': (N, 5) ndarray of
                [x1, y1, x2, y2, label]}.

        Returns:
            {'image': float32 tensor (img_size, img_size, 3),
             'annot': tensor sharing memory with the scaled boxes,
             'scale': the resize factor that was applied}.

        NOTE: box coordinates are scaled IN PLACE on the caller's array.
        """
        image, annots = sample['image'], sample['annot']
        height, width, _ = image.shape

        # Scale by the longest side so the resized image fits the canvas.
        if height > width:
            scale = self.img_size / height
            resized_height = self.img_size
            resized_width = int(width * scale)
        else:
            scale = self.img_size / width
            resized_height = int(height * scale)
            resized_width = self.img_size

        image = cv2.resize(image, (resized_width, resized_height),
                           interpolation=cv2.INTER_LINEAR)

        # Zero-padded square canvas; the resized image sits at the top-left.
        new_image = np.zeros((self.img_size, self.img_size, 3))
        new_image[0:resized_height, 0:resized_width] = image

        # Boxes scale by the same factor as the image.
        annots[:, :4] *= scale

        return {'image': torch.from_numpy(new_image).to(torch.float32),
                'annot': torch.from_numpy(annots),
                'scale': scale}

class Augmenter(object):
    """Random horizontal flip of an image and its boxes.

    Bug fix: a flipped sample used to be rebuilt as a fresh dict holding
    only 'image' and 'annot', silently dropping every other key (e.g.
    'scale' added by Resizer). The sample is now updated in place so all
    keys survive the flip.
    """

    def __call__(self, sample, flip_x=0.5):
        """Flip sample['image'] left-right with probability ``flip_x`` and
        mirror the x-coordinates of sample['annot'] accordingly.

        NOTE: boxes are mirrored IN PLACE on the caller's array, and the
        flipped image is a negative-stride view (no copy).
        """
        if np.random.rand() < flip_x:
            image, annots = sample['image'], sample['annot']
            image = image[:, ::-1, :]

            rows, cols, channels = image.shape
            x1 = annots[:, 0].copy()
            x2 = annots[:, 2].copy()

            # Mirror: new_x1 = W - old_x2, new_x2 = W - old_x1.
            annots[:, 0] = cols - x2
            annots[:, 2] = cols - x1

            sample['image'] = image
            sample['annot'] = annots
        return sample


class Normalizer(object):
    """Scale pixel values to [0, 1] then normalize with ImageNet mean/std.

    Fixes: removed dead commented-out debug code and a redundant second
    ``astype(np.float32)`` cast.
    """

    def __init__(self):
        # ImageNet channel statistics, shaped (1, 1, 3) for broadcasting.
        self.mean = np.array([[[0.485, 0.456, 0.406]]])
        self.std = np.array([[[0.229, 0.224, 0.225]]])

    def __call__(self, sample):
        """Return a new sample with the image normalized; boxes untouched.

        The result broadcasts against float64 mean/std, so the returned
        image is float64 (as in the original implementation).
        """
        image, annots = sample['image'], sample['annot']
        image = np.array(image).astype(np.float32) / 255.
        image = (image - self.mean) / self.std
        return {'image': image, 'annot': annots}


def colorJitter(image, annots):
    """Apply a torchvision ColorJitter with small random strengths.

    Brightness/contrast/saturation/hue strengths are each drawn uniformly
    from {0.1, 0.2, 0.3}. Boxes pass through untouched.
    NOTE(review): torchvision's ColorJitter expects a PIL Image (or, in
    newer versions, a tensor) — confirm against the caller.
    """
    # Draw the four strengths in the same order as before to keep the
    # random sequence identical.
    strengths = [random.randint(1, 3) / 10 for _ in range(4)]
    brightness, contrast, saturation, hue = strengths
    jitter = transforms.ColorJitter(brightness=brightness, contrast=contrast,
                                    saturation=saturation, hue=hue)
    return jitter(image), annots


def _box_inter(box1, box2):
    tl = torch.max(box1[:, None, :2].float(), box2[:, :2].float())  # [n,m,2]
    br = torch.min(box1[:, None, 2:].float(), box2[:, 2:].float())  # [n,m,2]
    hw = (br - tl).clamp(min=0)  # [n,m,2]
    inter = hw[:, :, 0] * hw[:, :, 1]  # [n,m]
    return inter

class SmallObjectAugmentation(object):
    """Copy-paste augmentation for small objects.

    sample = {'image': img, 'annot': annots}
    img: [height, width, 3] ndarray
    annot rows: [xmin, ymin, xmax, ymax, label]

    Fixes: ``np.int`` (removed in NumPy >= 1.24) replaced with plain
    ``int``; guarded against ``np.random.randint(0, 0)`` raising when an
    object spans the whole image along an axis; comments translated to
    English.
    """

    def __init__(self, thresh=32*32, prob=0.9, copy_times=5, epochs=100, all_objects=True, one_object=False):
        """
        thresh: detection threshold for a small object — if
            annot_h * annot_w <= thresh, the object counts as small
        prob: probability of applying the augmentation at all
        copy_times: pasted copies to attempt per selected object
        epochs: max random placement attempts per copy
        all_objects: copy every small object once (policy 3)
        one_object: copy exactly one small object (policy 1)
        """
        self.thresh = thresh
        self.prob = prob
        self.copy_times = copy_times
        self.epochs = epochs
        self.all_objects = all_objects
        self.one_object = one_object
        # Single-copy policies override copy_times.
        if self.all_objects or self.one_object:
            self.copy_times = 1

    def issmallobject(self, h, w):
        # An object is "small" when its (positive) area is at or below the threshold.
        if h * w <= self.thresh and h > 0 and w > 0:
            return True
        else:
            return False

    def compute_overlap(self, annot_a, annot_b):
        # True when the two boxes have a non-empty intersection.
        if annot_a is None:
            return False
        left_max = max(annot_a[0], annot_b[0])
        top_max = max(annot_a[1], annot_b[1])
        right_min = min(annot_a[2], annot_b[2])
        bottom_min = min(annot_a[3], annot_b[3])
        inter = max(0, (right_min - left_max)) * max(0, (bottom_min - top_max))
        if inter != 0:
            return True
        else:
            return False

    def donot_overlap(self, new_annot, annots):
        # True when new_annot overlaps none of the existing annotations.
        for annot in annots:
            if self.compute_overlap(new_annot, annot):
                return False
        return True

    def create_copy_annot(self, h, w, annot, annots):
        """Try up to self.epochs random placements for a copy of ``annot``
        inside an (h, w) image; return the new integer box
        [x1, y1, x2, y2, label] or None if no non-overlapping spot was found."""
        annot_h, annot_w = annot[3] - annot[1], annot[2] - annot[0]

        # Guard: randint(0, 0) raises ValueError when the object is as
        # large as the image along either axis — nowhere to place a copy.
        if int(w - annot_w) <= 0 or int(h - annot_h) <= 0:
            return None

        for epoch in range(self.epochs):
            random_x, random_y = np.random.randint(0, int(w - annot_w)), np.random.randint(0, int(h - annot_h))
            xmin, ymin = random_x, random_y
            xmax, ymax = xmin + annot_w, ymin + annot_h
            # Reject placements touching the image border.
            if xmin <= 0 or xmax >= w or ymin <= 0 or ymax >= h:
                continue
            # np.int was removed in NumPy >= 1.24; builtin int is equivalent here.
            new_annot = np.array([xmin, ymin, xmax, ymax, annot[4]]).astype(int)
            if self.donot_overlap(new_annot, annots) is False:
                continue

            return new_annot
        return None

    def add_patch_in_img(self, new_annot, copy_annot, image):
        """Paste the pixels under ``copy_annot`` at ``new_annot``'s location.

        The caller pre-checks that both patches have the same shape, so the
        ValueError branch (debug leftover, exits the process) should be
        unreachable.
        """
        origin_patch = image[copy_annot[1]:copy_annot[3], copy_annot[0]:copy_annot[2], :]
        get_patch = image[new_annot[1]:new_annot[3], new_annot[0]:new_annot[2], :]
        try:
            image[new_annot[1]:new_annot[3], new_annot[0]:new_annot[2], :] = origin_patch
        except ValueError:
            name = f"\nget :{get_patch.shape},origin:{origin_patch.shape}"
            print(name)
            exit(0)
        return image

    def __call__(self, sample):
        """Copy-paste small objects according to the configured policy and
        return the augmented {'image', 'annot'} sample."""
        # Contradictory policies: do nothing.
        if self.all_objects and self.one_object:
            return sample
        if np.random.rand() > self.prob:
            return sample

        img, annots = sample['image'], sample['annot']
        h, w = img.shape[0], img.shape[1]

        # Collect indices of small objects.
        small_object_list = list()
        for idx in range(annots.shape[0]):
            annot = annots[idx]
            annot_h, annot_w = annot[3] - annot[1], annot[2] - annot[0]
            if self.issmallobject(annot_h, annot_w):
                small_object_list.append(idx)

        l = len(small_object_list)
        # No small object: nothing to copy.
        if l == 0:
            return sample

        # Refine the copy_object count by the given policy.
        # Policy 2: random subset.
        # NOTE(review): randint(0, l) excludes l, so with a single small
        # object policy 2 never copies anything — confirm this is intended.
        copy_object_num = np.random.randint(0, l)
        # Policy 3: all small objects.
        if self.all_objects:
            copy_object_num = l
        # Policy 1: exactly one.
        if self.one_object:
            copy_object_num = 1

        random_list = random.sample(range(l), copy_object_num)
        annot_idx_of_small_object = [small_object_list[idx] for idx in random_list]
        select_annots = annots[annot_idx_of_small_object, :]
        annots = annots.tolist()
        for idx in range(copy_object_num):
            annot = select_annots[idx]
            annot_h, annot_w = annot[3] - annot[1], annot[2] - annot[0]

            if self.issmallobject(annot_h, annot_w) is False:
                continue

            for i in range(self.copy_times):
                new_annot = self.create_copy_annot(h, w, annot, annots)
                # np.int removed in NumPy >= 1.24; builtin int is equivalent.
                annot = annot.astype(int)
                if new_annot is not None:
                    origin_patch = img[annot[1]:annot[3], annot[0]:annot[2], :]
                    get_patch = img[new_annot[1]:new_annot[3], new_annot[0]:new_annot[2], :]
                    # Only paste when both patches line up exactly.
                    if origin_patch.shape == get_patch.shape:
                        img = self.add_patch_in_img(new_annot, annot, img)
                        annots.append(new_annot)
        return {'image': img, 'annot': np.array(annots)}