import torch
import numpy as np
import random
from PIL import Image
import torchvision.transforms.functional as FT
from vortex.utils.bbox import find_jaccard_overlap


class Resize(object):
    """Resize an image to a fixed (width, height).

    Boxes are expressed in fractional (percent) coordinates, so they are
    invariant under resizing and returned untouched.
    """

    def __init__(self, size, interpolation=Image.BILINEAR):
        # An int means a square target; otherwise expect a (width, height) pair.
        self.size = (size, size) if isinstance(size, int) else size
        self.interpolation = interpolation

    def __call__(self, image, boxes=None, labels=None):
        """Return the resized image together with the unchanged boxes/labels."""
        resized = image.resize(self.size, self.interpolation)
        return resized, boxes, labels


class ResizeKeepAspectRatio(object):
    """Resize so that the shorter image side equals `size`, keeping aspect ratio.

    Boxes are fractional, so only the image changes.
    """

    def __init__(self, size=640, interpolation=Image.BILINEAR):
        # `size` is the target length of the shorter side (min of width/height).
        self.size = size
        self.interpolation = interpolation

    def __call__(self, image, boxes=None, labels=None):
        width, height = image.size

        # Already at the target short-side length: nothing to do.
        if min(width, height) == self.size:
            return image, boxes, labels

        if width < height:
            out_w = self.size
            out_h = int(height * self.size / width)
        else:
            out_h = self.size
            out_w = int(width * self.size / height)

        resized = image.resize((out_w, out_h), self.interpolation)
        return resized, boxes, labels


class LetterBoxResize(object):
    """Letterbox resize: scale the image to fit inside `size` while keeping
    aspect ratio, then pad the remainder with `fill_color` (centered).

    Boxes are fractional [xmin, ymin, xmax, ymax] and are remapped into the
    padded canvas; the input tensor is never mutated.
    """

    def __init__(self, size=(640, 640), fill_color=(114, 114, 114)):
        # An int means a square target; otherwise expect a (width, height) pair.
        if isinstance(size, int):
            self.size = (size, size)
        else:
            self.size = size
        self.fill_color = fill_color

    @staticmethod
    def _adjust_boxes(boxes, new_w, new_h, dx, dy, target_w, target_h):
        """Map fractional boxes from the resized content into the padded canvas.

        boxes: (n, 4) tensor, fractional w.r.t. the *resized* content of
        size (new_w, new_h), which is pasted at offset (dx, dy) inside a
        (target_w, target_h) canvas. Returns a new tensor.
        """
        out = boxes.clone()
        # Fractional -> absolute in the resized content, shift by the pad
        # offset, then renormalize to the canvas.
        out[:, [0, 2]] = (out[:, [0, 2]] * new_w + dx) / target_w
        out[:, [1, 3]] = (out[:, [1, 3]] * new_h + dy) / target_h
        return out

    def __call__(self, image, boxes=None, labels=None):
        original_w, original_h = image.size
        target_w, target_h = self.size

        # Uniform scale so the whole image fits inside the target canvas.
        scale = min(target_w / original_w, target_h / original_h)
        new_w = int(scale * original_w)
        new_h = int(scale * original_h)
        # Centered padding offsets.
        dx = (target_w - new_w) // 2
        dy = (target_h - new_h) // 2

        resized_image = image.resize((new_w, new_h), Image.BICUBIC)
        new_image = Image.new('RGB', (target_w, target_h), self.fill_color)
        new_image.paste(resized_image, (dx, dy))

        new_boxes = boxes
        if boxes is not None:
            # BUG FIX: the original scaled boxes by (original_w, original_h),
            # but the pasted content has size (new_w, new_h) — dx/dy were
            # applied in the wrong coordinate system. It also mutated the
            # caller's tensor in place; we clone inside the helper instead.
            new_boxes = self._adjust_boxes(
                boxes, new_w, new_h, dx, dy, target_w, target_h)

        return new_image, new_boxes, labels


class RandomHorizontalFlip(object):
    """Mirror the image horizontally with probability `prob`.

    Boxes are fractional [xmin, ymin, xmax, ymax]; on a flip, the x-range
    [xmin, xmax] becomes [1 - xmax, 1 - xmin]. The input tensor is cloned,
    never mutated.
    """

    def __init__(self, prob=0.5):
        self.prob = prob

    def __call__(self, image, boxes=None, labels=None):
        # Flip only when the draw lands below `prob`; otherwise pass through.
        if random.random() >= self.prob:
            return image, boxes, labels

        flipped = FT.hflip(image)
        mirrored = boxes.clone()
        # Mirror x coordinates: left edge comes from the old right edge.
        mirrored[:, 0] = 1.0 - boxes[:, 2]
        mirrored[:, 2] = 1.0 - boxes[:, 0]

        return flipped, mirrored, labels


class RandomExpand(object):
    """
    Perform zooming out by placing the image at a random position on a
    larger canvas filled with `fill_color` (a standard small-object
    augmentation). Boxes are fractional and are remapped to the canvas.
    """

    def __init__(self, max_scale=4.0, fill_color=(114, 114, 114)):
        # max_scale: upper bound on canvas side / original side.
        self.max_scale = max_scale
        self.fill_color = fill_color

    @staticmethod
    def _shift_boxes(boxes, original_w, original_h, left, top, new_w, new_h):
        """Remap fractional boxes from the original image into the canvas.

        The original (original_w x original_h) image is pasted at (left, top)
        inside a (new_w x new_h) canvas. Returns a new tensor.
        """
        # BUG FIX: the original assigned `new_boxes = boxes` and then
        # modified it in place, mutating the caller's tensor (inconsistent
        # with RandomHorizontalFlip, which clones).
        out = boxes.clone()
        out[:, [0, 2]] = (out[:, [0, 2]] * original_w + left) / new_w
        out[:, [1, 3]] = (out[:, [1, 3]] * original_h + top) / new_h
        return out

    def __call__(self, image, boxes, labels):
        original_w = image.size[0]
        original_h = image.size[1]

        scale = random.uniform(1, self.max_scale)
        new_w = int(scale * original_w)
        new_h = int(scale * original_h)

        # Random paste position (RNG call order kept: left, then top).
        left = random.randint(0, new_w - original_w)
        right = left + original_w
        top = random.randint(0, new_h - original_h)
        bottom = top + original_h

        # Create the filler canvas and paste the original image onto it.
        new_image = Image.new('RGB', (new_w, new_h), self.fill_color)
        new_image.paste(image, (left, top, right, bottom))

        new_boxes = self._shift_boxes(
            boxes, original_w, original_h, left, top, new_w, new_h)

        return new_image, new_boxes, labels


class RandomScaledCrop(object):
    """
    SSD-style random crop with a minimum-IoU constraint.

    Repeatedly samples a crop window until one overlaps the ground-truth
    boxes enough; boxes are fractional [xmin, ymin, xmax, ymax] and are
    clipped and renormalized to the crop on success.
    """
    def __init__(self, max_trials=50, min_scale=0.3):
        # max_trials: attempts per sampled overlap threshold before
        #   re-drawing a new threshold.
        # min_scale: lower bound on the crop's width/height as a fraction
        #   of the original image's width/height.
        self.max_trials = max_trials
        self.min_scale = min_scale

    def __call__(self, image, boxes=None, labels=None):
        """Return (cropped_image, surviving_boxes, surviving_labels).

        boxes: (n, 4) fractional tensor; labels: (n,) tensor indexed in
        lockstep with boxes. May loop until a crop (or the no-op choice)
        succeeds.
        """
        original_w = image.size[0]
        original_h = image.size[1]

        # keep choosing a minimum overlap until a successful crop is made
        while True:
            # `None` means "do not crop at all" for this call.
            min_overlap = random.choice([0.0, 0.1, 0.3, 0.5, 0.7, 0.9, None])
            if min_overlap is None:
                return image, boxes, labels
            
            for _ in range(self.max_trials):
                # Sample crop dimensions independently per axis.
                scale_h = random.uniform(self.min_scale, 1.0)
                scale_w = random.uniform(self.min_scale, 1.0)
                new_h = int(scale_h * original_h)
                new_w = int(scale_w * original_w)
                # Reject extreme aspect ratios (outside [0.5, 2]).
                aspect_ratio = new_h / new_w
                if aspect_ratio < 0.5 or aspect_ratio > 2.0:
                    continue
                
                # crop coordinates (fractional, to match the boxes)
                left = random.randint(0, original_w - new_w)
                right = left + new_w
                top = random.randint(0, original_h - new_h)
                bottom = top + new_h
                crop = torch.FloatTensor([left / original_w,
                                          top / original_h,
                                          right / original_w,
                                          bottom / original_h])
                # find_jaccard_overlap presumably returns the (1, n) IoU
                # matrix between the crop and each box — TODO confirm.
                overlaps = find_jaccard_overlap(crop.unsqueeze(0), boxes).squeeze(0)
                # Reject crops whose best box overlap is below the threshold.
                if overlaps.max().item() < min_overlap:
                    continue
                
                new_image = image.crop([left, top, right, bottom])

                # Keep only boxes whose center falls strictly inside the crop.
                bb_centers = (boxes[:, :2] + boxes[:, 2:]) / 2
                centers_in_crop = (bb_centers[:, 0] > left / original_w) * (
                    bb_centers[:, 0] < right / original_w) * (
                        bb_centers[:, 1] > top / original_h) * (
                            bb_centers[:, 1] < bottom / original_h)
                if not centers_in_crop.any():
                    continue
                
                # Advanced indexing copies, so `boxes` itself is not mutated.
                new_boxes = boxes[centers_in_crop, :]
                new_labels = labels[centers_in_crop]
                
                # convert to absolute coordinate.
                crop[[0, 2]] *= original_w
                crop[[1, 3]] *= original_h
                new_boxes[:, [0, 2]] *= original_w
                new_boxes[:, [1, 3]] *=original_h

                # Clamp boxes to the crop window, then translate so the
                # crop's top-left corner becomes the origin.
                new_boxes[:, :2] = torch.max(new_boxes[:, :2], crop[:2])
                new_boxes[:, :2] -= crop[:2]
                new_boxes[:, 2:] = torch.min(new_boxes[:, 2:], crop[2:])
                new_boxes[:, 2:] -= crop[:2]

                # Renormalize to fractional coordinates of the crop.
                new_boxes[:, [0, 2]] /= new_w
                new_boxes[:, [1, 3]] /= new_h

                return new_image, new_boxes, new_labels
            
            # if no crop is made after max_trials, choose new overlap value.


class RandomCrop(object):
    # TODO: not implemented — __init__ ignores `size` and __call__ returns
    # None instead of the (image, boxes, labels) triple the other
    # transforms produce. Do not use until implemented.
    def __init__(self, size):
        pass

    def __call__(self, image, boxes, labels):
        pass


class CenterCrop(object):
    # TODO: not implemented — __init__ ignores `size` and __call__ returns
    # None instead of the (image, boxes, labels) triple the other
    # transforms produce. Do not use until implemented.
    def __init__(self, size):
        pass

    def __call__(self, image, boxes, labels):
        pass


class CornerCrop(object):
    # TODO: not implemented — __init__ ignores `size` and __call__ returns
    # None instead of the (image, boxes, labels) triple the other
    # transforms produce. Do not use until implemented.
    def __init__(self, size):
        pass

    def __call__(self, image, boxes, labels):
        pass
