import torch
import numpy as np
import cv2
from torchvision.ops.boxes import batched_nms

def box_transform(anchors, regression):
    """
    Decode anchor-relative regression outputs into absolute boxes.

    Adapted from https://github.com/google/automl/blob/master/efficientdet/anchors.py

    Args:
        anchors: [batchsize, boxes, (y1, x1, y2, x2)] or [boxes, (y1, x1, y2, x2)]
        regression: [batchsize, boxes, (dy, dx, dh, dw)] or [boxes, (dy, dx, dh, dw)]

    Returns:
        torch.Tensor: decoded boxes, same leading shape, last dim ordered
        (x1, y1, x2, y2) — note the x-first ordering of the output.
    """
    # Anchor centers and extents (anchors are y-first).
    cy_a = (anchors[..., 0] + anchors[..., 2]) / 2
    cx_a = (anchors[..., 1] + anchors[..., 3]) / 2
    h_a = anchors[..., 2] - anchors[..., 0]
    w_a = anchors[..., 3] - anchors[..., 1]

    # Shift centers by the predicted deltas, scale extents exponentially.
    cy = regression[..., 0] * h_a + cy_a
    cx = regression[..., 1] * w_a + cx_a
    h = torch.exp(regression[..., 2]) * h_a
    w = torch.exp(regression[..., 3]) * w_a

    half_h = h / 2.
    half_w = w / 2.

    return torch.stack([cx - half_w, cy - half_h, cx + half_w, cy + half_h], dim=-1)


def clip_boxes(boxes, classification, params):
    """
    Clamp boxes to the image bounds (in place) and drop degenerate boxes.

    A box is degenerate when, after clamping, x1 == x2 or y1 == y2.

    Args:
        boxes (torch.Tensor): [n, 4] in (x1, y1, x2, y2).
        classification (torch.Tensor): per-box scores, filtered in lockstep.
        params (dict): must provide 'width' and 'height' of the image.

    Returns:
        (boxes, classification) restricted to the surviving boxes.
    """
    max_x = params['width'] - 1
    max_y = params['height'] - 1
    # Even columns are x-coordinates, odd columns are y-coordinates.
    boxes[:, 0::2] = boxes[:, 0::2].clamp(min=0, max=max_x)
    boxes[:, 1::2] = boxes[:, 1::2].clamp(min=0, max=max_y)
    keep = ~(boxes[:, 0:2] == boxes[:, 2:4]).any(dim=1)
    return boxes[keep], classification[keep]


def resize_padding(image, size, interpolation=None):
    """
    Resize an image preserving aspect ratio and pad to the target size.

    The longer side is scaled to fit the target; the remainder of the canvas
    (bottom/right) is zero-padded.

    Args:
        image (numpy array): HxW or HxWxC image.
        size (tuple[int, int]): (new_width, new_height).
        interpolation: optional cv2 interpolation flag; cv2's default if None.

    Returns:
        (canvas, params): float32 canvas of shape (height, width, ...) and a
        dict with the scale ratio, consumable by invert_resize().
    """
    width, height = size
    old_h, old_w = image.shape[0], image.shape[1]
    if old_w > old_h:
        scale_ratio = width / old_w
        new_w = width
        new_h = int(old_h * scale_ratio)
    else:
        scale_ratio = height / old_h
        new_w = int(old_w * scale_ratio)
        new_h = height

    canvas = np.zeros((height, width) + image.shape[2:], np.float32)

    if new_w != old_w or new_h != old_h:
        if interpolation is None:
            resized = cv2.resize(image, (new_w, new_h))
        else:
            resized = cv2.resize(image, (new_w, new_h), interpolation=interpolation)
    else:
        # BUG FIX: previously the image was only written to the canvas when a
        # resize happened, so an already-fitting image produced an all-zero
        # output. Pass it through unchanged instead.
        resized = image
    canvas[:new_h, :new_w] = resized

    return canvas, {"resize": {"scale_ratio": scale_ratio}}

def invert_resize(boxes, params):
    """
    Map box coordinates from the resized image back to the original scale.

    Divides in place by the scale ratio recorded by resize_padding().
    """
    ratio = params['resize']['scale_ratio']
    boxes /= ratio
    return boxes

def postprocess(boxes, classification, threshold, nms_threshold):
    """
    Filter detections by score and apply class-aware NMS.

    Args:
        boxes (torch.Tensor): [num_boxes, 4] box coordinates.
        classification (torch.Tensor): [num_boxes, num_classes] scores —
            assumed 2-D since we reduce over dim=1; TODO confirm with caller.
        threshold (float): minimum class score to keep a detection.
        nms_threshold (float): IoU threshold for torchvision batched_nms.

    Returns:
        dict: 'rois' [k, 4], 'class_ids' [k], 'scores' [k] as numpy arrays.
    """
    def _empty():
        # Single source of truth for the "no detections" result
        # (was duplicated three times).
        return {
            'rois': np.empty((0, 4)),
            'class_ids': np.empty((0)),
            'scores': np.empty((0)),
        }

    if len(boxes) == 0:
        return _empty()

    # Keep only the best class per box, then threshold on its score.
    scores, classes = classification.max(dim=1)
    keep = scores > threshold
    scores, classes, boxes = scores[keep], classes[keep], boxes[keep]

    if len(boxes) == 0:
        return _empty()

    nms_index = batched_nms(boxes, scores, classes, iou_threshold=nms_threshold)
    if len(nms_index) == 0:
        return _empty()

    return {
        'rois': boxes[nms_index].cpu().numpy(),
        'class_ids': classes[nms_index].cpu().numpy(),
        'scores': scores[nms_index].cpu().numpy(),
    }


def modified_nms(boxes, scores, threshold):
    """
    Greedy NMS with a modified overlap criterion.

    Instead of IoU, overlap is inter / min(area_a, area_b), i.e. the larger
    of the two inter/area ratios — stricter than IoU when one box is nested
    inside another.

    Args:
        boxes (torch.Tensor): [n, 4] in (x1, y1, x2, y2).
        scores (torch.Tensor): [n] confidence scores.
        threshold (float): suppress boxes whose overlap exceeds this value.

    Returns:
        torch.LongTensor: indices of kept boxes, highest score first.
    """
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    areas = (x2 - x1) * (y2 - y1)
    _, order = scores.sort(0, descending=True)

    keep = []
    while order.numel() > 0:
        i = order[0].item()
        keep.append(i)
        if order.numel() == 1:
            break

        # Intersection of the top box with every remaining candidate.
        xx1 = x1[order[1:]].clamp(min=x1[i])
        yy1 = y1[order[1:]].clamp(min=y1[i])
        xx2 = x2[order[1:]].clamp(max=x2[i])
        # BUG FIX: the intersection bottom was computed from x2[order[1:]]
        # instead of y2[order[1:]], corrupting every overlap value.
        yy2 = y2[order[1:]].clamp(max=y2[i])
        inter = (xx2 - xx1).clamp(min=0) * (yy2 - yy1).clamp(min=0)

        overlap = torch.max(inter / areas[i], inter / areas[order[1:]])
        # BUG FIX: squeeze(-1) keeps idx 1-D when exactly one candidate
        # survives; a bare squeeze() produced a 0-dim tensor, and on the
        # next iteration order[0] raised on the resulting 0-dim `order`.
        idx = (overlap <= threshold).nonzero().squeeze(-1)
        if idx.numel() == 0:
            break
        order = order[idx + 1]
    return torch.LongTensor(keep)


def modified_batched_nms(boxes, scores, idxs, threshold):
    """
    Class-aware wrapper around modified_nms.

    Shifts each class's boxes into a disjoint coordinate range so a single
    NMS pass can never suppress a box using one from a different class
    (same trick as torchvision.ops.batched_nms).

    Args:
        boxes (torch.Tensor): [n, 4] box coordinates.
        scores (torch.Tensor): [n] confidence scores.
        idxs (torch.Tensor): [n] integer class ids.
        threshold (float): overlap threshold forwarded to modified_nms.

    Returns:
        torch.Tensor: int64 indices of kept boxes.
    """
    if boxes.numel() == 0:
        return torch.empty((0,), dtype=torch.int64, device=boxes.device)
    # Offset by more than the largest coordinate so classes cannot intersect.
    max_coordinate = boxes.max()
    offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes))
    shifted = boxes + offsets[:, None]
    return modified_nms(shifted, scores, threshold)


def modified_postprocess(boxes, classification, threshold, nms_threshold):
    """
    Filter detections by score and apply class-aware modified NMS.

    Same pipeline as postprocess() but uses modified_batched_nms (overlap
    relative to the smaller box) instead of torchvision's IoU-based NMS.

    Args:
        boxes (torch.Tensor): [num_boxes, 4] box coordinates.
        classification (torch.Tensor): [num_boxes, num_classes] scores —
            assumed 2-D since we reduce over dim=1; TODO confirm with caller.
        threshold (float): minimum class score to keep a detection.
        nms_threshold (float): overlap threshold for modified_batched_nms.

    Returns:
        dict: 'rois' [k, 4], 'class_ids' [k], 'scores' [k] as numpy arrays.
    """
    def _empty():
        # Single source of truth for the "no detections" result
        # (was duplicated three times).
        return {
            'rois': np.empty((0, 4)),
            'class_ids': np.empty((0)),
            'scores': np.empty((0)),
        }

    if len(boxes) == 0:
        return _empty()

    # Keep only the best class per box, then threshold on its score.
    scores, classes = classification.max(dim=1)
    keep = scores > threshold
    scores, classes, boxes = scores[keep], classes[keep], boxes[keep]

    if len(boxes) == 0:
        return _empty()

    nms_index = modified_batched_nms(boxes, scores, classes, threshold=nms_threshold)
    if len(nms_index) == 0:
        return _empty()

    return {
        'rois': boxes[nms_index].cpu().numpy(),
        'class_ids': classes[nms_index].cpu().numpy(),
        'scores': scores[nms_index].cpu().numpy(),
    }
