import numpy as np
import cv2
import torch


class ResizePaddingTransform:
    """Aspect-ratio-preserving resize into a fixed canvas with zero padding,
    plus the inverse mapping for box coordinates."""

    @classmethod
    def resize_padding(cls, image, size):
        """Scale `image` to fit inside `size` and pad the remainder with zeros.

        The image is scaled by its limiting dimension so the result fits
        entirely inside the canvas, then pasted into the top-left corner;
        the bottom/right is left as zero padding.

        Args:
            image: HxW or HxWxC numpy array.
            size: (width, height) of the target canvas.

        Returns:
            tuple: (canvas, params) — `canvas` is a float32 array of shape
            (height, width[, C]) (or the untouched input when no resize is
            needed), and `params` stores the scale ratio under the
            'resize_padding' key for `invert_resize_padding`.
        """
        width, height = size
        old_height, old_width = image.shape[:2]
        if old_height > old_width:
            # Portrait: height is the limiting dimension — scaling by width
            # here would make new_height overflow the canvas.
            scale_ratio = height / old_height
            new_height = height
            new_width = int(old_width * scale_ratio)
        else:
            # Landscape/square: width is the limiting dimension.
            scale_ratio = width / old_width
            new_width = width
            new_height = int(old_height * scale_ratio)

        # Compare like with like: both tuples are (width, height).
        if (new_width, new_height) != (old_width, old_height):
            canvas = np.zeros((height, width) + image.shape[2:], np.float32)
            # cv2.resize expects the target size as (width, height).
            canvas[:new_height, :new_width] = cv2.resize(image, (new_width, new_height))
        else:
            canvas = image

        return canvas, {'resize_padding': {"scale_ratio": scale_ratio}}

    @classmethod
    def invert_resize_padding(cls, boxes, params):
        """Map box coordinates from canvas space back to original-image space.

        Divides `boxes` in place by the scale ratio recorded by
        `resize_padding` (padding sits at the bottom/right, so no translation
        offset is needed) and returns the same array.
        """
        boxes /= params['resize_padding']['scale_ratio']
        return boxes



def box_transform(anchors, regression):
    """Decode box regression offsets against anchors.

    Adapted from https://github.com/google/automl/blob/master/efficientdet/anchors.py

    Args:
        anchors: [batchsize, boxes, (y1, x1, y2, x2)] or [boxes, (y1, x1, y2, x2)]
        regression: [batchsize, boxes, (dy, dx, dh, dw)] or [boxes, (dy, dx, dh, dw)]

    Returns:
        torch.Tensor: decoded boxes stacked as
        [batchsize, boxes, (x1, y1, x2, y2)] or [boxes, (x1, y1, x2, y2)]
        — note the output corner order is x-first.
    """
    ay1, ax1, ay2, ax2 = torch.unbind(anchors, dim=-1)
    dy, dx, dh, dw = torch.unbind(regression, dim=-1)

    anchor_h = ay2 - ay1
    anchor_w = ax2 - ax1

    # Offsets are expressed relative to the anchor's own size.
    center_y = dy * anchor_h + (ay1 + ay2) / 2
    center_x = dx * anchor_w + (ax1 + ax2) / 2

    # Height/width are regressed in log-space.
    half_h = torch.exp(dh) * anchor_h * 0.5
    half_w = torch.exp(dw) * anchor_w * 0.5

    return torch.stack(
        [center_x - half_w, center_y - half_h, center_x + half_w, center_y + half_h],
        dim=-1,
    )
