import torch
import torchvision
import numpy as np
import cv2 as cv

# Per-level anchor boxes as (w, h) in input-image pixels, 3 anchors per level.
# Values are the standard YOLOv5 COCO anchors for strides 8 / 16 / 32.
# Nested so that anchor_grid[i] has shape (1, 3, 1, 1, 2) and broadcasts
# directly against the (bs, 3, ny, nx, 2) wh slice inside decode_box.
anchor_grid = torch.tensor([[[[[[10., 13.]]], [[[16., 30.]]], [[[33., 23.]]]]],
                            [[[[[30., 61.]]], [[[62., 45.]]], [[[59., 119.]]]]],
                            [[[[[116., 90.]]], [[[156., 198.]]], [[[373., 326.]]]]]])


def decode_box(x):
    """Decode raw YOLO detection-head outputs and run per-class NMS.

    Args:
        x: list of 3 feature-map tensors (one per stride level 8/16/32),
           each shaped (bs, 3*(nc+5), ny, nx), where nc is the class count.

    Returns:
        List of length bs; element i is an (n, 6) tensor of
        [x1, y1, x2, y2, conf, cls] detections for image i (n may be 0).
    """
    nc = int(x[0].shape[1] / 3 - 5)  # number of classes: channels = 3*(nc+5)
    prediction = []
    stride = [8, 16, 32]  # downsample factor of each detection level
    for i in range(3):
        pred = x[i]
        bs, _, ny, nx = pred.shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
        pred = pred.view(bs, 3, (nc + 5), ny, nx).permute(0, 1, 3, 4, 2).contiguous()
        yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
        grid = torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
        pred = pred.sigmoid()
        pred[..., 0:2] = (pred[..., 0:2] * 2. - 0.5 + grid) * stride[i]  # xy in pixels
        pred[..., 2:4] = (pred[..., 2:4] * 2) ** 2 * anchor_grid[i]  # wh in pixels
        # BUG FIX: was hard-coded 85 (i.e. COCO's nc=80); use nc + 5 so the
        # decoder works for any class count, consistent with the view() above.
        prediction.append(pred.view(bs, -1, nc + 5))
    prediction = torch.cat(prediction, dim=1)
    conf_thres = 0.25  # confidence threshold
    iou_thres = 0.45  # NMS IoU threshold
    max_wh = 4096  # max box width/height used as the per-class NMS offset
    max_det = 300  # maximum number of detections per image
    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
    xc = prediction[..., 4] > conf_thres  # objectness candidates
    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        x = x[xc[xi]]  # keep candidates only
        if not x.shape[0]:
            continue
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf
        box = xywh2xyxy(x[:, :4])
        conf, j = x[:, 5:].max(1, keepdim=True)  # best class per box
        x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]  # xyxy,conf,cls
        n = x.shape[0]
        if not n:  # no boxes
            continue
        elif n > max_nms:  # excess boxes: keep the most confident
            x = x[x[:, 4].argsort(descending=True)[:max_nms]]
        # Offset boxes by class index so NMS never merges across classes.
        c = x[:, 5:6] * max_wh
        boxes, scores = x[:, :4] + c, x[:, 4]
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        output[xi] = x[i]
    return output


def xywh2xyxy(x):
    """Convert nx4 boxes from center format [x, y, w, h] to corner format
    [x1, y1, x2, y2] (xy1 = top-left, xy2 = bottom-right).

    Accepts a torch tensor or a numpy array; returns a new object of the
    same kind, leaving the input untouched.
    """
    out = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    half_w = x[:, 2] / 2
    half_h = x[:, 3] / 2
    out[:, 0] = x[:, 0] - half_w  # top-left x
    out[:, 1] = x[:, 1] - half_h  # top-left y
    out[:, 2] = x[:, 0] + half_w  # bottom-right x
    out[:, 3] = x[:, 1] + half_h  # bottom-right y
    return out


def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    """Rescale xyxy coords from the letterboxed img1_shape (h, w) back to the
    original img0_shape (h, w), then clamp them to the image bounds.

    Mutates `coords` in place (torch tensor expected, given the clamp_ calls)
    and also returns it. If ratio_pad is None, gain and padding are recomputed
    from the two shapes; otherwise ratio_pad = ((gain, ...), (pad_x, pad_y)).
    """
    if ratio_pad is None:  # derive letterbox gain and padding from shapes
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])
        pad_x = (img1_shape[1] - img0_shape[1] * gain) / 2
        pad_y = (img1_shape[0] - img0_shape[0] * gain) / 2
    else:
        gain = ratio_pad[0][0]
        pad_x, pad_y = ratio_pad[1][0], ratio_pad[1][1]
    # Undo padding, then undo scaling.
    coords[:, [0, 2]] -= pad_x
    coords[:, [1, 3]] -= pad_y
    coords[:, :4] /= gain
    # Clamp each coordinate column to its axis bound: x to width, y to height.
    for col, bound in ((0, img0_shape[1]), (1, img0_shape[0]),
                       (2, img0_shape[1]), (3, img0_shape[0])):
        coords[:, col].clamp_(0, bound)
    return coords


def plot_one_box(x, img, color, label=None, line_thickness=3):
    """Draw one xyxy bounding box (and optional label) on `img` in place.

    Args:
        x: box as [x1, y1, x2, y2] — any indexable of 4 numbers.
        img: image array to draw on (modified in place).
        color: BGR color tuple for the rectangle.
        label: optional text drawn at the box's top-left corner.
        line_thickness: rectangle line thickness in pixels.
    """
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv.rectangle(img, c1, c2, color=color, thickness=line_thickness, lineType=cv.LINE_AA)
    # BUG FIX: putText was called unconditionally, but label defaults to None
    # and cv2 rejects a None text argument — only draw when a label is given.
    if label:
        cv.putText(img, label, c1, cv.FONT_HERSHEY_PLAIN, 1.2, (0, 0, 255), 1)
