import torch
import numpy as np
import cv2

# TORCH_1_10 = check_version(torch.__version__, '1.10.0')

def dist2xyxy(boxes, size, xyxy=True, device=torch.device('cpu')):
    """Build coordinate grids for a (sx, sy)-sized map.

    NOTE(review): this function looks unfinished — it always returns None,
    the `boxes`, `xyxy` and `device` arguments are never used, and the grid
    tensors built by torch.meshgrid are immediately discarded. The name
    suggests it was meant to decode distances into xyxy boxes (compare
    dist2bbox below); confirm intent before relying on it.
    """
    sx, sy = size
    x = torch.arange(end=sx)
    y = torch.arange(end=sy)
    # NOTE(review): overwrites sx/sy with the meshgrid result and then falls
    # off the end of the function — no return statement, callers get None.
    sx,  sy = torch.meshgrid(x, y)

def dist2bbox(distance, anchor_points, xywh=True, dim=-1):
    """Decode (left, top, right, bottom) distances into boxes.

    Args:
        distance: tensor holding 4 ltrb values along `dim`.
        anchor_points: (x, y) reference points the distances are measured from.
        xywh: when True return center-size boxes, otherwise corner boxes.
        dim: dimension along which the 4 box values are laid out.

    Returns:
        Boxes in (cx, cy, w, h) or (x1, y1, x2, y2) layout along `dim`.
    """
    lt_dist, rb_dist = torch.split(distance, 2, dim)
    top_left = anchor_points - lt_dist
    bottom_right = anchor_points + rb_dist
    if not xywh:
        return torch.cat((top_left, bottom_right), dim)  # xyxy bbox
    center = (top_left + bottom_right) / 2
    extent = bottom_right - top_left
    return torch.cat((center, extent), dim)  # xywh bbox

def make_anchors(feats, strides, grid_cell_offset=0.5):
    """Generate anchor points and per-anchor strides from feature maps.

    Args:
        feats: sequence of feature tensors shaped (batch, channels, h, w),
            one per detection level.
        strides: per-level stride values, same length as `feats`.
        grid_cell_offset: sub-cell offset added to each grid coordinate
            (0.5 places the anchor at the cell center).

    Returns:
        Tuple (anchor_points, stride_tensor): anchor_points has shape
        (sum(h*w), 2) holding (x, y) per cell; stride_tensor has shape
        (sum(h*w), 1) holding that cell's level stride.
    """
    anchor_points, stride_tensor = [], []
    assert feats is not None
    dtype, device = feats[0].dtype, feats[0].device
    for i, stride in enumerate(strides):
        _, _, h, w = feats[i].shape
        sx = torch.arange(end=w, device=device, dtype=dtype) + grid_cell_offset  # shift x
        sy = torch.arange(end=h, device=device, dtype=dtype) + grid_cell_offset  # shift y
        # torch >= 1.10 wants an explicit indexing kwarg (calling without it
        # emits a UserWarning); older versions reject the kwarg but already
        # default to 'ij', so fall back for them.
        try:
            sy, sx = torch.meshgrid(sy, sx, indexing='ij')
        except TypeError:
            sy, sx = torch.meshgrid(sy, sx)
        anchor_points.append(torch.stack((sx, sy), -1).view(-1, 2))
        stride_tensor.append(torch.full((h * w, 1), stride, dtype=dtype, device=device))
    return torch.cat(anchor_points), torch.cat(stride_tensor)

# src is a numpy color image of shape (h, w, c)
def image_preprocess(src, out_size=(640, 640), standard=True):
    """Letterbox a color image onto a gray canvas of shape out_size.

    The image is resized with preserved aspect ratio, centered on a
    128-gray canvas of (out_size[0], out_size[1], c), and, when
    `standard` is True, converted to float64 in [0, 1].
    """
    pad_value = 128
    img = src.copy()
    h, w, c = img.shape
    ratio = min(out_size[0] / h, out_size[1] / w)
    new_h, new_w = int(ratio * h), int(ratio * w)
    img = cv2.resize(img, (new_w, new_h))

    canvas = np.full((out_size[0], out_size[1], c), pad_value, dtype=np.uint8)
    top = abs(out_size[0] - new_h) // 2
    left = abs(out_size[1] - new_w) // 2
    # Slice by the resized image's own extent so odd padding can't mismatch.
    canvas[top:top + new_h, left:left + new_w, :] = img

    if standard:
        canvas = canvas.astype(np.float64) / 255.0
    return canvas

# annot.shape = (batch, 6), rows laid out as (idx, cls, x, y, x, y) — a numpy array
# sizes are (h, w, ...) tuples, e.g. taken directly from a cv2 Mat's .shape
def annotation_resize(annot, src_size, dst_size, xyxy=True):
    """Map box annotations through the same letterbox transform as the image.

    Mutates `annot` in place (and also returns it): columns 2+ are scaled
    by the letterbox ratio, then shifted by the centering padding.

    Args:
        annot: (batch, 6) array of (idx, cls, box...) rows.
        src_size: original image size as (h, w, ...).
        dst_size: letterboxed image size as (h, w, ...).
        xyxy: True for corner boxes; False for xywh, where only the point
            columns (2, 3) receive the padding offset, not the size columns.
    """
    ratio = min(dst_size[0] / src_size[0], dst_size[1] / src_size[1])
    pad_x = abs(dst_size[1] - int(ratio * src_size[1])) // 2
    pad_y = abs(dst_size[0] - int(ratio * src_size[0])) // 2
    annot[:, 2:] = annot[:, 2:] * ratio
    if xyxy:
        annot[:, [2, 4]] += pad_x
        annot[:, [3, 5]] += pad_y
    else:
        # xywh: only the position shifts; width/height are scale-only.
        annot[:, 2] += pad_x
        annot[:, 3] += pad_y
    return annot

def xywh2xyxy(annot):
    """Convert columns 4:6 from (w, h) to (x2, y2) in place and return annot.

    NOTE(review): this computes x2 = x + w, y2 = y + h, i.e. it treats
    (x, y) in columns 2:4 as the top-left corner. If the annotations use
    center-format xywh, the x - w/2 variant is needed — confirm with callers.
    """
    annot[:, 4:6] = annot[:, 2:4] + annot[:, 4:6]
    return annot