import cv2
import numpy as np
import paddle
from paddle.nn.functional import sigmoid
def transImg(img, outshape=(416, 416)):
    """Letterbox *img* to *outshape*: scale to fit, then zero-pad to size.

    Args:
        img: HWC image array (as returned by cv2.imread).
        outshape: target (dim0, dim1) size; default (416, 416).

    Returns:
        The rescaled image, zero-padded symmetrically along the shorter
        axis so its first two dimensions equal *outshape*.
    """
    dim0, dim1, _ = img.shape
    ratio0 = outshape[0] / dim0
    ratio1 = outshape[1] / dim1
    scale = min(ratio0, ratio1)
    img = cv2.resize(img, fx=scale, fy=scale, dsize=(0, 0))
    if ratio0 == scale:
        # axis 0 fills the target exactly; pad axis 1 around the centre
        gap = outshape[1] - img.shape[1]
        pads = ((0, 0), (gap // 2, gap - gap // 2), (0, 0))
    else:
        # axis 1 fills the target exactly; pad axis 0 around the centre
        gap = outshape[0] - img.shape[0]
        pads = ((gap // 2, gap - gap // 2), (0, 0), (0, 0))
    return np.pad(img, pads, 'constant')

def head(feats, anchors, cal_loss=False):
    """Decode one raw YOLO feature map into normalized box parameters.

    Args:
        feats: raw network output, NCHW tensor with C = 3 * (5 + numclasses);
               the spatial map is assumed square (e.g. 13x13).
        anchors: 3 (w, h) anchor pairs for this scale.
        cal_loss: when True, return intermediates needed by the loss.

    Returns:
        (grid, feats, box_xy, box_wh) when cal_loss is True, else
        (box_xy, box_wh, box_confidence, box_class_probs); xy/wh are
        normalized to the feature-map extent.
    """
    feats = feats.transpose((0, 2, 3, 1))  # NCHW -> NHWC
    anchors = paddle.to_tensor(anchors)
    batchsize = feats.shape[0]
    anchors = anchors.reshape((1, 1, 1, 3, 2))
    grid_shape = feats.shape[1]  # square map: height == width
    grid_y = paddle.tile(paddle.reshape(paddle.arange(0, grid_shape), (-1, 1, 1, 1)), (1, grid_shape, 1, 1))
    grid_x = paddle.tile(paddle.reshape(paddle.arange(0, grid_shape), [1, -1, 1, 1]), [grid_shape, 1, 1, 1])
    grid = paddle.concat([grid_x, grid_y], axis=-1)  # (grid, grid, 1, 2)
    # arange produces integers; cast so arithmetic with float feats is well-defined
    grid = paddle.cast(grid, feats.dtype)
    anchors = paddle.cast(anchors, feats.dtype)
    feats = feats.reshape((batchsize, grid_shape, grid_shape, 3, 5 + 4))  # (b, g, g, 3, 5+numclasses)
    # BUG FIX: sigmoid must squash the raw tx,ty BEFORE the grid-cell offset is
    # added; the original sigmoid(feats + grid) saturates for any cell offset > 0.
    box_xy = (sigmoid(feats[..., :2]) + grid) / grid_shape
    # NOTE(review): anchors are divided by grid_shape here, implying they are
    # expressed in feature-map units — confirm against the anchor file.
    box_wh = paddle.exp(feats[..., 2:4]) * anchors / grid_shape
    box_confidence = sigmoid(feats[..., 4:5])
    box_class_probs = sigmoid(feats[..., 5:])
    if cal_loss is True:
        return grid, feats, box_xy, box_wh
    else:
        return box_xy, box_wh, box_confidence, box_class_probs
# Map boxes back to their true positions on the original image (the image was letterbox-preprocessed)
def correct_boxes(box_xy, box_wh, image_shape, input_shape=(416, 416)):
    """Undo the letterbox transform: map normalized boxes to original-image pixels.

    Args:
        box_xy: (..., 2) box centres as (x, y), normalized to the model input.
        box_wh: (..., 2) box sizes as (w, h), normalized to the model input.
        image_shape: original image shape (h, w).
        input_shape: network input shape (h, w); default (416, 416).

    Returns:
        (..., 4) boxes as (y_min, x_min, y_max, x_max) in original-image pixels.
    """
    image_shape = np.asarray(image_shape, dtype='float64')
    input_shape = np.asarray(input_shape, dtype='float64')
    # size of the scaled image content inside the letterboxed input
    new_shape = np.round(image_shape * np.min(input_shape / image_shape))
    offset = (input_shape - new_shape) / 2.0 / input_shape  # padding fraction per side
    scale = input_shape / new_shape
    box_yx = (np.asarray(box_xy)[..., ::-1] - offset) * scale
    # plain multiply (not *=) so we never mutate the caller's box_wh through a view
    box_hw = np.asarray(box_wh)[..., ::-1] * scale
    box_min = box_yx - (box_hw / 2.0)
    # BUG FIX: original used box_wh here, mixing the unswapped (w, h) order into
    # the max corner while the min corner used (h, w).
    box_max = box_yx + (box_hw / 2.0)
    boxes = np.concatenate([box_min[..., 0:2], box_max[..., 0:2]], axis=-1)
    boxes *= np.concatenate([image_shape, image_shape])
    return boxes
# Return the boxes and each box's per-class probability scores
def box_and_score(feats, anchor, numclasses, imgshape):
    """Decode one feature map into flat pixel boxes and per-class scores.

    Args:
        feats: raw network output for one scale.
        anchor: the 3 anchors for this scale.
        numclasses: number of object classes.
        imgshape: original image shape (h, w).

    Returns:
        (boxes, boxes_scores): boxes reshaped to (-1, 4) in original-image
        coordinates; scores reshaped to (-1, numclasses), each entry being
        objectness * class probability.
    """
    xy, wh, confidence, class_probs = head(feats, anchor)
    boxes = correct_boxes(xy, wh, imgshape).reshape((-1, 4))
    scores = (confidence * class_probs).reshape((-1, numclasses))
    return boxes, scores

def nms(bboxs, thresh):
    """Greedy non-maximum suppression.

    Args:
        bboxs: (N, 5) array; columns are [x1, y1, x2, y2, score] with
               (x1, y1) the top-left and (x2, y2) the bottom-right corner.
        thresh: IoU threshold — boxes overlapping a kept box by more than
                this are discarded.

    Returns:
        List of row indices of the kept boxes, in descending score order.
    """
    xs1, ys1 = bboxs[:, 0], bboxs[:, 1]
    xs2, ys2 = bboxs[:, 2], bboxs[:, 3]
    # +1 follows the inclusive-pixel box convention used in this file
    box_areas = (xs2 - xs1 + 1) * (ys2 - ys1 + 1)
    order = bboxs[:, 4].argsort()[::-1]  # candidates, best score first
    keep = []
    while order.size > 0:
        best, rest = order[0], order[1:]
        keep.append(best)  # highest remaining score is always kept
        # intersection of the kept box with every remaining candidate
        ix1 = np.maximum(xs1[best], xs1[rest])
        iy1 = np.maximum(ys1[best], ys1[rest])
        ix2 = np.minimum(xs2[best], xs2[rest])
        iy2 = np.minimum(ys2[best], ys2[rest])
        inter = np.maximum(0, ix2 - ix1 + 1) * np.maximum(0, iy2 - iy1 + 1)
        ious = inter / (box_areas[best] + box_areas[rest] - inter)
        # survivors: candidates that do not overlap the kept box too much
        order = rest[np.where(ious <= thresh)[0]]
    return keep



def eval(output, anchor, imgshape, numclasses, score_threshold=0.7, iou_threshold=0.5, maxbox=20):
    """Turn the 3-scale network output into final per-class detections.

    Args:
        output: sequence of 3 raw feature maps (one per detection scale).
        anchor: array of 9 (w, h) anchors, indexable by a list of rows.
        imgshape: original image shape (h, w).
        numclasses: number of object classes.
        score_threshold: minimum class score to keep a box.
        iou_threshold: IoU threshold passed to nms.
        maxbox: maximum number of detections kept per class.

    Returns:
        (all_box, all_score, all_class): parallel lists of box coordinates,
        scores, and int32 class ids for every surviving detection.
    """
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
    box_parts = []
    score_parts = []
    for i in range(3):
        _box, _score = box_and_score(output[i], anchor[anchor_mask[i]], numclasses, imgshape)
        # np.asarray accepts ndarrays and any tensor exposing __array__
        box_parts.append(np.asarray(_box).reshape(-1, 4))
        score_parts.append(np.asarray(_score).reshape(-1, numclasses))
    # BUG FIX: the original extend()ed into Python lists and then indexed them
    # with boolean arrays / [:, i], which raises TypeError; stack into arrays.
    box = np.concatenate(box_parts, axis=0)          # (n, 4)
    box_score = np.concatenate(score_parts, axis=0)  # (n, numclasses)
    mask = box_score >= score_threshold
    all_box = []
    all_score = []
    all_class = []
    for i in range(numclasses):
        class_box = box[mask[:, i]].reshape((-1, 4))              # (k, 4)
        class_box_score = box_score[:, i][mask[:, i]].reshape(-1, 1)  # (k, 1)
        # BUG FIX: nms reads scores from column 4, so the score column must be
        # appended to the coordinates (the original passed bare (k, 4) boxes).
        dets = np.concatenate([class_box, class_box_score], axis=-1)
        # BUG FIX: maxbox was accepted but never applied; cap detections per class.
        index = nms(dets, iou_threshold)[:maxbox]
        class_box = class_box[index]
        class_box_score = class_box_score[index]
        classes = np.ones_like(class_box_score, 'int32') * i
        all_box.extend(class_box)
        all_score.extend(class_box_score)
        all_class.extend(classes)
    return all_box, all_score, all_class



