import torch
from torchvision import transforms
import cfg.cfg_voc2012 as cfg
import models
import utils
import cv2
import numpy as np
import os

class Detector(torch.nn.Module):
    """YOLOv3 inference wrapper.

    Loads trained weights into a YoloV3Net and decodes the three
    feature-map scales (13/26/52) into bounding boxes.
    """

    def __init__(self, weights_path):
        """Build the network and restore weights.

        :param weights_path: checkpoint path; expected keys:
            'state_dict', 'epoch', 'best_testloss'.
        """
        super().__init__()
        self.net = models.YoloV3Net()
        # map_location='cpu' so a checkpoint saved on GPU also loads on a
        # CPU-only machine; load_state_dict then copies onto the model.
        checkpoint = torch.load(weights_path, map_location="cpu")
        self.net.load_state_dict(checkpoint['state_dict'])
        self.epoch = checkpoint['epoch']  # kept for inspection/logging

        print("load weights ok , the loss is:", checkpoint["best_testloss"])

        self.net.eval()

    def forward(self, img, thresh, anchors):
        """Run the network on *img* and decode boxes at all three scales.

        :param img: input batch, shape (N, 3, 416, 416) — TODO confirm
        :param thresh: objectness threshold used to filter grid cells
        :param anchors: mapping from feature-map size (13/26/52) to its
            list of (w, h) anchor pairs
        :return: list of boxes [img_idx, cls, conf, cx, cy, w, h]
        """
        # Pure inference: skip building the autograd graph.
        with torch.no_grad():
            out_13, out_26, out_52 = self.net(img)

            # Decode each scale; stride = 416 / feature-map size.
            idxs, vects = self._filter(out_13, thresh)
            box_13 = self.get_box(idxs, vects, 32, anchors[13])
            idxs, vects = self._filter(out_26, thresh)
            box_26 = self.get_box(idxs, vects, 16, anchors[26])
            idxs, vects = self._filter(out_52, thresh)
            box_52 = self.get_box(idxs, vects, 8, anchors[52])

            pred_boxes = torch.cat([box_13, box_26, box_52], dim=0)
            # NOTE(review): NMS is deliberately bypassed here (the original
            # returned before calling it).  To enable it, call
            # self.allNMS(pred_boxes, img.shape[0], cfg.class_number).
            return pred_boxes.tolist()

    def _filter(self, feature_map, thresh):
        """Select grid cells whose objectness exceeds *thresh*.

        :param feature_map: raw network output, shape (N, C, H, W) with
            C = 3 * (5 + num_classes)
        :param thresh: objectness threshold (compared after sigmoid)
        :return: (idxs, vects) — idxs is (K, 4) holding
            [batch, row, col, anchor]; vects is (K, 5 + num_classes)
        """
        # NCHW -> NHWC -> (N, H, W, 3, 5 + num_classes)
        n, _, h, w = feature_map.shape
        feature_adj = feature_map.permute(0, 2, 3, 1).reshape(n, h, w, 3, -1)
        # Sigmoid on objectness and the two center offsets.
        feature_adj[..., 0:3] = torch.sigmoid(feature_adj[..., 0:3])

        # Keep only cells above the confidence threshold.
        mask = feature_adj[..., 0] > thresh
        idxs = mask.nonzero()  # (K, 4): [batch, row, col, anchor]
        return idxs, feature_adj[mask]

    def get_box(self, idxs, vects, size, anchors):
        """Decode filtered cells into absolute boxes.

        :param idxs: (K, 4) indices [batch, row, col, anchor]
        :param vects: (K, 5+num_classes) vectors [conf, tx, ty, tw, th, cls...]
        :param size: stride of this scale (32 / 16 / 8)
        :param anchors: (w, h) anchor pairs for this scale
        :return: (K, 7) tensor [img_idx, cls, conf, cx, cy, w, h]
        """
        n = idxs[:, 0]  # which image in the batch
        a = idxs[:, 3]  # which anchor

        # Center = (sigmoid offset + cell index) * stride.
        cent_x = (vects[:, 1] + idxs[:, 2]) * size
        cent_y = (vects[:, 2] + idxs[:, 1]) * size

        # Width/height = exp(t) * anchor size.
        anchors = torch.tensor(anchors)
        w = torch.exp(vects[:, 3]) * anchors[a, 0]
        h = torch.exp(vects[:, 4]) * anchors[a, 1]

        # Predicted class = argmax over the class scores.
        cls = torch.argmax(vects[:, 5:], dim=1)

        return torch.stack([n.float(), cls.float(), vects[:, 0], cent_x, cent_y, w, h], dim=1)

    def allNMS(self, boxes, pic_num, cls_num):
        """Per-image, per-class non-maximum suppression.

        :param boxes: (K, 7) tensor [img_idx, cls, conf, cx, cy, w, h]
        :param pic_num: number of images in the batch
        :param cls_num: number of classes
        :return: list of kept boxes as produced by utils.myNMS
        """
        # Group boxes by image index.
        pic_histc = torch.histc(boxes[:, 0], bins=pic_num, min=0, max=pic_num - 1)
        newboxes = []
        for i, num in enumerate(pic_histc):
            if num:
                box_oneitem = boxes[boxes[:, 0].int() == i]
                # Within one image, group by class.
                cls_histc = torch.histc(box_oneitem[:, 1], bins=cls_num, min=0, max=cls_num - 1)
                for j, box_num in enumerate(cls_histc):
                    if box_num:
                        box_onecls = box_oneitem[box_oneitem[:, 1].int() == j, 2:]

                        # myNMS expects [conf, x1, y1, x2, y2], so convert
                        # center/size to corner coordinates first.
                        x1 = box_onecls[:, 1] - box_onecls[:, 3] / 2
                        y1 = box_onecls[:, 2] - box_onecls[:, 4] / 2
                        x2 = box_onecls[:, 1] + box_onecls[:, 3] / 2
                        y2 = box_onecls[:, 2] + box_onecls[:, 4] / 2

                        ret_box = utils.myNMS(torch.stack([box_onecls[:, 0], x1, y1, x2, y2], dim=1), iouthre=0.3)
                        newboxes.extend(ret_box)
        return newboxes


if __name__ == '__main__':
    img_handler = transforms.ToTensor()
    det = Detector(r"./weights/case_v0_4.pth")

    for filename in os.listdir("./imges"):
        # BUG FIX: the original read a hard-coded broken path instead of
        # the file returned by os.listdir.
        img = cv2.imread(fr"./imges/{filename}")
        if img is None:
            # Not a readable image (e.g. stray file); skip instead of
            # crashing on img.shape below.
            continue

        # Letterbox: pad to a centered square, then resize to the
        # network's 416x416 input.
        h, w = img.shape[0], img.shape[1]
        print(h, w)
        max_side = max(h, w)
        new_img = np.zeros((max_side, max_side, 3), dtype=np.uint8)
        new_img[(max_side - h) // 2:(max_side - h) // 2 + h,
                (max_side - w) // 2:(max_side - w) // 2 + w] = img

        new_img = cv2.resize(new_img, (416, 416))
        showimg = new_img.copy()
        # OpenCV loads BGR; the network expects RGB.
        new_img = cv2.cvtColor(new_img, cv2.COLOR_BGR2RGB)
        new_img = img_handler(new_img)
        print(new_img.shape)
        new_img = new_img.reshape(-1, 3, 416, 416)

        boxes = det(new_img, 0, cfg.anchor_box)
        print(len(boxes))
        drawn = 0
        for box in boxes:
            # box = [img_idx, cls, conf, cx, cy, w, h]
            if box[2] < 0.8:  # confidence cut-off for display
                continue

            print(box)
            drawn += 1
            # Convert center/size to corner coordinates for drawing.
            x1 = int(box[3] - box[5] / 2)
            y1 = int(box[4] - box[6] / 2)
            x2 = int(box[3] + box[5] / 2)
            y2 = int(box[4] + box[6] / 2)

            # Only draw boxes fully inside the 416x416 canvas.
            if 0 < x1 < 416 and 0 < y1 < 416 and 0 < x2 < 416 and 0 < y2 < 416:
                cv2.rectangle(showimg, (x1, y1), (x2, y2), (0, 0, 255), thickness=1)
                cv2.putText(showimg, f"{cfg.classes[int(box[1])]}", (x1, y1),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 100, 0), 1)
        print(drawn)
        cv2.imshow("show img", showimg)
        cv2.waitKey(0)
