"""
分析

1. 网络初始化，加载权重参数 net
2. 输入数据预处理(归一化) img_norm
3. 前向传播获取预期输出，计算获取 cx cy pred_w pred_h
   - 切片获取数据 cls, tx, ty, tw, th, one_hot
   - cx = (索引 + tx) * 特征规模大小
   - cy = (索引 + ty) * 特征规模大小
   - pred_w = exp(tw) * anchor_w
   - pred_h = exp(th) * anchor_h
4. 绘制检测框

"""
import os

import cv2
import torch
from torch import nn

from yolov3 import YoLov3
from config import cfg
from util import util


class Detector(nn.Module):
    """YOLOv3 inference wrapper.

    Pipeline:
      1. build the network and load the trained weights
      2. normalize the input image and add a batch dimension
      3. forward pass, then decode (cx, cy, pred_w, pred_h) from each of the
         three feature-map scales
      4. per-class NMS and drawing of the surviving boxes
    """

    def __init__(self):
        super().__init__()
        # 1. Build the network and load the trained weights.
        net = YoLov3()
        net.load_state_dict(torch.load(cfg.WEIGHT_PATH))
        # Inference mode: freezes dropout / batch-norm running statistics.
        net.eval()
        print('loading weights successfully')
        self.net = net

    def normalize(self, frame):
        """Convert an image to a tensor via the project helper and add a
        leading batch dimension -> shape (1, C, H, W).

        NOTE(review): util.t presumably performs the scaling/channel
        reordering — confirm against util's implementation.
        """
        frame_tensor = util.t(frame)
        # unsqueeze: single image -> batch of one for the network.
        return torch.unsqueeze(frame_tensor, dim=0)

    def decode(self, pred_out, feature, threshold):
        """Decode one feature-map prediction into boxes in image coordinates.

        Args:
            pred_out: raw network output, shape (N, 27, H, W) — 3 anchors x
                [conf, tx, ty, tw, th, one-hot class scores].
            feature: feature-map size (e.g. 13 / 26 / 52), also the key into
                cfg.ANCHORS_GROUP.
            threshold: confidence threshold applied to the raw conf channel.

        Returns:
            Tensor of shape (K, 6): [conf, x_min, y_min, x_max, y_max, cls]
            for the K predictions above the threshold.
        """
        # 1. Reorder channels: N 27 H W -> N H W 27 -> N H W 3 9
        pred_out = pred_out.permute((0, 2, 3, 1))
        n, h, w, _ = pred_out.shape
        pred_out = torch.reshape(pred_out, (n, h, w, 3, -1))
        # 2. Indices of (batch, row, col, anchor) whose confidence passes.
        idx = torch.where(pred_out[:, :, :, :, 0] > threshold)
        h_idx = idx[1]        # row index on the feature map
        w_idx = idx[2]        # column index on the feature map
        anchor_num = idx[3]   # which of the 3 anchors fired
        # 3. Advanced indexing with the tuple gathers the matching rows:
        #    label has shape (K, 9) = [conf, tx, ty, tw, th, one-hot cls].
        label = pred_out[idx]
        conf = label[:, 0]
        tx = label[:, 1]
        ty = label[:, 2]
        tw = label[:, 3]
        th = label[:, 4]
        cls = torch.argmax(label[:, 5:], dim=1)
        # 4. Center coordinates in the original image:
        #    stride = original image size / feature-map size,
        #    center = (cell index + predicted offset) * stride.
        scale_factor = cfg.IMG_ORI_SIZE / feature
        cx = (tx + w_idx) * scale_factor
        cy = (ty + h_idx) * scale_factor
        # Anchor sizes for this scale; list -> tensor for advanced indexing.
        anchors = torch.tensor(cfg.ANCHORS_GROUP[feature])
        anchor_w = anchors[anchor_num][:, 0]
        anchor_h = anchors[anchor_num][:, 1]
        # YOLO size decoding: predicted size = exp(t) * anchor size.
        pred_w = torch.exp(tw) * anchor_w
        pred_h = torch.exp(th) * anchor_h
        # 5. Center + size -> top-left / bottom-right corners.
        x_min = cx - pred_w / 2
        y_min = cy - pred_h / 2
        x_max = cx + pred_w / 2
        y_max = cy + pred_h / 2
        # Stack into [conf, x_min, y_min, x_max, y_max, cls] rows
        # (type promotion casts the integer cls to float).
        return torch.stack((conf, x_min, y_min, x_max, y_max, cls), dim=1)

    def show_image(self, img, x1, y1, x2, y2, cls):
        """Draw one labeled box on `img` (in place) and display it briefly."""
        cv2.rectangle(img,
                      (int(x1), int(y1)),
                      (int(x2), int(y2)),
                      color=cfg.COLOR_DIC[int(cls)],
                      thickness=2)
        cv2.putText(img,
                    text=cfg.CLS_DIC[int(cls)],
                    org=(int(x1) + 5, int(y1) + 10),
                    color=cfg.COLOR_DIC[int(cls)],
                    fontScale=0.5,
                    fontFace=cv2.FONT_ITALIC)
        cv2.imshow('img', img)
        cv2.waitKey(25)

    def forward(self, img, threshold):
        """Run the network on one image and return the decoded boxes from all
        three scales as a single (K, 6) tensor:
        [conf, x_min, y_min, x_max, y_max, cls]."""
        img_norm = self.normalize(img)
        # Inference only — no gradient tracking needed.
        with torch.no_grad():
            pred_out_13, pred_out_26, pred_out_52 = self.net(img_norm)

        f_big, f_mid, f_sml = cfg.ANCHORS_GROUP.keys()
        box_13 = self.decode(pred_out_13, f_big, threshold)
        box_26 = self.decode(pred_out_26, f_mid, threshold)
        box_52 = self.decode(pred_out_52, f_sml, threshold)
        # BUG FIX: previously returned only box_52, silently discarding the
        # detections from the 13x13 and 26x26 scales.
        return torch.cat((box_13, box_26, box_52), dim=0)

    def run(self, img_names):
        """Detect objects on each image under cfg.BASE_IMG_PATH and display
        the per-class-NMS-filtered boxes."""
        for img_name in img_names:
            img_path = os.path.join(cfg.BASE_IMG_PATH, img_name)
            img = cv2.imread(img_path)
            # BUG FIX: call self instead of the module-level global `detect`,
            # so the method works for any Detector instance.
            detect_out = self(img, cfg.THRESHOLD_BOX)
            if len(detect_out) == 0:
                continue

            # Per-class NMS so boxes of different classes never suppress
            # each other.
            filter_boxes = []
            for cls in range(4):  # TODO: derive class count from cfg instead of hard-coding 4
                mask_cls = detect_out[..., -1] == cls
                _boxes = detect_out[mask_cls]
                boxes = util.nms(_boxes, cfg.THRESHOLD_NMS)
                if len(boxes) == 0:
                    continue
                filter_boxes.append(boxes)
            for boxes in filter_boxes:
                for box in boxes:
                    conf, x1, y1, x2, y2, cls = box
                    self.show_image(img, x1, y1, x2, y2, cls)


if __name__ == '__main__':
    detect = Detector()
    # Run detection on every image in the configured directory.
    img_names = os.listdir(cfg.BASE_IMG_PATH)
    detect.run(img_names)
