import cv2
import mmcv
import numpy as np
import onnxruntime
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.runner import load_checkpoint

import models  # noqa: F401,F403
from mmdet.core import multiclass_nms, preprocess_example_input
from mmdet.datasets.voc import VOCDataset
from mmdet.models import build_detector


def visualize_bbox(img, bbox, class_name, thickness=2):
    """Draw one bounding box and a filled class/score label onto ``img``.

    Args:
        img: BGR image array; modified in place and also returned.
        bbox: either ``(x1, y1, x2, y2)`` or ``(x1, y1, x2, y2, score)``.
        class_name: text label drawn above the box.
        thickness: rectangle line width in pixels.

    Returns:
        The annotated image (same object as ``img``).
    """
    BOX_COLOR = (255, 0, 0)  # Red
    TEXT_COLOR = (255, 255, 255)  # White
    if len(bbox) >= 5:
        # Last element is the detection confidence score.
        x_min, y_min, x_max, y_max = (int(v) for v in bbox[:-1])
        str_info = f"{class_name} {bbox[-1]:.2f}"
    else:
        assert len(bbox) == 4
        x_min, y_min, x_max, y_max = (int(v) for v in bbox)
        str_info = f"{class_name}"
    cv2.rectangle(img, (x_min, y_min), (x_max, y_max),
                  color=BOX_COLOR,
                  thickness=thickness)

    (text_width, text_height), _ = cv2.getTextSize(
        str_info, cv2.FONT_HERSHEY_SIMPLEX, 0.35, 1)
    # Filled background bar so the white label text stays readable.
    cv2.rectangle(img, (x_min, y_min - int(1.3 * text_height)),
                  (x_min + text_width, y_min), BOX_COLOR, -1)
    cv2.putText(
        img,
        text=str_info,
        org=(x_min, y_min - int(0.3 * text_height)),
        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
        fontScale=0.35,
        color=TEXT_COLOR,
        lineType=cv2.LINE_AA,
    )
    return img


class ONNXModel(nn.Module):
    """Export-friendly wrapper around a GFL detector.

    Flattens the per-level classification maps and decodes the
    distribution-style bbox regressions into absolute xyxy boxes, so the
    forward pass yields two flat tensors suitable as ONNX graph outputs.
    """

    def __init__(self,
                 cfg_file_path: str,
                 checkpoint_file_path: str,
                 strides: tuple = (8, 16, 32),
                 reg_max: int = 16):
        super().__init__()
        self.model = self.create_model(cfg_file_path, checkpoint_file_path)
        self.strides = strides
        self.reg_max = reg_max
        # Integral "project" vector [0, 1, ..., reg_max]: turns the predicted
        # per-side distance distribution into its expected value.
        self.project = torch.linspace(0,
                                      reg_max,
                                      reg_max + 1,
                                      dtype=torch.float32)

    @staticmethod
    def create_model(cfg_file_path, checkpoint_file_path):
        """Build the detector from config, load weights, return it on CPU in eval mode."""
        cfg = mmcv.Config.fromfile(cfg_file_path)
        cfg.model.pretrained = None
        cfg.data.test.test_mode = True
        detector = build_detector(cfg.model,
                                  train_cfg=None,
                                  test_cfg=cfg.test_cfg)
        load_checkpoint(detector, checkpoint_file_path, map_location="cpu")
        return detector.cpu().eval()

    @staticmethod
    def cls_score_post_process(cls_score):
        """Flatten an NCHW score map to (N*H*W, C) and apply sigmoid."""
        flat = cls_score.permute(0, 2, 3, 1)
        flat = flat.reshape(-1, flat.size(-1))
        return flat.sigmoid()

    def bbox_pred_post_process(self, bbox_pred, feat_h, feat_w, stride):
        """Decode distribution-based regression for one level into xyxy boxes.

        Builds the grid of anchor-point centers in input-image pixels, takes
        the softmax expectation of each side's distance distribution, scales
        it by the level stride and offsets it from the centers.
        """
        grid_y, grid_x = torch.meshgrid(
            torch.arange(feat_h) * stride,
            torch.arange(feat_w) * stride)
        centers = torch.stack(
            [grid_x.reshape(-1), grid_y.reshape(-1)], dim=1)
        centers = centers.type(torch.float32)

        # (N, 4*(reg_max+1), H, W) -> (N*H*W*4, reg_max+1) distributions.
        dist = bbox_pred.permute(0, 2, 3, 1).reshape(-1, self.reg_max + 1)
        dist = F.linear(F.softmax(dist, dim=1), self.project)
        dist = dist.reshape(-1, 4) * stride

        left = centers[:, 0] - dist[:, 0]
        top = centers[:, 1] - dist[:, 1]
        right = centers[:, 0] + dist[:, 2]
        bottom = centers[:, 1] + dist[:, 3]
        return torch.stack([left, top, right, bottom], dim=-1)

    def forward(self, img):
        """Return (cls_scores, bbox_preds) concatenated across all head levels."""
        with torch.no_grad():
            feats = self.model.extract_feat(img)
            cls_score_list, bbox_pred_list = self.model.bbox_head(feats)
            all_scores, all_boxes = [], []
            for level, (cls_score, bbox_pred) in enumerate(
                    zip(cls_score_list, bbox_pred_list)):
                feat_h = cls_score.size(2)
                feat_w = cls_score.size(3)
                all_scores.append(self.cls_score_post_process(cls_score))
                all_boxes.append(
                    self.bbox_pred_post_process(bbox_pred, feat_h, feat_w,
                                                self.strides[level]))
            cls_scores = torch.cat(all_scores)
            bbox_preds = torch.cat(all_boxes)
        return cls_scores, bbox_preds


def verify_model_forward(model, input_config):
    """Run the torch model on the example input and display NMS detections.

    Args:
        model: an ``ONNXModel`` (or any callable returning flat per-anchor
            ``cls_scores`` and xyxy ``bbox_preds``).
        input_config: dict with "input_path", "input_shape" and
            "normalize_cfg" keys, as consumed by
            ``preprocess_example_input``.
    """
    one_img, one_meta = preprocess_example_input(input_config)
    cls_scores, bbox_preds = model(one_img)

    # multiclass_nms expects a trailing background-class score column.
    padding = cls_scores.new_zeros(cls_scores.shape[0], 1)
    cls_scores = torch.cat([cls_scores, padding], dim=1)
    nms_cfg = dict(type="nms", iou_threshold=0.6)
    bboxes, labels = multiclass_nms(bbox_preds,
                                    cls_scores,
                                    score_thr=0.3,
                                    nms_cfg=nms_cfg,
                                    max_num=100)
    input_path = input_config["input_path"]
    input_shape = input_config["input_shape"]
    image = cv2.imread(input_path)
    image = cv2.resize(image, dsize=(input_shape[-1], input_shape[-2]))
    # BUG FIX: the original nested `for bbox in bboxes` loop redrew every box
    # once per detection and labelled all of them with the current
    # detection's class name. Draw each box exactly once with its own label.
    for bbox, label in zip(bboxes, labels):
        class_name = VOCDataset.CLASSES[label.item()]
        image = visualize_bbox(image, bbox, class_name)
    cv2.namedWindow("show", cv2.WINDOW_NORMAL)
    cv2.imshow("show", image)
    cv2.waitKey()
    cv2.destroyAllWindows()


def export_onnx(model, input_config, onnx_file_path):
    """Trace ``model`` on the example input and write an ONNX graph.

    The image height/width axes and the flattened output row counts are
    marked dynamic so the exported graph accepts other input resolutions.

    Args:
        model: torch module to trace.
        input_config: dict consumed by ``preprocess_example_input``.
        onnx_file_path: destination path for the .onnx file.
    """
    one_img, _ = preprocess_example_input(input_config)

    dynamic_axes = {
        "images": {2: "height", 3: "width"},
        "cls_scores": {0: "size"},
        "bbox_preds": {0: "size"},
    }
    torch.onnx.export(model,
                      one_img,
                      onnx_file_path,
                      input_names=["images"],
                      output_names=["cls_scores", "bbox_preds"],
                      dynamic_axes=dynamic_axes,
                      export_params=True,
                      keep_initializers_as_inputs=True,
                      verbose=True,
                      opset_version=11)


def verity_onnx_model(onnx_file_path, input_config):
    """Run the exported ONNX graph with onnxruntime and display detections.

    Loads and preprocesses the image named in ``input_config`` (resize,
    normalize, HWC->NCHW), feeds it through an onnxruntime session, applies
    multiclass NMS to the outputs and shows the result with OpenCV.

    Args:
        onnx_file_path: path of the exported .onnx file.
        input_config: dict with "input_path", "input_shape" and
            "normalize_cfg" keys.
    """
    model = onnxruntime.InferenceSession(onnx_file_path)
    input_name = model.get_inputs()[0].name
    output_names = [output.name for output in model.get_outputs()]

    input_path = input_config["input_path"]
    input_shape = input_config["input_shape"]
    normalize_cfg = input_config["normalize_cfg"]
    mean = np.array(normalize_cfg["mean"], dtype=np.float32)
    std = np.array(normalize_cfg["std"], dtype=np.float32)

    image = cv2.imread(input_path)
    image = cv2.resize(image, dsize=(input_shape[-1], input_shape[-2]))
    image = mmcv.imnormalize(image, mean, std)
    # HWC -> NCHW. The transpose produces a non-contiguous view, which
    # onnxruntime rejects as input, so force a contiguous copy.
    image = np.ascontiguousarray(image.transpose(2, 0, 1)[np.newaxis, :, :, :])

    cls_scores, bbox_preds = model.run(output_names,
                                       input_feed={input_name: image})

    # show
    cls_scores = torch.from_numpy(cls_scores)
    bbox_preds = torch.from_numpy(bbox_preds)
    # multiclass_nms expects a trailing background-class score column.
    padding = cls_scores.new_zeros(cls_scores.shape[0], 1)
    cls_scores = torch.cat([cls_scores, padding], dim=1)
    nms_cfg = dict(type="nms", iou_threshold=0.6)
    bboxes, labels = multiclass_nms(bbox_preds,
                                    cls_scores,
                                    score_thr=0.3,
                                    nms_cfg=nms_cfg,
                                    max_num=100)
    image = cv2.imread(input_path)
    image = cv2.resize(image, dsize=(input_shape[-1], input_shape[-2]))
    # BUG FIX: the original nested `for bbox in bboxes` loop redrew every box
    # once per detection and labelled all of them with the current
    # detection's class name. Draw each box exactly once with its own label.
    for bbox, label in zip(bboxes, labels):
        class_name = VOCDataset.CLASSES[label.item()]
        image = visualize_bbox(image, bbox, class_name)
    # cv2.namedWindow("show", cv2.WINDOW_NORMAL)
    cv2.imshow("show", image)
    cv2.waitKey()
    cv2.destroyAllWindows()


def main():
    """Entry point: configure file paths / input spec and verify the ONNX model.

    Uncomment the relevant calls to (1) sanity-check the torch forward
    pass, (2) export the ONNX graph, or (3) verify the exported graph.
    """
    cfg_file_path = "configs/gfl/gfl_shufflenetv2_pan.py"
    checkpoint_file_path = "log/epoch_24.pth"
    onnx_file_path = "log/gfl_shufflenetv2_pan.onnx"

    input_config = {
        # NCHW shape the example input is resized to.
        "input_shape": (1, 3, 375, 500),
        "input_path": R"G:\temp\000606.jpg",
        "normalize_cfg": {
            "mean": [123.675, 116.28, 103.53],
            "std": [58.395, 57.12, 57.375],
        },
    }
    # model = ONNXModel(cfg_file_path, checkpoint_file_path)
    # verify_model_forward(model, input_config)
    # export_onnx(model, input_config, onnx_file_path)
    verity_onnx_model(onnx_file_path, input_config)


if __name__ == "__main__":
    main()
