import argparse
import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.runner import load_checkpoint
from torchvision.ops import nms

import models  # noqa: F401,F403
from mmdet.models import build_detector


class GFLONNXModel(nn.Module):
    """GFL detector wrapper that makes the model exportable to ONNX.

    Takes a raw HWC image tensor, normalizes it, runs the backbone + GFL
    head, decodes the distributional box regression into absolute
    ``(x1, y1, x2, y2)`` boxes, and applies score thresholding + NMS so the
    exported graph emits final detections directly.
    """

    def __init__(self,
                 cfg_file_path: str,
                 checkpoint_file_path: str,
                 score_thres: float = 0.5,
                 iou_thres: float = 0.5):
        """
        Args:
            cfg_file_path: path to the mmdetection config file.
            checkpoint_file_path: path to the trained checkpoint (.pth).
            score_thres: minimum classification score to keep a box.
            iou_thres: IoU threshold used by NMS.
        """
        super().__init__()
        self.cfg = mmcv.Config.fromfile(cfg_file_path)
        self.model = self.create_model(cfg_file_path, checkpoint_file_path)
        # Normalization constants reshaped to 1 x 3 x 1 x 1 so they
        # broadcast over an N x 3 x H x W image batch.
        self.mean = torch.tensor(self.cfg.img_norm_cfg.mean)
        self.mean = self.mean[None, :, None, None]
        self.std = torch.tensor(self.cfg.img_norm_cfg.std)
        self.std = self.std[None, :, None, None]

        self.strides = self.cfg.model.bbox_head.anchor_generator.strides
        # GFL predicts a discrete distribution over integer offsets
        # [0, reg_max] per box side; `project` holds the bin values used
        # to take the distribution's expectation.
        self.reg_max = self.cfg.model.bbox_head.reg_max
        self.project = torch.linspace(0,
                                      self.reg_max,
                                      self.reg_max + 1,
                                      dtype=torch.float32)
        self.score_thres = score_thres
        self.iou_thres = iou_thres

    def create_model(self, cfg_file_path, checkpoint_file_path):
        """Build the detector from ``self.cfg`` and load checkpoint weights.

        Args:
            cfg_file_path: unused -- ``self.cfg`` is already loaded in
                ``__init__``; kept for backward compatibility with callers.
            checkpoint_file_path: checkpoint to load onto CPU.

        Returns:
            The detector in eval mode on CPU.
        """
        self.cfg.model.pretrained = None
        self.cfg.data.test.test_mode = True
        model = build_detector(self.cfg.model,
                               train_cfg=None,
                               test_cfg=self.cfg.test_cfg)
        load_checkpoint(model, checkpoint_file_path, map_location="cpu")
        model.cpu().eval()
        return model

    @staticmethod
    def cls_score_post_process(cls_score):
        """Flatten a (1, C, H, W) logit map into per-location sigmoid scores.

        Returns a squeezed tensor: shape (H*W,) for a single-class head,
        (H*W, C) otherwise.
        """
        cls_score = cls_score.permute(0, 2, 3, 1)
        cls_score = cls_score.reshape(-1, cls_score.size(-1))
        cls_score = cls_score.sigmoid()
        cls_score = torch.squeeze(cls_score)
        return cls_score

    def bbox_pred_post_process(self, bbox_pred, feat_h, feat_w, stride):
        """Decode GFL distance distributions into absolute xyxy boxes.

        Args:
            bbox_pred: (1, 4*(reg_max+1), feat_h, feat_w) raw head output.
            feat_h: feature-map height of this pyramid level.
            feat_w: feature-map width of this pyramid level.
            stride: input-image stride of this pyramid level.

        Returns:
            (feat_h*feat_w, 4) tensor of (x1, y1, x2, y2) boxes in pixels.
        """
        # NOTE(review): relies on meshgrid's default "ij" indexing; newer
        # torch warns without an explicit indexing= argument -- confirm the
        # target torch version before adding it.
        y, x = torch.meshgrid(
            torch.arange(feat_h) * stride,
            torch.arange(feat_w) * stride)
        centers = torch.stack([x.reshape(-1), y.reshape(-1)], dim=1)
        centers = centers.type(torch.float32)

        # Expectation of each per-side distance distribution, scaled from
        # stride units to pixels.
        bbox_pred = bbox_pred.permute(0, 2, 3, 1)
        bbox_pred = bbox_pred.reshape(-1, self.reg_max + 1)
        bbox_pred = F.softmax(bbox_pred, dim=1)
        bbox_pred = F.linear(bbox_pred, self.project)
        bbox_pred = bbox_pred.reshape(-1, 4) * stride

        # Columns are (left, top, right, bottom) offsets from the center.
        x1 = centers[:, 0] - bbox_pred[:, 0]
        y1 = centers[:, 1] - bbox_pred[:, 1]
        x2 = centers[:, 0] + bbox_pred[:, 2]
        y2 = centers[:, 1] + bbox_pred[:, 3]
        return torch.stack([x1, y1, x2, y2], dim=-1)

    def forward(self, img):
        """Detect objects in a single HWC image tensor.

        Args:
            img: (H, W, 3) image tensor. Assumes channel order matches the
                config's img_norm_cfg statistics -- TODO confirm (mmcv reads
                BGR; many configs expect RGB via to_rgb).

        Returns:
            Tuple ``(scores, boxes)``: kept scores (K,) and boxes (K, 4)
            after score thresholding and NMS.
        """
        img = img.permute(2, 0, 1).unsqueeze(0).float()
        # Out-of-place normalization: `.float()` returns the *same* storage
        # for float input, so the previous in-place sub_/div_ silently
        # mutated the caller's tensor on repeated calls.
        img = (img - self.mean) / self.std
        with torch.no_grad():
            x = self.model.extract_feat(img)
            cls_score_list, bbox_pred_list = self.model.bbox_head(x)
            cls_score_results, bbox_pred_results = list(), list()
            for i, (cls_score,
                    bbox_pred) in enumerate(zip(cls_score_list,
                                                bbox_pred_list)):
                feat_h = cls_score.size(2)
                feat_w = cls_score.size(3)
                cls_score = self.cls_score_post_process(cls_score)
                bbox_pred = self.bbox_pred_post_process(
                    bbox_pred, feat_h, feat_w, self.strides[i])
                cls_score_results.append(cls_score)
                bbox_pred_results.append(bbox_pred)
            cls_scores = torch.cat(cls_score_results)
            bbox_preds = torch.cat(bbox_pred_results)
        # NOTE(review): the boolean mask below assumes a single-class head
        # (1-D cls_scores after squeeze); a multi-class head would need
        # per-class handling -- verify against the config.
        keep = cls_scores > self.score_thres
        cls_scores = cls_scores[keep]
        bbox_preds = bbox_preds[keep]
        keep = nms(bbox_preds, cls_scores, self.iou_thres)
        cls_scores = cls_scores[keep]
        bbox_preds = bbox_preds[keep]
        return cls_scores, bbox_preds


def argument_parser():
    """Parse the command-line options for the ONNX export script."""
    # (flag, default, metavar, help) for each option; a metavar of None
    # means argparse's default is used.
    option_table = [
        ("--config-file", "", "FILE", "path to config file"),
        ("--checkpoint", "", "FILE", "path to checkpoint file"),
        ("--onnx-file", "out.onnx", None, "path to onnx file"),
        ("--image", "images/person.jpg", None, "path to image file"),
    ]
    parser = argparse.ArgumentParser(description="export model to onnx")
    for flag, default, metavar, help_text in option_table:
        extra = {"default": default, "help": help_text}
        if metavar is not None:
            extra["metavar"] = metavar
        parser.add_argument(flag, **extra)
    return parser.parse_args()


def main():
    """Build the wrapped GFL model and trace it to an ONNX file."""
    args = argument_parser()
    model = GFLONNXModel(args.config_file, args.checkpoint)

    # The exported graph takes the raw HWC image tensor as its input.
    image = torch.from_numpy(mmcv.imread(args.image))

    # Height/width of the input and the number of detections vary at
    # runtime, so mark those axes dynamic.
    dynamic_axes = {
        "image": {0: "height", 1: "width"},
        "scores": {0: "size"},
        "bboxes": {0: "size"},
    }
    torch.onnx.export(model,
                      image,
                      args.onnx_file,
                      input_names=["image"],
                      output_names=["scores", "bboxes"],
                      dynamic_axes=dynamic_axes,
                      export_params=True,
                      verbose=True,
                      opset_version=11)


# Run the export only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
