#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import sys
sys.path.append("/home/ma-user/work/")
import argparse
import os
import onnx
import torch

from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import build_detection_test_loader, MetadataCatalog, DatasetCatalog
from detectron2.evaluation import COCOEvaluator, inference_on_dataset, print_csv_format
from detectron2.export import Caffe2Tracer, add_export_config
from detectron2.modeling import build_model
from detectron2.utils.logger import setup_logger

from detectron2.data.datasets import register_coco_instances
import random
from detectron2.utils.visualizer import Visualizer
from detectron2.engine import DefaultPredictor
from fsdet.engine import DefaultTrainer, default_argument_parser, default_setup

import cv2
import torch
from detectron2.data import MetadataCatalog
from detectron2.utils.video_visualizer import VideoVisualizer
from detectron2.utils.visualizer import ColorMode, Visualizer

from fsdet.engine import DefaultTrainer, default_argument_parser, default_setup
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.export import Caffe2Tracer, add_export_config
from detectron2.modeling import build_model
from detectron2.utils.logger import setup_logger




def setup(args):
    """Build and return a frozen config from parsed CLI arguments.

    Loads the YAML file named by ``args.config_file``, applies any
    ``args.opts`` key/value overrides on top of it, freezes the result,
    and runs the standard fsdet/detectron2 environment setup (logging,
    output dir, etc.).
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    overrides = args.opts
    if overrides:
        cfg.merge_from_list(overrides)
    cfg.freeze()
    default_setup(cfg, args)
    return cfg


def GetOnnx(cfg, output_dir=None):
    """Trace the model described by ``cfg`` and save it as ONNX.

    Builds the model, loads weights from ``cfg.MODEL.WEIGHTS``, grabs one
    batch from the first test dataset to drive the trace, and writes the
    exported graph to ``<output_dir>/model.onnx``.

    Args:
        cfg: frozen detectron2/fsdet config node.
        output_dir: directory for the ``model.onnx`` file. Defaults to the
            module-level ``args.output`` (set in ``__main__``) to stay
            backward-compatible with the original one-argument call.
    """
    model = build_model(cfg)
    DetectionCheckpointer(model).resume_or_load(cfg.MODEL.WEIGHTS)

    # One real batch is needed as the example input for tracing.
    data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
    first_batch = next(iter(data_loader))

    # Trace through the Caffe2-compatible path and export an ONNX ModelProto.
    tracer = Caffe2Tracer(cfg, model, first_batch)
    onnx_model = tracer.export_onnx()

    if output_dir is None:
        # Original behavior: rely on the module-level ``args`` from __main__.
        output_dir = args.output
    # BUG FIX: torch.onnx has no ``save`` function (and ``opset_version`` is
    # not a save-time option); an onnx.ModelProto is saved with onnx.save().
    onnx.save(onnx_model, os.path.join(output_dir, "model.onnx"))
    
    
    
## class GetOnnx(object):
#     def __init__(self, cfg):
#         self.npu_device = torch.device('npu:0')
#         self.model = build_model(cfg)
#         DetectionCheckpointer(torch_model).resume_or_load(cfg.MODEL.WEIGHTS)
#     def pytorch2onnx(self):
#         print("###################################:",self.model.parameters)
#         model_state_dict=torch.load("model_final.pth",map_location=self.npu_device)['model']
#         model=self.model
#         model.load_state_dict(model_state_dict)
#         model.eval()
#         # print(model_state_dict)
#         # self.model.load_state_dict(model_state_dict)
#         # self.model.eval()
#         # print(self.model)
#         # torch.save(self.model,"model_net.pth")
#         model.eval()
#         print(model)
#         # torch.save(model,"model_net.pth")
#         output_onnx = 'faster_rcnn_fpn.onnx'
#         print("==> Exporting model to ONNX format at '{}'".format(output_onnx))
#         input_names = ["input0"]
#         output_names = ["cls_score","bbox_pred"]
#         image = torch.randn(1,3, 448, 448).to(self.npu_device)

#         inputs=({'image':image,'image_size':image.shape[-2:],'image_preprocess':image})
#         print(inputs)

#         torch_out = torch.onnx.export(model, 
#                                       image, 
#                                       output_onnx, 
#                                       export_params=True, 
#                                       verbose=True,
#                                       input_names=input_names, 
#                                       output_names=output_names)
#                                       # dynamic_axes= {
#                                       # input_names: {0: 'batch_size', 2 : 'in_width', 3: 'int_height'},
#                                       # output_names: {0: 'batch_size', 2: 'out_width', 3:'out_height'}
#                                       # }
                  
    
    
if __name__ == "__main__":
    # Parse the standard fsdet/detectron2 CLI, build the frozen config,
    # then run the ONNX export. NOTE: ``args`` must stay module-level —
    # GetOnnx reads it as a global when choosing the output directory.
    args = default_argument_parser().parse_args()
    cfg = setup(args)
    GetOnnx(cfg)