import argparse
import torch


from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg

from fsdet.engine import DefaultTrainer, default_argument_parser, default_setup
from detectron2.data import build_detection_test_loader, detection_utils

from fvcore.common.file_io import PathManager
from detectron2.modeling import build_model


import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()

# import some common libraries
import numpy as np
import os, json, cv2, random

# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog


def setup_cfg(args):
    """Build a detectron2 config from the parsed CLI arguments.

    Loads the YAML config named by ``args.config_file``, applies any
    ``KEY VALUE`` overrides from ``args.opts``, and then forces the
    dataloader to run without worker processes. The cfg is returned
    unfrozen so the caller may still mutate it.

    Args:
        args: argparse.Namespace from ``default_argument_parser()``;
            must provide ``config_file`` (str path) and ``opts`` (list).

    Returns:
        The populated (unfrozen) detectron2 ``CfgNode``.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # The CUDA context is initialized before the dataloader is created, so the
    # dataloader must not fork. Set this AFTER the merges so neither the config
    # file nor CLI opts can silently re-enable worker processes.
    cfg.DATALOADER.NUM_WORKERS = 0
    # Deliberately left unfrozen (original had cfg.freeze() commented out).
    return cfg



if __name__ == "__main__":
    
    # # Disable respecialization on new shapes. Otherwise --run-eval will be slow
    # torch._C._jit_set_bailout_depth(1)
    args=default_argument_parser().parse_args()
    cfg = setup_cfg(args)

    # create a torch model
    torch_model = build_model(cfg)
    DetectionCheckpointer(torch_model).resume_or_load(cfg.MODEL.WEIGHTS)
    torch_model.eval()
    torch._C._jit_set_bailout_depth(1)
    # get sample data
    data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
    sample_inputs = next(iter(data_loader))
    
    image = sample_inputs[0]["image"]
    inputs=({'image':image,'image_size':image.shape[-2:],'image_preprocess':image})

    with PathManager.open(os.path.join(".", "model.onnx"), "wb") as f:
        torch.onnx.export(
            torch_model,
            inputs,
            f,
        )
