import argparse
import os
import sys

import onnx
import numpy as np
import torch
from PIL import Image, ImageDraw, ImageFont

from groundingdino.util.slconfig import SLConfig
from groundingdino.models import build_model
from groundingdino.util.utils import clean_state_dict



def load_model(model_config_path, model_checkpoint_path, cpu_only=True):
	"""Build a Grounding DINO model and load its pretrained weights.

	Args:
		model_config_path: path to the SLConfig python config file.
		model_checkpoint_path: path to the .pth checkpoint containing a "model" entry.
		cpu_only: if True (default), build the model for CPU; otherwise use CUDA.
			Bug fix: previously this flag was accepted but ignored and the device
			was always hard-coded to 'cpu'.

	Returns:
		The model switched to eval mode.
	"""
	args = SLConfig.fromfile(model_config_path)
	args.device = "cpu" if cpu_only else "cuda"
	model = build_model(args)
	# map_location='cpu' keeps checkpoint loading memory-friendly regardless of
	# the target device; weights are moved when the model itself is moved.
	checkpoint = torch.load(model_checkpoint_path, map_location="cpu")
	# strict=False: checkpoint may contain extra keys not present in the built model.
	model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
	model.eval()
	return model

if __name__ == "__main__":
	parser = argparse.ArgumentParser("Grounding DINO convert to ONNX")
	parser.add_argument("--config_file", type=str, required=True)
	parser.add_argument("--checkpoint_path", type=str, required=True)
	parser.add_argument("--device", type=str, default="cpu")
	# Defaults/required added so a missing flag no longer crashes with an
	# unhelpful AttributeError on None (previously none of these had defaults).
	parser.add_argument("--image_shape", type=str, default="800,1200",
		help="comma-separated input height,width, e.g. '800,1200'")
	parser.add_argument("--batchsize", type=int, default=1)
	parser.add_argument("--onnx", type=str, required=True,
		help="output path for the exported .onnx file")
	args = parser.parse_args()

	model = load_model(args.config_file, args.checkpoint_path)
	model.eval()

	input_names = ["images", "encoded_text", "text_token_mask", "position_ids", "text_self_attention_mask"]
	output_names = ["label", "boxes"]

	# Parse the image shape once instead of splitting the string twice.
	height, width = (int(v) for v in args.image_shape.split(","))
	batch = args.batchsize

	# Dummy inputs matching the model's forward signature: an image batch plus
	# tokenized-text tensors. 256 is presumably the max text token length /
	# text embedding width — confirm against the model config.
	input_tensor = (
		torch.randn((batch, 3, height, width), dtype=torch.float32),
		torch.randn((batch, 256, 256), dtype=torch.float32),
		torch.ones((batch, 256), dtype=torch.bool),
		torch.zeros((batch, 256), dtype=torch.long),
		torch.zeros((batch, 256, 256), dtype=torch.bool),
	)

	torch.onnx.export(
		model,
		input_tensor,
		args.onnx,
		input_names=input_names,
		output_names=output_names,
		opset_version=16,
	)


