import argparse
import os
import sys

import numpy as np
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from transformers import BertModel

import groundingdino.datasets.transforms as T
from groundingdino.util import get_tokenlizer
from groundingdino.util.misc import clean_state_dict, collate_fn
from groundingdino.util.slconfig import SLConfig
from groundingdino.models.GroundingDINO.bertwarper import (
    BertModelWarper,
    generate_masks_with_special_tokens,
    generate_masks_with_special_tokens_and_transfer_map,
)

class Tokenize(nn.Module):
    """BERT text encoder plus a linear projection to the detector feature space.

    This wraps the GroundingDINO text branch (tokenizer + BERT + feat_map)
    as a standalone module so it can be exported to ONNX on its own.
    """

    def __init__(
        self,
        text_encoder_type="bert-base-uncased",
        sub_sentence_present=True,
        max_text_len=256,
        hidden_dim=256,
    ):
        """Build the text encoder.

        Args:
            text_encoder_type: HuggingFace model name used for both the
                tokenizer and the language model.
            sub_sentence_present: stored for parity with the full detector;
                not consumed inside this module.
            max_text_len: maximum token sequence length.
            hidden_dim: output feature dimension of ``feat_map`` (the
                detector's d_model; default 256 matches GroundingDINO).
        """
        super().__init__()
        self.hidden_dim = hidden_dim
        # BUG FIX: was hard-coded to 256, silently ignoring the argument.
        self.max_text_len = max_text_len
        self.sub_sentence_present = sub_sentence_present

        # BERT tokenizer + language model. The pooler head is frozen because
        # only the per-token hidden states are used downstream.
        self.tokenizer = get_tokenlizer.get_tokenlizer(text_encoder_type)
        self.bert = get_tokenlizer.get_pretrained_language_model(text_encoder_type)
        self.bert.pooler.dense.weight.requires_grad_(False)
        self.bert.pooler.dense.bias.requires_grad_(False)
        self.bert = BertModelWarper(bert_model=self.bert)

        # Project BERT's hidden size (e.g. 768) down to the detector's d_model.
        self.feat_map = nn.Linear(self.bert.config.hidden_size, self.hidden_dim, bias=True)
        nn.init.constant_(self.feat_map.bias.data, 0)
        nn.init.xavier_uniform_(self.feat_map.weight.data)

        # NOTE(review): attribute name keeps the original (misspelled)
        # spelling for checkpoint/caller compatibility.
        self.specical_tokens = self.tokenizer.convert_tokens_to_ids(["[CLS]", "[SEP]", ".", "?"])

    def forward(self, input_ids, token_type_ids, attention_mask, position_ids):
        """Run BERT and project its last hidden state.

        Args:
            input_ids: (bs, seq_len) int64 token ids.
            token_type_ids: (bs, seq_len) int64 segment ids.
            attention_mask: attention mask passed straight to BERT
                (the export script feeds a (bs, seq_len, seq_len) bool map).
            position_ids: (bs, seq_len) int64 position ids.

        Returns:
            (bs, seq_len, hidden_dim) projected text features.
        """
        # BUG FIX: `position_ids=pooler` referenced an undefined name and
        # raised NameError on every call; pass the actual argument.
        bert_output = self.bert(
            input_ids=input_ids,
            token_type_ids=token_type_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
        )  # last_hidden_state: (bs, seq_len, hidden_size)

        encoded_text = self.feat_map(bert_output["last_hidden_state"])  # (bs, seq_len, hidden_dim)

        return encoded_text

def load_model(model_config_path, model_checkpoint_path, cpu_only=True):
    """Build a ``Tokenize`` module and load checkpoint weights into it.

    Args:
        model_config_path: kept for interface compatibility; not consumed
            here (the module is built with its defaults).
        model_checkpoint_path: path to a checkpoint whose "model" entry is
            the full detector state dict.
        cpu_only: if True, map all tensors onto CPU while loading;
            otherwise let torch restore them to their saved devices.

    Returns:
        The ``Tokenize`` module in eval mode.
    """
    model = Tokenize()
    # BUG FIX: cpu_only was ignored and 'cpu' hard-coded.
    map_location = "cpu" if cpu_only else None
    checkpoint = torch.load(model_checkpoint_path, map_location=map_location)
    # strict=False: the checkpoint holds the whole detector, only the
    # text-branch keys (bert / feat_map) are expected to match.
    model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
    model.eval()
    return model

def main(args):
    """Export the text branch of a GroundingDINO checkpoint to ONNX.

    Args:
        args: argparse namespace with ``config_file``, ``checkpoint_path``
            and ``onnx`` (output file path).

    Raises:
        ValueError: if no ONNX output path was provided.
    """
    # Parsed for validation / parity with the other export scripts; model
    # construction does not currently consume it.
    cfg = SLConfig.fromfile(args.config_file)

    # BUG FIX: a missing --onnx previously surfaced as an obscure failure
    # inside torch.onnx.export (path=None); fail early with a clear message.
    if not args.onnx:
        raise ValueError("an --onnx output path must be provided")

    model = load_model(args.config_file, args.checkpoint_path)
    model = model.to("cpu")

    input_names = ["input_ids", "token_type_ids", "attention_mask", "position_ids"]
    output_names = ["encode_text"]
    # Dummy inputs sized for max_text_len=256: int64 token/segment/position
    # ids plus a (bs, seq, seq) bool attention map.
    input_tensor = (
        torch.zeros([1, 256]).long(),
        torch.zeros([1, 256]).long(),
        torch.zeros([1, 256, 256]).bool(),
        torch.zeros([1, 256]).long(),
    )
    torch.onnx.export(
        model,
        input_tensor,
        args.onnx,
        input_names=input_names,
        output_names=output_names,
        opset_version=16,
    )

if __name__ == "__main__":
    # CLI entry point: convert the BERT text branch of a GroundingDINO
    # checkpoint to ONNX.
    parser = argparse.ArgumentParser("Bert convert to ONNX")
    parser.add_argument("--config_file", type=str, required=True,
                        help="path to the model config file")
    parser.add_argument("--checkpoint_path", type=str, required=True,
                        help="path to the detector checkpoint (.pth)")
    # BUG FIX: previously optional, which let the script run until
    # torch.onnx.export crashed on a None path; fail at parse time instead.
    parser.add_argument("--onnx", type=str, required=True,
                        help="output ONNX file path")
    args = parser.parse_args()
    main(args)