import argparse
import os
import sys

import numpy as np
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from transformers import BertModel

import groundingdino.datasets.transforms as T
from groundingdino.util import get_tokenlizer
from groundingdino.util.misc import clean_state_dict, collate_fn
from groundingdino.util.slconfig import SLConfig
from groundingdino.models.GroundingDINO.bertwarper import (
    BertModelWarper,
    generate_masks_with_special_tokens,
    generate_masks_with_special_tokens_and_transfer_map,
)


class Tokenize(nn.Module):
    """Text branch of Grounding DINO preprocessing.

    Tokenizes caption strings and produces the attention masks / position ids
    that the text encoder consumes. (The original docstring described an
    object-detection module; this class only handles text.)

    Args:
        text_encoder_type: HuggingFace model name used to select both the
            tokenizer and the pretrained language model.
        sub_sentence_present: if True, feed per-sub-sentence self-attention
            masks and generated position ids to the encoder instead of the
            plain attention mask.
        max_text_len: maximum number of text tokens kept after tokenization.
    """

    def __init__(
        self,
        text_encoder_type="bert-base-uncased",
        sub_sentence_present=True,
        max_text_len=256,
    ):
        super().__init__()
        self.hidden_dim = 256
        # BUG FIX: max_text_len was previously hard-coded to 256, silently
        # ignoring the constructor argument. Honor the caller's value
        # (default unchanged, so existing callers behave identically).
        self.max_text_len = max_text_len
        self.sub_sentence_present = sub_sentence_present

        # BERT tokenizer + language model; the pooler head is frozen because
        # only the token-level hidden states are used downstream.
        self.tokenizer = get_tokenlizer.get_tokenlizer(text_encoder_type)
        self.bert = get_tokenlizer.get_pretrained_language_model(text_encoder_type)
        self.bert.pooler.dense.weight.requires_grad_(False)
        self.bert.pooler.dense.bias.requires_grad_(False)
        self.bert = BertModelWarper(bert_model=self.bert)

        # Projects BERT hidden states to the detector's feature dimension.
        self.feat_map = nn.Linear(self.bert.config.hidden_size, self.hidden_dim, bias=True)
        nn.init.constant_(self.feat_map.bias.data, 0)
        nn.init.xavier_uniform_(self.feat_map.weight.data)

        # Token ids that delimit sub-sentences in the caption.
        # NOTE: attribute name keeps the original typo ("specical_tokens")
        # for checkpoint / external-code compatibility.
        self.specical_tokens = self.tokenizer.convert_tokens_to_ids(["[CLS]", "[SEP]", ".", "?"])

    def forward(self, captions):
        """Tokenize `captions` and build the text-side input tensors.

        Args:
            captions: a caption string or list of caption strings, with
                categories separated by " . ".

        Returns:
            dict with keys ``input_ids``, ``text_token_mask``,
            ``position_ids``, ``text_self_attention_masks`` and
            ``token_type_ids``, each truncated to ``max_text_len``.
        """
        # padding="max_length" pads to the tokenizer's model max length
        # (presumably 512 for BERT — confirm against the tokenizer config).
        tokenized = self.tokenizer(captions, padding="max_length", return_tensors="pt").to('cpu')
        (
            text_self_attention_masks,
            position_ids,
            cate_to_token_mask_list,
        ) = generate_masks_with_special_tokens_and_transfer_map(
            tokenized, self.specical_tokens, self.tokenizer
        )
        # Truncate everything consistently if the caption overruns the budget.
        if text_self_attention_masks.shape[1] > self.max_text_len:
            text_self_attention_masks = text_self_attention_masks[
                :, : self.max_text_len, : self.max_text_len
            ]
            position_ids = position_ids[:, : self.max_text_len]
            tokenized["input_ids"] = tokenized["input_ids"][:, : self.max_text_len]
            tokenized["attention_mask"] = tokenized["attention_mask"][:, : self.max_text_len]
            tokenized["token_type_ids"] = tokenized["token_type_ids"][:, : self.max_text_len]

        # extract text embeddings
        if self.sub_sentence_present:
            # Swap the flat attention mask for the sub-sentence-aware one.
            tokenized_for_encoder = {k: v for k, v in tokenized.items() if k != "attention_mask"}
            tokenized_for_encoder["attention_mask"] = text_self_attention_masks
            tokenized_for_encoder["position_ids"] = position_ids
        else:
            tokenized_for_encoder = tokenized

        text_token_mask = tokenized.attention_mask.bool()  # (bs, num_tokens)

        text_dict = {
            "input_ids": tokenized_for_encoder["input_ids"],  # (bs, num_tokens)
            "text_token_mask": text_token_mask,  # (bs, num_tokens)
            "position_ids": position_ids,  # (bs, num_tokens)
            "text_self_attention_masks": text_self_attention_masks,  # (bs, num_tokens, num_tokens)
            "token_type_ids": tokenized_for_encoder["token_type_ids"]
        }

        return text_dict

class CocoDetection(torchvision.datasets.CocoDetection):
    """COCO dataset wrapper that converts annotations to xyxy box tensors.

    Args:
        img_folder: directory containing the COCO images.
        ann_file: path to the COCO annotation JSON.
        transforms: callable applied as ``transforms(img, target)``; may be None.
    """

    def __init__(self, img_folder, ann_file, transforms):
        super().__init__(img_folder, ann_file)
        self._transforms = transforms

    def __getitem__(self, idx):
        img, target = super().__getitem__(idx)  # target: list of annotation dicts

        w, h = img.size
        boxes = [obj["bbox"] for obj in target]
        # reshape(-1, 4) keeps the dtype/shape valid even for images with
        # zero annotations.
        boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
        boxes[:, 2:] += boxes[:, :2]  # xywh -> xyxy
        boxes[:, 0::2].clamp_(min=0, max=w)
        boxes[:, 1::2].clamp_(min=0, max=h)
        # Drop degenerate boxes (zero or negative width/height).
        keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
        boxes = boxes[keep]

        target_new = {}
        target_new["image_id"] = self.ids[idx]
        target_new["boxes"] = boxes
        target_new["orig_size"] = torch.as_tensor([int(h), int(w)])

        if self._transforms is not None:
            img, target_new = self._transforms(img, target_new)

        # BUG FIX: previously, when _transforms was None, the raw COCO
        # annotation list was returned instead of the normalized dict built
        # above, giving callers inconsistent target types.
        return img, target_new

def load_model(model_config_path, model_checkpoint_path, cpu_only=True):
    """Build the Tokenize model and load matching weights from a checkpoint.

    Args:
        model_config_path: unused; kept for call-site compatibility.
        model_checkpoint_path: path to a checkpoint containing a "model"
            state-dict entry.
        cpu_only: if True (default), force all tensors onto the CPU.

    Returns:
        The model in eval mode. Loading uses ``strict=False`` because the
        checkpoint holds the full detector while this model only contains
        the text branch.
    """
    model = Tokenize()
    # BUG FIX: cpu_only was previously ignored (always mapped to CPU).
    # Default behavior is unchanged.
    map_location = "cpu" if cpu_only else None
    checkpoint = torch.load(model_checkpoint_path, map_location=map_location)
    load_res = model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
    model.eval()
    return model

def main(args):
    """Dump preprocessed image tensors and text tensors to .bin files.

    Images whose padded shape matches ``args.img_shape`` (``"H,W"``) are
    written to ``args.save_dir``; the five text tensors are written to
    ``args.txt_dir/t1 .. t5``, one file per kept sample index.
    """
    cfg = SLConfig.fromfile(args.config_file)  # parsed to validate the config file

    model = load_model(args.config_file, args.checkpoint_path)
    model = model.to("cpu")
    transform = T.Compose(
        [
            T.RandomResize([800], max_size=1333),
            T.ToTensor(),
            T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]
    )

    dataset = CocoDetection(args.image_dir, args.anno_path, transforms=transform)
    data_loader = DataLoader(dataset, shuffle=False, collate_fn=collate_fn)

    # Build the caption "cat1 . cat2 . ... ." from COCO categories or --txt.
    if args.coco:
        category_dict = dataset.coco.dataset['categories']
        cat_list = [item['name'] for item in category_dict]
    else:
        cat_list = args.txt.split(" ")
    caption = " . ".join(cat_list) + ' .'
    text_dict = model(caption)

    # Parse the target shape once (previously re-converted every iteration).
    target_h, target_w = (int(v) for v in args.img_shape.split(",")[:2])

    # Sub-directory -> text_dict key; dump targets created up front
    # (previously five copy-pasted makedirs + five copy-pasted tofile calls,
    # plus redundant makedirs inside the loop).
    text_outputs = {
        "t1": "input_ids",
        "t2": "text_token_mask",
        "t3": "position_ids",
        "t4": "text_self_attention_masks",
        "t5": "token_type_ids",
    }
    os.makedirs(args.save_dir, exist_ok=True)
    for sub in text_outputs:
        os.makedirs(os.path.join(args.txt_dir, sub), exist_ok=True)

    for i, (images, targets) in enumerate(data_loader):
        images = images.tensors.to('cpu')
        # Keep only samples whose padded tensor matches the requested shape.
        if images.shape[2] == target_h and images.shape[3] == target_w:
            save_path = os.path.join(args.save_dir, "{}.bin".format(i))
            images.numpy().astype(np.float32).tofile(save_path)
            for sub, key in text_outputs.items():
                text_dict[key].detach().numpy().tofile(
                    os.path.join(args.txt_dir, sub, "{}.bin".format(i))
                )


if __name__ == "__main__":
    parse = argparse.ArgumentParser(
        "Grounding DINO preprocess")
    parse.add_argument("--img_shape", type=str, required=True,
                       help="target image shape as 'H,W', e.g. '800,1333'")
    parse.add_argument("--config_file", type=str, required=True)
    parse.add_argument("--checkpoint_path", type=str, required=True)
    # BUG FIX: type=bool made every non-empty string truthy, so
    # "--coco False" still evaluated to True. Parse common false spellings
    # explicitly; the default (True) is unchanged.
    parse.add_argument(
        "--coco",
        type=lambda s: str(s).strip().lower() not in ("false", "0", "no", ""),
        default=True,
        help="read categories from COCO annotations; otherwise use --txt",
    )
    parse.add_argument("--txt", type=str, help="input txt")
    parse.add_argument("--anno_path", type=str, help="coco dataset")
    parse.add_argument("--image_dir", type=str, help="coco dataset")
    parse.add_argument("--save_dir", type=str)
    parse.add_argument("--txt_dir", type=str)
    args = parse.parse_args()
    main(args)