import random
import argparse
import torch
import os
import re
import json
import torch.amp as amp
from tqdm import tqdm
from pathlib import Path

from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
from llava import conversation as conversation_lib
from llava.model.builder import load_pretrained_model
from llava.utils import disable_torch_init
from llava.mm_utils import tokenizer_image_token, process_images, get_model_name_from_path
from torch.utils.data import Dataset, DataLoader
from transformers.generation import StoppingCriteriaList
from . import StoppingCriteriaSub

from PIL import Image


# COCO Caption evaluation dataset class
class CocoCaptionEvalDataset(Dataset):
    """Dataset over a folder of COCO-style images for caption generation.

    Each item is a tuple ``(input_ids, image_tensor, image_size, image_id)``
    where ``image_id`` is the integer recovered from filenames of the form
    ``COCO_{train,val,test}2014_000000123456.jpg``.
    """

    # Compiled once: strip the COCO filename prefix and the ".jpg" suffix to
    # expose the numeric image id. The suffix pattern escapes the dot and is
    # anchored so it only removes a literal trailing ".jpg".
    _PREFIX_RE = re.compile(r"COCO_(train|val|test)2014_0*")
    _SUFFIX_RE = re.compile(r"\.jpg$")

    def __init__(self, image_folder, tokenizer, image_processor, model_config, prompt_file):
        self.img_paths = list(Path(image_folder).iterdir())
        self.tokenizer = tokenizer
        self.img_processor = image_processor
        self.model_cfg = model_config
        prompt_path = Path(prompt_file)
        assert prompt_path.exists(), f"prompt file for coco caption {prompt_path} does not exist"
        # One caption prompt per line; a random one is sampled per image.
        with open(prompt_path, "r") as f:
            self.prompts = [p.strip() for p in f.readlines()]

    def __getitem__(self, idx):
        img_p = self.img_paths[idx]
        img_id = int(self._SUFFIX_RE.sub("", self._PREFIX_RE.sub("", img_p.name)))
        img_prompt = random.choice(self.prompts)
        # NOTE(review): the original mm_use_im_start_end if/else branches were
        # byte-identical (they differed only in commented-out historical prompts
        # mirroring preprocess_plain in llava/train/train.py), so the dead
        # conditional was collapsed. Behavior is unchanged.
        qs = DEFAULT_IMAGE_TOKEN + "\n" + img_prompt

        conv = conversation_lib.default_conversation.copy()
        conv.append_message(conv.roles[0], qs)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt().strip()
        # PLAIN templates append a trailing separator; keep only the part
        # before it (prefer sep2 when the template defines one).
        if conversation_lib.default_conversation.sep_style == conversation_lib.SeparatorStyle.PLAIN:
            sep = conversation_lib.default_conversation.sep2
            if sep is None:
                sep = conversation_lib.default_conversation.sep
            prompt = prompt.split(sep)[0]

        # Close the underlying file handle promptly; .convert() returns an
        # in-memory copy that stays valid after the with-block.
        with Image.open(img_p) as raw_img:
            img = raw_img.convert('RGB')
        img_tensor = process_images([img], self.img_processor, self.model_cfg)[0]

        input_ids = tokenizer_image_token(prompt, self.tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt')

        return input_ids, img_tensor, img.size, img_id

    def __len__(self):
        return len(self.img_paths)


def collate_fn(batch):
    """Collate dataset samples: stack the token-id and image tensors along a
    new batch dimension, and pass image sizes / image ids through as tuples."""
    token_ids = torch.stack([sample[0] for sample in batch], dim=0)
    images = torch.stack([sample[1] for sample in batch], dim=0)
    sizes = tuple(sample[2] for sample in batch)
    ids = tuple(sample[3] for sample in batch)
    return token_ids, images, sizes, ids


# DataLoader
def create_data_loader(args):
    """Build the sequential, single-sample eval DataLoader over the COCO image folder."""
    assert args.batch_size == 1, "batch_size must be 1"
    dataset = CocoCaptionEvalDataset(
        args.images_folder, args.tokenizer, args.image_processor, args.model_cfg, args.prompt_file
    )
    return DataLoader(
        dataset,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        shuffle=False,
        collate_fn=collate_fn,
        drop_last=False,
    )


@torch.no_grad()
def eval_model(args):
    """Caption every image under args.images_folder with the given checkpoint
    and write COCO-eval-format results (a JSON list of
    {"image_id": int, "caption": str}) to args.output_folder.
    """
    # Model (single-GPU inference)
    disable_torch_init()
    model_path = os.path.expanduser(args.model_path)
    model_name = get_model_name_from_path(model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(
        model_path, args.model_base, args.vision_tower, model_name, device_map="cuda:0")
    # No noisy (NEFTune) embeddings during evaluation.
    setattr(model.config, "neftune_alpha", 0.0)

    # Adjust conv_mode BEFORE instantiating the conversation template. In the
    # previous ordering the auto-switch to the mmtag prompt ran after
    # default_conversation was already set from args.conv_mode, so the switch
    # never affected the prompts actually generated.
    if 'plain' in model_name and 'finetune' not in model_name.lower() and 'mmtag' not in args.conv_mode:
        args.conv_mode = args.conv_mode + '_mmtag'
        print(f'It seems that this is a plain model, but it is not using a mmtag prompt, auto switching to {args.conv_mode}.')
    if ('mamba' in model_path) and ('vl' in model_path):
        assert args.conv_mode in ('plain', 'mpt'), "Using Mamba-VL to evaluation the karpathy split of COCO datasets"

    # Conversation instance
    conversation_lib.default_conversation = conversation_lib.conv_templates[args.conv_mode]
    if conversation_lib.default_conversation.sep_style == conversation_lib.SeparatorStyle.PLAIN:
        conversation_lib.default_conversation.sep = tokenizer.pad_token

    # Custom stopping criteria: halt generation on EOS or on the conversation
    # separator (sep2 when the template defines one, otherwise sep).
    eos_token_id = torch.LongTensor([tokenizer.eos_token_id])
    conv = conversation_lib.default_conversation
    split_str = conv.sep2 if conv.sep2 is not None else conv.sep
    sep_tokens_ids = tokenizer(split_str, return_tensors="pt").input_ids[0]
    stop_criteria_list = StoppingCriteriaList([StoppingCriteriaSub([eos_token_id, sep_tokens_ids])])

    os.makedirs(args.output_folder, exist_ok=True)
    caption_res = []

    args.tokenizer, args.image_processor, args.model_cfg = tokenizer, image_processor, model.config
    args.model_cfg.image_aspect_ratio = args.image_aspect_ratio
    eval_loader = create_data_loader(args)

    # Loop-invariant: Mamba checkpoints run in fp32, everything else in fp16.
    img_dtype = torch.float if 'mamba' in model_path else torch.float16

    for input_ids, img_tensor, img_size, img_id in tqdm(eval_loader, desc="COCO caption eval", total=len(eval_loader)):
        input_ids = input_ids.to(device=model.device, non_blocking=True)
        img = img_tensor.to(dtype=img_dtype, device=model.device, non_blocking=True)

        output_ids = model.generate(
            input_ids,
            images=img,
            image_sizes=img_size,
            max_new_tokens=args.max_new_tokens,
            num_beams=args.num_beams,
            do_sample=args.do_sample,
            min_length=args.min_length,
            top_p=args.top_p,
            top_k=args.top_k,
            repetition_penalty=args.repetition_penalty,
            length_penalty=args.length_penalty,
            temperature=args.temperature,
            stopping_criteria=stop_criteria_list,
            use_cache=True)

        # Keep only the first line before the separator (batch size is 1).
        img_cap = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].split(split_str)[0].strip().split('\n')[0].strip()
        caption_res.append({"image_id": img_id if isinstance(img_id, int) else img_id[0],
                            "caption": img_cap})

    # Write the results once at the end; `with` guarantees the handle is closed
    # even if serialization fails (the original held it open for the whole run).
    with open(Path(args.output_folder) / "caption_results_file.json", "w") as cap_res_f:
        json.dump(caption_res, cap_res_f)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--data-split", type=str, choices=("val", "test"), default="val")
    parser.add_argument("--prompt-file", type=str, default="llava/eval/coco_caption_prompt.txt")
    parser.add_argument("--model-path", type=str, default="state-spaces/mamba-130m-hf")
    parser.add_argument("--model-base", type=str, default=None)
    parser.add_argument("--vision-tower", type=str, default=None)
    parser.add_argument("--images-folder", type=str, default="")
    parser.add_argument("--output-folder", type=str, default="")
    parser.add_argument("--max-new-tokens", type=int, default=50)
    parser.add_argument("--num-beams", type=int, default=1)
    parser.add_argument("--do-sample", type=bool, default=False)
    parser.add_argument("--min-length", type=int, default=0)
    parser.add_argument("--top-p", type=float, default=1.0)
    parser.add_argument("--top-k", type=int, default=50)
    parser.add_argument("--repetition-penalty", type=float, default=1.0)
    parser.add_argument("--length-penalty", type=float, default=1.0)
    parser.add_argument("--temperature", type=float, default=1.0)
    parser.add_argument("--conv-mode", type=str, default="plain")
    parser.add_argument("--batch-size", type=int, default=1)
    parser.add_argument("--num-workers", type=int, default=4)
    parser.add_argument("--image-aspect-ratio", type=str, default="pad")
    args = parser.parse_args()
    if args.model_base.lower() == "none":
        args.model_base = None
    if args.vision_tower.lower() == "none":
        args.vision_tower = None

    eval_model(args)
