import argparse
import json
import os
import random
import re
from collections import Counter
from pathlib import Path
from typing import Dict, List

from tqdm import tqdm

from llava.constants import DEFAULT_IMAGE_TOKEN


if __name__ == "__main__":
    # Convert a karpathy-split COCO caption annotation file into the LLaVA
    # conversation-style JSON expected by LLaVA training/eval scripts.
    parser = argparse.ArgumentParser(description="transform the karpathy split coco annotation file into the LLaVA style")
    parser.add_argument("--data-split", default="train", type=str, choices=["train", "val", "test"],
                        help="dataset split of karpathy split dataset")
    args = parser.parse_args()

    # Dataset layout is assumed to live under $HOME/datasets — TODO confirm
    # against the repo's data-preparation docs.
    coco_karpathy_split_dir = Path.home() / "datasets" / "coco_karpathy_split"
    imgs_dir = coco_karpathy_split_dir / "images" / args.data_split
    # Explicit raises instead of `assert`: asserts vanish under `python -O`,
    # and these are genuine input-validation checks.
    if not imgs_dir.exists():
        raise FileNotFoundError(f"coco karpathy split image directory {imgs_dir} doesn't exist")
    native_anns_path = coco_karpathy_split_dir / "annotations" / f"coco_karpathy_{args.data_split}_gt.json"
    if not native_anns_path.exists():
        raise FileNotFoundError(f"coco karpathy split annotation file {native_anns_path} doesn't exist")

    all_imgs_names = os.listdir(imgs_dir)
    llava_anns_path = Path.home() / "datasets" / "LLaVA-Instruct-150K" / f"coco_karpathy_split_{args.data_split}_llava.json"
    with open(native_anns_path, "r") as f:
        native_infos = json.load(f)
    native_anns: List[Dict] = native_infos["annotations"]
    native_imgs_ids: List[int] = [img_id_info["id"] for img_id_info in native_infos["images"]]
    if len(native_imgs_ids) != len(all_imgs_names):
        raise ValueError("number of images must be equal to image info in the native annotation file")

    # Sanity check: every on-disk file name must decode to an image id listed
    # in the annotation file, consuming each listed id exactly once.
    # BUGFIX: the original stripped the extension with re.sub(r".jpg", ...) —
    # the unescaped, unanchored '.' matches ANY character, so e.g. a name
    # containing "xjpg" mid-string would be mangled. A single anchored pattern
    # extracts the id safely. A Counter replaces the original O(n) `in` test
    # plus `list.remove` per file (O(n^2) overall) with an O(n) multiset match.
    name_pat = re.compile(r"COCO_(train|val)2014_0*(\d+)\.jpg")
    remaining_ids = Counter(native_imgs_ids)
    for img_name in tqdm(all_imgs_names):
        m = name_pat.fullmatch(img_name)
        if m is None:
            raise ValueError(f"unexpected image file name: {img_name}")
        id_in_img_name = int(m.group(2))
        if remaining_ids[id_in_img_name] <= 0:
            raise ValueError(f"id in image name: {id_in_img_name}")
        remaining_ids[id_in_img_name] -= 1
        if remaining_ids[id_in_img_name] == 0:
            del remaining_ids[id_in_img_name]
    if remaining_ids:
        raise ValueError("images' name mismatch with images' information in the native annotation file")

    # Build the set once so the per-annotation membership tests below are O(1)
    # (the original tested against the list, O(n) per annotation).
    imgs_names_set = set(all_imgs_names)

    llava_anns = []
    for native_ann in tqdm(native_anns):
        # COCO image ids are zero-padded to 12 digits in the file names.
        complete_image_id = f"{native_ann['image_id']:012}"
        # The karpathy train split mixes files originating from both the
        # train2014 and val2014 folders; the val/test splits contain only
        # val2014 files, so they skip the train2014 lookup.
        train_candidate = f"COCO_train2014_{complete_image_id}.jpg"
        if args.data_split == "train" and train_candidate in imgs_names_set:
            complete_image_name = train_candidate
        else:
            complete_image_name = f"COCO_val2014_{complete_image_id}.jpg"
        if complete_image_name not in imgs_names_set:
            raise ValueError(f"Misalignment for the complete image id: {complete_image_id}")
        llava_anns.append({
            "id": complete_image_id,
            "image": f"coco_karpathy_split/images/{args.data_split}/{complete_image_name}",
            "conversations": [
                {"from": "human", "value": DEFAULT_IMAGE_TOKEN},
                {"from": "gpt", "value": native_ann["caption"]},
            ],
        })

    # Shuffle so downstream consumers don't see captions grouped by image.
    random.shuffle(llava_anns)
    with open(llava_anns_path, "w") as f:
        json.dump(llava_anns, f)
