import os
import argparse
import json
import numpy as np
import cv2
import torch
import torchvision
from torchvision.ops import box_convert
# Grounding DINO
from groundingdino.util.inference import load_model, load_image, predict, annotate
# segment anything
from segment_anything import (
    sam_model_registry,
    sam_hq_model_registry,
    SamPredictor
)


def get_file_in_dir(dir_path, file_type):
    """Return full paths of all entries in *dir_path* whose name ends with *file_type*.

    Args:
        dir_path: directory to scan (non-recursive).
        file_type: filename suffix to match, e.g. ".png" or "png".

    Returns:
        List of joined ``dir_path/filename`` paths, in ``os.listdir`` order.
    """
    matches = []
    for entry in os.listdir(dir_path):
        if entry.endswith(file_type):
            matches.append(os.path.join(dir_path, entry))
    return matches


def cxcywh_to_xyxy(boxes, size):
    """Convert normalized (cx, cy, w, h) boxes to absolute (x1, y1, x2, y2) pixels.

    Args:
        boxes: (N, 4) tensor of boxes in normalized center-x, center-y,
            width, height format (values in [0, 1]).
        size: image size as (height, width, ...); only the first two
            entries are read (e.g. ``image.shape`` works).

    Returns:
        (N, 4) tensor of boxes in absolute xyxy pixel coordinates.
    """
    h, w = size[0], size[1]
    # Scale normalized coordinates to pixel space; match the input dtype
    # instead of the legacy torch.Tensor() float32-only constructor.
    scaled = boxes * torch.tensor([w, h, w, h], dtype=boxes.dtype)
    # cxcywh -> xyxy: corners are center +/- half the extent.
    cx, cy, bw, bh = scaled.unbind(-1)
    return torch.stack(
        [cx - bw / 2, cy - bh / 2, cx + bw / 2, cy + bh / 2], dim=-1
    )


if __name__ == '__main__':
    # CLI: detect objects with GroundingDINO from a text prompt, then segment
    # them with SAM, writing semantic/instance label masks per input image.
    parser = argparse.ArgumentParser("Dynamic object segmentation", add_help=True)
    parser.add_argument(
        "--input_dir", "-i", type=str, required=True, help="input directory"
    )
    parser.add_argument("--config", type=str, default="./GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py", help="path to config file")
    parser.add_argument(
        "--grounded_checkpoint", type=str, default="./groundingdino_swint_ogc.pth", help="path to checkpoint file"
    )
    parser.add_argument(
        "--sam_version", type=str, default="vit_h", required=False, help="SAM ViT version: vit_b / vit_l / vit_h"
    )
    parser.add_argument(
        "--sam_checkpoint", type=str, default="./sam_vit_h_4b8939.pth", help="path to sam checkpoint file"
    )
    parser.add_argument(
        "--sam_hq_checkpoint", type=str, default="./sam_vit_h_4b8939_hq.pth", help="path to sam-hq checkpoint file"
    )
    parser.add_argument(
        "--use_sam_hq", action="store_true", help="using sam-hq for prediction"
    )
    parser.add_argument("--text_prompt", type=str, required=True, help="text prompt")
    parser.add_argument(
        "--output_dir", "-o", type=str, default="./outputs", help="output directory"
    )
    parser.add_argument("--box_threshold", type=float, default=0.3, help="box threshold")
    parser.add_argument("--text_threshold", type=float, default=0.25, help="text threshold")
    parser.add_argument(
        "--use_nms", action="store_true", help="use NMS to handle overlapped boxes"
    )
    parser.add_argument("--iou_threshold", type=float, default=0.5, help="iou threshold for NMS")

    parser.add_argument("--device", type=str, default="cpu", help="running on cpu only!, default=False")
    parser.add_argument("--bert_base_uncased_path", type=str, required=False, help="bert_base_uncased model path, default=False")
    args = parser.parse_args()
    # cfg
    config_file = args.config  # change the path of the model config file
    grounded_checkpoint = args.grounded_checkpoint  # change the path of the model
    sam_version = args.sam_version
    sam_checkpoint = args.sam_checkpoint
    sam_hq_checkpoint = args.sam_hq_checkpoint
    use_sam_hq = args.use_sam_hq
    use_nms = args.use_nms
    text_prompt = args.text_prompt
    output_dir = args.output_dir
    box_threshold = args.box_threshold
    text_threshold = args.text_threshold
    iou_threshold = args.iou_threshold
    device = args.device
    bert_base_uncased_path = args.bert_base_uncased_path

    # Output layout:
    #   semantic/  - per-pixel category-id masks
    #   instance/  - per-pixel instance-id masks
    #   debug/     - annotated frames blended with the foreground mask
    #   debug/nothing/ - frames where detection found no boxes
    semantic_dir = output_dir + "/semantic"
    instance_dir = output_dir + "/instance"
    debug_dir = output_dir + "/debug"
    nothing_dir = debug_dir + "/nothing"
    # make sure the output directory exists
    os.makedirs(semantic_dir, exist_ok=True)
    os.makedirs(instance_dir, exist_ok=True)
    os.makedirs(debug_dir, exist_ok=True)
    os.makedirs(nothing_dir, exist_ok=True)

    # initialize GroundingDINO
    model = load_model(config_file, grounded_checkpoint)
    # initialize SAM (optionally the HQ variant)
    if use_sam_hq:
        predictor = SamPredictor(sam_hq_model_registry[sam_version](checkpoint=sam_hq_checkpoint).to(device))
    else:
        predictor = SamPredictor(sam_model_registry[sam_version](checkpoint=sam_checkpoint).to(device))

    # predict each image in the input directory
    category_map = {'background': 0}  # maps phrase label -> category id; 0 reserved for background
    image_paths = get_file_in_dir(args.input_dir, 'png')
    image_paths.sort()
    for image_path in image_paths:
        image_source, image = load_image(image_path)
        boxes_cxcywh, logits, phrases = predict(
            model=model,
            image=image,
            caption=text_prompt,
            box_threshold=box_threshold,
            text_threshold=text_threshold,
            device=device,
        )

        image_name = os.path.splitext(os.path.basename(image_path))[0]

        boxes_xyxy = cxcywh_to_xyxy(boxes_cxcywh, image_source.shape)
        if use_nms:
            # use NMS to handle overlapped boxes
            print(f"Before NMS: {boxes_xyxy.shape[0]} boxes")
            nms_idx = (
                torchvision.ops.nms(boxes_xyxy, logits, iou_threshold).numpy().tolist()
            )
            boxes_xyxy = boxes_xyxy[nms_idx]
            boxes_cxcywh = boxes_cxcywh[nms_idx]
            logits = logits[nms_idx]
            phrases = [phrases[idx] for idx in nms_idx]
            # BUG FIX: report the count AFTER filtering; previously this printed
            # before indexing with nms_idx, so it always equaled the pre-NMS count.
            print(f"After NMS: {boxes_xyxy.shape[0]} boxes")

        annotated_frame = annotate(image_source=image_source, boxes=boxes_cxcywh, logits=logits, phrases=phrases)
        print(phrases)

        # No detections: save the (box-free) annotated frame for inspection and skip.
        if len(boxes_xyxy) == 0:
            cv2.imwrite(os.path.join(nothing_dir, image_name + ".png"), annotated_frame)
            continue

        # SAM expects boxes transformed into its input resolution.
        transformed_boxes = predictor.transform.apply_boxes_torch(boxes_xyxy, image_source.shape[:2])
        predictor.set_image(np.asarray(image_source))
        masks, ious, low_res_masks = predictor.predict_torch(
            point_coords=None,
            point_labels=None,
            boxes=transformed_boxes.to(device),
            multimask_output=False,
        )

        # Merge per-box masks into single semantic/instance label images.
        # Where masks overlap, the detection with the higher logit wins.
        # NOTE(review): uint8 masks cap category/instance ids at 255 — values
        # wrap beyond that; confirm the expected object count stays below this.
        semantic_mask = np.zeros(masks.shape[-2:], dtype=np.uint8)
        instance_mask = np.zeros(masks.shape[-2:], dtype=np.uint8)
        merged_logits = np.full(masks.shape[-2:], -np.inf)
        for idx, mask in enumerate(masks):
            mask_np = mask.cpu().numpy()[0]
            logit_value = logits[idx].item()
            label = phrases[idx]
            if label not in category_map:
                # First time this phrase is seen: assign the next category id.
                category_map[label] = len(category_map)
            # Find positions where the current mask is True and logits are larger
            update_mask = np.logical_and(mask_np, logit_value > merged_logits)
            # Update the merged mask and corresponding logits
            semantic_mask[update_mask] = category_map[label]
            instance_mask[update_mask] = idx + 1  # instance ids start at 1; 0 = background
            merged_logits[update_mask] = logit_value
        # Convert the merged mask to an RGB image for the debug overlay.
        mask_img = np.zeros((masks.shape[-2], masks.shape[-1], 3), dtype=np.uint8)
        mask_img[semantic_mask > 0] = [0, 0, 255]  # presumably BGR red for cv2.imwrite — verify channel order
        annotated_mask = cv2.addWeighted(annotated_frame, 0.7, mask_img, 0.3, 0)
        cv2.imwrite(os.path.join(debug_dir, image_name + ".png"), annotated_mask)
        cv2.imwrite(os.path.join(semantic_dir, image_name + ".png"), semantic_mask)
        cv2.imwrite(os.path.join(instance_dir, image_name + ".png"), instance_mask)

    # Persist the label -> id mapping so downstream consumers can decode masks.
    with open(os.path.join(output_dir, 'category_map.json'), 'w') as f:
        json.dump(category_map, f)
