import os
import argparse
import json
import numpy as np
import cv2
import torch
import torchvision
# Grounding DINO
from groundingdino.util.inference import load_model, load_image, predict, annotate
from semantic_segment import get_file_in_dir, cxcywh_to_xyxy


def high_contrast_color():
    """Return a random, vivid BGR color as a list of three Python ints.

    A hue is drawn uniformly over OpenCV's 0-179 range, while saturation
    and value are kept in the upper half (150-255) so the resulting color
    stays bright and saturated, then the HSV triple is converted to BGR.
    """
    hue = np.random.randint(0, 180)    # OpenCV hue spans 0-179
    sat = np.random.randint(150, 256)  # high saturation -> vivid
    val = np.random.randint(150, 256)  # high value -> bright
    # cvtColor needs a (1, 1, 3) uint8 "image" holding the single pixel.
    hsv_pixel = np.uint8([[[hue, sat, val]]])
    bgr_pixel = cv2.cvtColor(hsv_pixel, cv2.COLOR_HSV2BGR)[0, 0]
    return [int(channel) for channel in bgr_pixel]


if __name__ == '__main__':
    parser = argparse.ArgumentParser("Dynamic object segmentation", add_help=True)
    parser.add_argument(
        "--input_dir", "-i", type=str, required=True, help="input directory"
    )
    parser.add_argument("--config", type=str, default="./GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py", help="path to config file")
    parser.add_argument(
        "--grounded_checkpoint", type=str, default="./groundingdino_swint_ogc.pth", help="path to checkpoint file"
    )
    parser.add_argument("--text_prompt", type=str, required=True, help="text prompt")
    parser.add_argument(
        "--output_dir", "-o", type=str, default="./outputs", help="output directory"
    )
    parser.add_argument("--box_threshold", type=float, default=0.3, help="box threshold")
    parser.add_argument("--text_threshold", type=float, default=0.25, help="text threshold")
    parser.add_argument(
        "--use_nms", action="store_true", help="use NMS to handle overlapped boxes"
    )
    parser.add_argument("--iou_threshold", type=float, default=0.5, help="iou threshold for NMS")

    parser.add_argument("--device", type=str, default="cpu", help="running on cpu only!, default=False")
    parser.add_argument("--bert_base_uncased_path", type=str, required=False, help="bert_base_uncased model path, default=False")
    args = parser.parse_args()

    # Unpack CLI arguments into locals for readability below.
    config_file = args.config  # path to the model config file
    grounded_checkpoint = args.grounded_checkpoint  # path to the model weights
    use_nms = args.use_nms
    text_prompt = args.text_prompt
    output_dir = args.output_dir
    box_threshold = args.box_threshold
    text_threshold = args.text_threshold
    iou_threshold = args.iou_threshold
    device = args.device
    # NOTE(review): bert_base_uncased_path is parsed but never consumed here —
    # confirm whether load_model is supposed to receive it.
    bert_base_uncased_path = args.bert_base_uncased_path

    # Output layout:
    #   <output_dir>/semantic/       per-pixel category-index masks (uint8 PNG)
    #   <output_dir>/debug/          frames blended with the colorized masks
    #   <output_dir>/debug/nothing/  frames where nothing was detected
    semantic_dir = os.path.join(output_dir, "semantic")
    debug_dir = os.path.join(output_dir, "debug")
    nothing_dir = os.path.join(debug_dir, "nothing")
    os.makedirs(semantic_dir, exist_ok=True)
    os.makedirs(debug_dir, exist_ok=True)
    os.makedirs(nothing_dir, exist_ok=True)

    # Initialize GroundingDINO on the requested device.  load_model defaults
    # to "cuda"; passing device keeps weights and inference (predict below)
    # on the same device, so --device cpu works without a GPU.
    model = load_model(config_file, grounded_checkpoint, device=device)

    # category name -> integer id; id 0 is reserved for the background.
    category_map = {'background': 0}
    # Parallel list: color_map[id] is the BGR color used for visualization.
    color_map = [[255, 255, 255]]
    image_paths = get_file_in_dir(args.input_dir, 'png')
    image_paths.sort()
    for image_path in image_paths:
        image_source, image = load_image(image_path)
        boxes_cxcywh, logits, phrases = predict(
            model=model,
            image=image,
            caption=text_prompt,
            box_threshold=box_threshold,
            text_threshold=text_threshold,
            device=device,
        )

        image_name = os.path.splitext(os.path.basename(image_path))[0]

        boxes_xyxy = cxcywh_to_xyxy(boxes_cxcywh, image_source.shape)
        if use_nms:
            # Suppress overlapping detections, keeping the highest-logit box.
            print(f"Before NMS: {boxes_xyxy.shape[0]} boxes")
            # .cpu() before .numpy() so the indices are usable even when
            # inference ran on a CUDA device (no-op on CPU tensors).
            nms_idx = (
                torchvision.ops.nms(boxes_xyxy, logits, iou_threshold)
                .cpu()
                .numpy()
                .tolist()
            )
            boxes_xyxy = boxes_xyxy[nms_idx]
            boxes_cxcywh = boxes_cxcywh[nms_idx]
            logits = logits[nms_idx]
            phrases = [phrases[idx] for idx in nms_idx]
            # Report the count *after* filtering (previously this printed the
            # pre-NMS count a second time).
            print(f"After NMS: {boxes_xyxy.shape[0]} boxes")

        annotated_frame = annotate(image_source=image_source, boxes=boxes_cxcywh, logits=logits, phrases=phrases)
        print(phrases)

        # No detections: keep the annotated frame aside for inspection and
        # skip mask generation for this image.
        if len(boxes_xyxy) == 0:
            cv2.imwrite(os.path.join(nothing_dir, image_name + ".png"), annotated_frame)
            continue

        # Merge all boxes into one label map.  Where boxes overlap, the box
        # with the larger logit wins, tracked per-pixel in merged_logits.
        semantic_mask = np.zeros(image_source.shape[:2], dtype=np.uint8)
        merged_logits = np.full(image_source.shape[:2], -np.inf)
        for idx, box in enumerate(boxes_xyxy):
            logit_value = logits[idx].item()
            label = phrases[idx]
            # Clamp the box to the image bounds before rasterizing it.
            x0, y0, x1, y1 = map(int, box)
            x0, y0 = max(0, x0), max(0, y0)
            x1, y1 = min(image_source.shape[1], x1), min(image_source.shape[0], y1)
            box_mask = np.zeros(image_source.shape[:2], dtype=np.uint8)
            box_mask[y0:y1, x0:x1] = 1
            # First time we see a phrase, assign it a fresh id and color.
            if label not in category_map:
                category_map[label] = len(category_map)
                color_map.append(high_contrast_color())
            # Pixels inside this box whose best logit so far is weaker.
            update_mask = np.logical_and(box_mask, logit_value > merged_logits)
            semantic_mask[update_mask] = category_map[label]
            merged_logits[update_mask] = logit_value

        # Colorize the label map and blend it onto the annotated frame;
        # background pixels keep the un-blended frame.
        mask_img = np.array(color_map, dtype=np.uint8)[semantic_mask]
        annotated_mask = cv2.addWeighted(annotated_frame, 0.7, mask_img, 0.3, 0)
        background_mask = (semantic_mask == 0)
        annotated_mask[background_mask] = annotated_frame[background_mask]
        cv2.imwrite(os.path.join(debug_dir, image_name + ".png"), annotated_mask)
        cv2.imwrite(os.path.join(semantic_dir, image_name + ".png"), semantic_mask)

    # Persist the name -> id mapping so downstream tools can decode the masks.
    with open(os.path.join(output_dir, 'category_map.json'), 'w') as f:
        json.dump(category_map, f)
