#本脚本提供了使用sam、groundingdino、sv将未标注的图像通过预推理的方法进行预标注的功能
#你可能会遇到的问题
#1 使用 sv.plot_images_grid报无法展开axis的错误，在定义sv.plot_images_grid的文件下找到
# fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=size)
# 并在下面添加
# axes = np.atleast_2d(axes)
# 2 sv.DetectionDataset 报无法进行xml格式化
# 在报错的文件目录下修改
# xml_string = fromstring(tostring(annotation)).toprettyxml(indent="  ")
# 并改成
# from xml.dom.minidom import parseString
# byte_string = tostring(annotation)
# dom = parseString(byte_string)
# xml_string = dom.toprettyxml(indent="  ")

import cv2
from segment_anything import sam_model_registry, SamPredictor
import numpy as np
import supervision as sv
from groundingdino.util.inference import Model
import math
from tqdm import tqdm

def enhance_class_name(class_names):
    """Turn each class name into an "all <name>s" phrase.

    GroundingDINO text prompts respond better to plural phrasing,
    e.g. "rope" becomes "all ropes".
    """
    return [f"all {name}s" for name in class_names]

def segment(sam_predictor, image, xyxy):
    """Run SAM on every box and keep only the highest-scoring mask per box.

    sam_predictor: a SamPredictor wrapping an already-loaded SAM model.
    image: image array handed to SamPredictor.set_image (callers pass RGB).
    xyxy: iterable of boxes in (x1, y1, x2, y2) form.

    Returns a numpy array stacking one mask per input box — the best of
    SAM's multimask candidates as ranked by its own scores.
    """
    sam_predictor.set_image(image)
    best_masks = []
    for candidate_box in xyxy:
        masks, scores, _ = sam_predictor.predict(
            box=candidate_box,
            multimask_output=True,
        )
        best_masks.append(masks[np.argmax(scores)])
    return np.array(best_masks)

def main():
    """Pre-label a directory of images end to end.

    Pipeline: detect boxes with GroundingDINO from text prompts, refine
    each box into a mask with SAM, preview originals vs. annotations in
    a grid, then export everything as Pascal VOC XML.
    """
    grounding_dino_model = Model(model_config_path="myconfig/GroundingDINO_config/GroundingDINO_SwinB_cfg.py",
                                 model_checkpoint_path="modelweight/groundingdino_swinb_cogcoor.pth")
    sam = sam_model_registry["vit_h"](checkpoint="modelweight/sam_vit_h.pth").to("cuda")
    sam_predictor = SamPredictor(sam)

    BOX_TRESHOLD = 0.25    # minimum box confidence kept by GroundingDINO
    TEXT_TRESHOLD = 0.25   # minimum text-phrase confidence
    CLASSES = ['rope']     # prompt classes; class_id below indexes this list
    IMAGES_DIRECTORY = "/home/luoluoluo/data/dataset/bk"
    IMAGES_EXTENSIONS = ['png']
    ANNOTATIONS_DIRECTORY = "/home/luoluoluo/data/dataset/bk"
    # Pascal VOC export filters (see sv.DetectionDataset.as_pascal_voc).
    MIN_IMAGE_AREA_PERCENTAGE = 0.002
    MAX_IMAGE_AREA_PERCENTAGE = 0.80
    APPROXIMATION_PERCENTAGE = 0.75

    images = {}        # image name -> BGR image as loaded by cv2
    annotations = {}   # image name -> sv.Detections with masks filled in

    image_paths = sv.list_files_with_extensions(
        directory=IMAGES_DIRECTORY,
        extensions=IMAGES_EXTENSIONS)

    for image_path in tqdm(image_paths):
        image_name = image_path.name
        image = cv2.imread(str(image_path))
        if image is None:
            # cv2.imread returns None for unreadable/corrupt files —
            # skip them instead of crashing inside the detector.
            continue

        detections = grounding_dino_model.predict_with_classes(
            image=image,
            classes=enhance_class_name(class_names=CLASSES),
            box_threshold=BOX_TRESHOLD,
            text_threshold=TEXT_TRESHOLD
        )
        # Element-wise filter on the class_id array: drop phrases that
        # GroundingDINO could not map to one of CLASSES (class_id is None).
        detections = detections[detections.class_id != None]
        # SAM expects RGB input; cv2 loads images as BGR.
        detections.mask = segment(
            sam_predictor=sam_predictor,
            image=cv2.cvtColor(image, cv2.COLOR_BGR2RGB),
            xyxy=detections.xyxy
        )
        images[image_name] = image
        annotations[image_name] = detections

    if not annotations:
        # Nothing to plot or export; plot_images_grid would otherwise be
        # called with a degenerate (0, 2) grid.
        print(f"No readable images with extensions {IMAGES_EXTENSIONS} "
              f"found in {IMAGES_DIRECTORY}")
        return

    # ——— Visualization: one row per image (original | annotated) ———
    plot_images = []
    plot_titles = []

    box_annotator = sv.BoxAnnotator()
    mask_annotator = sv.MaskAnnotator()

    for image_name, detections in annotations.items():
        image = images[image_name]
        plot_images.append(image)
        plot_titles.append(image_name)

        labels = [
            f"{CLASSES[class_id]} {confidence:0.2f}"
            for _, _, confidence, class_id, _, _
            in detections]
        annotated_image = mask_annotator.annotate(scene=image.copy(), detections=detections)
        annotated_image = box_annotator.annotate(scene=annotated_image, detections=detections, labels=labels)
        plot_images.append(annotated_image)
        # Row title: the distinct class names present in this image.
        title = " ".join(set([
            CLASSES[class_id]
            for class_id
            in detections.class_id
        ]))
        plot_titles.append(title)

    sv.plot_images_grid(
        images=plot_images,
        titles=plot_titles,
        grid_size=(len(annotations), 2),
        size=(2 * 4, len(annotations) * 4)
    )

    # Export all annotations as Pascal VOC XML next to the images.
    sv.DetectionDataset(images=images, classes=CLASSES, annotations=annotations).as_pascal_voc(
        annotations_directory_path=ANNOTATIONS_DIRECTORY,
        min_image_area_percentage=MIN_IMAGE_AREA_PERCENTAGE,
        max_image_area_percentage=MAX_IMAGE_AREA_PERCENTAGE,
        approximation_percentage=APPROXIMATION_PERCENTAGE
    )


# Run the full pre-labeling pipeline only when executed as a script.
if __name__ == "__main__":
    main()
