import multiprocessing
import PIL
import cv2
import numpy as np
import supervision as sv
import torch
from process_utils.fastsam import FastSAM
from PIL import Image
class Annotator:
    """Segment an image with FastSAM and expose per-object masks, centers,
    and an index-labelled annotated image for downstream object selection.

    Typical usage: call ``run_sam_model`` once per frame, then query
    ``get_centers`` / ``get_place_center`` / ``get_mask_image`` /
    ``get_annotated_image``.
    """

    # Detections whose mask covers less/more than this fraction of the whole
    # image are dropped (filters out tiny specks and near-full-frame masks).
    # Earlier, looser thresholds kept for reference:
    # MIN_AREA_PERCENTAGE = 0.005
    # MAX_AREA_PERCENTAGE = 0.05
    MIN_AREA_PERCENTAGE = 0.025
    MAX_AREA_PERCENTAGE = 0.25

    # Default checkpoint location; override via ``model_path`` in __init__.
    DEFAULT_MODEL_PATH = "/home/lwc/python_script/URgrasp_agent/UR/URgrasp_agent/process_utils/sam_pth/FastSAM_X.pt"

    def __init__(self,
                 device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu"),
                 model_path: str = DEFAULT_MODEL_PATH):
        """Load the FastSAM checkpoint.

        Args:
            device: torch device for inference (default: first CUDA device if
                available, else CPU).
            model_path: path to the FastSAM ``.pt`` checkpoint; defaults to
                the historical hard-coded location for backward compatibility.
        """
        self.model = FastSAM(model_path)
        self.device = device
        self.image = None       # last image passed to run_sam_model
        self.detections = None  # sv.Detections after area filtering

    def run_sam_model(self, image) -> str:
        """Segment ``image`` with FastSAM and cache the filtered detections.

        Args:
            image: RGB image as a numpy array (H, W, 3) or a PIL Image.
                (The original only accepted an ndarray; a PIL Image made
                ``Image.fromarray`` raise.)

        Returns:
            The status string "sam already init".
        """
        # Accept both ndarray and PIL input: keep an ndarray copy for
        # annotation and a PIL handle for the model call.
        if isinstance(image, Image.Image):
            pil_image = image
            self.image = np.asarray(image)
        else:
            self.image = image
            pil_image = Image.fromarray(image)
        image_rgb = pil_image.convert("RGB")
        sam_result = self.model(
            image_rgb,
            device=self.device,
            retina_masks=True,
            imgsz=1024,
            conf=0.4,
            iou=0.9
        )
        sam_detections = sv.Detections.from_ultralytics(sam_result[0])
        # PIL's Image.size is (width, height) -- the original unpacked it as
        # (height, width). Harmless for the area product, but misleading.
        width, height = image_rgb.size
        image_area = width * height

        # Keep only detections within the configured area band.
        area_fraction = sam_detections.area / image_area
        keep = ((area_fraction > self.MIN_AREA_PERCENTAGE)
                & (area_fraction < self.MAX_AREA_PERCENTAGE))
        self.detections = sam_detections[keep]
        return "sam already init"

    def get_centers(self) -> list:
        """Return the [x, y] integer centroid of every detection mask."""
        centers = []
        for mask in self.detections.mask:
            # Mean of the foreground pixel coordinates = mask centroid.
            y_indices, x_indices = np.nonzero(mask)
            center = np.round([np.mean(x_indices), np.mean(y_indices)]).astype(int)
            centers.append(center.tolist())
        return centers

    def get_place_center(self, object_no: int) -> list:
        """Return the [x, y] centroid of detection ``object_no`` (the index
        drawn as the label by get_annotated_image)."""
        return self.get_centers()[object_no]

    def get_annotated_image(self) -> np.ndarray:
        """Draw mask polygons plus numeric index labels on the cached image.

        NOTE(review): the final cvtColor swaps the R and B channels, so the
        output's channel order is the opposite of whatever ``self.image``
        used (RGB in -> BGR out, suitable for cv2.imwrite). The original
        docstring claimed the reverse -- confirm the convention with callers.
        """
        # Set up annotators: polygon outlines plus centered index labels,
        # both colored per detection index.
        polygon_annotator = sv.PolygonAnnotator(thickness=2, color_lookup=sv.ColorLookup.INDEX)
        label_annotator = sv.LabelAnnotator(
            color_lookup=sv.ColorLookup.INDEX,
            text_position=sv.Position.CENTER,
            text_scale=0.5,
            text_color=sv.Color.WHITE,
            color=sv.Color.BLACK,
            text_thickness=1,
            text_padding=2,
        )

        # Labels are just the detection indices, matching get_place_center
        # and get_mask_image indexing.
        labels = [str(i) for i in range(len(self.detections))]
        annotated_img = polygon_annotator.annotate(scene=self.image.copy(),
                                                   detections=self.detections)
        annotated_img = label_annotator.annotate(scene=annotated_img,
                                                 detections=self.detections,
                                                 labels=labels)
        annotated_img = np.asarray(annotated_img)
        annotated_img = cv2.cvtColor(annotated_img, cv2.COLOR_RGB2BGR)
        return annotated_img

    def get_mask_image(self, object_no: int) -> np.ndarray:
        """Return detection ``object_no``'s mask as a uint8 image (0 / 255)."""
        mask = self.detections.mask[object_no]  # shape [H, W], boolean
        binary_mask = (mask > 0).astype(np.uint8) * 255  # map True/False to 255/0
        return binary_mask



if __name__ == "__main__":
    import time

    start_time = time.time()
    # run_sam_model calls Image.fromarray on its input, so hand it a numpy
    # array: passing the raw PIL Image (as the original did) makes
    # Image.fromarray raise.
    image = np.asarray(Image.open("./data/color.jpg"))
    anno = Annotator()
    print(anno.run_sam_model(image))

    # Render the index-labelled overlay. Uncomment the imwrite below (and use
    # get_mask_image per index) to dump results to disk while debugging.
    annotated_image = anno.get_annotated_image()
    # cv2.imwrite("./data/tool_mask.png", annotated_image)
    end_time = time.time()
    print(f"代码执行时间{end_time - start_time}")
