import argparse
import contextlib
import json
import os
import platform
import sys
import traceback
from datetime import datetime

import numpy as np
import torch
from sam2.build_sam import build_sam2
from sam2.sam2_image_predictor import SAM2ImagePredictor
from skimage import measure

# Make the directory two levels up importable so the local `mq` package
# resolves no matter which directory this script is launched from.
parent_path = os.path.abspath(os.path.join(
    os.path.dirname(__file__), '..', '..'))
sys.path.insert(0, parent_path)
from mq.worker import BaseWorker

class Sam2Worker(BaseWorker):
    """MQ worker serving SAM2 (Segment Anything 2) segmentation requests.

    Accepts tasks carrying an image path plus one of three prompt kinds
    (points, bounding boxes, or a mask) and returns bounding boxes,
    polygon segmentations, labels and confidence scores.
    """

    def __init__(self, mq_type, mq_address, node_server_address):
        """
        :param mq_type: message-queue backend identifier (e.g. "zmq", "redis").
        :param mq_address: message-queue connection URL.
        :param node_server_address: address of the LabelApp Node.js service.
        """
        super().__init__('vision:detection:Sam2Worker', mq_type, mq_address)
        self.client.setCustomInfo({
            "worker": "Sam2Worker",
            "version": "1.0.0"
        })
        # The model is loaded lazily in start() so constructing the worker
        # does not touch the GPU or the filesystem.
        self.predictor = None
        self.node_server_address = node_server_address

    def start(self):
        """Initialize the SAM2 model, then start the MQ service loop."""
        # Pick the best available device: CUDA > Apple MPS > CPU.
        if torch.cuda.is_available():
            device = torch.device("cuda")
        elif platform.system() == "Darwin" and torch.backends.mps.is_available():
            device = torch.device("mps")
            # Make sure no stray CUDA initialization happens on non-CUDA paths.
            os.environ['CUDA_VISIBLE_DEVICES'] = ''
        else:
            device = torch.device("cpu")
            os.environ['CUDA_VISIBLE_DEVICES'] = ''
        print(f"Using device: {device}")
        self.device = device

        # Model config and checkpoint paths, resolved relative to the CWD.
        checkpoint = os.path.join("./", "checkpoints/sam2.1_hiera_large.pt")
        model_cfg = os.path.join("./", "configs/sam2.1/sam2.1_hiera_l.yaml")

        # Build the SAM2 image predictor.
        self.predictor = SAM2ImagePredictor(
            build_sam2(model_cfg, checkpoint, device=device)
        )
        print("SAM2 model initialized.")
        print(
            f"Model device: {next(self.predictor.model.parameters()).device}")

        # Start the local MQ service loop (blocks).
        super().start()

    def mask_to_polygon(self, mask):
        """
        Convert a binary mask into a list of polygons.

        :param mask: 2-D binary mask (0 = background, 1 = foreground).
        :return: list of polygons, each a list of vertex coordinates.
                 NOTE: skimage's find_contours yields (row, col) order,
                 so vertices are (y, x) — confirm consumers expect that.
        """
        # Trace the 0.5 iso-level contours of the mask.
        contours = measure.find_contours(mask, 0.5)
        polygons = []
        for contour in contours:
            # Simplify the polygon to reduce the vertex count.
            contour = measure.approximate_polygon(contour, tolerance=1.0)
            polygons.append(contour.tolist())
        return polygons

    def convert_to_python_types(self, obj):
        """Recursively convert NumPy scalars/arrays into native Python types
        so the result is JSON-serializable."""
        # np.integer / np.floating cover every NumPy scalar width (int8..64,
        # float16..64), generalizing the original int32/64 + float32/64 check.
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        elif isinstance(obj, dict):
            return {key: self.convert_to_python_types(value) for key, value in obj.items()}
        elif isinstance(obj, list):
            return [self.convert_to_python_types(item) for item in obj]
        return obj

    def process(self, message):
        """
        Handle one annotation task; supports point, box and mask prompts.

        :param message: dict with 'taskId' and 'inputs' (image path + prompt
                        fields, possibly JSON-encoded strings).
        :return: dict with 'taskId' and a 'result' holding bboxes, labels,
                 segs (polygons) and scores; lists stay empty on failure.
        """
        # Pre-built response skeleton: always returned, even on error.
        result = {
            "taskId": None,
            "result": {
                "bboxes": [],
                "labels": [],
                "segs": [],
                "scores": [],
            }
        }
        try:
            taskId = message['taskId']
            result['taskId'] = taskId

            # Log receipt with a timestamp.
            input_data = message['inputs']
            timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            print(f"[{timestamp}][Sam2Worker][{taskId}] process: ",
                  self.truncate_message(message))

            # Extract the image path and prompt data from the input.
            image_path = input_data.get("image_path")

            # Point prompt may arrive as a JSON string; decode defensively.
            point_coords = input_data.get("points_prompt", None)
            if isinstance(point_coords, str):
                try:
                    point_coords = json.loads(point_coords)
                except json.JSONDecodeError:
                    point_coords = None
            # All points are treated as foreground clicks (label 1).
            point_labels = [1 for _ in point_coords] if point_coords else None

            # Box prompt may also arrive as a JSON string.
            box_coords = input_data.get("bboxes_prompt", None)
            if isinstance(box_coords, str):
                try:
                    box_coords = json.loads(box_coords)
                except json.JSONDecodeError:
                    box_coords = None
            if box_coords is not None:
                # Convert [x, y, w, h] boxes to [x1, y1, x2, y2] in place.
                for box_coord in box_coords:
                    box_coord[2] = box_coord[0]+box_coord[2]
                    box_coord[3] = box_coord[1]+box_coord[3]

            mask_prompt = input_data.get("mask_prompt", None)

            # Fetch the image (inherited helper) and convert to an ndarray.
            image = self.fetch_image(image_path)
            image = np.array(image)

            # Convert the mask prompt from JSON-compatible lists to ndarray.
            if mask_prompt is not None:
                mask_prompt = np.array(mask_prompt, dtype=np.uint8)

            # Run SAM2. bfloat16 autocast is only meaningful on CUDA; on the
            # MPS/CPU devices that start() explicitly supports, hard-coding
            # torch.autocast("cuda", ...) was wrong — skip autocast instead.
            autocast_ctx = (
                torch.autocast("cuda", dtype=torch.bfloat16)
                if self.device.type == "cuda"
                else contextlib.nullcontext()
            )
            with torch.inference_mode(), autocast_ctx:
                self.predictor.set_image(image)
                if point_coords is not None and point_labels is not None:
                    # Point prompt.
                    masks, scores, _ = self.predictor.predict(
                        point_coords=np.array(point_coords),
                        point_labels=np.array(point_labels),
                        multimask_output=True,
                    )
                    print('point_prompt = ', point_coords)
                    print('point_prompt\'s masks.shape', masks.shape)
                elif box_coords is not None:
                    # Box prompt.
                    masks, scores, _ = self.predictor.predict(
                        box=np.array(box_coords),
                        multimask_output=False,
                    )
                    print('bboxes_prompt = ', box_coords)
                    print('bboxes_prompt\'s masks.shape', masks.shape)
                elif mask_prompt is not None:
                    # Mask prompt. NOTE(review): SAM2's mask_input is expected
                    # to be a low-resolution mask logit — confirm callers send
                    # the right resolution.
                    masks, scores, _ = self.predictor.predict(
                        mask_input=mask_prompt,
                        multimask_output=True,
                    )
                else:
                    raise ValueError(
                        "No valid prompt provided (point, box, or mask).")

            # Convert predictions to bounding boxes, polygons and labels.
            bbox_list = []
            polygons_list = []
            scores_list = []
            labels_list = []

            # Multi-box case: flatten (N_boxes, N_masks, H, W) to
            # (N_boxes * N_masks, H, W) with matching scores.
            if masks.ndim == 4:
                masks = masks.reshape(-1, *masks.shape[-2:])
                scores = scores.reshape(-1)

            for mask, score in zip(masks, scores):
                # Only 2-D masks are processable; skip anything else.
                if mask.ndim != 2:
                    continue
                print('mask.shape = ', mask.shape)

                # Derive the tight bounding box from the mask's support.
                pos = np.where(mask)
                if len(pos[0]) > 0 and len(pos[1]) > 0:  # skip empty masks
                    xmin = np.min(pos[1])
                    xmax = np.max(pos[1])
                    ymin = np.min(pos[0])
                    ymax = np.max(pos[0])
                    bbox = [xmin, ymin, xmax, ymax]
                    bbox_list.append(bbox)

                    # Polygonize the mask for the "segs" output.
                    polygons = self.mask_to_polygon(mask)
                    polygons_list.append(polygons)

                    # Collect the confidence score.
                    scores_list.append(float(score))

                    # No classifier here: every detection gets a default label.
                    labels_list.append("object")

            # Convert NumPy values to native Python types for serialization.
            bbox_list = self.convert_to_python_types(bbox_list)
            polygons_list = self.convert_to_python_types(polygons_list)
            scores_list = self.convert_to_python_types(scores_list)
            labels_list = self.convert_to_python_types(labels_list)
            print('bbox_list = ', bbox_list, ', scores_list = ', scores_list)

            # Populate the response.
            result['result']['bboxes'] = bbox_list
            result['result']['labels'] = labels_list
            result['result']['segs'] = polygons_list
            result['result']['scores'] = scores_list
        except Exception as e:
            # Best-effort contract: log the full traceback but still return
            # the well-formed (empty) result so callers get a response.
            print(f"[Error] processing image: {e}")
            traceback.print_exc()

        # Return the (possibly empty) result.
        return result


def main():
    """Resolve configuration (CLI flags > env vars > defaults) and run the worker."""
    # Built-in fallbacks used when neither flags nor environment provide a value.
    DEFAULT_MQ_TYPE = "zmq"
    DEFAULT_MQ_URLS = {
        "zmq": "tcp://127.0.0.1:5555",
        "redis": "redis://localhost:6379/0"
    }
    DEFAULT_LABELAPP_URL = "http://127.0.0.1:3000"

    # Environment variables override the built-in defaults.
    env_mq_type = os.getenv("MQ_TYPE", DEFAULT_MQ_TYPE)
    env_mq_url = os.getenv("MQ_URL", DEFAULT_MQ_URLS.get(
        env_mq_type, DEFAULT_MQ_URLS["zmq"]))
    env_labelapp_url = os.getenv("LABELAPP_URL", DEFAULT_LABELAPP_URL)

    # Command-line flags override everything else.
    parser = argparse.ArgumentParser(description="SAM2 Worker 启动脚本")
    parser.add_argument("--mq-type", type=str, choices=["zmq", "redis"], default=env_mq_type,
                        help="消息队列类型，默认为环境变量 MQ_TYPE 或 zmq")
    parser.add_argument("--mq-url", type=str, default=env_mq_url,
                        help="消息队列地址，默认为环境变量 MQ_URL 或根据 mq-type 选择默认值")
    parser.add_argument("--labelapp-url", type=str, default=env_labelapp_url,
                        help="LabelApp Node.js 服务地址，默认为环境变量 LABELAPP_URL")
    args = parser.parse_args()

    # Echo the effective configuration before starting.
    print(f"[mq_type] = {args.mq_type}")
    print(f"[mq_url] = {args.mq_url}")
    print(f"[labelapp_url] = {args.labelapp_url}")

    # Build the worker and enter its service loop (blocks inside start()).
    Sam2Worker(args.mq_type, args.mq_url, args.labelapp_url).start()


# Run the worker only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
