import argparse
import os
import sys
import traceback
from datetime import datetime
from io import BytesIO
from urllib.parse import urlparse, unquote

import numpy as np
import requests
import torch
import torch.nn.functional as F
import torchvision
from PIL import Image
from skimage import measure

from detectron2.config import get_cfg
from projects.GLEE.glee.models.glee_model import GLEE_Model
from projects.GLEE.glee.config import add_glee_config

parent_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.insert(0, parent_path)
from mq.worker import BaseWorker

class GLEEWorker(BaseWorker):
    """Message-queue worker that runs GLEE open-set object detection.

    Supports three prompt modes: a category list ('categories'), a free-text
    referring expression ('expression'), and a spatial point/box prompt
    ('visual'). Results are returned as bounding boxes, label names,
    polygonal segmentations and confidence scores.
    """

    def __init__(self, mq_type, address, node_server_address):
        super().__init__('vision:detection', mq_type, address)
        self.client.setCustomInfo({
            "worker": "GLEEWorker",
            "version": "1.0.0"
        })

        self.model = None
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Detectron2-style per-channel pixel statistics on the 0-255 scale.
        self.pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).to(self.device).view(3, 1, 1)
        self.pixel_std = torch.Tensor([58.395, 57.12, 57.375]).to(self.device).view(3, 1, 1)
        self.inference_size = 800  # shorter-edge target of the resize transform
        self.size_divisibility = 32
        self.model_type = None
        self.coco_class_name = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']
        self.class_agnostic_name = ['object']
        self.node_server_address = node_server_address

    def start(self, model_type='lite'):
        """Initialize the GLEE model and start consuming tasks.

        :param model_type: which config/checkpoint pair to load, 'lite' or 'plus'.
        :raises ValueError: if model_type is neither 'lite' nor 'plus'.
        """
        self.model_type = model_type
        if model_type == 'lite':
            config_file = "./configs/images/Lite/Stage2_joint_training_CLIPteacher_R50.yaml"
            checkpoint_path = "./checkpoints/GLEE_Lite_joint.pth"
        elif model_type == 'plus':
            config_file = "./configs/images/Plus/Stage2_joint_training_CLIPteacher_SwinL.yaml"
            checkpoint_path = "./checkpoints/GLEE_Plus_joint.pth"
        else:
            raise ValueError("Invalid model type. Choose 'lite' or 'plus'.")

        self.model = self.load_model(config_file, checkpoint_path)
        print(f"GLEE {model_type} model initialized.")
        super().start()

    def load_model(self, model_config_path, model_checkpoint_path):
        """Build a GLEE_Model from a detectron2 config and load its checkpoint.

        :param model_config_path: path to the model's YAML config file.
        :param model_checkpoint_path: path to the .pth checkpoint file.
        :return: the model in eval mode on self.device.
        """
        cfg = get_cfg()
        add_glee_config(cfg)
        cfg.merge_from_file(model_config_path)
        model = GLEE_Model(cfg, None, self.device, None, True).to(self.device)
        checkpoint = torch.load(model_checkpoint_path, map_location="cpu")
        # Checkpoint keys carry a 'glee.' prefix; strip it so they match the
        # bare model. strict=False tolerates any remaining key mismatches.
        model.load_state_dict({k.replace('glee.', ''): v for k, v in checkpoint.items()}, strict=False)
        model.eval()
        return model

    def load_image(self, image_path):
        """Fetch an image and preprocess it for inference.

        :param image_path: path/URL understood by BaseWorker.fetch_image.
        :return: (original PIL image, normalized 1x3xHxW tensor on self.device).
        """
        image_pil = self.fetch_image(image_path)
        transform = torchvision.transforms.Compose([
            torchvision.transforms.Resize(self.inference_size),
            torchvision.transforms.ToTensor(),
            # ImageNet normalization on the 0-1 scale.
            torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])
        image = transform(image_pil).unsqueeze(0).to(self.device)
        return image_pil, image

    def mask_to_polygon(self, mask):
        """Convert a binary mask to a list of simplified polygons.

        :param mask: 2-D binary mask (0 = background, 1 = foreground).
        :return: list of polygons, each a list of [row, col] vertices.
        """
        # Trace the mask's contours at the 0.5 level.
        contours = measure.find_contours(mask, 0.5)
        polygons = []
        for contour in contours:
            # Simplify each contour to reduce the vertex count.
            contour = measure.approximate_polygon(contour, tolerance=1.0)
            polygons.append(contour.tolist())
        return polygons

    def process(self, message):
        """Run one open-set detection task through the GLEE model.

        :param message: dict with 'taskId' and 'inputs' (image_path plus one of
            categories / text_prompt / points_prompt / bboxes_prompt).
        :return: dict with 'taskId' and a 'result' dict holding bboxes, labels,
            segs (polygons) and scores; lists stay empty on failure.
        """
        result = {
            "taskId": None,
            "result": {
                "bboxes": [],
                "labels": [],
                "segs": [],
                "scores": [],
            }
        }
        try:
            taskId = message['taskId']
            result['taskId'] = taskId

            input_data = message['inputs']
            timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            # FIX: log tag previously said 'GroundingDinoWorker'.
            print(f"[{timestamp}][GLEEWorker][{taskId}] process: ", input_data)

            # Extract the inputs.
            image_path = input_data.get("image_path")
            categories = input_data.get("categories")
            text_prompt = input_data.get("text_prompt")
            points_prompt = input_data.get("points_prompt")
            bboxes_prompt = input_data.get("bboxes_prompt")

            # Determine task type and prompt; default to class-agnostic detection.
            if categories:
                task_type = 'categories'
                prompt = categories
            elif text_prompt:
                task_type = 'expression'
                prompt = text_prompt
            elif points_prompt or bboxes_prompt:
                task_type = 'visual'
                prompt = points_prompt if points_prompt else bboxes_prompt
            else:
                task_type = 'categories'
                prompt = 'object'
            threshold = 0.2  # confidence threshold

            # Normalize category names ONCE so the same list feeds both the
            # model and the label lookup below. FIX: previously a
            # comma-separated string prompt was indexed character-by-character
            # when building label names.
            if task_type == 'categories':
                category_names = prompt.split(",") if isinstance(prompt, str) else prompt
            else:
                category_names = None

            # Load and preprocess the image.
            image_pil, image = self.load_image(image_path)
            # NOTE(review): these are the dimensions AFTER the resize transform,
            # not the original image size — boxes/masks below are scaled to the
            # resized image. Confirm downstream consumers expect this.
            _, _, ori_height, ori_width = image.shape

            # Run the GLEE model.
            with torch.no_grad():
                if task_type == "categories":
                    (outputs, _, _) = self.model(image, [], task="coco", batch_name_list=category_names, is_train=False)
                    outputs = outputs[0]  # single image in the batch
                elif task_type == "expression":
                    (outputs, _, _) = self.model(image, {'grounding': [prompt]}, task="grounding", batch_name_list=[], is_train=False)
                    outputs = outputs[0]  # single image in the batch
                elif task_type == "visual":
                    visual_prompt = self._get_visual_prompt(image_pil, prompt)
                    # NOTE(review): this branch unpacks a 2-tuple while the
                    # others unpack 3 — confirm against the GLEE model API.
                    (outputs, _) = self.model(image, {'spatial': [visual_prompt]}, task="coco", batch_name_list=["object"], is_train=False)
                    outputs = outputs[0]  # single image in the batch
                else:
                    raise ValueError("Invalid task type. Choose 'categories', 'expression', or 'visual'.")

            # Raw predictions for the single image in the batch.
            mask_pred = outputs['pred_masks'][0]
            mask_cls = outputs['pred_logits'][0]
            boxes_pred = outputs['pred_boxes'][0]

            # Score each instance and filter.
            scores = mask_cls.sigmoid().max(-1)[0]
            if task_type == "categories" or task_type == 'expression':
                valid = scores > threshold  # keep only instances above the threshold
                topk_indices = torch.where(valid)[0]
            else:
                topk_indices = torch.arange(len(scores))  # visual prompts keep everything

            # Gather classes, boxes and masks for the kept instances.
            pred_class = mask_cls[topk_indices].max(-1)[1].tolist()
            pred_boxes = boxes_pred[topk_indices]
            pred_masks = mask_pred[topk_indices]

            filtered_scores = scores[topk_indices].tolist()

            # Scale normalized boxes to pixel coordinates of the resized image.
            scale_tensor = torch.tensor([ori_width, ori_height, ori_width, ori_height]).to(self.device)
            boxes = [(box * scale_tensor).round().int().cpu().tolist() for box in pred_boxes]

            # Upsample each mask to image resolution and convert to polygons.
            polygons = []
            for mask in pred_masks:
                mask = F.interpolate(mask[None, None], size=(ori_height, ori_width), mode="bilinear", align_corners=False)
                mask = (mask > 0).squeeze().cpu().numpy()
                polygons.append(self.mask_to_polygon(mask))

            # Map class indices to human-readable labels.
            if task_type == "categories":
                # FIX: look indices up in the normalized name list; fall back to
                # the raw index if the model predicts out of range.
                label_names = [
                    category_names[label] if 0 <= label < len(category_names) else str(label)
                    for label in pred_class
                ]
            else:
                label_names = [str(label) for label in pred_class]

            result['result']['bboxes'] = boxes
            result['result']['labels'] = label_names
            result['result']['segs'] = polygons
            result['result']['scores'] = filtered_scores
        except Exception as e:
            # Best effort: log the failure (with traceback for diagnosis) and
            # return the partially-filled result so the worker loop keeps running.
            print(f"[Error] processing image: {e}")
            traceback.print_exc()

        return result

    def _get_visual_prompt(self, image_pil, prompt):
        """Build a spatial-prompt mask from a point or a box.

        :param image_pil: source PIL image (defines the mask size).
        :param prompt: [x1, y1, x2, y2] for a box or [x, y] for a point.
        :return: 1xHxW float tensor on self.device, prompt region set to 1.
        :raises ValueError: if the prompt is neither a 2- nor 4-element list.
        """
        if isinstance(prompt, list) and len(prompt) == 4:  # bounding box
            x1, y1, x2, y2 = prompt
            mask = np.zeros((image_pil.height, image_pil.width), dtype=np.float32)
            mask[y1:y2, x1:x2] = 1
        elif isinstance(prompt, list) and len(prompt) == 2:  # single point
            x, y = prompt
            mask = np.zeros((image_pil.height, image_pil.width), dtype=np.float32)
            mask[y, x] = 1
        else:
            raise ValueError("Invalid visual prompt. Provide points or bounding boxes.")
        return torch.from_numpy(mask).unsqueeze(0).to(self.device)

if __name__ == "__main__":
    # Built-in default values.
    DEFAULT_MQ_TYPE = "zmq"
    DEFAULT_MQ_URLS = {
        "zmq": "tcp://127.0.0.1:5555",
        "redis": "redis://localhost:6379/0"
    }
    DEFAULT_LABELAPP_URL = "http://127.0.0.1:3000"

    # Environment variables override the built-in defaults.
    mq_type_env = os.getenv("MQ_TYPE", DEFAULT_MQ_TYPE)
    mq_url_env = os.getenv("MQ_URL", DEFAULT_MQ_URLS.get(mq_type_env, DEFAULT_MQ_URLS["zmq"]))
    labelapp_url_env = os.getenv("LABELAPP_URL", DEFAULT_LABELAPP_URL)

    # Command-line arguments take precedence over environment variables.
    # FIX: the description previously said "SAM2" — this is the GLEE worker.
    parser = argparse.ArgumentParser(description="GLEE Worker 启动脚本")
    parser.add_argument("--mq-type", type=str, choices=["zmq", "redis"], default=mq_type_env,
                        help="消息队列类型，默认为环境变量 MQ_TYPE 或 zmq")
    parser.add_argument("--mq-url", type=str, default=mq_url_env,
                        help="消息队列地址，默认为环境变量 MQ_URL 或根据 mq-type 选择默认值")
    parser.add_argument("--labelapp-url", type=str, default=labelapp_url_env,
                        help="LabelApp Node.js 服务地址，默认为环境变量 LABELAPP_URL")
    # NOTE(review): --build is parsed but never acted on in this file — confirm
    # whether the download-and-cache path was meant to be implemented here.
    parser.add_argument("--build", action="store_true", default=False,
                        help="下载模型文件并保存到本地，用于构建 Docker 镜像")
    args = parser.parse_args()

    # Echo the effective configuration.
    print(f"[mq_type] = {args.mq_type}")
    print(f"[mq_url] = {args.mq_url}")
    print(f"[labelapp_url] = {args.labelapp_url}")

    # Create and start the GLEE worker with the lightweight model.
    worker = GLEEWorker(args.mq_type, args.mq_url, args.labelapp_url)
    worker.start('lite')