import os
import sys
import torch
import argparse
import platform
from datetime import datetime
import groundingdino.datasets.transforms as T
from groundingdino.models import build_model
from groundingdino.util.slconfig import SLConfig
from groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap

# Make the repository root importable BEFORE the local `mq` import below —
# `mq.worker` lives two directories up from this file and is not installed
# as a package.
parent_path = os.path.abspath(os.path.join(
    os.path.dirname(__file__), '..', '..'))
sys.path.insert(0, parent_path)
from mq.worker import BaseWorker

class GroundingDINOWorker(BaseWorker):
    """MQ worker that serves Grounding DINO open-set object detection.

    Consumes detection tasks (image path + free-text prompt) from the message
    queue and replies with bounding boxes, phrase labels and confidence scores.
    """

    def __init__(self, mq_type, mq_address, node_server_address):
        """Register the worker on the queue; the model is loaded later in start().

        :param mq_type: message-queue backend identifier (e.g. "zmq", "redis").
        :param mq_address: address/URL of the message queue.
        :param node_server_address: address of the LabelApp Node.js service.
        """
        super().__init__('vision:detection:GroundingDINOWorker', mq_type, mq_address)
        self.client.setCustomInfo({
            "worker": "GroundingDINOWorker",
            "version": "1.0.0"
        })
        self.model = None  # created in start()
        self.node_server_address = node_server_address

    def start(self, build=False):
        """Initialize the Grounding DINO model and optionally start serving.

        :param build: when True, only load the model (used while building a
                      Docker image) and skip starting the local MQ service.
        """
        # Pick the best available device: CUDA > Apple MPS > CPU. CUDA is
        # explicitly hidden on the non-CUDA paths so downstream libraries do
        # not attempt to initialize it.
        if torch.cuda.is_available():
            device = torch.device("cuda")
        elif platform.system() == "Darwin" and torch.backends.mps.is_available():
            device = torch.device("mps")
            os.environ['CUDA_VISIBLE_DEVICES'] = ''
        else:
            device = torch.device("cpu")
            os.environ['CUDA_VISIBLE_DEVICES'] = ''
        print(f"Using device: {device}")
        self.device = device

        config_file = "./configs/GroundingDINO_SwinT_OGC.py"
        checkpoint_path = "./checkpoints/groundingdino_swint_ogc.pth"
        self.model = self.load_model(
            config_file, checkpoint_path, device=device)
        print("Grounding DINO model initialized.")
        print(f"Model device: {next(self.model.parameters()).device}")

        # Start the local MQ service unless this is a build-only run.
        if not build:
            super().start()

    def load_model(self, model_config_path, model_checkpoint_path, device="cuda"):
        """Build the Grounding DINO model and load checkpoint weights.

        :param model_config_path: path to the model config (.py) file.
        :param model_checkpoint_path: path to the .pth checkpoint file.
        :param device: target device for the model.
        :return: the model in eval mode on ``device``.
        """
        args = SLConfig.fromfile(model_config_path)
        args.device = device
        model = build_model(args)
        # Deserialize on CPU first, then move — avoids device OOM during load.
        # NOTE(review): torch.load without weights_only=True unpickles
        # arbitrary objects; only load trusted checkpoints here.
        checkpoint = torch.load(model_checkpoint_path, map_location="cpu")
        model.load_state_dict(clean_state_dict(
            checkpoint["model"]), strict=False)
        model = model.to(device)
        model.eval()
        return model

    def load_image(self, image_path):
        """Fetch an image and preprocess it for Grounding DINO.

        :param image_path: location understood by ``self.fetch_image``.
        :return: ``(image_pil, image)`` — the original PIL image and the
                 normalized CHW tensor.
        """
        image_pil = self.fetch_image(image_path)
        transform = T.Compose(
            [
                # Single target size, so the "random" resize is deterministic.
                T.RandomResize([800], max_size=1333),
                T.ToTensor(),
                # ImageNet mean/std normalization.
                T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            ]
        )
        image, _ = transform(image_pil, None)  # 3, h, w
        return image_pil, image

    def get_grounding_output(self, image, caption, box_threshold, text_threshold=None, device="cuda", token_spans=None):
        """Run inference and return filtered boxes, phrase labels and scores.

        :param image: normalized CHW image tensor.
        :param caption: text prompt; lower-cased, stripped and '.'-terminated
                        here before tokenization.
        :param box_threshold: keep queries whose max token logit exceeds this.
        :param text_threshold: per-token threshold used to extract phrases;
                               must not be None on this code path.
        :param device: device to run inference on.
        :param token_spans: accepted for interface compatibility but unused
                            by the phrase-extraction loop below.
        :return: ``(boxes_filt, labels, scores)``; boxes are normalized
                 [cx, cy, w, h] tensors on CPU.
        """
        assert text_threshold is not None or token_spans is not None, "text_threshold and token_spans should not be None at the same time!"
        caption = caption.lower()
        caption = caption.strip()
        if not caption.endswith("."):
            caption = caption + "."
        image = image.to(device)
        with torch.no_grad():
            outputs = self.model(image[None], captions=[caption])
        logits = outputs["pred_logits"].sigmoid()[0]  # (nq, 256)
        boxes = outputs["pred_boxes"][0]  # (nq, 4)

        # Keep only queries whose strongest token logit clears box_threshold.
        logits_filt = logits.cpu().clone()
        boxes_filt = boxes.cpu().clone()
        filt_mask = logits_filt.max(dim=1)[0] > box_threshold
        logits_filt = logits_filt[filt_mask]  # num_filt, 256
        boxes_filt = boxes_filt[filt_mask]  # num_filt, 4

        # Map each surviving query's strong tokens back to prompt phrases.
        tokenlizer = self.model.tokenizer
        tokenized = tokenlizer(caption)
        labels = []
        scores = []
        for logit, box in zip(logits_filt, boxes_filt):
            pred_phrase = get_phrases_from_posmap(
                logit > text_threshold, tokenized, tokenlizer)
            labels.append(pred_phrase)  # phrase label
            scores.append(logit.max().item())  # confidence score

        return boxes_filt, labels, scores

    def process(self, message):
        """Handle one detection task from the queue.

        :param message: dict with ``taskId`` and ``inputs`` (``image_path``,
                        optional ``text_prompt``).
        :return: result dict whose ``bboxes`` are integer
                 [left, top, width, height] pixel boxes, plus ``labels``,
                 ``segs`` and ``scores``.
        """
        print(f"Received message: {message}")

        result = {
            "taskId": None,
            "result": {
                "bboxes": [],
                "labels": [],
                "segs": [],
                "scores": [],
            }
        }
        try:
            taskId = message['taskId']
            result['taskId'] = taskId

            input_data = message['inputs']
            timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            print(
                f"[{timestamp}][GroundingDinoWorker][{taskId}] process: ", input_data)

            # Pull parameters from the task payload.
            image_path = input_data.get("image_path")
            text_prompt = input_data.get("text_prompt", "object")
            # .get() only defaults on a missing key; guard an explicit None.
            if text_prompt is None:
                print(
                    f"[{timestamp}][GroundingDinoWorker][{taskId}] Warning: text_prompt is None, using default value 'object'")
                text_prompt = "object"

            # Load and preprocess the image.
            image_pil, image = self.load_image(image_path)
            image_width, image_height = image_pil.size

            # Run Grounding DINO.
            box_threshold = 0.3
            text_threshold = 0.25
            token_spans = None
            # Pass token_spans directly; the previous eval(f"{token_spans}")
            # round-trip was a no-op for None and a code-injection hazard for
            # anything else.
            boxes_filt, labels, scores = self.get_grounding_output(
                image, text_prompt, box_threshold, text_threshold,
                device=self.device, token_spans=token_spans)

            # Scale normalized [cx, cy, w, h] boxes to pixel coordinates.
            boxes_filt = boxes_filt * \
                torch.tensor([image_width, image_height,
                             image_width, image_height])

            # Convert [center_x, center_y, width, height] to
            # [left, top, width, height].
            left = boxes_filt[:, 0] - boxes_filt[:, 2] / 2
            top = boxes_filt[:, 1] - boxes_filt[:, 3] / 2
            width = boxes_filt[:, 2]
            height = boxes_filt[:, 3]

            # Stack back into a (num_filt, 4) box tensor.
            boxes_filt = torch.stack([left, top, width, height], dim=1)

            boxes_filt = boxes_filt.round().int().tolist()  # round to int lists
            print('bboxes_filt = ', boxes_filt)

            # Populate the reply. NOTE(review): segs is None on success but
            # [] on failure — confirm downstream consumers accept both.
            result['result']['bboxes'] = boxes_filt
            result['result']['labels'] = labels
            result['result']['segs'] = None
            result['result']['scores'] = scores
        except Exception as e:
            # Best-effort boundary: report the error and still return the
            # (empty) result so the caller receives a reply for this task.
            print(f"[Error] processing image: {e}")

        return result


if __name__ == "__main__":
    # Built-in default configuration.
    DEFAULT_MQ_TYPE = "zmq"
    DEFAULT_MQ_URLS = {
        "zmq": "tcp://127.0.0.1:5555",
        "redis": "redis://localhost:6379/0"
    }
    DEFAULT_LABELAPP_URL = "http://127.0.0.1:3000"

    # Environment variables override the built-in defaults; the MQ URL
    # default follows the selected MQ type.
    mq_type_env = os.getenv("MQ_TYPE", DEFAULT_MQ_TYPE)
    mq_url_env = os.getenv("MQ_URL", DEFAULT_MQ_URLS.get(
        mq_type_env, DEFAULT_MQ_URLS["zmq"]))
    labelapp_url_env = os.getenv("LABELAPP_URL", DEFAULT_LABELAPP_URL)

    # Command-line arguments override both. The description previously said
    # "SAM2 Worker" — a copy-paste bug from another worker script.
    parser = argparse.ArgumentParser(description="Grounding DINO Worker 启动脚本")
    parser.add_argument("--mq-type", type=str, choices=["zmq", "redis"], default=mq_type_env,
                        help="消息队列类型，默认为环境变量 MQ_TYPE 或 zmq")
    parser.add_argument("--mq-url", type=str, default=mq_url_env,
                        help="消息队列地址，默认为环境变量 MQ_URL 或根据 mq-type 选择默认值")
    parser.add_argument("--labelapp-url", type=str, default=labelapp_url_env,
                        help="LabelApp Node.js 服务地址，默认为环境变量 LABELAPP_URL")
    parser.add_argument("--build", action="store_true", default=False,
                        help="下载模型文件并保存到本地，用于构建 Docker 镜像")
    args = parser.parse_args()

    # Echo the effective configuration.
    print(f"[mq_type] = {args.mq_type}")
    print(f"[mq_url] = {args.mq_url}")
    print(f"[labelapp_url] = {args.labelapp_url}")

    # Create the worker and start it (build-only mode skips the MQ loop).
    worker = GroundingDINOWorker(args.mq_type, args.mq_url, args.labelapp_url)
    worker.start(args.build)
