import os
import urllib
import traceback
import time
import sys
import numpy as np
import cv2
from rknn.api import RKNN
import torch.nn.functional as F
import torch
import threading
from queue import Queue
from spirems import Subscriber, Publisher, cvimg2sms, sms2cvimg, def_msg, QoS, BaseNode, get_extra_args
from spirems.mod_helper import download_model
import argparse
from typing import Union
import platform
from copy import copy
import json
from spirecv.algorithm.utils import calc_fov, calc_los_pos, draw_bbox


IMG_SIZE = 448          # model input side length (square)
MASK_THRESHOLD = 0.0    # logits above this count as foreground


def get_preprocess_shape(oldh, oldw):
    """Return the (h, w) after scaling so the longer side equals IMG_SIZE.

    Aspect ratio is preserved; both sides are rounded to the nearest int.
    """
    scale = float(IMG_SIZE) / max(oldh, oldw)
    newh = int(oldh * scale + 0.5)
    neww = int(oldw * scale + 0.5)
    return (newh, neww)


def img_preprocess(img_src):
    """Prepare a BGR image for the SAM encoder.

    Converts to RGB, resizes so the long side equals IMG_SIZE, zero-pads
    the bottom/right to a square IMG_SIZE x IMG_SIZE, and prepends a batch
    dimension. Returns a float32 array of shape (1, IMG_SIZE, IMG_SIZE, 3).
    """
    rgb = cv2.cvtColor(img_src, cv2.COLOR_BGR2RGB)

    src_h, src_w = rgb.shape[:2]
    dst_h, dst_w = get_preprocess_shape(src_h, src_w)

    resized = cv2.resize(rgb, (dst_w, dst_h), interpolation=cv2.INTER_LINEAR)
    # Pad only bottom/right so the image content stays anchored at (0, 0).
    padded = cv2.copyMakeBorder(
        resized, 0, IMG_SIZE - dst_h, 0, IMG_SIZE - dst_w,
        cv2.BORDER_CONSTANT, value=(0, 0, 0)
    )
    return np.array([padded]).astype(np.float32)


def coords_preprocess(coords, ori_shape):
    """Map pixel coordinates from the original image into the resized
    (pre-padding) model input frame.

    `coords` is an array whose last axis is (x, y); it is scaled in place
    and also returned for convenience.
    """
    src_h, src_w = ori_shape
    dst_h, dst_w = get_preprocess_shape(src_h, src_w)

    coords[..., 0] = coords[..., 0] * (dst_w / src_w)
    coords[..., 1] = coords[..., 1] * (dst_h / src_h)
    return coords


def postprocess(masks, input_shape, ori_shape):
    """Upsample low-res decoder masks back to the original image size.

    Steps: interpolate to (IMG_SIZE, IMG_SIZE), crop away the bottom/right
    zero padding (keeping `input_shape`), then interpolate to `ori_shape`.
    Returns a numpy array.
    """
    full = F.interpolate(torch.from_numpy(masks), (IMG_SIZE, IMG_SIZE), mode='bilinear')
    cropped = full[..., : input_shape[0], : input_shape[1]]
    restored = F.interpolate(cropped, ori_shape, mode='bilinear')
    return restored.numpy()


def draw(images, masks, coords_list, labels_list, color=(144, 144, 30)):
    """Overlay the segmentation mask and prompt markers, then display.

    Args:
        images: HxWx3 BGR image to draw on (displayed via the 'samwin' window).
        masks: binary mask whose last two dims are (H, W).
        coords_list: list of (x, y) prompt points.
        labels_list: parallel labels — 0: negative point, 1: positive point,
            2: box top-left corner, 3: box bottom-right corner.
        color: BGR tint for the mask overlay.
    """
    alpha = 0.5
    h, w = masks.shape[-2:]
    color = np.array(color)
    mask_image = masks.reshape(h, w, 1) * color.reshape(1, 1, -1).astype(np.uint8)
    # Alpha-blend only where the mask is set; elsewhere keep the original pixels.
    images = np.where(mask_image != 0, cv2.add(alpha * images, (1 - alpha) * mask_image), images)

    # Box corners arrive as two separate labeled points; draw once both are known.
    top, left, right, bottom = None, None, None, None
    for coords, labels in zip(coords_list, labels_list):
        if labels == 0:
            cv2.circle(images, tuple(coords), 9, color=(0, 0, 255), thickness=2)
        elif labels == 1:
            cv2.circle(images, tuple(coords), 9, color=(100, 220, 150), thickness=2)
        elif labels in [2, 3]:
            if labels == 2:
                top, left = coords
            elif labels == 3:
                right, bottom = coords
            # BUG FIX: the original used truthiness (`if top and left and ...`),
            # which silently skipped the rectangle whenever a corner coordinate
            # was exactly 0. Test explicitly against None instead.
            if None not in (top, left, right, bottom):
                cv2.rectangle(images, (top, left), (right, bottom), (0, 255, 0), 2)

    cv2.imshow('samwin', images.astype(np.uint8))
    cv2.waitKey(5)


# Shared mutable state between the OpenCV mouse callback and the worker thread.
ref_point = [0, 0]  # latest mouse-move position (x, y) in the display window
clicked_point = [0, 0]  # latest left-click position (x, y)
sam_bbox = [0, 0, 0, 0]  # latest SAM-derived bbox [x, y, w, h] in image pixels
sam_or_tracking = 0  # mode flag: 0 = interactive SAM, 1 = external tracking




def find_bounding_box(bool_image):
    """Return the tight bounding box of the True region of a 2-D boolean image.

    Returns:
        (x_min, y_min, x_max, y_max) of the True pixels, or None when the
        image contains no True pixel.
    """
    ys, xs = np.nonzero(bool_image)
    if ys.size == 0:
        # Entirely False mask — nothing to bound.
        return None
    return (xs.min(), ys.min(), xs.max(), ys.max())


class MobileSAMClickTrackNode_Rknn(threading.Thread, BaseNode):
    """Interactive MobileSAM segmentation + click-to-track node (RKNN backend).

    Subscribes to a camera stream and runs a MobileSAM encoder/decoder pair on
    the RKNN runtime to segment whatever lies under the mouse cursor.  When the
    user clicks inside the segmented bounding box, the frame stream and the
    clicked bbox are handed over to an external tracker via the
    '/click_track/...' topics; pressing ESC returns to interactive SAM mode.
    Detection results are republished as 'spirecv_msgs::2DTargets'.
    """

    def __init__(
        self,
        job_name: str,
        ip: str = '127.0.0.1',
        port: int = 9094,
        param_dict_or_file: Union[dict, str] = None,
        sms_shutdown: bool = True,
        **kwargs
    ):
        threading.Thread.__init__(self)
        # sms_shutdown may arrive as a string from CLI extra args; normalize to bool.
        sms_shutdown = True if sms_shutdown in ['True', 'true', '1', True] else False
        BaseNode.__init__(
            self,
            self.__class__.__name__,
            job_name,
            ip=ip,
            port=port,
            param_dict_or_file=param_dict_or_file,
            sms_shutdown=sms_shutdown,
            **kwargs
        )
        self.launch_next_emit = self.get_param("launch_next_emit", False)
        self.specified_input_topic = self.get_param("specified_input_topic", "")
        self.specified_output_topic = self.get_param("specified_output_topic", "")
        self.realtime_det = self.get_param("realtime_det", True)
        self.remote_ip = self.get_param("remote_ip", "127.0.0.1")
        self.remote_port = self.get_param("remote_port", 9094)
        self.objs_in_meter = self.get_param("objs_in_meter", {"object": [-1, 1.8]})
        self.encoder_model = self.get_param("encoder_model", "sms::mobilesam_encoder.rknn")
        self.decoder_model = self.get_param("decoder_model", "sms::mobilesam_decoder.rknn")
        self.target = self.get_param("target", "rk3588")
        self.use_shm = self.get_param("use_shm", -1)
        self.params_help()

        # Shared-memory image transport: explicit opt-in (1) or auto on Linux (-1).
        self.b_use_shm = False
        if self.use_shm == 1 or (self.use_shm == -1 and platform.system() == 'Linux'):
            self.b_use_shm = True

        # 'sms::' prefixed models are fetched from the model store; anything
        # else is treated as a local file path.
        if self.encoder_model.startswith("sms::"):
            self.local_encoder_model = download_model(self.__class__.__name__, self.encoder_model)
            assert self.local_encoder_model is not None
        else:
            self.local_encoder_model = self.encoder_model

        if self.decoder_model.startswith("sms::"):
            self.local_decoder_model = download_model(self.__class__.__name__, self.decoder_model)
            assert self.local_decoder_model is not None
        else:
            self.local_decoder_model = self.decoder_model

        # Two RKNN runtimes: image encoder and prompt decoder.
        self.en_rknn = RKNN(verbose=False)
        self.en_rknn.load_rknn(self.local_encoder_model)
        self.en_rknn.init_runtime(target=self.target)

        self.de_rknn = RKNN(verbose=False)
        self.de_rknn.load_rknn(self.local_decoder_model)
        self.de_rknn.init_runtime(target=self.target)

        input_url = '/' + job_name + '/sensor/image_raw'
        if len(self.specified_input_topic) > 0:
            input_url = self.specified_input_topic

        output_url = '/' + job_name + '/detector/results'
        if len(self.specified_output_topic) > 0:
            output_url = self.specified_output_topic

        calib_url = '/' + job_name + '/sensor/calibration_info'

        self.job_queue = Queue()
        self.queue_pool.append(self.job_queue)

        # Calibration defaults until a CameraCalibration message arrives.
        self.calib_width, self.calib_height = -1, -1
        self.camera_matrix = [712.12, 0, 645.23, 0, 705.87, 327.34, 0, 0, 1]
        self.camera_matrix = np.array(self.camera_matrix).reshape(3, 3)
        self.distortion = [0.0, 0.0, 0.0, 0.0, 0.0]
        self.distortion = np.array(self.distortion)

        # Decoder mask inputs: no previous mask is fed back (has_mask_input = 0).
        self.mask_input = np.zeros((1, 1, 112, 112), dtype=np.float32)
        self.has_mask_input = np.zeros(1, dtype=np.float32)

        # point inputs and box inputs.
        # Boxes are encoded using two points, one for the top-left corner and one for the bottom-right corner
        # such as points and boxes: [[400, 400], [0, 0]] and [[190, 70], [460, 280]]
        self.point_coords_list = [[190, 70], [460, 280]]
        # 0 is a negative input point
        # 1 is a positive input point
        # 2 is a top-left box corner
        # 3 is a bottom-right box corner
        # -1 is a padding point
        # if there is no box input, a single padding point with label -1 and point_coords (0.0, 0.0) should be concatenated.
        self.point_labels_list = [2, 3]
        self.point_coords = np.array(self.point_coords_list).astype(np.float32)
        self.point_labels = np.array(self.point_labels_list).astype(np.float32)

        self._image_reader = Subscriber(
            input_url, 'std_msgs::Null', self.image_callback,
            ip=ip, port=port, qos=QoS.Reliability
        )
        self._calibration_reader = Subscriber(
            calib_url, 'sensor_msgs::CameraCalibration', self.calibration_callback,
            ip=ip, port=port, qos=QoS.Reliability
        )
        self._track_writer = Publisher(
            '/click_track/sensor/image_raw', 'memory_msgs::RawImage' if self.b_use_shm else 'sensor_msgs::CompressedImage',
            ip=ip, port=port
        )
        self._track_calib_writer = Publisher(
            '/click_track/sensor/calibration_info', 'sensor_msgs::CameraCalibration',
            ip=ip, port=port
        )
        self._track_info_writer = Publisher(
            '/click_track/info', 'spirecv_msgs::ClickInfo',
            ip=ip, port=port
        )
        self._track_result_reader = Subscriber(
            '/click_track/track/image_results', 'std_msgs::Null', self.track_result_callback,
            ip=ip, port=port
        )

        self._result_writer = Publisher(
            output_url, 'spirecv_msgs::2DTargets',
            ip=self.remote_ip, port=self.remote_port, qos=QoS.Reliability
        )
        self._show_writer = Publisher(
            '/' + job_name + '/detector/image_results', 'memory_msgs::RawImage' if self.b_use_shm else 'sensor_msgs::CompressedImage',
            ip=ip, port=port
        )
        if self.launch_next_emit:
            self._next_writer = Publisher(
                '/' + job_name + '/launch_next', 'std_msgs::Boolean',
                ip=ip, port=port, qos=QoS.Reliability
            )

        self.start()

    def mouse_moving(self, event, x, y, flags, param):
        """OpenCV mouse callback for the 'samwin' window.

        Mouse moves update the SAM prompt point; a left click inside the
        current SAM bbox publishes it as a ClickInfo and switches to tracking
        mode, while a click in tracking mode switches back to SAM mode.
        """
        global ref_point, clicked_point, sam_or_tracking
        if event == cv2.EVENT_MOUSEMOVE:
            ref_point = [x, y]
        elif event == cv2.EVENT_LBUTTONDOWN:
            clicked_point = [x, y]
            if sam_or_tracking == 0:
                # Only hand over to the tracker if the click is inside the bbox.
                if sam_bbox[0] < x < sam_bbox[0] + sam_bbox[2] and sam_bbox[1] < y < sam_bbox[1] + sam_bbox[3]:
                    info_msg = def_msg("spirecv_msgs::ClickInfo")
                    info_msg["bbox"] = sam_bbox
                    self._track_info_writer.publish(info_msg)
                    sam_or_tracking = 1
            else:
                sam_or_tracking = 0

    def trans_det_results(self, masks, h, w, camera_matrix, calib_wh, objs_in_meter):
        """Convert a binary mask into a 'spirecv_msgs::2DTargets' message.

        Derives a single 'object' target from the mask's bounding box and,
        when calibration is available, attaches FOV plus line-of-sight /
        position estimates. Side effect: updates the global sam_bbox so the
        mouse callback can hit-test clicks against the latest detection.
        """
        global sam_bbox
        sms_results = def_msg('spirecv_msgs::2DTargets')

        sms_results["file_name"] = ""
        sms_results["height"] = h
        sms_results["width"] = w
        has_calib = False
        if calib_wh[0] > 0 and calib_wh[1] > 0:
            sms_results["fov_x"], sms_results["fov_y"] = calc_fov(camera_matrix, calib_wh)
            has_calib = True

        sms_results["targets"] = []
        bbox = find_bounding_box(np.squeeze(masks, axis=0))
        if bbox is not None:
            ann = dict()
            name = 'object'
            ann["category_name"] = name
            ann["category_id"] = 0
            # (x_min, y_min, x_max, y_max) -> [x, y, w, h]
            ann["bbox"] = [int(bbox[0]), int(bbox[1]), int(bbox[2] - bbox[0]), int(bbox[3] - bbox[1])]
            sam_bbox = ann["bbox"]
            # Normalized bbox center in [0, 1].
            ann["cxy"] = [
                (ann["bbox"][0] + ann["bbox"][2] / 2.) / sms_results["width"],
                (ann["bbox"][1] + ann["bbox"][3] / 2.) / sms_results["height"]
            ]
            if has_calib and name in objs_in_meter:
                ann["los"], ann["pos"] = calc_los_pos(
                    camera_matrix, calib_wh,
                    ann["cxy"], [ann["bbox"][2], ann["bbox"][3]],
                    objs_in_meter[name]
                )
            sms_results["targets"].append(ann)

        return sms_results

    def release(self):
        """Shut down the node and close the SMS endpoints."""
        BaseNode.release(self)
        self._image_reader.kill()
        self._result_writer.kill()
        self._show_writer.kill()
        # BUG FIX: _next_writer is only created when launch_next_emit is
        # enabled; killing it unconditionally raised AttributeError on shutdown.
        if hasattr(self, '_next_writer'):
            self._next_writer.kill()

    def image_callback(self, msg):
        """Camera frame callback: forward to the tracker or queue for SAM."""
        if sam_or_tracking == 1:
            self._track_writer.publish(msg)
        else:
            if self.realtime_det:
                # Keep only the freshest frame when running in realtime mode.
                # NOTE(review): empty()+clear() is not atomic — a frame queued
                # between the two calls may be dropped; harmless for live video.
                if not self.job_queue.empty():
                    self.job_queue.queue.clear()
            img = sms2cvimg(msg)
            self.job_queue.put({'msg': msg, 'img': img})

    def calibration_callback(self, msg):
        """Cache camera calibration and forward it while tracking."""
        if sam_or_tracking == 1:
            self._track_calib_writer.publish(msg)

        self.calib_width = msg['width']
        self.calib_height = msg['height']

        self.camera_matrix = np.array(msg['K']).reshape(3, 3)
        self.distortion = np.array(msg['D'])

    def track_result_callback(self, msg):
        """Tracker result callback: queue annotated frames while tracking."""
        if sam_or_tracking == 1:
            img = sms2cvimg(msg)
            self.job_queue.put({'msg': msg, 'img': img})

    def run(self):
        """Worker loop: display tracking results or run SAM on queued frames."""
        global sam_or_tracking
        cv2.namedWindow("samwin")
        cv2.setMouseCallback("samwin", self.mouse_moving)
        while self.is_running():
            msg_dict = self.job_queue.get(block=True)
            if msg_dict is None:
                # Sentinel pushed on shutdown.
                break

            msg, img_src = msg_dict['msg'], msg_dict['img']

            t1 = time.time()
            file_name = msg['file_name'] if 'file_name' in msg else ''

            output_res = False
            if sam_or_tracking == 1:
                # Tracking mode: just visualize the tracker's own results.
                if 'spirecv_msgs::2DTargets' in msg:
                    img = draw_bbox(img_src, msg)
                    res_msg = msg['spirecv_msgs::2DTargets']
                    output_res = True
                else:
                    img = img_src
                cv2.imshow('samwin', img)
                key = cv2.waitKey(5)
                if key == 27:
                    # ESC returns to interactive SAM mode.
                    sam_or_tracking = 0
            else:
                # SAM mode: segment the object under the current mouse position.
                img = img_preprocess(img_src)
                img_embeds = self.en_rknn.inference(inputs=[img])[0]

                # One positive point (the cursor) plus the required padding point.
                point_coords_l = [ref_point, [0, 0]]
                point_labels_l = [1, -1]
                point_coords = np.array(point_coords_l).astype(np.float32)
                point_labels = np.array(point_labels_l).astype(np.float32)
                point_coords = coords_preprocess(point_coords[None, :, :], img_src.shape[:2])
                point_labels = point_labels[None, :]

                iou_predictions, low_res_masks = self.de_rknn.inference(
                    inputs=[img_embeds, point_coords, point_labels, self.mask_input, self.has_mask_input],
                    data_format='NCHW'
                )

                input_shape = get_preprocess_shape(img_src.shape[0], img_src.shape[1])
                masks = postprocess(low_res_masks, input_shape, img_src.shape[:2])

                # Keep the mask candidate with the highest predicted IoU.
                masks = masks > MASK_THRESHOLD
                masks = masks[:, np.argmax(iou_predictions), :, :]

                draw(img_src.copy(), masks, point_coords_l, point_labels_l, color=(255, 255, 255))

                res_msg = self.trans_det_results(masks, img_src.shape[0], img_src.shape[1], self.camera_matrix, [self.calib_width, self.calib_height], self.objs_in_meter)
                output_res = True

            if output_res:
                # Propagate bookkeeping fields from the source message.
                # (The original set file_name and img_id twice; deduplicated.)
                res_msg['file_name'] = file_name
                if 'client_id' in msg:
                    res_msg['client_id'] = msg['client_id']
                if 'img_id' in msg:
                    res_msg['img_id'] = msg['img_id']
                if 'img_total' in msg:
                    res_msg['img_total'] = msg['img_total']
                res_msg['time_used'] = time.time() - t1

                self._result_writer.publish(res_msg)

                # In batch mode, request the next image once this one is done.
                if 'img_total' in msg and self.launch_next_emit:
                    next_msg = def_msg('std_msgs::Boolean')
                    next_msg['data'] = True
                    self._next_writer.publish(next_msg)
                    print('img_id', msg['img_id'])

                if self.b_use_shm:
                    msg = self._show_writer.cvimg2sms_mem(img_src)
                msg['spirecv_msgs::2DTargets'] = res_msg
                self._show_writer.publish(msg)
            # END

        self.release()
        print('{} quit!'.format(self.__class__.__name__))


if __name__ == '__main__':
    # CLI entry point: parse known args, resolve the config path relative to
    # the 'spirecv-pro' tree when not absolute, and spin the node.
    cli = argparse.ArgumentParser()
    cli.add_argument(
        '--config',
        type=str,
        default='default_params.json',
        help='SpireCV2 Config (.json)')
    cli.add_argument(
        '--job-name', '-j',
        type=str,
        default='live',
        help='SpireCV Job Name')
    cli.add_argument(
        '--ip',
        type=str,
        default='127.0.0.1',
        help='SpireMS Core IP')
    cli.add_argument(
        '--port',
        type=int,
        default=9094,
        help='SpireMS Core Port')
    known, unknown = cli.parse_known_args()

    if not os.path.isabs(known.config):
        here = os.path.abspath(__file__)
        # Anchor relative configs under <repo>/params/spirecv2.
        root = here[:here.find('spirecv-pro') + 11]
        known.config = os.path.join(root, 'params', 'spirecv2', known.config)
    print("--config:", known.config)
    print("--job-name:", known.job_name)
    extra_kwargs = get_extra_args(unknown)

    node = MobileSAMClickTrackNode_Rknn(
        known.job_name, param_dict_or_file=known.config,
        ip=known.ip, port=known.port, **extra_kwargs
    )
    node.spin()
