"""
【节点名称】：
    ArucoDetNode
【依赖项安装】：
    pip install spirems
    pip install opencv-contrib-python
    git clone -b spirecv2-dds https://gitee.com/amovlab/SpireCV.git
    cd SpireCV && pip install -e .
【订阅类型】：
    sensor_msgs::CompressedImage （输入图像）
    sensor_msgs::CameraCalibration (输入相机参数)
【发布类型】：
    spirecv_msgs::2DTargets （检测结果）
    sensor_msgs::CompressedImage （可视化结果，需借助可视化工具）
【构造参数说明】：
    parameter_file (str): 全局参数文件
    sms_shutdown (bool): 是否接收全局关闭信号,如果需要长期后台执行,建议设置为False
    specified_input_topic (str): 指定输入的话题地址
    specified_output_topic (str): 指定输出的话题地址
【节点参数】：
    markerIds (list): 待检测的Aruco码ID号,如[1,2,3],需要检测所有ID号,可设置[-1]
    markerLengths (list): 每个Aruco码对应的实际边长
    dictionaryId (int): 定义二维码类型,默认(10),即DICT_6X6_250
【备注】：
    无
"""

import cv2  
import cv2.aruco as aruco 
import threading
from queue import Queue
import numpy as np
from spirems import Subscriber, Publisher, cvimg2sms, sms2cvimg, def_msg, QoS
from spirecv.base.BaseNode import BaseNode
from spirecv.dataloader.CameraNode import CameraNode
from spirecv.dataloader.VideoFileNode import VideoFileNode
from typing import Union
import math


# Radians-to-degrees conversion factor (mirrors SpireCV's C++ SV_RAD2DEG).
SV_RAD2DEG = 180.0 / math.pi  


def camera_matrix2fov(camera_matrix, img_w, img_h):
    """Compute the horizontal and vertical field of view from pinhole intrinsics.

    Args:
        camera_matrix: 3x3 intrinsic matrix; fx = [0, 0], fy = [1, 1] in pixels.
        img_w: image width in pixels.
        img_h: image height in pixels.

    Returns:
        (fov_x, fov_y): fields of view in degrees.
    """
    fx = camera_matrix[0, 0]
    fy = camera_matrix[1, 1]
    # FOV = 2 * atan((size / 2) / focal_length), converted to degrees.
    fov_x = math.degrees(2.0 * math.atan(img_w / (2.0 * fx)))
    fov_y = math.degrees(2.0 * math.atan(img_h / (2.0 * fy)))
    return fov_x, fov_y


def setYaw(vec_x_, vec_y_):  
    if vec_x_ == 0.0 and vec_y_ > 0.0:  
        yaw_a = 180  
    elif vec_x_ == 0.0 and vec_y_ < 0.0:  
        yaw_a = 0  
    elif vec_x_ > 0.0 and vec_y_ == 0.0:  
        yaw_a = 90  
    elif vec_x_ > 0.0 and vec_y_ > 0.0:  
        yaw_a = 180 - math.atan(vec_x_ / vec_y_) * SV_RAD2DEG  
    elif vec_x_ > 0.0 and vec_y_ < 0.0:  
        yaw_a = math.atan(vec_x_ / (-vec_y_)) * SV_RAD2DEG  
    elif vec_x_ < 0.0 and vec_y_ == 0.0:  
        yaw_a = -90  
    elif vec_x_ < 0.0 and vec_y_ > 0.0:  
        yaw_a = math.atan(-vec_x_ / vec_y_) * SV_RAD2DEG - 180  
    elif vec_x_ < 0.0 and vec_y_ < 0.0:  
        yaw_a = -math.atan(-vec_x_ / (-vec_y_)) * SV_RAD2DEG 
    return yaw_a 


def trans_det_results(ids, corners, rvecs, tvecs, camera_matrix, img_width=640, img_height=480):
    """Pack raw Aruco detections into a spirecv_msgs::2DTargets message.

    Args:
        ids: Nx1 array of detected marker IDs from cv2.aruco, or None.
        corners: per-marker 1x4x2 corner arrays from cv2.aruco.
        rvecs, tvecs: pose vectors; accepted for interface compatibility
            but not serialized into the message here.
        camera_matrix: 3x3 intrinsic matrix, used to report the FOV.
        img_width, img_height: image size in pixels.

    Returns:
        A populated 'spirecv_msgs::2DTargets' message dict.
    """
    result_msg = def_msg('spirecv_msgs::2DTargets')
    result_msg["height"] = img_height
    result_msg["width"] = img_width
    result_msg["fov_x"], result_msg["fov_y"] = camera_matrix2fov(camera_matrix, img_width, img_height)
    result_msg["targets"] = []

    if ids is None:
        return result_msg

    for marker_id, marker_corners in zip(ids, corners):
        pts = [list(row) for row in marker_corners[0]]
        xs = [p[0] for p in pts]
        ys = [p[1] for p in pts]

        # Axis-aligned bounding box around the four corners.
        left, right = min(xs), max(xs)
        top, bottom = min(ys), max(ys)

        # Midpoint of the first marker edge (corner 0 -> corner 1); its
        # offset from the bbox center encodes the marker's in-plane heading.
        edge_mid_x = (pts[0][0] + pts[1][0]) / 2.0
        edge_mid_y = (pts[0][1] + pts[1][1]) / 2.0
        vec_x = edge_mid_x - (left + right) / 2.0
        vec_y = edge_mid_y - (top + bottom) / 2.0

        target = dict()
        target["category_name"] = "aruco_" + str(int(marker_id[0]))
        target["category_id"] = int(marker_id[0])
        target["bbox"] = [
            float(left),
            float(top),
            float(right - left),
            float(bottom - top)
        ]
        target["yaw_a"] = setYaw(vec_x, vec_y)
        result_msg["targets"].append(target)

    return result_msg
    

def aruco_det(img, _ids_need, _lengths_need, camera_matrix, distortion, aruco_dict_name):
    """Detect Aruco markers in a BGR image and estimate their poses.

    Args:
        img: BGR image (numpy array).
        _ids_need: marker IDs to keep; [-1] as the first entry keeps all markers.
        _lengths_need: physical marker side lengths matching each entry of
            _ids_need (only the first entry is used in the keep-all case).
        camera_matrix: 3x3 intrinsic matrix.
        distortion: distortion coefficients.
        aruco_dict_name: key (int or str) into the predefined-dictionary
            table below, e.g. 10 -> DICT_6X6_250.

    Returns:
        (ids, corners, rvecs, tvecs). All four are empty when nothing is
        detected or when the dictionary name is unknown. (The original
        returned None for an unknown name, which crashed callers that
        unpack four values.)
    """
    rvecs = []
    tvecs = []
    ids_final = []
    corners_final = []

    aruco_dicts = {
        '0': cv2.aruco.DICT_4X4_50,
        '1': cv2.aruco.DICT_4X4_100,
        '2': cv2.aruco.DICT_4X4_250,
        '3': cv2.aruco.DICT_4X4_1000,
        '4': cv2.aruco.DICT_5X5_50,
        '5': cv2.aruco.DICT_5X5_100,
        '6': cv2.aruco.DICT_5X5_250,
        '7': cv2.aruco.DICT_5X5_1000,
        '8': cv2.aruco.DICT_6X6_50,
        '9': cv2.aruco.DICT_6X6_100,
        '10': cv2.aruco.DICT_6X6_250,
        '11': cv2.aruco.DICT_6X6_1000,
        '12': cv2.aruco.DICT_7X7_50,
        '13': cv2.aruco.DICT_7X7_100,
        '14': cv2.aruco.DICT_7X7_250,
        '15': cv2.aruco.DICT_7X7_1000,
        '16': cv2.aruco.DICT_ARUCO_ORIGINAL,
        '17': cv2.aruco.DICT_APRILTAG_16h5,
        '18': cv2.aruco.DICT_APRILTAG_25h9,
        '19': cv2.aruco.DICT_APRILTAG_36h10,
        '20': cv2.aruco.DICT_APRILTAG_36h11
    }

    dict_key = str(aruco_dict_name)
    if dict_key not in aruco_dicts:
        # Keep the 4-tuple shape so callers can always unpack the result.
        print("unknown aruco dictionary name")
        return ids_final, corners_final, rvecs, tvecs

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    dictionary = aruco.getPredefinedDictionary(aruco_dicts[dict_key])
    corners, ids, _ = aruco.detectMarkers(gray, dictionary)

    if isinstance(ids, np.ndarray) and len(ids) > 0:
        if _ids_need[0] == -1:
            # Keep every detection; a single common marker length is assumed.
            rvecs, tvecs, _ = cv2.aruco.estimatePoseSingleMarkers(
                corners, _lengths_need[0], camera_matrix, distortion)
            ids_final = ids
            corners_final = corners
        else:
            # Filter per requested ID, each with its own physical length.
            for id_need, length_need in zip(_ids_need, _lengths_need):
                t_corners = []
                for j in range(len(ids)):
                    if ids[j] == id_need:
                        t_corners.append(corners[j])
                        ids_final.append(ids[j])
                        corners_final.append(corners[j])
                if t_corners:
                    t_rvecs, t_tvecs, _ = cv2.aruco.estimatePoseSingleMarkers(
                        t_corners, length_need, camera_matrix, distortion)
                    rvecs.extend(t_rvecs)
                    tvecs.extend(t_tvecs)

    return ids_final, corners_final, rvecs, tvecs


class ArucoDetNode(threading.Thread, BaseNode):
    """Aruco marker detection node.

    Subscribes to a compressed-image stream and a camera-calibration topic,
    runs Aruco detection on each frame, and publishes the detections as
    spirecv_msgs::2DTargets plus a visualization image stream.
    """

    def __init__(
        self,
        job_name: str,
        ip: str = '127.0.0.1',
        port: int = 9094,
        param_dict_or_file: Union[dict, str] = None,
        sms_shutdown: bool = True,
        launch_next_emit: bool = True,
        specified_input_topic: str = None,
        specified_output_topic: str = None,
        realtime_det: bool = True,
        specified_calibration_topic: str = None
    ):
        """
        Args:
            job_name: job namespace used to build default topic URLs.
            ip, port: spirems broker address.
            param_dict_or_file: global parameter dict or JSON file path.
            sms_shutdown: whether to honor the global shutdown signal.
            launch_next_emit: publish a '/launch_next' trigger after each
                frame that carries 'img_total' (file-playback handshaking).
            specified_input_topic: override for the image input topic.
            specified_output_topic: override for the results output topic.
            realtime_det: drop queued frames so only the newest is processed.
            specified_calibration_topic: override for the calibration input
                topic. (Previously the calibration topic was wrongly
                overridden by specified_output_topic — a copy-paste bug.)
        """
        threading.Thread.__init__(self)
        BaseNode.__init__(self, self.__class__.__name__, job_name, ip=ip, port=port,
                          param_dict_or_file=param_dict_or_file, sms_shutdown=sms_shutdown)

        input_url = '/' + job_name + '/sensor/image_raw'
        if specified_input_topic is not None:
            input_url = specified_input_topic

        input_url_c = '/' + job_name + '/sensor/calibration_info'
        if specified_calibration_topic is not None:
            input_url_c = specified_calibration_topic

        output_url = '/' + job_name + '/detector/results'
        if specified_output_topic is not None:
            output_url = specified_output_topic

        self.realtime_det = realtime_det
        self.job_queue = Queue()
        self.queue_pool.append(self.job_queue)

        # Calibration state; populated by calibration_callback. run() skips
        # frames until a calibration message has arrived, instead of
        # crashing on missing attributes.
        self.width = None
        self.height = None
        self.camera_matrix = None
        self.distortion = None

        self._image_reader = Subscriber(
            input_url, 'sensor_msgs::CompressedImage', self.image_callback,
            ip=ip, port=port, qos=QoS.Reliability
        )
        self._calibration_reader = Subscriber(
            input_url_c, 'sensor_msgs::CameraCalibration', self.calibration_callback,
            ip=ip, port=port, qos=QoS.Reliability
        )

        self._result_writer = Publisher(
            output_url, 'spirecv_msgs::2DTargets',
            ip=ip, port=port, qos=QoS.Reliability
        )
        self._show_writer = Publisher(
            '/' + job_name + '/detector/image_results', 'sensor_msgs::CompressedImage',
            ip=ip, port=port
        )

        self.launch_next_emit = launch_next_emit
        if self.launch_next_emit:
            self._next_writer = Publisher(
                '/' + job_name + '/launch_next', 'std_msgs::Boolean',
                ip=ip, port=port, qos=QoS.Reliability
            )

        # Node parameters (see module docstring for semantics).
        self.markerIds = self.get_param("markerIds", [-1])
        self.markerLengths = self.get_param("markerLengths", [0.2])
        self.dictionaryId = self.get_param("dictionaryId", 10)
        self.start()

    def release(self):
        """Shut down all subscribers/publishers owned by this node."""
        BaseNode.release(self)
        workers = [
            self._image_reader,
            self._calibration_reader,
            self._result_writer,
            self._show_writer,
        ]
        # _next_writer only exists when launch_next_emit was True; the
        # original release() dereferenced it unconditionally.
        if self.launch_next_emit:
            workers.append(self._next_writer)
        for worker in workers:
            worker.kill()
        for worker in workers:
            worker.join()

    def image_callback(self, msg):
        """Queue an incoming frame; in realtime mode keep only the newest."""
        if self.realtime_det:
            while not self.job_queue.empty():
                self.job_queue.get()
        self.job_queue.put(msg)

    def calibration_callback(self, msg):
        """Store image size and intrinsics from a CameraCalibration message."""
        self.width = msg['width']
        self.height = msg['height']
        self.camera_matrix = np.array(msg['K']).reshape(3, 3)
        self.distortion = np.array(msg['D'])

    def run(self):
        """Main loop: pull frames, detect Aruco markers, publish results."""
        while self.is_running():
            msg = self.job_queue.get(block=True)
            if msg is None:
                break

            if self.camera_matrix is None:
                # No calibration yet: cannot resize or compute FOV/poses.
                continue

            img = sms2cvimg(msg)
            img = cv2.resize(img, (self.width, self.height))

            ids, corners, rvecs, tvecs = aruco_det(img, self.markerIds, self.markerLengths,
                                                   self.camera_matrix, self.distortion,
                                                   self.dictionaryId)
            res_msg = trans_det_results(ids, corners, rvecs, tvecs, self.camera_matrix,
                                        self.width, self.height)

            if 'img_id' in msg:
                # Was msg['frame_id'] while testing for 'img_id' (copy-paste);
                # propagate the key that was actually checked.
                res_msg['img_id'] = msg['img_id']
            if 'img_total' in msg:
                res_msg['img_total'] = msg['img_total']
            self._result_writer.publish(res_msg)

            # Handshake for file playback: ask the dataloader for the next
            # frame, but only if the trigger publisher was created.
            if self.launch_next_emit and 'img_total' in msg:
                next_msg = def_msg('std_msgs::Boolean')
                next_msg['data'] = True
                self._next_writer.publish(next_msg)

            # Attach detections to the image for downstream visualization.
            img_msg = cvimg2sms(img)
            img_msg['spirecv_msgs::2DTargets'] = res_msg
            self._show_writer.publish(img_msg)

        self.release()
        print('{} quit!'.format(self.__class__.__name__))


if __name__ == '__main__':
    # Demo pipeline: camera source -> Aruco detector -> live visualization.
    from spirecv.visualizer.DetectionVisNode import DetectionVisNode
    job_name = 'detection'
    # Global parameter file shared by every node in this job.
    parameter_file = '../../../params/spirecv2/default_params.json'
    # Each node starts its own worker thread on construction.
    dataloader = CameraNode(job_name, param_dict_or_file=parameter_file)
    algorithm = ArucoDetNode(job_name, param_dict_or_file=parameter_file)
    # Visualizer consumes the detector's annotated image stream.
    cam = DetectionVisNode("live", specified_input_topic="/detection/detector/image_results")





