
"""
【节点名称】：
    SingleObjectTrackerNode
【依赖项安装】：
    pip install spirems
    git clone -b spirecv2-dds https://gitee.com/amovlab/SpireCV.git
    cd SpireCV && pip install -e .
【订阅类型】：
    sensor_msgs::CompressedImage （输入图像）
    sensor_msgs::CameraCalibration (输入相机参数)
【发布类型】：
    spirecv_msgs::2DTargets （跟踪结果）
【构造参数说明】：
    parameter_file (str): 全局参数文件
    sms_shutdown (bool): 是否接收全局关闭信号,如果需要长期后台执行,建议设置为False
    specified_input_topic (str): 指定输入的话题地址
    specified_output_topic (str): 指定输出的话题地址
【节点参数】：
    algorithm (str): 单目标跟踪算法, 如"mil"、"goturn"、"dasiamrpn"、"nanotrack"
【备注】：
    单目标跟踪算法所需模型需提前下载，然后默认加载模型文件地址为当前文件夹
    For usage download models by following links
    For GOTURN:
        goturn.prototxt and goturn.caffemodel: https://github.com/opencv/opencv_extra/tree/c4219d5eb3105ed8e634278fad312a1a8d2c182d/testdata/tracking
    For DaSiamRPN:
        network:     https://www.dropbox.com/s/rr1lk9355vzolqv/dasiamrpn_model.onnx?dl=0
        kernel_r1:   https://www.dropbox.com/s/999cqx5zrfi7w4p/dasiamrpn_kernel_r1.onnx?dl=0
        kernel_cls1: https://www.dropbox.com/s/qvmtszx5h339a0w/dasiamrpn_kernel_cls1.onnx?dl=0
    For NanoTrack:
        nanotrack_backbone: https://github.com/HonglinChu/SiamTrackers/blob/master/NanoTrack/models/nanotrackv2/nanotrack_backbone_sim.onnx
        nanotrack_headneck: https://github.com/HonglinChu/SiamTrackers/blob/master/NanoTrack/models/nanotrackv2/nanotrack_head_sim.onnx 
"""


# Python 2/3 compatibility
#from __future__ import print_function
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import yaml
import math
import threading
import cv2
import time
from queue import Queue
import numpy as np
from typing import Union
import argparse
import platform
from spirems import Subscriber, Publisher, cvimg2sms, sms2cvimg, def_msg, QoS, BaseNode, get_extra_args
from spirems.mod_helper import download_model
from spirecv.algorithm.utils import calc_fov, calc_los_pos


# Shared state between the "trackwin" mouse callback (box_selection) and
# SingleObjectTrackerNode.run(): a drag in the window selects the target box.
cropping = False  # True while the left mouse button is held down (dragging)
ref_point = []  # [(x0, y0)] during a drag; [(x0, y0), (x1, y1)] once released


def box_selection(event, x, y, flags, param):
    """Mouse callback for the "trackwin" window: drag to select a target box.

    On left-button-down the drag start is recorded; while the mouse moves a
    red preview rectangle is drawn on a copy of the current frame; on
    left-button-up the end point is appended to ``ref_point`` so the node's
    run loop can pick up the finished selection.
    """
    global ref_point, cropping, img, clone
    if event == cv2.EVENT_LBUTTONDOWN:
        cropping = True
        ref_point = [(x, y)]
    elif event == cv2.EVENT_MOUSEMOVE and cropping:
        # Draw the live preview on a scratch copy so the frame stays clean.
        preview = clone.copy()
        cv2.rectangle(preview, ref_point[0], (x, y), (0, 0, 255), 2)
        cv2.imshow("trackwin", preview)
    elif event == cv2.EVENT_LBUTTONUP:
        cropping = False
        ref_point.append((x, y))
        cv2.rectangle(img, ref_point[0], (x, y), (0, 0, 255), 2)
        cv2.imshow("trackwin", img)


class SingleObjectTrackerNode(threading.Thread, BaseNode):
    """Single-object tracking node.

    Subscribes to an input image topic and a camera-calibration topic, runs
    an OpenCV single-object tracker (mil / goturn / dasiamrpn / nanotrack)
    on each frame, and publishes spirecv_msgs::2DTargets results plus a
    preview image.  The target box is initialized either from a
    spirecv_msgs::ClickInfo message or by dragging a rectangle in the local
    "trackwin" window (when ``show_selection_win`` is enabled).
    """

    def __init__(
        self,
        job_name: str,
        ip: str = '127.0.0.1',
        port: int = 9094,
        param_dict_or_file: Union[dict, str] = None,
        sms_shutdown: bool = True,
        **kwargs
    ):
        """
        Args:
            job_name: job name; used to derive the default topic URLs.
            ip: SpireMS core IP.
            port: SpireMS core port.
            param_dict_or_file: global parameter dict or parameter-file path.
            sms_shutdown: whether to honor the global shutdown signal;
                accepts a bool or the strings 'True'/'true'/'1' (CLI input).
            **kwargs: extra node parameters forwarded to BaseNode.
        """
        threading.Thread.__init__(self)
        # Normalize: sms_shutdown may arrive as a string from the command line.
        sms_shutdown = True if sms_shutdown in ['True', 'true', '1', True] else False
        BaseNode.__init__(
            self,
            self.__class__.__name__,
            job_name,
            ip=ip,
            port=port,
            param_dict_or_file=param_dict_or_file,
            sms_shutdown=sms_shutdown,
            **kwargs
        )
        self.launch_next_emit = self.get_param("launch_next_emit", True)
        self.specified_input_topic = self.get_param("specified_input_topic", "")
        self.specified_output_topic = self.get_param("specified_output_topic", "")
        self.realtime_det = self.get_param("realtime_det", False)
        self.remote_ip = self.get_param("remote_ip", "127.0.0.1")
        self.remote_port = self.get_param("remote_port", 9094)
        self.algorithm = self.get_param("algorithm", 'nanotrack')
        self.objs_in_meter = self.get_param("objs_in_meter", {"object": [-1, 1.8]})  # {category_name: [w, h], ...}
        self.use_shm = self.get_param("use_shm", -1)  # -1: auto (on for Linux), 0: off, 1: on
        self.show_selection_win = self.get_param("show_selection_win", True)
        self.params_help()

        self.b_use_shm = False
        if self.use_shm == 1 or (self.use_shm == -1 and platform.system() == 'Linux'):
            self.b_use_shm = True

        input_url = '/' + job_name + '/sensor/image_raw'
        if len(self.specified_input_topic) > 0:
            # BUGFIX: was a bare `specified_input_topic` (NameError at runtime).
            input_url = self.specified_input_topic

        output_url = '/' + job_name + '/track/results'
        if len(self.specified_output_topic) > 0:
            # BUGFIX: was a bare `specified_output_topic` (NameError at runtime).
            output_url = self.specified_output_topic

        calib_url = '/' + job_name + '/sensor/calibration_info'

        self.job_queue = Queue()
        self.queue_pool.append(self.job_queue)
        self.new_track = True  # True => (re-)init the tracker on the next frame
        self.tracking = False  # True once a target box has been selected
        self.tracker = self.create_tracker()
        # Current target box as [x, y, w, h].  Initialized here (not in run())
        # so a ClickInfo arriving before the thread starts is not clobbered.
        self.init_rect = [0, 0, 0, 0]

        # Fallback intrinsics until a CameraCalibration message arrives.
        self.calib_width, self.calib_height = -1, -1
        self.camera_matrix = [712.12, 0, 645.23, 0, 705.87, 327.34, 0, 0, 1]
        self.camera_matrix = np.array(self.camera_matrix).reshape(3, 3)
        self.distortion = [0.0, 0.0, 0.0, 0.0, 0.0]
        self.distortion = np.array(self.distortion)

        self._image_reader = Subscriber(
            input_url, 'std_msgs::Null', self.image_callback,
            ip=ip, port=port
        )
        self._calibration_reader = Subscriber(
            calib_url, 'sensor_msgs::CameraCalibration', self.calibration_callback,
            ip=ip, port=port, qos=QoS.Reliability
        )
        self._click_info_reader = Subscriber(
            '/' + job_name + '/info', 'spirecv_msgs::ClickInfo', self.click_info_callback,
            ip=ip, port=port, qos=QoS.Reliability
        )
        self._result_writer = Publisher(
            output_url, 'spirecv_msgs::2DTargets',
            ip=self.remote_ip, port=self.remote_port, qos=QoS.Reliability
        )
        self._show_writer = Publisher(
            '/' + job_name + '/track/image_results', 'memory_msgs::RawImage' if self.b_use_shm else 'sensor_msgs::CompressedImage',
            ip=ip, port=port
        )
        if self.launch_next_emit:
            self._next_writer = Publisher(
                '/' + job_name + '/launch_next', 'std_msgs::Boolean',
                ip=ip, port=port, qos=QoS.Reliability
            )

        self.start()

    def release(self):
        """Release node resources and kill all subscribers/publishers."""
        BaseNode.release(self)
        self._image_reader.kill()
        self._calibration_reader.kill()
        # BUGFIX: the click-info subscriber was never killed before.
        self._click_info_reader.kill()
        self._result_writer.kill()
        self._show_writer.kill()
        # BUGFIX: _next_writer only exists when launch_next_emit is enabled;
        # the unconditional kill raised AttributeError otherwise.
        if self.launch_next_emit:
            self._next_writer.kill()

    def image_callback(self, msg):
        """Decode an incoming image message and enqueue it for the run loop.

        In realtime mode, stale frames are dropped so the tracker always
        processes the most recent image.
        """
        if self.realtime_det:
            if not self.job_queue.empty():
                self.job_queue.queue.clear()
        img = sms2cvimg(msg)
        self.job_queue.put({'msg': msg, 'img': img})

    def calibration_callback(self, msg):
        """Store camera resolution, intrinsic matrix K, and distortion D."""
        self.calib_width = msg['width']
        self.calib_height = msg['height']

        self.camera_matrix = np.array(msg['K']).reshape(3, 3)
        self.distortion = np.array(msg['D'])

    def click_info_callback(self, msg):
        """Accept a remotely selected target box ([x, y, w, h]) and restart tracking."""
        self.init_rect = [int(b) for b in msg['bbox']]
        self.tracking = True
        self.new_track = True

    def create_tracker(self):
        """Create the OpenCV tracker selected by ``self.algorithm``.

        Model files for goturn/dasiamrpn/nanotrack are fetched via
        ``download_model``.  Exits the process on an unknown algorithm name.

        Returns:
            An initialized (but not yet target-bound) cv2 tracker instance.
        """
        if self.algorithm == 'mil':
            tracker = cv2.TrackerMIL_create()
        elif self.algorithm == 'goturn':
            params = cv2.TrackerGOTURN_Params()
            params.modelTxt = download_model(self.__class__.__name__, "sms::goturn.prototxt")
            params.modelBin = download_model(self.__class__.__name__, "sms::goturn.caffemodel")
            tracker = cv2.TrackerGOTURN_create(params)
        elif self.algorithm == 'dasiamrpn':
            params = cv2.TrackerDaSiamRPN_Params()
            params.model = download_model(self.__class__.__name__, "sms::dasiamrpn_model.onnx")
            params.kernel_cls1 = download_model(self.__class__.__name__, "sms::dasiamrpn_kernel_cls1.onnx")
            params.kernel_r1 = download_model(self.__class__.__name__, "sms::dasiamrpn_kernel_r1.onnx")
            tracker = cv2.TrackerDaSiamRPN_create(params)
        elif self.algorithm == 'nanotrack':
            params = cv2.TrackerNano_Params()
            params.backbone = download_model(self.__class__.__name__, "sms::nanotrack_backbone_sim.onnx")
            params.neckhead = download_model(self.__class__.__name__, "sms::nanotrack_head_sim.onnx")
            tracker = cv2.TrackerNano_create(params)
        else:
            # BUGFIX: message said "three" while four algorithms are listed.
            sys.exit("Tracker {} is not recognized. Please use one of four available: mil, goturn, dasiamrpn, nanotrack.".format(self.algorithm))
        return tracker

    def trans_det_results(self, boxes, classes, scores, h, w, camera_matrix, calib_wh, objs_in_meter):
        """Pack a tracked box into a spirecv_msgs::2DTargets message.

        Args:
            boxes: target box as (x, y, w, h) in pixels, or None for no target.
            classes: category id to report for the single target.
            scores: confidence score, or None when the tracker provides none.
            h, w: frame height/width in pixels.
            camera_matrix: 3x3 intrinsic matrix K.
            calib_wh: [calib_width, calib_height]; <= 0 means "no calibration".
            objs_in_meter: {category_name: [width_m, height_m]} used to
                estimate line-of-sight and position for known-size objects.

        Returns:
            The populated 2DTargets message dict.
        """
        sms_results = def_msg('spirecv_msgs::2DTargets')

        sms_results["file_name"] = ""
        sms_results["height"] = h
        sms_results["width"] = w
        has_calib = False
        if calib_wh[0] > 0 and calib_wh[1] > 0:
            sms_results["fov_x"], sms_results["fov_y"] = calc_fov(camera_matrix, calib_wh)
            has_calib = True
        sms_results["targets"] = []

        if boxes is not None:  # boxes is (x, y, w, h)
            ann = dict()
            name = 'object'
            ann["category_name"] = name
            ann["category_id"] = classes
            if scores is not None:
                ann["score"] = float(round(scores, 3))
            ann["bbox"] = [round(j, 3) for j in boxes]
            # Box center normalized to [0, 1] in both axes.
            ann["cxy"] = [
                (ann["bbox"][0] + ann["bbox"][2] / 2.) / sms_results["width"],
                (ann["bbox"][1] + ann["bbox"][3] / 2.) / sms_results["height"]
            ]
            if has_calib and name in objs_in_meter:
                ann["los"], ann["pos"] = calc_los_pos(
                    camera_matrix, calib_wh,
                    ann["cxy"], [ann["bbox"][2], ann["bbox"][3]],
                    objs_in_meter[name]
                )

            sms_results["targets"].append(ann)

        return sms_results

    def run(self):
        """Main loop: consume frames, update the tracker, publish results."""
        global img, clone, cropping, ref_point

        while self.is_running():
            t1 = time.time()
            msg_dict = self.job_queue.get(block=True)
            if msg_dict is None:  # shutdown sentinel from the queue pool
                break

            msg, img = msg_dict['msg'], msg_dict['img']
            # Resize to the calibrated resolution so K matches the pixels.
            if self.calib_width > 0 and self.calib_height > 0:
                img = cv2.resize(img, (self.calib_width, self.calib_height))

            clone = img.copy()
            file_name = msg['file_name'] if 'file_name' in msg else ''
            if self.tracking:
                if self.new_track:
                    self.tracker.init(img, self.init_rect)
                    self.new_track = False
                    print("track init")
                else:
                    # NOTE: `ok` is deliberately not checked — the tracker's
                    # last box is published even when the update fails.
                    ok, outputs = self.tracker.update(img)
                    boxes = list(map(int, outputs))  # (x, y, w, h)
                    scores = None  # no per-frame confidence exposed here

                    res_msg = self.trans_det_results(
                        boxes, 0, scores, img.shape[0], img.shape[1],
                        self.camera_matrix,
                        [self.calib_width, self.calib_height],
                        self.objs_in_meter
                    )
                    res_msg['file_name'] = file_name
                    if 'client_id' in msg:
                        res_msg['client_id'] = msg['client_id']
                    if 'img_id' in msg:
                        res_msg['img_id'] = msg['img_id']
                    if 'img_total' in msg:
                        res_msg['img_total'] = msg['img_total']
                    res_msg['fps'] = 1.0 / (time.time() - t1)
                    self._result_writer.publish(res_msg)

                    if 'img_total' in msg and self.launch_next_emit:
                        # Ask the upstream node to emit the next frame.
                        next_msg = def_msg('std_msgs::Boolean')
                        next_msg['data'] = True
                        self._next_writer.publish(next_msg)
                        print('img_id', msg['img_id'])

                    if self.b_use_shm:
                        # Re-wrap the frame as a shared-memory image message.
                        msg = self._show_writer.cvimg2sms_mem(img)
                    msg['spirecv_msgs::2DTargets'] = res_msg
                    self._show_writer.publish(msg)
                    # Drawn after publishing: the box appears only in the
                    # local window; subscribers overlay via the 2DTargets data.
                    cv2.rectangle(img, (boxes[0], boxes[1]), (boxes[2]+boxes[0], boxes[3]+boxes[1]), (0, 255, 0), 2)

            if self.show_selection_win:
                cv2.namedWindow("trackwin")
                cv2.setMouseCallback("trackwin", box_selection)
                cv2.imshow("trackwin", img)
                cv2.waitKey(5)
                if len(ref_point) == 2:  # a drag just finished in box_selection()
                    print(ref_point)
                    self.init_rect = [ref_point[0][0], ref_point[0][1], ref_point[1][0] - ref_point[0][0], ref_point[1][1] - ref_point[0][1]]
                    # Ignore tiny/accidental selections.
                    if self.init_rect[2] >= 5 and self.init_rect[3] >= 5:
                        self.new_track = True
                        self.tracking = True
                    ref_point = []

            # END

        self.release()
        print('{} quit!'.format(self.__class__.__name__))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--config',
        type=str,
        default='default_params.json',
        help='SpireCV2 Config (.json)')
    parser.add_argument(
        '--job-name', '-j',
        type=str,
        default='live',
        help='SpireCV Job Name')
    parser.add_argument(
        '--ip',
        type=str,
        default='127.0.0.1',
        help='SpireMS Core IP')
    parser.add_argument(
        '--port',
        type=int,
        default=9094,
        help='SpireMS Core Port')
    # Unknown arguments are forwarded to the node as extra node parameters.
    args, unknown_args = parser.parse_known_args()
    if not os.path.isabs(args.config):
        # Resolve a bare config name against <repo-root>/params/spirecv2,
        # locating the repo root via the 'spirecv-pro' directory in this
        # file's absolute path (replaces the magic "+ 11" offset).
        current_path = os.path.abspath(__file__)
        repo_marker = 'spirecv-pro'
        marker_end = current_path.find(repo_marker) + len(repo_marker)
        params_dir = os.path.join(current_path[:marker_end], 'params', 'spirecv2')
        args.config = os.path.join(params_dir, args.config)
    print("--config:", args.config)
    print("--job-name:", args.job_name)
    extra = get_extra_args(unknown_args)

    node = SingleObjectTrackerNode(args.job_name, param_dict_or_file=args.config, ip=args.ip, port=args.port, **extra)
    node.join()
