from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import numpy as np
import cv2

from core.config import cfg
from utils.bbox import corner2center
from rknnlite.api import RKNNLite

import os
import urllib
import traceback
import time
import sys
#from rknn.api import RKNN
import threading
from queue import Queue
from spirems import Subscriber, Publisher, cvimg2sms, sms2cvimg, def_msg, QoS, BaseNode, get_extra_args
from spirems.mod_helper import download_model
from spirecv.algorithm.utils import calc_fov, calc_los_pos
from typing import Union
import platform
from copy import copy
import json


# Shared mouse-selection state used by box_selection() and run().
cropping = False  # True while the left mouse button is held down (dragging)
ref_point = []  # [(x0, y0), (x1, y1)] corners of the selected box


def box_selection(event, x, y, flags, param):
    """OpenCV mouse callback: drag with the left button to select a box.

    Stores the two selected corners in the module-level ``ref_point`` list
    and toggles the module-level ``cropping`` flag while dragging.  Draws a
    live red rubber-band rectangle using the module-level ``img``/``clone``
    frames maintained by the tracker loop.
    """
    global ref_point, cropping, img, clone
    if event == cv2.EVENT_LBUTTONDOWN:
        # Begin a new selection at the press position.
        cropping = True
        ref_point = [(x, y)]
    elif event == cv2.EVENT_MOUSEMOVE and cropping:
        # Live preview: draw on a scratch copy so the base frame stays clean.
        preview = clone.copy()
        cv2.rectangle(preview, ref_point[0], (x, y), (0, 0, 255), 2)
        cv2.imshow("nanotrack", preview)
    elif event == cv2.EVENT_LBUTTONUP:
        # Finish the selection: record the second corner and draw it.
        cropping = False
        ref_point.append((x, y))
        cv2.rectangle(img, ref_point[0], (x, y), (0, 0, 255), 2)
        cv2.imshow("nanotrack", img)


class NanoTrackNode_Rknn(threading.Thread, BaseNode):
    def __init__(
        self,
        job_name: str,
        ip: str = '127.0.0.1',
        port: int = 9094,
        param_dict_or_file: Union[dict, str] = None,
        sms_shutdown: bool = True,
        **kwargs
    ):
        """NanoTrack single-object tracker node running on an RKNN NPU.

        Subscribes to '/<job_name>/sensor/image_raw' (or the
        'specified_input_topic' parameter) for frames, to
        '/<job_name>/sensor/calibration_info' for camera intrinsics, and
        publishes tracking results to '/<job_name>/track/results' (or
        'specified_output_topic') plus an annotated image stream.

        Args:
            job_name: SMS job name; used to derive default topic URLs.
            ip, port: address of the local SMS broker.
            param_dict_or_file: node parameters as a dict or a file path
                (forwarded to BaseNode).
            sms_shutdown: whether the node honors SMS shutdown requests;
                string forms like 'True'/'1' are accepted.
            **kwargs: forwarded to BaseNode.__init__.
        """
        threading.Thread.__init__(self)
        # Normalize string/int variants coming from config files to a bool.
        sms_shutdown = True if sms_shutdown in ['True', 'true', '1', True] else False
        BaseNode.__init__(
            self,
            self.__class__.__name__,
            job_name,
            ip=ip,
            port=port,
            param_dict_or_file=param_dict_or_file,
            sms_shutdown=sms_shutdown,
            **kwargs
        )
        # --- node parameters ---
        self.launch_next_emit = self.get_param("launch_next_emit", True)
        self.specified_input_topic = self.get_param("specified_input_topic", "")
        self.specified_output_topic = self.get_param("specified_output_topic", "")
        self.realtime_det = self.get_param("realtime_det", True)
        self.remote_ip = self.get_param("remote_ip", "127.0.0.1")
        self.remote_port = self.get_param("remote_port", 9094)
        # Model paths; an "sms::" prefix means "fetch via download_model()".
        self.T_model_path = self.get_param("T_model_path", "sms::track_backbone_T.rknn")
        self.X_model_path = self.get_param("X_model_path", "sms::track_backbone_X.rknn")
        self.H_model_path = self.get_param("H_model_path", "sms::head.rknn")
        self.objs_in_meter = self.get_param("objs_in_meter", {"object": [-1, 1.8]})  # {category_name: [w, h], ...}
        self.target = self.get_param("target", "rk3588")
        #self.device_id = self.get_param("device_id", "")
        # use_shm: 1 = shared-memory images on, 0 = off, -1 = auto (on for Linux).
        self.use_shm = self.get_param("use_shm", -1)
        self.params_help()

        # Tracking state machine: `tracking` becomes True once a box is
        # selected; `new_track` forces a template (re-)initialization.
        self.new_track = True
        self.tracking = False
        self.track_yaml = os.path.dirname(os.path.abspath(__file__)) + "/config.yaml"
        # Placeholder intrinsics; replaced when a CameraCalibration message
        # arrives (see calibration_callback).
        self.calib_width, self.calib_height = -1, -1
        self.camera_matrix = [712.12, 0,645.23, 0, 705.87, 327.34, 0, 0, 1]
        self.camera_matrix = np.array(self.camera_matrix).reshape(3, 3)
        self.distortion = [0.0, 0.0, 0.0, 0.0, 0.0]
        self.distortion = np.array(self.distortion)

        self.b_use_shm = False
        if self.use_shm == 1 or (self.use_shm == -1 and platform.system() == 'Linux'):
            self.b_use_shm = True

        # Resolve "sms::" model references to local files.
        if self.T_model_path.startswith("sms::"):
            self.local_T_model_path = download_model(self.__class__.__name__, self.T_model_path)
            assert self.local_T_model_path is not None
        else:
            self.local_T_model_path = self.T_model_path
        if self.X_model_path.startswith("sms::"):
            self.local_X_model_path = download_model(self.__class__.__name__, self.X_model_path)
            assert self.local_X_model_path is not None
        else:
            self.local_X_model_path = self.X_model_path
        if self.H_model_path.startswith("sms::"):
            self.local_H_model_path = download_model(self.__class__.__name__, self.H_model_path)
            assert self.local_H_model_path is not None
        else:
            self.local_H_model_path = self.H_model_path

        # Build the tracker constants and the three RKNN runtimes.
        self.self_init(self.local_T_model_path, self.local_X_model_path, self.local_H_model_path)
        #self.co_helper = COCO_test_helper(enable_letter_box=True)

        input_url = '/' + job_name + '/sensor/image_raw'
        if len(self.specified_input_topic) > 0:
            input_url = self.specified_input_topic

        output_url = '/' + job_name + '/track/results'
        if len(self.specified_output_topic) > 0:
            output_url = self.specified_output_topic

        calib_url = '/' + job_name + '/sensor/calibration_info'

        # Frames flow: image_callback -> job_queue -> run().
        self.job_queue = Queue()
        self.queue_pool.append(self.job_queue)

        self._image_reader = Subscriber(
            input_url, 'std_msgs::Null', self.image_callback,
            ip=ip, port=port, qos=QoS.Reliability
        )
        self._calibration_reader = Subscriber(
            calib_url, 'sensor_msgs::CameraCalibration', self.calibration_callback,
            ip=ip, port=port, qos=QoS.Reliability
        )
        self._result_writer = Publisher(
            output_url, 'spirecv_msgs::2DTargets',
            ip=self.remote_ip, port=self.remote_port, qos=QoS.Reliability
        )
        self._show_writer = Publisher(
            '/' + job_name + '/track/image_results', 'memory_msgs::RawImage' if self.b_use_shm else 'sensor_msgs::CompressedImage',
            ip=ip, port=port
        )
        if self.launch_next_emit:
            # NOTE(review): _next_writer only exists when launch_next_emit is
            # true — any later use must be guarded accordingly.
            self._next_writer = Publisher(
                '/' + job_name + '/launch_next', 'std_msgs::Boolean',
                ip=ip, port=port, qos=QoS.Reliability
            )
        self.start()
    
    def calibration_callback(self, msg):
        """Cache the camera calibration published on the calibration topic."""
        self.calib_width, self.calib_height = msg['width'], msg['height']
        # 3x3 intrinsic matrix and distortion coefficient vector.
        self.camera_matrix = np.asarray(msg['K']).reshape(3, 3)
        self.distortion = np.asarray(msg['D'])

    def trans_det_results(self, boxes, classes, scores, h, w, camera_matrix, calib_wh, objs_in_meter):
        """Pack one tracked box into a 'spirecv_msgs::2DTargets' message.

        boxes is a single (x, y, w, h) box or None; classes/scores are the
        single category id and confidence for that box.  When a valid
        calibration (calib_wh) is available, FOV and, for categories listed
        in objs_in_meter, line-of-sight / position estimates are added.
        """
        result = def_msg('spirecv_msgs::2DTargets')
        result["file_name"] = ""
        result["height"] = h
        result["width"] = w
        calibrated = calib_wh[0] > 0 and calib_wh[1] > 0
        if calibrated:
            result["fov_x"], result["fov_y"] = calc_fov(camera_matrix, calib_wh)
        result["targets"] = []

        if boxes is not None:  # boxes is (x, y, w, h)
            name = 'object'
            target = {
                "category_name": name,
                "category_id": classes,
            }
            if scores is not None:
                target["score"] = float(round(scores, 3))
            target["bbox"] = [round(v, 3) for v in boxes]
            # Box center normalized to [0, 1] image coordinates.
            target["cxy"] = [
                (target["bbox"][0] + target["bbox"][2] / 2.) / result["width"],
                (target["bbox"][1] + target["bbox"][3] / 2.) / result["height"]
            ]
            if calibrated and name in objs_in_meter:
                target["los"], target["pos"] = calc_los_pos(
                    camera_matrix, calib_wh,
                    target["cxy"], [target["bbox"][2], target["bbox"][3]],
                    objs_in_meter[name]
                )
            result["targets"].append(target)

        return result

    def release(self):
        """Shut down the node and all of its SMS endpoints.

        Fix: ``self._next_writer`` is only created in __init__ when
        ``launch_next_emit`` is true, so killing it unconditionally raised
        AttributeError for nodes configured with launch_next_emit=False.
        """
        BaseNode.release(self)
        self._image_reader.kill()
        self._calibration_reader.kill()
        self._result_writer.kill()
        self._show_writer.kill()
        if hasattr(self, '_next_writer'):
            self._next_writer.kill()

    def image_callback(self, msg):
        """Decode an incoming image message and enqueue it for run()."""
        # In realtime mode, drop new frames while one is still pending so the
        # tracker always works on the freshest image.
        if self.realtime_det and not self.job_queue.empty():
            return
        self.job_queue.put({'msg': msg, 'img': sms2cvimg(msg)})

    def run(self):
        """Main worker loop: consume frames, track, publish, and show the UI.

        Pulls decoded frames from ``job_queue``, runs the tracker once a box
        has been selected in the "nanotrack" window, publishes
        'spirecv_msgs::2DTargets' results plus an annotated image stream, and
        handles the interactive box selection via ``box_selection``.

        Fixes vs. the previous version: removed the duplicated ``img_id``
        copy, removed the unused ``count`` local, used ``msg.get('img_id')``
        so a message with 'img_total' but no 'img_id' no longer raises
        KeyError, and stopped shadowing ``msg`` in the shared-memory branch.
        """
        global img, clone, cropping, ref_point
        init_rect = [0, 0, 0, 0]  # selected box as (x, y, w, h)

        while self.is_running():
            msg_dict = self.job_queue.get(block=True)
            if msg_dict is None:  # poison pill injected on shutdown
                break

            msg, img = msg_dict['msg'], msg_dict['img']
            if self.calib_width > 0 and self.calib_height > 0:
                # Match the calibration resolution so the intrinsics apply.
                img = cv2.resize(img, (self.calib_width, self.calib_height))
            clone = img.copy()
            t1 = time.time()

            file_name = msg['file_name'] if 'file_name' in msg else ''
            if self.tracking:
                if self.new_track:
                    # First frame after selection: build the template.
                    self.init(img, init_rect)
                    self.new_track = False
                    print("track init")
                else:
                    outputs = self.track(img)
                    boxes = list(map(int, outputs['bbox']))
                    scores = float(outputs['best_score'])

                    res_msg = self.trans_det_results(
                        boxes, 0, scores, img.shape[0], img.shape[1],
                        self.camera_matrix,
                        [self.calib_width, self.calib_height],
                        self.objs_in_meter
                    )
                    res_msg['file_name'] = file_name
                    # Propagate bookkeeping fields from the input message.
                    for key in ('client_id', 'file_name', 'img_id', 'img_total'):
                        if key in msg:
                            res_msg[key] = msg[key]
                    res_msg['time_used'] = time.time() - t1
                    self._result_writer.publish(res_msg)

                    if 'img_total' in msg and self.launch_next_emit:
                        # Dataset mode: ask the upstream node for the next frame.
                        next_msg = def_msg('std_msgs::Boolean')
                        next_msg['data'] = True
                        self._next_writer.publish(next_msg)
                        print('img_id', msg.get('img_id'))

                    # Annotated stream: raw shared-memory image on Linux,
                    # otherwise re-publish the incoming (compressed) message.
                    show_msg = self._show_writer.cvimg2sms_mem(img) if self.b_use_shm else msg
                    show_msg['spirecv_msgs::2DTargets'] = res_msg
                    self._show_writer.publish(show_msg)
                    cv2.rectangle(img, (boxes[0], boxes[1]),
                                  (boxes[2] + boxes[0], boxes[3] + boxes[1]),
                                  (0, 255, 0), 2)

            cv2.namedWindow("nanotrack")
            cv2.setMouseCallback("nanotrack", box_selection)
            cv2.imshow("nanotrack", img)
            cv2.waitKey(5)
            if len(ref_point) == 2:
                # A full drag finished: convert corners to (x, y, w, h).
                print(ref_point)
                init_rect = [
                    ref_point[0][0], ref_point[0][1],
                    ref_point[1][0] - ref_point[0][0],
                    ref_point[1][1] - ref_point[0][1]
                ]
                if init_rect[2] >= 5 and init_rect[3] >= 5:  # ignore tiny boxes
                    self.new_track = True
                    self.tracking = True
                ref_point = []

        self.release()
        print('{} quit!'.format(self.__class__.__name__))

    def self_init(self, Tback_weight, Xback_weight, Head_weight):
        """Load the tracker config and build the three RKNN runtimes.

        Args:
            Tback_weight: path to the template-backbone .rknn model.
            Xback_weight: path to the search-backbone .rknn model.
            Head_weight: path to the correlation-head .rknn model.

        Side effects: sets score_size, window, points, cls_out_channels and
        the three rknn_* runtimes; exits the process if any model fails to
        load or initialize.

        Fix: the load/init sequence was copy-pasted three times — factored
        into _load_rknn; uses sys.exit instead of the site-injected exit().
        """
        cfg.merge_from_file(self.track_yaml)

        # Size of the score map produced by the head.
        self.score_size = (cfg.TRACK.INSTANCE_SIZE - cfg.TRACK.EXEMPLAR_SIZE) // \
                          cfg.POINT.STRIDE + 1 + cfg.TRACK.BASE_SIZE
        # 2-D Hanning window used as the cosine window penalty in track().
        hanning = np.hanning(self.score_size)
        window = np.outer(hanning, hanning)
        self.cls_out_channels = 2
        self.window = window.flatten()

        self.points = self.generate_points(cfg.POINT.STRIDE, self.score_size)

        # Pin each model to its own NPU core so they can run concurrently.
        self.rknn_Tback = self._load_rknn('rknn_Tback', Tback_weight, RKNNLite.NPU_CORE_0)
        self.rknn_Xback = self._load_rknn('rknn_Xback', Xback_weight, RKNNLite.NPU_CORE_1)
        self.rknn_Head = self._load_rknn('rknn_Head', Head_weight, RKNNLite.NPU_CORE_2)
        print('done')

    def _load_rknn(self, tag, weight_path, core_mask):
        """Load one .rknn model and init its runtime on the given NPU core.

        Exits the process with the RKNN error code on failure.
        """
        rknn = RKNNLite()
        print('--> {}: Loading model'.format(tag))
        ret = rknn.load_rknn(weight_path)
        if ret != 0:
            print('{}: Load model failed!'.format(tag))
            sys.exit(ret)
        print('--> {}: Init runtime environment'.format(tag))
        ret = rknn.init_runtime(core_mask=core_mask)
        if ret != 0:
            print('{}: Init runtime environment failed!'.format(tag))
            sys.exit(ret)
        return rknn

    def generate_points(self, stride, size):
        """Return the (size*size, 2) float32 grid of anchor-point coordinates.

        Coordinates are centered on the score map: they run from
        -(size // 2) * stride upward in steps of `stride`, with column 0
        holding x and column 1 holding y (row-major flattening).
        """
        origin = -(size // 2) * stride
        coords = origin + stride * np.arange(size, dtype=np.float32)
        xs, ys = np.meshgrid(coords, coords)
        pts = np.empty((size * size, 2), dtype=np.float32)
        pts[:, 0] = xs.ravel()
        pts[:, 1] = ys.ravel()
        return pts

    def _convert_bbox(self, delta, point):
        """Torch variant: decode (l, t, r, b) offsets into center-form boxes.

        delta is a (1, 4, H, W) tensor of distances from each anchor point to
        the four box sides; returns a (4, N) numpy array in
        (cx, cy, w, h) form via corner2center.  (The RKNN path uses
        _convert_bbox_numpy instead.)
        """
        d = delta.permute(1, 2, 3, 0).contiguous().view(4, -1)
        d = d.detach().cpu().numpy()

        x1 = point[:, 0] - d[0, :]
        y1 = point[:, 1] - d[1, :]
        x2 = point[:, 0] + d[2, :]
        y2 = point[:, 1] + d[3, :]
        d[0, :], d[1, :], d[2, :], d[3, :] = x1, y1, x2, y2
        d[0, :], d[1, :], d[2, :], d[3, :] = corner2center(d)
        return d

    def _convert_score(self, score):
        """Torch variant: turn raw cls output into per-point probabilities.

        Two-channel heads use softmax and return the foreground column;
        single-channel heads use sigmoid.  (The RKNN path uses
        _convert_score_numpy instead.)
        """
        if self.cls_out_channels != 1:
            flat = score.permute(1, 2, 3, 0).contiguous().view(self.cls_out_channels, -1).permute(1, 0)
            return flat.softmax(1).detach()[:, 1].cpu().numpy()
        flat = score.permute(1, 2, 3, 0).contiguous().view(-1)
        return flat.sigmoid().detach().cpu().numpy()

    def _convert_bbox_numpy(self, delta, point):
        """Decode (l, t, r, b) offsets (numpy, NCHW layout) into center-form boxes.

        delta has shape (1, 4, H, W); each row holds the distance from the
        anchor point to one box side.  Returns a (4, N) array converted to
        (cx, cy, w, h) via corner2center.
        """
        offsets = delta.transpose((1, 2, 3, 0)).reshape(4, -1)
        corners = np.empty_like(offsets)
        corners[0] = point[:, 0] - offsets[0]  # x1
        corners[1] = point[:, 1] - offsets[1]  # y1
        corners[2] = point[:, 0] + offsets[2]  # x2
        corners[3] = point[:, 1] + offsets[3]  # y2
        corners[0], corners[1], corners[2], corners[3] = corner2center(corners)
        return corners

    def _convert_score_numpy(self, score):
        """Convert raw cls logits (numpy, NCHW layout) to per-point
        foreground probabilities.

        score has shape (1, cls_out_channels, H, W); returns the foreground
        (column 1) softmax probability for each of the H*W points.

        Fix: subtract the per-row max before exponentiation so np.exp cannot
        overflow to inf/nan for large logits — the result is mathematically
        identical to a plain softmax.  Also fixes the 'sofmax' typo.
        """
        logits = score.transpose((1, 2, 3, 0)).reshape(self.cls_out_channels, -1).transpose((1, 0))
        shifted = logits - np.max(logits, axis=-1, keepdims=True)
        e_x = np.exp(shifted)
        probs = e_x / np.sum(e_x, axis=-1, keepdims=True)
        return probs[:, 1]

    def _bbox_clip(self, cx, cy, width, height, boundary):
        """Clamp the box center into the image and enforce a minimum size.

        boundary is img.shape[:2], i.e. (rows, cols): boundary[1] bounds the
        x/width axis and boundary[0] bounds the y/height axis.  Width and
        height are kept in [10, image extent].
        """
        h_limit, w_limit = boundary[0], boundary[1]
        cx = min(max(cx, 0), w_limit)
        cy = min(max(cy, 0), h_limit)
        width = min(max(width, 10), w_limit)
        height = min(max(height, 10), h_limit)
        return cx, cy, width, height

    def get_subwindow(self, im, pos, model_sz, original_sz, avg_chans):
        """Crop a square patch centered at ``pos``, padding out-of-image
        regions with the per-channel average color.

        args:
            im: bgr based image, shape (H, W, 3)
            pos: center position (x, y); a single float means (pos, pos)
            model_sz: exemplar/instance size the patch is resized to
            original_sz: side length of the square crop in source pixels
            avg_chans: channel average used as the padding fill color
        return:
            float32 patch of shape (1, 3, model_sz, model_sz)
        """
        if isinstance(pos, float):
            pos = [pos, pos]
        sz = original_sz
        im_sz = im.shape
        c = (original_sz + 1) / 2
        # np.floor(v + 0.5) gives consistent half-up rounding; round() differs
        # between py2 and py3 (banker's rounding) at .5 values.
        context_xmin = np.floor(pos[0] - c + 0.5)
        context_xmax = context_xmin + sz - 1
        context_ymin = np.floor(pos[1] - c + 0.5)
        context_ymax = context_ymin + sz - 1
        # How far the crop extends past each image border (0 if inside).
        left_pad = int(max(0., -context_xmin))
        top_pad = int(max(0., -context_ymin))
        right_pad = int(max(0., context_xmax - im_sz[1] + 1))
        bottom_pad = int(max(0., context_ymax - im_sz[0] + 1))

        # Shift crop coordinates into the padded canvas's frame.
        context_xmin = context_xmin + left_pad
        context_xmax = context_xmax + left_pad
        context_ymin = context_ymin + top_pad
        context_ymax = context_ymax + top_pad

        r, c, k = im.shape
        if any([top_pad, bottom_pad, left_pad, right_pad]):
            # Build an enlarged canvas, paste the image, fill the borders with
            # the channel average, then crop from the canvas.
            size = (r + top_pad + bottom_pad, c + left_pad + right_pad, k)
            te_im = np.zeros(size, np.uint8)
            te_im[top_pad:top_pad + r, left_pad:left_pad + c, :] = im
            if top_pad:
                te_im[0:top_pad, left_pad:left_pad + c, :] = avg_chans
            if bottom_pad:
                te_im[r + top_pad:, left_pad:left_pad + c, :] = avg_chans
            if left_pad:
                te_im[:, 0:left_pad, :] = avg_chans
            if right_pad:
                te_im[:, c + left_pad:, :] = avg_chans
            im_patch = te_im[int(context_ymin):int(context_ymax + 1),
                       int(context_xmin):int(context_xmax + 1), :]
        else:
            im_patch = im[int(context_ymin):int(context_ymax + 1),
                       int(context_xmin):int(context_xmax + 1), :]

        # Resize to the network input size, then HWC -> 1x3xHxW float32.
        if not np.array_equal(model_sz, original_sz):
            im_patch = cv2.resize(im_patch, (model_sz, model_sz))
        im_patch = im_patch.transpose(2, 0, 1)
        im_patch = im_patch[np.newaxis, :, :, :]
        im_patch = im_patch.astype(np.float32)

        return im_patch

    def init(self, img, bbox):
        """Initialize tracking: crop the template and run the T backbone once.

        args:
            img(np.ndarray): BGR image
            bbox: (x, y, w, h) bbox of the target to track
        """
        self.center_pos = np.array([bbox[0] + (bbox[2] - 1) / 2,
                                    bbox[1] + (bbox[3] - 1) / 2])
        self.size = np.array([bbox[2], bbox[3]])

        # calculate z (template) crop size with context padding
        w_z = self.size[0] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
        h_z = self.size[1] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
        s_z = round(np.sqrt(w_z * h_z))

        # calculate channel average (used as the crop's padding color)
        self.channel_average = np.mean(img, axis=(0, 1))

        # get template crop
        z_crop = self.get_subwindow(img, self.center_pos,
                                    cfg.TRACK.EXEMPLAR_SIZE,
                                    s_z, self.channel_average)

        # NCHW -> NHWC for the RKNN runtime (cf. the 'nchw' data_format the
        # full RKNN API would take).
        back_T_in = z_crop.transpose((0,2,3,1))

        # Template features are cached and reused by every track() call.
        self.Toutput = self.rknn_Tback.inference(inputs=[back_T_in])

    def track(self, img):
        """Track the target in a new frame.

        args:
            img(np.ndarray): BGR image
        return:
            dict with:
                'bbox' (list): [x, y, width, height] in image coordinates
                'best_score': classification score of the selected point
        """
        # Search-region size derived from the current target size + context.
        w_z = self.size[0] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
        h_z = self.size[1] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(self.size)
        s_z = np.sqrt(w_z * h_z)
        scale_z = cfg.TRACK.EXEMPLAR_SIZE / s_z
        s_x = s_z * (cfg.TRACK.INSTANCE_SIZE / cfg.TRACK.EXEMPLAR_SIZE)
        x_crop = self.get_subwindow(img, self.center_pos,
                                    cfg.TRACK.INSTANCE_SIZE,
                                    round(s_x), self.channel_average)

        # Run the search backbone (NCHW -> NHWC for the RKNN runtime).
        back_X_in = x_crop.transpose((0,2,3,1))
        self.Xoutput = self.rknn_Xback.inference(inputs=[back_X_in])

        # Correlate the cached template features with the search features.
        head_T_in = self.Toutput[0].transpose((0,2,3,1))
        head_X_in = self.Xoutput[0].transpose((0,2,3,1))

        outputs = self.rknn_Head.inference(inputs=[head_T_in, head_X_in])

        # outputs[0]: cls logits, outputs[1]: bbox regression deltas.
        score = self._convert_score_numpy(outputs[0])
        pred_bbox = self._convert_bbox_numpy(outputs[1], self.points)

        def change(r):
            # Symmetric ratio penalty: max(r, 1/r) >= 1.
            return np.maximum(r, 1. / r)

        def sz(w, h):
            pad = (w + h) * 0.5
            return np.sqrt((w + pad) * (h + pad))

        # scale penalty
        s_c = change(sz(pred_bbox[2, :], pred_bbox[3, :]) /
                     (sz(self.size[0] * scale_z, self.size[1] * scale_z)))

        # aspect ratio penalty
        r_c = change((self.size[0] / self.size[1]) /
                     (pred_bbox[2, :] / pred_bbox[3, :]))
        penalty = np.exp(-(r_c * s_c - 1) * cfg.TRACK.PENALTY_K)

        # penalized score
        pscore = penalty * score

        # cosine window penalty: favors candidates near the previous center
        pscore = pscore * (1 - cfg.TRACK.WINDOW_INFLUENCE) + \
                 self.window * cfg.TRACK.WINDOW_INFLUENCE

        best_idx = np.argmax(pscore)
        # Map the best box back to image scale; its (cx, cy) is an offset
        # relative to the previous center position.
        bbox = pred_bbox[:, best_idx] / scale_z
        lr = penalty[best_idx] * score[best_idx] * cfg.TRACK.LR
        cx = bbox[0] + self.center_pos[0]
        cy = bbox[1] + self.center_pos[1]

        # smooth bbox: exponential moving average with adaptive rate lr
        width = self.size[0] * (1 - lr) + bbox[2] * lr
        height = self.size[1] * (1 - lr) + bbox[3] * lr

        # clip to image boundary
        cx, cy, width, height = self._bbox_clip(cx, cy, width,
                                                height, img.shape[:2])

        # update state for the next frame
        self.center_pos = np.array([cx, cy])
        self.size = np.array([width, height])

        # (x, y, w, h) with (x, y) the top-left corner
        bbox = [cx - width / 2,
                cy - height / 2,
                width,
                height]

        best_score = score[best_idx]
        return {
            'bbox': bbox,
            'best_score': best_score
        }