import os
import time
import sys
import numpy as np
import cv2
import torch.nn.functional as F
import torch
import threading
from queue import Queue
from spirems import Subscriber, Publisher, cvimg2sms, sms2cvimg, def_msg, QoS, BaseNode, get_extra_args
from spirems.mod_helper import download_model
import argparse
from typing import Union
import platform
from copy import copy
import json
from spirecv.algorithm.utils import calc_fov, calc_los_pos, draw_bbox
from ultralytics import SAM
from pycocotools import mask as pycoco_mask
import base64


# Square side length used by the pre/post-processing helpers below: images are
# scaled so the longer side equals IMG_SIZE, then padded to IMG_SIZE x IMG_SIZE.
IMG_SIZE = 448
# Mask logit threshold; not referenced in this chunk — presumably used by a
# consumer of postprocess() elsewhere (TODO confirm).
MASK_THRESHOLD = 0.0


def get_preprocess_shape(oldh, oldw, img_size=None):
    """Compute the resized (height, width) that fits inside a square input.

    Scales (oldh, oldw) so the longer side equals ``img_size`` while keeping
    the aspect ratio; each side is rounded to the nearest integer.

    Args:
        oldh: original image height in pixels.
        oldw: original image width in pixels.
        img_size: target long-side length; defaults to the module-level
            IMG_SIZE (kept as ``None`` default for backward compatibility).

    Returns:
        Tuple ``(newh, neww)`` of the scaled dimensions.
    """
    if img_size is None:
        img_size = IMG_SIZE
    scale = img_size * 1.0 / max(oldh, oldw)
    newh, neww = oldh * scale, oldw * scale
    # +0.5 before int() implements round-half-up on the positive values here.
    return (int(newh + 0.5), int(neww + 0.5))


def img_preprocess(img_src):
    """Convert a BGR image to the model's square float32 input batch.

    The image is converted to RGB, resized so its longer side equals
    IMG_SIZE, padded on the right/bottom with black to IMG_SIZE x IMG_SIZE,
    and returned as a float32 array of shape (1, IMG_SIZE, IMG_SIZE, 3).
    """
    rgb = cv2.cvtColor(img_src, cv2.COLOR_BGR2RGB)

    src_h, src_w = rgb.shape[:2]
    dst_h, dst_w = get_preprocess_shape(src_h, src_w)

    resized = cv2.resize(rgb, (dst_w, dst_h), interpolation=cv2.INTER_LINEAR)
    # Pad only right/bottom so the content stays anchored at the top-left.
    padded = cv2.copyMakeBorder(
        resized, 0, IMG_SIZE - dst_h, 0, IMG_SIZE - dst_w,
        cv2.BORDER_CONSTANT, value=(0, 0, 0)
    )
    return np.array([padded]).astype(np.float32)


def coords_preprocess(coords, ori_shape):
    """Map point coordinates from the original frame into the resized input frame.

    ``coords[..., 0]`` holds x values, ``coords[..., 1]`` holds y values;
    ``ori_shape`` is ``(height, width)``. The array is modified in place and
    also returned for convenience.
    """
    src_h, src_w = ori_shape
    dst_h, dst_w = get_preprocess_shape(src_h, src_w)

    x_scale = dst_w / src_w
    y_scale = dst_h / src_h
    coords[..., 0] = coords[..., 0] * x_scale
    coords[..., 1] = coords[..., 1] * y_scale
    return coords


def postprocess(masks, input_shape, ori_shape, img_size=None):
    """Upsample model mask outputs back to the original image resolution.

    Args:
        masks: numpy array of shape (N, C, h, w) with mask values.
        input_shape: (height, width) of the resized, unpadded model input;
            everything beyond it is padding and is cropped away.
        ori_shape: (height, width) of the original image.
        img_size: square model input size; defaults to the module-level
            IMG_SIZE (``None`` default keeps the original call signature).

    Returns:
        numpy array of shape (N, C, ori_shape[0], ori_shape[1]).
    """
    if img_size is None:
        img_size = IMG_SIZE
    masks = F.interpolate(torch.from_numpy(masks), (img_size, img_size), mode='bilinear')

    # Drop the padded right/bottom region, then rescale to the source size.
    masks = masks[..., : input_shape[0], : input_shape[1]]
    masks = F.interpolate(masks, ori_shape, mode='bilinear')
    return masks.numpy()


def draw(images, masks, coords_list, labels_list, color=(144, 144, 30)):
    """Overlay a segmentation mask and prompt annotations, then show the frame.

    Args:
        images: BGR image to draw on.
        masks: mask array; the last two dims are (h, w).
        coords_list: iterable of (x, y) prompt coordinates.
        labels_list: per-point labels — 0: negative point (red circle),
            1: positive point (green circle), 2: box top-left corner,
            3: box bottom-right corner.
        color: BGR color used to tint the mask region.
    """
    alpha = 0.5
    h, w = masks.shape[-2:]
    color = np.array(color)
    mask_image = masks.reshape(h, w, 1) * color.reshape(1, 1, -1).astype(np.uint8)
    # Alpha-blend the tint only where the mask is non-zero.
    images = np.where(mask_image != 0, cv2.add(alpha * images, (1 - alpha) * mask_image), images)

    box_x1, box_y1, box_x2, box_y2 = None, None, None, None
    for coords, labels in zip(coords_list, labels_list):
        if labels == 0:
            cv2.circle(images, tuple(coords), 9, color=(0, 0, 255), thickness=2)
        elif labels == 1:
            cv2.circle(images, tuple(coords), 9, color=(100, 220, 150), thickness=2)
        elif labels in [2, 3]:
            # coords are (x, y); the original unpacked them into variables
            # named (top, left) / (right, bottom), which was misleading.
            if labels == 2:
                box_x1, box_y1 = coords
            elif labels == 3:
                box_x2, box_y2 = coords
            # BUG FIX: use `is not None` — the previous truthiness test
            # (`if top and left and ...`) skipped the rectangle whenever any
            # corner coordinate was exactly 0 (a corner on the image border).
            if all(v is not None for v in (box_x1, box_y1, box_x2, box_y2)):
                cv2.rectangle(images, (box_x1, box_y1), (box_x2, box_y2), (0, 255, 0), 2)

    cv2.imshow('samwin', images.astype(np.uint8))
    cv2.waitKey(5)


# Most recent mouse position inside the "samwin" window, as [x, y].
# Read by MobileSAMNode_Cuda.run() as the SAM point prompt.
ref_point = [0, 0]


def mouse_moving(event, x, y, flags, param):
    # OpenCV mouse callback: record the cursor position on every move so the
    # segmentation prompt follows the mouse.
    global ref_point
    if event == cv2.EVENT_MOUSEMOVE:
        ref_point = [x, y]


class MobileSAMNode_Cuda(threading.Thread, BaseNode):
    """SpireMS node that runs MobileSAM point-prompt segmentation on an image stream.

    Frames arriving on the input topic are queued by ``image_callback``; the
    worker thread (``run``) segments each frame around the current mouse
    position in the preview window and publishes ``spirecv_msgs::2DTargets``
    results plus an annotated preview image.
    """

    def __init__(
        self,
        job_name: str,
        ip: str = '127.0.0.1',
        port: int = 9094,
        param_dict_or_file: Union[dict, str] = None,
        sms_shutdown: bool = True,
        **kwargs
    ):
        threading.Thread.__init__(self)
        # sms_shutdown may arrive as a CLI string; normalize to a real bool.
        sms_shutdown = True if sms_shutdown in ['True', 'true', '1', True] else False
        BaseNode.__init__(
            self,
            self.__class__.__name__,
            job_name,
            ip=ip,
            port=port,
            param_dict_or_file=param_dict_or_file,
            sms_shutdown=sms_shutdown,
            **kwargs
        )
        self.launch_next_emit = self.get_param("launch_next_emit", False)
        self.specified_input_topic = self.get_param("specified_input_topic", "")
        self.specified_output_topic = self.get_param("specified_output_topic", "")
        self.realtime_det = self.get_param("realtime_det", True)
        self.remote_ip = self.get_param("remote_ip", "127.0.0.1")
        self.remote_port = self.get_param("remote_port", 9094)
        self.objs_in_meter = self.get_param("objs_in_meter", {"object": [-1, 1.8]})
        self.model_pt = self.get_param("model_pt", "sms::mobile_sam.pt")
        # use_shm: 1 = force shared memory, 0 = force compressed image,
        # -1 = auto (shared memory on Linux only).
        self.use_shm = self.get_param("use_shm", -1)
        self.params_help()

        self.b_use_shm = False
        if self.use_shm == 1 or (self.use_shm == -1 and platform.system() == 'Linux'):
            self.b_use_shm = True

        # "sms::" prefix means the weights are fetched from the model store.
        if self.model_pt.startswith("sms::"):
            self.local_model_pt = download_model(self.__class__.__name__, self.model_pt)
            assert self.local_model_pt is not None
        else:
            self.local_model_pt = self.model_pt

        self.model = SAM(self.local_model_pt)

        input_url = '/' + job_name + '/sensor/image_raw'
        if len(self.specified_input_topic) > 0:
            input_url = self.specified_input_topic

        output_url = '/' + job_name + '/detector/results'
        if len(self.specified_output_topic) > 0:
            output_url = self.specified_output_topic

        calib_url = '/' + job_name + '/sensor/calibration_info'

        self.job_queue = Queue()
        self.queue_pool.append(self.job_queue)

        # Fallback intrinsics used until a CameraCalibration message arrives;
        # calib_width/height stay -1 so LOS/pos is skipped until calibrated.
        self.calib_width, self.calib_height = -1, -1
        self.camera_matrix = [712.12, 0, 645.23, 0, 705.87, 327.34, 0, 0, 1]
        self.camera_matrix = np.array(self.camera_matrix).reshape(3, 3)
        self.distortion = [0.0, 0.0, 0.0, 0.0, 0.0]
        self.distortion = np.array(self.distortion)

        self._image_reader = Subscriber(
            input_url, 'std_msgs::Null', self.image_callback,
            ip=ip, port=port, qos=QoS.Reliability
        )
        self._calibration_reader = Subscriber(
            calib_url, 'sensor_msgs::CameraCalibration', self.calibration_callback,
            ip=ip, port=port, qos=QoS.Reliability
        )
        self._result_writer = Publisher(
            output_url, 'spirecv_msgs::2DTargets',
            ip=self.remote_ip, port=self.remote_port, qos=QoS.Reliability
        )
        self._show_writer = Publisher(
            '/' + job_name + '/detector/image_results', 'memory_msgs::RawImage' if self.b_use_shm else 'sensor_msgs::CompressedImage',
            ip=ip, port=port
        )
        if self.launch_next_emit:
            self._next_writer = Publisher(
                '/' + job_name + '/launch_next', 'std_msgs::Boolean',
                ip=ip, port=port, qos=QoS.Reliability
            )

        self.start()

    def trans_det_results(self, sam_results, h, w, camera_matrix, calib_wh, objs_in_meter):
        """Convert one ultralytics SAM result into a ``spirecv_msgs::2DTargets`` message.

        Args:
            sam_results: a single ultralytics result with ``boxes`` and ``masks``.
            h, w: source image height and width.
            camera_matrix: 3x3 intrinsics used for FOV/LOS computation.
            calib_wh: [calib_width, calib_height]; values <= 0 disable FOV/LOS.
            objs_in_meter: per-category real-world size hints for calc_los_pos.

        Returns:
            The populated 2DTargets message dict.
        """
        sms_results = def_msg('spirecv_msgs::2DTargets')

        sms_results["file_name"] = ""
        sms_results["height"] = h
        sms_results["width"] = w
        has_calib = False
        if calib_wh[0] > 0 and calib_wh[1] > 0:
            sms_results["fov_x"], sms_results["fov_y"] = calc_fov(camera_matrix, calib_wh)
            has_calib = True

        sms_results["targets"] = []
        clss = sam_results.boxes.cls.cpu().numpy().astype(np.float64)
        conf = sam_results.boxes.conf.cpu().numpy().astype(np.float64)
        xywh = sam_results.boxes.xywh.cpu().numpy().astype(np.float64)
        masks = sam_results.masks.data.cpu().numpy()
        # The mask tensor can have a different resolution than the image;
        # sh/sw map image-space coordinates into mask-space.
        sh = masks.shape[1] / sms_results["height"]
        sw = masks.shape[2] / sms_results["width"]
        for i in range(len(clss)):
            ann = dict()
            name = 'object'
            ann["category_name"] = name
            ann["category_id"] = int(clss[i])
            ann["score"] = round(conf[i], 3)
            # Convert center-based xywh to top-left-based [x, y, w, h].
            ann["bbox"] = [round(j, 3) for j in xywh[i].tolist()]
            ann["bbox"][0] = ann["bbox"][0] - ann["bbox"][2] / 2
            ann["bbox"][1] = ann["bbox"][1] - ann["bbox"][3] / 2
            # Normalized box center in [0, 1].
            ann["cxy"] = [
                (ann["bbox"][0] + ann["bbox"][2] / 2.) / sms_results["width"],
                (ann["bbox"][1] + ann["bbox"][3] / 2.) / sms_results["height"]
            ]
            if has_calib and name in objs_in_meter:
                ann["los"], ann["pos"] = calc_los_pos(
                    camera_matrix, calib_wh,
                    ann["cxy"], [ann["bbox"][2], ann["bbox"][3]],
                    objs_in_meter[name]
                )
            # Build a full-resolution binary mask for this target, then
            # RLE-encode it for transport.
            mask = np.zeros(sam_results.orig_shape[:2], dtype=np.uint8)
            x1 = int(ann["bbox"][0] * sw)
            y1 = int(ann["bbox"][1] * sh)
            x2 = x1 + int(ann["bbox"][2] * sw)
            y2 = y1 + int(ann["bbox"][3] * sh)
            x1 = max(x1, 0)
            y1 = max(y1, 0)
            ww = int(ann["bbox"][2])
            hh = int(ann["bbox"][3])
            # BUG FIX: also require ww > 0 and hh > 0 — cv2.resize raises on a
            # zero-sized target, which a degenerate box used to trigger.
            if x2 > x1 >= 0 and y2 > y1 >= 0 and ww > 0 and hh > 0:
                bbox_mask = masks[i][y1:y2, x1:x2]
                bbox_mask = np.where(bbox_mask >= 0.5, 1, 0).astype(np.uint8)
                bbox_mask = cv2.resize(bbox_mask, (ww, hh))
                # BUG FIX: clamp the paste origin and crop the patch to the
                # destination slice — boxes touching the image border used to
                # raise a shape-mismatch error on assignment.
                xx1 = max(int(ann["bbox"][0]), 0)
                yy1 = max(int(ann["bbox"][1]), 0)
                region = mask[yy1:yy1 + hh, xx1:xx1 + ww]
                region[:] = bbox_mask[:region.shape[0], :region.shape[1]]
                # pycocotools RLE encoding requires a Fortran-ordered array.
                encoded_mask = pycoco_mask.encode(mask.copy('F'))
                ann["segmentation"] = {}
                ann["segmentation"]["size"] = encoded_mask["size"]
                ann["segmentation"]["counts"] = base64.b64encode(encoded_mask["counts"]).decode('utf-8')
            sms_results["targets"].append(ann)

        return sms_results

    def release(self):
        """Shut down the node and kill all subscribers/publishers."""
        BaseNode.release(self)
        self._image_reader.kill()
        self._result_writer.kill()
        self._show_writer.kill()
        # BUG FIX: _next_writer is only created when launch_next_emit is set;
        # the unconditional kill() raised AttributeError on shutdown otherwise.
        if self.launch_next_emit:
            self._next_writer.kill()

    def image_callback(self, msg):
        """Queue an incoming image message; in realtime mode keep only the newest frame."""
        if self.realtime_det:
            if not self.job_queue.empty():
                self.job_queue.queue.clear()
        img = sms2cvimg(msg)
        self.job_queue.put({'msg': msg, 'img': img})

    def calibration_callback(self, msg):
        """Adopt camera intrinsics/distortion from a CameraCalibration message."""
        self.calib_width = msg['width']
        self.calib_height = msg['height']

        self.camera_matrix = np.array(msg['K']).reshape(3, 3)
        self.distortion = np.array(msg['D'])

    def run(self):
        """Worker loop: segment queued frames at the mouse point and publish results."""
        cv2.namedWindow("samwin")
        cv2.setMouseCallback("samwin", mouse_moving)
        while self.is_running():
            msg_dict = self.job_queue.get(block=True)
            if msg_dict is None:  # poison pill from the queue pool -> stop
                break

            msg, img_src = msg_dict['msg'], msg_dict['img']

            t1 = time.time()

            # DO MobileSAM: single positive point prompt at the cursor.
            outputs = self.model.predict(img_src, points=ref_point, labels=[1], verbose=False)

            res_msg = self.trans_det_results(
                outputs[0],
                img_src.shape[0], img_src.shape[1],
                self.camera_matrix,
                [self.calib_width, self.calib_height],
                self.objs_in_meter
            )

            # Propagate bookkeeping fields from the source frame, if present.
            res_msg['file_name'] = msg.get('file_name', '')
            for key in ('client_id', 'img_id', 'img_total'):
                if key in msg:
                    res_msg[key] = msg[key]
            res_msg['time_used'] = time.time() - t1

            self._result_writer.publish(res_msg)

            if 'img_total' in msg and self.launch_next_emit:
                next_msg = def_msg('std_msgs::Boolean')
                next_msg['data'] = True
                self._next_writer.publish(next_msg)
                # BUG FIX: msg may lack 'img_id' even when 'img_total' exists;
                # .get() avoids a KeyError here.
                print('img_id', msg.get('img_id'))

            # Preview publication: attach results to either a fresh shared
            # memory image message or the original (compressed) image message.
            if self.b_use_shm:
                msg = self._show_writer.cvimg2sms_mem(img_src)
            msg['spirecv_msgs::2DTargets'] = res_msg
            self._show_writer.publish(msg)

            img_show = img_src.copy()
            img_show = draw_bbox(img_show, msg, show_seg=True)
            cv2.imshow('samwin', img_show)
            cv2.waitKey(5)
            # END

        self.release()
        print('{} quit!'.format(self.__class__.__name__))


if __name__ == '__main__':
    # CLI: config file, job name and the SpireMS core endpoint.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, default='default_params.json',
                        help='SpireCV2 Config (.json)')
    parser.add_argument('--job-name', '-j', type=str, default='live',
                        help='SpireCV Job Name')
    parser.add_argument('--ip', type=str, default='127.0.0.1',
                        help='SpireMS Core IP')
    parser.add_argument('--port', type=int, default=9094,
                        help='SpireMS Core Port')
    args, unknown_args = parser.parse_known_args()

    # A relative config path is resolved against <repo>/params/spirecv2,
    # where <repo> is the path up to and including 'spirecv-pro'.
    if not os.path.isabs(args.config):
        here = os.path.abspath(__file__)
        repo_root = here[:here.find('spirecv-pro') + 11]
        args.config = os.path.join(repo_root, 'params', 'spirecv2', args.config)
    print("--config:", args.config)
    print("--job-name:", args.job_name)

    # Unrecognized CLI flags become extra node parameters.
    extra = get_extra_args(unknown_args)

    node = MobileSAMNode_Cuda(args.job_name, param_dict_or_file=args.config,
                              ip=args.ip, port=args.port, **extra)
    node.spin()
