#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Author: renjin@bit.edu.cn
# @Date  : 2024-09-12


"""
【节点名称】：
    SpireViewSAM2Node
【依赖项安装】：
    pip install spirems
    git clone https://github.com/facebookresearch/segment-anything-2.git
    cd segment-anything-2 & pip install -e .
【订阅类型】：
    spirecv_msgs::SAMJob （输入SAM任务）
【发布类型】：
    spirecv_msgs::SAMResult （SAM分割结果）
【构造参数说明】：
    parameter_file (str): 全局参数文件
【节点参数】：
    sam2_checkpoint (str): segment-anything-2 的模型，如 sam2_hiera_large.pt
    model_cfg (str): segment-anything-2 的模型配置，如 sam2_hiera_l.yaml
【备注】：
    无
"""

import os

import cv2
# if using Apple MPS, fall back to CPU for unsupported ops
# os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
import numpy as np
import torch
import matplotlib.pyplot as plt
from PIL import Image
import threading
from queue import Queue
import time
from spirems import Subscriber, Publisher, cvimg2sms, sms2cvimg, def_msg
from BaseNode import BaseNode
from sam2.build_sam import build_sam2
from sam2.sam2_image_predictor import SAM2ImagePredictor
from pycocotools import mask as mask_utils
import base64


def show_mask(mask, ax, random_color=False, borders=True):
    """Overlay a binary segmentation mask on a matplotlib axes.

    Args:
        mask: binary mask array; the last two dimensions are (H, W).
        ax: matplotlib axes to draw on.
        random_color: if True, use a random RGB color with 0.6 alpha
            instead of the default light blue.
        borders: if True, also trace the mask contours in white.
    """
    if random_color:
        color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)
    else:
        color = np.array([30/255, 144/255, 255/255, 0.6])
    h, w = mask.shape[-2:]
    mask = mask.astype(np.uint8)
    # Broadcast the (H, W) mask against the RGBA color -> (H, W, 4) image.
    mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
    if borders:
        # cv2 is imported at module level; findContours needs a uint8 mask.
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        # Lightly smooth the contours before drawing them.
        contours = [cv2.approxPolyDP(contour, epsilon=0.01, closed=True) for contour in contours]
        mask_image = cv2.drawContours(mask_image, contours, -1, (1, 1, 1, 0.5), thickness=2)
    ax.imshow(mask_image)

def show_points(coords, labels, ax, marker_size=375):
    """Scatter prompt points on *ax*: label 1 in green, label 0 in red."""
    positives = coords[labels == 1]
    negatives = coords[labels == 0]
    ax.scatter(positives[:, 0], positives[:, 1], color='green', marker='*',
               s=marker_size, edgecolor='white', linewidth=1.25)
    ax.scatter(negatives[:, 0], negatives[:, 1], color='red', marker='*',
               s=marker_size, edgecolor='white', linewidth=1.25)

def show_box(box, ax):
    """Draw an axis-aligned box given as (x0, y0, x1, y1) on *ax*."""
    x0, y0, x1, y1 = box[0], box[1], box[2], box[3]
    rect = plt.Rectangle((x0, y0), x1 - x0, y1 - y0,
                         edgecolor='green', facecolor=(0, 0, 0, 0), lw=2)
    ax.add_patch(rect)

def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_labels=None, borders=True):
    """Display each predicted mask over *image* in its own figure.

    Optionally overlays the prompt points and/or box; when more than one
    mask is given, each figure is titled with the mask index and score.
    """
    show_title = len(scores) > 1
    for i, (mask, score) in enumerate(zip(masks, scores)):
        plt.figure(figsize=(10, 10))
        plt.imshow(image)
        show_mask(mask, plt.gca(), borders=borders)
        if point_coords is not None:
            assert input_labels is not None
            show_points(point_coords, input_labels, plt.gca())
        if box_coords is not None:
            show_box(box_coords, plt.gca())
        if show_title:
            plt.title(f"Mask {i+1}, Score: {score:.3f}", fontsize=18)
        plt.axis('off')
        plt.show()


class SAM2Pipeline(threading.Thread):
    """Per-client SAM2 inference worker thread.

    Each connected client gets its own pipeline owning a SAM2ImagePredictor
    that wraps the shared SAM2 model. Jobs ('spirecv_msgs::SAMJob' dicts)
    are pushed via put_msg(); results are published as
    'spirecv_msgs::SAMResult' on '/<client_id>/OutputSAMResult'.
    """

    def __init__(self, sam2_model, client_id, ip, port):
        """
        Args:
            sam2_model: a built SAM2 model, shared across pipelines.
            client_id (str): unique id of the requesting client.
            ip (str): spirems broker address.
            port (int): spirems broker port.
        """
        threading.Thread.__init__(self)
        self.predictor = SAM2ImagePredictor(sam2_model)
        self.client_id = client_id
        self.msg_queue = Queue()
        self._result_writer = Publisher(
            '/{}/OutputSAMResult'.format(self.client_id), 'spirecv_msgs::SAMResult', ip=ip, port=port
        )
        self.is_running = True
        self.start()

    def put_msg(self, msg):
        """Queue one 'spirecv_msgs::SAMJob' message for processing."""
        self.msg_queue.put(msg)

    def quit(self):
        """Stop the worker: release the publisher and unblock the queue."""
        print('quit')
        self._result_writer.kill()
        self.is_running = False
        # A None sentinel wakes the blocking get() in run() so the thread exits.
        self.msg_queue.put(None)

    def run(self):
        while self.is_running:
            msg = self.msg_queue.get(block=True)
            if msg is None:  # sentinel from quit()
                break

            try:
                # set_image is echoed back so the client knows whether this
                # job carried a new image (1) or reused the cached one (0).
                set_image = 0
                if 'type' in msg['image'] and msg['image']['type'] == 'sensor_msgs::CompressedImage':
                    img_bgr = sms2cvimg(msg['image'])
                    # SAM2 expects RGB input; sms images decode as BGR (OpenCV).
                    img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
                    self.predictor.set_image(img_rgb)
                    set_image = 1

                input_point = None
                if len(msg['input_point']):
                    input_point = np.array(msg['input_point'])
                input_label = None
                if len(msg['input_label']):
                    input_label = np.array(msg['input_label'])
                input_box = None
                if len(msg['input_box']):
                    input_box = np.array(msg['input_box'])
                    if input_box.ndim == 1:
                        # A single box arrives flat; predict() wants (N, 4).
                        input_box = input_box[None, :]

                res_msg = def_msg('spirecv_msgs::SAMResult')
                res_msg["set_image"] = set_image
                res_msg["image_id"] = msg["image_id"]
                if "input_id" in msg:
                    res_msg["input_id"] = msg["input_id"]
                if input_point is None and input_label is None and input_box is None:
                    # No prompts in this job: nothing to segment.
                    res_msg["encoded_masks"] = {}
                else:
                    masks, scores, logits = self.predictor.predict(
                        point_coords=input_point,
                        point_labels=input_label,
                        box=input_box,  # predict() accepts None directly
                        multimask_output=False,
                    )
                    masks = masks.squeeze()
                    if masks.ndim == 3:
                        # (N, H, W) -> (H, W, N), the layout RLE encoding expects.
                        masks = np.transpose(masks, (1, 2, 0))

                    masks = masks.astype(np.uint8)
                    # pycocotools requires a Fortran-contiguous uint8 array.
                    encoded_masks = mask_utils.encode(masks.copy(order='F'))

                    if isinstance(encoded_masks, dict):
                        # A single 2-D mask yields one RLE dict; normalize to a list.
                        encoded_masks = [encoded_masks]
                    for j in range(len(encoded_masks)):
                        # RLE 'counts' is raw bytes; base64 it for JSON transport.
                        encoded_masks[j]['counts'] = base64.b64encode(encoded_masks[j]['counts']).decode('utf-8')
                    res_msg["encoded_masks"] = encoded_masks
                self._result_writer.publish(res_msg)
            except Exception as e:
                # Any failure (malformed job, CUDA error, ...) permanently
                # stops this pipeline; the node recreates it on the client's
                # next job.
                print('Client: {}, Error: {}'.format(self.client_id, e))
                self.is_running = False


class SpireViewSAM2Node(threading.Thread, BaseNode):
    """Node serving SAM2 segmentation jobs over spirems.

    Subscribes to '/SpireView/InputSAMJob' ('spirecv_msgs::SAMJob') and
    dispatches each job to a per-client SAM2Pipeline, which publishes the
    'spirecv_msgs::SAMResult' on '/<client_id>/OutputSAMResult'.
    """

    # Clients idle longer than this (seconds) are dropped at the next sweep.
    CLIENT_TIMEOUT_S = 600

    def __init__(
        self,
        ip: str = '127.0.0.1',
        port: int = 9094,
        parameter_file: str = ''
    ):
        """
        Args:
            ip: spirems broker address.
            port: spirems broker port.
            parameter_file: global parameter file (see BaseNode).
        """
        threading.Thread.__init__(self)
        BaseNode.__init__(self, self.__class__.__name__, 'SpireViewSAM2', ip=ip, port=port,
                          parameter_file=parameter_file)

        self.ip = ip
        self.port = port
        self.job_queue = Queue()
        self.queue_pool.append(self.job_queue)

        self._image_reader = Subscriber(
            '/SpireView/InputSAMJob', 'spirecv_msgs::SAMJob', self.job_callback,
            ip=ip, port=port
        )

        device = self._select_device()

        self.sam2_checkpoint = self.get_param("sam2_checkpoint", "G:/deep/segment-anything-2/checkpoints/sam2_hiera_large.pt")
        self.model_cfg = self.get_param("model_cfg", "G:/deep/segment-anything-2/sam2_configs/sam2_hiera_l.yaml")
        self.sam2_model = build_sam2(self.model_cfg, self.sam2_checkpoint, device=device)
        # client_id -> {'pipeline': SAM2Pipeline, 'time': last-seen timestamp}
        self.client_buff = {}
        self.start()

    @staticmethod
    def _select_device():
        """Pick the best available torch device and configure it for SAM2."""
        if torch.cuda.is_available():
            device = torch.device("cuda")
        elif torch.backends.mps.is_available():
            device = torch.device("mps")
        else:
            device = torch.device("cpu")
        print(f"using device: {device}")

        if device.type == "cuda":
            # Run the whole process under bfloat16 autocast (entered once and
            # deliberately never exited — matches the official SAM2 notebooks).
            torch.autocast("cuda", dtype=torch.bfloat16).__enter__()
            # turn on tfloat32 for Ampere GPUs (https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices)
            if torch.cuda.get_device_properties(0).major >= 8:
                torch.backends.cuda.matmul.allow_tf32 = True
                torch.backends.cudnn.allow_tf32 = True
        elif device.type == "mps":
            print(
                "\nSupport for MPS devices is preliminary. SAM 2 is trained with CUDA and might "
                "give numerically different outputs and sometimes degraded performance on MPS. "
                "See e.g. https://github.com/pytorch/pytorch/issues/84936 for a discussion."
            )
        return device

    def release(self):
        """Release node resources and the job subscriber."""
        BaseNode.release(self)
        self._image_reader.kill()

    def job_callback(self, msg):
        """Subscriber callback: enqueue an incoming SAM job for run()."""
        self.job_queue.put(msg)

    def run(self):
        while self.is_running():
            msg = self.job_queue.get(block=True)
            if msg is None:  # sentinel used to shut the node down
                break

            if 'client_id' not in msg or not isinstance(msg['client_id'], str):
                continue  # malformed job, ignore

            client_id = msg['client_id']
            now = time.time()
            if client_id not in self.client_buff:
                pipeline = SAM2Pipeline(self.sam2_model, client_id, self.ip, self.port)
                self.client_buff[client_id] = {
                    'pipeline': pipeline,
                    'time': now
                }
                # Sweep idle clients whenever a new client connects (jobs from
                # known clients only refresh their own timestamp below).
                stale = [key for key, value in self.client_buff.items()
                         if now - value['time'] > self.CLIENT_TIMEOUT_S]
                for key in stale:
                    self.client_buff[key]['pipeline'].quit()
                    del self.client_buff[key]
            else:
                self.client_buff[client_id]['time'] = now

            if self.client_buff[client_id]['pipeline'].is_running:
                self.client_buff[client_id]['pipeline'].put_msg(msg)
            else:
                # The pipeline died on an earlier job; quit() releases its
                # publisher so the socket is not leaked before dropping it.
                # The client's next job will create a fresh pipeline.
                self.client_buff[client_id]['pipeline'].quit()
                del self.client_buff[client_id]
        self.release()
        print('{} quit!'.format(self.__class__.__name__))


if __name__ == '__main__':
    import uuid  # used by the quoted demo-publisher code below when enabled

    # Start the SAM2 service node; it runs in its own thread until released.
    node = SpireViewSAM2Node(parameter_file='default_params.json')
    """
    img = cv2.imread('G:/deep/segment-anything-2/notebooks/images/truck.jpg')
    pub = Publisher('/SpireView/InputSAMJob', 'spirecv_msgs::SAMJob')
    sam_msg = def_msg('spirecv_msgs::SAMJob')
    sam_msg['client_id'] = str(uuid.uuid4()).replace('-', '_')
    sam_msg['image_id'] = str(uuid.uuid4()).replace('-', '_')
    sam_msg['image'] = cvimg2sms(img)

    while 1:
        sam_msg['image'] = cvimg2sms(img)
        sam_msg['input_point'] = []
        sam_msg['input_label'] = []
        sam_msg['input_box'] = []
        pub.publish(sam_msg, True)
        sam_msg['image'] = {}
        for i in range(10):
            time.sleep(1)
            sam_msg['input_point'] = [[500, 375]]
            sam_msg['input_label'] = [1]
            sam_msg['input_box'] = []
            pub.publish(sam_msg, True)
        time.sleep(30)
    """
