"""
【节点名称】：
    ColorLineDetNode
【依赖项安装】：
    pip install spirems
    git clone -b spirecv2-dds https://gitee.com/amovlab/SpireCV.git
    cd SpireCV && pip install -e .
【订阅类型】：
    sensor_msgs::CompressedImage （输入图像）
    sensor_msgs::CameraCalibration (输入相机参数)
【发布类型】：
    geometry_msgs::Pose （检测结果，position.x为偏航角，position.y=1表示检测到，-1表示未检测到）
    sensor_msgs::CompressedImage （可视化结果，需借助可视化工具）
【构造参数说明】：
    parameter_file (str): 全局参数文件
    sms_shutdown (bool): 是否接收全局关闭信号,如果需要长期后台执行,建议设置为False
    specified_input_topic (str): 指定输入的话题地址
    specified_output_topic (str): 指定输出的话题地址
【节点参数】：
    line_color: 巡线的颜色，默认为"black"，黑色；可选"red"，红色、"yellow"，黄色、"green"，绿色、"blue"，蓝色 
    line_location: 图像中间,巡线扫描区域起始系数,默认0.5 
    line_location_a1: 图像上方颜色巡线区域扫描位置,相当于在图像正偏上方位置,默认0.3
    line_location_a2: 图像下方颜色巡线区域扫描位置,相当于在图像正偏下方位置,默认0.7
    
【备注】：
    无
"""

#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import os
import threading
from queue import Empty, Queue
from typing import Union

import cv2
import numpy as np
import yaml
from geometry_msgs.msg import Pose, Point, Quaternion
from spirems import Subscriber, Publisher, cvimg2sms, sms2cvimg, def_msg, QoS

from spirecv.base.BaseNode import BaseNode
from spirecv.dataloader.CameraNode import CameraNode
from spirecv.dataloader.VideoFileNode import VideoFileNode

# Shared detector state, written by get_line_area()/__main__ and read by
# seg()/color_det():
#   line_location*  - scan-band positions as a fraction of image height, 0-1
#                     measured up from the bottom (0.5 = image middle)
#   line_color      - color to track: black / red / yellow / green / blue;
#                     any other value falls back to black
#   cy_a1, cy_a2    - top rows of the auxiliary scan bands (set per frame)
#   half_h, half_w  - half the frame height/width (set per frame)
# NOTE(review): `global` at module scope is a no-op in Python -- these lines
# only *document* the shared names; the variables are actually created by the
# assignments in __main__ and get_line_area().
global line_location, line_location_a1, line_location_a2, line_color
global cy_a1, cy_a2, half_h, half_w
# NOTE(review): `suspand` (sic) is never assigned or read anywhere in this
# file -- presumably leftover; confirm before removing.
global suspand

def get_line_area(frame):
    """Cut three horizontal scan bands out of *frame*.

    Each band is 5% of the image height and ends at a configurable fraction
    above the bottom edge: the main band at ``line_location`` and two
    auxiliary bands at ``line_location_a1`` / ``line_location_a2``.

    Side effects: stores the frame half-size (``half_h``, ``half_w``) and the
    top rows of the auxiliary bands (``cy_a1``, ``cy_a2``) in module globals
    for later use by seg().

    Returns:
        (main_band, band_a1, band_a2) as BGR image slices of *frame*.
    """
    global cy_a1, cy_a2, half_h, half_w

    height = frame.shape[0]
    half_h = height / 2
    half_w = frame.shape[1] / 2

    def band(location):
        # Top row and the 5%-high slice ending `location` above the bottom.
        top = int(height * (1 - location - 0.05))
        bottom = int(height * (1 - location))
        return top, frame[top:bottom, :]

    _, main_band = band(line_location)
    cy_a1, band_a1 = band(line_location_a1)
    cy_a2, band_a2 = band(line_location_a2)

    return main_band, band_a1, band_a2


def cnt_area(cnt):
    """Sort key for contours: the area enclosed by *cnt*."""
    return cv2.contourArea(cnt)


# HSV threshold table for the supported line colors (OpenCV hue is 0-180).
# Color names not in the table fall back to the 'black' range.
_HSV_RANGES = {
    'black': ((0, 0, 0), (180, 255, 46)),
    'red': ((0, 43, 46), (10, 255, 255)),
    'yellow': ((26, 43, 46), (34, 255, 255)),
    'green': ((35, 43, 46), (77, 255, 255)),
    'blue': ((100, 43, 46), (124, 255, 255)),
}


def _color_mask(bgr, lower, upper):
    """Threshold *bgr* in HSV space and denoise the resulting binary mask.

    MORPH_OPEN removes small speckles; MORPH_CLOSE (dilate then erode)
    fills small holes inside the detected blob.
    """
    hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower, upper)
    kernel = np.ones((5, 5), np.uint8)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    return mask


def _largest_blob(mask):
    """Return ((cx, cy), area) of the largest contour in *mask*.

    Coordinates are the bounding-box center in mask-local pixels.
    Returns ((0, 0), -1) when no contour is found.
    """
    contours, _hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # sorted() also works on the tuple some OpenCV versions return,
    # unlike the in-place list.sort() the original code had disabled.
    contours = sorted(contours, key=cnt_area, reverse=True)
    if not contours:
        return (0, 0), -1
    x, y, w, h = cv2.boundingRect(contours[0])
    return (int(x + w / 2), int(y + h / 2)), cnt_area(contours[0])


def seg(line_area, line_area_a1, line_area_a2, _line_color='black'):
    """Segment the guide line in the three scan bands.

    Args:
        line_area: main scan band (BGR), as produced by get_line_area().
        line_area_a1: upper auxiliary scan band (BGR).
        line_area_a2: lower auxiliary scan band (BGR).
        _line_color: color to segment; unknown names fall back to black.

    Returns:
        (mask, center, area, center_a1, center_a2) where *mask* is the binary
        segmentation of the main band, *center*/*area* describe its largest
        blob ((0, 0) / -1 when none), and *center_a1* / *center_a2* are the
        auxiliary blob centers expressed as offsets from the full-image
        center ((0, 0) when none).
    """
    lower, upper = _HSV_RANGES.get(_line_color, _HSV_RANGES['black'])

    mask = _color_mask(line_area, lower, upper)
    center, area = _largest_blob(mask)

    # cy_a1/cy_a2 and half_h/half_w were stored by get_line_area() for this
    # frame; they convert band-local pixels into image-centered offsets.
    # BUGFIX: the auxiliary bands previously used whichever contour OpenCV
    # returned first (the sort calls were commented out); they now use the
    # largest contour, consistent with the main band.
    (cx, cy), area_a1 = _largest_blob(_color_mask(line_area_a1, lower, upper))
    center_a1 = (0, 0)
    if area_a1 >= 0:
        center_a1 = (cx - half_w, cy + cy_a1 - half_h)

    (cx, cy), area_a2 = _largest_blob(_color_mask(line_area_a2, lower, upper))
    center_a2 = (0, 0)
    if area_a2 >= 0:
        center_a2 = (cx - half_w, cy + cy_a2 - half_h)

    return mask, center, area, center_a1, center_a2


def _line_pose(area, cxcy, center_a1, center_a2, camera_matrix):
    """Mark the detected line center on *area* and build the result Pose.

    position.x carries the horizontal pixel offset from the principal point
    scaled into an angle via the camera's horizontal half field of view;
    position.y == 1 flags a successful detection. The orientation quaternion
    is repurposed to carry the two auxiliary-band center offsets.
    """
    cv2.circle(area, (cxcy[0], cxcy[1]), 4, (0, 0, 255), -1)
    angle = (cxcy[0] - camera_matrix[0][2]) / camera_matrix[0][2] \
        * math.atan((area.shape[1] / 2) / camera_matrix[0][0])
    return Pose(Point(angle, 1, 0),
                Quaternion(center_a1[0], center_a1[1], center_a2[0], center_a2[1]))


def color_det(frame, camera_matrix):
    """Detect the colored guide line in *frame*.

    Returns a Pose whose position.x is the steering angle and position.y is
    1 on detection / -1 otherwise; the quaternion carries the auxiliary-band
    center offsets. If the configured line_color is not found, retries with
    the default (black) range.
    """
    global line_location, line_location_a1, line_location_a2, line_color

    area_base, area_base_a1, area_base_a2 = get_line_area(frame)
    area, cxcy, a, center_a1, center_a2 = seg(
        area_base, area_base_a1, area_base_a2, _line_color=line_color)

    # "Not found" default still reports the auxiliary centers from the
    # first segmentation pass.
    pose = Pose(Point(0, -1, 0),
                Quaternion(center_a1[0], center_a1[1], center_a2[0], center_a2[1]))
    if a > 0:
        pose = _line_pose(area, cxcy, center_a1, center_a2, camera_matrix)
    else:
        # Configured color not found: fall back to the black range.
        area, cxcy, a, center_a1, center_a2 = seg(area_base, area_base_a1, area_base_a2)
        if a > 0:
            pose = _line_pose(area, cxcy, center_a1, center_a2, camera_matrix)

    # Debug visualization, resized so the short side is 360 px.
    # NOTE(review): imshow/waitKey require a display; this will fail on
    # headless deployments -- confirm whether it should be optional.
    h, w = frame.shape[:2]
    img_resize = 360
    if h > w:
        h = int(float(h) / w * img_resize)
        w = img_resize
    else:
        w = int(float(w) / h * img_resize)
        h = img_resize
    frame = cv2.resize(frame, (w, h))
    cv2.imshow("cap", frame)
    cv2.imshow("area", area)
    cv2.waitKey(1)

    return pose
 
 
class ColorLineDetNode(threading.Thread, BaseNode):
    """Color line-following detector node.

    Subscribes to a compressed image stream and the camera calibration, runs
    the color-line detector on each frame in a worker thread, and publishes
    the resulting geometry_msgs::Pose steering hint.
    """

    def __init__(
        self,
        job_name: str,
        ip: str = '127.0.0.1',
        port: int = 9094,
        param_dict_or_file: Union[dict, str] = None,
        sms_shutdown: bool = True,
        launch_next_emit: bool = True,
        specified_input_topic: str = None,
        specified_output_topic: str = None,
        realtime_det: bool = True
    ):
        """Create the node, wire up topics, and start the worker thread.

        Args:
            job_name: job namespace used to build default topic URLs.
            ip / port: spirems broker address.
            param_dict_or_file: global parameter dict or file path.
            sms_shutdown: whether to honor the global shutdown signal.
            launch_next_emit: publish a launch_next trigger for file sources.
            specified_input_topic: override for the image input topic.
            specified_output_topic: override for the result output topic.
            realtime_det: drop backlogged frames, processing only the newest.
        """
        threading.Thread.__init__(self)
        BaseNode.__init__(
            self, self.__class__.__name__, job_name,
            ip=ip, port=port, param_dict_or_file=param_dict_or_file,
            sms_shutdown=sms_shutdown
        )

        input_url = '/' + job_name + '/sensor/image_raw'
        if specified_input_topic is not None:
            input_url = specified_input_topic

        # BUGFIX: this calibration *input* topic was previously overridden by
        # specified_output_topic (copy-paste error), pointing the calibration
        # subscriber at the result topic.
        input_url_c = '/' + job_name + '/sensor/calibration_info'

        output_url = '/' + job_name + '/detector/results'
        if specified_output_topic is not None:
            output_url = specified_output_topic

        self.realtime_det = realtime_det
        self.job_queue = Queue()
        self.queue_pool.append(self.job_queue)

        # Calibration arrives asynchronously via calibration_callback();
        # run() drops frames until camera_matrix is available.
        self.width = None
        self.height = None
        self.camera_matrix = None
        self.distortion = None

        self._image_reader = Subscriber(
            input_url, 'sensor_msgs::CompressedImage', self.image_callback,
            ip=ip, port=port, qos=QoS.Reliability
        )
        self._calibration_reader = Subscriber(
            input_url_c, 'sensor_msgs::CameraCalibration', self.calibration_callback,
            ip=ip, port=port, qos=QoS.Reliability
        )
        self._result_writer = Publisher(
            output_url, 'geometry_msgs::Pose',
            ip=ip, port=port, qos=QoS.Reliability
        )
        self.launch_next_emit = launch_next_emit
        if self.launch_next_emit:
            self._next_writer = Publisher(
                '/' + job_name + '/launch_next', 'std_msgs::Boolean',
                ip=ip, port=port, qos=QoS.Reliability
            )

        self.start()

    def release(self):
        """Shut down the node and tear down all pub/sub endpoints."""
        BaseNode.release(self)
        self._image_reader.kill()
        self._calibration_reader.kill()
        self._result_writer.kill()
        # BUGFIX: _next_writer only exists when launch_next_emit is True;
        # the unconditional kill/join raised AttributeError otherwise.
        if self.launch_next_emit:
            self._next_writer.kill()
        self._image_reader.join()
        self._calibration_reader.join()
        self._result_writer.join()
        if self.launch_next_emit:
            self._next_writer.join()

    def image_callback(self, msg):
        """Queue an incoming frame; in realtime mode drop any backlog first."""
        if self.realtime_det:
            # Drain without blocking: run() consumes concurrently, so an
            # empty()-check followed by a blocking get() could hang this
            # callback thread if run() wins the race for the last item.
            while True:
                try:
                    self.job_queue.get_nowait()
                except Empty:
                    break
        self.job_queue.put(msg)

    def calibration_callback(self, msg):
        """Cache image size and camera intrinsics from a calibration message."""
        self.width = msg['width']
        self.height = msg['height']
        self.distortion = np.array(msg['D'])
        # Set camera_matrix last: run() uses it as the "calibration ready"
        # flag, so width/height must already be valid when it appears.
        self.camera_matrix = np.array(msg['K']).reshape(3, 3)

    def run(self):
        """Worker loop: detect the line on each queued frame and publish."""
        while self.is_running():
            msg = self.job_queue.get(block=True)
            if msg is None:
                break  # shutdown sentinel

            # Drop frames that arrive before the camera calibration does;
            # width/height/camera_matrix are unusable until then.
            if self.camera_matrix is None:
                continue

            img = sms2cvimg(msg)
            img = cv2.resize(img, (self.width, self.height))

            pose = color_det(img, self.camera_matrix)
            # Fill the template from def_msg() instead of discarding it, so
            # any metadata fields it provides are preserved.
            res_msg = def_msg('geometry_msgs::Pose')
            res_msg['position'] = {
                'x': pose.position.x,
                'y': pose.position.y,
                'z': pose.position.z
            }
            res_msg['orientation'] = {
                'w': pose.orientation.w,
                'x': pose.orientation.x,
                'y': pose.orientation.y,
                'z': pose.orientation.z
            }

            # For file-based sources, request the next frame once this one
            # has been processed (only when the trigger publisher exists).
            if self.launch_next_emit and 'img_total' in msg:
                next_msg = def_msg('std_msgs::Boolean')
                next_msg['data'] = True
                self._next_writer.publish(next_msg)

            self._result_writer.publish(res_msg)

        self.release()
        print('{} quit!'.format(self.__class__.__name__))

if __name__ == '__main__':

    # global line_location, line_color
    # Module-level configuration read by get_line_area()/color_det().
    line_location = 0.5  # main scan band, fraction above image bottom (0-1)
    line_location_a1 = 0.3  # upper auxiliary scan band
    line_location_a2 = 0.7  # lower auxiliary scan band
    # NOTE(review): 'white' is not among the colors handled by seg()
    # (black/red/yellow/green/blue), so detection silently falls back to the
    # black HSV range -- confirm this is intended.
    line_color = 'white'  

    job_name = 'detection'
    parameter_file = '/home/amov/Desktop/SpireCV/params/spirecv2/default_params.json'
    # Swap in VideoFileNode to feed frames from a file instead of a camera.
    #dataloader = VideoFileNode(job_name, param_dict_or_file=parameter_file)
    dataloader = CameraNode(job_name, param_dict_or_file=parameter_file)
    algorithm = ColorLineDetNode(job_name, param_dict_or_file=parameter_file)