#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import traceback
from threading import Thread

import cv2
import numpy as np
import rospy
from cv_bridge import CvBridge
from geometry_msgs.msg import Pose
from marker_location.msg import markers
from scipy.spatial.transform import Rotation as R
from sensor_msgs.msg import CameraInfo, Image
from std_msgs.msg import UInt8MultiArray


def map_img77(img):
    """Downsample a binarized marker image to a 7x7 grid of 0/255 cells.

    The image is cut into a 7x7 grid along the fixed boundaries below
    (tuned for ~50x50 inputs). A cell becomes 255 when more than half of
    its pixels are white (value 255), otherwise 0.

    Args:
        img (numpy.ndarray): 2-D image with pixel values in {0, 255}.

    Returns:
        numpy.ndarray: uint8 array of shape (7, 7) with values in {0, 255}.
    """
    cuts = [6, 14, 22, 30, 37, 44]
    fill_ratio = np.empty((7, 7), dtype=np.float32)
    for r, band in enumerate(np.split(img, cuts, axis=0)):
        for c, cell in enumerate(np.split(band, cuts, axis=1)):
            # Fraction of the cell that is white.
            fill_ratio[r, c] = np.sum(cell) / cell.size / 255.
    return np.where(fill_ratio > 0.5, 255, 0).astype(np.uint8)


# Load the eight digit templates (reduced to 7x7 grids) that
# classification() matches candidate quads against.
templates = []
tpl_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "template")
rospy.loginfo(tpl_path)
for i in range(1, 9):
    tpl = cv2.imread(os.path.join(tpl_path, "{}.png".format(i)), 0)
    # cv2.imread silently returns None for a missing/unreadable file, which
    # would otherwise only surface later as a cryptic crash inside map_img77.
    if tpl is None:
        raise IOError("template image not found or unreadable: {}".format(os.path.join(tpl_path, "{}.png".format(i))))
    templates.append(map_img77(tpl))
rospy.loginfo("size of templates: {}".format(len(templates)))


def pose_aruco_2_ros(rvec, tvec):
    """Convert an OpenCV (rvec, tvec) pair into a geometry_msgs Pose.

    Args:
        rvec: Rodrigues rotation vector, as produced by cv2.solvePnP.
        tvec: translation vector of shape (3, 1).

    Returns:
        Pose: ROS pose with the translation and the equivalent quaternion.
    """
    pose = Pose()
    pose.position.x = tvec[0, 0]
    pose.position.y = tvec[1, 0]
    pose.position.z = tvec[2, 0]
    # cv2.Rodrigues returns (rotation_matrix, jacobian); only the matrix is needed.
    rot_mat, _ = cv2.Rodrigues(rvec)
    # scipy's as_quat() order is (x, y, z, w), matching the assignments below.
    qx, qy, qz, qw = R.from_matrix(rot_mat).as_quat()
    pose.orientation.x = qx
    pose.orientation.y = qy
    pose.orientation.z = qz
    pose.orientation.w = qw
    return pose


def sort_contour(cnt):
    """Reorder a 4-point contour to (top-left, top-right, bottom-right, bottom-left).

    Points are split into a left/right pair by comparing x against the
    centroid, then ordered top/bottom by y within each pair (image
    coordinates: smaller y is higher).

    Args:
        cnt (numpy.ndarray): contour of shape (4, 1, 2), OpenCV layout.

    Returns:
        numpy.ndarray or None: reordered copy of ``cnt``, or None when the
        quad is degenerate (the centroid does not have exactly two points
        on each side).

    Raises:
        AssertionError: when ``cnt`` does not contain exactly 4 points.
    """
    assert len(cnt) == 4, "sort_contour expects exactly 4 contour points"

    # x coordinate of the centroid; only x is needed for the left/right split.
    cx = (cnt[0, 0, 0] + cnt[1, 0, 0] + cnt[2, 0, 0] + cnt[3, 0, 0]) / 4.0

    left_mask = cnt[:, 0, 0] < cx
    # A usable quad has exactly two corners on each side of the centroid.
    if np.count_nonzero(left_mask) != 2:
        return None
    lefts = cnt[left_mask]
    rights = cnt[~left_mask]

    # Within each side, the smaller y coordinate is the upper corner.
    if lefts[0, 0, 1] < lefts[1, 0, 1]:
        top_left, bottom_left = lefts[0], lefts[1]
    else:
        top_left, bottom_left = lefts[1], lefts[0]
    if rights[0, 0, 1] < rights[1, 0, 1]:
        top_right, bottom_right = rights[0], rights[1]
    else:
        top_right, bottom_right = rights[1], rights[0]

    new_cnt = cnt.copy()
    new_cnt[0] = top_left
    new_cnt[1] = top_right
    new_cnt[2] = bottom_right
    new_cnt[3] = bottom_left
    return new_cnt


def classification(frame, quads, template_ids=range(1, 9)):
    """Classify the number drawn inside each detected quad.

    Each quad is perspective-warped to a canonical 50x50 top-down patch,
    binarized with Otsu thresholding, reduced to a 7x7 grid (map_img77)
    and compared in all four 90-degree rotations against the pre-loaded
    templates.

    Args:
        frame (numpy.ndarray): BGR image the quads were detected in.
        quads (sequence of numpy.ndarray): contours of shape (4, 1, 2),
            ordered top-left, top-right, bottom-right, bottom-left
            (see sort_contour).
        template_ids (iterable of int, optional): 1-based template ids to
            match against. Defaults to range(1, 9).

    Returns:
        (quads_ID, minpoints_list, wrapped_img_list):
        - quads_ID (list of int): matched template id per quad, or -1 when
          no template matched closely enough.
        - minpoints_list (list of float): best (smallest) mismatch ratio
          per quad, in [0, 1].
        - wrapped_img_list (list of numpy.ndarray): binarized 50x50 warped
          patch per quad.
    """
    quads_ID = []
    minpoints_list = []
    wrapped_img_list = []
    for quad in quads:
        # Rectify the quad to a canonical 50x50 patch.
        points_src = np.array(
            [
                [(quad[0, 0, 0], quad[0, 0, 1])],
                [(quad[1, 0, 0], quad[1, 0, 1])],
                [(quad[2, 0, 0], quad[2, 0, 1])],
                [(quad[3, 0, 0], quad[3, 0, 1])],
            ],
            dtype="float32",
        )
        points_dst = np.array([[0, 0], [49, 0], [49, 49], [0, 49]], dtype="float32")
        out_img = cv2.warpPerspective(frame, cv2.getPerspectiveTransform(points_src, points_dst), (50, 50))
        out_img = cv2.cvtColor(out_img, cv2.COLOR_BGR2GRAY)
        out_img = cv2.threshold(out_img, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
        wrapped_img_list.append(out_img)

        # Optional normalization trick, disabled by default.
        resize = False
        if resize:
            try:
                # Resize trick from XL: clear the border, drop small
                # connected components, then crop the remaining digit to its
                # bounding box and re-center it at a fixed 36x36 size, which
                # makes matching less sensitive to the digit's scale.
                out_img[:3, :] = 0
                out_img[47:, :] = 0
                out_img[:, :3] = 0
                out_img[:, 47:] = 0
                num_labels, labels, stats, _centroids = cv2.connectedComponentsWithStats(out_img)
                for label_i in range(1, num_labels):
                    if stats[label_i, cv2.CC_STAT_AREA].astype(float) < 35:  # originally 50
                        out_img[labels == label_i] = 0

                nonzero_img = np.nonzero(out_img)
                left, right = np.min(nonzero_img[0]), np.max(nonzero_img[0])
                top, bottom = np.min(nonzero_img[1]), np.max(nonzero_img[1])
                right, bottom = min(right + 1, 49), min(bottom + 1, 49)
                nonzero_img = out_img[left:right, top:bottom]
                nonzero_img = cv2.resize(nonzero_img, (36, 36), interpolation=cv2.INTER_NEAREST)
                out_img = np.zeros((50, 50), dtype=np.uint8)
                out_img[7:7 + 36, 7:7 + 36] = nonzero_img
            except Exception:
                # e.g. np.min/np.max raise on an all-zero patch; fall back to
                # the unmodified patch.
                rospy.logerr("resize trick failed, back to original img as template")
        out_img = map_img77(out_img)

        # The corner ordering does not fix the digit's orientation, so try
        # all four 90-degree rotations of the candidate grid.
        match_candidates = [
            out_img,
            cv2.rotate(out_img, cv2.ROTATE_180),
            cv2.rotate(out_img, cv2.ROTATE_90_CLOCKWISE),
            cv2.rotate(out_img, cv2.ROTATE_90_COUNTERCLOCKWISE),
        ]

        min_diff = 10000
        min_diff_target = 0
        for tid in template_ids:
            for candidate in match_candidates:
                diff_img = cv2.absdiff(templates[tid - 1], candidate)
                # Fraction of the 7x7 cells that disagree with the template.
                diff_ratio = np.sum(diff_img) / 255.0 / diff_img.size
                if min_diff > diff_ratio:
                    min_diff = diff_ratio
                    min_diff_target = tid

        # Accept the best match only when fewer than 20% of cells disagree.
        if min_diff < 0.2:
            quads_ID.append(min_diff_target)
        else:
            quads_ID.append(-1)
        minpoints_list.append(min_diff)

    return quads_ID, minpoints_list, wrapped_img_list


class Processor:
    """ROS node body: subscribes to the color camera stream, detects number
    markers in each frame, and publishes their ids and poses on /marker_poses.

    NOTE(review): preprocess() and detect_square() below are unfinished TODO
    stubs. Until they are implemented, on_image_raw() will fail when it tries
    to use the None values detect_square() returns.
    """

    def __init__(self, verbose=True):
        # verbose: when True, a background thread shows live OpenCV windows
        # with the raw frame and the preprocessing mask.
        self.verbose = verbose
        self.bridge = CvBridge()
        self.image_raw = None   # latest BGR frame (written by on_image_raw)
        self.image_bool = None  # mask produced by preprocess() for that frame

        # Block until the camera is actually streaming, then grab the
        # intrinsics once (K is a flat 9-element row-major matrix).
        rospy.wait_for_message("/camera/color/image_raw", Image)
        camera_info = rospy.wait_for_message("/camera/color/camera_info", CameraInfo, timeout=1.0)

        self.camera_matrix = np.array(camera_info.K).reshape(3, 3)

        if self.verbose:
            try:
                # NOTE(review): this except only guards Thread(...).start()
                # itself; exceptions raised inside vis_images are handled in
                # that method's own loop.
                Thread(target=self.vis_images).start()
            except:
                rospy.logerr("The visualization window has collapsed!")

        rospy.Subscriber("/camera/color/image_raw", Image, self.on_image_raw, queue_size=1)
        self.pub = rospy.Publisher('/marker_poses', markers, queue_size=1)

    def vis_images(self):
        """Continuously display the latest frame and mask in OpenCV windows.

        Runs on a background thread until rospy shutdown; waitKey(1) keeps
        the OpenCV GUI event loop serviced.
        """
        while not rospy.is_shutdown():
            try:
                if self.image_raw is not None:
                    cv2.imshow('image_raw', self.image_raw)
                if self.image_bool is not None:
                    cv2.imshow('image_preprocess', self.image_bool)
                cv2.waitKey(1)
            except:
                rospy.logerr("The visualization window has collapsed!")

    def on_image_raw(self, image):
        """Image-topic callback: detect, classify, annotate, and publish markers.

        Args:
            image (sensor_msgs.msg.Image): incoming color frame.
        """

        ##################################################
        # Convert image messages to opencv images
        # Preprocess input image and obtain a MASK of the same shape
        ##################################################
        self.image_raw = self.bridge.imgmsg_to_cv2(image, "bgr8")
        self.image_bool = self.preprocess(self.image_raw)

        ##################################################
        # Estimate quad poses according to the MASK
        ##################################################
        quads, tvec_list, rvec_list = self.detect_square(
            self.image_bool,
            self.camera_matrix,
        )

        ##################################################
        # Identify the number within the quads
        ##################################################
        quads_ID, _, _ = classification(
            self.image_raw,
            quads,
            template_ids=range(1, 9),
        )
        # Keep only quads classified as ids 1..5; others (including -1 for
        # "no confident match") are dropped.
        ids = [i for i in range(len(quads_ID)) if 1 <= quads_ID[i] <= 5]
        pose_list = [pose_aruco_2_ros(r, t) for t, r in zip(tvec_list, rvec_list)]

        id_list, quads_list, pose_list = [quads_ID[_] for _ in ids], [quads[_] for _ in ids], [pose_list[_] for _ in ids]

        ##################################################
        # Add captions on visualizations
        ##################################################
        if self.verbose:
            cv2.drawContours(self.image_raw, quads_list, -1, (0, 255, 0), 1)
            # id_cnd counts how many markers of each id (1..5) were already
            # captioned, so repeated ids get stacked labels above the box.
            id_cnd = [0 for _ in range(5)]
            color_lst = [(255, 0, 255), (0, 255, 255), (255, 255, 0), (0, 0, 255), (0, 255, 0), (255, 0, 0)]
            for i in range(len(id_list)):
                bbox = cv2.boundingRect(quads_list[i])
                # Marker id, then its position in centimeters, drawn above
                # the bounding box; stacked 20px per duplicate id.
                cv2.putText(self.image_raw, '{}'.format(id_list[i]), (bbox[0], (-20 * id_cnd[id_list[i] - 1] - 20) + bbox[1]), cv2.FONT_HERSHEY_SIMPLEX, 1.0, color_lst[id_cnd[id_list[i] - 1]], 2)
                cv2.putText(self.image_raw, '({:.1f},{:.1f},{:.1f})'.format(pose_list[i].position.x * 100, pose_list[i].position.y * 100, pose_list[i].position.z * 100),
                            (bbox[0], -20 * id_cnd[id_list[i] - 1] + bbox[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.75, color_lst[id_cnd[id_list[i] - 1]], 2)
                id_cnd[id_list[i] - 1] += 1
                # Clamp so the color/offset index stays within color_lst.
                if id_cnd[id_list[i] - 1] > 5:
                    id_cnd[id_list[i] - 1] = 5

        ##################################################
        # Construct ROS messages
        ##################################################
        # Reuse the input header so downstream consumers get the original
        # timestamp and frame id.
        marker_msg = markers()
        marker_msg.header.seq = image.header.seq
        marker_msg.header.stamp = image.header.stamp
        marker_msg.header.frame_id = image.header.frame_id
        msg_id_lst = []
        for i in range(len(id_list)):
            msg_id_lst.append(id_list[i])
            marker_msg.poses.append(pose_list[i])
        marker_msg.ids = UInt8MultiArray(data=msg_id_lst)
        self.pub.publish(marker_msg)

    def preprocess(self, frame):
        """Preprocess the image captured by the camera, so that pixels that belong to cubes have the value of 255 while the counterparts have the value of 0

        Args:
            frame (numpy.ndarray): Captured image in RGB channels

        Returns:
            numpy.ndarray: The mask of cubes. Numpy array of the same shape as frame
        """
        # TODO stub: currently returns the input frame unchanged instead of
        # a binary mask.
        image_bool = frame

        ############# TODO ##############
        
        #################################

        return image_bool

    def detect_square(self, image_bool, camera_matrix):
        """Find the quad containing numbers within given frames, along with their poses in the environment

        Args:
            image_bool (numpy.ndarray): The mask of cubes provided by preprocess function
            camera_matrix (numpy.ndarray): The intrinsic matrix of the camera, 3x3

        Returns:
            (q, t, r):
            - q: quad position on the image
            - t: quad translation vectors
            - r: quad rotation vectors
        """
        # TODO stub: returns (None, None, None) until implemented, which
        # makes on_image_raw() fail downstream.
        quads_prj, tvec_list, rvec_list = None, None, None

        ############# TODO ##############
        
        #################################

        return quads_prj, tvec_list, rvec_list


if __name__ == "__main__":
    # Anonymous node name so several detector instances can coexist.
    rospy.init_node('marker_detection', anonymous=True)

    # The Processor constructor blocks until the camera streams, then spins
    # up its subscriber, publisher, and (optional) visualization thread.
    detector = Processor(verbose=True)
    rospy.loginfo("Image thread started")
    rospy.spin()
