#!/home/robot/anaconda3/envs/yolograsp/bin/python3
import os
from matplotlib.pyplot import table
import numpy as np
import sys

from numpy.lib.arraysetops import isin
import rospy
import torchvision.transforms as transforms
import warnings
warnings.filterwarnings('ignore')
from argparse import ArgumentParser
import ros_np_multiarray as rosnm
from vision_messages.srv import MaskPlaneDetect, MaskPlaneDetectResponse

sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../..')) # src/
from utilities.tools.image_converter import convert_msg_to_nparray
from perception.plane_detect.scripts.plane_detect import plane_detect
sys.path.pop(0)

sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../vision_mask/'))
from mmdet.apis import init_detector
from demo import get_mask
sys.path.pop(0)

# Maps integer class ids (as produced by get_mask) to human-readable category
# names returned in the service response.
# NOTE(review): ids presumably follow the SOLO checkpoint's label order —
# verify against the trained model's class list.
cate_map = {
    5: 'mug',
    1: 'bowl',
    0: 'bottle'
}

class Mask_Server(object):
    """ROS service that fuses 2D instance masks (SOLO) with table-plane detection.

    Advertises /perception/mask_with_plane_detection. For each request carrying
    a color and a depth image it returns the detected object's category name,
    the object's 3D point cloud in the camera frame, and the table-plane normal.
    """

    def __init__(self, args):
        """Load the SOLO detector and advertise the service.

        Args:
            args: parsed CLI namespace; besides the CLI options it must also
                carry ``intrinsics`` (3x3), ``xmap``/``ymap`` (per-pixel index
                grids) and ``norm_scale`` (depth-unit divisor), as set up in
                ``__main__``.
        """
        self.args = args
        # Resolve model config/checkpoint paths relative to this script's dir.
        script_dir = os.path.dirname(os.path.abspath(__file__))
        self.args.config_mask = os.path.join(script_dir, self.args.config_mask)
        self.args.checkpoint_mask = os.path.join(script_dir, self.args.checkpoint_mask)
        self.device = self.args.device
        self.model_mask = init_detector(self.args.config_mask, self.args.checkpoint_mask, self.device)
        rospy.loginfo("2D Mask Estimater(Solo) has initiated! Ready to get image!")

        rospy.Service("/perception/mask_with_plane_detection", MaskPlaneDetect, self.callback)
        # NOTE(review): waiting for the service this node itself just advertised
        # is effectively a no-op; kept for backward compatibility.
        rospy.wait_for_service("/perception/mask_with_plane_detection")

    def callback(self, req):
        """Service handler: mask the object, intersect with the plane mask,
        and return (category_name, camera-frame point cloud, table normal).

        Args:
            req: MaskPlaneDetect request with ``color`` and ``depth`` image msgs.

        Returns:
            MaskPlaneDetectResponse(category_name, cam_pc, table_normal).

        Raises:
            ValueError: if the instance mask and plane mask shapes disagree.
        """
        rospy.loginfo("Get image! Processing ...")

        image = convert_msg_to_nparray(req.color)
        depth = convert_msg_to_nparray(req.depth)
        result_mask = get_mask(image, self.model_mask, self.args)
        result_pd = plane_detect(depth, self.args.intrinsics)

        # NOTE(review): this dict lookup requires 'class_ids' to be a single
        # hashable scalar (one detection); verify against get_mask's output.
        category_name = cate_map[result_mask['class_ids']]
        # Explicit check instead of `assert`: asserts are stripped under -O,
        # and np.allclose is the wrong tool for comparing shape tuples.
        if result_mask['masks'].shape != result_pd['mask'].shape:
            raise ValueError(
                "mask shape mismatch: instance mask %s vs plane mask %s"
                % (result_mask['masks'].shape, result_pd['mask'].shape))
        # Keep only pixels that are both on the object and on the table plane.
        unified_mask = result_mask['masks'] & result_pd['mask']
        cam_pc = self.get_masked_pc(depth, unified_mask)
        cam_pc = rosnm.to_multiarray_f32(cam_pc)
        table_normal = rosnm.to_multiarray_f32(result_pd['normal'])

        return MaskPlaneDetectResponse(category_name, cam_pc, table_normal)

    def get_masked_pc(self, depth, mask):
        """Back-project the masked depth pixels into a camera-frame point cloud.

        Args:
            depth: 2D depth image (units divided by ``args.norm_scale`` → meters).
            mask:  2D boolean mask of the same shape selecting the object pixels.

        Returns:
            (N, 3) float array of [x, y, z] points in the camera frame, one row
            per masked pixel.
        """
        # Flat indices of the selected pixels.
        choose = mask.flatten().nonzero()[0]
        depth_masked = depth.flatten()[choose][:, np.newaxis]
        xmap_masked = self.args.xmap.flatten()[choose][:, np.newaxis]
        ymap_masked = self.args.ymap.flatten()[choose][:, np.newaxis]
        cam_fx, cam_fy, cam_cx, cam_cy = self.args.intrinsics[0, 0], self.args.intrinsics[1, 1], \
                                         self.args.intrinsics[0, 2], self.args.intrinsics[1, 2]
        pt2 = depth_masked / self.args.norm_scale  # z coordinate in meters
        pt0 = (xmap_masked - cam_cx) * pt2 / cam_fx  # x under camera axis (pinhole model)
        pt1 = (ymap_masked - cam_cy) * pt2 / cam_fy  # y under camera axis
        points = np.concatenate((pt0, pt1, pt2), axis=1)

        return points


if __name__ == '__main__':
    # CLI options for the SOLO mask model.
    arg_parser = ArgumentParser()
    arg_parser.add_argument('--device', default='cuda:0', help='Device used for inference')
    arg_parser.add_argument('--config_mask',
                            default='../../vision_mask/configs/solo/decoupled_solo_light_r50_fpn_3x_coco.py',
                            help='Config file')
    arg_parser.add_argument('--checkpoint_mask',
                            default='../../vision_mask/decoupled_solo_weights.pth',
                            help='Checkpoint file')
    arg_parser.add_argument('--show_mask', action='store_true', default=False)

    config = arg_parser.parse_args()
    # Camera intrinsics for a 640x480 image: [[fx, 0, cx], [0, fy, cy], [0, 0, 1]].
    config.intrinsics = np.array([[603.1332397460938, 0, 323.1732177734375],
                                  [0, 601.6819458007812, 241.5373077392578],
                                  [0, 0, 1]])
    # Per-pixel row (y) and column (x) index grids, shape (480, 640) each.
    config.ymap, config.xmap = np.indices((480, 640))
    config.norm_scale = 1000.0
    config.norm_color = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

    rospy.init_node('perception_server', anonymous=True)
    perception = Mask_Server(config)
    rospy.spin()
