#!/home/robot/anaconda3/envs/yolograsp/bin/python3
import os
import numpy as np
import sys
import rospy
import torchvision.transforms as transforms
import ros_np_multiarray as rosnm
from vision_messages.msg import SingleObjPose
from vision_messages.srv import ObjPoseEst, ObjPoseEstResponse

############## Import Each Module #############
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../vision_mask/'))
from mmdet.apis import init_detector
from demo import get_mask, get_mask_multi_obj
sys.path.pop(0)
from pose_estimator.demo import get_pose, load_model_pose
###############################################

import warnings
warnings.filterwarnings('ignore')

from argparse import ArgumentParser

sys.path.insert(1, os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../..')) # src/
from utilities.tools.image_converter import convert_msg_to_nparray
sys.path.pop(1)

# Command-line configuration shared by the mask and pose models.
parser = ArgumentParser()
parser.add_argument('--device', default='cuda:0', help='Device used for inference')
################# 2D mask #####################
# SOLO instance-segmentation config/weights; paths are relative to this file
# and are resolved to absolute paths in Perception_Server.__init__.
parser.add_argument('--config_mask',
                    default='../../vision_mask/configs/solo/decoupled_solo_light_r50_fpn_3x_coco.py',
                    help='Config file')
parser.add_argument('--checkpoint_mask',
                    default='../../vision_mask/decoupled_solo_weights.pth',
                    help='Checkpoint file')
parser.add_argument('--show_mask', action='store_true', default=False)
# 'single' uses get_mask; 'multi' uses get_mask_multi_obj in the callback.
parser.add_argument('--mode', type=str, default='single', choices=['single', 'multi'])
################# 6D pose #####################
parser.add_argument('--n_cat', type=int, default=6, help='number of object categories')
parser.add_argument('--nv_prior', type=int,
                    default=1024,
                    help='number of vertices in shape priors')
parser.add_argument('--checkpoint_pose', type=str,
                    default='pose_estimator/checkpoints/real/model_50.pth',
                    help='resume from saved model')
parser.add_argument('--mean_points_emb', type=str,
                    default='pose_estimator/assets/mean_points_emb.npy',
                    help='resume from saved model')
parser.add_argument('--n_pts', type=int,
                    default=1024, help='number of foreground points')
parser.add_argument('--img_size', type=int, default=192, help='cropped image size')
parser.add_argument("--save_pose", action='store_true', default=False)
parser.add_argument("--show_pose", action='store_true', default=False)
config = parser.parse_args()
############### Other params ##################
# Camera intrinsic matrix [[fx, 0, cx], [0, fy, cy], [0, 0, 1]];
# presumably from a specific RGB-D camera calibration — verify against the sensor used.
config.intrinsics = np.array([[603.1332397460938, 0, 323.1732177734375],
                            [0, 601.6819458007812, 241.5373077392578],
                            [0, 0, 1]])
# Pixel-coordinate grids for a 640x480 image: xmap[j][i] = i (column index),
# ymap[j][i] = j (row index). Used for back-projecting depth to 3D points.
config.xmap = np.array([[i for i in range(640)] for j in range(480)])
config.ymap = np.array([[j for i in range(640)] for j in range(480)])
# Depth divisor; presumably converts millimeter depth to meters — TODO confirm.
config.norm_scale = 1000.0
# Standard ImageNet normalization applied to the color crop before the pose network.
config.norm_color = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
###############################################

# Maps predicted class ids to category names; only these three ids are
# handled — any other id raises KeyError in trans2array.
cate_map = {
    5: 'mug',
    1: 'bowl',
    0: 'bottle'
}

def trans2array(result_pose):
    """Convert a pose-estimation result dict into a list of SingleObjPose messages.

    Note: normalizes each 'pred_sRT' entry in place (divides its rotation part
    by the predicted scale), so the input dict is mutated.
    """
    messages = []
    for idx, class_id in enumerate(result_pose['pred_class_ids']):
        scale = result_pose['pred_s'][idx]
        # Remove the scale factor from the similarity transform so the message
        # carries a pure rigid transform.
        result_pose['pred_sRT'][idx][:3, :3] = result_pose['pred_sRT'][idx][:3, :3] / scale

        msg = SingleObjPose()
        msg.category_name = cate_map[class_id]
        msg.box = result_pose['pred_bboxes'][idx]
        msg.scale = scale
        msg.RT = rosnm.to_multiarray_f32(result_pose['pred_sRT'][idx])
        msg.obj_pcs = rosnm.to_multiarray_f32(result_pose['obj_pcs'][idx])
        msg.scales = result_pose['pred_scales'][idx]
        msg.cam_pcs = rosnm.to_multiarray_f32(result_pose['cam_pcs'][idx])
        messages.append(msg)
    return messages

class Perception_Server(object):
    """ROS service server that chains 2D instance segmentation (SOLO) with
    category-level 6D pose estimation on a color/depth image pair.

    Advertises /perception/vision_perception (ObjPoseEst). The request carries
    color and depth image messages; the response wraps a list of SingleObjPose.
    """

    def __init__(self, args):
        self.args = args
        # Resolve model paths relative to this file so the node works no matter
        # what working directory it is launched from.
        script_dir = os.path.dirname(os.path.abspath(__file__))
        self.args.config_mask = os.path.join(script_dir, self.args.config_mask)
        self.args.checkpoint_mask = os.path.join(script_dir, self.args.checkpoint_mask)
        self.args.checkpoint_pose = os.path.join(script_dir, self.args.checkpoint_pose)
        self.device = self.args.device
        self.model_mask = init_detector(self.args.config_mask, self.args.checkpoint_mask, self.device)
        rospy.loginfo("2D Mask Estimater(Solo) has initiated! Ready to get image!")

        # Mean shape priors consumed by the pose estimator alongside the model.
        self.mean_shapes = np.load(os.path.join(script_dir, self.args.mean_points_emb))
        self.model_pose = load_model_pose(self.args)
        rospy.loginfo("6D Pose Estimater has initiated! Ready to get image!")

        rospy.Service("/perception/vision_perception", ObjPoseEst, self.perception_callback)
        rospy.wait_for_service("/perception/vision_perception")


    def perception_callback(self, req):
        """Service handler: segment the color image, estimate 6D poses, return them.

        Args:
            req: ObjPoseEst request with `color` and `depth` image messages.

        Returns:
            ObjPoseEstResponse containing a list of SingleObjPose messages.

        Raises:
            ValueError: if args.mode is neither 'single' nor 'multi'.
        """
        rospy.loginfo("Get image! Processing ...")

        image = convert_msg_to_nparray(req.color)
        depth = convert_msg_to_nparray(req.depth)
        # Read the mode from self.args (the config handed to this instance)
        # instead of the module-level `config` global, so the class does not
        # silently depend on script-level state.
        if self.args.mode == 'single':
            result_mask = get_mask(image, self.model_mask, self.args)
        elif self.args.mode == 'multi':
            result_mask = get_mask_multi_obj(image, self.model_mask, self.args)
        else:
            raise ValueError('\'mode\' must be \'single\' or \'multi\', but now it is {}!'.format(self.args.mode))
        result_pose = get_pose(image, depth, result_mask, self.mean_shapes, model=self.model_pose, args=self.args)

        # get_pose may already return a message list; convert only raw dicts.
        if type(result_pose) is dict:
            result_pose = trans2array(result_pose)

        rospy.loginfo("Process Done! Return Pose Results!")
        return ObjPoseEstResponse(result_pose)


if __name__ == '__main__':
    # Register the node, bring up the perception service, and block until shutdown.
    rospy.init_node('perception_server', anonymous=True)
    server = Perception_Server(config)
    rospy.spin()
