#!/home/robot/anaconda3/envs/6dof-graspnet/bin/python

"""
ROS service server for 6-DoF GraspNet: receives an object point cloud and returns estimated grasp poses.
"""


from __future__ import print_function
import ros_np_multiarray
import rospy
from vision_messages.srv import GraspPoseEst, GraspPoseEstResponse
import os, sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'    # ignore tensorflow log info
sys.path.append('./')
BASE_DIR = os.path.dirname(__file__)
import numpy as np
import argparse
import grasp_estimator
import tensorflow as tf

import warnings
warnings.filterwarnings('ignore')


sys.path.insert(1, os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../..')) # src/
from utilities.tools.image_converter import convert_msg_to_nparray
sys.path.pop(1)


def make_parser():
    """Construct the command-line parser for the 6-DoF GraspNet demo server.

    Defaults point at the pre-trained VAE / evaluator checkpoints shipped with
    the repository; the relative paths are resolved against this script's
    directory later, in GraspNet_Server.__init__.
    """
    parser = argparse.ArgumentParser(
        description='6-DoF GraspNet Demo',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    # (flag, keyword-arguments) table — registered in a loop to keep the
    # option list flat and easy to scan.
    options = [
        ('--vae_checkpoint_folder',
         dict(type=str,
              default='../checkpoints/latent_size_2_ngpus_1_gan_1_confidence_weight_0.1_npoints_1024_num_grasps_per_object_256_train_evaluator_0_')),
        ('--evaluator_checkpoint_folder',
         dict(type=str,
              default='../checkpoints/npoints_1024_train_evaluator_1_allowed_categories__ngpus_8_/')),
        # When set, refinement is gradient-based instead of sample-based.
        ('--gradient_based_refinement',
         dict(action='store_true', default=False)),
        # Minimum grasp-quality score kept by the estimator.
        ('--threshold', dict(type=float, default=0.0)),
        # How many top-scoring grasps to keep after sorting.
        ('--result_num', dict(type=int, default=1)),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)

    return parser


class GraspNet_Server(object):
    """ROS node exposing 6-DoF GraspNet as a `/perception/graspnet` service.

    On construction it loads the VAE + evaluator networks into a TensorFlow
    session; each service request carries an object point cloud and is
    answered with the top-scoring grasp pose(s) in the camera frame.
    """

    def __init__(self, args=None):
        """Initialize the ROS node, build the network, and register the service.

        args: parsed argparse namespace from make_parser(); mutated in place
              (checkpoint paths are rewritten to absolute paths below).
        """
        rospy.init_node('graspnet_server', anonymous=True)
        self.args = args

        # Resolve the (relative) checkpoint folders against this script's
        # directory so the node works regardless of the current working dir.
        args.vae_checkpoint_folder = os.path.join(BASE_DIR, args.vae_checkpoint_folder)
        args.evaluator_checkpoint_folder = os.path.join(BASE_DIR, args.evaluator_checkpoint_folder)
        cfg = grasp_estimator.joint_config(
            args.vae_checkpoint_folder,
            args.evaluator_checkpoint_folder,
        )
        # Sample-based improvement and gradient-based refinement are mutually
        # exclusive; gradient refinement uses fewer (10 vs 20) refine steps.
        cfg['threshold'] = args.threshold
        cfg['sample_based_improvement'] = 1 - int(args.gradient_based_refinement)
        cfg['num_refine_steps'] = 10 if args.gradient_based_refinement else 20
        
        # NOTE: cfg is indexed like a dict above but read via attribute access
        # (cfg.gpu, cfg.num_refine_steps) below — presumably an EasyDict-style
        # object returned by grasp_estimator.joint_config.
        estimator = grasp_estimator.GraspEstimator(cfg)
        # Must be set before tf.Session() is created for GPU pinning to apply.
        os.environ['CUDA_VISIBLE_DEVICES'] = str(cfg.gpu)
        self.sess = tf.Session()
        estimator.build_network()
        estimator.load_weights(self.sess)

        self.cfg = cfg
        self.estimator = estimator

    
        rospy.Service("/perception/graspnet", GraspPoseEst, self.detector_callback)
        # NOTE(review): waiting for the service this node just advertised
        # returns immediately — this line looks redundant; confirm intent.
        rospy.wait_for_service("/perception/graspnet")

        rospy.loginfo("6DoF-Graspnet has initiated! Ready to get image!")


    def detector_callback(self, req):
        """Service handler: estimate grasp poses for one object's point cloud.

        req: GraspPoseEst request; req.single_obj_pose.cam_pcs holds the
             object's point cloud as a ROS multiarray (N x 3, camera frame).
        Returns a GraspPoseEstResponse with the selected grasp pose(s) as a
        multiarray, plus the original object info echoed back.
        """
        rospy.loginfo("Get image! Processing ...")
        obj_info = req.single_obj_pose
        object_pc = obj_info.cam_pcs
        object_pc = ros_np_multiarray.to_numpy_f32(object_pc)

        # No cloud received: reply with an empty grasp list instead of failing.
        if object_pc is None:
            rospy.loginfo('WARNING! No point clouds obtained! Please check again!')
            grasp_pose = []
            return GraspPoseEstResponse(grasp_pose, obj_info)
        
        # Keep only points with depth 0 < z <= 1 (presumably meters in the
        # camera frame — TODO confirm units); elementwise * acts as logical AND.
        index = np.where((object_pc[:,2] > 0) * (object_pc[:,2] <= 1))[0]
        object_pc = object_pc[index, :]

        args = self.args
        cfg = self.cfg

        # Sample grasp latents and run the VAE + evaluator (with refinement).
        latents = self.estimator.sample_latents()
        generated_grasps, generated_scores, _ = self.estimator.predict_grasps(
            self.sess,
            object_pc,
            latents,
            num_refine_steps=cfg.num_refine_steps,
        )

        # Sort grasps by score (descending) and keep the top `result_num`.
        num = args.result_num
        sort_indices = np.argsort(generated_scores)[::-1][:num]
        generated_grasps = np.array(generated_grasps)[sort_indices, :]
        generated_scores = np.array(generated_scores)[sort_indices]

        # Post-multiply each 4x4 grasp pose by a fixed transform. Despite the
        # original comment claiming a 90-degree z rotation, this matrix is a
        # pure translation of ~0.112 m along z (rotation part is identity) —
        # presumably a gripper-depth offset; TODO confirm intended transform.
        rotate_RT = np.array([[1, 0, 0, 0],
                              [0, 1, 0, 0],
                              [0, 0, 1, 1.12169998e-01],
                              [0, 0, 0, 1]])
        # NOTE(review): the trailing [0] keeps only the single best grasp,
        # even when --result_num > 1 — verify this is intentional.
        generated_grasps_CAM = np.matmul(np.array(generated_grasps), rotate_RT)[0]

        # This result grasp follows the definition used in our simulation
        # experiments.
        
        generated_grasps_CAM = ros_np_multiarray.to_multiarray_f32(generated_grasps_CAM)
        rospy.loginfo("Process Done! Return Results!")
        return GraspPoseEstResponse(generated_grasps_CAM, obj_info)


if __name__ == '__main__':
    # Parse CLI options and bring the grasp-pose estimation service up.
    cli_args = make_parser().parse_args()
    # Keep a reference so the server (and its TF session) stays alive while
    # rospy processes service callbacks.
    server = GraspNet_Server(args=cli_args)
    rospy.spin()
