import cv2 
import rospy
import os
import time
import matplotlib.pyplot as plt
import numpy as np
import torch

from cv_bridge import CvBridge, CvBridgeError
from hardware.camera import RealSenseCamera
from hardware.device import get_device
from inference.post_process import post_process_output
from message_filters import TimeSynchronizer, Subscriber
from utils.data.camera_data import CameraData
from utils.dataset_processing.grasp import detect_grasps
from utils.dataset_processing.calculate_pose import CalculatePose
from utils.visualisation.plot import plot_grasp
from sensor_msgs.msg import Image
from geometry_msgs.msg import Pose

class GraspGenerator:
    """ROS node that detects grasp poses from synchronized RGB and depth images.

    Subscribes to an RGB image topic and a depth image topic, runs a trained
    grasp-prediction network on each time-synchronized pair, publishes a debug
    image with the best grasp rectangle drawn on it, and publishes the
    resulting grasp pose.
    """

    def __init__(self):
        self.count = True  # NOTE(review): set but never read in this file — confirm before removing
        self.model = None                  # trained grasp network; set by load_model()
        self.device = None                 # torch compute device; set by load_model()
        self.camera_pama_file_path = None  # camera parameter file path; set in run()
        self.saved_model_path = None       # trained model file path; set in run()

        self.__bridge = CvBridge()
        # Pre/post-processing helper sized to the camera resolution.
        self.cam_data = CameraData(width=1920, height=1080,
                                   include_depth=True, include_rgb=True)

        # Debug image publishers and the final predicted-pose publisher.
        self.__img_pub = rospy.Publisher('grasp_detection/rectified_image', Image, queue_size=1)
        self.__img_pub2 = rospy.Publisher('grasp_detection/rectified_image2', Image, queue_size=1)
        self.__pose_pub = rospy.Publisher('grasp_detection/predict_pose', Pose, queue_size=1)

    def load_model(self):
        """Load the trained grasp network from disk and pick the compute device."""
        print('[info]    Loading model... ')
        # SECURITY: torch.load unpickles arbitrary objects — only load trusted
        # model files (this path comes from a ROS parameter set by the operator).
        self.model = torch.load(self.saved_model_path, map_location="cpu")
        # Force CPU to stay consistent with map_location="cpu" above.
        self.device = get_device(force_cpu=True)

    def generate(self, bgr=None, depth=None):
        """Run grasp prediction on one BGR/depth pair and publish the results.

        Args:
            bgr:   HxWx3 BGR image (OpenCV convention).
            depth: HxW depth image aligned with ``bgr``.

        Publishes the marked debug image on ``grasp_detection/rectified_image``
        and one Pose per detected grasp on ``grasp_detection/predict_pose``.
        """
        rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
        x, depth_img, rgb_img = self.cam_data.get_data(rgb=rgb, depth=depth)

        # Predict the grasp quality / angle / width maps with the saved model.
        with torch.no_grad():
            xc = x.to(self.device)
            pred = self.model.predict(xc)
        q_img, ang_img, width_img = post_process_output(
            pred['pos'], pred['cos'], pred['sin'], pred['width'])

        # Draw the single best grasp onto the input image for visual debugging.
        grasps, marked_image = self.cam_data.get_marked_image(
            bgr, q_img, ang_img, width_img, no_grasps=1)

        img_msg = self.__bridge.cv2_to_imgmsg(marked_image, "bgr8")
        self.__img_pub.publish(img_msg)

        if grasps is not None:
            for grasp in grasps:
                center = grasp.center
                angle = grasp.angle
                # NOTE(review): assumes grasp.center is a (row, col) index valid
                # into the depth image — confirm against detect_grasps/Grasp.
                depth_value = depth[center]

                cp = CalculatePose(self.camera_pama_file_path, center, depth_value, angle)
                pose = cp.runcalculate()
                self.__pose_pub.publish(pose)

    def __image_callback(self, msg_image, msg_depth):
        """Synchronized RGB/depth callback: run detection, republish debug images.

        Args:
            msg_image: sensor_msgs/Image, RGB stream.
            msg_depth: sensor_msgs/Image, depth stream.
        """
        cv_image = self.__bridge.imgmsg_to_cv2(msg_image, desired_encoding="passthrough")
        cv_depth = self.__bridge.imgmsg_to_cv2(msg_depth, desired_encoding="passthrough")
        self.generate(cv_image, cv_depth)

        # Debug output: republish one color channel (kept as HxWx1) and the
        # raw depth image on the two debug topics.
        image = cv_image[:, :, 1:2]
        # Was a Python-2 `print` statement (SyntaxError under Python 3);
        # the parenthesized form works on both interpreters.
        print(image.shape)
        self.__img_pub.publish(self.__bridge.cv2_to_imgmsg(image))
        self.__img_pub2.publish(self.__bridge.cv2_to_imgmsg(cv_depth))

    def run(self):
        """Read parameters, load the model, subscribe to the topics, and spin."""
        self.camera_pama_file_path = rospy.get_param('~camera_pama_file_path', None)
        self.saved_model_path = rospy.get_param('~trained_model_file_path', None)

        image_raw_topic_str = rospy.get_param('~image_src', '/image_raw')
        depth_src_topic_str = rospy.get_param('~depth_src', '/depth_raw')
        self.load_model()

        # Only process RGB/depth pairs whose headers carry identical timestamps.
        ts = TimeSynchronizer(
            [Subscriber(image_raw_topic_str, Image),
             Subscriber(depth_src_topic_str, Image)],
            queue_size=1)
        ts.registerCallback(self.__image_callback)

        print('[INFO]    subscribe to ', image_raw_topic_str, depth_src_topic_str, 'and wait for data')
        rospy.spin()
        rospy.loginfo('END')


        


