from detect_new import FastestDet
from Camrea import Carmera
from util import *
import numpy as np
import rospy
import time
from scipy.spatial.transform import Rotation as R
from FastestDet_ROS import Vision
from geometry_msgs.msg import Pose


class DataInference:
    """Detect objects in RealSense frames and publish 3-D target poses over ROS.

    Pipeline per frame: grab an aligned color/depth pair, run FastestDet on the
    color image, project detected box centers to camera-space XYZ via the depth
    frame and intrinsics, then map to robot/world coordinates using a hand-eye
    calibration file. Results go out through ``Vision.msg_publish``.
    """

    def __init__(self):
        self.cam = Carmera()        # RealSense camera wrapper (spelling follows Camrea module)
        self.Vision = Vision()      # ROS publisher wrapper for detection results
        self.detect = FastestDet()  # object detector
        # Latest detection state; refreshed by output() on every inference call.
        self.boxes, self.scores, self.classes, self.images = None, None, None, None
        self.aligned_depth_frame = None
        self.color_frame = None
        self.color_intrin_part = None  # color-camera intrinsics, set each loop in main()
        self.NoSystemSend = True

    def output(self, image, aligned_depth_frame, object_classify):
        """Run detection on ``image`` and return target coordinates.

        Args:
            image: BGR color frame as a numpy array.
            aligned_depth_frame: depth frame aligned to the color frame.
            object_classify: class filter passed through to ``get_aim_``.

        Returns:
            A list of ``[class_id, x, y, z]`` rows. In eye-in-hand mode the
            single best target is transformed into the robot base frame;
            ``[[4, 0, 0, 0]]`` is the "no valid target" fallback.
        """
        shape = [image.shape[0], image.shape[1]]
        boxes, scores, classes, im0s = self.detect.run(image)
        self.boxes, self.scores, self.classes = boxes, scores, classes
        self.images, self.aligned_depth_frame = im0s, aligned_depth_frame
        boxes = np.asarray(self.boxes, dtype=int)
        target_xyz_trues, track_cls = get_aim_(boxes, self.classes, self.color_intrin_part,
                                               self.aligned_depth_frame, shape, object_classify)
        datas, outputs = imagexyz2wordxyz(target_xyz_trues, track_cls,
                                          '/home/ros/catkin_ws/src/VC/FastestDet/eyeToHand_calibration.yaml')
        # Default False keeps the node usable when the parameter is not set
        # (a missing key previously raised KeyError here).
        is_eye_in_hand = rospy.get_param('IsEyeInHand', False)
        if not is_eye_in_hand:
            return datas
        try:
            object_pose = rospy.wait_for_message("/tool_point", Pose, timeout=0.5)
            # Build the 4x4 homogeneous pose matrix of the current tool frame.
            translation = np.array([object_pose.position.x,
                                    object_pose.position.y,
                                    object_pose.position.z]).reshape(3, 1)
            quat = [object_pose.orientation.x, object_pose.orientation.y,
                    object_pose.orientation.z, object_pose.orientation.w]
            rotation = R.from_quat(quat).as_matrix()
            robot_pose = np.concatenate(
                [np.concatenate([rotation, translation], 1),
                 np.array([0, 0, 0, 1]).reshape(1, 4)], 0)
            # Map the camera-frame target into the robot base frame.
            matrix = robot_pose.dot(outputs)
            return [[datas[0][0], matrix[0, 3], matrix[1, 3], matrix[2, 3]]]
        except Exception:
            # Best-effort fallback (was a bare except): covers wait_for_message
            # timeout and empty detection lists; id 4 signals "no target".
            return [[4, 0, 0, 0]]

    def show(self):
        """Display the latest annotated detection frame.

        Returns:
            False when the user pressed ESC (window destroyed, camera pipeline
            stopped) so callers can leave their loop; True otherwise.
            (Original returned None and callers ignored it — backward compatible.)
        """
        cv2.imshow('image', self.images)
        k = cv2.waitKey(1)
        if k == 27:  # ESC
            cv2.destroyAllWindows()
            self.cam.stop_pipline()
            return False
        return True

    def main(self):
        """Main loop: grab frames, run detection when enabled, publish results."""
        while True:
            time.sleep(0.1)  # ~10 Hz; avoids hammering the parameter server
            # The parameter server may be unreachable or keys unset; fall back
            # to safe defaults instead of crashing (was a bare except).
            try:
                vision_enable = rospy.get_param('vision_enable')
            except Exception:
                vision_enable = False
            try:
                object_classify = rospy.get_param('object_classify')
            except Exception:
                object_classify = 0

            color_frame, aligned_depth_frame = self.cam.get_frame()
            self.color_intrin_part = self.cam.get_color_intrin_part(color_frame)
            image = np.asanyarray(color_frame.get_data())
            if not vision_enable:
                # Preview-only mode: show the raw camera image; ESC quits.
                cv2.imshow('image', image)
                k = cv2.waitKey(1)
                if k == 27:
                    cv2.destroyAllWindows()
                    self.cam.stop_pipline()
                    # Bug fix: previously kept looping on a stopped pipeline.
                    break
                continue
            data = self.output(image, aligned_depth_frame, object_classify)
            if data:  # guard against an empty detection list (IndexError before)
                self.Vision.msg_publish(data[0])
            if not self.show():
                break  # ESC pressed in the detection view; pipeline is stopped


if __name__ == '__main__':
    # Class-id legend for published detections. A single literal avoids the
    # stray spaces the old multi-argument print inserted around each newline.
    print('0: apple\n1: pear\n2: green apple\n3: orange')
    inference = DataInference()
    inference.main()






