# region import basic library
import os
import sys
import cmd
import time
import numpy as np
# endregion

# region import JAKA
from GraspInit import *
from poseTransForm.jaka import JAKA
from poseTransForm.poseTransform import *
# endregion

IPaddress = "192.168.1.8"

# NOTE(review): everything commented out below (through the old `do_exit`)
# is a superseded duplicate of the active Query class defined later in this
# file — it was the original cmd.Cmd-based shell. Consider deleting it and
# relying on version-control history instead.
# class Query(cmd.Cmd):
#   # region 成员参数
#     intro = '机器人智能抓取系统，输入 help 或者?查看帮助。\n'  # 命令行欢迎
#     prompt = 'cmd>'      # 命令行提示符
#     depth_image = None      # 深度图像
#     color_image = None       # 彩色图像
#     toolCoordinate = None # 根据坐标系 可以用APP设置
#     gg = None  # grasp group 用于存储算法输出结果
#     homePose = None      # home 位姿 用于存储home
#     basketPose = None    # basket 位姿 basket
#   # endregion
#   # region 成员方法
#       # def do_change_pos(self,arg):
#       #   tcp = JAKA(IPaddress)
#       #   self.homePose = tcp.getpos6DoF()
#       #   tcp.robot.logout()

#     def do_gripper_open(self,arg):
#       if(not arg):
#         arg = 1000
#       tcp = JAKA(IPaddress)
#       tcp.robot.set_analog_output(iotype = 2,index = 0,value = 0)#
#       tcp.robot.set_analog_output(iotype = 2,index = 1,value = 20)#
#       tcp.robot.set_analog_output(iotype = 2,index = 3,value = int(arg))#
#       tcp.robot.logout()

#     def do_gripper_close(self,arg):
#       if(not arg):
#         arg = 400
#       tcp = JAKA(IPaddress)
#       tcp.robot.set_analog_output(iotype = 2,index = 0,value = 0)#
#       tcp.robot.set_analog_output(iotype = 2,index = 1,value = 20)#
#       tcp.robot.set_analog_output(iotype = 2,index = 3,value = int(arg))#
#       tcp.robot.logout()

#     def do_set_home(self,arg):
#       tcp = JAKA(IPaddress)
#       self.homePose = tcp.getpos6DoF()
#       tcp.robot.logout()

#     def do_go_home(self,arg):
#       tcp = JAKA(IPaddress)
#       if not arg:
#         arg = 25
#       tcp.liner_move(self.homePose,int(arg))
#       tcp.robot.logout()

#     def do_set_basket(self,arg):
#       tcp = JAKA(IPaddress)
#       self.basketPose = tcp.getpos6DoF()
#       tcp.robot.logout()

#     def do_go_basket(self,arg):
#       tcp = JAKA(IPaddress)
#       if not arg:
#         arg = 15
#       tcp.liner_move(self.basketPose,int(arg))
#       tcp.robot.logout()

#     def do_grasp(self,arg):
#       tcp = JAKA(IPaddress)
#       curPos = tcp.getpos6DoF()
#       print("cur pos:",curPos)
#       tcp.robot.logout()
#       dkr = GG_2_Liner_move(self.gg,curPos)
#       print("tar pos:",dkr)
#       tcp = JAKA(IPaddress)
#       if not arg:
#         arg = 15
#       before_pos = dkr
#       before_pos[2] = dkr[2] + 100
#       tcp.liner_move(before_pos,int(arg))
#       print('point 1')
#       time.sleep(2)
#       dkr[2] = dkr[2] -100
#       print(dkr)
#       tcp.liner_move(dkr,int(arg))
#       print('point 2')
#       tcp.robot.logout()

#     def do_capture(self,arg):
#       time.sleep(1)
#       while True:
#         # Get frameset of color and depth
#         frames = pipeline.wait_for_frames()
#         # frames.get_depth_frame() is a 1280 x 720 depth image
#         # Align the depth frame to color frame
#         aligned_frames = align.process(frames)
#         # Get aligned frames
#         aligned_depth_frame = aligned_frames.get_depth_frame() # aligned_depth_frame is a 640x480 depth image
#         color_frame = aligned_frames.get_color_frame()
#         # Validate that both frames are valid
#         if not aligned_depth_frame or not color_frame:
#             continue
#         else:
#           self.depth_image = np.asanyarray(aligned_depth_frame.get_data())
#           self.color_image = np.asanyarray(color_frame.get_data())
#           break

#     def do_show_image(self,arg):
#       while True:
#         # Remove background - Set pixels further than clipping_distance to grey
#         grey_color = 153
#         depth_image_3d = np.dstack((self.depth_image,self.depth_image,self.depth_image)) #depth image is 1 channel, color is 3 channels
#         bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color, self.color_image)

#         # Render images:
#         depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(self.depth_image, alpha=0.03), cv2.COLORMAP_JET)
#         images = np.hstack((bg_removed, depth_colormap))

#         cv2.namedWindow('Align Example', cv2.WINDOW_NORMAL)
#         cv2.imshow('Align Example', images)
#         key = cv2.waitKey(0)

#         # Press esc or 'q' to close the image window
#         if key & 0xFF == ord('q') or key == 27:
#             cv2.destroyAllWindows()
#             break
#         elif key & 0xFF == ord('p'):
#             print("AI Calculating ...")
#             if(arg == ''):
#               arg = '1'
#             self.gg = demo_inner(self.color_image,self.depth_image , './',int(arg))
#             print("SAVE: ",self.gg)

#     def do_exit(self, _):
#         '退出'
#         exit(0)
#   # endregion

class Query():
  # region member attributes
    # intro/prompt are leftovers from the earlier cmd.Cmd-based shell;
    # kept unchanged for compatibility.
    intro = '机器人智能抓取系统，输入 help 或者?查看帮助。\n'  # CLI welcome banner
    prompt = 'cmd>'         # CLI prompt string
    depth_image = None      # latest captured depth frame (numpy array)
    color_image = None      # latest captured color frame (numpy array)
    toolCoordinate = None   # tool coordinate system; can be configured via the vendor APP
    gg = None               # grasp group: stores the grasp-detection algorithm output
    homePose = None         # stored "home" 6-DoF pose
    basketPose = None       # stored "basket" (drop-off) 6-DoF pose
  # endregion
  # region member methods
    def _command_gripper(self, value):
      """Send one gripper command over the robot's analog outputs.

      Opens a fresh connection to the controller, writes the three analog
      channels the gripper listens on (channel 3 carries *value*), then
      logs out so the connection is not leaked.
      """
      tcp = JAKA(IPaddress)
      tcp.robot.set_analog_output(iotype = 2,index = 0,value = 0)
      tcp.robot.set_analog_output(iotype = 2,index = 1,value = 20)
      tcp.robot.set_analog_output(iotype = 2,index = 3,value = int(value))
      tcp.robot.logout()

    def do_gripper_open(self,arg):
      """Open the gripper. *arg* is the opening value (default 1000)."""
      if not arg:
        arg = 1000
      self._command_gripper(arg)

    def do_gripper_close(self,arg):
      """Close the gripper. *arg* is the closing value (default 400)."""
      if not arg:
        arg = 400
      self._command_gripper(arg)

    def do_set_home(self,arg):
      """Record the robot's current 6-DoF pose as the home pose."""
      tcp = JAKA(IPaddress)
      self.homePose = tcp.getpos6DoF()
      tcp.robot.logout()

    def do_go_home(self,arg):
      """Linear-move to the stored home pose. *arg* is the speed (default 25)."""
      tcp = JAKA(IPaddress)
      if not arg:
        arg = 25
      tcp.liner_move(self.homePose,int(arg))
      tcp.robot.logout()

    def do_set_basket(self,arg):
      """Record the robot's current 6-DoF pose as the basket (drop-off) pose."""
      tcp = JAKA(IPaddress)
      self.basketPose = tcp.getpos6DoF()
      tcp.robot.logout()

    def do_go_basket(self,arg):
      """Linear-move to the stored basket pose. *arg* is the speed (default 15)."""
      tcp = JAKA(IPaddress)
      if not arg:
        arg = 15
      tcp.liner_move(self.basketPose,int(arg))
      tcp.robot.logout()

    def do_grasp(self,arg):
      """Execute a grasp using the stored grasp group ``self.gg``.

      Reads the current pose, converts the grasp group into a 6-DoF motion
      target, moves to a pre-grasp point 100 above the target, then
      descends onto the target itself. *arg* is the linear-move speed
      (default 15).
      """
      tcp = JAKA(IPaddress)
      curPos = tcp.getpos6DoF()
      print("cur pos:",curPos)
      tcp.robot.logout()
      dkr = GG_2_Liner_move(self.gg,curPos)
      print("tar pos:",dkr)
      tcp = JAKA(IPaddress)
      if not arg:
        arg = 15
      # BUG FIX: the original code did `before_pos = dkr`, aliasing the two
      # poses, so raising before_pos[2] also raised dkr[2]; it then had to
      # subtract 100 again to recover the target z. Copy the pose instead so
      # the intent — approach from 100 above, then descend to the target —
      # is explicit. The executed motion is identical.
      # (assumes dkr is a sequence of 6 coordinates — TODO confirm the
      # return type of GG_2_Liner_move)
      before_pos = list(dkr)
      before_pos[2] = before_pos[2] + 100
      tcp.liner_move(before_pos,int(arg))
      print('point 1')
      time.sleep(2)  # let the arm settle at the approach point
      print(dkr)
      tcp.liner_move(dkr,int(arg))
      print('point 2')
      tcp.robot.logout()

    def do_capture(self,arg):
      """Grab one aligned color/depth frame pair from the camera pipeline.

      Blocks until both an aligned depth frame and a color frame are valid,
      then stores them in ``self.depth_image`` / ``self.color_image``.
      Relies on the module-level ``pipeline`` and ``align`` objects brought
      in by the GraspInit star-import — TODO confirm they exist there.
      """
      time.sleep(1)  # give the camera a moment before grabbing frames
      while True:
        # Get a frameset of color and depth.
        frames = pipeline.wait_for_frames()
        # Align the depth frame to the color frame.
        aligned_frames = align.process(frames)
        aligned_depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()
        # Retry until both frames are valid.
        if not aligned_depth_frame or not color_frame:
            continue
        self.depth_image = np.asanyarray(aligned_depth_frame.get_data())
        self.color_image = np.asanyarray(color_frame.get_data())
        break

    def do_show_image(self,arg):
      """Display the captured frames; press 'p' to run grasp detection.

      Renders the background-removed color image next to a colorized depth
      map. Keys: 'q' or ESC closes the window; 'p' runs ``demo_inner`` on
      the stored frames and saves the resulting grasp group into
      ``self.gg``. *arg* is forwarded to ``demo_inner`` as an int
      (default '1'). Relies on module-level ``cv2``, ``clipping_distance``
      and ``demo_inner`` from the GraspInit star-import.
      """
      while True:
        # Remove background: pixels farther than clipping_distance (or
        # invalid, <= 0) are painted grey.
        grey_color = 153
        # Depth image is 1 channel, color is 3 channels.
        depth_image_3d = np.dstack((self.depth_image,self.depth_image,self.depth_image))
        bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color, self.color_image)

        # Render the side-by-side view (color | colorized depth).
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(self.depth_image, alpha=0.03), cv2.COLORMAP_JET)
        images = np.hstack((bg_removed, depth_colormap))

        cv2.namedWindow('Align Example', cv2.WINDOW_NORMAL)
        cv2.imshow('Align Example', images)
        key = cv2.waitKey(0)

        # Press ESC or 'q' to close the image window.
        if key & 0xFF == ord('q') or key == 27:
            cv2.destroyAllWindows()
            break
        elif key & 0xFF == ord('p'):
            print("AI Calculating ...")
            if arg == '':
              arg = '1'
            self.gg = demo_inner(self.color_image,self.depth_image , './',int(arg))
            print("SAVE: ",self.gg)

    def do_exit(self, _):
        """Exit the program."""
        exit(0)
  # endregion


if __name__ == '__main__':
    # Capture one frame pair, then open the viewer with detection variant 3.
    shell = Query()
    shell.do_capture('')
    shell.do_show_image('3')
