#!/usr/bin/env python
# coding:utf-8
# import demo_msgs.msg
# import rospy
import cv2
import numpy as np
from config import Config
# import pyrealsense2 as rs
import math
# from sensor_msgs.msg import Image
# from cv_bridge import CvBridge, CvBridgeError
import rospy

import rocr6_msgs.msg
from rocr6_msgs.msg import Feedback

# from CameraClient import CameraClient, ImageType, Command, CameraIntri

class Calibration_hand_eye(object):
    """Hand-eye calibration helper using an asymmetric circle-grid target.

    Workflow: detect the circle grid in camera images (``findCirclesGrid`` /
    ``show``), estimate the target->camera pose per image with ``solvePnP``
    while recording the matching robot end->base pose from the feedback topic
    (``Cam_calibrationplate``), then solve the hand-eye equation for either
    the eye-to-hand (``eyeTohand``) or eye-in-hand (``eyeInhand``) setup.
    """

    def __init__(self):
        super(Calibration_hand_eye, self).__init__()
        self.b = 0.025               # circle-center spacing of the board (metres)
        self.w = 4                   # pattern width  (circles per row)
        self.h = 5                   # pattern height (number of rows)
        self.circleCenters = []      # detected grid centers, one entry per shown image
        self.objectPoints = []       # 3-D board points in the target frame
        self.circleCenters_nums = 0  # number of collected center sets
        self.cameraMatrix = []       # 3x3 intrinsic matrix (filled by File_read)
        self.distCoeffs = []         # distortion coefficients (filled by File_read)
        self.target2CameraRotateVecs = []       # target->camera rotation matrices
        self.target2CameraTranslationVecs = []  # target->camera translation vectors
        self.end2BaseRotateVecs = []            # end-effector->base rotation matrices
        self.end2BaseTranslationVecs = []       # end-effector->base translation vectors

    def File_read(self, cameraIntrinsicCalibrationUrl):
        """Load the camera intrinsic matrix and distortion coefficients saved
        by the intrinsic calibration step into ``self.cameraMatrix`` and
        ``self.distCoeffs`` (as float32 arrays)."""
        fs = cv2.FileStorage(cameraIntrinsicCalibrationUrl, cv2.FileStorage_READ)
        try:
            cameraMatrix = fs.getNode('cameraMatrix').mat()
            distCoeffs = fs.getNode('distCoeffs').mat()
        finally:
            fs.release()  # always close the storage, even if a node is missing
        self.cameraMatrix = np.float32(cameraMatrix)
        self.distCoeffs = np.float32(distCoeffs)

    def findCirclesGrid(self, color_image):
        """Detect the asymmetric circle grid in *color_image*.

        Returns the (ret, corners) pair from ``cv2.findCirclesGrid``.
        Blob area limits filter out detections that are too small/large."""
        params = cv2.SimpleBlobDetector_Params()
        params.maxArea = 30000
        params.minArea = 2000
        blobDetector = cv2.SimpleBlobDetector_create(params)
        ret, corners = cv2.findCirclesGrid(image=color_image, patternSize=(self.w, self.h),
                                           flags=cv2.CALIB_CB_ASYMMETRIC_GRID,
                                           blobDetector=blobDetector)
        return ret, corners

    def show(self, color_image, ret, corners):
        """Record *corners* and display the detection (image shown inverted)."""
        self.circleCenters.append(corners)
        self.circleCenters_nums = len(self.circleCenters)
        cv2.drawChessboardCorners(color_image, (self.w, self.h), corners, ret)
        cv2.imshow('camera image', 255 - color_image)
        cv2.waitKey(1000)

    @staticmethod
    def _pose_from_feedback(msgPtr):
        """Build the end-effector->base (R, t) pair from a Feedback message.

        The ``homogeneous.X/Y/Z`` fields carry the rows of the pose matrix;
        indices 0-2 are the rotation row and index 3 the translation component.
        Returns (3x3 rotation matrix, 3x1 translation vector)."""
        translation = np.zeros((3, 1), dtype=float)
        rotation = np.zeros((3, 3), dtype=float)
        rows = (msgPtr.homogeneous.X, msgPtr.homogeneous.Y, msgPtr.homogeneous.Z)
        for r, row in enumerate(rows):
            translation[r, 0] = row[3]
            for c in range(3):
                rotation[r, c] = row[c]
        return rotation, translation

    def Cam_calibrationplate(self, color_image, corners):
        """Estimate the target->camera pose for one image; if the re-projection
        error is acceptable, record both that pose and the matching robot
        end->base pose read from the ``/rocr6_msgs/feedback`` topic."""
        print(self.cameraMatrix)
        # Board points in the target frame, asymmetric-grid layout:
        # odd rows are shifted by one half-period horizontally.
        self.objectPoints = np.float32(
            [[(i % 2 + j * 2) * self.b, i * self.b, 0]
             for i in range(self.h) for j in range(self.w)])
        # Solve the target-frame -> camera-frame transform from the detections.
        Ret, rotateRodriguesVec, translationVec = cv2.solvePnP(
            self.objectPoints, corners, self.cameraMatrix, self.distCoeffs)
        if Ret:
            projectImagePoints, _ = cv2.projectPoints(
                self.objectPoints, rotateRodriguesVec, translationVec,
                self.cameraMatrix, self.distCoeffs)
            numPoints = len(projectImagePoints)
            # NOTE(review): sum(norm/n) / sqrt(n) is not a textbook RMS, but the
            # 1.0 acceptance threshold below was chosen against this metric,
            # so it is kept unchanged.
            projectOffsetPowSum = 0
            for j in range(numPoints):
                projectOffsetPowSum += cv2.norm(projectImagePoints[j], corners[j],
                                                cv2.NORM_L2) / numPoints
            projectRMS = projectOffsetPowSum / math.sqrt(numPoints)
            print("total error: {}".format(projectRMS))
            if projectRMS < 1.0:
                print('重投影均方根误差在可接受范围内，本次采集为有效数据')
                # Grab the robot pose closest in time to this image (2 s timeout).
                msgPtr = rospy.wait_for_message('/rocr6_msgs/feedback', Feedback,
                                                rospy.Duration(2))
                if msgPtr is not None:
                    end2BaseRotateVec, end2BaseTranslationVec = self._pose_from_feedback(msgPtr)
                    self.end2BaseRotateVecs.append(end2BaseRotateVec)
                    self.end2BaseTranslationVecs.append(end2BaseTranslationVec)
                    print(rotateRodriguesVec)
                    # Rodrigues vector -> 3x3 rotation matrix.
                    rotateVec, _ = cv2.Rodrigues(rotateRodriguesVec)
                    self.target2CameraRotateVecs.append(rotateVec)
                    self.target2CameraTranslationVecs.append(translationVec)
                    print('target2CameraRotateVecs:', self.target2CameraRotateVecs)
                    # Bug fix: these were printf-style strings passed to print()
                    # with extra args, which printed the raw '%f'/'%d' literally.
                    print('The image has been checked and added (RMS re-projection error: %f) ! [%d] sets of data have been added...'
                          % (projectRMS, len(self.target2CameraTranslationVecs)))
                    print("calibrationBoard => camera Rotate: ", rotateVec)
                    print("calibrationBoard => camera Translation: ", translationVec)
                    print("robotEnd => robotBase Rotate: ", end2BaseRotateVec)
                    print("robotEnd => robotBase Translation: ", end2BaseTranslationVec)
                else:
                    print("Robot Pose get Failed !")
            else:
                print("The image has been checked and deprecated (RMS re-projection error: %f, greater than the standard value) !"
                      % projectRMS)
        # Use the configured pattern size instead of the hard-coded (4, 5).
        cv2.drawChessboardCorners(color_image, (self.w, self.h), corners, Ret)
        cv2.imshow('img', color_image)
        cv2.waitKey(1000)
        cv2.destroyAllWindows()

    # Hand-eye solve for the eye-to-hand (camera fixed to the world) setup.
    def eyeTohand(self, eyeToHand_calibration_url):
        """Solve camera->base and write the result YAML to
        *eyeToHand_calibration_url*. Returns the (released) FileStorage."""
        camera2TargetRotateVecs = []       # camera frame -> target frame rotations
        camera2TargetTranslationVecs = []  # camera frame -> target frame translations
        for i in range(len(self.target2CameraRotateVecs)):
            # Invert each target->camera pose: R' = R^T, t' = -R^T t.
            camera2TargetRotateVecs.append(self.target2CameraRotateVecs[i].T)
            camera2TargetTranslationVecs.append(
                np.dot(-self.target2CameraRotateVecs[i].T, self.target2CameraTranslationVecs[i]))
        # NOTE(review): cv2.calibrateHandEye is documented as
        # (R_gripper2base, t_gripper2base, R_target2cam, t_target2cam); here the
        # camera2Target and end2Base lists are passed instead, relabelling the
        # frames for the eye-to-hand variant — verify against the setup.
        base2CameraRotate, base2CameraTranslation = cv2.calibrateHandEye(
            camera2TargetRotateVecs, camera2TargetTranslationVecs,
            self.end2BaseRotateVecs, self.end2BaseTranslationVecs)
        camera2BaseRotate = base2CameraRotate.T
        camera2BaseTranslation = np.dot(-base2CameraRotate.T, base2CameraTranslation)
        fs_mechbase = cv2.FileStorage(eyeToHand_calibration_url, cv2.FileStorage_WRITE)
        fs_mechbase.write('Rotate', camera2BaseRotate)
        fs_mechbase.write('Translation', camera2BaseTranslation)
        # Bug fix: without release() the YAML file may never be flushed to disk.
        fs_mechbase.release()
        return fs_mechbase

    def eyeInhand(self, eyeInHand_calibration_url):
        """Solve camera->end-effector (eye-in-hand) and write the result YAML
        to *eyeInHand_calibration_url*. Returns the (released) FileStorage."""
        camera2EndRotate, camera2EndTranslation = cv2.calibrateHandEye(
            self.end2BaseRotateVecs, self.end2BaseTranslationVecs,
            self.target2CameraRotateVecs, self.target2CameraTranslationVecs)
        fs_eysinhand = cv2.FileStorage(eyeInHand_calibration_url, cv2.FileStorage_WRITE)
        fs_eysinhand.write('Rotate', camera2EndRotate)
        fs_eysinhand.write('Translation', camera2EndTranslation)
        # Bug fix: flush/close the storage before handing it back.
        fs_eysinhand.release()
        return fs_eysinhand





