import datetime
import os

import cv2
import numpy as np
import math

import numpy as np
import cv2
from sklearn.metrics import mean_squared_error
# import open3d as o3d

# Pixel coordinates (u, v) of the 9 calibration targets as observed by the
# camera.  The points form a 3x3 grid, listed row-major (one blank-line
# separated group per grid row).  Units: pixels.
camera = np.array(
    [[1879.8673, 1074.4441],
     [1886.1239, 1373.5436],
     [1892.246, 1672.8093],

     [1579.1383, 1080.607],
     [1585.3181, 1379.3751],
     [1591.549, 1678.3741],

     [1278.1143, 1086.836],
     [1284.4696, 1385.3065],
     [1290.6469, 1683.6536]]
)
# Robot-frame coordinates (x, y) of the SAME 9 targets, in the same order as
# `camera` above (index i in both arrays refers to one physical point).
# Units: whatever the robot controller reports (likely millimetres —
# TODO confirm against the robot's teach pendant).
robot = np.array(
    [[-345.309, 495.652],
     [-369.388, 498.592],
     [-393.159, 498.638],
     [-343.783,520.338 ],
     [-369.680,522.605 ],
     [-391.836,522.417 ],
     [-344.728,546.034 ],
     [-368.975,544.476 ],
     [-392.295,545.591 ]]
)


def get_m(points_camera, points_robot):
    """
    Estimate the 2x3 affine matrix that maps camera coordinates to robot
    coordinates.

    :param points_camera: (N, 2) array-like of pixel coordinates (N >= 3).
    :param points_robot: (N, 2) array-like of the matching robot coordinates.
    :return: 2x3 float matrix ``m`` such that ``robot ~= A @ [x, y].T + t``
        with ``m = [A | t]``, or ``None`` if OpenCV could not fit a model.
    """
    # estimateAffine2D requires floating-point point arrays; coerce so the
    # function also accepts integer input.
    src = np.asarray(points_camera, dtype=np.float64)
    dst = np.asarray(points_robot, dtype=np.float64)
    # NOTE: keep the two point sets on a similar numeric scale — with wildly
    # mismatched magnitudes the RANSAC fit can fail and return None.
    m, _ = cv2.estimateAffine2D(src, dst)
    return m


def get_points_robot(x_camera, y_camera, m):
    """
    Transform a single camera coordinate into a robot coordinate using the
    affine matrix produced by ``get_m``.

    :param x_camera: camera-space x coordinate.
    :param y_camera: camera-space y coordinate.
    :param m: 2x3 affine matrix ``[[a11, a12, t1], [a21, a22, t2]]``.
    :return: tuple ``(robot_x, robot_y)``.
    """
    # Row-wise dot product with the homogeneous point (x, y, 1).
    a11, a12, t1 = m[0]
    a21, a22, t2 = m[1]
    robot_x = a11 * x_camera + a12 * y_camera + t1
    robot_y = a21 * x_camera + a22 * y_camera + t2
    return robot_x, robot_y


def outerr_m(m, STC_points_camera_r, STC_points_robot_r):
    """
    Report the calibration residuals of a fitted affine matrix.

    Transforms every camera point with ``m`` and prints the root-mean-square
    error against the known robot points, followed by the per-point error
    (robot - estimated).

    :param m: 2x3 affine matrix from ``get_m``.
    :param STC_points_camera_r: (N, 2) array-like of camera points.
    :param STC_points_robot_r: (N, 2) array-like of the expected robot points.
    :return: the RMSE as a float (also printed).
    """
    m = np.asarray(m, dtype=np.float64)
    cam = np.asarray(STC_points_camera_r, dtype=np.float64)
    rob = np.asarray(STC_points_robot_r, dtype=np.float64)
    # Vectorized affine transform of all points at once:
    # [x', y'] = [x, y] @ A.T + t, where m = [A | t].
    estimated = cam @ m[:, :2].T + m[:, 2]
    # RMSE over all coordinates; equivalent to
    # sqrt(sklearn.metrics.mean_squared_error(estimated, rob)) but needs
    # only numpy, and replaces the previous per-point Python loop.
    rmse = math.sqrt(np.mean((estimated - rob) ** 2))
    print(" rmse:", rmse)
    print(rob - estimated)
    return rmse


# Fit the affine matrix from the calibration correspondences above.
m = get_m(camera, robot)
print(m)
# Print the residuals so a bad calibration is immediately visible.
outerr_m(m, camera, robot)

# Example: convert one image point (px, py) to robot coordinates.
# BUG FIX: px and py were previously undefined, so this line raised
# NameError; use the first calibration point as a sanity-check example.
px, py = camera[0]
x, y = get_points_robot(px, py, m)
print("robot xy:", x, y)
