import cv2
import glob
import numpy as np
import math

# Chessboard inner-corner counts (rows x cols) for the board used here.
# NOTE(review): the tutorial board had 7x6+ corners; this board is smaller —
# asking findChessboardCorners for more corners than the board has makes
# detection (and then calibration) fail.
cbraw = 6  # inner corners along one board dimension
cbcol = 7  # inner corners along the other board dimension


def cam_calibrate():
    """Calibrate the camera from chessboard photos in ./imgs3/*.jpg.

    For every image where the full cbraw x cbcol corner pattern is found,
    the grid coordinates (world frame) and refined pixel coordinates
    (image frame) are collected, then cv2.calibrateCamera solves for one
    intrinsic matrix and one distortion vector plus per-image extrinsics.
    Results are printed; nothing is returned.
    """
    # World coordinates of the corners: the board is assumed to lie in the
    # z = 0 plane, so grid positions (0,0,0), (1,0,0), ... (unit = one
    # square) can be used directly.  mgrid builds the cbraw x cbcol grid;
    # transpose + reshape yields one (x, y) row per corner.
    objp = np.zeros((cbraw * cbcol, 3), np.float32)
    objp[:, :2] = np.mgrid[0:cbraw, 0:cbcol].T.reshape(-1, 2)
    print(objp)

    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane

    # Termination criteria for sub-pixel corner refinement: stop after 30
    # iterations or when the corner moves less than 0.001 px.  Invariant,
    # so built once outside the loop.
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    gray = None
    images = glob.glob("./imgs3/*.jpg")
    for fname in images:
        img = cv2.imread(fname)  # source image
        if img is None:
            # Unreadable/corrupt file: skip it instead of crashing below.
            print("could not read", fname)
            continue
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # grayscale for detection
        # Use the module-level pattern size instead of a hard-coded (6, 7).
        ret, corners = cv2.findChessboardCorners(gray, (cbraw, cbcol), None)
        if not ret:
            # BUG FIX: cornerSubPix previously ran unconditionally and
            # raised when detection failed (corners is None).
            print("chessboard not found in", fname)
            continue
        # Refine the detected corners to sub-pixel accuracy.
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)

        objpoints.append(objp)
        imgpoints.append(corners2)

        # Visualization only: draw the detected corners on the color image
        # (was drawn on the grayscale copy, losing the colored markers).
        vis = cv2.drawChessboardCorners(img, (cbraw, cbcol), corners2, ret)
        cv2.imshow('img', vis)
        cv2.waitKey(5000)

    if gray is None or not objpoints:
        # BUG FIX: with no usable images, `gray` was previously undefined
        # here and calibrateCamera raised a NameError.
        print("no usable calibration images found in ./imgs3")
        return

    # One intrinsic matrix (mtx) and distortion vector (dist) for the
    # camera; one rotation (rvecs) and translation (tvecs) per image.
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints,
                                                       gray.shape[::-1],
                                                       None, None)

    print("ret:", ret)
    print("内参数矩阵mtx:\n", mtx)  # intrinsic matrix
    print("dist:\n", dist)  # distortion coefficients (k1, k2, p1, p2, k3)
    print("rvecs:\n", rvecs)  # rotation vectors (extrinsic, one per image)
    print("tvecs:\n", tvecs)  # translation vectors (extrinsic, one per image)

    # Sample output from a previous run (consumed by pos_estimate below):
    # dist: [[-1.48931403e-03  4.40793192e-01  8.22553252e-04  1.13295365e-03 -2.07625129e+00]]
    # mtx:  [[1.40787017e+03 0.00000000e+00 9.48648860e+02]
    #        [0.00000000e+00 1.45577464e+03 5.63655869e+02]
    #        [0.00000000e+00 0.00000000e+00 1.00000000e+00]]


def add_id():
    """Annotate each detected chessboard corner in ./1m.jpg with its index.

    Prints every corner id with its pixel coordinates, shows the annotated
    image, and saves it as 1m_id.jpg (so pos_estimate's id -> pixel table
    can be read off the picture).
    """
    fname = './1m.jpg'
    img = cv2.imread(fname)  # source image
    if img is None:
        print("could not read", fname)
        return

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # grayscale for detection

    # Use the module-level pattern size instead of a hard-coded (6, 7).
    ret, corners = cv2.findChessboardCorners(gray, (cbraw, cbcol), None)
    if not ret:
        # BUG FIX: cornerSubPix would previously raise on corners=None.
        print("chessboard not found in", fname)
        return
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)

    # Label every corner with its index (enumerate replaces the manual
    # counter of the original).
    for count, corner in enumerate(corners2):
        a, b = corner[0]
        a = int(a)
        b = int(b)
        cv2.putText(img, '(%s)' % (count), (a, b), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0), 1)
        print('id={}:({},{})'.format(count, a, b))

    # BUG FIX: drawing, imshow, waitKey(50000) and imwrite were indented
    # inside the loop above, blocking ~50 s per corner and rewriting the
    # output file once per corner.  Do it once after all labels are drawn
    # (5 s wait, matching cam_calibrate).
    img = cv2.drawChessboardCorners(img, (cbraw, cbcol), corners2, ret)
    cv2.imshow('img', img)
    cv2.waitKey(5000)
    cv2.imwrite('1m_id.jpg', img)


def pos_estimate():
    """Estimate the camera pose relative to the chessboard in 1m.jpg.

    World units are board squares (one square = 34 mm).  Four outer corners
    were read off the image annotated by add_id():
        id  5: (578, 266) -> world (0, 0)   top-left
        id  0: (568, 502) -> world (0, 5)   bottom-left
        id 41: (858, 265) -> world (6, 0)   top-right
        id 36: (857, 500) -> world (6, 5)   bottom-right
    Prints the solvePnP result, the camera position in world coordinates,
    the z-y-x Euler angles, and the camera-frame translation in mm.
    """
    # World (board) coordinates of the four reference corners, z = 0 plane.
    object_3d_points = np.array(([0, 0, 0],
                                 [0, 5, 0],
                                 [6, 0, 0],
                                 [6, 5, 0]), dtype=np.double)
    # Matching pixel coordinates, same order as above.
    object_2d_point = np.array(([578, 266],
                                [568, 502],
                                [858, 265],
                                [857, 500]), dtype=np.double)

    # Distortion coefficients (k1, k2, p1, p2, k3) from cam_calibrate().
    # BUG FIX: a comma was missing between p2 and k3
    # (`1.13295365e-03 - 2.07625129e+00` evaluated as a subtraction),
    # silently producing a 4-element vector with a wrong last entry.
    dist_coefs = np.array([-1.48931403e-03, 4.40793192e-01, 8.22553252e-04,
                           1.13295365e-03, -2.07625129e+00],
                          dtype=np.double)
    # Intrinsic matrix from cam_calibrate().
    camera_matrix = np.array((
        [1.40787017e+03, 0.00000000e+00, 9.48648860e+02],
        [0.00000000e+00, 1.45577464e+03, 5.63655869e+02],
        [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]), dtype=np.double)

    # Solve the camera pose (board frame -> camera frame).
    found, rvec, tvec = cv2.solvePnP(object_3d_points, object_2d_point, camera_matrix, dist_coefs)

    print('found', found)
    print('rvec', rvec)
    print('tvec', tvec)

    # Camera position in world coordinates: C = -R^T * t.
    rotM = cv2.Rodrigues(rvec)[0]
    camera_position = -np.matrix(rotM).T * np.matrix(tvec)
    print(camera_position.T)

    # Euler angles of the camera frame (rotation order z, y, x), in degrees.
    # Method cross-checked against:
    # http://www.cnblogs.com/singlex/p/pose_estimation_1.html
    thetaZ = math.atan2(rotM[1, 0], rotM[0, 0]) * 180.0 / math.pi
    thetaY = math.atan2(-1.0 * rotM[2, 0], math.sqrt(rotM[2, 1] ** 2 + rotM[2, 2] ** 2)) * 180.0 / math.pi
    thetaX = math.atan2(rotM[2, 1], rotM[2, 2]) * 180.0 / math.pi

    # Translation in the camera frame, converted from squares to mm
    # (one square = 34 mm).
    x = tvec[0] * 34
    y = tvec[1] * 34
    z = tvec[2] * 34
    print('=' * 30)
    print(thetaX, thetaY, thetaZ)
    print("camPos:", x, y, z)


# Reference for the pose-estimation part below:
# http://www.cnblogs.com/subic/p/8296794.html


if __name__ == '__main__':
    # Typical workflow: (1) cam_calibrate() to get intrinsics/distortion
    # from ./imgs3, (2) add_id() to label corner ids on 1m.jpg,
    # (3) pos_estimate() to solve the camera pose from those corners.
    # cam_calibrate()
    # add_id()
    pos_estimate()
