import numpy as np
import cv2 as cv
import glob

"""
bool cv::solvePnP	(	InputArray 	objectPoints,
                        InputArray 	imagePoints,
                        InputArray 	cameraMatrix,
                        InputArray 	distCoeffs,
                        OutputArray 	rvec,
                        OutputArray 	tvec,
                        bool useExtrinsicGuess = false,
                        int flags = SOLVEPNP_ITERATIVE 
)		

Python:
        retval, rvec, tvec = cv.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs[, rvec[, tvec[, useExtrinsicGuess[, flags]]]])

Finds an object pose from 3D-2D point correspondences.

Parameters
            objectPoints	Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel, where N is the number of points. vector<Point3f> can be also passed here.
            imagePoints	Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel, where N is the number of points. vector<Point2f> can be also passed here.
            cameraMatrix	Input camera matrix A.
            distCoeffs	Input vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6[,s1,s2,s3,s4[,τx,τy]]]]) of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
            rvec	Output rotation vector (see Rodrigues ) that, together with tvec , brings points from the model coordinate system to the camera coordinate system.
            tvec	Output translation vector.
            useExtrinsicGuess	Parameter used for SOLVEPNP_ITERATIVE. If true (1), the function uses the provided rvec and tvec values as initial approximations of the rotation and translation vectors, respectively, and further optimizes them.
            flags	Method for solving a PnP problem:
                    SOLVEPNP_ITERATIVE Iterative method is based on Levenberg-Marquardt optimization. In this case the function finds such a pose that minimizes reprojection error, that is the sum of squared distances between the observed projections imagePoints and the projected (using projectPoints ) objectPoints .
                    SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang "Complete Solution Classification for the Perspective-Three-Point Problem" ([68]). In this case the function requires exactly four object and image points.
                    SOLVEPNP_AP3P Method is based on the paper of T. Ke, S. Roumeliotis "An Efficient Algebraic Solution to the Perspective-Three-Point Problem" ([102]). In this case the function requires exactly four object and image points.
                    SOLVEPNP_EPNP Method has been introduced by F.Moreno-Noguer, V.Lepetit and P.Fua in the paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" ([113]).
                    SOLVEPNP_DLS Method is based on the paper of Joel A. Hesch and Stergios I. Roumeliotis. "A Direct Least-Squares (DLS) Method for PnP" ([88]).
                    SOLVEPNP_UPNP Method is based on the paper of A.Penate-Sanchez, J.Andrade-Cetto, F.Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length Estimation" ([154]). In this case the function also estimates the parameters fx and fy assuming that both have the same value. Then the cameraMatrix is updated with the estimated focal length.
                    (Note: the SOLVEPNP_AP3P entry above is listed twice in some OpenCV documentation versions; it is the same method — Tong Ke and Stergios I. Roumeliotis, "An Efficient Algebraic Solution to the Perspective-Three-Point Problem" ([102]) — requiring exactly four object and image points.)
"""

"""
void cv::drawContours	(	InputOutputArray 	image,
                            InputArrayOfArrays 	contours,
                            int 	contourIdx,
                            const Scalar & 	color,
                            int 	thickness = 1,
                            int 	lineType = LINE_8,
                            InputArray 	hierarchy = noArray(),
                            int 	maxLevel = INT_MAX,
                            Point 	offset = Point() 
)		
Python:
        image = cv.drawContours(image, contours, contourIdx, color[, thickness[, lineType[, hierarchy[, maxLevel[, offset]]]]])

Draws contours outlines or filled contours.
The function draws contour outlines in the image if thickness≥0 or fills the area bounded by the contours if thickness<0.

Parameters
            image	Destination image.
            contours	All the input contours. Each contour is stored as a point vector.
            contourIdx	Parameter indicating a contour to draw. If it is negative, all the contours are drawn.
            color	Color of the contours.
            thickness	Thickness of lines the contours are drawn with. If it is negative (for example, thickness=FILLED ), the contour interiors are drawn.
            lineType	Line connectivity. See LineTypes
            hierarchy	Optional information about hierarchy. It is only needed if you want to draw only some of the contours (see maxLevel ).
            maxLevel	Maximal level for drawn contours. If it is 0, only the specified contour is drawn. If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This parameter is only taken into account when there is hierarchy available.
            offset	Optional contour shift parameter. Shift all the drawn contours by the specified offset=(dx,dy) .

"""

# Load the previously saved camera-calibration results:
#   mtx  - camera intrinsic matrix
#   dist - lens distortion coefficients
#   rvecs / tvecs - per-view rotation and translation vectors (unused here,
#   but read to keep the archive key order explicit)
with np.load('B.npz') as calibration:
    mtx, dist, _, _ = (calibration[key] for key in ('mtx', 'dist', 'rvecs', 'tvecs'))


def draw(img, imgpts, rvecs, tvecs):
    """Draw the projected 3D axes on ``img`` and annotate it with the pose.

    Parameters
    ----------
    img : ndarray
        BGR image to draw on (modified in place and returned).
    imgpts : ndarray
        Projected 2D points from ``cv.projectPoints``; row 0 is the axis
        origin and rows 1-3 are the three axis endpoints.
    rvecs, tvecs : ndarray
        Rotation and translation vectors from ``cv.solvePnP``; their text
        representation is written onto the image.

    Returns
    -------
    ndarray
        The annotated image.
    """
    # Flatten to (N, 2) integer pixel coordinates; OpenCV drawing
    # functions require integer point coordinates.
    imgpts = np.int32(imgpts).reshape(-1, 2)
    # Convert to plain Python int tuples: some OpenCV builds reject
    # tuples of numpy scalars as point arguments.
    pts = [tuple(int(v) for v in p) for p in imgpts]

    # Draw the three axes from the origin (point 0):
    # point 1 in red, point 2 in green, point 3 in blue (BGR colors).
    img = cv.line(img, pts[0], pts[1], (0, 0, 255), 3)
    img = cv.line(img, pts[0], pts[2], (0, 255, 0), 3)
    img = cv.line(img, pts[0], pts[3], (255, 0, 0), 3)

    # Overlay the rotation and translation vectors as red text.
    font = cv.FONT_HERSHEY_SIMPLEX
    text_color = (0, 0, 255)
    font_scale = 1
    thickness = 2
    img = cv.putText(img, "R: {}".format(rvecs), (50, 50), font, font_scale, text_color, thickness, cv.LINE_AA)
    img = cv.putText(img, "T: {}".format(tvecs), (50, 100), font, font_scale, text_color, thickness, cv.LINE_AA)

    return img



def drawTraingle(img, imgpts, rvecs, tvecs):
    """Draw a filled base quadrilateral plus the 3D axes on ``img``.

    (Name kept as-is — "Traingle" is a historical typo — so existing
    callers keep working.)

    Parameters
    ----------
    img : ndarray
        BGR image to draw on (modified in place and returned).
    imgpts : ndarray
        Projected 2D points from ``cv.projectPoints``; rows 0-3 form the
        base contour, and rows 1-3 double as the axis endpoints drawn
        from row 0.
    rvecs, tvecs : ndarray
        Rotation and translation vectors from ``cv.solvePnP``; their text
        representation is written onto the image.

    Returns
    -------
    ndarray
        The annotated image.
    """
    # Flatten to (N, 2) integer pixel coordinates; OpenCV drawing
    # functions require integer point coordinates.
    imgpts = np.int32(imgpts).reshape(-1, 2)

    # Fill the base area (points 0-3) in blue — (255, 0, 0) is blue in
    # BGR order; the negative thickness makes drawContours fill.
    img = cv.drawContours(img, [imgpts[:4]], -1, (255, 0, 0), -3)

    # Convert to plain Python int tuples: some OpenCV builds reject
    # tuples of numpy scalars as point arguments.
    pts = [tuple(int(v) for v in p) for p in imgpts]

    # Draw the three axes from the origin (point 0):
    # point 1 in red, point 2 in green, point 3 in blue (BGR colors).
    img = cv.line(img, pts[0], pts[1], (0, 0, 255), 3)
    img = cv.line(img, pts[0], pts[2], (0, 255, 0), 3)
    img = cv.line(img, pts[0], pts[3], (255, 0, 0), 3)

    # Overlay the rotation and translation vectors as red text.
    font = cv.FONT_HERSHEY_SIMPLEX
    text_color = (0, 0, 255)
    font_scale = 1
    thickness = 2
    img = cv.putText(img, "R: {}".format(rvecs), (50, 50), font, font_scale, text_color, thickness, cv.LINE_AA)
    img = cv.putText(img, "T: {}".format(tvecs), (50, 100), font, font_scale, text_color, thickness, cv.LINE_AA)

    return img




# Termination criteria for the sub-pixel corner refinement:
# stop after 30 iterations or when the correction drops below 0.001.
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# 3D object points for the 24x13 chessboard inner corners, all lying in
# the z = 0 plane of the board coordinate system: (0,0,0), (1,0,0), ...
objp = np.zeros((24 * 13, 3), np.float32)
objp[:, :2] = [(x, y) for y in range(13) for x in range(24)]

# Endpoints for the coordinate axes drawn by draw(): origin plus one
# point 10 units along each axis (z negative points toward the camera).
axis1 = np.float32([[0, 0, 0], [0, 10, 0], [10, 0, 0], [0, 0, -10]])

for fname in glob.glob('data/pose/*.jpg'):
    img = cv.imread(fname)
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    # ret is True when the full 24x13 inner-corner grid was found;
    # corners then holds one (x, y) image coordinate per inner corner.
    ret, corners = cv.findChessboardCorners(gray, (24, 13), None)
    if ret:
        # Refine the detected corners to sub-pixel accuracy so the pose
        # estimate below is more precise.
        corners2 = cv.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)

        # Half of the grayscale image's (rows, cols).
        # NOTE(review): these pixel-sized values are used below as
        # OBJECT-space coordinates (chessboard-square units), which
        # places the pyramid very far from the board origin — confirm
        # this offset is intended.
        center = [gray.shape[0] / 2, gray.shape[1] / 2]
        print(center)
        # Vertices of the pyramid: a square base on the z = 0 plane and
        # an apex at z = -3 (negative z points toward the camera).
        axis = np.float32([[center[0] + 0, center[1] + 0, 0],
                           [center[0] + 0, center[1] + 3, 0],
                           [center[0] + 3, center[1] + 3, 0],
                           [center[0] + 3, center[1] + 0, 0],
                           [center[0] + 1.5, center[1] + 1.5, -3]])

        # Estimate the board pose, then project the pyramid vertices
        # into the image plane.
        ret, rvecs, tvecs = cv.solvePnP(objp, corners2, mtx, dist)
        imgptsTrangle, jac = cv.projectPoints(axis, rvecs, tvecs, mtx, dist)
        imgptsTrangle = np.int32(imgptsTrangle).reshape(-1, 2)

        # Project the axis endpoints as well (drawn by draw() below).
        imgpts, jac = cv.projectPoints(axis1, rvecs, tvecs, mtx, dist)

        # Fill the triangular base (points 1-3) in red (BGR), then draw
        # the three green edges from the apex (point 4) to each base
        # vertex.
        img = cv.drawContours(img, [imgptsTrangle[1:4]], -1, (0, 0, 255), -3)
        img = cv.line(img, tuple(imgptsTrangle[4]), tuple(imgptsTrangle[2]), (0, 255, 0), 3)
        img = cv.line(img, tuple(imgptsTrangle[4]), tuple(imgptsTrangle[3]), (0, 255, 0), 3)
        img = cv.line(img, tuple(imgptsTrangle[4]), tuple(imgptsTrangle[1]), (0, 255, 0), 3)

        img = draw(img, imgpts, rvecs, tvecs)
        cv.imshow('img', img)
        k = cv.waitKey(0) & 0xFF
        if k == ord('s'):
            # BUG FIX: fname[:6] truncated 'data/pose/xxx.jpg' to
            # 'data/p', so every save overwrote the same file. Strip the
            # '.jpg' extension instead to save a .png next to the source.
            cv.imwrite(fname[:-4] + '.png', img)
cv.destroyAllWindows()