﻿/// 相机参数标定
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>

#include <cstdio>   // snprintf
#include <cstdlib>  // atoi
#include <iostream>
#include <fstream>

using namespace cv;
using namespace std;

/// 演示如何获取角点
inline void findChessBoardCornersDemos()
{
    string imageFileName("D:/sai/opencv/images/pattern.png");
    Size boardSize = Size(9,6);        // 定标板上每行、列的角点数
    vector<Point2f> corners;            // 缓存每幅图像上检测到的角点

    Mat image = imread(imageFileName);
    bool found = findChessboardCorners(image, boardSize, corners, CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE+
          CALIB_CB_FAST_CHECK );

    Mat imageGray;
    cvtColor(image, imageGray , COLOR_BGR2GRAY);

    Mat outImage = image.clone();

    vector<Point2f> cornersGray(corners);

    if (found) {
        drawChessboardCorners( outImage, boardSize, Mat(corners), true );
        imwrite("D:/sai/opencv/tmp/pattern.png", outImage);

        /* 亚像素精确化 */
         cornerSubPix(imageGray, corners, Size(5, 5), Size(-1, -1),
                      TermCriteria(TermCriteria::EPS+TermCriteria::COUNT, 30, 0.001));

         drawChessboardCorners( imageGray, boardSize, Mat(corners), false );
         imwrite("D:/sai/opencv/tmp/pattern_corners.png", outImage);
    }

    // print all corners
    for (int i = 0; i < corners.size(); i++) {
        cout << i+1 << ":" << corners[i] << ", " << cornersGray[i] << endl;
    }

    // namedWindow( "Pattern", WINDOW_AUTOSIZE ); // Create a window for display.
    //imshow( "Pattern", outImage );                // Show our image inside it.
}

/// 获取单目参数
inline void getCalibrateParams()
{
    ofstream fout("D:/sai/opencv/tmp/caliberation_result.txt");  /* 保存标定结果的文件 */
//    string imageFolder("D:/sai/opencv/images/calib_example/");
    string imageFolder("D:/sai/opencv/images/stereo_example/");
    string outputFolder("D:/sai/opencv/tmp/");

    int imageCount = 14;
    Size imageSize = Size(640,480);
    Size boardSize = Size(9,6);             // 定标板上每行、列的角点数
    vector<Point2f> corners;                // 缓存每幅图像上检测到的角点
    vector<vector<Point2f>>  cornersSeq;    // 保存检测到的所有角点

    /************************************************************************
           读取每一幅图像，从中提取出角点，然后对角点进行亚像素精确化
    *************************************************************************/
    cout << "开始提取角点………………" << endl;
    for( int i = 0; i != imageCount ; i++) {
        char strBuf[20]={0};
        snprintf(strBuf, 20, "left%02d.jpg", i+1);
        //std::stringstream strStm;
//        strStm << "Left" << i+1;

        string imageFileName(strBuf);
//        strStm >> imageFileName;
        Mat image = imread(imageFolder+imageFileName, IMREAD_GRAYSCALE);

        bool found = findChessboardCorners(image, boardSize, corners,
              CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE | CALIB_CB_FAST_CHECK );

        if (found) {
            cout << "found corners in " << imageFileName << endl;
            /* 亚像素精确化 */
            cornerSubPix(image, corners, Size(5, 5), Size(-1, -1),
                          TermCriteria(TermCriteria::EPS+TermCriteria::COUNT, 30, 0.001));

            cornersSeq.push_back(corners);  // push to cornerSeq

            Mat outImage = image.clone();
            drawChessboardCorners( outImage, boardSize, Mat(corners), true );
            imwrite("D:/sai/opencv/tmp/" + imageFileName, outImage);
        } else {
            cout << imageFileName << " not found!" << endl;
        }
    }
    cout<<"角点提取完成！\n";

    /************************************************************************
           摄像机定标
    *************************************************************************/

    cout<<"开始定标………………"<<endl;
    Size squareSize = Size(30,30);    // 实际测量得到的定标板上每个棋盘格的大小
    vector<vector<Point3f>> objectPoints;  // 保存标定板上角点的三维坐标
    /*内外参数*/
    Mat intrinsicMatrix=Mat(3,3,CV_32FC1,Scalar::all(0)); /* 摄像机内参数矩阵 */
    vector<int> pointCounts;  // 每幅图像中角点的数量
    Mat distortionCoeffs=Mat(1,5,CV_32FC1,Scalar::all(0)); /* 摄像机的5个畸变系数：k1,k2,p1,p2,k3 */
    vector<cv::Mat> rotationVectors;       /* 每幅图像的旋转向量 */
    vector<cv::Mat> translationVectors;    /* 每幅图像的平移向量 */

    /* 初始化定标板上角点的三维坐标 */
    for (int t=0;t<imageCount;t++)
    {
        vector<Point3f> tempPointSet;
        for (int i=0;i<boardSize.height;i++)
        {
            for (int j=0;j<boardSize.width;j++)
            {
                /* 假设定标板放在世界坐标系中z=0的平面上 */
                Point3f tempPoint;
                tempPoint.x = i*squareSize.width;
                tempPoint.y = j*squareSize.height;
                tempPoint.z = 0;
                tempPointSet.push_back(tempPoint);
            }
        }
        objectPoints.push_back(tempPointSet);
    }

    /* 初始化每幅图像中的角点数量，这里我们假设每幅图像中都可以看到完整的定标板 */
    for (int i=0; i< imageCount; i++)
    {
        pointCounts.push_back(boardSize.width*boardSize.height);
    }
    /* 开始定标 */
    calibrateCamera(objectPoints, cornersSeq, imageSize, intrinsicMatrix, distortionCoeffs,
                    rotationVectors, translationVectors, 0);
    cout<<"定标完成！\n";
    cout << "内参:" << intrinsicMatrix << endl;
    cout << "畸变系数:" << distortionCoeffs << endl;

    /************************************************************************
              对定标结果进行评价
     *************************************************************************/
    cout<<"开始评价定标结果………………"<<endl;
    double totalErr = 0.0;                /* 所有图像的平均误差的总和 */
    double err = 0.0;                      /* 每幅图像的平均误差 */
    vector<Point2f>  imagePoints2;        /*  保存重新计算得到的投影点  */

    cout<<"每幅图像的定标误差："<<endl;
    cout<<"每幅图像的定标误差："<<endl<<endl;
    for (int i=0; i<imageCount;  i++)
    {
        vector<Point3f> tempPointSet = objectPoints[i];
        /* 通过得到的摄像机内外参数，对空间的三维点进行重新投影计算，得到新的投影点 */
        projectPoints(tempPointSet, rotationVectors[i], translationVectors[i], intrinsicMatrix, distortionCoeffs, imagePoints2);
        /* 计算新的投影点和旧的投影点之间的误差*/
        vector<Point2f> tempImagePoint = cornersSeq[i];
        Mat tempImagePointMat = Mat(1,tempImagePoint.size(),CV_32FC2);
        Mat imagePoints2mat = Mat(1,imagePoints2.size(), CV_32FC2);
        for (size_t j = 0 ; j != tempImagePoint.size(); j++)
        {
            imagePoints2mat.at<Vec2f>(0,j) = Vec2f(imagePoints2[j].x, imagePoints2[j].y);
            tempImagePointMat.at<Vec2f>(0,j) = Vec2f(tempImagePoint[j].x, tempImagePoint[j].y);
        }
        fout << i+1 << ": 3DPoints" << tempPointSet << endl;
        fout << i+1 << ": original:" << tempImagePointMat << endl;
        fout << i+1 << ": projected:" << imagePoints2mat << endl;
        err = norm(imagePoints2mat, tempImagePointMat, NORM_L2);
        totalErr += err/=  pointCounts[i];
        cout<<"第"<<i+1<<"幅图像的平均误差："<<err<<"像素"<<endl;
        fout<<"第"<<i+1<<"幅图像的平均误差："<<err<<"像素"<<endl;
    }
    cout<<"总体平均误差："<<totalErr/imageCount<<"像素"<<endl;
    fout<<"总体平均误差："<<totalErr/imageCount<<"像素"<<endl<<endl;
    cout<<"评价完成！"<<endl;

    /************************************************************************
            保存定标结果
     *************************************************************************/
    cout<<"开始保存定标结果………………"<<endl;
    Mat rotationMatrix = Mat(3,3,CV_32FC1, Scalar::all(0)); /* 保存每幅图像的旋转矩阵 */

    fout<<"相机内参数矩阵："<<endl;
    fout<<intrinsicMatrix<<endl;
    fout<<"畸变系数：\n";
    fout<<distortionCoeffs<<endl;
    for (int i=0; i<imageCount; i++)
    {
        fout<<"第"<<i+1<<"幅图像的旋转向量："<<endl;
        fout<<rotationVectors[i]<<endl;

        /* 将旋转向量转换为相对应的旋转矩阵 */
        Rodrigues(rotationVectors[i],rotationMatrix);
        fout<<"第"<<i+1<<"幅图像的旋转矩阵："<<endl;
        fout<<rotationMatrix<<endl;
        fout<<"第"<<i+1<<"幅图像的平移向量："<<endl;
        fout<<translationVectors[i]<<endl;
    }
    cout<<"完成保存"<<endl;
    fout<<endl;
    /************************************************************************
            测试一张图片
     *************************************************************************/
    Mat mapx = Mat(imageSize,CV_32FC1);
    Mat mapy = Mat(imageSize,CV_32FC1);
    Mat R = Mat::eye(3,3,CV_32F);
    cout<<"TestImage ..."<<endl;
    Mat testImage = imread(imageFolder + "left01.jpg", IMREAD_GRAYSCALE);
    initUndistortRectifyMap(intrinsicMatrix,distortionCoeffs,R,
          getOptimalNewCameraMatrix(intrinsicMatrix, distortionCoeffs, imageSize, 1, imageSize, 0),
              imageSize,CV_32FC1,mapx,mapy);
    Mat t = testImage.clone();
    cv::remap(testImage,t,mapx, mapy, INTER_LINEAR);
    imwrite(outputFolder + "left01_correct.jpg",t);
    cout<<"保存结束"<<endl;
}

/// Undistorts an image using fixed, pre-measured camera parameters.
/// The intrinsics and distortion coefficients (k1,k2,p1,p2,k3) below were
/// obtained from a previous calibration run.
Mat calibrate(Mat image)
{
    const Size imageSize = image.size();

    float intrinsicData[3][3] = {
        589.2526583947847,0,321.8607532099886,
        0,585.7784771038199,251.0338528599469,
        0,0,1};
    float distortionData[1][5] = {-0.5284205687061442, 0.3373615384253201, -0.002133029981628697,
                                  0.001511983002864886, -0.1598661778309496};

    Mat cameraMatrix(3, 3, CV_32FC1, intrinsicData);
    Mat distCoeffs(1, 5, CV_32FC1, distortionData);
    Mat identity = Mat::eye(3, 3, CV_32F);
    Mat remapX(imageSize, CV_32FC1);
    Mat remapY(imageSize, CV_32FC1);

    // Build the undistortion maps once, then resample the input image.
    Mat newCameraMatrix = getOptimalNewCameraMatrix(cameraMatrix, distCoeffs,
                                                    imageSize, 1, imageSize, 0);
    initUndistortRectifyMap(cameraMatrix, distCoeffs, identity, newCameraMatrix,
                            imageSize, CV_32FC1, remapX, remapY);

    Mat undistorted = image.clone();
    cv::remap(image, undistorted, remapX, remapY, INTER_LINEAR);
    return undistorted;
}

//************************************
// Description: Triangulate the 3-D world coordinates of a point from its
//              pixel coordinates in the left and right camera images.
// Method:    uv2xyz
// Parameter: uvLeft / uvRight — pixel coordinates in each camera
// Parameter: *Intrinsic / *Rotation / *Translation — per-camera K, R, T
// Returns:   cv::Point3f — least-squares world-space solution
//
// Each camera gives Z*[u v 1]^T = M*[X Y Z 1]^T with M = K*[R|T]; the two
// views yield four linear equations in (X, Y, Z) which are solved in the
// least-squares sense via SVD.
//************************************
Point3f uv2xyz(Point2f uvLeft,Point2f uvRight,
            float leftIntrinsic[3][3], float leftRotation[3][3], float leftTranslation[1][3],
            float rightIntrinsic[3][3], float rightRotation[3][3], float rightTranslation[1][3])
{
    // Projection matrix M = K * [R|T] for the left camera.
    Mat rotL = Mat(3, 3, CV_32F, leftRotation);
    Mat transL = Mat(3, 1, CV_32F, leftTranslation);
    Mat rtL;
    hconcat(rotL, transL, rtL);
    Mat projL = Mat(3, 3, CV_32F, leftIntrinsic) * rtL;

    // Projection matrix M = K * [R|T] for the right camera.
    Mat rotR = Mat(3, 3, CV_32F, rightRotation);
    Mat transR = Mat(3, 1, CV_32F, rightTranslation);
    Mat rtR;
    hconcat(rotR, transR, rtR);
    Mat projR = Mat(3, 3, CV_32F, rightIntrinsic) * rtR;

    // Stack the four linear equations: for each camera and each of (u, v),
    //   coord * m_3·P - m_row·P = 0   =>   A * [X Y Z]^T = B
    Mat A = Mat(4, 3, CV_32F);
    Mat B = Mat(4, 1, CV_32F);
    const Mat* proj[2] = { &projL, &projR };
    const Point2f uv[2] = { uvLeft, uvRight };
    for (int cam = 0; cam < 2; cam++) {
        const Mat& M = *proj[cam];
        const float coord[2] = { uv[cam].x, uv[cam].y };
        for (int r = 0; r < 2; r++) {
            const int row = 2 * cam + r;
            for (int col = 0; col < 3; col++)
                A.at<float>(row, col) = coord[r] * M.at<float>(2, col) - M.at<float>(r, col);
            B.at<float>(row, 0) = M.at<float>(r, 3) - coord[r] * M.at<float>(2, 3);
        }
    }

    // SVD-based least-squares solve for the world coordinates.
    Mat XYZ = Mat(3, 1, CV_32F);
    solve(A, B, XYZ, DECOMP_SVD);

    Point3f world;
    world.x = XYZ.at<float>(0, 0);
    world.y = XYZ.at<float>(1, 0);
    world.z = XYZ.at<float>(2, 0);
    return world;
}

//************************************
// Description: Project a world-space point into one camera's pixel
//              coordinates (no lens distortion applied).
// Method:    xyz2uv
// Parameter: worldPoint — point in world coordinates
// Parameter: intrinsic — 3x3 camera matrix K
// Parameter: translation — 1x3 translation vector T
// Parameter: rotation — 3x3 rotation matrix R
// Returns:   cv::Point2f — pixel coordinates
//
// uv = (1/Zc) * K * Pc   where   Pc = R * Pw + T
//************************************
Point2f xyz2uv(Point3f worldPoint,float intrinsic[3][3],float translation[1][3],float rotation[3][3])
{
    // World -> camera coordinates: Pc = R * Pw + T.
    const float pw[3] = { worldPoint.x, worldPoint.y, worldPoint.z };
    float pc[3];
    for (int r = 0; r < 3; r++) {
        pc[r] = rotation[r][0] * pw[0]
              + rotation[r][1] * pw[1]
              + rotation[r][2] * pw[2]
              + translation[0][r];
    }

    // Apply the intrinsics and normalize by the depth Zc.
    Point2f uv;
    uv.x = (intrinsic[0][0] * pc[0] + intrinsic[0][1] * pc[1] + intrinsic[0][2] * pc[2]) / pc[2];
    uv.y = (intrinsic[1][0] * pc[0] + intrinsic[1][1] * pc[1] + intrinsic[1][2] * pc[2]) / pc[2];
    return uv;
}

/// Projects a fixed world point into pixel coordinates, including the
/// radial (k1,k2,k3) and tangential (p1,p2) lens-distortion model — i.e.
/// a hand-rolled equivalent of cv::projectPoints for one point.
inline void objectPointToUV()
{
    Point3f worldPoint(0, 0, 0);

    // Camera parameters obtained from a previous calibration run.
    double intrinsic[3][3] = {532.8131633945694, 0, 341.7576639498432,
                              0, 532.9715934581859, 234.0803300310574,
                              0, 0, 1};
    double rotation[3][3] = {0.009862606224224857, 0.9623048963954239, -0.2717940679482514,
                            0.9861741295114361, 0.03560364388307402, 0.161842413552841,
                            0.1654186062111135, -0.2696324663604888, -0.9486490488075833};
    double translation[1][3] = {-89.80591391405241, -129.3746996515438, 477.0602040898529};
    // Distortion coefficients k1, k2, p1, p2, k3.
    // (FIX: removed the unused duplicate `distCoeffs` array that held the
    // same values.)
    double k[5] = {-0.2824253174854313, 0.04402206768049022, 0.001119488735405228, -0.0002731112054257219, 0.1266034942314877};

    // World -> camera coordinates: Pc = R * Pw + T.
    Point3d c;
    c.x = rotation[0][0]*worldPoint.x + rotation[0][1]*worldPoint.y + rotation[0][2]*worldPoint.z + translation[0][0];
    c.y = rotation[1][0]*worldPoint.x + rotation[1][1]*worldPoint.y + rotation[1][2]*worldPoint.z + translation[0][1];
    c.z = rotation[2][0]*worldPoint.x + rotation[2][1]*worldPoint.y + rotation[2][2]*worldPoint.z + translation[0][2];

    // Normalized image coordinates.
    double x = c.x/c.z;
    double y = c.y/c.z;

    // Brown-Conrady distortion model (matches OpenCV's projectPoints).
    double r2 = x*x + y*y;
    double r4 = r2*r2;
    double r6 = r4*r2;
    double a1 = 2*x*y;
    double a2 = r2 + 2*x*x;
    double a3 = r2 + 2*y*y;
    double cdist = 1 + k[0]*r2 + k[1]*r4 + k[4]*r6;   // radial scaling factor
    double xd0 = x*cdist + k[2]*a1 + k[3]*a2;         // + tangential terms
    double yd0 = y*cdist + k[2]*a3 + k[3]*a1;

    // Apply the intrinsics to obtain pixel coordinates.
    Point2d uv;
    uv.x = intrinsic[0][0]*xd0 + intrinsic[0][1]*yd0 + intrinsic[0][2];
    uv.y = intrinsic[1][0]*xd0 + intrinsic[1][1]*yd0 + intrinsic[1][2];

    cout << "uv:" << uv << endl;
}

/// Entry point. Selects a demo via the first command-line argument:
/// 0 = chessboard-corner demo, 1 = monocular calibration,
/// 2 = manual point projection. Defaults to 2 when no argument is given
/// (same behavior as the previously hard-coded option).
int main(int argc, char const *argv[])
{
    // Generalization: previously `option` was hard-coded to 2.
    int option = (argc > 1) ? atoi(argv[1]) : 2;
    switch (option) {
    case 0:
        // 演示如何获取角点
        findChessBoardCornersDemos();
        break;
    case 1:
        // 获取单目参数
        getCalibrateParams();
        break;
    case 2:
        objectPointToUV();
        break;
    default:
        break;
    }

    waitKey(0); // wait for a keystroke (only meaningful if a window was shown)

    return 0;
}
