#include "triangulate3d.h"

triangulate3D::triangulate3D()
{
    // Deliberately minimal: all real setup (reading camera parameters,
    // solving the pose, opening files) happens in init().  Only the
    // cancellation flag needs a well-defined starting state here.
    terminateCheck = false;
}

int triangulate3D::init(QString projName, QString eventName, int camNumber, QString camRes)
{
    // Load all per-run configuration and solve the camera pose.
    //   projName  - project directory holding the camera / laser parameter files
    //   eventName - event directory holding targets, line-processing input and 3D output
    //   camNumber - camera index (0 or 1); selects the parameter file and target set
    //   camRes    - camera resolution tag used in the parameter file name
    // Returns 0 on success, 1 on any failure (a qDebug message says which file failed).
    //
    // Side effects on success: cameraMatrix, distCoeffs, lasers, imagePoints,
    // objectPoints, rvec, tvec and rotationMatrix are populated; fsinput is open
    // for reading and fsout is open for writing.

    //formulate the camera parameter file name, e.g. <proj>/Cam0_1080p.yml
    QString cameraYML = QString(projName + "/Cam" + QString::number(camNumber)
                                + "_" + camRes + ".yml");

    // Open the camera parameter file
    FileStorage fs(cameraYML.toStdString(), FileStorage::READ);  // file to read
    if (!fs.isOpened()) {
        qDebug("Failed to read the camera parameter file.");
        return 1;
    }

    fs["camera_matrix"] >> cameraMatrix;  // read in the camera matrix [A]
    fs["distortion_coefficients"] >> distCoeffs;  // read in the lens distortion coefficients

    // Both fields must be present for the file to be usable
    if(cameraMatrix.empty() || distCoeffs.empty()) {
        qDebug("Not a valid camera parameter file.");
        return 1;
    }

    if( true  ) // toggled - will add an option for this on the dialog.
                // Currently distortion coefficients are not accounted for in the
                // video files, so they are zeroed here so they don't affect the
                // target solvePnP calculation below.
    distCoeffs = distCoeffs * 0;  //Zero the distortion coefficients

    //formulate the laser line sequence parameter file name (reuses the same FileStorage handle)
    QString laserYML = QString(projName + "/LaserLineSequence.yml");
    fs.open(laserYML.toStdString(),FileStorage::READ);
    if (!fs.isOpened())
    {
        qDebug("Failed to read the laser line sequence parameter file.");
        return 1;
    }

    fs["LaserLineSequence"] >> lasers;  // per-line laser plane depths, indexed by line number - 1
    if( lasers.empty() ) {
        qDebug("Not a laser line sequence parameter file.");
        return 1;
    }


    //formulate the registration target point file name
    QString regTargetsYML = QString(eventName + "/RegistrationTargets/targets.yml");
    fs.open(regTargetsYML.toStdString(),FileStorage::READ);
    if (!fs.isOpened())
    {
        qDebug("Failed to read the Registration Target file.");
        return 1;
    }

    // Pick the 2D target set that matches this camera; world targets are shared
    if(camNumber==0)
        fs["Cam0_Targets"] >> imagePoints;
        else
        fs["Cam1_Targets"] >> imagePoints;

    fs["World_Targets"] >> objectPoints;

    // solvePnP needs at least 4 point correspondences
    if(imagePoints.rows <4 || objectPoints.rows<4 ) {
        qDebug("Insufficient number of Registration Targets, or invalid target file.");
        return 1;
    }

    //formulate the line processing result file name (input for doIt())
    QString lineProcessingYML = QString(eventName + "/ProcessedResults/cam"
                                    + QString::number(camNumber) + "_lineProcessingResult.yml.gz");

    fsinput.open(lineProcessingYML.toStdString(),FileStorage::READ);
    if (!fsinput.isOpened())
    {
        qDebug("Failed to read the Line Processing Result file.");
        return 1;
    }

    //formulate the 3D point output file name (output of doIt())
    QString output3DpointsYML = QString(eventName + "/ProcessedResults/cam"
                                    + QString::number(camNumber) + "_3Dpts.yml.gz");
    // open the output file
    fsout.open( output3DpointsYML.toStdString(), FileStorage::WRITE); //open for writing new file
    if (!fsout.isOpened())
    {
        qDebug("Failed to create the 3D point output file.");
        return 1;
    }


    //convert from Physical XYZ coordinates to XYZ of the camera system
    //  Physical X: fuel tank cylinder axis, Y: horizontal, Z: Vertical
    //  Camera X: horizontal, Y: Vertical, Z: depth away from the camera
    //  (triangulatePoints() applies the inverse mapping when exporting results)
    Mat_<float> _objectPoints=objectPoints; //template for simpler access
    for(int j=0;j<objectPoints.rows;j++) {
        float temp=_objectPoints(j,0); //store the physical X temporarily
        _objectPoints(j,0) = _objectPoints(j,1); //assign physical Y to camera X
        _objectPoints(j,1) = _objectPoints(j,2); //assign physical Z to camera Y
        _objectPoints(j,2) = temp; //assign physical X to camera Z
    }


    //solve the camera pose; solvePnP fills rvec/tvec, Rodrigues expands to a 3x3
    rvec = Mat(1,3,DataType<double>::type);
    tvec = Mat(1,3,DataType<double>::type);
    rotationMatrix = Mat(3,3,DataType<double>::type);

    solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
    Rodrigues(rvec,rotationMatrix);


    return 0; //no errors return
}


int triangulate3D::doIt()
{

    //Read and process frames
    FileNode frames = fsinput["Frames"];
    FileNodeIterator it = frames.begin(), it_end = frames.end();

    int framesTotal=it.remaining;
    //initialize the output
    fsout <<"Frames"<<"["; //starting sequence

    int FrameNumber;
    for( ; it != it_end; it++)
    {

        (*it)["FrameNumber"] >> FrameNumber;  //currently unused


        Mat points;
        (*it)["ExportedPoints"] >> points;

 /*
        //cout<<points;
        Mat image(1080,1920,CV_8UC3);
        //image=image*0;
        Mat_< Vec<uchar, 3> > _image;
        Mat_<Vec3i> _points;

        _image=image;
        _points=points;

        for(int i=0;i<points.rows;i++) {
            int x,y,line;
            line=_points(0,i)[0];
            y=_points(0,i)[1];
            x=_points(0,i)[2];
            _image(y,x)[0]=255*(line-1)/14;  //blue chan
            _image(y,x)[1]=0; //green chan
            _image(y,x)[2]=255-255*(line-1)/14; //red chan
        }
        imshow("blank image",image);
        waitKey(33);
*/

        fsout << "{";
        if(!points.empty())
            triangulatePoints(points);
        fsout << "}";

    emit status(100 - it.remaining*100/framesTotal); //send a signal to tell percent complete
    if(terminateCheck) break;
    }
    fsout <<"]"; //ending sequence
    fsout.release();

    emit finished(); //send signal that we are done

    terminateCheck=false; //reset to default value, for subsequent runs

    return 0;


}

void triangulate3D::triangulatePoints(Mat pts)
{
    //Transform image points to 3D space and write them to fsout as "ExportedPoints".
    //  pts: matrix of Vec3i elements, each holding (laser line number, v pixel, u pixel)
    //distortion coefficients are not accounted for at this time

    //solving this perspective transformation:  s{m} = [A][R|t]{M}
    // where {m} is the homogenous coordinates of image pixel.  {m}={u,v,1}.  u and v are known.
    // and [A] is the camera matrix.  [A] is known.
    // and [R|t] is the combined rotation matrix [R] and translation vector {t}.  [R|t] is known.
    // and {M} is the homogenous world coordinates.  {M}={X,Y,Z,1}.  One coordinate of {M} is known: Laser plane depth.
    //   The other coordinates of {M} need to be solved for.
    // and s is an unknown constant, which is solved for as an intermediate step, but the value is not needed afterward.
    //
    // Rearranged: {M} = s [R]^-1 [A]^-1 {m} - [R]^-1 {t}, with s fixed by the
    // known camera-Z (laser plane depth) component.

    Mat_<Vec3i> _pts=pts;  // template for easier notation
    Mat_<float> _lasers=lasers; //template for easier notation; plane depth per line number

    //create a matrix to hold the points to export
    Mat_<Vec3f> exportPoints;  //World coordinates: X, Y, Z
    exportPoints.reserve(pts.cols); //allocate a block of memory

    //loop-invariant factors, computed once
    Mat_<double> _tempMat, _tempMat2, _tempMat3;
    _tempMat3 = rotationMatrix.inv() * cameraMatrix.inv();  // [R]^-1 [A]^-1
    _tempMat2 = rotationMatrix.inv() * tvec; // [R]^-1 {t}

    //homogenous image coordinates {m}={u,v,1}; allocated once and reused,
    //only the u,v entries change per point
    Mat_<double> _uvPoint = Mat(3,1,DataType<double>::type);
    _uvPoint(2) = 1.0;

    // NOTE(review): the loop bound is pts.rows but elements are addressed as
    // _pts(0,i).  For a continuous N x 1 Mat these alias the same memory, but
    // the addressing is only safe in that case — confirm the layout of
    // "ExportedPoints" written by the line-processing stage.
    for(int i=0;i<pts.rows;i++)
    {
        _uvPoint(0) = _pts(0,i)[2]; //horizontal pixel value (u)
        _uvPoint(1) = _pts(0,i)[1]; //vertical pixel value (v)

        float laserPos= _lasers( _pts(0,i)[0] - 1); //laser plane depth for this line

        _tempMat = _tempMat3 * _uvPoint; // [R]^-1 [A]^-1 {m}, computed once per point
        double s = (laserPos + _tempMat2(2)) / _tempMat(2);  //solve for the intermediate constant s

        //reuse _tempMat instead of re-multiplying [R]^-1 [A]^-1 {m}
        Mat_<double> _result = s * _tempMat - _tempMat2;
        exportPoints.push_back( Point3f( _result(2),       //map Camera Z to Physical X
                                         _result(0),       //map Camera X to Physical Y
                                         _result(1) ) );   //map Camera Y to Physical Z
    }

    //write to file

    fsout << "ExportedPoints" << exportPoints;

}

void triangulate3D::terminateProcessing()
{
    // Request a cooperative stop: doIt() polls this flag once per frame and
    // breaks out of its processing loop when it is set.
    terminateCheck = true;
}
