#include <iostream>
#include <vector>
#include <fstream>

using namespace std;

#include <boost/timer.hpp>

//for sophus
#include <sophus/se3.hpp>

using Sophus::SE3d;

//for eigen
#include <eigen3/Eigen/Core>
#include <eigen3/Eigen/Geometry>

using namespace Eigen;

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

using namespace cv;

/*****************************************************
 * This program demonstrates dense depth estimation with a monocular
 * camera whose trajectory is known, using epipolar-line search + NCC matching.
 *****************************************************/

//---------------------------------------------------------------------
//parameters
const int boarder=20; // image border width in pixels (spelling "boarder" kept: used consistently below)
const int width=640;  //image width
const int height=480; //image height
const double fx=481.2f; // camera intrinsics; NOTE(review): fy is negative, presumably the dataset's flipped y-axis convention — confirm
const double fy=-480.0f;
const double cx=319.5f;
const double cy=239.5f;
const int ncc_window_size=3; // half-width of the NCC window
const int ncc_area=(2*ncc_window_size+1)*(2*ncc_window_size+1); // NCC window area in pixels
const double min_cov=0.1; // convergence criterion: minimum variance
const double max_cov=10;  // divergence criterion: maximum variance

//--------------------------------------------------------------------------
// Read data from the REMODE test dataset: image file names, camera poses
// (T_WC), and the reference depth map. Returns false on failure.
bool readDatasetFiles(
    const string &path,
    vector<string> &color_image_files,
    vector<SE3d> &poses,   // was terminated with ';' — invalid inside a parameter list
    cv::Mat &ref_depth
     );

/**
 * Update the depth map (and its variance) using one new frame.
 * @param ref reference image (grayscale)
 * @param curr current image (grayscale)
 * @param T_C_R pose from the reference frame to the current frame
 * @param depth depth map (per-pixel mean), updated in place
 * @param depth_cov2 depth variance map, updated in place
 * @return bool whether the update succeeded
 */
 
bool update(
    const Mat &ref,
    const Mat &curr,
    const SE3d &T_C_R,
    Mat &depth,
    Mat &depth_cov2
     );

/**
 * Epipolar-line search: look for the match of a reference pixel in the
 * current image along the epipolar line, within +/- 3 sigma of the depth.
 * @param ref reference image
 * @param curr current image
 * @param T_C_R pose from reference to current frame
 * @param pt_ref pixel position in the reference image
 * @param depth_mu depth mean at that pixel
 * @param depth_cov depth standard deviation at that pixel
 * @param pt_curr matched point in the current image (output)
 * @param epipolar_direction unit epipolar direction (output)
 * @return bool whether a confident match was found
 */
 
bool epipolarSearch(
    const Mat &ref,
    const Mat &curr,
    const SE3d &T_C_R,
    const Vector2d &pt_ref,
    const double &depth_mu,
    const double &depth_cov,
    Vector2d &pt_curr,
    Vector2d &epipolar_direction
     );

/**
 * Update the per-pixel depth filter: triangulate a new depth observation
 * from the matched pair and fuse it (Gaussian fusion) into the maps.
 * @param pt_ref pixel in the reference image
 * @param pt_curr matched pixel in the current image
 * @param T_C_R pose from reference to current frame
 * @param epipolar_direction unit epipolar direction (used for the uncertainty model)
 * @param depth depth mean map, updated in place
 * @param depth_cov2 depth variance map, updated in place
 * @return bool whether the update succeeded
 * */
bool updateDepthFilter(
    const Vector2d &pt_ref,
    const Vector2d &pt_curr,
    const SE3d &T_C_R,
    const Vector2d &epipolar_direction,
    Mat &depth,
    Mat &depth_cov2
     );

/**
 * Compute the zero-mean normalized cross-correlation (NCC) score between
 * a window around pt_ref in ref and a window around pt_curr in curr.
 * @param ref reference image
 * @param curr current image
 * @param pt_ref point in the reference image
 * @param pt_curr point in the current image
 * @return NCC score in [-1, 1]
 * */

double NCC(const Mat &ref, const Mat &curr, const Vector2d &pt_ref, const Vector2d &pt_curr);

// Bilinearly interpolated image intensity at a sub-pixel location,
// normalized to [0, 1]. (Function-name spelling kept: callers use it.)
inline double getBilinearInterPolatedVlue(const Mat &img, const Vector2d &pt){
    // Pointer to the top-left pixel of the 2x2 neighbourhood.
    uchar *d=&img.data[int(pt(1,0))*img.step+int(pt(0,0))];  // full-width '（，）' replaced with ASCII
    double xx=pt(0,0)-floor(pt(0,0));
    double yy=pt(1,0)-floor(pt(1,0));  // was 'floot'
    // Weighted sum of the four neighbours; the closing parenthesis was
    // missing, so /255.0 only divided the last term.
    return ((1-xx)*(1-yy)*double(d[0])+
            xx*(1-yy)*double(d[1])+
            (1-xx)*yy*double(d[img.step])+
            xx*yy*double(d[img.step+1]))/255.0;
}

// Display the ground-truth depth, the estimated depth, and their difference.
void plotDepth(const Mat &depth_truth, const Mat &depth_estimate);

//像素到相机坐标系
inline Vector3d px2cam(const Vector2d px){
    return Vector3d(
        (px(0,0)-cx)/fx,
        (px(1,0)-cy)/fy,
        1
           );
}

//检测一个点是否在图像框内
inline bool inside(const Vector2d &pt){
    return pt(0,0)>=boarder&&pt(1,0)>=boarder
            &&pt(0,0)+boarder<width&&pt(1,0)+border<=height;
}

// Visualize an epipolar match: draw the reference pixel and its match.
void showEpiplarMatch(const Mat &ref, const Mat &curr, const Vector2d &px_ref, const Vector2d &px_curr);

// Visualize the epipolar line segment between the min- and max-depth projections.
void showEpiplarLine(const Mat &ref, const Mat &curr, const Vector2d &px_ref, const Vector2d &px_min_curr,
                     const Vector2d &px_max_curr);  // was 'Vcetor2d' — typo

// Evaluate estimated depth against ground truth (name spelling "evaludate" kept: used at call sites).
void evaludateDepth(const Mat &depth_truth, const Mat &depth_estimate);
//-------------------------------------------------------------------------------------

int main(int argc, char **argv)
{
    if(argc!=2){
        cout<<"Usage: dense_mapping path_to_test_dataset"<<endl;
        return -1;
    }
    
    //读取数据
    vector<string> color_image_files;
    vector<SE3d> poses_TWC;
    Mat ref_depth;
    bool ret =readDatasetFiles(argv[1],color_image_files,poses_TWC,ref_depth);
    if(ref==false){
        cout<<"Reading image files failed!"<<endl;
        return -1;
    }
    cout<<"read total "<<color_image_files.size()<<" files. "<<endl;
    
    //第一张图
    Mat ref=imread(color_image_files[0],0);
    SE3d pose_ref_TWC=poses_TWC[0];
    double init_depth=3.0; //深度初始值
    double init_cov2=3.0;  //方差初始化
    Mat depth(height, width,CV_64F,init_depth);
    Mat depth_cov2(height, width, CV_64F, init_cov2); //深度图方差
    
    for(int index=1,index<color_image_files.size();index++){
        cout<<" *** loop"<<index<<" *** "<<endl;
        Mat curr=imread(color_image_files[index],0);
        if(curr.data==nullptr) continue;
        SE3d pose_curr_TWC=poses_TWC[index];
        SE3d pose_T_C_R=pose_curr_TWC.inverse()*pose_ref_TWC; // 坐标转换关系： T_C_W * T_W_R = T_C_R
        update(ref,curr,pose_T_C_R,depth,depth_cov2);
        evaludateDepth(ref_depth,depth);
        plotDepth(ref_depth,depth);
        imshow("image", curr);
        waitKey(1);
    }
    
    cout<<"estimation returns, saving depth map ... "<<endl;
    imwrite("depth.png",depth);
    cout<<"done."<<endl;
    
    return 0;
}

// Read the REMODE dataset: trajectory file (one line per frame:
// "image_name tx ty tz qx qy qz qw", pose is T_WC, not T_CW) and the
// reference depth map. Returns false if either file cannot be opened.
bool readDatasetFiles(   // was 'readDatesetFiles' — did not match the declaration or the call in main
    const string &path,
    vector<string> &color_image_files,
    std::vector<SE3d> &poses,
    cv::Mat &ref_depth){
    // Fixed garbled file name ("imput_squence") and the stray "./" separator.
    ifstream fin(path+"/first_200_frames_traj_over_table_input_sequence.txt");
    if(!fin) return false;
            
    while(!fin.eof()){
        string image;
        fin>>image;
        double data[7];
        for(double &d:data) fin>>d;
            
        color_image_files.push_back(path+string("/images/")+image);  // was "/iamges/"
        // Quaterniond takes (w, x, y, z); the file stores qx qy qz qw.
        poses.push_back(
            SE3d(Quaterniond(data[6],data[3],data[4],data[5]),
                 Vector3d(data[0],data[1],data[2]))
              );
        if(!fin.good()) break;
    }
    fin.close();
    
    // Load the reference depth map; values are stored in cm, convert to m.
    fin.open(path+"/depthmaps/scene_000.depth");  // was "scence_000.depth"
    ref_depth=cv::Mat(height,width,CV_64F);
    if(!fin) return false;
    for(int y=0;y<height;y++)
        for(int x=0;x<width;x++){
            double depth=0;
            fin>>depth;
            ref_depth.ptr<double>(y)[x]=depth/100.0;  // Mat::ptr is a call, not an index: was ptr<double>[y][x]
        }
    return true;
}

//更新深度图
bool update(const Mat &ref, const Mat &curr, const SE3d &T_C_R, Mat &depth, Mat &depth_cov2)
{
    for(int x=boarder;x<width-boarder;x++)
        for(int y=boarder;y<width-boarder;y++){
            //遍历每一个像素
            if(depth_cov2.ptr<double>[y][x]<min_cov||depth_cov2.ptr<double>[y][x]>max_cov) continue;
            
            //在极线上搜索（x，y）的匹配
            Vcetor2d pt_curr;
            Vector2d epipolar_direction;
            bool ret=epipolarSearch(
                ref,
                curr,
                T_C_R,
                Vector2d(x,y),
                depth.ptr<double>[y],[x],
                sqrt(depth_cov2.ptr<double>[y][x]),
                pt_curr,
                epipolar_direction
            );
            
            if(ret==false) continue;
            
            //显示匹配结果
            //showEpiplarMatch(ref, curr, vector2d(x,y),pt_curr);
            
            //匹配成功，更新深度图
            updateDepthFilter(Vector2d(x,y),pt_curr,T_C_R,epipolar_direction,depth,depth_cov2);
        }
}

// Epipolar-line search: project the reference ray at depth_mu +/- 3 sigma
// into the current image and scan the resulting segment for the best NCC.
bool epipolarSearch(
    const Mat &ref, const Mat &curr,
    const SE3d &T_C_R, const Vector2d &pt_ref,
    const double &depth_mu, const double &depth_cov,
    Vector2d &pt_curr, Vector2d &epipolar_direction)  // was 'Vcetor2d'
{
    Vector3d f_ref=px2cam(pt_ref);
    f_ref.normalize();
    Vector3d P_ref=f_ref*depth_mu;  // reference-frame point at the mean depth
    Vector2d px_mean_curr=cam2px(T_C_R*P_ref); // projection at the mean depth
    double d_min=depth_mu-3*depth_cov,d_max=depth_mu+3*depth_cov;
    if(d_min<0.1) d_min=0.1;  // do not search behind/too close to the camera
    Vector2d px_min_curr=cam2px(T_C_R*(f_ref*d_min));  // projection at the minimum depth
    Vector2d px_max_curr=cam2px(T_C_R*(f_ref*d_max));  // projection at the maximum depth
        
    Vector2d epipolar_line=px_max_curr-px_min_curr; // epipolar line segment
    epipolar_direction=epipolar_line;               // unit direction (output)
    epipolar_direction.normalize();
    double half_length=0.5*epipolar_line.norm(); // half length of the segment
    if(half_length>100) half_length=100;         // cap the search range
            
    // Show the epipolar segment (debug):
    //showEpiplarLine(ref,curr,pt_ref,px_min_curr,px_max_curr);
    
    // Scan the segment around the mean projection, ~0.7 px per step.
    double best_ncc=-1.0;
    Vector2d best_px_curr;
    for(double l=-half_length;l<half_length;l+=0.7){
        Vector2d px_curr=px_mean_curr+l*epipolar_direction;
        if(!inside(px_curr))
            continue;
        // NCC between the candidate and the reference window.
        double ncc=NCC(ref,curr,pt_ref,px_curr);
        if(ncc>best_ncc){
            best_ncc=ncc;
            best_px_curr=px_curr;
        }
    }
    if(best_ncc<0.85f)   // only trust matches with a high NCC score
        return false;
    pt_curr=best_px_curr;
    return true;
}

double NCC(
    const Mat &ref, const Mat &curr,
    const Vector2d &pt_ref, const Vector2d &pt_curr){
    //零均值归一化互相关
    //先算均值
    double mean_ref=0,mean_curr=0;
    vector<double> values_ref,values_curr;  //参考帧与当前帧的均值
    for(int x=-ncc_window_size;x<=ncc_window_size;x++)
        for(int y=-ncc_window_size;y<=ncc_window_size;y++){
            double values_ref=double(ref.ptr<uchar>(int(y+pt_ref(1,0)))[int(x+pt_ref(0,0))])/255.0;
            mean_ref+=value_ref;
            
            double values_curr=getBilinearInterPolatedVlue(curr,pt_curr+Vector2d(x,y));
            mean_curr+=value_curr;
            
            values_ref.push_back(value_ref);
            value_curr.push_back(value-curr);
        }
        
        mean_ref/=ncc_area;
        mean_curr/=ncc_area;
        
        //计算Zero mean ncc
        double numerator=0;demoniator1=0, demoniator2=0;
        for(int i=0;i<values_ref.size();i++){
            double n=(values_ref[i]-mean_ref)*(values_curr[i]-mean_curr);
            numerator+=n;
            demoniator1+=(values_ref[i]-mean_ref)*(values_ref[i]-mean_curr);
            demoniator2+=(values_curr[i]-mean_curr)*(values_curr[i]-mean_curr);
        }
        return numerator/sqrt(demoniator1*demoniator2 + 1e-10); //防止分母出现0
}

bool updateDepthFilter(
    const Vector2d &pt_ref,
    const Vector2d &pt_curr,
    const SE3d &T_C_R,
    const Vector2d &epipolar_direction,
    Mat &depth,
    Mat &depth_cov2){
    //三角化计算深度
    SE3d T_R_C=T_C_R.inverse();
    Vector3d f_ref=px2cam(pt_ref);
    f_ref.normalize（）；
    Vector3d f_curr=px2cam(pt_curr);
    f_curr.normalize();
    
    //方程
    //d_ref*f_ref=d_cur*(R_RC*f_cur)+t_RC
    //f2=R_RC*f_cur
    //转化成下面这个矩阵方程组
    //=>[f_ref^T f_ref, -f_ref^T f2] [d_ref]=[f_ref^T t]
    //  [f_2^T f_ref, -f2^T f2     ] [d_cur]=[f2^T t  ]
    Vector3d t=T_R_C.translation();
    Vector3d f2=T_R_C.so3()*f_curr;
    Vector2d b=Vector2d(t.dot(f_ref),t.dot(f2));
    Matrix2d A;
    A(0,0)=f_ref.dot(f_ref);
    A(0,1)=-f_ref.dot(f2);
    A(1,0)=-A(0,1);
    A(1,1)=-f2.dot(f2);
    Vector2d ans=A.inverse()*b;
    Vector3d xm=ans[0]*f_ref;
    Vector3d xn=t+ans[1]*f2;
    Vector3d p_esti=(xm+xn)/2.0;  //p的位置取二者的均值
    dpuble depth_estimate=p_esti.norm();  //深度值
    
    //计算不确定性
    Vector3d p=f_ref*depth_estimation;
    Vector3d a=p-t;
    double t_norm=t.norm();
    double a_norm=a.norm();
    double alpha=acos(f_ref.dot(t)/t_norm);
    double beta=acos(-a.dot(t)/(a_norm*t_norm);
    double gamma=M_PI-alpha-beta_prime;
    double p_prime=t_norm*sin(beta_prime)/sin(gamma);
    double d_cov=p_prime-depth_estimation;
    double d_cov2=d_cov*d-cov；
    
    //高斯融合
    double mu=depth.ptr<double>(int(pt_ref(1,0)))[int(pt_ref(0,0))];
    double sigma2=depth_cov2.ptr<double>(int(pt_ref(1,0)))[int(pt_ref(0,0))];
    
    double mu_fuse=(d_cov2*mu+sigma2*depth_estimation)/(sigma2+d_cov2);
    double sigma_fuse2=(sigma2*d_cov2)/(sigma2+d_cov2);
    
    depth.ptr<double>(int(pt_ref(1,0)))[int(pt_ref(0,0))]=mu_fuse;
    depth_cov2.ptr<double>(int(pt_ref(1,0)))[int(pt_ref(0,0))]=sigma_fuse2;
    
    return true;
}

void plotDepth(const Mat &depth_truth,const Mat &depth_estimate){
    imshow("depth_truth", depth_truth*0.4);
    imshow("depth_estimate", depth_estimate*0.4);
    imshow("depth_error",depth_truth-depth_estimate);
    waitKey(1);
}

void evaludateDepth(const Mat &depth-depth_truth,const Mat &depth_estimate)
{
    double ave_depth_error=0;    //平均误差
    double ave_depth_error_sq=0;  //平方误差
    int cnt_depth_data=0;
    for(int y=boarder;y<depth_truth.rows-boarder;y++)
        for(int x=boarder;x<depth_truth.cols-boarder;x++){
            double error=depth_truth.ptr<double>[y][x]-depth-depth_estimate.ptr<double>[y][x];
            ave_depth_error+=error;
            ave_depth_error_sq=error*error;
            cnt_depth_data++;
        }
        ave_depth_error/=cnt_depth_data;
        ave_depth_error_sq/=cnt_depth_data;
    
    cout<<"Average squared error= "<<ave_depth_error_sq<<", average error: "<<ave_depth_error<<endl;
}
        
// Visualize an epipolar match: the reference pixel and its matched pixel.
// NOTE(review): the original body here referenced an undefined px_max_curr —
// it was the showEpiplarLine body pasted under the wrong name. Both
// functions are restored to match their forward declarations.
void showEpiplarMatch(const Mat &ref, const Mat &curr, const Vector2d &px_ref, const Vector2d &px_curr)
{
    Mat ref_show,curr_show;
    cv::cvtColor(ref,ref_show,CV_GRAY2BGR);
    cv::cvtColor(curr,curr_show,CV_GRAY2BGR);
    
    cv::circle(ref_show, cv::Point2f(px_ref(0,0),px_ref(1,0)),5,cv::Scalar(0,255,0),2);
    cv::circle(curr_show,cv::Point2f(px_curr(0,0),px_curr(1,0)),5,cv::Scalar(0,255,0),2);
    imshow("ref",ref_show);
    imshow("curr",curr_show);
    waitKey(1);
}

// Visualize the epipolar line segment between the min- and max-depth projections.
void showEpiplarLine(const Mat &ref, const Mat &curr, const Vector2d &px_ref, const Vector2d &px_min_curr,
                     const Vector2d &px_max_curr)
{
    Mat ref_show,curr_show;
    cv::cvtColor(ref,ref_show,CV_GRAY2BGR);
    cv::cvtColor(curr,curr_show,CV_GRAY2BGR);
    
    cv::circle(ref_show, cv::Point2f(px_ref(0,0),px_ref(1,0)),5,cv::Scalar(0,255,0),2);
    cv::circle(curr_show,cv::Point2f(px_min_curr(0,0),px_min_curr(1,0)),5,cv::Scalar(0,255,0),2);
    cv::circle(curr_show,cv::Point2f(px_max_curr(0,0),px_max_curr(1,0)),5,cv::Scalar(0,255,0),2);
    cv::line(curr_show,Point2f(px_min_curr(0,0),px_min_curr(1,0)),Point2f(px_max_curr(0,0),px_max_curr(1,0)),
             Scalar(0,255,0),1);
    imshow("ref",ref_show);
    imshow("curr",curr_show);
    waitKey(1);
}
    
    
                
            



    


    
    
    
    
    
    
    
    
    
    
    
    
    
    
    
    










