#include "depth_calculating.hpp"

#include <algorithm>

// Construct the depth calculator: allocate the shared output point cloud and
// precompute the stereo rectification lookup tables from the calibration data.
DepthCal::DepthCal()
    : point_cloud(new pcl::PointCloud<pcl::PointXYZ>)
{
    calculate_remapping_matrix();
}

// Nothing to release by hand — members clean themselves up.
DepthCal::~DepthCal() = default;

/**
 * @brief Compute the stereo rectification remapping tables (mapLx/mapLy, mapRx/mapRy).
 * @details Derives the rectification rotations (Rl, Rr), rectified projection
 *          matrices (Pl, Pr) and the disparity-to-depth matrix Q from the
 *          calibrated stereo pair, then builds per-camera undistort+rectify maps.
 *          Bug fix: the left map previously used the RIGHT projection matrix Pr,
 *          which remapped the left image with the wrong projection.
*/
void DepthCal::calculate_remapping_matrix(){
    stereoRectify(cameraMatrixL, distCoeffL, cameraMatrixR, distCoeffR, imageSize, R, T, 
        Rl, Rr, Pl, Pr, Q, CALIB_ZERO_DISPARITY, 0, imageSize, &validROIL, &validROIR);
    // Left camera map must use the LEFT projection matrix Pl (was Pr).
    initUndistortRectifyMap(cameraMatrixL, distCoeffL, Rl, Pl, imageSize, CV_16SC2, mapLx, mapLy);
    initUndistortRectifyMap(cameraMatrixR, distCoeffR, Rr, Pr, imageSize, CV_16SC2, mapRx, mapRy);
}

/**
 * @brief Undistort and stereo-rectify a left/right image pair.
 * @param input_img_l left camera image
 * @param input_img_r right camera image
 * @return false if either input is empty, true otherwise
 * @details Fetch the rectified images with get_rectifyImageL() / get_rectifyImageR().
*/
bool DepthCal::stereo_correction(const Mat& input_img_l, const Mat& input_img_r){
    if(input_img_l.empty() || input_img_r.empty()){
        return false;
    }
    // One remap per camera applies undistortion and rectification together,
    // using the maps precomputed in calculate_remapping_matrix().
    remap(input_img_l, rectifyImageL, mapLx, mapLy, INTER_LINEAR);
    remap(input_img_r, rectifyImageR, mapRx, mapRy, INTER_LINEAR);
    // Debug helper: show both rectified views side by side with horizontal
    // epipolar lines to eyeball the rectification quality.
    // Mat concat_img;
    // hconcat(rectifyImageL, rectifyImageR, concat_img);
    // for (int i = 0; i < concat_img.rows; i += 50){
    //     line(concat_img, Point(0, i), Point(concat_img.cols, i), Scalar(0, 255, 0), 1, 8);
    // }
    // imshow("concat_img", concat_img);
    return true;
}

/**
 * @brief 根据左图的排球位置，计算出右图中排球的位置，并输出两个对应区域的图片
 * @param input_img 输入左右相机图像
 * @param rect_l 左图中使用yolo识别出的排球的位置框
 * @param rect_r 计算出的右图中的排球框
 * @param rect_roi 加宽后的左图roi区域
 * @param roi_img 适当加宽后的包含排球的区域图像
 * @return 
 * @details 
*/
bool DepthCal::get_roi_img(const Mat& input_img_l, const Mat& input_img_r, const Rect& rect_l, Rect& rect_r, 
                            Rect& rect_roi, Mat& roi_img_l, Mat& roi_img_r){
    if(input_img_l.empty() || input_img_r.empty()) return 0;
    if(rect_l.size().area() > 50000) return 0;
    // 根据排球的宽度的像素值估算视差值，然后对应到右图
    int disparity = (int)((-T.at<double>(0,0) / ball_diameter) * rect_l.height);
    int x = rect_l.x - disparity;
    x > 0 ? x = x : x = 0;
    rect_r = Rect(x, rect_l.y, rect_l.width, rect_l.height);

    // 加宽定位框然后取出框中的图像，加宽时左图往左加宽0.6倍原宽度，往右加宽0.2倍原宽度，右图反之
    float ratio = 0.2, ratio2 = 0.6;
    int x_l_min = rect_l.x - rect_l.width * ratio2;
    int x_l_max = rect_l.x + rect_l.width * (ratio + 1);
    int x_r_min = rect_r.x - rect_r.width * ratio;
    int x_r_max = rect_r.x + rect_r.width * (ratio2 + 1);
    x_l_min > 0 ? x_l_min = x_l_min : x_l_min = 0;
    x_l_max < imageWidth - 1 ? x_l_max = x_l_max : x_l_max = imageWidth - 1;
    x_r_min > 0 ? x_r_min = x_r_min : x_r_min = 0;
    x_r_max < imageWidth - 1 ? x_r_max = x_r_max : x_r_max = imageWidth - 1;

    roi_img_l = input_img_l(Rect(x_l_min, rect_l.y, x_l_max - x_l_min, rect_l.height)).clone();
    roi_img_r = input_img_r(Rect(x_r_min, rect_r.y, x_r_max - x_r_min, rect_r.height)).clone();
    rect_roi = Rect(x_l_min, rect_l.y, x_l_max - x_l_min, rect_l.height);

    // Mat right_img = input_img_r.clone();
    // rectangle(right_img, rect_r, Scalar(0, 255, 0), 2);
    // namedWindow("right_img", WINDOW_NORMAL);
    // imshow("right_img", right_img);
    // namedWindow("roi_img_l", WINDOW_NORMAL);
    // imshow("roi_img_l", roi_img_l);
    // namedWindow("roi_img_r", WINDOW_NORMAL);
    // imshow("roi_img_r", roi_img_r);
    return 1;
}

/**
 * @brief Convert the ROI's disparity map into a 3-D point cloud.
 * @param disparity_matrix disparity map (CV_32F, in pixels) covering rect_roi;
 *        indexed relative to the ROI origin
 * @param rect_roi ROI rectangle in full-image coordinates
 * @param point_cloud [out] cleared, then refilled with the reconstructed points
 * @return false when the disparity map is empty, true otherwise
 * @details Also averages the depth over a small patch assumed to sit at the
 *          ball's center; that average is currently only printed to stdout,
 *          never returned — NOTE(review): consider exposing it to callers.
*/
bool DepthCal::disparity2pointcloud(const Mat& disparity_matrix, const Rect& rect_roi, pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud){
    if(disparity_matrix.empty()) return 0;
    point_cloud->clear();
    // cout << disparity_matrix.size() << endl;
    // Left-camera pinhole intrinsics; b is the stereo baseline (-T.x).
    float fx = cameraMatrixL.at<double>(0, 0), fy = cameraMatrixL.at<double>(1, 1), 
        cx = cameraMatrixL.at<double>(0, 2), cy = cameraMatrixL.at<double>(1, 2), 
        b = -T.at<double>(0, 0);
    float clip_thres = disparity_matrix.at<float>(0, 0); // reject disparities <= top-left value, assumed to be invalid/background — TODO confirm
    Rect center_rect(rect_roi.x + rect_roi.width * 0.533, rect_roi.y + rect_roi.height * 0.375, 
                        rect_roi.width * 0.1337, rect_roi.height * 0.25); // patch assumed to cover the ball's center — magic ratios, presumably tuned
    float depth_sum = 0;
    int sum_num = 0;
    // Back-project each ROI pixel: (i, j) are full-image coordinates, while the
    // disparity map is indexed relative to the ROI origin.
    for(int i = rect_roi.x; i < rect_roi.x + rect_roi.width; i++){
        for(int j = rect_roi.y; j < rect_roi.y + rect_roi.height; j++){
            float dis = disparity_matrix.at<float>(j - rect_roi.y, i - rect_roi.x);
            if(dis <= clip_thres) continue;
            // Normalized image-plane ray, scaled by depth below.
            float x = (i - cx) / fx;
            float y = (j - cy) / fy;
            float depth = fx * b / dis / 1000.0; // depth = f*B/d; /1000 presumably mm -> m — TODO confirm units
            // cout << depth << "   ";
            PointXYZ point(x * depth, y * depth, depth);
            if(point.z > 1 && point.z < 10){ // keep only plausible depths (1..10, presumably meters)
                // cout << point.z << " ";
                point_cloud->push_back(point);
                // Accumulate depth inside the center patch for the average below.
                if(i > center_rect.x && i < center_rect.x + center_rect.width && 
                    j > center_rect.y && j < center_rect.y + center_rect.height){
                    depth_sum += depth;
                    sum_num++;
                }
            }
            // cout << disparity_matrix.at<float>(j - rect_roi.y, i - rect_roi.x) << "   ";
        }
        
    }
    // Mean depth over the center patch; 0 when no pixel qualified (debug print only).
    float average_depth = 0;
    if(sum_num != 0) average_depth = depth_sum / sum_num;
    cout << average_depth << endl;
    // cout << "end" << endl;
    return 1;
}

/**
 * @brief 对图像进行立体匹配
 * @param input_img 输入左右相机图像
 * @param output_img 输出处理后的图像
 * @return 
 * @details 
*/
bool DepthCal::stereo_match(const Mat& input_img_l, const Mat& input_img_r){
    if(input_img_l.empty() || input_img_r.empty()) return 0;
    if(input_img_l.size() != input_img_r.size()) return 0;
    Ptr<cv::StereoSGBM> sgbm = cv::StereoSGBM::create(mindisparity, ndisparities, blockSize);
    // disparity_matrix = Mat(imageWidth, imageHeight, CV_16SC1);
    // 设置相关参数，核心参数调节在.hpp中
    int P1 = 8 * img_channels * blockSize * blockSize;
	int P2 = 32 * img_channels * blockSize * blockSize;
	sgbm->setP1(P1);
	sgbm->setP2(P2);
	sgbm->setPreFilterCap(1);
	sgbm->setUniquenessRatio(10);
	sgbm->setSpeckleRange(0);
	sgbm->setSpeckleWindowSize(0);
	sgbm->setDisp12MaxDiff(-1);
	sgbm->setMode(cv::StereoSGBM::MODE_SGBM_3WAY);
    // 匹配计算视差，然后归一化进行显示
	sgbm->compute(input_img_l, input_img_r, disparity_matrix);
	Mat disp8(disparity_matrix.rows, disparity_matrix.cols, CV_8UC1);
	normalize(disparity_matrix, disp8, 0, 255, NORM_MINMAX, CV_8UC1);
    applyColorMap(disp8, disp8, 2);
    imshow("disp8", disp8);
    return 1;
}

/**
 * @brief Stereo-match only the region around a detected ball box.
 * @param input_img_l left camera image
 * @param input_img_r right camera image
 * @param rect_l ball box detected in the left image
 * @return false on empty inputs, oversized boxes, or ROI extraction failure;
 *         true on success
 * @details Crops matching ROIs from both views, runs block matching with a
 *          disparity range scaled to the box size, restores the full-image
 *          disparity offset removed by cropping, and fills the member
 *          point_cloud via disparity2pointcloud().
*/
bool DepthCal::stereo_match(const Mat& input_img_l, const Mat& input_img_r, const Rect& rect_l){
    if(input_img_l.empty() || input_img_r.empty()) return false;
    if(rect_l.size().area() > 50000) return false;
    Rect rect_r, rect_roi;
    Mat left_img, right_img;
    // Predict the right-image box and crop widened ROIs from both views;
    // bail out if extraction fails (was: return value ignored).
    if(!get_roi_img(input_img_l, input_img_r, rect_l, rect_r, rect_roi, left_img, right_img)) return false;
    if(left_img.empty() || left_img.size() != right_img.size()) return false;
    // StereoBM needs single-channel input; assumes the crops are BGR — TODO confirm.
    cvtColor(left_img, left_img, COLOR_BGR2GRAY);
    cvtColor(right_img, right_img, COLOR_BGR2GRAY);
    namedWindow("left_roi_img", WINDOW_NORMAL);
    imshow("left_roi_img", left_img);
    namedWindow("right_roi_img", WINDOW_NORMAL);
    imshow("right_roi_img", right_img);
    // Disparity search range scales with the box height. StereoBM requires
    // numDisparities to be a positive multiple of 16, so never let it reach 0
    // (a small box previously produced 0 and crashed inside compute()).
    int max_disparity = 0.6 * rect_l.height;
    int num_disparity = std::max((max_disparity / 16) * 16, 16);
    Ptr<cv::StereoBM> left_matcher = cv::StereoBM::create(num_disparity, 5);
    left_matcher->setMinDisparity(std::max(max_disparity - num_disparity, 0));
    left_matcher->setSpeckleWindowSize(30);
    left_matcher->setSpeckleRange(30);
    // Compute the fixed-point (x16) disparity, normalize an 8-bit copy for display.
    left_matcher->compute(left_img, right_img, disparity_matrix);
    Mat disp8;
    normalize(disparity_matrix, disp8, 0, 255, NORM_MINMAX, CV_8UC1);
    namedWindow("disp8", WINDOW_NORMAL);
    imshow("disp8", disp8);
    // Convert to float pixels and add back the horizontal offset introduced by
    // cropping the two views at different x positions.
    disparity_matrix.convertTo(disparity_matrix, CV_32F, 1.0 / 16.0f);
    disparity_matrix = disparity_matrix + (rect_l.x - rect_r.x) - 0.4 * rect_l.width;
    // Back-project the disparities to 3-D points in the member cloud.
    disparity2pointcloud(disparity_matrix, rect_roi, point_cloud);
    return true;
}

// Debug helper: draws a fixed green rectangle on src. NOTE(review): cv::Mat has
// shallow const semantics — the pixel data behind the const reference IS mutated.
void DepthCal::test(const Mat& src){
    rectangle(src, Rect(100, 100, 100, 100), Scalar(0, 255, 0), 2);
}