/**
 * @file FrameProcessor.cpp
 * @brief 感知器的实现
 *        感知器有三种状态：LIBREALSENSE、VIDEOCAPTURE 和 IMREADFIGURE
 *        LIBREALSENSE 状态下调用RealSense相机获取相机影像流
 *        VIDEOCAPTURE 状态下调用input视频影像流
 *        IMREADFIGURE 状态下读取单张静态图片
 *        只提供初始化感知器状态以及获取RGB图像和深度图像的接口
 * @author Rosen (1018477962@qq.com)
 * @version 1.0
 * @date 2021-09-26
 * 
 * @copyright Copyright (c) 2021 by Rosen.
 */
#include "FrameProcessor.h"

using namespace cv;

/**
 * @brief  初始化感知器状态
 * @param  frameStream  影像流输入路径
 */
void FrameProcessor::InitState(const std::string &frameStream)
{
    std::string format;
    if (!this->_frameStream.empty())
    {
        this->_frameStream = frameStream;
        format = this->_frameStream.substr(this->_frameStream.size() - 4, this->_frameStream.size() - 1);
    }
    if (this->_frameStream.empty())
    {
        this->_state = LIBREALSENSE;
        // 设置影像流
        this->_set_OriginStream();
        // 打开影像流通道
        this->_startPipe();
        // 获取数据流
        auto rgbStream = this->_profile.get_stream(RS2_STREAM_COLOR).as<rs2::video_stream_profile>();
        auto depthStream = this->_profile.get_stream(RS2_STREAM_DEPTH).as<rs2::video_stream_profile>();
        // 获取相机内参
        this->_rgbIntri = rgbStream.get_intrinsics();
        this->_depthIntri = depthStream.get_intrinsics();
        // 直接获取从深度相机坐标系到RGB相机坐标系的欧式变换矩阵
        rs2_error *error;
        rs2_get_extrinsics(depthStream, rgbStream, &this->_extrinDepth2Color, &error);
    }
    else if (format == ".jpg" || format == ".png")
    {
        this->_state = IMREADFIGURE;
        this->_rgbImg = cv::imread(this->_frameStream);
    }
    else if (format == ".mp4" || format == ".avi")
    {
        this->_state = VIDEOCAPTURE;
        this->open(this->_frameStream);
    }
    else
        throw std::runtime_error("the input path of frameStream is invaid!");
}

/**
 * @brief Open the streaming pipeline.
 * @note  Call _set_OriginStream() first so _cfg describes the streams.
 */
void FrameProcessor::_startPipe()
{
    // Pass the member config directly; the original made a needless copy
    // of the rs2::config object before starting the pipeline.
    this->_profile = this->start(this->_cfg);
}

/**
 * @brief Configure the raw stream parameters (resolution, format, FPS).
 * @note  On the RealSense D435i the frame rate is tied to the resolution:
 *        large resolutions allow at most 30 FPS, while 60 FPS requires a
 *        resolution of at most 640x480.
 */
void FrameProcessor::_set_OriginStream()
{
    const auto width = cameraParam.imgWidth;
    const auto height = cameraParam.imgHeight;
    const auto fps = cameraParam.FPS;
    // Color stream: 8-bit BGR at the configured size and frame rate.
    this->_cfg.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_BGR8, fps);
    // Depth stream: 16-bit depth (Z16) at the same size and frame rate.
    this->_cfg.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps);
}

/**
 * @brief Fetch an RGB image according to the perceptor state:
 *        LIBREALSENSE - live RealSense color stream
 *        IMREADFIGURE - still image re-read from disk
 *        VIDEOCAPTURE - cv::VideoCapture video stream
 * @return cv::Mat the current RGB frame
 * @throws std::runtime_error on an unknown state
 */
cv::Mat FrameProcessor::obtain_RGBImage()
{
    switch (this->_state)
    {
    case LIBREALSENSE:
    {
        // Grab a frameset, align it, and convert its color frame to a Mat.
        frameset rawFrames = this->wait_for_frames();
        this->_alignedSet = this->process(rawFrames);
        this->_rgbImg = this->_frameToMat(this->_alignedSet.get_color_frame());
        break;
    }
    case IMREADFIGURE:
        this->_rgbImg = cv::imread(this->_frameStream);
        break;
    case VIDEOCAPTURE:
        this->read(this->_rgbImg);
        break;
    default:
        throw std::runtime_error("the state of FrameProcessor is error!");
    }
    return this->_rgbImg;
}

/**
 * @brief Fetch a depth image according to the perceptor state:
 *        LIBREALSENSE - depth frame taken from the aligned frameset
 *                       (call obtain_RGBImage() first to refresh it)
 *        VIDEOCAPTURE / IMREADFIGURE - no depth source; the previously
 *                       stored depth image (possibly empty) is returned
 * @return cv::Mat
 * @throws std::runtime_error on an unknown state
 */
cv::Mat FrameProcessor::obtain_DepthImage()
{
    switch (this->_state)
    {
    case LIBREALSENSE:
    {
        frame depthFrame = this->_alignedSet.get_depth_frame();
        this->_depthImg = this->_frameToMat(depthFrame);
        break;
    }
    case VIDEOCAPTURE:
    case IMREADFIGURE:
        // File/video input carries no depth; keep _depthImg unchanged.
        break;
    default:
        throw std::runtime_error("the state of FrameProcessor is error!");
    }
    return this->_depthImg;
}

/**
 * @brief Convert a raw RealSense frame to an OpenCV Mat.
 *
 * @param frame source frame obtained from librealsense2
 * @return Mat view of the frame's pixel buffer for BGR8, Z16, Y8 and
 *         DISPARITY32; RGB8 is channel-swapped into a new (owning) BGR Mat
 * @throws std::runtime_error for unsupported pixel formats
 * @warning Except for the RGB8 case, the returned Mat aliases memory owned
 *          by the rs2::frame (no copy is made) — it must not outlive the
 *          frame/frameset that owns the data.
 */
Mat FrameProcessor::_frameToMat(const rs2::frame &frame)
{
    auto vf = frame.as<rs2::video_frame>();
    // Plain int copies; the original bound const refs to int temporaries,
    // which compiles but adds nothing.
    const int w = vf.get_width();
    const int h = vf.get_height();

    // Map the source pixel format to the matching Mat type.
    switch (frame.get_profile().format())
    {
    case RS2_FORMAT_BGR8:
        return Mat(Size(w, h), CV_8UC3, (void *)frame.get_data(), Mat::AUTO_STEP);
    case RS2_FORMAT_RGB8:
    {
        // OpenCV convention is BGR, so swap the channels (this one copies).
        auto r_rgb = Mat(Size(w, h), CV_8UC3, (void *)frame.get_data(), Mat::AUTO_STEP);
        Mat r_bgr;
        cvtColor(r_rgb, r_bgr, COLOR_RGB2BGR);
        return r_bgr;
    }
    case RS2_FORMAT_Z16:
        return Mat(Size(w, h), CV_16UC1, (void *)frame.get_data(), Mat::AUTO_STEP);
    case RS2_FORMAT_Y8:
        return Mat(Size(w, h), CV_8UC1, (void *)frame.get_data(), Mat::AUTO_STEP);
    case RS2_FORMAT_DISPARITY32:
        return Mat(Size(w, h), CV_32FC1, (void *)frame.get_data(), Mat::AUTO_STEP);
    default:
        // Unreachable break after this throw removed.
        throw std::runtime_error("Frame format is not supported yet!");
    }
}

/**
 * @brief Align the depth image to the color image.
 *
 * Each depth pixel is deprojected to a 3-D point in the depth camera
 * frame, transformed into the RGB camera frame, reprojected onto the RGB
 * image plane, and the RGB pixel at the mapped (clamped) location is
 * copied into the output image.
 *
 * @param depth  raw depth image (CV_16UC1, raw units scaled by depthScale)
 * @param rgbImg color image the depth map is aligned to (CV_8UC3)
 * @return cv::Mat aligned color image (CV_8UC3, zero elsewhere)
 */
cv::Mat FrameProcessor::_depAlign2Rgb(const cv::Mat &depth, const cv::Mat &rgbImg)
{
    // 2-D pixel coordinates (depth image / color image).
    float pd_uv[2], pc_uv[2];
    // 3-D points (depth camera frame / color camera frame).
    float Pdc3[3], Pcc3[3];
    cv::Mat alignedMat = cv::Mat::zeros(rgbImg.rows, rgbImg.cols, CV_8UC3);

    // Scale converting raw depth units to meters.
    float depthScale = this->_getDepScale(this->_profile.get_device());

    for (int row = 0; row < depth.rows; row++)
    {
        for (int col = 0; col < depth.cols; col++)
        {
            // Current pixel in the depth image.
            pd_uv[0] = col;
            pd_uv[1] = row;

            // Bug fix: the depth value must be read from the depth image;
            // the original read rgbImg.at<uint16_t>(...), i.e. 16-bit data
            // out of an 8UC3 color image.
            uint16_t depPix = depth.at<uint16_t>(row, col);
            // Convert to meters.
            float depth2m = depPix * depthScale;

            // Deproject the depth pixel to a 3-D point (depth camera frame).
            rs2_deproject_pixel_to_point(Pdc3, &this->_depthIntri, pd_uv, depth2m);
            // Transform the 3-D point into the RGB camera frame.
            rs2_transform_point_to_point(Pcc3, &this->_extrinDepth2Color, Pdc3);
            // Project the 3-D point onto the RGB image plane.
            rs2_project_point_to_pixel(pc_uv, &this->_rgbIntri, Pcc3);

            // Clamp the mapped pixel to the image bounds.
            int u = (int)pc_uv[0];
            int v = (int)pc_uv[1];
            int x = u < 0 ? 0 : (u > depth.cols - 1 ? depth.cols - 1 : u);
            int y = v < 0 ? 0 : (v > depth.rows - 1 ? depth.rows - 1 : v);

            // Copy the RGB data of the mapped pixel into the output.
            for (int i = 0; i < 3; i++)
                alignedMat.at<cv::Vec3b>(y, x)[i] = rgbImg.at<cv::Vec3b>(y, x)[i];
        }
    }
    return alignedMat;
}

/**
 * @brief Query the scale converting raw depth units to meters.
 * @param  dev   camera device to inspect
 * @return float depth scale (meters per raw depth unit)
 * @throws std::runtime_error when the device has no depth sensor
 */
float FrameProcessor::_getDepScale(rs2::device dev)
{
    auto sensors = dev.query_sensors();
    for (auto &sensor : sensors)
    {
        // as<>() yields a falsy handle when the sensor is not a depth sensor.
        auto dpt = sensor.as<rs2::depth_sensor>();
        if (dpt)
            return dpt.get_depth_scale();
    }
    throw std::runtime_error("Device does not have a depth sensor");
}
