#ifndef VISION_H
#define VISION_H

#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>
#include "zhnmat.hpp"

// Camera frame dimensions in pixels (per eye of the stereo pair, per the
// cv::Size use below). Declared `inline` (C++17) so every translation unit
// shares one definition instead of a per-TU internal-linkage copy.
// Kept as double because existing callers may rely on floating-point
// arithmetic (e.g. division) with these values.
inline constexpr double IMAGE_WIDTH = 640;
inline constexpr double IMAGE_HIGHT = 480;  // legacy misspelling kept for existing callers
inline constexpr double IMAGE_HEIGHT = IMAGE_HIGHT;  // correctly spelled alias; prefer in new code

// Stereo-vision pipeline: SURF-based detection of a reference object in a
// stereo camera feed, with triangulation into camera and world coordinates.
// Method bodies live in the implementation file; this header only declares
// the interface, so the contract notes below are hedged where the .cpp is
// needed to confirm them.
class Vision
{
public:
    Vision();
    ~Vision() = default;

    /// Read a reference image from file and precompute its SURF features
    /// (stored in objKeypoints / objDescriptors for later matching).
    void Add_ImageFromFile(const std::string imagePath);

    bool ImgShow = false;  // enables the on-screen display path (was uninitialized)
    int ImgShow_Init();
    int ImgShow_Update(bool calibra);  // `calibra` presumably toggles calibration view — confirm in .cpp
    void ImgShow_Exit();

    /// Run one detection pass on `src`. Returns an int status code whose
    /// meaning is defined in the implementation file.
    int Detect_Once(cv::Mat& src);

    /// Map `boxPoint` into the scene as `scenePoint` — presumably via the
    /// homography H (see private members); confirm against the .cpp.
    int Point_Perspective(const cv::Point2f& boxPoint, cv::Point2f& scenePoint);

    /// Triangulate pixel measurements (xl, xr from left/right images, yl)
    /// into a 3-D point in the camera frame.
    zhnmat::Vector3d Cal_LocationCamera(double xl, double xr, double yl);

    /// Transform a camera-frame point into the world frame (uses WR/WT).
    zhnmat::Vector3d Cal_LocationWorld(const zhnmat::Vector3d& v);

    /// Produce rectified left/right images from the raw frame `src`.
    void Camera_Calibrate(const cv::Mat& src, cv::Mat& imgL, cv::Mat& imgR);

    // Static so a C-style callback (presumably cv::setMouseCallback, which
    // cannot capture `this`) can write them. Defined in the .cpp — no
    // in-class initializer is possible without touching that definition.
    static int cursorx, cursory;
    static bool clicked;

private:
    cv::VideoCapture cap;
    // Explicit casts silence double->int narrowing; the values are exact.
    cv::Size imageSize = cv::Size(static_cast<int>(IMAGE_WIDTH),
                                  static_cast<int>(IMAGE_HIGHT));
    cv::Mat cameraMatrix1, cameraMatrix2;   // intrinsics, left/right camera
    cv::Mat distCoeffs1, distCoeffs2;       // distortion coefficients, left/right
    cv::Mat R1, R2, P1, P2;                 // stereo rectification outputs
    cv::Mat mapl1, mapl2, mapr1, mapr2;     // undistort/rectify remap tables
    zhnmat::Mat zhnP1;                      // P1 mirrored into zhnmat form
    zhnmat::Mat WR, WT;  // Transform from camera coordinates to world coordinates
    cv::Mat H;                              // object->scene homography
    double TtimesF = 0.0;  // presumably baseline (T) x focal length (F) — confirm in .cpp; was uninitialized
    cv::Ptr<cv::xfeatures2d::SURF> detector;
    std::vector<cv::KeyPoint> objKeypoints;  // reference image SURF keypoints
    std::vector<cv::Point2f> objCorners;     // reference image corner points
    cv::Mat objDescriptors;                  // reference image SURF descriptors
    int imgSaveCnt = 0;  // counter for saved frames (was uninitialized)
};

#endif
