/**
* This file is part of ORB-SLAM2.
*
* Copyright (C) 2014-2016 Raúl Mur-Artal <raulmur at unizar dot es> (University of Zaragoza)
* For more information see <https://github.com/raulmur/ORB_SLAM2>
*
* ORB-SLAM2 is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* ORB-SLAM2 is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with ORB-SLAM2. If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef FRAME_H
#define FRAME_H

#include<vector>
#include <Eigen/Core>
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <unsupported/Eigen/MatrixFunctions>
#include <Eigen/Eigenvalues>
#include <unordered_map>

#include "MapPoint.h"
#include "Thirdparty/DBoW2/DBoW2/BowVector.h"
#include "Thirdparty/DBoW2/DBoW2/FeatureVector.h"
#include "ORBVocabulary.h"
#include "KeyFrame.h"
#include "ORBextractor.h"

#include <opencv2/opencv.hpp>

namespace ORB_SLAM2
{
#define FRAME_GRID_ROWS 48
#define FRAME_GRID_COLS 64

class MapPoint;
class KeyFrame;

class Frame
{
public:
    Frame();

    // Copy constructor.
    Frame(const Frame &frame);

    // Constructor for stereo cameras.
    // The imDepth here is an optional depth map for the same camera to be
    // used for evaluation purposes. In order for imDepth to actually be used
    // as ground truth depth, gtDepthAvailable should be set to true. If
    // gtDepthAvailable is "false" and imDepth is provided, it is assumed
    // that it is a predicted image quality heatmap to be used for scoring
    // the extracted keypoints.
    Frame(const cv::Mat &imLeft, 
          const cv::Mat &imRight, 
          const double &timeStamp, 
          ORBextractor* extractorLeft, 
          ORBextractor* extractorRight, 
          ORBVocabulary* voc, 
          cv::Mat &K, 
          cv::Mat &distCoef, 
          const float &bf, 
          const float &thDepth,
          const bool &gtDepthAvailable = false,
          const cv::Mat &imDepth = cv::Mat(0,0,CV_32F),
          const bool &bLogOutliers = false);

    // Constructor for RGB-D cameras.
    Frame(const cv::Mat &imGray, const cv::Mat &imDepth, const double 
&timeStamp, ORBextractor* extractor,ORBVocabulary* voc, cv::Mat &K, cv::Mat 
&distCoef, const float &bf, const float &thDepth, 
const bool &bLogOutliers = false);

    // Constructor for Monocular cameras.
    // The imDepth here is an optional depth map for the same camera to be
    // used for evaluation purposes. In order for imDepth to actually be used
    // as ground truth depth, gtDepthAvailable should be set to true. If
    // gtDepthAvailable is "false" and imDepth is provided, it is assumed
    // that it is a predicted image quality heatmap to be used for scoring
    // the extracted keypoints.
    Frame(const cv::Mat &imGray, 
          const double &timeStamp, 
          ORBextractor* extractor,ORBVocabulary* voc, 
          cv::Mat &K, 
          cv::Mat &distCoef, 
          const float &bf, 
          const float &thDepth,
          const bool &gtDepthAvailable = false,
          const cv::Mat &imDepth = cv::Mat(0,0,CV_32F),
          const bool &bLogOutliers = false);

    // Extract ORB on the image. 0 for left image and 1 for right image.
    void ExtractORB(int flag, const cv::Mat &im);
    
    // Extract ORB on the image. 0 for left image and 1 for right image.
    // This version weights the number of extracted features across the image
    // given a predicted cost heatmap image (higher values at regions of the 
    // image with potentially worse image features).
    void ExtractORBWeighted(int flag, 
                            const cv::Mat &im,
                            const cv::Mat &costmap);

    // Compute Bag of Words representation.
    void ComputeBoW();

    // Set the camera pose.
    // Tcw takes a point from the world frame to the camera frame (see mTcw).
    void SetPose(cv::Mat Tcw);
    
    // Set the ground truth pose of the camera.
    // Twc_gt takes a point from the camera frame to the world frame.
    void SetGroundTruthPose(cv::Mat Twc_gt);
    
    // Set the ground truth pose of the camera as well as the associated 
    // 6x6 covariance of Twc_gt (see mSigmaTwc_gt for the block layout).
    void SetGroundTruthPose(cv::Mat Twc_gt, 
                            const Eigen::Matrix<double, 6, 6> &SigmaTwc_gt);
    
    // Sets the reference (ground truth) camera pose as the current estimate
    // of the pose of the camera.
    void ApplyReferencePose();
    
    // Copies newly detected map points from mvpMapPoints to mvpMapPointsComp.
    // This is for logging purposes as mvpMapPointsComp is supposed to keep 
    // map points that are ruled out as outliers as well.
    void BackupNewMapPoints();
    
    // Computes keypoint quality scores given the logged reprojection errors
    // (see mvChi2 / mvChi2Dof / mvKeyQualScore).
    void ComputeKeyPtQualScores();

    // Computes rotation, translation and camera center matrices from the camera pose.
    void UpdatePoseMatrices();

    // Returns a copy of the camera center (mOw) in world coordinates.
    inline cv::Mat GetCameraCenter(){
        return mOw.clone();
    }

    // Returns a copy of the inverse rotation (camera-to-world, mRwc).
    inline cv::Mat GetRotationInverse(){
        return mRwc.clone();
    }

    // Check if a MapPoint is in the frustum of the camera
    // and fill variables of the MapPoint to be used by the tracking.
    bool isInFrustum(MapPoint* pMP, float viewingCosLimit);

    // Compute the cell of a keypoint (return false if outside the grid).
    bool PosInGrid(const cv::KeyPoint &kp, int &posX, int &posY);

    // Returns the indices of the keypoints inside a square window of radius r
    // centered at (x, y), optionally restricted to the scale levels in
    // [minLevel, maxLevel] (negative bounds disable the corresponding check).
    vector<size_t> GetFeaturesInArea(const float &x, const float  &y, const float  &r, const int minLevel=-1, const int maxLevel=-1) const;

    // Search a match for each keypoint in the left image to a keypoint in the right image.
    // If there is a match, depth is computed and the right coordinate associated to the left keypoint is stored.
    void ComputeStereoMatches();

    // Associate a "right" coordinate to a keypoint if there is valid depth in the depthmap.
    void ComputeStereoFromRGBD(const cv::Mat &imDepth);

    // Backprojects a keypoint (if stereo/depth info available) into 3D world coordinates.
    cv::Mat UnprojectStereo(const int &i);
    
    // Returns the corresponding keypoint to the given map point. Returns false
    // if such correspondence does not exist.
    bool GetCorrespondingKeyPt(MapPoint* map_point, 
                               cv::KeyPoint* keypoint);
    
    // Returns the corresponding keypoint to the given map point as well as 
    // its index. Returns false if such correspondence does not exist.
    bool GetCorrespondingKeyPt(MapPoint* map_point, 
                               cv::KeyPoint* keypoint,
                               int* idx);
    
    // Returns the median of the unmasked regions of the image.
    double GetMaskedMedian(const cv::Mat& input_img, const cv::Mat& mask);

public:
    // Vocabulary used for relocalization.
    ORBVocabulary* mpORBvocabulary;

    // Feature extractor. The right is used only in the stereo case.
    ORBextractor* mpORBextractorLeft, *mpORBextractorRight;

    // Frame timestamp.
    double mTimeStamp;

    // Calibration matrix and OpenCV distortion parameters.
    // The intrinsics are shared by all frames, hence static.
    cv::Mat mK;
    static float fx;
    static float fy;
    static float cx;
    static float cy;
    static float invfx;
    static float invfy;
    cv::Mat mDistCoef;

    // Stereo baseline multiplied by fx.
    float mbf;

    // Stereo baseline in meters.
    float mb;

    // Threshold close/far points. Close points are inserted from 1 view.
    // Far points are inserted as in the monocular case from 2 views.
    float mThDepth;

    // Number of KeyPoints.
    int N;

    // Vector of keypoints (original for visualization) and undistorted (actually used by the system).
    // In the stereo case, mvKeysUn is redundant as images must be rectified.
    // In the RGB-D case, RGB images can be distorted.
    std::vector<cv::KeyPoint> mvKeys, mvKeysRight;
    std::vector<cv::KeyPoint> mvKeysUn;
    
    // The ground truth depth for each keypoint. Obtained from simulation and
    // used for evaluation purposes.
    std::vector<float> mvKeysGTDepth;
    
    // Vector of keypoint quality scores. These are used to perform weighted BA.
    std::vector<float> mvKeyQualScore;
    
    // Vector of keypoint quality scores that are calculated online in 
    // unsupervised training mode. These values are not used to perform weighted
    // BA and are only used for generating training heatmaps and for 
    // visualization purposes in the frame drawer.
    std::vector<float> mvKeyQualScoreTrain;
    
    // Vector of the squared l2 norm of the normalized reprojection error for
    // each of the keypoints.
    std::vector<float> mvChi2;
    
    // Vector of the number of degrees of freedom for the chi2 distribution
    // of the squared l2 norm of the normalized reprojection error for
    // each of the keypoints.
    std::vector<int> mvChi2Dof;
    
    // The comprehensive vector of mapPoints associated to keypoints. The 
    // difference with mvpMapPoints is that this list keeps map points that
    // have been ruled out as outliers as well. It is used for post processing
    // and evaluation of the mappoints.
    // NULL pointer if no association.
    std::vector<MapPoint*> mvpMapPointsComp;

    // Corresponding stereo coordinate and depth for each keypoint.
    // "Monocular" keypoints have a negative value.
    std::vector<float> mvuRight;
    std::vector<float> mvDepth;

    // Bag of Words Vector structures.
    DBoW2::BowVector mBowVec;
    DBoW2::FeatureVector mFeatVec;

    // ORB descriptor, each row associated to a keypoint.
    cv::Mat mDescriptors, mDescriptorsRight;

    // MapPoints associated to keypoints, NULL pointer if no association.
    std::vector<MapPoint*> mvpMapPoints;

    // Flag to identify outlier associations.
    std::vector<bool> mvbOutlier;

    // Keypoints are assigned to cells in a grid to reduce matching complexity when projecting MapPoints.
    static float mfGridElementWidthInv;
    static float mfGridElementHeightInv;
    std::vector<std::size_t> mGrid[FRAME_GRID_COLS][FRAME_GRID_ROWS];

    // Camera pose.
    // The transformation takes a point from the world coordinate frame to
    // the camera reference frame.
    cv::Mat mTcw;
    
    // The ground truth camera pose (filled in and used only when in training 
    // mode).
    // The transformation takes a point from the camera reference frame to
    // the world reference frame.
    cv::Mat mTwc_gt;
    
    // The same as above, but takes points from the world frame to that of the 
    // camera.
    cv::Mat mTcw_gt;
    
    // The covariance of mTwc_gt. It is a 6 by 6 matrix, where the first 
    // 3 rows/columns correspond to the rotational part of the transformation
    // and the last 3 correspond to the translational part.
    // Eigen::DontAlign avoids the fixed-size-vectorizable alignment
    // requirement for this member inside a dynamically allocated Frame.
    Eigen::Matrix<double,6,6, Eigen::DontAlign>  mSigmaTwc_gt;
    
    // Denotes if the covariance of the ground truth camera pose has been set.
    bool mbPoseUncertaintyAvailable = false;
    
    // Presumably the file name of the left input image, kept for
    // logging/evaluation — TODO confirm against the constructor definitions.
    std::string mstrLeftImgName;

    // Current and Next Frame id.
    static long unsigned int nNextId;
    long unsigned int mnId;

    // Reference Keyframe.
    KeyFrame* mpReferenceKF;

    // Scale pyramid info.
    int mnScaleLevels;
    float mfScaleFactor;
    float mfLogScaleFactor;
    vector<float> mvScaleFactors;
    vector<float> mvInvScaleFactors;
    vector<float> mvLevelSigma2;
    vector<float> mvInvLevelSigma2;

    // Undistorted Image Bounds (computed once).
    static float mnMinX;
    static float mnMaxX;
    static float mnMinY;
    static float mnMaxY;

    // True until the first Frame computes the static members above.
    static bool mbInitialComputations;


private:

    // Undistort keypoints given OpenCV distortion parameters.
    // Only for the RGB-D case. Stereo must be already rectified!
    // (called in the constructor).
    void UndistortKeyPoints();

    // Computes image bounds for the undistorted image (called in the constructor).
    void ComputeImageBounds(const cv::Mat &imLeft);

    // Assign keypoints to the grid for speed up feature matching (called in the constructor).
    void AssignFeaturesToGrid();

    // Rotation, translation and camera center.
    // mRcw/mtcw: world-to-camera rotation/translation; mRwc: inverse rotation;
    // mOw: camera center in world coordinates (==mtwc).
    cv::Mat mRcw;
    cv::Mat mtcw;
    cv::Mat mRwc;
    cv::Mat mOw; //==mtwc
};

}// namespace ORB_SLAM2

#endif // FRAME_H
