#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <opencv2/opencv.hpp>
#include <drive/BoundingBox.h>
#include <drive/BoundingBoxes.h>
#include "yolov5/yolov5.h"
#include "pfld/pfld.h"
#include "data/results.h"
#include "utils/dataset.h"

using namespace std;
using namespace std::chrono::_V2;
// Scratch message reused for every bounding box pushed into the outgoing msg.
drive::BoundingBox boundingRect;
const int CAMERA_ID_HAND = 0;

ros::Time CurAnalyzeStamp;              // stamp of the frame currently under analysis
ros::Publisher StampInfoPub;            // publishes drive::BoundingBoxes on /camera_csi0/cur_result
static float prob[PFLD::OUTPUT_SIZE];   // output buffer filled by PFLD::AnalyzeOneFace

DriverResult CurDriverResult;   // per-frame data taken directly from the YOLO detections
Results CurResults;             // state after PFLD analysis; partly derived from CurDriverResult
//BoudingRect boundingRect;     // (legacy) struct once used to push detected YOLO boxes into the msg

/// Crop the detected face out of the driver frame and run PFLD landmark
/// analysis on it, storing the processed result back into driverResult.
///
/// @param frameDriver  full camera frame (BGR) the YOLO boxes refer to
/// @param stampDriver  capture stamp of the frame (currently unused here;
///                     kept for interface stability with existing callers)
/// @param driverResult in: RectFacePoint from YOLO; out: DealFaceResult(prob)
void doFacePointDetect(cv::Mat &frameDriver, ros::Time &stampDriver, DriverResult &driverResult)
{
    // Map the YOLO-space face box back onto original-frame coordinates.
    cv::Rect faceRect = YoloV5::get_rect(frameDriver, driverResult.RectFacePoint);

    // Guard the ROI before slicing — an out-of-range rect makes cv::Mat(rect)
    // throw and crash the node. Note OpenCV's validity rule is inclusive:
    // x >= 0, y >= 0, x + width <= cols, y + height <= rows, so boxes that
    // start at 0 or touch the right/bottom edge are still valid crops.
    if (faceRect.x >= 0 && faceRect.y >= 0 && faceRect.width > 0 && faceRect.height > 0 &&
        (faceRect.x + faceRect.width) <= frameDriver.cols &&
        (faceRect.y + faceRect.height) <= frameDriver.rows)
    {
        cv::Mat faceMat = frameDriver(faceRect);

        cout << "抓住人脸，开始分析关键点" << endl;
        PFLD::AnalyzeOneFace(faceMat, prob);    // fills the global `prob` landmark buffer
        driverResult.DealFaceResult(prob);
    }
}

void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
    //sensor_msgs::Image ROS中image传递的消息形式
    try
    {
        ros::Time nowStamp = ros::Time::now();//两个时间戳，一个时间戳是当前的时间，另一个时间戳是当前处理的图片所带的时间戳
        ros::Time stamp = msg->header.stamp;
        cout << "接收的图像是 " << (int)((nowStamp - stamp).toSec() * 1000) << " 毫秒之前拍摄的" << endl;

        cv::Mat frame = cv_bridge::toCvShare(msg, "bgr8")->image;
        drive::BoundingBoxes result_msg;//定义一个msg，获得result结果后可以发布出去
	result_msg.LatestResultStamp = CurAnalyzeStamp;//下列三个时间戳的解释为：CurAnalyzeStamp之所以能被获得到，是因为在处理上一帧后消息发布出去，这些信息才会被当前帧获知，所以CurAnalyzeStamp在当前已经被分析好，成为LatestResultStamp帧
        CurAnalyzeStamp = stamp;
	result_msg.CurAnalyzeStamp  = stamp;

#pragma region 用于将results中的数据放入msg中
        // 准备发布yolo检测结果,将几个检测框数据push到msg中
        if(CurResults.HeadCaptured){
            boundingRect.Class="HEAD";
            //boundingRect.class_id=0;
            cv::Rect rect=YoloV5::get_rect(frame,CurDriverResult.RectHead);
            boundingRect.Rect_x = rect.x;
            boundingRect.Rect_y = rect.y;
            boundingRect.Rect_w = rect.width;
            boundingRect.Rect_h = rect.height;
            result_msg.HeadCaptured=true;
            result_msg.boundingboxes.push_back(boundingRect);
        }
        if(CurResults.FaceCaptured){
            boundingRect.Class="FACE";
            //boundingRect.class_id=1;
            cv::Rect rect=YoloV5::get_rect(frame,CurDriverResult.RectFace);
            boundingRect.Rect_x = rect.x;
            boundingRect.Rect_y = rect.y;
            boundingRect.Rect_w = rect.width;
            boundingRect.Rect_h = rect.height;

            result_msg.FaceCaptured=true;
            result_msg.boundingboxes.push_back(boundingRect);

        }
        if(CurResults.FaceLeftCaptured){
            boundingRect.Class="FACE_LEFT";
            //boundingRect.class_id=2;
            cv::Rect rect=YoloV5::get_rect(frame,CurDriverResult.RectFace);
            boundingRect.Rect_x = rect.x;
            boundingRect.Rect_y = rect.y;
            boundingRect.Rect_w = rect.width;
            boundingRect.Rect_h = rect.height;
            result_msg.boundingboxes.push_back(boundingRect);
            result_msg.FaceLeftCaptured=true;

        }
        if(CurResults.FaceRightCaptured){
            boundingRect.Class="FACE_RIGHT";
            //boundingRect.class_id=3;
            cv::Rect rect=YoloV5::get_rect(frame,CurDriverResult.RectFace);
            boundingRect.Rect_x = rect.x;
            boundingRect.Rect_y = rect.y;
            boundingRect.Rect_w = rect.width;
            boundingRect.Rect_h = rect.height;
            result_msg.boundingboxes.push_back(boundingRect);
            result_msg.FaceRightCaptured=true;
        }
        if(CurResults.FaceUpCaptured){
            boundingRect.Class="FACE_UP";
            //boundingRect.class_id=4;
            cv::Rect rect=YoloV5::get_rect(frame,CurDriverResult.RectFace);
            boundingRect.Rect_x = rect.x;
            boundingRect.Rect_y = rect.y;
            boundingRect.Rect_w = rect.width;
            boundingRect.Rect_h = rect.height;
            result_msg.boundingboxes.push_back(boundingRect);
            result_msg.FaceUpCaptured=true;

        }
        if(CurResults.FaceDownCaptured){
            boundingRect.Class="FACE_DOWN";
            //boundingRect.class_id=5;
            cv::Rect rect=YoloV5::get_rect(frame,CurDriverResult.RectFace);
            boundingRect.Rect_x = rect.x;
            boundingRect.Rect_y = rect.y;
            boundingRect.Rect_w = rect.width;
            boundingRect.Rect_h = rect.height;
            result_msg.boundingboxes.push_back(boundingRect);
            result_msg.FaceDownCaptured=true;

        }
        if(CurResults.HasCigarette){
            boundingRect.Class="CIGARETTE";
            //boundingRect.class_id=6;
            cv::Rect rect=YoloV5::get_rect(frame,CurDriverResult.RectCigarette);
            boundingRect.Rect_x = rect.x;
            boundingRect.Rect_y = rect.y;
            boundingRect.Rect_w = rect.width;
            boundingRect.Rect_h = rect.height;
            
            result_msg.boundingboxes.push_back(boundingRect);
            result_msg.HasCigarette=true;

        }
        if(CurResults.IsEyeClosed){
            boundingRect.Class="LEFTEYE";
            //boundingRect.class_id=7;
            cv::Rect rect1=YoloV5::get_rect(frame,CurResults.rectEyeLeft);
            boundingRect.Rect_x = rect1.x;
            boundingRect.Rect_y = rect1.y;
            boundingRect.Rect_w = rect1.width;
            boundingRect.Rect_h = rect1.height;
            result_msg.boundingboxes.push_back(boundingRect);
            boundingRect.Class="RIGHTEYE";
            cv::Rect rect2=YoloV5::get_rect(frame,CurResults.rectEyeRight);
            boundingRect.Rect_x = rect2.x;
            boundingRect.Rect_y = rect2.y;
            boundingRect.Rect_w = rect2.width;
            boundingRect.Rect_h = rect2.height;
            result_msg.boundingboxes.push_back(boundingRect);
            result_msg.IsEyeClosed=true;

        }
        if(CurResults.IsYawn){
            boundingRect.Class="YAWN";
            //boundingRect.class_id=7;
            cv::Rect rect=YoloV5::get_rect(frame,CurResults.rectMouth);
            boundingRect.Rect_x = rect.x;
            boundingRect.Rect_y = rect.y;
            boundingRect.Rect_w = rect.width;
            boundingRect.Rect_h = rect.height;
            result_msg.boundingboxes.push_back(boundingRect);
            result_msg.IsYawn=true;

        }
        if(CurResults.HasPhone){
            boundingRect.Class="PHONE";
            //boundingRect.class_id=7;
            cv::Rect rect=YoloV5::get_rect(frame,CurDriverResult.RectPhone);
            boundingRect.Rect_x = rect.x;
            boundingRect.Rect_y = rect.y;
            boundingRect.Rect_w = rect.width;
            boundingRect.Rect_h = rect.height;
            result_msg.boundingboxes.push_back(boundingRect);
            result_msg.HasPhone=true;

        }
        if(CurResults.IsDistracted){
            result_msg.IsDistracted=true;
        }
        if(CurResults.IsDozeNod){
            result_msg.IsDozeNod=true;
        }
       // if(CurResults.IsEyeClosed){
           // result_msg.IsEyeClosed=true;
      //  }
        //if(CurResults.IsYawn){
         //   result_msg.IsYawn=true;
       // }
        if(CurResults.IsEyeOcclusion){
            result_msg.IsEyeOcclusion=true;
        }
        if(CurResults.IsMouthOcclusion){
            result_msg.IsMouthOcclusion=true;
        }

        StampInfoPub.publish(result_msg);
#pragma region 准备分析结果,包括yolo和pfld
        // 得到分析结果，发布特定数据
        vector<Yolo::Detection> result = YoloV5::AnalyzeOneShot(frame);
        CurDriverResult.DealYoloResult(result); // 这一步将所有检测到的数据放进curdriverresult中，rect先不做转换。在canvas上绘图时再具体生成坐标
        cout << "Yolo V5 检测完成" << endl;
        if (CurDriverResult.FaceCaptured   // 有正面的像样的人脸了，实在太好了，可以识别身份和面部动作了
            || CurDriverResult.FaceLeftCaptured || CurDriverResult.FaceRightCaptured || CurDriverResult.FaceUpCaptured || CurDriverResult.FaceDownCaptured)  // 测试用
            {
                doFacePointDetect(frame, nowStamp, CurDriverResult);//这个函数里面已经调用过了dealfaceresult和analyzeoneface
            }
        else
            {
                CurDriverResult.ResetPointState();
            }
        CurResults.pushDriverResult(nowStamp,CurDriverResult);
        CurResults.AnalyzeState();
#pragma endregion
    }
    catch (cv_bridge::Exception& e)
    {
        ROS_ERROR("Could not convert from '%s' to 'bgr8'.", msg->encoding.c_str());
    }
}

int main(int argc, char **argv)
{
    ros::init(argc, argv, "image_analyze_node");
    ros::NodeHandle node_analyze;

    //防止崩溃，所有模型引擎优先初始化
    cudaSetDevice(DEVICE);
    YoloV5::InitYoloV5Engine();
    cout << "YoloV5 引擎序列化完成" << endl;
    PFLD::InitPFLDEngine();
    cout<<"PFLD 引擎序列化完成"<<endl;

    CurAnalyzeStamp = ros::Time::now();
    StampInfoPub = node_analyze.advertise<drive::BoundingBoxes>("/camera_csi0/cur_result", 1);

    image_transport::ImageTransport it(node_analyze);
    image_transport::Subscriber sub = it.subscribe("/camera_csi0/frames", 1, imageCallback);

    ros::spin();

    YoloV5::ReleaseYoloV5Engine();
    PFLD::ReleasePFLDEngine();
}
