/**
 * @file object_detect.cc
 * @author your name (you@domain.com)
 * @brief 目标检测相关事务
 * @version 0.1
 * @date 2022-09-17
 * 
 * @copyright Copyright (c) 2022
 * 
 */

#include "imageProcess.h"
#include "face_detect.h"
#include "object_detect.h"
#include "object_postprocess.h"
#include "pose_detect.h"
#include "rknn_api.h"
#include <semaphore.h> // sem_init()
extern sem_t binSem_1;

extern mutex mtxQueueReadImage_640;
extern queue<pair<pair<string,Mat>,Mat>> queueReadImage_640;
extern int target_368;
extern char *yolov5_model_path;

//目标检测模型输出到人体关键点检测模型的数据 Todo:可以换成静态变量
mutex mtxQueueOutput_368;
queue<pair<vector<int>, Mat>> queueOutput_368;

//1、3号线程同步使用的数据类型
extern sem_t binSem_save;
extern mutex mtxQueueSaveimg_368; 
extern queue<pair<string, Mat>> queueSaveimg_368;

//rknn模型输入输出参数
static rknn_context ctx;  
static rknn_input inputs[1];
static rknn_output outputs[3];
static vector<float> out_scales;
static vector<uint8_t> out_zps;

static void init_rknn_inoutput(){
    inputs[0].index = 0;
    inputs[0].type = RKNN_TENSOR_UINT8;
    inputs[0].size = 640 * 640 * 3;             //width * height * channel;
    inputs[0].fmt = RKNN_TENSOR_NHWC;
    inputs[0].pass_through = 0;

    for (int i = 0; i <3; i++)
    {
        outputs[i].want_float = 0;
    }
}

// @brief 检测一张图，将图上的目标和位置信息通过vector<int>型数组传递出来
// @param img 
// @return ret -1 表示异常，0表示正常
// @brief Run object detection on one image and report results through `result`.
//        Layout of `result`: OBJ_CLASS_NUM per-class presence flags (0/1),
//        followed by left, top, right, bottom of the highest-confidence box.
// @param img    input image; fed raw to the model (expected 640x640x3 — TODO confirm)
// @param result output vector of size OBJ_CLASS_NUM + 4, pre-zeroed by caller
// @param nums   incremented once per detected object; set to 0 when none found
// @return non-zero on rknn failure, 0 on success
static int detect(Mat img, vector<int> &result, int *nums){
    detect_result_group_t detect_result_group;
    int ret = -1;
    inputs[0].buf = img.data;
    ret = rknn_inputs_set(ctx, 1, inputs);
    if(ret != 0)
            cout <<"rknn_inputs_set ret:" << ret <<endl;
    ret = rknn_run(ctx, NULL);
    if(ret != 0)
            cout <<"rknn_run ret:" << ret <<endl;
    ret = rknn_outputs_get(ctx, 3, outputs, NULL);
    if(ret != 0){
            cout <<"rknn_outputs_get ret:" << ret <<endl;
            return ret;
    }

    //post process
    int img_width = img.cols;
    int img_height = img.rows;
    float scale_w = (float)640 / img_width;
    // letterbox resize keeps the aspect ratio, so the h scale equals the w scale
    float scale_h = scale_w;
    // vertical padding added by the letterbox (half of the unused height)
    float offset_h = 0.5f * (640 - img_height * scale_w);

    // post-processing thresholds
    float vis_threshold = 0.1;
    float nms_threshold = 0.5;
    float conf_threshold = 0.1;

    ret = post_process((uint8_t *)outputs[0].buf, (uint8_t *)outputs[1].buf, (uint8_t *)outputs[2].buf, 640, 640,
                conf_threshold, nms_threshold, vis_threshold, scale_w, scale_h, out_zps, out_scales, &detect_result_group, offset_h);
    if(detect_result_group.count == 0)       *nums = 0;

    // Track the highest-confidence detection.
    // BUG FIX: `prop` is a fractional confidence score; the old `int max_value`
    // truncated it to 0 on assignment, so the comparison was always true and the
    // LAST box was kept instead of the best one.
    float max_prop = 0.0f;
    for(int i = 0; i < detect_result_group.count; i++){
        int result_id = detect_result_group.results[i].box.classId;
        result[result_id] = 1;

        // remember the box of the most confident detection
        if(detect_result_group.results[i].prop > max_prop)
        {
            max_prop = detect_result_group.results[i].prop;
            result[OBJ_CLASS_NUM] = detect_result_group.results[i].box.left;
            result[OBJ_CLASS_NUM + 1] = detect_result_group.results[i].box.top;
            result[OBJ_CLASS_NUM + 2] = detect_result_group.results[i].box.right;
            result[OBJ_CLASS_NUM + 3] = detect_result_group.results[i].box.bottom;
        }

        *nums += 1;
    }
    // BUG FIX: release the buffers allocated by rknn_outputs_get; the original
    // never did, leaking output memory on every frame.
    rknn_outputs_release(ctx, 3, outputs);
    return ret;
}

// @brief Read an entire binary file into a freshly malloc'd buffer.
// @param filename   path of the model file
// @param model_size out: number of bytes read (only written on success)
// @return malloc'd buffer the CALLER must free, or NULL on any failure
unsigned char *load_model(const char *filename, int *model_size)
{
    FILE *fp = fopen(filename, "rb");
    if(fp == NULL) {
        printf("fopen %s fail!\n", filename);
        return NULL;
    }
    fseek(fp, 0, SEEK_END);
    long model_len = ftell(fp);
    if(model_len < 0) {                       // fixed: ftell failure was ignored
        printf("ftell %s fail!\n", filename);
        fclose(fp);
        return NULL;
    }
    unsigned char *model = (unsigned char*)malloc(model_len);
    if(model == NULL) {                       // fixed: OOM caused a NULL write in fread
        printf("malloc %ld bytes fail!\n", model_len);
        fclose(fp);
        return NULL;
    }
    fseek(fp, 0, SEEK_SET);
    if((size_t)model_len != fread(model, 1, model_len, fp)) {
        printf("fread %s fail!\n", filename);
        free(model);
        fclose(fp);                           // fixed: fp leaked on this path
        return NULL;
    }
    *model_size = (int)model_len;
    fclose(fp);                               // fp is known non-NULL here; guard removed
    return model;
}

//载入yolov5模型, 并打印模型信息
static int init_rknn_model(){
    int ret;
    int model_data_size = 0;
    unsigned char *model_data = load_model(yolov5_model_path, &model_data_size);
  
    ret = rknn_init(&ctx, model_data, model_data_size, 0);
    if(ret != 0)
    {
        cout <<"rknn_inputs_set failed!" <<endl;
        return -1;
    }        
    
    //查询rknn output的值放入
    rknn_tensor_attr output_attrs[3];
    memset(output_attrs, 0, sizeof(output_attrs));
    for (int i = 0; i < 3; i++)
    {
        output_attrs[i].index = i;
        ret = rknn_query(ctx, RKNN_QUERY_OUTPUT_ATTR, &(output_attrs[i]),
                         sizeof(rknn_tensor_attr));
        printRKNNTensor(&(output_attrs[i]));
    }

    //记录后处理需要的scale 和 zero_point
    for (int i = 0; i < 3; ++i)
    {   
        out_scales.push_back(output_attrs[i].scale);
        out_zps.push_back(output_attrs[i].zp);
    }
    return ret;
}

// @brief 同步获取数据的队列量
// @param nums ：需要从队列中删去的数据量
// @brief Discard `nums` entries from the shared 640 image queue so this
//        thread's position stays in sync with the other consumers.
// @param nums number of entries to drop; blocks (polling) until each is available
static void sysn_queue(int nums){
    for(int i = 0; i < nums; i++)
    {
        // FIX: the original polled queueReadImage_640.empty() WITHOUT the lock,
        // racing the producer thread. Check-and-pop now happens atomically
        // under the mutex; the lock is dropped before sleeping.
        for(;;)
        {
            mtxQueueReadImage_640.lock();
            if(!queueReadImage_640.empty())
            {
                queueReadImage_640.pop();
                mtxQueueReadImage_640.unlock();
                break;
            }
            mtxQueueReadImage_640.unlock();
            usleep(1000);
        }
    }
}

// @brief 统计2s10张图片输入，对应各类行为特征的个数
// @param *p_cnt_sleepy p_cnt_distracted >=5 返回
// @param  *p_i 使用多少张图片进行检测
// @return 返回Mat格式图像数据 
// @brief Run detection over up to 10 frames (~2s of input) and count per-frame
//        sleepy / distracted indicators; returns early once either counter hits 5.
// @param p_cnt_sleepy     out: frames with an eyes-closed / yawn target (class id 0 or 1)
// @param p_cnt_distracted out: frames with a phone target (class id 2 or 3)
// @param p_i              out: number of frames actually consumed
// @return the last processed frame (annotated with the best box when a
//         threshold was reached)
static Mat calc_cnt(int *p_cnt_sleepy, int *p_cnt_distracted, int *p_i){
    int ret;
    Mat img;
    int total_objects = 0;
    int i = 0;
    for(; i < 10; i++)
    {
        // layout: OBJ_CLASS_NUM class flags, then left, top, right, bottom
        vector<int> results(OBJ_CLASS_NUM + 4, 0);
        img = select_img_from_640_queue(1);
        ret = detect(img, results, &total_objects);
        if(ret <= -1){
            cout << "后处理异常，退出！"<<endl;
            return img;
        }
        if(total_objects == 0){
            // nothing detected in this frame
            continue;
        }
        for(int id = 0; id < 4; id++)
        {
            // eyes-closed / yawn target -> sleepy counter
            if(results[id] == 1 && (id == 0 || id == 1))
            {
                *p_cnt_sleepy += 1;
                break;
            }
            // phone target -> distracted counter; hand the frame + box to the
            // pose-detection model for interaction analysis
            if(results[id] == 1 && (id == 2 || id == 3))
            {
                *p_cnt_distracted += 1;
                mtxQueueOutput_368.lock();
                queueOutput_368.push(make_pair(results, img));
                mtxQueueOutput_368.unlock();
                break;
            }
        }

        if(*p_cnt_sleepy >= 5 || *p_cnt_distracted >= 5){
            *p_i = i + 1;
            // BUG FIX: the old `cout << (a, b)` used the C++ comma operator and
            // printed only the SECOND value of each pair; print all 4 coordinates.
            cout << results[OBJ_CLASS_NUM] << "," << results[OBJ_CLASS_NUM + 1]
                 << " " << results[OBJ_CLASS_NUM + 2] << "," << results[OBJ_CLASS_NUM + 3] << endl;
            cv::rectangle(img, Point(results[OBJ_CLASS_NUM], results[OBJ_CLASS_NUM + 1]), Point(results[OBJ_CLASS_NUM + 2], results[OBJ_CLASS_NUM + 3]), Scalar(255,0,255),3,8,0);
            return img;
        }
    }
    // all 10 frames consumed without hitting a threshold
    *p_i = 10;
    return img;
}

/*-------------------------------------------
                线程3:目标检测
-------------------------------------------*/
// @brief Thread 3 entry: object-detection state machine. Pins itself to
//        `cpu_id`, syncs its queue position with the face-detection thread,
//        loads the YOLOv5 rknn model, then consumes frames in windows of 10
//        (~2s each), transitioning between NORMALDRING / SLEEPY / DISTRACTED.
//        Flagged frames are pushed to queueSaveimg_368 and thread 1 is woken
//        via binSem_save to persist them.
// @param cpu_id      CPU core to bind this thread to
// @param remain_task number of frames still left for this thread to process
// @return 0 (last detect/pose ret) on normal exit, -1 on setup failure
int object_detect(int cpu_id, int remain_task){
    struct timeval start_time, stop_time, start_time1, stop_time1;
    gettimeofday(&start_time, NULL);
    // pin this thread to the requested CPU core
    cpu_set_t mask;
    CPU_ZERO(&mask);
    CPU_SET(cpu_id, &mask);
    if (pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask) < 0)
    {
        cerr << "set thread affinity failed" << endl;
        return -1;
    }
    printf("Bind object_detect thread to CPU %d\n", cpu_id);

    // first catch up with the face-detection thread's queue position
    int n = target_368;
    sysn_queue(n - remain_task);
    n = remain_task;
    printf("目标检测线程和人脸检测线程 数据已同步\n");    

    int ret = 0;
    ret = init_rknn_model();
    if(ret != 0)             return -1;
    init_rknn_inoutput();

    int status = NORMALDRING;
    int sleepy_start_time = 0, sleepy_during_time = 0;                      // fatigue start time and accumulated duration (s)
    int distracted_start_time = 0, distracted_during_time = 0;              // distraction start time and accumulated duration (s)
    // main loop: one iteration per 10-frame window
    while(n > 0)
    {
        int cnt_sleepy = 0;
        int cnt_distracted = 0;
        int i = 0;
        // drop any stale frames queued for the pose-detection model
        queueOutput_368 = queue<pair<vector<int>, Mat>>();
        switch (status)
        {
            case NORMALDRING:
            {
                int current_time = (target_368 - n) / 5;     //(96 - 80) / 5 = 5
                // fewer than 10 frames left: not enough for a full window
                if(n < 10)
                {
                    i = 10 - n;
                    break;
                }

                Mat img = calc_cnt(&cnt_sleepy, &cnt_distracted, &i);
                //Todo: the recorded time should already be when the behavior actually began
                // 5 of 10 frames sleepy -> fatigued driving
                if(cnt_sleepy >= 5){
                    // video time at which fatigue began
                    sleepy_start_time = current_time;
                    status = SLEEPY;
                    printf("在第%d秒, 开始疲劳驾驶...\n", sleepy_start_time);
                    sleepy_during_time += 2;
                    //Todo: notify thread 1 to save the image locally; thread 1 saves one image at a time and blocks otherwise
                    string s = "sleepy";
                    s += "_" + to_string(sleepy_start_time) + "_2";
                    mtxQueueSaveimg_368.lock();
                    queueSaveimg_368.push(make_pair(s, img));
                    mtxQueueSaveimg_368.unlock();
                    sem_post(&binSem_save);
                    break;
                }
                else if(cnt_distracted >= 5){
                    // run pose_detect() on the queued frames to check body-object
                    // interaction; returns 0 when interaction is found, -1 otherwise
                    ret = pose_detect();
                    if(!ret){
                        status = DISTRACTED;
                        distracted_start_time = current_time;
                        printf("在第%d秒, 开始分心驾驶...\n", distracted_start_time);
                        distracted_during_time += 2;
                        //Todo: notify thread 1 to save the image locally; thread 1 saves one image at a time and blocks otherwise
                        string s = "distracted";
                        s += "_" + to_string(distracted_start_time) + "_2";
                        mtxQueueSaveimg_368.lock();
                        queueSaveimg_368.push(make_pair(s, img));
                        mtxQueueSaveimg_368.unlock();
                        sem_post(&binSem_save);     
                        break;
                    }
                    //Todo: handle the case where the "held-object check" fails ...
                    
                }
                printf("在第%d秒, 检测结果是正常驾驶\n", current_time);
                break;
            }
            case SLEEPY:
            {
                Mat img = calc_cnt(&cnt_sleepy, &cnt_distracted, &i);
                // few sleepy frames in this window -> back to normal driving
                if(cnt_sleepy <= 2)     {
                    status = NORMALDRING;
                    printf("在第%d秒, 开始的疲劳驾驶切换为正常驾驶, 一共持续%d秒\n", sleepy_start_time, sleepy_during_time);
                    sleepy_start_time = 0, sleepy_during_time = 0;
                    break;       
                }
                sleepy_during_time += 2;
                printf("在第%d秒, 开始的疲劳驾驶, 持续%d秒\n", sleepy_start_time, sleepy_during_time);
                //Todo: notify thread 1 to save the image locally; thread 1 saves one image at a time and blocks otherwise
                string s = "sleepy";
                s += "_" + to_string(sleepy_start_time) + "_" + to_string(sleepy_during_time);
                mtxQueueSaveimg_368.lock();
                queueSaveimg_368.push(make_pair(s, img));
                mtxQueueSaveimg_368.unlock();
                sem_post(&binSem_save);   
                break; 
            }
            case DISTRACTED:
            {
                Mat img = calc_cnt(&cnt_sleepy, &cnt_distracted, &i);
                // few distracted frames in this window -> back to normal driving
                if(cnt_distracted <= 2) {
                    status = NORMALDRING;
                    printf("在第%d秒, 开始的分心驾驶切换为正常驾驶, 一共持续%d秒\n", distracted_start_time, distracted_during_time);
                    distracted_start_time = 0, distracted_during_time = 0;
                    break;
                }
                distracted_during_time += 2;
                printf("在第%d秒, 开始的分心驾驶, 持续%d秒\n", distracted_start_time, distracted_during_time);
                //Todo: notify thread 1 to save the image locally; thread 1 saves one image at a time and blocks otherwise
                string s = "distracted";
                s += "_" + to_string(distracted_start_time) + "_" + to_string(distracted_during_time);
                mtxQueueSaveimg_368.lock();
                queueSaveimg_368.push(make_pair(s, img));
                mtxQueueSaveimg_368.unlock();
                sem_post(&binSem_save); 
                break;  
            }  
        }
        sem_post(&binSem_1);
        sysn_queue(10 - i);         // thread 2's queue consumption has not kept up with the frames removed here
        n = n - 10;         
    }
    gettimeofday(&stop_time, NULL);
    printf(" 线程3:目标检测总共用时： %f ms\n",(__get_us(stop_time) - __get_us(start_time)) / 1000);
    return ret;
}
