/*******************************************************************************
  Copyright (C), 1988-2020, zeno Tech. Co., Ltd.
  Description:   Analyse模块
  Function List:
  History:
    <wangmc -- 2021-04-11>  创建
*******************************************************************************/

/*=============================================================================
                                 Includes
 =============================================================================*/
#include "videoPriv.h"
#include "rockx.h"

/*=============================================================================
                               Macro Definition
 =============================================================================*/
#define ANALYSER_RATIO          8192    /* Normalised coordinate scale factor */
#define ANALYSER_MAX_POINT      20      /* Max number of points in a region */
#define ANALYSER_MAX_YUVSIZE    (500*500)   /* Max cropped-image resolution */
/* Align i up/down to a multiple of n (n must be a power of two).
 * Fix: every macro parameter is fully parenthesized; the original
 * ANALYSER_ALIGN_DOWN expanded `~(n - 1)` which mis-evaluates when n is a
 * compound expression (e.g. a ternary). */
#define ANALYSER_ALIGN_UP(i, n)          (((i) + (n) - 1) & ~((n) - 1))
#define ANALYSER_ALIGN_DOWN(i, n)        ((i) & ~((n) - 1))

//#define ALG_COUNTTIME   /* enable to collect algorithm timing statistics */

/* ReadSense face-algorithm license files (authorization + license) */
#define ANALYSE_FACE_LICTION_AUTH_PATH "/mnt/mtd/ReadSense/content.auth"
#define ANALYSE_FACE_LICTION_PATH "/mnt/mtd/ReadSense/license.lic"

/* Resolution of the frame handed to the detection algorithm */
#define SCRALG_WIDTH  1280
#define SCRALG_HEIGHT 720

/* Auto-focus zone grid: the algorithm image is divided into an
   AF_ZONE_RADIO x AF_ZONE_RADIO grid of blocks (see ANALYSER_GetAlgCropObj) */
#define AF_ZONE_RADIO 15



/*=============================================================================
                        Local Structures and Typedef
 =============================================================================*/

/* Region header information */
typedef struct
{
    unsigned char   alarmType;      /* Region alarm type, see IVS_regionType_t */
    unsigned char   pointNum;       /* Number of boundary points in the region */
    unsigned char   pointType;      /* Type of the region boundary points */
} ANALYSER_regionHead_t;


/* Per-algorithm IVS object: one instance per algorithm attached to the analyser */
typedef struct
{
    struct list_head list;              /* Node in the algorithm list */
    void *hIvs;                         /* IVS algorithm handle */
    int algType;                        /* Algorithm type, see IVS_algType_t */
    ANALYSER_ctl_t *pAnalyserCtl;       /* Owning video-analysis control block */
	//ANALYSER_ivsFrameList_t ivsFrmlist; /* frame cache list */
    int frmNum;                         /* Number of cached frames */

    IVS_region_t ivsRegion;                     /* Cached region information */
    IVS_monitorRegion_t	*pWarningRegions;       /* Warning (alarm) regions */
    IVS_monitorRegion_t	*pTrackedRegions;       /* Tracked regions */
    struct timespec regionTime;                 /* Time the region info was captured */
} ANALYSER_ivsObj_t;

/* Coordinate strategy for reporting tracking-box contours */
typedef enum
{
    CONTOUR_ORIG = 0,    /* raw algorithm-image pixel coordinates */
    CONTOUR_CONVERT      /* scaled into the 0..ANALYSER_RATIO normalised space */
}ANALYSER_contourStra_t;


/*=============================================================================
                             Extern Variables
=============================================================================*/

/*=============================================================================
                             Local Variables
 =============================================================================*/
#ifdef BUILD_IVS

static int ivsInit_s;                  /* presumably set once the IVS module is initialised — confirm against code below this view */
static ANALYSER_ctl_t *pAnalyserCtl_s; /* singleton analyser control block (freed in ANALYSER_Release) */

static const char* ROCKX_MODEL_DATA_PATH =  "/user/alg/models/ROCKX"; /* ROCKX algorithm model path */

/*=============================================================================
                             Function Definition
 =============================================================================*/
/*******************************************************************************
  Function:     ANALYSER_AddRef
  Description:  Increase the reference count of the video analyser interface.
  Input:        - pThiz: video analyser interface
  Output:       none
  Return:       0: success
                -1: failure (NULL interface pointer)
  Others:       none
*******************************************************************************/
static int ANALYSER_AddRef(VideoAnalyser *pThiz)
{
    if (pThiz == NULL)
    {
        LOG_PrintError("Input paramenter is NULL\n");
        return -1;
    }

    ANALYSER_ctl_t *pCtl = (ANALYSER_ctl_t *)pThiz->priv;

    /* refCount is shared across threads; guard it with the control mutex */
    pthread_mutex_lock(&pCtl->mutex);
    ++pCtl->refCount;
    pthread_mutex_unlock(&pCtl->mutex);

    return 0;
}

/*******************************************************************************
  Function:     ANALYSER_Release
  Description:  Drop one reference to the analyser interface; frees the
                control block when the last reference is released.
  Input:        - pThiz: video analyser interface
  Output:       none
  Return:       0: success
                -1: failure (NULL interface pointer)
  Others:       Fix: the original unlocked pAnalyserCtl->mutex AFTER calling
                free(pAnalyserCtl) — a use-after-free. The mutex is now
                released before the object is destroyed, and the unused
                local 'result' was removed.
*******************************************************************************/
static int ANALYSER_Release(VideoAnalyser *pThiz)
{
    ANALYSER_ctl_t *pAnalyserCtl = NULL;

    if(!pThiz)
    {
        LOG_PrintError("Input paramenter is NULL\n");
        return -1;
    }

    pAnalyserCtl = (ANALYSER_ctl_t *)pThiz->priv;

    pthread_mutex_lock(&pAnalyserCtl->mutex);

    pAnalyserCtl->refCount--;
    if(pAnalyserCtl->refCount)
    {
        /* Still referenced elsewhere: keep the control block alive */
        pthread_mutex_unlock(&pAnalyserCtl->mutex);
        return 0;
    }

    /* Last reference: release the lock first so we never touch freed memory */
    pthread_mutex_unlock(&pAnalyserCtl->mutex);
    free(pAnalyserCtl);
    pAnalyserCtl_s = NULL;

    return 0;
}

/*******************************************************************************
  Function:     ANALYSER_GetCaps
  Description:  Query the features supported by the intelligent analyser.
                NOTE: currently a stub — pCaps is not filled in.
  Input:        - pThiz: video analyser interface
  Output:       - pCaps: pointer to the capability structure
  Return:       0: success
                -1: failure
  Others:       none
*******************************************************************************/
static int ANALYSER_GetCaps(struct VideoAnalyser *pThiz, VideoAnalyseCaps *pCaps)
{
    return 0;
}

/*******************************************************************************
  Function:     ANALYSER_GetPacket
  Description:  Get the video-analysis result containing basic object info.
                NOTE: currently a stub — pPacket is not filled in.
  Input:        - pThiz: video analyser interface
  Output:       - pPacket: VideoAnalysePacket result pointer
  Return:       0: success
                -1: failure
                -2: no alarm event
  Others:       none
*******************************************************************************/
static int ANALYSER_GetPacket(struct VideoAnalyser *pThiz,
                                   VideoAnalysePacket *pPacket)
{
    return 0;
}

/*******************************************************************************
 Function:     ANALYSER_GettRegion
 Description:  Copy the cached tracked regions into a VideoObjectPacket.
               Coordinates are either passed through raw (CONTOUR_ORIG) or
               converted into the 0..ANALYSER_RATIO normalised space
               (CONTOUR_CONVERT), optionally compensating for a crop window.
 Input:        - pAnalyserCtl: analyser control block (caller holds its mutex)
               - pCurTimeVal: current CLOCK_MONOTONIC time
               - straType: contour coordinate strategy
 Output:       - pPacket: receives the detected objects and adjusted pts
 Return:       0: always (an empty or expired snapshot leaves pPacket as-is)
 Others:       none
*******************************************************************************/
static int ANALYSER_GettRegion(ANALYSER_ctl_t *pAnalyserCtl,
                                   VideoObjectPacket *pPacket,
                                   struct timespec *pCurTimeVal,
                                   ANALYSER_contourStra_t straType)
{
   int i = 0;
   VideoObject* pObjects;
   int algW, algH;                        /* algorithm image size in pixels */
   IVS_monitorRegion_t *pMonitorRegions;
   ANALYSER_regionInfo_t *pIvsRegion;
   long timedif = 0;                      /* age of the region snapshot, ms */

   pIvsRegion = &pAnalyserCtl->flushRegion;

   algW = pAnalyserCtl->algSize.u32Width;
   algH = pAnalyserCtl->algSize.u32Height;

   /* Tracking boxes linger after the last detection: keep reporting the
      cached snapshot for up to 500 ms, then let the boxes disappear */
   if (pIvsRegion->trackedNum || pIvsRegion->warningNum)
   {
       timedif = (pCurTimeVal->tv_sec - pAnalyserCtl->regionTime.tv_sec)*1000
                  +(pCurTimeVal->tv_nsec - pAnalyserCtl->regionTime.tv_nsec)/1000000;

       if (labs(timedif) > 500)
       {
           return 0;
       }
   }
   else
   {
       return 0;
   }

   for(i = 0; i < pIvsRegion->trackedNum; i++)
   {
       /* pPacket->objects has room for at most ANALYSER_OBJ_NUM entries */
       if(pPacket->number >= ANALYSER_OBJ_NUM)
       {
           break;
       }

       pMonitorRegions = &pIvsRegion->tracked[i];
       pObjects = &pPacket->objects[pPacket->number];

       /* Map the IVS algorithm type to the public object type */
       if(pMonitorRegions->algType == IVS_TYPE_FACE)
       {
           pObjects->type = videoAnalyseHumanFace;

           pObjects->extra.face.frontalFaceFlag =
               pMonitorRegions->faceEvent.frontalFaceFlag;
           pObjects->extra.face.confidenceFlag = 0;
       }
       else if (pMonitorRegions->algType == IVS_TYPE_HDT)
       {
           pObjects->type = videoAnalyseHumanHead;
       }
       else if (pMonitorRegions->algType == IVS_TYPE_HUMAN)
       {
           pObjects->type = videoAnalyseHuman;
       }
       else
       {
           pObjects->type = videoAnalyseUnknown;
       }
       pObjects->id = pMonitorRegions->id;

       pObjects->ruleType = 255;   /* 255: not associated with any rule */

       switch(straType)
       {
           case CONTOUR_ORIG:
           {
               /* Raw pixel coordinates; points[0] is the top-left corner and
                  points[2] the bottom-right corner of the tracked box */
               pObjects->contour.left = pMonitorRegions->points[0].x;
               pObjects->contour.top =   pMonitorRegions->points[0].y;
               pObjects->contour.right = pMonitorRegions->points[2].x;
               pObjects->contour.bottom = pMonitorRegions->points[2].y;

               pPacket->number++;
               break;
           }
           case CONTOUR_CONVERT:
           {
               if( (0 == pAnalyserCtl->iCurSensorIdforDetectResult) && (1 == pAnalyserCtl->iIsCrop) )
               {
                    /* Detection ran on a crop of sensor 0: first map the crop
                       window from source resolution into algorithm resolution */
                    int iCropStartX = pAnalyserCtl->stCropRect.x * algW / pAnalyserCtl->stSrcRect.w;
                    int iCropStartY = pAnalyserCtl->stCropRect.y * algH / pAnalyserCtl->stSrcRect.h;
                    int iCropWid    = pAnalyserCtl->stCropRect.w * algW / pAnalyserCtl->stSrcRect.w;
                    int iCropHei    = pAnalyserCtl->stCropRect.h * algH / pAnalyserCtl->stSrcRect.h;

                    /* Detection box relative to the crop origin */
                    int iLeft   = pMonitorRegions->points[0].x - iCropStartX;
                    int iTop    = pMonitorRegions->points[0].y - iCropStartY;
                    int iRight  = pMonitorRegions->points[2].x - iCropStartX;
                    int iBottom = pMonitorRegions->points[2].y - iCropStartY;

                    /* Clamp the box into the crop window */
                    if(iLeft < 0)
                    {
                        iLeft = 0;
                    }
                    if(iTop < 0)
                    {
                        iTop = 0;
                    }
                    if(iRight > iCropWid)
                    {
                        iRight = iCropWid;
                    }
                    if(iBottom > iCropHei)
                    {
                        iBottom = iCropHei;
                    }

                    /* Normalise into the 0..ANALYSER_RATIO coordinate space */
                    pObjects->contour.left   = (float)1.0 * iLeft   * ANALYSER_RATIO / iCropWid;
                    pObjects->contour.top    = (float)1.0 * iTop    * ANALYSER_RATIO / iCropHei;
                    pObjects->contour.right  = (float)1.0 * iRight  * ANALYSER_RATIO / iCropWid;
                    pObjects->contour.bottom = (float)1.0 * iBottom * ANALYSER_RATIO / iCropHei;

#if 0
                    LOG_PrintInfo("sensor-%d-index:%d src2k-crop-(%d,%d)-(%d*%d)  --> 720p-crop-(%d,%d)-(%d*%d)\n",
                                                                            pAnalyserCtl->iCurSensorIdforDetectResult,i,
                                                                            pAnalyserCtl->stCropRect.x,
                                                                            pAnalyserCtl->stCropRect.y,
                                                                            pAnalyserCtl->stCropRect.w,
                                                                            pAnalyserCtl->stCropRect.h,
                                                                            iCropStartX,
                                                                            iCropStartY,
                                                                            iCropWid,
                                                                            iCropHei);
                    LOG_PrintInfo("sensor-%d-index:%d src-detect-(%d,%d)-(%d*%d) --> small-detect-(%d,%d)-(%d*%d) --> dst-detect-(%d,%d)-(%d*%d)\n",
                                                                            pAnalyserCtl->iCurSensorIdforDetectResult,i,
                                                                            pMonitorRegions->points[0].x,
                                                                            pMonitorRegions->points[0].y,
                                                                            (pMonitorRegions->points[2].x - pMonitorRegions->points[0].x),
                                                                            (pMonitorRegions->points[2].y - pMonitorRegions->points[0].y),
                                                                            iLeft,iTop,
                                                                            (iRight  - iLeft),
                                                                            (iBottom - iTop),
                                                                            pObjects->contour.left,
                                                                            pObjects->contour.top,
                                                                            (pObjects->contour.right  - pObjects->contour.left),
                                                                            (pObjects->contour.bottom - pObjects->contour.top));

#endif

               }
               else
               {
                    /* No crop compensation: normalise directly from the
                       algorithm image resolution */
                    pObjects->contour.left   = pMonitorRegions->points[0].x*ANALYSER_RATIO/algW;
                    pObjects->contour.top    = pMonitorRegions->points[0].y*ANALYSER_RATIO/algH;
                    pObjects->contour.right  = pMonitorRegions->points[2].x*ANALYSER_RATIO/algW;
                    pObjects->contour.bottom = pMonitorRegions->points[2].y*ANALYSER_RATIO/algH;
               }

               /* Clamp the normalised contour to [0, ANALYSER_RATIO] */
               if(pObjects->contour.left < 0)
               {
                    pObjects->contour.left = 0;
               }
               if(pObjects->contour.top < 0)
               {
                    pObjects->contour.top = 0;
               }
               if(pObjects->contour.right > ANALYSER_RATIO)
               {
                    pObjects->contour.right = ANALYSER_RATIO;
               }
               if(pObjects->contour.bottom > ANALYSER_RATIO)
               {
                    pObjects->contour.bottom = ANALYSER_RATIO;
               }

               /* Collapse inverted boxes to empty rather than negative extent */
               if(pObjects->contour.left >= pObjects->contour.right)
               {
                    pObjects->contour.left = pObjects->contour.right;
               }
               if(pObjects->contour.top >= pObjects->contour.bottom)
               {
                    pObjects->contour.top = pObjects->contour.bottom;
               }

               pPacket->number++;
               break;
           }
           default:
               break;
       }

   }

   // TODO(review): original note said "may fetch more than once at a time".
   // pts appears to be divided from a finer unit into ms and compensated by
   // the snapshot age — confirm the units against the producer of pts.
   pPacket->pts = pIvsRegion->pts/1000 + timedif;

   return 0;
}

/*******************************************************************************
Function:     ANALYSER_GetAlgCropObj
Description:  Fetch the face regions reported by the algorithm, converted
              into auto-focus zone blocks on an AF_ZONE_RADIO x AF_ZONE_RADIO
              grid over the algorithm image.
Input:        none
Output:       - faceBlk : face region block data (caller-provided array)
              - num: number of face blocks written
Return:       0: success
              -1: failure / no usable face data
Others:       Fix: removed the unused locals 'result' and 'faceHeight' and
              added a guard against a zero-sized algorithm image, which
              would otherwise divide by zero when computing the zone grid.
*******************************************************************************/
int ANALYSER_GetAlgCropObj(ENC_cropBlk_t *faceBlk, int *num)
{
    int ret = -1;

    if(!faceBlk || !num)
    {
       LOG_PrintError("ANALYSER_GetAlgCrop: faceBlk or num is NULL\n");
       return -1;
    }

    struct timespec curTimeVal;

    if(!pAnalyserCtl_s)
    {
        LOG_PrintError("ANALYSER_GetAlgCrop: pAnalyserCtl_s is NULL\n");
        return -1;
    }

    if(pAnalyserCtl_s->sensorId != 0) /* algorithm runs on the non-zoom lens: prefer centre focus */
    {
    	return -1;
    }

    pthread_mutex_lock(&pAnalyserCtl_s->mutex);

    VideoObjectPacket pPacket;
    memset(&pPacket, 0, sizeof(pPacket));

    pPacket.number = 0;
    pPacket.objects = pAnalyserCtl_s->oriObj;

    clock_gettime(CLOCK_MONOTONIC, &curTimeVal);

    /* Snapshot the newest region data; when trackedNum is 0 keep the previous
       snapshot so boxes fade out smoothly (500 ms window in ANALYSER_GettRegion) */
    if (pAnalyserCtl_s->ivsRegion.trackedNum > 0)
    {
        memcpy(&pAnalyserCtl_s->flushRegion, &pAnalyserCtl_s->ivsRegion, sizeof(ANALYSER_regionInfo_t));
        pAnalyserCtl_s->regionTime = curTimeVal;
    }

    ANALYSER_GettRegion(pAnalyserCtl_s, &pPacket, &curTimeVal, CONTOUR_ORIG);
    if (pPacket.number > 0)
    {
        Rect wireframe = {0};
        unsigned int uintRow = pAnalyserCtl_s->algSize.u32Width/AF_ZONE_RADIO;     /* e.g. 1280/15=85 */
        unsigned int uintColumn = pAnalyserCtl_s->algSize.u32Height/AF_ZONE_RADIO; /* e.g. 720/15=48 */
        int index = 0;

        /* Guard: a degenerate algorithm image would divide by zero below */
        if (uintRow == 0 || uintColumn == 0)
        {
            LOG_PrintError("ANALYSER_GetAlgCrop: invalid alg image size\n");
            pthread_mutex_unlock(&pAnalyserCtl_s->mutex);
            return -1;
        }

        ret = 0;

        for(int i = 0; i < pPacket.number; ++i)
        {
            if(pPacket.objects[i].type != videoAnalyseHumanFace) /* keep face detections only */
            {
                continue;
            }

            wireframe = pPacket.objects[i].contour;

            /* Convert the pixel contour into AF zone-grid coordinates.
               Note: wBlk/hBlk hold the (exclusive) end block, not a width. */
            faceBlk[index].xBlk = wireframe.left/uintRow;
            faceBlk[index].yBlk = wireframe.top/uintColumn;
            faceBlk[index].wBlk = wireframe.right/uintRow + 1;
            faceBlk[index].hBlk = wireframe.bottom /uintColumn;

            if(faceBlk[index].wBlk > AF_ZONE_RADIO)
            {
                faceBlk[index].wBlk = AF_ZONE_RADIO;
            }
            if(faceBlk[index].hBlk > AF_ZONE_RADIO)
            {
                faceBlk[index].hBlk = AF_ZONE_RADIO;
            }

            if((faceBlk[index].xBlk == faceBlk[index].wBlk) || (faceBlk[index].yBlk == faceBlk[index].hBlk))
            {
                continue; /* skip degenerate (line-shaped) regions */
            }

            index++; /* keep this face block */
        }

        *num = index; /* report the number of usable face blocks */
    }

    pthread_mutex_unlock(&pAnalyserCtl_s->mutex);

    return ret;
}

/*******************************************************************************
  Function:	    ANALYSER_GetVideoObj
  Description:  Get the object information for the metadata stream, with
                contours normalised into the 0..ANALYSER_RATIO space.
  Input:		- pThiz: video analyser interface
  Output: 	    - pPacket : metadata analysis packet
  Return: 	    0: success
			    -1: failure / no objects
  Others: 	    Fix: removed the unused local 'result' and validate pPacket
                before use (it is dereferenced unconditionally below).
*******************************************************************************/
static int ANALYSER_GetVideoObj(struct VideoAnalyser *pThiz,
									  VideoObjectPacket *pPacket)
{
    int ret = -1;
    ANALYSER_ctl_t *pAnalyserCtl = NULL;
    struct timespec curTimeVal;

    if(!pThiz || !pPacket)
    {
       LOG_PrintError("Input paramenter is NULL\n");
       return -1;
    }

    pAnalyserCtl = (ANALYSER_ctl_t *)pThiz->priv;
    pthread_mutex_lock(&pAnalyserCtl->mutex);

    pPacket->number = 0;
    pPacket->objects = pAnalyserCtl->oriObj;

    clock_gettime(CLOCK_MONOTONIC, &curTimeVal);

    /* Snapshot the newest region data; when trackedNum is 0 keep the previous
       snapshot so boxes fade out smoothly (500 ms window in ANALYSER_GettRegion) */
    if (pAnalyserCtl->ivsRegion.trackedNum > 0)
    {
        memcpy(&pAnalyserCtl->flushRegion, &pAnalyserCtl->ivsRegion, sizeof(ANALYSER_regionInfo_t));
        pAnalyserCtl->regionTime = curTimeVal;
    }

    ANALYSER_GettRegion(pAnalyserCtl, pPacket, &curTimeVal, CONTOUR_CONVERT);
    if (pPacket->number > 0)
    {
       ret = 0;
    }

    pthread_mutex_unlock(&pAnalyserCtl->mutex);
    return ret;
}


/*******************************************************************************
  Function:     ANALYSER_ReleasePacket
  Description:  释放视频分析数据资源
  Input:        - pThiz: 视频分析接口
  Output:       无
  Return:       0: 操作成功
                -1: 操作失败
  Others:       无
*******************************************************************************/
static int ANALYSER_ReleasePacket(struct VideoAnalyser *pThiz)
{
    int ret = 0;
    if(!pThiz)
    {
        LOG_PrintError("Input paramenter is NULL\n");
        return -1;
    }
    return ret;
}

/*******************************************************************************
  Function:     ANALYSER_Start
  Description:  Start analysis: enable the VI channel bound to the analyser
                and begin streaming frames from it.
  Input:        - pThiz: video analyser interface
                - streamId: VI pipe to capture from
  Output:       none
  Return:       0: success
                -1: failure
  Others:       Fix: if RK_MPI_VI_StartStream fails, the VI channel enabled
                just above is disabled again, so the channel is not leaked
                and Start can be retried.
*******************************************************************************/
static int ANALYSER_Start(struct VideoAnalyser *pThiz, int streamId)
{
    int result, ret = 0;
    ANALYSER_ctl_t *pAnalyserCtl = NULL;

    if(!pThiz)
    {
        LOG_PrintError("Input paramenter is NULL\n");
        return -1;
    }

    pAnalyserCtl = (ANALYSER_ctl_t *)pThiz->priv;

    pthread_mutex_lock(&pAnalyserCtl->mutex);

    /* Only a stopped analyser may be started */
    if (pAnalyserCtl->state != ANALYSER_STOP)
    {
    	LOG_PrintError("alg state is not stop!\n");
        ret = -1;
        goto mutex_unlock;
    }

    pAnalyserCtl->viPipe = streamId;

    LOG_PrintWarn("streamId:%d\n", streamId);

    result = VI_RkEnableChn(pAnalyserCtl->viPipe, pAnalyserCtl->viChn, &pAnalyserCtl->viChnAttr);
    if (result)
    {
        LOG_PrintError("Fail to create encode channel!\n");
        ret = -1;
        goto mutex_unlock;
    }

    result = RK_MPI_VI_StartStream(pAnalyserCtl->viPipe, pAnalyserCtl->viChn);
    if (result)
    {
        LOG_PrintError("Fail to RK_MPI_VI_StartStream chn(%d) with ret=%d\n",
                        pAnalyserCtl->viChn, result);
        /* Roll back the channel enabled above to avoid leaking it */
        VI_RkDisableChn(pAnalyserCtl->viPipe, pAnalyserCtl->viChn);
        ret = -1;
        goto mutex_unlock;
    }

    pAnalyserCtl->state = ANALYSER_START;

mutex_unlock:
    pthread_mutex_unlock(&pAnalyserCtl->mutex);

    return ret;
}

/*******************************************************************************
  Function:     ANALYSER_Stop
  Description:  Stop analysis by disabling the analyser's VI channel.
  Input:        - pThiz: video analyser interface
  Output:       none
  Return:       0: success
                -1: failure
  Others:       none
*******************************************************************************/
static int ANALYSER_Stop(struct VideoAnalyser *pThiz)
{
    if (pThiz == NULL)
    {
        LOG_PrintError("Input paramenter is NULL\n");
        return -1;
    }

    ANALYSER_ctl_t *pCtl = (ANALYSER_ctl_t *)pThiz->priv;
    int ret = 0;

    pthread_mutex_lock(&pCtl->mutex);

    if (pCtl->state != ANALYSER_START)
    {
        /* Stopping is only valid from the started state */
        LOG_PrintError("alg state is not start!\n");
        ret = -1;
    }
    else if (VI_RkDisableChn(pCtl->viPipe, pCtl->viChn))
    {
        LOG_PrintError("Fail to disable vi channel!\n");
        ret = -1;
    }
    else
    {
        pCtl->state = ANALYSER_STOP;
    }

    pthread_mutex_unlock(&pCtl->mutex);
    return ret;
}


/*******************************************************************************
  Function:     ANALYSER_SetFormat
  Description:  Set the video format configuration.
                NOTE: currently a stub — pFormat is ignored.
  Input:        - pThiz: video analyser interface
                - pFormat: video format parameters
  Output:       none
  Return:       0: success
                -1: failure
  Others:       none
*******************************************************************************/
static int ANALYSER_SetFormat(struct VideoAnalyser *pThiz,
                              VideoAnalyseFormat *pFormat)
{
    return 0;
}


/*******************************************************************************
  Function:     ANALYSER_SetCompModuleConfig
  Description:  Configure the comparison-algorithm parameters.
                NOTE: currently a stub — all arguments are ignored.
  Input:        - pAnalyserCtl: video analysis control block
                - pModuleConfig: VideoAnalyseModuleConfig pointer
                - pRuleConfig: VideoAnalyseRuleConfig pointer
                - ruleNum: number of rules
  Output:       none
  Return:       none
  Others:       none
*******************************************************************************/
static void ANALYSER_SetCompModuleConfig(ANALYSER_ctl_t *pAnalyserCtl,
               									 VideoAnalyseModuleConfig *pModuleConfig,
               									 VideoAnalyseRuleConfig *pRuleConfig,
               									 int ruleNum)
{
	return;
}


/*******************************************************************************
  Function:     ANALYSER_SetRuleConfig
  Description:  Set the video analysis rule configuration.
                NOTE: currently a stub — arguments are ignored.
  Input:        - pThiz: video analyser interface
                - count: number of rules (up to 20 per analyser)
                - pRuleConfig: VideoAnalyseRuleConfig pointer
  Output:       none
  Return:       0: success
                -1: failure
  Others:       none
*******************************************************************************/
static int ANALYSER_SetRuleConfig(struct VideoAnalyser *pThiz,
                                  	    int count,
                                  		VideoAnalyseRuleConfig *pRuleConfig)
{
 return 0;
}


/*******************************************************************************
  Function:     ANALYSER_SetModuleConfig
  Description:  Set the video analysis module configuration.
                NOTE: currently a stub — arguments are ignored.
  Input:        - pThiz: video analyser interface
                - count: number of modules per analyser
                - pModuleConfig: VideoAnalyseModuleConfig pointer
  Output:       none
  Return:       0: success
                -1: failure
  Others:       none
*******************************************************************************/
static int ANALYSER_SetModuleConfig(struct VideoAnalyser *pThiz,
                                    int count,
                                    VideoAnalyseModuleConfig *pModuleConfig)
{
    return 0;
}

/*******************************************************************************
  Function:     ANALYSER_SetModuleState
  Description:  Set a module's run state.
                NOTE: currently a stub — arguments are ignored.
  Input:        - pThiz: video analyser interface
                - index: module index
                - state: 0 - running, 1 - paused
  Output:       none
  Return:       0: success
                -1: failure
  Others:       none
*******************************************************************************/
static int ANALYSER_SetModuleState(struct VideoAnalyser *pThiz,
                                   int index, int state)
{
    return 0;
}

/*******************************************************************************
  Function:     ANALYSER_SetSnapOption
  Description:  Configure snapshot attributes.
                NOTE: currently a stub — pSnapOption is ignored.
  Input:        - pThiz: video analyser interface
                - pSnapOption: forced-snapshot attributes
  Output:       none
  Return:       0: success
                -1: failure
  Others:       none
*******************************************************************************/
static int ANALYSER_SetSnapOption(struct VideoAnalyser *pThiz,
                                  const VideoAnalyseSnapOption *pSnapOption)
{
   return 0;
}


/*******************************************************************************
  Function:     ANALYSER_GetVersion
  Description:  Get version information.
                NOTE: currently unimplemented — always reports failure and
                leaves pVersion untouched.
  Input:        - pThiz: video analyser interface
                - pVersion: pointer receiving the version info
  Output:       none
  Return:       0: success
                -1: failure
  Others:       none
*******************************************************************************/
static int ANALYSER_GetVersion(struct VideoAnalyser *pThiz,
                               struct Version *pVersion)
{
    return -1;
}

/*******************************************************************************
  Function:     ANALYSER_GetVersions
  Description:  Get the version info of each algorithm module.
                NOTE: currently a stub — pVersion/pCount are not written.
  Input:        - pThiz: video analyser interface
                - pVersion: pointer receiving the version info
                - pCount: in: max number of versions; out: actual number
  Output:       none
  Return:       0: success
                -1: failure
  Others:       none
*******************************************************************************/
static int ANALYSER_GetVersions(struct VideoAnalyser *pThiz,
                                struct Version *pVersion,
                                int *pCount)
{
    return 0;
}

/*******************************************************************************
  Function:     ANALYSER_SetTrafficWorkMode
  Description:  Set the intelligent-traffic plate detection mode.
                NOTE: currently a stub — mode is ignored.
  Input:        - pThiz: video analyser interface
                - mode: plate detection mode (static, dynamic or mixed)
  Output:       none
  Return:       0: success
                -1: failure
  Others:       none
*******************************************************************************/
static int ANALYSER_SetTrafficWorkMode(struct VideoAnalyser *pThiz,
                                       VideoAnalysePlateRecognizeMode mode)
{
    return 0;
}

/*******************************************************************************
  Function:     ANALYSER_ForceVideoBreaking
  Description:  Force a snapshot to generate a violation record.
                NOTE: currently a stub — pBreaking is ignored.
  Input:        - pThiz: video analyser interface
                - pBreaking: forced-snapshot structure pointer
  Output:       none
  Return:       0: success
                -1: failure
  Others:       none
*******************************************************************************/
static int ANALYSER_ForceVideoBreaking(struct VideoAnalyser *pThiz,
                                       const VideoForceBreaking *pBreaking)
{
    return 0;
}

/*******************************************************************************
  Function:	   ANALYSER_GetDelay
  Description: Delay between the algorithm result frame and the video frame.
               NOTE: currently a stub — pTime is not written.
  Input: 	   - pThiz: video analyser interface
  			   - pTime: delay in milliseconds
  Output:	   none
  Return:	   0: success
			   -1: failure
  Others:	   none
*******************************************************************************/
static int ANALYSER_GetDelay(struct VideoAnalyser *pThiz, unsigned int *pTime)
{
    return 0;
}

/*******************************************************************************
  Function:     ANALYSER_SelectSensor
  Description:  Select which sensor the analyser runs on.
  Input:        - pThiz: video analyser interface
                - sensorId: sensor to select
  Output:       none
  Return:       0: success
                -1: failure (NULL interface pointer)
  Others:       none
*******************************************************************************/
static int ANALYSER_SelectSensor(struct VideoAnalyser *pThiz, int sensorId)
{
    if (pThiz == NULL)
    {
        LOG_PrintError("Input paramenter is NULL\n");
        return -1;
    }

    ANALYSER_ctl_t *pCtl = (ANALYSER_ctl_t *)pThiz->priv;

    /* sensorId is read by other threads; update it under the mutex */
    pthread_mutex_lock(&pCtl->mutex);
    pCtl->sensorId = sensorId;
    pthread_mutex_unlock(&pCtl->mutex);

    return 0;
}

/*******************************************************************************
  Function:     ANALYSER_GetSensorId
  Description:  Read the sensor id the analyser currently runs on.
  Input:        - pThiz: video analyser interface
  Output:       - sensorId: receives the current sensor id
  Return:       0: success
                -1: failure (NULL pointer)
  Others:       Fix: the output pointer is now validated before it is
                dereferenced (only pThiz was checked before).
*******************************************************************************/
static int ANALYSER_GetSensorId(struct VideoAnalyser *pThiz, int* sensorId)
{
    ANALYSER_ctl_t *pAnalyserCtl = NULL;

    if(!pThiz || !sensorId)
    {
        LOG_PrintError("Input paramenter is NULL\n");
        return -1;
    }

    pAnalyserCtl = (ANALYSER_ctl_t *)pThiz->priv;

    pthread_mutex_lock(&pAnalyserCtl->mutex);

    *sensorId = pAnalyserCtl->sensorId;

    pthread_mutex_unlock(&pAnalyserCtl->mutex);
    return 0;
}

/*******************************************************************************
  Function:     ANALYSER_SetUpgradeFlg
  Description:  Set the upgrade flag on the analyser control block.
  Input:        - pThiz: video analyser interface
                - flag: upgrade flag value
  Output:       none
  Return:       0: success
                -1: failure (NULL interface pointer)
  Others:       none
*******************************************************************************/
static int ANALYSER_SetUpgradeFlg(struct VideoAnalyser *pThiz, int flag)
{
    if (pThiz == NULL)
    {
        LOG_PrintError("Input paramenter is NULL\n");
        return -1;
    }

    ANALYSER_ctl_t *pCtl = (ANALYSER_ctl_t *)pThiz->priv;

    /* upgradeFlg is polled elsewhere; update it under the mutex */
    pthread_mutex_lock(&pCtl->mutex);
    pCtl->upgradeFlg = flag;
    pthread_mutex_unlock(&pCtl->mutex);

    return 0;
}
/*******************************************************************************
  Function:     selectVideoAnalyserMessage
  Description:  Fetch intelligent-analysis messages.
                (Note: blocks until a message arrives or the timeout expires.)
                NOTE: currently a stub — returns immediately with no messages.
  Input:        - timeout: timeout in ms; the call returns when it elapses
                           even without messages; -1 means wait forever
                - pCount: on input, the number of message structures provided
  Output:       - pMessage: message structures, memory owned by the caller
                - pCount: on output, the number of messages returned
  Return:       0: success
                -1: failure
  Others:       none
*******************************************************************************/
int PAL_API selectVideoAnalyserMessage(int timeout,
                                       VideoAnalyserMessage *pMessage,
                                       int *pCount)
{
    return 0;
}

/*******************************************************************************
  Function:     getVideoAnalyseChannels
  Description:  Report the maximum number of video-analysis channels
                (currently fixed at one).
  Input:        none
  Output:       none
  Return:       >=0: channel count
                -1: failure
  Others:       none
*******************************************************************************/
int getVideoAnalyseChannels(void)
{
    /* Only a single analyser channel is supported for now */
    enum { ANALYSER_MAX_CHANNELS = 1 };
    return ANALYSER_MAX_CHANNELS;
}

static void *ANALYSER_AlgThd(void *parg)
{
    int ret, i;
    MEDIA_BUFFER srcMb = NULL, dstMb;
    MEDIA_BUFFER secSrcMb = NULL;
    MEDIA_BUFFER tmpMb;
    rga_buffer_t rgaSrc;
    ANALYSER_ctl_t *pAnalyserCtl = (ANALYSER_ctl_t *)parg;
    RSImage rsImage;
    RSFaceDetectResult faceResult = {0};
    RSBodyDetectResult bodyResult = {0};
    RSBodyTrackResult bodyTrackResult = {0};
    RSFaceTrackResult faceTrackResult = {0};
    
    MB_IMAGE_INFO_S mbInfo;
    IM_STATUS status;
    float srcFps = 30.0;
    float dstFps = 7.0;
    float fpsRatio;
    float calcVal;
    float residual = 0.0;
    int frameCnt = 0;
    IVS_monitorRegion_t *pRegion;
    int count = 0;
    unsigned int curTime;
    struct timespec tmpTime;
    int srcResizeW = 0;
    int srcResizeH = 0;
    im_rect imCropRect = {0};
    rga_buffer_t rgaCrop;
    rga_buffer_t *pSrcBuf;
    im_rect dstRect = {0};
    int sync = 1;
    double fxConvert = 0.0;
    double fyConvert = 0.0;
    int midX, midY;

    int lastSensorId = 0;

    fpsRatio = dstFps/srcFps;

    VI_ctl_t *pViCtl = videoCtl_g.pViCtl[0];
    if (!pViCtl)
    {
        LOG_PrintError("pViCtl is NULL\n");
        return NULL;
    }

    VIDEO_picSize_t picSize = {0};
    VIDEO_GetPicSize(videoCtl_g.standard, 
                 videoCtl_g.resolution, 
                 &picSize,
                 videoCtl_g.rotate); //获取机芯最大图像分辨率——4k

    while (1)
    {
        /*if(pAnalyserCtl->upgradeFlg == 1) //升级时释放内存，停止检测
        {
            if (cropMb)
            {
                RK_MPI_MB_ReleaseBuffer(cropMb);
                cropMb = NULL;
            }
            
            rsUninitFaceDetection(pAnalyserCtl->rsHandle, pAnalyserCtl->faceDetHandle);
            rsUninitBodyDetection(pAnalyserCtl->rsHandle, pAnalyserCtl->bodyDetHandle);
            rsUninitAlgoSDK(&pAnalyserCtl->rsHandle);

            printf("[%s][%d] [%s]\n",__FILE__,__LINE__,__FUNCTION__);   
            break;
        }*/
        
        srcMb = RK_MPI_SYS_GetMediaBuffer(RK_ID_VI, pAnalyserCtl->viChn, -1);
        if (!srcMb) 
        {
            LOG_PrintError("Fail to RK_MPI_SYS_GetMediaBuffer!\n");
            continue;
        }

        secSrcMb = RK_MPI_SYS_GetMediaBuffer(RK_ID_VI, pAnalyserCtl->viSecChn, -1);
        if (!secSrcMb) 
        {
            LOG_PrintError("Fail to RK_MPI_SYS_GetMediaBuffer!\n");
            continue;
        }

        if (videoCtl_g.pEncCtls[0])
        {
            if (videoCtl_g.pEncCtls[0]->encPicSize.u32Width >= 3840)
            {
                goto release_buffer;
            }
        }

        frameCnt++;
        calcVal = fpsRatio*frameCnt + residual;
        if (calcVal < 1.0)
        {
            goto release_buffer;
        }
        else
        {
            residual = calcVal - 1.0;
            frameCnt = 0;
        }

        //LOG_PrintWarn("##########,pAnalyserCtl->sensorId =%d\n",pAnalyserCtl->sensorId);

        if (pAnalyserCtl->sensorId == 0)
        {
            tmpMb = srcMb;            
        }
        else if (pAnalyserCtl->sensorId == 1)
        {
            tmpMb = secSrcMb;  
        }

        if(lastSensorId != pAnalyserCtl->sensorId)
        {
            lastSensorId = pAnalyserCtl->sensorId;
            
            ret = rsClearTrackingState(pAnalyserCtl->rsHandle);
            if (ret)
            {
                LOG_PrintError("Fail to rsClearTrackingState!\n");
                goto release_buffer;
            }

            LOG_PrintInfo("success to rsClearTrackingState!\n");
        }

        RK_MPI_MB_GetImageInfo(tmpMb, &mbInfo);

        if ((mbInfo.u32Width != pAnalyserCtl->algSize.u32Width)
            || (mbInfo.u32Height != pAnalyserCtl->algSize.u32Height))
        {
            LOG_PrintError("Src frame width(%d) height(%d) is invalid!\n", 
                            mbInfo.u32Width, mbInfo.u32Height);
            goto release_buffer;
        }

        dstMb = tmpMb;

        rsImage.width = mbInfo.u32Width;
        rsImage.height = mbInfo.u32Height;
        rsImage.format = RS_IMG_FORMAT_NV12;
        rsImage.data = (unsigned char*)RK_MPI_MB_GetPtr(dstMb);

#if 1
#ifdef ALG_COUNTTIME
        clock_gettime(CLOCK_MONOTONIC, &tmpTime);
        curTime = tmpTime.tv_sec*1000 + tmpTime.tv_nsec / (1000*1000);
#endif

        /*ret = rsRunBodyAndFaceDetection(pAnalyserCtl->rsHandle,
                                        &rsImage,
                                        &bodyResult,
                                        &faceResult);  */

        ret = rsRunBodyAndFaceTracking(pAnalyserCtl->rsHandle,
                                        &rsImage,
                                        &bodyTrackResult,
                                        &faceTrackResult);                        
        if (ret)
        {
            LOG_PrintError("Fail to rsRunBodyAndFaceTracking!\n");
            goto release_buffer;
        }
        if( faceTrackResult.faceNum > 0 || bodyTrackResult.bodyNum > 0)
        {
            //LOG_PrintInfo("Run faceNum----->> %d, bodyNum----->> %d\n", faceTrackResult.faceNum, bodyTrackResult.bodyNum);
        }

#ifdef ALG_COUNTTIME
        clock_gettime(CLOCK_MONOTONIC, &tmpTime);
    	curTime = (tmpTime.tv_sec*1000 + tmpTime.tv_nsec / (1000*1000)) - curTime;
    	LOG_PrintInfo("Run Face Time ----->> %dms\n", curTime);
#endif
#else
    #ifdef ALG_COUNTTIME
        clock_gettime(CLOCK_MONOTONIC, &tmpTime);
        curTime = tmpTime.tv_sec*1000 + tmpTime.tv_nsec / (1000*1000);
#endif

        ret = rsRunFaceDetection(pAnalyserCtl->faceDetHandle, &rsImage, &faceResult);
        if (ret)
        {
            LOG_PrintError("Fail to rsRunFaceDetection!\n");
            goto release_buffer;
        }

#ifdef ALG_COUNTTIME
        clock_gettime(CLOCK_MONOTONIC, &tmpTime);
    	curTime = (tmpTime.tv_sec*1000 + tmpTime.tv_nsec / (1000*1000)) - curTime;
    	LOG_PrintInfo("Run Face Time ----->> %dms\n", curTime);
#endif

#ifdef ALG_COUNTTIME
        clock_gettime(CLOCK_MONOTONIC, &tmpTime);
        curTime = tmpTime.tv_sec*1000 + tmpTime.tv_nsec / (1000*1000);
#endif

        ret = rsRunBodyDetection(pAnalyserCtl->bodyDetHandle, &rsImage, &bodyResult);
        if (ret)
        {
            LOG_PrintError("Fail to rsRunFaceDetection!\n");
            goto release_buffer;
        }

#ifdef ALG_COUNTTIME
        clock_gettime(CLOCK_MONOTONIC, &tmpTime);
        curTime = (tmpTime.tv_sec*1000 + tmpTime.tv_nsec / (1000*1000)) - curTime;
        LOG_PrintInfo("Run Body Time ----->> %dms\n", curTime);
#endif

#endif
        count = 0;

        pthread_mutex_lock(&pAnalyserCtl->mutex);

        if (faceResult.faceNum > ANALYSER_TRACE_NUM)
        {
            faceResult.faceNum = ANALYSER_TRACE_NUM;
        }

        Rect_ext* pstSrcRect = &(pAnalyserCtl->stSrcRect);
        pstSrcRect->x = 0;
        pstSrcRect->y = 0;
        pstSrcRect->w = picSize.u32Width;
        pstSrcRect->h = picSize.u32Height;

        Rect_ext* pstCropRect = &(pAnalyserCtl->stCropRect);
        if(pViCtl->isCrop)
        {
            pstCropRect->x = pViCtl->imRect.x;
            pstCropRect->y = pViCtl->imRect.y;
            pstCropRect->w = pViCtl->imRect.width;
            pstCropRect->h = pViCtl->imRect.height;
        }
        else
        {
            pstCropRect->x = 0;
            pstCropRect->y = 0;
            pstCropRect->w = picSize.u32Width;
            pstCropRect->h = picSize.u32Height;
        }
        pAnalyserCtl->iCurSensorIdforDetectResult = pAnalyserCtl->sensorId;

        if( 0 == pAnalyserCtl->sensorId )
        {
            pAnalyserCtl->iIsCrop   = pViCtl->isCrop;
        }
        else
        {
            pAnalyserCtl->iIsCrop   = 0;
        }

        for (i = 0; i < faceTrackResult.faceNum; i++)
        {
            /*LOG_PrintWarn("index:%d, x:%d, y:%d, w:%d, h:%d\n", i, 
            faceTrackResult.faceRectArray[i].x, faceTrackResult.faceRectArray[i].y, 
            faceTrackResult.faceRectArray[i].width, faceTrackResult.faceRectArray[i].height);*/

            if (pAnalyserCtl->iIsCrop)
            {
                imCropRect.width  = mbInfo.u32Width  * pViCtl->imRect.width  / picSize.u32Width;
                imCropRect.height = mbInfo.u32Height * pViCtl->imRect.height / picSize.u32Height;
                imCropRect.x      = mbInfo.u32Width  * pViCtl->imRect.x      / picSize.u32Width;
                imCropRect.y      = mbInfo.u32Height * pViCtl->imRect.y      / picSize.u32Height;

                int iFaceDetectX = faceTrackResult.faceRectArray[i].x;
                int iFaceDetectY = faceTrackResult.faceRectArray[i].y;
                int iFaceDetectW = faceTrackResult.faceRectArray[i].width;
                int iFaceDetectH = faceTrackResult.faceRectArray[i].height;

                midX = iFaceDetectX + iFaceDetectW/2;
                midY = iFaceDetectY + iFaceDetectH/2;

                if((midX < imCropRect.x)
                    || (midX > (imCropRect.x + imCropRect.width))
                    || (midY < imCropRect.y) 
                    || (midY > (imCropRect.y + imCropRect.height)))
                {
#if 0
                    LOG_PrintInfo("sensor-%d index-%d crop-(%d,%d)-(%d*%d)  faces-detect-(%d,%d)-(%d*%d)\n",
                                                                            pAnalyserCtl->sensorId,i,
                                                                            imCropRect.x,imCropRect.y,
                                                                            imCropRect.width,
                                                                            imCropRect.height,
                                                                            iFaceDetectX,iFaceDetectY,
                                                                            iFaceDetectW,iFaceDetectH);
#endif
                    continue;
                }
            }

            pRegion = &pAnalyserCtl->ivsRegion.tracked[count++];	   
            pRegion->algType = IVS_TYPE_FACE;
            pRegion->pointNum = 4;
            pRegion->regionType = IVS_RGN_ATM_KEYMASK;
            pRegion->id = faceTrackResult.trackID[i];

            pRegion->points[0].x = faceTrackResult.faceRectArray[i].x;
            pRegion->points[0].y = faceTrackResult.faceRectArray[i].y;
            pRegion->points[1].x = faceTrackResult.faceRectArray[i].x;
            pRegion->points[1].y = faceTrackResult.faceRectArray[i].y + 3*faceTrackResult.faceRectArray[i].height;
            pRegion->points[2].x = faceTrackResult.faceRectArray[i].x + faceTrackResult.faceRectArray[i].width;
            pRegion->points[2].y = faceTrackResult.faceRectArray[i].y + 3*faceTrackResult.faceRectArray[i].height;
            pRegion->points[3].x = faceTrackResult.faceRectArray[i].x + faceTrackResult.faceRectArray[i].width;
            pRegion->points[3].y = faceTrackResult.faceRectArray[i].y;
        }

        if ((bodyResult.bodyNum + count) > ANALYSER_TRACE_NUM)
        {
            bodyResult.bodyNum = ANALYSER_TRACE_NUM - count;
        }

        for (i = 0; i < bodyTrackResult.bodyNum; i++)
        {
            /*LOG_PrintWarn("index:%d, trackid:%d, x:%d, y:%d, w:%d, h:%d,sensorId:%d\n", i, bodyTrackResult.trackID[i],
            bodyTrackResult.bodyRectArray[i].x, bodyTrackResult.bodyRectArray[i].y, 
            bodyTrackResult.bodyRectArray[i].width, bodyTrackResult.bodyRectArray[i].height, pAnalyserCtl->sensorId);*/
            if (pAnalyserCtl->iIsCrop)
            {
                imCropRect.width  = mbInfo.u32Width  * pViCtl->imRect.width  / picSize.u32Width;
                imCropRect.height = mbInfo.u32Height * pViCtl->imRect.height / picSize.u32Height;
                imCropRect.x      = mbInfo.u32Width  * pViCtl->imRect.x      / picSize.u32Width;
                imCropRect.y      = mbInfo.u32Height * pViCtl->imRect.y      / picSize.u32Height;

                int iBodyDetectX = bodyTrackResult.bodyRectArray[i].x;
                int iBodyDetectY = bodyTrackResult.bodyRectArray[i].y;
                int iBodyDetectW = bodyTrackResult.bodyRectArray[i].width;
                int iBodyDetectH = bodyTrackResult.bodyRectArray[i].height;

                midX = iBodyDetectX + iBodyDetectW/2;
                midY = iBodyDetectY + iBodyDetectH/2;

                if((midX < imCropRect.x)
                    || (midX > (imCropRect.x + imCropRect.width))
                    || (midY < imCropRect.y) 
                    || (midY > (imCropRect.y + imCropRect.height)))
                {
#if 0
                    LOG_PrintInfo("sensor-%d index-%d crop-(%d,%d)-(%d*%d)  body-detect-(%d,%d)-(%d*%d)\n",
                                                                                            pAnalyserCtl->sensorId,i,
                                                                                            imCropRect.x,imCropRect.y,
                                                                                            imCropRect.width,
                                                                                            imCropRect.height,
                                                                                            iBodyDetectX,iBodyDetectY,
                                                                                            iBodyDetectW,iBodyDetectH);
#endif
                    continue;
                }
            }

            pRegion = &pAnalyserCtl->ivsRegion.tracked[count++];
            pRegion->algType = IVS_TYPE_HUMAN;
            pRegion->pointNum = 4;
            pRegion->regionType = IVS_RGN_ATM_KEYMASK;
            pRegion->id = bodyTrackResult.trackID[i];
            pRegion->points[0].x = bodyTrackResult.bodyRectArray[i].x;
            pRegion->points[0].y = bodyTrackResult.bodyRectArray[i].y;
            pRegion->points[1].x = bodyTrackResult.bodyRectArray[i].x;
            pRegion->points[1].y = bodyTrackResult.bodyRectArray[i].y + bodyTrackResult.bodyRectArray[i].height;
            pRegion->points[2].x = bodyTrackResult.bodyRectArray[i].x + bodyTrackResult.bodyRectArray[i].width;
            pRegion->points[2].y = bodyTrackResult.bodyRectArray[i].y + bodyTrackResult.bodyRectArray[i].height;
            pRegion->points[3].x = bodyTrackResult.bodyRectArray[i].x + bodyTrackResult.bodyRectArray[i].width;
            pRegion->points[3].y = bodyTrackResult.bodyRectArray[i].y;
        }

#if 0      
        for (i = 0; i < faceResult.faceNum; i++)
        {
            /*LOG_PrintWarn("index:%d, x:%d, y:%d, w:%d, h:%d\n", i, 
            faceResult.faceRectArray[i].x, faceResult.faceRectArray[i].y, 
            faceResult.faceRectArray[i].width, faceResult.faceRectArray[i].height);*/
            pRegion = &pAnalyserCtl->ivsRegion.tracked[count++];	   
            pRegion->algType = IVS_TYPE_FACE;
            pRegion->pointNum = 4;
            pRegion->regionType = IVS_RGN_ATM_KEYMASK;
            pRegion->id = 0;
            pRegion->points[0].x = faceResult.faceRectArray[i].x;
            pRegion->points[0].y = faceResult.faceRectArray[i].y;
            pRegion->points[1].x = faceResult.faceRectArray[i].x;
            pRegion->points[1].y = faceResult.faceRectArray[i].y + 3*faceResult.faceRectArray[i].height;
            pRegion->points[2].x = faceResult.faceRectArray[i].x + faceResult.faceRectArray[i].width;
            pRegion->points[2].y = faceResult.faceRectArray[i].y + 3*faceResult.faceRectArray[i].height;
            pRegion->points[3].x = faceResult.faceRectArray[i].x + faceResult.faceRectArray[i].width;
            pRegion->points[3].y = faceResult.faceRectArray[i].y;
        }

        if ((bodyResult.bodyNum + count) > ANALYSER_TRACE_NUM)
        {
            bodyResult.bodyNum = ANALYSER_TRACE_NUM - count;
        }

        for (i = 0; i < bodyResult.bodyNum; i++)
        {
            /*LOG_PrintWarn("index:%d, x:%d, y:%d, w:%d, h:%d\n", i, 
            bodyResult.bodyRectArray[i].x, bodyResult.bodyRectArray[i].y, 
            bodyResult.bodyRectArray[i].width, bodyResult.bodyRectArray[i].height);*/
            pRegion = &pAnalyserCtl->ivsRegion.tracked[count++];	   
            pRegion->algType = IVS_TYPE_HUMAN;
            pRegion->pointNum = 4;
            pRegion->regionType = IVS_RGN_ATM_KEYMASK;
            pRegion->id = 0;
            pRegion->points[0].x = bodyResult.bodyRectArray[i].x;
            pRegion->points[0].y = bodyResult.bodyRectArray[i].y;
            pRegion->points[1].x = bodyResult.bodyRectArray[i].x;
            pRegion->points[1].y = bodyResult.bodyRectArray[i].y + bodyResult.bodyRectArray[i].height;
            pRegion->points[2].x = bodyResult.bodyRectArray[i].x + bodyResult.bodyRectArray[i].width;
            pRegion->points[2].y = bodyResult.bodyRectArray[i].y + bodyResult.bodyRectArray[i].height;
            pRegion->points[3].x = bodyResult.bodyRectArray[i].x + bodyResult.bodyRectArray[i].width;
            pRegion->points[3].y = bodyResult.bodyRectArray[i].y;
        }
#endif

        clock_gettime(CLOCK_MONOTONIC, &tmpTime);
        curTime = tmpTime.tv_sec*1000 + tmpTime.tv_nsec / (1000*1000);
        pAnalyserCtl->ivsRegion.pts = curTime;
        pAnalyserCtl->ivsRegion.trackedNum = count;

        pthread_mutex_unlock(&pAnalyserCtl->mutex);


release_buffer: 
        if (srcMb)
        {
            RK_MPI_MB_ReleaseBuffer(srcMb);
            srcMb = NULL;
        }
        if (secSrcMb)
        {
            RK_MPI_MB_ReleaseBuffer(secSrcMb);
            secSrcMb = NULL;
        }
    }
    
    return NULL;
}

/*******************************************************************************
  Function:     createVideoAnalyser
  Description:  创建视频分析接口
  Input:        - pAnalyseDesc: 视频分析接口描述结构VideoAnalyseDesc指针
  Output:       - ppVideoOutput: 视频分析指针的指针
  Return:       0: 操作成功
                -1: 操作失败
  Others:       无
*******************************************************************************/
int PAL_API createVideoAnalyser(VideoAnalyseDesc *pAnalyseDesc,
                                VideoAnalyser **ppVideoAnalyser)
{
    int ret = 0, result;
    VideoAnalyser *pVideoAnalyser = NULL;
    ANALYSER_ctl_t *pAnalyserCtl = NULL;
    char license[4096] = {0};
    char auth[4096] = {0};
    int authFd, licFd;
    int authLen, licLen;
    
    if((NULL == pAnalyseDesc) || (NULL == ppVideoAnalyser))
    {
        LOG_PrintError("Parameter is NULL!\n");
        return -1;
    }

    if (access(ANALYSE_FACE_LICTION_PATH, 0))
    {
        LOG_PrintError("licence file:%s is not exist\n", ANALYSE_FACE_LICTION_PATH);
        return -1;
    }
    
    if (access(ANALYSE_FACE_LICTION_AUTH_PATH, 0))
    {
        LOG_PrintError("licence auth file:%s is not exist\n", ANALYSE_FACE_LICTION_AUTH_PATH);
        return -1;
    }

    /* Singleton: if the analyser already exists, just add a reference.
     * BUGFIX: return directly -- the old code jumped to the cleanup
     * label here and free()d the *live* shared control block. */
    pAnalyserCtl = pAnalyserCtl_s;
    if(pAnalyserCtl)
    {
        pAnalyserCtl->refCount++;
        *ppVideoAnalyser = &pAnalyserCtl->videoAnalyser;
        return 0;
    }

    pAnalyserCtl = (ANALYSER_ctl_t *)malloc(sizeof(ANALYSER_ctl_t));
    if(!pAnalyserCtl)
    {
        LOG_PrintError("Fail to malloc analyser!\n");
        return -1;
    }
    memset(pAnalyserCtl, 0, sizeof(ANALYSER_ctl_t));

    /* Load the ReadSense licence and its auth blob.
     * BUGFIX: check open()/read() results instead of reading from fd -1. */
    licFd = open(ANALYSE_FACE_LICTION_PATH, O_RDONLY);
    if (licFd < 0)
    {
        LOG_PrintError("Fail to open %s\n", ANALYSE_FACE_LICTION_PATH);
        ret = -1;
        goto err_free;
    }
    licLen = read(licFd, license, sizeof(license));
    close(licFd);
    if (licLen <= 0)
    {
        LOG_PrintError("Fail to read %s\n", ANALYSE_FACE_LICTION_PATH);
        ret = -1;
        goto err_free;
    }

    authFd = open(ANALYSE_FACE_LICTION_AUTH_PATH, O_RDONLY);
    if (authFd < 0)
    {
        LOG_PrintError("Fail to open %s\n", ANALYSE_FACE_LICTION_AUTH_PATH);
        ret = -1;
        goto err_free;
    }
    authLen = read(authFd, auth, sizeof(auth));
    close(authFd);
    if (authLen <= 0)
    {
        LOG_PrintError("Fail to read %s\n", ANALYSE_FACE_LICTION_AUTH_PATH);
        ret = -1;
        goto err_free;
    }

    LOG_PrintInfo("RS SDK VERSION: %s\n", rsGetAlgoSDKVersion());

    RSLicInitParams rsLicPar;

    rsLicPar.licContentLen = licLen;
    rsLicPar.licContent = license;
    rsLicPar.licAuthContentLen = authLen;
    rsLicPar.licAuthContent = auth;

    ret = rsInitAlgoSDK(&pAnalyserCtl->rsHandle, &rsLicPar);
    if (ret)
    {
        LOG_PrintError("Fail to rsInitAlgoSDK!\n");
        ret = -1;
        goto err_free;
    }

    /* Both analysis VI channels run at 720p. */
    VIDEO_GetPicSize(videoCtl_g.standard, 
                     VIDEO_RESOLUTION_H720, 
                     &pAnalyserCtl->algSize,
                     videoCtl_g.rotate);

    pAnalyserCtl->viPipe = 0;
    pAnalyserCtl->viChn = 3;
    pAnalyserCtl->viChnAttr.pcVideoNode = "rkispp_scale1";
    pAnalyserCtl->viChnAttr.u32BufCnt = 5;
    pAnalyserCtl->viChnAttr.u32Width = pAnalyserCtl->algSize.u32Width;
    pAnalyserCtl->viChnAttr.u32Height = pAnalyserCtl->algSize.u32Height;
    pAnalyserCtl->viChnAttr.enPixFmt = IMAGE_TYPE_NV12;
    pAnalyserCtl->viChnAttr.enWorkMode = VI_WORK_MODE_NORMAL;

    result = VI_RkEnableChn(pAnalyserCtl->viPipe, pAnalyserCtl->viChn, &pAnalyserCtl->viChnAttr);
    if (result)
    {
        LOG_PrintError("Fail to create encode channel!\n");
        ret = -1;
        goto err_free;
    }  

    result = RK_MPI_VI_StartStream(pAnalyserCtl->viPipe, pAnalyserCtl->viChn);
    if (result)
    {
        LOG_PrintError("Fail to RK_MPI_VI_StartStream chn(%d) with ret=%d\n",
                        pAnalyserCtl->viChn, result);
        ret = -1;
        goto err_free;
    }

    pAnalyserCtl->viSecPipe = 1;
    pAnalyserCtl->viSecChn = 4;
    pAnalyserCtl->viSeAttr.pcVideoNode = "rkispp_scale0";
    pAnalyserCtl->viSeAttr.u32BufCnt = 5;
    pAnalyserCtl->viSeAttr.u32Width = pAnalyserCtl->algSize.u32Width;
    pAnalyserCtl->viSeAttr.u32Height = pAnalyserCtl->algSize.u32Height;
    pAnalyserCtl->viSeAttr.enPixFmt = IMAGE_TYPE_NV12;
    pAnalyserCtl->viSeAttr.enWorkMode = VI_WORK_MODE_NORMAL;

    result = VI_RkEnableChn(pAnalyserCtl->viSecPipe, pAnalyserCtl->viSecChn, &pAnalyserCtl->viSeAttr);
    if (result)
    {
        LOG_PrintError("Fail to create encode channel!\n");
        ret = -1;
        goto err_free;
    }  

    result = RK_MPI_VI_StartStream(pAnalyserCtl->viSecPipe, pAnalyserCtl->viSecChn);
    if (result)
    {
        LOG_PrintError("Fail to RK_MPI_VI_StartStream chn(%d) with ret=%d\n",
                        pAnalyserCtl->viSecChn, result);
        ret = -1;
        goto err_free;
    }

    /* BUGFIX: the mutex and list head must be initialized BEFORE the
     * worker thread starts -- ANALYSER_AlgThd locks the mutex, and the
     * old code initialized it only after pthread_create(). */
    pthread_mutex_init(&pAnalyserCtl->mutex, NULL);
    INIT_LIST_HEAD(&pAnalyserCtl->ivsHead);

    pAnalyserCtl->sensorId = 1;
    result = pthread_create(&pAnalyserCtl->algThd, NULL, ANALYSER_AlgThd, pAnalyserCtl);
    if (result)
    {
        LOG_PrintError("can't create thread: %s\n", strerror(result));
        pthread_mutex_destroy(&pAnalyserCtl->mutex);
        ret = -1;
        goto err_free;
    }

    pVideoAnalyser = &pAnalyserCtl->videoAnalyser;

    pVideoAnalyser->priv = pAnalyserCtl;
    pVideoAnalyser->addRef = ANALYSER_AddRef;
    pVideoAnalyser->release = ANALYSER_Release;
    pVideoAnalyser->getCaps = ANALYSER_GetCaps;
    pVideoAnalyser->start = ANALYSER_Start;
    pVideoAnalyser->stop = ANALYSER_Stop;
    pVideoAnalyser->setFormat = ANALYSER_SetFormat;
    pVideoAnalyser->setRuleConfig = ANALYSER_SetRuleConfig;
    pVideoAnalyser->setModuleConfig = ANALYSER_SetModuleConfig;
    pVideoAnalyser->getPacket = ANALYSER_GetPacket;
    pVideoAnalyser->releasePacket = ANALYSER_ReleasePacket;
    pVideoAnalyser->setSnapOption = ANALYSER_SetSnapOption;
    pVideoAnalyser->setModuleState = ANALYSER_SetModuleState;
    pVideoAnalyser->getVersion = ANALYSER_GetVersion;
    pVideoAnalyser->getVersions = ANALYSER_GetVersions;
    pVideoAnalyser->setTrafficWorkMode = ANALYSER_SetTrafficWorkMode;
    pVideoAnalyser->forceVideoBreaking = ANALYSER_ForceVideoBreaking;
    pVideoAnalyser->getVideoObjectData = ANALYSER_GetVideoObj;
    pVideoAnalyser->getDelayTime = ANALYSER_GetDelay;
    pVideoAnalyser->selectSensor = ANALYSER_SelectSensor;
    pVideoAnalyser->getSensorId = ANALYSER_GetSensorId;
    pVideoAnalyser->notifyUpgradeStatus = ANALYSER_SetUpgradeFlg;
    
    *ppVideoAnalyser = pVideoAnalyser;

    /* NOTE(review): refCount stays 0 for the first creator -- confirm
     * ANALYSER_Release expects that convention. */
    pAnalyserCtl_s = pAnalyserCtl;

    LOG_PrintInfo("Create successfully!\n");

    return 0;

err_free:
    /* Only reached for a freshly-allocated (non-singleton) control block.
     * NOTE(review): VI channels/SDK handles opened above are not torn
     * down here -- confirm whether partial-init rollback is required. */
    free(pAnalyserCtl);

    return ret;
}

#else

/*******************************************************************************
  Function:     selectVideoAnalyserMessage
  Description:  用于获取智能分析的消息，
                （注意：此函数会阻塞，直到获得消息或超时到才返回）
  Input:        - timeout: 超时返回时间，超时时间到，即使没有消息，函数将返回,
                           单位：ms; -1 表示无限时长;
                - pCount:  表示的是，message结构体的内存大小个数；
  Output:       - pMessage: 视频分析消息结构体，指向内存由调用者管理.
                - pCount: 表示返回的消息数量。
  Return:       0: 操作成功
                -1: 操作失败
  Others:       无
*******************************************************************************/
int PAL_API selectVideoAnalyserMessage(int timeout,
                                       VideoAnalyserMessage *pMessage,
                                       int *pCount)
{
    /* Stub for builds without analyser support.
     * BUGFIX: pCount is documented as an output — report zero messages
     * so the caller never reads an uninitialized count. */
    (void)timeout;
    (void)pMessage;

    if (pCount)
    {
        *pCount = 0;
    }

    return 0;
}

/*******************************************************************************
  Function:     getVideoAnalyseChannels
  Description:  得到视频分析最大通道的个数，暂时为1
  Input:        无
  Output:       无
  Return:       >=0: 返回的个数
                -1: 操作失败
  Others:       无
*******************************************************************************/
/* Analyser support is compiled out in this build: report failure. */
int getVideoAnalyseChannels(void)
{
    const int disabledRet = -1;

    return disabledRet;
}

/*******************************************************************************
  Function:     createVideoAnalyser
  Description:  创建视频分析接口
  Input:        - pAnalyseDesc: 视频分析接口描述结构VideoAnalyseDesc指针
  Output:       - ppVideoOutput: 视频分析指针的指针
  Return:       0: 操作成功
                -1: 操作失败
  Others:       无
*******************************************************************************/
/* Analyser support is compiled out in this build: creation always fails. */
int PAL_API createVideoAnalyser(VideoAnalyseDesc *pAnalyseDesc,
                                VideoAnalyser **ppVideoAnalyser)
{
    (void)pAnalyseDesc;
    (void)ppVideoAnalyser;

    return -1;
}

#endif
						
