/*
 * Copyright (c) 2021 HiSilicon (Shanghai) Technologies CO., LIMITED.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>

#include "sample_comm_nnie.h"
#include "nnie_sample_plug.h"
#include "sample_nnie_main.h"
#include "rfcn_face_detect.h"  // 人脸检测的头文件

#include "hi_ext_util.h"
#include "mpp_help.h"
#include "ai_plug.h"

#define PLUG_UUID          "\"hi.cnn_gender_classify\""
#define PLUG_DESC          "\"性别分类(cnn)\""     // UTF8 encode

#define FRM_WIDTH           640 // 输入到人脸检测模型的分辨率一定要是 640*360
#define FRM_HEIGHT          360

#define MODEL_FILE_GENDER    "./plugs/face_gender_resnet.wk" // 开源模型转换
#define CLASS_NUM            3  // 模型输出的分类数

#define RET_NUM_MAX          4 		// 返回number的最大数目
#define SCORE_MAX            4096 	// 最大概率对应的score
#define GENDER_MIN           30 		// 可接受的概率阈值(超过此值则返回给app)
#define DETECT_OBJ_MAX		 16      // 一帧图像中检测目标的最大数目
#define IMAGE_WIDTH		     224 		// 送到性别分类的模型IMAGE的分辨率是224*224
#define IMAGE_HEIGHT	     224
#define OSD_FONT_WIDTH		 16
#define OSD_FONT_HEIGHT		 24
#define DRAW_RETC_THICK		 2 		// 绘制图形的线条宽度
#define EXPANDRATE           10      // 人脸检测框的坐标扩大比例
#define WIDTH_LIMIT        	 32		// VGS 支持的32*32以上的分辨率
#define HEIGHT_LIMIT       	 32

static OsdSet* g_osdsGender = NULL;

/*
 * Plug-in profile as a JSON literal: plug-in UUID, human-readable
 * description (UTF-8) and the frame size the pipeline must feed this
 * plug-in (FRM_WIDTH x FRM_HEIGHT, fixed by the face-detect model).
 */
static const HI_CHAR CNN_GENDER_CLASSIFY[] = "{"
    "\"uuid\": " PLUG_UUID ","
    "\"desc\": " PLUG_DESC ","
    "\"frmWidth\": " HI_TO_STR(FRM_WIDTH) ","
    "\"frmHeight\": " HI_TO_STR(FRM_HEIGHT) ","
    "\"butt\": 0"
"}";

/* Return the plug-in's profile JSON (static storage; caller must not free). */
static const HI_CHAR* CnnGenderClassifyProf(void)
{
    return &CNN_GENDER_CLASSIFY[0];
}

/*
 * Load the gender classifier and bring up the face-detect model.
 * model: out-param, receives the classifier handle (0 on failure).
 * osds: OSD region set used later by Cal() to draw labels.
 * Returns the CnnCreate() result (negative on failure).
 */
static HI_S32 CnnGenderClassifyLoad(uintptr_t* model, OsdSet* osds)
{
    SAMPLE_SVP_NNIE_CFG_S *self = NULL;
    HI_S32 ret;

    g_osdsGender = osds;
    g_rfcnCfg.classNum = CLASS_NUM; // face-detect net is configured for CLASS_NUM outputs
    ret = CnnCreate(&self, MODEL_FILE_GENDER);
    if (ret < 0) {
        /* Classifier failed to load: report the error without initializing
         * the face detector — the host will not call Unload() for a failed
         * Load(), so anything initialized here would leak. */
        *model = 0;
        return ret;
    }
    *model = (uintptr_t)self;

    FaceDetectInit(); // bring up the face detection model consumed by Cal()
    return ret;
}

/*
 * Release everything acquired by Load(): the classifier handle, any OSD
 * regions still on screen, and the face-detect model.
 * Always returns HI_SUCCESS.
 */
static HI_S32 CnnGenderClassifyUnload(uintptr_t model)
{
    CnnDestroy((SAMPLE_SVP_NNIE_CFG_S*)model);

    if (g_osdsGender != NULL) {
        OsdsClear(g_osdsGender);
        g_osdsGender = NULL;
    }

    FaceDetectExit(); // counterpart of FaceDetectInit() in Load()
    return HI_SUCCESS;
}

/**
    将计算结果打包为resJson.
*/
/**
 * Pack the recognition results into a JSON array string.
 * items/itemNum: classification results; entries below GENDER_MIN percent
 * stop the loop (items are presumably sorted by descending score — TODO confirm
 * against CnnCalU8c1Img).
 * Returns a heap buffer the caller must free().
 */
HI_CHAR* CnnGenderClassifyToJson(const RecogNumInfo items[], HI_S32 itemNum)
{
    HI_S32 jsonSize = TINY_BUF_SIZE + itemNum * TINY_BUF_SIZE; // TINY_BUF_SIZE per item plus brackets
    HI_CHAR *jsonBuf = (HI_CHAR*)malloc(jsonSize);
    HI_ASSERT(jsonBuf);
    HI_S32 offset = 0;
    HI_S32 res;

    /* snprintf_s returns -1 on error/truncation; adding that to offset would
     * corrupt every subsequent write, so each return value is checked. */
    res = snprintf_s(jsonBuf + offset, jsonSize - offset, jsonSize - offset - 1, "[");
    HI_ASSERT(res >= 0);
    offset += res;
    for (HI_S32 i = 0; i < itemNum; i++) {
        const RecogNumInfo *item = &items[i];
        uint32_t score = item->score * HI_PER_BASE / SCORE_MAX; // raw score -> percent
        if (score < GENDER_MIN) {
            break; // remaining entries are below the reporting threshold
        }

        res = snprintf_s(jsonBuf + offset, jsonSize - offset, jsonSize - offset - 1,
            "%s{ \"classify num\": %u, \"score\": %u }", (i == 0 ? "\n  " : ", "),
            (unsigned)item->num, (unsigned)score); // (unsigned): standard type, unlike 'uint'
        HI_ASSERT(res >= 0);
        offset += res;
        HI_ASSERT(offset < jsonSize);
    }
    res = snprintf_s(jsonBuf + offset, jsonSize - offset, jsonSize - offset - 1, "]");
    HI_ASSERT(res >= 0);
    offset += res;
    HI_ASSERT(offset < jsonSize);
    return jsonBuf;
}

/**
    添加性别信息到矩形框旁
*/
/**
 * Draw a "<num>_<gender>,<score> %" label above a detected face box.
 * resBuf.num: 0 = male, 1 = female, anything else = unknown.
 * The OSD region is released (not drawn) when the label would fall above
 * the top edge of the frame.
 */
static void FaceDetectAddTxt(const RectBox box, const RecogNumInfo resBuf, uint32_t color)
{
    HI_OSD_ATTR_S osdRgn;
    char osdTxt[TINY_BUF_SIZE];
    const HI_CHAR *genderName = NULL; // const: points at string literals
    HI_ASSERT(g_osdsGender);
    if (resBuf.num == 0u) {
        genderName = "male";
    } else if (resBuf.num == 1u) {
        genderName = "female";
    } else {
        genderName = "unknow"; // NOTE(review): displayed label; typo kept for byte-compatibility
    }

    uint32_t score = (resBuf.score) * HI_PER_BASE / SCORE_MAX; // raw score -> percent
    /* %u matches the unsigned num/score arguments; the previous %d on
     * unsigned values is undefined behavior per the C printf spec. */
    int res = snprintf_s(osdTxt, sizeof(osdTxt), sizeof(osdTxt) - 1, "%u_%s,%u %%",
        (unsigned)resBuf.num, genderName, (unsigned)score);
    HI_ASSERT(res > 0);

    int osdId = OsdsCreateRgn(g_osdsGender);
    HI_ASSERT(osdId >= 0);

    /* Round coordinates down to even values (HI_OVEN_BASE alignment). */
    int x = box.xmin / HI_OVEN_BASE * HI_OVEN_BASE;
    int y = (box.ymin - 30) / HI_OVEN_BASE * HI_OVEN_BASE; // 30px above the box top — TODO name this constant
    if (y < 0) {
        /* Label would be off-screen: release the region instead of drawing. */
        LOGD("osd_y < 0, y=%d\n", y);
        OsdsDestroyRgn(g_osdsGender, osdId);
    } else {
        TxtRgnInit(&osdRgn, osdTxt, x, y, color, OSD_FONT_WIDTH, OSD_FONT_HEIGHT);
        OsdsSetRgn(g_osdsGender, osdId, &osdRgn);
    }
}

/*
 * Per-frame calculation: detect faces in srcFrm, classify each face's
 * gender, draw boxes/labels onto resFrm, and return results as JSON.
 * resJson: out-param, heap buffer the caller must free().
 */
static HI_S32 CnnGenderClassifyCal(uintptr_t model,
    VIDEO_FRAME_INFO_S *srcFrm, VIDEO_FRAME_INFO_S *resFrm, HI_CHAR** resJson)
{
    SAMPLE_SVP_NNIE_CFG_S *self = (SAMPLE_SVP_NNIE_CFG_S*)model;
    IVE_IMAGE_S img;
    DetectObjInfo resBuf[DETECT_OBJ_MAX] = {0};
    RectBox boxs[DETECT_OBJ_MAX] = {0};
    RectBox cnnBoxs[DETECT_OBJ_MAX] = {0}; // face boxes fed to the gender classifier
    RecogNumInfo numInfo[RET_NUM_MAX] = {0};
    int detectNum = 0;
    HI_S32 resLen = 0;
    HI_S32 ret;
    IVE_IMAGE_S imgIn;
    IVE_IMAGE_S imgDst;
    VIDEO_FRAME_INFO_S frmIn;
    VIDEO_FRAME_INFO_S frmDst;

    OsdsClear(g_osdsGender); // drop last frame's labels before drawing new ones

    ret = FrmToOrigImg(srcFrm, &img);
    HI_EXP_RET(ret < 0, ret, "CnnGenderClassifyCal FAIL, for frm_to_img FAIL\n");
    /* Run face detection; detectNum faces found, resBuf holds their boxes. */
    detectNum = FaceDetectCal(&img, resBuf);

    for (int i = 0; i < detectNum; i++) {
        cnnBoxs[i] = resBuf[i].box;
        /* Crop the face region out of the source image. */
        ret = ImgYuvCrop(&img, &imgIn, &cnnBoxs[i]);
        HI_EXP_LOGE(ret < 0, "ImgYuvCrop FAIL, ret = %d\n", ret);
        if (ret >= 0) {
            /* Only classify faces the VGS resizer can handle (>= 32x32). */
            if ((imgIn.u32Width >= WIDTH_LIMIT) && (imgIn.u32Height >= HEIGHT_LIMIT)) {
                COMPRESS_MODE_E enCompressMode = srcFrm->stVFrame.enCompressMode;
                ret = OrigImgToFrm(&imgIn, &frmIn);
                frmIn.stVFrame.enCompressMode = enCompressMode; // resize needs the source compression mode
                ret = MppFrmResize(&frmIn, &frmDst, IMAGE_WIDTH, IMAGE_HEIGHT);
                ret = FrmToOrigImg(&frmDst, &imgDst);

                ret = CnnCalU8c1Img(self, &imgDst, numInfo, HI_ARRAY_SIZE(numInfo), &resLen);
                HI_EXP_LOGE(ret < 0, "CnnCalU8c1Img FAIL, ret = %d\n", ret);
                HI_ASSERT(resLen <= sizeof(numInfo) / sizeof(numInfo[0]));
                /* Map the box from model coordinates to the output frame. */
                RectBoxTran(&cnnBoxs[i], FRM_WIDTH, FRM_HEIGHT,
                            resFrm->stVFrame.u32Width, resFrm->stVFrame.u32Height);
                FaceDetectAddTxt(cnnBoxs[i], numInfo[0], ARGB1555_WHITE);

                MppFrmDestroy(&frmDst);
            }
            /* Destroy the crop only on this path: when ImgYuvCrop failed,
             * imgIn was never created, and the original code both read its
             * (uninitialized) size fields and destroyed it — fixed here. */
            IveImgDestroy(&imgIn);
        }

        /* Box drawing still happens for every detected face. */
        RectBox *box = &resBuf[i].box;
        RectBoxTran(box, FRM_WIDTH, FRM_HEIGHT,
                    resFrm->stVFrame.u32Width, resFrm->stVFrame.u32Height);
        LOGD("FaceDetect: {%d, %d, %d, %d}, objNum = %d\n",
            box->xmin, box->ymin, box->xmax, box->ymax, detectNum);
        boxs[i] = *box;
    }

    if (detectNum > 0) {
        MppFrmDrawRects(resFrm, boxs, detectNum, RGB888_RED, DRAW_RETC_THICK);
    }

    /* Package the classification results for the app layer. */
    HI_CHAR *jsonBuf = CnnGenderClassifyToJson(numInfo, resLen);
    *resJson = jsonBuf;

    return ret;
}

/* Interface table handed to the plug-in host via AiPlugItf(). */
static const AiPlug G_CNN_GENDER_CLASSIFY_ITF = {
    .Prof = CnnGenderClassifyProf,
    .Load = CnnGenderClassifyLoad,
    .Unload = CnnGenderClassifyUnload,
    .Cal = CnnGenderClassifyCal,
};

const AiPlug* AiPlugItf(uint32_t* magic)
{
    if (magic) {
        *magic = AI_PLUG_MAGIC;
    }

    return (AiPlug*)&G_CNN_GENDER_CLASSIFY_ITF;
}
