/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include <numeric>

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"
#include "tensorflow/lite/micro/kernels/kernel_util.h"
#include "tensorflow/lite/micro/micro_utils.h"

#include <iostream>
#include <stdint.h>
#include <math.h>
#include <string.h>
// Maximum number of distinct classes tracked by the per-class NMS linked
// lists.  Define as 0 to build the minimal (class-agnostic) variant.
#ifndef NMS_PER_CLASS_MAX_CLASS
#define NMS_PER_CLASS_MAX_CLASS 8
#endif 

// One box location as 4 floats.
typedef float locfp_t[4];  
// One box location as 4 affine-quantized int8 values.
typedef int8_t loci8_t[4];  

// View of a quantized (int8) box-location tensor.
typedef struct tagLocI8Tensor {
	// affine quantization: f = (i8 - z) * s
	float s;    // scale
	int z;      // zero point
	uint32_t itemCnt;   // number of boxes
	loci8_t* pAry;      // itemCnt entries of 4 int8 coords each
}LocI8Tensor_t;

// View of a quantized (int8) class-score tensor.
typedef struct tagClsI8Tensor {
	// affine quantization: f = (i8 - z) * s
	float s;    // scale
	int z;      // zero point
	uint32_t itemCnt;   // number of boxes
	int8_t* pAry;       // itemCnt * clsCnt scores, box-major
	uint32_t clsCnt;    // scores per box (including background)
	uint32_t bgClsNdx;	// index of the background class (skipped)
}ClsI8Tensor_t;

// View of a float box-location tensor.
typedef struct tagLocF32Tensor {
	uint32_t itemCnt;  // number of boxes
	locfp_t* pAry;     // itemCnt entries of 4 float coords each
}LocF32Tensor_t;

// View of a float class-score tensor.
typedef struct tagClsF32Tensor {
	uint32_t itemCnt;  // number of boxes
	float* pAry;       // itemCnt * clsCnt scores, box-major
	uint32_t clsCnt;   // scores per box (including background)
	uint32_t bgClsNdx;	// index of the background class (skipped)
}ClsF32Tensor_t;

// Minimal NMS work item: box stored as {2*cy, 2*cx, 2*h, 2*w} in image
// pixels, winning class index, and confidence scaled by 10000.
typedef struct tagNmsCCHW_Min {
	int16_t cchwX2[4]; // {2*cy, 2*cx, 2*h, 2*w} in image pixels
	uint16_t cls;   // winning class index
	uint16_t cfd_x1W; // confidence * 10000
}NmsCCHW_Min_t;

#if NMS_PER_CLASS_MAX_CLASS != 0
// Work item extended with doubly-linked-list indices chaining items of the
// same class (0xFFFF terminates a chain; a node with prevNdx == 0xFFFF is
// treated as deleted, and a list head stores its own index in prevNdx).
typedef struct tagNmsCCHW_Linked {
	int16_t cchwX2[4]; 
	uint16_t cls;   
	uint16_t cfd_x1W; 
	uint16_t prevNdx;
	uint16_t nextNdx;
}NmsCCHW_t;
#else
typedef struct tagNmsCCHW_Min NmsCCHW_t;
#endif

// Box as center/size floats.
typedef struct tagCCWH_F32 {
	float cx, cy, w, h;
}CCWH_F32_t;

// Box as normalized corners in TFLite detection-box order.
typedef struct tagYXYX_F32 {
	float y1, x1, y2, x2;
}YXYX_F32_t;


// Pointers into the four TFLM detection-postprocess output tensors.
typedef struct tagNmsOutput_F32 {
	float* pNumDetections;
	YXYX_F32_t* pYXYXs;
	float* pClss;
	float* pCfds;
}NmsOutput_F32_t;


#define MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define MIN(a,b) ( (a) < (b) ? (a) : (b) )

namespace tflite {
namespace {

/**
 * This version of detection_postprocess is specific to TFLite Micro. It
 * contains the following differences between the TFLite version:
 *
 * 1.) Temporaries (temporary tensors) - Micro use instead scratch buffer API.
 * 2.) Output dimensions - the TFLite version does not support undefined out
 * dimensions. So model must have static out dimensions.
 */

// Input tensors
constexpr int kInputTensorBoxEncodings = 0;
constexpr int kInputTensorClassPredictions = 1;
constexpr int kInputTensorAnchors = 2;

// Output tensors
constexpr int kOutputTensorDetectionBoxes = 0;
constexpr int kOutputTensorDetectionClasses = 1;
constexpr int kOutputTensorDetectionScores = 2;
constexpr int kOutputTensorNumDetections = 3;

constexpr int kNumCoordBox = 4;
constexpr int kBatchSize = 1;

// Capacity of the scratch work array: max candidates kept before NMS.
constexpr int nmsAryCap = 1000;
// Non-zero: run NMS independently per class (uses the linked-list scheme).
constexpr int isPerClass = 1;

// Working image size used to convert normalized coords to int16 pixels.
constexpr uint16_t Default_ImgH = 1000;
constexpr uint16_t Default_ImgW = 1000;

// Box decode scales parsed from the op's flexbuffer options.
struct CenterSizeEncoding {
  float y;
  float x;
  float h;
  float w;
};

// Per-node state: options parsed in Init() plus quantization params and the
// scratch-buffer handle cached in Prepare().
struct OpData {
  int max_detections;
  float non_max_suppression_score_threshold;
  float intersection_over_union_threshold;
  int num_classes;
  CenterSizeEncoding scale_values;

  int nmsAry_idx;  // scratch-buffer handle for the NmsCCHW_t work array

  // Cached tensor scale and zero point values for quantized operations
  TfLiteQuantizationParams input_box_encodings;
  TfLiteQuantizationParams input_class_predictions;
  TfLiteQuantizationParams input_anchors;
};

// Parses the flexbuffer-encoded custom options into a persistently
// allocated OpData and returns it as the node's user_data.
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);

  auto* data = reinterpret_cast<OpData*>(
      context->AllocatePersistentBuffer(context, sizeof(OpData)));

  const flexbuffers::Map& options = micro::FlexbuffersWrapperGetRootAsMap(
      reinterpret_cast<const uint8_t*>(buffer), length);

  data->max_detections =
      micro::FlexbuffersWrapperAsInt32(options, "max_detections");
  data->non_max_suppression_score_threshold =
      micro::FlexbuffersWrapperAsFloat(options, "nms_score_threshold");
  data->intersection_over_union_threshold =
      micro::FlexbuffersWrapperAsFloat(options, "nms_iou_threshold");
  data->num_classes = micro::FlexbuffersWrapperAsInt32(options, "num_classes");
  data->scale_values.y = micro::FlexbuffersWrapperAsFloat(options, "y_scale");
  data->scale_values.x = micro::FlexbuffersWrapperAsFloat(options, "x_scale");
  data->scale_values.h = micro::FlexbuffersWrapperAsFloat(options, "h_scale");
  data->scale_values.w = micro::FlexbuffersWrapperAsFloat(options, "w_scale");

  return data;
}

// Nothing to release: OpData lives in the persistent arena.
void Free(TfLiteContext* context, void* buffer) {}

// Validates input/output tensor counts and ranks, caches the quantization
// parameters (TfLiteEvalTensor does not carry them at Eval time) and
// reserves the scratch arena used for the NMS work array.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  auto* op_data = static_cast<OpData*>(node->user_data);

  // Inputs: box_encodings, scores, anchors
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
  const TfLiteTensor* input_box_encodings =
      GetInput(context, node, kInputTensorBoxEncodings);
  const TfLiteTensor* input_class_predictions =
      GetInput(context, node, kInputTensorClassPredictions);
  const TfLiteTensor* input_anchors =
      GetInput(context, node, kInputTensorAnchors);
  TF_LITE_ENSURE(context, input_box_encodings != nullptr);
  TF_LITE_ENSURE(context, input_class_predictions != nullptr);
  TF_LITE_ENSURE(context, input_anchors != nullptr);
  TF_LITE_ENSURE_EQ(context, NumDimensions(input_box_encodings), 3);
  TF_LITE_ENSURE_EQ(context, NumDimensions(input_class_predictions), 3);
  TF_LITE_ENSURE_EQ(context, NumDimensions(input_anchors), 2);

  // Outputs: detection_boxes, detection_classes, detection_scores,
  // num_detections
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 4);

  op_data->input_box_encodings.scale = input_box_encodings->params.scale;
  op_data->input_box_encodings.zero_point =
      input_box_encodings->params.zero_point;
  op_data->input_class_predictions.scale =
      input_class_predictions->params.scale;
  op_data->input_class_predictions.zero_point =
      input_class_predictions->params.zero_point;
  op_data->input_anchors.scale = input_anchors->params.scale;
  op_data->input_anchors.zero_point = input_anchors->params.zero_point;

  // Fixed-capacity scratch array holding the NMS candidates.  The original
  // code ignored the returned status.
  TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena(
      context, nmsAryCap * sizeof(NmsCCHW_t), &op_data->nmsAry_idx));

  return kTfLiteOk;
}

// Computes IoU * 10000 between two boxes given as {2*cy, 2*cx, 2*h, 2*w}
// (the doubling cancels in the ratio).  Returns 0 when the boxes do not
// overlap.
uint32_t _Nms_IOUx1W_CCHW(int16_t cchw1[4], int16_t cchw2[4]) {
	// Absolute center distances per axis.
	int dCY = cchw1[0] - cchw2[0];
	int dCYAbs = dCY > 0 ? dCY : -dCY;
	int dCX = cchw1[1] - cchw2[1];
	int dCXAbs = dCX > 0 ? dCX : -dCX;

	// Boxes overlap on an axis iff the center distance is below the mean
	// extent of the two boxes on that axis.
	int dCYMax = (cchw1[2] + cchw2[2]) >> 1;
	int dCXMax = (cchw1[3] + cchw2[3]) >> 1;
	if (dCYAbs >= dCYMax || dCXAbs >= dCXMax) {
		return 0;
	}
	
	// Intersection / union extents per axis.
	int hI = dCYMax - dCYAbs;
	int hU = dCYMax * 2 - hI;
	int wI = dCXMax - dCXAbs;
	int wU = dCXMax * 2 - wI;

	// Clamp: the intersection can never exceed the smaller box and the
	// union can never be smaller than the larger box.
	int hMin = cchw1[2] < cchw2[2] ? cchw1[2] : cchw2[2];
	int hMax = cchw1[2] > cchw2[2] ? cchw1[2] : cchw2[2];
	int wMin = cchw1[3] < cchw2[3] ? cchw1[3] : cchw2[3];
	int wMax = cchw1[3] > cchw2[3] ? cchw1[3] : cchw2[3];

	if (hU < hMax) hU = hMax;
	if (wU < wMax) wU = wMax;
	if (hI > hMin) hI = hMin;
	if (wI > wMin) wI = wMin;

	// Do BOTH area products in 64 bits: hU and wU can each reach ~131068,
	// so hU * wU can exceed INT32_MAX (signed overflow was UB in the
	// original, which only widened the numerator).
	uint32_t iou_x_1W = (uint32_t)(((uint64_t)hI * (uint64_t)wI * 10000ULL) /
	                               ((uint64_t)hU * (uint64_t)wU));
	return iou_x_1W;
}

// Optional soft-NMS: instead of hard-suppressing an overlapping box, decay
// its confidence with a Gaussian of the IoU and drop it only once it falls
// under the score threshold.  Disabled here by the immediate #undef.
#define USE_SOFT_NMS
#define SOFT_NMS_DECAY(iou) ( expf(-(iou)*(iou)/0.36f) )
#undef USE_SOFT_NMS


// Writes the first cnt surviving NMS items to the float output tensors,
// converting boxes from doubled pixel coordinates back to normalized
// {y1, x1, y2, x2} using the default working image size.
static void _Nms_Output(NmsCCHW_t nmsAry[], uint32_t cnt, NmsOutput_F32_t* pOut) {
	pOut->pNumDetections[0] = (float)cnt;
	for (uint32_t n = 0; n < cnt; n++) {
		const NmsCCHW_t* pItem = nmsAry + n;
		pOut->pCfds[n] = pItem->cfd_x1W / 10000.0f;
		pOut->pClss[n] = (float)pItem->cls;
		// cchwX2 holds doubled values; halve h/w once, centers below.
		int16_t h = pItem->cchwX2[2] >> 1;
		int16_t w = pItem->cchwX2[3] >> 1;
		YXYX_F32_t* pBox = pOut->pYXYXs + n;
		pBox->y1 = ((pItem->cchwX2[0] - h) >> 1) * 1.0f / Default_ImgH;
		pBox->x1 = ((pItem->cchwX2[1] - w) >> 1) * 1.0f / Default_ImgW;
		pBox->y2 = pBox->y1 + h * 1.0f / Default_ImgH;
		pBox->x2 = pBox->x1 + w * 1.0f / Default_ImgW;
	}
}

// Decides whether a candidate box should be suppressed given its IoU
// (scaled by 10000) with the current winner.  In the (disabled) soft-NMS
// build the candidate's confidence is decayed instead and the box is only
// dropped once it falls under the score threshold; the default build does
// plain hard suppression against the IoU threshold.
uint32_t _Nms_IsToDel(uint32_t iou_x1W, uint32_t confThreshold_x1W, uint32_t iouThreshold_x1W, NmsCCHW_t* pIter)
{
#ifdef USE_SOFT_NMS
	uint32_t decay_x1024 = iou_x1W < 3000 ? 1024 :
		(uint32_t)(SOFT_NMS_DECAY((iou_x1W - 3000) / 7000.0f) * 1024);
	uint32_t decayedCfd = (pIter->cfd_x1W * decay_x1024 + 512) >> 10;
	if (decayedCfd < confThreshold_x1W) {
		return 1;
	}
	pIter->cfd_x1W = (uint16_t)decayedCfd;
	return 0;
#else
	return (iou_x1W >= iouThreshold_x1W) ? 1u : 0u;
#endif
}

// Runs non-max suppression in place on nmsAry and writes the survivors to
// pOut.  Returns the number of detections kept.
//
// Class-agnostic mode (isPerClass == 0): selection-sort style - the most
// confident remaining item is swapped into slot i, then every later item
// that _Nms_IsToDel says to suppress is compacted away.
//
// Per-class mode: items of each class are chained in doubly linked lists
// (prevNdx/nextNdx built by the callers; prevNdx == 0xFFFF marks a deleted
// node).  The winner is searched only within one chain and suppressed items
// are unlinked.  Only the NmsCCHW_Min_t prefix of the structs is swapped so
// the chain indices stay valid.
int _Nms_DoNms(uint32_t isPerClass, uint32_t inCnt, NmsCCHW_t nmsAry[], uint32_t nmsAryUsedCnt,
	NmsOutput_F32_t* pOut, uint32_t confThreshold_x1W, uint32_t iouThreshold_x1W, uint32_t maxDetCnt)
{
	uint32_t i, j;
	uint32_t nmsItemCnt = nmsAryUsedCnt;
	NmsCCHW_Min_t tmp;
	uint32_t iterNdx = 0;
	(void)inCnt;  // kept for interface compatibility
	for (i = 0; i < maxDetCnt; i++, iterNdx++) {
		if (nmsItemCnt == i) {
			break;
		}
		NmsCCHW_Min_t* pCfdMax = 0, * pCfdCur;
		// Was (int32_t)(1 << 31): shifting into the sign bit is UB.
		int32_t maxVal = INT32_MIN;
		NmsCCHW_t* pIter;
		if (!isPerClass) {
			// Find the most confident item among [i, nmsItemCnt).
			pCfdCur = (NmsCCHW_Min_t*)(nmsAry + i);
			for (j = i, pIter = nmsAry + j; j < nmsItemCnt; j++, pIter++) {
				if (pIter->cfd_x1W > maxVal)
				{
					maxVal = pIter->cfd_x1W;
					pCfdMax = (NmsCCHW_Min_t*)pIter;
				}
			}
		}
#if NMS_PER_CLASS_MAX_CLASS > 0
		else {
			// Skip nodes already unlinked to find the head of the next
			// live per-class chain.
			while (nmsAry[iterNdx].prevNdx == 0xFFFF) {
				iterNdx++;
			}
			pCfdCur = (NmsCCHW_Min_t*)(nmsAry + iterNdx);
			uint32_t nextNdx = iterNdx;
			// Find the most confident item within this chain.
			do {
				pIter = nmsAry + nextNdx;
				if (pIter->cfd_x1W > maxVal) {
					maxVal = pIter->cfd_x1W;
					pCfdMax = (NmsCCHW_Min_t*)pIter;
				}
				nextNdx = pIter->nextNdx;
			} while (nextNdx != 0xFFFF);
		}
#endif
		// Swap only the Min prefix so linked-list indices stay in place.
		if (pCfdCur != pCfdMax) {
			tmp = pCfdCur[0];
			pCfdCur[0] = pCfdMax[0];
			pCfdMax[0] = tmp;
		}

		uint32_t iou_x1W = 0;
		if (isPerClass == 0) {
			// Compact away every later item overlapping the winner.
			for (j = i + 1; j < nmsItemCnt; ) {
				uint32_t isToDel = 0;
				iou_x1W = _Nms_IOUx1W_CCHW(nmsAry[i].cchwX2, nmsAry[j].cchwX2);
				isToDel = _Nms_IsToDel(iou_x1W, confThreshold_x1W, iouThreshold_x1W, nmsAry + j);
				if (isToDel) {
					nmsAry[j] = nmsAry[nmsItemCnt - 1];
					nmsItemCnt--;
				}
				else {
					j++;
				}
			}
		}
#if NMS_PER_CLASS_MAX_CLASS > 0
		else {
			// Walk the winner's chain and unlink suppressed items.
			uint32_t nextNdx;
			NmsCCHW_t* pWinner = nmsAry + iterNdx;
			for (nextNdx = pWinner->nextNdx; nextNdx != 0xFFFF; ) {
				pIter = nmsAry + nextNdx;
				uint32_t isToDel = 0;
				iou_x1W = _Nms_IOUx1W_CCHW(pWinner->cchwX2, pIter->cchwX2);
				isToDel = _Nms_IsToDel(iou_x1W, confThreshold_x1W, iouThreshold_x1W, pIter);
				if (isToDel) {
					nmsAry[pIter->prevNdx].nextNdx = pIter->nextNdx;
					if (pIter->nextNdx != 0xFFFF) nmsAry[pIter->nextNdx].prevNdx = pIter->prevNdx;
					pIter->prevNdx = 0xFFFF;  // mark node as deleted
					nmsItemCnt--;
				}
				nextNdx = pIter->nextNdx;
			}
		}
		// Per-class winners live at iterNdx: copy into output slot i.
		if ((NmsCCHW_t*)pCfdCur != nmsAry + i) {
			((NmsCCHW_Min_t*)(nmsAry + i))[0] = pCfdCur[0];
		}
#endif

	}
	// (Removed unused "AfterNMS:" label - no goto targets it.)
	_Nms_Output(nmsAry, i, pOut);
	return i;
}
// Decodes the int8 SSD outputs into nmsAry candidates (per-box argmax over
// classes, confidence threshold, optional anchor-based box decode) and runs
// _Nms_DoNms on them.  Returns the number of final detections.
//
// pABs: anchors as {cy, cx, h, w} in normalized coordinates.
// imgHW: working image {height, width} used to scale boxes to int16 pixels.
// pDroppedCnt: out (optional) - boxes not examined after the work array
// filled up.
int RS_SSDNms_I8(OpData* op_data, uint32_t isPerClass, LocI8Tensor_t* pLocTns, uint32_t isEncoded, locfp_t* pABs, uint16_t imgHW[2],
	ClsI8Tensor_t* pClsTns, NmsCCHW_t nmsAry[], uint32_t nmsAryCapacity, NmsOutput_F32_t* pOut,
	float confThreshold, uint32_t maxDetCnt, uint32_t* pDroppedCnt)
{
	uint32_t i, j;
	// decode
	uint32_t inCnt = pLocTns->itemCnt;
	int32_t confThreshold_x1W = int32_t(confThreshold * 10000.0f);
	loci8_t* pLocI8 = pLocTns->pAry;
	// Location and class tensors have independent quantization params; the
	// original code dequantized class scores with the location tensor's
	// scale/zero-point.
	const float locS = pLocTns->s;
	const int locZ = pLocTns->z;
	const float clsS = pClsTns->s;
	const int clsZ = pClsTns->z;
	int8_t* pClsData = pClsTns->pAry;
	NmsCCHW_t* p = nmsAry;
	uint32_t nmsAryUsedCnt = 0;
#if NMS_PER_CLASS_MAX_CLASS > 0
	// Per-class chain tails; 0xFF bytes yield 0xFFFF ("empty") per uint16.
	uint16_t prevNdc[NMS_PER_CLASS_MAX_CLASS];
	memset(prevNdc, 0xFF, sizeof(prevNdc));
#endif
	for (i = 0; i < inCnt; i++, pLocI8++)
	{
		float enc, dec;
		uint32_t maxAt = 0;
		// Was (int32_t)(1 << 31): shifting into the sign bit is UB.
		int32_t maxVal = INT32_MIN;
		for (j = 0; j < pClsTns->clsCnt; j++, pClsData++) {
			if (maxVal < *pClsData) {
				maxVal = *pClsData;
				maxAt = j;
			}
		}
		if (maxAt == pClsTns->bgClsNdx) {
			continue;
		}
		// Dequantize the winning score with the *class* tensor params.
		int32_t conf_x1W = (int32_t)((maxVal - clsZ) * clsS * 10000.0f);
		if (conf_x1W < confThreshold_x1W) {
			continue;
		}
		// NOTE(review): per-class chains assume maxAt <
		// NMS_PER_CLASS_MAX_CLASS - confirm clsCnt never exceeds it.
		p->cls = maxAt;
		p->cfd_x1W = conf_x1W;
		if (isEncoded) {
			float *scales = (float*)(&(op_data->scale_values));
			// h, w: dim = exp(enc / scale) * anchor_dim
			for (j = 2; j < 4; j++) {
				enc = (pLocI8[0][j] - locZ) * locS;
				dec = expf(enc / scales[j]) * pABs[i][j];
				p->cchwX2[j] = (int16_t)(dec * imgHW[j - 2] * 2 );
			}
			// cy, cx: center = enc / scale * anchor_dim + anchor_center
			for (j = 0; j < 2; j++) {
				enc = (pLocI8[0][j] - locZ) * locS;
				dec = enc / scales[j] * pABs[i][j + 2] + pABs[i][j]; 
				p->cchwX2[j] = (int16_t)(dec * imgHW[j] * 2 );
			}
		}
		else {
			// Boxes already plain {cy, cx, h, w}: dequantize and scale to
			// doubled pixel coordinates (even index -> H, odd -> W).
			for (j = 0; j < 4; j++) {
				dec = (pLocI8[0][j] - locZ) * locS;
				p->cchwX2[j] = (int16_t)(dec * imgHW[j & 1] * 2 );
			}
		}
#if NMS_PER_CLASS_MAX_CLASS > 0
		// Append this item to its class's doubly linked chain.
		if (prevNdc[maxAt] != 0xFFFF) {
			nmsAry[prevNdc[maxAt]].nextNdx = nmsAryUsedCnt;
			p->prevNdx = prevNdc[maxAt];
		}
		else {
			p->prevNdx = nmsAryUsedCnt;  // chain head points at itself
		}
		prevNdc[maxAt] = nmsAryUsedCnt;
		p->nextNdx = 0xFFFF;
#endif
		p++;
		if (++nmsAryUsedCnt >= nmsAryCapacity)
		{
			i++;  // count this box as consumed before reporting drops
			break;
		}
	}
	if (pDroppedCnt) pDroppedCnt[0] = inCnt - i;
	// (Removed unreachable "return i;" that followed this return.)
	return _Nms_DoNms(isPerClass, inCnt, nmsAry, nmsAryUsedCnt, pOut, confThreshold_x1W, 
					 (uint32_t)(op_data->intersection_over_union_threshold * 10000.0f + 0.5f), maxDetCnt);
}

// Float-input counterpart of RS_SSDNms_I8: decodes the float SSD outputs
// into nmsAry candidates and runs _Nms_DoNms.  Uses op_data's score
// threshold and max_detections.  Returns the number of final detections.
//
// pABs: anchors as {cy, cx, h, w} in normalized coordinates.
// imgHW: working image {height, width} used to scale boxes to int16 pixels.
int RS_SSDNms_F32(OpData* op_data, uint32_t isPerClass, LocF32Tensor_t* pLocTns, uint32_t isEncoded, locfp_t* pABs, uint16_t imgHW[2],
	ClsF32Tensor_t* pClsTns, NmsCCHW_t nmsAry[], uint32_t nmsAryCapacity, NmsOutput_F32_t* pOut,
	uint32_t* pDroppedCnt)
{
	uint32_t i, j;
	// decode
	uint32_t inCnt = pLocTns->itemCnt;

	int32_t confThreshold_x1W = int32_t(op_data->non_max_suppression_score_threshold * 10000.0f);
	locfp_t* pLocF32 = pLocTns->pAry;
	float* pClsData = pClsTns->pAry;
	NmsCCHW_t* p = nmsAry;
	uint32_t nmsAryUsedCnt = 0;
#if NMS_PER_CLASS_MAX_CLASS > 0
	// Per-class chain tails; 0xFF bytes yield 0xFFFF ("empty") per uint16.
	uint16_t prevNdc[NMS_PER_CLASS_MAX_CLASS];
	memset(prevNdc, 0xFF, sizeof(prevNdc));
#endif
	for (i = 0; i < inCnt; i++, pLocF32++)
	{
		float enc, dec;
		uint32_t maxAt = 0;
		float maxVal = -1E8f;  // sentinel below any realistic score
		for (j = 0; j < pClsTns->clsCnt; j++, pClsData++) {
			if (maxVal < *pClsData) {
				maxVal = *pClsData;
				maxAt = j;
			}
		}
		if (maxAt == pClsTns->bgClsNdx) {
			continue;
		}
		int32_t conf_x1W = (int32_t)(maxVal * 10000.0f);
		if (conf_x1W < confThreshold_x1W) {
			continue;
		}
		// NOTE(review): per-class chains assume maxAt <
		// NMS_PER_CLASS_MAX_CLASS - confirm clsCnt never exceeds it.
		p->cls = maxAt;
		p->cfd_x1W = conf_x1W;
		if (isEncoded) {
			float *scales = (float*)(&(op_data->scale_values));
			// h, w: dim = exp(enc / scale) * anchor_dim
			for (j = 2; j < 4; j++) {
				enc = pLocF32[0][j];  
				dec = expf(enc / scales[j]) * pABs[i][j];
				p->cchwX2[j] = (int16_t)(dec * imgHW[j - 2] * 2 );
			}
			// cy, cx: center = enc / scale * anchor_dim + anchor_center
			for (j = 0; j < 2; j++) {
				enc = pLocF32[0][j] / scales[j];
				dec = enc * pABs[i][j + 2] + pABs[i][j]; 
				p->cchwX2[j] = (int16_t)(dec * imgHW[j] * 2 );
			}
		}
		else {
			for (j = 0; j < 4; j++) {
				dec = pLocF32[0][j];
				// FIX: was imgHW[j & 2], which reads imgHW[2] (out of
				// bounds) for j >= 2.  Even coords scale by H, odd by W,
				// matching the int8 variant.
				p->cchwX2[j] = (int16_t)(dec * imgHW[j & 1] * 2 );
			}
		}
#if NMS_PER_CLASS_MAX_CLASS > 0
		// Append this item to its class's doubly linked chain.
		if (prevNdc[maxAt] != 0xFFFF) {
			nmsAry[prevNdc[maxAt]].nextNdx = nmsAryUsedCnt;
			p->prevNdx = prevNdc[maxAt];
		}
		else {
			p->prevNdx = nmsAryUsedCnt;  // chain head points at itself
		}
		prevNdc[maxAt] = nmsAryUsedCnt;
		p->nextNdx = 0xFFFF;
#endif
		p++;
		if (++nmsAryUsedCnt >= nmsAryCapacity)
		{
			i++;  // count this box as consumed before reporting drops
			break;
		}
	}
	if (pDroppedCnt) pDroppedCnt[0] = inCnt - i;
	// (Removed unreachable "return i;" that followed this return.)
	return _Nms_DoNms(isPerClass, inCnt, nmsAry, nmsAryUsedCnt, pOut, confThreshold_x1W, 
					 (uint32_t)(op_data->intersection_over_union_threshold * 10000.0f + 0.5f), op_data->max_detections);
}

// Gathers the op's eval tensors, wraps them in the light-weight tensor
// views used by the NMS helpers, and dispatches on the score tensor type.
TfLiteStatus NonMaxSuppressionMultiClass(TfLiteContext* context,
                                         TfLiteNode* node, OpData* op_data) {
  // Get the input tensors
  const TfLiteEvalTensor* input_box_encodings =
      tflite::micro::GetEvalInput(context, node, kInputTensorBoxEncodings);
  const TfLiteEvalTensor* input_class_predictions =
      tflite::micro::GetEvalInput(context, node, kInputTensorClassPredictions);
  const int num_boxes = input_box_encodings->dims->data[1];
  const int num_classes = op_data->num_classes;
  
  TF_LITE_ENSURE_EQ(context, input_box_encodings->dims->data[0], kBatchSize);
  TF_LITE_ENSURE(context, input_box_encodings->dims->data[2] >= kNumCoordBox);
  
  const TfLiteEvalTensor* input_anchors =
      tflite::micro::GetEvalInput(context, node, kInputTensorAnchors);

  TF_LITE_ENSURE_EQ(context, input_class_predictions->dims->data[0],
                    kBatchSize);
  TF_LITE_ENSURE_EQ(context, input_class_predictions->dims->data[1], num_boxes);
  // The score tensor may carry at most one extra (background) class.
  const int num_classes_with_background =
      input_class_predictions->dims->data[2];

  TF_LITE_ENSURE(context, (num_classes_with_background - num_classes <= 1));
  TF_LITE_ENSURE(context, (num_classes_with_background >= num_classes));

  // get output
  TfLiteEvalTensor* detection_boxes =
      tflite::micro::GetEvalOutput(context, node, kOutputTensorDetectionBoxes);
  TfLiteEvalTensor* detection_classes = tflite::micro::GetEvalOutput(
      context, node, kOutputTensorDetectionClasses);
  TfLiteEvalTensor* detection_scores =
      tflite::micro::GetEvalOutput(context, node, kOutputTensorDetectionScores);
  TfLiteEvalTensor* num_detections =
      tflite::micro::GetEvalOutput(context, node, kOutputTensorNumDetections);  

  // Raw input buffers; element type is resolved by the switch below.
  const void *scores, *box_encoding, *anchors;
  scores = tflite::micro::GetTensorData<void>(input_class_predictions);
  box_encoding = tflite::micro::GetTensorData<void>(input_box_encodings);
  anchors = tflite::micro::GetTensorData<void>(input_anchors);
  
  // View of the box encodings as num_boxes entries of 4 floats.
  LocF32Tensor_t pLocTns = {
	  .itemCnt = (uint32_t)num_boxes,
	  .pAry = (locfp_t*)box_encoding,
  };
  
  locfp_t* pABs = (locfp_t*)anchors;
  
  // Working image size for the fixed-point pixel-space NMS.
  uint16_t imgHW[2] = {Default_ImgH, Default_ImgW};
  
  // View of the class scores; class index 0 is treated as background.
  ClsF32Tensor_t pClsTns = {
	.itemCnt = (uint32_t)num_boxes,
	.pAry = (float*)scores,
	.clsCnt = (uint32_t)num_classes_with_background,
	.bgClsNdx = 0,
  };
  
  // Scratch work array reserved in Prepare().
  NmsCCHW_t* nmsAry = reinterpret_cast<NmsCCHW_t*>(
      context->GetScratchBuffer(context, op_data->nmsAry_idx));
  
  NmsOutput_F32_t pOut = {
	.pNumDetections = tflite::micro::GetTensorData<float>(num_detections),
	.pYXYXs = tflite::micro::GetTensorData<YXYX_F32_t>(detection_boxes),
	.pClss = tflite::micro::GetTensorData<float>(detection_classes),
	.pCfds = tflite::micro::GetTensorData<float>(detection_scores),	  
  };

  switch (input_class_predictions->type) {
    case kTfLiteInt8: 
      // NOTE(review): the int8 path is an empty stub - nothing is written
      // to the outputs yet kTfLiteOk is returned.  Confirm whether
      // RS_SSDNms_I8 should be wired up here.
		break;
    case kTfLiteFloat32:
		RS_SSDNms_F32(op_data, isPerClass, &pLocTns, 1, pABs, imgHW,
			&pClsTns, nmsAry, nmsAryCap, &pOut,
			nullptr);   
      break;
    default:
      // Unsupported type.
      return kTfLiteError;
  }

  return kTfLiteOk;
}
										 
// Invoke handler: all real work happens in NonMaxSuppressionMultiClass.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  // Only single-batch models are supported by this kernel.
  TF_LITE_ENSURE(context, (kBatchSize == 1));
  return NonMaxSuppressionMultiClass(
      context, node, static_cast<OpData*>(node->user_data));
}
}  // namespace

// Registration entry point for the custom detection-postprocess op.
TfLiteRegistration* Register_DETECTION_POSTPROCESS() {
  static TfLiteRegistration r = {Init,     // init
                                 Free,     // free
                                 Prepare,  // prepare
                                 Eval,     // invoke
                                 nullptr,  // profiling_string
                                 0,        // builtin_code
                                 nullptr,  // custom_name
                                 0};       // version
  return &r;
}

}  // namespace tflite
