﻿#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include "mbv3_humanseg_tflite.h"

using namespace cv;
using namespace std;


/// Releases the TFLite interpreter/model and the staging buffers.
/// Tears down in reverse order of creation: interpreter first, then model.
MBV3HumanSegTFLite::~MBV3HumanSegTFLite()
{
    if (interpreter_) {
        // Was leaked before: initModel() creates the interpreter but nothing freed it.
        TfLiteInterpreterDelete(interpreter_);
    }
    if (model_) {
        TfLiteModelDelete(model_);
    }

    // Buffers are allocated with new[] in the constructor, so they must be
    // released with delete[] (plain delete on an array is undefined behavior).
    delete[] src_data_net_in_;
    delete[] data_net_out_;
}


/// Finds an output tensor of @p interpreter whose name equals @p name.
/// @return the matching tensor, or nullptr when no output carries that name.
TfLiteTensor * MBV3HumanSegTFLite::getOutputTensorByName(TfLiteInterpreter * interpreter, const char * name)
{
	const int total = TfLiteInterpreterGetOutputTensorCount(interpreter);
	// Linear scan over all outputs, matching by exact tensor name.
	for (int idx = 0; idx < total; ++idx) {
		// The C API hands back a const tensor for outputs; cast away const
		// to keep the return type uniform with the input-tensor lookup.
		TfLiteTensor* tensor = (TfLiteTensor*)TfLiteInterpreterGetOutputTensor(interpreter, idx);
		if (strcmp(tensor->name, name) == 0) {
			return tensor;
		}
	}
	return nullptr;
}
/// Finds an input tensor of @p interpreter whose name equals @p name.
/// @return the matching tensor, or nullptr when no input carries that name.
TfLiteTensor * MBV3HumanSegTFLite::getInputTensorByName(TfLiteInterpreter * interpreter, const char * name)
{
	const int total = TfLiteInterpreterGetInputTensorCount(interpreter);
	// Linear scan over all inputs, matching by exact tensor name.
	for (int idx = 0; idx < total; ++idx) {
		TfLiteTensor* tensor = TfLiteInterpreterGetInputTensor(interpreter, idx);
		if (strcmp(tensor->name, name) == 0) {
			return tensor;
		}
	}
	return nullptr;
}


/// Loads the .tflite file at @p path, builds the interpreter, allocates its
/// tensors, and caches the "input_1" / "segment" tensor handles used by
/// detect_rgb24_do(). On any failure it logs and returns, leaving the cached
/// handles null.
void MBV3HumanSegTFLite::initModel(string path )
{
	model_ = TfLiteModelCreateFromFile(path.c_str());
	if (model_ == nullptr) {
		// Bail out early: TfLiteInterpreterCreate would just fail on a null model.
		printf("Failed to load model file");
		cout << (path) << endl;
		return;
	}

	TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
	interpreter_ = TfLiteInterpreterCreate(model_, options);
	// The interpreter keeps its own copy of the options, so ours must be
	// deleted here on every path (previously leaked).
	TfLiteInterpreterOptionsDelete(options);
	if (interpreter_ == nullptr) {
		printf("Failed to create interpreter");
		cout << (path) << endl;
		return  ;
	}
	// Allocate tensor buffers.
	if (TfLiteInterpreterAllocateTensors(interpreter_) != kTfLiteOk) {
		printf("Failed to allocate tensors!");
		return  ;
	}

	// Resolve tensors by name; getters return nullptr if the names are absent.
	input_tf_ = getInputTensorByName(interpreter_, "input_1");
	output_tf_ = getOutputTensorByName(interpreter_, "segment");
}

/// Constructs the segmenter: the base class records the model geometry, then
/// the TFLite interpreter is built and the float staging buffers are sized to
/// the model's input (3-channel) and output (2-channel) planes.
MBV3HumanSegTFLite::MBV3HumanSegTFLite(const char *model_name, int model_width, int model_height, int is_video, int enable_debug):HumanSegBase(model_name, model_width, model_height, is_video,enable_debug)
{
    // Build model + interpreter from the path the base class stored.
    initModel(model_name_);

    // Network input: W*H*3 floats (RGB, normalized by detect_rgb24_do).
    src_data_net_in_ = new float[model_width_ * model_height_ * 3];
    // Network output: W*H*2 floats (per-pixel background/foreground pair).
    data_net_out_ = new float[model_width_ * model_height_ * 2];
}



int MBV3HumanSegTFLite::detect_rgb24_do(unsigned char *buf_rgb, int src_pitch, unsigned char *alpha, int alpha_pitch)
{
    //1 input  rgb24 -> float nhwc
    vector <cv::Mat> split_rgb;

    cv::Mat mat_rgb(model_height_, model_width_, CV_8UC3, buf_rgb, src_pitch);
    cv::Mat mat_f32(model_height_, model_width_, CV_32FC3, src_data_net_in_);

    mat_rgb.convertTo(mat_f32, CV_32F, 1.0 / 255, 0);

 
        
    //2 run detector 

    TfLiteTensorCopyFromBuffer(input_tf_, src_data_net_in_, model_width_ * model_height_ * 3 *sizeof(float));
    /// execute 
    TfLiteInterpreterInvoke(interpreter_);
    /// copy result out 

    TfLiteTensorCopyToBuffer(output_tf_, data_net_out_, model_width_ * model_height_ * 2 * sizeof(float));

    // output alpha 
    {
        int col, row;
        uint8_t *p_dst;
        float *p_src;
        int32_t pix;
        for(row = 0 ; row < model_height_ ; ++row){
            //  for mobilenet3.tflite 256x144
            p_src = data_net_out_ + row *  model_width_ * 2;
            p_dst = p_dst = alpha + row * alpha_pitch;;

            for(col = 0 ; col < model_width_ ; ++col){
                pix = (int32_t)(p_src[col * 2 + 1] * 255);
                p_dst[col] =  CLIP_UIN8(pix);

            }
        }
    }

    return 0;
}


