#include <cstdint>
#include <cstring>

#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include "rvm_humanseg_ort.h"


using namespace cv;
using namespace std;



static uint16_t float32_to_float16(float value);
static float float16_to_float32(uint16_t value);


// Construct the RVM (Robust Video Matting) ONNX Runtime segmenter.
//
// model_name   : path to the .onnx model (forwarded to HumanSegBase, which
//                stores it in model_name_).
// model_width/
// model_height : network input resolution; also sizes the staging buffers.
// is_video,
// enable_debug : forwarded to HumanSegBase unchanged.
//
// Creates the ORT session, caches input/output names and shapes, allocates
// the NCHW staging buffers, and seeds the recurrent state (r1i..r4i) and
// downsample-ratio inputs for the first frame.
RVMHumanSegOrt::RVMHumanSegOrt(const char *model_name, int model_width, int model_height, int is_video,int enable_debug):HumanSegBase(model_name, model_width, model_height, is_video,enable_debug)
{
    env_ = Env(ORT_LOGGING_LEVEL_ERROR, "RVMHumanSegORT");

    sessionOptions_ = SessionOptions();

    // NOTE(review): byte-by-byte widening is only correct for ASCII model
    // paths — confirm callers never pass non-ASCII paths.
    std::wstring widestr = std::wstring(model_name_.begin(), model_name_.end());
    //OrtStatus* status = OrtSessionOptionsAppendExecutionProvider_CUDA(sessionOptions_, 0);
    sessionOptions_.SetIntraOpNumThreads(2);
    sessionOptions_.SetInterOpNumThreads(1);
    sessionOptions_.SetGraphOptimizationLevel(ORT_ENABLE_ALL);
    ort_session_ = new Session(env_, widestr.c_str(), sessionOptions_);

    size_t numInputNodes = ort_session_->GetInputCount();
    size_t numOutputNodes = ort_session_->GetOutputCount();
    num_inputs_ = (int32_t)numInputNodes;
    num_outputs_ = (int32_t)numOutputNodes;

    // Cache input/output names and the static shapes reported by the model.
    // (GetInputName/GetOutputName hand back allocator-owned C strings.)
    AllocatorWithDefaultOptions allocator;
    for (size_t i = 0; i < numInputNodes; i++) // size_t: avoid signed/unsigned compare
    {
        input_names_.push_back(ort_session_->GetInputName(i, allocator));
        Ort::TypeInfo input_type_info = ort_session_->GetInputTypeInfo(i);
        auto input_tensor_info = input_type_info.GetTensorTypeAndShapeInfo();
        input_node_dims_.push_back(input_tensor_info.GetShape());
    }
    for (size_t i = 0; i < numOutputNodes; i++)
    {
        output_names_.push_back(ort_session_->GetOutputName(i, allocator));
        Ort::TypeInfo output_type_info = ort_session_->GetOutputTypeInfo(i);
        auto output_tensor_info = output_type_info.GetTensorTypeAndShapeInfo();
        output_node_dims_.push_back(output_tensor_info.GetShape());
    }

    // Staging buffers for the NCHW float (and optional fp16) network input.
    // Freed with delete[] in the destructor.
    src_net_in_ = new float[model_height_ * model_width_ * 3];
    src_net_in_fp16_ = new uint16_t[model_height_ * model_width_ * 3];

    i_fp16_ = 0;                // fp32 path by default
    downsample_ratio_ = 0.25f;  // RVM's downsample_ratio model input

    // Seed each recurrent state (r1i..r4i) with a single zero; after the
    // first frame update_context() resizes them to the real output shapes.
    if (i_fp16_) {
        dynamic_r1i_value_handler_fp16_.push_back(0);
        dynamic_r2i_value_handler_fp16_.push_back(0);
        dynamic_r3i_value_handler_fp16_.push_back(0);
        dynamic_r4i_value_handler_fp16_.push_back(0);
    } else {
        dynamic_r1i_value_handler_.push_back(0.0f);
        dynamic_r2i_value_handler_.push_back(0.0f);
        dynamic_r3i_value_handler_.push_back(0.0f);
        dynamic_r4i_value_handler_.push_back(0.0f);
    }

    dynamic_dsr_value_handler_ = { 0.25f };

    // Initial dynamic input shapes: src is (1,3,H,W); r1i..r4i start as
    // (1,1,1,1) placeholders; dsr is a one-element tensor.
    dynamic_input_node_dims_ = {
        {1, 3, model_height_, model_width_}, // src (b=1,c,h,w)
        {1, 1, 1, 1}, // r1i
        {1, 1, 1, 1}, // r2i
        {1, 1, 1, 1}, // r3i
        {1, 1, 1, 1}, // r4i
        {1}           // downsample_ratio (dsr)
    };
}

// Release the ONNX Runtime session and the NCHW staging buffers.
RVMHumanSegOrt::~RVMHumanSegOrt()
{
    delete ort_session_;
    // Both buffers were allocated with new[] in the constructor; the
    // original plain `delete` here was undefined behavior.
    delete[] src_net_in_;
    delete[] src_net_in_fp16_;

    // NOTE(review): the char* entries in input_names_/output_names_ came
    // from GetInputName/GetOutputName with the default allocator and are
    // not freed here — presumably a small accepted leak; verify.
    output_names_.clear();
    input_names_.clear();
    input_node_dims_.clear();
    output_node_dims_.clear();
}


int64_t RVMHumanSegOrt::value_size_of(const std::vector<int64_t> &dims)
{
	if (dims.empty()) return 0;
	int64_t value_size = 1;
	for (const auto &size : dims) value_size *= size;
	return value_size;
}


// Build the six input tensors for one Run() call, in model input order:
//   0: src (1,3,H,W)   1..4: r1i..r4i recurrent state   5: downsample_ratio (1,)
// Every tensor is a zero-copy view over member storage (src_net_in_[_fp16_],
// dynamic_r*_value_handler_[_fp16_], dynamic_dsr_value_handler_), so those
// members must stay alive and unmodified until the session Run() returns.
void RVMHumanSegOrt::transform( std::vector<Ort::Value> & input_tensors)
{
	// assume that rxi's dims and value_handler was updated by last step in a while loop.
    vector<int64_t> &src_dims = dynamic_input_node_dims_.at(0);	
	std::vector<int64_t> &r1i_dims = dynamic_input_node_dims_.at(1); // (1,?,?h,?w)
	std::vector<int64_t> &r2i_dims = dynamic_input_node_dims_.at(2); // (1,?,?h,?w)
	std::vector<int64_t> &r3i_dims = dynamic_input_node_dims_.at(3); // (1,?,?h,?w)
	std::vector<int64_t> &r4i_dims = dynamic_input_node_dims_.at(4); // (1,?,?h,?w)
	std::vector<int64_t> &dsr_dims = dynamic_input_node_dims_.at(5); // (1)
	
	//int64_t src_value_size = this->value_size_of(src_dims); // (1*3*h*w)
	// Element counts (product of dims) for each state tensor.
	int64_t r1i_value_size = this->value_size_of(r1i_dims); // (1*?*?h*?w)
	int64_t r2i_value_size = this->value_size_of(r2i_dims); // (1*?*?h*?w)
	int64_t r3i_value_size = this->value_size_of(r3i_dims); // (1*?*?h*?w)
	int64_t r4i_value_size = this->value_size_of(r4i_dims); // (1*?*?h*?w)
	int64_t dsr_value_size = this->value_size_of(dsr_dims); // 1

	// Pin src's H/W in case the model declared them as dynamic dimensions.
	src_dims.at(2) = model_height_;
	src_dims.at(3) = model_width_;
    
	auto allocator_info = MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);

    if (i_fp16_){
        // Untyped CreateTensor overload: the size argument is in BYTES
        // (hence the explicit sizeof(uint16_t) factors) plus an element type.
        input_tensors.push_back(Value::CreateTensor(allocator_info, (void *)src_net_in_fp16_, model_height_ * model_width_ * 3 * sizeof(uint16_t), src_dims.data(), src_dims.size(), ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16));
        input_tensors.push_back(Value::CreateTensor(allocator_info, (void *)dynamic_r1i_value_handler_fp16_.data(), r1i_value_size  * sizeof(uint16_t), r1i_dims.data(), r1i_dims.size(), ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16));
        input_tensors.push_back(Value::CreateTensor(allocator_info, (void *)dynamic_r2i_value_handler_fp16_.data(), r2i_value_size  * sizeof(uint16_t), r2i_dims.data(), r2i_dims.size(), ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16));
        input_tensors.push_back(Value::CreateTensor(allocator_info, (void *)dynamic_r3i_value_handler_fp16_.data(), r3i_value_size  * sizeof(uint16_t), r3i_dims.data(), r3i_dims.size(), ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16));
        input_tensors.push_back(Value::CreateTensor(allocator_info, (void *)dynamic_r4i_value_handler_fp16_.data(), r4i_value_size  * sizeof(uint16_t), r4i_dims.data(), r4i_dims.size(), ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16));
    }else {
        // Templated CreateTensor<float>: the size argument is in ELEMENTS.
        input_tensors.push_back(Value::CreateTensor<float>(allocator_info, src_net_in_, model_height_ * model_width_ * 3 , src_dims.data(), src_dims.size()));
    	input_tensors.push_back(Value::CreateTensor<float>(allocator_info, dynamic_r1i_value_handler_.data(), r1i_value_size, r1i_dims.data(), r1i_dims.size()));
    	input_tensors.push_back(Value::CreateTensor<float>(allocator_info, dynamic_r2i_value_handler_.data(), r2i_value_size, r2i_dims.data(), r2i_dims.size()));
    	input_tensors.push_back(Value::CreateTensor<float>(allocator_info, dynamic_r3i_value_handler_.data(), r3i_value_size, r3i_dims.data(), r3i_dims.size()));
    	input_tensors.push_back(Value::CreateTensor<float>(allocator_info, dynamic_r4i_value_handler_.data(), r4i_value_size, r4i_dims.data(), r4i_dims.size()));
    }
	
	// downsample_ratio is built as float32 on both paths — presumably the
	// exported fp16 model still declares dsr as float32; TODO confirm.
	input_tensors.push_back(Value::CreateTensor<float>(allocator_info, dynamic_dsr_value_handler_.data(), dsr_value_size, dsr_dims.data(), dsr_dims.size()));
}


// Carry this frame's recurrent outputs over to the next frame's inputs.
// Output tensors 2..5 are r1o..r4o; their shapes go into dynamic input
// slots 1..4 (r1i..r4i) and their values are copied into the matching
// handler vectors, because the Ort::Value buffers die with output_tensors.
void RVMHumanSegOrt::update_context(std::vector<Ort::Value> &output_tensors)
{
	decltype(&dynamic_r1i_value_handler_fp16_) fp16_states[4] = {
		&dynamic_r1i_value_handler_fp16_, &dynamic_r2i_value_handler_fp16_,
		&dynamic_r3i_value_handler_fp16_, &dynamic_r4i_value_handler_fp16_
	};
	decltype(&dynamic_r1i_value_handler_) fp32_states[4] = {
		&dynamic_r1i_value_handler_, &dynamic_r2i_value_handler_,
		&dynamic_r3i_value_handler_, &dynamic_r4i_value_handler_
	};

	for (int k = 0; k < 4; ++k)
	{
		Ort::Value &rko = output_tensors.at(k + 2);
		auto rko_dims = rko.GetTypeInfo().GetTensorTypeAndShapeInfo().GetShape();

		// Next frame's rki takes this frame's rko shape...
		dynamic_input_node_dims_.at(k + 1) = rko_dims;

		// ...and its values (element count = product of dims).
		int64_t count = this->value_size_of(rko_dims);
		if (i_fp16_)
		{
			uint16_t *src = rko.GetTensorMutableData<uint16_t>();
			fp16_states[k]->resize(count);
			std::memcpy(fp16_states[k]->data(), src, count * sizeof(uint16_t));
		}
		else
		{
			float *src = rko.GetTensorMutableData<float>();
			fp32_states[k]->resize(count);
			std::memcpy(fp32_states[k]->data(), src, count * sizeof(float));
		}
	}
}


// Run one frame of RVM matting on a packed RGB24 image and write the 8-bit
// alpha matte.
//
// buf_rgb     : input pixels, model_width_ x model_height_, 3 bytes/pixel,
//               row stride = src_pitch bytes. Must already be model-sized —
//               no resizing happens here.
// src_pitch   : byte stride between input rows.
// alpha       : output buffer, model_width_ x model_height_, 1 byte/pixel;
//               receives the pha output scaled from 0..1 to 0..255.
// alpha_pitch : byte stride between output rows.
// Returns 0 unconditionally (no error path is reported).
int RVMHumanSegOrt::detect_rgb24_do(unsigned char *buf_rgb, int src_pitch, unsigned char *alpha, int alpha_pitch)
{
    //1 input  rgb24 -> float nchw
    vector <cv::Mat> split_rgb;
    
    // Zero-copy wrap of the caller's interleaved buffer.
    cv::Mat mat_rgb(model_height_, model_width_, CV_8UC3, buf_rgb, src_pitch);

    cv::Mat mat_f32(model_height_, model_width_, CV_32FC3);
    
    // Plane views aliasing src_net_in_: cv::split below writes the three
    // channel planes straight into the NCHW network input buffer, in the
    // same channel order as the input buffer.
    cv::Mat mat_r(model_height_, model_width_, CV_32FC1, src_net_in_);
    cv::Mat mat_g(model_height_, model_width_, CV_32FC1, src_net_in_ + model_width_ * model_height_);
    cv::Mat mat_b(model_height_, model_width_, CV_32FC1, src_net_in_ + 2 * model_width_ * model_height_);
      
    // Normalize 0..255 -> 0..1 float.
    mat_rgb.convertTo(mat_f32, CV_32F, 1.0 / 255, 0);
    split_rgb.push_back(mat_r);
    split_rgb.push_back(mat_g);
    split_rgb.push_back(mat_b);        

    cv::split(mat_f32, split_rgb);

    // Optional fp16 path: scalar-convert the whole NCHW buffer.
    if (i_fp16_){
        int len = model_height_ * model_width_ * 3;
        int i;
        
        for(i = 0 ; i < len ; ++i){
            src_net_in_fp16_[i] = float32_to_float16(src_net_in_[i]);
        }
    }
            
    //2 run detector 
    // generate input tensors (zero-copy views; see transform())
    std::vector<Ort::Value>  input_tensors;
    transform(input_tensors);
    
    // run output 
    auto output_tensors = ort_session_->Run(
        Ort::RunOptions{ nullptr }, input_names_.data(),
        input_tensors.data(), num_inputs_, output_names_.data(),
        num_outputs_
    );
    
    // get output 
    Ort::Value &fgr = output_tensors.at(0); // fgr (1,3,h,w) 0.~1.
    Ort::Value &pha = output_tensors.at(1); // pha (1,1,h,w) 0.~1.
    auto fgr_dims = fgr.GetTypeInfo().GetTensorTypeAndShapeInfo().GetShape();
    auto pha_dims = pha.GetTypeInfo().GetTensorTypeAndShapeInfo().GetShape();

    if (i_fp16_){
        uint16_t * fgr_ptr = fgr.GetTensorMutableData<uint16_t>();
        uint16_t *pha_ptr = pha.GetTensorMutableData<uint16_t>();

        (void)fgr_ptr; // foreground colors unused; only the matte is emitted

        // Convert pha (fp16, 0..1) to 8-bit alpha, row by row.
        {
            int col, row;
            uint8_t *p_dst;
            uint16_t *p_src;
            float v;
            int32_t pix;
            for(row = 0 ; row < model_height_ ; ++row){

                p_src = pha_ptr + row *  model_width_;
                p_dst = alpha + row * alpha_pitch;
        
                for(col = 0 ; col < model_width_ ; ++col){
                    v = float16_to_float32(p_src[col]);
                    pix = (int32_t)(v * 255);
                    p_dst[col] = CLIP_UIN8(pix); // clamp to 0..255 (project macro)
                }
            }
        }   

    }else {
        float *fgr_ptr = fgr.GetTensorMutableData<float>();
        float *pha_ptr = pha.GetTensorMutableData<float>();

        (void)fgr_ptr; // foreground colors unused; only the matte is emitted

        // Convert pha (float, 0..1) to 8-bit alpha, row by row.
        {
        
            int col, row;
            uint8_t *p_dst;
            float *p_src;
            int32_t pix;
            for(row = 0 ; row < model_height_ ; ++row){
                p_src = pha_ptr + row *  model_width_;
                p_dst = alpha + row * alpha_pitch;
        
                for(col = 0 ; col < model_width_ ; ++col){
                    pix = (int32_t)(p_src[col] * 255);
                    p_dst[col] = CLIP_UIN8(pix); // clamp to 0..255 (project macro)
                }
            }
        }   
    }

    // Carry r1o..r4o into next frame's r1i..r4i.
    update_context(output_tensors);


    return 0;
}



// Convert an IEEE-754 single-precision float to half precision (binary16).
// Rounding is truncation (round-toward-zero) of the low 13 mantissa bits;
// fp32 zeros/denormals and normals below the fp16 normal range flush to
// signed zero (fp16 denormals are never produced — kept from the original
// ncnn-derived implementation); NaNs get a forced quiet bit (0x200).
static uint16_t float32_to_float16(float value)
{
    // Grab the raw bits with memcpy: well-defined, unlike union type-punning.
    uint32_t bits;
    std::memcpy(&bits, &value, sizeof(bits));

    // fp32 layout — 1 sign : 8 exponent : 23 significand
    uint16_t sign = (uint16_t)((bits & 0x80000000u) >> 31);
    uint16_t exponent = (uint16_t)((bits & 0x7F800000u) >> 23);
    uint32_t significand = bits & 0x7FFFFFu;

    // fp16 layout — 1 sign : 5 exponent : 10 significand
    uint16_t fp16;
    if (exponent == 0)
    {
        // zero or fp32 denormal: always underflows fp16 -> signed zero
        fp16 = (uint16_t)(sign << 15);
    }
    else if (exponent == 0xFF)
    {
        // infinity or NaN (keep NaN-ness with a non-zero significand bit)
        fp16 = (uint16_t)((sign << 15) | (0x1F << 10) | (significand ? 0x200 : 0x00));
    }
    else
    {
        // normalized: re-bias exponent from fp32 (127) to fp16 (15)
        int newexp = (int)exponent + (-127 + 15);
        if (newexp >= 31)
        {
            // overflow -> signed infinity
            fp16 = (uint16_t)((sign << 15) | (0x1F << 10));
        }
        else if (newexp <= 0)
        {
            // below the fp16 normal range -> signed zero (no denormals)
            fp16 = (uint16_t)(sign << 15);
        }
        else
        {
            // normal fp16: top 10 significand bits, rest truncated
            fp16 = (uint16_t)((sign << 15) | (newexp << 10) | (significand >> 13));
        }
    }

    return fp16;
}


// Convert an IEEE-754 half-precision (binary16) value to single precision.
// The conversion is exact: every fp16 value (including denormals, infinities
// and NaN payloads) is representable in fp32.
static float float16_to_float32(uint16_t value)
{
    // fp16 layout — 1 sign : 5 exponent : 10 significand
    // Work in uint32_t throughout: the original `(sign << 31)` shifted a
    // promoted signed int, which is undefined behavior when sign == 1.
    uint32_t sign = (uint32_t)(value & 0x8000) >> 15;
    uint32_t exponent = (uint32_t)(value & 0x7C00) >> 10;
    uint32_t significand = value & 0x03FF;

    // fp32 layout — 1 sign : 8 exponent : 23 significand
    uint32_t bits;
    if (exponent == 0)
    {
        if (significand == 0)
        {
            // signed zero
            bits = sign << 31;
        }
        else
        {
            // fp16 denormal: shift the leading 1 up to bit 9 (counting the
            // shifts), then drop it — it becomes fp32's implicit bit.
            uint32_t shift = 0;
            while ((significand & 0x200) == 0)
            {
                significand <<= 1;
                shift++;
            }
            significand <<= 1;
            significand &= 0x3FF;
            // re-bias: fp16 denormal exponent is (1-15); fp32 bias is 127
            bits = (sign << 31) | ((0u - shift + (0u - 15 + 127)) << 23) | (significand << 13);
        }
    }
    else if (exponent == 0x1F)
    {
        // infinity or NaN (payload preserved in the top mantissa bits)
        bits = (sign << 31) | (0xFFu << 23) | (significand << 13);
    }
    else
    {
        // normalized: re-bias exponent from fp16 (15) to fp32 (127)
        bits = (sign << 31) | ((exponent + (0u - 15 + 127)) << 23) | (significand << 13);
    }

    // Assemble the float from its bits with memcpy (no union punning).
    float out;
    std::memcpy(&out, &bits, sizeof(out));
    return out;
}

