#include "task/facenet.h"
#include "task/rknn_box_priors.h"

facenetCustom::facenetCustom()
{
    // Acquire an inference engine instance; the model itself is loaded
    // later via LoadModel(), so the object starts in a non-ready state.
    engine_ = CreateRKNNEngine();
    // No input buffer until LoadModel() allocates one.
    input_tensor_.data = nullptr;
    ready_ = false;
    want_float_ = false;
}

facenetCustom::~facenetCustom()
{
    // Release the malloc'd input buffer, if LoadModel() ever allocated it.
    NN_LOG_DEBUG("release input tensor");
    if (input_tensor_.data)
    {
        free(input_tensor_.data);
        input_tensor_.data = nullptr;
    }

    // Release every malloc'd output buffer the same way.
    NN_LOG_DEBUG("release output tensor");
    for (auto &out : output_tensors_)
    {
        if (out.data)
        {
            free(out.data);
            out.data = nullptr;
        }
    }
}

nn_error_e facenetCustom::LoadModel(const char *model_path)
{
    // Load the RKNN model file, then allocate host-side buffers for the
    // single image input and for every output tensor the model reports.
    // Returns NN_SUCCESS and sets ready_ on success.
    auto ret = engine_->LoadModelFile(model_path);
    if (ret != NN_SUCCESS)
    {
        NN_LOG_ERROR("facenet load model file failed");
        return ret;
    }

    // Facenet expects exactly one image input.
    auto input_shapes = engine_->GetInputShapes();
    if (input_shapes.size() != 1)
    {
        // %zu is the correct format for size_t (the original used %ld).
        NN_LOG_ERROR("facenet input tensor number is not 1, but %zu", input_shapes.size());
        return NN_RKNN_INPUT_ATTR_ERROR;
    }
    nn_tensor_attr_to_cvimg_input_data(input_shapes[0], input_tensor_);
    input_tensor_.data = malloc(input_tensor_.attr.size);
    if (input_tensor_.data == nullptr)
    {
        NN_LOG_ERROR("facenet input tensor alloc failed");
        return NN_RKNN_INPUT_ATTR_ERROR;
    }

    auto output_shapes = engine_->GetOutputShapes();
    // Bug fix: the original read output_shapes[0] without checking that the
    // model has any outputs at all (UB on an empty vector).
    if (output_shapes.empty())
    {
        NN_LOG_ERROR("facenet model has no output tensors");
        return NN_RKNN_INPUT_ATTR_ERROR; // no dedicated output error code visible in this TU
    }

    if (output_shapes[0].type == NN_TENSOR_FLOAT16)
    {
        want_float_ = true;
        NN_LOG_WARNING("facenet output tensor type is float16, want type set to float32");
    }
    for (size_t i = 0; i < output_shapes.size(); i++)
    {
        tensor_data_s tensor;
        tensor.attr.n_elems = output_shapes[i].n_elems;
        tensor.attr.n_dims = output_shapes[i].n_dims;
        for (int j = 0; j < (int)output_shapes[i].n_dims; j++)
        {
            tensor.attr.dims[j] = output_shapes[i].dims[j];
        }
        // When want_float_ is set the engine dequantizes for us, so the host
        // buffer must be sized for float32 rather than the native type.
        tensor.attr.type = want_float_ ? NN_TENSOR_FLOAT : output_shapes[i].type;
        tensor.attr.index = 0;
        tensor.attr.size = output_shapes[i].n_elems * nn_tensor_type_to_size(tensor.attr.type);
        tensor.data = malloc(tensor.attr.size);
        if (tensor.data == nullptr)
        {
            // Already-allocated buffers are released by the destructor.
            NN_LOG_ERROR("facenet output tensor alloc failed");
            return NN_RKNN_INPUT_ATTR_ERROR;
        }
        output_tensors_.push_back(tensor);
        // Keep quantization parameters in case manual dequantization is needed.
        out_zps_.push_back(output_shapes[i].zp);
        out_scales_.push_back(output_shapes[i].scale);
    }

    ready_ = true;
    return NN_SUCCESS;
}

nn_error_e facenetCustom::Preprocess(const cv::Mat &img, cv::Mat &image_letterbox)
{
    // Letterbox the input image to the model's expected width/height and
    // convert it into the input tensor buffer.
    // NOTE: dims[2] is treated as width and dims[1] as height throughout
    // (NHWC layout) — the original computed a wh_ratio from them that was
    // never used, so that dead local has been removed.
    img_width = img.cols;
    img_height = img.rows;
    // Resize with aspect-ratio-preserving padding; letter_box records the
    // transform so detections can be mapped back to the original image.
    letterbox(img, image_letterbox, letter_box, input_tensor_.attr.dims[2], input_tensor_.attr.dims[1]);
    // Copy the letterboxed image (incl. BGR->RGB handling inside the helper)
    // into the engine's input tensor.
    cvimg2tensor(image_letterbox, input_tensor_.attr.dims[2], input_tensor_.attr.dims[1], input_tensor_);
    return NN_SUCCESS;
}


nn_error_e facenetCustom::Inference()
{
    // Package the single input tensor into the batch shape the engine
    // expects, then delegate; want_float_ asks the engine for float32 output.
    std::vector<tensor_data_s> input_batch{input_tensor_};
    return engine_->Run(input_batch, output_tensors_, want_float_);
}

nn_error_e facenetCustom::Run(const cv::Mat &img, float* res_vector)
{
    // Full pipeline: preprocess -> inference -> postprocess.
    // Bug fix: the original discarded every stage's return value and
    // unconditionally reported NN_SUCCESS, hiding engine failures.
    cv::Mat img_letterbox;
    nn_error_e ret = Preprocess(img, img_letterbox);
    if (ret != NN_SUCCESS)
    {
        return ret;
    }
    ret = Inference();
    if (ret != NN_SUCCESS)
    {
        return ret;
    }
    return Postprocess(res_vector);
}

nn_error_e facenetCustom::Postprocess(float* res_vector)
{
    // Copy the 128-dim embedding out of the first output tensor and
    // L2-normalize it in place. res_vector must hold >= 128 floats.
    const int vector_size = 128;
    const size_t copy_bytes = vector_size * sizeof(float);
    // Bug fix: guard against reading past the output buffer — the original
    // memcpy'd 128 floats without checking the tensor exists or is big enough.
    if (output_tensors_.empty() || output_tensors_[0].data == nullptr ||
        output_tensors_[0].attr.size < copy_bytes)
    {
        NN_LOG_ERROR("facenet output tensor too small for 128-float embedding");
        return NN_RKNN_INPUT_ATTR_ERROR; // no dedicated output error code visible in this TU
    }
    std::memcpy(res_vector, output_tensors_[0].data, copy_bytes);
    l2_normalize(res_vector);
    return NN_SUCCESS;
}

nn_error_e facenetCustom::SaveEmbeddings(float* res_vector, std::string outPath)
{
    // Write the 128-dim embedding to outPath, one value per line.
    // Bug fix: the original had NO return statement in a non-void function,
    // which is undefined behavior; it also never checked the file opened.
    std::ofstream outfile(outPath, std::ofstream::out);
    if (!outfile.is_open())
    {
        NN_LOG_ERROR("facenet failed to open embedding output file");
        return NN_RKNN_INPUT_ATTR_ERROR; // no dedicated I/O error code visible in this TU
    }
    for (int i = 0; i < 128; ++i)
    {
        outfile << res_vector[i] << "\n";
    }
    // ofstream's destructor flushes and closes, so no explicit close() needed.
    return NN_SUCCESS;
}



static int clamp(int x, int min, int max) {
    // Constrain x to the inclusive range [min, max].
    if (x < min) return min;
    return (x > max) ? max : x;
}

void l2_normalize(float* input)
{
    // L2-normalize a 128-dim face embedding in place so embeddings can be
    // compared via cosine similarity / Euclidean distance.
    const int kEmbeddingDim = 128;
    float sum = 0;
    for (int i = 0; i < kEmbeddingDim; ++i)
    {
        sum = sum + input[i] * input[i];
    }
    const float norm = sqrt(sum);
    // Bug fix: an all-zero vector gave a 0 norm and the original divided by
    // it, filling the embedding with NaN/Inf and poisoning every later
    // comparison. Leave a zero vector unchanged instead.
    if (norm > 0.0f)
    {
        for (int i = 0; i < kEmbeddingDim; ++i)
        {
            input[i] = input[i] / norm;
        }
    }
}