//
// Created by liu on 2021/3/16.
//
#include "zitai.h"
detector::detector(detector_input input) {
    /**
     * Build the ONNX Runtime session and cache the network geometry.
     *
     * @param input model path, batch/channel/input/output sizes and the
     *              input/output layer name lists (see detector_input).
     *
     * NOTE(review): Ort::Session keeps a reference to the Ort::Env it was
     * created with, so the env must outlive the session.  The original code
     * used a stack-local env that was destroyed when the constructor
     * returned, leaving _session bound to a dangling environment.  A
     * function-local static keeps a single env alive for the whole process,
     * which is the pattern recommended by the ONNX Runtime docs.
     */
    static Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "test");
    Ort::SessionOptions session_options;
    session_options.SetIntraOpNumThreads(10);
    session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED);
    // NOTE(review): _session is allocated with raw `new` and never deleted in
    // this file — prefer std::unique_ptr<Ort::Session> in the header.
    _session = new Ort::Session(env, input.model_path, session_options);
    in = input;
    _input_node_dims = {in.batch, in.channel, in.net_input_row, in.net_input_col};
    input_tensor_size = in.net_input_row * in.net_input_col * in.channel;
    _input_layer_names = in.input_layer_names;
    _output_node_names = in.output_node_names;
}

unsigned char* detector::get_net_image(unsigned char* image,const int16_t &org_rows,const int16_t &org_cols,const cv::Rect &box){
    /**
     * Crop `box` out of the BGR source image, resize the crop to the network
     * input size, and convert it to RGB.
     *
     * @param image    pointer to org_rows x org_cols interleaved BGR pixels
     * @param org_rows source image height
     * @param org_cols source image width
     * @param box      region of interest (must lie inside the source image)
     * @return pointer to the prepared RGB pixel buffer.  The buffer is owned
     *         by a function-local static Mat and remains valid until the next
     *         call (NOT thread-safe; do not free).
     *
     * NOTE(review): the original returned out_img.data while out_img was a
     * stack local — the Mat (and its pixel buffer) was destroyed on return,
     * so every caller read freed memory.  The static Mat keeps the buffer
     * alive without changing the "borrowed pointer" contract.
     */
    cv::Mat org(org_rows,org_cols,CV_8UC3,image,0);
    cv::Mat roi_image = org(box);
    static cv::Mat out_img;  // persists past return so the returned pointer stays valid
    cv::resize(roi_image,out_img,cv::Size(in.net_input_col,in.net_input_row));
    cv::cvtColor(out_img,out_img,cv::COLOR_BGR2RGB);
    return out_img.data;
}




void detector::onnx_detect(unsigned char* image, vector<cv::Point_<int>> &kps){
    /**
     * input image is rgb three channel image;
     * */
    cv::Mat orgBox(in.net_input_row,in.net_input_col,CV_8UC3,image);
    std::vector<float> input_tensor_values(input_tensor_size);
    // 这里解释一下 别看错了  这个因为他是 1 3 w h的网络输入  所以这里要做成如下  不是说图像是 RR...GGGGG...BBB...这么排列的
//    for (int c = 0; c < in.channel; c++) {
//        for (int i = 0; i < in.net_input_col; i++) {
//            for (int j = 0; j < in.net_input_row; j++) {
//                if (c == 0) {
//                    input_tensor_values[c*in.net_input_row*in.net_input_col + i * in.net_input_row + j] = ((orgBox.ptr<uchar>(i)[j * 3 + c] / 255  - 0.406 ) / 0.225  ); // 255.0 - 0.406) / 0.225
//                }
//                if (c == 1) {
//                    input_tensor_values[c*in.net_input_row*in.net_input_col + i * in.net_input_row + j] = ((orgBox.ptr<uchar>(i)[j * 3 + c] / 255  - 0.456 ) / 0.224 ); //255.0 - 0.456) / 0.224
//                }
//                if (c == 2) {
//                    input_tensor_values[c*in.net_input_row*in.net_input_col + i * in.net_input_row + j] = ((orgBox.ptr<uchar>(i)[j * 3 + c] / 255.0 - 0.485) / 0.229 ); // 255.0 - 0.485) / 0.229
//                }
//            }
//        }
//    }
    // h w c -> c h w
    for (int c = 0; c < in.channel; c++) {
        for (int i = 0; i < in.net_input_row; i++) {
            for (int j = 0; j < in.net_input_col ; j++) {
                if (c == 0) {
                    input_tensor_values[c*in.net_input_row*in.net_input_col + i * in.net_input_col + j] = ((orgBox.ptr<uchar>(i)[j * 3 + c] / 255  - 0.406 ) / 0.225  ); // 255.0 - 0.406) / 0.225
                }
                if (c == 1) {
                    input_tensor_values[c*in.net_input_row*in.net_input_col + i * in.net_input_col + j] = ((orgBox.ptr<uchar>(i)[j * 3 + c] / 255  - 0.456 ) / 0.224 ); //255.0 - 0.456) / 0.224
                }
                if (c == 2) {
                    input_tensor_values[c*in.net_input_row*in.net_input_col + i * in.net_input_col + j] = ((orgBox.ptr<uchar>(i)[j * 3 + c] / 255.0 - 0.485) / 0.229 ); // 255.0 - 0.485) / 0.229
                }
            }
        }
    }




    auto memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
    Ort::Value input_tensor = Ort::Value::CreateTensor<float>(memory_info, input_tensor_values.data(), input_tensor_size, _input_node_dims.data(), 4);
    assert(input_tensor.IsTensor());


    Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "test");
    Ort::SessionOptions session_options;
    session_options.SetIntraOpNumThreads(2);
    session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED);
    Ort::Session session(env, in.model_path, session_options);


    auto output_tensors = _session->Run(Ort::RunOptions{nullptr},_input_layer_names.data(), &input_tensor, 1, _output_node_names.data(), 1);
    float* feature_map = output_tensors.front().GetTensorMutableData<float>();



    for(int k = 0;k<17;k++){
        //扩充特征平面 下一滤波做准备
        cv::Mat FeatureMap = cv::Mat(in.net_out_row,in.net_out_col,CV_32FC1,&feature_map[k*in.net_out_row*in.net_out_col],0);
//        string image_path = std::to_string(k) +".jpg";
//        cv::imwrite(image_path,FeatureMap);
        cv::Mat dr;
        cv::copyMakeBorder(FeatureMap,dr,10,10,10,10,CV_HAL_BORDER_CONSTANT,0.0f);
        cv::GaussianBlur(dr,dr,cv::Size(11,11),0);

        float score_max = 0.0;
        float x; float y ;  //xy坐标
        for(int i=0;i<dr.rows;i++){
            for(int j =0 ;j<dr.cols;j++){
                float score = dr.at<float>(i,j);
                if (score > score_max){
                    score_max = score;
                    x = j*1.0f - 10 ; y = i*1.0f - 10 ;
                }
            }
        }

        dr.at<float>(x,y) = 0.0f;   //置0 再计算偏执点

        float px;float py;
        score_max = 0.0;
        for(int i=0;i<dr.rows;i++){
            for(int j =0 ;j<dr.cols;j++){
                float score = dr.at<float>(i,j);
                if (score> score_max){
                    score_max = score;
                    px = j*1.0f -10 - x; py = i*1.0f -10 - y;
                }
            }
        }

        float ln = max(pow(pow(px,2)+pow(py,2),0.5),1e-3);

        x = min(max(x + 0.25f*px /ln,0.0f),in.net_out_col*1.0f - 1.0f );
        y = min(max(y + 0.25f*py /ln,0.0f),in.net_out_row*1.0f - 1.0f );


        cv::Point_<int> kp = cv::Point_<int>(int(x)*4+2,int(y)*4+2);
        kps.push_back(kp);
    }
}

vector<cv::Point_<int>> detector::kps_to_org( const vector<cv::Point_<int>> kps,const int org_rows , const int org_cols ,const cv::Rect box){
    /**
     * Map keypoints from network-input coordinates back onto the original
     * image: scale each point by the crop-box size, translate by the box
     * origin, and clamp into the image bounds.
     *
     * @param kps      keypoints in net_input_col x net_input_row coordinates
     * @param org_rows original image height (clamp limit for y)
     * @param org_cols original image width (clamp limit for x)
     * @param box      crop rectangle the keypoints were detected inside
     * @return keypoints in original-image coordinates
     */
    vector<cv::Point_<int>> mapped;
    mapped.reserve(kps.size());
    for (const auto &p : kps) {
        const int cx = min(max(p.x * 1.0f / in.net_input_col * box.width + box.x, 0.0f), org_cols * 1.0f);
        const int cy = min(max(p.y * 1.0f / in.net_input_row * box.height + box.y, 0.0f), org_rows * 1.0f);
        mapped.emplace_back(cx, cy);
    }
    return mapped;
}











/** ONNX Runtime reference
 * ONNX tensor element data types (note: these are element types, not layer names)
 * typedef enum ONNXTensorElementDataType {
  ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED,
  ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT,   // maps to c type float
  ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8,   // maps to c type uint8_t
  ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8,    // maps to c type int8_t
  ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16,  // maps to c type uint16_t
  ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16,   // maps to c type int16_t
  ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32,   // maps to c type int32_t
  ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64,   // maps to c type int64_t
  ONNX_TENSOR_ELEMENT_DATA_TYPE_STRING,  // maps to c++ type std::string
  ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL,
  ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16,
  ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE,      // maps to c type double
  ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32,      // maps to c type uint32_t
  ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64,      // maps to c type uint64_t
  ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX64,   // complex with float32 real and imaginary components
  ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX128,  // complex with float64 real and imaginary components
  ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16     // Non-IEEE floating-point format based on IEEE754 single-precision
} ONNXTensorElementDataType;
 * */

