#include "resnet.h"
/// Builds the ONNX Runtime session and loads the class-name list.
/// @param modelPath      path to the .onnx model file
/// @param classFilePath  text file with one class label per line
ResNet::ResNet(std::string modelPath, std::string classFilePath)
{
    // Register the CUDA execution provider on device 0. The C API hands back
    // an owning OrtStatus*: non-null means CUDA could not be appended (ORT
    // falls back to the remaining providers), and it must be released either
    // way or it leaks. The original code dropped it on the floor.
    OrtStatus *status = OrtSessionOptionsAppendExecutionProvider_CUDA(sessionOptions, 0);
    if (status != nullptr)
        Ort::GetApi().ReleaseStatus(status);
    sessionOptions.SetGraphOptimizationLevel(ORT_ENABLE_BASIC);
    // NOTE(review): Ort::Session takes a wide-char path on Windows; this
    // narrow-char call is the Linux/macOS signature. The old unused
    // std::wstring conversion (a leftover from a Windows build) was removed.
    ort_session = new Session(env, modelPath.c_str(), sessionOptions);

    const size_t numInputNodes = ort_session->GetInputCount();
    const size_t numOutputNodes = ort_session->GetOutputCount();

    // Replacement for the deprecated GetInputName/GetOutputName API:
    // GetInputNameAllocated returns an owning smart pointer, so the results
    // are parked in the *_AllocatedStringPtr members to keep the raw char*
    // entries stored in input_names/output_names valid for the session's life.
    AllocatorWithDefaultOptions allocator;
    for (size_t i = 0; i < numInputNodes; i++)
    {
        In_AllocatedStringPtr.push_back(ort_session->GetInputNameAllocated(i, allocator));
        input_names.push_back(In_AllocatedStringPtr.at(i).get());
        Ort::TypeInfo input_type_info = ort_session->GetInputTypeInfo(i);
        auto input_tensor_info = input_type_info.GetTensorTypeAndShapeInfo();
        input_node_dims.push_back(input_tensor_info.GetShape());
    }
    for (size_t i = 0; i < numOutputNodes; i++)
    {
        Out_AllocatedStringPtr.push_back(ort_session->GetOutputNameAllocated(i, allocator));
        output_names.push_back(Out_AllocatedStringPtr.at(i).get());
        Ort::TypeInfo output_type_info = ort_session->GetOutputTypeInfo(i);
        auto output_tensor_info = output_type_info.GetTensorTypeAndShapeInfo();
        output_node_dims.push_back(output_tensor_info.GetShape());
    }

    // Assumes input 0 is NCHW and output 0 is (batch, proposals, nout)
    // — TODO confirm against the exported model.
    this->inpHeight = input_node_dims[0][2];
    this->inpWidth = input_node_dims[0][3];
    this->nout = output_node_dims[0][2];
    this->num_proposal = output_node_dims[0][1];

    // Load class labels, one per line (missing file simply yields zero classes).
    std::ifstream ifs(classFilePath.c_str());
    std::string line;
    while (getline(ifs, line))
        this->class_names.push_back(line);
    this->num_class = class_names.size();
}

/// Converts an HWC BGR uchar image into the planar CHW RGB float buffer
/// consumed by detect(), with pixel values scaled to [0, 1].
/// @param img  image already resized to (inpWidth, inpHeight); assumed
///             3-channel 8-bit BGR — TODO confirm no other callers exist.
void ResNet::normalize_(cv::Mat img)
{
    const int row = img.rows;
    const int col = img.cols;
    const int plane = row * col; // elements per channel plane

    this->input_image_.resize(plane * img.channels());
    for (int c = 0; c < 3; ++c)
    {
        for (int i = 0; i < row; ++i)
        {
            // Hoist the row pointer out of the pixel loop instead of calling
            // img.ptr<uchar>(i) once per pixel.
            const uchar *src = img.ptr<uchar>(i);
            for (int j = 0; j < col; ++j)
            {
                // "2 - c" swaps OpenCV's BGR order to RGB; 255.0f keeps the
                // division in float (255.0 forced a double round trip).
                this->input_image_[c * plane + i * col + j] = src[j * 3 + 2 - c] / 255.0f;
            }
        }
    }
}

/// Runs one inference on a BGR frame and returns the argmax class index,
/// or -1 when num_class is 0.
/// @param frame  input image; resized internally to (inpWidth, inpHeight)
int ResNet::detect(cv::Mat &frame)
{
    cv::Mat dstimg;
    resize(frame, dstimg, cv::Size(this->inpWidth, this->inpHeight));
    this->normalize_(dstimg);

    // Wrap input_image_ (CHW float, filled by normalize_) as a 1x3xHxW tensor.
    std::array<std::int64_t, 4> input_shape_{1, 3, this->inpHeight, this->inpWidth};
    auto allocator_info = MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
    Value input_tensor_ = Value::CreateTensor<float>(allocator_info, input_image_.data(), input_image_.size(), input_shape_.data(), input_shape_.size());

    // Run inference.
    std::vector<Value> ort_outputs = ort_session->Run(RunOptions{nullptr}, &input_names[0], &input_tensor_, 1, output_names.data(), output_names.size());
    const float *pdata = ort_outputs[0].GetTensorMutableData<float>();

    // Argmax over the class scores. BUG FIX: the accumulator was an int, so
    // float scores were truncated on assignment and compared against an int —
    // e.g. 0.9 and 0.2 both became 0 and the wrong class could win. Seeding
    // from the first element (via index < 0) also removes the magic sentinel.
    int index = -1;
    float best = 0.0f;
    for (int i = 0; i < this->num_class; i++)
    {
        if (index < 0 || pdata[i] > best)
        {
            index = i;
            best = pdata[i];
        }
    }
    return index;
}
