/*
 *
 * GLIBC Version:
      GLIBC 2.29 or newer for cxx11 ABI version
      GLIBC 2.17 or newer for pre-cxx11 ABI version
   GCC Version:
      GCC 9 or newer for cxx11 and pre-cxx11 ABI versions

   The PyTorch C++ frontend is a C++17 library for CPU and GPU tensor computation,
   with automatic differentiation and high level building blocks for state of the art machine learning applications.

   libtorch inference reference (Chinese-language walkthrough):
    https://blog.csdn.net/oqqENvY12/article/details/103276462
 */
#include <chrono>
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include "ctime"

#include <opencv2/opencv.hpp>
#include <QtCore>

#include "TorchInclude.h"


// Loads the hard-coded sample image, resizes it to 512x512 if needed, and
// standardizes it per-image ((x - mean) / (std + eps), per channel).
// Returns a 1x3x512x512 NCHW float blob ready for torch::from_blob.
// Throws std::runtime_error if the image cannot be read.
cv::Mat preprocessSampleInput(){
    QString sampleImage = "../../../resource/raw_resize/1.jpg";
    cv::Mat mat = cv::imread(sampleImage.toLocal8Bit().constData());
    if(mat.empty()){
        // imread signals failure by returning an empty Mat, not by throwing;
        // without this check the resize below would abort with a cryptic error.
        throw std::runtime_error("preprocessSampleInput: failed to read sample image");
    }

    // BUG FIX: the original condition used &&, so an image with exactly one
    // dimension equal to 512 (e.g. 512x600) was never resized and the blob
    // layout downstream would be wrong. Resize if EITHER dimension differs.
    if(mat.rows != 512 || mat.cols != 512){
        cv::resize(mat, mat, cv::Size(512, 512));
    }

    mat.convertTo(mat, CV_32F);

    // Per-channel standardization; matStd holds one double per channel.
    cv::Scalar matMean; cv::Mat matStd;
    cv::meanStdDev(mat, matMean, matStd);
    matStd += 1e-8;  // epsilon guards against division by zero on flat channels
    const double *stdPtr = matStd.ptr<double>();
    cv::Scalar stdScalar(stdPtr[0], stdPtr[1], stdPtr[2]);

    cv::subtract(mat, matMean, mat);
    mat /= stdScalar;

    // HWC (BGR) float image -> NCHW blob expected by the network.
    return cv::dnn::blobFromImage(mat);
}

// Debug helper: prints up to `print_size` consecutive float values of `data`
// to stdout, starting at flat index `offset`, 10 values per line.
// Fixes two defects in the original: the loop bound was hard-coded to
// 3 * 512 * 512 (out-of-bounds read for any smaller tensor), and the
// `i > offset + print_size` break printed print_size + 2 values.
// NOTE(review): assumes `data` is a contiguous float32 tensor — data_ptr
// would otherwise expose the wrong element order; confirm at call sites.
void print_tensor_data(torch::Tensor &data, int offset, int print_size){
    std::cout << "Print data" << std::endl;
    const float *dataPtr = data.data_ptr<float>();
    const int64_t numel = data.numel();
    const int64_t last = static_cast<int64_t>(offset) + print_size;
    for (int64_t i = offset; i < numel && i < last; ++i) {
        std::cout << dataPtr[i] << " ";
        // Break the line after every 10th printed value.
        if ((i - offset + 1) % 10 == 0) std::cout << std::endl;
    }
    std::cout << std::endl;
}

// Loads a TorchScript segmentation model, runs test-time-augmented inference
// on one preprocessed sample image, and writes the 0/255 argmax mask to disk.
// Each stage is timed individually.
int main(int argc, char *argv[])
{
    // Stage timer. Fixes the bugs the original code flagged itself:
    //  - every `elapsed` had a spurious extra division by 10;
    //  - `start` was never reset after the from_blob stage, so the later
    //    stage timings silently accumulated;
    //  - clock() measures CPU time, not wall time; steady_clock is the
    //    correct monotonic wall clock for interval measurement.
    using Clock = std::chrono::steady_clock;
    auto start = Clock::now();
    auto logElapsed = [&start]() {
        const auto end = Clock::now();
        const double elapsed = std::chrono::duration<double>(end - start).count();
        qDebug() << "Time cost: " << elapsed << "s";
        start = end;  // restart the timer for the next stage
    };

    // Load the TorchScript module exported from Python via torch.jit.save.
    QString jitModel = "../../../3-NNUnetWithOnnxAndOpenVino/v1_jit.pth";
    torch::jit::script::Module model = torch::jit::load(jitModel.toLocal8Bit().constData());
    logElapsed();

    cv::Mat sampleMat = preprocessSampleInput();
    logElapsed();

    // from_blob does NOT copy — sampleMat must stay alive while inputTensor
    // (and anything derived from it) is in use; it does here.
    torch::Tensor inputTensor = torch::from_blob(sampleMat.data, {1, 3, 512, 512}, torch::kFloat32);
    logElapsed();

    // Inference only: disable autograd bookkeeping for all forward passes.
    torch::NoGradGuard noGrad;

    // Test-time augmentation: average softmax over the identity and the
    // three axis flips (W, H, W+H), flipping each prediction back first.
    // (Original note: ~1 s per pass — the writing is probably not right.)
    torch::Tensor pred = model.forward({inputTensor}).toTensor().softmax(1);
    logElapsed();

    pred += model.forward({inputTensor.flip({3})}).toTensor().softmax(1).flip({3});
    logElapsed();

    pred += model.forward({inputTensor.flip({2})}).toTensor().softmax(1).flip({2});
    logElapsed();

    pred += model.forward({inputTensor.flip({3, 2})}).toTensor().softmax(1).flip({3, 2});
    pred.div_(4);

    // Per-pixel class index -> 0/255 mask (binary segmentation assumed).
    // contiguous() guarantees data_ptr exposes row-major memory below.
    torch::Tensor output = pred.argmax(1).toType(torch::kUInt8).contiguous();
    output.mul_(255);
    // print_tensor_data(output, 0, 100);
    logElapsed();

    // Wrap the tensor's memory in a Mat header (no copy) and save it; the
    // tensor outlives the imwrite call, so the borrowed pointer is safe.
    cv::Mat argmaxMat(512, 512, CV_8UC1, output.data_ptr<unsigned char>());
    cv::imwrite("cpp output.png", argmaxMat);
    logElapsed();

    return 0;
}
