//
// Created by zhangcc on 2020/12/21.
//
#include <torch/torch.h>
#include <torch/script.h>
#include <trtorch/trtorch.h>

#include <opencv2/opencv.hpp>
#include <opencv2/dnn/dnn.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/imgcodecs/imgcodecs.hpp>

#include <fstream>
#include <iostream>
#include <memory>
#include <sstream>
#include <string>

#include "trilinear_kernel.h"

int main()
{
    int gpu_id = 0;
    int batch_size = 1;
    int in_channels = 3;
    int inputH = 512;
    int inputW = 512;
    int scale = 1;
    int lut_dim = 33;

    int out_channels = 3;
    int outputH = scale * inputH;
    int outputW = scale * inputW;
    int frame_size_out = batch_size * out_channels * outputH * outputW;

    cv::Mat image = cv::imread("../000000000139.jpg");
    cv::cvtColor(image, image, cv::COLOR_BGR2RGB);
    cv::resize(image, image, cv::Size(inputW, inputH));
    image.convertTo(image, CV_32FC3, 1.0f/255.0f);

//    auto module = torch::jit::load("../color_enhancement_inference_dynamic-v2.0.0.pt");
    auto module = torch::jit::load("../color_enhancement_inference_expertO-v2.0.0-gpu.pt", torch::Device(torch::kCUDA, gpu_id));
    module.eval();

    cv::Mat model_in;
    cv::resize(image, model_in, cv::Size(256, 256));

    float bit_num = 8.0f;
    torch::TensorOptions option(torch::kFloat);
    torch::Tensor input_tensor = torch::from_blob(model_in.data, {batch_size, 256, 256, in_channels}, option);
//    torch::Tensor bitNum_tensor = torch::from_blob(&bit_num, {1}, option);
    input_tensor = input_tensor.to(torch::Device(torch::kCUDA, gpu_id));

    torch::NoGradGuard no_grad;
    torch::Tensor lut_tensor = module.forward({input_tensor}).toTensor();
    lut_tensor = lut_tensor.to(torch::kCPU);
    image.convertTo(image, CV_32FC3, 255.0f);

    cv::Mat result(outputH, outputW, CV_32FC3);
    float *lut_cpu = (float *)lut_tensor.data_ptr();
    TriLinearForwardCpu((float *)lut_cpu, (float *)image.data, \
                    (float *)result.data, lut_dim, lut_dim * lut_dim * lut_dim, \
                    ((1 << int(bit_num)) - 1.0f) / (lut_dim - 1), \
                    inputW * scale, inputH * scale, out_channels, int(bit_num));

    result.convertTo(result, CV_8UC3, 1.0f);
    cv::cvtColor(result, result, cv::COLOR_RGB2BGR);
    cv::imwrite(std::to_string(0) + "_" + std::to_string(gpu_id) + ".jpg", result);
    std::cout << std::to_string(0) + "_" + std::to_string(gpu_id) + ".jpg" << std::endl;

    trtorch::set_device(gpu_id);
    // auto ranges = std::vector<trtorch::CompileSpec::InputRange>({input_tensor.sizes()});
    // auto compile_settings = trtorch::CompileSpec(ranges);
    std::vector<int64_t> min_shapes{1,300,300, 3}, opt_shapes{1, 512, 512, 3}, max_shapes{1,1024, 1024, 3};
//    std::vector<trtorch::CompileSpec::InputRange> dims{{opt_shapes, opt_shapes, opt_shapes}};
    c10::ArrayRef<int64_t> input_ranges{1, 512, 512, 3};
    std::vector<trtorch::CompileSpec::InputRange> dims{c10::ArrayRef<int64_t>{1, 256, 256, 3}};
//    for (auto & item : dims)
//        item.input_is_dynamic = false;
    auto compile_settings = trtorch::CompileSpec(dims);
    compile_settings.workspace_size = 1 << 20;
    bool use_half = false;
    if (use_half) compile_settings.op_precision = torch::kF16;
    else compile_settings.op_precision = torch::kFloat;

    compile_settings.device.device_type = trtorch::CompileSpec::Device::DeviceType::kGPU;
//    compile_settings.truncate_long_and_double = "true";
    //auto trt_mod = trtorch::CompileGraph(module, compile_spec);
    trtorch::CheckMethodOperatorSupport(module, "forward");
    auto engine = trtorch::ConvertGraphToTRTEngine(module, "forward", compile_settings);
    std::ofstream out("../color_enhancement_inference_dynamic-v2.0.0.trt");
    out << engine;
    out.close();


    return 0;
}