#include "cuda_tools.hpp"
#include <opencv2/opencv.hpp>
#include "my_tensor.hpp"
#include "my_mixmemory.hpp"
#include "cudaRGB.h"
#include <thread>

// Scratch/test driver: exercises MMem::MixMemory GPU allocation (active path)
// and an RGB->BGR conversion via MyTensor/MixMemory (disabled paths below).
int main()
{
    using namespace std;
    using namespace cv;

#if 1
    // --- MixMemory GPU-allocation smoke test ---
    int dev_id = 2;
    cudaError_t err = cudaSetDevice(dev_id);
    if (err != cudaSuccess)
    {
        // Fail fast: on a machine with fewer than dev_id+1 GPUs every later
        // CUDA call would fail with a confusing sticky error.
        cerr << "cudaSetDevice(" << dev_id << ") failed: "
             << cudaGetErrorString(err) << endl;
        return 1;
    }

    MMem::MixMemory mm(dev_id);

    // NOTE: a plain `2 * 1024 * 1024 * 1024` is evaluated in (32-bit) int and
    // overflows *before* the conversion to size_t. The previous fix used `2l`
    // literals, but `long` is also only 32 bits on LLP64 platforms (Windows),
    // so that still overflowed there. Computing in unsigned 64-bit is correct
    // on every mainstream 64-bit platform.
    size_t sz = size_t(2ULL * 1024ULL * 1024ULL * 1024ULL);
    cout << "sz: " << sz << endl;
    mm.new_gpu_mem(sz);

    // Per the CUDA Runtime API, cudaGetDevice returns the device currently
    // selected for the calling host thread (i.e. the one set by cudaSetDevice
    // above) — it does not default to device 0.
    int old_ = -1;
    err = cudaGetDevice(&old_);
    if (err != cudaSuccess)
        cerr << "cudaGetDevice failed: " << cudaGetErrorString(err) << endl;
    cout << "old_: " << old_ << endl;

    // Give the driver a moment to settle before querying memory usage
    // through an external tool.
    this_thread::sleep_for(chrono::milliseconds(200));

    // Inspect device memory usage; report (but tolerate) a failed launch.
    if (system("nvidia-smi") != 0)
        cerr << "warning: failed to run nvidia-smi" << endl;
#endif

#if 0
    // --- Disabled: load an image and convert RGB->BGR on the GPU ---
    string img_path = R"(/home/lzc/work/Code/py/test-resnet50/dataset/cat.12029.jpg)";
    Mat img_mat = imread(img_path);
    size_t img_bytes = img_mat.rows * img_mat.cols * img_mat.channels() * sizeof(uint8_t);

    int device_id = 0;
    CUDATools::AutoCuStream cuStream(device_id);

#if 0
    // Variant A: raw MixMemory buffers + RGBToBGR kernel, display via OpenCV.
    MMem::MixMemory mm(device_id);
    mm.set_custream(cuStream.stream());
    mm.new_gpu_mem(img_bytes);
    mm.synchronize();
    mm.copy_mem_cpu2gpu(0, img_mat.data, img_bytes, device_id);
    mm.synchronize();

    MMem::MixMemory mm1(device_id);
    mm1.set_custream(cuStream.stream());
    mm1.new_gpu_mem(img_bytes);
    mm1.synchronize();
    RGBToBGR_infer((uchar3*)mm.gpu(), (uchar3*)mm1.gpu(), img_mat.cols, img_mat.rows);
    mm1.to_cpu();
    mm1.synchronize();

    Mat img_mat1(img_mat.rows, img_mat.cols, CV_8UC3, mm1.cpu());
    imshow("img_mat1", img_mat1);
    waitKey(0);
#else
    // Variant B: MyTensor round-trip (upload, pull back, element access).
    MTsr::MyTensor mts(device_id, MTsr::DataType::UInt8, 1, img_mat.channels(), img_mat.rows, img_mat.cols);
    mts.copy_mem_cpu2gpu(0, img_mat.data, img_bytes, device_id);
    mts.to_cpu();

    auto a = mts.cpu_at<uint8_t>(0,0,0);
    auto a1 = mts.gpu_at<uint8_t>(0,0,0);
#endif
#endif

    return 0;
}
