#include <stdio.h>

#include <cstdlib>
#include <iostream>
#include <random>
#include <vector>

// #include "cuda_runtime.h"
#include "DpsCostVolume.h"
#include "cuda_util.h"


// Abort the process on any non-zero CUDA status, printing the error string,
// source location, and raw status code.
//
// The argument is captured into a local exactly once, so side-effecting
// expressions (e.g. checkCudaErrors(cudaEventCreate(&start))) are not
// evaluated multiple times — the original macro expanded `status` three
// times. The do { } while (0) wrapper makes the macro a single statement,
// safe as the body of an unbraced if/else.
#define checkCudaErrors(status)                                     \
  do                                                                \
  {                                                                 \
    const auto cuda_status_ = (status);                             \
    if (cuda_status_ != 0)                                          \
    {                                                               \
      std::cout << "Cuda failure: "                                 \
                << cudaGetErrorString(cuda_status_)                 \
                << " at line " << __LINE__                          \
                << " in file " << __FILE__                          \
                << " error status: " << cuda_status_                \
                << std::endl;                                       \
      abort();                                                      \
    }                                                               \
  } while (0)

// void Getinfo(void)
// {
//   cudaDeviceProp prop;

//   int count = 0;
//   cudaGetDeviceCount(&count);
//   printf("\nGPU has cuda devices: %d\n", count);
//   for (int i = 0; i < count; ++i) {
//     cudaGetDeviceProperties(&prop, i);
//     printf("----device id: %d info----\n", i);
//     printf("  GPU : %s \n", prop.name);
//     printf("  Capbility: %d.%d\n", prop.major, prop.minor);
//     printf("  Global memory: %luMB\n", prop.totalGlobalMem >> 20);
//     printf("  Const memory: %luKB\n", prop.totalConstMem  >> 10);
//     printf("  SM in a block: %luKB\n", prop.sharedMemPerBlock >> 10);
//     printf("  warp size: %d\n", prop.warpSize);
//     printf("  threads in a block: %d\n", prop.maxThreadsPerBlock);
//     printf("  block dim: (%d,%d,%d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
//     printf("  grid dim: (%d,%d,%d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
//   }
//   printf("\n");
// }


// Helper: fill data[0 .. size) with uniformly distributed floats in [0, 1).
// A fresh Mersenne Twister engine is seeded from std::random_device on every
// call, so successive calls produce independent sequences.
void generateRandomData(float* data, size_t size) {
    std::mt19937 engine{std::random_device{}()};
    std::uniform_real_distribution<float> uniform01(0.0f, 1.0f);
    for (float* p = data; p != data + size; ++p) {
        *p = uniform01(engine);
    }
}

// void generateRandomIntData(float* data, size_t size){
//     std::random_device rd;
//     std::mt19937 gen(rd());
//     for (size_t i=0; i < size; ++i){
//         data[i] = static_cast<float>(gen());
//     }
// }

// using namespace std;


// Standalone harness: builds host-side buffers shaped like the plugin's
// input/output tensors and invokes the cost-volume launch entry point.
// Returns 0 on success.
int main() {
    // Getinfo();
    // cudaEvent_t start, stop;
    // float elapsedTime = 0.0f;
    // cudaStream_t stream = NULL;
    // checkCudaErrors(cudaEventCreate(&start));
    // checkCudaErrors(cudaEventCreate(&stop));
    // checkCudaErrors(cudaStreamCreate(&stream));

    // Left/right feature-map dimensions (NCHW layout assumed by the sizes
    // below — TODO confirm against the plugin's expected layout).
    const int batchsize = 1;
    const int channels = 96;
    const int height = 480;
    const int width = 960;
    // Widen to size_t before multiplying so the product cannot overflow int.
    const size_t datasize =
        static_cast<size_t>(batchsize) * channels * height * width;

    // std::vector owns the buffers, so every exit path frees them (the
    // previous raw `new float[]` allocations were never deleted).
    std::vector<float> left(datasize);
    std::vector<float> right(datasize);
    // generateRandomData(left.data(), datasize);
    // generateRandomData(right.data(), datasize);

    // Per-disparity shift tensor.
    const int num_disps = 72;
    const size_t dispsize = static_cast<size_t>(batchsize) * num_disps;
    std::vector<float> shift(dispsize);
    // generateRandomData(shift.data(), dispsize);

    // Plane-sweep-volume channel-selection tensor.
    const int num_psv = 65;
    const size_t psvsize = static_cast<size_t>(batchsize) * num_psv;
    std::vector<float> psv_channel(psvsize);

    // Output cost volume: 32 channels per disparity at 1/4 resolution.
    const size_t costsize = static_cast<size_t>(batchsize) * 32 * num_disps *
                            (height / 4) * (width / 4);
    std::vector<float> cost(costsize);

    const int downsample = 4;
    const int interval = 1;
    const int sep = 32;
    // Full-argument launch kept for reference; only the no-argument overload
    // is exercised here, so silence unused-variable warnings explicitly.
    // DpsCostVolumeLaunch(cost.data(), left.data(), right.data(),
    //                     shift.data(), psv_channel.data(),
    //                     downsample, sep, interval, stream);
    (void)downsample;
    (void)interval;
    (void)sep;
    nvinfer1::plugin::DpsCostVolumeLaunch();

    return 0;
}
