#include <iostream>
#include <fstream>
#include <cuda.h>
#include <stdio.h>
#include <cassert>
#include <cuda_runtime.h>
#include "NvInfer.h"
#include "NvOnnxConfig.h"
#include "NvOnnxParser.h"
#include "NvInferRuntime.h"
#include "CostVolume_kernel.hpp"
#include <chrono>

// TODO make it in a common file
// Grid-stride loop: a fixed-size grid covers an arbitrary element count n,
// so the launch configuration is decoupled from the problem size.
#define CUDA_1D_KERNEL_LOOP(i, n)                            \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
       i += blockDim.x * gridDim.x)


// template <typename T>
// template <typename T>
// Fills a concatenation-style cost volume of shape [N, 2*C, max_disp, H, W]:
// the first C channels replicate the left features across all disparity
// planes; the second C channels hold the right features shifted horizontally
// by -shift[n, d]. One thread handles one left-half element (nthreads =
// N*C*max_disp*H*W) and also writes the matching right-half element.
// NOTE(review): `downsample` is currently unused — the downsampling math is
// commented out below; confirm whether it should still be applied.
__global__ void BuildCostVolumeKernel(const int nthreads, 
    const float* left, const float* right, const float* shift, 
    const int num_batch, const int channels, const int height,
    const int width, const int max_disp,
    float* cost, const int downsample) {
  CUDA_1D_KERNEL_LOOP(index, nthreads) {
    // Decompose the flat index, innermost dimension first: w, h, d, c, n.
    int pw = index % width;
    int ph = (index / width) % height;
    int pd = (index / width / height) % max_disp;
    int c = (index / width / height/ max_disp) % channels;
    int n = index / width / height / max_disp / channels;

    // int iw = pw * downsample;
    // int ih = ph * downsample;
    // int img_height = height * downsample;
    // int img_width = width * downsample;
    // Input feature coordinates; identical to output coordinates since
    // downsampling is disabled.
    int iw = pw ;
    int ih = ph;
    int img_height = height;
    int img_width = width;
    // Left half lives in channels [0, C); right half in [C, 2C) — hence the
    // n * 2 * channels stride and the fixed offset between the two indices.
    int index_L = (((n * 2 * channels + c) * max_disp + pd) * height + ph) * width + pw;
    int index_R = index_L + channels * max_disp * height * width;

    // Negated per-batch, per-plane disparity shift.
    float shift_pd = -shift[n * max_disp + pd];

    cost[index_L] = left[((n * channels + c) * img_height + ih) * img_width + iw];

    // Only sample the right image if the shifted column stays in bounds.
    if ((float)iw + shift_pd >= (float)0 && (float)iw + shift_pd <= (float)img_width - (float)1)
    {
        const float* offset_right = right + (n * channels + c) * img_height * img_width;
        // cost[index_R] = bilinear_interpolate(offset_right, img_height, img_width, (T)ih, (T)iw + shift_pd);
        // NOTE(review): (int)shift_pd truncates toward zero, so a fractional
        // negative shift is not floor'ed and disagrees with the bilinear
        // variant commented out above — confirm shift values are integral.
        cost[index_R] = (float) offset_right[(int)ih*(int)img_width + (int)iw + (int)shift_pd];

    }
    else 
    {
        // Out-of-bounds shift: zero-pad the right half.
        cost[index_R] = 0.;
    }
  }
}



// template <typename T>
// __global__ void BuildCostVolumeHalfKernel(const int nthreads, 
//     const half* left, const half* right, const half* shift, 
//     const int num_batch, const int channels, const int height,
//     const int width, const int max_disp,
//     half* cost, const int downsample) {
//   CUDA_1D_KERNEL_LOOP(index, nthreads) {
//     int pw = index % width;
//     int ph = (index / width) % height;
//     int pd = (index / width / height) % max_disp;
//     int c = (index / width / height/ max_disp) % channels;
//     int n = index / width / height / max_disp / channels;

//     // int iw = pw * downsample;
//     // int ih = ph * downsample;
//     // int img_height = height * downsample;
//     // int img_width = width * downsample;
//     int iw = pw ;
//     int ih = ph;
//     int img_height = height;
//     int img_width = width;
//     int index_L = (((n * 2 * channels + c) * max_disp + pd) * height + ph) * width + pw;
//     int index_R = index_L + channels * max_disp * height * width;

//     half shift_pd = -shift[n * max_disp + pd];

//     cost[index_L] = left[((n * channels + c) * img_height + ih) * img_width + iw];

//     if (__float2half(iw) + shift_pd >= __float2half(0) && __float2half(iw) + shift_pd <= __float2half(img_width) - __float2half(1))
//     {
//         const half* offset_right = right + (n * channels + c) * img_height * img_width;
//         // cost[index_R] = bilinear_interpolate(offset_right, img_height, img_width, (T)ih, (T)iw + shift_pd);
//         cost[index_R] = offset_right[(int)ih*(int)img_width + (int)iw + (int)shift_pd];

//     }
//     else 
//     {
//         cost[index_R] = 0.;
//     }
//   }
// }


// Host-side launcher for BuildCostVolumeKernel.
// Launches one thread per left-half cost-volume element (the kernel also
// writes the matching right-half element) on the given stream.
// Returns 0 on success, -1 if the kernel launch failed.
// Note: the launch is asynchronous; execution errors (e.g. bad device
// pointers) surface at the next synchronizing call, not here.
int BuildCostVolumeKernelLaunch(const float* left,
                                   const float* right,
                                   const float* shift,
                                   float *output,
                                   const int num_batch,
                                   const int channels,
                                   const int height,
                                   const int width,
                                   const int max_disp,
                                   const int downsample,
                                   cudaStream_t stream)
{
  // NOTE(review): this product can overflow int for very large volumes;
  // promote to int64_t if such sizes are expected.
  const int single_size = num_batch * channels * max_disp * height * width;
  if (single_size <= 0) {
    return 0;  // empty problem (or overflowed to negative): nothing to launch
  }

  // Fix: the original computed ceil(single_size / 1024.0) and then
  // immediately overwrote it with a hard-coded grid of 4096, always
  // launching 4096 blocks regardless of size. Since the kernel uses a
  // grid-stride loop, launch only as many blocks as needed, capped at
  // 4096 (the grid-stride loop covers any remainder).
  const int threads_per_block = 1024;
  int num_blocks = (single_size + threads_per_block - 1) / threads_per_block;
  if (num_blocks > 4096) {
    num_blocks = 4096;
  }

  dim3 grid(num_blocks);
  dim3 block(threads_per_block);
  BuildCostVolumeKernel<<<grid, block, 0, stream>>>(single_size, left, right, shift,
                                                    num_batch, channels, height, width,
                                                    max_disp, output, downsample);

  // Catches launch-configuration errors only (the launch is async).
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "CUDA kernel launch failed: %s\n", cudaGetErrorString(err));
    return -1;  // propagate an error code to the caller
  } else {
    return 0;
  }
}

// int BuildCostVolumeHalfKernelLaunch(const half* left,
//                                    const half* right,
//                                    const half* shift,
//                                    half *output,
//                                    const int num_batch,
//                                    const int channels,
//                                    const int height,
//                                    const int width,
//                                    const int max_disp,
//                                    const int downsample,
//                                    cudaStream_t stream)
// {
//   const int single_size = num_batch * channels * max_disp * height * width;

//   dim3 grid(std::ceil(single_size / 1024.0));
//   grid=4096;
//   dim3 block(1024);
//   BuildCostVolumeHalfKernel<<<grid, block, 0, stream>>>(single_size, left, right, shift,
//                                                        num_batch, channels, height, width,
//                                                       max_disp, output, downsample);
  
//   cudaError_t err = cudaGetLastError();
//   if (err != cudaSuccess) {
//     fprintf(stderr, "CUDA kernel launch failed: %s\n", cudaGetErrorString(err));
//     return -1; // 返回适当的错误码

//   } else {
//     return 0;
//   }
// }