#include <iostream>
#include <fstream>
// #include <vector>
#include <cuda.h>
#include <stdio.h>
#include <cassert>
#include <cuda_runtime.h>
// #include <opencv2/opencv.hpp>
#include "NvInfer.h"
#include "NvOnnxConfig.h"
#include "NvOnnxParser.h"
#include "NvInferRuntime.h"
#include "DpsCostVolume_kernel.hpp"
#include <chrono>

// Grid-stride loop helper: iterates `i` over [0, n) across all launched
// threads (each thread starts at its global index and strides by the total
// thread count), so kernel correctness does not depend on the launch
// configuration — any grid size covers the full range.
#define CUDA_1D_KERNEL_LOOP(i, n)                           \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
       i += blockDim.x * gridDim.x)

// #ifdef CHECK_MEMORY_DEBUG(index_R)
//   printf("index:%d", index_R)
// #endif

// Bilinear interpolation of a single H x W feature plane (`bottom_data`,
// row-major) at fractional coordinates (y, x).
//
// Returns 0 for samples more than one pixel outside the plane; coordinates
// in [-1, 0) are clamped to 0 and coordinates past the last row/column are
// clamped to the border (ROIAlign-style sampling).
//
// All constants are expressed in T: the original `-1.0` / `1.` double
// literals silently promoted the arithmetic to double for T = float (and
// would not compile for T = half, which lacks implicit double conversions).
template <typename T>
__device__ T bilinear_interpolate(const T *bottom_data,
                                  const int height, const int width,
                                  T y, T x)
{
  // Samples beyond one pixel of slack outside the plane contribute nothing.
  if (y < (T)-1 || y > (T)height || x < (T)-1 || x > (T)width)
  {
    return 0;
  }

  // Clamp slightly-negative coordinates onto the plane.
  if (y <= (T)0)
    y = (T)0;
  if (x <= (T)0)
    x = (T)0;

  int y_low = (int)y;
  int x_low = (int)x;
  int y_high;
  int x_high;

  // Clamp the 2x2 sampling window to the bottom/right borders; when the low
  // index is already at the border, both corners collapse onto it.
  if (y_low >= height - 1)
  {
    y_high = y_low = height - 1;
    y = (T)y_low;
  }
  else
  {
    y_high = y_low + 1;
  }

  if (x_low >= width - 1)
  {
    x_high = x_low = width - 1;
    x = (T)x_low;
  }
  else
  {
    x_high = x_low + 1;
  }

  // l* are the fractional offsets inside the cell, h* their complements.
  T ly = y - y_low;
  T lx = x - x_low;
  T hy = (T)1 - ly, hx = (T)1 - lx;
  // Weighted sum of the four surrounding texels.
  T v1 = bottom_data[y_low * width + x_low];
  T v2 = bottom_data[y_low * width + x_high];
  T v3 = bottom_data[y_high * width + x_low];
  T v4 = bottom_data[y_high * width + x_high];
  T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
  return w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4;
}


// __device__ half half_bilinear_interpolate(const half *bottom_data,
//                                   const int height, const int width,
//                                   half y, half x)
// {

//   // deal with cases that inverse elements are out of feature map boundary
//   const half czero(0.0);
//   const half c_one(1.0);
//   const half c_neg_one(-1.0);
//   assert (y >= c_one && y <= half(height));
//   if (y < c_neg_one || y > half(height) || x < c_neg_one || x > half(width))
//   {
//     // empty
//     return 0;
//   }

//   if (y <= czero)
//     y = 0;
//   if (x <= czero)
//     x = 0;

//   int y_low = __float2int_rd(y);
//   int x_low = __float2int_rd(x);
//   int y_high;
//   int x_high;

//   if (y_low >= height - 1)
//   {
//     y_high = y_low = height - 1;
//     y = (half)y_low;
//   }
//   else
//   {
//     y_high = y_low + 1;
//   }

//   if (x_low >= width - 1)
//   {
//     x_high = x_low = width - 1;
//     x = (half)x_low;
//   }
//   else
//   {
//     x_high = x_low + 1;
//   }

//   half ly = y - half(y_low);
//   half lx = x - half(x_low);
//   half hy = c_one - ly;
//   half hx = c_one - lx;
//   // do bilinear interpolation
//   half v1 = bottom_data[y_low * width + x_low];
//   half v2 = bottom_data[y_low * width + x_high];
//   half v3 = bottom_data[y_high * width + x_low];
//   half v4 = bottom_data[y_high * width + x_high];
//   half w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
//   return (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
//   // half v1 = bottom_data[y_low * width + x_low];
//   // half v2 = bottom_data[y_low * width + x_high];
//   // half w1 = hx;
//   // half w2 = lx;
//   // return (w1 * v1 + w2 * v2);
// }

// Builds a dual (left/right) plane-sweep cost volume in one pass.
// `cost` layout: [num_batch][2*sep][max_disp][height][width]. The first `sep`
// channel group holds left features copied across every disparity; the second
// group holds right features sampled at a horizontal offset of
// -shift[n * max_disp + pd]. One thread index covers one left-half element
// (nthreads = num_batch * sep * max_disp * height * width); each thread also
// writes the matching right-half element.
// NOTE(review): the DPS channel-shift logic is currently disabled (`cc = c`
// below, marked "!!!1"), so `psv_channels` and `interval` do not affect the
// output, and `downsample` is unused (its arithmetic is commented out).
template <typename T>
__global__ void BuildDpsCostVolumeKernel(const int nthreads,
                                         const T *left, const T *right, const T *shift, const int *psv_channels,
                                         const int num_batch, const int channels, const int height,
                                         const int width, const int max_disp,
                                         T *cost, 
                                         const int downsample, const int sep, const int interval)
{
  CUDA_1D_KERNEL_LOOP(index, nthreads) //Grid-stride loop
  {
    // sep is channels
    // Optimizations:
    // - Avoid DPS since mem access order is not sequential
    // - Avoid bilinear interpolation since it is not necessary
    // - Downsample before the module to ensure sequential access
    // printf("index: %d", index);
    // Decompose the flat index into (n, c, pd, ph, pw), width fastest:
    // index = (((n * sep + c) * max_disp + pd) * height + ph) * width + pw
    int pw = index % width;
    int ph = (index / width) % height;
    int pd = (index / width / height) % max_disp;
    int c = (index / width / height / max_disp) % sep;
    int n = index / width / height / max_disp / sep; // 0-1

    // int iw = pw * downsample;
    // int ih = ph * downsample;
    int iw = pw ;
    int ih = ph;
    // int img_height = height * downsample;
    // int img_width = width * downsample;
    int img_height = height ;
    int img_width = width ;
    // Left-half output index; the right half lives one `sep` channel group
    // (sep * max_disp * height * width elements) further on.
    int index_L = (((n  * 2 * sep + c) * max_disp + pd) * height + ph) * width + pw;
    int index_R = index_L + sep * max_disp * height * width ;
    // CHECK_MEMORY_DEBUG(index_R);
    // printf("index_R: %d", index_R);
    // NOTE(review): `scale` is dead while the channel-shift branch below is
    // commented out — it feeds no live computation.
    T scale = (T)((max_disp - 1) / interval * interval) / (max_disp - 1.);

    // Horizontal sampling offset into the right image at disparity pd.
    T shift_pd = -shift[n * max_disp + pd];

    // // shift channels by the ratio of pd/maxdisp
    // int c_shift = int((T)(pd / interval * interval / scale) / (max_disp - 1.) * (channels - sep + 1. - 1e-9)); // 0 -> 32
    // // AT_ASSERTM(c_shift <= (channels - sep), "c_shift is (channels - sep) at max");
    // c_shift = psv_channels[c_shift];
    // // // compute the separated channel
    // int sep_c = (c_shift / sep + 1) * sep;
    // int cc;
    // if (c < c_shift + sep - sep_c)
    //   cc = sep_c + c;
    // else
    //   cc = sep_c - (sep - c);
    
    // Channel shift disabled: take the input channel directly.
    int cc = c; // !!!1

    assert(cc < channels);

    // Left half: copy the left feature, broadcast across all disparities.
    cost[index_L] = left[((n * channels + cc) * img_height + ih) * img_width + iw];

    if (iw + shift_pd >= 0. && iw + shift_pd <= img_width - 1)
    {
      const T *offset_right = right + (n * channels + cc) * img_height * img_width;
      // cost[index_R] = bilinear_interpolate(offset_right, img_height, img_width, (T)ih, (T)iw + shift_pd);
      // Right half: nearest-pixel lookup deliberately replaces the bilinear
      // sample above. NOTE(review): (int)shift_pd truncates toward zero, so a
      // negative fractional shift rounds up rather than flooring, and the
      // fractional parts of iw and shift_pd are dropped separately — confirm
      // this matches the intended sampling.
      cost[index_R] = (T) offset_right[(int)ih*(int)img_width + (int)iw + (int)shift_pd]; /// 2
    }
    else
    {
      // Shifted sample falls outside the right image: zero-fill.
      cost[index_R] = 0.;
    }
  }
}

// Launches BuildDpsCostVolumeKernel on `stream` (asynchronous; the caller is
// responsible for any required synchronization before reading `output`).
// `output` layout: [num_batch][2*sep][max_disp][height][width], left feature
// half followed by the shifted right half.
// Returns 0 on a successful launch, -1 if the launch failed.
int BuildDpsCostVolumeKernelLaunch(const float* left,
                                   const float* right,
                                   const float* shift,
                                   const int* psv_channels,
                                   float *output,
                                   const int num_batch,
                                   const int channels,
                                   const int height,
                                   const int width,
                                   const int max_disp,
                                   const int downsample,
                                   const int sep,
                                   const int interval,
                                   cudaStream_t stream)
{
  // One thread index per left-half element; the kernel writes the matching
  // right-half element too, so the loop bound is half the total output size.
  const int single_size = num_batch * sep * max_disp * height * width;
  if (single_size <= 0)
  {
    return 0; // nothing to do; avoid an invalid zero-sized launch
  }

  const int block = 1024;
  // Integer ceil-div (avoids std::ceil, which required <cmath>), capped at
  // 4096 blocks. The kernel uses a grid-stride loop, so a capped grid still
  // covers every element; the original code computed this grid and then
  // unconditionally overwrote it with 4096, launching idle blocks for small
  // inputs.
  int grid = (single_size + block - 1) / block;
  if (grid > 4096)
    grid = 4096;

  BuildDpsCostVolumeKernel<<<grid, block, 0, stream>>>(
      single_size, left, right, shift, psv_channels,
      num_batch, channels, height, width, max_disp, output,
      downsample, sep, interval);

  // Kernel launches do not return errors directly: launch-configuration
  // failures surface via cudaGetLastError(); asynchronous execution errors
  // surface at the next synchronizing call.
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess)
  {
    fprintf(stderr, "CUDA kernel launch failed: %s\n", cudaGetErrorString(err));
    return -1; // propagate an error code to the caller
  }
  return 0;
}


// int BuildDpsCostVolumeHalfKernelLaunch(const half* left,
//                                         const half* right,
//                                         const half* shift,
//                                         const int* psv_channels,
//                                         half *output,
//                                         const int num_batch,
//                                         const int channels,
//                                         const int height,
//                                         const int width,
//                                         const int max_disp,
//                                         const int downsample,
//                                         const int sep,
//                                         const int interval,
//                                         cudaStream_t stream)
// {
//   const int output_size = num_batch * 2 * sep * max_disp * height * width;

//   // long gridX = std::min(static_cast<long>(std::ceil(static_cast<double>(output_size) / blockSize)), static_cast<long>(maxBlocks));
//   // dim3 grid(static_cast<long>(std::ceil(static_cast<double>(output_size / 2) / 1024L)));
//   const int single_size = num_batch * sep * max_disp * height * width;
//   dim3 grid(std::ceil(single_size / 1024.0));
//   grid=4096;
//   // std::cout << "grid.x: " << grid.x << std::endl;
//   dim3 block(1024);
//   // std::cout << "block.x: " << block.x << std::endl;


//   BuildDpsCostVolumeHalfKernel<<<grid, block, 0, stream>>>(single_size, left, right, shift, psv_channels,
//                                                        num_batch, channels, height, width, max_disp, output, downsample, sep, interval);
//   // cudaDeviceSynchronize();  // transfers are asynchronous
//   // Check whether the CUDA kernel launch succeeded
//   cudaError_t err = cudaGetLastError();
//   if (err != cudaSuccess) {
//     fprintf(stderr, "CUDA kernel launch failed: %s\n", cudaGetErrorString(err));
//     return -1; // return an appropriate error code
//   } else {
//     return 0;
//   }
// }
