 //****************************************************************************

// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.

// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
//  1) RRRRRRRR...
//  2) GGGGGGGG...
//  3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).

// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.

//****************************************************************************

// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.

// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.

//****************************************************************************

// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.

// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.

//****************************************************************************

// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.

// Finally, remember to free the memory you allocate at the end of the function.

//****************************************************************************

#include <device_launch_parameters.h>
#include <device_functions.h>
#include <stdio.h>
#include "./utils.h"

#define SHARED
#define FILTERWIDTH 9
#define NTHREADS 32

__global__
void gaussian_blur(const unsigned char* const inputChannel,
                   unsigned char* const outputChannel,
                   int numRows, int numCols,
                   const float* const filter, const int filterWidth) {
  // Applies a filterWidth x filterWidth convolution to one color channel.
  // One thread per output pixel; neighbor reads are clamped to the image
  // bounds, matching the sequential reference solution's semantics.
  //
  // NOTE: intermediate results are accumulated in float and only the final
  // value is narrowed to unsigned char.
  //
  // Preconditions (not checked): filterWidth is odd,
  // filterWidth <= FILTERWIDTH, and blockDim.x/blockDim.y >= filterWidth
  // so the cooperative filter copy below covers every weight.

  // --- stage the filter in shared memory ---
  // Every thread in the block reads the whole filter, so one cooperative
  // copy from global memory amortizes the loads across the block.
  __shared__ float shared_filter[FILTERWIDTH * FILTERWIDTH];
  // Map one thread to one filter element; the flat index uses the runtime
  // filterWidth stride so it agrees with the indexing in the loop below.
  if (threadIdx.x < filterWidth && threadIdx.y < filterWidth) {
    const int fidx = threadIdx.y * filterWidth + threadIdx.x;
    shared_filter[fidx] = filter[fidx];
  }
  // Barrier BEFORE the bounds guard: every thread of the block must reach
  // __syncthreads(), including threads that will return early below.
  __syncthreads();

  // Map thread/block coordinates to a pixel position in the image.
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  const int col = blockIdx.x * blockDim.x + threadIdx.x;

  // Threads mapped past the image edge do no work.
  if (col >= numCols || row >= numRows) {
    return;
  }

  const int index = row * numCols + col;
  const int halfWidth = filterWidth / 2;

  float blurredPixel = 0.f;

  // Convolve: walk the filter window centered on (row, col).
  for (int filterRow = -halfWidth; filterRow <= halfWidth; ++filterRow) {
    for (int filterCol = -halfWidth; filterCol <= halfWidth; ++filterCol) {
      // Clamp the neighbor coordinates into [0, numRows-1] x [0, numCols-1]
      // instead of reading out of bounds.
      const int neighborRow = min(max(row + filterRow, 0), numRows - 1);
      const int neighborCol = min(max(col + filterCol, 0), numCols - 1);
      const float pixelValue =
          static_cast<float>(inputChannel[neighborRow * numCols + neighborCol]);
      // Filter is stored row-major with stride filterWidth; shift the
      // signed offsets into [0, filterWidth).
      const float filterValue =
          shared_filter[(filterRow + halfWidth) * filterWidth +
                        (filterCol + halfWidth)];
      blurredPixel += pixelValue * filterValue;
    }
  }

  outputChannel[index] = static_cast<unsigned char>(blurredPixel);
}

// This kernel takes in an image represented as a uchar4 and splits
// it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
                      int numRows,
                      int numCols,
                      unsigned char* const redChannel,
                      unsigned char* const greenChannel,
                      unsigned char* const blueChannel) {
  // Splits interleaved RGBA pixels (AoS) into three planar channels (SoA).
  // One thread per pixel; the alpha component is discarded.

  // Absolute 2D position of this thread in the image.
  const int x = blockIdx.x * blockDim.x + threadIdx.x;
  const int y = blockIdx.y * blockDim.y + threadIdx.y;

  // Guard clause: threads mapped past the image edge must not touch memory.
  if (x >= numCols || y >= numRows) {
    return;
  }

  const int idx = y * numCols + x;
  const uchar4 rgba = inputImageRGBA[idx];

  redChannel[idx]   = rgba.x;
  greenChannel[idx] = rgba.y;
  blueChannel[idx]  = rgba.z;
}

// This kernel takes in three color channels and recombines them
// into one image.  The alpha channel is set to 255 to represent
// that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
                       const unsigned char* const greenChannel,
                       const unsigned char* const blueChannel,
                       uchar4* const outputImageRGBA,
                       int numRows,
                       int numCols) {
  // Recombines three planar channels (SoA) back into one interleaved RGBA
  // image (AoS). One thread per pixel. Alpha is set to 255 (opaque).

  // Absolute 2D position of this thread in the image.
  const int row = blockIdx.y * blockDim.y + threadIdx.y;
  const int col = blockIdx.x * blockDim.x + threadIdx.x;

  // Threads mapped outside the image return early so they never touch
  // out-of-bounds memory.
  if (row >= numRows || col >= numCols) {
    return;
  }

  // BUG FIX: a row-major flat index advances by numCols (the row stride)
  // per row, not numRows. The old `row * numRows + col` scrambled and
  // overran the image whenever numRows != numCols.
  const int index = row * numCols + col;

  const unsigned char red   = redChannel[index];
  const unsigned char green = greenChannel[index];
  const unsigned char blue  = blueChannel[index];

  // Alpha = 255 means fully opaque (no transparency).
  outputImageRGBA[index] = make_uchar4(red, green, blue, 255);
}

unsigned char *d_red, *d_green, *d_blue;
float         *d_filter;

void allocateMemoryAndCopyToGPU(const size_t numRowsImage,
                                const size_t numColsImage,
                                const float* const h_filter,
                                const size_t filterWidth) {
  // Allocates device buffers for the three color channels and the blur
  // filter, and uploads the host filter weights to the GPU.
  //
  // h_filter points at filterWidth * filterWidth float weights.
  // All cudaMalloc/cudaMemcpy calls are wrapped in checkCudaErrors so a
  // failure is reported immediately instead of corrupting later kernels.

  // One byte per pixel per channel.
  checkCudaErrors(cudaMalloc(&d_red,   sizeof(unsigned char) * numRowsImage * numColsImage));
  checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
  checkCudaErrors(cudaMalloc(&d_blue,  sizeof(unsigned char) * numRowsImage * numColsImage));

  // BUG FIX: the filter holds filterWidth^2 floats, not numRows*numCols
  // bytes. The old size used the wrong element type AND the wrong count,
  // and the matching memcpy read far past the end of h_filter.
  const size_t filterBytes = sizeof(float) * filterWidth * filterWidth;
  checkCudaErrors(cudaMalloc(&d_filter, filterBytes));

  // Copy the host filter weights to the device buffer just allocated.
  checkCudaErrors(cudaMemcpy(d_filter, h_filter, filterBytes, cudaMemcpyHostToDevice));
}

void gaussian_blur(const uchar4 * const h_inputImageRGBA,
                   uchar4 * const d_inputImageRGBA,
                   uchar4* const d_outputImageRGBA,
                   const size_t numRows, const size_t numCols,
                   unsigned char *d_redBlurred,
                   unsigned char *d_greenBlurred,
                   unsigned char *d_blueBlurred,
                   const int filterWidth) {
  // Host driver: splits the RGBA image into channels, blurs each channel
  // with the convolution kernel, then recombines the blurred channels.
  //
  // d_inputImageRGBA already holds the image (uploaded in preProcess), and
  // d_red/d_green/d_blue/d_filter were allocated in
  // allocateMemoryAndCopyToGPU, so no extra copies are needed here.

  // BUG FIX: dim3 objects must be constructed with their dimensions; the
  // old code default-constructed a const dim3 and then "called" it (and
  // used the undeclared name `grid`), which does not compile.
  const dim3 blockSize(NTHREADS, NTHREADS, 1);

  // Ceil-division so partially-filled edge tiles are still covered, without
  // launching a superfluous block row/column when the size divides evenly.
  const dim3 gridSize((numCols + NTHREADS - 1) / NTHREADS,
                      (numRows + NTHREADS - 1) / NTHREADS,
                      1);

  // Split the interleaved RGBA image into three planar channels.
  // (BUG FIX: the launch previously used `>>>>` — four angle brackets.)
  separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA,
                                            numRows,
                                            numCols,
                                            d_red,
                                            d_green,
                                            d_blue);
  // Synchronize, then check for both launch and execution errors before
  // the blur kernels consume the separated channels.
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());

  // Blur each channel independently.
  gaussian_blur<<<gridSize, blockSize>>>(d_red,
                                         d_redBlurred,
                                         numRows,
                                         numCols,
                                         d_filter,
                                         filterWidth);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());

  gaussian_blur<<<gridSize, blockSize>>>(d_green,
                                         d_greenBlurred,
                                         numRows,
                                         numCols,
                                         d_filter,
                                         filterWidth);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());

  gaussian_blur<<<gridSize, blockSize>>>(d_blue,
                                         d_blueBlurred,
                                         numRows,
                                         numCols,
                                         d_filter,
                                         filterWidth);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());

  // Recombine the blurred channels into the output RGBA image.
  recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
                                             d_greenBlurred,
                                             d_blueBlurred,
                                             d_outputImageRGBA,
                                             numRows,
                                             numCols);
  cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}


// Free all the memory that we allocated
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
// Free all the device memory allocated in allocateMemoryAndCopyToGPU.
// BUG FIX: the body was empty, leaking d_red, d_green, d_blue and d_filter.
void cleanupCu() {
  checkCudaErrors(cudaFree(d_red));
  checkCudaErrors(cudaFree(d_green));
  checkCudaErrors(cudaFree(d_blue));
  checkCudaErrors(cudaFree(d_filter));
}
