#include <iostream>
#define _USE_MATH_DEFINES
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuComplex.h>

using namespace std;

// Checks the result of a CUDA *runtime* API call; on failure prints the
// failing expression, the human-readable error string, and the source
// location, then aborts.  Wrapped in do { } while (0) so the macro behaves
// as a single statement (safe inside un-braced if/else).
// Note: the error is compared against cudaSuccess (runtime API), not the
// driver-API constant CUDA_SUCCESS, which belongs to CUresult.
#define CudaSafeCall(expr) do { \
  cudaError_t cuda_safe_call_err_ = (expr); \
  if (cuda_safe_call_err_ != cudaSuccess) { \
    cout << "CUDA error: " << #expr << ", " \
         << cudaGetErrorString(cuda_safe_call_err_) \
         << " (code " << cuda_safe_call_err_ << ")" \
         << ", line " << __LINE__ << ", file " << __FILE__ << endl; \
    exit(EXIT_FAILURE); \
  } \
} while (0)

// Integer ceiling division: number of `grain`-sized chunks needed to cover
// `total` (assumes both are positive and the sum does not overflow).
#define DivUp(total, grain) (((total) + (grain) - 1) / (grain))

namespace cudaimpl_internal {

  // Permutes x into bit-reversed index order, the input ordering required by
  // the iterative radix-2 passes that follow.  Launch with exactly one thread
  // per element, i.e. gridDim.x * blockDim.x == 2^num_bits.
  __global__ void ReorderElements(float2* x, const int num_bits) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;

    // Hardware bit-reverse of the 32-bit index, shifted down so only the low
    // num_bits bits are mirrored.  The num_bits == 0 case is guarded
    // explicitly because shifting a 32-bit value by 32 is undefined.
    int r = (num_bits == 0)
        ? 0
        : (int)(__brev((unsigned int)idx) >> (32 - num_bits));

    // Exactly one thread of each (idx, r) pair performs the swap (the partner
    // thread sees the reversed comparison fail), so no element is touched by
    // two threads; self-paired indices (idx == r) stay in place.
    if (idx < r) {
      float2 temp = x[idx];
      x[idx] = x[r];
      x[r] = temp;
    }
  }


  // Runs the first (block_size_log + 1) butterfly stages of an iterative
  // radix-2 FFT entirely in shared memory.  Each block transforms one
  // contiguous chunk of 2 * block_size complex elements, which must already
  // be in bit-reversed order.  Launch with blockDim.x == block_size and
  // gridDim.x == n / (2 * block_size).  Requires block_size == 2^block_size_log.
  // Shared memory uses a split-complex (SoA) layout: real parts in
  // s_x[0 .. 2*block_size), imaginary parts in s_x[2*block_size .. 4*block_size).
  template <int block_size, int block_size_log>
  __global__ void DftInplaceSmallSizes(float2* x) {
    // First element of this block's chunk (chunks are 2 * block_size wide).
    int idx = blockIdx.x * 2 * block_size;

    __shared__ float s_x[block_size * 4];
    // Each thread stages two elements: one from each half of the chunk.
    float2 a = x[idx + threadIdx.x];
    float2 b = x[idx + threadIdx.x + block_size];
    s_x[threadIdx.x] = a.x;
    s_x[threadIdx.x + block_size] = b.x;
    s_x[threadIdx.x + block_size * 2] = a.y;
    s_x[threadIdx.x + block_size * 3] = b.y;
    __syncthreads();

    // One butterfly stage per iteration; `size` is the length of the
    // subtransforms being merged (1, 2, ..., block_size).
    for (int size_log = 0; size_log <= block_size_log; ++size_log) {
      int size = 1 << size_log;
      // After the shift below, i is the start of this thread's 2*size-wide
      // group and j is the butterfly offset inside it.
      int i = threadIdx.x >> size_log;
      int j = threadIdx.x - (i << size_log);
      i <<= size_log + 1;

      a.x = s_x[i + j]; 
      b.x = s_x[i + j + size];
      a.y = s_x[i + j + block_size * 2];
      b.y = s_x[i + j + block_size * 2 + size];

      // Twiddle factor w = exp(-i * pi * j / size), computed with fast-math
      // intrinsics (a few ulps of accuracy traded for speed).
      float ang = -(float)M_PI / size  * j;
      b = cuCmulf(b, make_float2(__cosf(ang), __sinf(ang)));

      // Butterfly: (a, b) -> (a + w*b, a - w*b), real and imaginary parts
      // written to their separate halves of the shared array.
      s_x[i + j] = a.x + b.x; 
      s_x[i + j + size] = a.x - b.x;
      s_x[i + j + block_size * 2] = a.y + b.y;
      s_x[i + j + block_size * 2 + size] = a.y - b.y;

      // This stage's writes must be visible to every thread before the next
      // stage's reads; the final iteration's barrier also covers the
      // write-back reads below.
      __syncthreads();
    }

    // Write the transformed chunk back to global memory.
    a.x = s_x[threadIdx.x];
    b.x = s_x[threadIdx.x + block_size];
    a.y = s_x[threadIdx.x + block_size * 2];
    b.y = s_x[threadIdx.x + block_size * 3];
    x[idx + threadIdx.x] = a;
    x[idx + threadIdx.x + block_size] = b;
  }


  // One radix-2 butterfly pass over the whole array in global memory:
  // merges adjacent subtransforms of length `size` into subtransforms of
  // length 2 * size.  Launch with n / 2 total threads (one butterfly each).
  // Requires size == 2^size_log.
  template <int size, int size_log>
  __global__ void DftInplaceOnePass(float2* x) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    // `offset` is the butterfly's position inside a size-wide subtransform;
    // `base` is the start of the 2*size-wide group this thread works on.
    // (size is a power of two, so the mask equals tid mod size.)
    int offset = tid & (size - 1);
    int base = (tid >> size_log) << (size_log + 1);

    float2 lo = x[base + offset];
    float2 hi = x[base + offset + size];

    // Twiddle factor w = exp(-i * pi * offset / size) via fast-math intrinsics.
    float ang = -(float)M_PI / size * offset;
    float2 t = cuCmulf(hi, make_float2(__cosf(ang), __sinf(ang)));

    // Butterfly: (lo, hi) -> (lo + w*hi, lo - w*hi).
    x[base + offset] = cuCaddf(lo, t);
    x[base + offset + size] = cuCsubf(lo, t);
  }

 
  // Computes, in place, the radix-2 FFT of n complex samples stored
  // interleaved in x (re0, im0, re1, im1, ...); x therefore holds 2*n floats.
  // Assumes that n is a power of two.  Aborts the process on any CUDA error.
  void DftInplace(float* x, int n) {
    // A 0- or 1-point transform is the identity.  Returning early also fixes
    // a crash in the original code: for n == 1, threads.x below became
    // min(256, 0) == 0 and `n / 2 / threads.x` divided by zero on the host.
    if (n < 2) return;

    //
    // Copy data to device
    //

    float2* d_x;
    CudaSafeCall(cudaMalloc(&d_x, n * sizeof(float2)));
    CudaSafeCall(cudaMemcpy(d_x, x, n * sizeof(float2), cudaMemcpyHostToDevice));

    //
    // Reorder data into bit-reversed index order (one thread per element)
    //

    const int threads_per_block = 512;
    dim3 grid;
    dim3 threads;
    if (n < threads_per_block) {
      grid = dim3(1);
      threads = dim3(n);
    }
    else {
      threads = dim3(threads_per_block);
      grid = dim3(n / threads.x);  // n is a power of two, so this is exact
    }

    // log2(n) via integer arithmetic; the previous floating-point
    // log()/log(2.) computation could round down and truncate a bit.
    int num_bits = 0;
    while ((n >> (num_bits + 1)) != 0) ++num_bits;

    ReorderElements<<<grid, threads>>>(d_x, num_bits);
    CudaSafeCall(cudaGetLastError());  // surface launch-configuration errors

    //
    // Process small subtransforms entirely in shared memory: each block
    // handles one contiguous chunk of 2 * first_pass_size elements.
    //

    const int first_pass_size = 512;
    const int first_pass_size_log = 9;
    // Length of the subarrays that are already fully transformed.
    int processed_size = 1;

    if (n > first_pass_size) {
      threads = dim3(first_pass_size);
      grid = dim3(n / 2 / threads.x);
      DftInplaceSmallSizes<first_pass_size, first_pass_size_log><<<grid, threads>>>(d_x);
      CudaSafeCall(cudaGetLastError());
      processed_size = first_pass_size * 2;
    }

    //
    // Remaining butterfly passes in global memory: n / 2 butterflies per
    // pass, one thread per butterfly.
    //

    threads = dim3(min(256, n / 2));
    grid = dim3(n / 2 / threads.x);

    // Instantiates the pass for subtransform length 2^s; runs only for the
    // stages not already covered by the shared-memory kernel.
    #define DFT_INPLACE_ONE_PASS(s) \
      if ((1 << s) >= processed_size && (1 << s) < n) { \
        DftInplaceOnePass<(1 << s), (s)><<<grid, threads>>>(d_x); \
        CudaSafeCall(cudaGetLastError()); \
      }
    DFT_INPLACE_ONE_PASS(0);
    DFT_INPLACE_ONE_PASS(1);
    DFT_INPLACE_ONE_PASS(2);
    DFT_INPLACE_ONE_PASS(3);
    DFT_INPLACE_ONE_PASS(4);
    DFT_INPLACE_ONE_PASS(5);
    DFT_INPLACE_ONE_PASS(6);
    DFT_INPLACE_ONE_PASS(7);
    DFT_INPLACE_ONE_PASS(8);
    DFT_INPLACE_ONE_PASS(9);
    DFT_INPLACE_ONE_PASS(10);
    DFT_INPLACE_ONE_PASS(11);
    DFT_INPLACE_ONE_PASS(12);
    DFT_INPLACE_ONE_PASS(13);
    DFT_INPLACE_ONE_PASS(14);
    DFT_INPLACE_ONE_PASS(15);
    DFT_INPLACE_ONE_PASS(16);
    DFT_INPLACE_ONE_PASS(17);
    DFT_INPLACE_ONE_PASS(18);
    DFT_INPLACE_ONE_PASS(19);
    DFT_INPLACE_ONE_PASS(20);
    DFT_INPLACE_ONE_PASS(21);
    DFT_INPLACE_ONE_PASS(22);
    DFT_INPLACE_ONE_PASS(23);
    DFT_INPLACE_ONE_PASS(24);
    DFT_INPLACE_ONE_PASS(25);
    DFT_INPLACE_ONE_PASS(26);
    DFT_INPLACE_ONE_PASS(27);
    DFT_INPLACE_ONE_PASS(28);
    DFT_INPLACE_ONE_PASS(29);
    #undef DFT_INPLACE_ONE_PASS

    //
    // Synchronize and copy data from device to host
    //

    // cudaDeviceSynchronize supersedes the deprecated cudaThreadSynchronize;
    // it also surfaces any asynchronous kernel execution error.
    CudaSafeCall(cudaDeviceSynchronize());
    CudaSafeCall(cudaMemcpy(x, d_x, n * sizeof(float2), cudaMemcpyDeviceToHost));
    CudaSafeCall(cudaFree(d_x));
  }

} // namespace cudaimpl_internal
