#include <cstdio>
#include <cstdlib>
#include <ctime>

#include <torch/extension.h>
// #include "gswitch.h"


// auto transpose_bench(int64_t x, int64_t y)
// {
//   auto tensor = torch::randn({x,y}).cuda();
//   // double t1 = mwtime();
//   auto trans = tensor.t().contiguous();
//   cudaDeviceSynchronize();
//   // double t2 = mwtime();
//   // printf("transpose pytorch = %.8f\n", t2-t1);
//   return trans.cpu()[10][10];
// }

// auto mm_bench(int64_t x, int64_t y, int64_t k)
// {
//   auto A = torch::randn({y,k}).cuda();
//   auto B = torch::randn({k,x}).cuda();

//   double t1 = mwtime();
//   auto C = A.mm(B);
//   cudaDeviceSynchronize();
//   // double t2 = mwtime();
//   // printf("mm bench %.8f\n", t2-t1);
//   return C.cpu();
// }

// double mv_bench(int64_t x, int64_t y)
// {
//   auto A = torch::randn({y,x}).cuda();
//   auto _x = torch::randn(x).cuda();
  
//   // double t1 = mwtime();
//   auto _y = A.mv(_x);
//   cudaDeviceSynchronize();
//   // double t2 = mwtime();
//   printf("_y[0][0]=%.8f\n", _y.cpu()[0][0]);
//   // return t2-t1;
// }

// double mul_bench(int64_t x, int64_t y)
// {
//   auto A = torch::randn({y,x}).cuda();

//   // double t1 = mwtime();
//   auto _A = A.mul(rand());
//   cudaDeviceSynchronize();
//   // double t2 = mwtime();
//   printf("_A[0][0]=%.8f\n", _A.cpu()[0][0]);

//   // return t2-t1;
// }


#define __anti_conflict(x) ((x)+((x)>>5))
template <typename scalar_t>
__global__ void 
my_transpose_kernel(scalar_t *__restrict__ src, scalar_t* __restrict__ dst, int y, int x)
{
  // Transpose a y-by-x row-major matrix `src` into the x-by-y matrix `dst`.
  // Uses 2D grid-stride loops, so any non-zero 2D launch configuration is
  // valid regardless of whether the grid exactly tiles the matrix.
  const int tx = threadIdx.x+blockIdx.x*blockDim.x;
  const int ty = threadIdx.y+blockIdx.y*blockDim.y;
  const int stride_x = blockDim.x * gridDim.x;
  const int stride_y = blockDim.y * gridDim.y;

  for(int iy = ty; iy < y; iy += stride_y)
  {
    int ix;
    // Manual 4x unroll: each iteration handles the four elements this thread
    // owns at offsets 0, stride_x, 2*stride_x, 3*stride_x.
    // (The previous version read ix..ix+3 while stepping by 4*stride_x, which
    // skipped every element in (tx+3, tx+4*stride_x) whenever stride_x > 1;
    // it only happened to be dead code under an exact-divide launch.)
    for (ix = tx; ix + 3*stride_x < x; ix += stride_x*4)
    {
      auto t1 = src[iy*x+ix];
      auto t2 = src[iy*x+ix+stride_x];
      auto t3 = src[iy*x+ix+2*stride_x];
      auto t4 = src[iy*x+ix+3*stride_x];
      dst[ix*y+iy] = t1;
      dst[(ix+stride_x)*y+iy] = t2;
      dst[(ix+2*stride_x)*y+iy] = t3;
      dst[(ix+3*stride_x)*y+iy] = t4;
    }

    // Tail: remaining elements, one per grid-stride step.
    for(;ix<x;ix+=stride_x)
    {
      dst[ix*y+iy] = src[iy*x+ix];
    }
  }
}

template <typename scalar_t>
__global__ void
my_mm_kernel(const scalar_t * __restrict__ A, 
    const scalar_t * __restrict__ B, 
    scalar_t * __restrict__ C, 
    int x, int y, int k)
{
  // Naive GEMM: C (y-by-x) = A (y-by-k) * B (k-by-x), all row-major.
  // 2D grid-stride loops over the output matrix; one thread per C element.
  const int Tx = threadIdx.x + blockDim.x * blockIdx.x;
  const int Ty = threadIdx.y + blockDim.y * blockIdx.y;

  const int stride_x = blockDim.x * gridDim.x;
  const int stride_y = blockDim.y * gridDim.y;

  for (int yi = Ty; yi < y; yi += stride_y)
  {
    // Bug fix: the column loop must cover the x columns of C (and B),
    // not k — the old `xi < k` bound left columns [k, x) uncomputed
    // whenever k < x.
    for (int xi = Tx; xi < x; xi += stride_x)
    {
      scalar_t local_reduce = 0;
      // Dot product of row yi of A with column xi of B.
      for (int i = 0; i < k; ++i)
      {
        local_reduce += A[yi*k+i] * B[i*x+xi];
      }
      C[yi*x+xi] = local_reduce;
    }
  }
}

template <typename scalar_t>
void cpu_transpose(scalar_t * __restrict__ src, scalar_t * __restrict__ dst, int y, int x)
{
  // CPU reference transpose: y-by-x row-major `src` -> x-by-y `dst`.
  // Rows are processed in parallel; the inner loop is unrolled by 4.
  #pragma omp parallel for num_threads(40)
  for (int i=0; i<y; ++i)
  {
    // size_t products avoid int overflow for matrices with >2^31 elements.
    const size_t row = (size_t)i * x;
    int j;
    // `j+4 <= x` (not `j < x-4`) so the final full quad is also unrolled
    // when x is an exact multiple of 4.
    for (j=0; j+4<=x; j+=4)
    {
      scalar_t x1 = src[row+j];
      scalar_t x2 = src[row+j+1];
      scalar_t x3 = src[row+j+2];
      scalar_t x4 = src[row+j+3];
      dst[(size_t)(j)*y+i] = x1;
      dst[(size_t)(j+1)*y+i] = x2;
      dst[(size_t)(j+2)*y+i] = x3;
      dst[(size_t)(j+3)*y+i] = x4;
    }
    // Scalar tail for the 0-3 leftover columns.
    for (; j<x; ++j)
    {
      dst[(size_t)j*y+i] = src[row+j];
    }
  }
}


// Benchmark driver: transpose a random y-by-x CUDA tensor with the custom
// kernel using a (bx, by) thread block, and return one element of the result
// (the device->host copy forces materialization).
auto my_transpose(int bx, int by, int64_t x, int64_t y)
{
  auto A = torch::randn({y,x}).cuda();
  auto B = torch::zeros_like(A);  // zeros_like already inherits A's CUDA device
  AT_DISPATCH_FLOATING_TYPES(
    A.scalar_type(), "my benchmark", [&]{
      auto src = A.data_ptr<scalar_t>();
      auto dst = B.data_ptr<scalar_t>();
      const int block_x = bx;
      const int block_y = by;
      // Ceil-divide so the grid covers x/y even when they are not multiples
      // of the block size. The old x/block_x truncated; for x < block_x it
      // produced a zero-sized grid dimension, which fails the launch. The
      // kernel is grid-stride, so any covering grid is correct.
      const int grid_x = (int)((x + block_x - 1) / block_x);
      const int grid_y = (int)((y + block_y - 1) / block_y);

      my_transpose_kernel<<<dim3(grid_x, grid_y), dim3(block_x, block_y)>>>(src, dst, y, x);
      // Kernel launches fail silently; surface configuration errors here.
      cudaError_t err = cudaGetLastError();
      if (err != cudaSuccess)
        fprintf(stderr, "my_transpose launch failed: %s\n", cudaGetErrorString(err));
      cudaDeviceSynchronize();
    });
  return B.cpu()[10][10];
}

// Benchmark driver: C = A*B on the GPU with the custom naive GEMM kernel,
// A is y-by-k, B is k-by-x, C is y-by-x; returns C copied to the host.
auto my_mm(int bx, int by, int x, int y, int k)
{
  auto A = torch::randn({y,k}).cuda();
  auto B = torch::randn({k,x}).cuda();
  // Match A's dtype/device instead of relying on the global default dtype.
  auto C = torch::zeros({y,x}, A.options());

  AT_DISPATCH_FLOATING_TYPES(
    A.scalar_type(), "my mm", [&]{
      auto Aptr = A.data_ptr<scalar_t>();
      auto Bptr = B.data_ptr<scalar_t>();
      auto Cptr = C.data_ptr<scalar_t>();

      // Fixed 60-block grid; the kernel's grid-stride loops cover the rest.
      my_mm_kernel<<<dim3(60, 1), dim3(bx, by)>>>(Aptr, Bptr, Cptr, x, y, k);
      // Kernel launches fail silently; surface configuration errors here.
      cudaError_t err = cudaGetLastError();
      if (err != cudaSuccess)
        fprintf(stderr, "my_mm launch failed: %s\n", cudaGetErrorString(err));
      cudaDeviceSynchronize();
    });
  
  return C.cpu();
}

#undef __anti_conflict

int main(int argc, char** argv)
{
  srand(time(nullptr));
  auto opt = torch::TensorOptions().layout(torch::kStrided);
  auto x = torch::zeros({3,6}, opt);
  // Tensor::stride() returns int64_t; passing it to %d is undefined behavior
  // on LP64 platforms, so cast explicitly and print with %lld.
  printf("[%lld, %lld]\n", (long long)x.stride(0), (long long)x.stride(1));
  // int bx = atoi(argv[1]);
  // int by = atoi(argv[2]);

  // auto y = mm_bench(150000, 16, 1433);
  // auto x = my_mm(bx, by, 150000, 16, 1433);
  // printf("%i\n",x == y);
  // printf("transpose_bench: %.8f ms\n", transpose_bench(15000));
  // printf("mm_bench: %.8f ms\n", mm_bench(15000, 1433, 16));
  // printf("mul_bench %.8f ms\n", mul_bench(15000,512));
  // printf("my_transpose %.8f ms\n", my_transpose(bx, by, 15000, 15000));
  return 0;
}