#ifndef _TRANSPOSE_
#define _TRANSPOSE_

// Naive element-per-thread matrix transpose.
//
// i_data : rows x cols matrix, row-major (stride = cols)
// o_data : cols x rows matrix, row-major (stride = rows); receives the
//          transpose, i.e. o[c][r] = i[r][c].
//
// Expected launch: 2-D grid with x covering the input columns and y the
// input rows (e.g. grid(ceil(cols/bx), ceil(rows/by))), as done by
// launch_naive_transpose. Excess threads at the grid tail are discarded
// by the bounds guard.
//
// NOTE: the original guard (x < rows && y < cols) and strides (y*rows,
// x*cols) disagreed with that launch layout and were only correct for
// square matrices; this version is identical for rows == cols and also
// correct for rectangular inputs.
__global__ void naive_transpose(float4 * i_data,
        float4 * o_data,
        int rows, int cols)
{
    // x = input column index, y = input row index.
    unsigned int x = blockDim.x*blockIdx.x + threadIdx.x;
    unsigned int y = blockDim.y*blockIdx.y + threadIdx.y;
	// Guard the grid tail so over-provisioned blocks never touch
	// out-of-range memory.
	if (x < cols && y < rows)
		o_data[x*rows + y] = i_data[y*cols + x];
}

// Tiled matrix transpose staged through shared memory, with diagonal
// block reordering.
//
// Preconditions (none are checked in-kernel — there are NO bounds guards):
//  - blockDim must be (TILE_DIM, BLOCK_ROWS, 1); each thread copies
//    TILE_DIM/BLOCK_ROWS elements of its tile column.
//  - rows and cols must both be exact multiples of TILE_DIM, and the grid
//    must be (cols/TILE_DIM, rows/TILE_DIM) — see launch_optimized_transpose.
//  - NOTE(review): the diagonal remap below uses gridDim.x for both block
//    coordinates, which appears to assume a square grid (rows == cols);
//    confirm before using on rectangular matrices.
//  - NOTE(review): index_in strides by `rows` and index_out by `cols`,
//    i.e. the input is treated as `rows` elements wide — the same
//    square-matrix-only convention as the rest of this file; verify
//    against how the buffers are filled.
__global__ void optimized_transpose(float4 * i_data,
        float4 * o_data,
        int rows, int cols)
{
#define TILE_DIM 16
#define BLOCK_ROWS 8

	// +1 column of padding: intended to keep column accesses in the
	// second loop from serializing on shared-memory bank conflicts.
	__shared__ float4 tile[TILE_DIM][TILE_DIM+1];
	int blockIdx_x, blockIdx_y;
	// diagonal reordering: remap block coordinates so that blocks walk the
	// matrix diagonally instead of row-by-row (the classic trick to spread
	// accesses across DRAM partitions).
	blockIdx_y = blockIdx.x;
	blockIdx_x = (blockIdx.x+blockIdx.y)%gridDim.x;
	// Global coordinates of this thread's element in the input tile...
	int xIndex = blockIdx_x*TILE_DIM + threadIdx.x;
	int yIndex = blockIdx_y*TILE_DIM + threadIdx.y;
	int index_in = xIndex + (yIndex)*rows;
	// ...and in the (transposed) output tile: block coordinates swapped.
	xIndex = blockIdx_y*TILE_DIM + threadIdx.x;
	yIndex = blockIdx_x*TILE_DIM + threadIdx.y;
	int index_out = xIndex + (yIndex)*cols;
	// Each thread loads TILE_DIM/BLOCK_ROWS rows of the tile (coalesced
	// along x) into shared memory.
	for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
		tile[threadIdx.y+i][threadIdx.x] =
			i_data[index_in+i*rows];
	}
	// Barrier between the shared-memory write and the transposed read:
	// reached by all threads in the block (no divergent control flow above).
	__syncthreads();
	// Write the tile back transposed (swapped threadIdx.x/.y), again
	// coalesced along x in the output.
	for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
		o_data[index_out+i*cols] =
			tile[threadIdx.x][threadIdx.y+i];
	}
}

// OPTIMIZE MATRIX TRANSPOSE
// rows of dev_i_data into columns of dev_o_data.
// Host wrapper for the shared-memory tiled transpose kernel.
//
// NOTE(review): the grid division truncates and the kernel carries no
// bounds guards, so rows and cols must both be exact multiples of
// TILE_DIM; the kernel's diagonal block remap also appears to assume
// rows == cols — confirm before calling on rectangular matrices.
void launch_optimized_transpose(float4 * dev_i_data,
        float4 * dev_o_data,
        int rows, int cols) 
{
    // Block shape is fixed by the kernel: TILE_DIM wide, BLOCK_ROWS tall.
    dim3 threads(TILE_DIM, BLOCK_ROWS, 1);
    dim3 blocks(cols / TILE_DIM, rows / TILE_DIM, 1);
    optimized_transpose<<<blocks, threads>>>(dev_i_data, dev_o_data, rows, cols);
}

// NAIVE MATRIX TRANSPOSE
// rows of dev_i_data into columns of dev_o_data.
// Host wrapper: launches one thread per matrix element in 8x8 blocks.
void launch_naive_transpose(float4 * dev_i_data, 
        float4 * dev_o_data,
        int rows, int cols)
{
    dim3 block(8,8,1);
    // Ceil-divide so dimensions that are not multiples of the block size
    // are still fully covered; the kernel's bounds guard discards the
    // excess threads. (Plain division truncated and silently skipped the
    // trailing rows/columns.)
    dim3 grid((cols + block.x - 1)/block.x,
            (rows + block.y - 1)/block.y, 1);
    naive_transpose<<<grid,block>>>(dev_i_data,
            dev_o_data,
            rows,cols);
}

#endif // #ifndef _TRANSPOSE_
