#include "Matrix.hpp"

// Allocates device memory for an xSize-by-ySize row-major matrix of floats.
// On success *matrix points to uninitialized device memory; release it with
// releaseMatrix().  The byte count is widened to size_t BEFORE multiplying so
// large matrices do not overflow 32-bit int arithmetic.
void createMatrix( float** matrix, int xSize, int ySize )
{
  const size_t bytes = (size_t)xSize * (size_t)ySize * sizeof(float);
  cudaMalloc( matrix, bytes );
}

// Returns a device matrix obtained from createMatrix() to the CUDA runtime.
// cudaFree accepts a null pointer, so calling this with NULL is harmless.
void releaseMatrix( float* matrix )
{
  cudaFree( matrix );
}

// Copies an xSize-by-ySize matrix from host memory (cpuValues) to device
// memory (gpuValues).  Blocking copy.  The size is widened to size_t before
// multiplying so large matrices do not overflow int arithmetic.
void uploadMatrix( float* gpuValues, float* cpuValues, int xSize, int ySize )
{
  const size_t bytes = (size_t)xSize * (size_t)ySize * sizeof( float );
  cudaMemcpy( gpuValues, cpuValues, bytes, cudaMemcpyHostToDevice );
}

// Copies an xSize-by-ySize matrix from device memory (gpuValues) back to host
// memory (cpuValues).  Blocking copy, so it also synchronizes with any prior
// kernels on the default stream.  Size widened to size_t to avoid int overflow.
void downloadMatrix( float* gpuValues, float* cpuValues, int xSize, int ySize )
{
  const size_t bytes = (size_t)xSize * (size_t)ySize * sizeof( float );
  cudaMemcpy( cpuValues, gpuValues, bytes, cudaMemcpyDeviceToHost );
}

// Kernel: copies source (xSize x ySize, row-major) into a sub-rectangle of
// dest whose top-left corner is (xOffset, yOffset); dest rows are destXSize
// floats wide.  Expects a 2D launch covering at least xSize x ySize threads.
__global__ void copyMatrixGPU( float *source, float *dest, int xOffset, int yOffset, int xSize , int ySize, int destXSize )
{
  const int col = blockIdx.x*blockDim.x + threadIdx.x;
  const int row = blockIdx.y*blockDim.y + threadIdx.y;

  // Guard the grid tail: the launch rounds up to whole 16x16 tiles.
  if( col < xSize && row < ySize )
    dest[ (row+yOffset)*destXSize + (col+xOffset) ] = source[ row*xSize + col ];
}

// Copies a device matrix (xSize x ySize) into a region of a larger device
// matrix (destXSize wide), top-left corner at (xOffset, yOffset).  Both
// pointers are DEVICE pointers.  destYSize is unused but kept for interface
// compatibility.  Asynchronous: returns before the kernel finishes.
// (Removed the old commented-out host-staging copies that predated the
// device-pointer interface.)
void copyMatrix( float* sourceMatrix, float* destMatrix, int xSize, int ySize, int destXSize, int destYSize, int xOffset, int yOffset )
{
  // 16x16 thread tiles; round the grid up so the whole source is covered.
  dim3 threads( 16, 16 );
  dim3 blocks( (xSize+15)/16, (ySize+15)/16 );

  copyMatrixGPU<<< blocks, threads >>>( sourceMatrix, destMatrix, xOffset, yOffset, xSize, ySize, destXSize );
}

// Kernel: subtracts source (xSize x ySize, row-major) element-wise from the
// sub-rectangle of dest starting at (xOffset, yOffset); dest rows are
// destXSize floats wide.  Expects a 2D launch covering xSize x ySize threads.
__global__ void subMatrixGPU( float *source, float *dest, int xOffset, int yOffset, int xSize , int ySize, int destXSize )
{
  const int col = blockIdx.x*blockDim.x + threadIdx.x;
  const int row = blockIdx.y*blockDim.y + threadIdx.y;

  // Grid-tail guard: launch is rounded up to whole tiles.
  if( col < xSize && row < ySize )
    dest[ (row+yOffset)*destXSize + (col+xOffset) ] -= source[ row*xSize + col ];
}

// Subtracts a device matrix (xSize x ySize) from the region of a larger
// device matrix (destXSize wide) whose top-left corner is (xOffset, yOffset).
// Both pointers are DEVICE pointers.  destYSize is unused but kept for
// interface compatibility.  Asynchronous launch.  (Removed the stale
// commented-out host-staging code.)
void subMatrix( float* sourceMatrix, float* destMatrix, int xSize, int ySize, int destXSize, int destYSize, int xOffset, int yOffset )
{
  // 16x16 thread tiles covering the source; kernel guards the ragged edge.
  dim3 threads( 16, 16 );
  dim3 blocks( (xSize+15)/16, (ySize+15)/16 );

  subMatrixGPU<<< blocks, threads >>>( sourceMatrix, destMatrix, xOffset, yOffset, xSize, ySize, destXSize );
}

// Kernel: dest = source1 * source2 with 16x16 shared-memory tiling.
// source1 is xSize x ySize, source2 is ySize x zSize, dest is xSize x zSize
// (all row-major).  Requires 16x16 blocks; grid.y covers xSize rows and
// grid.x covers zSize columns (note idx/idy are deliberately swapped below).
__global__ void mulGPU( float* dest, float* source1, float *source2, int xSize, int ySize, int zSize )
{
  // idx = output row (y grid axis), idy = output column (x grid axis).
  int idx = threadIdx.y + blockIdx.y*blockDim.y;
  int idy = threadIdx.x + blockIdx.x*blockDim.x;

  float value = 0;

  __shared__ float data1[ 256 ];   // 16x16 tile of source1
  __shared__ float data2[ 256 ];   // 16x16 tile of source2

  for( int i=0; i<ySize; i+=16 )
  {
    // Stage the next tile pair.  Out-of-range threads leave their slot
    // stale, but the dot-product loop below only reads slots with valid
    // row (idx<xSize / idy<zSize) and valid column (j < ySize-i).
    if( idx<xSize && (i+threadIdx.x)<ySize )
      data1[ threadIdx.y*16 + threadIdx.x ] = source1[ idx*ySize + i+threadIdx.x ];
    if( idy<zSize && (i+threadIdx.y)<ySize )
      data2[ threadIdx.y*16 + threadIdx.x ] = source2[ (i+threadIdx.y)*zSize + idy ];
    __syncthreads();  // BUGFIX: was 'syncthreads()' (not a CUDA builtin)

    if( idx<xSize && idy<zSize )
      for( int j=0; j<min(16, ySize-i ); j++ )
        value += data1[ threadIdx.y*16 + j ]* data2[ j*16 + threadIdx.x ];
    __syncthreads();  // BUGFIX: same; barrier before tiles are overwritten

    // The loop bound depends only on ySize, so every thread in the block
    // reaches both barriers the same number of times (no divergence).
  }

  if( idx>=xSize || idy>=zSize )
    return;

  dest[ idx*zSize + idy ] = value;
}

// Multiplies two device matrices: destMatrix (xSize x zSize) =
// source1 (xSize x ySize) * source2 (ySize x zSize).  All pointers are
// DEVICE pointers; the launch is asynchronous.  (Removed the stale
// commented-out host-staging code.)
void mulMatrix( float* destMatrix, float* source1, float* source2, int xSize, int ySize, int zSize )
{
  // mulGPU maps grid.y -> output rows (xSize) and grid.x -> output
  // columns (zSize); blocks must be exactly 16x16 to match its tiling.
  dim3 threads( 16, 16 );
  dim3 blocks( (zSize+15)/16, (xSize+15)/16 );

  mulGPU<<< blocks, threads >>>( destMatrix, source1, source2, xSize, ySize, zSize );
}

// Kernel: extracts an xSize x ySize sub-rectangle whose top-left corner in
// source is (xOffset, yOffset); source rows are srcXSize floats wide, dest is
// densely packed.  Expects a 2D launch covering xSize x ySize threads.
__global__ void getSubmatrixGPU( float *source, float *dest, int xOffset, int yOffset, int xSize , int ySize, int srcXSize )
{
  const int col = blockIdx.x*blockDim.x + threadIdx.x;
  const int row = blockIdx.y*blockDim.y + threadIdx.y;

  // Grid-tail guard: launch is rounded up to whole tiles.
  if( col < xSize && row < ySize )
    dest[ row*xSize + col ] = source[ (row+yOffset)*srcXSize + (col+xOffset) ];
}

// Extracts an xSize x ySize block starting at (xOffset, yOffset) from a
// device matrix (srcXSize wide) into a densely packed device submatrix.
// Both pointers are DEVICE pointers.  srcYSize is unused but kept for
// interface compatibility.  Asynchronous launch.  (Removed the stale
// commented-out host-staging code.)
void getSubmatrix( float* submatrix, float* matrix, int xOffset, int yOffset, int xSize, int ySize, int srcXSize, int srcYSize )
{
  // 16x16 thread tiles covering the extracted block.
  dim3 threads( 16, 16 );
  dim3 blocks( (xSize+15)/16, (ySize+15)/16 );

  getSubmatrixGPU<<< blocks, threads >>>( matrix, submatrix, xOffset, yOffset, xSize, ySize, srcXSize );
}

// Kernel: one elimination step of LU factorization.  For each row idy below
// the pivot row 'baseLine', records the multiplier in lMatrix and subtracts
// value * pivot-row from that row of uMatrix.  Launched with 256x1 blocks;
// grid.y is sized exactly (one block row per eliminated row), so idy needs
// no bounds check.
__global__ void makeLUStepGPU( float* uMatrix, float* lMatrix, int baseLine, int xSize, int ySize )
{
  int idx = threadIdx.x + blockIdx.x*blockDim.x + baseLine;
  int idy = threadIdx.y + blockIdx.y*blockDim.y + baseLine+1;

  __shared__ float value;
  if( threadIdx.x == 0 )
  {
    // Thread 0 of every block is always in range: the grid is sized from
    // xSize-baseLine, so blockIdx.x*blockDim.x + baseLine < xSize here.
    value = uMatrix[ idy*xSize + baseLine ] / uMatrix[ baseLine*xSize + baseLine ];
    lMatrix[ idy*xSize + baseLine ] = value;
  }
  // BUGFIX: the early 'if (idx>=xSize) return;' used to precede this barrier,
  // so tail threads skipped __syncthreads() -- undefined behaviour.  All
  // threads now reach the barrier; only the update below is guarded.
  __syncthreads();

  if( idx < xSize )
    uMatrix[ idy*xSize + idx ] -= value*uMatrix[ baseLine*xSize + idx ];
}

// Runs one LU elimination step on device matrices: eliminates column
// 'baseLine' from all rows below the pivot, storing multipliers in lMatrix.
// Both pointers are DEVICE pointers; asynchronous launch.  (Removed the
// stale commented-out host-staging code.)
void makeLUStep( float* matrix, float* lMatrix, int baseLine, int xSize, int ySize )
{
  // One thread per column from the pivot rightwards; one block row per
  // matrix row below the pivot (grid.y is exact -- the kernel relies on it).
  dim3 threads( 256, 1 );
  dim3 blocks( (xSize-baseLine+255)/256, ySize-baseLine-1 );

  makeLUStepGPU<<< blocks, threads >>>( matrix, lMatrix, baseLine, xSize, ySize );
}

// Kernel: overwrites a size x size row-major matrix with the identity.
// Expects a 2D launch covering at least size x size threads.
__global__ void makeIdentityMatrixGPU( float* matrix, int size )
{
  const int col = blockIdx.x*blockDim.x + threadIdx.x;
  const int row = blockIdx.y*blockDim.y + threadIdx.y;

  // Grid-tail guard: launch is rounded up to whole tiles.
  if( col < size && row < size )
    matrix[ row*size + col ] = ( row == col ) ? 1.0f : 0.0f;
}

// Fills a size x size DEVICE matrix with the identity.  Asynchronous launch.
// (Removed the stale commented-out host-staging code.)
void generateIdentityMatrix( float* matrix, int size )
{
  // 16x16 tiles rounded up; the kernel guards the ragged edge.
  dim3 threads( 16, 16 );
  dim3 blocks( (size+15)/16, (size+15)/16 );

  makeIdentityMatrixGPU<<< blocks, threads >>>( matrix, size );
}

// Kernel: forward-substitution step.  For each row idy below 'offset',
// subtracts lMatrix[idy][offset] times row 'offset' of uMatrix from row idy.
// Launched with 256x1 blocks; grid.y is exact (one block row per updated
// row), so idy needs no bounds check.  lMatrix rows are zSize floats wide.
__global__ void solveRightStepGPU( float* uMatrix, float* lMatrix, int xSize, int ySize, int zSize, int offset )
{
  int idx = threadIdx.x + blockIdx.x*blockDim.x;
  int idy = threadIdx.y + blockIdx.y*blockDim.y + offset+1;

  __shared__ float value;
  if( threadIdx.x == 0 )
    value = lMatrix[ idy*zSize + offset ];
  // BUGFIX: the early 'if (idx>=xSize) return;' used to precede this barrier,
  // so tail threads skipped __syncthreads() -- undefined behaviour.  Thread 0
  // is always in range (idx = blockIdx.x*256 < xSize by grid construction).
  __syncthreads();

  if( idx < xSize )
    uMatrix[ idy*xSize + idx ] -= uMatrix[ offset*xSize + idx ] * value;
}

// One forward-substitution step on device matrices: updates every row of
// uMatrix below 'offset' using the multipliers in column 'offset' of
// lMatrix (zSize floats wide).  DEVICE pointers; asynchronous launch.
// (Removed the stale commented-out host-staging code.)
void solveSystemRightStep( float *uMatrix, float *lMatrix, int xSize, int ySize, int zSize, int offset )
{
  // One thread per column of uMatrix; one block row per row below 'offset'
  // (grid.y is exact -- the kernel relies on it).
  dim3 threads( 256, 1 );
  dim3 blocks( (xSize+255)/256, ySize-offset-1 );

  solveRightStepGPU<<< blocks, threads >>>( uMatrix, lMatrix, xSize, ySize, zSize, offset );
}

// Kernel: back-substitution step.  For every row idy, subtracts
// lMatrix[offset][idx] times uMatrix[idy][offset] from uMatrix[idy][idx],
// for each column idx right of 'offset'.  Launched with 1x256 blocks;
// grid.x is exact (one block column per updated column), so idx needs no
// bounds check.
__global__ void solveLeftStepGPU( float* uMatrix, float* lMatrix, int xSize, int ySize, int zSize, int offset )
{
  int idx = threadIdx.x + blockIdx.x*blockDim.x + offset+1;
  int idy = threadIdx.y + blockIdx.y*blockDim.y;

  __shared__ float value;
  if( threadIdx.y == 0 )
    value = lMatrix[ offset*xSize + idx ];
  // BUGFIX: the early 'if (idy>=ySize) return;' used to precede this barrier,
  // so tail threads skipped __syncthreads() -- undefined behaviour.  The
  // threadIdx.y==0 writer always exists, so 'value' is set before the barrier.
  __syncthreads();

  if( idy < ySize )
    uMatrix[ idy*xSize + idx ] -= uMatrix[ idy*xSize + offset ] * value;
}

// One back-substitution step on device matrices: clears column 'offset' of
// 'matrix' to the right using row 'offset' of 'uMatrix'.  DEVICE pointers;
// asynchronous launch.  (Removed the stale commented-out host-staging code.)
void systemRightStep( float *matrix, float *uMatrix, int xSize, int ySize, int zSize, int offset )
{
  // One block per column right of 'offset' (grid.x is exact -- the kernel
  // relies on it); 256 threads per block cover the rows.
  dim3 threads( 1, 256 );
  dim3 blocks( xSize-offset-1, (ySize+255)/256 );

  solveLeftStepGPU<<< blocks, threads >>>( matrix, uMatrix, xSize, ySize, zSize, offset );
}

// Kernel: zeroes the rectangle of a size x size matrix whose columns run
// from cleanOffset up to (but excluding) cleanSize and whose rows run from
// 'offset' to the bottom.  cleanSize/size are absolute end coordinates.
__global__ void cleanGPU( float* matrix, int size, int offset, int cleanOffset, int cleanSize )
{
  const int col = cleanOffset + threadIdx.x + blockIdx.x*blockDim.x;
  const int row = offset + threadIdx.y + blockIdx.y*blockDim.y;

  // Guard the rounded-up grid tail against the rectangle's far edges.
  if( col < cleanSize && row < size )
    matrix[ row*size + col ] = 0;
}

// Zeroes a rectangular region of a size x size DEVICE matrix: columns
// [cleanOffset, cleanSize), rows [offset, size).  Asynchronous launch.
// (Removed the stale commented-out host-staging code.)
void clearUp( float* matrix, int size, int offset, int cleanOffset, int cleanSize )
{
  // 16x16 tiles over the region's extent; the kernel re-adds the offsets
  // and guards the ragged edge.
  dim3 threads( 16, 16 );
  dim3 blocks( (cleanSize-cleanOffset+15)/16, (size-offset+15)/16 );

  cleanGPU<<< blocks, threads >>>( matrix, size, offset, cleanOffset, cleanSize );
}

// Kernel: divides column 'row' of 'matrix' by the pivot element
// uMatrix[row][row] (uMatrix rows are scaleX floats wide).  Launched as a
// 1D grid of 256-thread blocks.
// NOTE(review): the bounds guard uses xSize while the host grid in
// scaleRow() is sized from ySize, and the write strides by xSize -- this
// only lines up when the matrix is square; confirm against callers.
__global__ void scaleGPU( float* matrix, float* uMatrix, int xSize, int ySize, int scaleX, int scaleY, int row )
{
  int idx = threadIdx.x + blockIdx.x*blockDim.x;

  __shared__ float value;
  if( threadIdx.x == 0 )
    value = uMatrix[ row*scaleX + row ];   // pivot element
  // BUGFIX: the early 'if (idx>=xSize) return;' used to precede this barrier,
  // so tail threads skipped __syncthreads() -- undefined behaviour.  Thread 0
  // either sets 'value' or the whole block is out of range and exits together.
  __syncthreads();

  if( idx < xSize )
    matrix[ idx*xSize + row ] /= value;
}

// Divides column 'row' of the DEVICE matrix 'matrix' by the pivot
// uMatrix[row][row] (uMatrix is scaleX x scaleY).  Asynchronous launch.
// (Removed the stale commented-out host-staging code.)
// NOTE(review): the grid is sized from ySize but the kernel guards on
// xSize -- consistent only for square matrices; confirm against callers.
void scaleRow( float* matrix, float* uMatrix, int xSize, int ySize, int scaleX, int scaleY, int row )
{
  dim3 threads( 256 );
  dim3 blocks( (ySize+255)/256 );

  scaleGPU<<< blocks, threads >>>( matrix, uMatrix, xSize, ySize, scaleX, scaleY, row );
}