#include "math_ops.h"
#include <cuda.h>
#include <cuda_runtime_api.h>


namespace uzu
{
    // Tiled matrix multiplication: result (P x Q) = mat1 (P x K) * mat2 (K x Q),
    // all row-major. Expects a 2D launch with
    // blockDim == (SQUARE_BLOCK_SIZE, SQUARE_BLOCK_SIZE) and a grid whose x
    // dimension covers the Q columns and y dimension covers the P rows.
    __global__ void matrixmul_kernel(float* mat1, float* mat2, float* result, int P, int Q, int K)
    {
        __shared__ float Ms[SQUARE_BLOCK_SIZE][SQUARE_BLOCK_SIZE];
        __shared__ float Ns[SQUARE_BLOCK_SIZE][SQUARE_BLOCK_SIZE];

        int tx = threadIdx.x;
        int ty = threadIdx.y;
        int row = blockIdx.y * SQUARE_BLOCK_SIZE + ty;
        int col = blockIdx.x * SQUARE_BLOCK_SIZE + tx;

        float value = 0.0f;
        int steps = (K + SQUARE_BLOCK_SIZE - 1) / SQUARE_BLOCK_SIZE;

        // Every thread in the block runs the full tile loop, even when
        // (row, col) is out of range. This guarantees (a) every
        // __syncthreads() is reached by all threads — a barrier inside a
        // divergent branch is undefined behavior — and (b) every Ms/Ns slot
        // is written (zero-padded) before any thread reads it on edge tiles.
        for (int s = 0; s < steps; ++s)
        {
            int a_col = s * SQUARE_BLOCK_SIZE + tx;  // column into mat1
            int b_row = s * SQUARE_BLOCK_SIZE + ty;  // row into mat2

            Ms[ty][tx] = (row < P && a_col < K) ? mat1[row * K + a_col] : 0.0f;
            Ns[ty][tx] = (b_row < K && col < Q) ? mat2[b_row * Q + col] : 0.0f;
            __syncthreads();

            for (int k = 0; k < SQUARE_BLOCK_SIZE; ++k)
            {
                value += Ms[ty][k] * Ns[k][tx];
            }
            __syncthreads();
        }

        // result has Q columns, so its row stride is Q (the original code
        // indexed with K, corrupting the output whenever K != Q).
        if (row < P && col < Q)
            result[row * Q + col] = value;
    }

    // Multiplies mat1 (rows x cols) by mat2 (rows x cols) on the device and
    // returns the product as a new host-side MatrixF of shape
    // (mat1.rows x mat2.cols). Requires mat1.cols == mat2.rows.
    MatrixF MatrixMul(const MatrixF& mat1, const MatrixF& mat2)
    {
        assert(mat1.cols == mat2.rows);
        MatrixF result(mat1.rows, mat2.cols);

        // allocate device memory for matrices (true => copy host data up)
        float* mat1_device;
        float* mat2_device;
        float* result_device;
        AllocateDeviceMemory(&mat1_device, mat1, true);
        AllocateDeviceMemory(&mat2_device, mat2, true);
        AllocateDeviceMemory(&result_device, result, false);

        // launch kernel: the kernel maps blockIdx.x -> output column and
        // blockIdx.y -> output row, so grid.x must cover the result's
        // columns (mat2.cols) and grid.y its rows (mat1.rows). The original
        // code had these two extents swapped, leaving output tiles
        // uncomputed for non-square results.
        dim3 block_size(SQUARE_BLOCK_SIZE, SQUARE_BLOCK_SIZE, 1);
        int blocks_x = (mat2.cols + SQUARE_BLOCK_SIZE - 1) / SQUARE_BLOCK_SIZE;
        int blocks_y = (mat1.rows + SQUARE_BLOCK_SIZE - 1) / SQUARE_BLOCK_SIZE;
        dim3 grid_size(blocks_x, blocks_y, 1);
        matrixmul_kernel<<<grid_size, block_size>>>(mat1_device, mat2_device, result_device, mat1.rows, mat2.cols, mat1.cols);
        // catch bad launch configurations; execution errors surface at the
        // synchronizing cudaMemcpy below
        assert(cudaGetLastError() == cudaSuccess);

        // copy result back (blocking, so it also synchronizes the kernel)
        cudaMemcpy(result.data, result_device, result.cols * result.rows * sizeof(float), cudaMemcpyDeviceToHost);

        // free device memory
        cudaFree(mat1_device);
        cudaFree(mat2_device);
        cudaFree(result_device);

        return result;
    }
}
