#include "reduction.h"

#include "core.h"
#include <cuda.h>
#include <cuda_runtime_api.h>

namespace uzu
{
    // Inclusive prefix sum (scan) computed sequentially on the CPU:
    // output[i] = input[0] + input[1] + ... + input[i].
    //
    // Fix: mirror ReduceSumCpu(Tensor&) and move the tensor to the host
    // before calling Data() — the original read Data() without ToHost(),
    // which is unsafe if the tensor currently lives on the device.
    void SequentialScan(Tensor& input, Tensor& output)
    {
        input.ToHost();  // ensure Data() returns a valid host pointer
        output.Create(input.Shape(), DeviceType::CPU);
        uint32_t size = input.Size();
        float* input_data = input.Data();
        float* output_data = output.Data();
        // Running total carried across the whole array.
        float accum = 0.0f;
        for (uint32_t i = 0; i < size; ++i)
        {
            accum += input_data[i];
            output_data[i] = accum;
        }
    }

    // Kogge-Stone inclusive scan over one block's section of `input`.
    // Precondition: blockDim.x == LINEAR_BLOCK_SIZE (size of the shared array).
    // Each block scans its own section independently; no carry is propagated
    // between blocks.
    //
    // Fixes vs. the original:
    //  - accumulate (`+=`) instead of overwrite (`=`) — the original copied
    //    neighbors around and never summed anything;
    //  - double-barrier per step so reads of XY[t - stride] cannot race the
    //    writes to XY[t] from other threads in the same step;
    //  - zero-fill shared memory for out-of-range lanes (it is uninitialized
    //    otherwise) and guard the final global store against out-of-bounds.
    __global__ void kogge_stone_kernel(float* input, float* output, int size)
    {
        __shared__ float XY[LINEAR_BLOCK_SIZE];
        int i = threadIdx.x + blockIdx.x * blockDim.x;
        // Tail lanes contribute 0 so in-range partial sums stay correct.
        XY[threadIdx.x] = (i < size) ? input[i] : 0.0f;

        for (unsigned int stride = 1; stride < blockDim.x; stride *= 2)
        {
            __syncthreads();
            // Read phase: capture the partner value before anyone overwrites it.
            float partner = 0.0f;
            if (threadIdx.x >= stride)
                partner = XY[threadIdx.x - stride];
            __syncthreads();
            // Write phase: safe now that every read of this step is done.
            if (threadIdx.x >= stride)
                XY[threadIdx.x] += partner;
        }

        if (i < size)
            output[i] = XY[threadIdx.x];
    }

    // Launches kogge_stone_kernel over the whole tensor with one block per
    // LINEAR_BLOCK_SIZE-element section.
    // NOTE(review): the kernel scans each block's section independently, so
    // for size > LINEAR_BLOCK_SIZE this produces per-section prefix sums,
    // not a full scan of the tensor — confirm that is the intended contract.
    // The launch is asynchronous and no cudaGetLastError()/synchronization
    // is done here; callers must synchronize before reading `output`.
    void KoggeStoneScan(Tensor& input, Tensor& output)
    {
        input.ToDevice();
        output.Create(input.Shape(), DeviceType::CUDA);

        // launch kernel: ceil-div grid so the tail section is covered
        uint32_t size = input.Size();
        int block_size = LINEAR_BLOCK_SIZE;
        int grid_size = (size + LINEAR_BLOCK_SIZE - 1) / LINEAR_BLOCK_SIZE;
        kogge_stone_kernel<<<grid_size, block_size>>>(input.DataGpu(), output.DataGpu(), size);
    }

    // Stage 1 of the two-stage sum: each block reduces its section of
    // `input` and stores the block total at the section's first element of
    // `output` (index blockIdx.x * blockDim.x). That is the only position
    // reduce_sum_kernel2 reads.
    // Precondition: blockDim.x == LINEAR_BLOCK_SIZE.
    //
    // Fix: the original had every in-range thread store partialSum[t] back
    // to output — but after the tree reduction only partialSum[0] holds a
    // meaningful value; the rest are intermediate junk. Store only the
    // block total.
    __global__ void reduce_sum_kernel1(float* input, float* output, int size)
    {
        __shared__ float partialSum[LINEAR_BLOCK_SIZE];
        // Stage the block's section in shared memory; pad the tail with 0.
        int t = threadIdx.x;
        int global_index = blockIdx.x * blockDim.x + threadIdx.x;
        if (global_index < size)
            partialSum[t] = input[global_index];
        else
            partialSum[t] = 0.0f;
        __syncthreads();

        // Tree reduction: halve the active range each step; barrier is
        // outside the divergent branch so all threads reach it.
        for (int stride = LINEAR_BLOCK_SIZE / 2; stride > 0; stride /= 2)
        {
            if (t < stride)
                partialSum[t] += partialSum[t + stride];
            __syncthreads();
        }

        // Publish only the block total (thread 0's slot) at the section start.
        if (t == 0 && global_index < size)
            output[global_index] = partialSum[0];
    }

    // Stage 2 of the two-stage sum: folds the per-block partial sums left by
    // reduce_sum_kernel1 (at indices k * LINEAR_BLOCK_SIZE) into input[0].
    // Launch with exactly one block of LINEAR_BLOCK_SIZE threads.
    //
    // Fix: the original ended with `input[t] = partialSum[t]` for every
    // thread, writing LINEAR_BLOCK_SIZE floats unconditionally — an
    // out-of-bounds write whenever size < LINEAR_BLOCK_SIZE, and junk
    // intermediates elsewhere. Only input[0] is consumed by ReduceSum, so
    // store just the grand total.
    __global__ void reduce_sum_kernel2(float* input, int size)
    {
        __shared__ float partialSum[LINEAR_BLOCK_SIZE];
        int t = threadIdx.x;

        // Thread t accumulates the block sums stored at positions
        // (t + k * LINEAR_BLOCK_SIZE) * LINEAR_BLOCK_SIZE for k = 0, 1, ...
        partialSum[t] = 0.0f;
        for (int block_idx = t;
             block_idx * LINEAR_BLOCK_SIZE < size;
             block_idx += LINEAR_BLOCK_SIZE)
        {
            partialSum[t] += input[block_idx * LINEAR_BLOCK_SIZE];
        }
        __syncthreads();

        // Standard shared-memory tree reduction; barrier outside the
        // divergent branch so all threads reach it.
        for (int stride = LINEAR_BLOCK_SIZE / 2; stride > 0; stride /= 2)
        {
            if (t < stride)
                partialSum[t] += partialSum[t + stride];
            __syncthreads();
        }

        // Grand total lands in input[0]; nothing else is overwritten.
        if (t == 0)
            input[0] = partialSum[0];
    }

    // Sums all elements of a tensor on the GPU with a two-stage reduction:
    // stage 1 leaves one partial sum per block in a scratch tensor, stage 2
    // collapses those partials into element 0, which is copied back to the
    // host and returned.
    float ReduceSum(Tensor& input)
    {
        input.ToDevice();
        const uint32_t element_count = input.Size();

        // Scratch tensor that carries the per-block partial sums between stages.
        Tensor partials;
        partials.Create(input.Shape(), DeviceType::CUDA);

        const int threads = LINEAR_BLOCK_SIZE;
        const int blocks = (element_count + LINEAR_BLOCK_SIZE - 1) / LINEAR_BLOCK_SIZE;

        // Stage 1: per-block reduction into the scratch tensor.
        reduce_sum_kernel1<<<blocks, threads>>>(input.DataGpu(), partials.DataGpu(), element_count);
        // Stage 2: one block folds all the block sums into element 0.
        reduce_sum_kernel2<<<1, threads>>>(partials.DataGpu(), element_count);

        // Blocking memcpy on the default stream also orders after the kernels.
        float total;
        cudaMemcpy(&total, partials.DataGpu(), sizeof(float), cudaMemcpyDeviceToHost);

        return total;
    }

    // In-place per-block reduction: after this kernel, element
    // blockIdx.x * blockDim.x of `data` holds the sum of that block's
    // section; other elements of the section are untouched.
    // Precondition: blockDim.x == LINEAR_BLOCK_SIZE (size of the shared array).
    __global__ void reduceSumKernelSection(float* data, int size)
    {
        __shared__ float scratch[LINEAR_BLOCK_SIZE];
        const int lane = threadIdx.x;
        const int idx = blockIdx.x * blockDim.x + lane;

        // Stage this block's section in shared memory; pad the tail with 0.
        scratch[lane] = (idx < size) ? data[idx] : 0.0f;
        __syncthreads();

        // Tree reduction: halve the active range each step. The barrier sits
        // outside the divergent branch so every thread reaches it.
        for (int span = LINEAR_BLOCK_SIZE / 2; span > 0; span /= 2)
        {
            if (lane < span)
                scratch[lane] += scratch[lane + span];
            __syncthreads();
        }

        // Lane 0 publishes the block total back to the section's first slot.
        if (lane == 0 && idx < size)
            data[idx] = scratch[0];
    }

    // Sums `size` floats from host array `data`, using the GPU for per-block
    // partial sums and finishing across blocks on the CPU.
    // Side effect (preserved from the original): `data` is overwritten with
    // the post-kernel device contents — the first element of each
    // LINEAR_BLOCK_SIZE section holds that section's sum.
    //
    // Fix: the original leaked `data_device` — cudaMalloc with no matching
    // cudaFree.
    float ReduceSum(float* data, int size)
    {
        // Allocate and populate device memory.
        float* data_device = nullptr;
        cudaMalloc((void**)&data_device, size * sizeof(float));
        cudaMemcpy(data_device, data, size * sizeof(float), cudaMemcpyHostToDevice);

        // One block per LINEAR_BLOCK_SIZE section (ceil-div covers the tail).
        int block_size = LINEAR_BLOCK_SIZE;
        int grid_size = (size + block_size - 1) / block_size;
        reduceSumKernelSection<<<grid_size, block_size>>>(data_device, size);

        // Blocking copy-back also orders after the kernel on the default stream.
        cudaMemcpy(data, data_device, size * sizeof(float), cudaMemcpyDeviceToHost);
        cudaFree(data_device);  // was leaked in the original

        // Each block left its sum at the first element of its section;
        // accumulate those on the host.
        float sum = 0;
        int nblocks = (size + LINEAR_BLOCK_SIZE - 1) / LINEAR_BLOCK_SIZE;
        for (int i = 0; i < nblocks; ++i)
            sum += data[i * LINEAR_BLOCK_SIZE];
        return sum;
    }

    // Host-side sum of a tensor: move the data to the CPU, then delegate to
    // the raw-pointer overload.
    float ReduceSumCpu(Tensor& input)
    {
        input.ToHost();
        return ReduceSumCpu(input.Data(), input.Size());
    }

    // Sequential host-side sum of `size` floats starting at `data`.
    // Returns 0 for an empty (size == 0) input.
    float ReduceSumCpu(float* data, int size)
    {
        float total = 0.0f;
        for (float* p = data; p != data + size; ++p)
            total += *p;
        return total;
    }
}
