#include "conv1d.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include "core.h"
#include <cstdio>
#include <cassert>

namespace uzu
{
    // Filter taps for the 1-D convolution, uploaded from the host by Conv1d
    // via cudaMemcpyToSymbol. All threads read the same tap per loop iteration
    // in conv1d_kernel, which is the broadcast-friendly access pattern
    // constant memory is made for.
    __constant__ float filter_buffer_1d[MAX_CONV1D_KERNEL_SIZE];

    // 1-D "same" convolution with zero padding at the array edges.
    //
    // Launch layout: 1-D grid of 1-D blocks with blockDim.x == LINEAR_BLOCK_SIZE,
    // one thread per output element. The filter taps must already be in
    // filter_buffer_1d constant memory (see Conv1d). The shared-memory tile is
    // statically sized, so no dynamic shared memory is needed at launch time.
    __global__ void conv1d_kernel(float* source, float* dest, int size, int filter_size)
    {
        // Tile: one block of outputs plus the halo required by the filter.
        __shared__ float buffer[LINEAR_BLOCK_SIZE + MAX_CONV1D_KERNEL_SIZE];

        int half_filter_size = filter_size / 2;

        // Number of valid outputs this block produces (last block may be partial).
        int real_block_size = size - blockIdx.x * blockDim.x;
        if (real_block_size >= blockDim.x)
            real_block_size = blockDim.x;

        // First source element staged into the tile. For an even filter size
        // the halo is asymmetric: half_filter_size on the left,
        // filter_size - half_filter_size - 1 on the right.
        int start_index = blockIdx.x * blockDim.x - half_filter_size;
        int tile_size = real_block_size + filter_size - 1;

        // Cooperatively stage the tile; positions outside [0, size) are
        // zero-padded. (source_pos is always < start_index + tile_size here,
        // so no separate upper-range check is needed beyond `size`.)
        int nrepeats = (tile_size + LINEAR_BLOCK_SIZE - 1) / LINEAR_BLOCK_SIZE;
        for (int r = 0; r < nrepeats; ++r)
        {
            int buffer_pos = r * LINEAR_BLOCK_SIZE + threadIdx.x;
            int source_pos = start_index + buffer_pos;
            if (buffer_pos < tile_size)
            {
                buffer[buffer_pos] =
                    (source_pos >= 0 && source_pos < size) ? source[source_pos] : 0.0f;
            }
        }

        // All threads reach this barrier (the staging guard is per-iteration,
        // not around the sync).
        __syncthreads();

        int index = blockIdx.x * blockDim.x + threadIdx.x;
        if (index < size)
        {
            // Accumulate filter taps against the staged neighbourhood.
            float sum = 0.0f;
            for (int i = 0; i < filter_size; ++i)
                sum += buffer[threadIdx.x + i] * filter_buffer_1d[i];
            dest[index] = sum;
        }
    }

    // Runs a 1-D "same" convolution of `source` with `filter` on the GPU.
    // `dest` is (re)created here with the source's shape on the CUDA device.
    //
    // The filter taps are copied host -> constant memory, so the filter tensor
    // itself never needs a device-side buffer.
    void Conv1d(Tensor& source, Tensor& filter, Tensor& dest)
    {
        assert(source.NDim() == 1);
        assert(filter.NDim() == 1);

        // Input must live on the device; the filter stays on the host because
        // its data goes into constant memory below.
        source.ToDevice();

        auto shape = source.Shape();
        dest.Create(shape, DeviceType::CUDA);

        int array_size = source.Size();
        int filter_size = filter.Size();
        // The constant buffer holds MAX_CONV1D_KERNEL_SIZE taps and the shared
        // tile is sized for it, so a filter of exactly that size is supported.
        assert(filter_size <= MAX_CONV1D_KERNEL_SIZE && "filter size not supported");

        // Upload filter taps and surface failures instead of silently
        // convolving with stale constant-memory contents.
        cudaError_t err =
            cudaMemcpyToSymbol(filter_buffer_1d, filter.Data(), filter_size * sizeof(float));
        if (err != cudaSuccess)
            fprintf(stderr, "Conv1d: cudaMemcpyToSymbol failed: %s\n",
                    cudaGetErrorString(err));

        // One thread per output element. The kernel's tile is a static
        // __shared__ array, so no dynamic shared memory is requested (the
        // third launch parameter previously passed `filter_size`, which asked
        // for unused extra bytes).
        int block_size = LINEAR_BLOCK_SIZE;
        int grid_size = (array_size + block_size - 1) / block_size;
        conv1d_kernel<<<grid_size, block_size>>>(
            source.DataGpu(), dest.DataGpu(), array_size, filter_size);

        // Catch launch-configuration errors immediately; execution errors
        // surface at the next synchronizing call.
        err = cudaGetLastError();
        if (err != cudaSuccess)
            fprintf(stderr, "Conv1d: kernel launch failed: %s\n",
                    cudaGetErrorString(err));
    }
}
