#include "img_arith.h"
#include <cassert>
#include "core.h"

namespace uzu
{
    // Convert interleaved RGB (3 bytes/pixel) to single-channel grayscale.
    // Expects a 2D grid/block covering at least width x height threads;
    // one thread handles one output pixel.
    __global__ void rgb2gray_kernel(unsigned char* input, unsigned char* output, int width, int height)
    {
        int x = threadIdx.x + blockIdx.x * blockDim.x;
        int y = threadIdx.y + blockIdx.y * blockDim.y;
        // x and y are non-negative by construction; only clip the far edge.
        if (x < width && y < height)
        {
            int index = y * width + x;
            unsigned char r = input[index * 3 + 0];
            unsigned char g = input[index * 3 + 1];
            unsigned char b = input[index * 3 + 2];
            // float literals: bare 0.30/0.59/0.11 are doubles and force a
            // double-precision multiply-add chain per pixel on the GPU.
            output[index] = (unsigned char)(r * 0.30f + g * 0.59f + b * 0.11f);
        }
    }

    // Box blur on a single-channel image: each output pixel is the mean of
    // the (2*half_size+1)^2 window around it, with the window clipped at
    // the image borders. One thread per output pixel; 2D launch expected.
    __global__ void boxblur_kernel(unsigned char* input, unsigned char* output, int width, int height, int half_size)
    {
        int x = blockIdx.x * blockDim.x + threadIdx.x;
        int y = blockIdx.y * blockDim.y + threadIdx.y;
        if (x < 0 || x >= width || y < 0 || y >= height)
            return;

        int total = 0;
        int count = 0;
        for (int ny = y - half_size; ny <= y + half_size; ++ny)
        {
            if (ny < 0 || ny >= height)
                continue;
            for (int nx = x - half_size; nx <= x + half_size; ++nx)
            {
                if (nx < 0 || nx >= width)
                    continue;
                total += input[ny * width + nx];
                ++count;
            }
        }

        // count >= 1: the center pixel itself is always inside the image.
        output[y * width + x] = (unsigned char)(total / count);
    }

    // Host wrapper: upload `image` (must be 3-channel RGB), allocate a
    // 1-channel device image in `gray`, and launch the conversion kernel.
    void rgb2gray(ImageU8& image, ImageU8& gray)
    {
        assert(image.channels == 3);
        image.ToDevice();
        gray.Create(image.width, image.height, 1, DeviceType::CUDA);

        // 16x16 tiles; ceil-divide so partial tiles at the edges are covered.
        const int tile = 16;
        dim3 block_size(tile, tile, 1);
        dim3 grid_size((image.width + tile - 1) / tile,
                       (image.height + tile - 1) / tile, 1);
        rgb2gray_kernel<<<grid_size, block_size>>>(image.dataGpu, gray.dataGpu, image.width, image.height);
    }

    // Host wrapper: box-blur a single-channel image on the GPU.
    // `boxsize` is the full window edge length; the kernel receives the
    // half window (boxsize / 2, truncated).
    void boxBlur(ImageU8& image, ImageU8& blurred, int boxsize)
    {
        assert (image.channels == 1);
        image.ToDevice();
        blurred.Create(image.width, image.height, image.channels, DeviceType::CUDA);

        // 16x16 tiles; ceil-divide so edge pixels get a thread too.
        const int tile = 16;
        dim3 block_size(tile, tile, 1);
        dim3 grid_size((image.width + tile - 1) / tile,
                       (image.height + tile - 1) / tile, 1);

        int halfsize = boxsize / 2;
        boxblur_kernel<<<grid_size, block_size>>>(image.dataGpu, blurred.dataGpu, image.width, image.height, halfsize);
    }

    // TODO(review): not implemented yet. Currently a no-op: `image` and
    // `blurred` are left untouched, so callers receive no result.
    void medianBlur(ImageU8& image, ImageU8& blurred, int boxsize)
    {
        // intentionally empty — pending implementation
    }

    // Build a histogram of 8-bit pixel values.
    // Each block accumulates into a shared-memory histogram, then merges it
    // into the global `hist` with atomicAdd — so `hist` must be
    // zero-initialized by the caller. `nbins` must not exceed
    // HISTOGRAM_BINS (the capacity of the shared array). 1D launch.
    __global__ void histogram_kernel(uint8_t* data, float* hist, int size, int nbins)
    {
        __shared__ float hist_s[HISTOGRAM_BINS];

        int t = threadIdx.x;
        // cooperatively zero the shared bins (shared memory is uninitialized)
        for (int bin_index = t; bin_index < nbins; bin_index += blockDim.x)
        {
            hist_s[bin_index] = 0;
        }
        __syncthreads();

        // Each thread handles a contiguous chunk of pixels; contiguity is
        // what makes the run-length aggregation below effective.
        int num_threads = blockDim.x * gridDim.x;
        int pixel_per_threads = (size + num_threads - 1) / num_threads;
        int index = threadIdx.x + blockIdx.x * blockDim.x;
        int start = index * pixel_per_threads;
        int end = (index + 1) * pixel_per_threads;
        if (end > size)
            end = size;

        // Run-length aggregation to reduce the number of atomic ops:
        // count consecutive equal values locally, flush once per run.
        int prev_index = -1;
        int accumulator = 0;
        for (int i = start; i < end; ++i)
        {
            uint8_t v = data[i];
            if (v != prev_index)
            {
                // BUGFIX: flush the finished run into the bin of the value
                // that was accumulated (prev_index), not the new value v.
                if (accumulator > 0)
                    atomicAdd(&hist_s[prev_index], (float)accumulator);
                prev_index = v;
                accumulator = 1;
            }
            else
            {
                accumulator += 1;
            }
        }
        // BUGFIX: flush the final run — it was previously dropped entirely.
        if (accumulator > 0)
            atomicAdd(&hist_s[prev_index], (float)accumulator);
        __syncthreads();

        // merge this block's histogram into the global histogram
        for (int bin_index = t; bin_index < nbins; bin_index += blockDim.x)
        {
            atomicAdd(&hist[bin_index], hist_s[bin_index]);
        }
    }

    // Compute a HISTOGRAM_BINS-bin histogram of a single-channel 8-bit
    // image on the GPU; result is a float tensor on the CUDA device.
    void histogram(ImageU8& image, Tensor& hist)
    {
        image.ToDevice();
        uint32_t width = image.Width();
        uint32_t height = image.Height();
        uint32_t size = width * height;

        std::vector<uint32_t> shape;
        shape.push_back(HISTOGRAM_BINS);
        hist.Create(shape, DeviceType::CUDA);
        // BUGFIX: the kernel accumulates with atomicAdd, so the output
        // buffer must start at zero — Create is not assumed to zero it.
        cudaMemset(hist.DataGpu(), 0, HISTOGRAM_BINS * sizeof(float));

        int block_size = LINEAR_BLOCK_SIZE;
        int grid_size = 128;  // fixed grid; kernel chunks pixels per thread
        histogram_kernel<<<grid_size, block_size>>>(image.DataGpu(), hist.DataGpu(), size, HISTOGRAM_BINS);
    }

    // CPU reference implementation of histogram(): tallies pixel values of
    // a single-channel 8-bit image into HISTOGRAM_BINS float bins on the host.
    void histogramCpu(ImageU8& image, Tensor& hist)
    {
        image.ToHost();
        const uint32_t npixels = image.Width() * image.Height();

        std::vector<uint32_t> shape(1, HISTOGRAM_BINS);
        hist.Create(shape, DeviceType::CPU);

        float* bins = hist.Data();
        const uint8_t* pixels = image.Data();

        // zero every bin, then count one hit per pixel
        for (int b = 0; b < HISTOGRAM_BINS; ++b)
            bins[b] = 0;

        for (uint32_t i = 0; i < npixels; ++i)
            bins[pixels[i]] += 1;
    }

}
