#include <cuda.h>
#include <cuda_runtime.h>

#include <algorithm>
#include <stdexcept>

#include <torch/torch.h>

#include "hashencoder.h"

// Argument-validation helpers. Each takes an `at::Tensor*` and raises a
// torch error (via TORCH_CHECK) naming the offending tensor on failure.
#define CHECK_CUDA(x) TORCH_CHECK(x->device().is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x->is_contiguous(), #x " must be a contiguous tensor")
#define CHECK_IS_INT(x) TORCH_CHECK(x->scalar_type() == at::ScalarType::Int, #x " must be an int tensor")
// Note: accepts Float, Half, or Double — callers that later use data_ptr<float>() still require Float.
#define CHECK_IS_FLOATING(x) TORCH_CHECK(x->scalar_type() == at::ScalarType::Float || x->scalar_type() == at::ScalarType::Half || x->scalar_type() == at::ScalarType::Double, #x " must be a floating tensor")

// Overload so at::Half instantiations of AT_DISPATCH_FLOATING_TYPES_AND_HALF
// compile; the half path is never actually executed at runtime.
__device__ inline at::Half atomicAdd(at::Half* address, at::Half val) {
    // requires CUDA >= 10 and ARCH >= 70
    // this is very slow compared to float or __half2, never use it.
    //return atomicAdd(reinterpret_cast<__half*>(address), val);
    // BUGFIX: the function is declared to return at::Half, so falling off the
    // end was undefined behavior (and a compiler warning). Return the input.
    return val;
}

// Integer ceiling division: the smallest n such that n * divisor >= val.
// Usable from both host and device code; T is expected to be an integer type.
template <typename T>
__host__ __device__ inline T div_round_up(T val, T divisor) {
    const T biased = val + divisor - 1;  // push val up to the next multiple
    return biased / divisor;
}

// Fill `arr` with the binary corner offsets of a D-dimensional voxel.
// arr must hold (1 << D) * D entries, corner-major: arr[corner * D + dim].
// Dimension `dim` of corner `corner` is bit (D - 1 - dim) of the corner index,
// i.e. dimension 0 is driven by the most significant of the D bits.
template <uint32_t D>
__device__ inline void init_offsets(uint32_t arr[])
{
    const uint32_t corners = 1u << D;
    #pragma unroll
    for (uint32_t corner = 0; corner < corners; corner++)
    {
        #pragma unroll
        for (uint32_t dim = 0; dim < D; dim++)
        {
            // extract bit (D - 1 - dim) of the corner index
            arr[corner * D + dim] = (corner >> (D - 1u - dim)) & 1u;
        }
    }
}

// Forward pass of the multiresolution hash-grid encoding.
// Launch: grid = (ceil(B / blockDim.x), L, 1); one thread per (point, level).
//
//  inputs  : (B, D) float coordinates, expected inside the [min, max] box
//  grid    : concatenated per-level embedding tables; each entry holds C channels
//  offsets : (L+1) ints, offsets[l] = first *entry* index of level l
//  min/max : (D,) bounding box of the input domain
//  outputs : (B, L, C) interpolated features
//  dy_dx   : currently unused (reserved for input gradients); kept for interface
//
// NOTE(review): the `(hashmap_size - 1) & hash` modulo assumes every level's
// hashmap_size is a power of two — confirm the offsets are built that way.
template <typename scalar_t, uint32_t D, uint32_t C>
__global__ void kernel_grid(
    const float* __restrict__ inputs,
    const scalar_t* __restrict__ grid, 
    const int* __restrict__ offsets, 
    const float* __restrict__ min,
    const float* __restrict__ max,
    scalar_t* __restrict__ outputs,
    const uint32_t B, const uint32_t L, const double b, const double base_resolution, scalar_t* dy_dx)
{
    constexpr uint32_t voxel_size = 1 << D;  // number of corners of a D-dim voxel
    constexpr uint32_t PRIMES[7] = { 1u, 2654435761u, 805459861u, 3674653429u, 2097192037u, 1434869437u, 2165219737u };
    // OFFSET[j * D + i] = 0/1 offset of corner j along dimension i
    uint32_t OFFSET[voxel_size * D] = { 0 };
    init_offsets<D>(OFFSET);

    const uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= B) return;  // tail guard: B rarely divides the grid evenly
    const uint32_t level = blockIdx.y;
    const uint32_t hashmap_size = offsets[level + 1] - offsets[level];

    // jump to this level's table, this point's coords, and this (point, level) output slot
    grid += (uint32_t)offsets[level] * C;
    inputs += index * D;
    outputs += level * C + index * L * C;

    // per-level resolution grows geometrically with factor b
    const double resolution = floor(base_resolution * pow(b, level));
    float grid_size[D];
    uint32_t bottom_left_idx[D];
    float voxel_min_vertex[D];
    float voxel_max_vertex[D];
    uint32_t voxel_idx[voxel_size][D];
    uint32_t hash_idx[voxel_size];
    float voxel_embedds[voxel_size][C];
    float weights[D];

    // edge length of one voxel at this resolution, per dimension
    #pragma unroll
    for(uint32_t i = 0; i < D; i++)
    {
        grid_size[i] = (max[i] - min[i]) / resolution;
    }
    
    // locate the voxel containing the point: bottom-left corner index and extents
    #pragma unroll
    for (uint32_t i = 0; i < D; i++)
    {
        uint32_t idx = (uint32_t)floor((inputs[i] - min[i]) / grid_size[i]);
        bottom_left_idx[i] = idx;
        float min_vertex = idx * grid_size[i] + min[i];
        voxel_min_vertex[i] = min_vertex;
        voxel_max_vertex[i] = min_vertex + grid_size[i];
    }

    // integer coordinates of every corner of the voxel
    #pragma unroll
    for(uint32_t i = 0; i < voxel_size; i++)
    {
        #pragma unroll
        for(uint32_t j = 0; j < D; j++)
        {
            voxel_idx[i][j] = bottom_left_idx[j] + OFFSET[i * D + j];
        }
    }

    // spatial hash of each corner into this level's table
    #pragma unroll
    for(uint32_t i = 0; i < voxel_size; i++)
    {
        uint32_t hash_result = 0;
        #pragma unroll
        for(uint32_t j = 0; j < D; j++)
        {
            hash_result ^= voxel_idx[i][j] * PRIMES[j];
        }
        hash_result = (hashmap_size - 1) & hash_result;  // power-of-two modulo
        hash_idx[i] = hash_result;
    }

    // gather the C-channel embedding of each corner
    #pragma unroll
    for(uint32_t i = 0; i < voxel_size; i++)
    {
        #pragma unroll
        for(uint32_t j = 0; j < C; j++)
        {
            // BUGFIX: entries are C elements wide, so the entry index must be
            // scaled by C (was `grid[hash_idx[i] + j]`, wrong for C > 1).
            voxel_embedds[i][j] = grid[hash_idx[i] * C + j];
        }
    }

    // normalized position of the point inside the voxel, per dimension
    #pragma unroll
    for(uint32_t i = 0; i < D; i++)
    {
        weights[i] = (inputs[i] - voxel_min_vertex[i]) / (voxel_max_vertex[i] - voxel_min_vertex[i]);
    }

    // D-linear interpolation by repeated pairwise reduction: each pass halves
    // the corner set, lerping along one dimension. Pass k folds out the highest
    // remaining bit, which by construction of OFFSET corresponds to weights[k].
    uint32_t local = voxel_size;
    uint32_t weight_index = 0;
    float c[voxel_size * C];
    for(uint32_t i = 0; i < local; i++)
    {
        #pragma unroll
        for(uint32_t j = 0; j < C; j++)
        {
            c[i * C + j] = voxel_embedds[i][j];
        }
    }
    local >>= 1;
    for(; local > 0; local >>= 1)
    {
        for(uint32_t i = 0; i < local; i++)
        {
            #pragma unroll
            for(uint32_t j = 0; j < C; j++)
            {
                float c0 = c[i * C + j];
                float c1 = c[(local + i) * C + j];
                c[i * C + j] = c0 * (1.0f - weights[weight_index]) + c1 * weights[weight_index];
            }
        }
        weight_index++;
    }

    // c[0..C) now holds the fully interpolated feature for this (point, level)
    for(uint32_t ch = 0; ch < C; ch++)
    {
        outputs[ch] = c[ch];
    }
}

// Backward pass: scatters the upstream gradient of each (point, level) feature
// into the embedding-table gradient buffer. Mirrors kernel_grid's corner
// enumeration and interpolation weights; accumulation uses atomicAdd because
// many threads hash to the same entries.
//  grad      : (B, L, C) upstream gradient
//  grad_grid : gradient buffer with the same layout as the embedding table
template <typename scalar_t, uint32_t D, uint32_t C>
__global__ void kernel_grid_backward(
    const scalar_t* __restrict__ grad,
    const float* __restrict__ inputs,
    const scalar_t* __restrict__ grid,
    const int* __restrict__ offsets,
    const float* __restrict__ min,
    const float* __restrict__ max,
    scalar_t* __restrict__ grad_grid,
    const uint32_t B, const uint32_t L, const double b, const double base_resolution)
{
    constexpr uint32_t voxel_size = 1 << D;  // number of corners of a D-dim voxel
    constexpr uint32_t PRIMES[7] = { 1u, 2654435761u, 805459861u, 3674653429u, 2097192037u, 1434869437u, 2165219737u };
    // OFFSET[j * D + i] = 0/1 offset of corner j along dimension i
    uint32_t OFFSET[voxel_size * D] = { 0 };
    init_offsets<D>(OFFSET);

    const uint32_t index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= B) return;
    const uint32_t level = blockIdx.y;
    const uint32_t hashmap_size = offsets[level + 1] - offsets[level];

    // jump to this level's gradient table, this point's coords, and its grad slot
    grad_grid += (uint32_t)offsets[level] * C;
    inputs += index * D;
    grad += level * C + index * L * C;

    // per-level resolution grows geometrically with factor b
    const double resolution = floor(base_resolution * pow(b, level));
    float grid_size[D];
    uint32_t bottom_left_idx[D];
    float voxel_min_vertex[D];
    float voxel_max_vertex[D];
    uint32_t voxel_idx[voxel_size][D];
    uint32_t hash_idx[voxel_size];
    float weights[D];

    // edge length of one voxel at this resolution, per dimension
    #pragma unroll
    for (uint32_t i = 0; i < D; i++)
    {
        grid_size[i] = (max[i] - min[i]) / resolution;
    }

    // locate the voxel containing the point (same as the forward pass)
    #pragma unroll
    for (uint32_t i = 0; i < D; i++)
    {
        uint32_t idx = (uint32_t)floor((inputs[i] - min[i]) / grid_size[i]);
        bottom_left_idx[i] = idx;
        float min_vertex = idx * grid_size[i] + min[i];
        voxel_min_vertex[i] = min_vertex;
        voxel_max_vertex[i] = min_vertex + grid_size[i];
    }

    // integer coordinates of every corner of the voxel
    #pragma unroll
    for (uint32_t i = 0; i < voxel_size; i++)
    {
        #pragma unroll
        for (uint32_t j = 0; j < D; j++)
        {
            voxel_idx[i][j] = bottom_left_idx[j] + OFFSET[i * D + j];
        }
    }

    // spatial hash of each corner (must match the forward pass exactly)
    #pragma unroll
    for (uint32_t i = 0; i < voxel_size; i++)
    {
        uint32_t hash_result = 0;
        #pragma unroll
        for (uint32_t j = 0; j < D; j++)
        {
            hash_result ^= voxel_idx[i][j] * PRIMES[j];
        }
        hash_result = (hashmap_size - 1) & hash_result;  // power-of-two modulo
        hash_idx[i] = hash_result;
    }

    // normalized position of the point inside the voxel, per dimension
    #pragma unroll
    for (uint32_t i = 0; i < D; i++)
    {
        weights[i] = (inputs[i] - voxel_min_vertex[i]) / (voxel_max_vertex[i] - voxel_min_vertex[i]);
    }

    // local copy of this (point, level)'s upstream gradient
    scalar_t grad_cur[C] = { 0 };
    #pragma unroll
    for (uint32_t c = 0; c < C; c++)
    {
        grad_cur[c] = grad[c];
    }

    #pragma unroll
    for (uint32_t i = 0; i < voxel_size; i++)
    {
        // D-linear weight of corner i: product over dims of w_d or (1 - w_d)
        float w = 1.0f;
        #pragma unroll
        for (uint32_t d = 0; d < D; d++)
        {
            // BUGFIX: use the same corner->dimension mapping as the forward
            // pass. OFFSET stores dimension d in bit (D-1-d) of the corner
            // index; the previous `i & (1 << d)` test used the reversed bit
            // order, pairing each corner with the wrong weights for D > 1.
            if (OFFSET[i * D + d] == 0) {
                w *= (1.0f - weights[d]);
            }
            else {
                w *= weights[d];
            }
        }

        const uint32_t entry = hash_idx[i];  // renamed: no longer shadows the thread index
        #pragma unroll
        for (uint32_t c = 0; c < C; c++)
        {
            // BUGFIX: entries are C elements wide, so the entry index must be
            // scaled by C (was `grad_grid[index + c]`, wrong for C > 1).
            atomicAdd(&grad_grid[entry * C + c], w * grad_cur[c]);
        }
    }
}


// Dispatches kernel_grid over the runtime channel count C (compile-time
// specializations for C in {1, 2, 4, 8}); 512 threads per block, one
// blockIdx.y per level. Throws std::runtime_error for unsupported C.
template <typename scalar_t, uint32_t D>
void kernel_grid_wrapper(const float* inputs, const scalar_t* embeddings, const int* offsets,
    const float* min, const float* max, scalar_t* outputs,
    const uint32_t B, const uint32_t C, const uint32_t L,
    const double b, const double base_resolution, scalar_t* dy_dx)
{
    static constexpr uint32_t N_THREAD = 512;
    const dim3 blocks_hashgrid = { div_round_up<uint32_t>(B, N_THREAD), L, 1 };
    switch (C)
    {
        case 1: kernel_grid<scalar_t, D, 1><<<blocks_hashgrid, N_THREAD>>>(inputs, embeddings, offsets, min, max, outputs, B, L, b, base_resolution, dy_dx); break;
        case 2: kernel_grid<scalar_t, D, 2><<<blocks_hashgrid, N_THREAD>>>(inputs, embeddings, offsets, min, max, outputs, B, L, b, base_resolution, dy_dx); break;
        case 4: kernel_grid<scalar_t, D, 4><<<blocks_hashgrid, N_THREAD>>>(inputs, embeddings, offsets, min, max, outputs, B, L, b, base_resolution, dy_dx); break;
        case 8: kernel_grid<scalar_t, D, 8><<<blocks_hashgrid, N_THREAD>>>(inputs, embeddings, offsets, min, max, outputs, B, L, b, base_resolution, dy_dx); break;
        // BUGFIX: throw by value — `throw new ...` throws a raw pointer, which
        // `catch (const std::exception&)` handlers never see, and leaks.
        default: throw std::runtime_error{ "HashEncoding: C must be 1, 2, 4, or 8." };
    }
    // surface invalid launch configurations immediately (launches are async)
    const cudaError_t err = cudaGetLastError();
    TORCH_CHECK(err == cudaSuccess, "kernel_grid launch failed: ", cudaGetErrorString(err));
}

// Dispatches kernel_grid_backward over the runtime channel count C
// (compile-time specializations for C in {1, 2, 4, 8}); 256 threads per
// block, one blockIdx.y per level. Throws std::runtime_error for unsupported C.
template <typename scalar_t, uint32_t D>
void kernel_grid_backward_wrapper(const scalar_t* grad, const float* inputs, const scalar_t* embeddings, const int* offsets,
    const float* min, const float* max, scalar_t* grad_embeddings,
    const uint32_t B, const uint32_t C, const uint32_t L, const double b, const double base_resolution)
{
    static constexpr uint32_t N_THREAD = 256;
    const dim3 blocks_hashgrid = { div_round_up<uint32_t>(B, N_THREAD), L, 1 };
    switch (C)
    {
        case 1: kernel_grid_backward<scalar_t, D, 1><<<blocks_hashgrid, N_THREAD>>>(grad, inputs, embeddings, offsets, min, max, grad_embeddings, B, L, b, base_resolution); break;
        case 2: kernel_grid_backward<scalar_t, D, 2><<<blocks_hashgrid, N_THREAD>>>(grad, inputs, embeddings, offsets, min, max, grad_embeddings, B, L, b, base_resolution); break;
        case 4: kernel_grid_backward<scalar_t, D, 4><<<blocks_hashgrid, N_THREAD>>>(grad, inputs, embeddings, offsets, min, max, grad_embeddings, B, L, b, base_resolution); break;
        case 8: kernel_grid_backward<scalar_t, D, 8><<<blocks_hashgrid, N_THREAD>>>(grad, inputs, embeddings, offsets, min, max, grad_embeddings, B, L, b, base_resolution); break;
        // BUGFIX: throw by value — `throw new ...` throws a raw pointer, which
        // `catch (const std::exception&)` handlers never see, and leaks.
        default: throw std::runtime_error{ "HashEncoding: C must be 1, 2, 4, or 8." };
    }
    // (removed stray debug `printf("\r\n")` that ran after every backward launch)
    // surface invalid launch configurations immediately (launches are async)
    const cudaError_t err = cudaGetLastError();
    TORCH_CHECK(err == cudaSuccess, "kernel_grid_backward launch failed: ", cudaGetErrorString(err));
}

// Dispatches the forward pass over the runtime coordinate dimension D
// (compile-time specializations for D in {2, 3, 4, 5}).
// Throws std::runtime_error for unsupported D.
template <typename scalar_t>
void grid_encode_forward_cuda(const float* inputs, const scalar_t* embeddings, const int* offsets,
    const float* min, const float* max, scalar_t* outputs,
    const uint32_t B, const uint32_t D, const uint32_t C,
    const uint32_t L, const double b, const double base_resolution, scalar_t* dy_dx)
{
    switch (D)
    {
        case 2: kernel_grid_wrapper<scalar_t, 2>(inputs, embeddings, offsets, min, max, outputs, B, C, L, b, base_resolution, dy_dx); break;
        case 3: kernel_grid_wrapper<scalar_t, 3>(inputs, embeddings, offsets, min, max, outputs, B, C, L, b, base_resolution, dy_dx); break;
        case 4: kernel_grid_wrapper<scalar_t, 4>(inputs, embeddings, offsets, min, max, outputs, B, C, L, b, base_resolution, dy_dx); break;
        case 5: kernel_grid_wrapper<scalar_t, 5>(inputs, embeddings, offsets, min, max, outputs, B, C, L, b, base_resolution, dy_dx); break;
        // BUGFIX: throw by value — `throw new ...` throws a raw pointer, which
        // `catch (const std::exception&)` handlers never see, and leaks.
        default: throw std::runtime_error{ "HashEncoding: D must be 2, 3, 4, or 5." };
    }
}

// Dispatches the backward pass over the runtime coordinate dimension D
// (compile-time specializations for D in {2, 3, 4, 5}).
// Throws std::runtime_error for unsupported D.
template <typename scalar_t>
void kernel_grid_backward_cuda(const scalar_t* grad, const float* inputs, const scalar_t* embeddings, const int* offsets,
    const float* min, const float* max, scalar_t* grad_embeddings,
    const uint32_t B, const uint32_t D, const uint32_t C,
    const uint32_t L, const double b, const double base_resolution)
{
    switch(D)
    {
        case 2: kernel_grid_backward_wrapper<scalar_t, 2>(grad, inputs, embeddings, offsets,
            min, max, grad_embeddings, B, C, L, b, base_resolution); break;
        case 3: kernel_grid_backward_wrapper<scalar_t, 3>(grad, inputs, embeddings, offsets,
            min, max, grad_embeddings, B, C, L, b, base_resolution); break;
        case 4: kernel_grid_backward_wrapper<scalar_t, 4>(grad, inputs, embeddings, offsets,
            min, max, grad_embeddings, B, C, L, b, base_resolution); break;
        case 5: kernel_grid_backward_wrapper<scalar_t, 5>(grad, inputs, embeddings, offsets,
            min, max, grad_embeddings, B, C, L, b, base_resolution); break;
        // BUGFIX: throw by value — `throw new ...` throws a raw pointer, which
        // `catch (const std::exception&)` handlers never see, and leaks.
        default: throw std::runtime_error{ "HashEncoding: D must be 2, 3, 4, or 5." };
    }
}

// Host entry point for the forward hash encoding.
//  inputs     : (B, D) float32 CUDA tensor of coordinates
//  embeddings : (total_entries, C) CUDA tensor (dispatched float/double/half)
//  offsets    : (L+1,) int32 CUDA tensor of per-level entry offsets
//  box_min/box_max : (D,) float32 CUDA tensors bounding the inputs
//  outputs    : (B, L, C) CUDA tensor, written in place
//  dy_dx      : may be nullptr (input gradients are not implemented yet)
void hash_encoder_forward(const at::Tensor* inputs, const at::Tensor* embeddings, const at::Tensor* offsets,
    const at::Tensor* box_min, const at::Tensor* box_max, const double base_resolution, const double b, const at::Tensor* outputs, const at::Tensor* dy_dx)
{
    CHECK_CUDA(inputs);
    CHECK_CUDA(embeddings);
    CHECK_CUDA(offsets);
    CHECK_CUDA(outputs);
    // ROBUSTNESS: box tensors are dereferenced on the device too — validate them
    CHECK_CUDA(box_min);
    CHECK_CUDA(box_max);

    CHECK_CONTIGUOUS(inputs);
    CHECK_CONTIGUOUS(embeddings);
    CHECK_CONTIGUOUS(offsets);
    CHECK_CONTIGUOUS(outputs);
    CHECK_CONTIGUOUS(box_min);
    CHECK_CONTIGUOUS(box_max);

    CHECK_IS_FLOATING(inputs);
    CHECK_IS_FLOATING(embeddings);
    CHECK_IS_INT(offsets);
    CHECK_IS_FLOATING(outputs);
    CHECK_IS_FLOATING(box_min);
    CHECK_IS_FLOATING(box_max);

    uint32_t B = static_cast<uint32_t>(inputs->size(0));      // batch size
    uint32_t D = static_cast<uint32_t>(inputs->size(1));      // coordinate dims
    uint32_t L = static_cast<uint32_t>(offsets->size(0) - 1); // number of levels
    uint32_t C = static_cast<uint32_t>(embeddings->size(1));  // channels per entry

    AT_DISPATCH_FLOATING_TYPES_AND_HALF(
        embeddings->scalar_type(), "grid_encode_forward", ([&] {
            grid_encode_forward_cuda<scalar_t>(inputs->data_ptr<float>(), embeddings->data_ptr<scalar_t>(), offsets->data_ptr<int>(),
                box_min->data_ptr<float>(), box_max->data_ptr<float>(), outputs->data_ptr<scalar_t>(), B, D, C, L, b, base_resolution,
                dy_dx != nullptr ? dy_dx->data_ptr<scalar_t>() : nullptr);
            }));
}

// Host entry point for the backward hash encoding.
//  grad            : (B, L, C) CUDA tensor, upstream gradient of the outputs
//  grad_embeddings : same layout as embeddings, accumulated in place
//  remaining arguments: see hash_encoder_forward
void hash_encoder_backward(const at::Tensor* grad, const at::Tensor* inputs, const at::Tensor* embeddings, const at::Tensor* offsets,
    const at::Tensor* box_min, const at::Tensor* box_max, const double base_resolution, const double b, const at::Tensor* grad_embeddings)
{
    CHECK_CUDA(grad);
    CHECK_CUDA(inputs);
    CHECK_CUDA(embeddings);
    CHECK_CUDA(offsets);
    CHECK_CUDA(grad_embeddings);
    // ROBUSTNESS: box tensors are dereferenced on the device too — validate them
    CHECK_CUDA(box_min);
    CHECK_CUDA(box_max);

    CHECK_CONTIGUOUS(grad);
    CHECK_CONTIGUOUS(inputs);
    CHECK_CONTIGUOUS(embeddings);
    CHECK_CONTIGUOUS(offsets);
    CHECK_CONTIGUOUS(grad_embeddings);
    CHECK_CONTIGUOUS(box_min);
    CHECK_CONTIGUOUS(box_max);

    CHECK_IS_FLOATING(grad);
    CHECK_IS_FLOATING(inputs);
    CHECK_IS_FLOATING(embeddings);
    CHECK_IS_INT(offsets);
    CHECK_IS_FLOATING(grad_embeddings);
    CHECK_IS_FLOATING(box_min);
    CHECK_IS_FLOATING(box_max);

    uint32_t B = static_cast<uint32_t>(inputs->size(0));      // batch size
    uint32_t D = static_cast<uint32_t>(inputs->size(1));      // coordinate dims
    uint32_t L = static_cast<uint32_t>(offsets->size(0) - 1); // number of levels
    uint32_t C = static_cast<uint32_t>(embeddings->size(1));  // channels per entry

    AT_DISPATCH_FLOATING_TYPES_AND_HALF(
    grad->scalar_type(), "grid_encode_backward", ([&] {
            kernel_grid_backward_cuda<scalar_t>(grad->data_ptr<scalar_t>(), inputs->data_ptr<float>(), embeddings->data_ptr<scalar_t>(), offsets->data_ptr<int>(),
                box_min->data_ptr<float>(), box_max->data_ptr<float>(), grad_embeddings->data_ptr<scalar_t>(),
                B, D, C, L, b, base_resolution);
    }));
}