#pragma once

// Custom implementation of inclusive_scan

// 本来不应该有这个文件的
// thrust（或cub）的inclusive_scan / exclusive_scan有奇怪的bug，无法启动kernel

#include <driver_types.h>

#include <bit>
#include <type_traits>

#include <utils/Concepts.h>

namespace solar::cuda::algorithm
{
    namespace internal::parallel_scan
    {
        constexpr auto kLogMemBanks = 5;
        constexpr auto kThreadsPerBlock = 512;
        constexpr auto kElementsPerBlock = kThreadsPerBlock * 2;

        constexpr auto conflictFreeOffset(int n) { return n >> kLogMemBanks; }

        // from https://stackoverflow.com/a/3638454
        constexpr auto isPowerOfTwo(int x) -> bool { return (x != 0) && ((x & (x - 1)) == 0); }

        // from https://stackoverflow.com/a/12506181
        template <typename T>
        constexpr auto nextPowerOfTwo(T x) -> T
            requires std::is_integral_v<T>
        {
            return std::bit_cast<T>(std::bit_ceil(std::make_unsigned_t<T>(x)));
        }

        template <typename T1, typename T2>
        __global__ void preScanArbitrary(T1* input, T2* output, int n, int power_of_two)
            requires std::is_same_v<std::remove_cvref_t<T1>, std::remove_cvref_t<T2>> &&
                     std::is_trivially_copy_assignable_v<T2> && std::is_trivially_copyable_v<T1>
        {
            extern __shared__ char temp_t[]; // allocated on invocation
            auto* temp = reinterpret_cast<T2*>(temp_t);
            int thread_id = threadIdx.x;

            int ai = thread_id;
            int bi = thread_id + (n / 2);
            int bank_offset_a = conflictFreeOffset(ai);
            int bank_offset_b = conflictFreeOffset(bi);

            bool should_add = thread_id < n;
            temp[ai + bank_offset_a] = input[ai] * should_add;
            temp[bi + bank_offset_b] = input[bi] * should_add;
            // if (thread_id < n)
            // {
            //     temp[ai + bank_offset_a] = input[ai];
            //     temp[bi + bank_offset_b] = input[bi];
            // }
            // else
            // {
            //     temp[ai + bank_offset_a] = 0;
            //     temp[bi + bank_offset_b] = 0;
            // }

            int offset = 1;
// build sum in place up the tree
#pragma unroll
            for (int d = power_of_two >> 1; d > 0; d >>= 1)
            {
                __syncthreads();
                if (thread_id < d)
                {
                    int ai = offset * (2 * thread_id + 1) - 1;
                    int bi = offset * (2 * thread_id + 2) - 1;
                    ai += conflictFreeOffset(ai);
                    bi += conflictFreeOffset(bi);

                    temp[bi] += temp[ai];
                }
                offset *= 2;
            }

            if (thread_id == 0)
            {
                // clear the last element
                temp[power_of_two - 1 + conflictFreeOffset(power_of_two - 1)] = 0;
            }

// traverse down tree & build scan
#pragma unroll
            for (int d = 1; d < power_of_two; d *= 2)
            {
                offset >>= 1;
                __syncthreads();
                if (thread_id < d)
                {
                    int ai = offset * (2 * thread_id + 1) - 1;
                    int bi = offset * (2 * thread_id + 2) - 1;
                    ai += conflictFreeOffset(ai);
                    bi += conflictFreeOffset(bi);

                    T2 t = temp[ai];
                    temp[ai] = temp[bi];
                    temp[bi] += t;
                }
            }
            __syncthreads();

            if (thread_id < n)
            {
                output[ai] = temp[ai + bank_offset_a];
                output[bi] = temp[bi + bank_offset_b];
            }
        }

        template <typename T1, typename T2>
        __global__ void preScanLarge(T1* input, T2* output, int n, int* sums)
            requires std::is_same_v<std::remove_cvref_t<T1>, std::remove_cvref_t<T2>> &&
                     std::is_trivially_copy_assignable_v<T2> && std::is_trivially_copyable_v<T1>
        {
            extern __shared__ char temp_t[]; // allocated on invocation
            auto* temp = reinterpret_cast<T2*>(temp_t);

            int block_id = blockIdx.x;
            int thread_id = threadIdx.x;
            int block_offset = block_id * n;

            int ai = thread_id;
            int bi = thread_id + (n / 2);
            int bank_offset_a = conflictFreeOffset(ai);
            int bank_offset_b = conflictFreeOffset(bi);
            temp[ai + bank_offset_a] = input[block_offset + ai];
            temp[bi + bank_offset_b] = input[block_offset + bi];

            int offset = 1;
#pragma unroll
            for (int d = n >> 1; d > 0; d >>= 1) // build sum in place up the tree
            {
                __syncthreads();
                if (thread_id < d)
                {
                    int ai = offset * (2 * thread_id + 1) - 1;
                    int bi = offset * (2 * thread_id + 2) - 1;
                    ai += conflictFreeOffset(ai);
                    bi += conflictFreeOffset(bi);

                    temp[bi] += temp[ai];
                }
                offset *= 2;
            }
            __syncthreads();

            if (thread_id == 0)
            {
                sums[block_id] = temp[n - 1 + conflictFreeOffset(n - 1)];
                temp[n - 1 + conflictFreeOffset(n - 1)] = 0;
            }

#pragma unroll
            for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
            {
                offset >>= 1;
                __syncthreads();
                if (thread_id < d)
                {
                    int ai = offset * (2 * thread_id + 1) - 1;
                    int bi = offset * (2 * thread_id + 2) - 1;
                    ai += conflictFreeOffset(ai);
                    bi += conflictFreeOffset(bi);

                    T2 t = temp[ai];
                    temp[ai] = temp[bi];
                    temp[bi] += t;
                }
            }
            __syncthreads();

            output[block_offset + ai] = temp[ai + bank_offset_a];
            output[block_offset + bi] = temp[bi + bank_offset_b];
        }

        template <typename T1, typename T2>
        __global__ void add(T2* output, int length, T1* n)
            requires std::is_same_v<std::remove_cvref_t<T1>, std::remove_cvref_t<T2>> &&
                     std::is_trivially_copy_assignable_v<T2> && std::is_trivially_copyable_v<T1>
        {
            int block_id = blockIdx.x;
            int thread_id = threadIdx.x;
            int block_offset = block_id * length;

            output[block_offset + thread_id] += n[block_id];
        }

        template <typename T1, typename T2>
        __global__ void add(T2* output, int length, T1* n1, T1* n2)
            requires std::is_same_v<std::remove_cvref_t<T1>, std::remove_cvref_t<T2>> &&
                     std::is_trivially_copy_assignable_v<T2> && std::is_trivially_copyable_v<T1>
        {
            int block_id = blockIdx.x;
            int thread_id = threadIdx.x;
            int block_offset = block_id * length;

            output[block_offset + thread_id] += n1[block_id] + n2[block_id];
        }

        template <typename T1, typename T2>
        void scanSmallDeviceArray(T1* d_in, T2* d_out, int length, cudaStream_t stream = nullptr)
            requires std::is_same_v<std::remove_cvref_t<T1>, std::remove_cvref_t<T2>> &&
                     std::is_trivially_copy_assignable_v<T2> && std::is_trivially_copyable_v<T1>
        {
            int power_of_two = nextPowerOfTwo(length);
            preScanArbitrary<<<1, (length + 1) / 2, 2 * power_of_two * sizeof(T2), stream>>>(
                d_in, d_out, length, power_of_two);
        }

        template <typename T1, typename T2>
        void scanLargeDeviceArray(T1* d_in, T2* d_out, int length, cudaStream_t stream = nullptr)
            requires std::is_same_v<std::remove_cvref_t<T1>, std::remove_cvref_t<T2>> &&
                     std::is_trivially_copy_assignable_v<T2> && std::is_trivially_copyable_v<T1>;

        template <typename T1, typename T2>
        void scanLargeEvenDeviceArray(T1* d_in, T2* d_out, int length,
                                      cudaStream_t stream = nullptr)
            requires std::is_same_v<std::remove_cvref_t<T1>, std::remove_cvref_t<T2>> &&
                     std::is_trivially_copy_assignable_v<T2> && std::is_trivially_copyable_v<T1>
        {
            const int blocks = length / kElementsPerBlock;
            const int shared_mem_array_size = kElementsPerBlock * sizeof(T2);

            T2* d_sums = nullptr;
            T2* d_incr = nullptr;
            cudaMallocAsync(&d_sums, blocks * sizeof(T2), stream);
            cudaMallocAsync(&d_incr, blocks * sizeof(T2), stream);

            preScanLarge<<<blocks, kThreadsPerBlock, 2 * shared_mem_array_size, stream>>>(
                d_in, d_out, kElementsPerBlock, d_sums);

            const int sums_arr_threads_needed = (blocks + 1) / 2;
            if (sums_arr_threads_needed > kThreadsPerBlock)
            {
                // perform a large scan on the sums arr
                scanLargeDeviceArray(d_sums, d_incr, blocks);
            }
            else
            {
                // only need one block to scan sums arr so can use small scan
                scanSmallDeviceArray(d_sums, d_incr, blocks);
            }

            add<<<blocks, kElementsPerBlock, 0, stream>>>(d_out, kElementsPerBlock, d_incr);

            cudaFreeAsync(d_sums, stream);
            cudaFreeAsync(d_incr, stream);
        }

        template <typename T1, typename T2>
        void scanLargeDeviceArray(T1* d_in, T2* d_out, int length, cudaStream_t stream)
            requires std::is_same_v<std::remove_cvref_t<T1>, std::remove_cvref_t<T2>> &&
                     std::is_trivially_copy_assignable_v<T2> && std::is_trivially_copyable_v<T1>
        {
            const int remainder = length % (kElementsPerBlock);
            if (remainder == 0)
            {
                scanLargeEvenDeviceArray(d_in, d_out, length, stream);
            }
            else
            {
                // perform a large scan on a compatible multiple of elements
                int length_multiple = length - remainder;
                scanLargeEvenDeviceArray(d_in, d_out, length_multiple, stream);

                // scan the remaining elements and add the (inclusive) last element of the large
                // scan to this
                T2* start_of_output_array = &(d_out[length_multiple]);
                scanSmallDeviceArray(&(d_in[length_multiple]), start_of_output_array, remainder,
                                     stream);

                add<<<1, remainder, 0, stream>>>(start_of_output_array, remainder,
                                                 &(d_in[length_multiple - 1]),
                                                 &(d_out[length_multiple - 1]));
            }
        }
    } // namespace internal::parallel_scan

    template <typename T1, typename T2>
    void inclusiveScan(T1* d_input, T2* d_output, int length, cudaStream_t stream = nullptr)
        requires std::is_same_v<std::remove_cvref_t<T1>, std::remove_cvref_t<T2>> &&
                 std::is_trivially_copy_assignable_v<T2> && std::is_trivially_copyable_v<T1>
    {
        if (length > internal::parallel_scan::kElementsPerBlock)
        {
            internal::parallel_scan::scanLargeDeviceArray(d_input, d_output, length, stream);
        }
        else
        {
            internal::parallel_scan::scanSmallDeviceArray(d_input, d_output, length, stream);
        }
    }
} // namespace solar::cuda::algorithm
