#include <cstdint>


// Workaround: this toolchain (Suiyuan GCU) fails to treat a constexpr global
// variable as a compile-time constant and additionally emits a name-mangling
// error for it, so the L1 buffer length is defined as a macro instead.
#define L1_BUFFER_LEN ::std::uint32_t(128)

#pragma GCC optimize("Ofast")
// Grid-cooperative kernel: computes the sample variance of
// dev_inp[0 .. nr_elems) into dev_out[0].
//
// Phase 1: each thread runs Welford's online algorithm over its contiguous
//          slice of the input, streaming the data through a private L1
//          buffer in L1_BUFFER_LEN-sized chunks.
// Phase 2: per-thread partials (n, mean, M2) are published to shared memory,
//          all threads synchronize, and thread 0 merges them serially with
//          the pairwise Welford-combination formula, then writes
//          M2 / (n - 1) to dev_out.
//
// thread_num must equal the total number of launched threads
// (gridDim.x * blockDim.x) — the launch sites in GCU_VAR keep this invariant.
template<::std::uint32_t thread_num>
__attribute__((global, cooperative))
void kernel_var(
        float const* const __restrict dev_inp,
        float * const __restrict dev_out,
        ::std::uint32_t const nr_elems)
{
    // Per-thread Welford partials, staged in shared memory so thread 0 can
    // gather them after the barrier.
    __shared__ ::std::uint32_t dev_welford_n[thread_num];
    __shared__ float dev_welford_mean[thread_num];
    __shared__ float dev_welford_m2[thread_num];

    tops_dte_ctx_t ctx;
    // FIXME use dte_scope instead
    // NOTE(review): ctx is initialized but never explicitly torn down before
    // the kernel returns — confirm whether this DTE context needs a deinit
    // on this platform (the FIXME above suggests an RAII scope exists).
    ctx.init();
    // Flat thread index across the whole cooperative launch.
    ::std::uint32_t const thread_idx{blockIdx.x * blockDim.x + threadIdx.x};

    // Private (L1) staging buffer for input chunks copied down from L3.
    float l1_buf_lhs[L1_BUFFER_LEN];
    tops::mdspan l1_buf_lhs_view(tops::Private, l1_buf_lhs, L1_BUFFER_LEN);

    ::std::uint32_t const elems_per_thread {nr_elems / thread_num};
    // Running Welford state: count, mean, and sum of squared deviations (M2).
    ::std::uint32_t welford_n{};
    float welford_mean{};
    float welford_m2{};

    // This thread owns [thread_idx * elems_per_thread, pos_end); the last
    // thread also absorbs the division remainder.
    ::std::uint32_t const pos_end {thread_idx == thread_num - 1 ? nr_elems : (thread_idx + 1) * elems_per_thread};
    // TODO: vectorize
    for (::std::uint32_t index{thread_idx * elems_per_thread}; index < pos_end; index += L1_BUFFER_LEN) {
        // Chunk size: a full L1_BUFFER_LEN except possibly for the tail.
        ::std::uint32_t const chunk {index + L1_BUFFER_LEN < pos_end ? L1_BUFFER_LEN : pos_end - index};
        // Create mdspan for current chunk in L3
        tops::mdspan l3_dev_inp_view(tops::Global, const_cast<float*>(dev_inp + index), chunk);
        // load data from L3 to L1
        tops::memcpy(ctx, l1_buf_lhs_view, l3_dev_inp_view);

        // Standard Welford update, one element at a time.
        for (::std::uint32_t j{}; j < chunk; ++j) {
            ++welford_n;
            float delta {l1_buf_lhs[j] - welford_mean};
            welford_mean += delta / welford_n;
            welford_m2 += delta * (l1_buf_lhs[j] - welford_mean);
        }
    }

    if constexpr (thread_num == 1) {
        // Single-thread launch: no merge needed, write the result directly.
        // NOTE(review): welford_n <= 1 divides by zero / yields NaN here, and
        // GCU_VAR routes all nr_elems <= 128 (including 0 and 1) to this
        // specialization — confirm callers guarantee nr_elems >= 2.
        float result {welford_m2 / (float(welford_n) - 1)};
        ::tops::mdspan result_view(::tops::Private, __builtin_addressof(result), 1);
        ::tops::mdspan dev_out_view(::tops::Global, dev_out, 1);
        ::tops::memcpy(ctx, dev_out_view, result_view);
        return;
    } else {
        // Publish this thread's partials to its shared-memory slots via DTE copies.
        ::tops::mdspan l2_dev_welford_n_view(::tops::Shared, __builtin_addressof(dev_welford_n[thread_idx]), 1);
        ::tops::mdspan l1_dev_welford_n_view(::tops::Private, __builtin_addressof(welford_n), 1);
        ::tops::memcpy(ctx, l2_dev_welford_n_view, l1_dev_welford_n_view);

        ::tops::mdspan l2_dev_welford_mean_view(::tops::Shared, __builtin_addressof(dev_welford_mean[thread_idx]), 1);
        ::tops::mdspan l1_dev_welford_mean_view(::tops::Private, __builtin_addressof(welford_mean), 1);
        ::tops::memcpy(ctx, l2_dev_welford_mean_view, l1_dev_welford_mean_view);

        ::tops::mdspan l2_dev_welford_m2_view(::tops::Shared, __builtin_addressof(dev_welford_m2[thread_idx]), 1);
        ::tops::mdspan l1_dev_welford_m2_view(::tops::Private, __builtin_addressof(welford_m2), 1);
        ::tops::memcpy(ctx, l2_dev_welford_m2_view, l1_dev_welford_m2_view);

        // Barrier: every partial must be visible before thread 0 reads them.
        // The kernel is marked cooperative, so presumably this synchronizes
        // the whole launch rather than a single block — confirm on this platform.
        __syncblocks();

        // Only thread 0 performs the merge and writes the output.
        if (thread_idx != 0)
        {
            return;
        }

        {
            ::tops::mdspan dev_welford_n_view(::tops::Shared, dev_welford_n, thread_num);
            ::tops::mdspan dev_welford_mean_view(::tops::Shared, dev_welford_mean, thread_num);
            ::tops::mdspan dev_welford_m2_view(::tops::Shared, dev_welford_m2, thread_num);

            // Copy all partials down into private (L1) arrays for the merge.
            ::std::uint32_t l1_dev_welford_n[thread_num];
            float l1_dev_welford_mean[thread_num];
            float l1_dev_welford_m2[thread_num];

            ::tops::mdspan l1_dev_welford_n_view(::tops::Private, l1_dev_welford_n, thread_num);
            ::tops::mdspan l1_dev_welford_mean_view(::tops::Private, l1_dev_welford_mean, thread_num);
            ::tops::mdspan l1_dev_welford_m2_view(::tops::Private, l1_dev_welford_m2, thread_num);

            ::tops::memcpy(ctx, l1_dev_welford_n_view, dev_welford_n_view);
            ::tops::memcpy(ctx, l1_dev_welford_mean_view, dev_welford_mean_view);
            ::tops::memcpy(ctx, l1_dev_welford_m2_view, dev_welford_m2_view);

            // Accumulate everything into slot 0's statistics.
            ::std::uint32_t& total_n = l1_dev_welford_n[0];
            float& total_mean = l1_dev_welford_mean[0];
            float& total_m2 = l1_dev_welford_m2[0];

            // Pairwise combination of two Welford states:
            //   mean_AB = mean_A + delta * n_B / n_AB
            //   M2_AB   = M2_A + M2_B + delta^2 * n_A * n_B / n_AB
            // (delta, n_B etc. are float here, so the divisions are float divisions.)
#pragma unroll
            for (::std::uint32_t i{1}; i < thread_num; ++i) {
                float delta = l1_dev_welford_mean[i] - total_mean;
                ::std::uint32_t n {total_n + l1_dev_welford_n[i]};
                total_mean = total_mean + delta * l1_dev_welford_n[i] / n;
                total_m2   = total_m2 + l1_dev_welford_m2[i] + delta * delta * total_n * l1_dev_welford_n[i] / n;
                total_n    = n;
            }

            // Promise the optimizer that n != 1, so the (n - 1) divisor below
            // is known non-zero; the disabled #if 0 branch was the equivalent
            // runtime trap check.
#if 0
            if (total_n == 1) {
                __builtin_trap();
            }
#else
            __builtin_assume(total_n != 1);
#endif
            // Sample (Bessel-corrected) variance: M2 / (n - 1).
            float result {total_m2 / (float(total_n) - 1)};

            ::tops::mdspan result_view(::tops::Private, __builtin_addressof(result), 1);
            ::tops::mdspan dev_out_view(::tops::Global, dev_out, 1);
            ::tops::memcpy(ctx, dev_out_view, result_view);
        }
    }
}

// Host-side dispatcher: selects a (compile-time thread count, launch
// geometry) pair from the input size and launches the matching kernel_var
// specialization. In every branch the template argument equals
// grid * block, which the kernel requires.
void GCU_VAR(float * __restrict dev_inp, float * __restrict dev_out, const int nr_elems) {
    ::std::uint32_t const count{static_cast<::std::uint32_t>(nr_elems)};
    if (nr_elems <= 128) {
        // Tiny input: a single thread, no merge phase inside the kernel.
        ::kernel_var<1><<<1, 1>>>(dev_inp, dev_out, count);
        return;
    }
    if (nr_elems <= 1024) {
        ::kernel_var<8><<<1, 8>>>(dev_inp, dev_out, count);
        return;
    }
    if (nr_elems < 50'000) {
        ::kernel_var<12><<<1, 12>>>(dev_inp, dev_out, count);
        return;
    }
    if (nr_elems < 2'000'000) {
        ::kernel_var<16><<<2, 8>>>(dev_inp, dev_out, count);
        return;
    }
    // Largest inputs: full two-block, 24-thread launch.
    ::kernel_var<24><<<2, 12>>>(dev_inp, dev_out, count);
}
