#include <cstdint>

// Per-thread Welford accumulator for a streaming mean/variance computation.
// TODO: split into three parallel arrays (SoA layout) to enable SIMD.
struct WelfordData {
    ::std::uint32_t n{};  // number of samples folded in so far
    float mean{};         // running mean of the samples
    float m2{};           // running sum of squared deviations (M2 in Welford's algorithm)
};

// Chunk size (in floats) of the per-thread L1 staging buffer.
// NOTE(review): a macro is used instead of `inline constexpr` because the
// vendor toolchain reportedly does not treat constexpr globals as constants
// (and mis-mangles them) — confirm against the current compiler version.
#define L1_BUFFER_LEN ::std::size_t(128)

#pragma GCC optimize("Ofast")
// Computes the sample variance of dev_inp[0..nr_elems) into dev_out[0].
// Each of the 24 launched threads (<<<2, 12>>>) accumulates a Welford partial
// over a contiguous slice, publishes it to dev_welford_data, then the partials
// are pairwise-merged (threads 0..11 fold in 12..23) and finally folded to one
// result by thread 0.
// NOTE(review): -Ofast implies fast-math, which may reorder the floating-point
// updates below; acceptable for a contest kernel, but worth confirming.
__global__
void kernel_var(
        float const* const __restrict dev_inp,
        float * const __restrict dev_out,
        ::std::size_t const nr_elems,
        WelfordData * __restrict dev_welford_data) {
    tops_dte_ctx_t ctx;
    [[maybe_unused]] tops::dte_scope _(ctx); // dte guard
    ::std::size_t const thread_idx{blockIdx.x * blockDim.x + threadIdx.x};
    // Per-thread L1 staging buffer; data is copied here from L3 chunk by chunk.
    float l1_buf_lhs[L1_BUFFER_LEN];
    tops::mdspan l1_buf_lhs_view(tops::Private, l1_buf_lhs, L1_BUFFER_LEN);

    // BUGFIX: the launch is <<<2, 12>>> == 24 threads, so split the input into
    // 24 slices (ceil division). The previous divisor of 23 left thread 23
    // permanently idle and over-sized every other slice.
    ::std::size_t const elems_per_thread {(nr_elems + 23) / 24};
    ::WelfordData welford_data{};

    ::std::size_t const pos_end{::std::min(thread_idx * elems_per_thread + elems_per_thread, nr_elems)};
    for (::std::size_t index{thread_idx * elems_per_thread}; index < pos_end; index += L1_BUFFER_LEN) {
        ::std::size_t const chunk{::std::min(L1_BUFFER_LEN, pos_end - index)};
        // View of the current chunk in device (L3) memory.
        tops::mdspan l3_dev_inp_view(tops::Global, const_cast<float*>(dev_inp + index), chunk);
        // Stage the chunk into the per-thread L1 buffer.
        tops::memcpy(ctx, l1_buf_lhs_view, l3_dev_inp_view);

        // Scalar Welford update over the staged chunk. TODO: SIMD.
        for (::std::size_t j{}; j < chunk; ++j) {
            ++welford_data.n;
            float const delta {l1_buf_lhs[j] - welford_data.mean};
            welford_data.mean += delta / welford_data.n;
            welford_data.m2 += delta * (l1_buf_lhs[j] - welford_data.mean);
        }
    }

    // Publish this thread's partial accumulator to global memory.
    {
        ::tops::mdspan l3_dev_welford_single_data_view(::tops::Global, __builtin_addressof(dev_welford_data[thread_idx]), 1);
        // BUGFIX: the source lives on this thread's stack, so the span is
        // Private (matching the local `result` span below), not Global.
        ::tops::mdspan l1_dev_welford_single_data_view(::tops::Private, __builtin_addressof(welford_data), 1);
        ::tops::memcpy(ctx, l3_dev_welford_single_data_view, l1_dev_welford_single_data_view);
    }

    // NOTE(review): threads 0..11 below read partials written by threads
    // 12..23 of the *other* block; no inter-block barrier is visible here —
    // confirm the DTE/launch model guarantees that ordering, otherwise this
    // is a race.
    if (thread_idx > 11) {
        return;
    }

    // Reduction stage 1: thread i merges partial i+12 into partial i.
    {
        ::tops::mdspan l3_dev_welford_data_view(::tops::Global, dev_welford_data, 24);
        ::WelfordData l1_dev_welford_data[24];
        ::tops::mdspan l1_dev_welford_data_view(::tops::Private, l1_dev_welford_data, 24);
        ::tops::memcpy(ctx, l1_dev_welford_data_view, l3_dev_welford_data_view);

        ::WelfordData& lhs = l1_dev_welford_data[thread_idx];
        ::WelfordData const& rhs = l1_dev_welford_data[thread_idx + 12];
        // Chan et al. pairwise merge. BUGFIX: the original (a) computed delta
        // with the wrong sign (mean1 - mean2), (b) updated lhs.n to the total
        // *before* using it in the m2 cross term — collapsing n1*n2/(n1+n2)
        // to just n2 — and (c) wrote the *unmerged* local partial back to
        // global memory, silently discarding this whole stage. The final
        // reduction loop below already used the correct ordering.
        float const delta {rhs.mean - lhs.mean};
        ::std::uint32_t const n {lhs.n + rhs.n};
        if (n != 0) { // guard against 0/0 when both partials are empty
            lhs.mean += delta * rhs.n / n;
            lhs.m2   += rhs.m2 + delta * delta * (float(lhs.n) * rhs.n / n);
            lhs.n     = n;
        }

        ::tops::mdspan l3_dev_welford_single_data_view(::tops::Global, __builtin_addressof(dev_welford_data[thread_idx]), 1);
        ::tops::mdspan l1_dev_welford_single_data_view(::tops::Private, __builtin_addressof(lhs), 1);
        ::tops::memcpy(ctx, l3_dev_welford_single_data_view, l1_dev_welford_single_data_view);
    }

    // Only thread 0 performs the final fold.
    if (__builtin_expect(!!(thread_idx != 0), 1))
    {
        return;
    }

    // Reduction stage 2: fold the 12 merged partials and emit the variance.
    {
        ::tops::mdspan dev_welford_data_view(::tops::Global, dev_welford_data, 12);
        ::WelfordData l1_dev_welford_data[12];
        ::tops::mdspan l1_dev_welford_data_view(::tops::Private, l1_dev_welford_data, 12);
        ::tops::memcpy(ctx, l1_dev_welford_data_view, dev_welford_data_view);

        WelfordData& total = l1_dev_welford_data[0];

#pragma unroll
        for (int i = 1; i < 12; ++i) {
            auto& rhs = l1_dev_welford_data[i];
            float const delta = rhs.mean - total.mean;
            ::std::uint32_t const n {total.n + rhs.n};
            if (n != 0) { // guard against 0/0 when both partials are empty
                total.mean += delta * rhs.n / n;
                total.m2   += rhs.m2 + delta * delta * (float(total.n) * rhs.n / n);
                total.n     = n;
            }
        }

        // Bessel-corrected sample variance; requires total.n >= 2.
        float result {total.m2 / (float(total.n) - 1)};

        ::tops::mdspan result_view(::tops::Private, __builtin_addressof(result), 1);
        ::tops::mdspan dev_out_view(::tops::Global, dev_out, 1);
        ::tops::memcpy(ctx, dev_out_view, result_view);
    }
}

// Declaration aliased (via asm name) onto the runtime's topsMalloc,
// presumably to bypass a harness restriction on calling it directly.
// NOTE(review): the first parameter is declared void* but receives a
// WelfordData** — this matches topsMalloc(void**, size_t) at the call ABI
// level only; confirm the runtime signature.
void my_tops_alloc(void* ptr, ::std::size_t size) __asm__("topsMalloc");

// Host entry point: launches kernel_var with 2 blocks x 12 threads to compute
// the sample variance of dev_inp[0..nr_elems) into *dev_out.
void GCU_VAR(float * __restrict dev_inp, float * __restrict dev_out, const int nr_elems) {
    // TODO: could fewer cores be enabled for small inputs to improve performance?
    ::WelfordData* dev_welford_data [[clang::uninitialized]];
    // Scratch for 24 per-thread Welford partials (one per launched thread).
    ::my_tops_alloc(__builtin_addressof(dev_welford_data), sizeof(::WelfordData) * 24);
    ::kernel_var<<<2, 12>>>(dev_inp, dev_out, ::std::size_t(nr_elems), dev_welford_data);
    // dev_welford_data is intentionally never freed — a deliberate leak that,
    // per the original author's (translated) note, does not affect the
    // contest results.
}
