/**
 * __device__ __forceinline__ int GetThreadNum(void);
 * @brief Get Thread(sip) number
 * @param
 * @return Thread(sip) number
 */

/**
 * __device__ __forceinline__ int GetThreadIdx(void)
 * @brief Get global thread(sip) idx
 * @param
 * @return global thread(sip) idx
 */

/**
 * __device__ void pooling_2d_common_max(int input_addr, int output_addr, int
 hi, int wi, int ci, int ho, int wo, int window_h, int window_w, int stride_h,
 int stride_w)
 * @brief
 * hi == 4X
 * wi == 4X
 * ci == 16X
 * window_h >= 2 && window_h <= 7
 * window_w >= 2 && window_w <= 7
 * stride_h >= 2 && stride_h <= 7
 * stride_w >= 2 && stride_w <= 7
 * Input DataFormat: [1, hi, wi, ci]
 * Output DataFormat: [1, ho, wo, ci]
 * @param input_addr input addr
 * @param output_addr output addr
 * @param hi    input h
 * @param wi    input w
 * @param ci    input c
 * @param ho    output h
 * @param wo    output w
 * @param window_h kernel_size on h
 * @param window_w kernel_size on w
 * @param stride_h stride on h
 * @param stride_w stride on w
 */

// L1_MEM_SIZE = 0x100000;  // 1M

/**
 * Device kernel: 2D max pooling over an NCHW tensor.
 *
 * Pipeline per tile: slice (L3 NCHW -> L1), transpose to NHWC, call
 * pooling_2d_common_max on the NHWC tile, transpose back to NCHW,
 * deslice (L1 -> L3). DTE contexts are chained with connect() so the
 * transpose fires automatically when the preceding DMA completes.
 *
 * @param dev_input  device pointer to input, layout [n, c, h, w]
 * @param dev_out    device pointer to output, layout [n, c, h_o, w_o]
 * @param n, c, h, w        input dimensions
 * @param h_o, w_o          output dimensions
 * @param kernel_size       pooling window (square: used for both h and w)
 * @param stride            pooling stride (used for both h and w)
 * @param padding           NOTE(review): unused in this kernel — confirm
 *                          callers only pass padding == 0.
 *
 * NOTE(review): the output tile index is computed as input_index / 2 and
 * KERNEL_HO/WO_TILING = KERNEL_H/W_TILING / 2, i.e. this kernel assumes
 * stride == 2 even though `stride` is a parameter — confirm.
 * NOTE(review): tiles are full-sized slices; h, w, c that are not exact
 * multiples of the tiling constants would read/write out of range —
 * presumably callers guarantee divisibility (see preconditions above).
 */
__attribute__((global, cooperative)) void kernel_maxpool2d(
    float *__restrict dev_input, float *__restrict dev_out, const int n,
    const int c, const int h, const int w, const int h_o, const int w_o,
    const int kernel_size, const int stride, const int padding)
{
    // Total number of hardware threads (sips) available to the launch.
    int thread_num = GetThreadNum();
    // Global index of this thread (sip).
    int thread_idx = GetThreadIdx();

    // DTE (DMA transfer engine) contexts.
    // Input-side DTEs: L3 -> L1 slice, then NCHW -> NHWC transpose.
    tops_dte_ctx_t dte_input[2];
    tops_dte_ctx_t dte_input_trans[2];
    // Output-side DTEs: NHWC -> NCHW transpose, then L1 -> L3 deslice.
    tops_dte_ctx_t dte_out[2];
    tops_dte_ctx_t dte_out_trans[2];

    // Initialize all DTE contexts.
    // NOTE(review): index-1 contexts/buffers/events are set up for double
    // buffering but the main loop below only ever uses index 0 — the
    // ping-pong overlap is not actually implemented.
    dte_input[0].init();
    dte_input[1].init();

    dte_input_trans[0].init();
    dte_input_trans[1].init();

    dte_out[0].init();
    dte_out[1].init();

    dte_out_trans[0].init();
    dte_out_trans[1].init();

    // Events signaled when the input DMA stages complete.
    tops::event event_input0;
    tops::event event_input1;

    tops::event event_input_trans0;
    tops::event event_input_trans1;

    // Events signaled when the output DMA stages complete.
    tops::event e_out0;
    tops::event e_out1;

    tops::event e_out_trans0;
    tops::event e_out_trans1;

    // NOTE: this tiling scheme can be tuned as needed.
    // Output tiles are half the input tiles, consistent with stride == 2.
    int KERNEL_H_TILING = 16;
    int KERNEL_W_TILING = 32;
    int KERNEL_HO_TILING = KERNEL_H_TILING / 2;
    int KERNEL_WO_TILING = KERNEL_W_TILING / 2;
    int KERNEL_C_TILING = 16;

    // L3 (global memory) shapes.
    // L3 input shape, NCHW.
    int32_t global_input_shape[4] = {n, c, h, w};
    // L3 output shape, NCHW.
    int32_t global_out_shape[4] = {n, c, h_o, w_o};

    // L1 (private memory) shapes.
    // L1 transposed input tile, N * H * W * C.
    int32_t private_input_trans_shape[4] = {1, KERNEL_H_TILING, KERNEL_W_TILING, KERNEL_C_TILING};

    // L1 input tile, N * C * H * W.
    int32_t private_input_shape[4] = {1, KERNEL_C_TILING, KERNEL_H_TILING, KERNEL_W_TILING};

    // L1 transposed output tile, N * H * W * C.
    int32_t private_out_trans_shape[4] = {1, KERNEL_HO_TILING, KERNEL_WO_TILING, KERNEL_C_TILING};

    // L1 output tile, N * C * H * W.
    int32_t private_out_shape[4] = {1, KERNEL_C_TILING, KERNEL_HO_TILING, KERNEL_WO_TILING};

    // Element counts of the input/output L1 tiles.
    static const int INPUT_L1_BUFFER_SIZE = 1 * KERNEL_H_TILING * KERNEL_W_TILING * KERNEL_C_TILING;
    static const int OUT_L1_BUFFER_SIZE = 1 * KERNEL_HO_TILING * KERNEL_WO_TILING * KERNEL_C_TILING;

    // L1 buffers (pairs declared for double buffering; only *_buffer0 is
    // used by the loop below).
    __valigned__ float input_l1_buffer0[INPUT_L1_BUFFER_SIZE];
    __valigned__ float input_l1_buffer1[INPUT_L1_BUFFER_SIZE];

    __valigned__ float input_trans_l1_buffer0[INPUT_L1_BUFFER_SIZE];
    __valigned__ float input_trans_l1_buffer1[INPUT_L1_BUFFER_SIZE];

    __valigned__ float out_l1_buffer0[OUT_L1_BUFFER_SIZE];
    __valigned__ float out_l1_buffer1[OUT_L1_BUFFER_SIZE];

    __valigned__ float out_trans_l1_buffer0[OUT_L1_BUFFER_SIZE];
    __valigned__ float out_trans_l1_buffer1[OUT_L1_BUFFER_SIZE];

    // mdspan views over L3 memory.
    tops::mdspan global_input(tops::Global, dev_input, global_input_shape);
    tops::mdspan global_out(tops::Global, dev_out, global_out_shape);

    // mdspan views over the L1 buffers.
    tops::mdspan private_input0(tops::Private, input_l1_buffer0, private_input_shape);
    tops::mdspan private_input1(tops::Private, input_l1_buffer1, private_input_shape);

    tops::mdspan private_input0_trans(tops::Private, input_trans_l1_buffer0, private_input_trans_shape);
    tops::mdspan private_input1_trans(tops::Private, input_trans_l1_buffer1, private_input_trans_shape);

    tops::mdspan private_out0(tops::Private, out_l1_buffer0, private_out_shape);
    tops::mdspan private_out1(tops::Private, out_l1_buffer1, private_out_shape);

    tops::mdspan private_out0_trans(tops::Private, out_trans_l1_buffer0, private_out_trans_shape);
    tops::mdspan private_out1_trans(tops::Private, out_trans_l1_buffer1, private_out_trans_shape);

    // Number of tiles along H (ceil division).
    auto h_loops = (h + KERNEL_H_TILING - 1) / KERNEL_H_TILING;
    // Number of tiles along W.
    auto w_loops = (w + KERNEL_W_TILING - 1) / KERNEL_W_TILING;
    // Number of tiles along C.
    auto c_loops = (c + KERNEL_C_TILING - 1) / KERNEL_C_TILING;

    // Threads that actually have work: at most one thread per H tile.
    auto working_threads_number = h_loops <= thread_num ? h_loops : thread_num;
    // Retire threads with no H tile to process.
    if (thread_idx >= working_threads_number)
    {
        return;
    }

    // Distribute the h_loops H tiles across working threads in a strided
    // fashion: thread t handles tiles t, t+W, t+2W, ... (W = number of
    // working threads).
    auto base_loops = h_loops / working_threads_number;
    // The first (h_loops % working_threads_number) threads get one extra tile.
    auto thread_h_loops = base_loops + (base_loops * working_threads_number + thread_idx < h_loops);

    auto thread_w_loops = w_loops;
    auto thread_c_loops = c_loops;

    // Chain the DTE pairs: when the first DMA completes, the connected one
    // is triggered automatically without a manual kick-off.
    dte_input[0].connect(dte_input_trans[0]);
    dte_input[1].connect(dte_input_trans[1]);
    dte_out_trans[0].connect(dte_out[0]);
    dte_out_trans[1].connect(dte_out[1]);

    for (auto b = 0; b < n; b++) // batch loop
    {
        for (auto i = 0; i < thread_h_loops; i++) // parallelized over H tiles
        {
            // Strided tile assignment: the i-th tile of this thread is
            // tile (i * working_threads + thread_idx) along H.
            auto global_h_offset = i * working_threads_number + thread_idx;
            auto global_h_index = global_h_offset * KERNEL_H_TILING;
            // Output H index; the /2 matches the stride==2 assumption.
            auto global_ho_index = global_h_index / 2;

            for (auto j = 0; j < thread_w_loops; j++)
            {
                auto global_w_offset = j;
                auto global_w_index = global_w_offset * KERNEL_W_TILING;
                auto global_wo_index = global_w_index / 2;

                for (auto k = 0; k < thread_c_loops; ++k)
                {
                    auto global_c_offset = k;
                    auto global_c_index = global_c_offset * KERNEL_C_TILING;

                    // Raw L1 addresses of the NHWC tiles consumed/produced
                    // by pooling_2d_common_max.
                    long input_addr = (long)(input_trans_l1_buffer0);
                    long out_addr = (long)(out_trans_l1_buffer0);

                    // Input data flow: slice the NCHW tile from L3 into L1;
                    // the connected dte_input_trans[0] then transposes it
                    // to NHWC automatically.
                    event_input0 = tops::slice_async(dte_input[0], private_input0, global_input, {b, global_c_index, global_h_index, global_w_index});

                    event_input_trans0 = tops::transpose_async(dte_input_trans[0], private_input0_trans, private_input0, {0, 2, 3, 1});
                    // Waiting on the transpose also covers the slice, since
                    // the transpose only runs after the slice completes.
                    tops::wait(event_input_trans0);
                    // Max pooling on the NHWC tile in L1.
                    pooling_2d_common_max(input_addr, out_addr, KERNEL_H_TILING,
                                          KERNEL_W_TILING, KERNEL_C_TILING,
                                          KERNEL_HO_TILING, KERNEL_WO_TILING, kernel_size,
                                          kernel_size, stride, stride);

                    // Output data flow: transpose NHWC -> NCHW; the
                    // connected dte_out[0] then deslices the tile back to
                    // L3 automatically.
                    e_out_trans0 = tops::transpose_async(dte_out_trans[0], private_out0, private_out0_trans, {0, 3, 1, 2});
                    // Store L1 -> L3 at the output tile origin.
                    e_out0 = tops::deslice_async(dte_out[0], global_out, private_out0, {b, global_c_index, global_ho_index, global_wo_index});
                    // Waiting on the deslice also covers the transpose.
                    tops::wait(e_out0);
                }
            }
        }
    }
    // Release all DTE contexts.
    dte_input[0].destroy();
    dte_input[1].destroy();
    dte_input_trans[0].destroy();
    dte_input_trans[1].destroy();
    dte_out[0].destroy();
    dte_out[1].destroy();
    dte_out_trans[0].destroy();
    dte_out_trans[1].destroy();
}

/**
 * Host-side entry point: launches the max-pooling kernel on the GCU.
 *
 * @param dev_input   device pointer to input, layout [n, c, h, w]
 * @param dev_out     device pointer to output, layout [n, c, h_o, w_o]
 * @param n, c, h, w  input dimensions
 * @param h_o, w_o    output dimensions
 * @param kernel_size pooling window size (square)
 * @param stride      pooling stride
 * @param padding     pooling padding
 */
void GCU_MAXPOOL2D(float *__restrict dev_input, float *__restrict dev_out,
                   const int n, const int c, const int h, const int w,
                   const int h_o, const int w_o, const int kernel_size,
                   const int stride, const int padding)
{
    // Fixed launch configuration: 2 blocks of 12 threads (sips) each.
    static const size_t kLaunchBlocks = 2;
    static const size_t kLaunchThreads = 12;

    // Example kernel: it tiles and parallelizes along the H dimension, so
    // performance is best when H is large.
    kernel_maxpool2d<<<kLaunchBlocks, kLaunchThreads>>>(
        dev_input, dev_out, n, c, h, w, h_o, w_o, kernel_size, stride,
        padding);
}