/**
 * __device__ __forceinline__ int GetThreadNum(void);
 * @brief Get Thread(sip) number
 * @param
 * @return Thread(sip) number
 */

/**
 * __device__ __forceinline__ int GetThreadIdx(void)
 * @brief Get global thread(sip) idx
 * @param
 * @return global thread(sip) idx
 */

/**
 * __device__ void pooling_2d_common_max(int input_addr, int output_addr, int
 hi, int wi, int ci, int ho, int wo, int window_h, int window_w, int stride_h,
 int stride_w)
 * @brief 2D max pooling over an NHWC tile resident in L1 memory.
 * Preconditions:
 * hi == 4X  (hi must be a multiple of 4)
 * wi == 4X  (wi must be a multiple of 4)
 * ci == 16X (ci must be a multiple of 16)
 * window_h >= 2 && window_h <= 7
 * window_w >= 2 && window_w <= 7
 * stride_h >= 2 && stride_h <= 7
 * stride_w >= 2 && stride_w <= 7
 * Input DataFormat: [1, hi, wi, ci]
 * Output DataFormat: [1, ho, wo, ci]
 * @param input_addr input addr
 * @param output_addr output addr
 * @param hi    input h
 * @param wi    input w
 * @param ci    input c
 * @param ho    output h
 * @param wo    output w
 * @param window_h kernel_size on h
 * @param window_w kernel_size on w
 * @param stride_h stride on h
 * @param stride_w stride on w
 */

// L1_MEM_SIZE = 0x100000;  // 1M

/**
 * Cooperative max-pool-2d kernel. Tiles the input along H and distributes
 * H tiles cyclically across SIP threads; W and C tiles are iterated by every
 * thread. Input/output live in L3 as NCHW; each tile is staged through L1
 * and transposed to NHWC because pooling_2d_common_max consumes [1, h, w, c].
 *
 * NOTE(review): `padding` is currently unused and h, w, c are assumed to be
 * exact multiples of the tile sizes (full tiles only) — confirm with callers.
 */
__attribute__((global, cooperative)) void kernel_maxpool2d(
    float *__restrict dev_input, float *__restrict dev_out, const int n,
    const int c, const int h, const int w, const int h_o, const int w_o,
    const int kernel_size, const int stride, const int padding) {
  // Total number of SIP threads and this thread's global index.
  int thread_num = GetThreadNum();
  int thread_idx = GetThreadIdx();

  // DTE (DMA) contexts:
  //   dte_input       : L3 -> L1 slice of one NCHW input tile
  //   dte_input_trans : L1 NCHW -> NHWC transpose (pooling consumes NHWC)
  //   dte_out_trans   : L1 NHWC -> NCHW transpose of the pooling result
  //   dte_out         : L1 -> L3 deslice of the NCHW output tile
  // The [1] contexts are reserved for a double-buffered (ping-pong) variant
  // and stay idle in this single-buffered version.
  tops_dte_ctx_t dte_input[2];
  tops_dte_ctx_t dte_input_trans[2];
  tops_dte_ctx_t dte_out[2];
  tops_dte_ctx_t dte_out_trans[2];

  dte_input[0].init();
  dte_input[1].init();
  dte_input_trans[0].init();
  dte_input_trans[1].init();

  dte_out[0].init();
  dte_out[1].init();
  dte_out_trans[0].init();
  dte_out_trans[1].init();

  // Events signalled when the corresponding DMA transfer completes.
  tops::event event_input0;
  tops::event event_input1;
  tops::event e_out0;
  tops::event e_out1;

  // Tile sizes; adjustable. NOTE(review): HO/WO = H/W tile / 2 is only exact
  // for stride == 2 with a window that covers the tile evenly — revisit the
  // tiling if other stride/kernel combinations must be supported.
  int KERNEL_H_TILING = 16;
  int KERNEL_W_TILING = 16;
  int KERNEL_HO_TILING = KERNEL_H_TILING / 2;
  int KERNEL_WO_TILING = KERNEL_W_TILING / 2;
  int KERNEL_C_TILING = 16;

  //! L3 (global) shapes, NCHW.
  int32_t global_input_shape[4] = {n, c, h, w};
  int32_t global_out_shape[4] = {n, c, h_o, w_o};
  // L1 (private) tile shapes: *_trans shapes are NHWC, the others NCHW.
  int32_t private_input_trans_shape[4] = {1, KERNEL_H_TILING, KERNEL_W_TILING,
                                          KERNEL_C_TILING};
  int32_t private_input_shape[4] = {1, KERNEL_C_TILING, KERNEL_H_TILING,
                                    KERNEL_W_TILING};
  int32_t private_out_trans_shape[4] = {1, KERNEL_HO_TILING, KERNEL_WO_TILING,
                                        KERNEL_C_TILING};
  int32_t private_out_shape[4] = {1, KERNEL_C_TILING, KERNEL_HO_TILING,
                                  KERNEL_WO_TILING};

  // Element counts of one input/output tile in L1.
  int INPUT_L1_BUFFER_SIZE =
      1 * KERNEL_H_TILING * KERNEL_W_TILING * KERNEL_C_TILING;
  int OUT_L1_BUFFER_SIZE =
      1 * KERNEL_HO_TILING * KERNEL_WO_TILING * KERNEL_C_TILING;

  // L1 scratch memory, carved up sequentially below.
  __local__ __valigned__ char l1_buffer[104000];
  auto l1_mem_used_size = 0;
  // Input tile (NCHW) and its NHWC transpose.
  float *input_l1_buffer0 = reinterpret_cast<float *>(l1_buffer);
  l1_mem_used_size += INPUT_L1_BUFFER_SIZE * sizeof(float);
  float *input_trans_l1_buffer0 =
      reinterpret_cast<float *>(l1_buffer + l1_mem_used_size);
  l1_mem_used_size += INPUT_L1_BUFFER_SIZE * sizeof(float);

  // Output tile (NCHW) and the NHWC pooling result.
  float *out_l1_buffer0 =
      reinterpret_cast<float *>(l1_buffer + l1_mem_used_size);
  l1_mem_used_size += OUT_L1_BUFFER_SIZE * sizeof(float);
  float *out_trans_l1_buffer0 =
      reinterpret_cast<float *>(l1_buffer + l1_mem_used_size);
  l1_mem_used_size += OUT_L1_BUFFER_SIZE * sizeof(float);

  // mdspans over L3 memory.
  tops::mdspan global_input(tops::Global, dev_input, global_input_shape);
  tops::mdspan global_out(tops::Global, dev_out, global_out_shape);

  // mdspans over L1 memory.
  tops::mdspan private_input0(tops::Private, input_l1_buffer0,
                              private_input_shape);
  tops::mdspan private_input0_trans(tops::Private, input_trans_l1_buffer0,
                                    private_input_trans_shape);

  tops::mdspan private_out0(tops::Private, out_l1_buffer0, private_out_shape);
  tops::mdspan private_out0_trans(tops::Private, out_trans_l1_buffer0,
                                  private_out_trans_shape);

  // Tile counts per dimension (ceil-div).
  auto h_loops = (h + KERNEL_H_TILING - 1) / KERNEL_H_TILING;
  auto w_loops = (w + KERNEL_W_TILING - 1) / KERNEL_W_TILING;
  auto c_loops = (c + KERNEL_C_TILING - 1) / KERNEL_C_TILING;

  // Threads that actually get H tiles; surplus threads exit immediately.
  auto working_threads_number = h_loops <= thread_num ? h_loops : thread_num;
  if (thread_idx >= working_threads_number) {
    return;
  }

  // H tiles are dealt out cyclically: this thread processes tiles
  // thread_idx, thread_idx + T, thread_idx + 2T, ... (T = working threads),
  // so its iteration count is ceil((h_loops - thread_idx) / T).
  auto thread_h_loops = (h_loops - thread_idx + working_threads_number - 1) /
                        working_threads_number;
  auto thread_w_loops = w_loops;
  auto thread_c_loops = c_loops;

  // The original template chained DMAs with connect() (completion of one DMA
  // auto-triggers the next). Explicit per-transfer event waits are used below
  // instead: the ordering is self-evident and does not rely on connect()
  // auto-start semantics.

  for (auto b = 0; b < n; b++) {
    for (auto i = 0; i < thread_h_loops; i++) {
      // Cyclic H-tile distribution across the working threads.
      auto global_h_offset = thread_idx + i * working_threads_number;
      auto global_h_index = global_h_offset * KERNEL_H_TILING;
      auto global_ho_index = global_h_offset * KERNEL_HO_TILING;
      for (auto j = 0; j < thread_w_loops; j++) {
        auto global_w_offset = j;
        auto global_w_index = global_w_offset * KERNEL_W_TILING;
        auto global_wo_index = global_w_offset * KERNEL_WO_TILING;
        for (auto k = 0; k < thread_c_loops; ++k) {
          auto global_c_offset = k;
          auto global_c_index = global_c_offset * KERNEL_C_TILING;

          // pooling_2d_common_max reads/writes the NHWC staging buffers.
          long input_addr = (long)(input_trans_l1_buffer0);
          long out_addr = (long)(out_trans_l1_buffer0);

          // Load: slice one NCHW tile L3 -> L1, then transpose it to NHWC
          // ([1, h, w, c]), the layout pooling_2d_common_max expects.
          // TODO(review): confirm slice/transpose argument order and layout
          // encoding against the tops DTE API reference.
          event_input0 = tops::slice_async(
              dte_input[0], private_input0, global_input,
              {b, global_c_index, global_h_index, global_w_index});
          event_input0.wait();
          event_input1 =
              tops::transpose_async(dte_input_trans[0], private_input0_trans,
                                    private_input0, {0, 2, 3, 1});
          event_input1.wait();

          // Max pooling on the NHWC tile held in L1.
          pooling_2d_common_max(input_addr, out_addr, KERNEL_H_TILING,
                                KERNEL_W_TILING, KERNEL_C_TILING,
                                KERNEL_HO_TILING, KERNEL_WO_TILING, kernel_size,
                                kernel_size, stride, stride);

          // Store: transpose the NHWC result back to NCHW, then deslice the
          // tile L1 -> L3 at its (b, c, ho, wo) position.
          e_out0 = tops::transpose_async(dte_out_trans[0], private_out0,
                                         private_out0_trans, {0, 3, 1, 2});
          e_out0.wait();
          e_out1 = tops::deslice_async(
              dte_out[0], global_out, private_out0,
              {b, global_c_index, global_ho_index, global_wo_index});
          e_out1.wait();
        }
      }
    }
  }
  // Release the DTE contexts.
  dte_input[0].destroy();
  dte_input[1].destroy();
  dte_input_trans[0].destroy();
  dte_input_trans[1].destroy();
  dte_out[0].destroy();
  dte_out[1].destroy();
  dte_out_trans[0].destroy();
  dte_out_trans[1].destroy();
}

/**
 * Host-side launcher for the max-pool-2d device kernel.
 *
 * Launch configuration: a single block of 12 SIP threads. The kernel splits
 * work along the H dimension across threads, so inputs with larger H benefit
 * most from this parallelization.
 */
void GCU_MAXPOOL2D(float *__restrict dev_input, float *__restrict dev_out,
                   const int n, const int c, const int h, const int w,
                   const int h_o, const int w_o, const int kernel_size,
                   const int stride, const int padding) {
  constexpr size_t kBlocks = 1;
  constexpr size_t kThreads = 12;

  kernel_maxpool2d<<<kBlocks, kThreads>>>(dev_input, dev_out, n, c, h, w, h_o,
                                          w_o, kernel_size, stride, padding);
}
