/**
 * __device__ void reduce_dim0_fp32(int in_addr_, int dim1, int dim0, int out_addr_)
 *
 * @brief Sums a [dim1, dim0] float32 tensor along its innermost dimension
 * (dim0), producing a [dim1] result.
 * Semantics: [dim1, dim0] -> [dim1]
 * @param in_addr_  address of the input buffer ([dim1, dim0] float32)
 * @param out_addr_ address of the output buffer ([dim1] float32)
 * @param dim1      size of the outer (kept) dimension
 * @param dim0      size of the inner (reduced) dimension
 */

// Round `a` up to the nearest multiple of `b` (requires b > 0).
// Every parameter use is parenthesized so expression arguments expand with
// the intended precedence (e.g. TEMPLATE_ALIGN_UP(n, x + y)); the original
// expansion `(a + b - 1) / b` broke when `b` was a compound expression.
#define TEMPLATE_ALIGN_UP(a, b) ((((a) + (b) - 1) / (b)) * (b))
// Alignment (in bytes) used when carving buffers out of L1 memory.
#define L1_ALIGN_SIZE (256)

// Row-wise softmax kernel: for each row of dev_input [rows, cols] computes
// exp(row) / sum(exp(row)) and writes the result to dev_out [rows, cols].
// Rows are distributed round-robin across all launched threads; each thread
// stages one row at a time through its private L1 buffers via DTE DMA.
__attribute__((global, cooperative)) void kernel_softmax(
    float *__restrict dev_input, float *__restrict dev_out, const int rows,
    const int cols) {
  // Total number of launched threads.
  int thread_num = GetThreadNum();
  // Index of the current thread.
  int thread_idx = GetThreadIdx();

  // Declare DTE (DMA transfer engine) contexts.
  // DTEs for the input operand.
  tops_dte_ctx_t dte_input[2];
  // DTEs for the computed result.
  tops_dte_ctx_t dte_out[2];

  // Initialize the DTEs.
  // NOTE(review): dte_input[1]/dte_out[1] (and event_input1/e_out1 below) are
  // initialized and destroyed but never used in between — double buffering
  // appears planned but not implemented.
  dte_input[0].init();
  dte_input[1].init();
  dte_out[0].init();
  dte_out[1].init();

  // Event: input DMA completed.
  tops::event event_input0;
  tops::event event_input1;

  // Event: result DMA completed.
  tops::event e_out0;
  tops::event e_out1;

  // Vector lane count for vfloat; the per-row tile is padded up to a
  // multiple of it so every column loop iterates in whole vectors.
  auto v_len = tops::vlength<vfloat>();
  int KERNEL_ROW_TILING = 1;
  int KERNEL_COL_TILING = TEMPLATE_ALIGN_UP(cols, v_len);

  int32_t global_input_shape[2] = {rows, cols};
  int32_t global_out_shape[2] = {rows, cols};

  int32_t private_input_shape[2] = {KERNEL_ROW_TILING, KERNEL_COL_TILING};
  int32_t private_exp_out_shape[2] = {KERNEL_ROW_TILING, KERNEL_COL_TILING};
  int32_t private_mask_shape[2] = {KERNEL_ROW_TILING, v_len};
  int32_t private_softmax_out_shape[2] = {KERNEL_ROW_TILING, KERNEL_COL_TILING};

  // input/out L1 buffer size
  int INPUT_L1_BUFFER_SIZE = KERNEL_ROW_TILING * KERNEL_COL_TILING;
  int OUT_L1_BUFFER_SIZE = KERNEL_ROW_TILING * KERNEL_COL_TILING;

  // L1 memory.
  // NOTE(review): l1_buffer is a fixed 104000 bytes but the carved regions
  // below scale with KERNEL_COL_TILING (derived from runtime `cols`); there is
  // no overflow check here — TODO confirm callers bound `cols` appropriately.
  __local__ __valigned__ char l1_buffer[104000];
  __shared__ __valigned__ char l2_buffer[1024 * 1024 * 24];
  auto l1_mem_used_size = 0;
  // Input row staging buffer.
  float *input_l1_buffer0 = reinterpret_cast<float *>(l1_buffer);
  l1_mem_used_size += TEMPLATE_ALIGN_UP(
      KERNEL_ROW_TILING * KERNEL_COL_TILING * sizeof(float), L1_ALIGN_SIZE);
  // exp(x) intermediate buffer.
  float *exp_out_l1_buffer0 =
      reinterpret_cast<float *>(l1_buffer + l1_mem_used_size);
  l1_mem_used_size += TEMPLATE_ALIGN_UP(
      KERNEL_ROW_TILING * KERNEL_COL_TILING * sizeof(float), L1_ALIGN_SIZE);

  // One vector's worth of additive mask for the padded tail (see below).
  float *mask_l1_buffer0 =
      reinterpret_cast<float *>(l1_buffer + l1_mem_used_size);
  l1_mem_used_size += TEMPLATE_ALIGN_UP(
      KERNEL_ROW_TILING * v_len * sizeof(float), L1_ALIGN_SIZE);

  // Final softmax result buffer (also reused as pad scratch before the loop).
  float *softmax_out_l1_buffer0 =
      reinterpret_cast<float *>(l1_buffer + l1_mem_used_size);
  l1_mem_used_size += TEMPLATE_ALIGN_UP(
      KERNEL_ROW_TILING * KERNEL_COL_TILING * sizeof(float), L1_ALIGN_SIZE);

  // mdspan views over L3 (global) memory.
  tops::mdspan global_input(tops::Global, dev_input, global_input_shape);
  tops::mdspan global_out(tops::Global, dev_out, global_out_shape);

  // mdspan views over the private L1 buffers.
  tops::mdspan private_input0(tops::Private, input_l1_buffer0,
                              private_input_shape);
  tops::mdspan private_exp_out0(tops::Private, exp_out_l1_buffer0,
                                private_exp_out_shape);
  tops::mdspan private_mask(tops::Private, mask_l1_buffer0, private_mask_shape);
  tops::mdspan private_softmax_out0(tops::Private, softmax_out_l1_buffer0,
                                    private_softmax_out_shape);

  // Split `rows` into row_loops tiles and spread them over at most
  // `thread_num` threads; each thread gets base_row_loops_each_thread tiles
  // plus one extra if its index falls inside the remainder.
  auto row_loops = (rows + KERNEL_ROW_TILING - 1) / KERNEL_ROW_TILING;
  auto working_threads_number =
      row_loops <= thread_num ? row_loops : thread_num;

  auto base_row_loops_each_thread = row_loops / working_threads_number;
  auto thread_row_loops =
      base_row_loops_each_thread +
      ((base_row_loops_each_thread * working_threads_number + thread_idx) <
       row_loops);

  if ((KERNEL_COL_TILING) > cols) {
    // cols is not a multiple of v_len: build an additive mask vector of
    // [0 ... 0, -1000 ... -1000] (cols % v_len zeros, then pad_len of -1000)
    // so that the padded tail lanes become exp(x - 1000) ~ 0 and do not
    // perturb the row sum. NOTE(review): uses -1000.0f rather than -inf;
    // assumes inputs are small enough that exp underflows — TODO confirm
    // the expected input range.
    u_int32_t pad_len = KERNEL_COL_TILING - cols;
    u_int32_t pad_low[] = {0, 0};
    u_int32_t pad_high[] = {0, pad_len};
    u_int32_t pad_mid[] = {0, 0};

    int32_t private_tmp_shape[2] = {KERNEL_ROW_TILING, cols % v_len};

    // softmax_out_l1_buffer0 is free at this point, so it serves as the
    // zeroed source that pad() extends with -1000.0f into private_mask.
    tops::mdspan private_tmp(tops::Private, softmax_out_l1_buffer0,
                             private_tmp_shape);
    tops::memset(dte_input[0], private_tmp, 0);
    tops::pad(dte_input[0], private_mask, private_tmp, pad_low, pad_high,
              pad_mid, -1000.0f);
  } else {
    // No padding needed: the tail mask is all zeros (a no-op when added).
    tops::memset(dte_input[0], private_mask, 0);
  }

  for (auto r = 0; r < thread_row_loops; r++) {
    // Round-robin row assignment: thread t handles rows t, t+W, t+2W, ...
    auto global_r_offset = r * working_threads_number + thread_idx;
    auto global_r_index = global_r_offset * KERNEL_ROW_TILING;
    if (global_r_index < rows) {
      // DMA one input row from global memory into L1, then wait for it.
      event_input0 = tops::slice_async(dte_input[0], private_input0,
                                       global_input, {global_r_index, 0});
      tops::wait(event_input0);

      // exp() over all full vectors except the last one.
      for (int k = 0; k < KERNEL_COL_TILING - v_len; k += v_len) {
        const vfloat &fv = tops::vload<vfloat>(input_l1_buffer0 + k);
        auto vexp = tops::vexp(fv);
        tops::vstore(vexp, exp_out_l1_buffer0 + k);
      }

      // Last vector: add the tail mask first so padded lanes vanish under exp.
      const vfloat &fv =
          tops::vload<vfloat>(input_l1_buffer0 + KERNEL_COL_TILING - v_len);
      const vfloat &mask = tops::vload<vfloat>(mask_l1_buffer0);
      const auto tmp = tops::vadd(fv, mask);
      auto vexp = tops::vexp(tmp);
      tops::vstore(vexp, exp_out_l1_buffer0 + KERNEL_COL_TILING - v_len);

      // Reduce-sum the exp() row via the external helper (see file header).
      // NOTE(review): no max-subtraction is performed before exp(), so large
      // inputs can overflow — this is the numerically naive softmax form.
      float sum = 0.0f;
      long input_addr = (long)(exp_out_l1_buffer0);
      long out_addr = (long)(&sum);
      reduce_dim0_fp32(input_addr, KERNEL_ROW_TILING, KERNEL_COL_TILING,
                       out_addr);

      // Divide every exp() value by the row sum.
      auto vsum = tops::vbroadcast<vfloat, float>(sum);
      for (int k = 0; k < KERNEL_COL_TILING; k += v_len) {
        const vfloat &fv = tops::vload<vfloat>(exp_out_l1_buffer0 + k);
        auto vsoftmax_value = tops::vdiv(fv, vsum);
        tops::vstore(vsoftmax_value, softmax_out_l1_buffer0 + k);
      }

      // DMA the finished row back to global memory and wait for completion.
      // NOTE(review): private_softmax_out0 spans KERNEL_COL_TILING >= cols
      // columns; assumes deslice clips to global_out's [rows, cols] shape —
      // confirm against the tops::deslice_async contract.
      e_out0 = tops::deslice_async(dte_out[0], global_out, private_softmax_out0,
                                   {global_r_index, 0});
      tops::wait(e_out0);
    }
  }

  // Destroy the DTEs.
  dte_input[0].destroy();
  dte_input[1].destroy();
  dte_out[0].destroy();
  dte_out[1].destroy();
}

void GCU_SOFTMAX(float *__restrict dev_input, float *__restrict dev_out,
                 const int rows, const int cols) {
  static const size_t blocks = 2;
  static const size_t threads = 12;

  // 调用kernel
  kernel_softmax<<<blocks, threads>>>(dev_input, dev_out, rows, cols);
}
