#include "broadcast_helper.h"
#include "kernel_operator.h"
#include "local_list.h"
#include "reduce_tensorflow.h"
#include "simple_hashmap.h"
#include "simple_transpose.h"
#include <cstdint>
#include <type_traits>
using namespace AscendC;
// Streams a 1-D input tensor X through the vector unit tile-by-tile and
// accumulates the sum of all elements; the (block-aligned) result is written
// to the start of Y by process().
template <typename _DT_X, typename _DT_Y> struct SumReducer {
  DefInTensor(X);
  DefInTensor(Y);
  DefBufVECIN(SUM_VALUE);   // per-tile ReduceSum result (one 32B block)
  DefBufVECIN(TOTAL_VALUE); // running total across all tiles (one 32B block)
  DefBufVECIN(WORK_LOCAL);  // scratch buffer required by ReduceSum
  int64_t tileLength = 0;   // elements moved/reduced per tile
  __aicore__ inline SumReducer() {}

  // Binds the global tensors, sizes the tile so the queue plus the three
  // small buffers fit in UB, and zero-initializes the running total.
  __aicore__ inline void init(TPipe &pipe, GlobalTensor<_DT_X> x,
                              GlobalTensor<_DT_Y> y) {
    GTensor(X) = x;
    GTensor(Y) = y;
    // UB budget minus reserved bytes; split three ways, then round the tile
    // byte size down to a 256-byte multiple before converting to elements.
    constexpr int ub_size = 200 * 1024 - 256 - 32 - 32;
    auto split_ub = int(float(ub_size) / 3.0f);
    tileLength = split_ub / 256 * 256 / sizeof(_DT_X);
    InitQueueSimple(X, tileLength);
    InitTBufBuffer(SUM_VALUE, 32);
    InitTBufBuffer(TOTAL_VALUE, 32);
    InitTBufBuffer(WORK_LOCAL, workLocalSizeBtyte<float>(tileLength));
    TBufGet(TOTAL_VALUE, float);
    // Zero the accumulator block before the first compute().
    MY_DUP_INT32(BTensor(TOTAL_VALUE), zeroFloatInt, elementsPerBlock<float>());
  }

  // Contiguous copy-in: enqueue tile i (calcCount elements) from global X.
  __aicore__ inline void copyIn(int i, int calcCount) {
    EnQueGlobal2Local(X, i * tileLength, calcCount);
  }

  // Strided copy-in: gathers calcCount elements one at a time starting at
  // `index + i*tileLength*stride`, stepping `stride` elements in global X.
  // NOTE(review): element-wise GetValue/SetValue gather — expected to be slow;
  // confirm this path is only used for small strided tails.
  __aicore__ inline void copyInWithStride(int64_t index, int i, int64_t stride,
                                          int calcCount) {
    QueAllocSimple(X);
    auto startIdx = i * tileLength * stride;
    for (auto j = 0; j < calcCount; ++j) {
      auto targetIndex = index + startIdx + j * stride;
      LTensor(X)(j) = GTensor(X)(targetIndex);
    }
    EnQue(X);
  }

  // Reduces the current tile to a scalar and adds it into TOTAL_VALUE.
  __aicore__ inline void compute(int calcCount) {
    DeQueSimple(X);
    TBufGet(SUM_VALUE, float);
    TBufGet(TOTAL_VALUE, float);
    TBufGet(WORK_LOCAL, float);
    ReduceSum(BTensor(SUM_VALUE), LTensor(X), BTensor(WORK_LOCAL), calcCount);
    Add(BTensor(TOTAL_VALUE), BTensor(TOTAL_VALUE), BTensor(SUM_VALUE), 1);
    QueFree(X);
  }

  // Full reduction of X: whole tiles, then the remainder (copied in with
  // block-aligned length but reduced over only finalLength elements), then
  // the accumulator block is flushed to Y.
  __aicore__ inline void process() {
    auto loopCount = GTensor(X).GetSize() / tileLength;
    for (auto i = 0; i < loopCount; ++i) {
      copyIn(i, tileLength);
      compute(tileLength);
    }
    auto finalLength = GTensor(X).GetSize() % tileLength;
    if (finalLength > 0) {
      copyIn(loopCount, ALIGN_TO(finalLength, elementsPerBlock<float>()));
      compute(finalLength);
    }
    TBufGet(TOTAL_VALUE, float);
    DataCopy(GTensor(Y), BTensor(TOTAL_VALUE), elementsPerBlock<float>());
  }

  // Column-wise reduction: reduce dim0 elements per column for dim1 columns,
  // with `stride` elements between consecutive items of a column.
  // NOTE(review): this routine is not called anywhere in this file and looks
  // half-finished — it still loads contiguous tiles via copyIn() so `stride`
  // and the column index are never honored (copyInWithStride exists but is
  // unused here), TOTAL_VALUE is never reset between columns, and every
  // column's result is written to Y offset 0. Confirm intended semantics
  // before enabling.
  __aicore__ inline void processWithStide(int64_t dim0, int64_t dim1,
                                          int64_t stride) {
    auto loopCount = dim0 / tileLength;
    for (auto j = 0; j < dim1; ++j) {
      for (auto i = 0; i < loopCount; ++i) {
        copyIn(i, tileLength);
        compute(tileLength);
      }
      // Tail of one column: remainder of dim0. (Was GetSize() % tileLength,
      // which is only correct in the degenerate case dim0 == GetSize().)
      auto finalLength = dim0 % tileLength;
      if (finalLength > 0) {
        copyIn(loopCount, ALIGN_TO(finalLength, elementsPerBlock<float>()));
        compute(finalLength);
      }
      TBufGet(TOTAL_VALUE, float);
      DataCopy(GTensor(Y), BTensor(TOTAL_VALUE), elementsPerBlock<float>());
    }
  }
};

// Reduce-sum kernel: sums X over the axes listed in AXES and writes the
// reduced tensor to Y. The active path (Process2/Process3) is a scalar
// element-wise port of TFLite's reference reduce; the tiled/vectorized
// variant is kept below as commented-out reference code.
template <typename _DT_X, typename _DT_AXES, typename _DT_Y> class Kernel {
public:
  TPipe pipe;
  ShapeData xShape;    // shape of input X
  ShapeData axesShape; // shape of the 1-D AXES tensor
  ReductionHelper helper;
  int64_t axis[MAX_SHAPE_DIM] = {0}; // raw axis values as read from AXES
  int64_t num_axis = 0;              // number of valid entries in axis[]
  DefGlobalTensor(AXES);
  DefGlobalTensor(X);
  DefGlobalTensor(Y);
  int64_t leafSize = 0;
  int64_t ySize = 0; // element count of the output Y

public:
  __aicore__ inline Kernel() {}

  // Binds the global buffers and derives the output element count. An empty
  // AXES tensor means "reduce over all axes" (scalar output), matching
  // TensorFlow semantics.
  template <typename T>
  __aicore__ inline void Init(GM_ADDR x, GM_ADDR axes, GM_ADDR y,
                              T &tiling_data) {
    xShape.init(tiling_data.x_shape);
    axesShape.init(tiling_data.axes_shape);
    // if constexpr (std::is_same_v<stackT, LocalList<int>>) {
    //   stack.init(pipe, tiling_data.stackSize);
    // } else {
    //   stack.init(workspace);
    // }
    // if constexpr (std::is_same_v<retListT, LocalList<TypeOf(X)>>) {
    //   retList.init(pipe, tiling_data.retListSize);
    // } else {
    //   retList.init(workspace + tiling_data.stackSize);
    // }
    // leafSize = tiling_data.leafSize;
    // Partition global memory.
    GSetBuffer(X, x, 0, xShape.size);
    GSetBuffer(AXES, axes, 0, axesShape.size);
    ySize = xShape.size;
    if (axesShape.size > 0) {
      for (auto i = 0; i < axesShape.size; ++i) {
        axis[i] = GTensor(AXES).GetValue(i);
        // Support TF-style negative axes when sizing the output. The raw
        // value is kept in axis[] — ResolveAxis presumably normalizes it
        // again on the compute path (TODO confirm against reduce_tensorflow.h).
        auto dim = axis[i] < 0 ? axis[i] + xShape.dimNum : axis[i];
        ySize /= xShape[dim];
      }
      num_axis = axesShape.size;
    } else {
      // No axes given: reduce everything to a single scalar.
      for (auto i = 0; i < xShape.dimNum; ++i) {
        axis[i] = i;
      }
      ySize = 1;
      num_axis = xShape.dimNum;
    }
    GSetBuffer(Y, y, 0, ySize);
  }

  // Scalar reference reduction (port of TFLite ReduceSumImpl): walks every
  // input element via an odometer-style index, maps it to its reduced output
  // offset, and accumulates in place. output_data must be pre-zeroed.
  template <typename T>
  __aicore__ inline bool
  reduce_sum(GlobalTensor<T> &input_data, const int64_t *input_dims,
             const int64_t input_num_dims, const int64_t *axis,
             const int64_t num_axis, GlobalTensor<T> &output_data) {
    int64_t input_iter[MAX_SHAPE_DIM] = {0};
    do {
      size_t input_offset = ReducedOutputOffset(input_num_dims, input_dims,
                                                input_iter, 0, nullptr);
      size_t output_offset = ReducedOutputOffset(input_num_dims, input_dims,
                                                 input_iter, num_axis, axis);
      SIMPLE_ADD(TypeOf(X), output_data(output_offset),
                 output_data(output_offset), input_data(input_offset));
    } while (NextIndex(input_num_dims, input_dims, input_iter));
    return true;
  }

  // Kept for backward compatibility; identical to Process3.
  __aicore__ inline void Process2() { ProcessImpl(); }

  // Reference: tensorflow/core/kernels/reduction_ops_common.h
  // Search for: ReductionOp
  // __aicore__ inline void Process() {
  //   // printf("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n");
  //   // Deduplicate axis values.
  //   IntHashMap<MAX_SHAPE_DIM, int, int> cache;
  //   for (auto i = 0; i < num_axis; ++i) {
  //     cache.set(axis[i], 1);
  //   }
  //   num_axis = cache.size_;
  //   for (auto i = 0; i < num_axis; ++i) {
  //     axis[i] = cache.data_[i].key;
  //   }
  //   helper.Simplify(xShape, axis, num_axis, false);
  //   bool is_scalar_identity = true;
  //   // is_trivial means the reduction leaves the input unchanged.
  //   bool is_trivial = helper.ndims() == 0 ||
  //                     (helper.ndims() == 1 && !helper.reduce_first_axis_);
  //   if (is_scalar_identity && is_trivial) {
  //     // Special case: plain copy.
  //     for (auto i = 0; i < xShape.size; ++i) {
  //       GTensor(Y).SetValue(i, GTensor(X).GetValue(i));
  //     }
  //     return;
  //   }
  //   if ((helper.ndims() == 1) && helper.reduce_first_axis_) {
  //     auto sum_value = fullReduce(GTensor(X), 0, xShape.size, &stack,
  //     &retList); GTensor(Y).SetValue(0, sum_value);
  //   } else if ((helper.ndims() == 2) && helper.reduce_first_axis_) {
  //     auto stride = helper.data_reshape_[1];
  //     for (auto i = 0; i < helper.data_reshape_[1]; ++i) {
  //       TypeOf(Y) accum = 0;
  //       for (auto j = 0; j < helper.data_reshape_[0]; ++j) {
  //         SIMPLE_ADD(TypeOf(X), accum, accum,
  //                    GTensor(X).GetValue(i + j * stride));
  //       }
  //       GTensor(Y).SetValue(i, accum);
  //     }
  //   } else if ((helper.ndims() == 2) && !helper.reduce_first_axis_) {
  //     auto numValues = helper.data_reshape_[1];
  //     auto index = 0;
  //     while (index < helper.data_reshape_[0]) {
  //       auto v = InnerMostDimReducer<TypeOf(X), false, true>::reduce(
  //           GTensor(X), index * numValues, numValues, &stack, &retList);
  //       GTensor(Y).SetValue(index++, v);
  //       stack.clear();
  //       retList.clear();
  //     }
  //   } else if ((helper.ndims() == 3) && helper.reduce_first_axis_) {
  //     int64_t strides[2] = {1};
  //     int64_t dims[2] = {0};
  //     dims[0] = helper.data_reshape_[0];
  //     dims[1] = helper.data_reshape_[2];
  //     strides[0] = helper.data_reshape_[1] * helper.data_reshape_[2];
  //     strides[1] = 1;
  //     auto loopStride = helper.data_reshape_[2];
  //     for (auto i = 0; i < helper.data_reshape_[1]; ++i) {
  //       TypeOf(Y) accum = 0;
  //       GenericDimReducer<1, TypeOf(X)>::reduce(GTensor(X), i * loopStride,
  //                                               &accum, dims, strides);
  //       GTensor(Y).SetValue(i, accum);
  //     }
  //   } else if ((helper.ndims() == 3) && !helper.reduce_first_axis_) {
  //     auto numValues = helper.data_reshape_[1];
  //     auto compute_cost = numValues * functor_traits<TypeOf(X)>::Cost;
  //     int64_t strides[1] = {1};
  //     int64_t dims[1] = {0};
  //     dims[0] = helper.data_reshape_[1];
  //     strides[0] = helper.data_reshape_[2];
  //     auto loopStride = helper.data_reshape_[1] * helper.data_reshape_[2];
  //     auto dstStride = helper.data_reshape_[2];
  //     for (auto j = 0; j < helper.data_reshape_[2]; ++j) {
  //       for (auto i = 0; i < helper.data_reshape_[0]; ++i) {
  //         TypeOf(Y) accum = 0;
  //         GenericDimReducer<0, TypeOf(X)>::reduce(
  //             GTensor(X), i * loopStride + j, &accum, dims, strides);
  //         GTensor(Y).SetValue(i * dstStride + j, accum);
  //       }
  //     }
  //   } else {
  //     // If we don't hit one of the cases above, transpose the data so that
  //     // all reduced dimensions are last and reuse the 2-D -> 1-D case.
  //     // GSetBuffer(X, xTmp, 0, xShape.size);
  //     int64_t shuffled_shape[MAX_SHAPE_DIM] = {0};
  //     int64_t perm[MAX_SHAPE_DIM] = {0};
  //     helper.shuffled_shape(shuffled_shape);
  //     helper.permutation(perm);

  //     print_tensor("shuffled_shape", shuffled_shape, helper.ndims(), "%d");
  //     print_tensor("perm", perm, helper.ndims(), "%d");

  //     PermuteHelper permuteHelper;
  //     permuteHelper.init(helper.data_reshape_.shape,
  //                        helper.data_reshape_.dimNum, shuffled_shape, perm);
  //     // Redirect X's destination address.
  //     const int64_t unreduced = helper.out_shape_.size;
  //     auto numValues = helper.data_reshape_.size / unreduced;
  //     auto index = 0;
  //     while (index < unreduced) {
  //       auto v = InnerMostDimReducer<TypeOf(X), false, true>::reduce(
  //           GTensor(X), index * numValues, numValues, &stack, &retList,
  //           &permuteHelper);
  //       GTensor(Y).SetValue(index++, v);
  //       stack.clear();
  //       retList.clear();
  //     }
  //     // GTensor(Y).SetValue(0, TypeOf(Y)(123));
  //   }
  //   // printf("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n");
  // }

  // Reference: tensorflow/core/kernels/reduction_ops_common.h
  // Search for: ReductionOp
  __aicore__ inline void Process3() { ProcessImpl(); }

private:
  // Shared body of Process2/Process3 (they were identical duplicates):
  // zero-fill Y, normalize the axis list via ResolveAxis, then run the
  // generic element-wise reduction.
  __aicore__ inline void ProcessImpl() {
    for (auto i = 0; i < ySize; ++i) {
      GTensor(Y).SetValue(i, 0);
    }
    int64_t axis_out[MAX_SHAPE_DIM];
    int64_t out_num_axis = 0;
    ResolveAxis(xShape.dimNum, axis, num_axis, axis_out, out_num_axis,
                xShape.shape, helper.data_reshape_.shape,
                helper.data_reshape_.dimNum);
    reduce_sum(GTensor(X), helper.data_reshape_.shape,
               helper.data_reshape_.dimNum, axis_out, out_num_axis, GTensor(Y));
  }
};

// Kernel entry point: y = reduce_sum(x, axes). Unpacks the tiling blob,
// instantiates the typed kernel, and runs the generic reduction path.
extern "C" __global__ __aicore__ void reduce_sum(GM_ADDR x, GM_ADDR axes,
                                                 GM_ADDR y, GM_ADDR workspace,
                                                 GM_ADDR tiling) {
  GET_TILING_DATA(tiling_data, tiling);
  // DTYPE_* are injected by the op build system for this instantiation.
  Kernel<DTYPE_X, DTYPE_AXES, DTYPE_Y> kernel;
  kernel.Init(x, axes, y, tiling_data);
  kernel.Process3();
}