#include "kernel_operator.h"
#include "op_common.h"
#include <cstdint>
#include <type_traits>

using namespace AscendC;

// Cumulative-sum (cumsum) kernel along a runtime-selected axis.
//
// Template parameters:
//   _DT_X    - input element type (float / half / int32_t / int8_t paths exist below)
//   _DT_AXIS - element type of the scalar `axis` input
//   _DT_Y    - output element type; NOTE(review): the rest of the class sizes
//              buffers with _DT_X, so _DT_Y is presumably expected to equal
//              _DT_X — confirm against the op registration.
//
// The input is viewed as a 3-D tensor [inner, depth, outer], where `depth` is
// the cumsum axis, `inner` is the product of the leading dims and `outer` the
// product of the trailing dims (see the index math in Process2). The scalar
// reference semantics are those of TF Lite's cumsum (Process2 below).
template <typename _DT_X, typename _DT_AXIS, typename _DT_Y> class Kernel {
public:
  TPipe pipe;
  ShapeData xShape;                // shape of the input tensor x
  DefInTensor(X);                  // input tensor/queue for x
  DefGlobalTensor(AXIS);           // global tensor holding the single axis scalar
  DefOutTensor(Y);                 // output tensor/queue for y
  DefBufVECOUT(SUM_VALUE);         // running-sum accumulator (one tile batch wide)
  DefBufVECCALC(SUM_VALUE_CASTED); // int16 accumulator scratch (int8 path only)
  DefBufVECCALC(X_CASTED);         // int16 input scratch (int8 path only)
  DefBufVECCALC(TEMP);             // half scratch for int8<->int16 casts (int8 path only)
  DefBufVECCALC(COPY_TEMP);        // 32-byte scratch fetched in copyOutFrom

  bool exclusive = false;  // exclusive scan: each output excludes its own input element
  bool reverse = false;    // scan from the last slice of the axis backwards
  int32_t targetAxis = 0;  // axis read from the AXIS input, normalized to be non-negative

  // Decomposition of xShape around targetAxis (computed in Init):
  int32_t inner = 1;        // product of dims before the axis
  int32_t depth = 1;        // extent of the axis itself
  int32_t outer = 1;        // product of dims after the axis
  int32_t tileLength = 0;   // elements per tile along `outer`
  int32_t tileCount = 1;    // number of `inner` rows batched per pass
  int32_t finalLength = 0;  // tail remainder: outer % tileLength
  int32_t loopCount = 0;    // whole tiles per row: outer / tileLength
  int32_t innerStride = 0;  // element stride between consecutive inner rows (= depth * outer)
  int32_t depthEnd = 0;     // axis iterations per scan (depth - 1 when exclusive)
  int32_t step = 0;         // signed element stride along the axis (-outer when reverse)
  int32_t flagLength = 0;   // NOTE(review): never read in this file — dead?
  int32_t copyAlignSize = 64 / sizeof(_DT_X); // NOTE(review): never read in this file — dead?
  int32_t bufferSize = 0;   // UB elements per queue buffer (= tileLength * tileCount)

public:
  __aicore__ inline Kernel() {}

  // Reads the tiling data, binds global-memory tensors, normalizes the axis,
  // derives the [inner, depth, outer] decomposition and the UB tiling, and
  // allocates all queues / TBuf scratch buffers.
  template <typename T>
  __aicore__ inline void Init(GM_ADDR x, GM_ADDR axis, GM_ADDR y,
                              T &tiling_data) {
    xShape.init(tiling_data.xShape);
    exclusive = tiling_data.exclusive;
    reverse = tiling_data.reverse;

    // Bind global-memory buffers.
    GSetBuffer(X, x, 0, xShape.size);
    GSetBuffer(AXIS, axis, 0, 1);
    GSetBuffer(Y, y, 0, xShape.size);
    targetAxis = GTensorName(AXIS).GetValue(0);
    if (targetAxis < 0) {
      targetAxis += xShape.dimNum; // support negative axis values
    }

    // Compute the [inner, depth, outer] segmentation around the axis.
    inner = 1;
    depth = 1;
    outer = 1;
    for (int32_t i = 0; i < xShape.dimNum; i++) {
      if (i < targetAxis)
        inner *= xShape[i];
      else if (i > targetAxis)
        outer *= xShape[i];
      else
        depth = xShape[i];
    }
    innerStride = depth * outer;
    depthEnd = exclusive ? depth - 1 : depth;
    step = reverse ? -outer : outer;

    // Reserve 1KB headroom, then split the remaining UB between the live
    // buffers. The weights below are bytes-per-element relative to
    // sizeof(_DT_X) for each buffer that must coexist.
    const int32_t ub_size = tiling_data.ubSize - 1024;
    // X * 2 + Y * 2 + SUM_VALUE
    float splitCount = 2.0f + 2.0f + 1.0f;
    if constexpr (std::is_same_v<_DT_X, int8_t>) {
      // SUM_VALUE_CASTED + X_CASTED + TEMP (each 2 bytes per int8 element)
      splitCount += 2.0f + 2.0f + 2.0f;
    }
    // Per-buffer element budget, rounded down to a 256-byte multiple.
    auto splitUb =
        int32_t(float(ub_size) / splitCount) / 256 * 256 / sizeof(_DT_X);
    auto dataAlignLength = alignToRepeat<_DT_X>(outer);
    if (dataAlignLength < splitUb) {
      tileLength = dataAlignLength; // whole `outer` run fits in one tile
    } else {
      tileLength = splitUb;
    }
    tileCount = splitUb / tileLength; // batch several inner rows per buffer if possible
    if (tileCount > inner) {
      tileCount = inner;
    }
    finalLength = outer % tileLength;
    loopCount = outer / tileLength;
    bufferSize = tileLength * tileCount;
    InitQueueSimple(X, bufferSize);
    InitQueueSimple(Y, bufferSize);
    InitTBufBuffer(SUM_VALUE, bufferSize * sizeof(_DT_X));
    InitTBufBuffer(COPY_TEMP, 32);
    if constexpr (std::is_same_v<_DT_X, int8_t>) {
      InitTBufBuffer(SUM_VALUE_CASTED, bufferSize * sizeof(int16_t));
      InitTBufBuffer(X_CASTED, bufferSize * sizeof(int16_t));
      InitTBufBuffer(TEMP, bufferSize * sizeof(half));
    }
    print("exclusive = %d\n", exclusive);
    print("reverse = %d\n", reverse);
    print("inner = %d\n", inner);
    print("depth = %d\n", depth);
    print("outer = %d\n", outer);
    print("tileLength = %d\n", tileLength);
    print("finalLength = %d\n", finalLength);
    print("loopCount = %d\n", loopCount);
    print("tileCount = %d\n", tileCount);
    print("bufferSize = %d\n", bufferSize);
  }

  // Main entry: scans all `inner` rows in batches of tileCount. A trailing
  // partial batch shrinks the tileCount member in place before the last call.
  __aicore__ inline void Process() {
    // Block-aligned copy length for the tail tile of each row.
    auto finalCopyCount = ALIGN_TO(finalLength, elementsPerBlock<_DT_X>());
    auto innerLoopEnd = inner / tileCount * tileCount;
    print("innerLoopEnd = %d\n", innerLoopEnd);
    for (auto i = 0; i < innerLoopEnd; i += tileCount) {
      doCumSum(i, finalCopyCount);
    }
    auto finalTileCount = inner % tileCount;
    if (finalTileCount > 0) {
      tileCount = finalTileCount; // remaining rows form a smaller batch
      doCumSum(innerLoopEnd, finalCopyCount);
    }
  }

  // Runs the cumulative sum for one batch of inner rows starting at innerIdx,
  // tiling along `outer`. For reverse scans the walk starts at the last slice
  // of the axis (step is already negative).
  __aicore__ inline void doCumSum(int64_t innerIdx, uint32_t finalCopyCount) {
    auto fixOffset = innerIdx * innerStride;
    if (reverse) {
      fixOffset += (depth - 1) * outer;
    }
    for (auto j = 0; j < loopCount; ++j) {
      doCumSumOnce(fixOffset, tileLength, tileLength);
      fixOffset += tileLength;
    }
    if (finalLength > 0) {
      doCumSumOnce(fixOffset, finalLength, finalCopyCount);
    }
  }

  // Scans one tile column along the whole axis: zeroes the accumulator, and
  // for exclusive scans first writes those zeros to the leading output slice,
  // then walks depthEnd slices accumulating and writing (shifted by one slice
  // when exclusive).
  __aicore__ inline void doCumSumOnce(int64_t fixOffset, int64_t calcCount,
                                      int64_t copyCount) {
    TBufGet(SUM_VALUE, _DT_X);
    MY_DUP_ZERO(BTensor(SUM_VALUE), bufferSize);
    uint16_t repeatTimes = CEIL_DIV(bufferSize, elementsPerRepeat<_DT_X>());
    if (exclusive) {
      // Exclusive scan starts with zeros in the first output slice.
      copyOutFrom(BTensor(SUM_VALUE), fixOffset, calcCount);
    }
    for (auto k = 0; k < depthEnd; ++k) {
      auto startIdx = fixOffset + k * step;
      auto targetIdx = exclusive ? startIdx + step : startIdx;
      copyIn(startIdx, copyCount);
      compute(targetIdx, bufferSize, repeatTimes);
      copyOut(targetIdx, calcCount);
    }
  }

  // Stage 1: copy one tile per batched inner row from global memory into the
  // X queue buffer (rows are innerStride apart in GM, tileLength apart in UB).
  __aicore__ inline void copyIn(int64_t startIdx, int64_t copyCount) {
    // EnQueGlobal2Local(X, startIdx, copyCount);
    QueAllocSimple(X);
    for (auto i = 0; i < tileCount; ++i) {
      auto offset = i * tileLength;
      DataCopy(LTensor(X)[offset], GTensor(X)[startIdx + i * innerStride],
               copyCount);
    }
    EnQue(X);
  }

  // Stage 2: SUM_VALUE += X, then stage the accumulator into the Y queue.
  // For int8 there is no direct vector Add, so values are routed
  // int8 -> half -> int16, added in int16, and the <<8 then >>8 pair keeps
  // only the low byte — presumably an arithmetic shift so that the result
  // wraps like int8 addition (TODO confirm ShiftRight semantics on int16).
  __aicore__ inline void compute(int64_t targetIdx, int64_t calcCount,
                                 uint16_t repeatTimes) {
    DeQueSimple(X);
    QueAllocSimple(Y);
    TBufGet(SUM_VALUE, _DT_X);
    if constexpr (std::is_same_v<_DT_X, float> ||
                  std::is_same_v<_DT_X, int32_t> ||
                  std::is_same_v<_DT_X, half>) {
      Add(BTensor(SUM_VALUE), BTensor(SUM_VALUE), LTensor(X), calcCount);
    } else if constexpr (std::is_same_v<_DT_X, int8_t>) {
      TBufGet(SUM_VALUE_CASTED, int16_t);
      TBufGet(X_CASTED, int16_t);
      TBufGet(TEMP, half);
      Cast(BTensor(TEMP), BTensor(SUM_VALUE), RoundMode::CAST_NONE, calcCount);
      Cast(BTensor(SUM_VALUE_CASTED), BTensor(TEMP), RoundMode::CAST_FLOOR,
           calcCount);
      Cast(BTensor(TEMP), LTensor(X), RoundMode::CAST_NONE, calcCount);
      Cast(BTensor(X_CASTED), BTensor(TEMP), RoundMode::CAST_FLOOR, calcCount);
      Add(BTensor(SUM_VALUE_CASTED), BTensor(SUM_VALUE_CASTED),
          BTensor(X_CASTED), calcCount);
      // Keep only the low byte (with sign extension) to emulate int8 wraparound.
      ShiftLeft(BTensor(X_CASTED), BTensor(SUM_VALUE_CASTED), int16_t(8),
                calcCount);
      ShiftRight(BTensor(SUM_VALUE_CASTED), BTensor(X_CASTED), int16_t(8),
                 calcCount);
      Cast(BTensor(TEMP), BTensor(SUM_VALUE_CASTED), RoundMode::CAST_FLOOR,
           calcCount);
      Cast(BTensor(SUM_VALUE), BTensor(TEMP), RoundMode::CAST_FLOOR, calcCount);
    }
    DataCopy(LTensor(Y), BTensor(SUM_VALUE),
             ALIGN_TO(calcCount, elementsPerBlock<_DT_X>()));
    QueFree(X);
    EnQue(Y);
  }

  // Scalar element-by-element store of `calcCount` elements per batched row
  // into the Y global tensor. The SetValue loop avoids the alignment issues
  // the commented-out DataCopy/DataCopyPad variants below apparently hit —
  // kept for reference.
  // NOTE(review): the parameter is LocalTensor<_DT_Y> while everything else
  // uses _DT_X, and `copyCount` below is computed but never used — confirm
  // both are intentional.
  __aicore__ inline void copyOutFrom(LocalTensor<_DT_Y> from, int32_t targetIdx,
                                     uint32_t calcCount) {
    TBufGet(COPY_TEMP, _DT_X);
    // uint16_t copyCount = ALIGN_TO(calcCount, elementsPerRepeat<_DT_X>());
    uint16_t copyCount = CEIL_DIV(calcCount, elementsPerBlock<_DT_X>());
    // auto dst = GTensor(Y).GetPhyAddr();
    // uint16_t copyCount = calcCount / elementsPerBlock<_DT_X>();
    for (auto i = 0; i < tileCount; ++i) {
      auto dst_offset = targetIdx + i * innerStride;
      auto from_offset = i * tileLength;
      for (auto j = 0; j < calcCount; ++j) {
        GTensor(Y).SetValue(dst_offset + j, from.GetValue(from_offset + j));
      }
      // for (auto j = 0; j < elementsPerBlock<_DT_X>(); ++j) {
      //   GTensor(Y).SetValue(dst_offset + j, from.GetValue(from_offset + j));
      // }
      // DataCopy(GTensor(Y)[dst_offset], from[from_offset],
      //          ALIGN_TO(calcCount, elementsPerBlock<_DT_X>()));
      // event_t eventIDSToMTE3 =
      //     static_cast<event_t>(pipe.FetchEventID(HardEvent::S_MTE3));
      // SetFlag<HardEvent::S_MTE3>(eventIDSToMTE3);
      // WaitFlag<HardEvent::S_MTE3>(eventIDSToMTE3);
      // PipeBarrier<PIPE_MTE3>();
      // DataCopy(GTensor(Y)[dst_offset], from[from_offset], copyCount);
      // PipeBarrier<PIPE_MTE3>();
      // DataCopyPad(GTensor(Y)[dst_offset], from[from_offset],
      //             {copyCount, 32, 0, 0});
      // PipeBarrier<PIPE_MTE3>();
      // for (auto j = 0; j < calcCount; ++j) {
      //   GTensor(Y).SetValue(dst_offset + j, from.GetValue(from_offset + j));
      // }
      // if (calcCount < elementsPerRepeat<_DT_X>()) {
      //   for (auto j = 0; j < calcCount; ++j) {
      //     GTensor(Y).SetValue(dst_offset + j, from.GetValue(from_offset +
      //     j));
      //   }
      // } else {
      //   // trailing data shorter than alignSize is dropped automatically
      //   DataCopy(GTensor(Y)[dst_offset], from[from_offset],
      //            ALIGN_TO(calcCount, elementsPerBlock<_DT_X>()));
      //   if (dst_offset % elementsPerBlock<_DT_X>() != 0) {
      //     DataCopyPad(GTensor(Y)[dst_offset], from[from_offset],
      //                 {1, 256, 0, 0});
      //   }
      // }
    }
  }

  // Stage 3: dequeue the staged Y tile and write it back to global memory.
  __aicore__ inline void copyOut(int32_t targetIdx, uint32_t calcCount) {
    {
      DeQueSimple(Y);
      copyOutFrom(LTensor(Y), targetIdx, calcCount);
      QueFree(Y);
    }
  }

  // Scalar reference implementation ported from
  // tensorflow/lite/kernels/internal/reference/cumsum.h.
  // Not called by the kernel entry point (which uses Process); presumably kept
  // for debugging/verification. The half branch accumulates in float to avoid
  // precision loss from repeated half additions.
  __aicore__ inline void Process2() {
    if constexpr (std::is_same_v<TypeOf(X), float> ||
                  std::is_same_v<TypeOf(X), int32_t> ||
                  std::is_same_v<TypeOf(X), int8_t>) {

      for (size_t outer_index = 0; outer_index < outer; outer_index++) {
        size_t outer_index_adj;
        if (reverse)
          outer_index_adj = (outer - 1) - outer_index;
        else
          outer_index_adj = outer_index;
        for (size_t inner_index = 0; inner_index < inner; inner_index++) {
          TypeOf(X) accumulator = 0;
          size_t inner_index_adj;
          if (reverse)
            inner_index_adj = (inner - 1) - inner_index;
          else
            inner_index_adj = inner_index;
          for (size_t depth_index = 0; depth_index < depth; depth_index++) {
            size_t depth_index_adj;
            if (reverse)
              depth_index_adj = (depth - 1) - depth_index;
            else
              depth_index_adj = depth_index;

            // Flat index for element [inner, depth, outer] of x/y.
            size_t index = outer_index_adj;
            index += inner_index_adj * depth * outer;
            index += depth_index_adj * outer;

            if (exclusive) {
              GTensorName(Y).SetValue(index, accumulator);
              accumulator += GTensorName(X).GetValue(index);
              // output_data[index] = accumulator;
              // accumulator += input_data[index];
            } else {
              accumulator += GTensorName(X).GetValue(index);
              GTensorName(Y).SetValue(index, accumulator);
              // accumulator += input_data[index];
              // output_data[index] = accumulator;
            }
          }
        }
      }
    } else if constexpr (std::is_same_v<TypeOf(X), half>) {
      for (size_t outer_index = 0; outer_index < outer; outer_index++) {
        size_t outer_index_adj;
        if (reverse)
          outer_index_adj = (outer - 1) - outer_index;
        else
          outer_index_adj = outer_index;
        for (size_t inner_index = 0; inner_index < inner; inner_index++) {
          half accumulator = 0;
          size_t inner_index_adj;
          if (reverse)
            inner_index_adj = (inner - 1) - inner_index;
          else
            inner_index_adj = inner_index;
          for (size_t depth_index = 0; depth_index < depth; depth_index++) {
            size_t depth_index_adj;
            if (reverse)
              depth_index_adj = (depth - 1) - depth_index;
            else
              depth_index_adj = depth_index;

            size_t index = outer_index_adj;
            index += inner_index_adj * depth * outer;
            index += depth_index_adj * outer;

            if (exclusive) {
              GTensorName(Y).SetValue(index, accumulator);
              // Accumulate in float to limit half-precision rounding.
              accumulator = half(float(accumulator) +
                                 float(GTensorName(X).GetValue(index)));
              // output_data[index] = accumulator;
              // accumulator += input_data[index];
            } else {
              accumulator = half(float(accumulator) +
                                 float(GTensorName(X).GetValue(index)));
              GTensorName(Y).SetValue(index, accumulator);
              // accumulator += input_data[index];
              // output_data[index] = accumulator;
            }
          }
        }
      }
    }
  }
};

// Kernel entry point: unpack the tiling blob, then run the tiled cumsum pass.
extern "C" __global__ __aicore__ void
cumsum(GM_ADDR x, GM_ADDR axis, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
  GET_TILING_DATA(tiling_data, tiling);
  Kernel<DTYPE_X, DTYPE_AXIS, DTYPE_Y> kernelInstance;
  kernelInstance.Init(x, axis, y, tiling_data);
  kernelInstance.Process();
}