#include "kernel_operator.h"
#include "op_common.h"
#include <cstdint>

using namespace AscendC;

template <typename _DT_X, typename _DT_Y> class Kernel {
public:
  TPipe pipe;
  DefGlobalTensor(X);
  DefGlobalTensor(Y);
  int64_t b = 0;        // batch count
  int64_t w = 0;        // rows per matrix
  int64_t h = 0;        // columns per matrix
  int64_t diagonal = 0; // triu diagonal offset
  int64_t wxh = 0;      // elements per matrix

public:
  __aicore__ inline Kernel() {}

  // Record the problem dimensions and bind the global input/output buffers
  // over the whole flattened tensor.
  __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, int64_t b_value,
                              int64_t w_value, int64_t h_value,
                              int64_t diagonal_value) {
    b = b_value;
    w = w_value;
    h = h_value;
    diagonal = diagonal_value;
    wxh = w * h;
    const int64_t total = b * w * h;
    // Bind the full global buffers.
    GSetBuffer(X, x, 0, total);
    GSetBuffer(Y, y, 0, total);
  }

  // Scalar element-wise triu: for each (row, col) position, write 0 when
  // col < row + diagonal, otherwise pass the input value through.
  __aicore__ inline void Process() {
    for (int64_t batch = 0; batch < b; ++batch) {
      const int64_t batchBase = batch * wxh;
      for (int64_t row = 0; row < w; ++row) {
        // Columns strictly below this bound are zeroed for this row.
        const int64_t zeroBound = diagonal + row;
        const int64_t rowBase = batchBase + row * h;
        for (int64_t col = 0; col < h; ++col) {
          const int64_t idx = rowBase + col;
          if (col < zeroBound) {
            GTensorName(Y).SetValue(idx, TypeOf(Y)(0));
          } else {
            GTensorName(Y).SetValue(idx, GTensorName(X).GetValue(idx));
          }
        }
      }
    }
  }
};

// Tiled/vectorized triu kernel. Input is streamed through UB tiles of
// tileLength elements; within each tile a per-repeat bitmask selects which
// lanes copy the input (upper triangle) versus stay zero.
template <typename _DT_X, typename _DT_Y> class Kernel2 {
public:
  TPipe pipe;
  DefInTensor(X);
  DefOutTensor(Y);
  int64_t b = 0;          // batch count
  int64_t w = 0;          // rows per matrix
  int64_t h = 0;          // columns per matrix
  int64_t diagonal = 0;   // triu diagonal offset
  int64_t wxh = 0;        // elements per matrix (w * h)
  int64_t tileLength = 0; // elements per tile, from tiling
  // Running (batch, row, column) cursor over the flattened tensor,
  // advanced by increaseHi() as elements are consumed.
  int64_t bi = 0;
  int64_t wi = 0;
  int64_t hi = 0;
  int64_t size = 0;       // total element count (b * w * h)

public:
  __aicore__ inline Kernel2() {}

  // Copy the tiling parameters, bind the global buffers and set up the
  // input/output tile queues.
  template <typename T>
  __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, T &tiling_data) {
    b = tiling_data.b;
    w = tiling_data.w;
    h = tiling_data.h;
    diagonal = tiling_data.diagonal;
    wxh = w * h;
    tileLength = tiling_data.tileLength;
    size = b * w * h;
    // Bind the full global buffers.
    GSetBuffer(X, x, 0, size);
    GSetBuffer(Y, y, 0, size);
    // Initialize the tile queues.
    InitQueueSimple(X, tileLength);
    InitQueueSimple(Y, tileLength);
  }

  // Advance the (bi, wi, hi) cursor by v elements, carrying column overflow
  // into the row index and row overflow into the batch index.
  // Returns true once the cursor has run past the last batch (all consumed).
  __aicore__ inline bool increaseHi(int64_t v) {
    hi += v;
    if (hi >= h) {
      wi += hi / h;
      hi = hi % h;
      if (wi >= w) {
        bi += wi / w;
        wi = wi % w;
        if (bi >= b) {
          return true;
        }
      }
    }
    return false;
  }

  // Number of zero (below-diagonal) elements from the cursor position to
  // either the diagonal or the end of the current row, whichever is closer.
  // May be <= 0 when the cursor is already at/past the diagonal.
  __aicore__ inline int64_t getZeroCount() {
    return GET_MIN(diagonal + wi - hi, h - hi);
  }

  __aicore__ inline void Process() {
    auto loopCount = CEIL_DIV(size, tileLength);
    for (auto i = 0; i < loopCount; ++i) {
      auto calcCount = tileLength;
      auto copyCount = tileLength;
      if (i == loopCount - 1) {
        // Elements remaining in the final tile. Computed as a difference
        // rather than `size % tileLength`, which would be 0 (dropping a
        // whole tile) whenever size is an exact multiple of tileLength.
        calcCount = size - (loopCount - 1) * tileLength;
        copyCount = alignToBlock<_DT_X>(calcCount);
      }
      if (diagonal >= w) {
        // Every element is below the diagonal: output is all zeros, the
        // input never needs to be loaded.
        QueAllocSimple(Y);
        Muls(LTensor(Y), LTensor(Y), _DT_X(0), copyCount);
        EnQue(Y);
      } else {
        { EnQueGlobal2Local(X, i * tileLength, copyCount); }
        {
          DeQueSimple(X);
          QueAllocSimple(Y);
          // Zero the output tile; selected lanes are then filled via Add.
          Muls(LTensor(Y), LTensor(Y), _DT_X(0), copyCount);
          // NOTE(review): the second clause looks redundant — when
          // diagonal <= -(w - 1) no row has zeros regardless of h; it only
          // narrows this fast path, so correctness is unaffected. Confirm.
          if (diagonal <= -(w - 1) && diagonal <= -(h - 1)) {
            // No zeros anywhere: straight copy of the whole tile.
            Add(LTensor(Y), LTensor(X), LTensor(Y), calcCount);
          } else {
            auto tensorIdx = 0;
            auto extraIdx = 0;
            while (tensorIdx < calcCount) {
              auto zeroDataCount = getZeroCount();
              // The zero run covers one or more whole repeats: skip them
              // (output already zeroed), no mask needed.
              if (zeroDataCount >= elementsPerRepeat<_DT_X>()) {
                auto count = GET_MIN(zeroDataCount, copyCount - tensorIdx) /
                             elementsPerRepeat<_DT_X>() *
                             elementsPerRepeat<_DT_X>();
                increaseHi(count);
                tensorIdx += count;
                continue;
              }
              // Build the copy mask for one repeat: bits start all-set
              // (copy) and get cleared over zero runs.
              uint64_t mask[2] = {UINT64_MAX, UINT64_MAX};
              int32_t maskFlagIdx = 0;
              while (maskFlagIdx < elementsPerRepeat<_DT_X>()) {
                if (zeroDataCount <= 0) {
                  // Pass-through elements remaining in the current row.
                  auto dataCount = h - hi;
                  auto newMaskFlagIdx = maskFlagIdx + dataCount;
                  // The data run reaches past the end of this repeat: the
                  // rest of the mask stays "copy". Additionally fold any
                  // whole trailing repeats of pure data into extraIdx so
                  // they can be handled by a plain Add below.
                  if (newMaskFlagIdx >= elementsPerRepeat<_DT_X>()) {
                    auto increaseCount =
                        elementsPerRepeat<_DT_X>() - maskFlagIdx;
                    auto lastDataCount = dataCount - increaseCount;
                    extraIdx =
                        GET_MIN(lastDataCount, copyCount - tensorIdx -
                                                   elementsPerRepeat<_DT_X>()) /
                        elementsPerRepeat<_DT_X>() * elementsPerRepeat<_DT_X>();
                    increaseHi(increaseCount + extraIdx);
                    break;
                  }
                  maskFlagIdx = newMaskFlagIdx;
                  // Stop early when the entire input is consumed.
                  if (increaseHi(dataCount)) {
                    break;
                  }
                  // Refresh the zero count at the new cursor position.
                  zeroDataCount = getZeroCount();
                  continue;
                }
                // Clamp the zero run to this repeat, then clear those lanes.
                zeroDataCount = GET_MIN(
                    zeroDataCount, elementsPerRepeat<_DT_X>() - maskFlagIdx);
                MASK_XOR_ONE(_DT_X, mask, maskFlagIdx, zeroDataCount);
                if (maskFlagIdx >= elementsPerRepeat<_DT_X>()) {
                  increaseHi(zeroDataCount);
                  goto maskCalc;
                }
                // Stop early when the entire input is consumed.
                if (increaseHi(zeroDataCount)) {
                  goto maskCalc;
                }

                // Refresh the zero count at the new cursor position.
                zeroDataCount = getZeroCount();
              }
            maskCalc:
              // Masked copy of one repeat: Y += X on "copy" lanes only
              // (Y is zero there, so this is a select).
              Add(LTensor(Y)[tensorIdx], LTensor(Y)[tensorIdx],
                  LTensor(X)[tensorIdx], mask, 1, {1, 1, 1, 8, 8, 8});
              tensorIdx += elementsPerRepeat<_DT_X>();
              if (extraIdx > 0) {
                // Unmasked copy of the pure-data repeats folded above.
                Add(LTensor(Y)[tensorIdx], LTensor(Y)[tensorIdx],
                    LTensor(X)[tensorIdx], extraIdx);
                tensorIdx += extraIdx;
                extraIdx = 0;
              }
            }
          }
          EnQue(Y);
          QueFree(X);
        }
      }
      { DeQueLocal2Global(Y, i * tileLength, copyCount); }
    }
  }
};

// Kernel entry point for the triu operator: parse tiling, then run the
// tiled implementation (Kernel2). The scalar Kernel<> above is kept as a
// reference implementation and is intentionally not invoked here.
extern "C" __global__ __aicore__ void triu(GM_ADDR x, GM_ADDR y,
                                           GM_ADDR workspace, GM_ADDR tiling) {
  GET_TILING_DATA(tiling_data, tiling);
  Kernel2<DTYPE_X, DTYPE_Y> kernel;
  kernel.Init(x, y, tiling_data);
  kernel.Process();
}