#include "kernel_operator.h"
using namespace AscendC;

#define BUFFER_NUM  1
#define BLOCK_SIZE  32
#define COMPUTE_ALIGNED  256
template <typename T>
class kernelHeaviside {
 public:
  __aicore__ inline kernelHeaviside() {}
  /// Sets up global-memory views and UB queues for this core's share of the
  /// elementwise Heaviside op: out = 0 if x < 0, values if x == 0, 1 if x > 0.
  ///
  /// @param input       GM base address of the input tensor.
  /// @param values      GM base address of the tensor used where input == 0.
  /// @param out         GM base address of the output tensor.
  /// @param iter        Max number of 32-byte blocks processed per UB pass.
  /// @param coreDataNum Base number of 32-byte blocks per core (small cores).
  /// @param tail        Number of "big" cores that handle one extra block.
  /// @param pipeIn      Shared TPipe used to allocate UB buffers.
  __aicore__ inline void Init(GM_ADDR input, GM_ADDR values, GM_ADDR out,
                              uint32_t iter, uint32_t coreDataNum, uint32_t tail,
                              TPipe *pipeIn) {
    this->pipe = pipeIn;
    int coreIdx = GetBlockIdx();
    // The first `tail` cores each take one extra block.
    int bigCoreDataNum = coreDataNum + 1;
    uint8_t nBigCore = tail;
    int globalIndex = bigCoreDataNum * coreIdx;
    if (coreIdx < nBigCore) {
      this->coreDataNum = bigCoreDataNum;
    } else {
      this->coreDataNum = coreDataNum;
      // Small cores start earlier: subtract the surplus counted above.
      globalIndex -= (coreIdx - nBigCore);
    }
    // BUGFIX: blockElem must be initialized BEFORE it is used to convert the
    // block offset into an element offset (was read uninitialized here).
    this->blockElem = BLOCK_SIZE / sizeof(T);
    globalIndex *= blockElem;
    int32_t elemCount = blockElem * this->coreDataNum;
    inputGm.SetGlobalBuffer((__gm__ T *)input + globalIndex, elemCount);
    valuesGm.SetGlobalBuffer((__gm__ T *)values + globalIndex, elemCount);
    outputGm.SetGlobalBuffer((__gm__ T *)out + globalIndex, elemCount);
    // Data may be unable to load into UB in one pass.
    if (elemCount < iter * blockElem) {
      // Everything fits: a single pass covers this core's whole range.
      this->loop = 1;
      // BUGFIX: use the per-core member count (big cores own one extra
      // block), not the shadowing parameter, or big cores drop a block.
      this->tailCount = this->coreDataNum;
      iterCount = this->coreDataNum;
      // Round the buffer up to 256 bytes to satisfy Compare alignment.
      bufferElem = ((elemCount * sizeof(T) + COMPUTE_ALIGNED - 1) / COMPUTE_ALIGNED) * COMPUTE_ALIGNED / sizeof(T);
    } else {
      iterCount = iter;
      this->loop = (this->coreDataNum + iterCount - 1) / iterCount;
      // BUGFIX: plain `%` yields 0 when coreDataNum divides evenly, which
      // made the final pass copy zero elements and skip one full chunk.
      this->tailCount = this->coreDataNum - (this->loop - 1) * iterCount;
      // Round the buffer up to 256 bytes to satisfy Compare alignment.
      bufferElem = ((iter * blockElem * sizeof(T) + COMPUTE_ALIGNED - 1) / COMPUTE_ALIGNED) * COMPUTE_ALIGNED / sizeof(T);
    }
    // BUGFIX: set the per-iteration element stride here; previously it was
    // only assigned inside process()'s loop and was read uninitialized when
    // loop == 1.
    elemPerIter = iterCount * blockElem;
    pipe->InitBuffer(inputQueue, BUFFER_NUM, bufferElem * sizeof(T));
    pipe->InitBuffer(valuesQueue, BUFFER_NUM, bufferElem * sizeof(T));
    pipe->InitBuffer(outQueue, BUFFER_NUM, bufferElem * sizeof(T));
    // Compare writes a bitmask: one bit per element.
    pipe->InitBuffer(cmpBuf, bufferElem / 8);
  }
  /// Streams the core's data through UB: `loop - 1` full chunks followed by
  /// one (possibly shorter) tail chunk.
  __aicore__ inline void process() {
    for (int i = 0; i < loop - 1; i++) {
      count = iterCount * blockElem;
      elemPerIter = count;
      roundElem = ((count * sizeof(T) + COMPUTE_ALIGNED - 1) / COMPUTE_ALIGNED) * COMPUTE_ALIGNED / sizeof(T);
      copyIn(i);
      compute(i);
      copyOut(i);
    }
    // Tail chunk (index loop - 1) may be shorter than a full iteration.
    count = tailCount * blockElem;
    roundElem = ((count * sizeof(T) + COMPUTE_ALIGNED - 1) / COMPUTE_ALIGNED) * COMPUTE_ALIGNED / sizeof(T);
    copyIn(loop - 1);
    compute(loop - 1);
    copyOut(loop - 1);
  }

  /// Copies chunk `i` of input and values from GM into the UB input queues.
  __aicore__ inline void copyIn(int i) {
    auto inputLocal = inputQueue.AllocTensor<T>();
    auto valuesLocal = valuesQueue.AllocTensor<T>();
    DataCopy(inputLocal, inputGm[elemPerIter * i], count);
    DataCopy(valuesLocal, valuesGm[elemPerIter * i], count);
    inputQueue.EnQue(inputLocal);
    valuesQueue.EnQue(valuesLocal);
  }
  /// Computes Heaviside on chunk `i` entirely with vector masks:
  ///   1) out = (input > 0) ? 1 : 0   (via LE compare + tensor/scalar select)
  ///   2) out = (input == 0) ? values : out
  __aicore__ inline void compute(int i) {
    auto outLocal = outQueue.AllocTensor<T>();
    auto cmpRes = cmpBuf.Get<uint8_t>();
    auto inputLocal = inputQueue.DeQue<T>();

    Duplicate(outLocal, (T)0, roundElem);
    // mask = (input <= 0); select keeps 0 where mask is set, else writes 1.
    CompareScalar(cmpRes, inputLocal, (T)0, CMPMODE::LE, roundElem);
    Select(outLocal, cmpRes, outLocal, (T)1, SELMODE::VSEL_TENSOR_SCALAR_MODE, roundElem);
    // mask = (input == 0); overwrite those lanes with `values`.
    CompareScalar(cmpRes, inputLocal, (T)0, CMPMODE::EQ, roundElem);
    auto valuesLocal = valuesQueue.DeQue<T>();
    Select(outLocal, cmpRes, valuesLocal, outLocal, SELMODE::VSEL_TENSOR_TENSOR_MODE, roundElem);
    outQueue.EnQue(outLocal);
    inputQueue.FreeTensor(inputLocal);
    valuesQueue.FreeTensor(valuesLocal);
  }
  /// Copies chunk `i` of the result from UB back to global memory.
  __aicore__ inline void copyOut(int i) {
    auto outLocal = outQueue.DeQue<T>();
    DataCopy(outputGm[elemPerIter * i], outLocal, count);
    outQueue.FreeTensor(outLocal);
  }

 private:
  TPipe *pipe;
  GlobalTensor<T> inputGm;
  GlobalTensor<T> valuesGm;
  GlobalTensor<T> outputGm;
  TQue<QuePosition::VECIN, BUFFER_NUM> inputQueue;
  TQue<QuePosition::VECIN, BUFFER_NUM> valuesQueue;
  TQue<QuePosition::VECOUT, BUFFER_NUM> outQueue;
  TBuf<QuePosition::VECCALC> cmpBuf;
  int32_t coreDataNum, bufferElem;   // blocks owned by this core; UB elems per buffer
  int32_t loop, tailCount, count, iterCount, roundElem;
  int32_t blockElem;                 // elements per 32-byte block
  int32_t elemPerIter;               // element stride between chunks in GM
};

extern "C" __global__ __aicore__ void heaviside(GM_ADDR input, GM_ADDR values,
                                                GM_ADDR out, GM_ADDR workspace,
                                                GM_ADDR tiling) {
  // Unpack the tiling parameters prepared on the host side.
  GET_TILING_DATA(tilingData, tiling);
  TPipe tpipe;
  kernelHeaviside<DTYPE_INPUT> kernel;
  kernel.Init(input, values, out, tilingData.iter, tilingData.coreDataNum,
              tilingData.tail, &tpipe);
  kernel.process();
}