#include "kernel_operator.h"
#include "op_common.h"
namespace AscendC {

// Mish activation kernel: y = x * tanh(softplus(x)) = x * tanh(ln(1 + e^x)).
// _DT_X / _DT_Y are the element types of the input/output global tensors.
// When _DT_X is half, the computation is staged through fp32 buffers
// (CASTED_X / CASTED_Y) and the result is clamped/cast back to half.
// NOTE(review): DefInTensor / DefOutTensor / DefBufVECCALC and the
// G*/Que*/TBuf*/BTensor/LTensor macros used below come from op_common.h;
// their exact expansion is not visible here — semantics described in the
// comments below are inferred from usage and should be confirmed.
template <typename _DT_X, typename _DT_Y> class Kernel {
public:
  TPipe pipe;
  int64_t size = 0;        // total element count to process
  int64_t tileLength = 0;  // elements per full tile
  int64_t finalLength = 0; // elements in the trailing partial tile (0 = none)
  DefInTensor(X);
  DefOutTensor(Y);

  // fp32 staging buffers used only on the half input path (see Compute)
  DefBufVECCALC(CASTED_X);
  DefBufVECCALC(CASTED_Y);

  // fp32 scratch buffers shared by MishFloat / MyTanhFormulaImpl / clamping
  DefBufVECCALC(TEMP);
  DefBufVECCALC(TEMP2);

public:
  __aicore__ inline Kernel() {}

  /// Bind global-memory addresses, copy tiling parameters, and size all
  /// queues/buffers. Must be called once before Process().
  /// @param x input tensor base address in global memory
  /// @param y output tensor base address in global memory
  /// @param tiling_data host-prepared tiling struct (size / tileLength /
  ///        finalLength fields are the only ones read here)
  template <typename T>
  __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, T tiling_data) {
    this->size = tiling_data.size;
    this->tileLength = tiling_data.tileLength;
    this->finalLength = tiling_data.finalLength;
    // Bind the global tensors to their GM address ranges
    GSetBuffer(X, x, 0, tiling_data.size);
    GSetBuffer(Y, y, 0, tiling_data.size);
    // Initialize the in/out queues, one tile per slot
    InitQueueSimple(X, tileLength);
    InitQueueSimple(Y, tileLength);
    // Scratch buffers are always fp32-sized, regardless of _DT_X
    InitTBufBuffer(TEMP, tileLength * sizeof(float));
    InitTBufBuffer(TEMP2, tileLength * sizeof(float));
    // Cast staging buffers are only needed for the half path
    if constexpr (std::is_same_v<_DT_X, half>) {
      InitTBufBuffer(CASTED_X, tileLength * sizeof(float));
      InitTBufBuffer(CASTED_Y, tileLength * sizeof(float));
    }
  }

  /// Main loop: process `size / tileLength` full tiles, then one partial
  /// tile of `finalLength` elements if the size is not tile-aligned.
  __aicore__ inline void Process() {
    auto loopCount = size / tileLength;
    for (uint32_t i = 0; i < loopCount; ++i) {
      CopyIn(i, tileLength);
      Compute(tileLength);
      CopyOut(i, tileLength);
    }
    if (finalLength > 0) {
      // DMA transfers must be block-aligned, so copy a rounded-up count
      // but only compute on the valid finalLength elements.
      auto copyCount = alignToBlock<_DT_X>(finalLength);
      CopyIn(loopCount, copyCount);
      Compute(finalLength);
      CopyOut(loopCount, copyCount);
    }
  }

  /// Stage tile `i` (calcCount elements) from global memory into the X queue.
  __aicore__ inline void CopyIn(uint32_t i, uint32_t calcCount) {
    EnQueGlobal2Local(X, i * tileLength, calcCount);
  }

  /// Drain the computed tile `i` from the Y queue back to global memory.
  __aicore__ inline void CopyOut(uint32_t i, uint32_t calcCount) {
    DeQueLocal2Global(Y, i * tileLength, calcCount);
  }

  /// tanh(x) computed as (e^(2x) - 1) / (e^(2x) + 1) in fp32.
  /// The input is first clipped to [-8.8, 8.8]: tanh is saturated (±1 to
  /// fp32 precision) beyond that range, and clipping bounds the argument
  /// of Exp. tmpClip is caller-provided scratch; dstTensor may not alias
  /// tmpClip (both are written with different values on L81/L83).
  __aicore__ inline void MyTanhFormulaImpl(const LocalTensor<float> &dstTensor,
                                           const LocalTensor<float> &srcTensor,
                                           const LocalTensor<float> &tmpClip,
                                           uint32_t calcCount) {
    constexpr float FP32_MIN_V2 = -8.8;
    constexpr float FP32_MAX_V2 = 8.8;
    constexpr float DOUBLE_X = 2;
    // Clip x to [FP32_MIN_V2, FP32_MAX_V2] in float
    Mins(tmpClip, srcTensor, FP32_MAX_V2, calcCount);
    Maxs(tmpClip, tmpClip, FP32_MIN_V2, calcCount);
    // 2 * x
    Muls(tmpClip, tmpClip, DOUBLE_X, calcCount);
    // e^(2 * x)
    Exp(tmpClip, tmpClip, calcCount);
    // e^(2 * x) - 1
    Adds(dstTensor, tmpClip, -1.0f, calcCount);
    // e^(2 * x) + 1
    Adds(tmpClip, tmpClip, 1.0f, calcCount);
    Div(dstTensor, dstTensor, tmpClip, calcCount);
  }

  /// Element-wise Mish in fp32: y = x * tanh(ln(1 + e^x)).
  /// x is read-only; y receives the result. TEMP/TEMP2 are used as
  /// scratch, so callers must not have live data in them.
  __aicore__ inline void MishFloat(LocalTensor<float> &x, LocalTensor<float> &y,
                                   uint32_t calcCount) {
    TBufGet(TEMP, float);
    TBufGet(TEMP2, float);
    // softplus(x) = ln(1 + e^x).
    // NOTE(review): Exp overflows to +inf for large x in fp32; the result
    // still propagates sensibly through the clipped tanh below, but the
    // disabled Compare/Select block underneath looks like an attempt at an
    // explicit large-x fallback (softplus(x) ~= x for x > 20) — confirm
    // whether it should be re-enabled.
    Exp(y, x, calcCount);
    Adds(y, y, float(1), calcCount);
    Ln(y, y, calcCount);

    // auto temp_int8 = BTensor(TEMP).template ReinterpretCast<int8_t>();
    // auto temp_int16 = BTensor(TEMP).template ReinterpretCast<int16_t>();
    // Duplicate(BTensor(TEMP2), float(20), alignToBlock<float>(calcCount));
    // Compare(temp_int8, x, BTensor(TEMP2), CMPMODE::LE, calcCount);
    // Select(y, temp_int8, y, float(0), SELMODE::VSEL_TENSOR_SCALAR_MODE,
    //        calcCount);
    // Not(temp_int16, temp_int16, CEIL_DIV(calcCount, 16));
    // Select(BTensor(TEMP2), temp_int8, x, float(0),
    //        SELMODE::VSEL_TENSOR_SCALAR_MODE, calcCount);
    // Add(y, y, BTensor(TEMP2), calcCount);
    // print_tensor("y", y, 8, "%f");
    // tanh(softplus(x)) into TEMP2, then multiply by x
    MyTanhFormulaImpl(BTensor(TEMP2), y, BTensor(TEMP), calcCount);
    Mul(y, BTensor(TEMP2), x, calcCount);
  }

  /// Compute one tile: dequeue input, run Mish, enqueue output.
  /// half path: cast to fp32, compute, clamp out-of-half-range results to
  /// ±inf, cast back. float path: compute directly on the queue tensors.
  __aicore__ inline void Compute(uint32_t calcCount) {
    if constexpr (std::is_same_v<_DT_X, half>) {
      DeQueSimple(X);
      QueAllocSimple(Y);
      TBufGet(TEMP, float);
      TBufGet(TEMP2, float);
      TBufGet(CASTED_X, float);
      TBufGet(CASTED_Y, float);
      Cast(BTensor(CASTED_X), LTensor(X), RoundMode::CAST_NONE, calcCount);
      MishFloat(BTensor(CASTED_X), BTensor(CASTED_Y), calcCount);

      // Saturate fp32 results that exceed the half range (±65504 is the
      // largest finite half) to ±inf before the down-cast.
      // NOTE(review): infFloat / ninfFloat are not declared in this file —
      // presumably constants from op_common.h; Select with
      // VSEL_TENSOR_SCALAR_MODE presumably keeps the tensor value where the
      // mask is set and the scalar elsewhere — confirm against the API.
      auto temp_int8 = BTensor(TEMP).template ReinterpretCast<int8_t>();
      Duplicate(BTensor(TEMP2), 65504.0f, alignToBlock<float>(calcCount));
      Compare(temp_int8, BTensor(CASTED_Y), BTensor(TEMP2), CMPMODE::LE, calcCount);
      Select(BTensor(CASTED_Y), temp_int8, BTensor(CASTED_Y), infFloat, SELMODE::VSEL_TENSOR_SCALAR_MODE, calcCount);
      Duplicate(BTensor(TEMP2), -65504.0f, alignToBlock<float>(calcCount));
      Compare(temp_int8, BTensor(CASTED_Y), BTensor(TEMP2), CMPMODE::GE, calcCount);
      Select(BTensor(CASTED_Y), temp_int8, BTensor(CASTED_Y), ninfFloat, SELMODE::VSEL_TENSOR_SCALAR_MODE, calcCount);

      Cast(LTensor(Y), BTensor(CASTED_Y), RoundMode::CAST_NONE, calcCount);
      EnQue(Y);
      QueFree(X);
    } else {
      DeQueSimple(X);
      QueAllocSimple(Y);
      MishFloat(LTensor(X), LTensor(Y), calcCount);
      EnQue(Y);
      QueFree(X);
    }
  }
};
} // namespace AscendC
// Device entry point for the Mish operator: decodes the tiling data,
// then runs the element-wise kernel reading from `x` and writing to `y`.
// `workspace` is accepted to match the framework calling convention but
// is not used by this operator.
extern "C" __global__ __aicore__ void mish(GM_ADDR x, GM_ADDR y,
                                           GM_ADDR workspace, GM_ADDR tiling) {
  GET_TILING_DATA(tilingData, tiling);
  AscendC::Kernel<DTYPE_X, DTYPE_Y> kernelInstance;
  kernelInstance.Init(x, y, tilingData);
  kernelInstance.Process();
}