#pragma once
#include "kernel_operator.h"
#include "op_common.h"
#include <cstdint>
#include <math.h>
namespace AscendC {

// Fill the first `calcCount` elements of `x` with the scalar value `v`.
// Thin wrapper over the vector Duplicate primitive (the hardware-accelerated
// replacement for a scalar element-wise store loop).
template <typename T>
__aicore__ inline void tensorSet(LocalTensor<T> x, T v, uint32_t calcCount) {
  Duplicate(x, v, calcCount);
}

// huge * huge deliberately overflows float to +inf (fdlibm overflow idiom).
static const float huge = 1.0e+30;
// o_threshold ~= ln(FLT_MAX): exp(x) overflows float for x above this.
static const float o_threshold = 8.8721679688e+01;
// Element-wise expm1(x) = exp(x) - 1 for float, with fdlibm-style range
// handling emulated via Compare/Select bit masks instead of branches:
//   x <= -27*ln2 (~ -18.715)  -> -1
//   x >= o_threshold          -> +inf (huge * huge overflows to +inf)
//   |x| below tiny threshold  -> x returned unchanged
//   otherwise                 -> exp(x) - 1
// NOTE(review): exp(x)-1 loses precision for small |x|; the dedicated
// small-|x| polynomial branch is commented out below — confirm accuracy
// requirements before relying on this near zero.
// y receives the result; x is clobbered as scratch. cmpRet1/cmpRet2 and
// temp1 are caller-provided scratch buffers. cmpRet2 accumulates the
// "lane already finalized" mask across the stages.
__aicore__ inline void my_expm1f(LocalTensor<float> y, LocalTensor<float> x,
                                 LocalTensor<float> temp1,
                                 LocalTensor<uint8_t> cmpRet1,
                                 LocalTensor<uint8_t> cmpRet2,
                                 uint32_t calcCount) {
  // Compare works on repeat-aligned counts; masks are manipulated as uint16
  // lanes, hence bitCount = cmpCount / 16.
  const auto cmpCount = ALIGN_TO(calcCount, elementsPerRepeat<float>());
  const auto bitCount = cmpCount / 16;
  auto temp1_uint8 = temp1.ReinterpretCast<uint8_t>();
  auto temp1_uint16 = temp1.ReinterpretCast<uint16_t>();
  auto cmpRet1_uint16 = cmpRet1.ReinterpretCast<uint16_t>();
  auto cmpRet2_uint16 = cmpRet2.ReinterpretCast<uint16_t>();
  // if (x <= -27 * ln2) {
  //     return -1.0f;
  // }
  tensorSet(temp1, -18.714973875118524f, calcCount);
  Compare(cmpRet1, x, temp1, CMPMODE::GE, cmpCount);
  Select(y, cmpRet1, x, -1.0f, SELMODE::VSEL_TENSOR_SCALAR_MODE, calcCount);
  Not(cmpRet2_uint16, cmpRet1_uint16, bitCount);

  //  if (x >= o_threshold) {
  //     return huge
  // }
  tensorSet(temp1, o_threshold, calcCount);
  Compare(cmpRet1, x, temp1, CMPMODE::LT, cmpCount);
  Or(temp1_uint16, cmpRet2_uint16, cmpRet1_uint16, bitCount);
  // huge * huge overflows float -> +inf for the saturated lanes.
  Select(y, temp1_uint8, y, huge * huge, SELMODE::VSEL_TENSOR_SCALAR_MODE,
         calcCount);
  Not(temp1_uint16, temp1_uint16, bitCount);
  Or(cmpRet2_uint16, cmpRet2_uint16, temp1_uint16, bitCount);

  // Tiny inputs pass through unchanged: |x| < 2.7506350927670065e-34 -> x.
  // NOTE(review): the original comment claimed this is 2**-25, but
  // 2**-25 ~= 2.98e-8; the constant actually used is far smaller — confirm
  // the intended threshold.
  Abs(x, y, calcCount);
  tensorSet(temp1, 2.7506350927670065e-34f, calcCount);
  Compare(cmpRet1, x, temp1, CMPMODE::LT, cmpCount);
  Or(cmpRet2_uint16, cmpRet2_uint16, cmpRet1_uint16, bitCount);

  // if (fabsf(x) < 1e-5f) {
  //     return x + 0.5f * x * x;
  // }
  // tensorSet(temp1, 1e-5f, calcCount);
  // Mul(x, y, y, calcCount);
  // Muls(x, x, 0.5f, calcCount);
  // Add(x, x, y, calcCount);
  // Compare(cmpRet1, x, temp1, CMPMODE::GE, cmpCount);
  // Or(temp1_uint16, cmpRet2_uint16, cmpRet1_uint16, bitCount);
  // Select(y, temp1_uint8, y, 0.0f, SELMODE::VSEL_TENSOR_SCALAR_MODE,
  // calcCount); Not(temp1_uint16, temp1_uint16, bitCount); Select(x,
  // temp1_uint8, x, 0.0f, SELMODE::VSEL_TENSOR_SCALAR_MODE, calcCount); Add(y,
  // y, x, calcCount); Or(cmpRet2_uint16, cmpRet2_uint16, temp1_uint16,
  // bitCount);

  // General path: x = exp(y) - 1, then merge: lanes flagged in cmpRet2 keep
  // their already-finalized y, the rest take the freshly computed x.
  Exp(x, y, calcCount);
  Adds(x, x, -1.0f, calcCount);
  Select(y, cmpRet2, y, 0.0f, SELMODE::VSEL_TENSOR_SCALAR_MODE, calcCount);
  Not(temp1_uint16, cmpRet2_uint16, bitCount);
  Select(x, temp1_uint8, x, 0.0f, SELMODE::VSEL_TENSOR_SCALAR_MODE, calcCount);
  Add(y, y, x, calcCount);
}

// typedef union {
//   float value;
//   uint32_t word;
// } ieee_float_shape_type;
// #ifndef GET_FLOAT_WORD
// #define GET_FLOAT_WORD(i, d) \
//   do { \
//     ieee_float_shape_type gf_u; \
//     gf_u.value = (d); \
//     (i) = gf_u.word; \
//   } while (0)
// #endif
// Constants shared with the scalar fdlibm __tanhf reference below; `tiny`
// exists so that `one - tiny` raises the inexact flag in the reference code.
static const float one = 1.0, two = 2.0, tiny = 1.0e-30;
//* float __tanhf(float x) {
//*   float t, z;
//*   int32_t jx, ix;
//*   GET_FLOAT_WORD(jx, x);
//*   ix = jx & 0x7fffffff;
//*   /* x is INF or NaN */
//*   if (ix >= 0x7f800000) {
//*     if (jx >= 0)
//*       return one / x + one; /* tanh(+-inf)=+-1 */
//*     else
//*       return one / x - one; /* tanh(NaN) = NaN */
//*   }
//*   /* |x| < 22 */
//*   if (ix < 0x41b00000) { /* |x|<22 */
//*     if (ix == 0)
//*       return x;          /* x == +-0 */
//*     if (ix < 0x24000000) /* |x|<2**-55 */
//*     {
//*       return x * (one + x); /* tanh(small) = small */
//*     }
//*     if (ix >= 0x3f800000) { /* |x|>=1  */
//*       t = __expm1f(two * fabsf(x));
//*       z = one - two / (t + two);
//*     } else {
//*       t = __expm1f(-two * fabsf(x));
//*       z = -t / (t + two);
//*     }
//*     /* |x| > 22, return +-1 */
//*   } else {
//*     z = one - tiny; /* raised inexact flag */
//*   }
//*   return (jx >= 0) ? z : -z;
//* }
// Element-wise float tanh: a vector port of the fdlibm __tanhf reference
// shown (commented out) above. All branches are emulated with
// Compare/Select bit masks; temp2_uint8 accumulates the "lane already
// finalized" mask, and every range test ORs its lanes into it so later
// stages leave those lanes untouched.
// Parameters:
//   y  - output; x - input (clobbered as scratch).
//   temp1..temp6 - caller-provided scratch tensors/masks (temp5/temp6 are
//   only needed by the commented-out my_expm1f calls).
//   t0/t1/t2/t22/tInf/tNinf/l2PowerN55 - constant tensors; from their usage
//   they are assumed preloaded with 0, 1, 2, 22, +inf, -inf and 2**-55
//   respectively — TODO(review): confirm at the call sites.
__aicore__ inline void
my_tanhf(LocalTensor<float> y, LocalTensor<float> x, LocalTensor<float> temp1,
         LocalTensor<uint8_t> temp2_uint8, LocalTensor<uint8_t> temp3_uint8,
         LocalTensor<float> temp4, LocalTensor<uint8_t> temp5_uint8,
         LocalTensor<uint8_t> temp6_uint8, LocalTensor<float> t0,
         LocalTensor<float> t1, LocalTensor<float> t2, LocalTensor<float> t22,
         LocalTensor<float> tInf, LocalTensor<float> tNinf,
         LocalTensor<float> l2PowerN55, uint32_t calcCount) {
  // Compare needs repeat-aligned element counts; masks are processed as
  // uint16 lanes, hence bitCount = cmpCount / 16.
  const auto cmpCount = ALIGN_TO(calcCount, elementsPerRepeat<float>());
  const auto bitCount = cmpCount / 16;
  // NOTE(review): y_int32 and temp1_int32 are unused in this function.
  auto y_int32 = y.ReinterpretCast<int32_t>();
  auto temp1_uint8 = temp1.ReinterpretCast<uint8_t>();
  auto temp1_uint16 = temp1.ReinterpretCast<uint16_t>();
  auto temp1_int32 = temp1.ReinterpretCast<int32_t>();
  auto temp2_uint16 = temp2_uint8.ReinterpretCast<uint16_t>();
  auto temp3_uint16 = temp3_uint8.ReinterpretCast<uint16_t>();
  auto temp4_uint8 = temp4.ReinterpretCast<uint8_t>();

  print_tensor("[0]x", x, 8, "%f");
  // NaN detection: NaN == NaN is false, so EQ marks the non-NaN lanes and
  // the Not below flips the mask to "is NaN".
  // x  =    [2, inf, -inf, nan, 0, 23, 2**-56, -25, 0.5] == x?
  // temp2 = [0, 0, 0, 1, 0, 0, 0, 0. 0]
  Compare(temp2_uint8, x, x, CMPMODE::EQ, cmpCount);
  print_tensor("[1]temp2_uint8 != nan", temp2_uint8, 1, "%x");
  Not(temp2_uint16, temp2_uint16, bitCount);
  print_tensor("[2]temp2_uint8 != nan", temp2_uint8, 1, "%x");
  print_tensor("[3]x nan", x, 8, "%f");
  // +inf detection: y = isinf ? 1 : x (tanh(+inf) = 1).
  // x =     [2, inf, -inf, nan, 0, 23, 2**-56, -25, 0.5] != inf ?
  // temp1 = [1, 0, 1, 0, 1, 1, 1, 1, 1]
  // temp3 = [0, 0, 0, 1, 0, 0, 0, 0, 0] or
  // temp4 = [1, 0, 1, 1, 1, 1, 1, 1, 1]
  // x =     [2, inf, -inf, nan, 0, 23, 2**-56, -25, 0.5]
  // y =     [2, 1.0, -inf, nan, 0, 23, 2**-56, -25, 0.5]
  // temp1 = [1, 0, 1, 0, 1, 1, 1, 1, 1] not
  // temp2 = [0, 1, 0, 1, 0, 0, 0, 0. 0]
  Compare(temp1_uint8, x, tInf, CMPMODE::NE, cmpCount);
  print_tensor("[4]temp1_uint8 != inf", temp1_uint8, 1, "%x");
  Or(temp3_uint16, temp1_uint16, temp2_uint16, bitCount);
  print_tensor("[5]temp3_uint8 or temp1_uint8", temp3_uint8, 1, "%x");
  Select(y, temp3_uint8, x, 1.0f, SELMODE::VSEL_TENSOR_SCALAR_MODE, calcCount);
  print_tensor("[6]y", y, 8, "%f");
  Not(temp2_uint16, temp1_uint16, bitCount);
  print_tensor("[7]temp2_uint8 inf", temp2_uint8, 1, "%x");
  // -inf detection: y = is -inf ? -1 : y (tanh(-inf) = -1); -inf lanes are
  // also folded into the "done" mask so later stages skip them.
  // y =     [2, 1.0, -inf, nan, 0, 23, 2**-56, -25, 0.5] != ninf ?
  // temp1 = [1, 1, 0, 0, 1, 1, 1, 1, 1]
  // temp2 = [0, 1, 0, 1, 0, 0, 0, 0, 0] or
  // temp3 = [1, 1, 0, 1, 1, 1, 1, 1, 1]
  // y =     [2, 1.0, -1.0, nan, 0, 23, 2**-56, -25, 0.5]
  // temp1 = [1, 1, 0, 0, 1, 1, 1, 1, 1] not
  // temp1 = [0, 0, 1, 1, 0, 0, 0, 0, 0] or
  // temp2 = [0, 1, 0, 1, 0, 0, 0, 0, 0]
  // temp2 = [0, 1, 1, 1, 0, 0, 0, 0, 0]
  Compare(temp1_uint8, y, tNinf, CMPMODE::NE, cmpCount);
  print_tensor("[8]temp1_uint8 != tNinf", temp1_uint8, 1, "%x");
  Or(temp3_uint16, temp1_uint16, temp2_uint16, bitCount);
  print_tensor("[9]temp3_uint8 or temp1_uint8", temp3_uint8, 1, "%x");
  Select(y, temp3_uint8, y, -1.0f, SELMODE::VSEL_TENSOR_SCALAR_MODE, calcCount);
  Not(temp3_uint16, temp3_uint16, bitCount);
  print_tensor("[10]not temp3_uint8", temp3_uint8, 1, "%x");
  Or(temp2_uint16, temp2_uint16, temp3_uint16, bitCount);
  print_tensor("[11]temp2_uint8 ninf", temp2_uint8, 1, "%x");
  print_tensor("[12]y", y, 8, "%f");
  // Saturated range: |x| >= 22 ? (x > 0 ? one - tiny : -(one - tiny)) : x
  // y =     [2, 1.0, -1.0, nan, 0, 23, 2**-56, -25, 0.5] < 22.0f
  // temp1 = [1, 1, 1, 0, 1, 0, 1, 0, 1] or
  // temp2 = [0, 1, 1, 1, 0, 0, 0, 0, 0]
  // temp3 = [1, 1, 1, 1, 1, 0, 1, 0, 1]
  // Test x > 0 ?
  // y =     [2, 1.0, -1.0, nan, 0, 23, 2**-56, -25, 0.5] > 0 ?
  // temp1 = [1, 1, 0, 0, 0, 1, 1, 0, 1] or
  // temp3 = [1, 1, 1, 1, 1, 0, 1, 0, 1]
  // temp1 = [1, 1, 1, 1, 1, 1, 1, 0, 1]
  // y =     [2, 1.0, -1.0, nan, 0, 23, 2**-56, -(one-tiny), 0.5]
  // Test x < 0 ?
  // y =     [2, 1.0, -1.0, nan, 0, 23, 2**-56, -(one-tiny), 0.5] < 0 ?
  // temp1 = [0, 0, 1, 0, 0, 0, 0, 1, 0] or
  // temp3 = [1, 1, 1, 1, 1, 0, 1, 0, 1]
  // temp1 = [1, 1, 1, 1, 1, 0, 1, 1, 1]
  // y =     [2, 1.0, -1.0, nan, 0, one-tiny, 2**-56, -(one-tiny), 0.5]
  // temp3 = [1, 1, 1, 1, 1, 0, 1, 0, 1] not
  // temp3 = [0, 0, 0, 0, 0, 1, 0, 1, 0] or
  // temp2 = [0, 1, 1, 1, 0, 0, 0, 0, 0]
  // temp2 = [0, 1, 1, 1, 0, 1, 0, 1, 0]
  // |x| < 22
  Abs(x, y, calcCount);
  print_tensor("[13]|x|", x, 8, "%f");
  Compare(temp1_uint8, x, t22, CMPMODE::LT, cmpCount);
  print_tensor("[14]temp1_uint8  |x| < 22", temp1_uint8, 1, "%x");
  Or(temp3_uint16, temp2_uint16, temp1_uint16, bitCount);
  print_tensor("[15]temp3_uint8 or temp1_uint8", temp3_uint8, 1, "%x");
  // x > 0
  Compare(temp1_uint8, y, t0, CMPMODE::GT, cmpCount);
  print_tensor("[16]temp1_uint8  y > 0", temp1_uint8, 1, "%x");
  Or(temp1_uint16, temp3_uint16, temp1_uint16, bitCount);
  print_tensor("[17]temp1_uint8 or temp3_uint8", temp1_uint8, 1, "%x");
  Select(y, temp1_uint8, y, -(one - tiny), SELMODE::VSEL_TENSOR_SCALAR_MODE,
         calcCount);
  print_tensor("[18]y", y, 8, "%f");
  // x < 0
  Compare(temp1_uint8, y, t0, CMPMODE::LT, cmpCount);
  print_tensor("[19]temp1_uint8  y < 0", temp1_uint8, 1, "%x");
  Or(temp1_uint16, temp3_uint16, temp1_uint16, bitCount);
  print_tensor("[20]temp1_uint8 or temp3_uint8", temp1_uint8, 1, "%x");
  Select(y, temp1_uint8, y, (one - tiny), SELMODE::VSEL_TENSOR_SCALAR_MODE,
         calcCount);
  Not(temp3_uint16, temp3_uint16, bitCount);
  Or(temp2_uint16, temp2_uint16, temp3_uint16, bitCount);
  print_tensor("[21]temp2_uint8 去除>22", temp2_uint8, 1, "%x");
  print_tensor("[22]y", y, 8, "%f");
  // |x| <= 2**-55: value stays unchanged (tanh(small) == small).
  // y =     [2, 1.0, -1.0, nan, 0, one-tiny, 2**-56, -(one-tiny), 0.5]
  // |x| =   [2, 1.0, 1.0, nan, 0, one-tiny, 2**-56, one-tiny, 0.5] <= 2**-55
  // temp1 = [0, 0, 0, 0, 1, 0, 1, 0, 0] or
  // temp2 = [0, 1, 1, 1, 0, 1, 0, 1, 0]
  // temp2 = [0, 1, 1, 1, 1, 1, 1, 1, 0]
  Abs(x, y, calcCount);
  print_tensor("[23]|x|", x, 8, "%f");
  Compare(temp1_uint8, x, l2PowerN55, CMPMODE::LE, cmpCount);
  print_tensor("[24]temp1_uint8  |x| <= 2**-55", temp1_uint8, 1, "%x");
  // NOTE: cmpCount / 16 here equals bitCount used everywhere else.
  Or(temp2_uint16, temp2_uint16, temp1_uint16, cmpCount / 16);
  print_tensor("[25]temp2_uint8 去除|x| <= 2**-55", temp2_uint8, 1, "%x");
  print_tensor("[26]y", y, 8, "%f");
  // Split on |x| >= 1: first handle the 0 < |x| < 1 branch.
  // y =     [2, 1.0, -1.0, nan, 0, one-tiny, 2**-56, -(one-tiny), 0.5]
  // |x| =   [2, 1.0, 1.0, nan, 0, one-tiny, 2**-56, one-tiny, 0.5] >= 1
  // temp1 = [1, 0, 0, 0, 0, 0, 0, 0, 0] or
  // temp2 = [0, 1, 1, 1, 1, 1, 1, 1, 0]
  // temp2 = [1, 1, 1, 1, 1, 1, 1, 1, 0]
  // y =     [2, 1.0, -1.0, nan, 0, one-tiny, 2**-56, -(one-tiny), t =
  // __expm1f(-two * fabsf(x));z = -t / (t + two);]
  // temp3 = [1, 1, 1, 1, 1, 1, 1, 1, 0] not
  // temp3 = [0, 0, 0, 0, 0, 0, 0, 0, 1] or
  // temp2 = [0, 1, 1, 1, 1, 1, 1, 1, 0]
  // temp2 = [0, 1, 1, 1, 1, 1, 1, 1, 1]
  Compare(temp1_uint8, x, t1, CMPMODE::GE, cmpCount);
  print_tensor("[27]temp1_uint8  x >= 1", temp1_uint8, 1, "%x");
  Or(temp3_uint16, temp1_uint16, temp2_uint16, bitCount);
  print_tensor("[28]temp3_uint8 or temp1_uint8", temp3_uint8, 1, "%x");

  // 0 < |x| < 1 branch: t = exp(-2|x|) - 1, z = -t / (t + 2).
  // NOTE(review): expm1 is approximated by Exp - 1 here; the dedicated
  // my_expm1f call is commented out.
  Mul(x, x, t2, calcCount);
  Sub(x, t0, x, calcCount);
  Exp(x, x, calcCount);
  Sub(temp1, x, t1, calcCount);
  // my_expm1f(temp1, x, temp4, temp5_uint8, temp6_uint8, calcCount);
  // y = -t / (t+2)
  Add(x, temp1, t2, calcCount);
  Div(x, temp1, x, calcCount);
  Sub(x, t0, x, calcCount);

  // Merge by sign: non-negative lanes first (x >= 0 keeps +z).
  Compare(temp1_uint8, y, t0, CMPMODE::GE, cmpCount);
  Or(temp1_uint16, temp1_uint16, temp3_uint16, bitCount);
  print_tensor("[29]x tanh", x, 8, "%f");
  print_tensor("[30]temp1_uint8", temp1_uint8, 1, "%x");
  print_tensor("[31]y", y, 8, "%f");
  Select(y, temp1_uint8, y, 0.0f, SELMODE::VSEL_TENSOR_SCALAR_MODE, calcCount);
  print_tensor("[32]y", y, 8, "%f");
  Not(temp1_uint16, temp1_uint16, bitCount);
  print_tensor("[32_0]temp1_uint8", temp1_uint8, 1, "%x");
  Sub(temp4, t0, x, calcCount);
  Select(temp4, temp1_uint8, temp4, 0.0f, SELMODE::VSEL_TENSOR_SCALAR_MODE,
         calcCount);
  print_tensor("[32_1]temp4", temp4, 8, "%f");
  Add(y, y, temp4, calcCount);
  print_tensor("[32_2]y", y, 8, "%f");
  // Negative lanes get -z (tanh is odd).
  Compare(temp1_uint8, y, t0, CMPMODE::LT, cmpCount);
  Or(temp1_uint16, temp1_uint16, temp3_uint16, bitCount);
  print_tensor("[33]x tanh", x, 8, "%f");
  print_tensor("[34]temp1_uint8", temp1_uint8, 1, "%x");
  Select(y, temp1_uint8, y, 0.0f, SELMODE::VSEL_TENSOR_SCALAR_MODE, calcCount);
  print_tensor("[35]y", y, 8, "%f");
  Not(temp1_uint16, temp1_uint16, bitCount);
  print_tensor("[36]temp1_uint8", temp1_uint8, 1, "%x");
  Select(x, temp1_uint8, x, 0.0f, SELMODE::VSEL_TENSOR_SCALAR_MODE, calcCount);
  print_tensor("[37]x", x, 8, "%f");
  Add(y, y, x, calcCount);

  Not(temp3_uint16, temp3_uint16, bitCount);
  Or(temp2_uint16, temp2_uint16, temp3_uint16, bitCount);
  print_tensor("[38]temp2_uint8 去除|x| < 1", temp2_uint8, 1, "%x");
  print_tensor("[39]y", y, 8, "%f");

  // 1 <= |x| < 22 branch: t = exp(2|x|) - 1, z = 1 - 2 / (t + 2).
  Abs(x, y, calcCount);
  Mul(x, x, t2, calcCount);
  Exp(x, x, calcCount);
  Sub(temp1, x, t1, calcCount);
  // my_expm1f(temp1, x, temp4, temp5_uint8, temp6_uint8, calcCount);
  // y = 1 - 2 / (t + 2)
  Add(x, temp1, t2, calcCount);
  Div(x, t2, x, calcCount);
  Sub(x, t1, x, calcCount);
  // Non-negative lanes keep +z.
  Compare(temp1_uint8, y, t0, CMPMODE::GE, cmpCount);
  Or(temp1_uint16, temp1_uint16, temp2_uint16, bitCount);
  print_tensor("[40]]x tanh", x, 8, "%f");
  print_tensor("[41]temp1_uint8", temp1_uint8, 1, "%x");
  print_tensor("[42]y", y, 8, "%f");
  Select(y, temp1_uint8, y, 0.0f, SELMODE::VSEL_TENSOR_SCALAR_MODE, calcCount);
  print_tensor("[43]y", y, 8, "%f");
  Not(temp1_uint16, temp1_uint16, bitCount);
  print_tensor("[44]temp1_uint8", temp1_uint8, 1, "%x");
  Sub(temp4, t0, x, calcCount);
  Select(temp4, temp1_uint8, temp4, 0.0f, SELMODE::VSEL_TENSOR_SCALAR_MODE,
         calcCount);
  print_tensor("[45]temp4", temp4, 8, "%f");
  Add(y, y, temp4, calcCount);
  print_tensor("[46]y", y, 8, "%f");

  // Negative lanes get -z (tanh is odd).
  Compare(temp1_uint8, y, t0, CMPMODE::LT, cmpCount);
  Or(temp1_uint16, temp1_uint16, temp2_uint16, bitCount);
  print_tensor("[47]x tanh", x, 8, "%f");
  print_tensor("[48]temp1_uint8", temp1_uint8, 1, "%x");
  Select(y, temp1_uint8, y, 0.0f, SELMODE::VSEL_TENSOR_SCALAR_MODE, calcCount);
  print_tensor("[49]y", y, 8, "%f");
  Not(temp1_uint16, temp1_uint16, bitCount);
  print_tensor("[50]temp1_uint8", temp1_uint8, 1, "%x");
  Select(x, temp1_uint8, x, 0.0f, SELMODE::VSEL_TENSOR_SCALAR_MODE, calcCount);
  print_tensor("[51]x", x, 8, "%f");
  Add(y, y, x, calcCount);
}

// tanh via the direct formula (e^(2x) - 1) / (e^(2x) + 1), with the input
// clipped first to keep Exp in range.
// NOTE(review): the entire implementation is commented out (it depends on
// FP32_MIN_V2 / FP32_MAX_V2 / DOUBLE_X, which are not visible in this file),
// so this function is currently a NO-OP: dstTensor is left untouched.
// Confirm no caller relies on it before re-enabling or removing.
__aicore__ inline void my_tanhf2(const LocalTensor<float> &dstTensor,
                                 const LocalTensor<float> &srcTensor,
                                 const LocalTensor<float> &tmpClip,
                                 uint32_t calcCount) {
  // Clip x to [FP32_MIN_V2, FP32_MAX_V2] in float
  // Mins(tmpClip, srcTensor, FP32_MAX_V2, calcCount);
  // Maxs(tmpClip, tmpClip, FP32_MIN_V2, calcCount);
  // // 2 * x
  // Muls(tmpClip, tmpClip, DOUBLE_X, calcCount);
  // // e^(2 * x)
  // Exp(tmpClip, tmpClip, calcCount);
  // // e^(2 * x) - 1
  // Adds(dstTensor, tmpClip, -1.0f, calcCount);
  // // e^(2 * x) + 1
  // Adds(tmpClip, tmpClip, 1.0f, calcCount);
  // Div(dstTensor, dstTensor, tmpClip, calcCount);
}

// Inner polynomial of the GELU tanh approximation:
//   tempTensorA = 1.5957691216057308 * (x + 0.044715 * x^3)
// 1.5957691216... numerically equals 2 * sqrt(2/pi), the usual GELU
// coefficient. tempTensorB is scratch; on return it holds x + 0.044715*x^3.
template <typename T>
__aicore__ inline void myGeluCalcTanhParams(const LocalTensor<T> &tempTensorA,
                                            const LocalTensor<T> &tempTensorB,
                                            const LocalTensor<T> &srcLocal,
                                            uint32_t calcCount) {
  const T coefficientsA = 0.044715;
  const T coefficientsB = 1.5957691216057308;
  // 1.5957691216057308 *( x + 0.044715*x^3)
  Mul(tempTensorA, srcLocal, srcLocal, calcCount);   // x^2
  Mul(tempTensorB, srcLocal, tempTensorA, calcCount); // x^3
  Muls(tempTensorA, tempTensorB, coefficientsA, calcCount); // 0.044715*x^3
  Add(tempTensorB, srcLocal, tempTensorA, calcCount); // x + 0.044715*x^3
  Muls(tempTensorA, tempTensorB, coefficientsB, calcCount); // scale
}

// tempTensorB = exp(min(tempTensorA, 0)).
// Clamping the exponent to <= 0 keeps Exp from overflowing for large
// positive inputs; this is the numerator factor of the stable sigmoid form
// used by myGelu.
template <typename T>
__aicore__ inline void
myGeluCalcYGreaterThanZero(const LocalTensor<T> &tempTensorA,
                           const LocalTensor<T> &tempTensorB,
                           uint32_t calcCount) {
  // exp(min(y, 0)) to avoid overflow, keep exp negative
  Mins(tempTensorB, tempTensorA, T(0), calcCount);
  Exp(tempTensorB, tempTensorB, calcCount);
}

// tempTensorA = x / (exp(-|y|) + 1), where y is the value passed in through
// tempTensorA (clobbered in place). Using -|y| keeps the Exp argument
// non-positive, so the denominator stays in [1, 2] and never overflows.
// tempTensorB is unused here (kept for signature symmetry with the sibling
// helpers).
template <typename T>
__aicore__ inline void
myGeluCalcYLessThanZero(const LocalTensor<T> &tempTensorA,
                        const LocalTensor<T> &tempTensorB,
                        const LocalTensor<T> &srcLocal, uint32_t calcCount) {
  // x / (exp^(-abs(y)) + 1)
  Abs(tempTensorA, tempTensorA, calcCount);
  Muls(tempTensorA, tempTensorA, T(-1), calcCount);
  Exp(tempTensorA, tempTensorA, calcCount);
  Adds(tempTensorA, tempTensorA, T(1), calcCount);
  Div(tempTensorA, srcLocal, tempTensorA, calcCount);
}

// GELU (tanh/sigmoid approximation), numerically stable for both signs:
//   y = 1.5957691 * (x + 0.044715 * x^3)
//   dst = [x / (exp(-|y|) + 1)] * exp(min(y, 0))
// Algebraically the product equals x * sigmoid(y); splitting it into the
// two factors above keeps every Exp argument <= 0 and avoids overflow.
// tempTensorA and tempTensorB are caller-provided scratch (clobbered).
template <typename T>
__aicore__ inline void
myGelu(const LocalTensor<T> &dstLocal, const LocalTensor<T> &srcLocal,
       const LocalTensor<T> &tempTensorA, const LocalTensor<T> &tempTensorB,
       uint32_t calcCount) {
  // y = (input_x + 0.044715 * input_x ^ 3) * 1.5957691
  // x / (exp^(-abs(y)) + 1) * exp(min(y, 0))

  // tempTensorA = 1.5957691216057308 *( x + 0.044715*x^3)
  myGeluCalcTanhParams(tempTensorA, tempTensorB, srcLocal, calcCount);

  // exp(min(y, 0)) to avoid overflow, keep exp negative
  // tempTensorB = exp(min(tempTensorA, 0))
  myGeluCalcYGreaterThanZero(tempTensorA, tempTensorB, calcCount);

  // tempTensorA = x / (exp^(-abs(y)) + 1)
  myGeluCalcYLessThanZero(tempTensorA, tempTensorB, srcLocal, calcCount);

  // x / (exp^(-abs(y)) + 1) * exp(min(y, 0))
  // dstLocal = tempTensorA * tempTensorB
  Mul(dstLocal, tempTensorA, tempTensorB, calcCount);
}

// GEGLU gate constants. Numerically geglu_a == -1.5957691216 * 0.044715 and
// geglu_b == 1 / 0.044715, so my_geglu's exponent a*x*(x^2 + b) equals
// -1.5957691216 * (x + 0.044715*x^3) — the same polynomial myGelu feeds to
// its sigmoid.
constexpr float geglu_a = -0.0713548162726;
constexpr float geglu_b = 2.2363860002236e1;
// GEGLU: y = x0 * gelu(x1) with the sigmoid form of the tanh approximation:
//   y = x0 * x1 / (1 + exp(geglu_a * x1 * (x1^2 + geglu_b)))
// which, by the constant identities noted above, is x0 * x1 * sigmoid(
// 1.5957691*(x1 + 0.044715*x1^3)). y is used as the single scratch/output.
// NOTE(review): the exponent is not clamped, so very negative arguments rely
// on Exp underflowing to 0 — confirm that is the hardware behavior expected.
template <typename T>
__aicore__ inline void my_geglu(LocalTensor<T> y, LocalTensor<T> x0,
                                LocalTensor<T> x1, uint32_t calcCount) {
  // y = x1^2
  Mul(y, x1, x1, calcCount);
  // y = y + b
  Adds(y, y, T(geglu_b), calcCount);
  // y = y * x1
  Mul(y, y, x1, calcCount);
  // y = y * a
  Muls(y, y, T(geglu_a), calcCount);
  // y = e^y
  Exp(y, y, calcCount);
  // y = y + 1
  Adds(y, y, T(1), calcCount);
  // y = x1 / y
  Div(y, x1, y, calcCount);
  // y = x0 * y
  Mul(y, y, x0, calcCount);
}

//  a ^ x = e ^ (x * lna)
// Element-wise power with a scalar exponent. dst may alias neither input's
// required precision concerns; dst is used as scratch throughout.
// NOTE(review): valid only for a > 0 element-wise — Ln of zero or negative
// inputs is undefined, so callers must guarantee positivity.
template <typename T>
__aicore__ void my_pow(LocalTensor<T> dst, LocalTensor<T> a, T x,
                       uint32_t calcCount) {
  Ln(dst, a, calcCount);
  Muls(dst, dst, x, calcCount);
  Exp(dst, dst, calcCount);
}

// dst = a ^ x for a non-negative integer exponent, by repeated element-wise
// multiplication (works for any sign of a, unlike my_pow's exp/ln route).
//   x <= 0 -> dst = 1   (a^0; negative exponents are not supported and are
//                        clamped to the identity)
//   x == 1 -> dst = a   (copy via multiply-by-one)
//   x >= 2 -> dst = a^2, then x-2 further multiplies.
// BUGFIX: the original unconditionally started from dst = a*a, returning
// a^2 for x == 0 and x == 1.
// NOTE: O(x) multiplies; fine for small exponents. Exponentiation by
// squaring would need an extra scratch tensor, so it is not done here.
template <typename T>
__aicore__ void my_pow_int(LocalTensor<T> dst, LocalTensor<T> a, int x,
                           uint32_t calcCount) {
  if (x <= 0) {
    Duplicate(dst, T(1), calcCount);
    return;
  }
  if (x == 1) {
    Muls(dst, a, T(1), calcCount); // dst = a
    return;
  }
  Mul(dst, a, a, calcCount); // dst = a^2
  for (auto i = 0; i < x - 2; ++i) {
    Mul(dst, dst, a, calcCount);
  }
}

// dst = 1 / x, element-wise.
// BUGFIX: the original initialized dst with Muls(dst, dst, 0) + Adds(+1),
// which READS uninitialized dst; if a lane happened to hold NaN or Inf,
// 0 * NaN/Inf == NaN and the +1/Div result stayed NaN. Duplicate writes the
// constant 1 without reading dst, so the result is well-defined regardless
// of dst's prior contents.
template <typename T>
__aicore__ void my_reciprocal(LocalTensor<T> dst, LocalTensor<T> x,
                              uint32_t calcCount) {
  Duplicate(dst, T(1), calcCount);
  Div(dst, dst, x, calcCount);
}

// Rational-approximation coefficient tables for single-precision erf/erfc,
// split by input range as annotated below. The values (and the hex bit
// patterns in the trailing comments) appear to match the fdlibm/newlib
// s_erff.c tables — TODO(review): confirm the provenance.
static const float halfFloat = 5.0000000000e-01, /* 0x3F000000 */
    /* c = (subfloat)0.84506291151 */
    erx = 8.4506291151e-01, /* 0x3f58560b */
    /*
     * Coefficients for approximation to  erf on [0,0.84375]
     */
    efx = 1.2837916613e-01,  /* 0x3e0375d4 */
    pp0 = 1.2837916613e-01,  /* 0x3e0375d4 */
    pp1 = -3.2504209876e-01, /* 0xbea66beb */
    pp2 = -2.8481749818e-02, /* 0xbce9528f */
    pp3 = -5.7702702470e-03, /* 0xbbbd1489 */
    pp4 = -2.3763017452e-05, /* 0xb7c756b1 */
    qq1 = 3.9791721106e-01,  /* 0x3ecbbbce */
    qq2 = 6.5022252500e-02,  /* 0x3d852a63 */
    qq3 = 5.0813062117e-03,  /* 0x3ba68116 */
    qq4 = 1.3249473704e-04,  /* 0x390aee49 */
    qq5 = -3.9602282413e-06, /* 0xb684e21a */
    /*
     * Coefficients for approximation to  erf  in [0.84375,1.25]
     */
    pa0 = -2.3621185683e-03, /* 0xbb1acdc6 */
    pa1 = 4.1485610604e-01,  /* 0x3ed46805 */
    pa2 = -3.7220788002e-01, /* 0xbebe9208 */
    pa3 = 3.1834661961e-01,  /* 0x3ea2fe54 */
    pa4 = -1.1089469492e-01, /* 0xbde31cc2 */
    pa5 = 3.5478305072e-02,  /* 0x3d1151b3 */
    pa6 = -2.1663755178e-03, /* 0xbb0df9c0 */
    qa1 = 1.0642088205e-01,  /* 0x3dd9f331 */
    qa2 = 5.4039794207e-01,  /* 0x3f0a5785 */
    qa3 = 7.1828655899e-02,  /* 0x3d931ae7 */
    qa4 = 1.2617121637e-01,  /* 0x3e013307 */
    qa5 = 1.3637083583e-02,  /* 0x3c5f6e13 */
    qa6 = 1.1984500103e-02,  /* 0x3c445aa3 */
    /*
     * Coefficients for approximation to  erfc in [1.25,1/0.35]
     */
    ra0 = -9.8649440333e-03, /* 0xbc21a093 */
    ra1 = -6.9385856390e-01, /* 0xbf31a0b7 */
    ra2 = -1.0558626175e+01, /* 0xc128f022 */
    ra3 = -6.2375331879e+01, /* 0xc2798057 */
    ra4 = -1.6239666748e+02, /* 0xc322658c */
    ra5 = -1.8460508728e+02, /* 0xc3389ae7 */
    ra6 = -8.1287437439e+01, /* 0xc2a2932b */
    ra7 = -9.8143291473e+00, /* 0xc11d077e */
    sa1 = 1.9651271820e+01,  /* 0x419d35ce */
    sa2 = 1.3765776062e+02,  /* 0x4309a863 */
    sa3 = 4.3456588745e+02,  /* 0x43d9486f */
    sa4 = 6.4538726807e+02,  /* 0x442158c9 */
    sa5 = 4.2900814819e+02,  /* 0x43d6810b */
    sa6 = 1.0863500214e+02,  /* 0x42d9451f */
    sa7 = 6.5702495575e+00,  /* 0x40d23f7c */
    sa8 = -6.0424413532e-02, /* 0xbd777f97 */
    /*
     * Coefficients for approximation to  erfc in [1/.35,28]
     */
    rb0 = -9.8649431020e-03, /* 0xbc21a092 */
    rb1 = -7.9928326607e-01, /* 0xbf4c9dd4 */
    rb2 = -1.7757955551e+01, /* 0xc18e104b */
    rb3 = -1.6063638306e+02, /* 0xc320a2ea */
    rb4 = -6.3756646729e+02, /* 0xc41f6441 */
    rb5 = -1.0250950928e+03, /* 0xc480230b */
    rb6 = -4.8351919556e+02, /* 0xc3f1c275 */
    sb1 = 3.0338060379e+01,  /* 0x41f2b459 */
    sb2 = 3.2579251099e+02,  /* 0x43a2e571 */
    sb3 = 1.5367296143e+03,  /* 0x44c01759 */
    sb4 = 3.1998581543e+03,  /* 0x4547fdbb */
    sb5 = 2.5530502930e+03,  /* 0x451f90ce */
    sb6 = 4.7452853394e+02,  /* 0x43ed43a7 */
    sb7 = -2.2440952301e+01; /* 0xc1b38712 */

// Masked-branch building blocks for branch-free vector code. Masks are
// uint8 byte views reinterpreted as uint16 lanes for the bitwise Or/Not
// ops; `bitCount` counts uint16 elements (cmpCount / 16).

// cmp = (x cmpMode condition); cmp2_16 = cmp | cmpSum — the lanes that
// either pass the "keep current value" test or were already finalized.
#define SIMPLE_IF_NOT(x, cmpMode, condition, cmp, cmp16, cmp2_16, cmpSum16,    \
                      cmpCount, bitCount)                                      \
  Compare(cmp, x, condition, cmpMode, cmpCount);                               \
  Or(cmp2_16, cmp16, cmpSum16, bitCount);

// Masked merge: y = cmp2 ? y_from : value. Done as two complementary
// Selects against 0 plus an Add, because Select only takes a scalar
// alternative in VSEL_TENSOR_SCALAR_MODE. Clobbers cmp2_16 (inverted) and
// value_tmp.
#define SIMPLE_SELECT_TENSOR(y, y_from, value, value_tmp, cmp2, cmp2_16,       \
                             bitCount, calcCount)                              \
  Select(y, cmp2, y_from, 0.0f, SELMODE::VSEL_TENSOR_SCALAR_MODE, calcCount);  \
  Not(cmp2_16, cmp2_16, bitCount);                                             \
  Select(value_tmp, cmp2, value, 0.0f, SELMODE::VSEL_TENSOR_SCALAR_MODE,       \
         calcCount);                                                           \
  Add(y, y, value_tmp, calcCount);

// Fold the just-handled lanes into the running "finalized" mask.
#define SIMPLE_SAVE_CMP_STATE(cmpSum16, cmp2_16, bitCount)                     \
  Or(cmpSum16, cmpSum16, cmp2_16, bitCount);

// FIX_* aliases bind the SIMPLE_* helpers to the conventional local names
// (cmp, cmp16, cmp2_16, cmpSum16, cmpCount, calcCount, bitCount) declared
// in the enclosing function (see my_erf below). The *_CHANGE_* variants
// only make an explicitly overridden name visible at the call site — the
// expansion is identical.
#define FIX_IF_NOT(x, cmpMode, condition)                                      \
  SIMPLE_IF_NOT(x, cmpMode, condition, cmp, cmp16, cmp2_16, cmpSum16,          \
                cmpCount, bitCount)
#define FIX_IF_NOT_CHANGE_SUM(x, cmpMode, condition, cmpSum16)                 \
  SIMPLE_IF_NOT(x, cmpMode, condition, cmp, cmp16, cmp2_16, cmpSum16,          \
                cmpCount, bitCount)
#define FIX_IF_NOT_CHANGE_TEMP(x, cmpMode, condition, cmp2_16)                 \
  SIMPLE_IF_NOT(x, cmpMode, condition, cmp, cmp16, cmp2_16, cmpSum16,          \
                cmpCount, bitCount)
#define FIX_IF_NOT_CHANGE_TEMP_AND_SUM(x, cmpMode, condition, cmp2_16,         \
                                       cmpSum16)                               \
  SIMPLE_IF_NOT(x, cmpMode, condition, cmp, cmp16, cmp2_16, cmpSum16,          \
                cmpCount, bitCount)
#define FIX_SELECT_TENSOR(y, y_from, value, value_tmp)                         \
  SIMPLE_SELECT_TENSOR(y, y_from, value, value_tmp, cmp2, cmp2_16, bitCount,   \
                       calcCount)
#define FIX_SELECT_TENSOR_CHANGE_TEMP(y, y_from, value, value_tmp, cmp2,       \
                                      cmp2_16)                                 \
  SIMPLE_SELECT_TENSOR(y, y_from, value, value_tmp, cmp2, cmp2_16, bitCount,   \
                       calcCount)

#define FIX_SAVE_CMP_STATE(to, from) SIMPLE_SAVE_CMP_STATE(to, from, bitCount)

// Fused "if-not-then-assign" with a TENSOR replacement value:
//   where (x cmpMode condition) fails and the lane is not yet finalized,
//   y = value (a tensor); elsewhere y = y_from. Handled lanes are folded
//   into cmpSum16. NOTE: `value` is clobbered (masked in place).
#define SIMPLE_IF_NOT_EQUAL_TENSOR(y, y_from, x, cmpMode, condition, value,    \
                                   cmp, cmp16, cmpSum16, cmp2, cmp2_16,        \
                                   cmpCount, calcCount, bitCount)              \
  Compare(cmp, x, condition, cmpMode, cmpCount);                               \
  Or(cmp2_16, cmp16, cmpSum16, bitCount);                                      \
  Select(y, cmp2, y_from, 0.0f, SELMODE::VSEL_TENSOR_SCALAR_MODE, calcCount);  \
  Not(cmp2_16, cmp2_16, bitCount);                                             \
  Select(value, cmp2, value, 0.0f, SELMODE::VSEL_TENSOR_SCALAR_MODE,           \
         calcCount);                                                           \
  Add(y, y, value, calcCount);                                                 \
  Or(cmpSum16, cmpSum16, cmp2_16, bitCount);

// Same, but `value` is a SCALAR immediate fed straight to Select.
#define SIMPLE_IF_NOT_EQUAL_SCALAR(y, y_from, x, cmpMode, condition, value,    \
                                   cmp, cmp16, cmpSum16, cmp2, cmp2_16,        \
                                   cmpCount, calcCount, bitCount)              \
  Compare(cmp, x, condition, cmpMode, cmpCount);                               \
  Or(cmp2_16, cmp16, cmpSum16, bitCount);                                      \
  Select(y, cmp2, y_from, value, SELMODE::VSEL_TENSOR_SCALAR_MODE, calcCount); \
  Not(cmp2_16, cmp2_16, bitCount);                                             \
  Or(cmpSum16, cmpSum16, cmp2_16, bitCount);

// Mask-only variant: records (x cmpMode condition) into cmpSum16 without
// touching y. The y/value/cmp2/cmp2_16 parameters are accepted but unused.
#define SIMPLE_IF_NO_CHANGE(y, x, cmpMode, condition, value, cmp, cmp16,       \
                            cmpSum16, cmp2, cmp2_16, cmpCount, calcCount,      \
                            bitCount)                                          \
  Compare(cmp, x, condition, cmpMode, cmpCount);                               \
  Or(cmpSum16, cmpSum16, cmp16, bitCount);

// BUGFIX: FIXED_IFNOT_EQUAL_TENSOR previously expanded to
// SIMPLE_IF_NOT_EQUAL_SCALAR (copy-paste error), so a tensor `value`
// argument was forwarded to a Select call that expects a scalar immediate.
// Route it to the TENSOR variant its name promises.
#define FIXED_IFNOT_EQUAL_TENSOR(y, y_from, x, cmpMode, condition, value)      \
  SIMPLE_IF_NOT_EQUAL_TENSOR(y, y_from, x, cmpMode, condition, value, cmp,     \
                             cmp16, cmpSum16, cmp2, cmp2_16, cmpCount,         \
                             calcCount, bitCount)

// Scalar variant: `value` is an immediate consumed directly by Select.
#define FIXED_IFNOT_EQUAL_SCALAR(y, y_from, x, cmpMode, condition, value)      \
  SIMPLE_IF_NOT_EQUAL_SCALAR(y, y_from, x, cmpMode, condition, value, cmp,     \
                             cmp16, cmpSum16, cmp2, cmp2_16, cmpCount,         \
                             calcCount, bitCount)
// Mask-only: ORs the compare result into the done mask; y/value are unused.
#define FIXED_IF_NO_CHANGE(y, x, cmpMode, condition, value)                    \
  SIMPLE_IF_NO_CHANGE(y, x, cmpMode, condition, value, cmp, cmp16, cmpSum16,   \
                      cmp2, cmp2_16, cmpCount, calcCount, bitCount)

// Debug dump of the four mask tensors and the first 8 lanes of y, labelled
// with `desc`. Relies on cmp/cmp2/cmp3/cmpSum/y being in scope at the call
// site (as in my_erf below).
#define PRINT_STATE(desc)                                                      \
  print("----------%s---------\n", desc);                                      \
  print_tensor("cmp", cmp, 1, "%x");                                           \
  print_tensor("cmp2", cmp2, 1, "%x");                                         \
  print_tensor("cmp3", cmp3, 1, "%x");                                         \
  print_tensor("cmpSum", cmpSum, 1, "%x");                                     \
  print_tensor("y", y, 8, "%f");                                               \
  print("-----------------------\n");

// NOTE(review): empty marker macro; purpose unclear — looks like a leftover
// feature toggle. Confirm it is intentional before removing.
#define FIXED_NOT
/**
 * Elementwise single-precision error function erf(x), a vectorized port of
 * the fdlibm/Sun s_erff.c algorithm onto AscendC tensor primitives.
 *
 * Every branch of the scalar algorithm is evaluated unconditionally over all
 * lanes; per-lane dispatch is done with compare bit masks through the FIX_* /
 * FIXED_* macros, with `cmpSum` accumulating the lanes already resolved by an
 * earlier branch.
 *
 * Branch ladder (mirroring fdlibm): NaN; +/-inf -> +/-1; |x| < 1.5046328e-36
 * (scaled tiny path); |x| < 2^-28 (x + efx*x); |x| < 0.84375 (rational
 * pp/qq); 0.84375 <= |x| < 1.25 (erx + P/Q with pa/qa); saturation to
 * +/-(one - tiny) for |x| > 6; 1.25 <= |x| < 1/0.35 (exp-based ra/sa tail);
 * |x| >= 1/0.35 (rb/sb tail).
 *
 * @param y         output tensor: erf(x) per element.
 * @param x         input tensor; CLOBBERED (reused as |y|, x*x, 1/(x*x), ...).
 * @param cmpSum    mask scratch accumulating the already-handled lanes.
 * @param cmp, cmp2, cmp3  compare-mask scratch buffers.
 * @param temp1..temp5     float scratch buffers.
 * @param calcCount        number of valid elements.
 *
 * Constants (efx, erx, one, tiny, infFloat, ninfFloat and the pp/qq, pa/qa,
 * ra/sa, rb/sb tables) come from op_common.h and mirror fdlibm's tables.
 */
__aicore__ inline void
my_erf(LocalTensor<float> y, LocalTensor<float> x, LocalTensor<uint8_t> cmpSum,
       LocalTensor<uint8_t> cmp, LocalTensor<uint8_t> cmp2,
       LocalTensor<uint8_t> cmp3, LocalTensor<float> temp1,
       LocalTensor<float> temp2, LocalTensor<float> temp3,
       LocalTensor<float> temp4, LocalTensor<float> temp5, uint32_t calcCount) {
  const auto cmpCount = ALIGN_TO(calcCount, elementsPerRepeat<float>());
  const auto bitCount = cmpCount / 16;
  // Reinterpreted views. Several are not referenced directly in this body
  // but are presumably captured by name inside the FIX_* macro expansions —
  // do not remove without checking those macros.
  auto temp1_uint8 = temp1.ReinterpretCast<uint8_t>();
  auto temp1_uint16 = temp1.ReinterpretCast<uint16_t>();
  auto temp1_int32 = temp1.ReinterpretCast<int32_t>();
  auto temp2_uint8 = temp2.ReinterpretCast<uint8_t>();
  auto temp2_uint16 = temp2.ReinterpretCast<uint16_t>();
  auto temp2_int32 = temp2.ReinterpretCast<int32_t>();
  auto cmpSum16 = cmpSum.ReinterpretCast<uint16_t>();
  auto cmp16 = cmp.ReinterpretCast<uint16_t>();
  auto cmp2_16 = cmp2.ReinterpretCast<uint16_t>();
  auto cmp3_16 = cmp3.ReinterpretCast<uint16_t>();

  // NaN detection: NaN == NaN is false, so EQ marks the non-NaN lanes and
  // the Not leaves exactly the NaN lanes set in cmpSum (they propagate
  // unchanged through the remaining branches).
  Compare(cmpSum, x, x, CMPMODE::EQ, cmpCount);
  Not(cmpSum16, cmpSum16, bitCount);

  /* erf(+-inf)=+-1 */
  // +inf lanes -> 1.0f
  tensorSet(temp2, infFloat, calcCount);
  FIXED_IFNOT_EQUAL_SCALAR(y, x, x, CMPMODE::NE, temp2, 1.0f);

  // -inf lanes -> -1.0f
  tensorSet(temp2, ninfFloat, calcCount);
  FIXED_IFNOT_EQUAL_SCALAR(y, y, x, CMPMODE::NE, temp2, -1.0f);

  // |x| < 1.5046328e-36 (0x04000000): fdlibm's scaled evaluation that avoids
  // intermediate underflow for denormal-range inputs.
  Abs(x, y, calcCount);
  tensorSet(temp2, 1.5046328e-36f, calcCount);
  FIX_IF_NOT(x, CMPMODE::GE, temp2);
  // y = 0.0625f * (16.0f * x + (16.0f * efx) * x);
  Muls(temp1, y, 16.0f * efx, calcCount);
  Muls(temp2, y, 16.0f, calcCount);
  Add(temp1, temp1, temp2, calcCount);
  // FIX: scale the accumulated sum. The previous code was
  // Muls(temp1, y, 0.0625f, calcCount), which discarded the just-computed
  // 16*x + 16*efx*x term and returned 0.0625*x instead of x + efx*x.
  Muls(temp1, temp1, 0.0625f, calcCount);
  FIX_SELECT_TENSOR(y, y, temp1, temp4);
  FIX_SAVE_CMP_STATE(cmpSum16, cmp2_16);

  /* |x| < 2**-28 */
  tensorSet(temp2, 3.725290298461914e-09f, calcCount);
  // y = x + efx * x;
  Muls(temp1, y, efx, calcCount);
  Add(temp1, temp1, y, calcCount);
  FIXED_IFNOT_EQUAL_TENSOR(y, y, x, CMPMODE::GE, temp2, temp1);
  FIX_SAVE_CMP_STATE(cmpSum16, cmp2_16);

  /* |x| < 0.84375: rational approximation x + x * (pp/qq)(x*x) */
  tensorSet(temp2, 0.84375f, calcCount);
  FIX_IF_NOT(x, CMPMODE::GE, temp2);
  // z = x * x;
  Mul(x, y, y, calcCount);
  // r = pp0 + z * (pp1 + z * (pp2 + z * (pp3 + z * pp4)));  (Horner)
  Muls(temp1, x, pp4, calcCount);
  Adds(temp1, temp1, pp3, calcCount);
  Mul(temp1, temp1, x, calcCount);
  Adds(temp1, temp1, pp2, calcCount);
  Mul(temp1, temp1, x, calcCount);
  Adds(temp1, temp1, pp1, calcCount);
  Mul(temp1, temp1, x, calcCount);
  Adds(temp1, temp1, pp0, calcCount);
  // s = one + z * (qq1 + z * (qq2 + z * (qq3 + z * (qq4 + z * qq5))));
  Muls(temp2, x, qq5, calcCount);
  Adds(temp2, temp2, qq4, calcCount);
  Mul(temp2, temp2, x, calcCount);
  Adds(temp2, temp2, qq3, calcCount);
  Mul(temp2, temp2, x, calcCount);
  Adds(temp2, temp2, qq2, calcCount);
  Mul(temp2, temp2, x, calcCount);
  Adds(temp2, temp2, qq1, calcCount);
  Mul(temp2, temp2, x, calcCount);
  Adds(temp2, temp2, one, calcCount);
  // y = r / s;
  Div(temp1, temp1, temp2, calcCount);
  // return x + x * y;
  Mul(temp1, temp1, y, calcCount);
  Add(temp1, temp1, y, calcCount);
  FIX_SELECT_TENSOR(y, y, temp1, temp4);
  FIX_SAVE_CMP_STATE(cmpSum16, cmp2_16);

  /* 0.84375 <= |x| < 1.25: erx + P/Q in s = |x| - 1 */
  // s = fabsf(x) - one;
  Abs(x, y, calcCount);
  tensorSet(temp2, 1.25f, calcCount);
  FIX_IF_NOT_CHANGE_TEMP(x, CMPMODE::GE, temp2, cmp3_16);
  Adds(temp3, x, -1.0f, calcCount);
  // P=pa0+s*(pa1+s*(pa2+s*(pa3+s*(pa4+s*(pa5+s*pa6)))));
  Muls(temp1, temp3, pa6, calcCount);
  Adds(temp1, temp1, pa5, calcCount);
  Mul(temp1, temp1, temp3, calcCount);
  Adds(temp1, temp1, pa4, calcCount);
  Mul(temp1, temp1, temp3, calcCount);
  Adds(temp1, temp1, pa3, calcCount);
  Mul(temp1, temp1, temp3, calcCount);
  Adds(temp1, temp1, pa2, calcCount);
  Mul(temp1, temp1, temp3, calcCount);
  Adds(temp1, temp1, pa1, calcCount);
  Mul(temp1, temp1, temp3, calcCount);
  Adds(temp1, temp1, pa0, calcCount);
  // Q=one+s*(qa1+s*(qa2+s*(qa3+s*(qa4+s*(qa5+s*qa6)))));
  Muls(temp2, temp3, qa6, calcCount);
  Adds(temp2, temp2, qa5, calcCount);
  Mul(temp2, temp2, temp3, calcCount);
  Adds(temp2, temp2, qa4, calcCount);
  Mul(temp2, temp2, temp3, calcCount);
  Adds(temp2, temp2, qa3, calcCount);
  Mul(temp2, temp2, temp3, calcCount);
  Adds(temp2, temp2, qa2, calcCount);
  Mul(temp2, temp2, temp3, calcCount);
  Adds(temp2, temp2, qa1, calcCount);
  Mul(temp2, temp2, temp3, calcCount);
  Adds(temp2, temp2, one, calcCount);
  // erx + P / Q;
  Div(temp1, temp1, temp2, calcCount);
  Adds(temp1, temp1, erx, calcCount);
  //   if (hx >= 0)
  //     return erx + P / Q;
  //   else
  //     return -erx - P / Q;
  // Sign dispatch: positive lanes get temp1, negative lanes get -temp1.
  tensorSet(temp2, 0.0f, calcCount);
  FIX_IF_NOT_CHANGE_SUM(y, CMPMODE::LT, temp2, cmp3_16);
  FIX_SELECT_TENSOR(y, y, temp1, temp4);
  Not(cmp16, cmp16, bitCount);
  Or(cmp2_16, cmp3_16, cmp16, bitCount);
  Muls(temp1, temp1, -1.0f, calcCount);
  FIX_SELECT_TENSOR(y, y, temp1, temp4);
  Not(cmp3_16, cmp3_16, bitCount);
  FIX_SAVE_CMP_STATE(cmpSum16, cmp3_16);
  // x > 6.0 saturates to 1 (one - tiny, rounding up to 1.0f)
  tensorSet(temp2, 6.0f, calcCount);
  FIXED_IFNOT_EQUAL_SCALAR(y, y, y, CMPMODE::LE, temp2, one - tiny);

  // x < -6.0 saturates to -1 (tiny - one)
  tensorSet(temp2, -6.0f, calcCount);
  FIXED_IFNOT_EQUAL_SCALAR(y, y, y, CMPMODE::GE, temp2, tiny - one);

  /* |x| < 1/0.35: tail via 1 - exp(-z*z - 0.5625) * exp((z-x)(z+x) + R/S) / x */
  Abs(x, y, calcCount);
  tensorSet(temp2, 2.857143f, calcCount);
  FIX_IF_NOT_CHANGE_TEMP(x, CMPMODE::GE, temp2, cmp3_16);

  // s = one / (x * x);
  Mul(temp1, x, x, calcCount);
  tensorSet(temp2, 1.0f, calcCount);
  Div(x, temp2, temp1, calcCount);
  // R = ra0 + s * (ra1 + s * (ra2 + s * (ra3 + s * (ra4 + s * (ra5 + s * (ra6 +
  // s * ra7))))));
  Muls(temp1, x, ra7, calcCount);
  Adds(temp1, temp1, ra6, calcCount);
  Mul(temp1, temp1, x, calcCount);
  Adds(temp1, temp1, ra5, calcCount);
  Mul(temp1, temp1, x, calcCount);
  Adds(temp1, temp1, ra4, calcCount);
  Mul(temp1, temp1, x, calcCount);
  Adds(temp1, temp1, ra3, calcCount);
  Mul(temp1, temp1, x, calcCount);
  Adds(temp1, temp1, ra2, calcCount);
  Mul(temp1, temp1, x, calcCount);
  Adds(temp1, temp1, ra1, calcCount);
  Mul(temp1, temp1, x, calcCount);
  Adds(temp1, temp1, ra0, calcCount);
  // S = one + s * (sa1 + s * (sa2 + s * (sa3 + s * (sa4 + s * (sa5 + s * (sa6 +
  // s * (sa7 + s * sa8)))))));
  Muls(temp2, x, sa8, calcCount);
  Adds(temp2, temp2, sa7, calcCount);
  Mul(temp2, temp2, x, calcCount);
  Adds(temp2, temp2, sa6, calcCount);
  Mul(temp2, temp2, x, calcCount);
  Adds(temp2, temp2, sa5, calcCount);
  Mul(temp2, temp2, x, calcCount);
  Adds(temp2, temp2, sa4, calcCount);
  Mul(temp2, temp2, x, calcCount);
  Adds(temp2, temp2, sa3, calcCount);
  Mul(temp2, temp2, x, calcCount);
  Adds(temp2, temp2, sa2, calcCount);
  Mul(temp2, temp2, x, calcCount);
  Adds(temp2, temp2, sa1, calcCount);
  Mul(temp2, temp2, x, calcCount);
  Adds(temp2, temp2, one, calcCount);
  // GET_FLOAT_WORD(ix, x);
  // SET_FLOAT_WORD(z, ix & 0xfffff000);
  // z = |x| with the low 12 mantissa bits cleared (fdlibm's split for the
  // exact -z*z term).
  Div(temp1, temp1, temp2, calcCount);
  Abs(x, y, calcCount);
  auto x_int32 = x.ReinterpretCast<int32_t>();
  auto z_int32 = temp2.ReinterpretCast<int32_t>();
  auto z = temp2;
  for (uint32_t i = 0; i < calcCount; ++i) {
    z_int32(i) = x_int32(i) & int32_t(0xfffff000);
  }
  // r = __ieee754_expf(-z * z - (float)0.5625) *
  //     __ieee754_expf((z - x) * (z + x) + R / S);
  Mul(temp3, z, z, calcCount);
  Adds(temp3, temp3, 0.5625f, calcCount);
  Muls(temp3, temp3, -1.0f, calcCount);
  Exp(temp3, temp3, calcCount);

  Add(temp5, x, y, calcCount);
  Sub(temp4, x, y, calcCount);
  Mul(temp5, temp5, temp4, calcCount);
  Add(temp5, temp5, temp1, calcCount);
  Exp(temp5, temp5, calcCount);
  // temp4 = r
  Mul(temp4, temp5, temp3, calcCount);
  // temp5 = r / x
  Div(temp5, temp4, x, calcCount);
  // if x > 0
  tensorSet(temp2, 0.0f, calcCount);
  FIX_IF_NOT_CHANGE_SUM(y, CMPMODE::LT, temp2, cmp3_16);
  tensorSet(temp2, 1.0f, calcCount);
  // return one - r / x;
  Sub(temp1, temp2, temp5, calcCount);
  FIX_SELECT_TENSOR(y, y, temp1, temp4);
  // return r / x - one;
  Not(cmp16, cmp16, bitCount);
  Or(cmp2_16, cmp3_16, cmp16, bitCount);
  Sub(temp1, temp5, temp2, calcCount);
  FIX_SELECT_TENSOR(y, y, temp1, temp4);
  Not(cmp3_16, cmp3_16, bitCount);
  FIX_SAVE_CMP_STATE(cmpSum16, cmp3_16);

  /* |x| >= 1/0.35 (2.857142857142857): same tail with the rb/sb tables */
  Abs(x, y, calcCount);
  // s = one / (x * x);
  Mul(temp1, x, x, calcCount);
  tensorSet(temp2, 1.0f, calcCount);
  Div(x, temp2, temp1, calcCount);
  // R = rb0 + s * (rb1 + s * (rb2 + s * (rb3 + s * (rb4 + s * (rb5 + s *
  // rb6)))));
  Muls(temp1, x, rb6, calcCount);
  Adds(temp1, temp1, rb5, calcCount);
  Mul(temp1, temp1, x, calcCount);
  Adds(temp1, temp1, rb4, calcCount);
  Mul(temp1, temp1, x, calcCount);
  Adds(temp1, temp1, rb3, calcCount);
  Mul(temp1, temp1, x, calcCount);
  Adds(temp1, temp1, rb2, calcCount);
  Mul(temp1, temp1, x, calcCount);
  Adds(temp1, temp1, rb1, calcCount);
  Mul(temp1, temp1, x, calcCount);
  Adds(temp1, temp1, rb0, calcCount);
  // S = one + s * (sb1 + s * (sb2 +  s * (sb3 + s * (sb4 + s * (sb5 + s * (sb6
  // + s * sb7))))));
  Muls(temp2, x, sb7, calcCount);
  Adds(temp2, temp2, sb6, calcCount);
  Mul(temp2, temp2, x, calcCount);
  Adds(temp2, temp2, sb5, calcCount);
  Mul(temp2, temp2, x, calcCount);
  Adds(temp2, temp2, sb4, calcCount);
  Mul(temp2, temp2, x, calcCount);
  Adds(temp2, temp2, sb3, calcCount);
  Mul(temp2, temp2, x, calcCount);
  Adds(temp2, temp2, sb2, calcCount);
  Mul(temp2, temp2, x, calcCount);
  Adds(temp2, temp2, sb1, calcCount);
  Mul(temp2, temp2, x, calcCount);
  Adds(temp2, temp2, one, calcCount);
  // GET_FLOAT_WORD(ix, x);
  // SET_FLOAT_WORD(z, ix & 0xfffff000);
  Div(temp1, temp1, temp2, calcCount);
  Abs(x, y, calcCount);
  for (uint32_t i = 0; i < calcCount; ++i) {
    z_int32(i) = x_int32(i) & int32_t(0xfffff000);
  }
  // r = __ieee754_expf(-z * z - (float)0.5625) *
  //     __ieee754_expf((z - x) * (z + x) + R / S);
  Mul(temp3, z, z, calcCount);
  Adds(temp3, temp3, 0.5625f, calcCount);
  Muls(temp3, temp3, -1.0f, calcCount);
  Exp(temp3, temp3, calcCount);

  Add(temp5, x, y, calcCount);
  Sub(temp4, x, y, calcCount);
  Mul(temp5, temp5, temp4, calcCount);
  Add(temp5, temp5, temp1, calcCount);
  Exp(temp5, temp5, calcCount);
  // temp4 = r
  Mul(temp4, temp5, temp3, calcCount);
  // temp5 = r / x
  Div(temp5, temp4, x, calcCount);
  // if x > 0
  tensorSet(temp2, 0.0f, calcCount);
  FIX_IF_NOT(y, CMPMODE::LT, temp2);
  tensorSet(temp2, 1.0f, calcCount);
  // return one - r / x;
  Sub(temp1, temp2, temp5, calcCount);
  FIX_SELECT_TENSOR(y, y, temp1, temp4);
  // return r / x - one;
  Not(cmp16, cmp16, bitCount);
  // NOTE(review): the parallel branch above ORs cmp3_16 here; this final
  // branch ORs cmpSum16 instead — confirm this is intentional (it is the
  // last branch, so cmpSum is not saved afterwards).
  Or(cmp2_16, cmpSum16, cmp16, bitCount);
  Sub(temp1, temp5, temp2, calcCount);
  FIX_SELECT_TENSOR(y, y, temp1, temp4);
  // FIX_SAVE_CMP_STATE(cmpSum16, cmp2_16);
}

/**
 * Elementwise erf(x) via the Abramowitz & Stegun 7.1.26 rational
 * approximation: erf(x) ~= 1 - (a1*t + a2*t^2 + ... + a5*t^5) * exp(-x*x),
 * t = 1/(1 + p*|x|), with the sign restored at the end (erf is odd).
 *
 * @param y      output tensor: erf(x) per element.
 * @param x      input tensor; read-only (used once at the end for the sign).
 * @param temp1, temp2  float scratch buffers.
 * @param t0     unused by the live code (referenced only in the
 *               commented-out tail below) — TODO confirm and drop.
 * @param t1     assumed to be pre-filled with 1.0f: it serves as a "ones"
 *               tensor for the 1/(...) and 1-(...) steps — verify caller.
 * @param calcCount  number of valid elements.
 *
 * NOTE(review): the sign is recovered as |x|/x, which is 0/0 = NaN for
 * x == 0 (true erf(0) = 0). Also exp(+x*x) is computed and then inverted,
 * so it overflows to inf for |x| >~ 9.3 (the quotient still goes to 0) —
 * confirm both are acceptable for the expected input domain.
 */
__aicore__ inline void my_erf2(LocalTensor<float> y, LocalTensor<float> x,
                               LocalTensor<float> temp1,
                               LocalTensor<float> temp2, LocalTensor<float> t0,
                               LocalTensor<float> t1, uint32_t calcCount) {
  // Abramowitz & Stegun 7.1.26 polynomial coefficients.
  constexpr float a1 = 0.254829592;
  constexpr float a2 = -0.284496736;
  constexpr float a3 = 1.421413741;
  constexpr float a4 = -1.453152027;
  constexpr float a5 = 1.061405429;
  constexpr float p = 0.3275911;
  // int sign = 1;
  // if (x < 0) sign = -1;
  // x = fabs(x);
  // temp2 = |x|; the approximation is valid for non-negative arguments.
  Abs(temp2, x, calcCount);
  // float t = 1.0 / (1.0 + p * x);   (y holds t; t1 assumed == 1)
  Muls(y, temp2, p, calcCount);
  Adds(y, y, 1.0f, calcCount);
  Div(y, t1, y, calcCount);
  // float y = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t *
  // exp(-x * x);
  // Horner evaluation of the degree-5 polynomial in t (result in temp1).
  Muls(temp1, y, a5, calcCount);
  Adds(temp1, temp1, a4, calcCount);
  Mul(temp1, temp1, y, calcCount);
  Adds(temp1, temp1, a3, calcCount);
  Mul(temp1, temp1, y, calcCount);
  Adds(temp1, temp1, a2, calcCount);
  Mul(temp1, temp1, y, calcCount);
  Adds(temp1, temp1, a1, calcCount);
  Mul(temp1, temp1, y, calcCount);

  // exp(-x*x) computed as 1/exp(x*x)  (t1 assumed == 1; see overflow note).
  Mul(y, temp2, temp2, calcCount);
  Exp(y, y, calcCount);
  Div(y, t1, y, calcCount);

  Mul(temp1, temp1, y, calcCount);
  // + value
  // erf(|x|) = 1 - poly * exp(-x*x)
  Sub(y, t1, temp1, calcCount);
  // Restore the sign: |x|/x = +-1 (NaN at x == 0, see note above).
  Div(temp2, temp2, x, calcCount);
  Mul(y, y, temp2, calcCount);

  // auto temp2_uint8 = temp2.ReinterpretCast<uint8_t>();
  // auto temp2_uint16 = temp2.ReinterpretCast<uint16_t>();
  // Compare(temp2_uint8, x, t0, CMPMODE::GE, cmpCount);
  // Select(y, temp2_uint8, y, 0.0f, SELMODE::VSEL_TENSOR_SCALAR_MODE,
  // calcCount); Not(temp2_uint16, temp2_uint16, bitCount); Select(temp1,
  // temp2_uint8, temp1, 0.0f, SELMODE::VSEL_TENSOR_SCALAR_MODE,
  //        calcCount);
  // Add(y, y, temp1, calcCount);
  // return sign * y;
}
} // namespace AscendC
