#pragma once
#include "kernel_operator.h"
#include <cstdint>
namespace AscendC {
#define BUFFER_NUM 2
#define ALIGN_SIZE 32
#define CAST_GM_ADDR(dtype, gm_addr, offset)                                   \
  (reinterpret_cast<__gm__ dtype *>(gm_addr) + offset)
#define MY_SCALAR_REINTERPRET_CAST(T, from) (*((T *)(&from)))

constexpr int64_t fp32AlignSize = 32 / sizeof(float);

// NaN：符号位可以是 0 或 1，指数位全为 1，尾数位不全为
// 0。根据尾数位的最高位，可以区分信号 NaN（signaling NaN, sNaN）和静默
// NaN（quiet NaN, qNaN）。
static const int32_t nanInt = 0X7F800001;
static float nanFloat = *((float *)(&nanInt));

// 无穷大（INF）：
// 正无穷大：符号位为 0，指数位全为 1，尾数位全为 0。
// 负无穷大：符号位为 1，指数位全为 1，尾数位全为 0。
static const int32_t infInt = 0x7f800000;  // 正无穷
static const int32_t ninfInt = 0xff800000; // 负无穷
static const float infFloat = *((float *)(&infInt));
static const float ninfFloat = *((float *)(&ninfInt));
static const int16_t infInt16 = 0b0111110000000000;
static const int16_t ninfInt16 = 0b1111110000000000;
static const half infHalf = *((half *)(&infInt16));
static const half ninfHalf = *((half *)(&ninfInt16));
static const float oneFloat = 1.0f;
static const int32_t oneFloatInt = *((int32_t *)(&oneFloat));
static const half oneHalf = 1.0f;
static const int16_t oneHalfInt = *((int16_t *)(&oneFloat));
static const float zeroFloat = 0.0f;
static const int32_t zeroFloatInt = *((int32_t *)(&zeroFloat));
static const half zeroHalf = 0.0f;
static const int16_t zeroHalfInt = *((int16_t *)(&oneFloat));

// Name concatenation: derive the conventional local/global/queue/buffer
// identifiers for a logical tensor `varName`.
#define LTensorName(varName) localTensor##varName
#define LTensor(varName) LTensorName(varName)
#define GTensorName(varName) globalTensor##varName
#define GTensor(varName) GTensorName(varName)
#define QueName(varName) varName##_queue
#define TypeOf(varName) _DT_##varName
#define TBufName(varName) TBuf##varName
#define TBufTensorName(varName) TBufTensor##varName
#define BTensor(varName) TBufTensorName(varName)

// Member declarations: global tensors, (double/single-buffered) queues, TBufs.
#define DefGlobalTensor(varName)                                               \
  GlobalTensor<TypeOf(varName)> GTensorName(varName);
#define DefInQue(varName) TQue<TPosition::VECIN, BUFFER_NUM> QueName(varName);
#define DefInQue1(varName) TQue<TPosition::VECIN, 1> QueName(varName);
#define DefOutQue(varName) TQue<TPosition::VECOUT, BUFFER_NUM> QueName(varName);
#define DefOutQue1(varName) TQue<TPosition::VECOUT, 1> QueName(varName);
#define DefCalcQue(varName)                                                    \
  TQue<TPosition::VECCALC, BUFFER_NUM> QueName(varName);
#define DefBufVECCALC(varName) TBuf<TPosition::VECCALC> TBufName(varName);
#define DefBufVECIN(varName) TBuf<TPosition::VECIN> TBufName(varName);
// Bug fix: previously declared a VECIN TBuf (copy-paste from DefBufVECIN).
#define DefBufVECOUT(varName) TBuf<TPosition::VECOUT> TBufName(varName);
#define DefInTensor(varName)                                                   \
  DefGlobalTensor(varName);                                                    \
  DefInQue(varName);
#define DefOutTensor(varName)                                                  \
  DefGlobalTensor(varName);                                                    \
  DefOutQue(varName);

// -- Initialization ----------------------------------------------------------
// Bind varName's global tensor to GM address `src` at element offset
// `start_idx`, spanning `blockLength` elements.
#define GSetBuffer(varName, src, start_idx, blockLength)                       \
  GTensorName(varName).SetGlobalBuffer(                                        \
      CAST_GM_ADDR(TypeOf(varName), src, start_idx), blockLength);
// Allocate varName's queue from `pipe` (expected in scope); tileLength is in
// bytes.
#define InitQueue(varName, tileLength)                                         \
  pipe.InitBuffer(QueName(varName), BUFFER_NUM, tileLength);
// As InitQueue, but tileLength is an element count of varName's dtype.
#define InitQueueSimple(varName, tileLength)                                   \
  InitQueue(varName, tileLength * sizeof(TypeOf(varName)));
// Allocate varName's TBuf, rounding tileLength (bytes) up to a 32B multiple.
#define InitTBufBuffer(varName, tileLength)                                    \
  pipe.InitBuffer(TBufName(varName), (tileLength + 31) / 32 * 32);

// -- Queue / buffer operations -----------------------------------------------
// Allocate a local tensor of `dtype` from varName's queue.
#define QueAlloc(varName, dtype)                                               \
  auto LTensorName(varName) = QueName(varName).AllocTensor<dtype>();
#define QueAllocSimple(varName) QueAlloc(varName, TypeOf(varName));
// Pop a local tensor of `dtype` from varName's queue.
#define DeQue(varName, dtype)                                                  \
  auto LTensorName(varName) = QueName(varName).DeQue<dtype>();
#define DeQueSimple(varName) DeQue(varName, TypeOf(varName));
// Push / release varName's local tensor.
#define EnQue(varName) QueName(varName).EnQue(LTensorName(varName));
#define QueFree(varName) QueName(varName).FreeTensor(LTensorName(varName));
// Fetch varName's TBuf contents as a tensor of `dtype`.
#define TBufGet(varName, dtype)                                                \
  auto TBufTensorName(varName) = TBufName(varName).Get<dtype>();

// -- Fused operations --------------------------------------------------------
// Stage `calcCount` elements from GM (at element offset `index`) into the
// input queue: alloc -> DataCopy -> enqueue.
#define EnQueGlobal2Local(varName, index, calcCount)                           \
  QueAllocSimple(varName);                                                     \
  DataCopy(LTensorName(varName), GTensorName(varName)[index], calcCount);      \
  EnQue(varName);

// Drain the output queue: dequeue -> DataCopy back to GM at `index` -> free.
#define DeQueLocal2Global(varName, index, calcCount)                           \
  DeQueSimple(varName);                                                        \
  DataCopy(GTensorName(varName)[index], LTensorName(varName), calcCount);      \
  QueFree(varName);

// Split an element-wise op over `calcCount` elements into chunks that respect
// the hardware repeat limit. One repeat covers 8 blocks * 32 B of `dtype`.
// `call_code` runs once per chunk with `index` (element offset of the chunk)
// and `repeatTimes` (repeats in this chunk) in scope.
#define CALL_0_LEVEL(maxRepeatCount, dtype, calcCount, call_code)              \
  {                                                                            \
    uint64_t oneRepeatDataCount = 8 * 32 / sizeof(dtype);                      \
    uint64_t needRepeatCount =                                                 \
        (calcCount + oneRepeatDataCount - 1) / oneRepeatDataCount;             \
    uint64_t needMaxRepeat =                                                   \
        (needRepeatCount + maxRepeatCount - 1) / maxRepeatCount;               \
    for (uint64_t i = 0; i < needMaxRepeat; ++i) {                             \
      uint64_t repeatTimes = maxRepeatCount;                                   \
      uint64_t index = i * maxRepeatCount * oneRepeatDataCount;                \
      if (i == needMaxRepeat - 1) {                                            \
        /* Bug fix: the old `needRepeatCount % maxRepeatCount` gave 0 when */  \
        /* the count was an exact multiple, skipping the last chunk.       */  \
        repeatTimes = needRepeatCount - i * maxRepeatCount;                    \
      }                                                                        \
      call_code                                                                \
    }                                                                          \
  }

// Declare and initialize an AscendC::CommonTiling named `name` from the
// kernel's `tiling_data` (expected in scope). "CRATE" is a historic typo for
// "CREATE", kept because callers already use this spelling.
#define CRATE_COMMON_TILING(name)                                              \
  AscendC::CommonTiling name;                                                  \
  name.Init(tiling_data.size, tiling_data.formerNum, tiling_data.formerLength, \
            tiling_data.formerTileLength, tiling_data.formerFinalCalcCount,    \
            tiling_data.tailNum, tiling_data.tailLength,                       \
            tiling_data.tailTileLength, tiling_data.tailFinalCalcCount,        \
            tiling_data.finalKernelCaclCount);
#define CRATE_COMMON_TILING_SIMPLE CRATE_COMMON_TILING(commonTiling)

// Integer ceiling division.
#define CEIL_DIV(a, b) (((a) + (b)-1) / (b))
// Round `a` up to the next multiple of `b`. Bug fix: the expansion is now
// fully parenthesized so it composes safely (e.g. `x / ALIGN_TO(a, b)`
// previously expanded to `x / CEIL_DIV(a, b) * (b)`).
#define ALIGN_TO(a, b) (CEIL_DIV(a, b) * (b))

// Scalar add with fp16 saturation: for half operands the sum is evaluated in
// fp32 and clamped to +/-inf when it exceeds the fp16 finite range (65504);
// all other types add natively.
#define SIMPLE_ADD(T, dst, a, b)                                               \
  {                                                                            \
    if constexpr (std::is_same_v<T, half>) {                                   \
      auto v = float(a) + float(b);                                            \
      if (v > 65504.0f) {                                                      \
        dst = infHalf;                                                         \
      } else if (v < -65504.0f) {                                              \
        dst = ninfHalf;                                                        \
      } else {                                                                 \
        dst = half(v);                                                         \
      }                                                                        \
    } else {                                                                   \
      dst = a + b;                                                             \
    }                                                                          \
  }

// Min/max. Hygiene fix: the selected operands are now parenthesized too, so
// arguments containing low-precedence operators expand correctly.
#define GET_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define GET_MAX(a, b) (((a) > (b)) ? (a) : (b))

// Fill tensor `x` with the 32-bit pattern `v`: zero it via Muls(*0), then add
// `v`. `calcCount` is in elements of x's own dtype and is rescaled to the
// int32 lane count. Uses elementSize() (declared later in this file), which
// is fine because macros expand at the point of use.
// NOTE(review): assumes elementSize(x) <= 4 — an 8-byte dtype would make the
// divisor sizeof(int32_t)/elementSize(x) zero.
#define MY_DUP_INT32(x, v, calcCount)                                          \
  {                                                                            \
    auto __count = (calcCount);                                                \
    auto __x_int32 = x.template ReinterpretCast<int32_t>();                    \
    __count /= sizeof(int32_t) / elementSize(x);                               \
    Muls(__x_int32, __x_int32, int32_t(0), __count);                           \
    Adds(__x_int32, __x_int32, int32_t(v), __count);                           \
  }

// 16-bit variant: fill `x` with the 16-bit pattern `v` through an int16 view.
#define MY_DUP_INT16(x, v, calcCount)                                          \
  {                                                                            \
    auto __count = (calcCount);                                                \
    auto __x_int16 = x.template ReinterpretCast<int16_t>();                    \
    __count /= sizeof(int16_t) / elementSize(x);                               \
    Muls(__x_int16, __x_int16, int16_t(0), __count);                           \
    Adds(__x_int16, __x_int16, int16_t(v), __count);                           \
  }

#ifdef ASCENDC_DUMP
// Debug dump: prints `count` elements of `t` prefixed with `desc`, using
// printf format `f` per element. Raw (optionally __gm__) pointers are
// indexed with [], other tensor-like objects via operator().
// NOTE(review): `desc` is used directly as a printf format string — it must
// not contain stray '%' specifiers.
template <typename descT, typename tensorT, typename format>
__aicore__ inline void print_tensor(descT desc, tensorT t, int count,
                                    format f) {
  printf(desc);
  printf(": [");
  for (auto i = 0; i < count; ++i) {
    if constexpr (std::is_same_v<tensorT, int64_t *> ||
                  std::is_same_v<tensorT, int32_t *> ||
                  std::is_same_v<tensorT, int16_t *> ||
                  std::is_same_v<tensorT, int8_t *> ||
                  std::is_same_v<tensorT, float *> ||
                  std::is_same_v<tensorT, half *> ||
                  std::is_same_v<tensorT, __gm__ int64_t *> ||
                  std::is_same_v<tensorT, __gm__ int32_t *> ||
                  std::is_same_v<tensorT, __gm__ int16_t *> ||
                  std::is_same_v<tensorT, __gm__ int8_t *> ||
                  std::is_same_v<tensorT, __gm__ float *> ||
                  std::is_same_v<tensorT, __gm__ half *>) {
      printf(f, t[i]);
    } else {
      printf(f, t(i));
    }
    printf(", ");
  }
  printf("]\n");
}
// Forward to printf only in dump builds.
#define print(...) printf(__VA_ARGS__)
#else
// Release builds: printing compiles away.
#define print_tensor(desc, t, count, f)
#define print(...)
#endif

// Byte size of one element of a LocalTensor.
template <typename T> __aicore__ inline int32_t elementSize(LocalTensor<T> x) {
  return sizeof(T);
}
// Byte size of one element of a GlobalTensor.
template <typename T> __aicore__ inline int32_t elementSize(GlobalTensor<T> x) {
  return sizeof(T);
}
// Trait mapping a Local/GlobalTensor type to the element type it stores.
template <typename T> struct TensorTrait {};
template <> struct TensorTrait<LocalTensor<int32_t>> { using DType = int32_t; };
template <> struct TensorTrait<LocalTensor<uint32_t>> {
  using DType = uint32_t;
};
template <> struct TensorTrait<LocalTensor<float>> { using DType = float; };
template <> struct TensorTrait<LocalTensor<half>> { using DType = half; };
template <> struct TensorTrait<LocalTensor<int16_t>> { using DType = int16_t; };
template <> struct TensorTrait<LocalTensor<uint16_t>> {
  using DType = uint16_t;
};
template <> struct TensorTrait<LocalTensor<int8_t>> { using DType = int8_t; };
template <> struct TensorTrait<LocalTensor<uint8_t>> { using DType = uint8_t; };
template <> struct TensorTrait<GlobalTensor<int32_t>> {
  using DType = int32_t;
};
template <> struct TensorTrait<GlobalTensor<uint32_t>> {
  using DType = uint32_t;
};
template <> struct TensorTrait<GlobalTensor<float>> { using DType = float; };
template <> struct TensorTrait<GlobalTensor<half>> { using DType = half; };
template <> struct TensorTrait<GlobalTensor<int16_t>> {
  using DType = int16_t;
};
template <> struct TensorTrait<GlobalTensor<uint16_t>> {
  using DType = uint16_t;
};
template <> struct TensorTrait<GlobalTensor<int8_t>> { using DType = int8_t; };
template <> struct TensorTrait<GlobalTensor<uint8_t>> {
  using DType = uint8_t;
};

// Number of T elements that fit in one 32-byte unified-buffer block.
template <typename T> __aicore__ inline int32_t constexpr elementsPerBlock() {
  constexpr int32_t bytesPerBlock = 32;
  return bytesPerBlock / int32_t(sizeof(T));
}

// Number of T elements covered by one 256-byte vector repeat.
template <typename T> __aicore__ inline int32_t constexpr elementsPerRepeat() {
  constexpr int32_t bytesPerRepeat = 256;
  return bytesPerRepeat / int32_t(sizeof(T));
}

// Number of T elements covered by `bitCount` bits.
template <typename T, int bitCount>
__aicore__ inline auto constexpr elementsPerBit() {
  constexpr auto byteCount = bitCount / 8;
  return byteCount / sizeof(T);
}

// Round an element count up to a whole 32-byte block of T.
template <typename T> __aicore__ inline auto alignToBlock(int64_t count) {
  const int64_t unit = elementsPerBlock<T>();
  return (count + unit - 1) / unit * unit;
}

// Round an element count up to a whole 256-byte repeat of T.
template <typename T> __aicore__ inline auto alignToRepeat(int64_t count) {
  const int64_t unit = elementsPerRepeat<T>();
  return (count + unit - 1) / unit * unit;
}

// Zero-fill tensor `x` (`calcCount` elements) with a dtype-appropriate path:
// float/half go through the integer dup macros, int32 uses Muls directly,
// int8 is cleared two bytes at a time through an int16 view.
// NOTE(review): for odd calcCount the int8 path rounds up and clears one
// byte beyond calcCount — confirm callers pad to an even count.
#define MY_DUP_ZERO(x, calcCount)                                              \
  {                                                                            \
    if constexpr (std::is_same_v<typename TensorTrait<decltype(x)>::DType,     \
                                 float>) {                                     \
      MY_DUP_INT32(x, zeroFloatInt, calcCount);                                \
    } else if constexpr (std::is_same_v<                                       \
                             typename TensorTrait<decltype(x)>::DType,         \
                             int32_t>) {                                       \
      Muls(x, x, int32_t(0), calcCount);                                       \
    } else if constexpr (std::is_same_v<                                       \
                             typename TensorTrait<decltype(x)>::DType,         \
                             half>) {                                          \
      MY_DUP_INT16(x, zeroHalfInt, calcCount);                                 \
    } else if constexpr (std::is_same_v<                                       \
                             typename TensorTrait<decltype(x)>::DType,         \
                             int8_t>) {                                        \
      Muls(x.template ReinterpretCast<int16_t>(),                              \
           x.template ReinterpretCast<int16_t>(), int16_t(0),                  \
           CEIL_DIV(calcCount, 2));                                            \
    }                                                                          \
  }

// Build a contiguous low-bit mask of `dataCount` ones (dataCount in [0, 64]).
// 64 is special-cased because shifting a 64-bit value by 64 is UB. Hygiene
// fix: arguments are now parenthesized so compound expressions expand safely.
#define BUILD_MASK(dataCount)                                                  \
  ((dataCount) == 64 ? 0xFFFFFFFFFFFFFFFF                                      \
                     : (((uint64_t)1 << (dataCount)) - 1))
// The same mask shifted left by `offset` bits.
#define BUILD_MASK_WITH_OFFSET(dataCount, offset)                              \
  (BUILD_MASK(dataCount) << (offset))

// Apply `OP` (|= or ^=) to the 128-bit mask {mask[0], mask[1]} over a run of
// `oneCount` bits starting at bit `maskFlagIdx`, then advance maskFlagIdx.
// 2-byte element types use both 64-bit words — a run that straddles the word
// boundary is split; 4-byte element types only ever touch mask[0].
#define MASK_OP(T, mask, maskFlagIdx, oneCount, OP)                            \
  {                                                                            \
    if constexpr (sizeof(T) == 2) {                                            \
      if (maskFlagIdx >= 64) {                                                 \
        mask[1] OP BUILD_MASK_WITH_OFFSET(oneCount, maskFlagIdx % 64);         \
      } else {                                                                 \
        auto tempMaskFlagIdx = maskFlagIdx + oneCount;                         \
        if (tempMaskFlagIdx > 64) {                                            \
          auto dataCountTemp = tempMaskFlagIdx - 64;                           \
          mask[1] OP BUILD_MASK(dataCountTemp);                                \
          dataCountTemp = oneCount - dataCountTemp;                            \
          mask[0] OP BUILD_MASK_WITH_OFFSET(dataCountTemp, maskFlagIdx);       \
        } else {                                                               \
          auto flag = BUILD_MASK_WITH_OFFSET(oneCount, maskFlagIdx);           \
          mask[0] OP flag;                                                     \
        }                                                                      \
      }                                                                        \
    } else if constexpr (sizeof(T) == 4) {                                     \
      mask[0] OP BUILD_MASK_WITH_OFFSET(oneCount, maskFlagIdx);                \
    }                                                                          \
    maskFlagIdx += oneCount;                                                   \
  }

// Set a run of ones in the mask.
#define MASK_OR_ONE(T, mask, maskFlagIdx, oneCount)                            \
  MASK_OP(T, mask, maskFlagIdx, oneCount, |=)

// Toggle a run of bits in the mask.
#define MASK_XOR_ONE(T, mask, maskFlagIdx, oneCount)                           \
  MASK_OP(T, mask, maskFlagIdx, oneCount, ^=)

// Per-core tiling descriptor: the host splits `size` elements across
// "former" cores (formerLength elements each) followed by "tail" cores
// (tailLength each); this class derives the current core's loop count and
// offsets from GetBlockIdx().
class CommonTiling {
public:
  int64_t loopCount = 0;      // tile iterations this core runs
  int64_t bufferSize = 0;     // elements per tile for this core
  int64_t finalCalcCount = 0; // calc count used by the last iteration
  int64_t startIdx = 0;       // global element offset of this core's range
  int64_t blockLength = 0;    // total elements assigned to this core
  int64_t last_size = 0;      // element count of the final (partial) tile
  int64_t size = 0;           // total element count of the whole op
  bool is_final = false;      // true for the last participating core
  __aicore__ inline CommonTiling() {}
  __aicore__ inline void Init(int64_t size, int64_t formerNum,
                              int64_t formerLength, int64_t formerTileLength,
                              int64_t formerFinalCalcCount, int64_t tailNum,
                              int64_t tailLength, int64_t tailTileLength,
                              int64_t tailFinalCalcCount,
                              int64_t finalKernelCaclCount = 0) {
    this->size = size;
    // Select this core's configuration from the host-computed tiling.
    uint64_t blockidx = GetBlockIdx();
    this->is_final = blockidx == (formerNum + tailNum) - 1;

    if (blockidx < formerNum) {
      this->blockLength = formerLength;
      this->bufferSize = formerTileLength;
      this->finalCalcCount = formerFinalCalcCount;
    } else {
      this->blockLength = tailLength;
      this->bufferSize = tailTileLength;
      this->finalCalcCount = tailFinalCalcCount;
      // NOTE(review): is_final above keys on formerNum + tailNum while this
      // override keys on GetBlockNum(); presumably both denote the last
      // launched core — confirm they always agree.
      if (GetBlockNum() - 1 == blockidx) {
        this->finalCalcCount = finalKernelCaclCount;
      }
    }
    this->loopCount = CEIL_DIV(blockLength, bufferSize);
    this->startIdx = blockidx * formerLength;
    if (blockidx >= formerNum) {
      this->startIdx =
          formerLength * formerNum + (blockidx - formerNum) * tailLength;
    }
    // Size of the trailing partial tile (a full tile when evenly divisible).
    // NOTE(review): computed from the global `size`, so it only looks
    // meaningful for the core that processes the end of the data — confirm.
    this->last_size = size % bufferSize;
    if (this->last_size == 0) {
      this->last_size = bufferSize;
    }
  }

  // Copy count for the final tile, rounded up to whole 32-byte blocks of T.
  template <typename T> __aicore__ inline int32_t getFinalCopyCount() {
    return ALIGN_TO(this->last_size, 32 / sizeof(T));
  }
};

// Ceiling division: smallest q such that q * b >= a (for non-negative a and
// positive b).
__aicore__ inline int64_t RoundUp(int64_t a, int64_t b) {
  const int64_t biased = a + b - 1;
  return biased / b;
}

// Rearranges an ND tensor so a "sort by dim" kernel can work on a contiguous
// layout, copying element-by-element into `workspace`.
// NOTE(review): copies through GM_ADDR; presumably GM_ADDR is a byte pointer
// and sortByDimDimBlockLength is therefore in bytes — confirm with callers.
class NdTensorSortByDimHelper {
public:
  // Judging from the index math in transpose(), the source is viewed as
  // [preSize][dimSize][dimLength][blockLength] — TODO confirm.
  GM_ADDR data_addr = nullptr;
  GM_ADDR workspace = nullptr;
  int64_t sortByDimPreSize = 0;
  int64_t sortByDimDimSize = 0;
  int64_t sortByDimDimLength = 0;
  int64_t sortByDimDimBlockLength = 0;

public:
  __aicore__ inline NdTensorSortByDimHelper() {
    data_addr = nullptr;
    workspace = nullptr;
    sortByDimPreSize = 0;
    sortByDimDimSize = 0;
    sortByDimDimLength = 0;
    sortByDimDimBlockLength = 0;
  }

  // Copies the data into `workspace`, swapping the dimSize and dimLength axes
  // ([k][j][i][q] -> [k][i][j][q]); returns the buffer holding the transposed
  // data, or data_addr unchanged when there is no sort dimension.
  __aicore__ inline GM_ADDR transpose() {
    if (sortByDimDimSize == 0) {
      return data_addr;
    }

    // Stride of one k-slice in the source.
    int64_t k_length =
        sortByDimDimSize * sortByDimDimLength * sortByDimDimBlockLength;
    int64_t counter = 0;
    for (int64_t k = 0; k < sortByDimPreSize; ++k) {
      for (int64_t i = 0; i < sortByDimDimLength; ++i) {
        for (int64_t j = 0; j < sortByDimDimSize; ++j) {
          for (int64_t q = 0; q < sortByDimDimBlockLength; ++q) {
            workspace[counter++] =
                data_addr[k * k_length +
                          j * sortByDimDimLength * sortByDimDimBlockLength +
                          i * sortByDimDimBlockLength + q];
          }
        }
      }
    }

    return workspace;
  }
};

// Exchange the values pointed to by a and b.
template <typename T> __aicore__ inline void swap(T *a, T *b) {
  T tmp = *a;
  *a = *b;
  *b = tmp;
}

// Element-wise copy of n values from src to dst (no overlap handling; no-op
// for n <= 0).
template <typename T>
__aicore__ inline void my_memcpy(T *dst, const T *src, int64_t n) {
  for (int64_t remaining = n; remaining > 0; --remaining) {
    *dst++ = *src++;
  }
}

// Maximum supported tensor rank for ShapeDataHelper.
constexpr int MAX_SHAPE_DIM = 64;
// Small fixed-capacity shape container with flat-offset <-> multi-index math.
// Serialized layout consumed by init():
//   shape_data[0] = dim count, shape_data[1] = broadcast flag,
//   shape_data[2..] = the dims themselves.
template <int64_t shape_max_size = MAX_SHAPE_DIM, typename T = int64_t>
class ShapeDataHelper {
public:
  T shape[shape_max_size] = {0}; // dims, row-major
  int64_t size = 0;              // cached product of all dims
  int64_t dimNum = 0;            // number of valid dims
  bool needBroadBast = false;    // broadcast flag from host (sic: typo kept)
  __aicore__ inline ShapeDataHelper() {}
  // Deserialize from host-packed shape data (layout above).
  __aicore__ inline void init(T *shape_data) {
    dimNum = shape_data[0];
    needBroadBast = shape_data[1];
    my_memcpy(shape, shape_data + 2, dimNum);
    calcSize();
  }

  template <typename IndexType>
  __aicore__ inline T operator[](IndexType index) {
    return shape[index];
  }

  // Overwrite one dim and refresh the cached element count.
  template <typename IndexType>
  __aicore__ inline void set(IndexType index, T value) {
    shape[index] = value;
    calcSize();
  }

  // Append a dim (no capacity check against shape_max_size).
  __aicore__ inline void push_back(T value) { set(dimNum++, value); }

  // Debug dump as "name:(d0, d1, ...)".
  // NOTE(review): "%d" expects int-sized dims; with the default T=int64_t
  // this is a printf format mismatch — verify against the CCE printf.
  template <typename NT> __aicore__ inline void print_(NT name) {
    printf("%s:(", name);
    for (auto i = 0; i < dimNum; ++i) {
      printf("%d, ", shape[i]);
    }
    printf(")\n");
  }

  // Decompose a flat row-major offset into per-dim indices.
  template <typename IndexType>
  __aicore__ inline void Offset2Indexs(IndexType offset, T *indexs) {
    int64_t temp_size = size;
    for (auto i = 0; i < dimNum; ++i) {
      temp_size /= (*this)[i];
      indexs[i] = offset / temp_size;
      offset = offset % temp_size;
    }
  }

  // Treat *this as a multi-index and map it to the flat offset it addresses
  // in `targetShape`, clamping broadcast dims (index >= extent) to 0.
  __aicore__ inline T
  broadcastIndex2targetOffset(ShapeDataHelper<shape_max_size, T> &targetShape) {
    T ret = 0;
    int64_t index = 0;
    int64_t size_temp = targetShape.size;
    for (auto i = 0; i < dimNum; ++i) {
      size_temp = size_temp / targetShape[i];
      index = (*this)[i];
      if (index >= targetShape[i]) {
        index = 0;
      }
      ret += index * size_temp;
    }
    return ret;
  }

private:
  // Recompute size = product of all valid dims.
  __aicore__ inline void calcSize() {
    size = 1;
    for (auto i = 0; i < dimNum; ++i) {
      size *= (*this)[i];
    }
  }
};

// Default shape container: up to 64 int64 dims.
typedef ShapeDataHelper<MAX_SHAPE_DIM, int64_t> ShapeData;

// DataCopy wrapper that also handles counts that are not a multiple of the
// 32-byte block: the bulk is moved with DataCopy and the ragged tail is
// copied element-wise via Set/GetValue. dst and src dtypes must match.
template <typename T_DST, typename T_FROM>
__aicore__ inline void myDataCopy(T_DST dst, T_FROM from, int64_t calcCount,
                                  int64_t dst_offset = 0,
                                  int64_t from_offset = 0) {
  using dstDT = typename TensorTrait<T_DST>::DType;
  using fromDT = typename TensorTrait<T_FROM>::DType;
  if constexpr (std::is_same_v<dstDT, fromDT> == false) {
    // Diagnostic: "myDataCopy destination/source type mismatch".
    printf(u8"myDataCopy目标类型与源类型不匹配\n");
    return;
  }
  constexpr uint32_t alignSize = elementsPerBlock<dstDT>();
  if (calcCount < alignSize) {
    // Too small for even one aligned block: copy everything scalar-wise.
    for (auto j = 0; j < calcCount; ++j) {
      dst.SetValue(dst_offset + j, from.GetValue(from_offset + j));
    }
  } else {
    // DataCopy silently drops the trailing sub-block remainder...
    DataCopy(dst[dst_offset], from[from_offset], calcCount);
    // ...so finish the last partial 32-byte block by hand.
    auto index2 = calcCount / alignSize * alignSize;
    for (auto j = index2; j < calcCount; ++j) {
      dst.SetValue(dst_offset + j, from.GetValue(from_offset + j));
    }
  }
}

// struct DataCopyHelper {
//   DefBufVECOUT(GATHER_FLAG);
//   DefOutQue(TEMP);
//   int32_t flagLength = 0;
//   __aicore__ inline DataCopyHelper() {}
//   __aicore__ inline void init(TPipe &pipe, int32_t dataLength,
//                               int32_t dtypeSize) {
//     flagLength = ALIGN_TO(CEIL_DIV(dataLength, 8), 4);
//     InitTBufBuffer(GATHER_FLAG, flagLength);
//     TBufGet(GATHER_FLAG, uint32_t);
//     MY_DUP_INT32(BTensor(GATHER_FLAG), int32_t(0xFFFFFFFF), flagLength / 4);
//     InitQueue(TEMP, dataLength * dtypeSize);
//   }
//   template <typename T_DST, typename T_FROM>
//   __aicore__ inline void copy(T_DST dst, T_FROM from, uint32_t calcCount,
//                               int32_t dstOffset) {
//     using dstDT = typename TensorTrait<T_DST>::DType;
//     using fromDT = typename TensorTrait<T_FROM>::DType;
//     if constexpr (std::is_same_v<dstDT, fromDT> == false) {
//       print(u8"DataCopyHelper::copy目标类型与源类型不匹配\n");
//       return;
//     }

//     if (calcCount < elementsPerBlock<dstDT>()) {
//       for (auto j = 0; j < calcCount; ++j) {
//         dst.SetValue(dstOffset + j, from.GetValue(j));
//       }
//       return;
//     }

//     // 判断dstOffset是否32B对齐
//     int32_t duoyu = ALIGN_TO(dstOffset, elementsPerBlock<dstDT>()) -
//     dstOffset; if (duoyu == 0) {
//       // myDataCopy(dst, from, calcCount, dstOffset);
//       // 自动忽略后面不足alignSize的数据
//       DataCopy(dst[dstOffset], from, calcCount);
//       // 最后32B
//       auto index2 =
//           calcCount / elementsPerBlock<dstDT>() * elementsPerBlock<dstDT>();
//       for (auto j = index2; j < calcCount; ++j) {
//         dst.SetValue(dstOffset + j, from.GetValue(j));
//       }
//       return;
//     }

//     if constexpr (sizeof(dstDT) == 1) {
//       for (auto j = 0; j < calcCount; ++j) {
//         dst.SetValue(dstOffset + j, from.GetValue(j));
//       }
//       return;
//     }

//     if constexpr (sizeof(dstDT) == 4) {
//       QueAlloc(TEMP, dstDT);
//       TBufGet(GATHER_FLAG, uint32_t);
//       uint32_t first = 0xFFFFFFFF;
//       for (auto i = 0; i < duoyu; ++i) {
//         first ^= (1 << i);
//       }
//       BTensor(GATHER_FLAG)(0) = first;
//       uint64_t counter = 0;
//       uint16_t repeatTimes = CEIL_DIV(calcCount, elementsPerRepeat<dstDT>());
//       GatherMask(LTensor(TEMP), from, BTensor(GATHER_FLAG), false,
//       uint32_t(0),
//                  {1, repeatTimes, 8, 8}, counter);
//       // print_tensor("TEMP", LTensor(TEMP), 8, "%f");
//       EnQue(TEMP);
//     } else if constexpr (sizeof(dstDT) == 2) {
//       QueAlloc(TEMP, dstDT);
//       TBufGet(GATHER_FLAG, uint16_t);
//       uint32_t first = 0xFFFFFFFF;
//       for (auto i = 0; i < duoyu; ++i) {
//         first ^= (1 << i);
//       }
//       BTensor(GATHER_FLAG)(0) = (&first)[0];
//       BTensor(GATHER_FLAG)(1) = (&first)[1];
//       uint64_t counter = 0;
//       uint16_t repeatTimes = CEIL_DIV(calcCount, elementsPerRepeat<dstDT>());
//       GatherMask(LTensor(TEMP), from, BTensor(GATHER_FLAG), false,
//       uint32_t(0),
//                  {1, repeatTimes, 8, 8}, counter);
//       EnQue(TEMP);
//     }
//     {
//       DeQue(TEMP, dstDT);
//       // print_tensor("TEMP", LTensor(TEMP), 8, "%f");
//       myDataCopy(dst, LTensor(TEMP), calcCount - duoyu, dstOffset + duoyu);
//       QueFree(TEMP);
//     }

//     // 前面几个需要单独处理
//     for (auto j = 0; j < duoyu; ++j) {
//       dst.SetValue(dstOffset + j, from.GetValue(j));
//     }
//   }
// };

// Minimum workLocal element count required for a reduce-style op over `count`
// input elements of T.
// callIndex = false: one pass bounds the space; callIndex = true: three
// cascaded passes are accounted for.
template <typename T, bool callIndex = false>
__aicore__ inline int64_t workLocalSize(int64_t count) {
  // Repeats needed by the first pass.
  int64_t firstMaxRepeat = CEIL_DIV(count, elementsPerRepeat<T>());
  // With callIndex = false, a single pass determines the minimum space:
  if (!callIndex) {
    // Elements produced by pass 1...
    auto iter1OutputCount = firstMaxRepeat * 2;
    // ...rounded up to whole 32-byte blocks.
    return ALIGN_TO(iter1OutputCount, elementsPerBlock<T>());
  }
  // With callIndex = true, three passes determine the minimum space:
  // Elements produced by pass 1.
  auto iter1OutputCount = firstMaxRepeat * 2;
  // Pass-2 start offset: pass-1 output rounded up to a block.
  auto iter2AlignStart = alignToBlock<T>(iter1OutputCount);
  // Elements produced by pass 2.
  auto iter2OutputCount =
      CEIL_DIV(iter1OutputCount, elementsPerRepeat<T>()) * 2;
  // Pass-3 start offset: pass-2 output rounded up to a block.
  auto iter3AlignStart = ALIGN_TO(iter2OutputCount, elementsPerBlock<T>());
  // Elements produced by pass 3.
  auto iter3OutputCount =
      CEIL_DIV(iter2OutputCount, elementsPerRepeat<T>()) * 2;
  // Pass-3 output rounded up to a block.
  auto iter3AlignEnd = ALIGN_TO(iter3OutputCount, elementsPerBlock<T>());
  // Total workLocal requirement.
  return (iter2AlignStart + iter3AlignStart + iter3AlignEnd);
};

// Same requirement expressed in bytes ("Btyte" is a historic typo kept for
// source compatibility with existing callers).
template <typename T, bool callIndex = false>
__aicore__ inline int64_t workLocalSizeBtyte(int64_t count) {
  return workLocalSize<T, callIndex>(count) * sizeof(T);
}

// Accumulates a per-element bit mask for vector ops. `dataSize` is the
// element byte size: 2 -> 128 bits across mask_[0..1], 4 -> 64 bits in
// mask_[0]. Runtime twin of the MASK_OP/MASK_OR_ONE macros above.
template <int32_t dataSize> struct MaskHelper {
  uint64_t mask_[2] = {0, 0};
  int32_t maskFlagIdx_ = 0; // next free bit position
  __aicore__ inline MaskHelper() {}
  __aicore__ inline void reset() {
    mask_[0] = 0;
    mask_[1] = 0;
    maskFlagIdx_ = 0;
  }
  // True once one full repeat's worth of elements has been recorded.
  __aicore__ inline bool full() { return maskFlagIdx_ >= 256 / dataSize; }
  // Record `dataCount` zero bits (just advances the cursor).
  __aicore__ inline void addZero(int32_t dataCount) {
    maskFlagIdx_ += dataCount;
  }
  // Record `dataCount` consecutive one bits starting at maskFlagIdx_.
  __aicore__ inline void addOne(int32_t dataCount) {
    if constexpr (dataSize == 2) {
      if (maskFlagIdx_ >= 64) {
        mask_[1] |= BUILD_MASK_WITH_OFFSET(dataCount, maskFlagIdx_ % 64);
        // Bug fix: the early return previously skipped the cursor advance,
        // leaving maskFlagIdx_ stale for subsequent add calls.
        maskFlagIdx_ += dataCount;
        return;
      }
      auto tempMaskFlagIdx = maskFlagIdx_ + dataCount;
      if (tempMaskFlagIdx > 64) {
        // Run straddles the 64-bit word boundary: split across both words.
        auto dataCountTemp = tempMaskFlagIdx - 64;
        // Bug fix: use |= (as MASK_OP does) instead of =, which clobbered
        // any bits already accumulated in mask_[1]; also advance the cursor
        // before returning (same fix as above).
        mask_[1] |= BUILD_MASK(dataCountTemp);
        dataCountTemp = dataCount - dataCountTemp;
        mask_[0] |= BUILD_MASK_WITH_OFFSET(dataCountTemp, maskFlagIdx_);
        maskFlagIdx_ += dataCount;
        return;
      }
      mask_[0] |= BUILD_MASK_WITH_OFFSET(dataCount, maskFlagIdx_);
    } else if constexpr (dataSize == 4) {
      mask_[0] |= BUILD_MASK_WITH_OFFSET(dataCount, maskFlagIdx_);
    }
    maskFlagIdx_ += dataCount;
  }
};

// Host-side CPU debug build (CCE kernel test framework): tensors/scalars
// print to stdout and ops run through ICPU_RUN_KF. On-device builds: the
// print macros compile away and CALL_ASCEND_OP does the real <<<>>> launch.
#ifdef __CCE_KT_TEST__
#include <iostream>
// Dump `count` elements of `tensor`, tagged with `progress`.
#define PRINT_TENSOR(tensor, count, progress)                                  \
  std::cout << #tensor << "---------" << progress << "----------"              \
            << std::endl;                                                      \
  tensor.Print(count);                                                         \
  std::cout << "------------------------------------------" << std::endl;
// Print a named scalar value.
#define PRINT_SCALAR(v) std::cout << #v << " : " << v << std::endl;
// Run `op` on the CPU model with `blockNum` blocks (stream is unused here).
#define CALL_ASCEND_OP(op, stream, blockNum, ...)                              \
  ICPU_RUN_KF(op, blockNum, __VA_ARGS__);
#else
#define PRINT_TENSOR(tensor, count, progress)
#define PRINT_SCALAR(v)
#define CALL_ASCEND_OP(op, stream, blockNum, ...)                              \
  op<<<blockNum, nullptr, stream>>>(__VA_ARGS__);
#endif
} // namespace AscendC
