#ifndef __KERNEL_COMMON_H__
#define __KERNEL_COMMON_H__
// BUGFIX(layout): the include guard previously opened AFTER the #include
// lines, so the includes were re-processed on every inclusion of this header;
// the guard now wraps the whole header as is conventional.

#include "kernel_operator.h"
#include <cstdint>

// Ping-pong (double-buffering) depth used for every TQue declared below.
#define BUFFER_NUM 2
// Alignment unit in bytes (one unified-buffer block).
#define ALIGN_SIZE 32
// Reinterpret a raw GM_ADDR as a typed __gm__ pointer, advanced by `offset`
// elements of `dtype`.
#define CAST_GM_ADDR(dtype, gm_addr, offset) (reinterpret_cast<__gm__ dtype *>(gm_addr) + offset)

// Name concatenation: derive the conventional member/variable names used by
// the Def*/Init*/queue macros in this header from a logical tensor name.
#define LTensorName(varName) localTensor##varName   // LocalTensor variable name
#define GTensorName(varName) globalTensor##varName  // GlobalTensor member name
#define QueName(varName) varName##_queue            // TQue member name
#define TypeOf(varName) _DT_##varName               // element-type alias; a `_DT_<varName>` type must be declared by the user
#define TBufName(varName) TBuf##varName             // TBuf member name
#define TBufTensorName(varName) TBufTensor##varName // tensor obtained from the TBuf via Get<>()

// Definitions: declare the GlobalTensor / queue / TBuf members for `varName`.
// All of these rely on a `_DT_<varName>` type alias being in scope (see TypeOf).
#define DefGlobalTensor(varName) GlobalTensor<TypeOf(varName)> GTensorName(varName);
#define DefInQue(varName) TQue<TPosition::VECIN, BUFFER_NUM> QueName(varName);
#define DefOutQue(varName) TQue<TPosition::VECOUT, BUFFER_NUM> QueName(varName);
#define DefCalcQue(varName) TQue<TPosition::VECCALC, BUFFER_NUM> QueName(varName);
#define DefBufVECCALC(varName) TBuf<TPosition::VECCALC> TBufName(varName);
#define DefBufVECIN(varName) TBuf<TPosition::VECIN> TBufName(varName);
// BUGFIX: previously declared the buffer at TPosition::VECIN (copy-paste from
// DefBufVECIN); a VECOUT buffer must be declared at TPosition::VECOUT.
#define DefBufVECOUT(varName) TBuf<TPosition::VECOUT> TBufName(varName);
// Declare both the GlobalTensor and the matching input queue for `varName`.
#define DefInTensor(varName)                                                                                           \
    DefGlobalTensor(varName);                                                                                          \
    DefInQue(varName);
// Declare both the GlobalTensor and the matching output queue for `varName`.
#define DefOutTensor(varName)                                                                                          \
    DefGlobalTensor(varName);                                                                                          \
    DefOutQue(varName);

// Initialization (all of these require a `pipe` (TPipe) object in scope)
// Bind `varName`'s GlobalTensor to `src` advanced by `start_idx` elements of
// its declared type, spanning `blockLength` elements.
#define SetGlobalBuffer(varName, src, start_idx, blockLength)                                                          \
    GTensorName(varName).SetGlobalBuffer(CAST_GM_ADDR(TypeOf(varName), src, start_idx), blockLength);
// Allocate BUFFER_NUM buffers of `tileLength` bytes for `varName`'s queue.
#define InitQueue(varName, tileLength) pipe.InitBuffer(QueName(varName), BUFFER_NUM, tileLength);
// Same as InitQueue, but `tileLength` is in elements of the declared type.
#define InitQueueSimple(varName, tileLength) InitQueue(varName, tileLength * sizeof(TypeOf(varName)));
// Allocate a TBuf of `tileLength` rounded up to a 32-byte boundary.
// NOTE(review): unlike InitQueueSimple, no sizeof() scaling is applied here —
// presumably callers pass a byte count; confirm at the call sites.
#define InitTBufBuffer(varName, tileLength) pipe.InitBuffer(TBufName(varName), (tileLength + 31) / 32 * 32);

// Operations
// Allocate a local tensor of `dtype` from `varName`'s queue into LTensorName(varName).
#define QueAlloc(varName, dtype) auto LTensorName(varName) = QueName(varName).AllocTensor<dtype>();
#define QueAllocSimple(varName) QueAlloc(varName, TypeOf(varName));
// Dequeue a local tensor of `dtype` from `varName`'s queue into LTensorName(varName).
#define DeQue(varName, dtype) auto LTensorName(varName) = QueName(varName).DeQue<dtype>();
#define DeQueSimple(varName) DeQue(varName, TypeOf(varName));
// Enqueue / release the local tensor previously bound by QueAlloc*/DeQue*.
#define EnQue(varName) QueName(varName).EnQue(LTensorName(varName));
#define QueFree(varName) QueName(varName).FreeTensor(LTensorName(varName));
// Obtain a typed tensor view of `varName`'s TBuf into TBufTensorName(varName).
#define TBufGet(varName, dtype) auto TBufTensorName(varName) = TBufName(varName).Get<dtype>();

// Fused operations
// Copy-in: allocate a local tensor, DataCopy `calcCount` elements from the
// bound GlobalTensor starting at `index`, then enqueue the local tensor.
// Requires a prior SetGlobalBuffer(varName, ...) and InitQueue*(varName, ...).
#define EnQueGlobal2Local(varName, index, calcCount)                                                                   \
    QueAllocSimple(varName);                                                                                           \
    DataCopy(LTensorName(varName), GTensorName(varName)[index], calcCount);                                            \
    EnQue(varName);

// Copy-out: dequeue the local tensor, DataCopy `calcCount` elements back to
// the bound GlobalTensor starting at `index`, then free the local tensor.
#define DeQueLocal2Global(varName, index, calcCount)                                                                   \
    DeQueSimple(varName);                                                                                              \
    DataCopy(GTensorName(varName)[index], LTensorName(varName), calcCount);                                            \
    QueFree(varName);

// Split `calcCount` elements into chunks of at most `maxRepeatCount` vector
// repeats (one repeat covers 8 * 32 bytes of `dtype`) and run `call_code`
// once per chunk. Inside `call_code` the following are in scope:
//   i           — chunk index
//   index       — element offset of this chunk
//   repeatTimes — number of repeats to issue for this chunk
// BUGFIX: when needRepeatCount was an exact multiple of maxRepeatCount, the
// old `needRepeatCount % maxRepeatCount` produced repeatTimes == 0 for the
// last chunk, silently skipping its work; the remainder is now computed as
// the count actually left over.
#define CALL_0_LEVEL(maxRepeatCount, dtype, calcCount, call_code)                                                      \
    {                                                                                                                  \
        uint64_t oneRepeatDataCount = 8 * 32 / sizeof(dtype);                                                          \
        uint64_t needRepeatCount = ((calcCount) + oneRepeatDataCount - 1) / oneRepeatDataCount;                        \
        uint64_t needMaxRepeat = (needRepeatCount + (maxRepeatCount)-1) / (maxRepeatCount);                            \
        for (uint64_t i = 0; i < needMaxRepeat; ++i)                                                                   \
        {                                                                                                              \
            uint64_t repeatTimes = (maxRepeatCount);                                                                   \
            uint64_t index = i * (maxRepeatCount)*oneRepeatDataCount;                                                  \
            if (i == needMaxRepeat - 1)                                                                                \
            {                                                                                                          \
                repeatTimes = needRepeatCount - i * (maxRepeatCount);                                                  \
            }                                                                                                          \
            call_code                                                                                                  \
        }                                                                                                              \
    }

// Instantiate an AscendC::CommonTiling named `name` and initialize it from a
// `tiling_data` struct that must be in scope at the expansion site.
// NOTE(review): "CRATE" is a typo for "CREATE"; kept as-is because renaming
// the macro would break existing callers.
#define CRATE_COMMON_TILING(name)                                                                                      \
    AscendC::CommonTiling name;                                                                                        \
    name.Init(tiling_data.size, tiling_data.formerNum, tiling_data.formerLength, tiling_data.formerTileLength,         \
              tiling_data.formerFinalCalcCount, tiling_data.tailNum, tiling_data.tailLength,                           \
              tiling_data.tailTileLength, tiling_data.tailFinalCalcCount);
#define CRATE_COMMON_TILING_SIMPLE CRATE_COMMON_TILING(commonTiling)

// Ceiling division for positive operands: smallest integer >= a / b.
#define CEIL_DIV(a, b) (((a) + (b)-1) / (b))
// Round `a` up to the next multiple of `b`.
// BUGFIX: the whole expansion is now parenthesized; previously
// `x / ALIGN_TO(a, b)` expanded to `x / CEIL_DIV(a, b) * (b)`, which
// mis-associates as `(x / CEIL_DIV(a, b)) * (b)`.
#define ALIGN_TO(a, b) (CEIL_DIV(a, b) * (b))

namespace AscendC
{
// Per-core tiling descriptor for the common "former/tail" core split: the
// first `formerNum` cores are each assigned `formerLength` elements, the
// remaining `tailNum` cores are each assigned `tailLength` elements.
class CommonTiling
{
  public:
    int64_t loopCount = 0;      // number of tile iterations for this core
    int64_t bufferSize = 0;     // tile length used per iteration (from *TileLength)
    int64_t finalCalcCount = 0; // calc count supplied for the final iteration
    int64_t startIdx = 0;       // this core's starting element offset
    int64_t blockLength = 0;    // total elements assigned to this core
    int64_t last_size = 0;      // size of the last (possibly partial) tile
    int64_t size = 0;           // total problem size across all cores
    bool is_final = false;      // true only on the last participating core
    __aicore__ inline CommonTiling()
    {
    }
    // Select this core's share of the work from the host-computed tiling
    // fields, keyed by GetBlockIdx().
    __aicore__ inline void Init(int64_t size, int64_t formerNum, int64_t formerLength, int64_t formerTileLength,
                                int64_t formerFinalCalcCount, int64_t tailNum, int64_t tailLength,
                                int64_t tailTileLength, int64_t tailFinalCalcCount)
    {
        this->size = size;
        // Initialize per-core configuration.
        uint64_t blockidx = GetBlockIdx();
        this->is_final = blockidx == (formerNum + tailNum) - 1;

        // Cores [0, formerNum) take the "former" split; the rest take "tail".
        if (blockidx < formerNum)
        {
            this->blockLength = formerLength;
            this->bufferSize = formerTileLength;
            this->finalCalcCount = formerFinalCalcCount;
        }
        else
        {
            this->blockLength = tailLength;
            this->bufferSize = tailTileLength;
            this->finalCalcCount = tailFinalCalcCount;
        }
        this->loopCount = CEIL_DIV(blockLength, bufferSize);
        // Start offset: former cores are laid out first, then tail cores.
        this->startIdx = blockidx * formerLength;
        if (blockidx >= formerNum)
        {
            this->startIdx = formerLength * formerNum + (blockidx - formerNum) * tailLength;
        }
        // NOTE(review): the remainder is taken against the global `size`, not
        // this core's `blockLength` — confirm this is intentional for cores
        // whose blockLength differs from size.
        this->last_size = size % bufferSize;
        if (this->last_size == 0)
        {
            this->last_size = bufferSize;
        }
    }

    // Last tile's element count rounded up so the copy covers whole 32-byte
    // blocks of T (32 / sizeof(T) elements per block).
    // NOTE(review): the int64_t result is narrowed to int32_t here.
    template <typename T> __aicore__ inline int32_t getFinalCopyCount()
    {
        return ALIGN_TO(this->last_size, 32 / sizeof(T));
    }
};

// Ceiling integer division: RoundUp(a, b) == ceil(a / b) for positive b.
// NOTE: despite the name, this returns the rounded-up quotient, not `a`
// rounded up to a multiple of `b` (that is what ALIGN_TO does).
__aicore__ inline int64_t RoundUp(int64_t a, int64_t b)
{
    const int64_t biased = a + b - 1;
    return biased / b;
}

// Materializes a dim-transposed copy of an ND tensor into a workspace buffer
// (e.g. so a sort along an arbitrary dim can operate on contiguous runs).
// Field interpretation suggested by the names — confirm against the host-side
// code that fills them:
//   sortByDimPreSize        — product of dims before the sort dim
//   sortByDimDimSize        — extent of the sort dim
//   sortByDimDimLength      — product of dims after the sort dim
//   sortByDimDimBlockLength — innermost unit copied per element (presumably
//                             already includes sizeof(element) — verify)
class NdTensorSortByDimHelper
{
  public:
    GM_ADDR data_addr = nullptr; // source tensor in global memory
    GM_ADDR workspace = nullptr; // destination scratch in global memory
    int64_t sortByDimPreSize = 0;
    int64_t sortByDimDimSize = 0;
    int64_t sortByDimDimLength = 0;
    int64_t sortByDimDimBlockLength = 0;

  public:
    // NOTE: re-assigns the same defaults already set by the in-class member
    // initializers above; kept for explicitness.
    __aicore__ inline NdTensorSortByDimHelper()
    {
        data_addr = nullptr;
        workspace = nullptr;
        sortByDimPreSize = 0;
        sortByDimDimSize = 0;
        sortByDimDimLength = 0;
        sortByDimDimBlockLength = 0;
    }

    // Copy data_addr into workspace with the j (dim) and i (length) axes
    // swapped, one GM unit at a time; returns workspace, or data_addr
    // unchanged when no transpose is configured (sortByDimDimSize == 0).
    // NOTE(review): this indexes GM_ADDR directly, i.e. a scalar element-wise
    // copy through global memory — assumes GM_ADDR is byte/element
    // addressable and will be slow for large tensors; confirm acceptable.
    __aicore__ inline GM_ADDR transpose()
    {
        if (sortByDimDimSize == 0)
        {
            return data_addr;
        }

        // Stride of one "pre" slice in the source layout.
        int64_t k_length = sortByDimDimSize * sortByDimDimLength * sortByDimDimBlockLength;
        int64_t counter = 0;
        for (int64_t k = 0; k < sortByDimPreSize; ++k)
        {
            for (int64_t i = 0; i < sortByDimDimLength; ++i)
            {
                for (int64_t j = 0; j < sortByDimDimSize; ++j)
                {
                    for (int64_t q = 0; q < sortByDimDimBlockLength; ++q)
                    {
                        // Destination is written sequentially; source is read
                        // with i and j transposed relative to its layout.
                        workspace[counter++] =
                            data_addr[k * k_length + j * sortByDimDimLength * sortByDimDimBlockLength +
                                      i * sortByDimDimBlockLength + q];
                    }
                }
            }
        }

        return workspace;
    }
};

//  a ^ x = e ^ (x * lna)
// Element-wise power with a scalar exponent: dst[i] = a[i] ^ x, computed via
// exp(x * ln(a[i])) over `calcCount` elements.
// NOTE(review): `a` is clobbered (reused as scratch for x * ln(a)), and the
// Ln step is only meaningful for a[i] > 0 — confirm callers guarantee both.
template <typename T> __aicore__ void MyPow(LocalTensor<T> dst, LocalTensor<T> a, T x, uint32_t calcCount)
{
    Ln(dst, a, calcCount);   // dst = ln(a)
    Muls(a, dst, x, calcCount);   // a = x * ln(a)  (input clobbered here)
    Exp(dst, a, calcCount);   // dst = e^(x * ln(a)) = a^x
}

#ifdef __CCE_KT_TEST__
// Host-side (CPU simulation) debug helpers.
// NOTE(review): this #include sits inside `namespace AscendC` (opened earlier
// in this header); if <iostream> has not already been included before this
// header, its declarations would be wrapped into the namespace — consider
// hoisting the include above the namespace.
#include <iostream>
// Dump `count` elements of `tensor` with a `progress` tag between separators.
// NOTE(review): expands to three statements with no do/while(0) wrapper, so
// it misbehaves under an unbraced if/else — always use inside braces.
#define PRINT_TENSOR(tensor, count, progress)                                                                          \
    std::cout << #tensor << "---------" << progress << "----------" << std::endl;                                      \
    tensor.Print(count);                                                                                               \
    std::cout << "------------------------------------------" << std::endl;
// Print a scalar, labeled with its source expression.
#define PRINT_SCALAR(v) std::cout << #v << " : " << v << std::endl;
#else
// Device builds: debug printing compiles away to nothing.
#define PRINT_TENSOR(tensor, count, progress)
#define PRINT_SCALAR(v)
#endif

} // namespace AscendC

#endif