// Common macro/helper header for Ascend C kernels.
// The include guard now encloses the #include directives as well, so repeated
// inclusion skips them too (they previously sat outside the guard).
#ifndef __KERNEL_COMMON_H__
#define __KERNEL_COMMON_H__

#include "kernel_operator.h"
#include <cstdint>

#ifdef __CCE_KT_TEST__
// Host (CPU simulation) build: debug-print helpers.
// NOTE(review): these expand to multiple statements without a do/while(0)
// wrapper, so they are unsafe directly under an unbraced `if` — keep call
// sites braced.
#include <iostream>
#define PRINT_TENSOR(tensor, count, progress)                                                                          \
    std::cout << #tensor << "---------" << progress << "----------" << std::endl;                                      \
    tensor.Print(count);                                                                                               \
    std::cout << "------------------------------------------" << std::endl;
#define PRINT_SCALAR(v) std::cout << #v << " : " << v << std::endl << std::flush;
#else
// Device build: the printing macros compile away entirely.
#define PRINT_TENSOR(tensor, count, progress)
#define PRINT_SCALAR(v)
#endif

// Number of buffers per queue (2 = double buffering to overlap copy/compute).
#define BUFFER_NUM 2
// Unified-buffer alignment granularity in bytes.
#define ALIGN_SIZE 32
// Reinterpret a raw GM_ADDR as a typed __gm__ pointer, offset in ELEMENTS.
#define CAST_GM_ADDR(dtype, gm_addr, offset) (reinterpret_cast<__gm__ dtype *>(gm_addr) + offset)

// Name concatenation: derive per-variable tensor/queue/buffer identifiers
// from a logical variable name. TypeOf(varName) expects the including kernel
// to declare a matching `_DT_<varName>` type (alias or template parameter).
#define LTensorName(varName) localTensor##varName
#define GTensorName(varName) globalTensor##varName
#define QueName(varName) varName##_queue
#define TypeOf(varName) _DT_##varName
#define TBufName(varName) TBuf##varName
#define TBufTensorName(varName) TBufTensor##varName

// Definitions: declare the GlobalTensor / queue / TBuf objects for a logical
// variable name (see the name-concatenation macros above).
#define DefGlobalTensor(varName) GlobalTensor<TypeOf(varName)> GTensorName(varName);
#define DefInQue(varName) TQue<TPosition::VECIN, BUFFER_NUM> QueName(varName);
#define DefOutQue(varName) TQue<TPosition::VECOUT, BUFFER_NUM> QueName(varName);
#define DefCalcQue(varName) TQue<TPosition::VECCALC, BUFFER_NUM> QueName(varName);
#define DefBufVECCALC(varName) TBuf<TPosition::VECCALC> TBufName(varName);
#define DefBufVECIN(varName) TBuf<TPosition::VECIN> TBufName(varName);
// Fixed copy-paste bug: this previously declared the buffer at
// TPosition::VECIN, so "VECOUT" TBufs were actually placed at VECIN.
#define DefBufVECOUT(varName) TBuf<TPosition::VECOUT> TBufName(varName);
// Convenience: global tensor + matching input/output queue in one line.
#define DefInTensor(varName)                                                                                           \
    DefGlobalTensor(varName);                                                                                          \
    DefInQue(varName);
#define DefOutTensor(varName)                                                                                          \
    DefGlobalTensor(varName);                                                                                          \
    DefOutQue(varName);

// Initialization helpers. These expect `pipe` (TPipe) — and, for the
// SET_TENSOR_VALUE family, `tilingData` (CommonTiling) — to be in scope at
// the expansion site.
#define SetGlobalBuffer(varName, src, start_idx, blockLength)                                                          \
    GTensorName(varName).SetGlobalBuffer(CAST_GM_ADDR(TypeOf(varName), src, start_idx), blockLength);
// tileLength is in BYTES here ...
#define InitQueue(varName, tileLength) pipe.InitBuffer(QueName(varName), BUFFER_NUM, tileLength);
// ... and in ELEMENTS here (scaled by the element size).
#define InitQueueSimple(varName, tileLength) InitQueue(varName, tileLength * sizeof(TypeOf(varName)));
// Rounds the byte size up to the 32-byte unified-buffer granularity.
#define InitTBufBuffer(varName, tileLength) pipe.InitBuffer(TBufName(varName), (tileLength + 31) / 32 * 32);
// Scalar fill via per-element SetValue.
// NOTE(review): iterates tilingData.bufferSize elements, not the tensor's own
// size — confirm the buffer was sized from the same tiling before use.
#define SET_TENSOR_VALUE(t, v)                                                                                         \
    for (auto i = 0; i < tilingData.bufferSize; ++i)                                                                   \
    {                                                                                                                  \
        t.SetValue(i, v);                                                                                              \
    }
#define INIT_BUF_TENSOR_VALUE_SIMPLE(varName, v)                                                                       \
    TBufGet(varName, decltype(v));                                                                                     \
    SET_TENSOR_VALUE(TBufTensorName(varName), v);
// Allocate a TBuf for tileLength elements of decltype(v) and fill it with v.
#define InitTbufWithDefault(varName, tileLength, v)                                                                    \
    InitTBufBuffer(varName, tileLength * sizeof(decltype(v)));                                                         \
    INIT_BUF_TENSOR_VALUE_SIMPLE(varName, v)

// Operations: queue/buffer primitives keyed by the logical variable name.
// Alloc/DeQue introduce a `localTensor<varName>` local; EnQue/QueFree
// reference that same local, so they must share the enclosing scope.
#define QueAlloc(varName, dtype) auto LTensorName(varName) = QueName(varName).AllocTensor<dtype>();
#define QueAllocSimple(varName) QueAlloc(varName, TypeOf(varName));
#define DeQue(varName, dtype) auto LTensorName(varName) = QueName(varName).DeQue<dtype>();
#define DeQueSimple(varName) DeQue(varName, TypeOf(varName));
#define EnQue(varName) QueName(varName).EnQue(LTensorName(varName));
#define QueFree(varName) QueName(varName).FreeTensor(LTensorName(varName));
#define TBufGet(varName, dtype) auto TBufTensorName(varName) = TBufName(varName).Get<dtype>();

// Fused operations: stage-in (alloc, GM -> local copy, enqueue) and
// stage-out (dequeue, local -> GM copy, release) in a single macro each.
#define EnQueGlobal2Local(varName, index, calcCount)                                                                   \
    QueAllocSimple(varName);                                                                                           \
    DataCopy(LTensorName(varName), GTensorName(varName)[index], calcCount);                                            \
    EnQue(varName);

#define DeQueLocal2Global(varName, index, calcCount)                                                                   \
    DeQueSimple(varName);                                                                                              \
    DataCopy(GTensorName(varName)[index], LTensorName(varName), calcCount);                                            \
    QueFree(varName);

// Splits an elementwise op over `calcCount` elements into chunks of at most
// `maxRepeatCount` hardware repeats (one repeat covers 8 * 32 bytes of
// `dtype`). Inside `call_code`, `repeatTimes` is the repeat count of the
// current chunk and `index` is its element offset.
// Fixed: when needRepeatCount was an exact multiple of maxRepeatCount, the
// old `needRepeatCount % maxRepeatCount` gave repeatTimes == 0 for the final
// chunk, silently skipping the last maxRepeatCount repeats. The loop index is
// now uint64_t to avoid a signed/unsigned comparison as well.
#define CALL_0_LEVEL(maxRepeatCount, dtype, calcCount, call_code)                                                      \
    {                                                                                                                  \
        uint64_t oneRepeatDataCount = 8 * 32 / sizeof(dtype);                                                          \
        uint64_t needRepeatCount = (calcCount + oneRepeatDataCount - 1) / oneRepeatDataCount;                          \
        uint64_t needMaxRepeat = (needRepeatCount + maxRepeatCount - 1) / maxRepeatCount;                              \
        for (uint64_t i = 0; i < needMaxRepeat; ++i)                                                                   \
        {                                                                                                              \
            uint64_t repeatTimes = maxRepeatCount;                                                                     \
            uint64_t index = i * maxRepeatCount * oneRepeatDataCount;                                                  \
            if (i == needMaxRepeat - 1)                                                                                \
            {                                                                                                          \
                repeatTimes = needRepeatCount - i * maxRepeatCount;                                                    \
            }                                                                                                          \
            call_code                                                                                                  \
        }                                                                                                              \
    }

// Create a CommonTiling from the kernel's tiling struct (expects a
// `tiling_data` variable in scope at the expansion site).
// NOTE(review): "CRATE" is a typo for "CREATE"; the name is kept for source
// compatibility with existing kernels.
#define CRATE_COMMON_TILING(name)                                                                                      \
    AscendC::CommonTiling name;                                                                                        \
    name.Init(tiling_data.formerNum, tiling_data.formerLength, tiling_data.formerTileLength,                           \
              tiling_data.formerFinalCalcCount, tiling_data.tailNum, tiling_data.tailLength,                           \
              tiling_data.tailTileLength, tiling_data.tailFinalCalcCount);
#define CRATE_COMMON_TILING_SIMPLE CRATE_COMMON_TILING(commonTiling)

// Integer helpers. Arguments and results are now fully parenthesized so that
// expressions such as CEIL_DIV(x, y + z) expand correctly and the results
// compose inside larger expressions (previously `10 / ALIGN_TO(a, b)`
// divided by the CEIL_DIV part only, then multiplied by b).
#define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))
#define ALIGN_TO(a, b) (CEIL_DIV(a, b) * (b))
#define MAX(a, b) ((a) < (b) ? (b) : (a))
#define MIN(a, b) ((a) > (b) ? (b) : (a))

namespace AscendC
{
// Per-core tiling view. After Init(), the members describe the slice of work
// assigned to the current AI core: its element range in global memory
// (startIdx / blockLength), the per-iteration tile size (bufferSize), the
// resulting iteration count (loopCount), the element count of the last
// iteration (finalCalcCount) and whether this core is the last one overall
// (is_final). Cores [0, formerNum) get the "former" split parameters; cores
// [formerNum, formerNum + tailNum) get the "tail" parameters.
class CommonTiling
{
  public:
    uint64_t loopCount = 0;
    uint64_t bufferSize = 0;
    uint64_t finalCalcCount = 0;
    uint64_t startIdx = 0;
    uint64_t blockLength = 0;
    bool is_final = false;
    __aicore__ inline CommonTiling()
    {
    }
    // Selects this core's parameters from the former/tail split produced by
    // the host-side tiling function.
    __aicore__ inline void Init(uint64_t formerNum, uint64_t formerLength, uint64_t formerTileLength,
                                uint64_t formerFinalCalcCount, uint64_t tailNum, uint64_t tailLength,
                                uint64_t tailTileLength, uint64_t tailFinalCalcCount)
    {
        const uint64_t blockidx = GetBlockIdx();
        const bool isTailBlock = blockidx >= formerNum;
        // Last core across both groups.
        this->is_final = (blockidx + 1) == formerNum + tailNum;
        this->blockLength = isTailBlock ? tailLength : formerLength;
        this->bufferSize = isTailBlock ? tailTileLength : formerTileLength;
        this->finalCalcCount = isTailBlock ? tailFinalCalcCount : formerFinalCalcCount;
        this->loopCount = CEIL_DIV(blockLength, bufferSize);
        // Former cores start at blockidx * formerLength; tail cores start
        // after all former data plus any preceding tail blocks.
        this->startIdx = isTailBlock ? formerLength * formerNum + (blockidx - formerNum) * tailLength
                                     : blockidx * formerLength;
    }
};


// Ceiling division for positive operands: the smallest q with q * b >= a.
__aicore__ inline int64_t RoundUp(int64_t a, int64_t b)
{
    const int64_t biased = a + b - 1;
    return biased / b;
}

// Streams elements of an ND tensor out of global memory along a "sort by"
// dimension. The flat source index used below is
//   cur_k * (sortByDimDimSize * sortByDimDimLength) + cur_j * sortByDimDimLength + cur_i
// so cur_k walks the outer dimension, cur_j walks the sorted dimension
// (stride sortByDimDimLength) and cur_i walks the inner contiguous dimension.
// The cursor (cur_k, cur_i, cur_j) persists across read_to_tensor() calls so
// a long range can be consumed chunk by chunk.
class NdTensorSortByDimHelper
{
  public:
    GM_ADDR data_addr = nullptr;              // base GM address of the source tensor
    int64_t sortByDimPreSize = 0;             // outer extent (bound of cur_k)
    int64_t sortByDimDimSize = 0;             // sorted-dim extent (bound of cur_j)
    int64_t sortByDimDimLength = 0;           // inner extent / sorted-dim stride (bound of cur_i)
    int64_t sortByDimDimBlockLength = 0;      // NOTE(review): never read in this class — confirm external use
    int64_t startIdx = 0;                     // first (k, i) position this instance emits
    int64_t length = 0;                       // number of (k, i) positions to emit
    int64_t cur_k = 0;                        // cursor: outer dimension
    int64_t cur_i = 0;                        // cursor: inner dimension
    int64_t cur_j = 0;                        // cursor: sorted dimension

  public:
    __aicore__ inline NdTensorSortByDimHelper()
    {
        data_addr = nullptr;
        sortByDimPreSize = 0;
        sortByDimDimSize = 0;
        sortByDimDimLength = 0;
        sortByDimDimBlockLength = 0;
        startIdx = 0;
        length = 0;
    }

    // Rewinds the cursor to the beginning without touching the shape fields.
    __aicore__ inline void reset()
    {
        cur_k = 0;
        cur_i = 0;
        cur_j = 0;
    }

    // Copies up to `count` elements into `tensor` (one SetValue per element),
    // starting from the persisted cursor and leaving the cursor just past the
    // last element written. Returns early (fewer than `count` values written)
    // when the iteration space is exhausted.
    template <typename _DT_DATA> __aicore__ inline void read_to_tensor(LocalTensor<_DT_DATA> &tensor, int64_t count)
    {
        // Normalize a cursor that a previous call left one-past-the-end.
        if (cur_j >= sortByDimDimSize)
        {
            cur_j = 0;
            ++cur_i;
        }
        if (cur_i >= sortByDimDimLength)
        {
            cur_i = 0;
            ++cur_k;
        }
        if (cur_k >= sortByDimPreSize)
        {
            return;
        }
        auto data_casted_ptr = CAST_GM_ADDR(_DT_DATA, data_addr, 0);
        int64_t k_length = sortByDimDimSize * sortByDimDimLength;
        int64_t counter = 0; // elements written into `tensor` so far
        for (; cur_k < sortByDimPreSize; ++cur_k)
        {
            for (; cur_i < sortByDimDimLength; ++cur_i)
            {
                // NOTE(review): `cur_k * cur_i` looks suspect as a linear
                // (k, i) position — for cur_k == 0 it is always 0, so the
                // whole first outer slice is skipped whenever startIdx > 0.
                // A linear index would be cur_k * sortByDimDimLength + cur_i;
                // confirm the intent against callers before changing.
                if (cur_k * cur_i < startIdx)
                {
                    continue;
                }

                if (cur_k * cur_i >= startIdx + length)
                {
                    break;
                }

                for (; cur_j < sortByDimDimSize; ++cur_j)
                {
                    auto value = data_casted_ptr[cur_k * k_length + cur_j * sortByDimDimLength + cur_i];
                    tensor.SetValue(counter++, value);
                    if (counter >= count)
                    {
                        ++cur_j; // resume after this element on the next call
                        return;
                    }
                }
                cur_j = 0;
            }
            cur_i = 0;
        }
    }
};

// Elementwise power via the identity a^x = e^(x * ln a).
// NOTE(review): valid only for a > 0 elementwise (Ln of <= 0 is undefined).
// WARNING: `a` is used as scratch and is overwritten with x * ln(a).
template <typename T> __aicore__ void MyPow(LocalTensor<T> dst, LocalTensor<T> a, T x, uint32_t calcCount)
{
    Ln(dst, a, calcCount);      // dst = ln(a)
    Muls(a, dst, x, calcCount); // a = x * ln(a)   (clobbers the input)
    Exp(dst, a, calcCount);     // dst = e^(x * ln(a))
}

// Evaluates the degree-N polynomial
//   coef[0] * x^N + coef[1] * x^(N-1) + ... + coef[N]
// elementwise over `x` with Horner's scheme, writing the result into `ans`.
// Requires N >= 1 (the do/while body always runs at least once) and N + 1
// coefficients in `coef`.
template <typename _DT_X>
__aicore__ void polevl(LocalTensor<_DT_X> x, LocalTensor<_DT_X> ans, const float coef[], int N, int32_t calcCount)
{
    const float *p = coef;
    int i = N;
    // ans = *p++  (seed with the leading coefficient). Duplicate writes the
    // scalar directly; the previous Muls-by-zero seeding kept NaN/Inf left
    // over in `ans` alive (0 * NaN == NaN) and poisoned the whole result.
    Duplicate(ans, _DT_X(*p++), calcCount);
    do
    {
        // ans = ans * x + *p++;
        Mul(ans, ans, x, calcCount);
        Adds(ans, ans, _DT_X(*p++), calcCount);
    } while (--i);
}

// Fluent helper for building an elementwise "where" operation:
//   target = mask ? then_tensor : else_scalar
// with an optional reverse branch added into `target` for elements where the
// mask is false. The 8-bit compare mask lives in the CMP_RET scratch TBuf
// unless an external mask tensor is passed to Begin().
// Typical chain: Begin(t) -> IfXxx(a, b) -> Then(v) -> ElseScalar(s) -> End().
template <typename _DT_T> class CompareHelper
{
  public:
    CommonTiling tilingData;
    DefBufVECCALC(CMP_RET);
    DefBufVECCALC(TEMP_RET);
    LocalTensor<_DT_T> target;
    LocalTensor<_DT_T> then_tensor;
    _DT_T else_scalar = _DT_T(0);
    LocalTensor<_DT_T> reverse_tensor_tensor;
    _DT_T reverse_else_scalar = _DT_T(0);
    uint32_t calcCount = 0;
    bool has_reverse = false;
    LocalTensor<uint8_t> *out_ret = nullptr;
    __aicore__ inline CompareHelper()
    {
    }

    // Sizes both scratch TBufs for tilingData.bufferSize elements of _DT_T.
    __aicore__ inline void Init(CommonTiling &tiling, TPipe &pipe)
    {
        tilingData = tiling;
        InitTBufBuffer(CMP_RET, tilingData.bufferSize * sizeof(_DT_T));
        InitTBufBuffer(TEMP_RET, tilingData.bufferSize * sizeof(_DT_T));
    }

    // Number of elements each subsequent compare/select processes.
    __aicore__ inline void SetCalcCount(uint32_t calcCount)
    {
        this->calcCount = calcCount;
    }

    // Current mask viewed as uint8 elements (external tensor if supplied).
    __aicore__ inline auto GetRetUint8()
    {
        if (out_ret)
            return out_ret->ReinterpretCast<uint8_t>();
        return TBufName(CMP_RET).Get<uint8_t>();
    }

    // Current mask viewed as uint16 elements, for the bitwise ops below.
    __aicore__ inline auto GetRetUint16()
    {
        if (out_ret)
            return out_ret->ReinterpretCast<uint16_t>();
        return TBufName(CMP_RET).Get<uint16_t>();
    }

    // NOTE(review): despite the name this is a getter — it returns the
    // internal CMP_RET buffer as uint16 and ignores out_ret; confirm callers.
    __aicore__ inline auto SetRet()
    {
        return TBufName(CMP_RET).Get<uint16_t>();
    }

    // Starts a new chain on `taret_tensor` [sic] and resets optional state.
    // Declared `auto` but effectively returns void.
    __aicore__ inline auto Begin(LocalTensor<_DT_T> &taret_tensor, LocalTensor<uint8_t> *outRet = nullptr)
    {
        target = taret_tensor;
        else_scalar = _DT_T(0);
        reverse_else_scalar = _DT_T(0);
        has_reverse = false;
        out_ret = outRet;
    }

    // Values used where the mask is true.
    __aicore__ inline void Then(LocalTensor<_DT_T> &target_value)
    {
        then_tensor = target_value;
    }

    // Scalar used where the mask is false (default 0).
    __aicore__ inline void ElseScalar(_DT_T target_value)
    {
        else_scalar = target_value;
    }

    // Values for the inverted mask; enables the reverse branch in End().
    __aicore__ inline void Reverse(LocalTensor<_DT_T> &target_value)
    {
        reverse_tensor_tensor = target_value;
        has_reverse = true;
    }

    // Scalar for the inverted-mask else branch (default 0).
    __aicore__ inline void ReverseElseScalar(_DT_T target_value)
    {
        reverse_else_scalar = target_value;
    }

    // mask |= target_value.
    // NOTE(review): the op length ALIGN_TO(calcCount / 2, 2) for these uint16
    // bitwise operations is assumed to match how callers size the mask
    // tensors — verify against the call sites.
    __aicore__ inline void Or2Ret(LocalTensor<uint16_t> &target_value)
    {
        Or(GetRetUint16(), GetRetUint16(), target_value, ALIGN_TO(calcCount / 2, 2));
    }

    // mask &= target_value.
    __aicore__ inline void And2Ret(LocalTensor<uint16_t> &target_value)
    {
        And(GetRetUint16(), GetRetUint16(), target_value, ALIGN_TO(calcCount / 2, 2));
    }

    // target_value |= mask.
    __aicore__ inline void Or2Out(LocalTensor<uint16_t> &target_value)
    {
        Or(target_value, target_value, GetRetUint16(), ALIGN_TO(calcCount / 2, 2));
    }

    // target_value &= mask.
    __aicore__ inline void And2Out(LocalTensor<uint16_t> &target_value)
    {
        And(target_value, target_value, GetRetUint16(), ALIGN_TO(calcCount / 2, 2));
    }

    // Copies the mask into target_value.
    __aicore__ inline void Copy2Out(LocalTensor<uint16_t> &target_value)
    {
        DataCopy(target_value, GetRetUint16(), ALIGN_TO(calcCount / 2, 2));
    }

    // Resolves the chain: target = mask ? then_tensor : else_scalar, and if
    // Reverse() was set, additionally
    //   target += (~mask ? reverse_tensor_tensor : reverse_else_scalar).
    // Declared `auto` but effectively returns void. The reverse path reuses
    // CMP_RET for the inverted mask and TEMP_RET for the second select.
    __aicore__ inline auto End()
    {
        TBufGet(CMP_RET, uint8_t);
        auto target_ret = out_ret ? *out_ret : TBufTensorName(CMP_RET);
        Select(target, target_ret, then_tensor, else_scalar, SELMODE::VSEL_TENSOR_SCALAR_MODE, calcCount);
        if (has_reverse)
        {
            TBufGet(TEMP_RET, _DT_T);
            Not(TBufTensorName(CMP_RET), target_ret, calcCount);
            Select(TBufTensorName(TEMP_RET), TBufTensorName(CMP_RET), reverse_tensor_tensor, reverse_else_scalar,
                   SELMODE::VSEL_TENSOR_SCALAR_MODE, calcCount);
            Add(target, target, TBufTensorName(TEMP_RET), calcCount);
        }
    }

    // One-shot select with the current mask: dst = mask ? src0 : src1.
    __aicore__ inline auto UserRetSelect(LocalTensor<_DT_T> &dst, LocalTensor<_DT_T> &src0, _DT_T src1 = _DT_T(0))
    {
        TBufGet(CMP_RET, uint8_t);
        auto target_ret = out_ret ? *out_ret : TBufTensorName(CMP_RET);
        Select(dst, target_ret, src0, src1, SELMODE::VSEL_TENSOR_SCALAR_MODE, calcCount);
    }

    // mask = flag_value < target_value.
    // NOTE(review): "Tan" is a typo for "Than"; name kept for compatibility.
    __aicore__ inline void IfLessTan(LocalTensor<_DT_T> &flag_value, LocalTensor<_DT_T> &target_value)
    {
        TBufGet(CMP_RET, uint8_t);
        Compare(TBufTensorName(CMP_RET), flag_value, target_value, CMPMODE::LT, calcCount);
    }

    // mask = flag_value > target_value. ("Grater" typo kept for compatibility.)
    __aicore__ inline void IfGraterThan(LocalTensor<_DT_T> &flag_value, LocalTensor<_DT_T> &target_value)
    {
        TBufGet(CMP_RET, uint8_t);
        Compare(TBufTensorName(CMP_RET), flag_value, target_value, CMPMODE::GT, calcCount);
    }

    // mask = flag_value >= target_value.
    __aicore__ inline void IfGraterEqual(LocalTensor<_DT_T> &flag_value, LocalTensor<_DT_T> &target_value)
    {
        TBufGet(CMP_RET, uint8_t);
        Compare(TBufTensorName(CMP_RET), flag_value, target_value, CMPMODE::GE, calcCount);
    }

    // mask = flag_value == target_value.
    __aicore__ inline void IfEqual(LocalTensor<_DT_T> &flag_value, LocalTensor<_DT_T> &target_value)
    {
        TBufGet(CMP_RET, uint8_t);
        Compare(TBufTensorName(CMP_RET), flag_value, target_value, CMPMODE::EQ, calcCount);
    }

    // mask = flag_value != target_value.
    __aicore__ inline void IfNotEqual(LocalTensor<_DT_T> &flag_value, LocalTensor<_DT_T> &target_value)
    {
        TBufGet(CMP_RET, uint8_t);
        Compare(TBufTensorName(CMP_RET), flag_value, target_value, CMPMODE::NE, calcCount);
    }

    // mask = flag_value <= target_value.
    __aicore__ inline void IfLessEqual(LocalTensor<_DT_T> &flag_value, LocalTensor<_DT_T> &target_value)
    {
        TBufGet(CMP_RET, uint8_t);
        Compare(TBufTensorName(CMP_RET), flag_value, target_value, CMPMODE::LE, calcCount);
    }
};

} // namespace AscendC

#endif