#include "common.h"
#include "kernel_operator.h"
#include <cstdint>

namespace AscendC
{
template <typename _DT_INPUT_DATA, typename _DT_X1, typename _DT_X2, typename _DT_VALUE, typename _DT_Y>
// Generic addcmul kernel: Y = INPUT_DATA + VALUE * X1 * X2, where VALUE is a
// scalar loaded once from global memory. Works for types with native
// Mul/Muls/Add support (float, half, int32_t).
class KernelAddcmul
{
  public:
    TPipe pipe;
    CommonTiling tilingData;
    DefInTensor(INPUT_DATA);
    DefInTensor(X1);
    DefInTensor(X2);
    DefOutTensor(Y);

    // Scalar multiplier, read once from global memory in Init().
    TypeOf(VALUE) value;
    DefGlobalTensor(VALUE);

    DefBufVECIN(VALUE);

    __aicore__ inline KernelAddcmul()
    {
    }

    // Binds this core's slice of the global tensors, sets up the queues, and
    // loads the scalar VALUE into this->value.
    __aicore__ inline void Init(GM_ADDR input_data, GM_ADDR x1, GM_ADDR x2, GM_ADDR value, GM_ADDR y,
                                CommonTiling &tiling)
    {
        // Cache the tiling configuration.
        tilingData = tiling;

        // Slice the global tensors for this core.
        SetGlobalBuffer(INPUT_DATA, input_data, tilingData.startIdx, tilingData.blockLength);
        SetGlobalBuffer(X1, x1, tilingData.startIdx, tilingData.blockLength);
        SetGlobalBuffer(X2, x2, tilingData.startIdx, tilingData.blockLength);
        // VALUE is a scalar: every core reads one aligned 32-byte block at offset 0.
        SetGlobalBuffer(VALUE, value, 0, 32);
        SetGlobalBuffer(Y, y, tilingData.startIdx, tilingData.blockLength);

        // Initialize the in/out queues.
        InitQueueSimple(INPUT_DATA, tilingData.bufferSize);
        InitQueueSimple(X1, tilingData.bufferSize);
        InitQueueSimple(X2, tilingData.bufferSize);
        InitQueueSimple(Y, tilingData.bufferSize);

        // Stage the scalar VALUE through a local buffer and read element 0.
        InitTBufBuffer(VALUE, 32);
        TBufGet(VALUE, TypeOf(VALUE));
        DataCopy(TBufTensorName(VALUE), GTensorName(VALUE), 32);
        this->value = TBufTensorName(VALUE).GetValue(0);
    }

    // Drives the tiled CopyIn -> Compute -> CopyOut pipeline.
    // NOTE(review): assumes tilingData.loopCount >= 1 — loopCount is unsigned,
    // so 0 would underflow; presumably guaranteed by the tiling function.
    __aicore__ inline void Process()
    {
        auto finalIndex = tilingData.loopCount - 1;
        for (uint32_t i = 0; i < tilingData.loopCount - 1; ++i)
        {
            CopyIn(i, tilingData.bufferSize);
            Compute(i, tilingData.bufferSize);
            CopyOut(i, tilingData.bufferSize);
        }
        // The last tile may be partial: copy the (aligned) copy count but
        // compute only the exact element count.
        auto finalCopyCount = tilingData.getFinalCopyCount<TypeOf(INPUT_DATA)>();
        CopyIn(finalIndex, finalCopyCount);
        Compute(finalIndex, tilingData.finalCalcCount);
        CopyOut(finalIndex, finalCopyCount);
    }

    // Element offset of tile `progress` within this core's slice.
    __aicore__ inline int64_t GetStartIndex(uint32_t progress)
    {
        // Widen before multiplying: uint32_t * uint32_t can overflow 32 bits.
        return static_cast<int64_t>(progress) * tilingData.bufferSize;
    }

    // Moves one tile of each input from global to local memory.
    __aicore__ inline void CopyIn(uint32_t progress, uint32_t calcCount)
    {
        auto index = GetStartIndex(progress);
        EnQueGlobal2Local(INPUT_DATA, index, calcCount);
        EnQueGlobal2Local(X1, index, calcCount);
        EnQueGlobal2Local(X2, index, calcCount);
    }

    // Y = INPUT_DATA + VALUE * X1 * X2
    __aicore__ inline void Compute(uint32_t progress, uint32_t calcCount)
    {
        DeQueSimple(INPUT_DATA);
        DeQueSimple(X1);
        DeQueSimple(X2);
        QueAllocSimple(Y);
        Mul(LTensorName(Y), LTensorName(X1), LTensorName(X2), calcCount);  // Y = X1 * X2
        Muls(LTensorName(Y), LTensorName(Y), this->value, calcCount);      // Y *= value
        Add(LTensorName(Y), LTensorName(Y), LTensorName(INPUT_DATA), calcCount);
        EnQue(Y);
        QueFree(INPUT_DATA);
        QueFree(X1);
        QueFree(X2);
    }

    // Moves one tile of the result from local to global memory.
    __aicore__ inline void CopyOut(uint32_t progress, uint32_t calcCount)
    {
        auto index = GetStartIndex(progress);
        DeQueLocal2Global(Y, index, calcCount);
    }
};

template <typename _DT_INPUT_DATA = int8_t, typename _DT_X1 = int8_t, typename _DT_X2 = int8_t,
          typename _DT_VALUE = int8_t, typename _DT_Y = int8_t>
// int8 addcmul kernel: Y = INPUT_DATA + VALUE * X1 * X2.
// int8 has no native vector Mul/Add, so operands are widened
// int8 -> half -> int16, computed in int16 (wrap-around on overflow, matching
// int8 arithmetic), then the low byte of each int16 is extracted via
// GatherMask and narrowed back to int8.
class KernelAddcmulInt8
{
  public:
    TPipe pipe;
    CommonTiling tilingData;

    DefInTensor(INPUT_DATA);
    DefInTensor(X1);
    DefInTensor(X2);
    DefOutTensor(Y);

    // Scalar multiplier, widened to int16 so Muls operates in int16.
    int16_t value;
    DefGlobalTensor(VALUE);
    DefBufVECIN(VALUE);
    DefBufVECCALC(TEMP_HALF);     // half scratch; 2x sized for the final widen
    DefBufVECCALC(TEMP_INT16_1);  // int16 accumulator
    DefBufVECCALC(TEMP_INT16_2);  // int16 second operand

    __aicore__ inline KernelAddcmulInt8()
    {
    }

    // Binds this core's slice of the global tensors, sets up queues and
    // scratch buffers, and loads the scalar VALUE.
    __aicore__ inline void Init(GM_ADDR input_data, GM_ADDR x1, GM_ADDR x2, GM_ADDR value, GM_ADDR y,
                                CommonTiling &tiling)
    {
        tilingData = tiling;

        // Slice the global tensors for this core.
        SetGlobalBuffer(INPUT_DATA, input_data, tilingData.startIdx, tilingData.blockLength);
        SetGlobalBuffer(X1, x1, tilingData.startIdx, tilingData.blockLength);
        SetGlobalBuffer(X2, x2, tilingData.startIdx, tilingData.blockLength);
        // VALUE is a scalar: every core reads one aligned 32-byte block at
        // offset 0 (previously sliced with startIdx/blockLength, which made
        // cores other than core 0 read past the scalar; now consistent with
        // the generic KernelAddcmul).
        SetGlobalBuffer(VALUE, value, 0, 32);
        SetGlobalBuffer(Y, y, tilingData.startIdx, tilingData.blockLength);

        // Initialize the in/out queues.
        InitQueueSimple(INPUT_DATA, tilingData.bufferSize);
        InitQueueSimple(X1, tilingData.bufferSize);
        InitQueueSimple(X2, tilingData.bufferSize);
        InitQueueSimple(Y, tilingData.bufferSize);

        // TEMP_HALF is double-sized: the final step widens 2*calcCount int8
        // lanes (the reinterpreted int16 result) into half.
        InitTBufBuffer(TEMP_HALF, tilingData.bufferSize * sizeof(half) * 2);
        InitTBufBuffer(TEMP_INT16_1, tilingData.bufferSize * sizeof(int16_t));
        InitTBufBuffer(TEMP_INT16_2, tilingData.bufferSize * sizeof(int16_t));

        // Stage the scalar VALUE through a local buffer and read element 0
        // (int8 widened to int16 on assignment).
        InitTBufBuffer(VALUE, 32);
        TBufGet(VALUE, TypeOf(VALUE));
        DataCopy(TBufTensorName(VALUE), GTensorName(VALUE), 32);
        this->value = TBufTensorName(VALUE).GetValue(0);
    }

    // Drives the tiled CopyIn -> Compute -> CopyOut pipeline.
    // NOTE(review): assumes tilingData.loopCount >= 1 — loopCount is unsigned,
    // so 0 would underflow; presumably guaranteed by the tiling function.
    __aicore__ inline void Process()
    {
        auto finalIndex = tilingData.loopCount - 1;
        for (uint32_t i = 0; i < tilingData.loopCount - 1; ++i)
        {
            CopyIn(i, tilingData.bufferSize);
            Compute(i, tilingData.bufferSize);
            CopyOut(i, tilingData.bufferSize);
        }
        // The last tile may be partial: copy the (aligned) copy count but
        // compute only the exact element count.
        auto finalCopyCount = tilingData.getFinalCopyCount<TypeOf(INPUT_DATA)>();
        CopyIn(finalIndex, finalCopyCount);
        Compute(finalIndex, tilingData.finalCalcCount);
        CopyOut(finalIndex, finalCopyCount);
    }

    // Element offset of tile `progress` within this core's slice.
    __aicore__ inline int64_t GetStartIndex(uint32_t progress)
    {
        // Widen before multiplying: uint32_t * uint32_t can overflow 32 bits.
        return static_cast<int64_t>(progress) * tilingData.bufferSize;
    }

    // Moves one tile of each input from global to local memory.
    __aicore__ inline void CopyIn(uint32_t progress, uint32_t calcCount)
    {
        auto index = GetStartIndex(progress);
        EnQueGlobal2Local(INPUT_DATA, index, calcCount);
        EnQueGlobal2Local(X1, index, calcCount);
        EnQueGlobal2Local(X2, index, calcCount);
    }

    // Y = INPUT_DATA + VALUE * X1 * X2
    __aicore__ inline void Compute(uint32_t progress, uint32_t calcCount)
    {
        DeQueSimple(INPUT_DATA);
        DeQueSimple(X1);
        DeQueSimple(X2);
        QueAllocSimple(Y);

        TBufGet(TEMP_HALF, half);
        TBufGet(TEMP_INT16_1, int16_t);
        TBufGet(TEMP_INT16_2, int16_t);

        // temp1 = (int16)x1 (no direct int8->int16 cast; go via half)
        Cast(TBufTensorName(TEMP_HALF), LTensorName(X1), RoundMode::CAST_NONE, calcCount);
        Cast(TBufTensorName(TEMP_INT16_1), TBufTensorName(TEMP_HALF), RoundMode::CAST_TRUNC, calcCount);

        // temp2 = (int16)x2
        Cast(TBufTensorName(TEMP_HALF), LTensorName(X2), RoundMode::CAST_NONE, calcCount);
        Cast(TBufTensorName(TEMP_INT16_2), TBufTensorName(TEMP_HALF), RoundMode::CAST_TRUNC, calcCount);

        // temp1 = temp1 * temp2
        Mul(TBufTensorName(TEMP_INT16_1), TBufTensorName(TEMP_INT16_1), TBufTensorName(TEMP_INT16_2), calcCount);

        // temp1 = temp1 * value
        Muls(TBufTensorName(TEMP_INT16_1), TBufTensorName(TEMP_INT16_1), this->value, calcCount);

        // temp2 = (int16)input_data
        Cast(TBufTensorName(TEMP_HALF), LTensorName(INPUT_DATA), RoundMode::CAST_NONE, calcCount);
        Cast(TBufTensorName(TEMP_INT16_2), TBufTensorName(TEMP_HALF), RoundMode::CAST_TRUNC, calcCount);

        // temp1 = temp1 + temp2
        Add(TBufTensorName(TEMP_INT16_1), TBufTensorName(TEMP_INT16_1), TBufTensorName(TEMP_INT16_2), calcCount);

        // Reinterpret the int16 results as int8 byte pairs and widen all
        // 2*calcCount bytes to half.
        auto int8_ret = TBufTensorName(TEMP_INT16_1).ReinterpretCast<int8_t>();
        Cast(TBufTensorName(TEMP_HALF), int8_ret, RoundMode::CAST_NONE, calcCount * 2);

        // Keep only the even lanes (the low byte of each little-endian int16,
        // i.e. the int8 wrap-around result) via GatherMask built-in pattern 1.
        uint64_t rsvdCnt = 0;
        // GatherMask repeatTimes is a uint16 field, so large tiles are issued
        // in batches of at most 0xFFFF repeats.
        uint64_t maxRepeatCount = 0xFFFF;
        uint64_t oneRepeatDataCount = 8 * 32 / sizeof(half); // half elements per repeat
        uint64_t needRepeatCount = (calcCount * 2 + oneRepeatDataCount - 1) / oneRepeatDataCount;
        uint64_t needMaxRepeat = (needRepeatCount + maxRepeatCount - 1) / maxRepeatCount;
        auto half_temp2 = TBufTensorName(TEMP_INT16_2).ReinterpretCast<half>();
        for (uint64_t i = 0; i < needMaxRepeat; ++i)
        {
            auto repeatTimes = maxRepeatCount;
            auto index = i * maxRepeatCount * oneRepeatDataCount;
            if (i == needMaxRepeat - 1)
            {
                // Remaining repeats for the last batch. Unlike
                // `needRepeatCount % maxRepeatCount`, this stays non-zero
                // when needRepeatCount divides evenly.
                repeatTimes = needRepeatCount - i * maxRepeatCount;
            }
            GatherMask(half_temp2[index / 2], TBufTensorName(TEMP_HALF)[index], 1, false, 0,
                       {1, uint16_t(repeatTimes), 8, 0}, rsvdCnt);
        }

        // Narrow the gathered half values back to int8.
        Cast(LTensorName(Y), half_temp2, RoundMode::CAST_TRUNC, calcCount);
        EnQue(Y);
        QueFree(INPUT_DATA);
        QueFree(X1);
        QueFree(X2);
    }

    // Moves one tile of the result from local to global memory.
    __aicore__ inline void CopyOut(uint32_t progress, uint32_t calcCount)
    {
        auto index = GetStartIndex(progress);
        DeQueLocal2Global(Y, index, calcCount);
    }
};
} // namespace AscendC

// Kernel entry point: dispatches to a typed addcmul implementation based on
// the dtype code carried in the tiling data. Unknown codes are a no-op.
extern "C" __global__ __aicore__ void addcmul(GM_ADDR input_data, GM_ADDR x1, GM_ADDR x2, GM_ADDR value, GM_ADDR y,
                                              GM_ADDR workspace, GM_ADDR tiling)
{
    GET_TILING_DATA(tiling_data, tiling);
    CRATE_COMMON_TILING_SIMPLE;
    const auto dtypeCode = tiling_data.dType;
    if (dtypeCode == 0)
    {
        // float32
        AscendC::KernelAddcmul<float, float, float, float, float> kernel;
        kernel.Init(input_data, x1, x2, value, y, commonTiling);
        kernel.Process();
    }
    else if (dtypeCode == 1)
    {
        // float16
        AscendC::KernelAddcmul<half, half, half, half, half> kernel;
        kernel.Init(input_data, x1, x2, value, y, commonTiling);
        kernel.Process();
    }
    else if (dtypeCode == 2)
    {
        // int8 needs the widening implementation
        AscendC::KernelAddcmulInt8 kernel;
        kernel.Init(input_data, x1, x2, value, y, commonTiling);
        kernel.Process();
    }
    else if (dtypeCode == 3)
    {
        // int32
        AscendC::KernelAddcmul<int32_t, int32_t, int32_t, int32_t, int32_t> kernel;
        kernel.Init(input_data, x1, x2, value, y, commonTiling);
        kernel.Process();
    }
}