/**
 * This program is free software, you can redistribute it and/or modify it.
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 2.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
 * BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

/*!
 * \file reduce_log_sum_exp.h
 * \brief
*/


#ifndef REDUCE_LOG_SUM_EXP_H
#define REDUCE_LOG_SUM_EXP_H

#include "kernel_operator.h"
#include "kernel_tiling/kernel_tiling.h"
#include "reduce_log_sum_exp_tiling_data.h"
#include "reduce_log_sum_exp_tiling_key.h"

namespace MyReduceLogSumExp{

using namespace AscendC;
using namespace std;

constexpr int32_t BUFFER_NUM = 2;
constexpr size_t MAX_DIM_NUM = 20;
    
/**
 * Vector kernel computing reduce_log_sum_exp: log(sum(exp(x))) over the axes
 * selected by the tiling data (axes/reduceMask).
 * TYPE_X: input element type in global memory (half or float paths exist in
 * Compute); TYPE_Y: output element type.
 */
template <typename TYPE_X, typename TYPE_Y>
class KernelReduceLogSumExp {
public:
    __aicore__ inline KernelReduceLogSumExp(){};
    // Copies tiling fields into members, derives this core's output slice,
    // binds GM tensors and sizes the UB queues/scratch buffers.
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, ReduceLogSumExpTilingData& tiling);
    // Produces this core's coreDataNum output elements.
    __aicore__ inline void Process();

private:
    // Loads processDataNum strided input elements starting at element Offset of xGm.
    __aicore__ inline void CopyIn(uint32_t Offset);
    // Writes one finished output element to yGm[Offset].
    __aicore__ inline void CopyOut(uint32_t Offset);
    // exp() + partial sum of the tile currently in inQueueX, accumulated into `sum`.
    __aicore__ inline void Compute();
    // Element offset in xGm for reduction cycle i of this core's k-th output.
    __aicore__ inline void CalOffset(uint32_t i, uint32_t k, uint32_t &Offset);

private:
    TPipe pipe;
    
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;   // double-buffered input tiles
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY; // double-buffered output elements
    
    TBuf<QuePosition::VECCALC> tmpFloatBuf; // fp32 scratch for cast/exp/partial sums
    TBuf<QuePosition::VECCALC> maskBuf;     // GatherMask bit-pattern storage
    TBuf<QuePosition::VECCALC> workBuf;     // ReduceSum work space
    TBuf<QuePosition::VECCALC> sumBuf;      // backing store for `sum`
    
    GlobalTensor<TYPE_X> xGm;
    GlobalTensor<TYPE_Y> yGm;    
    LocalTensor<float> sum;     // 1-element fp32 running accumulator (see Process)
    
    uint32_t cycleDataNum;      // reduced elements covered per reduction cycle (see CalOffset)
    uint32_t cycleNum;          // reduction cycles per output element
    uint32_t coreDataNum;       // output elements this core produces
    uint32_t tileDataNum;       // capacity (elements) of one UB tile
    uint32_t tileNum;           // tiles per cycle; the last tile holds tailDataNum
    uint32_t tailDataNum;       // element count of the final tile in a cycle
    uint32_t axesNum;           // number of reduced axes
    uint32_t shapeDimNum;       // rank of the input shape
    uint32_t inputNum;          // total input element count
    uint32_t processDataNum;    // element count of the tile being processed
    uint32_t globalBufferIndex; // global index of this core's first output element
    
    int32_t axes[MAX_DIM_NUM];       // indices of the reduced axes
    int32_t shapeNum[MAX_DIM_NUM];   // input extent per dimension
    int32_t reduceMask[MAX_DIM_NUM]; // nonzero if the dimension is reduced
    int32_t strides[MAX_DIM_NUM];    // element stride per dimension
};

template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelReduceLogSumExp<TYPE_X, TYPE_Y>::Init(GM_ADDR x, GM_ADDR y, ReduceLogSumExpTilingData& tiling)
{
    // Copies tiling parameters into members, derives this core's slice of the
    // output, binds the GM tensors and sizes the UB queues and scratch buffers.
    ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");
    uint32_t coreId = GetBlockIdx();
    // The first tailCoreNum cores each produce one extra output element.
    // Start from the "big core" offset, then correct it for later cores.
    globalBufferIndex = (tiling.coreDataNum + 1) * coreId;
    if (coreId < tiling.tailCoreNum) {
        this->coreDataNum = tiling.coreDataNum + 1;
    } else {
        this->coreDataNum = tiling.coreDataNum;
        // Cores past the tail group each handled one element fewer; pull back.
        globalBufferIndex -= (coreId - tiling.tailCoreNum);
    }
    this->tileDataNum = tiling.tileDataNum;
    this->tileNum = tiling.tileNum;
    this->tailDataNum = tiling.tailDataNum;
    this->cycleDataNum = tiling.cycleDataNum;
    this->cycleNum = tiling.cycleNum;
    this->inputNum = tiling.inputNum;
    this->shapeDimNum = tiling.shapeDimNum;
    this->axesNum = tiling.axesNum;
    // size_t index: MAX_DIM_NUM is size_t; int32_t here triggered a
    // signed/unsigned comparison in the loop condition.
    for (size_t i = 0; i < MAX_DIM_NUM; i++) {
        this->shapeNum[i] = tiling.shapeNum[i];
        this->strides[i] = tiling.strides[i];
        this->reduceMask[i] = tiling.reduceMask[i];
        this->axes[i] = tiling.axes[i];
    }
    xGm.SetGlobalBuffer((__gm__ TYPE_X*)x, this->inputNum);
    // yGm is pre-offset per core so Process()/CopyOut() can index outputs from 0.
    yGm.SetGlobalBuffer((__gm__ TYPE_Y*)y + globalBufferIndex, this->coreDataNum);
    pipe.InitBuffer(inQueueX, BUFFER_NUM, this->tileDataNum * sizeof(TYPE_X));
    pipe.InitBuffer(outQueueY, BUFFER_NUM, this->tileDataNum * sizeof(TYPE_Y));
    pipe.InitBuffer(tmpFloatBuf, this->tileDataNum * sizeof(float));
    pipe.InitBuffer(sumBuf, 32);   // one 32-byte block for the scalar accumulator
    pipe.InitBuffer(maskBuf, 256); // GatherMask pattern storage (see Compute)
    pipe.InitBuffer(workBuf, this->tileDataNum * sizeof(float));
}

template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelReduceLogSumExp<TYPE_X, TYPE_Y>::CopyIn(uint32_t Offset)
{
    // Gathers processDataNum input elements from xGm starting at element Offset,
    // stepping by the stride of the innermost reduced axis, into a fresh UB tile.
    LocalTensor<TYPE_X> xLocal = inQueueX.AllocTensor<TYPE_X>();
    // One element per burst: blockLen is sizeof(TYPE_X) bytes and blockCount is
    // processDataNum, i.e. processDataNum single-element bursts.
    uint32_t blockLen = sizeof(TYPE_X);
    // Byte gap between consecutive bursts in GM: (stride - 1) elements along the
    // innermost reduced axis. NOTE(review): assumes srcStride is expressed in
    // bytes for GM->UB pad copies — confirm against the DataCopyExtParams spec.
    uint32_t srcStride = (strides[axes[axesNum - 1]] - 1) * (uint32_t)sizeof(TYPE_X);
    // NOTE(review): blockCount is uint16_t — assumes processDataNum <= 65535
    // (presumably a tiling guarantee; verify).
    DataCopyExtParams copyParams = {(uint16_t)processDataNum, blockLen, srcStride, 0, 0};
    DataCopyPadExtParams<TYPE_X> padParams = {false, 0, 0, 0};
    DataCopyPad(xLocal, xGm[Offset], copyParams, padParams);
    inQueueX.EnQue(xLocal);  
}

template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelReduceLogSumExp<TYPE_X, TYPE_Y>::CopyOut(uint32_t Offset)
{
    // Pops the finished scalar result from the output queue and writes exactly
    // one element (a single sizeof(TYPE_Y)-byte burst) to yGm[Offset].
    LocalTensor<TYPE_Y> result = outQueueY.DeQue<TYPE_Y>();
    DataCopyExtParams oneElemParams = {static_cast<uint16_t>(1), sizeof(TYPE_Y), 0, 0, 0};
    DataCopyPad(yGm[Offset], result, oneElemParams);
    outQueueY.FreeTensor(result);
}

template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelReduceLogSumExp<TYPE_X, TYPE_Y>::Compute()
{
    // Consumes the tile queued by CopyIn: compacts the sparsely laid-out
    // elements with GatherMask, applies exp() in fp32, reduces the tile to one
    // partial sum, and accumulates it into the 1-element `sum` tensor.
    LocalTensor<TYPE_X> indices = inQueueX.DeQue<TYPE_X>();
    LocalTensor<float> work = workBuf.Get<float>();
    LocalTensor<float> tmpFloat = tmpFloatBuf.Get<float>();
    uint64_t rsvdCnt = 0;
    if constexpr (std::is_same_v<TYPE_X, half>) {
        // half path: the 0x8000 pattern keeps one lane out of every 16 halves.
        // NOTE(review): presumably this matches where the single-element bursts
        // from CopyIn's DataCopyPad land inside each padded block — confirm the
        // GatherMask bit ordering against the placement.
        LocalTensor<uint16_t> mask = maskBuf.Get<uint16_t>();
        Duplicate<uint16_t>(mask, 0x8000u, 128);
        GatherMaskParams parms = {1, (uint16_t)processDataNum, 8, 0};
        GatherMask(indices, indices, mask, false, 0, parms, rsvdCnt);
        // Promote to fp32 so exp/sum/log run at higher precision than half.
        Cast(tmpFloat, indices, RoundMode::CAST_NONE, processDataNum);
        Exp(tmpFloat, tmpFloat, processDataNum);
        ReduceSum(tmpFloat, tmpFloat, work, processDataNum); // tile partial sum -> tmpFloat[0]
        Add(sum, sum, tmpFloat, 1);                          // accumulate the scalar
    }
    else {
        // float path: 0x80808080 keeps one lane out of every 8 floats.
        // Same bit-ordering caveat as the half path above.
        LocalTensor<uint32_t> mask = maskBuf.Get<uint32_t>();
        Duplicate<uint32_t>(mask, 0x80808080u, 64);
        GatherMaskParams parms = {1, (uint16_t)processDataNum, 8, 0};
        GatherMask(indices, indices, mask, false, 0, parms, rsvdCnt);
        Exp(tmpFloat, indices, processDataNum);
        ReduceSum(tmpFloat, tmpFloat, work, processDataNum);
        Add(sum, sum, tmpFloat, 1);
    }
    inQueueX.FreeTensor(indices);
}

template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelReduceLogSumExp<TYPE_X, TYPE_Y>::CalOffset(uint32_t i, uint32_t k, uint32_t &Offset) 
{
    // Turns (output element k of this core, reduction cycle i) into a flat
    // element offset in xGm. Two linear indices are decomposed into
    // per-dimension coordinates, each coordinate folded straight into the
    // offset as coordinate * stride — no scratch index array needed, since
    // every dimension is owned by exactly one of the two decompositions.
    Offset = 0;
    // Non-reduced dimensions: coordinates come from the global output index.
    uint32_t rem = globalBufferIndex + k;
    for (int d = shapeDimNum - 1; d >= 0; d--) {
        if (!reduceMask[d]) {
            Offset += (rem % shapeNum[d]) * strides[d];
            rem /= shapeNum[d];
        }
    }
    // Reduced dimensions: coordinates come from the cycle's linear position.
    rem = i * cycleDataNum;
    for (int d = shapeDimNum - 1; d >= 0; d--) {
        if (reduceMask[d]) {
            Offset += (rem % shapeNum[d]) * strides[d];
            rem /= shapeNum[d];
        }
    }
}

template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelReduceLogSumExp<TYPE_X, TYPE_Y>::Process()
{
    // For each of this core's output elements: zero the fp32 accumulator,
    // stream the reduced-axis input through CopyIn/Compute in tiles, then take
    // the log of the accumulated exp-sum and write the scalar result out.
    // NOTE(review): plain log(sum(exp(x))) without max-subtraction — exp() can
    // overflow/underflow for large-magnitude inputs; confirm the op contract
    // bounds x or that this matches the reference implementation's tolerance.
    sum = sumBuf.Get<float>();
    for (int k = 0; k < coreDataNum; k++) {
        Duplicate(sum, 0.0f, 1); // reset the running sum for this output element
        uint32_t CopyInOffset = 0;
        for (int i = 0; i < cycleNum; i++) {
            CalOffset(i, k, CopyInOffset); // GM element offset of cycle i for output k
            // compute
            // NOTE(review): the intra-cycle offset advances by tileDataNum
            // elements while CopyIn strides by strides[axes[axesNum - 1]];
            // verify these agree when that stride is > 1.
            this->processDataNum = this->tileDataNum;
            for (int32_t j = 0; j < this->tileNum - 1; j++) {
                CopyIn(CopyInOffset);
                Compute();
                CopyInOffset += this->tileDataNum;
            }
            // The last tile of the cycle carries the remainder.
            this->processDataNum = this->tailDataNum;
            CopyIn(CopyInOffset);
            Compute();
        }
        LocalTensor<TYPE_Y> outputIndices = outQueueY.AllocTensor<TYPE_Y>();
        Ln(sum, sum, 1); // log of the accumulated exp-sum (scalar)
        if constexpr (std::is_same_v<TYPE_X, half>) {
            Cast(outputIndices, sum, RoundMode::CAST_ROUND, 1); // fp32 -> half result
        } else {
            Adds(outputIndices, sum, 0.0f, 1); // copy the fp32 result into the out tensor
        }
        outQueueY.EnQue(outputIndices);
        CopyOut(k); // yGm is pre-offset per core in Init, so k indexes locally
    }
}

} // namespace MyReduceLogSumExp

#endif // REDUCE_LOG_SUM_EXP_H