/*
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

#ifndef EXAMPLES_MATRIX_MATMUL_CALLBACK_CUSTOM_TILING_H
#define EXAMPLES_MATRIX_MATMUL_CALLBACK_CUSTOM_TILING_H
#include "kernel_operator.h"
#define ASCENDC_CUBE_ONLY
#include "lib/matmul_intf.h"

namespace CustomMatmulCallback {
    // Edge length (in elements) of one cube fractal block on the AI Core.
    // constexpr (rather than const) guarantees these are usable in constant
    // expressions and never materialized at run time in this header.
    constexpr uint64_t BLOCK_CUBE = 16UL;
    // Number of uint64_t slots in the GM offset list shared with the callback.
    constexpr uint64_t OFFSET_LIST_SIZE = 50;
    // Each core owns two consecutive slots (two source base addresses) in the
    // offset list, starting at index curBlock * 2 + 1.
    constexpr uint64_t NUMBER_OF_CORES_IN_OFFSET_LIST = 2;
    // Column index at which CustomDataCopyInA switches to the second
    // per-core source base address.
    constexpr uint64_t THIRD_INDEX_OF_COL = 2;
}

namespace CustomMatmulCallback {
// Example matmul kernel demonstrating a user-supplied data-copy callback:
// the A-matrix load path is replaced by CustomDataCopyInA, which reads its
// source base addresses from a GM-resident offset list instead of the tensor
// handed to SetTensorA.
template <typename AType, typename BType, typename CType, typename BiasType>
class MatmulCallbackKernel {
public:
    __aicore__ inline MatmulCallbackKernel(){};
    // Binds the GM buffers (A, B, bias, C, offset list), stores the tiling,
    // and shifts each global tensor to this core's sub-block offset.
    __aicore__ inline void Init(GM_ADDR a, GM_ADDR b, GM_ADDR bias, GM_ADDR c, GM_ADDR offsetList, GM_ADDR workspace,
        const TCubeTiling& tiling);
    // Publishes this core's source addresses into the offset list, then runs
    // the full matmul iteration with the custom A-copy callback installed.
    __aicore__ inline void Process(AscendC::TPipe* pipe);
    // Callback invoked by the Matmul object to load one A sub-block (ND->NZ).
    // tilingPtr carries the value passed to SetUserDefInfo (here tiling.Ka);
    // dataPtr carries the value passed to SetSelfDefineData (the offset-list
    // GM address). Static: it must match AscendC::MatmulCallBackFunc.
    static __aicore__ inline void CustomDataCopyInA(const AscendC::LocalTensor<int8_t> &aMatrix,
    const __gm__ void *gm, int row, int col, int useM, int useK, const uint64_t tilingPtr,
    const uint64_t dataPtr);
private:
    // Computes this core's element offsets into A/B/C/bias from its block
    // index, and clamps the single-core tile sizes at the matrix edges
    // (mutates param.singleCoreM/N/K).
    __aicore__ inline void CalcOffset(TCubeTiling& param, int32_t& offsetA, int32_t& offsetB,
    int32_t& offsetC, int32_t& offsetBias);

    AscendC::GlobalTensor<AType> aGlobal;
    AscendC::GlobalTensor<BType> bGlobal;
    AscendC::GlobalTensor<CType> cGlobal;
    AscendC::GlobalTensor<BiasType> biasGlobal;
    // GM scratch list: slot 0 holds a base element offset; slots 2i+1, 2i+2
    // hold the two A-source addresses for core i (see Process).
    AscendC::GlobalTensor<uint64_t> offsetListGlobal;
    TCubeTiling tiling;
};

// Binds all GM buffers, stores the tiling, and advances each global tensor to
// the sub-block this core is responsible for.
//
// Parameters: a/b/bias/c      - GM addresses of the input/output matrices.
//             offsetList      - GM scratch list consumed by the A-copy callback.
//             workspace       - system workspace (validity checked via
//                               GetSysWorkSpacePtr()).
//             tiling          - host-computed matmul tiling, copied locally.
template <typename AType, typename BType, typename CType, typename BiasType>
__aicore__ inline void MatmulCallbackKernel<AType, BType, CType, BiasType>::Init(GM_ADDR a,
        GM_ADDR b, GM_ADDR bias, GM_ADDR c, GM_ADDR offsetList, GM_ADDR workspace, const TCubeTiling& tiling)
{
    // Guard first: the original placed this check after all setup, where a
    // `return` was a no-op. Checking up front actually skips the work when
    // no system workspace is available.
    if (GetSysWorkSpacePtr() == nullptr) {
        return;
    }
    this->tiling = tiling;
    aGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ AType*>(a), tiling.M * tiling.Ka);
    bGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ BType*>(b), tiling.Kb * tiling.N);
    cGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ CType*>(c), tiling.M * tiling.N);
    biasGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ BiasType*>(bias), tiling.N);
    offsetListGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ uint64_t*>(offsetList), OFFSET_LIST_SIZE);

    // Shift every tensor to this core's sub-block. CalcOffset also clamps the
    // single-core tile sizes stored in this->tiling at the matrix edges.
    int32_t offsetA = 0;
    int32_t offsetB = 0;
    int32_t offsetC = 0;
    int32_t offsetBias = 0;
    CalcOffset(this->tiling, offsetA, offsetB, offsetC, offsetBias);
    aGlobal = aGlobal[offsetA];
    bGlobal = bGlobal[offsetB];
    cGlobal = cGlobal[offsetC];
    biasGlobal = biasGlobal[offsetBias];
}

// Custom A-matrix copy callback (installed via MatmulCallBackFunc).
// Loads one (useM x useK) half-precision sub-block from GM into the local
// aMatrix buffer in NZ layout, selecting the source base address from the
// per-core slots of the GM offset list rather than from `gm`.
//
// Parameters: aMatrix  - destination local buffer (raw int8, reinterpreted
//                        as half for the copy).
//             gm       - unused here; the source comes from the offset list.
//             row/col  - sub-block coordinates in units of BLOCK_CUBE blocks.
//             useM/useK- valid height/width of this sub-block in elements.
//             tilingPtr- user info set by SetUserDefInfo; carries tiling.Ka,
//                        i.e. the source row stride in elements.
//             dataPtr  - GM address of the offset list (SetSelfDefineData).
template<typename AType, typename BType, typename CType, typename BiasType>
__aicore__ inline void MatmulCallbackKernel<AType, BType, CType, BiasType>::CustomDataCopyInA(
    const AscendC::LocalTensor<int8_t> &aMatrix, const __gm__ void *gm, int row, int col, int useM,
    int useK, const uint64_t tilingPtr, const uint64_t dataPtr){
        // NOTE: the original computed row/col * BLOCK_CUBE into tmpRow/tmpCol
        // but never used them; those dead locals are removed.
        int gCol = static_cast<int>(tilingPtr);  // source ND row stride (tiling.Ka)
        int height = useM;
        int width = useK;
        uint64_t curSrcAddr;
        uint64_t srcOffset;

        AscendC::LocalTensor<half> dst = aMatrix.template ReinterpretCast<half>();
        AscendC::GlobalTensor<uint64_t> dataGM;
        dataGM.SetGlobalBuffer(reinterpret_cast<__gm__ uint64_t*>(dataPtr), OFFSET_LIST_SIZE);

        // Each core owns slots [2*blockIdx+1, 2*blockIdx+2]: the first base
        // address serves columns 0..1, the second serves columns >= 2.
        int curBlock = AscendC::GetBlockIdx();
        int curSrcAddrIdx = curBlock * NUMBER_OF_CORES_IN_OFFSET_LIST + 1;
        if (col < THIRD_INDEX_OF_COL){
            curSrcAddr = dataGM.GetValue(curSrcAddrIdx);
        } else {
            curSrcAddr = dataGM.GetValue(curSrcAddrIdx + 1);
        }
        // Within each address's pair of columns, the second column starts one
        // tile-width into the source row.
        if (col == 0 || col == THIRD_INDEX_OF_COL){
            srcOffset = 0;
        } else {
            srcOffset = width;
        }

        AscendC::GlobalTensor<half> src;
        src.SetGlobalBuffer(reinterpret_cast<__gm__ half*>(curSrcAddr));

        // ND -> NZ copy of one height x width tile; pad height up to a whole
        // number of BLOCK_CUBE rows in the NZ destination.
        AscendC::Nd2NzParams nd2nzParams;
        nd2nzParams.ndNum = 1;
        nd2nzParams.nValue = height;
        nd2nzParams.dValue = width;
        nd2nzParams.srcNdMatrixStride = 0;
        nd2nzParams.srcDValue = gCol;
        nd2nzParams.dstNzC0Stride = AscendC::Ceil(height, BLOCK_CUBE) * BLOCK_CUBE;
        nd2nzParams.dstNzNStride = 1;
        nd2nzParams.dstNzMatrixStride = 0;
        DataCopy(dst, src[srcOffset], nd2nzParams);
    }


// Runs the full matmul on this core with the custom A-copy callback:
// publishes this core's two A source addresses into the GM offset list,
// hands the list's address to the Matmul object via SetSelfDefineData, and
// iterates all sub-blocks into cGlobal.
template <typename AType, typename BType, typename CType, typename BiasType>
__aicore__ inline void MatmulCallbackKernel<AType, BType, CType, BiasType>::Process(AscendC::TPipe* pipe)
{
    // Matmul object with CustomDataCopyInA replacing the default A-load path
    // (B-load and C-store callbacks stay default: nullptr).
    AscendC::Matmul<AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, AType>,
    AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, BType>,
    AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, CType>,
    AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, BiasType>,
    CFG_NORM,
    AscendC::MatmulCallBackFunc<nullptr, CustomDataCopyInA, nullptr>> matmulObj;

    REGIST_MATMUL_OBJ(pipe, GetSysWorkSpacePtr(), matmulObj, &(this->tiling));

    // Publish this core's two A source addresses into its offset-list slots
    // (2*blockIdx+1 and 2*blockIdx+2). Slot 0 holds a host-provided element
    // offset selecting the second source region.
    int curBlock = AscendC::GetBlockIdx();
    int curBlockIdx = curBlock * NUMBER_OF_CORES_IN_OFFSET_LIST + 1;
    // Keep the GM value as uint64_t: the original narrowed it to int, which
    // would corrupt offsets beyond INT_MAX.
    uint64_t baseOffset = offsetListGlobal.GetValue(0);
    uint64_t srcBaseAddr1 = reinterpret_cast<uint64_t>(aGlobal.GetPhyAddr());
    uint64_t srcBaseAddr2 = reinterpret_cast<uint64_t>(aGlobal.GetPhyAddr(baseOffset));

    offsetListGlobal.SetValue(curBlockIdx, srcBaseAddr1);
    offsetListGlobal.SetValue(curBlockIdx + 1, srcBaseAddr2);
    uint64_t offsetListGMPtr = reinterpret_cast<uint64_t>(offsetListGlobal.GetPhyAddr());

    // tilingPtr -> tiling.Ka (source row stride); dataPtr -> offset list.
    matmulObj.SetUserDefInfo(tiling.Ka);
    matmulObj.SetSelfDefineData(offsetListGMPtr);

    matmulObj.SetTensorA(aGlobal, false);
    matmulObj.SetTensorB(bGlobal, false);
    if (tiling.isBias) {
        matmulObj.SetBias(biasGlobal);
    }
    matmulObj.IterateAll(cGlobal);
    matmulObj.End();
}


// Maps this core's block index onto the (M, K) single-core grid and derives
// its element offsets into A, B, C and bias. Also clamps param.singleCoreM/N/K
// so the last core in each dimension does not run past the matrix edge
// (mutates param in place).
//
// Out params: offsetA/B/C/Bias - element offsets of this core's sub-tiles.
template <typename AType, typename BType, typename CType, typename BiasType>
__aicore__ inline void MatmulCallbackKernel<AType, BType, CType, BiasType>::CalcOffset(
    TCubeTiling& param, int32_t& offsetA, int32_t& offsetB, int32_t& offsetC, int32_t& offsetBias)
{
    int32_t blockIdx = AscendC::GetBlockIdx();
    // Number of single-core tiles along M and K. (The original also computed
    // the N tile count into an unused local; that dead code is removed.)
    auto mTileCnt = AscendC::Ceil(param.M, param.singleCoreM);
    auto kTileCnt = AscendC::Ceil(param.Ka, param.singleCoreK);

    // Cores per K-slice; within a slice, cores are laid out M-major.
    auto divideKCoreNum = param.usedCoreNum / kTileCnt;

    auto mCoreIndex = (blockIdx % divideKCoreNum) % mTileCnt;
    auto nCoreIndex = (blockIdx % divideKCoreNum) / mTileCnt;
    auto subKIndex = blockIdx / divideKCoreNum;

    offsetA = mCoreIndex * param.Ka * param.singleCoreM + subKIndex * param.singleCoreK;
    offsetB = subKIndex * param.singleCoreK * param.N + nCoreIndex * param.singleCoreN;
    offsetC = mCoreIndex * param.N * param.singleCoreM + nCoreIndex * param.singleCoreN;
    offsetBias = nCoreIndex * param.singleCoreN;

    // Clamp the tail tiles: the last core in each dimension may get a
    // partial tile.
    int32_t gmUseM = param.M - mCoreIndex * param.singleCoreM;
    param.singleCoreM = gmUseM < param.singleCoreM ? gmUseM : param.singleCoreM;

    int32_t gmUseN = param.N - nCoreIndex * param.singleCoreN;
    param.singleCoreN = gmUseN < param.singleCoreN ? gmUseN : param.singleCoreN;

    int32_t gmUseK = param.Ka - subKIndex * param.singleCoreK;
    param.singleCoreK = gmUseK < param.singleCoreK ? gmUseK : param.singleCoreK;
}
}  // namespace CustomMatmulCallback
#endif // EXAMPLES_MATRIX_MATMUL_CALLBACK_CUSTOM_TILING_H