/**
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

#ifndef EXAMPLES_MATRIX_MATMUL_L2CACHE_OP_KERNEL_MATMUL_L2_CACHE_CUSTOM_KERNEL_H
#define EXAMPLES_MATRIX_MATMUL_L2CACHE_OP_KERNEL_MATMUL_L2_CACHE_CUSTOM_KERNEL_H
#include "kernel_operator.h"
#define ASCENDC_CUBE_ONLY
#include "lib/matmul_intf.h"

namespace MatmulCustom {

template <typename AType, typename BType, typename CType, typename BiasType>
class MatmulL2CacheKernel {
public:
    __aicore__ inline MatmulL2CacheKernel() {}
    /**
      * @brief  Initialization before process.
      * @param  a: A matrix gm addr.
      * @param  b: B matrix gm addr.
      * @param  bias: Bias matrix gm addr.
      * @param  c: C matrix gm addr.
      * @param  tiling: Matmul tiling struct.
      * @retval None
      */
    __aicore__ inline void Init(GM_ADDR a, GM_ADDR b, GM_ADDR bias, GM_ADDR c, const TCubeTiling& tiling);
    /**
      * @brief  Process matrix calculation.
      * @param  pipe: The TPipe object which manages global memory and synchronization.
      * @retval None
      */
    __aicore__ inline void Process(AscendC::TPipe* pipe);

    // Matmul object: all four operands (A, B, C, Bias) reside in GM with ND format.
    AscendC::Matmul<AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, AType>,
                    AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, BType>,
                    AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, CType>,
                    AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, BiasType>> matmulObj;

private:
    /**
      * @brief  Calculate the gm offset based on the blockIdx.
      * @param  blockIdx: Current Core blockidx.
      * @param  offsetA: Gm offset of A matrix.
      * @param  offsetB: Gm offset of B matrix.
      * @param  offsetC: Gm offset of C matrix.
      * @param  offsetBias: Gm offset of Bias matrix.
      * @retval None
      */
    // NOTE(review): int32_t offsets cap addressable elements at 2^31-1 per matrix —
    // confirm this is sufficient for the shapes this example targets.
    __aicore__ inline void CalcOffset(
        int32_t blockIdx, int32_t& offsetA, int32_t& offsetB, int32_t& offsetC, int32_t& offsetBias);

    AscendC::GlobalTensor<AType> aGlobal;       // A matrix view over GM
    AscendC::GlobalTensor<BType> bGlobal;       // B matrix view over GM
    AscendC::GlobalTensor<CType> cGlobal;       // C matrix view over GM
    AscendC::GlobalTensor<BiasType> biasGlobal; // Bias view over GM
    TCubeTiling tiling;                         // Tiling parameters copied in Init
};
}  // namespace MatmulCustom
#endif // EXAMPLES_MATRIX_MATMUL_L2CACHE_OP_KERNEL_MATMUL_L2_CACHE_CUSTOM_KERNEL_H