/**
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

#ifndef EXAMPLES_MATRIX_MATMUL_A2B2SHARE_OP_KERNEL_MATMUL_A2_B2_SHARE_CUSTOM_KERNEL_H
#define EXAMPLES_MATRIX_MATMUL_A2B2SHARE_OP_KERNEL_MATMUL_A2_B2_SHARE_CUSTOM_KERNEL_H
#include "kernel_operator.h"
#define ASCENDC_CUBE_ONLY
#include "lib/matmul_intf.h"

namespace MatmulCustom {

/**
 * @brief Kernel that runs two matmuls which share bias and reuse L0A/L0B
 *        double buffering (A2/B2 share) on the cube unit.
 * @tparam AType    Element/layout type of the A matrices (both matmuls).
 * @tparam BType    Element/layout type of the B matrices (both matmuls).
 * @tparam CType    Element/layout type of the C (output) matrices.
 * @tparam BiasType Element/layout type of the shared bias.
 */
template <typename AType, typename BType, typename CType, typename BiasType>
class MatmulA2B2ShareKernel {
public:
    __aicore__ inline MatmulA2B2ShareKernel(){};
    /**
      * @brief  Initialization before process.
      * @param  a1: A matrix gm addr, for the first matmul.
      * @param  b1: B matrix gm addr, for the first matmul.
      * @param  a2: A matrix gm addr, for the second matmul.
      * @param  b2: B matrix gm addr, for the second matmul.
      * @param  bias: Bias matrix gm addr, two matmul share one.
      * @param  c1: C matrix gm addr, for the first matmul.
      * @param  c2: C matrix gm addr, for the second matmul.
      * @param  workspace: workspace gm addr.
      * @param  tiling: Matmul tiling struct (copied into the member `tiling`).
      * @retval None
      */
    __aicore__ inline void Init(GM_ADDR a1, GM_ADDR b1, GM_ADDR a2, GM_ADDR b2, GM_ADDR bias,
        GM_ADDR c1, GM_ADDR c2, GM_ADDR workspace, const TCubeTiling& tiling);
    /**
      * @brief  Process matrix calculation.
      * @param  pipe: The TPipe object which manages global memory and synchronization.
      * @retval None
      */
    __aicore__ inline void Process(AscendC::TPipe* pipe);

private:
    // Init norm template config, set `isA2B2Shared=true` to enable the double buffer for L0A/L0B.
    constexpr static MatmulConfigMode CONFIG_MODE = MatmulConfigMode::CONFIG_NORM;
    // NOTE(review): only the 11th field is labeled (isA2B2Shared); the meaning of the
    // other positional fields follows the MatmulFuncParams declaration in the AscendC
    // matmul headers — verify against lib/matmul_intf.h before changing any of them.
    constexpr static MatmulFuncParams FUNC_PARAMS{false, false, false, false, 0, IterateOrder::UNDEF,
        ScheduleType::INNER_PRODUCT, true, true, false, true/*isA2B2Shared*/, false};
    // Compile-time matmul configuration derived from CONFIG_MODE + FUNC_PARAMS.
    constexpr static MatmulConfig CFG_NORM_A2B2SHARE = GetMMConfig<CONFIG_MODE>(FUNC_PARAMS);
    /**
      * @brief  Calculate the gm offset based on the blockIdx.
      * @param  blockIdx: Current Core blockidx.
      * @param  offsetA: Gm offset of A matrix (output).
      * @param  offsetB: Gm offset of B matrix (output).
      * @param  offsetC: Gm offset of C matrix (output).
      * @param  offsetBias: Gm offset of Bias matrix (output).
      * @retval None
      */
    __aicore__ inline void CalcOffset(
        uint32_t blockIdx, uint32_t& offsetA, uint32_t& offsetB, uint32_t& offsetC, uint32_t& offsetBias);

    // Global-memory views bound in Init(); the *1 tensors feed the first matmul,
    // the *2 tensors feed the second; biasGlobal is shared by both.
    AscendC::GlobalTensor<AType> a1Global;
    AscendC::GlobalTensor<BType> b1Global;
    AscendC::GlobalTensor<CType> c1Global;
    AscendC::GlobalTensor<AType> a2Global;
    AscendC::GlobalTensor<BType> b2Global;
    AscendC::GlobalTensor<CType> c2Global;
    AscendC::GlobalTensor<BiasType> biasGlobal;
    // Tiling parameters copied from the host-side TCubeTiling passed to Init().
    TCubeTiling tiling;
};
}  // namespace MatmulCustom
#endif // EXAMPLES_MATRIX_MATMUL_A2B2SHARE_OP_KERNEL_MATMUL_A2_B2_SHARE_CUSTOM_KERNEL_H