/*
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

#ifndef EXAMPLES_MATRIX_BATCH_MATMUL_TSCM_OP_KERNEL_BATCH_MATMUL_TSCM_CUSTOM_IMPL_H
#define EXAMPLES_MATRIX_BATCH_MATMUL_TSCM_OP_KERNEL_BATCH_MATMUL_TSCM_CUSTOM_IMPL_H
#include "kernel_operator.h"
#include "lib/matmul_intf.h"

namespace BatchMatmulCustom {
/**
 * @brief Device-side batch matrix-multiply kernel built on the AscendC high-level
 *        Matmul API (see lib/matmul_intf.h), parameterized over operand type traits.
 *
 * Template parameters are AscendC matmul type descriptors; each is expected to
 * expose a nested element type `T` (consumed via the `typename X_TYPE::T` aliases
 * below):
 *   A_TYPE / B_TYPE  - left / right input matrix type descriptors
 *   C_TYPE           - output matrix type descriptor
 *   BIAS_TYPE        - bias vector type descriptor
 *
 * Typical call sequence (definitions live in the accompanying impl file, not
 * visible here): construct, then Init(...) once, then Process(...).
 */
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE>
class BatchMatmulKernel {
    public:
        /// Trivial constructor; all state is established later in Init().
        __aicore__ inline BatchMatmulKernel(){};
        /**
         * @brief Binds the kernel to its global-memory buffers and tiling config.
         * @param a, b       GM addresses of the input matrices
         * @param bias       GM address of the bias data
         * @param c          GM address of the output matrix
         * @param workspace  GM scratch space address
         * @param tiling     cube tiling parameters (copied into the `tiling` member)
         * NOTE(review): body not visible in this header — presumably wraps the GM
         * addresses in the GlobalTensor members below; confirm in the impl file.
         */
        __aicore__ inline void Init(GM_ADDR a, GM_ADDR b, GM_ADDR bias, GM_ADDR c, GM_ADDR workspace, const TCubeTiling& tiling);
        /**
         * @brief Runs the batched matmul.
         * @param pipe    AscendC pipeline used for queue/buffer management
         * @param batchA  batch count for the A operand
         * @param batchB  batch count for the B operand
         */
        __aicore__ inline void Process(AscendC::TPipe* pipe, int32_t batchA, int32_t batchB);
        // Public so host-side macros/helpers (e.g. tiling registration) can reach
        // the underlying AscendC Matmul object directly.
        AscendC::Matmul<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE> matmulObj;
    private:
        /**
         * @brief Computes per-core element offsets into A/B/C/bias from the core
         *        index and tiling — outputs returned through the reference params.
         */
        __aicore__ inline void CalcOffset(int32_t blockIdx, const TCubeTiling& tiling, int32_t& offsetA, int32_t& offsetB,
                                          int32_t& offsetC, int32_t& offsetBias);
        // Element types extracted from the operand descriptors.
        using aType = typename A_TYPE::T;
        using bType = typename B_TYPE::T;
        using cType = typename C_TYPE::T;
        using biasType = typename BIAS_TYPE::T;
        // Global-memory views of the operands, set up during Init().
        AscendC::GlobalTensor<aType> aGlobal;
        AscendC::GlobalTensor<bType> bGlobal;
        AscendC::GlobalTensor<cType> cGlobal;
        AscendC::GlobalTensor<biasType> biasGlobal;
        // Local copy of the cube tiling parameters supplied to Init().
        TCubeTiling tiling;
};
} // namespace BatchMatmulCustom
#endif // EXAMPLES_MATRIX_BATCH_MATMUL_TSCM_OP_KERNEL_BATCH_MATMUL_TSCM_CUSTOM_IMPL_H