/*
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

#ifndef EXAMPLES_MATRIX_MATMUL_CONSTANT_H
#define EXAMPLES_MATRIX_MATMUL_CONSTANT_H
#include "kernel_operator.h"
#define ASCENDC_CUBE_ONLY
#include "lib/matmul_intf.h"
namespace CustomMatmulConstant {
// Upper bounds on the problem shape this constant-tiled kernel accepts.
constexpr int32_t MAX_M = 10000; // custom matmul kernel support max value of M Dim shape
constexpr int32_t MAX_N = 10000; // custom matmul kernel support max value of N Dim shape
constexpr int32_t MAX_K = 10000; // custom matmul kernel support max value of K Dim shape
// Per-iteration base block shape. The three L0 buffer constraints are:
//   BASE_M * BASE_K * sizeof(typeA) <= L0A size
//   BASE_N * BASE_K * sizeof(typeB) <= L0B size
//   BASE_M * BASE_N * sizeof(typeC) <= L0C size
constexpr int32_t BASE_M = 128;
constexpr int32_t BASE_N = 256;
constexpr int32_t BASE_K = 64;
// Shape parameters baked into the matmul config at compile time.
constexpr MatmulShapeParams shapeParams = { MAX_M,
                                            MAX_N,
                                            MAX_K,
                                            BASE_M,
                                            BASE_N,
                                            BASE_K };
// Compile-time MDL-mode matmul configuration built from the constant shape.
constexpr MatmulConfig CUSTOM_CFG = GetMMConfig<MatmulConfigMode::CONFIG_MDL>(shapeParams);
// Runtime problem description read from global memory by CopyTiling.
// NOTE: CopyTiling copies this struct word-by-word as int32_t, so every
// field must stay int32_t and the field order must match the host-side
// tiling layout.
struct MatmulProblemShape {
    int32_t useCoreNum; // number of cores that actually participate
    int32_t m;          // full M dimension of the problem
    int32_t n;          // full N dimension of the problem
    int32_t k;          // full K dimension of the problem
    int32_t sm;         // per-core M tile size (see CalcOffset/Process)
    int32_t sn;         // per-core N tile size (see CalcOffset/Process)
    int32_t sk;         // per-core K tile size — not used in this file; TODO confirm host semantics
    int32_t isBias;     // non-zero: apply the bias tensor in Process
};

// Copy the problem-shape tiling struct from global memory into local memory,
// one 32-bit word at a time (the struct is a plain sequence of int32_t fields).
__aicore__ inline void CopyTiling(MatmulProblemShape *tiling, GM_ADDR tilingGM)
{
    constexpr size_t wordCount = sizeof(MatmulProblemShape) / sizeof(int32_t);
    auto src = reinterpret_cast<__gm__ int32_t *>(tilingGM);
    int32_t *dst = reinterpret_cast<int32_t *>(tiling);
    for (size_t idx = 0; idx < wordCount; ++idx) {
        dst[idx] = src[idx];
    }
}

// Constant-tiled matmul kernel: C = A * B (+ bias), with the tiling shape
// fixed at compile time via CUSTOM_CFG and the runtime problem shape read
// from the tiling pointer in Init. All operands are ND-format global tensors.
template <typename aType, typename bType, typename cType, typename biasType>
class MatmulKernel {
public:
    __aicore__ inline MatmulKernel(){};
    // Bind global buffers, read the tiling, and position this core's view
    // of A/B/C/bias at its assigned tile offsets.
    __aicore__ inline void Init(GM_ADDR a, GM_ADDR b, GM_ADDR bias, GM_ADDR c, GM_ADDR workspace,
        GM_ADDR tiling);
    // Run the matmul over this core's tile and write the result to C.
    __aicore__ inline void Process(AscendC::TPipe* pipe);
    using A_TYPE = AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, aType>;
    using B_TYPE = AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, bType>;
    using C_TYPE = AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, cType>;
    using BIAS_TYPE = AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, biasType>;
    // Full compile-time tiling derived from the operand types and CUSTOM_CFG.
    constexpr static auto CONSTANT_CFG = AscendC::GetMatmulApiTiling<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE>(CUSTOM_CFG);
    AscendC::Matmul<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, CONSTANT_CFG> matmulObj;
    MatmulProblemShape shapes; // runtime problem shape, filled by CopyTiling in Init

private:
    // Compute this core's element offsets into A/B/C/bias and record its
    // (mIdx, nIdx) tile coordinates.
    __aicore__ inline void CalcOffset(
        const MatmulProblemShape& param, int32_t& offsetA, int32_t& offsetB,
        int32_t& offsetC, int32_t& offsetBias);
    AscendC::GlobalTensor<aType> aGlobal;
    AscendC::GlobalTensor<bType> bGlobal;
    AscendC::GlobalTensor<cType> cGlobal;
    AscendC::GlobalTensor<biasType> biasGlobal;
    int32_t mIdx; // tile row index of this core along M (set by CalcOffset)
    int32_t nIdx; // tile column index of this core along N (set by CalcOffset)
};

// Bind the global tensors, read the runtime shape, and advance each tensor
// to this core's tile offset. Cores past useCoreNum get no work and return
// immediately after reading the tiling.
template <typename aType, typename bType, typename cType, typename biasType>
__aicore__ inline void MatmulKernel<aType, bType, cType, biasType>::Init(GM_ADDR a,
        GM_ADDR b, GM_ADDR bias, GM_ADDR c, GM_ADDR workspace, GM_ADDR tiling)
{
    CopyTiling(&shapes, tiling);
    if (AscendC::GetBlockIdx() >= shapes.useCoreNum) {
        // This core is idle for the current problem shape.
        return;
    }
    aGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ aType*>(a), shapes.m * shapes.k);
    bGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ bType*>(b), shapes.k * shapes.n);
    cGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ cType*>(c), shapes.m * shapes.n);
    biasGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ biasType*>(bias), shapes.n);
    // offs[0..3] = element offsets for A, B, C, bias respectively.
    int32_t offs[4] = { 0, 0, 0, 0 };
    CalcOffset(shapes, offs[0], offs[1], offs[2], offs[3]);
    aGlobal = aGlobal[offs[0]];
    bGlobal = bGlobal[offs[1]];
    cGlobal = cGlobal[offs[2]];
    biasGlobal = biasGlobal[offs[3]];
    if (GetSysWorkSpacePtr() == nullptr) {
        // No system workspace available — nothing further to set up.
        return;
    }
}

// Execute the matmul for this core's tile: register the matmul object,
// clamp the tail tile sizes at the M/N edges, and iterate the whole tile
// into the output tensor.
template <typename aType, typename bType, typename cType, typename biasType>
__aicore__ inline void MatmulKernel<aType, bType, cType, biasType>::Process(AscendC::TPipe* pipe)
{
    REGIST_MATMUL_OBJ(pipe, GetSysWorkSpacePtr(), matmulObj, (TCubeTiling*)nullptr);
    matmulObj.SetOrgShape(shapes.m, shapes.n, shapes.k);
    // Remaining rows for this core; clamp to the tile size, and fall back to
    // the full M when the remainder is non-positive.
    int32_t tailM = shapes.m - mIdx * shapes.sm;
    if (tailM <= 0) {
        tailM = shapes.m;
    } else if (tailM > shapes.sm) {
        tailM = shapes.sm;
    }
    // Same clamping for the N dimension.
    int32_t tailN = shapes.n - nIdx * shapes.sn;
    if (tailN <= 0) {
        tailN = shapes.n;
    } else if (tailN > shapes.sn) {
        tailN = shapes.sn;
    }
    matmulObj.SetTail(tailM, tailN, shapes.k);
    matmulObj.SetTensorA(aGlobal, false);
    matmulObj.SetTensorB(bGlobal, false);
    if (shapes.isBias) {
        matmulObj.SetBias(biasGlobal);
    }
    matmulObj.IterateAll(cGlobal);
    matmulObj.End();
}

// Map this core's block index onto a (row, column) tile coordinate over the
// M x N output, then derive the element offsets of each operand's tile.
// Cores are laid out column-major over the tile grid: consecutive block
// indices walk down the M tiles first.
template <typename aType, typename bType, typename cType, typename biasType>
__aicore__ inline void MatmulKernel<aType, bType, cType, biasType>::CalcOffset(
    const MatmulProblemShape& param, int32_t& offsetA, int32_t& offsetB, int32_t& offsetC,
    int32_t& offsetBias)
{
    // Number of sm-sized row tiles covering M (ceiling division).
    const auto rowTiles = (param.m + param.sm - 1) / param.sm;
    const auto coreIdx = AscendC::GetBlockIdx();
    mIdx = coreIdx % rowTiles;
    nIdx = coreIdx / rowTiles;
    // A: skip mIdx row tiles of sm full-K rows each.
    offsetA = mIdx * param.sm * param.k;
    // B and bias: skip nIdx column tiles of sn columns each.
    offsetB = nIdx * param.sn;
    offsetBias = offsetB;
    // C: skip mIdx row tiles of sm full-N rows, then nIdx column tiles.
    offsetC = mIdx * param.sm * param.n + offsetB;
}
}  // namespace CustomMatmulConstant
#endif // EXAMPLES_MATRIX_MATMUL_CONSTANT_H