/*
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

#ifndef EXAMPLES_MATRIX_MATMUL_L2CACHE_OP_KERNEL_MATMUL_L2_CACHE_CUSTOM_KERNEL_H
#define EXAMPLES_MATRIX_MATMUL_L2CACHE_OP_KERNEL_MATMUL_L2_CACHE_CUSTOM_KERNEL_H
#include "kernel_operator.h"
#define ASCENDC_CUBE_ONLY
#include "lib/matmul_intf.h"
#include "l2_cache_optimizer.h"
namespace CustomMatmulL2Cache {
// Build the compile-time matmul configuration: start from the MDL template
// config and override the single-core caps, basic block shape, and flags.
constexpr static auto GetCFG()
{
    auto config = CFG_MDL;
    // Upper bounds for the per-core slice of the problem.
    config.singleCoreM = 10000;
    config.singleCoreN = 10000;
    config.singleCoreK = 10000;
    // Basic (L0) block shape used by the matmul pipeline.
    config.basicM = 128;
    config.basicN = 256;
    config.basicK = 64;
    config.enableSetBias = false;
    config.enUnitFlag = true;
    return config;
}
// File-scope constant: the customized MDL matmul configuration consumed by MatmulKernel below.
constexpr auto CUSTOM_CFG = GetCFG();
constexpr static auto GetL1Tiling(const MatmulApiStaticTiling& mmTiling)
{
    auto tiling = mmTiling;
    tiling.stepM = 1;
    tiling.stepN = 1;
    tiling.stepKa = 4;
    tiling.stepKb = 4;
    tiling.depthA1 = 8;
    tiling.depthB1 = 8;
    return tiling;
}
// Problem description read from the tiling buffer in global memory.
// Layout must stay a flat sequence of int32_t words: CopyTiling copies it word-by-word.
struct MatmulProblemShape {
    int32_t m;       // rows of A / C
    int32_t n;       // columns of B / C
    int32_t k;       // columns of A / rows of B (reduction dim)
    int32_t isBias;  // non-zero when a bias vector of length n is supplied
};

// Copy the problem shape, word by word, from the tiling buffer in global
// memory into the local struct pointed to by `tiling`.
__aicore__ inline void CopyTiling(MatmulProblemShape* tiling, GM_ADDR tilingGM)
{
    auto src = reinterpret_cast<__gm__ int32_t *>(tilingGM);
    int32_t *dst = reinterpret_cast<int32_t *>(tiling);
    constexpr size_t wordCount = sizeof(MatmulProblemShape) / sizeof(int32_t);
    for (size_t idx = 0; idx < wordCount; ++idx) {
        dst[idx] = src[idx];
    }
}

// Cube-only matmul kernel with L2-cache-aware tile scheduling.
// The tiling is resolved entirely at compile time: CUSTOM_CFG -> per-type API
// tiling (CONSTANT_CFG) -> L1 tiling overrides (MM_TILING), which is then
// baked into the Matmul object as a template argument.
template <typename aType, typename bType, typename cType, typename biasType>
class MatmulKernel {
public:
    // (mIdx, nIdx, kIdx) coordinate of a scheduled block.
    using BlockCoord = AscendC::Std::tuple<int64_t, int64_t, int64_t>;
    __aicore__ inline MatmulKernel(){};
    // Binds the GM buffers and reads the problem shape from `tiling`.
    __aicore__ inline void Init(GM_ADDR a, GM_ADDR b, GM_ADDR bias, GM_ADDR c, GM_ADDR workspace,
        GM_ADDR tiling);
    // Iterates over L2-cache-friendly tiles and runs the matmul on each.
    __aicore__ inline void Process(AscendC::TPipe* pipe);
    // All operands live in global memory with ND (row-major) format.
    using A_TYPE = AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, aType>;
    using B_TYPE = AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, bType>;
    using C_TYPE = AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, cType>;
    using BIAS_TYPE = AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, biasType>;
    constexpr static auto CONSTANT_CFG = AscendC::GetMatmulApiTiling<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE>(CUSTOM_CFG);
    constexpr static auto MM_TILING = GetL1Tiling(CONSTANT_CFG);
    // Use cubeOnly mode
    AscendC::Matmul<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_TILING> matmulObj;
    using L2CacheOpt = L2CacheOptimizer<MatmulProblemShape, MM_TILING>;
    MatmulProblemShape shapes;  // runtime problem shape, filled by Init()
private:
    // Translates a block coordinate into element offsets into A, B and C.
    __aicore__ inline AscendC::Coord<int64_t, int64_t, int64_t> CalcOffset(
        const MatmulProblemShape& param, const BlockCoord &blockCoord);
    __aicore__ inline int64_t GetTotalSize(int64_t m, int64_t n, int64_t k);
    AscendC::GlobalTensor<aType> aGlobal;
    AscendC::GlobalTensor<bType> bGlobal;
    AscendC::GlobalTensor<cType> cGlobal;
    AscendC::GlobalTensor<biasType> biasGlobal;
};

// Reads the problem shape from the tiling buffer and binds the global
// tensors for A (m x k), B (k x n), C (m x n) and the bias (length n).
// `workspace` is unused here; the system workspace is obtained in Process().
template <typename aType, typename bType, typename cType, typename biasType>
__aicore__ inline void MatmulKernel<aType, bType, cType, biasType>::Init(GM_ADDR a,
        GM_ADDR b, GM_ADDR bias, GM_ADDR c, GM_ADDR workspace, GM_ADDR tiling)
{
    (void)workspace;
    CopyTiling(&shapes, tiling);
    // Promote to 64-bit before multiplying: m/n/k are int32_t, so products
    // such as m * k overflow int32_t once the shape exceeds ~46341 x 46341.
    const uint64_t sizeA = static_cast<uint64_t>(shapes.m) * static_cast<uint64_t>(shapes.k);
    const uint64_t sizeB = static_cast<uint64_t>(shapes.k) * static_cast<uint64_t>(shapes.n);
    const uint64_t sizeC = static_cast<uint64_t>(shapes.m) * static_cast<uint64_t>(shapes.n);
    aGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ aType*>(a), sizeA);
    bGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ bType*>(b), sizeB);
    cGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ cType*>(c), sizeC);
    biasGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ biasType*>(bias), static_cast<uint64_t>(shapes.n));
}

// Main loop: registers the matmul object, then round-robins L2-friendly
// tiles across cores (tileIdx starts at this core's block index and strides
// by the number of cores). For each tile it sets the tail shape, computes
// the A/B/C offsets, and runs a full IterateAll.
template <typename aType, typename bType, typename cType, typename biasType>
__aicore__ inline void MatmulKernel<aType, bType, cType, biasType>::Process(AscendC::TPipe* pipe)
{
    // Tiling is compile-time (MM_TILING), so no runtime TCubeTiling is passed.
    REGIST_MATMUL_OBJ(pipe, GetSysWorkSpacePtr(), matmulObj, (TCubeTiling*)nullptr);
    auto curBlockIdx = AscendC::GetBlockIdx();
    auto blockNum = AscendC::GetBlockNum();
    if (curBlockIdx >= blockNum) {
        return;
    }
    L2CacheOpt l2Opt(shapes, blockNum);
    matmulObj.SetOrgShape(shapes.m, shapes.n, shapes.k);
    for (int64_t tileIdx = curBlockIdx; tileIdx < l2Opt.GetTileNum(); tileIdx += blockNum) {
        auto blockShape = l2Opt.GetBlockShape(tileIdx);
        // NOTE(review): a degenerate block shape exits the whole loop and
        // skips matmulObj.End() below — confirm this early return (rather
        // than `continue`/`break`) is intended.
        if (Get<0>(blockShape) <= 0 || Get<1>(blockShape) <= 0) {
            return;
        }
        auto blockCoord = l2Opt.GetBlockCoord(tileIdx);
        // Tail = actual (possibly clipped) m/n/k extent of this tile.
        matmulObj.SetTail(Get<0>(blockShape), Get<1>(blockShape), Get<2>(blockShape));
        const auto& offsetCoord = CalcOffset(shapes, blockCoord);
        int64_t offsetA = Get<0>(offsetCoord);
        int64_t offsetB = Get<1>(offsetCoord);
        int64_t offsetC = Get<2>(offsetCoord);
        matmulObj.SetTensorA(aGlobal[offsetA], false);
        matmulObj.SetTensorB(bGlobal[offsetB], false);
        if (shapes.isBias) {
            matmulObj.SetBias(biasGlobal);
        }
        matmulObj.IterateAll(cGlobal[offsetC]);
    }
    matmulObj.End();
}

// Maps a block coordinate (mIdx, nIdx, kIdx) to element offsets in the
// row-major A (m x k), B (k x n) and C (m x n) buffers, returned as
// (offsetA, offsetB, offsetC).
template <typename aType, typename bType, typename cType, typename biasType>
__aicore__ inline AscendC::Coord<int64_t, int64_t, int64_t> MatmulKernel<aType, bType, cType, biasType>::CalcOffset(
    const MatmulProblemShape& param, const BlockCoord& blockCoord)
{
    const int64_t mIdx = Get<0>(blockCoord);
    const int64_t nIdx = Get<1>(blockCoord);
    const int64_t kIdx = Get<2>(blockCoord);
    // Row-major layouts: stride of a row equals the inner dimension.
    const auto layoutA = AscendC::MakeLayout(AscendC::MakeShape(param.m, param.k), AscendC::MakeStride(param.k, 1));
    const auto layoutB = AscendC::MakeLayout(AscendC::MakeShape(param.k, param.n), AscendC::MakeStride(param.n, 1));
    const auto layoutC = AscendC::MakeLayout(AscendC::MakeShape(param.m, param.n), AscendC::MakeStride(param.n, 1));
    const int64_t offsetA = layoutA(AscendC::MakeCoord(mIdx, kIdx));
    const int64_t offsetB = layoutB(AscendC::MakeCoord(kIdx, nIdx));
    const int64_t offsetC = layoutC(AscendC::MakeCoord(mIdx, nIdx));
    return {offsetA, offsetB, offsetC};
}
}  // namespace CustomMatmulL2Cache
#endif // EXAMPLES_MATRIX_MATMUL_L2CACHE_OP_KERNEL_MATMUL_L2_CACHE_CUSTOM_KERNEL_H