/*
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

#include "matmul_a2_b2_share_custom_kernel.h"

#if ASCENDC_CPU_DEBUG
#define SET_G_CORE_TYPE_IS_AIC int g_coreType = 1
#else
#define SET_G_CORE_TYPE_IS_AIC
#endif

namespace MatmulCustom {

template <typename AType, typename BType, typename CType, typename BiasType>
__aicore__ inline void MatmulA2B2ShareKernel<AType, BType, CType, BiasType>::Init(
    GM_ADDR a1, GM_ADDR b1, GM_ADDR a2, GM_ADDR b2, GM_ADDR bias, GM_ADDR c1, GM_ADDR c2,
    GM_ADDR workspace, const TCubeTiling& tiling)
{
    this->tiling = tiling;

    // Bind whole-tensor global-memory views for both problems. The two
    // problems share one tiling, so problem 1 (`c1 = a1 * b1 + bias`) and
    // problem 2 (`c2 = a2 * b2 + bias`) have identical extents.
    a1Global.SetGlobalBuffer(reinterpret_cast<__gm__ AType*>(a1), tiling.M * tiling.Ka);
    a2Global.SetGlobalBuffer(reinterpret_cast<__gm__ AType*>(a2), tiling.M * tiling.Ka);
    b1Global.SetGlobalBuffer(reinterpret_cast<__gm__ BType*>(b1), tiling.Kb * tiling.N);
    b2Global.SetGlobalBuffer(reinterpret_cast<__gm__ BType*>(b2), tiling.Kb * tiling.N);
    c1Global.SetGlobalBuffer(reinterpret_cast<__gm__ CType*>(c1), tiling.M * tiling.N);
    c2Global.SetGlobalBuffer(reinterpret_cast<__gm__ CType*>(c2), tiling.M * tiling.N);
    biasGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ BiasType*>(bias), tiling.N);

    // Shift every view to this core's sub-block; A and B offsets are shared
    // between the two problems since the tiling is shared.
    uint32_t aOffset = 0;
    uint32_t bOffset = 0;
    uint32_t cOffset = 0;
    uint32_t biasOffset = 0;
    CalcOffset(AscendC::GetBlockIdx(), aOffset, bOffset, cOffset, biasOffset);
    a1Global = a1Global[aOffset];
    a2Global = a2Global[aOffset];
    b1Global = b1Global[bOffset];
    b2Global = b2Global[bOffset];
    c1Global = c1Global[cOffset];
    c2Global = c2Global[cOffset];
    biasGlobal = biasGlobal[biasOffset];

    // NOTE(review): this guard is the final statement, so the early return
    // currently has no observable effect; kept for parity with the original.
    if (GetSysWorkSpacePtr() == nullptr) {
        return;
    }
}

template <typename AType, typename BType, typename CType, typename BiasType>
__aicore__ inline void MatmulA2B2ShareKernel<AType, BType, CType, BiasType>::Process(AscendC::TPipe* pipe)
{
    // Both matmul objects use the same GM/ND layout for A, B, C and bias,
    // and the same CFG_NORM_A2B2SHARE configuration.
    using AMatmul = AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, AType>;
    using BMatmul = AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, BType>;
    using CMatmul = AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, CType>;
    using BiasMatmul = AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, BiasType>;

    // Problem 1: c1 = a1 * b1 + bias.
    AscendC::Matmul<AMatmul, BMatmul, CMatmul, BiasMatmul, CFG_NORM_A2B2SHARE> mm1;
    // Problem 2: c2 = a2 * b2 + bias.
    AscendC::Matmul<AMatmul, BMatmul, CMatmul, BiasMatmul, CFG_NORM_A2B2SHARE> mm2;

    // Register both objects on the same pipe with the system workspace and a
    // shared tiling (both take &this->tiling).
    REGIST_MATMUL_OBJ(pipe, GetSysWorkSpacePtr(), mm1, &(this->tiling), mm2, &(this->tiling));

    mm1.SetTensorA(a1Global);
    mm1.SetTensorB(b1Global);
    mm2.SetTensorA(a2Global);
    mm2.SetTensorB(b2Global);
    if (tiling.isBias) {
        mm1.SetBias(biasGlobal);
        mm2.SetBias(biasGlobal);
    }
    // Run the two problems back-to-back; End() is called on mm1 before mm2
    // starts iterating, and again on mm2 when it finishes.
    mm1.IterateAll(c1Global);
    mm1.End();
    mm2.IterateAll(c2Global);
    mm2.End();
}


template <typename AType, typename BType, typename CType, typename BiasType>
__aicore__ inline void MatmulA2B2ShareKernel<AType, BType, CType, BiasType>::CalcOffset(
    uint32_t blockIdx, uint32_t& offsetA, uint32_t& offsetB, uint32_t& offsetC, uint32_t& offsetBias)
{
    // Derives this core's element offsets into A, B, C and bias, then clamps
    // the per-core tile extents in `this->tiling` for tail tiles.
    TCubeTiling& param = this->tiling;

    // Number of single-core tiles along M. Cores are laid out M-major:
    // within the first `usedCoreNum` cores the index splits into an (m, n)
    // pair; any cores beyond that wrap onto an additional split along K.
    auto mBlocks = AscendC::Ceil(param.M, param.singleCoreM);

    auto mCoreIndex = (blockIdx % param.usedCoreNum) % mBlocks;
    auto nCoreIndex = (blockIdx % param.usedCoreNum) / mBlocks;
    auto subKIndex = blockIdx / param.usedCoreNum;

    // Element offsets of this core's tile inside the row-major (ND) tensors:
    // A is M x Ka, B is Kb x N, C is M x N, bias is length N.
    offsetA = mCoreIndex * param.Ka * param.singleCoreM + subKIndex * param.singleCoreK;
    offsetB = subKIndex * param.singleCoreK * param.N + nCoreIndex * param.singleCoreN;
    offsetC = mCoreIndex * param.N * param.singleCoreM + nCoreIndex * param.singleCoreN;
    offsetBias = nCoreIndex * param.singleCoreN;

    // Clamp the per-core extents when M/N/Ka are not exact multiples of the
    // single-core sizes (tail tiles). Note this mutates this->tiling, which
    // Process() later passes to the matmul objects.
    uint32_t gmUseM = param.M - mCoreIndex * param.singleCoreM;
    param.singleCoreM = gmUseM < param.singleCoreM ? gmUseM : param.singleCoreM;

    uint32_t gmUseN = param.N - nCoreIndex * param.singleCoreN;
    param.singleCoreN = gmUseN < param.singleCoreN ? gmUseN : param.singleCoreN;

    uint32_t gmUseK = param.Ka - subKIndex * param.singleCoreK;
    param.singleCoreK = gmUseK < param.singleCoreK ? gmUseK : param.singleCoreK;
}
}  // namespace MatmulCustom

namespace {
// Copies the tiling parameters word-by-word from global memory (`tilingGM`)
// into the caller's local TCubeTiling copy.
__aicore__ inline void CopyTiling(TCubeTiling* tiling, GM_ADDR tilingGM)
{
    uint32_t* dst = reinterpret_cast<uint32_t*>(tiling);
    auto src = reinterpret_cast<__gm__ uint32_t*>(tilingGM);

    // Copy in 32-bit words; assumes sizeof(TCubeTiling) is a multiple of
    // 4 bytes (a trailing partial word would be dropped) -- TODO confirm.
    constexpr size_t wordCount = sizeof(TCubeTiling) / sizeof(uint32_t);
    for (size_t i = 0; i < wordCount; ++i) {
        dst[i] = src[i];
    }
}
}

// Device entry point: runs the two shared-tiling matmuls
// (c1 = a1*b1 + bias, c2 = a2*b2 + bias) with half inputs and float outputs.
extern "C" __global__ __aicore__ void matmul_a2_b2_share_custom(
    GM_ADDR a1, GM_ADDR b1, GM_ADDR a2, GM_ADDR b2, GM_ADDR bias, GM_ADDR c1, GM_ADDR c2,
    GM_ADDR workspace, GM_ADDR tilingGm)
{
    // In CPU-debug builds this declares a local `g_coreType = 1` (AIC) so the
    // guard below treats this kernel as cube-core code; the macro was defined
    // at the top of the file but previously never invoked, leaving the guard
    // to read the debug framework's global core type instead.
    SET_G_CORE_TYPE_IS_AIC;
    // Cube-only kernel: vector (AIV) cores have nothing to do.
    if (g_coreType == AscendC::AIV) {
        return;
    }

    // Stage the tiling out of global memory before any matmul setup.
    TCubeTiling tiling;
    CopyTiling(&tiling, tilingGm);
    AscendC::TPipe pipe;

    MatmulCustom::MatmulA2B2ShareKernel<half, half, float, float> matmulA2B2ShareKernel;
    matmulA2B2ShareKernel.Init(a1, b1, a2, b2, bias, c1, c2, workspace, tiling);
    matmulA2B2ShareKernel.Process(&pipe);
}

#ifndef ASCENDC_CPU_DEBUG
// Host-side launcher: enqueues the kernel on `stream` across `blockDim`
// cores. All GM_ADDR arguments are device (global-memory) addresses;
// `tilingGm` points to a TCubeTiling image that the kernel copies out itself.
// Not compiled in CPU-debug builds, where the kernel is invoked differently.
void matmul_a2_b2_share_custom_do(uint32_t blockDim, void* stream, GM_ADDR a1, GM_ADDR b1, GM_ADDR a2, GM_ADDR b2,
    GM_ADDR bias, GM_ADDR c1, GM_ADDR c2, GM_ADDR workspace, GM_ADDR tilingGm)
{
    matmul_a2_b2_share_custom<<<blockDim, nullptr, stream>>>(a1, b1, a2, b2, bias, c1, c2, workspace, tilingGm);
}
#endif