/**
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

#include "kernel_operator.h"
#include "matmul_partial_output_custom_kernel.h"

namespace {
/**
  * @brief  Copy tiling data to TCubeTiling ptr from tiling gm addr.
  * @param  tiling: TCubeTiling ptr which needs to copy tiling data.
  * @param  tilingGM: Tiling gm addr.
  * @retval None
  */
__aicore__ inline void CopyTiling(TCubeTiling* tiling, GM_ADDR tilingGM)
{
    // Word-wise copy of the tiling struct from GM into the local struct.
    auto dst = reinterpret_cast<uint32_t*>(tiling);
    auto src = reinterpret_cast<__gm__ uint32_t*>(tilingGM);
    constexpr uint32_t wordCount = sizeof(TCubeTiling) / sizeof(uint32_t);
    for (uint32_t idx = 0; idx < wordCount; ++idx) {
        dst[idx] = src[idx];
    }
}
}

namespace MatmulPartialOutputCustom {
template <typename AType, typename BType, typename CType, typename BiasType>
__aicore__ inline void MatmulKernel<AType, BType, CType, BiasType>::Init(
    GM_ADDR a, GM_ADDR b, GM_ADDR bias, GM_ADDR c, GM_ADDR tmp, const TCubeTiling& tiling, bool isTransA, bool isTransB)
{
    // Fix: the system-workspace guard originally sat at the very end of Init,
    // where `return` was a no-op (dead code). Guard before doing any work so a
    // missing workspace leaves the kernel object untouched.
    if (GetSysWorkSpacePtr() == nullptr) {
        return;
    }

    this->tiling = tiling;
    // Transpose flags must be recorded before CalcOffset, which reads them.
    this->isTransA = isTransA;
    this->isTransB = isTransB;

    // Bind each GM address to its global tensor with the full (un-split) extent.
    aGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ AType*>(a), tiling.M * tiling.Ka);
    bGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ BType*>(b), tiling.Kb * tiling.N);
    cGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ CType*>(c), tiling.M * tiling.N);
    biasGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ BiasType*>(bias), tiling.N);
    // tmp stages one partial C block per k step: M * N * (singleCoreK / baseK) elements.
    workspace.SetGlobalBuffer(reinterpret_cast<__gm__ CType*>(tmp), tiling.M * tiling.N * tiling.singleCoreK / tiling.baseK);

    // Shift each global tensor to this core's sub-block.
    int32_t offsetA = 0;
    int32_t offsetB = 0;
    int32_t offsetC = 0;
    int32_t offsetBias = 0;
    CalcOffset(AscendC::GetBlockIdx(), offsetA, offsetB, offsetC, offsetBias);
    aGlobal = aGlobal[offsetA];
    bGlobal = bGlobal[offsetB];
    cGlobal = cGlobal[offsetC];
    biasGlobal = biasGlobal[offsetBias];
    matmulObj.SetOrgShape(tiling.M, tiling.N, tiling.Ka, tiling.Kb);
}

template <typename AType, typename BType, typename CType, typename BiasType>
__aicore__ inline void MatmulKernel<AType, BType, CType, BiasType>::Process(AscendC::TPipe* pipe)
{
    // Cores beyond the count planned by tiling have no work assigned.
    // NOTE(review): this uses matmul::GetBlockIdx() while Init uses
    // AscendC::GetBlockIdx() — confirm both index spaces agree on this platform.
    if (matmul::GetBlockIdx() >= tiling.usedCoreNum) {
        return;
    }

    // process with tail block: the last core along each of M and N may own a
    // smaller-than-singleCore remainder; clamp and tell matmul about it.
    int tailM = tiling.M - mCoreIndex * tiling.singleCoreM;
    tailM = tailM < tiling.singleCoreM ? tailM : tiling.singleCoreM;
    int tailN = tiling.N - nCoreIndex * tiling.singleCoreN;
    tailN = tailN < tiling.singleCoreN ? tailN : tiling.singleCoreN;
    if (tailM < tiling.singleCoreM || tailN < tiling.singleCoreN) {
        matmulObj.SetTail(tailM, tailN);
    }

    matmulObj.SetTensorA(aGlobal, isTransA);
    matmulObj.SetTensorB(bGlobal, isTransB);
    // Stream every baseM x baseN partial block produced by Iterate() into the
    // workspace GM buffer back to back; partials are NOT summed in GM.
    uint32_t offset = 0;
    while (matmulObj.Iterate()) {
        matmulObj.GetTensorC(workspace[offset], 0, true); // data in workspace is not accumulated
        offset += tiling.baseM * tiling.baseN;
    }
    matmulObj.End();

    // after matrix multiplication is completed, perform accumulation:
    // sum the kIter partial blocks belonging to each output tile on the vector unit.
    AscendC::TBuf<AscendC::TPosition::VECIN> cMarix;
    AscendC::TBuf<AscendC::TPosition::VECIN> tmpMarix;
    pipe->InitBuffer(cMarix, tiling.baseM * tiling.baseN * sizeof(CType));
    pipe->InitBuffer(tmpMarix, tiling.baseM * tiling.baseN * sizeof(CType));
    AscendC::LocalTensor<CType> bufferC = cMarix.template Get<CType>();    // running sum for one tile
    AscendC::LocalTensor<CType> bufferTmp = tmpMarix.template Get<CType>(); // one staged partial
    uint32_t outputOffset = 0;
    auto mIter = AscendC::Ceil(tiling.singleCoreM, tiling.baseM);
    auto nIter = AscendC::Ceil(tiling.singleCoreN, tiling.baseN);
    auto kIter = AscendC::Ceil(tiling.singleCoreK, tiling.baseK);
    // NOTE(review): this read order assumes Iterate() emitted the kIter partials
    // of each (m, n) tile consecutively in workspace — confirm against the
    // matmul iterate order configured by the tiling.
    for (uint32_t i = 0; i < mIter * nIter; ++i) {
        for (uint32_t j = 0; j < kIter; ++j) {
            DataCopy(bufferTmp, workspace[outputOffset], tiling.baseM * tiling.baseN);
            AscendC::PipeBarrier<PIPE_ALL>();
            if (j == 0) {
                // First k-slice of a fresh tile: reset the accumulator.
                Clear(bufferC);
            }
            bufferC = bufferC + bufferTmp;
            AscendC::PipeBarrier<PIPE_ALL>();
            outputOffset += tiling.baseM * tiling.baseN;
        }
        // NOTE(review): output is written as contiguous baseM*baseN chunks, not
        // scattered into row-major M x N layout — verify the consumer expects
        // this block-contiguous ("partial output") format.
        DataCopy(cGlobal[i * tiling.baseM * tiling.baseN], bufferC, tiling.baseM * tiling.baseN);
        AscendC::PipeBarrier<PIPE_ALL>();
    }
}

template <typename AType, typename BType, typename CType, typename BiasType>
__aicore__ inline void MatmulKernel<AType, BType, CType, BiasType>::Clear(AscendC::LocalTensor<CType> &bufferC)
{
    // Zero the accumulation buffer, one scalar element at a time.
    const uint32_t count = bufferC.GetSize();
    uint32_t idx = 0;
    while (idx < count) {
        bufferC.SetValue(idx, static_cast<CType>(0));
        ++idx;
    }
}

template <typename AType, typename BType, typename CType, typename BiasType>
__aicore__ inline void MatmulKernel<AType, BType, CType, BiasType>::CalcOffset(
    int32_t blockIdx, int32_t& offsetA, int32_t& offsetB, int32_t& offsetC, int32_t& offsetBias)
{
    const TCubeTiling& t = this->tiling;
    // Cores are laid out M-major: coresAlongM cores split M (ceil division),
    // and consecutive groups of them advance along N.
    auto coresAlongM = (t.M + t.singleCoreM - 1) / t.singleCoreM;
    mCoreIndex = blockIdx % coresAlongM;
    nCoreIndex = blockIdx / coresAlongM;

    // Element offsets of this core's sub-block within each full matrix;
    // a transposed operand swaps which dimension carries the row stride.
    offsetA = isTransA ? mCoreIndex * t.singleCoreM
                       : mCoreIndex * t.Ka * t.singleCoreM;
    offsetB = isTransB ? nCoreIndex * t.Kb * t.singleCoreN
                       : nCoreIndex * t.singleCoreN;
    offsetC = mCoreIndex * t.N * t.singleCoreM + nCoreIndex * t.singleCoreN;
    offsetBias = nCoreIndex * t.singleCoreN;
}
} // namespace MatmulPartialOutputCustom

/**
  * @brief  matmul kernel function entry
  * @param  a: A matrix gm addr.
  * @param  b: B matrix gm addr.
  * @param  bias: Bias vector gm addr.
  * @param  c: C matrix gm addr.
  * @param  workspace: System workspace gm addr required by matmul calc.
  * @param  tmp: Temporary gm space addr used to stage partial outputs.
  * @param  tilingGm: Tiling data addr.
  * @retval None
  */
extern "C" __global__ __aicore__ void matmul_partial_output_custom(GM_ADDR a, GM_ADDR b,
    GM_ADDR bias, GM_ADDR c, GM_ADDR workspace, GM_ADDR tmp, GM_ADDR tilingGm)
{
    // Stage the tiling parameters from GM into a local copy.
    TCubeTiling tilingData;
    CopyTiling(&tilingData, tilingGm);

    // Build the kernel object and bind its matmul member to the pipe
    // and the system workspace.
    MatmulPartialOutputCustom::MatmulKernel<half, half, float, float> op;
    AscendC::TPipe pipe;
    REGIST_MATMUL_OBJ(&pipe, GetSysWorkSpacePtr(), op.matmulObj, &tilingData);

    // This sample runs with neither operand transposed.
    op.Init(a, b, bias, c, tmp, tilingData, false, false);
    op.Process(&pipe);
}

#ifndef ASCENDC_CPU_DEBUG
/**
 * Host-side launcher for the device kernel (NPU builds only).
 * blockDim: number of cores to launch; stream: execution stream handle.
 * Remaining arguments are forwarded to the kernel unchanged.
 */
void matmul_partial_output_custom_do(uint32_t blockDim, void* stream,
    GM_ADDR a, GM_ADDR b, GM_ADDR bias, GM_ADDR c, GM_ADDR workspace, GM_ADDR tmp, GM_ADDR tilingGm)
{
    // <<<blockDim, l2ctrl, stream>>> — nullptr for the l2ctrl slot.
    matmul_partial_output_custom<<<blockDim, nullptr, stream>>>(a, b, bias, c, workspace, tmp, tilingGm);
}
#endif
