/*
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

#include "matmul_async_iterate_custom_kernel.h"

namespace MatmulCustom {

template <typename AType, typename BType, typename CType, typename BiasType>
__aicore__ inline void MatmulAsyncIterateKernel<AType, BType, CType, BiasType>::Init(GM_ADDR a,
        GM_ADDR b, GM_ADDR bias, GM_ADDR c, GM_ADDR workspace, const TCubeTiling& tiling)
{
    this->tiling = tiling;

    aGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ AType*>(a), tiling.M * tiling.Ka);
    bGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ BType*>(b), tiling.Kb * tiling.N);
    cGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ CType*>(c), tiling.M * tiling.N);
    biasGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ BiasType*>(bias), tiling.N);
    workspaceGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ CType*>(workspace), tiling.M * tiling.N);

    uint32_t offsetA = 0;
    uint32_t offsetB = 0;
    uint32_t offsetC = 0;
    uint32_t offsetBias = 0;
    CalcOffset(AscendC::GetBlockIdx(), offsetA, offsetB, offsetC, offsetBias);
    aGlobal = aGlobal[offsetA];
    bGlobal = bGlobal[offsetB];
    cGlobal = cGlobal[offsetC];
    biasGlobal = biasGlobal[offsetBias];
    workspaceGlobal = workspaceGlobal[AscendC::GetBlockIdx() * tiling.singleCoreM * tiling.singleCoreN];

    if(GetSysWorkSpacePtr() == nullptr){
        return;
    }
}

// Runs the asynchronous-iterate matmul pipeline for this core:
// one Iterate<false> launches all base-block computations, then each
// baseM x baseN result block is drained through a local-buffer queue pair
// (optional vector post-processing slot) and copied out to GM.
template <typename AType, typename BType, typename CType, typename BiasType>
__aicore__ inline void MatmulAsyncIterateKernel<AType, BType, CType, BiasType>::Process(AscendC::TPipe* pipe)
{
    // Bind the matmul object to the system workspace and this core's tiling.
    REGIST_MATMUL_OBJ(pipe, GetSysWorkSpacePtr(), matmulObj, &(this->tiling));

    matmulObj.SetTensorA(aGlobal);
    matmulObj.SetTensorB(bGlobal);
    if (this->tiling.isBias) {
        matmulObj.SetBias(biasGlobal);
    }

    // Iterate<false> = asynchronous mode: results are fetched block by block
    // via GetTensorC in the loop below, staged through workspaceGlobal.
    matmulObj.SetWorkspace(workspaceGlobal);
    matmulObj.template Iterate<false>();

    // One base block (baseM x baseN elements of CType) per queue slot.
    uint32_t baseM = this->tiling.baseM;
    uint32_t baseN = this->tiling.baseN;
    pipe->InitBuffer(cInQueue, 1, baseM * baseN * sizeof(CType));
    pipe->InitBuffer(cOutQueue, 1, baseM * baseN * sizeof(CType));

    // Copy-out descriptor: baseM rows of baseN elements each; dstStride skips
    // the remainder of each full N-wide output row (both in C0-sized units).
    AscendC::DataCopyParams copyParams = {
        (uint16_t)baseM,
        (uint16_t)(baseN * sizeof(CType) / AscendC::DEFAULT_C0_SIZE),
        (uint16_t)0,
        (uint16_t)((this->tiling.N - baseN) * sizeof(CType) / AscendC::DEFAULT_C0_SIZE)
    };
    // Total base blocks this core produces (ceil over both dimensions covers tails).
    uint32_t iterateTimes = AscendC::Ceil(this->tiling.singleCoreM, baseM) *
            AscendC::Ceil(this->tiling.singleCoreN, baseN);
    for (uint32_t i = 0; i < iterateTimes; ++i) {
        // compute: fetch the next ready result block from the matmul object
        auto cInLocal = cInQueue.AllocTensor<CType>();
        matmulObj.template GetTensorC<false>(cInLocal);
        cInQueue.EnQue(cInLocal);
        // any vector operator: placeholder pass-through copy; a real kernel
        // would apply elementwise vector ops between DeQue and EnQue here
        auto src = cInQueue.DeQue<CType>();
        auto dst = cOutQueue.AllocTensor<CType>();
        DataCopy(dst, src, baseM * baseN);
        cOutQueue.EnQue(dst);
        cInQueue.FreeTensor(src);
        // copy out: strided write to this block's position in the C matrix
        auto cOutLocal = cOutQueue.DeQue<CType>();
        DataCopy(cGlobal[CalcDstOffset(i)], cOutLocal, copyParams);
        cOutQueue.FreeTensor(cOutLocal);
    }
    matmulObj.End();
}


// Computes this core's element offsets into A, B, C and bias from its block
// index. Cores are laid out column-major over the M-direction blocks:
// mCoreIndx varies fastest, nCoreIndx = blockIdx / (blocks along M).
template <typename AType, typename BType, typename CType, typename BiasType>
__aicore__ inline void MatmulAsyncIterateKernel<AType, BType, CType, BiasType>::CalcOffset(
    uint32_t blockIdx, uint32_t& offsetA, uint32_t& offsetB, uint32_t& offsetC, uint32_t& offsetBias)
{
    auto mSingleBlocks = AscendC::Ceil(this->tiling.M, this->tiling.singleCoreM);
    auto mCoreIndx = blockIdx % mSingleBlocks;
    auto nCoreIndx = blockIdx / mSingleBlocks;

    // Row-major element offsets: A is M x Ka, C is M x N, B/bias advance along N.
    offsetA = mCoreIndx * this->tiling.Ka * this->tiling.singleCoreM;
    offsetB = nCoreIndx * this->tiling.singleCoreN;
    offsetC = mCoreIndx * this->tiling.N * this->tiling.singleCoreM + nCoreIndx * this->tiling.singleCoreN;
    offsetBias = nCoreIndx * this->tiling.singleCoreN;

    // process with tail block: clamp this core's extent at the matrix edge.
    // int32_t holds the remainder; comparisons assume tiling fields fit in
    // int32_t range (typical for TCubeTiling) — mixed-sign compare otherwise.
    int32_t tailM = this->tiling.M - mCoreIndx * this->tiling.singleCoreM;
    tailM = tailM < this->tiling.singleCoreM ? tailM : this->tiling.singleCoreM;
    int32_t tailN = this->tiling.N - nCoreIndx * this->tiling.singleCoreN;
    tailN = tailN < this->tiling.singleCoreN ? tailN : this->tiling.singleCoreN;
    if (tailM < this->tiling.singleCoreM || tailN < this->tiling.singleCoreN) {
        // NOTE(review): SetTail is called here from Init(), but matmulObj is
        // only registered via REGIST_MATMUL_OBJ in Process() — confirm the
        // framework permits SetTail before registration.
        matmulObj.SetTail(tailM, tailN);
    }
}

// Maps iteration index i of the GetTensorC drain loop to the element offset
// of that base block within this core's C view. The mapping must mirror the
// matmul object's traversal order: iterateOrder == 1 walks N-major (n varies
// fastest), any other value walks M-major (m varies fastest).
// Template parameters renamed AType/BType for consistency with the other
// member definitions in this file.
template <typename AType, typename BType, typename CType, typename BiasType>
__aicore__ inline uint32_t MatmulAsyncIterateKernel<AType, BType, CType, BiasType>::CalcDstOffset(uint32_t i)
{
    uint32_t mIter = 0;
    uint32_t nIter = 0;
    if (this->tiling.iterateOrder != 1) {
        // M-major: consecutive i moves down the M direction first.
        uint32_t mIterTimes = AscendC::Ceil(this->tiling.singleCoreM, this->tiling.baseM);
        mIter = i % mIterTimes;
        nIter = i / mIterTimes;
    } else {
        // N-major: consecutive i moves along the N direction first.
        uint32_t nIterTimes = AscendC::Ceil(this->tiling.singleCoreN, this->tiling.baseN);
        mIter = i / nIterTimes;
        nIter = i % nIterTimes;
    }
    // Row-major offset within the full-width (tiling.N) output matrix.
    return (mIter * this->tiling.baseM * this->tiling.N + nIter * this->tiling.baseN);
}
}  // namespace MatmulCustom

namespace {
// Copies the tiling structure word-by-word from global memory into a local
// TCubeTiling instance (GM cannot be read through a plain struct pointer).
__aicore__ inline void CopyTiling(TCubeTiling* tiling, GM_ADDR tilingGM)
{
    static_assert(sizeof(TCubeTiling) % sizeof(uint32_t) == 0,
                  "TCubeTiling must be a whole number of 32-bit words");
    constexpr uint32_t wordCount = sizeof(TCubeTiling) / sizeof(uint32_t);

    uint32_t* dst = reinterpret_cast<uint32_t*>(tiling);
    auto src = reinterpret_cast<__gm__ uint32_t*>(tilingGM);
    // Unsigned loop index avoids the signed/unsigned comparison of the
    // original `int i < sizeof(...)` form.
    for (uint32_t i = 0; i < wordCount; ++i) {
        dst[i] = src[i];
    }
}
}

// Device entry point: stages the tiling from GM, then drives one
// MatmulAsyncIterateKernel instance through Init and Process.
extern "C" __global__ __aicore__ void matmul_async_iterate_custom(
    GM_ADDR a, GM_ADDR b, GM_ADDR bias, GM_ADDR c, GM_ADDR workspace, GM_ADDR tilingGm)
{
    // Pull the tiling parameters out of global memory first.
    TCubeTiling localTiling;
    CopyTiling(&localTiling, tilingGm);

    AscendC::TPipe pipe;
    MatmulCustom::MatmulAsyncIterateKernel<half, half, float, float> kernelInstance;
    kernelInstance.Init(a, b, bias, c, workspace, localTiling);
    kernelInstance.Process(&pipe);
}

#ifndef ASCENDC_CPU_DEBUG
// Host-side launch wrapper (NPU build only): dispatches the kernel with
// blockDim cores on the given stream. All pointer arguments are device
// (global-memory) addresses.
void matmul_async_iterate_custom_do(uint32_t blockDim, void* stream,
    GM_ADDR a, GM_ADDR b, GM_ADDR bias, GM_ADDR c, GM_ADDR workspace, GM_ADDR tilingGm)
{
    matmul_async_iterate_custom<<<blockDim, nullptr, stream>>>(a, b, bias, c, workspace, tilingGm);
}
#endif