/**
 * @file mc2_matmul_compute.h
 *
 * Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */

#ifndef MC2_MATMUL_COMPUTE_H
#define MC2_MATMUL_COMPUTE_H
#include "mc2_matmul_block.h"
#include "all_gather_matmul_custom_tiling.h"

namespace AscendC {
using namespace matmul;

// Per-core matmul driver for the MC2 (fused communication + compute) flow:
// computes C = A x B (+ bias) over global-memory tensors, with the work split
// across cube cores by MatmulBaseBlock. Typical call sequence:
//   Init() once -> UpdateWeightBias()/UpdateAddress() per step -> Process() -> End().
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE>
class MatmulCompute {
    // Element types extracted from the matmul type descriptors.
    using A_T = typename A_TYPE::T;
    using B_T = typename B_TYPE::T;
    using C_T = typename C_TYPE::T;
    using BiasT = typename BIAS_TYPE::T;

public:
    __aicore__ inline MatmulCompute() {}
    // Caches tiling/config, initializes the matmul engine and block splitter.
    __aicore__ inline void Init(AllGatherMatmulRCSTiling& cfg, TCubeTiling& tiling);
    // Binds the (fixed) weight and bias global-memory buffers.
    __aicore__ inline void UpdateWeightBias(GM_ADDR bGM, GM_ADDR biasGM);
    // Rebinds the input/output global-memory buffers for the current step.
    __aicore__ inline void UpdateAddress(GM_ADDR aGM, uint32_t aSize, GM_ADDR cGM, uint32_t cSize);
    // Runs the tiled matmul over all blocks assigned to this core.
    __aicore__ inline void Process();
    // Releases matmul engine resources.
    __aicore__ inline void End();

private:
    // Applies C0-aligned original shapes when A and/or B use the NZ layout.
    __aicore__ inline void SetOrgShapeAlign();

private:
    MatmulImpl<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, CFG_MDL> mm_;  // matmul compute engine
    GlobalTensor<A_T> aGlobal;
    GlobalTensor<B_T> bGlobal;
    GlobalTensor<C_T> cGlobal;
    GlobalTensor<BiasT> biasGlobal;
    MatmulBaseBlock block_;            // splits the output into per-core blocks
    TCubeTiling tiling_;               // cached copy of the cube tiling
    AllGatherMatmulRCSTiling cfg_;     // cached copy of the op-level config
};

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE>
__aicore__ inline void MatmulCompute<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE>::UpdateWeightBias(
    GM_ADDR bGM, GM_ADDR biasGM)
{
    // In the MC2 compute flow the B matrix stays fixed, so these GM base
    // addresses are bound once and never offset per iteration.
    auto weightPtr = reinterpret_cast<__gm__ B_T *>(bGM);
    auto biasPtr = reinterpret_cast<__gm__ BiasT *>(biasGM);
    bGlobal.SetGlobalBuffer(weightPtr, tiling_.Kb * tiling_.N);
    biasGlobal.SetGlobalBuffer(biasPtr, tiling_.N);
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE>
__aicore__ inline void MatmulCompute<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE>::UpdateAddress(
    GM_ADDR aGM, uint32_t aSize, GM_ADDR cGM, uint32_t cSize)
{
    // Rebind the input (A) and output (C) buffers for the current step;
    // sizes are element counts supplied by the caller.
    auto inputPtr = reinterpret_cast<__gm__ A_T *>(aGM);
    auto outputPtr = reinterpret_cast<__gm__ C_T *>(cGM);
    aGlobal.SetGlobalBuffer(inputPtr, aSize);
    cGlobal.SetGlobalBuffer(outputPtr, cSize);
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE>
__aicore__ inline void MatmulCompute<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE>::Init(AllGatherMatmulRCSTiling& cfg, TCubeTiling& tiling)
{
    // Bring up the MatmulImpl engine first, then cache the tiling and
    // op-level config for later use by Process()/SetOrgShapeAlign().
    mm_.SetSubBlockIdx(0);
    mm_.Init(&tiling, GetTPipePtr());
    tiling_ = tiling;
    cfg_ = cfg;
    // isTransposeA/isTransposeB are integer flags; nonzero means transposed.
    block_.Init(tiling, cfg.isTransposeA > 0, cfg.isTransposeB > 0);
    SetOrgShapeAlign();
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE>
__aicore__ inline void MatmulCompute<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE>::SetOrgShapeAlign()
{
    // When A and/or B use the NZ (fractal) layout, the matmul engine needs the
    // affected original-shape dimensions rounded up to the C0 fractal size.
    // All conditions are compile-time CubeFormat constants, so every branch is
    // `if constexpr`: the original mixed `if constexpr` with runtime `else if`,
    // which instantiated both NZ branches for every template combination.
    // When neither input is NZ (ND/ND), no realignment call is made.
    if constexpr (A_TYPE::format == CubeFormat::NZ && B_TYPE::format == CubeFormat::NZ) {
        auto alignKa = AlignUp(tiling_.Ka, C0_SIZE);
        auto alignKb = AlignUp(tiling_.Kb, C0_SIZE);
        auto alignM = AlignUp(tiling_.M, C0_SIZE);
        auto alignN = AlignUp(tiling_.N, C0_SIZE);
        mm_.SetOrgShape(alignM, alignN, alignKa, alignKb, cfg_.rankN);
    } else if constexpr (A_TYPE::format == CubeFormat::NZ) {
        auto alignKa = AlignUp(tiling_.Ka, C0_SIZE);
        auto alignM = AlignUp(tiling_.M, C0_SIZE);
        mm_.SetOrgShape(alignM, tiling_.N, alignKa, tiling_.Kb, cfg_.rankN);
    } else if constexpr (B_TYPE::format == CubeFormat::NZ) {
        auto alignKb = AlignUp(tiling_.Kb, C0_SIZE);
        auto alignN = AlignUp(tiling_.N, C0_SIZE);
        mm_.SetOrgShape(tiling_.M, alignN, tiling_.Ka, alignKb, cfg_.rankN);
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE>
__aicore__ inline void MatmulCompute<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE>::Process()
{
    // Compute the starting block index before each round of block iteration.
    block_.InitBlockWithoutIndex();
    for (uint32_t round = 0; round < block_.args_.blockCnt; round++) {
        // Advance to the block index for this round.
        block_.UpdateBlockIndex(round);
        // Tail rounds may land past the last real block; skip those.
        if (block_.args_.blockCurrIdx < block_.args_.totalBlockCnt) {
            block_.UpdateBlockParams();
            block_.template CalcGMOffset<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE>();
            mm_.SetSingleShape(block_.args_.singleCoreM, block_.args_.singleCoreN, tiling_.singleCoreK);
            mm_.SetTensorA(aGlobal[block_.offset_.offsetA], block_.args_.isTransA);
            mm_.SetTensorB(bGlobal[block_.offset_.offsetB], block_.args_.isTransB);
            if (tiling_.isBias) {
                mm_.SetBias(biasGlobal[block_.offset_.offsetBias]);
            }
            mm_.Iterate();
            mm_.GetTensorC(cGlobal[block_.offset_.offsetC]);
            // FIX -> M synchronization: wait for the fixpipe output of this
            // block before starting the next one.
            event_t fixToMEvent = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::FIX_M));
            SetFlag<HardEvent::FIX_M>(fixToMEvent);
            WaitFlag<HardEvent::FIX_M>(fixToMEvent);
        }
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE>
__aicore__ inline void MatmulCompute<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE>::End()
{
    // Release the matmul engine's resources; call once after all Process() rounds.
    mm_.End();
}
}
#endif // MC2_MATMUL_COMPUTE_H
