/**
 * Copyright (c) 2024 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */
/*!
 * \file batch_copy_cube_in_v200.h
 * \brief Batch Matmul copy-in specializations (ND and NZ formats) for platforms
 *        that require UB staging (MatmulFeatureTrait<MM_CFG>::IsNeedUB()).
 */

#ifndef IMPL_MATMUL_MODULES_STAGE_COPY_CUBE_IN_BATCH_BATCH_COPY_CUBE_IN_V200_H
#define IMPL_MATMUL_MODULES_STAGE_COPY_CUBE_IN_BATCH_BATCH_COPY_CUBE_IN_V200_H

#include "batch_copy_cube_in_intf.h"
#include "batch_layout.h"
#include "batch_data_copy_wrapper.h"
#include "../../../resource/cube_in_buffer/cube_in_buffer.h"
#include "../copy_cube_in_params.h"
#include "../../../param/matmul_var.h"

namespace matmul {
// Specialized Template Class of Batch Matmul CopyIn
// Batch Matmul ND Format Data CopyIn From GM/UB
// Specialization: Batch Matmul copy-in for ND-format input on UB-required
// platforms. Stages batches of ND source data (GM) into an NZ-laid-out L1/UB
// buffer managed by CubeInBuffer, either through the vector path
// (CopyND2NZThroughVec, when MM_CFG enables enVecND2NZ) or the on-the-fly
// ND2NZ copy path.
template <typename IMPL, class INPUT_TYPE, const auto& MM_CFG>
class BatchCopyCubeIn<IMPL, INPUT_TYPE, MM_CFG,
                enable_if_t<(MatmulFeatureTrait<MM_CFG>::IsNeedUB()) &&
                                  GetCopyCubeInType<INPUT_TYPE, MM_CFG>() == CopyCubeInType::BMM &&
                                  (INPUT_TYPE::format == CubeFormat::ND)>>
{
private:
    // Per-input-tag (A or B matrix) module handles shared with the owning
    // matmul implementation object.
    MATMUL_USE_MODULE_ON(CubeInBuffer, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(BatchLayout, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(BatchDataCopyUtils, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(CopyCubeInParams, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(MatmulVar, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(MatmulShapeInfo, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(MatmulTensorInfo, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(MatmulShapeTiling, INPUT_TYPE::TAG);

    using TransT = typename INPUT_TYPE::TRANS_T;  // element type after any on-copy conversion
    using SrcT = typename INPUT_TYPE::T;          // element type of the source data

    // Number of elements in one C0 fractal column for TransT.
    constexpr static int32_t c0Size_ = AuxGetC0Size<TransT>();

    // singleHeight rounded up to a whole cube block (BLOCK_CUBE rows).
    template <bool isTrans = false>
    __aicore__ constexpr int32_t GetSingleHeightAlign()
    {
        return CeilAlign(MATMUL_MODULE(MatmulShapeInfo)->template GetSingleHeight<isTrans>(), BLOCK_CUBE);
    }

    // singleWidth rounded up to a whole C0 column.
    template <bool isTrans = false>
    __aicore__ constexpr int32_t GetSingleWidthAlign()
    {
        return CeilAlign(MATMUL_MODULE(MatmulShapeInfo)->template GetSingleWidth<isTrans>(), c0Size_);
    }

    // Element count of one single-shape matrix after NZ alignment of both dims.
    __aicore__ inline int32_t GetSingleSizeAlign(const int32_t height, const int32_t width)
    {
        return CeilAlign(height, BLOCK_CUBE) * CeilAlign(width, c0Size_);
    }

    // A-matrix overload: the "height" used per base block is K when the input
    // is transposed, otherwise M (intra-block variant reads the intra-block
    // baseUseM instead of the global one).
    template <bool IS_TRANS = false, bool IS_INTRA_BLOCK = false, typename INPUT_TYPE_ALIAS = INPUT_TYPE>
    __aicore__ constexpr enable_if_t<INPUT_TYPE_ALIAS::TAG == InputTypeTag::A, int32_t> GetBaseUseHeight() const
    {
        if constexpr (IS_INTRA_BLOCK) {
            if constexpr (IS_TRANS) {
                return MATMUL_CONST_PARAM_VAR.baseUseK_;
            } else {
                return MATMUL_CONST_INTRA_BLOCK.baseUseM;
            }
        } else {
            if constexpr (IS_TRANS) {
                return MATMUL_CONST_PARAM_VAR.baseUseK_;
            } else {
                return MATMUL_CONST_PARAM_VAR.baseUseM_;
            }
        }
    }

    // B-matrix overload: the "height" used per base block is N when the input
    // is transposed, otherwise K (intra-block variant reads the intra-block
    // baseUseN instead of the global one).
    template <bool IS_TRANS = false, bool IS_INTRA_BLOCK = false, typename INPUT_TYPE_ALIAS = INPUT_TYPE>
    __aicore__ constexpr enable_if_t<INPUT_TYPE_ALIAS::TAG == InputTypeTag::B, int32_t> GetBaseUseHeight() const
    {
        if constexpr (IS_INTRA_BLOCK) {
            if constexpr (IS_TRANS) {
                return MATMUL_CONST_INTRA_BLOCK.baseUseN;
            } else {
                return MATMUL_CONST_PARAM_VAR.baseUseK_;
            }
        } else {
            if constexpr (IS_TRANS) {
                return MATMUL_CONST_PARAM_VAR.baseUseN_;
            } else {
                return MATMUL_CONST_PARAM_VAR.baseUseK_;
            }
        }
    }

public:
    inline __aicore__ BatchCopyCubeIn() = default;
    inline __aicore__ ~BatchCopyCubeIn() = default;

    // Sizes the input buffer to hold all batches of one aligned single-shape
    // matrix, with a single buffer slot (no multi-buffering).
    // NOTE(review): this variant sizes by BatchLayout->GetBatchNum() while the
    // NZ specialization below uses MatmulShapeTiling->GetBatchNum() — confirm
    // the two sources agree.
    __aicore__ inline void Init()
    {
        MATMUL_MODULE(CubeInBuffer)
            ->Init(MATMUL_MODULE(BatchLayout)->GetBatchNum() *
                       MATMUL_MODULE(CopyCubeInParams)->template GetSingleSizeAlign<INPUT_TYPE::isTrans>(),
                   1);
    }

    // Records the GM source address (and transpose flag) and invalidates any
    // previously cached input data.
    __aicore__ inline void SetInput(__gm__ SrcT *srcGlobalAddr, bool isTranspose = false)
    {
        MATMUL_MODULE(MatmulTensorInfo)->SetGlobalAddr(srcGlobalAddr, isTranspose);
        MATMUL_MODULE(CubeInBuffer)->Reset();
    }

    // Local (UB) input is not supported by this ND specialization; intentional no-op.
    __aicore__ inline void SetInput(const TBuffAddr& address, bool isTranspose = false)
    {}

    // Copies one outer-loop batch group (split into splitSize parts, this call
    // handling part splitIdx) into dstTensor, dispatching on the runtime
    // transpose flag so the shape getters use the right orientation.
    __aicore__ inline void BatchLoad(LocalTensor<TransT>& dstTensor, const uint32_t matrixStride,
                                     const int32_t outerIdx, const int32_t splitIdx, const int32_t splitSize)
    {
        ASCENDC_ASSERT((MATMUL_MODULE(BatchLayout)->IsLayoutGValid()), {
            KERNEL_LOG(KERNEL_ERROR, "multi batch calculation of multiple lines of S is not supported");
        });
        if (MATMUL_MODULE(MatmulShapeInfo)->IsTranspose()) {
            return CopyBatchToCube<true>(dstTensor, matrixStride, outerIdx, splitIdx, splitSize,
                                         MATMUL_MODULE(MatmulShapeInfo)->template GetSingleHeight<true>(),
                                         MATMUL_MODULE(MatmulShapeInfo)->template GetSingleWidth<true>());
        } else {
            return CopyBatchToCube<false>(dstTensor, matrixStride, outerIdx, splitIdx, splitSize,
                                          MATMUL_MODULE(MatmulShapeInfo)->template GetSingleHeight<false>(),
                                          MATMUL_MODULE(MatmulShapeInfo)->template GetSingleWidth<false>());
        }
    }

    // Returns a tensor view bound to the already-loaded local data; the tile
    // coordinates are ignored because the whole batch was staged by BatchLoad.
    __aicore__ inline LocalTensor<TransT> LoadData(int curRow, int curCol, int tileHeight, int tileWidth)
    {
        LocalTensor<TransT> localTensor;
        localTensor.SetAddr(MATMUL_MODULE(MatmulTensorInfo)->GetLocalAddr());
        return localTensor;
    }

    // Releases the cached batch tensor and tears down the buffer.
    __aicore__ inline void BatchDestroy()
    {
        MATMUL_MODULE(CubeInBuffer)->FreeTensor();
        MATMUL_MODULE(CubeInBuffer)->Destroy();
    }

    __aicore__ inline LocalTensor<TransT> AllocTensor(int32_t iterIndex = 0)
    {
        return MATMUL_MODULE(CubeInBuffer)->AllocTensor(iterIndex);
    }

    // No per-tile cleanup is needed in the batch path; intentional no-op.
    __aicore__ inline void ClearLoadData(const LocalTensor<TransT>& tensor = NULL_TENSOR<TransT>,
        int32_t curRow = 0, int32_t curCol = 0)
    {}

    __aicore__ inline void Destroy()
    {
        MATMUL_MODULE(CubeInBuffer)->Destroy();
    }

    __aicore__ inline void Reset()
    {
        MATMUL_MODULE(CubeInBuffer)->Reset();
    }

private:
    // A-matrix vector-path ND→NZ copy for batchNum matrices.
    // If the transpose-length UB budget covers one single-shape column strip
    // (ubEnough), copies c0-wide column strips per batch, serializing each
    // strip with an MTE3→MTE2 set/wait flag pair. Otherwise falls back to
    // CopyND2NZOnTheFly: one merged copy when transposed, per-batch (width
    // aligned) or per-row-block (width unaligned) copies when not.
    template <bool isTrans = false, InputTypeTag tag = INPUT_TYPE::TAG>
    __aicore__ inline enable_if_t<tag == InputTypeTag::A>
    CopyND2NZThroughVec(LocalTensor<TransT>& dstTensor, GlobalTensor<SrcT>& srcTensor, int32_t batchNum,
                        int32_t batchOuterIdx, int32_t splitOuterIdx)
    {
        // Destination offset of this split's slice inside the batch buffer.
        int64_t matrixSplitSize =
            batchNum * splitOuterIdx * MATMUL_MODULE(CopyCubeInParams)->template GetSingleSizeAlign<isTrans>();
        int64_t srcOffset = 0, dstOffset = matrixSplitSize;
        bool ubEnough = MATMUL_MODULE(MatmulShapeInfo)->template GetSingleHeight<isTrans>() * c0Size_ <=
                                MATMUL_PARAM_VAR.tiling_.GetTransLength()
                            ? true
                            : false;
        if (ubEnough) {
            event_t eventIDMte3ToMte2 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2));
            for (auto iterBatch = 0; iterBatch < batchNum; ++iterBatch) {
                // Number of c0-wide column strips covering the full width.
                int colNum = Ceil(MATMUL_MODULE(MatmulShapeInfo)->template GetTotalCol<isTrans>() *
                                      MATMUL_MODULE(MatmulShapeInfo)->template GetBaseWidth<isTrans>(),
                                  c0Size_);
                for (auto i = 0; i < colNum; ++i) {
                    MATMUL_MODULE(BatchDataCopyUtils)
                        ->CopyND2NZ(dstTensor[dstOffset], srcTensor[srcOffset], 0, i * c0Size_,
                                    MATMUL_MODULE(MatmulShapeInfo)->template GetSingleHeight<isTrans>(), c0Size_,
                                    isTrans ? MATMUL_MODULE(MatmulShapeInfo)->template GetSingleWidth<isTrans>()
                                            : MATMUL_MODULE(MatmulShapeInfo)->template GetOrgWidth<isTrans>());
                    dstOffset += GetSingleHeightAlign<isTrans>() * c0Size_;
                    // Back-to-back set/wait drains the vector-out stage before
                    // the next strip is copied in.
                    SetFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
                    WaitFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
                }

                srcOffset += MATMUL_MODULE(CopyCubeInParams)->GetSingleSize();
            }

            return;
        }

        if constexpr (isTrans) {
            // Transposed: all batches are contiguous along the width axis, so
            // a single on-the-fly copy handles the whole group.
            MATMUL_MODULE(BatchDataCopyUtils)
                ->CopyND2NZOnTheFly(dstTensor[matrixSplitSize], srcTensor, 0, 0,
                                    MATMUL_MODULE(MatmulShapeInfo)->template GetSingleHeight<isTrans>(),
                                    batchNum * GetSingleWidthAlign<isTrans>(),
                                    batchNum * GetSingleWidthAlign<isTrans>());
        } else {
            bool isWidthAligned =
                MATMUL_MODULE(MatmulShapeInfo)->template GetSingleWidth<isTrans>() % c0Size_ == 0 ? true : false;
            if (isWidthAligned) {
                // Width already a multiple of c0: one copy per batch.
                for (int iterBatch = 0; iterBatch < batchNum; ++iterBatch) {
                    srcTensor.SetAddr(srcOffset);
                    MATMUL_MODULE(BatchDataCopyUtils)
                        ->CopyND2NZOnTheFly(dstTensor[dstOffset], srcTensor, 0, 0,
                                            MATMUL_MODULE(MatmulShapeInfo)->template GetSingleHeight<isTrans>(),
                                            MATMUL_MODULE(MatmulShapeInfo)->template GetSingleWidth<isTrans>(),
                                            MATMUL_MODULE(MatmulShapeInfo)->template GetOrgWidth<isTrans>())
;
                    dstOffset +=
                        MATMUL_MODULE(MatmulShapeInfo)->template GetSingleHeight<isTrans>() * GetSingleWidthAlign<isTrans>();
                    srcOffset += MATMUL_MODULE(CopyCubeInParams)->GetSingleSize();
                }
            } else {
                // Unaligned width: copy base-height row blocks one at a time so
                // padding is applied per block.
                for (int iterBatch = 0; iterBatch < batchNum; ++iterBatch) {
                    int64_t innerLoopSrcOffset = srcOffset;
                    for (auto i = 0; i < MATMUL_MODULE(MatmulShapeInfo)->template GetTotalRow<isTrans>(); ++i) {
                        srcTensor.SetAddr(innerLoopSrcOffset);
                        MATMUL_MODULE(BatchDataCopyUtils)
                            ->CopyND2NZOnTheFly(dstTensor[dstOffset], srcTensor, 0, 0, GetBaseUseHeight<isTrans>(),
                                                MATMUL_MODULE(MatmulShapeInfo)->template GetSingleWidth<isTrans>(),
                                                MATMUL_MODULE(MatmulShapeInfo)->template GetOrgWidth<isTrans>());
                        dstOffset += GetBaseUseHeight<isTrans>() * GetSingleWidthAlign<isTrans>();
                        // Widen before multiplying to avoid 32-bit overflow on
                        // large shapes.
                        innerLoopSrcOffset += static_cast<decltype(innerLoopSrcOffset)>(
                            GetBaseUseHeight<isTrans>() *
                            static_cast<decltype(innerLoopSrcOffset)>(
                                MATMUL_MODULE(MatmulShapeInfo)->template GetSingleWidth<isTrans>()));
                    }

                    srcOffset += MATMUL_MODULE(CopyCubeInParams)->GetSingleSize();
                }
            }
        }
    }

    // B-matrix vector-path ND→NZ copy for batchNum matrices. Mirrors the A
    // overload, plus a tail copy (BatchCopyND2NZ) for the last, c0-unaligned
    // column strip when the transposed original width is not a multiple of c0.
    template <bool isTrans = false, InputTypeTag tag = INPUT_TYPE::TAG>
    __aicore__ inline enable_if_t<tag == InputTypeTag::B>
    CopyND2NZThroughVec(LocalTensor<TransT>& dstTensor, GlobalTensor<SrcT>& srcTensor, int32_t batchNum,
                        int32_t batchOuterIdx, int32_t splitOuterIdx)
    {
        int64_t matrixSplitSize =
            batchNum * splitOuterIdx * MATMUL_MODULE(CopyCubeInParams)->template GetSingleSizeAlign<isTrans>();
        int64_t srcOffset = 0, dstOffset = matrixSplitSize;
        bool ubEnough = MATMUL_MODULE(MatmulShapeInfo)->template GetSingleHeight<isTrans>() * c0Size_ <=
                                MATMUL_PARAM_VAR.tiling_.GetTransLength()
                            ? true
                            : false;
        if (ubEnough) {
            // colNum counts only the fully c0-aligned column strips.
            auto colNum = Ceil(MATMUL_MODULE(MatmulShapeInfo)->template GetTotalCol<isTrans>() *
                                   MATMUL_MODULE(MatmulShapeInfo)->template GetBaseWidth<isTrans>(),
                               c0Size_);
            event_t eventIDMte3ToMte2 = static_cast<event_t>(GetTPipePtr()->FetchEventID(HardEvent::MTE3_MTE2));
            for (auto iterBatch = 0; iterBatch < batchNum; ++iterBatch) {
                for (auto i = 0; i < colNum; ++i) {
                    MATMUL_MODULE(BatchDataCopyUtils)
                        ->CopyND2NZ(dstTensor[dstOffset], srcTensor[srcOffset], 0, i * c0Size_,
                                    MATMUL_MODULE(MatmulShapeInfo)->template GetSingleHeight<isTrans>(), c0Size_,
                                    MATMUL_MODULE(MatmulShapeInfo)->template GetOrgWidth<isTrans>());
                    dstOffset += GetSingleHeightAlign<isTrans>() * c0Size_;
                    SetFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
                    WaitFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
                }

                if constexpr (isTrans) {
                    // Remaining partial strip beyond colNum * c0 when the
                    // original width is not c0-aligned.
                    auto isWidthAligned =
                        MATMUL_MODULE(MatmulShapeInfo)->template GetOrgWidth<true>() % c0Size_ == 0 ? true : false;
                    if (!isWidthAligned) {
                        MATMUL_MODULE(BatchDataCopyUtils)
                            ->BatchCopyND2NZ(dstTensor[dstOffset], srcTensor[srcOffset], 0, colNum * c0Size_,
                                             MATMUL_MODULE(MatmulShapeInfo)->template GetSingleHeight<true>(),
                                             !isWidthAligned, MATMUL_MODULE(MatmulShapeInfo)->template GetOrgWidth<true>());
                        dstOffset += GetSingleHeightAlign<true>() * c0Size_;
                        SetFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
                        WaitFlag<HardEvent::MTE3_MTE2>(eventIDMte3ToMte2);
                    }
                }

                srcOffset += MATMUL_MODULE(CopyCubeInParams)->GetSingleSize();
            }

            return;
        }

        if constexpr (isTrans) {
            // Transposed B: batches stack along the height axis.
            MATMUL_MODULE(BatchDataCopyUtils)
                ->CopyND2NZOnTheFly(dstTensor[matrixSplitSize], srcTensor, 0, 0,
                                    batchNum * GetSingleHeightAlign<isTrans>(),
                                    MATMUL_MODULE(MatmulShapeInfo)->template GetSingleWidth<isTrans>(),
                                    batchNum * MATMUL_MODULE(MatmulShapeInfo)->template GetOrgWidth<isTrans>());
        } else {
            // Non-transposed B: batches stack along the width axis.
            // (isTrans is false here, so the defaulted <false> template
            // arguments of GetSingleHeight/GetSingleWidthAlign match.)
            MATMUL_MODULE(BatchDataCopyUtils)
                ->CopyND2NZOnTheFly(dstTensor[matrixSplitSize], srcTensor, 0, 0,
                                    MATMUL_MODULE(MatmulShapeInfo)->template GetSingleHeight(),
                                    batchNum * GetSingleWidthAlign(), batchNum * GetSingleWidthAlign());
        }
    }

    // Copies the batch group for one (outerIdx, splitIdx) slice, either via
    // the vector path (returns after the first iteration when enVecND2NZ) or
    // via per-slice on-the-fly ND→NZ copies.
    // NOTE(review): `iterOffset` is declared but never used, and nothing in
    // the loop body depends on `idx` — every iteration re-sets the same
    // srcGlobal address and issues the same copy. Confirm whether the loop is
    // meant to advance batchOffset/iterOffset per iteration.
    // NOTE(review): srcGlobal's base address already includes batchOffset (via
    // SetAddr), yet the on-the-fly copy indexes srcGlobal[batchOffset + ...]
    // again — this looks like a double offset; verify against the V220 path.
    template <bool IS_TRANS = false>
    __aicore__ inline void CopyBatchToCube(LocalTensor<TransT>& dstTensor, const uint32_t matrixStride,
                                           const int32_t outerIdx, const int32_t splitIdx, const int32_t splitSize,
                                           const int32_t height, int32_t width)
    {
        auto srcStride = MATMUL_MODULE(BatchLayout)->GetSrcStride(matrixStride, height, width);
        auto iterNum = MATMUL_MODULE(BatchLayout)->GetLoopNum();
        auto batchNum = MATMUL_MODULE(BatchLayout)->GetBatchNum() / splitSize;
        auto singleSize = MATMUL_MODULE(CopyCubeInParams)->GetSingleSize();
        auto singleSizeAligned = GetSingleSizeAlign(height, width);
        int64_t batchOffset =
            outerIdx * MATMUL_MODULE(BatchLayout)->GetBatchNum() * MATMUL_MODULE(CopyCubeInParams)->GetSingleSize();
        int64_t iterOffset = 0;
        for (int32_t idx = 0; idx < iterNum; ++idx) {
            GlobalTensor<SrcT> srcGlobal;
            srcGlobal.SetGlobalBuffer(MATMUL_MODULE(MatmulTensorInfo)->GetGlobalAddr());
            srcGlobal.SetAddr(batchOffset + batchNum * splitIdx * MATMUL_MODULE(CopyCubeInParams)->GetSingleSize());

            if constexpr (ToMatmulConfig(MM_CFG).enVecND2NZ) {
                CopyND2NZThroughVec<IS_TRANS>(dstTensor, srcGlobal, batchNum, outerIdx, splitIdx);
                return;
            }

            MATMUL_MODULE(BatchDataCopyUtils)
                ->CopyND2NZOnTheFly(dstTensor[batchNum * splitIdx * singleSizeAligned],
                                    srcGlobal[batchOffset + batchNum * splitIdx * singleSize], 0, 0,
                                    MATMUL_MODULE(CopyCubeInParams)->template GetSingleHeightAlign<IS_TRANS>(),
                                    MATMUL_MODULE(CopyCubeInParams)->template GetSingleWidthAlign<IS_TRANS>(),
                                    MATMUL_MODULE(BatchLayout)->template GetSrcDValue<IS_TRANS>());
        }
    }
};

// Specialized Template Class of Batch Matmul CopyIn
// Batch Matmul NZ Format Data CopyIn From GM/UB
// Specialization: Batch Matmul copy-in for NZ-format input on UB-required
// platforms. Source data is already in NZ layout, so each batch is moved with
// an NZ→NZ block copy; the only shape work is alignment of the single-matrix
// dims (with a special c0 height alignment for int8 row-major inputs).
template <typename IMPL, class INPUT_TYPE, const auto& MM_CFG>
class BatchCopyCubeIn<IMPL, INPUT_TYPE, MM_CFG,
                enable_if_t<(MatmulFeatureTrait<MM_CFG>::IsNeedUB()) &&
                                  GetCopyCubeInType<INPUT_TYPE, MM_CFG>() == CopyCubeInType::BMM &&
                                  (INPUT_TYPE::format == CubeFormat::NZ)>>
{
private:
    using TransT = typename INPUT_TYPE::TRANS_T;  // element type after any on-copy conversion
    using SrcT = typename INPUT_TYPE::T;          // element type of the source data

    // Number of elements in one C0 fractal column for TransT.
    constexpr static int32_t c0Size_ = AuxGetC0Size<TransT>();

    // Per-input-tag (A or B matrix) module handles shared with the owning
    // matmul implementation object.
    MATMUL_USE_MODULE_ON(MatmulVar, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(MatmulShapeInfo, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(CubeInBuffer, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(BatchLayout, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(BatchDataCopyUtils, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(CopyCubeInParams, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(MatmulTensorInfo, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(MatmulShapeTiling, INPUT_TYPE::TAG);

public:
    inline __aicore__ BatchCopyCubeIn() = default;
    inline __aicore__ ~BatchCopyCubeIn() = default;

    // Sizes the input buffer to hold all batches of one aligned single-shape
    // matrix, with a single buffer slot (no multi-buffering).
    // NOTE(review): batch count comes from MatmulShapeTiling here, but from
    // BatchLayout in the ND specialization above — confirm the two agree.
    __aicore__ inline void Init()
    {
        MATMUL_MODULE(CubeInBuffer)
            ->Init(MATMUL_MODULE(MatmulShapeTiling)->GetBatchNum() *
                       MATMUL_MODULE(CopyCubeInParams)->template GetSingleSizeAlign<INPUT_TYPE::isTrans>(),
                   1);
    }

    // Records the GM source address (and transpose flag) and invalidates any
    // previously cached input data.
    __aicore__ inline void SetInput(__gm__ SrcT *srcGlobalAddr, bool isTranspose = false)
    {
        MATMUL_MODULE(MatmulTensorInfo)->SetGlobalAddr(srcGlobalAddr, isTranspose);
        MATMUL_MODULE(CubeInBuffer)->Reset();
    }

    // Local-address variant is not supported here; intentional no-op. (The
    // non-GM source path is instead handled inside BatchLoad.)
    __aicore__ inline void SetInput(const TBuffAddr& address, bool isTranspose = false)
    {}

    // Copies one outer-loop batch group into dstTensor. The source tensor type
    // is chosen at compile time from the input position (GM vs. local), and
    // the transpose flag selects which orientation's shape getters are used.
    // The kIsRow template flag is tag- and transpose-dependent (A&&trans or
    // B&&!trans), which drives the int8 height-alignment special case.
    __aicore__ inline void BatchLoad(LocalTensor<TransT>& dstTensor, const uint32_t matrixStride,
                                     const int32_t outerIdx, const int32_t splitIdx, const int32_t splitSize)
    {
        using TensorType =
            typename AscendC::Conditional<PhyPosIsGM(INPUT_TYPE::pos), GlobalTensor<SrcT>, LocalTensor<SrcT>>::type;
        TensorType srcTensor;
        auto batchOffset =
            outerIdx * MATMUL_MODULE(BatchLayout)->GetBatchNum() * MATMUL_MODULE(CopyCubeInParams)->GetSingleSize();
        if constexpr (PhyPosIsGM(INPUT_TYPE::pos)) {
            srcTensor.SetGlobalBuffer(MATMUL_MODULE(MatmulTensorInfo)->GetGlobalAddr());
            srcTensor.SetAddr(batchOffset);
        } else {
            // NOTE(review): leftMatrix_ is used regardless of INPUT_TYPE::TAG;
            // verify the B-matrix (right-hand) local path is intentional here.
            srcTensor.SetAddr(MATMUL_PARAM_VAR.leftMatrix_);
            srcTensor = srcTensor[batchOffset];
        }

        if (MATMUL_MODULE(MatmulShapeInfo)->IsTranspose()) {
            CopyBatchToCube<TensorType, true, INPUT_TYPE::TAG == InputTypeTag::A>(
                dstTensor, srcTensor, outerIdx, splitIdx, splitSize,
                MATMUL_MODULE(MatmulShapeInfo)->template GetSingleHeight<true>(),
                MATMUL_MODULE(MatmulShapeInfo)->template GetSingleWidth<true>());
        } else {
            CopyBatchToCube<TensorType, false, INPUT_TYPE::TAG == InputTypeTag::B>(
                dstTensor, srcTensor, outerIdx, splitIdx, splitSize,
                MATMUL_MODULE(MatmulShapeInfo)->template GetSingleHeight<false>(),
                MATMUL_MODULE(MatmulShapeInfo)->template GetSingleWidth<false>());
        }
    }

    // Returns a tensor view bound to the already-loaded local data; the tile
    // coordinates are ignored because the whole batch was staged by BatchLoad.
    __aicore__ inline LocalTensor<TransT> LoadData(int curRow, int curCol, int tileHeight, int tileWidth)
    {
        LocalTensor<TransT> localTensor;
        localTensor.SetAddr(MATMUL_MODULE(MatmulTensorInfo)->GetLocalAddr());
        return localTensor;
    }

    // Releases the cached batch tensor and tears down the buffer.
    __aicore__ inline void BatchDestroy()
    {
        MATMUL_MODULE(CubeInBuffer)->FreeTensor();
        MATMUL_MODULE(CubeInBuffer)->Destroy();
    }

    __aicore__ inline LocalTensor<TransT> AllocTensor(int32_t iterIndex = 0)
    {
        return MATMUL_MODULE(CubeInBuffer)->AllocTensor(iterIndex);
    }

    // No per-tile cleanup is needed in the batch path; intentional no-op.
    __aicore__ inline void ClearLoadData(const LocalTensor<TransT>& tensor = NULL_TENSOR<TransT>,
        int32_t curRow = 0, int32_t curCol = 0)
    {}

    __aicore__ inline void Destroy()
    {
        MATMUL_MODULE(CubeInBuffer)->Destroy();
    }

    __aicore__ inline void Reset()
    {
        MATMUL_MODULE(CubeInBuffer)->Reset();
    }

private:
    // Aligned element count of one single-shape matrix. For int8 inputs in
    // the kIsRow orientation the height is aligned to c0Size_ (32) rather
    // than BLOCK_CUBE (16), matching the int8 fractal layout.
    template <bool kIsRow = false>
    __aicore__ inline int32_t GetSingleSizeAlign(const int32_t height, const int32_t width)
    {
        if constexpr (kIsRow && IsSameTypeV<TransT, int8_t>) {
            return CeilAlign(height, c0Size_) * CeilAlign(width, c0Size_);
        } else {
            return CeilAlign(height, BLOCK_CUBE) * CeilAlign(width, c0Size_);
        }
    }

    // singleHeight rounded up to a whole cube block (BLOCK_CUBE rows).
    template <bool isTrans = false>
    __aicore__ constexpr int32_t GetSingleHeightAlign()
    {
        return CeilAlign(MATMUL_MODULE(MatmulShapeInfo)->template GetSingleHeight<isTrans>(), BLOCK_CUBE);
    }

    // singleWidth rounded up to a whole C0 column.
    template <bool isTrans = false>
    __aicore__ constexpr int32_t GetSingleWidthAlign()
    {
        return CeilAlign(MATMUL_MODULE(MatmulShapeInfo)->template GetSingleWidth<isTrans>(), c0Size_);
    }

    // Copies this split's share of the batch group, one NZ→NZ block copy per
    // batch. Source stride uses the standard-aligned single size, while the
    // destination stride uses GetSingleSizeAlign<kIsRow>, which can differ for
    // int8 in the kIsRow orientation; the final bool argument forwards that
    // same int8 special case to the copy utility.
    // Note: `batchOffset` is computed but not applied here — the caller
    // (BatchLoad) already offset srcTensor by the outer-batch base.
    template <typename TensorType, bool isTrans = false, bool kIsRow = false>
    __aicore__ inline void CopyBatchToCube(LocalTensor<TransT>& dstTensor, TensorType& srcTensor,
                                           const int32_t outerIdx, const int32_t splitIdx, const int32_t splitSize,
                                           const int32_t height, const int32_t width)
    {
        auto batchNum = MATMUL_MODULE(BatchLayout)->GetBatchNum() / splitSize;
        auto singleSizeAlign = GetSingleWidthAlign<isTrans>() * GetSingleHeightAlign<isTrans>();
        auto batchOffset = outerIdx * MATMUL_MODULE(BatchLayout)->GetBatchNum() * singleSizeAlign;

        int32_t alignHeight = CeilAlign(height, BLOCK_CUBE);
        int32_t alignWidth = CeilAlign(width, c0Size_);
        for (auto i = 0; i < batchNum; ++i) {
            MATMUL_MODULE(BatchDataCopyUtils)
                ->BatchCopyNZ2NZ(dstTensor[(i + batchNum * splitIdx) * GetSingleSizeAlign<kIsRow>(height, width)],
                                 srcTensor[(i + batchNum * splitIdx) * singleSizeAlign], 0, 0, alignHeight, alignWidth,
                                 alignHeight, (kIsRow && IsSameTypeV<TransT, int8_t>));
        }
    }
};

} // namespace matmul
#endif // IMPL_MATMUL_MODULES_STAGE_COPY_CUBE_IN_BATCH_BATCH_COPY_CUBE_IN_V200_H
