/**
 * Copyright (c) 2024 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */
/*!
 * \file batch_copy_cube_in.h
 * \brief Batch Matmul CopyIn specializations that stage ND/NZ-format input
 *        matrices from GM into the cube input buffer (non-UB path).
 */

#ifndef IMPL_MATMUL_MODULES_STAGE_COPY_CUBE_IN_BATCH_BATCH_COPY_CUBE_IN_H
#define IMPL_MATMUL_MODULES_STAGE_COPY_CUBE_IN_BATCH_BATCH_COPY_CUBE_IN_H

#include "batch_copy_cube_in_intf.h"
#include "batch_layout.h"
#include "../data_copy_wrapper.h"
#include "../../../resource/cube_in_buffer/cube_in_buffer.h"
#include "../copy_cube_in_params.h"
#include "../../../param/matmul_var.h"

namespace matmul {
// Specialized Template Class of Batch Matmul CopyIn
// Batch Matmul ND Format Data CopyIn From GM/UB
template <typename IMPL, class INPUT_TYPE, const auto& MM_CFG>
class BatchCopyCubeIn<IMPL, INPUT_TYPE, MM_CFG,
                enable_if_t<!MatmulFeatureTrait<MM_CFG>::IsNeedUB() &&
                                  GetCopyCubeInType<INPUT_TYPE, MM_CFG>() == CopyCubeInType::BMM &&
                                  INPUT_TYPE::format == CubeFormat::ND>>
{
    MATMUL_USE_MODULE_ON(MatmulVar, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(MatmulShapeInfo, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(CubeInBuffer, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(BatchLayout, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(BatchDataCopyUtils, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(CopyCubeInParams, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(MatmulTensorInfo, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(MatmulShapeTiling, INPUT_TYPE::TAG);
    using TransT = typename INPUT_TYPE::TRANS_T;  // element type after on-chip transform
    using SrcT = typename INPUT_TYPE::T;          // element type of the source tensor in GM

public:
    inline __aicore__ BatchCopyCubeIn() = default;
    inline __aicore__ ~BatchCopyCubeIn() = default;

    // Initializes the cube input buffer with capacity for all batches:
    // batchNum * aligned-single-matrix-size elements, in a single buffer slot.
    __aicore__ inline void Init()
    {
        MATMUL_MODULE(CubeInBuffer)
            ->Init(MATMUL_MODULE(MatmulShapeTiling)->GetBatchNum() *
                       MATMUL_MODULE(CopyCubeInParams)->template GetSingleSizeAlign<INPUT_TYPE::isTrans>(),
                   1);
    }

    // Records the GM source address and transpose flag, and resets the input
    // buffer so stale cached data is not reused for the new input.
    __aicore__ inline void SetInput(__gm__ SrcT *srcGlobalAddr, bool isTranspose = false)
    {
        MATMUL_MODULE(MatmulTensorInfo)->SetGlobalAddr(srcGlobalAddr, isTranspose);
        MATMUL_MODULE(CubeInBuffer)->Reset();
    }

    // Intentionally a no-op: this specialization is selected only when UB input
    // is not needed (!IsNeedUB), so a TBuffAddr-based source is not supported.
    __aicore__ inline void SetInput(const TBuffAddr& address, bool isTranspose = false)
    {}

    // Copies a group of batch matrices from GM into dstTensor (L1), converting
    // ND layout to NZ. Dispatches on the runtime transpose flag:
    //  - height/width arguments are swapped for the transposed case;
    //  - the second template argument (IS_KROW) is true when K is the row
    //    dimension of the stored matrix: transposed A, or non-transposed B.
    // outerIdx selects the multi-batch group; splitIdx/splitSize select a
    // sub-range of the batches within the group.
    __aicore__ inline void BatchLoad(LocalTensor<TransT>& dstTensor, const uint32_t matrixStride,
                                     const int32_t outerIdx, const int32_t splitIdx, const int32_t splitSize)
    {
        ASCENDC_ASSERT((MATMUL_MODULE(BatchLayout)->IsLayoutGValid()), {
            KERNEL_LOG(KERNEL_ERROR, "multi batch calculation of multiple lines of S is not supported");
        });
        if (MATMUL_MODULE(MatmulShapeInfo)->IsTranspose()) {
            return CopyBatchToCube < true,
                   INPUT_TYPE::TAG == InputTypeTag::A > (dstTensor, matrixStride, outerIdx, splitIdx, splitSize,
                                                         MATMUL_MODULE(MatmulShapeInfo)->template GetSingleWidth(),
                                                         MATMUL_MODULE(MatmulShapeInfo)->template GetSingleHeight(),
                                                         MATMUL_MODULE(MatmulShapeInfo)->template GetSingleWidth<false, true>(),
                                                         MATMUL_MODULE(MatmulShapeInfo)->template GetSingleHeight<false, true>());
        } else {
            return CopyBatchToCube < false,
                   INPUT_TYPE::TAG == InputTypeTag::B > (dstTensor, matrixStride, outerIdx, splitIdx, splitSize,
                                                         MATMUL_MODULE(MatmulShapeInfo)->template GetSingleHeight(),
                                                         MATMUL_MODULE(MatmulShapeInfo)->template GetSingleWidth(),
                                                         MATMUL_MODULE(MatmulShapeInfo)->template GetSingleHeight<false, true>(),
                                                         MATMUL_MODULE(MatmulShapeInfo)->template GetSingleWidth<false, true>());
        }
    }

    // Returns a LocalTensor view of the already-staged local buffer; no copy is
    // performed here (the row/col/tile parameters are unused in this path).
    __aicore__ inline LocalTensor<TransT> LoadData(int curRow, int curCol, int tileHeight, int tileWidth)
    {
        LocalTensor<TransT> localTensor;
        localTensor.SetAddr(MATMUL_MODULE(MatmulTensorInfo)->GetLocalAddr());
        return localTensor;
    }

    // Releases the batch tensor and tears down the input buffer after a batch
    // computation completes.
    __aicore__ inline void BatchDestroy()
    {
        MATMUL_MODULE(CubeInBuffer)->FreeTensor();
        MATMUL_MODULE(CubeInBuffer)->Destroy();
    }

    // Allocates a tensor slot from the input buffer for iteration iterIndex.
    __aicore__ inline LocalTensor<TransT> AllocTensor(int32_t iterIndex = 0)
    {
        return MATMUL_MODULE(CubeInBuffer)->AllocTensor(iterIndex);
    }

    // No-op: nothing to clear for this copy-in strategy.
    __aicore__ inline void ClearLoadData(const LocalTensor<TransT>& tensor = NULL_TENSOR<TransT>,
        int32_t curRow = 0, int32_t curCol = 0)
    {}

    // Destroys the input buffer (without freeing an outstanding tensor).
    __aicore__ inline void Destroy()
    {
        MATMUL_MODULE(CubeInBuffer)->Destroy();
    }

    // Resets buffer bookkeeping so the next load starts from a clean state.
    __aicore__ inline void Reset()
    {
        MATMUL_MODULE(CubeInBuffer)->Reset();
    }

private:
    // Performs the actual ND->NZ copy of batchNum matrices from GM to dstTensor.
    // IS_TRANS: matrix is stored transposed; IS_KROW: K is the row dimension.
    // height/width are the copy shape; varHeight/varWidth are the shape values
    // used for source-offset math (may differ from the copy shape).
    template <bool IS_TRANS = false, bool IS_KROW = false>
    __aicore__ inline void CopyBatchToCube(LocalTensor<TransT>& dstTensor, const uint32_t matrixStride,
                                           const int32_t outerIdx, const int32_t splitIdx, const int32_t splitSize,
                                           const int32_t height, int32_t width, int32_t varHeight, int32_t varWidth)
    {
        auto srcStride = MATMUL_MODULE(BatchLayout)->GetSrcStride(matrixStride, varHeight, varWidth);
        auto srcDValue = MATMUL_MODULE(BatchLayout)->template GetSrcDValue<IS_TRANS>();
        auto iterNum = MATMUL_MODULE(BatchLayout)->GetLoopNum();
        auto batchNum = MATMUL_MODULE(BatchLayout)->GetBatchNum() / splitSize;
        auto baseSizeAlign = GetSingleSizeAlign(height, width);
        auto batchSingleSize = batchNum * MATMUL_MODULE(CopyCubeInParams)->GetSingleSize();
        // Offset of this outer group within the full batched GM tensor.
        int64_t batchOffset = outerIdx * MATMUL_MODULE(MatmulShapeTiling)->GetBatchNum() * varHeight * varWidth;
        int64_t iterOffset = 0;
        uint64_t dstOffset = batchNum * splitIdx * baseSizeAlign;
        uint64_t srcOffset = batchNum * splitIdx * srcStride;
        for (int32_t idx = 0; idx < iterNum; ++idx) {
            // NOTE(review): dstOffset accumulates iterOffset on top of the
            // per-batch increments from the previous pass, and srcOffset is
            // likewise not reset between passes (the GM base is advanced via
            // SetAddr instead) — presumably intentional layout math; confirm
            // against BatchLayout's GetLoopNum/GetSrcStride contract.
            dstOffset += iterOffset;
            GlobalTensor<SrcT> srcGlobal;
            srcGlobal.SetGlobalBuffer(MATMUL_MODULE(MatmulTensorInfo)->GetGlobalAddr());
            srcGlobal.SetAddr(iterOffset + batchOffset);
            if (srcStride >= UINT16_MAX) {
                // Stride does not fit the 16-bit stride field of a batched
                // copy, so each batch matrix is copied individually.
                for (int i = 0; i < batchNum; ++i) {
                    MATMUL_MODULE(BatchDataCopyUtils)
                        ->BatchCopyND2NZ(dstTensor[dstOffset], srcGlobal[srcOffset], 0, 0, height, width, srcDValue);
                    dstOffset += baseSizeAlign;
                    srcOffset += srcStride;
                }
            } else {
                // Single batched ND2NZ copy covering all batchNum matrices.
                MATMUL_MODULE(BatchDataCopyUtils)
                    ->BatchCopyND2NZ(dstTensor[dstOffset], srcGlobal[srcOffset], 0, 0, height, width,
                                    srcDValue, batchNum, srcStride, baseSizeAlign);
            }
            iterOffset += batchSingleSize;
        }
    }

    // Size in elements of one matrix after NZ alignment:
    // rows padded to BLOCK_CUBE, columns padded to the C0 size of TransT.
    __aicore__ inline int32_t GetSingleSizeAlign(const int32_t height, const int32_t width)
    {
        return CeilAlign(height, BLOCK_CUBE) * CeilAlign(width, c0Size_);
    }

private:
    // C0 (fractal inner-axis) size in elements, determined by TransT.
    constexpr static int32_t c0Size_ = AuxGetC0Size<TransT>();
};

// Specialized Template Class of Batch Matmul CopyIn
// Batch Matmul NZ Format Data CopyIn From GM/UB
template <typename IMPL, class INPUT_TYPE, const auto& MM_CFG>
class BatchCopyCubeIn<IMPL, INPUT_TYPE, MM_CFG,
                enable_if_t<(!MatmulFeatureTrait<MM_CFG>::IsNeedUB()) &&
                                  GetCopyCubeInType<INPUT_TYPE, MM_CFG>() == CopyCubeInType::BMM &&
                                  (INPUT_TYPE::format == CubeFormat::NZ)>>
{
    MATMUL_USE_MODULE_ON(MatmulVar, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(MatmulShapeInfo, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(CubeInBuffer, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(BatchLayout, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(BatchDataCopyUtils, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(CopyCubeInParams, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(MatmulTensorInfo, INPUT_TYPE::TAG);
    MATMUL_USE_MODULE_ON(MatmulShapeTiling, INPUT_TYPE::TAG);
    using TransT = typename INPUT_TYPE::TRANS_T;  // element type after on-chip transform
    using SrcT = typename INPUT_TYPE::T;          // element type of the source tensor in GM

public:
    inline __aicore__ BatchCopyCubeIn() = default;
    inline __aicore__ ~BatchCopyCubeIn() = default;
    // Initializes the cube input buffer with capacity for all batches:
    // batchNum * aligned-single-matrix-size elements, in a single buffer slot.
    __aicore__ inline void Init()
    {
        MATMUL_MODULE(CubeInBuffer)
            ->Init(MATMUL_MODULE(MatmulShapeTiling)->GetBatchNum() *
                       MATMUL_MODULE(CopyCubeInParams)->template GetSingleSizeAlign<INPUT_TYPE::isTrans>(),
                   1);
    }

    // Records the GM source address and transpose flag, and resets the input
    // buffer so stale cached data is not reused for the new input.
    __aicore__ inline void SetInput(__gm__ SrcT *srcGlobalAddr, bool isTranspose = false)
    {
        MATMUL_MODULE(MatmulTensorInfo)->SetGlobalAddr(srcGlobalAddr, isTranspose);
        MATMUL_MODULE(CubeInBuffer)->Reset();
    }

    // Intentionally a no-op: this specialization is selected only when UB input
    // is not needed (!IsNeedUB), so a TBuffAddr-based source is not supported.
    __aicore__ inline void SetInput(const TBuffAddr& address, bool isTranspose = false)
    {}

    // Copies a group of batch matrices from GM into dstTensor (L1); source is
    // already NZ so this is an NZ->NZ staged copy. Dispatches on the runtime
    // transpose flag: height/width are swapped for the transposed case, and
    // the second template argument (IS_KROW) is true when K is the row
    // dimension of the stored matrix (transposed A, or non-transposed B).
    // NOTE(review): the non-transpose branch passes GetSingleHeight()/
    // GetSingleWidth() again as varHeight/varWidth, while the transpose branch
    // (and the ND specialization) uses the <false, true> overload — confirm
    // this asymmetry is intended.
    __aicore__ inline void BatchLoad(LocalTensor<TransT>& dstTensor, const uint32_t matrixStride,
                                     const int32_t outerIdx, const int32_t splitIdx, const int32_t splitSize)
    {
        if (MATMUL_MODULE(MatmulShapeInfo)->IsTranspose()) {
            CopyBatchToCube<true, INPUT_TYPE::TAG == InputTypeTag::A>(
                dstTensor, outerIdx, splitIdx, splitSize, MATMUL_MODULE(MatmulShapeInfo)->template GetSingleWidth(),
                MATMUL_MODULE(MatmulShapeInfo)->template GetSingleHeight(),
                MATMUL_MODULE(MatmulShapeInfo)->template GetSingleWidth<false, true>(),
                MATMUL_MODULE(MatmulShapeInfo)->template GetSingleHeight<false, true>());
        } else {
            CopyBatchToCube<false, INPUT_TYPE::TAG == InputTypeTag::B>(
                dstTensor, outerIdx, splitIdx, splitSize, MATMUL_MODULE(MatmulShapeInfo)->template GetSingleHeight(),
                MATMUL_MODULE(MatmulShapeInfo)->template GetSingleWidth(),
                MATMUL_MODULE(MatmulShapeInfo)->template GetSingleHeight(),
                MATMUL_MODULE(MatmulShapeInfo)->template GetSingleWidth());
        }
    }

    // Returns a LocalTensor view of the already-staged local buffer; no copy is
    // performed here (the row/col/tile parameters are unused in this path).
    __aicore__ inline LocalTensor<TransT> LoadData(int curRow, int curCol, int tileHeight, int tileWidth)
    {
        LocalTensor<TransT> localTensor;
        localTensor.SetAddr(MATMUL_MODULE(MatmulTensorInfo)->GetLocalAddr());
        return localTensor;
    }

    // Releases the batch tensor and tears down the input buffer after a batch
    // computation completes.
    __aicore__ inline void BatchDestroy()
    {
        MATMUL_MODULE(CubeInBuffer)->FreeTensor();
        MATMUL_MODULE(CubeInBuffer)->Destroy();
    }

    // Allocates a tensor slot from the input buffer for iteration iterIndex.
    __aicore__ inline LocalTensor<TransT> AllocTensor(int32_t iterIndex = 0)
    {
        return MATMUL_MODULE(CubeInBuffer)->AllocTensor(iterIndex);
    }

    // No-op: nothing to clear for this copy-in strategy.
    __aicore__ inline void ClearLoadData(const LocalTensor<TransT>& tensor = NULL_TENSOR<TransT>,
        int32_t curRow = 0, int32_t curCol = 0)
    {}

    // Destroys the input buffer (without freeing an outstanding tensor).
    __aicore__ inline void Destroy()
    {
        MATMUL_MODULE(CubeInBuffer)->Destroy();
    }

    // Resets buffer bookkeeping so the next load starts from a clean state.
    __aicore__ inline void Reset()
    {
        MATMUL_MODULE(CubeInBuffer)->Reset();
    }

private:
    // Copies batchNum NZ-format matrices, one per loop iteration, from GM to
    // dstTensor. IS_TRANS: matrix is stored transposed; IS_KROW: K is the row
    // dimension. height/width give the copy shape; varHeight/varWidth are used
    // to size each matrix for the source batch offset.
    template <bool IS_TRANS = false, bool IS_KROW = false>
    __aicore__ inline void CopyBatchToCube(LocalTensor<TransT>& dstTensor, const int32_t outerIdx,
                                           const int32_t splitIdx, const int32_t splitSize, const int32_t height,
                                           const int32_t width, int32_t varHeight, int32_t varWidth)
    {
        auto batchNum = MATMUL_MODULE(MatmulShapeTiling)->GetBatchNum() / splitSize;
        // Per-matrix sizes: source stride uses BLOCK_CUBE row alignment
        // (K_ALIGN_C0SIZE=false); see GetSingleSizeAlign.
        auto singleSizeAlign = GetSingleSizeAlign<IS_KROW, false>(height, width);
        auto singleSize = GetSingleSizeAlign<IS_KROW, false>(varHeight, varWidth);
        auto batchOffset = outerIdx * MATMUL_MODULE(MatmulShapeTiling)->GetBatchNum() * singleSize;
        // int8 with K as the row direction needs a special copy mode.
        bool iskRowDirec = IS_KROW && IsSameTypeV<TransT, int8_t>;

        GlobalTensor<SrcT> srcGlobal;
        srcGlobal.SetGlobalBuffer(MATMUL_MODULE(MatmulTensorInfo)->GetGlobalAddr());
        srcGlobal.SetAddr(batchOffset);

        int32_t alignHeight = CeilAlign(height, BLOCK_CUBE);
        int32_t alignWidth = CeilAlign(width, c0Size_);
        // Destination stride may differ from source stride: for int8 with
        // IS_KROW the destination rows are aligned to c0Size_ instead.
        auto singleSizeAlignDst = GetSingleSizeAlign<IS_KROW>(height, width);
        uint64_t dstOffset = batchNum * splitIdx * singleSizeAlignDst;
        uint64_t srcOffset = batchNum * splitIdx * singleSizeAlign;
        for (int i = 0; i < batchNum; ++i) {
            MATMUL_MODULE(BatchDataCopyUtils)
                ->BatchCopyNZ2NZ(dstTensor[dstOffset], srcGlobal[srcOffset], 0, 0, alignHeight, alignWidth, alignHeight,
                                iskRowDirec);
            dstOffset += singleSizeAlignDst;
            srcOffset += singleSizeAlign;
        }
    }

    // Size in elements of one NZ matrix after alignment. For int8 matrices
    // with K as the row dimension (and K_ALIGN_C0SIZE set), rows are padded to
    // c0Size_; otherwise rows pad to BLOCK_CUBE and columns to c0Size_.
    template <bool K_IS_ROW = false, bool K_ALIGN_C0SIZE = true>
    __aicore__ inline int32_t GetSingleSizeAlign(const int32_t height, const int32_t width)
    {
        if constexpr (K_IS_ROW && IsSameTypeV<TransT, int8_t> && K_ALIGN_C0SIZE) {
            return CeilAlign(height, c0Size_) * CeilAlign(width, c0Size_);
        } else {
            return CeilAlign(height, BLOCK_CUBE) * CeilAlign(width, c0Size_);
        }
    }

private:
    // C0 (fractal inner-axis) size in elements, determined by TransT.
    constexpr static int32_t c0Size_ = AuxGetC0Size<TransT>();
};

} // namespace matmul
#endif // IMPL_MATMUL_MODULES_STAGE_COPY_CUBE_IN_BATCH_BATCH_COPY_CUBE_IN_H
