/*
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

#include "kernel_operator.h"
#include "lib/matmul_intf.h"
#include "batch_matmul_tscm_custom_impl.h"

namespace BatchMatmulCustom {
/**
 * @brief Copy the TCubeTiling structure word-by-word from global memory (GM)
 *        into local (UB/stack) memory so the kernel can read it directly.
 * @param tiling   Destination tiling struct in local memory.
 * @param tilingGM Source address of the tiling data in global memory.
 *
 * NOTE(review): assumes sizeof(TCubeTiling) is a multiple of sizeof(uint32_t);
 * any trailing bytes beyond the last full word would not be copied.
 */
__aicore__ inline void CopyTiling(TCubeTiling* tiling, GM_ADDR tilingGM)
{
    uint32_t* dst = reinterpret_cast<uint32_t*>(tiling);
    auto src = reinterpret_cast<__gm__ uint32_t*>(tilingGM);

    // size_t index avoids the signed/unsigned comparison of the original
    // `int i < sizeof(...)` loop; indexed copy replaces the dual-increment
    // (i++, ptr++) pattern, which was easy to get out of sync.
    constexpr size_t wordCount = sizeof(TCubeTiling) / sizeof(uint32_t);
    for (size_t i = 0; i < wordCount; ++i) {
        dst[i] = src[i];
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE>
__aicore__ inline void BatchMatmulKernel<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE>::Init(GM_ADDR a, GM_ADDR b, GM_ADDR bias,
                                         GM_ADDR c, GM_ADDR workspace, const TCubeTiling& tiling)
{
    // Keep a local copy of the tiling parameters for later use in Process().
    this->tiling = tiling;

    // Total element counts of each global buffer, derived from the
    // per-tensor layout info (B/S/N/G/D dimensions) carried in the tiling.
    const int32_t aElemCount =
        tiling.ALayoutInfoB * tiling.ALayoutInfoS * tiling.ALayoutInfoN * tiling.ALayoutInfoG * tiling.ALayoutInfoD;
    const int32_t bElemCount =
        tiling.BLayoutInfoB * tiling.BLayoutInfoS * tiling.BLayoutInfoN * tiling.BLayoutInfoG * tiling.BLayoutInfoD;
    const int32_t cElemCount =
        tiling.CLayoutInfoB * tiling.CLayoutInfoS1 * tiling.CLayoutInfoN * tiling.CLayoutInfoG * tiling.CLayoutInfoS2;
    const int32_t biasElemCount =
        tiling.CLayoutInfoB * tiling.CLayoutInfoN * tiling.CLayoutInfoG * tiling.CLayoutInfoS2;

    // Bind the raw GM addresses to typed global tensors with their extents.
    aGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ aType*>(a), aElemCount);
    bGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ bType*>(b), bElemCount);
    cGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ cType*>(c), cElemCount);
    biasGlobal.SetGlobalBuffer(reinterpret_cast<__gm__ biasType*>(bias), biasElemCount);

    // Bail out if the system workspace has not been set up.
    if (GetSysWorkSpacePtr() == nullptr) {
        return;
    }
}

template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE>
__aicore__ inline void BatchMatmulKernel<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE>::Process(AscendC::TPipe* pipe, int32_t batchA, int32_t batchB)
{
    // Stage the A matrix through TSCM (L1 storage custom memory): allocate a
    // buffer sized for batchA tiles of M x Ka elements, bulk-copy A from GM,
    // then pass the staged buffer through the EnQue/DeQue pair so the copy is
    // synchronized before matmul consumes it.
    // Copy aMatrix from gm to tscm
    AscendC::TSCM<AscendC::TPosition::GM, 1> scm;
    pipe->InitBuffer(scm, 1, batchA * tiling.M * tiling.Ka * sizeof(aType));
    auto scmTensor = scm.AllocTensor<aType>();
    DataCopy(scmTensor, aGlobal, batchA * tiling.M * tiling.Ka);
    scm.EnQue(scmTensor);
    // NOTE(review): scmLocal is never freed back to the queue here — verify
    // against AscendC TSCM lifetime rules whether FreeTensor is required.
    AscendC::LocalTensor<aType> scmLocal = scm.DeQue<aType>();

    // A comes from TSCM (not transposed); B is read directly from GM with
    // transpose enabled, matching B_TYPE's isTrans=true template argument.
    matmulObj.SetTensorA(scmLocal, false);
    matmulObj.SetTensorB(bGlobal, true); // B transpose
    if (tiling.isBias) {
        matmulObj.SetBias(biasGlobal);
    }

    // Run the batched matmul: batchA batches of A against batchB batches of B,
    // writing results to cGlobal. Last arg false — presumably "no sequential
    // write/accumulate" flag; confirm against the IterateBatch API reference.
    matmulObj.IterateBatch(cGlobal, batchA, batchB, false);
}
} // namespace BatchMatmulCustom

/**
 * @brief Kernel entry: batched matmul with the A matrix staged through TSCM.
 * @param a/b/bias/c  GM addresses of the input, bias and output tensors.
 * @param workspace   GM workspace address (forwarded to Init).
 * @param tilingGm    GM address of the serialized TCubeTiling data.
 */
extern "C" __global__ __aicore__ void batch_matmul_tscm_custom(GM_ADDR a, GM_ADDR b, GM_ADDR bias, GM_ADDR c, GM_ADDR workspace,
                                                        GM_ADDR tilingGm)
{
    // Number of batches processed per side in a single Process() call.
    constexpr int32_t batchA = 3;
    constexpr int32_t batchB = 3;

    // Fetch the tiling parameters from global memory.
    TCubeTiling tiling;
    BatchMatmulCustom::CopyTiling(&tiling, tilingGm);

    // Matmul type configuration: A is half/NZ staged in TSCM, B is half/ND in
    // GM and transposed, C and bias are float/ND in GM.
    using A_TYPE = AscendC::MatmulType<AscendC::TPosition::TSCM, CubeFormat::NZ, half, false, LayoutMode::NORMAL>;
    using B_TYPE = AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, half, true, LayoutMode::NORMAL>;
    using C_TYPE = AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, float, false, LayoutMode::NORMAL>;
    using BIAS_TYPE = AscendC::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, float>;

    // Register the matmul object with the pipe and tiling, then initialize
    // the kernel's global buffers and run the batched computation.
    BatchMatmulCustom::BatchMatmulKernel<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE> batchMatmulKernel;
    AscendC::TPipe pipe;
    REGIST_MATMUL_OBJ(&pipe, GetSysWorkSpacePtr(), batchMatmulKernel.matmulObj, &tiling);
    batchMatmulKernel.Init(a, b, bias, c, workspace, tiling);
    batchMatmulKernel.Process(&pipe, batchA, batchB);
}

#ifndef ASCENDC_CPU_DEBUG
// Host-side launch wrapper for the NPU build (excluded in CPU-debug mode,
// where the kernel is invoked directly instead of via the launch syntax).
// blockDim selects the number of cores; stream is the ACL/runtime stream the
// kernel is enqueued on; the remaining GM_ADDR args are forwarded unchanged.
void batch_matmul_tscm_custom_do(uint32_t blockDim, void* stream, GM_ADDR a, GM_ADDR b, GM_ADDR bias,
                         GM_ADDR c, GM_ADDR workspace, GM_ADDR tilingGm)
{
    // invoke the kernel function through the <<<>>> symbol
    batch_matmul_tscm_custom<<<blockDim, nullptr, stream>>>(a, b, bias, c, workspace, tilingGm);
}
#endif