/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2023. All rights reserved.
 *
 * @brief cube in buffer (double global buffer) ut for ascend910B1
 *
 */
#include <gtest/gtest.h>
#include "kernel_operator.h"
#include "lib/matmul/tiling.h"
#include "impl/matmul/modules/matmul_param.h"
#include "impl/matmul/modules/matmul_policy.h"
#include "impl/matmul/modules/resource/cube_in_buffer/cube_in_buffer.h"
#include "impl/matmul/modules/matmul_private_modules.h"

using namespace std;
using namespace AscendC;
using namespace matmul;

namespace {
/**
 * @brief Test policy that overrides the default MatmulPolicy so both inputs
 *        use CubeInBuffer directly (exercising the buffer module under test).
 */
template <const auto& MM_CFG, typename IMPL, typename A_TYPE, typename B_TYPE, typename C_TYPE, typename BIAS_TYPE>
class CustomMatmulPolicy : public matmul::MatmulPolicy<MM_CFG, IMPL, A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE>
{
public:
    using CubeInBufferA = matmul::CubeInBuffer<IMPL, MatmulInputAType<A_TYPE, typename A_TYPE::T>, MM_CFG>;
    // Fix: key the B-side buffer on B_TYPE's element type, not A_TYPE's
    // (was `typename A_TYPE::T`, an apparent copy-paste slip; benign in this
    // file only because both inputs happen to be half).
    using CubeInBufferB = matmul::CubeInBuffer<IMPL, MatmulInputBType<B_TYPE, typename B_TYPE::T>, MM_CFG>;
};

constexpr MatmulConfig CFG_IBSHARE_NORM_DB = GetIBShareNormConfig(false, false, false, BatchMode::NONE, true);
/**
 * @brief Minimal MatmulImpl shim for unit testing: composes only the
 *        CubeInBufferA module (plus its private params module) and re-exports
 *        its interface so tests can drive the buffer directly.
 */
template <class A_TYPE, class B_TYPE, class C_TYPE, class BIAS_TYPE, const MatmulConfig& MM_CFG, class MM_CB,
MATMUL_POLICY_DEFAULT_OF(MatmulPolicy)>
class MatmulImpl
: MATMUL_IMPORT_MODULE(CubeInBufferA)
, MATMUL_IMPORT_MODULE_PRIVATE(CubeInBufferParamsA) {
    MATMUL_ALLOW_USING(CubeInBufferA);
    MATMUL_ALLOW_USING_PRIVATE(CubeInBufferParamsA);

public:
    // Re-export the full CubeInBuffer interface under this class's scope.
    using CubeInBufferA::Init;
    using CubeInBufferA::Destroy;
    using CubeInBufferA::AllocTensor;
    using CubeInBufferA::FreeTensor;
    using CubeInBufferA::Hit;
    using CubeInBufferA::GetBuffer;
    using CubeInBufferA::Reset;
    using CubeInBufferA::GetIterIndex;
    using CubeInBufferA::EnQue;
    using CubeInBufferA::DeQue;
    using CubeInBufferA::SetOrgAddr;

public:
    using VAR_PARAMS =
        typename MatmulParams<A_TYPE, B_TYPE, C_TYPE, BIAS_TYPE, MM_CFG, GetMatmulVersion(MM_CFG)>::PARAMS;
    // NOTE(review): both Conditional branches are CubeInBufferParamsA — only
    // the A-side params module is imported here, so the B tag intentionally
    // (presumably) aliases A; confirm if B coverage is ever added.
    template <InputTypeTag TAG>
    using CubeInBufferParams =
        typename AscendC::Conditional<TAG == InputTypeTag::A, CubeInBufferParamsA, CubeInBufferParamsA>::type;
    MatmulImpl() {
        InitVar();
    }

    // Expose the internal params so tests can inspect/mutate runtime state.
    VAR_PARAMS& GetVar() {
        return var;
    }

    // Wire the locally-owned tiling and pipe into the params struct.
    void InitVar() {
        var.tiling_.SetTiling(&tiling);
        var.tpipe_ = &pipe;
    }

    // Configure the tiling fields the buffer logic reads, plus the k-iteration
    // count; iterateOrder is pinned to 0 (M-major) for these tests.
    void SetInitParams(int32_t stepM, int32_t stepKa, int32_t baseM, int32_t baseK, int32_t kIter) {
        tiling.stepM = stepM;
        tiling.stepKa = stepKa;
        tiling.baseM = baseM;
        tiling.baseK = baseK;
        var.kIter_ = kIter;
        tiling.iterateOrder = 0;
    }

    // Set the per-iteration actual block sizes (tail handling inputs).
    void SetRuntimeParams(int32_t baseUseM, int32_t baseUseK) {
        var.baseUseM_ = baseUseM;
        var.baseUseK_ = baseUseK;
    }

private:
    TCubeTiling tiling;  // backing tiling storage referenced by var.tiling_
    TPipe pipe;          // backing pipe referenced by var.tpipe_
    VAR_PARAMS var;
};
}

class test_cube_in_buffer_double_global_buffer : public testing::Test {
protected:
    void SetUp() {}
    void TearDown() {}

private:
    using A_TYPE_IBSHARE = matmul::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, half, false, LayoutMode::NONE, true>;
    using B_TYPE = matmul::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, half, false>;
    using C_TYPE = matmul::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, float>;
    using BIAS_TYPE = matmul::MatmulType<AscendC::TPosition::GM, CubeFormat::ND, float>;

    MatmulImpl<A_TYPE_IBSHARE, B_TYPE, C_TYPE, BIAS_TYPE, CFG_IBSHARE_NORM_DB, void, CustomMatmulPolicy> mm;
    GlobalCache gCache;
};

// GetIterIndex should depend only on the k coordinate in this configuration:
// same k -> same index regardless of m. (Removed unused local `mIter`.)
TEST_F(test_cube_in_buffer_double_global_buffer, get_iter_index) {
    int32_t kIter = 3;
    mm.SetInitParams(2, 2, 32, 32, kIter);
    mm.Init(1024, 4);
    ASSERT_EQ(mm.GetIterIndex(0, 1), 1);
    ASSERT_EQ(mm.GetIterIndex(1, 1), 1);  // m changed, index unchanged
    ASSERT_EQ(mm.GetIterIndex(1, 2), 2);
}

// Drive the full CubeInBuffer interface through an m/n/k iteration sweep and
// confirm no cache hits occur in this configuration (hit counter stays 0).
TEST_F(test_cube_in_buffer_double_global_buffer, all_interface_normal)
{
    const int32_t mLoops = 2;
    const int32_t kLoops = 2;
    const int32_t nLoops = 2;
    int32_t hitCount = 0;
    mm.SetInitParams(2, 2, 32, 32, kLoops);
    mm.Init(1024, 4);
    GlobalTensor<half> fakeInput;
    mm.SetOrgAddr(fakeInput.address_);
    LocalTensor<half> fakeTensor;
    for (int32_t mIdx = 0; mIdx < mLoops; ++mIdx) {
        for (int32_t nIdx = 0; nIdx < nLoops; ++nIdx) {
            for (int32_t kIdx = 0; kIdx < kLoops; ++kIdx) {
                const int32_t iterIndex = mm.GetIterIndex(mIdx, nIdx);
                if (!mm.Hit(iterIndex)) {
                    // Miss path: allocate, then round-trip through the queue.
                    fakeTensor = mm.AllocTensor(iterIndex);
                    mm.EnQue(fakeTensor);
                    mm.DeQue();
                } else {
                    fakeTensor = mm.GetBuffer(iterIndex);
                    ++hitCount;
                }
            }
        }
        mm.Reset();  // invalidate cached state between m iterations
    }
    mm.Destroy();
    ASSERT_EQ(hitCount, 0);
}
