/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2023. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fallback_common.h"
#include "fallback_opapi.h"

#ifdef __cplusplus
extern "C" {
#endif

namespace fallback {
using namespace ge;
using namespace gert;
// IR slot indices of QuantBatchMatmulV3: inputs 0-2 (x1, x2, scale) are mandatory;
// inputs 3-5 (offset, bias, pertoken_scale) are optional; the op has a single output.
constexpr size_t QUANTMATMULV3_INPUTX1_INDEX = 0;
constexpr size_t QUANTMATMULV3_INPUTX2_INDEX = 1;
constexpr size_t QUANTMATMULV3_SCALE_INDEX = 2;
constexpr size_t QUANTMATMULV3_OFFSET_INDEX = 3;
constexpr size_t QUANTMATMULV3_BIAS_INDEX = 4;
constexpr size_t QUANTMATMULV3_PERTOKEN_SCALE_INDEX = 5;
constexpr size_t QUANTMATMULV3_OUTPUT_INDEX = 0;

/**
 * Fallback executor for QuantBatchMatmulV3: pulls the op's tensors and
 * attributes out of the kernel execution context and forwards them to the
 * aclnnQuantMatmulV4 single-operator API.
 *
 * @param host_api_ctx kernel execution context supplying inputs/attrs/outputs.
 * @return GRAPH_SUCCESS on success; GRAPH_FAILED if any mandatory tensor or
 *         the attribute container is missing, or the aclnn call fails.
 */
graphStatus QuantBatchMatmulV3ExecuteFunc(OpExecuteContext* host_api_ctx)
{
    OP_CHECK(host_api_ctx == nullptr, OP_LOGE("aclnnfallback quant_batch_matmul_v3", "host_api_ctx is null"),
             return GRAPH_FAILED);

    // Mandatory inputs: fail fast if any is absent.
    auto x1 = host_api_ctx->GetInputTensor(QUANTMATMULV3_INPUTX1_INDEX);
    OP_CHECK(x1 == nullptr, OP_LOGE("aclnnfallback quant_batch_matmul_v3", "x1 is null"), return GRAPH_FAILED);

    auto x2 = host_api_ctx->GetInputTensor(QUANTMATMULV3_INPUTX2_INDEX);
    OP_CHECK(x2 == nullptr, OP_LOGE("aclnnfallback quant_batch_matmul_v3", "x2 is null"), return GRAPH_FAILED);

    auto scale = host_api_ctx->GetInputTensor(QUANTMATMULV3_SCALE_INDEX);
    OP_CHECK(scale == nullptr, OP_LOGE("aclnnfallback quant_batch_matmul_v3", "scale is null"),
             return GRAPH_FAILED);

    // Optional inputs: a nullptr here simply means "not provided" and is
    // passed through to the aclnn API as-is.
    auto offset = host_api_ctx->GetOptionalInputTensor(QUANTMATMULV3_OFFSET_INDEX);

    auto bias = host_api_ctx->GetOptionalInputTensor(QUANTMATMULV3_BIAS_INDEX);

    auto pertokenScale = host_api_ctx->GetOptionalInputTensor(QUANTMATMULV3_PERTOKEN_SCALE_INDEX);

    // Validate the output right after retrieval, consistent with the input checks.
    auto output = host_api_ctx->GetOutputTensor(QUANTMATMULV3_OUTPUT_INDEX);
    OP_CHECK(output == nullptr, OP_LOGE("aclnnfallback quant_batch_matmul_v3", "output is null"), return GRAPH_FAILED);

    auto attrs = host_api_ctx->GetAttrs();
    OP_CHECK(attrs == nullptr, OP_LOGE("aclnnfallback quant_batch_matmul_v3", "attrs is null"), return GRAPH_FAILED);
    // In QuantBatchMatmulV3 the transpose attrs sit at indices 1 and 2.
    constexpr size_t QUANTMATMULV3_ATTR_TRANSPOSE_X1_INDEX = 1;
    constexpr size_t QUANTMATMULV3_ATTR_TRANSPOSE_X2_INDEX = 2;
    const bool *transposeX1Ptr = attrs->GetBool(QUANTMATMULV3_ATTR_TRANSPOSE_X1_INDEX);
    const bool *transposeX2Ptr = attrs->GetBool(QUANTMATMULV3_ATTR_TRANSPOSE_X2_INDEX);
    // A missing transpose attribute defaults to "no transpose".
    const bool transposeX1 = (transposeX1Ptr != nullptr ? *transposeX1Ptr : false);
    const bool transposeX2 = (transposeX2Ptr != nullptr ? *transposeX2Ptr : false);

    // Execute the aclnn single-op API. Note the argument order expected by
    // aclnnQuantMatmulV4: pertokenScale comes before bias.
    auto apiRet = EXEC_OPAPI_CMD(aclnnQuantMatmulV4, x1, x2, scale, offset, pertokenScale, bias, transposeX1,
                                 transposeX2, output);
    OP_CHECK(apiRet != GRAPH_SUCCESS, OP_LOGE("aclnnfallback quant_batch_matmul_v3", "api_ret failed:%d", apiRet),
             return GRAPH_FAILED);

    return GRAPH_SUCCESS;
}

IMPL_OP(QuantBatchMatmulV3).OpExecuteFunc(QuantBatchMatmulV3ExecuteFunc);

}  // namespace fallback

#ifdef __cplusplus
}
#endif
