/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2023. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fallback_common.h"
#include "fallback_opapi.h"

#ifdef __cplusplus
extern "C" {
#endif

namespace fallback {
using namespace ge;
using namespace gert;

// Input/output slot indexes of the GroupedMatmul op as seen by the GE execution
// context. The ordering below must match the op's IR definition — presumably
// x, weight, bias, scale, offset, antiquant_scale, antiquant_offset, group_list
// (verify against the op prototype if the IR changes).
constexpr size_t INDEX_GMM_INPUT_X = 0;
constexpr size_t INDEX_GMM_INPUT_WEIGHT = 1;
constexpr size_t INDEX_GMM_INPUT_BIAS = 2;
constexpr size_t INDEX_GMM_INPUT_SCALE = 3;
constexpr size_t INDEX_GMM_INPUT_OFFSET = 4;
constexpr size_t INDEX_GMM_INPUT_ANTIQUANT_SCALE = 5;
constexpr size_t INDEX_GMM_INPUT_ANTIQUANT_OFFSET = 6;
constexpr size_t INDEX_GMM_INPUT_GROUP_LIST = 7;
constexpr size_t INDEX_GMM_OUTPUT_Y = 0;

// Collect every instance of the dynamic input at slot `index`, starting from
// instance `num`, until the runtime reports no further tensor. On return, `num`
// holds the number of tensors appended to `tensorList`.
// Always returns GRAPH_SUCCESS: an absent optional input simply yields an
// empty list, which is a valid state for the caller.
graphStatus PrepareInputTensorList(OpExecuteContext* host_api_ctx, std::vector<const gert::Tensor*>& tensorList, size_t index, size_t& num) {
  for (auto tensor = host_api_ctx->GetDynamicInputTensor(index, num);
       tensor != nullptr;
       tensor = host_api_ctx->GetDynamicInputTensor(index, num)) {
    tensorList.push_back(tensor);
    ++num;
  }
  return GRAPH_SUCCESS;
}

// Gather the output tensors for GroupedMatmul into `tensorList`.
// The number of outputs is determined by the split_item attribute:
//   0 / 1 -> one output per weight tensor (numGeWeight entries);
//   2 / 3 -> a single fused output tensor.
// Returns GRAPH_FAILED on an out-of-range split_item or a missing output tensor.
graphStatus PrepareOutputTensorList(OpExecuteContext* host_api_ctx, std::vector<const gert::Tensor*>& tensorList, size_t index, size_t numGeWeight, int32_t splitItem) {
  size_t outputCount = 0;
  switch (splitItem) {
    case 0:
    case 1:
      // Length of tensorListY equals that of tensorListWeight when split_item = 0 / 1
      outputCount = numGeWeight;
      break;
    case 2:
    case 3:
      // Length of tensorListY equals 1 when split_item = 2 / 3
      outputCount = 1;
      break;
    default:
      OP_LOGE("aclnnfallback", "Invalid value of split_item: %d, which must be one of 0/1/2/3.", splitItem);
      return GRAPH_FAILED;
  }

  for (size_t idx = 0; idx < outputCount; ++idx) {
    auto outTensor = host_api_ctx->GetOutputTensor(index + idx);
    if (outTensor == nullptr) {
      return GRAPH_FAILED;
    }
    tensorList.push_back(outTensor);
  }
  return GRAPH_SUCCESS;
}

// Read the optional host-side group_list input and convert it into an
// aclIntArray for the opapi call. When the input is absent (or carries no
// usable data), the array is built from an empty vector.
aclIntArray *GetIntArray(OpExecuteContext* host_api_ctx) {
  auto groupListGe = host_api_ctx->GetOptionalInputTensor(INDEX_GMM_INPUT_GROUP_LIST);
  std::vector<int64_t> shape;
  if (groupListGe != nullptr) {
    auto groupListDataPtr = groupListGe->GetData<int64_t>();
    auto &groupListShape = groupListGe->GetStorageShape();
    // Fix: guard the data pointer before dereferencing — the original code
    // would crash if the tensor reported a non-empty shape but had no host data.
    if (groupListDataPtr != nullptr && groupListShape.GetDimNum() > 0) {
      auto groupListLength = groupListShape.GetDim(0);
      shape.reserve(static_cast<size_t>(groupListLength));  // one allocation
      for (int64_t i = 0; i < groupListLength; i++) {
        shape.push_back(groupListDataPtr[i]);
      }
    }
  }
  return ConvertType(shape);
}

// Fallback executor for GroupedMatmul: gathers the dynamic input/output tensor
// lists and the split_item attribute from the GE execution context, then
// dispatches to the aclnnGroupedMatmul opapi.
// Returns GRAPH_FAILED when the context/attributes are missing, the output
// list cannot be prepared, or the opapi call fails.
graphStatus GroupedMatmulExecuteFunc(OpExecuteContext* host_api_ctx)
{
  OP_CHECK(host_api_ctx == nullptr, OP_LOGE("aclnnfallback", "host_api_ctx is null"), return GRAPH_FAILED);

  // Collect the seven dynamic input lists in the order aclnnGroupedMatmul
  // expects. PrepareInputTensorList stops at the first missing instance, so an
  // absent optional input simply yields an empty list (it never fails).
  std::vector<const gert::Tensor*> geTensorListX;
  size_t numGeX = 0;
  PrepareInputTensorList(host_api_ctx, geTensorListX, INDEX_GMM_INPUT_X, numGeX);
  std::vector<const gert::Tensor*> geTensorListWeight;
  size_t numGeWeight = 0;
  PrepareInputTensorList(host_api_ctx, geTensorListWeight, INDEX_GMM_INPUT_WEIGHT, numGeWeight);
  std::vector<const gert::Tensor*> geTensorListBias;
  size_t numGeBias = 0;
  PrepareInputTensorList(host_api_ctx, geTensorListBias, INDEX_GMM_INPUT_BIAS, numGeBias);
  std::vector<const gert::Tensor*> geTensorListScale;
  size_t numGeScale = 0;
  PrepareInputTensorList(host_api_ctx, geTensorListScale, INDEX_GMM_INPUT_SCALE, numGeScale);
  std::vector<const gert::Tensor*> geTensorListOffset;
  size_t numGeOffset = 0;
  PrepareInputTensorList(host_api_ctx, geTensorListOffset, INDEX_GMM_INPUT_OFFSET, numGeOffset);
  std::vector<const gert::Tensor*> geTensorListAntiquantScale;
  size_t numGeAntiquantScale = 0;
  PrepareInputTensorList(host_api_ctx, geTensorListAntiquantScale, INDEX_GMM_INPUT_ANTIQUANT_SCALE, numGeAntiquantScale);
  std::vector<const gert::Tensor*> geTensorListAntiquantOffset;
  size_t numGeAntiquantOffset = 0;
  PrepareInputTensorList(host_api_ctx, geTensorListAntiquantOffset, INDEX_GMM_INPUT_ANTIQUANT_OFFSET, numGeAntiquantOffset);

  auto groupListIntArray = GetIntArray(host_api_ctx);
  auto attrs = host_api_ctx->GetAttrs();
  OP_CHECK(attrs == nullptr, OP_LOGE("aclnnfallback", "attrs is null"), return GRAPH_FAILED);
  // Attribute 0 is split_item, which drives the output-list length.
  const int64_t* splitItemGe = attrs->GetAttrPointer<int64_t>(0);
  OP_CHECK(splitItemGe == nullptr, OP_LOGE("aclnnfallback", "splitItemGe is null"), return GRAPH_FAILED);

  std::vector<const gert::Tensor*> geTensorListY;
  // Bug fix: this return value was previously ignored, so an invalid
  // split_item or a missing output tensor silently proceeded to the opapi
  // call with an incomplete Y list.
  auto prep_ret = PrepareOutputTensorList(host_api_ctx, geTensorListY, INDEX_GMM_OUTPUT_Y, numGeWeight, *splitItemGe);
  OP_CHECK(prep_ret != GRAPH_SUCCESS, OP_LOGE("aclnnfallback", "failed to prepare output tensor list"), return GRAPH_FAILED);

  // execute opapi
  auto api_ret = EXEC_OPAPI_CMD(aclnnGroupedMatmul,
                                geTensorListX,
                                geTensorListWeight,
                                geTensorListBias,
                                geTensorListScale,
                                geTensorListOffset,
                                geTensorListAntiquantScale,
                                geTensorListAntiquantOffset,
                                groupListIntArray,
                                *splitItemGe,
                                geTensorListY);
  OP_CHECK(api_ret != GRAPH_SUCCESS, OP_LOGE("aclnnfallback", "api_ret failed:%d", api_ret), return GRAPH_FAILED);
  return GRAPH_SUCCESS;
}

// Register the fallback executor for GroupedMatmul. group_list is declared as
// a host input — presumably so its data is readable on the host, as GetIntArray
// dereferences it via GetData<int64_t>(); verify against the runtime contract.
IMPL_OP(GroupedMatmul).OpExecuteFunc(GroupedMatmulExecuteFunc).HostInputs({INDEX_GMM_INPUT_GROUP_LIST});

}  // namespace fallback

#ifdef __cplusplus
}
#endif
