/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2023. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "level0/flash_attention_score_grad.h"
#include "opdev/format_utils.h"
#include "opdev/make_op_executor.h"
#include "opdev/op_def.h"
#include "opdev/op_dfx.h"
#include "opdev/op_executor.h"
#include "opdev/op_log.h"
#include "opdev/shape_utils.h"

using namespace op;

namespace l0op {
OP_TYPE_REGISTER(FlashAttentionScoreGrad);

const std::array<const aclTensor*, MAX_FAG_OUTPUT_CNT> FlashAttentionScoreGrad(
    const aclTensor* query, const aclTensor* key, const aclTensor* value, const aclTensor* dy,
    const aclTensor* pseShiftOptional, const aclTensor* dropMaskOptional, const aclTensor* paddingMaskOptional,
    const aclTensor* attenMaskOptional, const aclTensor* softmaxMaxOptional, const aclTensor* softmaxSumOptional,
    const aclTensor* softmaxInOptional, const aclTensor* attentionInOptional, const aclIntArray* prefixOptional,
    const aclIntArray* actualSeqQLenOptional, const aclIntArray* actualSeqKvLenOptional, double scaleValueOptional,
    double keepProbOptional, int64_t preTockensOptional, int64_t nextTockensOptional, int64_t headNum,
    char* inputLayout, int64_t innerPreciseOptional, int64_t sparseModeOptional, aclOpExecutor* executor) {
  L0_DFX(FlashAttentionScoreGrad, query, key, value, dy, pseShiftOptional, dropMaskOptional, paddingMaskOptional,
         attenMaskOptional, softmaxMaxOptional, softmaxSumOptional, softmaxInOptional, attentionInOptional,
         prefixOptional, actualSeqQLenOptional, actualSeqKvLenOptional, scaleValueOptional, keepProbOptional,
         preTockensOptional, nextTockensOptional, headNum, inputLayout, innerPreciseOptional, sparseModeOptional);

  auto dqOut = executor->AllocTensor(query->GetDataType(), op::Format::FORMAT_ND, op::Format::FORMAT_ND);
  auto dkOut = executor->AllocTensor(query->GetDataType(), op::Format::FORMAT_ND, op::Format::FORMAT_ND);
  auto dvOut = executor->AllocTensor(query->GetDataType(), op::Format::FORMAT_ND, op::Format::FORMAT_ND);
  auto dpseOut = executor->AllocTensor(query->GetDataType(), op::Format::FORMAT_ND, op::Format::FORMAT_ND);

  const aclTensor* prefix = nullptr;
  if (prefixOptional) {
    prefix = executor->ConvertToTensor(prefixOptional, op::DataType::DT_INT64);
    const_cast<aclTensor*>(prefix)->SetStorageFormat(op::Format::FORMAT_ND);
    const_cast<aclTensor*>(prefix)->SetViewFormat(op::Format::FORMAT_ND);
    const_cast<aclTensor*>(prefix)->SetOriginalFormat(op::Format::FORMAT_ND);
  }

  const aclTensor* actualSeqQLen = nullptr;
  if (actualSeqQLenOptional) {
    actualSeqQLen = executor->ConvertToTensor(actualSeqQLenOptional, op::DataType::DT_INT64);
    const_cast<aclTensor*>(actualSeqQLen)->SetStorageFormat(op::Format::FORMAT_ND);
    const_cast<aclTensor*>(actualSeqQLen)->SetViewFormat(op::Format::FORMAT_ND);
    const_cast<aclTensor*>(actualSeqQLen)->SetOriginalFormat(op::Format::FORMAT_ND);
  }

  const aclTensor* actualSeqKvLen = nullptr;
  if (actualSeqKvLenOptional) {
    actualSeqKvLen = executor->ConvertToTensor(actualSeqKvLenOptional, op::DataType::DT_INT64);
    const_cast<aclTensor*>(actualSeqKvLen)->SetStorageFormat(op::Format::FORMAT_ND);
    const_cast<aclTensor*>(actualSeqKvLen)->SetViewFormat(op::Format::FORMAT_ND);
    const_cast<aclTensor*>(actualSeqKvLen)->SetOriginalFormat(op::Format::FORMAT_ND);
  }

  auto ret = INFER_SHAPE(
      FlashAttentionScoreGrad,
      OP_INPUT(query, key, value, dy, pseShiftOptional, dropMaskOptional, paddingMaskOptional, attenMaskOptional,
               softmaxMaxOptional, softmaxSumOptional, softmaxInOptional, attentionInOptional, prefix, actualSeqQLen,
               actualSeqKvLen),
      OP_OUTPUT(dqOut, dkOut, dvOut, dpseOut),
      OP_ATTR(static_cast<float>(scaleValueOptional), static_cast<float>(keepProbOptional), preTockensOptional,
              nextTockensOptional, headNum, inputLayout, innerPreciseOptional, sparseModeOptional));
  if (ret != ACLNN_SUCCESS) {
    OP_LOGE(ACLNN_ERR_PARAM_INVALID, "Fag InferShape failed.");
    return {nullptr, nullptr, nullptr, nullptr};
  }

  ret = ADD_TO_LAUNCHER_LIST_AICORE(
      FlashAttentionScoreGrad,
      OP_INPUT(query, key, value, dy, pseShiftOptional, dropMaskOptional, paddingMaskOptional, attenMaskOptional,
               softmaxMaxOptional, softmaxSumOptional, softmaxInOptional, attentionInOptional, prefix, actualSeqQLen,
               actualSeqKvLen),
      OP_OUTPUT(dqOut, dkOut, dvOut, dpseOut),
      OP_ATTR(static_cast<float>(scaleValueOptional), static_cast<float>(keepProbOptional), preTockensOptional,
              nextTockensOptional, headNum, inputLayout, innerPreciseOptional, sparseModeOptional));
  if (ret != ACLNN_SUCCESS) {
    OP_LOGE(ACLNN_ERR_PARAM_INVALID, "Fag launch kernel failed.");
    return {nullptr, nullptr, nullptr, nullptr};
  }

  return {dqOut, dkOut, dvOut, dpseOut};
}

}  // namespace l0op
