/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file flash_attention_score_grad_us1s2_bbn2gs1s2_pre.h
 * \brief
 */
#ifndef _FLASH_ATTENTION_SCORE_GRAD_FLASH_ATTENTION_SCORE_GRAD_S1S2_BNGS1S2_PRE_KERNEL_H_
#define _FLASH_ATTENTION_SCORE_GRAD_FLASH_ATTENTION_SCORE_GRAD_S1S2_BNGS1S2_PRE_KERNEL_H_
#include "kernel_operator.h"

using namespace AscendC;

/*
 * Pre-processing kernel for FlashAttentionScoreGrad (US1S2 / BBN2GS1S2 template).
 * Per vector core it (a) zero-initializes the dq/dk/dv float accumulation
 * workspaces in GM and (b) when the dropout mask needs byte expansion
 * (isDropBoolMode), unpacks the bit-packed drop mask into a byte-per-element
 * uint8 mask in workspace.
 *
 * T1/T2: element types forwarded from the enclosing kernel dispatch;
 * INPUT_LAYOUT: layout tag, S1S2_TND (3) selects the TND (packed varlen) path.
 */
template <typename T1, typename T2, const uint32_t INPUT_LAYOUT = 0>
class FlashAttentionScoreGradS1S2BNGS1S2Pre {
 public:
  __aicore__ inline FlashAttentionScoreGradS1S2BNGS1S2Pre() {}
  // Binds GM addresses and tiling data, carves the dq/dk/dv (and optional mask)
  // workspaces out of `workspace`, and initializes the UB queues used by Process().
  __aicore__ inline void Init(__gm__ uint8_t* dq, __gm__ uint8_t* dk, __gm__ uint8_t* dv,
                              __gm__ uint8_t* actual_seq_kvlen, __gm__ uint8_t* drop_mask, __gm__ uint8_t* workspace,
                              const FlashAttentionScoreGradTilingDataUs1s2Bbn2gs1s2* orgTilingData, TPipe* pipe_in);
  // Clears the dq/dk/dv workspaces and, in drop-bool mode, expands the drop mask.
  __aicore__ inline void Process();
  // Cross-core barrier so later stages see the cleared/expanded workspaces.
  __aicore__ inline void SyncALLCores();

  TPipe* pipe;
  // UB queues for the mask-expansion pipeline: help (constant 1.0 tensor),
  // input (bit-packed mask in), cast (half-precision select result), out (uint8 mask out).
  TQue<QuePosition::VECIN, 1> helpQue;
  TQue<QuePosition::VECIN, 1> inputQue;
  TQue<QuePosition::VECIN, 1> castQue;
  TQue<QuePosition::VECOUT, 1> outQue;

  // Float accumulation workspaces for dq/dk/dv, zeroed in Process().
  GlobalTensor<float> dqWorkSpaceGm, dkWorkSpaceGm, dvWorkSpaceGm;
  // Byte-per-element expanded drop mask (written only in drop-bool mode).
  GlobalTensor<uint8_t> maskWorkSpaceGm;
  // Bit-packed dropout mask input.
  GlobalTensor<uint8_t> drop_maskGm;
  // Inter-core sync flags at the start of the workspace.
  GlobalTensor<int32_t> syncGlobal;

  const FlashAttentionScoreGradTilingDataUs1s2Bbn2gs1s2* TilingData;
  constexpr static uint32_t SYNC_GLOBAL_WORKSPACE_SIZE = 16 * 1024;  // bytes reserved for syncGlobal
  constexpr static uint32_t ADDR_ALIGN_SIZE = 512;                   // GM workspace alignment, bytes
  constexpr static uint32_t HELP_LEN = 256;                          // bytes for the all-ones help tensor
  constexpr static uint32_t BIT8 = 8;                                // mask bits per input byte
  constexpr static uint32_t NUMBER_8 = 8;
  constexpr static uint32_t B16_VECTOR_MASK = 128;                   // fp16 elements per vector repeat
  constexpr static uint32_t S1S2_TND = 3;                            // INPUT_LAYOUT value for TND layout

  uint32_t cBlockIdx;  // this core's block index
  // Per-core split of the dq (query) and dkv (key/value) workspace clearing.
  uint32_t ubBaseSize;
  uint32_t qPreBlockFactor;
  uint32_t qPreBlockTotal;
  uint32_t qPreBlockTail;
  uint32_t qPostBlockTotal;
  uint32_t kvPreBlockFactor;
  uint32_t kvPreBlockTotal;
  uint32_t kvPreBlockTail;
  uint32_t kvPostBlockTotal;

  uint64_t initdqSize;  // number of floats this core clears in dqWorkSpaceGm
  uint64_t dqOffset;    // element offset of this core's dq slice
  uint64_t initdkSize;  // number of floats this core clears in dk/dv workspaces
  uint64_t dkvOffset;   // element offset of this core's dk/dv slice

  bool isDropBoolMode;          // true when the mask must be expanded from bits to bytes
  uint32_t maskUsedCoreNum;     // cores participating in mask expansion
  uint32_t maskUBProcessNum;    // mask elements handled per UB loop iteration
  uint32_t maskTailUBProcessNum;
  uint32_t maskUBLoop;

  DataCopyParams copyParams;
  DataCopyPadParams padParams;
  BinaryRepeatParams repParams;
  half padValue{1.0};  // value selected where a mask bit is set
};

// Binds GM buffers and tiling data, lays out the float workspaces for dq/dk/dv
// (and, in drop-bool mode, the expanded mask) inside `workspace`, zeroes this
// core's sync flags, and initializes the UB queues used by Process().
template <typename T1, typename T2, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradS1S2BNGS1S2Pre<T1, T2, INPUT_LAYOUT>::Init(
    __gm__ uint8_t* dq, __gm__ uint8_t* dk, __gm__ uint8_t* dv, __gm__ uint8_t* actual_seq_kvlen,
    __gm__ uint8_t* drop_mask, __gm__ uint8_t* workspace,
    const FlashAttentionScoreGradTilingDataUs1s2Bbn2gs1s2* orgTilingData, TPipe* pipe_in) {
  cBlockIdx = GetBlockIdx();

  TilingData = orgTilingData;
  pipe = pipe_in;

  // tiling_data: per-core split of the workspace-clearing work.
  qPreBlockFactor = TilingData->preTilingData.qPreBlockFactor;
  qPreBlockTotal = TilingData->preTilingData.qPreBlockTotal;
  qPreBlockTail = TilingData->preTilingData.qPreBlockTail;
  qPostBlockTotal = TilingData->postTilingData.qPostBlockTotal;
  kvPreBlockFactor = TilingData->preTilingData.kvPreBlockFactor;
  kvPreBlockTotal = TilingData->preTilingData.kvPreBlockTotal;
  kvPreBlockTail = TilingData->preTilingData.kvPreBlockTail;
  kvPostBlockTotal = TilingData->postTilingData.kvPostBlockTotal;

  maskUsedCoreNum = TilingData->preTilingData.maskCoreNum;

  drop_maskGm.SetGlobalBuffer((__gm__ uint8_t*)drop_mask);

  // Workspace layout (byte offsets, each region align-up'ed to 512B):
  //   [0, SYNC_GLOBAL_WORKSPACE_SIZE)  sync flags
  //   then dq floats (qPostBlockTotal), dk floats, dv floats (kvPostBlockTotal each),
  //   then (drop-bool mode only) the expanded uint8 mask — see Process().
  syncGlobal.SetGlobalBuffer((__gm__ int32_t*)workspace);
  uint64_t workspaceOffsets = SYNC_GLOBAL_WORKSPACE_SIZE;
  // NOTE(review): byte offsets are converted to float-element offsets by dividing
  // by sizeof(T2), but the tensors are GlobalTensor<float>; this is only correct
  // when sizeof(T2) == sizeof(float) — confirm T2 is always float at this call site.
  dqWorkSpaceGm.SetGlobalBuffer((__gm__ float*)workspace + workspaceOffsets / sizeof(T2));
  // Align-up adds a full ADDR_ALIGN_SIZE (not ADDR_ALIGN_SIZE - 1), so an extra
  // 512B pad block is inserted even when already aligned — presumably intentional.
  workspaceOffsets = (workspaceOffsets + ((uint64_t)qPostBlockTotal) * sizeof(float) + ADDR_ALIGN_SIZE) /
                     ADDR_ALIGN_SIZE * ADDR_ALIGN_SIZE;
  dkWorkSpaceGm.SetGlobalBuffer((__gm__ float*)workspace + workspaceOffsets / sizeof(T2));
  workspaceOffsets = (workspaceOffsets + ((uint64_t)kvPostBlockTotal) * sizeof(float) + ADDR_ALIGN_SIZE) /
                     ADDR_ALIGN_SIZE * ADDR_ALIGN_SIZE;
  dvWorkSpaceGm.SetGlobalBuffer((__gm__ float*)workspace + workspaceOffsets / sizeof(T2));

  // Zero this core's 8-int32 sync slot before any cross-core synchronization.
  InitOutput<int32_t>(syncGlobal[GetBlockIdx() * 8], 8, 0);

  // The last participating core handles the (possibly smaller) tail block.
  initdqSize = cBlockIdx == qPreBlockTotal - 1 ? qPreBlockTail : qPreBlockFactor;
  dqOffset = ((uint64_t)cBlockIdx) * qPreBlockFactor;
  initdkSize = cBlockIdx == kvPreBlockTotal - 1 ? kvPreBlockTail : kvPreBlockFactor;
  dkvOffset = ((uint64_t)cBlockIdx) * kvPreBlockFactor;

  // dropMask params init: mask expansion is needed when dropout is active
  // (keepProb < 1) and any row length is not a multiple of 8, so the bit-packed
  // mask cannot be consumed directly.
  isDropBoolMode = TilingData->s1s2BNGS1S2BaseParams.s2 % 8 != 0 && TilingData->s1s2BNGS1S2BaseParams.keepProb < 1;
  if constexpr (INPUT_LAYOUT == S1S2_TND) {
    if (!isDropBoolMode && TilingData->s1s2BNGS1S2BaseParams.keepProb < 1) {
      // TND layout: per-batch S2 lengths come from the cumulative
      // actual_seq_kvlen array; check each length for 8-alignment.
      isDropBoolMode = (((__gm__ int64_t*)actual_seq_kvlen)[0] % 8 != 0);
      for (uint32_t i = 0; i + 1 < TilingData->s1s2BNGS1S2BaseParams.b; i++) {
        const int64_t seqS2iplus1 = ((__gm__ int64_t*)actual_seq_kvlen)[i + 1] - ((__gm__ int64_t*)actual_seq_kvlen)[i];
        isDropBoolMode = (isDropBoolMode || (seqS2iplus1 % 8 != 0));
      }
    }
  }
  if (isDropBoolMode) {
    // Expanded-mask region follows the dv workspace.
    workspaceOffsets = (workspaceOffsets + ((uint64_t)kvPostBlockTotal) * sizeof(float) + ADDR_ALIGN_SIZE) /
                       ADDR_ALIGN_SIZE * ADDR_ALIGN_SIZE;
    maskWorkSpaceGm.SetGlobalBuffer((__gm__ uint8_t*)workspace + workspaceOffsets);

    // UB queues for the bit->byte mask expansion pipeline in Process().
    pipe->InitBuffer(helpQue, 1, HELP_LEN);
    pipe->InitBuffer(inputQue, 1, TilingData->preTilingData.inputBufferLen);
    pipe->InitBuffer(castQue, 1, TilingData->preTilingData.castBufferLen);
    pipe->InitBuffer(outQue, 1, TilingData->preTilingData.outputBufferLen);

    // reset params: Select() repeat config — src1 (the all-ones help tensor) is
    // reused across repeats (stride 0); dst advances 8 blocks per repeat.
    repParams.src0BlkStride = 1;
    repParams.src0RepStride = 0;
    repParams.src1BlkStride = 0;
    repParams.src1RepStride = 0;
    repParams.dstBlkStride = 1;
    repParams.dstRepStride = NUMBER_8;

    // Single-burst copies; blockLen is set per transfer in Process().
    copyParams.blockCount = 1;
    copyParams.srcStride = 0;
    copyParams.dstStride = 0;
  }
}

// Zeroes this core's slices of the dq/dk/dv float workspaces, then (drop-bool
// mode only) expands the bit-packed drop mask into one uint8 per element:
// each mask bit becomes 1 (bit set) or 0, via a half-precision Select + Cast.
template <typename T1, typename T2, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradS1S2BNGS1S2Pre<T1, T2, INPUT_LAYOUT>::Process() {
  // process: only vector cores below maskCoreNum participate.
  if (g_coreType == AIV && cBlockIdx < TilingData->preTilingData.maskCoreNum) {
    // clear dq dk dv workspace
    InitOutput<float>(dkWorkSpaceGm[dkvOffset], initdkSize, 0);
    InitOutput<float>(dvWorkSpaceGm[dkvOffset], initdkSize, 0);
    InitOutput<float>(dqWorkSpaceGm[dqOffset], initdqSize, 0);

    // Nothing more to do unless the mask needs bit->byte expansion.
    if (!isDropBoolMode) {
      return;
    }

    maskUBLoop = TilingData->preTilingData.maskSingleCoreLoop;
    maskTailUBProcessNum = TilingData->preTilingData.maskLastLoopNum;
    if (unlikely(cBlockIdx == maskUsedCoreNum - 1)) {
      // Last core gets the (smaller) tail share of the mask.
      maskUBLoop = TilingData->preTilingData.maskTailCoreLoop;
      maskTailUBProcessNum = TilingData->preTilingData.maskTailCoreLastLoopNum;
    }

    // malloc tensor filled by 1.0 — reused as Select()'s "bit set" source.
    auto helpTensor = helpQue.AllocTensor<half>();
    Duplicate<half>(helpTensor, padValue, HELP_LEN / sizeof(half));
    pipe_barrier(PIPE_V);

    // Output is addressed in elements; input in bytes (8 mask bits per byte).
    uint64_t outputAddr = cBlockIdx * TilingData->preTilingData.maskSingleCoreNum;
    uint64_t inputAddr = cBlockIdx * TilingData->preTilingData.maskSingleCoreNum / BIT8;

    // process
    for (uint64_t idx = 0; idx < maskUBLoop; idx++) {
      maskUBProcessNum = TilingData->preTilingData.singleUBProcessNum;
      uint64_t outputOffset = idx * maskUBProcessNum;
      uint64_t inputOffset = idx * maskUBProcessNum / BIT8;
      if (unlikely(idx == maskUBLoop - 1)) {
        maskUBProcessNum = maskTailUBProcessNum;
      }

      // copyIn: maskUBProcessNum bits = maskUBProcessNum / 8 bytes.
      auto inputTensor = inputQue.AllocTensor<uint8_t>();
      copyParams.blockLen = maskUBProcessNum / BIT8;
      DataCopyPad(inputTensor, drop_maskGm[inputAddr + inputOffset], copyParams, padParams);
      inputQue.EnQue(inputTensor);
      inputQue.DeQue<uint8_t>();

      // select: per mask bit, write 1.0 (from helpTensor) if set, else 0.0.
      auto castTensor = castQue.AllocTensor<half>();
      uint8_t selectRepeat = (maskUBProcessNum + B16_VECTOR_MASK - 1) / B16_VECTOR_MASK;
      Select(castTensor, inputTensor, helpTensor, (half)0.0, SELMODE::VSEL_TENSOR_SCALAR_MODE, B16_VECTOR_MASK,
             selectRepeat, repParams);
      pipe_barrier(PIPE_V);
      inputQue.FreeTensor(inputTensor);

      // cast: half 1.0/0.0 -> uint8 1/0.
      auto outputTensor = outQue.AllocTensor<uint8_t>();
      Cast(outputTensor, castTensor, RoundMode::CAST_ROUND, maskUBProcessNum);
      castQue.FreeTensor(castTensor);

      // copyOut: one byte per mask element.
      outQue.EnQue(outputTensor);
      outQue.DeQue<uint8_t>();
      copyParams.blockLen = maskUBProcessNum;
      DataCopyPad(maskWorkSpaceGm[outputAddr + outputOffset], outputTensor, copyParams);
      outQue.FreeTensor(outputTensor);
    }
    helpQue.FreeTensor(helpTensor);
  }
}

// Cross-core barrier: blocks until every core reaches this point, so later
// stages observe the cleared workspaces and the expanded mask.
template <typename T1, typename T2, const uint32_t INPUT_LAYOUT>
__aicore__ inline void FlashAttentionScoreGradS1S2BNGS1S2Pre<T1, T2, INPUT_LAYOUT>::SyncALLCores() {
  SyncAll();
}
#endif  // _FLASH_ATTENTION_SCORE_GRAD_FLASH_ATTENTION_SCORE_GRAD_S1S2_BNGS1S2_PRE_KERNEL_H_
