/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file flash_attention_score_grad_post.h
 * \brief common post process
 */
#ifndef _FLASH_ATTENTION_SCORE_GRAD_POST_H_
#define _FLASH_ATTENTION_SCORE_GRAD_POST_H_
#include "kernel_operator.h"

/*
 * Post-processing stage of the flash-attention score gradient kernel.
 *
 * The backward pass accumulates dq/dk (and optionally dv) in fp32 workspace
 * buffers in global memory. This class streams those buffers through the
 * unified buffer, multiplies dq/dk by the tiling's scaleValue, casts the
 * result to OUT_TYPE, and writes the final dq/dk/dv outputs. When CAST_DV
 * is true, dv is also cast from its fp32 workspace (without scaling).
 */
template <typename OUT_TYPE, typename TILING_TYPE, const bool CAST_DV>
class FlashAttentionScoreGradPost {
 public:
  __aicore__ inline FlashAttentionScoreGradPost(){};
  // Binds the GM output/workspace addresses, caches the post tiling
  // parameters into members, and initializes the UB queues on pipe_in.
  __aicore__ inline void Init(__gm__ uint8_t* dq, __gm__ uint8_t* dk, __gm__ uint8_t* dv, __gm__ uint8_t* workspace,
                              const TILING_TYPE* __restrict ordTilingData,
                              TPipe* pipe_in);
  // Runs the copy/scale/cast loops over this core's dq, dk and dv ranges.
  __aicore__ inline void Process();

  constexpr static uint32_t BUFFER_NUM = 1;  // single-buffered queues
  TPipe* pipe;  // owned by the caller; passed in via Init
  TQue<QuePosition::VECIN, BUFFER_NUM> inQueue;    // UB staging for fp32 input tiles
  TQue<QuePosition::VECOUT, BUFFER_NUM> outQueue;  // UB staging for OUT_TYPE output tiles
  TBuf<> vecClc1;  // NOTE(review): unused in this file -- possibly reserved; confirm before removing
  TBuf<> vecClc2;  // NOTE(review): unused in this file -- possibly reserved; confirm before removing

  // final OUT_TYPE outputs in global memory
  GlobalTensor<OUT_TYPE> dqGm, dkGm, dvGm;
  // input
  GlobalTensor<float> dqWorkSpaceGm, dkWorkSpaceGm, dvWorkSpaceGm;

  const TILING_TYPE* __restrict tilingData;
  // NOTE(review): the two constants below are not referenced in this file -- verify against other translation units.
  constexpr static uint32_t SYNC_GLOBAL_WORKSPACE_SIZE = 16 * 1024;
  constexpr static uint32_t ADDR_ALIGN_SIZE = 512;

  uint32_t usedCoreNum;  // cores participating in the post stage
  uint32_t cBlockIdx;    // this core's block index
  // query
  uint32_t ubBaseSize;        // per-buffer size handed to InitBuffer (outQueue gets 1x, inQueue 2x)
  uint32_t qPostBlockFactor;  // chunks per core for dq
  uint32_t qPostBlockTotal;   // total dq elements to post-process
  uint32_t qPostBaseNum;      // elements per full dq chunk
  uint32_t qPostTailNum;      // elements in the final dq chunk
  uint32_t qSizeAlign;
  uint32_t kvPostBlockFactor; // same meaning as the q* fields, for dk/dv
  uint32_t kvPostBlockTotal;
  uint32_t kvPostBaseNum;
  uint32_t kvPostTailNum;
  uint32_t kvSizeAlign;
};

template <typename OUT_TYPE, typename TILING_TYPE, const bool CAST_DV>
__aicore__ inline void FlashAttentionScoreGradPost<OUT_TYPE, TILING_TYPE, CAST_DV>::Init(
    __gm__ uint8_t* dq, __gm__ uint8_t* dk, __gm__ uint8_t* dv, __gm__ uint8_t* workspace,
    const TILING_TYPE* __restrict ordTilingData, TPipe* pipe_in) {
  pipe = pipe_in;
  tilingData = ordTilingData;
  cBlockIdx = GetBlockIdx();

  // Bind the three gradient outputs in global memory.
  dqGm.SetGlobalBuffer((__gm__ OUT_TYPE*)dq);
  dkGm.SetGlobalBuffer((__gm__ OUT_TYPE*)dk);
  dvGm.SetGlobalBuffer((__gm__ OUT_TYPE*)dv);

  // Cache the post-stage tiling parameters into members.
  const auto& post = tilingData->postTilingData;
  usedCoreNum = post.coreNum;
  ubBaseSize = post.postUbBaseSize;
  qPostBlockFactor = post.qPostBlockFactor;
  qPostBlockTotal = post.qPostBlockTotal;
  qPostBaseNum = post.qPostBaseNum;
  qPostTailNum = post.qPostTailNum;
  qSizeAlign = post.qSizeAlign;
  kvPostBlockFactor = post.kvPostBlockFactor;
  kvPostBlockTotal = post.kvPostBlockTotal;
  kvPostBaseNum = post.kvPostBaseNum;
  kvPostTailNum = post.kvPostTailNum;
  kvSizeAlign = post.kvSizeAlign;

  // fp32 staging workspaces; the offsets are byte offsets (hence the
  // division by sizeof(float) to get element offsets).
  dqWorkSpaceGm.SetGlobalBuffer((__gm__ float*)workspace + post.dqWorkSpaceOffset / sizeof(float));
  dkWorkSpaceGm.SetGlobalBuffer((__gm__ float*)workspace + post.dkWorkSpaceOffset / sizeof(float));
  if constexpr (CAST_DV) {
    // dv needs an fp32 staging buffer only when it is cast on the way out.
    dvWorkSpaceGm.SetGlobalBuffer((__gm__ float*)workspace + post.dvWorkSpaceOffset / sizeof(float));
  }

  // UB queues: the fp32 input tile takes twice the bytes of the output tile.
  pipe->InitBuffer(inQueue, BUFFER_NUM, ubBaseSize * 2);
  pipe->InitBuffer(outQueue, BUFFER_NUM, ubBaseSize);
}

template <typename OUT_TYPE, typename TILING_TYPE, const bool CAST_DV>
__aicore__ inline void FlashAttentionScoreGradPost<OUT_TYPE, TILING_TYPE, CAST_DV>::Process() {
  // Per-core post pass: stream fp32 chunks from the accumulation workspace
  // into UB, scale (dq/dk only) and cast to OUT_TYPE, then copy the result to
  // the final GM outputs. (Removed dead local `isLastCore` -- it was computed
  // but never read.)

  // dq: half-open element range [qBegin, qEnd) owned by this core, clamped
  // to the overall total so the last core stops at the end of the tensor.
  uint64_t qBegin = cBlockIdx * qPostBlockFactor * qPostBaseNum;
  uint64_t qEnd = (cBlockIdx + 1) * qPostBlockFactor * qPostBaseNum;

  if (((cBlockIdx + 1) * qPostBlockFactor * qPostBaseNum) > qPostBlockTotal) {
    qEnd = qPostBlockTotal;
  }
  for (uint64_t i = qBegin; i < qEnd; i = i + qPostBaseNum) {
    LocalTensor<float> vecIn = inQueue.AllocTensor<float>();
    LocalTensor<OUT_TYPE> vecOut = outQueue.AllocTensor<OUT_TYPE>();
    // Last chunk of the whole tensor uses the tail size (assumes the tiling
    // sets qPostTailNum to cover the final chunk -- confirm against tiling).
    uint32_t dataSize = i + qPostBaseNum < qPostBlockTotal ? qPostBaseNum : qPostTailNum;
    DataCopy(vecIn, dqWorkSpaceGm[i], (dataSize + 7) / 8 * 8); // dataSize(fp32) align 32B
    inQueue.EnQue(vecIn);
    inQueue.DeQue<float>(); // queue handshake orders the copy-in before the vector ops

    Muls(vecIn, vecIn, (float)tilingData->postTilingData.scaleValue, dataSize);
    pipe_barrier(PIPE_V); // Muls result must be committed before Cast reads it
    Cast(vecOut, vecIn, RoundMode::CAST_ROUND, dataSize);
    outQueue.EnQue(vecOut);
    outQueue.DeQue<OUT_TYPE>(); // handshake orders the cast before the copy-out
    DataCopy(dqGm[i], vecOut, (dataSize + 15) / 16 * 16); // dataSize(fp16) align 32B
    inQueue.FreeTensor(vecIn);
    outQueue.FreeTensor(vecOut);
  }
  pipe_barrier(PIPE_ALL); // drain all dq traffic before reusing the queues for dk

  // dk: same scheme as dq, over this core's [kvBegin, kvEnd) range.
  uint64_t kvBegin = cBlockIdx * kvPostBlockFactor * kvPostBaseNum;
  uint64_t kvEnd = (cBlockIdx + 1) * kvPostBlockFactor * kvPostBaseNum;
  if (((cBlockIdx + 1) * kvPostBlockFactor * kvPostBaseNum) > kvPostBlockTotal) {
    kvEnd = kvPostBlockTotal;
  }

  for (uint64_t i = kvBegin; i < kvEnd; i = i + kvPostBaseNum) {
    LocalTensor<float> vecIn = inQueue.AllocTensor<float>();
    LocalTensor<OUT_TYPE> vecOut = outQueue.AllocTensor<OUT_TYPE>();
    uint32_t dataSize = i + kvPostBaseNum < kvPostBlockTotal ? kvPostBaseNum : kvPostTailNum;
    DataCopy(vecIn, dkWorkSpaceGm[i], (dataSize + 7) / 8 * 8); // dataSize(fp32) align 32B
    inQueue.EnQue(vecIn);
    inQueue.DeQue<float>();
    Muls(vecIn, vecIn, (float)tilingData->postTilingData.scaleValue, dataSize);
    pipe_barrier(PIPE_V);
    Cast(vecOut, vecIn, RoundMode::CAST_ROUND, dataSize);
    outQueue.EnQue(vecOut);
    outQueue.DeQue<OUT_TYPE>();
    DataCopy(dkGm[i], vecOut, (dataSize + 15) / 16 * 16); // dataSize(fp16) align 32B
    inQueue.FreeTensor(vecIn);
    outQueue.FreeTensor(vecOut);
  }
  pipe_barrier(PIPE_ALL); // drain all dk traffic before reusing the queues for dv

  // dv: same range as dk but cast only -- no scaleValue multiply.
  if constexpr(CAST_DV) {
    for (uint64_t i = kvBegin; i < kvEnd; i = i + kvPostBaseNum) {
      LocalTensor<float> vecIn = inQueue.AllocTensor<float>();
      LocalTensor<OUT_TYPE> vecOut = outQueue.AllocTensor<OUT_TYPE>();
      uint32_t dataSize = i + kvPostBaseNum < kvPostBlockTotal ? kvPostBaseNum : kvPostTailNum;
      DataCopy(vecIn, dvWorkSpaceGm[i], (dataSize + 7) / 8 * 8); // dataSize(fp32) align 32B
      inQueue.EnQue(vecIn);
      inQueue.DeQue<float>();
      Cast(vecOut, vecIn, RoundMode::CAST_ROUND, dataSize);
      outQueue.EnQue(vecOut);
      outQueue.DeQue<OUT_TYPE>();
      DataCopy(dvGm[i], vecOut, (dataSize + 15) / 16 * 16); // dataSize(fp16) align 32B
      inQueue.FreeTensor(vecIn);
      outQueue.FreeTensor(vecOut);
    }
  }
}

#endif  // _FLASH_ATTENTION_SCORE_GRAD_POST_H_
