/*#include "kernel_operator.h"

extern "C" __global__ __aicore__ void leaky_relu_grad_custom(GM_ADDR dy, GM_ADDR x, GM_ADDR dx, GM_ADDR workspace, GM_ADDR tiling) {
    GET_TILING_DATA(tiling_data, tiling);
    // TODO: user kernel impl
}
*/
/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 1.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED,
 * INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

/**
 * @file leaky_relu_grad_custom.cpp
 */
#include "kernel_operator.h"
// Instantiates the templated kernel class, loads the tiling data from GM,
// initializes the operator with all per-core split parameters, and runs it.
// NOTE: comments must stay OUTSIDE the macro body — a '//' before a trailing
// backslash would swallow the line continuation during preprocessing.
#define GENERAL_OP_IMPL(templateClass,...)                                          \
  do{                                                                               \
      GET_TILING_DATA(tiling_data, tiling);                                         \
      templateClass<__VA_ARGS__>op;                                                 \
      op.Init(dy, x, dx, tiling_data.negativeSlope,                                         \
              tiling_data.smallCoreDataNum, tiling_data.bigCoreDataNum,             \
              tiling_data.bigCoreLoopNum, tiling_data.smallCoreLoopNum,             \
              tiling_data.ubPartDataNum, tiling_data.smallCoreTailDataNum,          \
              tiling_data.bigCoreTailDataNum, tiling_data.tailBlockNum);            \
      op.Process();                                                                 \
  }while(0)

// tensor num for each queue (double buffering: overlap copy-in/compute/copy-out)
constexpr int32_t BUFFER_NUM = 2;

/**
 * @brief Kernel computing the LeakyReLU backward pass:
 *        dx = (x > 0) ? dy : negativeSlope * dy.
 *
 * @tparam TYPE_DY        element type of the upstream gradient tensor dy
 * @tparam TYPE_X         element type of the forward input tensor x
 *                        (used as the working element type T for all tensors)
 * @tparam TYPE_DX        element type of the output gradient tensor dx
 * @tparam IsExistBigCore true when the tiling split the data unevenly and the
 *                        first tailBlockNum cores carry the bigger share
 */
template<typename TYPE_DY, typename TYPE_X, typename TYPE_DX, bool IsExistBigCore> class KernelLeakyReluGrad {
    using T = TYPE_X;
public:
    __aicore__ inline KernelLeakyReluGrad() {}

    /**
     * @brief Bind this core's slice of global memory and set up UB queues/buffers.
     *
     * @param dy                  GM address of the upstream gradient
     * @param x                   GM address of the forward input
     * @param dx                  GM address of the output gradient
     * @param negativeSlope       LeakyReLU slope applied where x <= 0
     * @param smallCoreDataNum    element count processed by a "small" core
     * @param bigCoreDataNum      element count processed by a "big" core
     * @param bigCoreLoopNum      tile loop count for a big core
     * @param smallCoreLoopNum    tile loop count for a small core
     * @param ubPartDataNum       elements per UB tile (full tiles)
     * @param smallCoreTailDataNum elements in a small core's last tile
     * @param bigCoreTailDataNum  elements in a big core's last tile
     * @param tailBlockNum        number of leading cores that are "big"
     */
    __aicore__ inline void Init(GM_ADDR dy, GM_ADDR x, GM_ADDR dx, float negativeSlope,
                                uint64_t smallCoreDataNum, uint64_t bigCoreDataNum, 
                                uint64_t bigCoreLoopNum, uint64_t smallCoreLoopNum, 
                                uint64_t ubPartDataNum, uint64_t smallCoreTailDataNum, 
                                uint64_t bigCoreTailDataNum, uint64_t tailBlockNum) 
    {
        ASSERT(AscendC::GetBlockNum() != 0 && "block dim can not be zero!");
        // Index of the current AI core (renamed from the misleading "coreNum").
        uint64_t coreIndex = AscendC::GetBlockIdx();
        // Start offset assuming every preceding core was a "big" core; corrected
        // below for small cores when the split is uneven.
        uint64_t globalBufferIndex = bigCoreDataNum * coreIndex;
        this->ubPartDataNum = ubPartDataNum;
        this->negativeSlope = negativeSlope;
        
        if constexpr (IsExistBigCore) 
        {
          if (coreIndex < tailBlockNum) 
          { 
            // Leading cores take the larger share.
            this->coreDataNum = bigCoreDataNum;
            this->tileNum = bigCoreLoopNum;
            this->tailDataNum = bigCoreTailDataNum;
          }
          else 
          { 
            this->coreDataNum = smallCoreDataNum;
            this->tileNum = smallCoreLoopNum;
            this->tailDataNum = smallCoreTailDataNum;
            // Remove the over-counted offset contributed by the small cores
            // that preceded this one.
            globalBufferIndex -= (bigCoreDataNum - smallCoreDataNum) * (coreIndex - tailBlockNum);
          }
        }
        else
        {
          // Even split: every core is a "small" core.
          this->coreDataNum = smallCoreDataNum;
          this->tileNum = smallCoreLoopNum;
          this->tailDataNum = smallCoreTailDataNum;
          globalBufferIndex = smallCoreDataNum * coreIndex;
        }
          
        dyGm.SetGlobalBuffer((__gm__ T*)dy + globalBufferIndex, this->coreDataNum);
        xGm.SetGlobalBuffer((__gm__ T*)x + globalBufferIndex, this->coreDataNum);
        dxGm.SetGlobalBuffer((__gm__ T*)dx + globalBufferIndex, this->coreDataNum);
        
        pipe.InitBuffer(inQueueDy, BUFFER_NUM, this->ubPartDataNum * sizeof(T));
        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->ubPartDataNum * sizeof(T));
        pipe.InitBuffer(outQueueDx, BUFFER_NUM, this->ubPartDataNum * sizeof(T));
        // Scratch for the compare mask and for the scaled negative branch;
        // ubPartDataNum * sizeof(T) bytes comfortably covers the 1-bit-per-
        // element mask CompareScalar produces.
        pipe.InitBuffer(tmpBuffer, this->ubPartDataNum * sizeof(T));
        pipe.InitBuffer(tmp1Buffer, this->ubPartDataNum * sizeof(T));
    }
    
    /**
     * @brief Run the tile pipeline: full tiles first, then the tail tile.
     *        Assumes tileNum >= 1 (guaranteed by the host-side tiling).
     */
    __aicore__ inline void Process()
    {
        int32_t loopCount = this->tileNum;
        this->processDataNum = this->ubPartDataNum;
        for (int32_t i = 0; i < loopCount-1; i++) 
        {
            CopyIn(i);
            Compute(i);
            CopyOut(i);
        }
        // Last tile may be shorter than a full UB part.
        this->processDataNum = this->tailDataNum;
        CopyIn(loopCount-1);
        Compute(loopCount-1);
        CopyOut(loopCount-1);
    }

private:
    /// Stage 1: copy one tile of dy and x from GM into UB input queues.
    __aicore__ inline void CopyIn(int32_t progress)
    {
      AscendC::LocalTensor<T> dyLocal = inQueueDy.AllocTensor<T>();
      AscendC::LocalTensor<T> xLocal = inQueueX.AllocTensor<T>();
      AscendC::DataCopy(dyLocal, dyGm[progress * this->ubPartDataNum], this->processDataNum);
      AscendC::DataCopy(xLocal, xGm[progress * this->ubPartDataNum], this->processDataNum);
      inQueueDy.EnQue(dyLocal);
      inQueueX.EnQue(xLocal);
    }
    
    /// Stage 2: LeakyReLU gradient — dx = (x > 0) ? dy : negativeSlope * dy.
    __aicore__ inline void Compute(int32_t progress)
    {
      AscendC::LocalTensor<T> dyLocal = inQueueDy.DeQue<T>();
      AscendC::LocalTensor<T> xLocal = inQueueX.DeQue<T>();
      AscendC::LocalTensor<T> dxLocal = outQueueDx.AllocTensor<T>();
      AscendC::LocalTensor<uint8_t> selMask = tmpBuffer.Get<uint8_t>();
      AscendC::LocalTensor<T> negativePart = tmp1Buffer.Get<T>();
      
      // selMask[i] = (x[i] > 0)
      AscendC::CompareScalar(selMask, xLocal, static_cast<T>(0.0f), AscendC::CMPMODE::GT, this->processDataNum);
      // negativePart = negativeSlope * dy. Muls writes dst from src directly,
      // so the previous DataCopy(negativePart, dyLocal, ...) was redundant and
      // has been removed (saves one vector copy per tile).
      AscendC::Muls(negativePart, dyLocal, static_cast<T>(this->negativeSlope), this->processDataNum);
      // dx = selMask ? dy : negativePart
      AscendC::Select(dxLocal, selMask, dyLocal, negativePart, AscendC::SELMODE::VSEL_TENSOR_TENSOR_MODE, this->processDataNum);
      
      outQueueDx.EnQue<T>(dxLocal);
      inQueueDy.FreeTensor(dyLocal);
      inQueueX.FreeTensor(xLocal);
    }
    
    /// Stage 3: copy one computed tile of dx from UB back to GM.
    __aicore__ inline void CopyOut(int32_t progress)
    {
      AscendC::LocalTensor<T> dxLocal = outQueueDx.DeQue<T>();  
      AscendC::DataCopy(dxGm[progress * this->ubPartDataNum], dxLocal, this->processDataNum);
      outQueueDx.FreeTensor(dxLocal);
    }
    
private:
    AscendC::TPipe pipe;
    AscendC::TQue<AscendC::QuePosition::VECIN, BUFFER_NUM> inQueueDy;
    AscendC::TQue<AscendC::QuePosition::VECIN, BUFFER_NUM> inQueueX;
    AscendC::TQue<AscendC::QuePosition::VECOUT, BUFFER_NUM> outQueueDx;
    AscendC::TBuf<AscendC::TPosition::VECCALC> tmpBuffer;            // temporary buffer for the compare mask
    AscendC::TBuf<AscendC::TPosition::VECCALC> tmp1Buffer;           // temporary buffer for negativeSlope * dy
    AscendC::GlobalTensor<T> dyGm;
    AscendC::GlobalTensor<T> xGm;
    AscendC::GlobalTensor<T> dxGm;
    
    uint64_t coreDataNum;      // total elements this core processes
    uint64_t tileNum;          // number of UB tiles (including tail)
    uint64_t ubPartDataNum;    // elements per full UB tile
    uint64_t tailDataNum;      // elements in the last tile
    uint64_t processDataNum;   // elements in the tile currently in flight
    float negativeSlope;       // slope applied where x <= 0
};

/**
 * @brief Device entry point for the LeakyReluGrad custom operator.
 *
 * The host-side tiling sets the tiling key: 0 means the data divides evenly
 * across cores, 1 means some cores carry a bigger share. The two keys are
 * mutually exclusive, so branch order does not affect behavior.
 */
extern "C" __global__ __aicore__ void leaky_relu_grad_custom(GM_ADDR dy, GM_ADDR x, GM_ADDR dx, GM_ADDR workspace, GM_ADDR tiling)
{
    if (TILING_KEY_IS(0)) {
        GENERAL_OP_IMPL(KernelLeakyReluGrad, DTYPE_DY, DTYPE_X, DTYPE_DX, false);
    } else if (TILING_KEY_IS(1)) {
        GENERAL_OP_IMPL(KernelLeakyReluGrad, DTYPE_DY, DTYPE_X, DTYPE_DX, true);
    }
}
#ifndef ASCENDC_CPU_DEBUG
// Host-side launch wrapper: dispatches the kernel with the given block count
// on the given stream. Compiled only for on-device (non-CPU-debug) builds.
void leaky_relu_grad_custom_do(uint32_t blockDim, void* l2ctrl, void* stream, uint8_t* dy, uint8_t* x, uint8_t* dx,
    uint8_t* workspace, uint8_t* tiling)
{
    leaky_relu_grad_custom<<<blockDim, l2ctrl, stream>>>(dy, x, dx, workspace, tiling);
}
#endif