/**
 * This program is free software, you can redistribute it and/or modify it.
 * Copyright (c) 2025 Huawei Technologies Co., Ltd.
 * This file is a part of the CANN Open Software.
 * Licensed under CANN Open Software License Agreement Version 2.0 (the "License").
 * Please refer to the License for details. You may not use this file except in compliance with the License.
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
 * BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.
 * See LICENSE in the root of the software repository for the full text of the License.
 */

/*! \file erf.h
 *  \brief AscendC inline-erf kernel (aligned with erf.h style)
 */
#ifndef ERF_H
#define ERF_H

#include "kernel_operator.h"
#include "erf_tiling_data.h"
#include "erf_tiling_key.h"

namespace MyErf {

using namespace AscendC;
constexpr int32_t BUFFER_NUM = 2;
constexpr int32_t BUFFER_NUM0 = 1;

// Single-buffered erf kernel for float input/output: the whole per-core
// workload is processed as one tile (no ping-pong double buffering).
template <typename TYPE_X, typename TYPE_Y>
class KernelErf_FLOAT_1 {
public:
    __aicore__ inline KernelErf_FLOAT_1() = default;
    // Configures per-core data split, binds global buffers, and allocates
    // UB queues/scratch via the supplied TPipe. Must be called before Process().
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y,
                                uint32_t smallCoreDataNum, uint32_t bigCoreDataNum,
                                uint32_t finalBigTileNum, uint32_t finalSmallTileNum,
                                uint16_t tileDataNum, uint16_t smallTailDataNum,
                                uint16_t bigTailDataNum, uint8_t tailBlockNum,AscendC::TPipe* pipe);
    // Runs the CopyIn -> Compute -> CopyOut pipeline once.
    __aicore__ inline void Process();

private:
    __aicore__ inline void CopyIn();
    __aicore__ inline void CopyOut();
    __aicore__ inline void Compute();

private:
    TQue<QuePosition::VECIN,  BUFFER_NUM0> inQueueX;
    TQue<QuePosition::VECOUT, BUFFER_NUM0> outQueueY;
    TBuf<QuePosition::VECCALC> tmpBuf;          // scratch buffer for polynomial intermediates (x^2)
    GlobalTensor<TYPE_X> xGm;
    GlobalTensor<TYPE_Y> yGm;

    uint32_t coreDataNum;     // total element count assigned to this core
    uint32_t tileNum;         // number of tiles processed by this core
    uint16_t tileDataNum;     // element count of a full tile
    uint16_t tailDataNum;     // element count of the tail (last) tile
    uint16_t processDataNum;  // element count of the tile currently in flight
};


// Double-buffered erf kernel for float input/output: tiles are pipelined
// through ping-pong queues (BUFFER_NUM = 2) to overlap copy and compute.
template <typename TYPE_X, typename TYPE_Y>
class KernelErf_FLOAT_2 {
public:
    __aicore__ inline KernelErf_FLOAT_2() = default;
    // Configures per-core data split, binds global buffers, and allocates
    // UB queues/scratch via the supplied TPipe. Must be called before Process().
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y,
                                uint32_t smallCoreDataNum, uint32_t bigCoreDataNum,
                                uint32_t finalBigTileNum, uint32_t finalSmallTileNum,
                                uint16_t tileDataNum, uint16_t smallTailDataNum,
                                uint16_t bigTailDataNum, uint8_t tailBlockNum,AscendC::TPipe* pipe);
    // Loops over all tiles, running CopyIn -> Compute -> CopyOut per tile.
    __aicore__ inline void Process();

private:
    __aicore__ inline void CopyIn(int32_t progress);
    __aicore__ inline void CopyOut(int32_t progress);
    __aicore__ inline void Compute(int32_t progress);

private:
    TQue<QuePosition::VECIN,  BUFFER_NUM> inQueueX;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;
    TBuf<QuePosition::VECCALC> tmpBuf;          // scratch buffer for polynomial intermediates (x^2)
    GlobalTensor<TYPE_X> xGm;
    GlobalTensor<TYPE_Y> yGm;

    uint32_t coreDataNum;     // total element count assigned to this core
    uint32_t tileNum;         // number of tiles processed by this core
    uint16_t tileDataNum;     // element count of a full tile
    uint16_t tailDataNum;     // element count of the tail (last) tile
    uint16_t processDataNum;  // element count of the tile currently in flight
};


// Double-buffered erf kernel for non-float types (e.g. half): input is cast
// to float in xBuf/yBuf scratch, computed, then cast back to the output type.
template <typename TYPE_X, typename TYPE_Y>
class KernelErf_OTHER_2 {
public:
    __aicore__ inline KernelErf_OTHER_2() = default;
    // Configures per-core data split, binds global buffers, and allocates
    // UB queues/scratch via the supplied TPipe. Must be called before Process().
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y,
                                uint32_t smallCoreDataNum, uint32_t bigCoreDataNum,
                                uint32_t finalBigTileNum, uint32_t finalSmallTileNum,
                                uint16_t tileDataNum, uint16_t smallTailDataNum,
                                uint16_t bigTailDataNum, uint8_t tailBlockNum,AscendC::TPipe* pipe);
    // Loops over all tiles, running CopyIn -> Compute -> CopyOut per tile.
    __aicore__ inline void Process();

private:
    __aicore__ inline void CopyIn(int32_t progress);
    __aicore__ inline void CopyOut(int32_t progress);
    __aicore__ inline void Compute(int32_t progress);

private:
    TQue<QuePosition::VECIN,  BUFFER_NUM> inQueueX;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY;
    TBuf<QuePosition::VECCALC> tmpBuf,xBuf,yBuf;          // float scratch: x^2 term, cast input, cast output
    GlobalTensor<TYPE_X> xGm;
    GlobalTensor<TYPE_Y> yGm;

    uint32_t coreDataNum;     // total element count assigned to this core
    uint32_t tileNum;         // number of tiles processed by this core
    uint16_t tileDataNum;     // element count of a full tile
    uint16_t tailDataNum;     // element count of the tail (last) tile
    uint16_t processDataNum;  // element count of the tile currently in flight
};



// Single-buffered erf kernel for non-float types (e.g. half): the whole
// per-core workload is one tile; input is cast to float, computed, cast back.
template <typename TYPE_X, typename TYPE_Y>
class KernelErf_OTHER_1 {
public:
    __aicore__ inline KernelErf_OTHER_1() = default;
    // Configures per-core data split, binds global buffers, and allocates
    // UB queues/scratch via the supplied TPipe. Must be called before Process().
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y,
                                uint32_t smallCoreDataNum, uint32_t bigCoreDataNum,
                                uint32_t finalBigTileNum, uint32_t finalSmallTileNum,
                                uint16_t tileDataNum, uint16_t smallTailDataNum,
                                uint16_t bigTailDataNum, uint8_t tailBlockNum,AscendC::TPipe* pipe);
    // Runs the CopyIn -> Compute -> CopyOut pipeline once.
    __aicore__ inline void Process();

private:
    __aicore__ inline void CopyIn();
    __aicore__ inline void CopyOut();
    __aicore__ inline void Compute();

private:
    TQue<QuePosition::VECIN,  BUFFER_NUM0> inQueueX;
    TQue<QuePosition::VECOUT, BUFFER_NUM0> outQueueY;
    TBuf<QuePosition::VECCALC> tmpBuf,xBuf,yBuf;          // float scratch: x^2 term, cast input, cast output
    GlobalTensor<TYPE_X> xGm;
    GlobalTensor<TYPE_Y> yGm;

    uint32_t coreDataNum;     // total element count assigned to this core
    uint32_t tileNum;         // number of tiles processed by this core
    uint16_t tileDataNum;     // element count of a full tile
    uint16_t tailDataNum;     // element count of the tail (last) tile
    uint16_t processDataNum;  // element count of the tile currently in flight
};




/* ========== Implementation ========== */
template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelErf_FLOAT_1<TYPE_X, TYPE_Y>::Init(
    GM_ADDR x, GM_ADDR y,
    uint32_t smallCoreDataNum, uint32_t bigCoreDataNum,
    uint32_t finalBigTileNum,  uint32_t finalSmallTileNum,
    uint16_t tileDataNum,      uint16_t smallTailDataNum,
    uint16_t bigTailDataNum,   uint8_t  tailBlockNum,AscendC::TPipe* pipe)
{
    ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");
    const uint32_t blockIdx = GetBlockIdx();
    // The first tailBlockNum cores each carry the larger share of data.
    const bool isBigCore = blockIdx < tailBlockNum;

    this->tileDataNum = tileDataNum;
    this->coreDataNum = isBigCore ? bigCoreDataNum  : smallCoreDataNum;
    this->tileNum     = isBigCore ? finalBigTileNum : finalSmallTileNum;
    this->tailDataNum = isBigCore ? bigTailDataNum  : smallTailDataNum;

    // Big cores are laid out first in GM; small cores subtract the size
    // difference accumulated over the preceding small cores.
    uint32_t gmOffset = bigCoreDataNum * blockIdx;
    if (!isBigCore) {
        gmOffset -= (bigCoreDataNum - smallCoreDataNum) * (blockIdx - tailBlockNum);
    }

    xGm.SetGlobalBuffer((__gm__ TYPE_X*)x + gmOffset, this->coreDataNum);
    yGm.SetGlobalBuffer((__gm__ TYPE_Y*)y + gmOffset, this->coreDataNum);

    // Single-buffered queues plus one float scratch buffer for x^2.
    pipe->InitBuffer(inQueueX,  BUFFER_NUM0, this->tileDataNum * sizeof(TYPE_X));
    pipe->InitBuffer(outQueueY, BUFFER_NUM0, this->tileDataNum * sizeof(TYPE_Y));
    pipe->InitBuffer(tmpBuf,    this->tileDataNum * sizeof(float));
}




/* ========== Implementation ========== */
template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelErf_FLOAT_2<TYPE_X, TYPE_Y>::Init(
    GM_ADDR x, GM_ADDR y,
    uint32_t smallCoreDataNum, uint32_t bigCoreDataNum,
    uint32_t finalBigTileNum,  uint32_t finalSmallTileNum,
    uint16_t tileDataNum,      uint16_t smallTailDataNum,
    uint16_t bigTailDataNum,   uint8_t  tailBlockNum,AscendC::TPipe* pipe)
{
    ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");
    const uint32_t blockIdx = GetBlockIdx();
    // The first tailBlockNum cores each carry the larger share of data.
    const bool isBigCore = blockIdx < tailBlockNum;

    this->tileDataNum = tileDataNum;
    this->coreDataNum = isBigCore ? bigCoreDataNum  : smallCoreDataNum;
    this->tileNum     = isBigCore ? finalBigTileNum : finalSmallTileNum;
    this->tailDataNum = isBigCore ? bigTailDataNum  : smallTailDataNum;

    // Big cores are laid out first in GM; small cores subtract the size
    // difference accumulated over the preceding small cores.
    uint32_t gmOffset = bigCoreDataNum * blockIdx;
    if (!isBigCore) {
        gmOffset -= (bigCoreDataNum - smallCoreDataNum) * (blockIdx - tailBlockNum);
    }

    xGm.SetGlobalBuffer((__gm__ TYPE_X*)x + gmOffset, this->coreDataNum);
    yGm.SetGlobalBuffer((__gm__ TYPE_Y*)y + gmOffset, this->coreDataNum);

    // Double-buffered queues plus one float scratch buffer for x^2.
    pipe->InitBuffer(inQueueX,  BUFFER_NUM, this->tileDataNum * sizeof(TYPE_X));
    pipe->InitBuffer(outQueueY, BUFFER_NUM, this->tileDataNum * sizeof(TYPE_Y));
    pipe->InitBuffer(tmpBuf,    this->tileDataNum * sizeof(float));
}

/* ========== Implementation ========== */
template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelErf_OTHER_2<TYPE_X, TYPE_Y>::Init(
    GM_ADDR x, GM_ADDR y,
    uint32_t smallCoreDataNum, uint32_t bigCoreDataNum,
    uint32_t finalBigTileNum,  uint32_t finalSmallTileNum,
    uint16_t tileDataNum,      uint16_t smallTailDataNum,
    uint16_t bigTailDataNum,   uint8_t  tailBlockNum,AscendC::TPipe* pipe)
{
    ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");
    const uint32_t blockIdx = GetBlockIdx();
    // The first tailBlockNum cores each carry the larger share of data.
    const bool isBigCore = blockIdx < tailBlockNum;

    this->tileDataNum = tileDataNum;
    this->coreDataNum = isBigCore ? bigCoreDataNum  : smallCoreDataNum;
    this->tileNum     = isBigCore ? finalBigTileNum : finalSmallTileNum;
    this->tailDataNum = isBigCore ? bigTailDataNum  : smallTailDataNum;

    // Big cores are laid out first in GM; small cores subtract the size
    // difference accumulated over the preceding small cores.
    uint32_t gmOffset = bigCoreDataNum * blockIdx;
    if (!isBigCore) {
        gmOffset -= (bigCoreDataNum - smallCoreDataNum) * (blockIdx - tailBlockNum);
    }

    xGm.SetGlobalBuffer((__gm__ TYPE_X*)x + gmOffset, this->coreDataNum);
    yGm.SetGlobalBuffer((__gm__ TYPE_Y*)y + gmOffset, this->coreDataNum);

    // Double-buffered queues plus three float scratch buffers:
    // tmpBuf holds x^2, xBuf/yBuf hold the float-cast input/output.
    pipe->InitBuffer(inQueueX,  BUFFER_NUM, this->tileDataNum * sizeof(TYPE_X));
    pipe->InitBuffer(outQueueY, BUFFER_NUM, this->tileDataNum * sizeof(TYPE_Y));
    pipe->InitBuffer(tmpBuf,    this->tileDataNum * sizeof(float));
    pipe->InitBuffer(xBuf,      this->tileDataNum * sizeof(float));
    pipe->InitBuffer(yBuf,      this->tileDataNum * sizeof(float));
}




/* ========== Implementation ========== */
template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelErf_OTHER_1<TYPE_X, TYPE_Y>::Init(
    GM_ADDR x, GM_ADDR y,
    uint32_t smallCoreDataNum, uint32_t bigCoreDataNum,
    uint32_t finalBigTileNum,  uint32_t finalSmallTileNum,
    uint16_t tileDataNum,      uint16_t smallTailDataNum,
    uint16_t bigTailDataNum,   uint8_t  tailBlockNum,AscendC::TPipe* pipe)
{
    ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");
    const uint32_t blockIdx = GetBlockIdx();
    // The first tailBlockNum cores each carry the larger share of data.
    const bool isBigCore = blockIdx < tailBlockNum;

    this->tileDataNum = tileDataNum;
    this->coreDataNum = isBigCore ? bigCoreDataNum  : smallCoreDataNum;
    this->tileNum     = isBigCore ? finalBigTileNum : finalSmallTileNum;
    this->tailDataNum = isBigCore ? bigTailDataNum  : smallTailDataNum;

    // Big cores are laid out first in GM; small cores subtract the size
    // difference accumulated over the preceding small cores.
    uint32_t gmOffset = bigCoreDataNum * blockIdx;
    if (!isBigCore) {
        gmOffset -= (bigCoreDataNum - smallCoreDataNum) * (blockIdx - tailBlockNum);
    }

    xGm.SetGlobalBuffer((__gm__ TYPE_X*)x + gmOffset, this->coreDataNum);
    yGm.SetGlobalBuffer((__gm__ TYPE_Y*)y + gmOffset, this->coreDataNum);

    // Single-buffered queues plus three float scratch buffers:
    // tmpBuf holds x^2, xBuf/yBuf hold the float-cast input/output.
    pipe->InitBuffer(inQueueX,  BUFFER_NUM0, this->tileDataNum * sizeof(TYPE_X));
    pipe->InitBuffer(outQueueY, BUFFER_NUM0, this->tileDataNum * sizeof(TYPE_Y));
    pipe->InitBuffer(tmpBuf,    this->tileDataNum * sizeof(float));
    pipe->InitBuffer(xBuf,      this->tileDataNum * sizeof(float));
    pipe->InitBuffer(yBuf,      this->tileDataNum * sizeof(float));
}





template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelErf_OTHER_2<TYPE_X, TYPE_Y>::CopyIn(int32_t progress)
{
    // Stage tile `progress` from global memory into the VECIN queue.
    auto inTile = inQueueX.AllocTensor<TYPE_X>();
    DataCopy(inTile, xGm[progress * tileDataNum], this->processDataNum);
    inQueueX.EnQue(inTile);
}

template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelErf_FLOAT_1<TYPE_X, TYPE_Y>::CopyIn()
{
    // Single-shot load: this core's whole workload fits in one tile.
    auto inTile = inQueueX.AllocTensor<TYPE_X>();
    DataCopy(inTile, xGm, this->processDataNum);
    inQueueX.EnQue(inTile);
}


template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelErf_OTHER_1<TYPE_X, TYPE_Y>::CopyIn()
{
    // Single-shot load: this core's whole workload fits in one tile.
    auto inTile = inQueueX.AllocTensor<TYPE_X>();
    DataCopy(inTile, xGm, this->processDataNum);
    inQueueX.EnQue(inTile);
}

template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelErf_FLOAT_2<TYPE_X, TYPE_Y>::CopyIn(int32_t progress)
{
    // Stage tile `progress` from global memory into the VECIN queue.
    auto inTile = inQueueX.AllocTensor<TYPE_X>();
    DataCopy(inTile, xGm[progress * tileDataNum], this->processDataNum);
    inQueueX.EnQue(inTile);
}


template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelErf_OTHER_2<TYPE_X, TYPE_Y>::CopyOut(int32_t progress)
{
    // Drain the computed tile from the VECOUT queue back to global memory.
    auto outTile = outQueueY.DeQue<TYPE_Y>();
    DataCopy(yGm[progress * tileDataNum], outTile, this->processDataNum);
    outQueueY.FreeTensor(outTile);
}




template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelErf_FLOAT_1<TYPE_X, TYPE_Y>::CopyOut()
{
    // Drain the single computed tile back to global memory.
    auto outTile = outQueueY.DeQue<TYPE_Y>();
    DataCopy(yGm, outTile, this->processDataNum);
    outQueueY.FreeTensor(outTile);
}




template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelErf_OTHER_1<TYPE_X, TYPE_Y>::CopyOut()
{
    // Drain the single computed tile back to global memory.
    auto outTile = outQueueY.DeQue<TYPE_Y>();
    DataCopy(yGm, outTile, this->processDataNum);
    outQueueY.FreeTensor(outTile);
}


template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelErf_FLOAT_2<TYPE_X, TYPE_Y>::CopyOut(int32_t progress)
{
    // Drain the computed tile from the VECOUT queue back to global memory.
    auto outTile = outQueueY.DeQue<TYPE_Y>();
    DataCopy(yGm[progress * tileDataNum], outTile, this->processDataNum);
    outQueueY.FreeTensor(outTile);
}


// Computes erf on one tile of non-float data: cast to float, evaluate a
// rational approximation erf(x) ~= x*P(x^2)/Q(x^2) on the clamped input,
// then cast the result back to the output element type.
// Fix: the output tile was allocated and cast as TYPE_X, but CopyOut dequeues
// TYPE_Y from outQueueY — mismatched element types whenever TYPE_X != TYPE_Y.
template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelErf_OTHER_2<TYPE_X, TYPE_Y>::Compute(int32_t progress)
{
    // Cast the incoming tile to float working storage and release the queue slot.
    AscendC::LocalTensor<float> xLocal = xBuf.Get<float>();
    AscendC::LocalTensor<TYPE_X> xOrigin = inQueueX.DeQue<TYPE_X>();
    AscendC::Cast(xLocal, xOrigin, AscendC::RoundMode::CAST_NONE, this->processDataNum);
    inQueueX.FreeTensor(xOrigin);
    AscendC::LocalTensor<float> yLocal = yBuf.Get<float>();
    AscendC::LocalTensor<float> x2 = tmpBuf.Get<float>();

    /*---------- clamp: erf saturates to +/-1 outside [-3.92, 3.92] ----------*/
    AscendC::Mins(xLocal, xLocal, (float)3.92, this->processDataNum);
    AscendC::Maxs(xLocal, xLocal, (float)-3.92, this->processDataNum);

    /*---------- x^2 ----------*/
    AscendC::Mul(x2, xLocal, xLocal, this->processDataNum);

    /*---------- numerator P(x^2) * x (Horner evaluation) ----------*/
    AscendC::Muls(yLocal, x2, (float)0.053443748819, this->processDataNum);
    AscendC::Adds(yLocal, yLocal, (float)7.5517016694, this->processDataNum);
    AscendC::Mul(yLocal, yLocal, x2, this->processDataNum);
    AscendC::Adds(yLocal, yLocal, (float)101.62808918, this->processDataNum);
    AscendC::Mul(yLocal, yLocal, x2, this->processDataNum);
    AscendC::Adds(yLocal, yLocal, (float)1393.8061484, this->processDataNum);
    AscendC::Mul(yLocal, yLocal, x2, this->processDataNum);
    AscendC::Adds(yLocal, yLocal, (float)5063.7915060, this->processDataNum);
    AscendC::Mul(yLocal, yLocal, x2, this->processDataNum);
    AscendC::Adds(yLocal, yLocal, (float)29639.384698, this->processDataNum);
    AscendC::Mul(yLocal, yLocal, xLocal, this->processDataNum); // yLocal = numerator

    /*---------- denominator Q(x^2), built in-place in xLocal ----------*/
    AscendC::Adds(xLocal, x2, (float)31.212858877, this->processDataNum);
    AscendC::Mul(xLocal, xLocal, x2, this->processDataNum);
    AscendC::Adds(xLocal, xLocal, (float)398.56963806, this->processDataNum);
    AscendC::Mul(xLocal, xLocal, x2, this->processDataNum);
    AscendC::Adds(xLocal, xLocal, (float)3023.1248150, this->processDataNum);
    AscendC::Mul(xLocal, xLocal, x2, this->processDataNum);
    AscendC::Adds(xLocal, xLocal, (float)13243.365831, this->processDataNum);
    AscendC::Mul(xLocal, xLocal, x2, this->processDataNum);
    AscendC::Adds(xLocal, xLocal, (float)26267.224157, this->processDataNum); // xLocal = denominator

    /*---------- erf = numerator / denominator ----------*/
    AscendC::Div(yLocal, yLocal, xLocal, this->processDataNum);

    // Cast the float result to the output type (TYPE_Y, matching CopyOut's DeQue).
    AscendC::LocalTensor<TYPE_Y> yTarget = outQueueY.AllocTensor<TYPE_Y>();
    AscendC::Cast(yTarget, yLocal, AscendC::RoundMode::CAST_RINT, this->processDataNum);
    outQueueY.EnQue(yTarget);
}






// Computes erf on the single tile of non-float data: cast to float, evaluate
// a rational approximation erf(x) ~= x*P(x^2)/Q(x^2) on the clamped input,
// then cast the result back to the output element type.
// Fix: the output tile was allocated and cast as TYPE_X, but CopyOut dequeues
// TYPE_Y from outQueueY — mismatched element types whenever TYPE_X != TYPE_Y.
template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelErf_OTHER_1<TYPE_X, TYPE_Y>::Compute()
{
    // Cast the incoming tile to float working storage and release the queue slot.
    AscendC::LocalTensor<float> xLocal = xBuf.Get<float>();
    AscendC::LocalTensor<TYPE_X> xOrigin = inQueueX.DeQue<TYPE_X>();
    AscendC::Cast(xLocal, xOrigin, AscendC::RoundMode::CAST_NONE, this->processDataNum);
    inQueueX.FreeTensor(xOrigin);
    AscendC::LocalTensor<float> yLocal = yBuf.Get<float>();
    AscendC::LocalTensor<float> x2 = tmpBuf.Get<float>();

    /*---------- clamp: erf saturates to +/-1 outside [-3.92, 3.92] ----------*/
    AscendC::Mins(xLocal, xLocal, (float)3.92, this->processDataNum);
    AscendC::Maxs(xLocal, xLocal, (float)-3.92, this->processDataNum);

    /*---------- x^2 ----------*/
    AscendC::Mul(x2, xLocal, xLocal, this->processDataNum);

    /*---------- numerator P(x^2) * x (Horner evaluation) ----------*/
    AscendC::Muls(yLocal, x2, (float)0.053443748819, this->processDataNum);
    AscendC::Adds(yLocal, yLocal, (float)7.5517016694, this->processDataNum);
    AscendC::Mul(yLocal, yLocal, x2, this->processDataNum);
    AscendC::Adds(yLocal, yLocal, (float)101.62808918, this->processDataNum);
    AscendC::Mul(yLocal, yLocal, x2, this->processDataNum);
    AscendC::Adds(yLocal, yLocal, (float)1393.8061484, this->processDataNum);
    AscendC::Mul(yLocal, yLocal, x2, this->processDataNum);
    AscendC::Adds(yLocal, yLocal, (float)5063.7915060, this->processDataNum);
    AscendC::Mul(yLocal, yLocal, x2, this->processDataNum);
    AscendC::Adds(yLocal, yLocal, (float)29639.384698, this->processDataNum);
    AscendC::Mul(yLocal, yLocal, xLocal, this->processDataNum); // yLocal = numerator

    /*---------- denominator Q(x^2), built in-place in xLocal ----------*/
    AscendC::Adds(xLocal, x2, (float)31.212858877, this->processDataNum);
    AscendC::Mul(xLocal, xLocal, x2, this->processDataNum);
    AscendC::Adds(xLocal, xLocal, (float)398.56963806, this->processDataNum);
    AscendC::Mul(xLocal, xLocal, x2, this->processDataNum);
    AscendC::Adds(xLocal, xLocal, (float)3023.1248150, this->processDataNum);
    AscendC::Mul(xLocal, xLocal, x2, this->processDataNum);
    AscendC::Adds(xLocal, xLocal, (float)13243.365831, this->processDataNum);
    AscendC::Mul(xLocal, xLocal, x2, this->processDataNum);
    AscendC::Adds(xLocal, xLocal, (float)26267.224157, this->processDataNum); // xLocal = denominator

    /*---------- erf = numerator / denominator ----------*/
    AscendC::Div(yLocal, yLocal, xLocal, this->processDataNum);

    // Cast the float result to the output type (TYPE_Y, matching CopyOut's DeQue).
    AscendC::LocalTensor<TYPE_Y> yTarget = outQueueY.AllocTensor<TYPE_Y>();
    AscendC::Cast(yTarget, yLocal, AscendC::RoundMode::CAST_RINT, this->processDataNum);
    outQueueY.EnQue(yTarget);
}






// Computes erf in place on the single float tile using a rational
// approximation erf(x) ~= x*P(x^2)/Q(x^2) on the clamped input.
// Fix: yLocal was declared LocalTensor<TYPE_X> but assigned from
// AllocTensor<TYPE_Y>(); declare each tensor with its correct element type.
template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelErf_FLOAT_1<TYPE_X, TYPE_Y>::Compute()
{
    AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.DeQue<TYPE_X>();
    AscendC::LocalTensor<TYPE_Y> yLocal = outQueueY.AllocTensor<TYPE_Y>();
    AscendC::LocalTensor<float> x2 = tmpBuf.Get<float>();

    /*---------- clamp: erf saturates to +/-1 outside [-3.92, 3.92] ----------*/
    AscendC::Mins(xLocal, xLocal, (float)3.92, this->processDataNum);
    AscendC::Maxs(xLocal, xLocal, (float)-3.92, this->processDataNum);

    /*---------- x^2 ----------*/
    AscendC::Mul(x2, xLocal, xLocal, this->processDataNum);

    /*---------- numerator P(x^2) * x (Horner evaluation) ----------*/
    AscendC::Muls(yLocal, x2, (float)0.053443748819, this->processDataNum);
    AscendC::Adds(yLocal, yLocal, (float)7.5517016694, this->processDataNum);
    AscendC::Mul(yLocal, yLocal, x2, this->processDataNum);
    AscendC::Adds(yLocal, yLocal, (float)101.62808918, this->processDataNum);
    AscendC::Mul(yLocal, yLocal, x2, this->processDataNum);
    AscendC::Adds(yLocal, yLocal, (float)1393.8061484, this->processDataNum);
    AscendC::Mul(yLocal, yLocal, x2, this->processDataNum);
    AscendC::Adds(yLocal, yLocal, (float)5063.7915060, this->processDataNum);
    AscendC::Mul(yLocal, yLocal, x2, this->processDataNum);
    AscendC::Adds(yLocal, yLocal, (float)29639.384698, this->processDataNum);
    AscendC::Mul(yLocal, yLocal, xLocal, this->processDataNum); // yLocal = numerator

    /*---------- denominator Q(x^2), built in-place in xLocal ----------*/
    AscendC::Adds(xLocal, x2, (float)31.212858877, this->processDataNum);
    AscendC::Mul(xLocal, xLocal, x2, this->processDataNum);
    AscendC::Adds(xLocal, xLocal, (float)398.56963806, this->processDataNum);
    AscendC::Mul(xLocal, xLocal, x2, this->processDataNum);
    AscendC::Adds(xLocal, xLocal, (float)3023.1248150, this->processDataNum);
    AscendC::Mul(xLocal, xLocal, x2, this->processDataNum);
    AscendC::Adds(xLocal, xLocal, (float)13243.365831, this->processDataNum);
    AscendC::Mul(xLocal, xLocal, x2, this->processDataNum);
    AscendC::Adds(xLocal, xLocal, (float)26267.224157, this->processDataNum); // xLocal = denominator

    /*---------- erf = numerator / denominator ----------*/
    AscendC::Div(yLocal, yLocal, xLocal, this->processDataNum);

    outQueueY.EnQue(yLocal);
    inQueueX.FreeTensor(xLocal);
}




// Computes erf in place on one float tile using a rational approximation
// erf(x) ~= x*P(x^2)/Q(x^2) on the clamped input.
// Fix: yLocal was declared LocalTensor<TYPE_X> but assigned from
// AllocTensor<TYPE_Y>(); declare each tensor with its correct element type.
template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelErf_FLOAT_2<TYPE_X, TYPE_Y>::Compute(int32_t progress)
{
    AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.DeQue<TYPE_X>();
    AscendC::LocalTensor<TYPE_Y> yLocal = outQueueY.AllocTensor<TYPE_Y>();
    AscendC::LocalTensor<float> x2 = tmpBuf.Get<float>();

    /*---------- clamp: erf saturates to +/-1 outside [-3.92, 3.92] ----------*/
    AscendC::Mins(xLocal, xLocal, (float)3.92, this->processDataNum);
    AscendC::Maxs(xLocal, xLocal, (float)-3.92, this->processDataNum);

    /*---------- x^2 ----------*/
    AscendC::Mul(x2, xLocal, xLocal, this->processDataNum);

    /*---------- numerator P(x^2) * x (Horner evaluation) ----------*/
    AscendC::Muls(yLocal, x2, (float)0.053443748819, this->processDataNum);
    AscendC::Adds(yLocal, yLocal, (float)7.5517016694, this->processDataNum);
    AscendC::Mul(yLocal, yLocal, x2, this->processDataNum);
    AscendC::Adds(yLocal, yLocal, (float)101.62808918, this->processDataNum);
    AscendC::Mul(yLocal, yLocal, x2, this->processDataNum);
    AscendC::Adds(yLocal, yLocal, (float)1393.8061484, this->processDataNum);
    AscendC::Mul(yLocal, yLocal, x2, this->processDataNum);
    AscendC::Adds(yLocal, yLocal, (float)5063.7915060, this->processDataNum);
    AscendC::Mul(yLocal, yLocal, x2, this->processDataNum);
    AscendC::Adds(yLocal, yLocal, (float)29639.384698, this->processDataNum);
    AscendC::Mul(yLocal, yLocal, xLocal, this->processDataNum); // yLocal = numerator

    /*---------- denominator Q(x^2), built in-place in xLocal ----------*/
    AscendC::Adds(xLocal, x2, (float)31.212858877, this->processDataNum);
    AscendC::Mul(xLocal, xLocal, x2, this->processDataNum);
    AscendC::Adds(xLocal, xLocal, (float)398.56963806, this->processDataNum);
    AscendC::Mul(xLocal, xLocal, x2, this->processDataNum);
    AscendC::Adds(xLocal, xLocal, (float)3023.1248150, this->processDataNum);
    AscendC::Mul(xLocal, xLocal, x2, this->processDataNum);
    AscendC::Adds(xLocal, xLocal, (float)13243.365831, this->processDataNum);
    AscendC::Mul(xLocal, xLocal, x2, this->processDataNum);
    AscendC::Adds(xLocal, xLocal, (float)26267.224157, this->processDataNum); // xLocal = denominator

    /*---------- erf = numerator / denominator ----------*/
    AscendC::Div(yLocal, yLocal, xLocal, this->processDataNum);

    outQueueY.EnQue(yLocal);
    inQueueX.FreeTensor(xLocal);
}




// Drives the tile pipeline: all full-size tiles, then the tail tile.
// Robustness fix: guard against tileNum == 0, which would previously call
// CopyIn(-1)/Compute(-1)/CopyOut(-1) with a negative GM offset.
template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelErf_OTHER_2<TYPE_X, TYPE_Y>::Process()
{
    const int32_t loopCount = this->tileNum;
    if (loopCount <= 0) {
        return;  // nothing assigned to this core
    }
    this->processDataNum = this->tileDataNum;
    for (int32_t i = 0; i < loopCount - 1; i++) {
        CopyIn(i);
        Compute(i);
        CopyOut(i);
    }
    // Last tile may be shorter than a full tile.
    this->processDataNum = this->tailDataNum;
    CopyIn(loopCount - 1);
    Compute(loopCount - 1);
    CopyOut(loopCount - 1);
}



// Single-tile pipeline: the whole per-core workload is the tail tile,
// so run CopyIn -> Compute -> CopyOut exactly once.
template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelErf_FLOAT_1<TYPE_X, TYPE_Y>::Process()
{    
    this->processDataNum = this->tailDataNum;
    CopyIn();
    Compute();
    CopyOut();
}


// Single-tile pipeline: the whole per-core workload is the tail tile,
// so run CopyIn -> Compute -> CopyOut exactly once.
template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelErf_OTHER_1<TYPE_X, TYPE_Y>::Process()
{    
    this->processDataNum = this->tailDataNum;
    CopyIn();
    Compute();
    CopyOut();
}



// Drives the tile pipeline: all full-size tiles, then the tail tile.
// Robustness fix: guard against tileNum == 0, which would previously call
// CopyIn(-1)/Compute(-1)/CopyOut(-1) with a negative GM offset.
template <typename TYPE_X, typename TYPE_Y>
__aicore__ inline void KernelErf_FLOAT_2<TYPE_X, TYPE_Y>::Process()
{
    const int32_t loopCount = this->tileNum;
    if (loopCount <= 0) {
        return;  // nothing assigned to this core
    }
    this->processDataNum = this->tileDataNum;
    for (int32_t i = 0; i < loopCount - 1; i++) {
        CopyIn(i);
        Compute(i);
        CopyOut(i);
    }
    // Last tile may be shorter than a full tile.
    this->processDataNum = this->tailDataNum;
    CopyIn(loopCount - 1);
    Compute(loopCount - 1);
    CopyOut(loopCount - 1);
}



} // namespace MyErf
#endif // ERF_H





