#include "kernel_operator.h"
// Tensors per queue: 2 enables double buffering (ping-pong), so data
// movement of one tile can overlap with vector compute on the other.
constexpr int32_t BUFFER_NUM = 2;

/**
 * @brief Tiled element-wise logit operator: z = logit(x) = ln(x / (1 - x)).
 *
 * Work is split across AI cores into "big" cores (one extra data block each)
 * and "small" cores; each core then iterates over fixed-size tiles through a
 * double-buffered CopyIn -> Compute -> CopyOut pipeline.
 *
 * @tparam TYPE_X element type of the input tensor (e.g. float / half)
 * @tparam TYPE_Z element type of the output tensor
 */
template<typename TYPE_X, typename TYPE_Z> 
class KernelLogit {
    using T = TYPE_X;
public:
    __aicore__ inline KernelLogit() {}

    /**
     * @brief Bind this core's slice of global memory and allocate pipe buffers.
     *
     * The first tailBlockNum cores are "big" cores and process one extra
     * data block each; the remaining cores are "small" cores.
     *
     * @param x                 Global address of the input tensor.
     * @param z                 Global address of the output tensor.
     * @param smallCoreDataNum  Elements processed by each small core.
     * @param bigCoreDataNum    Elements processed by each big core.
     * @param finalBigTileNum   Tile count on a big core.
     * @param finalSmallTileNum Tile count on a small core.
     * @param tileDataNum       Elements in one full tile.
     * @param smallTailDataNum  Elements in a small core's last tile.
     * @param bigTailDataNum    Elements in a big core's last tile.
     * @param tailBlockNum      Number of big cores.
     */
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR z, uint32_t smallCoreDataNum,
                                uint32_t bigCoreDataNum, uint32_t finalBigTileNum, 
                                uint32_t finalSmallTileNum, uint32_t tileDataNum, 
                                uint32_t smallTailDataNum, uint32_t bigTailDataNum, 
                                uint32_t tailBlockNum) 
    {
        ASSERT(AscendC::GetBlockNum() != 0 && "block dim can not be zero!");
        // NOTE: despite the name, this holds the index of the current core.
        uint32_t coreNum = AscendC::GetBlockIdx();
        uint32_t globalBufferIndex = 0;

        if (coreNum < tailBlockNum) { 
            // Big core: gets the larger per-core workload.
            this->coreDataNum = bigCoreDataNum;
            this->tileNum = finalBigTileNum;
            this->tailDataNum = bigTailDataNum;
            globalBufferIndex = bigCoreDataNum * coreNum;
        }
        else { 
            // Small core: offset past all big cores' data, then past the
            // preceding small cores' data.
            this->coreDataNum = smallCoreDataNum;
            this->tileNum = finalSmallTileNum;
            this->tailDataNum = smallTailDataNum;
            globalBufferIndex = bigCoreDataNum * tailBlockNum + smallCoreDataNum * (coreNum - tailBlockNum);
        }

        this->tileDataNum = tileDataNum;
        xGm.SetGlobalBuffer((__gm__ TYPE_X*)x + globalBufferIndex, this->coreDataNum);
        zGm.SetGlobalBuffer((__gm__ TYPE_Z*)z + globalBufferIndex, this->coreDataNum);
        // Double-buffered in/out queues plus four scratch buffers for the
        // intermediate terms of logit(x).
        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->tileDataNum * sizeof(TYPE_X));
        pipe.InitBuffer(outQueueZ, BUFFER_NUM, this->tileDataNum * sizeof(TYPE_Z));
        pipe.InitBuffer(tmpBuf0, this->tileDataNum * sizeof(TYPE_X));
        pipe.InitBuffer(tmpBuf1, this->tileDataNum * sizeof(TYPE_X));
        pipe.InitBuffer(tmpBuf2, this->tileDataNum * sizeof(TYPE_X));
        pipe.InitBuffer(tmpBuf3, this->tileDataNum * sizeof(TYPE_X));
    }
    
    /// @brief Drive the CopyIn -> Compute -> CopyOut pipeline over all tiles.
    __aicore__ inline void Process()
    {
        int32_t loopCount = this->tileNum;
        this->processDataNum = this->tileDataNum;

        for (int32_t i = 0; i < loopCount; i++) {
            // The last tile may be shorter than a full tile.
            if (i == this->tileNum - 1 && this->tailDataNum != this->tileDataNum) {
                this->processDataNum = this->tailDataNum;
            }
            CopyIn(i);
            Compute(i);
            CopyOut(i);
        }
    }

private:
    /// @brief Stage one tile of x from global memory into the input queue.
    __aicore__ inline void CopyIn(int32_t progress)
    {
        AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.AllocTensor<TYPE_X>();
        AscendC::DataCopy(xLocal, xGm[progress * this->tileDataNum], this->processDataNum);
        inQueueX.EnQue(xLocal);
    }

    /// @brief Compute z = logit(x) = ln(x) - ln(1 - x) for one tile, with
    ///        the input clipped into [eps, 1-eps] for numerical stability.
    __aicore__ inline void Compute(int32_t progress)
    {
        AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.DeQue<TYPE_X>();
        AscendC::LocalTensor<TYPE_Z> zLocal = outQueueZ.AllocTensor<TYPE_Z>();
        AscendC::LocalTensor<TYPE_X> tmp0 = tmpBuf0.Get<TYPE_X>();
        AscendC::LocalTensor<TYPE_X> tmp1 = tmpBuf1.Get<TYPE_X>();
        AscendC::LocalTensor<TYPE_X> tmp2 = tmpBuf2.Get<TYPE_X>();
        AscendC::LocalTensor<TYPE_X> tmp3 = tmpBuf3.Get<TYPE_X>();

        // Clamp margin selected per element width. FIX: with a 2-byte (half)
        // element type, 1.0 - 1e-7 rounds to exactly 1.0 (the largest fp16
        // value below 1.0 is 1 - 2^-11), so the upper clamp was a no-op and
        // inputs >= 1 produced ln(0) = -inf; 1e-7 also falls into fp16's
        // subnormal range. 1e-3 is safely representable and distinct from
        // 1.0 in fp16. 4-byte (float) behavior is unchanged.
        constexpr double clampEps = (sizeof(TYPE_X) == 2) ? 1e-3 : 1e-7;
        const TYPE_X eps = static_cast<TYPE_X>(clampEps);                    // lower guard: avoid ln(0)
        const TYPE_X one_minus_eps = static_cast<TYPE_X>(1.0 - clampEps);    // upper guard: avoid ln(1-1)
        
        // Step 1: clip the input into [eps, 1-eps].
        AscendC::Maxs(tmp1, xLocal, eps, this->processDataNum);           // max(x, eps)
        AscendC::Mins(tmp2, tmp1, one_minus_eps, this->processDataNum);   // min(max(x, eps), 1-eps)

        // logit(x) = ln(x / (1 - x)) = ln(x) - ln(1 - x)
        
        // ln(x_clipped)
        AscendC::Ln(tmp0, tmp2, this->processDataNum);
        
        // 1 - x_clipped
        AscendC::Duplicate(tmp1, static_cast<TYPE_X>(1.0), this->processDataNum);
        AscendC::Sub(tmp3, tmp1, tmp2, this->processDataNum);
        
        // ln(1 - x_clipped)
        AscendC::Ln(tmp1, tmp3, this->processDataNum);
        
        // ln(x) - ln(1-x)
        AscendC::Sub(zLocal, tmp0, tmp1, this->processDataNum);

        outQueueZ.EnQue<TYPE_Z>(zLocal);
        inQueueX.FreeTensor(xLocal);
    }

    /// @brief Write one computed tile back to global memory.
    __aicore__ inline void CopyOut(int32_t progress)
    {
        AscendC::LocalTensor<TYPE_Z> zLocal = outQueueZ.DeQue<TYPE_Z>();  
        AscendC::DataCopy(zGm[progress * this->tileDataNum], zLocal, this->processDataNum);
        outQueueZ.FreeTensor(zLocal);
    }

private:
    AscendC::TPipe pipe;
    AscendC::TQue<AscendC::QuePosition::VECIN, BUFFER_NUM> inQueueX;
    AscendC::TQue<AscendC::QuePosition::VECOUT, BUFFER_NUM> outQueueZ;
    // Scratch space for the intermediate terms of logit(x).
    AscendC::TBuf<AscendC::QuePosition::VECCALC> tmpBuf0;
    AscendC::TBuf<AscendC::QuePosition::VECCALC> tmpBuf1;
    AscendC::TBuf<AscendC::QuePosition::VECCALC> tmpBuf2;
    AscendC::TBuf<AscendC::QuePosition::VECCALC> tmpBuf3;
    AscendC::GlobalTensor<TYPE_X> xGm;
    AscendC::GlobalTensor<TYPE_Z> zGm;

    uint32_t coreDataNum;     // total elements this core processes
    uint32_t tileNum;         // tiles this core iterates over
    uint32_t tileDataNum;     // elements in a full tile
    uint32_t tailDataNum;     // elements in this core's last tile
    uint32_t processDataNum;  // elements in the tile currently in flight
};

/**
 * @brief Device entry point: element-wise z = logit(x).
 *
 * @param x         Input tensor in global memory.
 * @param z         Output tensor in global memory.
 * @param workspace Device workspace (unused by this operator).
 * @param tiling    Serialized tiling parameters produced on the host.
 */
extern "C" __global__ __aicore__ void logit_custom(GM_ADDR x, GM_ADDR z, GM_ADDR workspace, GM_ADDR tiling) {
    // Deserialize the host-side tiling description.
    GET_TILING_DATA(tiling_data, tiling);

    // Configure the operator with this core's share of the work, then run
    // the tiled pipeline.
    KernelLogit<DTYPE_X, DTYPE_Z> op;
    op.Init(x, z,
            tiling_data.smallCoreDataNum, tiling_data.bigCoreDataNum,
            tiling_data.finalBigTileNum, tiling_data.finalSmallTileNum,
            tiling_data.tileDataNum, tiling_data.smallTailDataNum,
            tiling_data.bigTailDataNum, tiling_data.tailBlockNum);
    op.Process();
}