#include "kernel_operator.h"

constexpr int32_t BUFFER_NUM = 2;  // 双缓冲
constexpr int32_t SLOT_STRIDE = 64 / sizeof(float);

template<typename TYPE_X>class KernelLpNormV2 {
    using T = TYPE_X;
public:
    // Default constructor: intentionally empty — all state is populated in Init().
    __aicore__ inline KernelLpNormV2() {}

    /// @brief Per-core setup: computes this core's slice of the flattened
    ///        (rows x cols) input, binds the GM input/output/workspace buffers,
    ///        and allocates the local (UB) pipe buffers.
    /// @param x, z          GM addresses of the input / output tensors.
    /// @param workspace     GM scratch holding one 64B slot per norm value,
    ///                      used for the cross-core reduction.
    /// @param smallCoreDataNum, bigCoreDataNum  element counts for small / big cores.
    /// @param finalBigTileNum, finalSmallTileNum  tile counts for big / small cores.
    /// @param tileDataNum   elements per full tile.
    /// @param smallTailDataNum, bigTailDataNum  elements in the last (tail) tile.
    /// @param tailBlockNum  number of "big" cores (those carrying one extra share).
    /// @param p             norm order; -1 => max-norm, -2 => min-norm, else L_p.
    /// @param axis          reduction axis: 0 = whole tensor, 1 = per column, 2 = per row.
    /// @param epsilon       added to the reduced sum before taking the p-th root.
    /// @param dataTypeId    1 => half input, otherwise float.
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR z, GM_ADDR workspace,
                                uint32_t smallCoreDataNum, uint32_t bigCoreDataNum,
                                uint32_t finalBigTileNum, uint32_t finalSmallTileNum,
                                uint32_t tileDataNum, uint32_t smallTailDataNum,
                                uint32_t bigTailDataNum, uint32_t tailBlockNum,
                                float p,uint32_t axis, uint32_t rows, uint32_t cols,
                                float epsilon,uint32_t dataTypeId)
    {
        ASSERT(AscendC::GetBlockNum() != 0 && "block dim can not be zero!");

        // Normalize an illegal axis BEFORE it is stored, so this->axis, the
        // workspace sizing and the Process() dispatch all agree.
        // (Bug fix: previously only the local copy was corrected, leaving
        // this->axis illegal, so Process() matched no branch and silently
        // did nothing.)
        if (axis > 2) {
            AscendC::PRINTF("illegal axis data , trans to 0");
            axis = 0;
        }

        uint32_t coreIdx = AscendC::GetBlockIdx();
        uint32_t globalBufferIndex = bigCoreDataNum * coreIdx;
        this->tileDataNum = tileDataNum;
        this->axis = axis;
        this->rows = rows;
        this->cols = cols;
        this->epsilon = epsilon;
        this->pVal = p;
        this->keyType = dataTypeId;

        // The first tailBlockNum cores each take the bigger share; later cores
        // take the smaller share, so their start offset is pulled back by the
        // accumulated difference.
        if (coreIdx < tailBlockNum) {
            this->coreDataNum = bigCoreDataNum;   // unit: element count
            this->tileNum = finalBigTileNum;
            this->tailDataNum = bigTailDataNum;
        } else {
            this->coreDataNum = smallCoreDataNum;
            this->tileNum = finalSmallTileNum;
            this->tailDataNum = smallTailDataNum;
            globalBufferIndex -= (bigCoreDataNum - smallCoreDataNum) * (coreIdx - tailBlockNum);
        }

        // --- Safety: never bind GM memory beyond the input range ---
        uint32_t totalElements = rows * cols; // total input elements
        if (globalBufferIndex >= totalElements) {
            // This core received no input data: bind empty buffers and zero
            // coreDataNum to avoid out-of-range access later.
            this->coreDataNum = 0;
            xGm.SetGlobalBuffer((__gm__ TYPE_X*)x, 0);
            zGm.SetGlobalBuffer((__gm__ TYPE_X*)z, 0);
        } else {
            uint32_t available = totalElements - globalBufferIndex;
            uint32_t bindLen = (available < this->coreDataNum) ? available : this->coreDataNum;
            // SetGlobalBuffer's second argument is an element count.
            xGm.SetGlobalBuffer((__gm__ TYPE_X*)x + globalBufferIndex, bindLen);
            zGm.SetGlobalBuffer((__gm__ TYPE_X*)z + globalBufferIndex, bindLen);
            // Keep the local view in sync with the actually bound length
            // (all later computation uses coreDataNum).
            this->coreDataNum = bindLen;
        }

        // Workspace size depends strictly on the reduction axis: one 64-byte
        // slot per norm value, so that concurrent cores never update the same
        // cache line (shared 64B lines would be randomly overwritten).
        uint32_t workGmSize = 0;
        if (axis == 1) {
            workGmSize = cols * SLOT_STRIDE;
        } else if (axis == 2) {
            workGmSize = rows * SLOT_STRIDE;
        } else { // axis == 0 (already normalized above)
            workGmSize = 1 * SLOT_STRIDE;
        }
        workGm.SetGlobalBuffer((__gm__ float*)workspace , workGmSize);

        // Local I/O queues (double buffered).
        pipe.InitBuffer(inQueueInput, BUFFER_NUM, this->tileDataNum * sizeof(TYPE_X));
        pipe.InitBuffer(outQueueOutput, BUFFER_NUM, this->tileDataNum * sizeof(TYPE_X));

        // Compute scratch buffers.
        pipe.InitBuffer(poweredBuf, tileDataNum * sizeof(float));  // abs(x)^p results
        pipe.InitBuffer(tmp_p, tileDataNum * sizeof(float));       // broadcast p for vector Power
        pipe.InitBuffer(tmp_norm, tileDataNum * sizeof(float));    // broadcast norm for normalization
        pipe.InitBuffer(tmp_base, 64 * sizeof(float));             // sum + epsilon scratch
        pipe.InitBuffer(tmpBuffer, 64 * sizeof(float));            // atomic-write staging buffer
        if (axis == 1) {
            pipe.InitBuffer(col_base, cols * sizeof(float));       // per-column partials
        } else if (axis == 2) {
            pipe.InitBuffer(row_base, rows * sizeof(float));       // per-row partials
        }
        this->GlobalOffset = globalBufferIndex;
        this->blockIdx = AscendC::GetBlockIdx();
        this->blockNum = AscendC::GetBlockNum();
    }

    /// @brief Dispatches to the kernel variant selected by the norm order
    ///        (pVal == -1/-2 => max/min paths) and the reduction axis.
    __aicore__ inline void Process() {
        const bool extremumMode = (this->pVal == -1 || this->pVal == -2);
        switch (this->axis) {
            case 0: extremumMode ? MAllProcess() : AllProcess(); break;
            case 1: extremumMode ? MColProcess() : ColProcess(); break;
            case 2: extremumMode ? MRowProcess() : RowProcess(); break;
            default: break; // unknown axis: nothing to do
        }
    }

private:
// ---------------------------- Max/Min norm, whole tensor -----------------------------
    /// @brief Computes the max (pVal == -1) or min (pVal == -2) of |x| over the
    ///        whole tensor across all cores, then writes x / norm to the output.
    __aicore__ inline void MAllProcess() {
        const uint32_t loopCount = this->tileNum;
        const uint32_t tileLen = this->tileDataNum;
        const uint32_t lastTileLen = this->tailDataNum;
        const bool isMax = (this->pVal == -1);

        // ---------------- 1. Local reduction seed ----------------
        // max of |x| may start from 0 (|x| >= 0); min starts from +FLT_MAX.
        // (Bug fix: the previous code stored the int literals 0x00000001 /
        // 0x7F7FFFFF, which convert to the float VALUES 1.0f / 2.1e9f rather
        // than the intended bit patterns — corrupting the max result whenever
        // all |x| < 1, and understating the min seed.)
        AscendC::LocalTensor<float> localSum = tmp_base.Get<float>();
        localSum.SetValue(0, isMax ? 0.0f : 3.40282347e+38f);

        // ---------------- 2. Pipelined copy-in + local reduction ----------------
        for (uint32_t t = 0; t < loopCount; ++t) {
            uint32_t curTileLen = (t == loopCount - 1) ? lastTileLen : tileLen;

            // 2.1 copy in
            CopyIn(t);
            AscendC::LocalTensor<TYPE_X> tileLocal = inQueueInput.DeQue<TYPE_X>();

            // 2.2 half -> float conversion (scalar copy for float input)
            AscendC::LocalTensor<float> tileFloat = poweredBuf.Get<float>();
            if (keyType == 1) { // half
                AscendC::Cast(tileFloat, tileLocal, AscendC::RoundMode::CAST_NONE, curTileLen);
            } else { // float
                for (uint32_t i = 0; i < curTileLen; ++i) {
                    tileFloat.SetValue(i, tileLocal.GetValue(i));
                }
            }

            // 2.3 abs; the Power() step of the L_p paths is not needed here,
            // so the previous dead broadcast of pVal into tmp_p was removed.
            AscendC::Abs(tileFloat, tileFloat, curTileLen);

            // 2.4 reduce the tile, then fold into the running local extremum
            float tileExt = tileFloat.GetValue(0);
            for (uint32_t i = 1; i < curTileLen; ++i) {
                float v = tileFloat.GetValue(i);
                if (isMax) {
                    tileExt = tileExt > v ? tileExt : v;
                } else {
                    tileExt = tileExt < v ? tileExt : v;
                }
            }
            if (isMax) {
                if (localSum.GetValue(0) < tileExt) localSum.SetValue(0, tileExt);
            } else {
                if (localSum.GetValue(0) > tileExt) localSum.SetValue(0, tileExt);
            }

            inQueueInput.FreeTensor(tileLocal);
        }

        // ---------------- 3. Atomic merge into the global workspace ----------------
        AscendC::LocalTensor<float> tmpBuf = tmpBuffer.Get<float>();
        AscendC::Duplicate(tmpBuf, 0.0f, 8);           // clear staging slot
        tmpBuf.SetValue(0, localSum.GetValue(0));      // only slot 0 is meaningful

        // (Bug fix: an unconditional SetAtomicMax() previously followed this
        // if/else, silently overriding the min mode with max semantics.)
        if (isMax) {
            AscendC::SetAtomicMax<float>();
        } else {
            // NOTE(review): atomic-min merging presumes the host pre-fills the
            // workspace with +FLT_MAX; a zero-initialized workspace would clamp
            // the min-norm to 0 — confirm against the host-side tiling code.
            AscendC::SetAtomicMin<float>();
        }
        AscendC::DataCopy(workGm[0], tmpBuf, 8);       // 8 floats (32B aligned)
        AscendC::SetAtomicNone();

        // ---------------- 4. Hard sync across cores ----------------
        AscendC::DataCacheCleanAndInvalid<float, AscendC::CacheLine::SINGLE_CACHE_LINE, AscendC::DcciDst::CACHELINE_OUT>(workGm[0]);
        AscendC::SyncAll();

        float pNorm = workGm.GetValue(0);

        // ---------------- 5. Pipelined normalization + copy out ----------------
        for (uint32_t t = 0; t < loopCount; ++t) {
            uint32_t curTileLen = (t == loopCount - 1) ? lastTileLen : tileLen;

            CopyIn(t);
            AscendC::LocalTensor<TYPE_X> tileLocal = inQueueInput.DeQue<TYPE_X>();
            AscendC::LocalTensor<TYPE_X> tileOut = outQueueOutput.AllocTensor<TYPE_X>();

            // Element-wise x / norm. Dividing by the scalar directly replaces
            // the previous redundant broadcast of pNorm into tmp_norm.
            for (uint32_t i = 0; i < curTileLen; ++i) {
                float val = (keyType == 1) ? static_cast<float>(tileLocal.GetValue(i))
                                           : tileLocal.GetValue(i);
                tileOut.SetValue(i, static_cast<TYPE_X>(val / pNorm));
            }

            outQueueOutput.EnQue(tileOut);
            inQueueInput.FreeTensor(tileLocal);
            CopyOut(t);
        }
    }

// ------------------------------ Max/Min norm, per column ------------------------------
    /// @brief Computes the max (pVal == -1) or min (pVal == -2) of |x| for each
    ///        column across cores — one 64B workGm slot per column — then
    ///        writes x / norm(col) to the output.
    __aicore__ inline void MColProcess() {
        const uint32_t colNum = this->cols;
        const uint32_t totalElements = this->rows * colNum;

        const uint32_t tileNum = this->tileNum;
        const uint32_t tileLen = this->tileDataNum;
        const uint32_t lastTileLen = this->tailDataNum;
        const uint32_t globalOffset = this->GlobalOffset;
        const bool isMax = (this->pVal == -1);

        const float POS_INF = 3.40282347e+38f;   // approx FLT_MAX
        const float NEG_INF = -3.40282347e+38f;

        // ---------------- 1. Per-column local extremum buffer ----------------
        AscendC::LocalTensor<float> localSum = col_base.Get<float>();
        for (uint32_t c = 0; c < colNum; ++c) {
            localSum.SetValue(c, isMax ? NEG_INF : POS_INF);
        }

        // ---------------- 2. Pipelined copy-in + local reduction ----------------
        for (uint32_t t = 0; t < tileNum; ++t) {
            uint32_t curTileLen = (t == tileNum - 1) ? lastTileLen : tileLen;

            // 2.1 copy in
            CopyIn(t);
            AscendC::LocalTensor<TYPE_X> tileLocal = inQueueInput.DeQue<TYPE_X>();
            AscendC::LocalTensor<float> tileFloat = poweredBuf.Get<float>();
            if (keyType == 1) { // half
                AscendC::Cast(tileFloat, tileLocal, AscendC::RoundMode::CAST_NONE, curTileLen);
            } else { // float
                for (uint32_t i = 0; i < curTileLen; ++i) {
                    tileFloat.SetValue(i, tileLocal.GetValue(i));
                }
            }

            // 2.2 abs; the Power() step of the L_p paths is not needed here,
            // so the previous dead broadcast of pVal into tmp_p was removed.
            AscendC::Abs(tileFloat, tileFloat, curTileLen);

            // 2.3 fold each element into its column slot
            for (uint32_t i = 0; i < curTileLen; ++i) {
                uint32_t globalIdx = globalOffset + t * tileLen + i;
                if (globalIdx >= totalElements) {
                    // Bug fix: this tail-padding guard was missing here (the
                    // L_p ColProcess already had it) — padded lanes could fold
                    // garbage into a column extremum.
                    continue;
                }
                uint32_t colIdx = globalIdx % colNum;
                float v = tileFloat.GetValue(i);
                if (isMax) {
                    if (v > localSum.GetValue(colIdx)) localSum.SetValue(colIdx, v);
                } else {
                    if (v < localSum.GetValue(colIdx)) localSum.SetValue(colIdx, v);
                }
            }

            inQueueInput.FreeTensor(tileLocal);
        }

        // ---------------- 3. Atomic merge into per-column workGm slots ----------------
        AscendC::LocalTensor<float> tmpBuf = tmpBuffer.Get<float>();
        if (isMax) {
            AscendC::SetAtomicMax<float>();
        } else {
            AscendC::SetAtomicMin<float>();
        }
        for (uint32_t c = 0; c < colNum; ++c) {
            uint32_t index = c * SLOT_STRIDE;
            AscendC::Duplicate(tmpBuf, 0.0f, SLOT_STRIDE);
            tmpBuf.SetValue(0, localSum.GetValue(c));

            AscendC::DataCopy(workGm[index], tmpBuf, 8);  // independent 64B slot per column

            // (fixed debug label: this is a column index, not a row)
            AscendC::PRINTF("[DEBUG][Core %u] AtomicWrite col=%u value=%f\n",AscendC::GetBlockIdx(), c, localSum.GetValue(c));
        }
        AscendC::SetAtomicNone();

        // ---------------- 4. Hard sync ----------------
        AscendC::SyncAll();

        // ---------------- 5. Normalize and copy out (pipelined) ----------------
        for (uint32_t t = 0; t < tileNum; ++t) {
            uint32_t curTileLen = (t == tileNum - 1) ? lastTileLen : tileLen;

            // 5.1 copy in
            CopyIn(t);
            AscendC::LocalTensor<TYPE_X> tileLocal = inQueueInput.DeQue<TYPE_X>();
            AscendC::LocalTensor<TYPE_X> tileOut = outQueueOutput.AllocTensor<TYPE_X>();

            // 5.2 normalize
            for (uint32_t i = 0; i < curTileLen; ++i) {
                uint32_t globalIdx = globalOffset + t * tileLen + i;
                if (globalIdx >= totalElements) {
                    tileOut.SetValue(i, static_cast<TYPE_X>(0.0f));
                    continue;
                }
                uint32_t colIdx = globalIdx % colNum;

                float norm = workGm.GetValue(colIdx * SLOT_STRIDE);
                float val = (this->keyType == 1) ? static_cast<float>(tileLocal.GetValue(i))
                                                 : tileLocal.GetValue(i);
                tileOut.SetValue(i, static_cast<TYPE_X>(val / norm));
            }

            // 5.3 copy out
            outQueueOutput.EnQue(tileOut);
            inQueueInput.FreeTensor(tileLocal);
            CopyOut(t);
        }
    }

// ------------------------------ Max/Min norm, per row ------------------------------
    /// @brief Computes the max (pVal == -1) or min (pVal == -2) of |x| for each
    ///        row across cores — one 64B workGm slot per row — then writes
    ///        x / norm(row) to the output.
    __aicore__ inline void MRowProcess() {
        const uint32_t colNum = this->cols;
        const uint32_t rowNum = this->rows;
        const uint32_t totalElements = rowNum * colNum;

        const uint32_t tileNum = this->tileNum;
        const uint32_t tileLen = this->tileDataNum;
        const uint32_t lastTileLen = this->tailDataNum;
        const uint32_t globalOffset = this->GlobalOffset;
        const bool isMax = (this->pVal == -1);

        AscendC::PRINTF("[DEBUG][Core %u] Enter MRowProcess: rows=%u cols=%u tileNum=%u tileLen=%u tail=%u\n",AscendC::GetBlockIdx(), rowNum, colNum, tileNum, tileLen, lastTileLen);

        const float POS_INF = 3.40282347e+38f;   // approx FLT_MAX
        const float NEG_INF = -3.40282347e+38f;

        // ---------------- 1. Per-row local extremum buffer ----------------
        AscendC::LocalTensor<float> localSum = row_base.Get<float>();
        for (uint32_t r = 0; r < rowNum; ++r) {
            localSum.SetValue(r, isMax ? NEG_INF : POS_INF);
        }

        // ---------------- 2. Pipelined copy-in + local reduction ----------------
        for (uint32_t t = 0; t < tileNum; ++t) {
            uint32_t curTileLen = (t == tileNum - 1) ? lastTileLen : tileLen;

            // 2.1 copy in
            CopyIn(t);
            AscendC::PRINTF("[DEBUG][Core %u] Tile %u copyIn OK, curTileLen=%u\n",AscendC::GetBlockIdx(), t, curTileLen);
            AscendC::LocalTensor<TYPE_X> tileLocal = inQueueInput.DeQue<TYPE_X>();
            AscendC::LocalTensor<float> tileFloat = poweredBuf.Get<float>();
            if (keyType == 1) { // half
                AscendC::Cast(tileFloat, tileLocal, AscendC::RoundMode::CAST_NONE, curTileLen);
            } else { // float
                for (uint32_t i = 0; i < curTileLen; ++i) {
                    tileFloat.SetValue(i, tileLocal.GetValue(i));
                }
            }

            // (fix: unqualified min() replaced with an explicit clamp)
            uint32_t dbgCount = curTileLen < 4u ? curTileLen : 4u;
            for (uint32_t i = 0; i < dbgCount; ++i) {
                AscendC::PRINTF("[DEBUG][Core %u] tileLocal[%u]=%f\n",
                    AscendC::GetBlockIdx(), i, (float)tileLocal.GetValue(i));
            }

            // 2.2 abs; the Power() step of the L_p paths is not needed here,
            // so the previous dead broadcast of pVal into tmp_p was removed.
            AscendC::Abs(tileFloat, tileFloat, curTileLen);

            AscendC::PRINTF("[DEBUG][Core %u] After Abs: tileFloat[0]=%f\n",AscendC::GetBlockIdx(), tileFloat.GetValue(0));

            // 2.3 fold each element into its row slot
            for (uint32_t i = 0; i < curTileLen; ++i) {
                uint32_t globalIdx = globalOffset + t * tileLen + i;
                uint32_t rowIdx = globalIdx / colNum;
                if (rowIdx < rowNum) {  // also excludes globalIdx >= totalElements
                    float v = tileFloat.GetValue(i);
                    if (isMax) {
                        if (v > localSum.GetValue(rowIdx)) localSum.SetValue(rowIdx, v);
                    } else {
                        if (v < localSum.GetValue(rowIdx)) localSum.SetValue(rowIdx, v);
                    }
                }
            }

            // (fix: only dump four rows when at least four exist — the old
            // unconditional GetValue(1..3) read past row_base when rowNum < 4)
            if (t == tileNum - 1 && rowNum >= 4) {
                AscendC::PRINTF("[DEBUG][Core %u] localSum first 4 rows: %f %f %f %f\n",
                    AscendC::GetBlockIdx(),
                    localSum.GetValue(0),
                    localSum.GetValue(1),
                    localSum.GetValue(2),
                    localSum.GetValue(3));
            }

            inQueueInput.FreeTensor(tileLocal);
        }

        // ---------------- 3. Atomic merge into per-row workGm slots ----------------
        AscendC::LocalTensor<float> tmpBuf = tmpBuffer.Get<float>();
        if (isMax) {
            AscendC::SetAtomicMax<float>();
        } else {
            AscendC::SetAtomicMin<float>();
        }
        for (uint32_t r = 0; r < rowNum; ++r) {
            uint32_t index = r * SLOT_STRIDE;
            AscendC::Duplicate(tmpBuf, 0.0f, SLOT_STRIDE);
            tmpBuf.SetValue(0, localSum.GetValue(r));

            AscendC::DataCopy(workGm[index], tmpBuf, 8);  // independent 64B slot per row

            AscendC::PRINTF("[DEBUG][Core %u] AtomicWrite row=%u value=%f\n",AscendC::GetBlockIdx(), r, localSum.GetValue(r));
        }
        AscendC::SetAtomicNone();

        // ---------------- 4. Hard sync ----------------
        AscendC::SyncAll();

        // ---------------- 5. Normalize and copy out (pipelined) ----------------
        for (uint32_t t = 0; t < tileNum; ++t) {
            uint32_t curTileLen = (t == tileNum - 1) ? lastTileLen : tileLen;

            // 5.1 copy in
            CopyIn(t);
            AscendC::LocalTensor<TYPE_X> tileLocal = inQueueInput.DeQue<TYPE_X>();
            AscendC::LocalTensor<TYPE_X> tileOut = outQueueOutput.AllocTensor<TYPE_X>();

            // 5.2 normalize
            for (uint32_t i = 0; i < curTileLen; ++i) {
                uint32_t globalIdx = globalOffset + t * tileLen + i;
                if (globalIdx >= totalElements) {
                    tileOut.SetValue(i, static_cast<TYPE_X>(0.0f));
                    continue;
                }
                uint32_t rowIdx = globalIdx / colNum;

                float norm = workGm.GetValue(rowIdx * SLOT_STRIDE);
                float val = (this->keyType == 1) ? static_cast<float>(tileLocal.GetValue(i))
                                                 : tileLocal.GetValue(i);
                tileOut.SetValue(i, static_cast<TYPE_X>(val / norm));

                if (t == 0 && i < 4) {
                    AscendC::PRINTF("[DEBUG][Core %u] Read norm row=%u value=%f\n",AscendC::GetBlockIdx(), rowIdx, norm);
                    float outVal;
                    if (keyType == 1) outVal = (float)tileOut.GetValue(i);
                    else outVal = tileOut.GetValue(i);

                    AscendC::PRINTF("[DEBUG][Core %u] Normalize out[%u] = %f\n",AscendC::GetBlockIdx(), i, outVal);
                }
            }

            // 5.3 copy out
            outQueueOutput.EnQue(tileOut);
            inQueueInput.FreeTensor(tileLocal);
            CopyOut(t);
        }
    }

// ------------------------------ L_p norm, whole tensor ------------------------------
    /// @brief Computes (sum(|x|^p) + epsilon)^(1/p) over the whole tensor
    ///        across all cores, then writes x / norm to the output.
    __aicore__ inline void AllProcess() {
        const uint32_t loopCount = this->tileNum;
        const uint32_t blockLen = this->coreDataNum;
        const uint32_t tileLen = this->tileDataNum;
        const uint32_t lastTileLen = this->tailDataNum;
        const uint32_t globalOffset = this->GlobalOffset;

        AscendC::PRINTF("[Core %d] GlobalOffset=%u coreDataNum=%u tileNum=%u tileLen=%u lastTileLen=%u\n",
        AscendC::GetBlockIdx(), globalOffset, blockLen, loopCount, tileLen, lastTileLen);

        // ---------------- 1. Local accumulator ----------------
        AscendC::LocalTensor<float> localSum = tmp_base.Get<float>();
        localSum.SetValue(0, 0.0f);

        // ---------------- 2. Pipelined copy-in + local accumulation ----------------
        for (uint32_t t = 0; t < loopCount; ++t) {
            uint32_t curTileLen = (t == loopCount - 1) ? lastTileLen : tileLen;

            // 2.1 copy in
            CopyIn(t);
            AscendC::LocalTensor<TYPE_X> tileLocal = inQueueInput.DeQue<TYPE_X>();

            // 2.2 half -> float conversion (scalar copy for float input)
            AscendC::LocalTensor<float> tileFloat = poweredBuf.Get<float>();
            if (keyType == 1) { // half
                AscendC::Cast(tileFloat, tileLocal, AscendC::RoundMode::CAST_NONE, curTileLen);
            } else { // float
                for (uint32_t i = 0; i < curTileLen; ++i) {
                    tileFloat.SetValue(i, tileLocal.GetValue(i));
                }
            }

            // 2.3 |x|^p via vector Abs + Power (p broadcast into tmp_p)
            AscendC::LocalTensor<float> pTensor = tmp_p.Get<float>();
            AscendC::Duplicate(pTensor, pVal, curTileLen);
            AscendC::Abs(tileFloat, tileFloat, curTileLen);
            AscendC::Power(tileFloat, tileFloat, pTensor, curTileLen);

            // 2.4 accumulate the tile into the local partial sum
            float tileSum = 0.0f;
            for (uint32_t i = 0; i < curTileLen; ++i) {
                tileSum += tileFloat.GetValue(i);
            }
            localSum.SetValue(0, localSum.GetValue(0) + tileSum);

            AscendC::PRINTF("[Core %d] localSum : %f\n",  AscendC::GetBlockIdx(), localSum.GetValue(0) );
            inQueueInput.FreeTensor(tileLocal);
        }

        // ---------------- 3. Atomic add into the global workspace ----------------
        AscendC::LocalTensor<float> tmpBuf = tmpBuffer.Get<float>();
        AscendC::Duplicate(tmpBuf, 0.0f, 8);           // clear staging slot
        tmpBuf.SetValue(0, localSum.GetValue(0));      // only slot 0 is meaningful

        AscendC::SetAtomicAdd<float>();
        AscendC::DataCopy(workGm[0], tmpBuf, 8);       // 8 floats (32B aligned)
        AscendC::SetAtomicNone();

        // ---------------- debug ----------------
        AscendC::PRINTF("[Core %d] blockLength : %d\n", AscendC::GetBlockIdx(),  blockLen );
        AscendC::PRINTF("[Core %d] workGm[0] after atomic add: : %f\n",  AscendC::GetBlockIdx(),workGm.GetValue(0));

        // ---------------- 4. Hard sync ----------------
        AscendC::DataCacheCleanAndInvalid<float, AscendC::CacheLine::SINGLE_CACHE_LINE, AscendC::DcciDst::CACHELINE_OUT>(workGm[0]);
        AscendC::SyncAll();

        // ---------------- 5. Core 0 takes the p-th root ----------------
        // (sum + eps)^(1/p) computed as exp(ln(sum + eps) / p).
        if (AscendC::GetBlockIdx() == 0) {
            float globalSum = workGm.GetValue(0);
            AscendC::LocalTensor<float> base = tmp_base.Get<float>();
            base.SetValue(0, globalSum + epsilon);

            AscendC::Ln(base, base, 1);
            AscendC::Muls(base, base, 1.0f / pVal, 1);
            AscendC::Exp(base, base, 1);

            workGm.SetValue(0, base.GetValue(0));
            AscendC::DataCacheCleanAndInvalid<float, AscendC::CacheLine::SINGLE_CACHE_LINE, AscendC::DcciDst::CACHELINE_OUT>(workGm[0]);
        }

        AscendC::SyncAll();
        float pNorm = workGm.GetValue(0);
        AscendC::PRINTF("[Core %d] Final pNorm = %f\n", AscendC::GetBlockIdx(), pNorm);

        // ---------------- 6. Pipelined normalization + copy out ----------------
        for (uint32_t t = 0; t < loopCount; ++t) {
            uint32_t curTileLen = (t == loopCount - 1) ? lastTileLen : tileLen;

            CopyIn(t);
            AscendC::LocalTensor<TYPE_X> tileLocal = inQueueInput.DeQue<TYPE_X>();
            AscendC::LocalTensor<TYPE_X> tileOut = outQueueOutput.AllocTensor<TYPE_X>();

            // Element-wise x / norm. Dividing by the scalar directly replaces
            // the previous redundant broadcast of pNorm into tmp_norm followed
            // by per-element GetValue on identical lanes.
            for (uint32_t i = 0; i < curTileLen; ++i) {
                float val = (keyType == 1) ? static_cast<float>(tileLocal.GetValue(i))
                                           : tileLocal.GetValue(i);
                tileOut.SetValue(i, static_cast<TYPE_X>(val / pNorm));
            }

            outQueueOutput.EnQue(tileOut);
            inQueueInput.FreeTensor(tileLocal);
            CopyOut(t);
        }
    }

  // ------------------------------ L_p norm, per column ------------------------------
    /// @brief Computes (sum(|x|^p) + epsilon)^(1/p) for each column across
    ///        cores — one 64B workGm slot per column — then writes
    ///        x / norm(col) to the output.
    __aicore__ inline void ColProcess() {
        const uint32_t colNum = this->cols;
        const uint32_t rowNum = this->rows;
        const uint32_t totalElements = rowNum * colNum;

        const uint32_t tileNum = this->tileNum;
        const uint32_t tileLen = this->tileDataNum;
        const uint32_t lastTileLen = this->tailDataNum;
        const uint32_t globalOffset = this->GlobalOffset;

        // ---------------- 1. Per-column local accumulator ----------------
        AscendC::LocalTensor<float> localSum = col_base.Get<float>();
        for (uint32_t c = 0; c < colNum; ++c) {
            localSum.SetValue(c, 0.0f);
        }

        // ---------------- 2. Pipelined copy-in + local accumulation ----------------
        for (uint32_t t = 0; t < tileNum; ++t) {
            uint32_t curTileLen = (t == tileNum - 1) ? lastTileLen : tileLen;

            // 2.1 copy in
            CopyIn(t);
            AscendC::LocalTensor<TYPE_X> tileLocal = inQueueInput.DeQue<TYPE_X>();
            AscendC::LocalTensor<float> tileFloat = poweredBuf.Get<float>();

            if (keyType == 1) { // half
                AscendC::Cast(tileFloat, tileLocal, AscendC::RoundMode::CAST_NONE, curTileLen);
            } else { // float
                for (uint32_t i = 0; i < curTileLen; ++i) {
                    tileFloat.SetValue(i, tileLocal.GetValue(i));
                }
            }

            // 2.2 |x|^p via vector Abs + Power (p broadcast into tmp_p)
            AscendC::LocalTensor<float> pTensor = tmp_p.Get<float>();
            AscendC::Duplicate(pTensor, this->pVal, curTileLen);
            AscendC::Abs(tileFloat, tileFloat, curTileLen);
            AscendC::Power(tileFloat, tileFloat, pTensor, curTileLen);

            // 2.3 accumulate each element into its column slot
            for (uint32_t i = 0; i < curTileLen; ++i) {
                uint32_t globalIdx = globalOffset + t * tileLen + i;
                if (globalIdx >= totalElements) {  // guard tail padding
                    AscendC::PRINTF("[Core %d] Skip invalid globalIdx %d\n", blockIdx, globalIdx);
                    continue;
                }
                uint32_t colIdx = globalIdx % colNum;
                localSum.SetValue(colIdx, localSum.GetValue(colIdx) + tileFloat.GetValue(i));
            }

            inQueueInput.FreeTensor(tileLocal);
        }

        // ---------------- 3. Atomic add into per-column workGm slots ----------------
        AscendC::LocalTensor<float> tmpBuf = tmpBuffer.Get<float>();
        AscendC::SetAtomicAdd<float>();
        for (uint32_t c = 0; c < colNum; ++c) {
            uint32_t index = c * SLOT_STRIDE;
            AscendC::Duplicate(tmpBuf, 0.0f, SLOT_STRIDE);
            tmpBuf.SetValue(0, localSum.GetValue(c));

            AscendC::DataCopy(workGm[index], tmpBuf, 8);  // independent 64B slot per column
            AscendC::PRINTF("[Core %d] workGm[%d] after atomic add: : %f\n",  AscendC::GetBlockIdx(),index, workGm.GetValue(index));
        }
        AscendC::SetAtomicNone();

        // ---------------- 4. Hard sync ----------------
        AscendC::SyncAll();

        // ---------------- 5. Core 0 takes the p-th root per column ----------------
        if (AscendC::GetBlockIdx() == 0) {
            AscendC::LocalTensor<float> base = tmp_base.Get<float>();
            AscendC::PRINTF("actual pnorm \n ");
            for (uint32_t c = 0; c < colNum; ++c) {
                uint32_t index = c * SLOT_STRIDE;

                base.SetValue(0, workGm.GetValue(index) + this->epsilon);

                // (bug fix: this debug read used base.GetValue(c * SLOT_STRIDE),
                // reading past the 64-float tmp_base for c >= 4; only slot 0
                // of base ever holds data)
                AscendC::PRINTF("base set %f  ", base.GetValue(0));
                // (sum + eps)^(1/p) computed as exp(ln(.) / p)
                AscendC::Ln(base, base, 1);
                AscendC::Muls(base, base, 1.0f / this->pVal, 1);
                AscendC::Exp(base, base, 1);
                workGm.SetValue(index, base.GetValue(0));

                AscendC::PRINTF("workGm[%d] set %f\n", index, base.GetValue(0));

                AscendC::DataCacheCleanAndInvalid<float, AscendC::CacheLine::SINGLE_CACHE_LINE, AscendC::DcciDst::CACHELINE_OUT>(workGm[index]);
            }
            AscendC::PRINTF(" \n ");
        }

        // ---------------- 6. Sync again, then (core 0) dump the final norms ----------------
        AscendC::SyncAll();
        if (AscendC::GetBlockIdx() == 0) {
            for (uint32_t c = 0; c < colNum; ++c) {
                if ((c % 7 == 0) && (c != 0)) {
                    AscendC::PRINTF(" %f \n ",  workGm.GetValue(c * SLOT_STRIDE));
                } else {
                    AscendC::PRINTF(" %f ",  workGm.GetValue(c * SLOT_STRIDE));
                }
            }
        }

        // ---------------- 7. Normalize and copy out (pipelined) ----------------
        for (uint32_t t = 0; t < tileNum; ++t) {
            uint32_t curTileLen = (t == tileNum - 1) ? lastTileLen : tileLen;

            // 7.1 copy in
            CopyIn(t);
            AscendC::LocalTensor<TYPE_X> tileLocal = inQueueInput.DeQue<TYPE_X>();
            AscendC::LocalTensor<TYPE_X> tileOut = outQueueOutput.AllocTensor<TYPE_X>();

            // 7.2 normalize
            for (uint32_t i = 0; i < curTileLen; ++i) {
                uint32_t globalIdx = globalOffset + t * tileLen + i;
                if (globalIdx >= totalElements) {
                    tileOut.SetValue(i, static_cast<TYPE_X>(0.0f));
                    continue;
                }
                uint32_t colIdx = globalIdx % colNum;

                float norm = workGm.GetValue(colIdx * SLOT_STRIDE);
                float val = (this->keyType == 1) ? static_cast<float>(tileLocal.GetValue(i))
                                                 : tileLocal.GetValue(i);
                tileOut.SetValue(i, static_cast<TYPE_X>(val / norm));
            }

            // 7.3 copy out
            outQueueOutput.EnQue(tileOut);
            inQueueInput.FreeTensor(tileLocal);
            CopyOut(t);
        }
    }

  // ------------------------------ 行范数处理 ------------------------------
    __aicore__ inline void RowProcess() {
        // Lp-normalize along each row:
        //   out[r][c] = x[r][c] / ( (sum_c |x[r][c]|^p) + epsilon )^(1/p)
        // Multi-core protocol:
        //   1) each core accumulates |x|^p per row into a local buffer,
        //   2) atomically adds its partial sums into workGm — one cache-line
        //      slot per row (stride SLOT_STRIDE floats) so the 8-float atomic
        //      DataCopy never overlaps a neighbouring row's slot,
        //   3) hard sync; core 0 applies the 1/p root in-place in workGm,
        //   4) hard sync; every core re-reads its tiles and divides.
        const uint32_t colNum = this->cols;
        const uint32_t rowNum = this->rows;
        const uint32_t totalElements = rowNum * colNum;

        const uint32_t tileNum = this->tileNum;
        const uint32_t tileLen = this->tileDataNum;
        const uint32_t lastTileLen = this->tailDataNum;
        const uint32_t globalOffset = this->GlobalOffset;

        // ---------------- 1. Zero the per-row local accumulator ----------------
        AscendC::LocalTensor<float> localSum = row_base.Get<float>();
        for (uint32_t c = 0; c < rowNum; ++c) {
            localSum.SetValue(c, 0.0f);
        }

        // ---------------- 2. Pipeline: copy-in / local compute ----------------
        for (uint32_t t = 0; t < tileNum; ++t) {
            uint32_t curTileLen = (t == tileNum - 1) ? lastTileLen : tileLen;

            // 2.1 Stage the tile into UB.
            CopyIn(t);
            AscendC::LocalTensor<TYPE_X> tileLocal = inQueueInput.DeQue<TYPE_X>();
            AscendC::LocalTensor<float> tileFloat = poweredBuf.Get<float>();
            if (keyType == 1) { // half input: widen to float for pow/accumulate
                AscendC::Cast(tileFloat, tileLocal, AscendC::RoundMode::CAST_NONE, curTileLen);
            } else { // float input: element-wise copy into the float work buffer
                for (uint32_t i = 0; i < curTileLen; ++i) {
                    tileFloat.SetValue(i, tileLocal.GetValue(i));
                }
            }

            // 2.2 Local compute: abs(x)^p.
            AscendC::LocalTensor<float> pTensor = tmp_p.Get<float>();
            AscendC::Duplicate(pTensor, this->pVal, curTileLen);

            AscendC::Abs(tileFloat, tileFloat, curTileLen);
            AscendC::Power(tileFloat, tileFloat, pTensor, curTileLen);

            // 2.3 Scatter-accumulate into localSum, keyed by row index.
            for (uint32_t i = 0; i < curTileLen; ++i) {
                uint32_t globalIdx = globalOffset + t * tileLen + i;
                if (globalIdx >= totalElements) {
                    // Tail tile may be padded past the real data; skip those lanes.
                    // NOTE: query the block index directly — the blockIdx member
                    // is not assigned anywhere visible in this class (old code
                    // printed it uninitialized).
                    AscendC::PRINTF("[Core %d] Skip invalid globalIdx %d\n",
                                    (int)AscendC::GetBlockIdx(), globalIdx);
                    continue;
                }
                uint32_t rowIdx = globalIdx / colNum;
                if (rowIdx < rowNum) {  // defensive bound re-check
                    localSum.SetValue(rowIdx, localSum.GetValue(rowIdx) + tileFloat.GetValue(i));
                }
            }

            inQueueInput.FreeTensor(tileLocal);
        }

        // ---------------- 3. Atomic-add partial sums into workGm ----------------
        AscendC::LocalTensor<float> tmpBuf = tmpBuffer.Get<float>();
        AscendC::SetAtomicAdd<float>();
        for (uint32_t c = 0; c < rowNum; ++c) {
            uint32_t index = c * SLOT_STRIDE;
            AscendC::Duplicate(tmpBuf, 0.0f, 8);       // zero lanes add 0 atomically
            tmpBuf.SetValue(0, localSum.GetValue(c));

            AscendC::DataCopy(workGm[index], tmpBuf, 8);  // write to the row's own slot

            AscendC::PRINTF("workGm[%d] set %f \n", index, tmpBuf.GetValue(0));
        }
        AscendC::SetAtomicNone();

        // ---------------- 4. Hard sync ----------------
        AscendC::SyncAll();

        // ---------------- 5. Core 0 applies the 1/p root ----------------
        if (AscendC::GetBlockIdx() == 0) {
            AscendC::LocalTensor<float> base = tmp_base.Get<float>();
            AscendC::PRINTF("actual pnorm \n ");
            for (uint32_t c = 0; c < rowNum; ++c) {
                uint32_t index = c * SLOT_STRIDE;
                base.SetValue(0, workGm.GetValue(index) + this->epsilon);
                // BUGFIX: print slot 0 (the value just written above); the old
                // code read base[c * SLOT_STRIDE], which is past the 1-element
                // scratch slot and printed garbage.
                AscendC::PRINTF("base set %f \n", base.GetValue(0));
                // x^(1/p) computed as exp(ln(x) / p).
                AscendC::Ln(base, base, 1);
                AscendC::Muls(base, base, 1.0f / this->pVal, 1);
                AscendC::Exp(base, base, 1);
                workGm.SetValue(index, base.GetValue(0));
                AscendC::PRINTF("workGm[%d] set %f \n", index, base.GetValue(0));

                // Flush the cache line so other cores observe the final norm.
                AscendC::DataCacheCleanAndInvalid<float, AscendC::CacheLine::SINGLE_CACHE_LINE, AscendC::DcciDst::CACHELINE_OUT>(workGm[index]);
            }
            AscendC::PRINTF(" \n ");
        }

        // ---------------- 6. Hard sync again ----------------
        AscendC::SyncAll();
        if (AscendC::GetBlockIdx() == 0) {
            // Debug dump of the final row norms.
            for (uint32_t c = 0; c < rowNum; ++c) {
                if ((c % 7 == 0) && (c != 0)) {
                    AscendC::PRINTF(" %f \n ", workGm.GetValue(c * SLOT_STRIDE));
                } else {
                    AscendC::PRINTF(" %f ", workGm.GetValue(c * SLOT_STRIDE));
                }
            }
        }

        // ---------------- 7. Normalize and copy out (pipelined) ----------------
        for (uint32_t t = 0; t < tileNum; ++t) {
            uint32_t curTileLen = (t == tileNum - 1) ? lastTileLen : tileLen;

            // 7.1 Re-stage the tile into UB.
            CopyIn(t);
            AscendC::LocalTensor<TYPE_X> tileLocal = inQueueInput.DeQue<TYPE_X>();
            AscendC::LocalTensor<TYPE_X> tileOut = outQueueOutput.AllocTensor<TYPE_X>();

            // 7.2 Divide each element by its row norm.
            for (uint32_t i = 0; i < curTileLen; ++i) {
                uint32_t globalIdx = globalOffset + t * tileLen + i;
                if (globalIdx >= totalElements) {
                    tileOut.SetValue(i, static_cast<TYPE_X>(0.0f));  // zero the padding
                    continue;
                }
                uint32_t rowIdx = globalIdx / colNum;

                float norm = workGm.GetValue(rowIdx * SLOT_STRIDE);
                if (this->keyType == 1) { // half: compute in float, narrow on store
                    float val = static_cast<float>(tileLocal.GetValue(i));
                    tileOut.SetValue(i, static_cast<TYPE_X>(val / norm));
                } else {
                    float val = tileLocal.GetValue(i);
                    tileOut.SetValue(i, val / norm);
                }
            }

            // 7.3 Copy the result back to GM.
            outQueueOutput.EnQue(tileOut);
            inQueueInput.FreeTensor(tileLocal);
            CopyOut(t);
        }
    }

    __aicore__ inline void CopyIn(int32_t progress) {
        // Stage one tile of input from global memory into the VECIN queue.
        // The final tile of a core's range uses the (possibly shorter) tail
        // length; all earlier tiles use the full tile length.
        const bool isLastTile = (progress == tileNum - 1);
        const uint32_t copyLen = isLastTile ? tailDataNum : tileDataNum;
        AscendC::LocalTensor<TYPE_X> inTile = inQueueInput.AllocTensor<TYPE_X>();
        AscendC::DataCopy(inTile, xGm[progress * this->tileDataNum], copyLen);
        inQueueInput.EnQue(inTile);
    }

    __aicore__ inline void CopyOut(int32_t progress) {
        // Drain one computed tile from the VECOUT queue back to global memory,
        // mirroring CopyIn's tile/tail length selection.
        const bool isLastTile = (progress == tileNum - 1);
        const uint32_t copyLen = isLastTile ? tailDataNum : tileDataNum;
        AscendC::LocalTensor<TYPE_X> outTile = outQueueOutput.DeQue<TYPE_X>();
        AscendC::DataCopy(zGm[progress * this->tileDataNum], outTile, copyLen);
        outQueueOutput.FreeTensor(outTile);
    }

private:
    AscendC::TPipe pipe;  // owns all on-chip buffer/queue allocations
    // Double-buffered (BUFFER_NUM = 2) data-movement queues.
    AscendC::TQue<AscendC::QuePosition::VECIN, BUFFER_NUM> inQueueInput;
    AscendC::TQue<AscendC::QuePosition::VECOUT, BUFFER_NUM> outQueueOutput;


    AscendC::GlobalTensor<TYPE_X> xGm;        // input tensor in global memory
    AscendC::GlobalTensor<TYPE_X> zGm;        // output tensor in global memory
    AscendC::GlobalTensor<float> workGm;      // multi-core sync buffer (holds partial / global sums, one SLOT_STRIDE slot per row/col)

    // On-chip compute buffers (VECCALC position).
    AscendC::TBuf<AscendC::TPosition::VECCALC> poweredBuf;  // holds abs(x)^p results
    AscendC::TBuf<AscendC::TPosition::VECCALC> tmp_p;       // vector filled with the exponent p
    AscendC::TBuf<AscendC::TPosition::VECCALC> tmp_norm;    // norm vector scratch
    AscendC::TBuf<AscendC::TPosition::VECCALC> tmp_base;    // 1-element scratch for sum + epsilon
    AscendC::TBuf<AscendC::TPosition::VECCALC> row_base;    // per-row local accumulator (RowProcess)
    AscendC::TBuf<AscendC::TPosition::VECCALC> col_base;    // per-column local accumulator
    AscendC::TBuf<AscendC::TPosition::VECCALC> tmpBuffer;   // 8-float staging buffer for atomic GM writes

    // NOTE(review): blockIdx/blockNum are not assigned in the visible part of
    // Init — confirm they are set before use (RowProcess prints blockIdx).
    uint32_t blockIdx;      // current core index (0 .. blockNum-1)
    uint32_t blockNum;      // total number of cores
    uint32_t GlobalOffset;  // this core's starting element offset into the input

    // Per-core tiling parameters (element counts, set in Init).
    uint32_t coreDataNum;   // total elements this core processes
    uint32_t tileNum;       // number of tiles for this core
    uint32_t tileDataNum;   // elements per full tile
    uint32_t tailDataNum;   // elements in the final (tail) tile

    uint32_t keyType;       // dtype id from tiling (1 == half path)
    uint32_t axis;          // reduction axis selector
    uint32_t rows;          // input row count
    uint32_t cols;          // input column count
    float epsilon;          // added to the sum before taking the 1/p root
    float pVal;             // the p in Lp-norm
};

extern "C" __global__ __aicore__ void lp_norm_v2_custom(GM_ADDR x, GM_ADDR z, GM_ADDR workspace, GM_ADDR tiling) {
    // Kernel entry point: unpack the host-side tiling data, configure the
    // operator instance with it, then run the norm computation.
    GET_TILING_DATA(tiling_data, tiling);

    KernelLpNormV2<DTYPE_INPUT> op;
    op.Init(x, z, workspace,
            tiling_data.smallCoreDataNum, tiling_data.bigCoreDataNum,
            tiling_data.finalBigTileNum, tiling_data.finalSmallTileNum,
            tiling_data.tileDataNum, tiling_data.smallTailDataNum,
            tiling_data.bigTailDataNum, tiling_data.tailBlockNum,
            tiling_data.p, tiling_data.axis,
            tiling_data.rows, tiling_data.cols,
            tiling_data.epsilon, tiling_data.dataTypeId);
    op.Process();
}
