#include "kernel_operator.h"
#include "lp_norm_v2_custom_tiling.h"  
constexpr uint32_t LAST_TWO_TILE = 2;  // NOTE(review): unused in this file — candidate for removal
constexpr uint32_t BUFFER_NUM = 2;  // queue depth used for double buffering
constexpr uint32_t SLOT_STRIDE = 64 / sizeof(float); // global workspace slot stride (64B-aligned, one norm per slot)

template <typename dataType>
class KernelLpNormV2 {
public:
  __aicore__ inline KernelLpNormV2() {}

  /**
   * @brief Kernel initialization: reads tiling parameters, binds GM buffers,
   *        and allocates on-chip (UB) buffers.
   * @param x input tensor GM address
   * @param work scratch GM address used for cross-core reduction
   *        (one 64B slot per norm value)
   * @param z output tensor GM address
   * @param tiling tiling parameter struct
   */
  __aicore__ inline void Init(GM_ADDR x, GM_ADDR work, GM_ADDR z, LpNormV2CustomTilingData tiling) {
    // Current core index and total core count
    uint32_t blockIdx = AscendC::GetBlockIdx();
    uint32_t blockNum = AscendC::GetBlockNum();
    ASSERT(blockNum != 0 && "block dim can not be zero!");

    // Pick per-core tiling parameters according to the split strategy.
    if (tiling.isEvenCore == 1U) {
        // Even split: every core handles the same amount of data.
        this->blockLength = tiling.blockLength;
        this->tileNum = tiling.tileNum;
        this->tileLength = tiling.tileLength;
        this->lastTileLength = tiling.lastTileLength;
    } else {
        // Uneven split: the first `formerNum` cores ("former") handle a
        // larger share than the remaining ("tail") cores.
        if (blockIdx < tiling.formerNum) {
            // "Former" cores
            this->blockLength = tiling.formerLength;
            this->tileNum = tiling.formerTileNum;
            this->tileLength = tiling.formerTileLength;
            this->lastTileLength = tiling.formerLastTileLength;
        } else {
            // "Tail" cores
            this->blockLength = tiling.tailLength;
            this->tileNum = tiling.tailTileNum;
            this->tileLength = tiling.tailTileLength;
            this->lastTileLength = tiling.tailLastTileLength;
        }
    }

    this->pVal = tiling.pValue;
    this->axis = tiling.axis;          // reduction axis (0: global, 1: per-column, 2: per-row)
    this->epsilon = tiling.epsilon;    // numerical-stability offset (avoids div-by-zero when sum == 0)
    this->cols = tiling.cols;          // input matrix column count
    this->rows = tiling.rows;          // input matrix row count
    this->keyType = tiling.dataType;   // 0: float32, 1: float16

    // Compute this core's element offset into global memory.
    uint32_t globalOffset = 0;
    if (tiling.isEvenCore == 0U) {
        if (blockIdx < tiling.formerNum) {
            // Offset for "former" cores
            globalOffset = blockIdx * tiling.formerLength;
        } else {
            // Offset for "tail" cores: after all former cores, then tail-sized steps
            globalOffset = tiling.formerNum * tiling.formerLength +
                          (blockIdx - tiling.formerNum) * tiling.tailLength;
        }
    } else {
        // Offset for the even split
        globalOffset = blockIdx * tiling.blockLength;
    }

    // Double buffering is enabled only for the global-norm path.
    // Fix: use the named constant instead of a magic literal 2.
    buffernum = (axis == 0) ? BUFFER_NUM : 1;

    // Bind global-memory buffers at this core's offset.
    inputGm.SetGlobalBuffer((__gm__ dataType *)x + globalOffset, blockLength);
    outputGm.SetGlobalBuffer((__gm__ dataType *)z + globalOffset, blockLength);

    // Workspace setup: each norm value occupies one 64B-aligned slot.
    if (this->axis == 0) {
        workGm.SetGlobalBuffer((__gm__ float *)work , 1 * SLOT_STRIDE );                // global reduction
    } else if (this->axis == 1) {
        workGm.SetGlobalBuffer((__gm__ float *)work , this->cols * SLOT_STRIDE );      // per-column reduction
    } else if (this->axis == 2) {
        workGm.SetGlobalBuffer((__gm__ float *)work , this->rows * SLOT_STRIDE );      // per-row reduction
    } else {
        AscendC::PRINTF("Error: unsupported axis = %d\n", tiling.axis);
    }

    // On-chip queue buffers.
    pipe.InitBuffer(inQueueInput, buffernum, tileLength * sizeof(dataType));
    pipe.InitBuffer(outQueueOutput, buffernum, tileLength * sizeof(dataType));

    // Compute buffers.
    pipe.InitBuffer(poweredBuf, tileLength * sizeof(float));  // holds abs(x)^p results
    pipe.InitBuffer(singalpoweredBuf, 1 * sizeof(float));     // NOTE(review): allocated but never used below
    pipe.InitBuffer(tmp_p, tileLength * sizeof(float));       // broadcast p value for vector ops
    pipe.InitBuffer(singaltmp_p, 1 * sizeof(float));          // NOTE(review): allocated but never used below
    pipe.InitBuffer(tmp_norm, tileLength * sizeof(float));    // broadcast norm for vector normalization
    pipe.InitBuffer(singaltmp_norm, 1 * sizeof(float));       // NOTE(review): allocated but never used below
    pipe.InitBuffer(tmp_base, 1 * sizeof(float));             // holds sum + epsilon for the root step

    pipe.InitBuffer(tmpBuffer, 8 * sizeof(float));            // staging buffer for atomic-add DataCopy

    pipe.InitBuffer(row_base, tiling.rows * sizeof(float));   // per-row partial sums
    pipe.InitBuffer(col_base, tiling.cols * sizeof(float));   // per-column partial sums
    pipe.InitBuffer(r2c_base, 1 * sizeof(float));             // NOTE(review): allocated but never used below

    this->GlobalOffset = globalOffset;
    this->blockIdx = AscendC::GetBlockIdx();
    this->blockNum = AscendC::GetBlockNum();

    // Debug: report this core's element range, clamped to the matrix size.
    uint32_t blockEnd = globalOffset + blockLength;
    if (blockEnd > rows * cols) {
        blockEnd = rows * cols;  // clamp to the valid range
    }
    AscendC::PRINTF("[Core %d] 处理范围: [%d, %d) (长度: %d)\n", 
                blockIdx, globalOffset, blockEnd, blockEnd - globalOffset);
  }

  /**
   * @brief Dispatch to the per-axis computation routine.
   */
  __aicore__ inline void Process() {
    if (axis == 0) {AllProcess(); } 
    else if (axis == 1) {ColProcess(); }
    else if (axis == 2) {RowProcess(); }
  }

private:
  // ------------------------------ Global norm ------------------------------
  __aicore__ inline void AllProcess() {
    const uint32_t loopCount = this->tileNum ;
    const uint32_t tileLen = this->tileLength;
    const uint32_t lastTileLen = this->lastTileLength;
    // Fix: removed unused locals `blockLen` and `globalOffset` (the GM tensors
    // are already bound at this core's offset in Init).

    // ---------------- 1. Local accumulator ----------------
    AscendC::LocalTensor<float> localSum = tmp_base.Get<float>();
    localSum.SetValue(0, 0.0f);

    // ---------------- 2. Pipelined copy-in + local compute ----------------
    for (uint32_t t = 0; t < loopCount; ++t) {
        uint32_t curTileLen = (t == loopCount - 1) ? lastTileLen : tileLen;

        // 2.1 copy in
        CopyIn(t);
        AscendC::LocalTensor<dataType> tileLocal = inQueueInput.DeQue<dataType>();

        // 2.2 half -> float conversion (float input is copied element-wise)
        AscendC::LocalTensor<float> tileFloat = poweredBuf.Get<float>();
        if (keyType == 1) { // half
            AscendC::Cast(tileFloat, tileLocal, AscendC::RoundMode::CAST_NONE, curTileLen);
        } else { // float
            // float -> float: plain element copy
            for (uint32_t i = 0; i < curTileLen; ++i) {
                tileFloat.SetValue(i, tileLocal.GetValue(i));
            }
        }

        // 2.3 abs(x)^p
        AscendC::LocalTensor<float> pTensor = tmp_p.Get<float>();
        AscendC::Duplicate(pTensor, pVal, curTileLen);
        AscendC::Abs(tileFloat, tileFloat, curTileLen);
        AscendC::Power(tileFloat, tileFloat, pTensor, curTileLen);

        // 2.4 accumulate into the local sum
        float tileSum = 0.0f;
        for (uint32_t i = 0; i < curTileLen; ++i) {
            tileSum += tileFloat.GetValue(i);
        }
        localSum.SetValue(0, localSum.GetValue(0) + tileSum);

        AscendC::PRINTF("[Core %d] localSum : %f\n",  AscendC::GetBlockIdx(), localSum.GetValue(0) );
        inQueueInput.FreeTensor(tileLocal);
    }

    // ---------------- 3. Atomic add into the global workspace ----------------
    AscendC::LocalTensor<float> tmpBuf = tmpBuffer.Get<float>();
    AscendC::Duplicate(tmpBuf, 0.0f, 8);           // zero the staging buffer
    tmpBuf.SetValue(0, localSum.GetValue(0));      // only slot 0 carries the value

    AscendC::SetAtomicAdd<float>();
    AscendC::DataCopy(workGm[0], tmpBuf, 8);       // copy 8 floats (32B aligned)
    AscendC::SetAtomicNone();

    // ---------------- debug ----------------
    AscendC::PRINTF("[Core %d] blockLength : %d\n", AscendC::GetBlockIdx(),  this->blockLength );
    AscendC::PRINTF("[Core %d] workGm[0] after atomic add: : %f\n",  AscendC::GetBlockIdx(),workGm.GetValue(0));

    // ---------------- 4. Hard synchronization ----------------
    AscendC::DataCacheCleanAndInvalid<float, AscendC::CacheLine::SINGLE_CACHE_LINE, AscendC::DcciDst::CACHELINE_OUT>(workGm[0]);
    AscendC::SyncAll();

    // ---------------- 5. Core 0 computes the global norm ----------------
    if (AscendC::GetBlockIdx() == 0) {
        float globalSum = workGm.GetValue(0);
        AscendC::LocalTensor<float> base = tmp_base.Get<float>();
        base.SetValue(0, globalSum + epsilon);

        // (sum + eps)^(1/p) computed as exp(ln(sum + eps) / p)
        AscendC::Ln(base, base, 1);
        AscendC::Muls(base, base, 1.0f / pVal, 1);
        AscendC::Exp(base, base, 1);

        workGm.SetValue(0, base.GetValue(0));
    }
    AscendC::SyncAll();
    AscendC::DataCacheCleanAndInvalid<float, AscendC::CacheLine::SINGLE_CACHE_LINE, AscendC::DcciDst::CACHELINE_OUT>(workGm[0]);
    float pNorm = workGm.GetValue(0);

    AscendC::PRINTF("[Core %d] Final pNorm = %f\n", AscendC::GetBlockIdx(), pNorm);

    // ---------------- 6. Pipelined normalization + copy-out ----------------
    for (uint32_t t = 0; t < loopCount; ++t) {
        uint32_t curTileLen = (t == loopCount - 1) ? lastTileLen : tileLen;

        CopyIn(t);
        AscendC::LocalTensor<dataType> tileLocal = inQueueInput.DeQue<dataType>();
        AscendC::LocalTensor<dataType> tileOut = outQueueOutput.AllocTensor<dataType>();
        AscendC::LocalTensor<float> normTensor = tmp_norm.Get<float>();

        // Broadcast the global norm into a local tensor.
        AscendC::Duplicate(normTensor, pNorm, curTileLen);

        // Element-wise normalization.
        float val = 0.0f;
        for (uint32_t i = 0; i < curTileLen; ++i) {
            if(keyType == 1){
                val = static_cast<float>(tileLocal.GetValue(i));
            }
            else {
                val = tileLocal.GetValue(i);
            }
            tileOut.SetValue(i, static_cast<dataType>(val / normTensor.GetValue(i)));
        }

        outQueueOutput.EnQue(tileOut);
        inQueueInput.FreeTensor(tileLocal);
        CopyOut(t);
    }
  }

  // ------------------------------ Per-column norm ------------------------------
  // NOTE(review): the per-tile column-index computation could be hoisted/optimized.
  __aicore__ inline void ColProcess() {
    const uint32_t colNum = this->cols;
    const uint32_t rowNum = this->rows;
    const uint32_t totalElements = rowNum * colNum;
    const uint32_t tileNum = this->tileNum;
    const uint32_t tileLen = this->tileLength;
    const uint32_t lastTileLen = this->lastTileLength;
    const uint32_t globalOffset = this->GlobalOffset; // this core's start offset in GM
    // Fix: removed unused local `blockLen`.

    // ---------------- 1. Local accumulator buffer ----------------
    AscendC::LocalTensor<float> localSum = col_base.Get<float>();
    for (uint32_t c = 0; c < colNum; ++c) {
        localSum.SetValue(c, 0.0f);
    }

    // ---------------- 2. Pipelined copy-in / local compute ----------------
    for (uint32_t t = 0; t < tileNum; ++t) {
        uint32_t curTileLen = (t == tileNum - 1) ? lastTileLen : tileLen;

        // 2.1 copy in
        CopyIn(t);
        AscendC::LocalTensor<dataType> tileLocal = inQueueInput.DeQue<dataType>();
        AscendC::LocalTensor<float> tileFloat = poweredBuf.Get<float>();
        if (keyType == 1) { // half
            AscendC::Cast(tileFloat, tileLocal, AscendC::RoundMode::CAST_NONE, curTileLen);
        } else { // float
            for (uint32_t i = 0; i < curTileLen; ++i) {
                tileFloat.SetValue(i, tileLocal.GetValue(i));
            }
        }

        // 2.2 local compute: abs(x)^p
        AscendC::LocalTensor<float> pTensor = tmp_p.Get<float>();
        AscendC::Duplicate(pTensor, this->pVal, curTileLen);

        AscendC::Abs(tileFloat, tileFloat, curTileLen);
        AscendC::Power(tileFloat, tileFloat, pTensor, curTileLen);

        // 2.3 accumulate into localSum, bucketed by column index
        for (uint32_t i = 0; i < curTileLen; ++i) {
            uint32_t globalIdx = globalOffset + t * tileLen + i;
            if (globalIdx >= totalElements) {  // skip padding beyond the matrix
                AscendC::PRINTF("[Core %d] Skip invalid globalIdx %d\n", blockIdx, globalIdx);
                continue;
            }
            uint32_t rowIdx = globalIdx / colNum;
            uint32_t colIdx = globalIdx - rowIdx * colNum;
            if (colIdx < colNum) {  // defensive bound check
                localSum.SetValue(colIdx, localSum.GetValue(colIdx) + tileFloat.GetValue(i));
            }
        }

        inQueueInput.FreeTensor(tileLocal);
    }

    // ---------------- 3. Atomic add into workGm ----------------
    AscendC::LocalTensor<float> tmpBuf = tmpBuffer.Get<float>();
    AscendC::SetAtomicAdd<float>();
    for (uint32_t c = 0; c < colNum; ++c) {
        uint32_t index = c * SLOT_STRIDE;
        AscendC::Duplicate(tmpBuf, 0.0f, 8);
        tmpBuf.SetValue(0, localSum.GetValue(c));
        AscendC::DataCopy(workGm[index], tmpBuf, 8);  // each column owns an independent slot
    }
    AscendC::SetAtomicNone();

    // ---------------- 4. Hard synchronization ----------------
    AscendC::SyncAll();

    // ---------------- 5. Core 0 applies the p-th root ----------------
    if (AscendC::GetBlockIdx() == 0) {
        AscendC::LocalTensor<float> base = tmp_base.Get<float>();
        AscendC::PRINTF("actual pnorm \n ");
        for (uint32_t c = 0; c < colNum; ++c) {
            // (sum + eps)^(1/p) via exp(ln(.) / p); fix: dropped unused `index` local
            base.SetValue(0, workGm.GetValue(c * SLOT_STRIDE) + this->epsilon);
            AscendC::Ln(base, base, 1);
            AscendC::Muls(base, base, 1.0f / this->pVal, 1);
            AscendC::Exp(base, base, 1);
            workGm.SetValue(c * SLOT_STRIDE, base.GetValue(0));
            if((c % 7 == 0) && (c != 0) ){
                AscendC::PRINTF(" %f \n ",  workGm.GetValue(c * SLOT_STRIDE));
            } else{
                AscendC::PRINTF(" %f ",  workGm.GetValue(c * SLOT_STRIDE));
            }
        }
         AscendC::PRINTF(" \n ");
    }

    // ---------------- 6. Second hard synchronization ----------------
    AscendC::SyncAll();
    // Invalidate cache lines before every core reads workGm.
    for (uint32_t c = 0; c < colNum; ++c) {
        AscendC::DataCacheCleanAndInvalid<float,AscendC::CacheLine::SINGLE_CACHE_LINE,AscendC::DcciDst::CACHELINE_OUT>(workGm[c * SLOT_STRIDE]);
    }

    // ---------------- 7. Normalize and copy out (pipelined) ----------------
    for (uint32_t t = 0; t < tileNum; ++t) {
        uint32_t curTileLen = (t == tileNum - 1) ? lastTileLen : tileLen;

        // 7.1 copy in
        CopyIn(t);
        AscendC::LocalTensor<dataType> tileLocal = inQueueInput.DeQue<dataType>();
        AscendC::LocalTensor<dataType> tileOut = outQueueOutput.AllocTensor<dataType>();

        // 7.2 normalize each element by its column norm
        for (uint32_t i = 0; i < curTileLen; ++i) {
            uint32_t globalIdx = globalOffset + t * tileLen + i;
            if (globalIdx >= totalElements) {
                tileOut.SetValue(i, static_cast<dataType>(0.0f));
                continue;
            }
            uint32_t rowIdx = globalIdx / colNum;
            uint32_t colIdx = globalIdx - rowIdx * colNum;

            float norm = workGm.GetValue(colIdx * SLOT_STRIDE);
            if(this->keyType == 1){
              float val = static_cast<float>(tileLocal.GetValue(i));
              tileOut.SetValue(i, static_cast<dataType>(val / norm));
            }
            else {
              float val = tileLocal.GetValue(i);
              tileOut.SetValue(i, val / norm);
            }
        }

        // 7.3 copy out
        outQueueOutput.EnQue(tileOut);
        inQueueInput.FreeTensor(tileLocal);
        CopyOut(t);
    }
  }

  // ------------------------------ Per-row norm ------------------------------
  __aicore__ inline void RowProcess() {
    const uint32_t colNum = this->cols;
    const uint32_t rowNum = this->rows;
    const uint32_t totalElements = rowNum * colNum;
    const uint32_t tileNum = this->tileNum;
    const uint32_t tileLen = this->tileLength;
    const uint32_t lastTileLen = this->lastTileLength;
    const uint32_t globalOffset = this->GlobalOffset; // this core's start offset in GM
    // Fix: removed unused local `blockLen`.

    // ---------------- 1. Local accumulator buffer ----------------
    AscendC::LocalTensor<float> localSum = row_base.Get<float>();
    for (uint32_t c = 0; c < rowNum; ++c) {
        localSum.SetValue(c, 0.0f);
    }

    // ---------------- 2. Pipelined copy-in / local compute ----------------
    for (uint32_t t = 0; t < tileNum; ++t) {
        uint32_t curTileLen = (t == tileNum - 1) ? lastTileLen : tileLen;

        // 2.1 copy in
        CopyIn(t);
        AscendC::LocalTensor<dataType> tileLocal = inQueueInput.DeQue<dataType>();
        AscendC::LocalTensor<float> tileFloat = poweredBuf.Get<float>();
        if (keyType == 1) { // half
            AscendC::Cast(tileFloat, tileLocal, AscendC::RoundMode::CAST_NONE, curTileLen);
        } else { // float
            for (uint32_t i = 0; i < curTileLen; ++i) {
                tileFloat.SetValue(i, tileLocal.GetValue(i));
            }
        }

        // 2.2 local compute: abs(x)^p
        AscendC::LocalTensor<float> pTensor = tmp_p.Get<float>();
        AscendC::Duplicate(pTensor, this->pVal, curTileLen);

        AscendC::Abs(tileFloat, tileFloat, curTileLen);
        AscendC::Power(tileFloat, tileFloat, pTensor, curTileLen);

        // 2.3 accumulate into localSum, bucketed by row index
        for (uint32_t i = 0; i < curTileLen; ++i) {
            uint32_t globalIdx = globalOffset + t * tileLen + i;
            if (globalIdx >= totalElements) {  // skip padding beyond the matrix
                AscendC::PRINTF("[Core %d] Skip invalid globalIdx %d\n", blockIdx, globalIdx);
                continue;
            }
            uint32_t rowIdx = globalIdx / colNum;
            if (rowIdx < rowNum) {  // defensive bound check
                localSum.SetValue(rowIdx, localSum.GetValue(rowIdx) + tileFloat.GetValue(i));
            }
        }

        inQueueInput.FreeTensor(tileLocal);
    }

    // ---------------- 3. Atomic add into workGm ----------------
    AscendC::LocalTensor<float> tmpBuf = tmpBuffer.Get<float>();
    AscendC::SetAtomicAdd<float>();
    for (uint32_t c = 0; c < rowNum; ++c) {
        uint32_t index = c * SLOT_STRIDE;
        AscendC::Duplicate(tmpBuf, 0.0f, 8);
        tmpBuf.SetValue(0, localSum.GetValue(c));
        AscendC::DataCopy(workGm[index], tmpBuf, 8);  // each row owns an independent slot
    }
    AscendC::SetAtomicNone();

    // ---------------- 4. Hard synchronization ----------------
    AscendC::SyncAll();

    // ---------------- 5. Core 0 applies the p-th root ----------------
    if (AscendC::GetBlockIdx() == 0) {
        AscendC::LocalTensor<float> base = tmp_base.Get<float>();
        AscendC::PRINTF("actual pnorm \n ");
        for (uint32_t c = 0; c < rowNum; ++c) {
            // (sum + eps)^(1/p) via exp(ln(.) / p); fix: dropped unused `index` local
            base.SetValue(0, workGm.GetValue(c * SLOT_STRIDE) + this->epsilon);
            AscendC::Ln(base, base, 1);
            AscendC::Muls(base, base, 1.0f / this->pVal, 1);
            AscendC::Exp(base, base, 1);
            workGm.SetValue(c * SLOT_STRIDE, base.GetValue(0));
            if((c % 7 == 0) && (c != 0) ){
                AscendC::PRINTF(" %f \n ",  workGm.GetValue(c * SLOT_STRIDE));
            } else{
                AscendC::PRINTF(" %f ",  workGm.GetValue(c * SLOT_STRIDE));
            }
        }
         AscendC::PRINTF(" \n ");
    }

    // ---------------- 6. Second hard synchronization ----------------
    AscendC::SyncAll();
    // Invalidate cache lines before every core reads workGm.
    for (uint32_t c = 0; c < rowNum; ++c) {
        AscendC::DataCacheCleanAndInvalid<float,AscendC::CacheLine::SINGLE_CACHE_LINE,AscendC::DcciDst::CACHELINE_OUT>(workGm[c * SLOT_STRIDE]);
    }

    // ---------------- 7. Normalize and copy out (pipelined) ----------------
    for (uint32_t t = 0; t < tileNum; ++t) {
        uint32_t curTileLen = (t == tileNum - 1) ? lastTileLen : tileLen;

        // 7.1 copy in
        CopyIn(t);
        AscendC::LocalTensor<dataType> tileLocal = inQueueInput.DeQue<dataType>();
        AscendC::LocalTensor<dataType> tileOut = outQueueOutput.AllocTensor<dataType>();

        // 7.2 normalize each element by its row norm
        for (uint32_t i = 0; i < curTileLen; ++i) {
            uint32_t globalIdx = globalOffset + t * tileLen + i;
            if (globalIdx >= totalElements) {
                tileOut.SetValue(i, static_cast<dataType>(0.0f));
                continue;
            }
            uint32_t rowIdx = globalIdx / colNum;

            float norm = workGm.GetValue(rowIdx * SLOT_STRIDE);
            if(this->keyType == 1){
              float val = static_cast<float>(tileLocal.GetValue(i));
              tileOut.SetValue(i, static_cast<dataType>(val / norm));
            }
            else {
              float val = tileLocal.GetValue(i);
              tileOut.SetValue(i, val / norm);
            }
        }

        // 7.3 copy out
        outQueueOutput.EnQue(tileOut);
        inQueueInput.FreeTensor(tileLocal);
        CopyOut(t);
    }
  }

  // ------------------------------ Data movement ------------------------------
  /**
   * @brief Copy one tile from GM into the input queue (contiguous addresses).
   * @param progress tile index within this core's block
   */
  __aicore__ inline void CopyIn(int32_t progress) {
    AscendC::LocalTensor<dataType> xLocal = inQueueInput.AllocTensor<dataType>();
    // The last tile uses lastTileLength; all others use tileLength.
    uint32_t curTileLen = (progress == this->tileNum - 1) ? this->lastTileLength : this->tileLength;
    // Source address = progress * tileLength (independent of buffernum).
    // NOTE(review): DataCopy assumes curTileLen meets the 32B alignment
    // requirement — presumably guaranteed by the host-side tiling; confirm.
    AscendC::DataCopy(xLocal, inputGm[progress * tileLength], curTileLen);
    inQueueInput.EnQue(xLocal);
  }

  /**
   * @brief Copy one result tile from the output queue back to GM.
   * @param progress tile index within this core's block
   */
  __aicore__ inline void CopyOut(int32_t progress) {
    AscendC::LocalTensor<dataType> zLocal = outQueueOutput.DeQue<dataType>();
    uint32_t curTileLen = (progress == this->tileNum - 1) ? this->lastTileLength : this->tileLength;

    AscendC::DataCopy(outputGm[progress * this->tileLength], zLocal, curTileLen);
    outQueueOutput.FreeTensor(zLocal);
  }

  // ------------------------------ Members ------------------------------
  // 1. Algorithm parameters
  float pVal;             // p of the Lp norm
  uint32_t axis;          // reduction axis (0: global, 1: per-column, 2: per-row)
  float epsilon;          // numerical-stability offset (avoids sum == 0 -> div by zero)
  uint32_t cols;          // input matrix column count
  uint32_t rows;          // input matrix row count

  uint32_t keyType;       // data type tag (0: float32, 1: float16)
  // Fix: removed dead member `pNorm` (never assigned; AllProcess uses a local).

  // Tiling split
  uint32_t blockLength;
  uint32_t tileNum;
  uint32_t tileLength;
  uint32_t lastTileLength;

  // 2. Multi-core scheduling
  uint32_t blockIdx;      // current core index (0..blockNum-1)
  uint32_t blockNum;      // total core count
  uint32_t buffernum;     // queue depth (1: single buffer, 2: double buffer)
  uint32_t GlobalOffset;  // this core's element offset into GM

  // 3. Pipe and queues (AscendC hardware scheduling)
  AscendC::TPipe pipe;                                                 // compute pipe
  AscendC::TQue<AscendC::TPosition::VECIN, BUFFER_NUM> inQueueInput;   // input data queue
  AscendC::TQue<AscendC::TPosition::VECOUT, BUFFER_NUM> outQueueOutput;// output data queue

  // 4. Global memory buffers (GM)
  AscendC::GlobalTensor<dataType> inputGm;  // input tensor
  AscendC::GlobalTensor<dataType> outputGm; // output tensor
  AscendC::GlobalTensor<float> workGm;      // cross-core reduction workspace (partial/global sums)

  // 5. On-chip compute buffers (VECCALC)
  AscendC::TBuf<AscendC::TPosition::VECCALC> poweredBuf;        // abs(x)^p results
  AscendC::TBuf<AscendC::TPosition::VECCALC> singalpoweredBuf;  // NOTE(review): unused
  AscendC::TBuf<AscendC::TPosition::VECCALC> tmp_p;             // broadcast p value
  AscendC::TBuf<AscendC::TPosition::VECCALC> singaltmp_p;       // NOTE(review): unused
  AscendC::TBuf<AscendC::TPosition::VECCALC> tmp_norm;          // broadcast norm value
  AscendC::TBuf<AscendC::TPosition::VECCALC> singaltmp_norm;    // NOTE(review): unused
  AscendC::TBuf<AscendC::TPosition::VECCALC> tmp_base;          // sum + epsilon scratch
  AscendC::TBuf<AscendC::TPosition::VECCALC> row_base;          // per-row partial sums
  AscendC::TBuf<AscendC::TPosition::VECCALC> col_base;          // per-column partial sums
  AscendC::TBuf<AscendC::TPosition::VECCALC> r2c_base;          // NOTE(review): unused
  AscendC::TBuf<AscendC::TPosition::VECCALC> tmpBuffer;         // atomic-add staging area
};

// Explicit template instantiations (float32 and float16 support)
template class KernelLpNormV2<float>;
template class KernelLpNormV2<half>;

/**
 * @brief Kernel entry point: instantiates the typed kernel matching the
 *        tiling's data type tag and runs Init + Process on it.
 */
extern "C" __global__ __aicore__ void lp_norm_v2_custom(GM_ADDR x, GM_ADDR work, GM_ADDR z, LpNormV2CustomTilingData tiling) {
  // dataType tag: 0 -> float32, anything else -> float16
  if (tiling.dataType == 0) {
    KernelLpNormV2<float> kernel;
    kernel.Init(x, work, z, tiling);
    kernel.Process();
    return;
  }
  KernelLpNormV2<half> kernel;
  kernel.Init(x, work, z, tiling);
  kernel.Process();
}

