#include "kernel_operator.h"
#include "cmath"

constexpr int32_t BUFFER_NUM = 2; // tensors per queue (double buffering for copy/compute overlap)

// SoftMin kernel: for each vector of `computeSize` elements it computes
//   softmin(x_i) = exp(-x_i) / sum_j(exp(-x_j))
// using the numerically stable shift  exp(min - x_i) / sum_j(exp(min - x_j)).
// Work is split across cores by the host tiling (core_vector_start/count).
template<typename TYPE_X, typename TYPE_Z>
class KernelSoftMin {
    using T = TYPE_X;
public:
    __aicore__ inline KernelSoftMin() {}

    // Bind global-memory views, capture tiling state and allocate UB buffers
    // for this core.
    //   x, z        : device addresses of the input / output tensors
    //   tiling_data : host-computed per-core work split
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR z, const SoftMinCustomTilingData &tiling_data)
    {
        this->tiling = tiling_data;
        ASSERT(AscendC::GetBlockNum() != 0 && "block dim can not be zero!");
        this->core_id = AscendC::GetBlockIdx();

        // If this core was assigned no vectors, skip all setup. Process() is
        // expected to then run zero iterations (core_loop_times == 0).
        if (tiling.core_vector_count[core_id] <= 0) {
            return ;
        }

        // Element offset of this core's first vector inside the flat tensor.
        uint32_t vector_offset = tiling_data.core_vector_start[core_id];
        uint32_t element_offset = vector_offset * tiling_data.computeSize;

        // Global-memory windows over this core's slice of x and z.
        xGm.SetGlobalBuffer((__gm__ TYPE_X *)x + element_offset, tiling.core_vector_count[core_id] * tiling_data.computeSize);
        zGm.SetGlobalBuffer((__gm__ TYPE_Z *)z + element_offset, tiling.core_vector_count[core_id] * tiling_data.computeSize);

        // Double-buffered in/out queues, one aligned vector per tensor.
        pipe.InitBuffer(inQueueX, BUFFER_NUM, tiling.aligned_compute_size * sizeof(TYPE_X));
        pipe.InitBuffer(outQueueZ, BUFFER_NUM, tiling.aligned_compute_size * sizeof(TYPE_Z));

        // Scratch buffers.
        // Fix: ReduceMin below is called with calcIndex=true, so its destination
        // receives {min value, min index} — two elements, not one — and UB
        // allocations need 32-byte alignment anyway. The previous
        // sizeof(TYPE_X) allocation was an under-allocation.
        pipe.InitBuffer(tmpBuffer0, 32);                                           // reduce result (min, then reused for sum)
        pipe.InitBuffer(tmpBuffer1, tiling.aligned_compute_size * sizeof(TYPE_X)); // broadcast of min / sum
        pipe.InitBuffer(tmpBuffer2, tiling.aligned_compute_size * sizeof(TYPE_X)); // min - x
        pipe.InitBuffer(tmpBuffer3, tiling.aligned_compute_size * sizeof(TYPE_X)); // exp(min - x)
        pipe.InitBuffer(tmpBuffer4, 32 * sizeof(TYPE_X));                          // reduce work area
        // NOTE(review): a 32-element work area may be too small for large
        // computeSize — confirm against the ReduceMin/ReduceSum workspace
        // size formula for the target chip.
    }

    // Run the CopyIn -> Compute -> CopyOut pipeline once per vector owned by
    // this core.
    __aicore__ inline void Process()
    {
        int32_t loopCount = tiling.core_loop_times[this->core_id];
        this->processDataNum = tiling.aligned_compute_size;

        for (int32_t i = 0; i < loopCount; i++)
        {
            CopyIn(i);
            Compute(i);
            CopyOut(i);
        }
    }

private:
    // Stage the `progress`-th input vector from GM into UB, zero-padding the
    // tail up to the aligned size.
    __aicore__ inline void CopyIn(int32_t progress)
    {
        AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.AllocTensor<TYPE_X>();

        AscendC::DataCopyExtParams copyParams{
            1,                                                          // one block
            static_cast<uint32_t>(tiling.computeSize * sizeof(TYPE_X)), // payload bytes
            0, 0, 0
        };

        // Pad the alignment tail with zeros so vector ops read defined data.
        // NOTE(review): the pad count is cast to uint8_t — confirm
        // aligned_compute_size - computeSize can never exceed 255.
        AscendC::DataCopyPadExtParams<TYPE_X> padParams{
            true,
            0,
            static_cast<uint8_t>(tiling.aligned_compute_size - tiling.computeSize),
            0
        };

        // Offset is relative: xGm's base is already this core's start position.
        AscendC::DataCopyPad(xLocal[0], xGm[progress * tiling.computeSize], copyParams, padParams);

        inQueueX.EnQue(xLocal);
    }

    // Numerically stable softmin of one vector:
    //   softmin(x) = exp(min - x) / sum(exp(min - x))
    __aicore__ inline void Compute(int32_t progress)
    {
        AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.DeQue<TYPE_X>();
        AscendC::LocalTensor<TYPE_Z> zLocal = outQueueZ.AllocTensor<TYPE_Z>();

        AscendC::LocalTensor<TYPE_X> tileMin = tmpBuffer0.Get<TYPE_X>();
        AscendC::LocalTensor<TYPE_X> min_broadcast = tmpBuffer1.Get<TYPE_X>();
        AscendC::LocalTensor<TYPE_X> shifted = tmpBuffer2.Get<TYPE_X>();
        AscendC::LocalTensor<TYPE_X> expResult = tmpBuffer3.Get<TYPE_X>();

        // 1. min over the valid (un-padded) elements
        AscendC::LocalTensor<TYPE_X> min_workLocal = tmpBuffer4.Get<TYPE_X>();
        AscendC::ReduceMin<TYPE_X>(tileMin, xLocal, min_workLocal, tiling.computeSize, true);

        // 2. broadcast min across the whole aligned vector
        auto minValue = tileMin.GetValue(0);
        AscendC::Duplicate(min_broadcast, minValue, this->processDataNum);

        // 3. min - x
        AscendC::Sub(shifted, min_broadcast, xLocal, this->processDataNum);

        // 4. exp(min - x)
        AscendC::Exp(expResult, shifted, this->processDataNum);

        // 5. sum(exp(min - x)) — reuses tmpBuffer0 for the scalar result
        AscendC::LocalTensor<TYPE_X> tileSum = tmpBuffer0.Get<TYPE_X>();
        AscendC::LocalTensor<TYPE_X> workLocal = tmpBuffer4.Get<TYPE_X>();
        AscendC::ReduceSum<TYPE_X>(tileSum, expResult, workLocal, tiling.computeSize);

        // 6. broadcast the sum — reuses tmpBuffer1
        AscendC::LocalTensor<TYPE_X> sum_broadcast = tmpBuffer1.Get<TYPE_X>();
        auto sumValue = tileSum.GetValue(0);
        AscendC::Duplicate(sum_broadcast, sumValue, this->processDataNum);

        // 7. softmin = exp(min - x) / sum(exp(min - x))
        AscendC::Div(zLocal, expResult, sum_broadcast, this->processDataNum);

        outQueueZ.EnQue(zLocal);
        inQueueX.FreeTensor(xLocal);
    }

    // Write the `progress`-th result vector back to GM, dropping the
    // alignment padding.
    __aicore__ inline void CopyOut(int32_t progress)
    {
        AscendC::LocalTensor<TYPE_Z> zLocal = outQueueZ.DeQue<TYPE_Z>();

        // Offset is relative: zGm's base is already this core's start position.
        AscendC::DataCopyExtParams copyParams{
            1,
            static_cast<uint32_t>(tiling.computeSize * sizeof(TYPE_Z)),
            0,
            0,
            0
        };
        AscendC::DataCopyPad(zGm[progress * tiling.computeSize],
                            zLocal,
                            copyParams);

        outQueueZ.FreeTensor(zLocal);
    }

private:
    AscendC::TPipe pipe;
    AscendC::TQue<AscendC::QuePosition::VECIN, BUFFER_NUM> inQueueX;
    AscendC::TQue<AscendC::QuePosition::VECOUT, BUFFER_NUM> outQueueZ;
    AscendC::TBuf<AscendC::QuePosition::VECCALC> tmpBuffer0; // reduce result (min / sum scalar)
    AscendC::TBuf<AscendC::QuePosition::VECCALC> tmpBuffer1; // broadcast of min / sum
    AscendC::TBuf<AscendC::QuePosition::VECCALC> tmpBuffer2; // min - x
    AscendC::TBuf<AscendC::QuePosition::VECCALC> tmpBuffer3; // exp(min - x)
    AscendC::TBuf<AscendC::QuePosition::VECCALC> tmpBuffer4; // reduce work area

    AscendC::GlobalTensor<TYPE_X> xGm;
    AscendC::GlobalTensor<TYPE_Z> zGm;

    // Tiling state
    int32_t core_id;         // index of this vector core
    int32_t processDataNum;  // aligned element count processed per iteration

    SoftMinCustomTilingData tiling;
};

// Kernel entry point: load the tiling blob and run the SoftMin kernel.
// `workspace` is accepted for ABI compatibility but unused.
extern "C" __global__ __aicore__ void soft_min_custom(GM_ADDR x, GM_ADDR z, GM_ADDR workspace, GM_ADDR tiling)
{
    GET_TILING_DATA(tiling_data, tiling);
    // The two tiling keys previously ran byte-identical copy-pasted bodies;
    // fold them into one branch. If key 2 is meant to select a different
    // kernel variant (e.g. another dtype path), re-split here.
    if (TILING_KEY_IS(1) || TILING_KEY_IS(2))
    {
      KernelSoftMin<DTYPE_X, DTYPE_Z> op;
      op.Init(x, z, tiling_data);
      op.Process();
    }
}
