#include "kernel_operator.h"
#include <cfloat> // provides FLT_MAX

using namespace AscendC;

constexpr int32_t TILE_NUM = 8;    // number of equal tiles the input is split into
constexpr int32_t BUFFER_NUM = 2;  // double-buffering depth for the in/out queues

// Helper: unpack one field from the 64-bit packed tiling word.
// Bit layout (LSB first): N:10 | N2:10 | N3:20 | N4:20 | keep_dims:1 | dim_num:3.
// Fix: fields 2 and 3 are 20 bits wide (mask 0xFFFFF), so the previous
// uint16_t return type silently truncated them; widened to uint32_t
// (backward-compatible for existing callers via implicit conversion).
inline __aicore__ uint32_t unzipUint64(uint64_t x, uint8_t idx) {
    switch (idx) {
        case 0: return x & 0x3FF;           // N  (10 bits)
        case 1: return (x >> 10) & 0x3FF;   // N2 (10 bits)
        case 2: return (x >> 20) & 0xFFFFF; // N3 (20 bits)
        case 3: return (x >> 40) & 0xFFFFF; // N4 (20 bits)
        case 4: return (x >> 60) & 0x1;     // keep_dims flag
        case 5: return (x >> 61) & 0x7;     // dim_num (3 bits)
        default: return 0;                  // out-of-range index
    }
}

class ReduceLogSumExp {
public:
    __aicore__ inline ReduceLogSumExp() {}

    // Binds global memory, computes the tiling split, and prepares the queues.
    //   x, z      : input / output tensors in global memory
    //   axes      : reduction-axes tensor (currently unused — the kernel
    //               reduces over every element)
    //   N..N4     : dimension sizes; only the first dim_num are meaningful
    //   keep_dims : output-shape flag (currently unused in this body)
    //   dim_num   : number of valid dimensions (1..4)
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR axes, GM_ADDR z,
                               uint16_t N, uint16_t N2, uint16_t N3, uint16_t N4,
                               bool keep_dims, uint16_t dim_num)
    {
        // Total element count is the product of the valid dimensions.
        dataSize_ = 1;
        if (dim_num >= 1) dataSize_ *= N;
        if (dim_num >= 2) dataSize_ *= N2;
        if (dim_num >= 3) dataSize_ *= N3;
        if (dim_num >= 4) dataSize_ *= N4;

        // Split into TILE_NUM equal tiles plus an optional remainder tile.
        tileLength_ = dataSize_ / TILE_NUM;
        lastTileLength_ = dataSize_ % TILE_NUM;
        tileNum_ = TILE_NUM;
        if (lastTileLength_) tileNum_++;

        // Fix: with fewer elements than TILE_NUM the per-tile length was 0,
        // so the queue buffers below were sized 0 bytes. Fall back to a
        // single tile holding everything (or no tiles at all when empty).
        if (tileLength_ == 0) {
            tileLength_ = (dataSize_ > 0) ? dataSize_ : 1;
            lastTileLength_ = 0;
            tileNum_ = (dataSize_ > 0) ? 1 : 0;
        }

        // Input queue (x), double buffered.
        // NOTE(review): DataCopy generally requires 32-byte-aligned transfer
        // sizes; tileLength_ * sizeof(DTYPE_X) is assumed to satisfy that —
        // confirm against the tiling that produces dataSize_.
        pipe_.InitBuffer(inQueueX_, BUFFER_NUM, tileLength_ * sizeof(DTYPE_X));

        // Output queue (y), double buffered.
        pipe_.InitBuffer(outQueueY_, BUFFER_NUM, tileLength_ * sizeof(DTYPE_Y));

        // Bind the global-memory buffers.
        xGm_.SetGlobalBuffer((__gm__ DTYPE_X *)x, dataSize_);
        yGm_.SetGlobalBuffer((__gm__ DTYPE_Y *)z, dataSize_);

        // Scratch buffer used by the exp/log stages.
        pipe_.InitBuffer(tmpBuffer_, 1, 1024); // simplified scratch size

        // Identity element for the max reduction.
        // NOTE(review): -FLT_MAX assumes DTYPE_X is float — revisit for half.
        maxVal_ = -FLT_MAX;
    }

    __aicore__ inline void Process()
    {
        // Stage 1: global maximum (numerical stabilizer for log-sum-exp).
        ComputeGlobalMax();

        // Stages 2-3 remain disabled: the element-wise Exp step in
        // ComputeExpSumLocal is not implemented yet, and ComputeFinalResult
        // depends on it (see TODO notes below).
        // ComputeExpSum();
        // GenerateResult();
    }

private:
    // Stage 1: stream every tile through the input queue and fold its
    // maximum into maxVal_.
    __aicore__ inline void ComputeGlobalMax()
    {
        for (int32_t i = 0; i < tileNum_; i++) {
            LocalTensor<DTYPE_X> inLocal = inQueueX_.AllocTensor<DTYPE_X>();

            // The trailing tile may be shorter than the regular ones.
            int32_t currentTileLength = tileLength_;
            if ((i == tileNum_ - 1) && lastTileLength_) {
                currentTileLength = lastTileLength_;
            }

            // Global memory -> local memory.
            DataCopy(inLocal, xGm_[i * tileLength_], currentTileLength);

            // Reduce the tile, then merge into the running global maximum.
            DTYPE_X localMax = ReduceMax(inLocal, currentTileLength);
            UpdateGlobalMax(localMax);

            inQueueX_.FreeTensor(inLocal);
        }
    }

    // Stage 2 (currently disabled in Process): accumulate sum(exp(x - max)).
    __aicore__ inline void ComputeExpSum()
    {
        for (int32_t i = 0; i < tileNum_; i++) {
            LocalTensor<DTYPE_X> inLocal = inQueueX_.AllocTensor<DTYPE_X>();

            // The trailing tile may be shorter than the regular ones.
            int32_t currentTileLength = tileLength_;
            if ((i == tileNum_ - 1) && lastTileLength_) {
                currentTileLength = lastTileLength_;
            }

            // Global memory -> local memory.
            DataCopy(inLocal, xGm_[i * tileLength_], currentTileLength);

            // Per-tile exponential sum (shifted by the global max for
            // numerical stability).
            DTYPE_X localExpSum = ComputeExpSumLocal(inLocal, currentTileLength);

            // Fix: the accumulation was commented out, leaving expSum_ at 0.
            expSum_ += localExpSum;

            inQueueX_.FreeTensor(inLocal);
        }
    }

    // Stage 3 (currently disabled in Process): broadcast the scalar result
    // into every output element.
    __aicore__ inline void GenerateResult()
    {
        // Final value: log(sum(exp(x - max))) + max.
        DTYPE_Y result = ComputeFinalResult();

        for (int32_t i = 0; i < tileNum_; i++) {
            LocalTensor<DTYPE_Y> outLocal = outQueueY_.AllocTensor<DTYPE_Y>();

            // The trailing tile may be shorter than the regular ones.
            int32_t currentTileLength = tileLength_;
            if ((i == tileNum_ - 1) && lastTileLength_) {
                currentTileLength = lastTileLength_;
            }

            // Fill the tile with the scalar result.
            for (int32_t j = 0; j < currentTileLength; j++) {
                outLocal.SetValue(j, result);
            }

            // Local memory -> global memory.
            DataCopy(yGm_[i * tileLength_], outLocal, currentTileLength);

            outQueueY_.FreeTensor(outLocal);
        }
    }

    // Scalar max reduction over one tile.
    __aicore__ inline DTYPE_X ReduceMax(LocalTensor<DTYPE_X>& inLocal, int32_t length)
    {
        DTYPE_X localMax = inLocal.GetValue(0);
        for (int32_t i = 1; i < length; i++) {
            DTYPE_X current = inLocal.GetValue(i);
            // Fix: the comparison was commented out, so this function
            // always returned element 0 instead of the tile maximum.
            if (current > localMax) {
                localMax = current;
            }
        }
        return localMax;
    }

    // Merge one tile's maximum into the running global maximum.
    __aicore__ inline void UpdateGlobalMax(DTYPE_X localMax)
    {
        // Fix: the update was commented out, leaving maxVal_ at -FLT_MAX.
        if (localMax > maxVal_) {
            maxVal_ = localMax;
        }
    }

    // Per-tile sum of exp(x - maxVal_).
    // TODO(review): still a stub — the element-wise Exp call below is
    // disabled, so this returns 0. The file-scope Exp helper is declared
    // after this class and is not visible from here; wire in the AscendC
    // built-in vector Exp before enabling stage 2.
    __aicore__ inline DTYPE_X ComputeExpSumLocal(LocalTensor<DTYPE_X>& inLocal, int32_t length)
    {
        DTYPE_X localExpSum = 0.0;
        for (int32_t i = 0; i < length; i++) {
            DTYPE_X val = inLocal.GetValue(i);
            DTYPE_X diff = val; // should be val - maxVal_ once Exp is wired in

            LocalTensor<DTYPE_X> expResult = tmpBuffer_.AllocTensor<DTYPE_X>();
            // Exp(expResult, diff);
            // localExpSum += expResult.GetValue(0);
            tmpBuffer_.FreeTensor(expResult);
        }
        return localExpSum;
    }

    // log(expSum_) + maxVal_.
    // TODO(review): stub — the Log call is disabled, so logResult is read
    // uninitialized. Must not be enabled in Process until implemented.
    __aicore__ inline DTYPE_Y ComputeFinalResult()
    {
        LocalTensor<DTYPE_Y> logResult = tmpBuffer_.AllocTensor<DTYPE_Y>();
        // Log(logResult, expSum_);
        DTYPE_Y result = logResult.GetValue(0);
        tmpBuffer_.FreeTensor(logResult);

        return result;
    }

private:
    TPipe pipe_;
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX_;
    TQue<QuePosition::VECOUT, BUFFER_NUM> outQueueY_;
    TQue<QuePosition::VECCALC, 1> tmpBuffer_; // scratch queue for exp/log stages

    GlobalTensor<DTYPE_X> xGm_;
    GlobalTensor<DTYPE_Y> yGm_;

    int64_t dataSize_ = 0;        // total number of input elements
    int32_t tileLength_ = 0;      // elements per regular tile
    int32_t lastTileLength_ = 0;  // elements in the remainder tile (0 if none)
    int32_t tileNum_ = 0;         // total tiles processed per pass

    DTYPE_X maxVal_;              // running global maximum (stage 1)
    DTYPE_X expSum_ = 0.0;        // accumulated exponential sum (stage 2)
};

// Simplified scalar exponential: writes exp(src) into element 0 of dst.
// NOTE(review): placeholder — production code should call the AscendC
// built-in vector Exp instead of this per-element helper.
template <typename T>
__aicore__ inline void Exp(LocalTensor<T>& dst, const T& src) {
    const T value = exp(src);
    dst.SetValue(0, value);
}

// Simplified scalar logarithm: writes log(src) into element 0 of dst.
// NOTE(review): placeholder — production code should call the AscendC
// built-in vector Log instead of this per-element helper.
template <typename T>
__aicore__ inline void Log(LocalTensor<T>& dst, const T& src) {
    const T value = log(src);
    dst.SetValue(0, value);
}

// Kernel entry point: decodes the packed tiling word and runs the
// log-sum-exp reduction over the whole input.
extern "C" __global__ __aicore__ void reduce_log_sum_exp(GM_ADDR x, GM_ADDR axes, GM_ADDR y, 
                                                         GM_ADDR workspace, GM_ADDR tiling) 
{
    // Fetch the tiling struct and its packed shape word.
    GET_TILING_DATA(tilingData, tiling);
    const uint64_t packedDims = tilingData.zipN;

    // Decode the packed fields (see unzipUint64 for the bit layout).
    const uint16_t n1 = unzipUint64(packedDims, 0);
    const uint16_t n2 = unzipUint64(packedDims, 1);
    const uint16_t n3 = unzipUint64(packedDims, 2);
    const uint16_t n4 = unzipUint64(packedDims, 3);
    const bool keepDims = unzipUint64(packedDims, 4) != 0;
    const uint16_t dimNum = unzipUint64(packedDims, 5);

    // Set up and run the operator.
    ReduceLogSumExp op;
    op.Init(x, axes, y, n1, n2, n3, n4, keepDims, dimNum);
    op.Process();
}