#define K_MAX_SHAPE_DIM 0
#include "kernel_operator.h"
#include <type_traits>
using namespace AscendC;
constexpr int32_t BUFFER_NUM = 2;

template<typename T> class KernelGlobalAvgPool {
public:
    __aicore__ inline KernelGlobalAvgPool() {}
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, 
                            int32_t size, int32_t N, int32_t C, int32_t HW, float firstDimValueBack, int32_t ub_size) {
        ASSERT(GetBlockNum() != 0 && "block dim can not be zero!");

        this->size = size;
        this->N = N;
        this->C = C;
        this->HW = HW;
        this->firstDimValueBack = firstDimValueBack;
        this->ub_size = 31232;

        xGm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_X *>(x), size);
        yGm.SetGlobalBuffer(reinterpret_cast<__gm__ DTYPE_Y *>(y), N*C);

        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->ub_size * sizeof(DTYPE_X));
        pipe.InitBuffer(QueueBuff, 492 * sizeof(float));

        pipe.InitBuffer(QueueTemp1, 1 * sizeof(DTYPE_X));
        pipe.InitBuffer(QueueTemp2, 1 * sizeof(DTYPE_X));
    }

    __aicore__ inline void Process() {
        int32_t loopCount = this->N * this->C;

        this->Count = this->HW / this->ub_size;
        // int32_t processDataNum = this->HW;
        int32_t tailDataNum = this->HW % this->ub_size;
        if(tailDataNum != 0) Count += 1;
        // this->processDataNum = this->tileDataNum;
        if constexpr (std::is_same_v<T, float>)
        {
            for (int32_t i = 0; i < loopCount; i++) 
            {
                this->processDataNum = this->ub_size;
                for (int32_t j = 0; j < this->Count; j++) 
                {
                    if (j == this->Count - 1) 
                    {
                        this->processDataNum = tailDataNum;
                    }
                    CopyIn(i, j);
                    Compute(i, j);
                }
            }
        }
        else
        {
            for (int32_t i = 0; i < loopCount; i++) 
            {
                this->processDataNum = this->ub_size;
                for (int32_t j = 0; j < this->Count; j++) 
                {
                    if (j == this->Count - 1) 
                    {
                        this->processDataNum = tailDataNum;
                    }
                    CopyIn(i, j);
                    Compute16(i, j);
                }
            }
        }
    }
private:
     __aicore__ inline void CopyIn(int32_t i, int32_t j)
    {
        LocalTensor<DTYPE_X> xLocal = inQueueX.AllocTensor<DTYPE_X>();
        DataCopy(xLocal, xGm[i*this->HW + j*this->ub_size], (this->processDataNum+31)/32*32);
        inQueueX.EnQue(xLocal);

    }
    __aicore__ inline void Compute(int32_t i, int32_t j)
    {
        LocalTensor<DTYPE_X> xLocal = inQueueX.DeQue<DTYPE_X>();
        float var1;
        if(j == 0)
        {
            this->sum_temp = 0;
        }
        auto temp1 = QueueTemp1.Get<DTYPE_X>();
        auto buff = QueueBuff.Get<DTYPE_X>();
        ReduceSum(temp1, xLocal, xLocal, this->processDataNum);
        var1 = temp1.GetValue(0);
        
        this->sum_temp += var1;
        if(j == this->Count-1)
        {
            var1 = this->sum_temp/this->HW;
            yGm.SetValue(i, (DTYPE_Y)var1);
        }
        inQueueX.FreeTensor(xLocal);
    }
    __aicore__ inline void Compute16(int32_t i, int32_t j)
    {
        LocalTensor<DTYPE_X> xLocal = inQueueX.DeQue<DTYPE_X>();
        DTYPE_Y var1;
        auto temp1 = QueueTemp1.Get<DTYPE_X>();
        auto temp2 = QueueTemp2.Get<DTYPE_X>();
        half sum = 0.0;        // 总和
        half c = 0.0;          // 高精度修正项
        half c1 = 0.0;         // 更高精度的修正项

        for (int j = 0; j < this->HW; j++) {
            temp1.SetValue(0, xLocal.GetValue(j));
            temp2.SetValue(0, c);
            Sub(temp1, temp1, temp2, 1);
            half y = temp1.GetValue(0);
            // float y = xLocal.GetValue(j) - c; // 剩余值
            temp1.SetValue(0, sum);
            temp2.SetValue(0, y);
            Add(temp1, temp1, temp2, 1);
            half t = temp1.GetValue(0);
            // float t = sum + y;        // 临时和
            temp1.SetValue(0, t);
            temp2.SetValue(0, sum);
            Sub(temp1, temp1, temp2, 1);
            temp2.SetValue(0, y);
            Sub(temp1, temp1, temp2, 1);
            c1 = temp1.GetValue(0);
            // c1 = (t - sum) - y;        // 计算修正项c1
            temp1.SetValue(0, sum);
            temp2.SetValue(0, c1);
            Add(temp2, temp1, temp2, 1);
            temp1.SetValue(0, t);
            Sub(temp1, temp1, temp2, 1);
            temp2.SetValue(0, y);
            Sub(temp1, temp1, temp2, 1);
            c1 = temp1.GetValue(0);
            // c = (t - (sum + c1)) - y;  // 更新修正项c
            sum = t;                   // 更新总和
        }
        var1 = (float)sum * (float)1.0 / (float)this->HW;
        yGm.SetValue(i, (DTYPE_Y)var1);
        inQueueX.FreeTensor(xLocal);
    }

private:
    TPipe pipe;
    TQue<QuePosition::VECIN, BUFFER_NUM> inQueueX;
    TBuf<QuePosition::VECCALC> QueueBuff, QueueTemp1, QueueTemp2;

    GlobalTensor<DTYPE_X> xGm;
    GlobalTensor<DTYPE_Y> yGm;

    int32_t size;
    int32_t N;
    int32_t C;
    int32_t HW;
    float firstDimValueBack;
    int32_t ub_size;

    float sum_temp;

    int32_t processDataNum;
    int32_t Count;

};
extern "C" __global__ __aicore__ void global_avg_pool(GM_ADDR x, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling) {
    // Unpack host-side tiling parameters, then configure and run the kernel.
    GET_TILING_DATA(tiling_data, tiling);
    KernelGlobalAvgPool<DTYPE_X> kernel;
    kernel.Init(x, y,
                tiling_data.size, tiling_data.N, tiling_data.C,
                tiling_data.HW, tiling_data.firstDimValueBack, tiling_data.ub_size);
    kernel.Process();
}