/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2023. All rights reserved.
 *
 * Function : softmax along the reduce axis
 * This sample implements a numerically-stable softmax kernel on the Ascend platform.
 */
#include "kernel_operator.h"
#include "math.h"
#include <float.h>
#include<stdio.h>
// tensor num for each queue
using namespace AscendC;
const int32_t BUFFER_NUM=2;

// Softmax kernel implementation (class name kept as KernelAdd for
// compatibility with the existing entry point; it actually computes softmax).
template<typename TYPE_X, typename TYPE_Y> class KernelAdd {
    using T = TYPE_X;
public:
    __aicore__ inline KernelAdd() {}

    // Sets up global-memory tensor views and allocates all on-chip storage.
    //
    // x, y          : GM addresses of the input and the output tensor.
    // total_size    : total number of elements in x (and y).
    // batch_* / stride_* / reduce_* : layout of the softmax reduction; an
    //     element is addressed as b*batch_size + s*stride_size + r*reduce_size,
    //     with the reduction running over r in [0, reduce_number).
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, int32_t total_size, 
                        int32_t batch_size, int32_t batch_number, int32_t stride_size, 
                        int32_t stride_number, int32_t reduce_size, int32_t reduce_number)
    {
        this->total_size = total_size;
        this->batch_size = batch_size;
        this->batch_number = batch_number;
        this->stride_size = stride_size;
        this->stride_number = stride_number;
        this->reduce_size = reduce_size;
        this->reduce_number = reduce_number;

        xGm.SetGlobalBuffer((__gm__ DTYPE_X *)x, this->total_size);
        yGm.SetGlobalBuffer((__gm__ DTYPE_X *)y, this->total_size);
        // One-float scratch buffers for the scalar path (tmp2 is currently unused).
        pipe.InitBuffer(tmp1, sizeof(float));
        pipe.InitBuffer(tmp2, sizeof(float));

        // Vector path (mode 1): one full reduction row per tile.
        this->processDataNum = this->reduce_number;
        this->tileDataNum = this->reduce_number;
        pipe.InitBuffer(inQueueX, BUFFER_NUM, this->tileDataNum * sizeof(TYPE_X));
        pipe.InitBuffer(outQueueY, BUFFER_NUM, this->tileDataNum * sizeof(TYPE_X));
        pipe.InitBuffer(worklocal, this->tileDataNum * sizeof(TYPE_X));
    }

    // Scalar reference path (mode 0): numerically-stable softmax computed
    // element by element via GetValue/SetValue. Handles arbitrary strides but
    // is slow; kept as a correctness baseline.
    __aicore__ inline void Process()
    {
        // 1-element local tensor used to run the vector Exp on a single scalar.
        // (The original also fetched a second, unused handle from tmp1 — a
        // copy-paste typo for tmp2; removed as dead code.)
        auto t1 = tmp1.Get<float>();

        for (int b = 0; b < batch_number; ++b) {
            for (int s = 0; s < stride_number; ++s) {
                float max = -FLT_MAX;
                float sum = 0;

                // Pass 1: row maximum, for numerical stability of exp().
                for (int r = 0; r < reduce_number; ++r) {
                    int32_t index = b * batch_size + s * stride_size + r * reduce_size;
                    auto value = float(xGm.GetValue(index));
                    if (value > max) max = value;
                }

                // Pass 2: accumulate sum of exp(x - max).
                for (int r = 0; r < reduce_number; ++r) {
                    int32_t index = b * batch_size + s * stride_size + r * reduce_size;
                    auto value = float(xGm.GetValue(index)) - max;
                    t1.SetValue(0, value);
                    AscendC::Exp(t1, t1, 1);
                    sum += t1.GetValue(0);
                }

                // Pass 3: write normalized probabilities exp(x - max) / sum.
                for (int r = 0; r < reduce_number; ++r) {
                    int32_t index = b * batch_size + s * stride_size + r * reduce_size;
                    auto value = float(xGm.GetValue(index)) - max;
                    t1.SetValue(0, value);
                    AscendC::Exp(t1, t1, 1);
                    yGm.SetValue(index, t1.GetValue(0) / sum);
                }
            }
        }
    }

    // Vector path (mode 1): classic pipelined CopyIn -> Compute -> CopyOut,
    // one tile (= one reduction row of tileDataNum elements) per iteration.
    // NOTE(review): this path indexes GM contiguously (progress * tileDataNum)
    // and ignores stride_size/reduce_size — presumably the host tiling only
    // selects mode 1 for contiguous-reduce layouts; confirm against the host code.
    __aicore__ inline void Process_vector()
    {
        int32_t loopCount = this->batch_number;

        for (int32_t i = 0; i < loopCount; i++) {
            CopyIn(i);
            Compute(i);
            CopyOut(i);
        }
    }
private:

    // Stage 1: DMA one tile of processDataNum elements from GM into VECIN.
    __aicore__ inline void CopyIn(int32_t progress)
    {
        AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.AllocTensor<TYPE_X>();
        AscendC::DataCopy(xLocal, xGm[progress * this->tileDataNum], this->processDataNum);
        inQueueX.EnQue(xLocal);
    }

    // Stage 2: softmax over one tile: y = exp(x - max(x)) / sum(exp(x - max(x))).
    __aicore__ inline void Compute(int32_t progress)
    {
        AscendC::LocalTensor<TYPE_X> xLocal = inQueueX.DeQue<TYPE_X>();
        AscendC::LocalTensor<TYPE_X> yLocal = outQueueY.AllocTensor<TYPE_X>();
        auto w1 = worklocal.Get<TYPE_X>();

        AscendC::ReduceMax(yLocal, xLocal, w1, this->tileDataNum);
        // 1-element identity Muls: presumably forces vector/scalar
        // synchronization so GetValue below reads the finished reduction —
        // confirm against the commented-out PipeBarrier<PIPE_V>() alternative.
        AscendC::Muls(yLocal, yLocal, TYPE_X(1), 1);
        float max_ = float(yLocal.GetValue(0));
        float max_reverse = -max_;
        // x <- exp(x - max), the numerically-stable form.
        AscendC::Adds(xLocal, xLocal, TYPE_X(max_reverse), this->tileDataNum);
        AscendC::Exp(xLocal, xLocal, this->tileDataNum);

        AscendC::ReduceSum(yLocal, xLocal, w1, this->tileDataNum);

        AscendC::Muls(yLocal, yLocal, TYPE_X(1), 1);  // same sync trick as above
        float sum_ = float(yLocal.GetValue(0));
        float sum_reverse = 1 / sum_;

        // y <- exp(x - max) * (1 / sum)
        AscendC::Muls(yLocal, xLocal, TYPE_X(sum_reverse), this->tileDataNum);

        outQueueY.EnQue<TYPE_X>(yLocal);
        inQueueX.FreeTensor(xLocal);
    }

    // Stage 3: DMA the finished tile from VECOUT back to GM.
    __aicore__ inline void CopyOut(int32_t progress)
    {
        AscendC::LocalTensor<TYPE_X> yLocal = outQueueY.DeQue<TYPE_X>();
        AscendC::DataCopy(yGm[progress * this->tileDataNum], yLocal, this->processDataNum);
        outQueueY.FreeTensor(yLocal);
    }

    TPipe pipe;
    GlobalTensor<DTYPE_X> xGm;  // input tensor view in global memory
    GlobalTensor<DTYPE_Y> yGm;  // output tensor view in global memory
    AscendC::TQue<AscendC::QuePosition::VECIN, BUFFER_NUM> inQueueX;
    AscendC::TQue<AscendC::QuePosition::VECOUT, BUFFER_NUM> outQueueY;
    AscendC::TBuf<AscendC::QuePosition::VECCALC> worklocal;   // scratch for ReduceMax/ReduceSum
    AscendC::TBuf<AscendC::QuePosition::VECCALC> tmp1, tmp2;  // scalar-path scratch (tmp2 unused)

    // Tiling parameters, see Init() for their meaning.
    int32_t total_size, batch_size, batch_number, stride_size, stride_number, reduce_size, reduce_number;
    int32_t processDataNum, tileDataNum;
};

extern "C" __global__ __aicore__ void softmax(GM_ADDR x, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling)
{
    GET_TILING_DATA(tiling_data, tiling);
    KernelAdd<DTYPE_X, DTYPE_Y> op;
    op.Init(x, y, tiling_data.total_size, tiling_data.batch_size, tiling_data.batch_number, 
    tiling_data.stride_size, tiling_data.stride_number, tiling_data.reduce_size, tiling_data.reduce_number);  
    if (tiling_data.mode==0)
        op.Process();
    else
        op.Process_vector();
}
