/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2022-2023. All rights reserved.
 *
 * Function : y = log(sum(exp(x))) reduced over the given axes (LogSumExp)
 * This sample implements the LogSumExp operator on the Ascend platform.
 */
#include "kernel_operator.h"
#include "math.h"
#include <float.h>
#include<stdio.h>
// tensor num for each queue
using namespace AscendC;

// Single-core LogSumExp reduction kernel:
//   y[j] = log(sum_i exp(x[i]))  over the reduced axes,
// computed in the numerically stable max-shift form
//   log(sum_i exp(x[i] - max)) + max.
template<typename TYPE_X, typename TYPE_Y> class KernelAdd {
    using T = TYPE_X;
public:
    __aicore__ inline KernelAdd() {}

    // Stores tiling parameters and binds the global-memory tensors.
    //   x / y            : input / output tensors in global memory
    //   reduce_num       : number of reduced axes
    //   output_num       : number of kept (output) axes
    //   inputLength      : total element count of x
    //   outputLength     : total element count of y
    //   *_shapes/_strides: per-axis extents and element strides into x
    //   *_strides2       : mixed-radix divisors used to decompose a linear
    //                      index into per-axis coordinates
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, int32_t reduce_num, int32_t output_num, int32_t inputLength, int32_t outputLength,
                                int32_t* reduce_shapes, int32_t* output_shapes,
                                int32_t* reduce_strides, int32_t* output_strides,
                                int32_t* reduce_strides2, int32_t* output_strides2)
    {
        this->reduce_num = reduce_num;
        this->output_num = output_num;
        this->inputLength = inputLength;
        this->outputLength = outputLength;
        this->reduce_shapes = reduce_shapes;
        this->output_shapes = output_shapes;
        this->reduce_strides = reduce_strides;
        this->output_strides = output_strides;
        this->reduce_strides2 = reduce_strides2;
        this->output_strides2 = output_strides2;

        xGm.SetGlobalBuffer((__gm__ DTYPE_X *)x, this->inputLength);
        // Bug fix: y holds DTYPE_Y elements; the original cast used DTYPE_X.
        yGm.SetGlobalBuffer((__gm__ DTYPE_Y *)y, this->outputLength);
        // One-element scratch used for the scalar Exp/Ln vector calls below.
        pipe.InitBuffer(tmp1, sizeof(float));
    }

    // Computes every output element with two passes over its reduced slice:
    // pass 1 finds the slice maximum, pass 2 accumulates exp(x - max).
    __aicore__ inline void Process()
    {
        auto p1 = tmp1.Get<float>();
        const int32_t reduceLength = this->inputLength / this->outputLength;

        for (int32_t output_index = 0; output_index < this->outputLength; ++output_index)
        {
            float max = -FLT_MAX;
            float sum = 0;

            // Base offset of this output element inside the input tensor:
            // decompose output_index into per-axis coordinates, then apply
            // the corresponding input strides.
            int32_t offset1 = 0;
            int32_t res_index = output_index;
            for (int32_t output_axis = 0; output_axis < this->output_num; ++output_axis)
            {
                int32_t current_index = res_index / this->output_strides2[output_axis];
                res_index -= current_index * this->output_strides2[output_axis];
                offset1 += current_index * this->output_strides[output_axis];
            }

            // Pass 1: maximum over the reduced slice (for numerical stability).
            for (int32_t reduce_index = 0; reduce_index < reduceLength; ++reduce_index)
            {
                int32_t input_index = InputIndex(offset1, reduce_index);
                auto input_value = static_cast<float>(xGm.GetValue(input_index));
                if (input_value > max) max = input_value;
            }

            // Pass 2: sum of exp(x - max) over the same slice. Exp operates
            // on the one-element scratch tensor because AscendC::Exp is a
            // vector API.
            for (int32_t reduce_index = 0; reduce_index < reduceLength; ++reduce_index)
            {
                int32_t input_index = InputIndex(offset1, reduce_index);
                auto input_value = static_cast<float>(xGm.GetValue(input_index));
                p1.SetValue(0, input_value - max);
                AscendC::Exp(p1, p1, 1);
                sum += p1.GetValue(0);
            }

            // y = log(sum) + max; cast back to the output dtype.
            p1.SetValue(0, sum);
            AscendC::Ln(p1, p1, 1);
            yGm.SetValue(output_index, static_cast<DTYPE_Y>(p1.GetValue(0) + max));
        }
    }
private:
    // Maps (output base offset, linear index within the reduced slice) to
    // the flat input index, using the reduce-axis divisors and strides.
    // Shared by both reduction passes (was duplicated inline in each).
    __aicore__ inline int32_t InputIndex(int32_t base, int32_t reduce_index)
    {
        int32_t input_index = base;
        int32_t res = reduce_index;
        for (int32_t reduce_axis = 0; reduce_axis < this->reduce_num; ++reduce_axis)
        {
            int32_t current = res / this->reduce_strides2[reduce_axis];
            res -= current * this->reduce_strides2[reduce_axis];
            input_index += current * this->reduce_strides[reduce_axis];
        }
        return input_index;
    }

    TPipe pipe;
    GlobalTensor<DTYPE_X> xGm;
    GlobalTensor<DTYPE_Y> yGm;

    // NOTE(review): removed tmp2, sum_buffer and work_buffer plus the
    // dead Duplicate/ReduceSum warm-up at the top of Process() — its result
    // was never read and the two 40 KB scratch buffers served no purpose.
    AscendC::TBuf<AscendC::QuePosition::VECCALC> tmp1;
    int32_t* reduce_shapes, *output_shapes, *reduce_strides, *output_strides, *reduce_strides2, *output_strides2;
    int32_t reduce_num, output_num, inputLength, outputLength;
};

// Kernel entry point: unpacks the tiling data and runs the LogSumExp operator.
extern "C" __global__ __aicore__ void log_sum_exp(GM_ADDR x, GM_ADDR y, GM_ADDR workspace, GM_ADDR tiling)
{
    GET_TILING_DATA(tiling_data, tiling);

    KernelAdd<DTYPE_X, DTYPE_Y> kernel;
    kernel.Init(x, y,
                tiling_data.reduce_num, tiling_data.output_num,
                tiling_data.inputLength, tiling_data.outputLength,
                tiling_data.reduce_shapes, tiling_data.output_shapes,
                tiling_data.reduce_strides, tiling_data.output_strides,
                tiling_data.reduce_strides2, tiling_data.output_strides2);
    kernel.Process();
}
