
#include "log_sum_exp_tiling.h"
#include "register/op_def_registry.h"
#include "graph/utils/type_utils.h"
#include "tiling/platform/platform_ascendc.h"

namespace optiling {
// One unified-buffer (UB) data block on Ascend AI cores is 32 bytes; all
// per-core data sizes below are aligned to this granularity.
const uint32_t BLOCK_SIZE = 32;
// Double buffering: each queue holds 2 buffers so copy-in/compute/copy-out
// can overlap, which halves the UB space available per buffer.
const uint32_t BUFFER_NUM = 2;
static ge::graphStatus TilingFunc(gert::TilingContext* context)
{
    // Computes the tiling parameters for LogSumExp.
    //
    // The input shape is split around the reduced axes into three factors:
    //   unitCount   - product of the dims BEFORE the reduced span
    //   totalLength - product of the reduced dims themselves
    //   stepSize    - product of the dims AFTER the reduced span
    // Only contiguous axis lists (e.g. [0] or [1, 2]) are supported; an empty
    // or out-of-range axis list falls back to reducing the whole tensor.
    LogSumExpTilingData tiling;
    uint64_t ubSize = 0;
    auto ascendcPlatform = platform_ascendc::PlatformAscendC(context->GetPlatformInfo());
    ascendcPlatform.GetCoreMemSize(platform_ascendc::CoreMemType::UB, ubSize);
    auto coreNum = ascendcPlatform.GetCoreNum();

    const auto* attrs = context->GetAttrs();
    if (attrs == nullptr) {
        return ge::GRAPH_FAILED;
    }
    const gert::TypedContinuousVector<int64_t>* axes = attrs->GetListInt(0);
    const bool* keepDimAttr = attrs->GetBool(1);
    if (axes == nullptr || keepDimAttr == nullptr) {
        return ge::GRAPH_FAILED;
    }
    tiling.set_keepDim(*keepDimAttr);

    uint32_t unitCount = 1;
    uint32_t stepSize = 1;
    uint32_t totalLength = 1;
    const auto axesDimNum = axes->GetSize();
    if (axesDimNum == 0) {
        // Empty axis list: reduce over the entire tensor.
        totalLength = context->GetInputShape(0)->GetStorageShape().GetShapeSize();
    } else {
        auto shape = context->GetInputShape(0)->GetOriginShape();
        const int64_t dimNum = static_cast<int64_t>(shape.GetDimNum());
        // Locate the contiguous reduced span [mindim, maxdim] within the shape.
        int64_t maxdim = -1;
        int64_t mindim = dimNum;
        for (size_t i = 0; i < axesDimNum; i++) {
            int64_t index = *(axes->GetData() + i);
            if (index < 0) {
                index += dimNum; // normalize negative axes (e.g. -1 == last dim)
            }
            mindim = (index < mindim) ? index : mindim;
            maxdim = (index > maxdim) ? index : maxdim;
        }
        if (mindim < 0 || maxdim >= dimNum) {
            // Axis out of range even after normalization: fall back to a full
            // reduction rather than indexing past the shape.
            totalLength = context->GetInputShape(0)->GetStorageShape().GetShapeSize();
            stepSize = 1;
            unitCount = 1;
        } else {
            for (int64_t i = 0; i < dimNum; i++) {
                if (i < mindim) {
                    unitCount *= shape.GetDim(i);
                } else if (i > maxdim) {
                    stepSize *= shape.GetDim(i);
                } else {
                    totalLength *= shape.GetDim(i);
                }
            }
        }
    }

    tiling.set_totalLength(totalLength);
    tiling.set_stepSize(stepSize);
    tiling.set_unitCount(unitCount);

    // Bytes per element of the input data type.
    uint32_t inputNum = totalLength;
    uint32_t typeLength = 0;
    ge::TypeUtils::GetDataTypeLength(context->GetInputDesc(0)->GetDataType(), typeLength);
    uint32_t inputLength = inputNum * typeLength;
    uint32_t inputBytes = typeLength;

    // There are a total of 3 shared UB spaces for input and output; float16 and
    // float both need 3 more temporary buffers, hence 6 UB regions in total.
    uint32_t ubDataNumber = 6;
    // Number of 32B blocks usable per region; DOUBLE BUFFER is already counted here.
    uint32_t tileBlockNum = (ubSize / BLOCK_SIZE / BUFFER_NUM) / ubDataNumber;
    uint32_t tileDataNum = (tileBlockNum * BLOCK_SIZE) / inputBytes;

    // Input data rounded up to 32B alignment.
    uint32_t inputLengthAlgin32 = (((inputLength + BLOCK_SIZE - 1) / BLOCK_SIZE) * BLOCK_SIZE);
    // Single-core only for now; the commented-out logic below distributes the
    // 32B blocks across all available cores.
    // coreNum = (coreNum <  inputLengthAlgin32 / BLOCK_SIZE) ? coreNum : inputLengthAlgin32 / BLOCK_SIZE;
    // coreNum = (coreNum >= 1) ? coreNum : 1;
    coreNum = 1;
    uint32_t everyCoreInputBlockNum = inputLengthAlgin32 / BLOCK_SIZE / coreNum;
    uint32_t tailBlockNum = (inputLengthAlgin32 / BLOCK_SIZE) % coreNum;

    // "Small" cores: per-core data volume and tile count.
    uint32_t smallCoreDataNum = everyCoreInputBlockNum * BLOCK_SIZE / inputBytes;
    uint32_t smallTileNum = everyCoreInputBlockNum / tileBlockNum;
    uint32_t finalSmallTileNum = (everyCoreInputBlockNum % tileBlockNum) == 0 ? smallTileNum : smallTileNum + 1;
    // Tail tile: whatever remains after the full tiles (falls back to a full
    // tile when the data divides evenly).
    uint32_t smallTailDataNum = inputNum - (tileDataNum * smallTileNum);
    smallTailDataNum = smallTailDataNum == 0 ? tileDataNum : smallTailDataNum;
    // smallCoreDataNum traditionally also drives global-memory allocation, so
    // after folding in the dim split we pass the real output element count
    // (stepSize * unitCount) instead -- mirroring the official LpNorm sample,
    // which resolves the data size with a kernel-side check.
    uint32_t realSmallCoreDataNum = stepSize * unitCount;

    // "Big" cores carry one extra 32B block compared to small cores.
    everyCoreInputBlockNum += 1;
    uint32_t bigCoreDataNum = everyCoreInputBlockNum * BLOCK_SIZE / inputBytes;
    uint32_t bigTileNum = everyCoreInputBlockNum / tileBlockNum;
    uint32_t finalBigTileNum = (everyCoreInputBlockNum % tileBlockNum) == 0 ? bigTileNum : bigTileNum + 1;
    uint32_t bigTailDataNum = bigCoreDataNum - tileDataNum * bigTileNum;
    bigTailDataNum = bigTailDataNum == 0 ? tileDataNum : bigTailDataNum;

    // In the single-core setup smallCoreDataNum represents the GM allocation size.
    tiling.set_smallCoreDataNum(realSmallCoreDataNum);
    tiling.set_bigCoreDataNum(bigCoreDataNum);
    tiling.set_tileDataNum(tileDataNum);
    tiling.set_smallTailDataNum(smallTailDataNum);
    tiling.set_bigTailDataNum(bigTailDataNum);
    tiling.set_finalSmallTileNum(finalSmallTileNum);
    tiling.set_finalBigTileNum(finalBigTileNum);
    tiling.set_tailBlockNum(tailBlockNum);

    context->SetBlockDim(coreNum);
    tiling.SaveToBuffer(context->GetRawTilingData()->GetData(), context->GetRawTilingData()->GetCapacity());
    context->GetRawTilingData()->SetDataSize(tiling.GetDataSize());
    size_t *currentWorkspace = context->GetWorkspaceSizes(1);
    currentWorkspace[0] = 0;
    return ge::GRAPH_SUCCESS;
}
}



namespace ge {
static ge::graphStatus InferShape(gert::InferShapeContext* context)
{
    // Infers the output shape of LogSumExp from the input shape and the
    // "dim" / "keep_dim" attributes: reduced axes are dropped, or kept as
    // size-1 dims when keep_dim is true. An empty "dim" list reduces every
    // axis (matching the tiling function's full-reduction fallback).
    const gert::Shape* x_shape = context->GetInputShape(0);
    gert::Shape* y_shape = context->GetOutputShape(0);
    if (x_shape == nullptr || y_shape == nullptr) {
        return GRAPH_FAILED;
    }
    const auto* attrs = context->GetAttrs();
    const gert::TypedContinuousVector<int64_t>* axes =
        (attrs != nullptr) ? attrs->GetListInt(0) : nullptr;
    const bool* keepDimPtr = (attrs != nullptr) ? attrs->GetBool(1) : nullptr;
    if (axes == nullptr || keepDimPtr == nullptr) {
        // Attributes unavailable: fall back to the previous pass-through
        // behaviour so the graph still builds.
        *y_shape = *x_shape;
        return GRAPH_SUCCESS;
    }
    const bool keepDim = *keepDimPtr;
    const int64_t dimNum = static_cast<int64_t>(x_shape->GetDimNum());
    const int64_t axesNum = static_cast<int64_t>(axes->GetSize());
    y_shape->SetDimNum(0);
    for (int64_t i = 0; i < dimNum; ++i) {
        // An empty axis list means "reduce all dims".
        bool isReduced = (axesNum == 0);
        for (int64_t j = 0; j < axesNum && !isReduced; ++j) {
            int64_t axis = *(axes->GetData() + j);
            if (axis < 0) {
                axis += dimNum; // normalize negative axes
            }
            isReduced = (axis == i);
        }
        if (!isReduced) {
            y_shape->AppendDim(x_shape->GetDim(i));
        } else if (keepDim) {
            y_shape->AppendDim(1);
        }
    }
    return GRAPH_SUCCESS;
}
}


namespace ops {
// Operator prototype registration for the custom LogSumExp AI Core operator:
// declares the I/O tensors, attributes, shape-inference entry, tiling entry,
// and the SoC versions the kernel is compiled for.
class LogSumExp : public OpDef {
public:
    explicit LogSumExp(const char* name) : OpDef(name)
    {
        // Input tensor; fp32 and fp16 variants, ND layout only.
        this->Input("x")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT, ge::DT_FLOAT16})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND});
        // Output tensor; dtype/format lists are positionally paired with the input's.
        this->Output("y")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT, ge::DT_FLOAT16})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND});
        // Attr 0: list of axes to reduce over (read via GetListInt(0) in tiling).
        this->Attr("dim").ListInt();
        // Attr 1: keep reduced dims as size 1 when true; defaults to false.
        this->Attr("keep_dim").AttrType(OPTIONAL).Bool(false);

        this->SetInferShape(ge::InferShape);

        this->AICore()
            .SetTiling(optiling::TilingFunc);
        // Supported SoC targets for this kernel.
        this->AICore().AddConfig("ascend310b");
        this->AICore().AddConfig("ascend910b");

    }
};

OP_ADD(LogSumExp);
}
