
#include "nll_loss_tiling.h"

#include <cstdint>
#include <cstring>
#include <iostream>

#include "register/op_def_registry.h"
#include "tiling/platform/platform_ascendc.h"

namespace optiling {
static ge::graphStatus TilingFunc(gert::TilingContext* context)
{
    // Computes the tiling parameters (N, C, reduction mode, core count) for the
    // NLLLoss AI Core kernel and serializes them into the raw tiling buffer.
    //
    // Tiling keys:
    //   1 -> multi-core "sum" fast path (16 vector cores), taken only when the
    //        class dimension C is 16-aligned and >= 1024 (large enough to
    //        amortize the per-core split).
    //   2 -> single-core generic path ("mean" or any other reduction value),
    //        with tiling.reduction = 0 (mean) or 1 (sum-style).
    //
    // Returns GRAPH_SUCCESS on success, GRAPH_FAILED on a malformed context
    // (missing input shape or attributes).
    NLLLossTilingData tiling;

    // Platform handle kept for future UB-size / core-count queries
    // (ascend910b reference: UB 196352 bytes, 20 cube cores, 40 vector cores).
    auto ascendcPlatform = platform_ascendc::PlatformAscendC(context->GetPlatformInfo());
    (void)ascendcPlatform;

    const auto* storageShape = context->GetInputShape(0);
    if (storageShape == nullptr) {
        return ge::GRAPH_FAILED;
    }
    const auto& x_shape = storageShape->GetStorageShape();  // runtime shape of x

    // N = batch size, C = number of classes. Input x is either (N, C) or (C,).
    // Use int64_t: Shape::GetDim returns int64_t, and the original int16_t
    // locals silently truncated any dimension >= 32768.
    int64_t N = 1;
    int64_t C = 0;
    if (x_shape.GetDimNum() == 2) {
        N = x_shape.GetDim(0);
        C = x_shape.GetDim(1);
    } else {
        // 1-D input: a single sample with C classes.
        C = x_shape.GetDim(0);
    }

    const auto* attrs = context->GetAttrs();
    if (attrs == nullptr) {
        return ge::GRAPH_FAILED;
    }
    // Attr 0 is "reduction" (see the op registration below); attr 1
    // ("ignore_index") is currently unused by the tiling.
    const char* reduction_attr = attrs->GetAttrPointer<char>(0);
    if (reduction_attr == nullptr) {
        return ge::GRAPH_FAILED;
    }

    uint32_t num_cores = 1;  // number of vector cores participating

    if ((strcmp(reduction_attr, "sum") == 0) && (C % 16 == 0) && C >= 1024) {
        // Fast path: "sum" reduction with a 16-aligned, sufficiently large C.
        // 16 cores was the empirically fastest split on this platform; the
        // kernel for key 1 does not read tiling.reduction.
        context->SetTilingKey(1);
        num_cores = 16;
    } else if (strcmp(reduction_attr, "mean") == 0) {
        context->SetTilingKey(2);
        tiling.set_reduction(0);  // 0 = mean
    } else {
        // "none" / non-aligned "sum" / any unrecognized value: single-core
        // generic kernel with sum-style accumulation.
        context->SetTilingKey(2);
        tiling.set_reduction(1);  // 1 = sum
    }

    tiling.set_N(N);
    tiling.set_C(C);

    context->SetBlockDim(num_cores);  // vector cores used by the kernel launch
    tiling.SaveToBuffer(context->GetRawTilingData()->GetData(),
                        context->GetRawTilingData()->GetCapacity());
    context->GetRawTilingData()->SetDataSize(tiling.GetDataSize());
    return ge::GRAPH_SUCCESS;
}
}


namespace ge {
static ge::graphStatus InferShape(gert::InferShapeContext* context)
{
    // Placeholder: deliberately leaves the output shape untouched and reports
    // success.
    // NOTE(review): a plain pass-through (the commented code below) would be
    // wrong when reduction is "mean"/"sum" (scalar output) vs "none" (shape
    // (N,)) — presumably the output shape is resolved elsewhere; confirm
    // before relying on this infer function.
    // const gert::Shape* x1_shape = context->GetInputShape(0);
    // gert::Shape* y_shape = context->GetOutputShape(0);
    // *y_shape = *x1_shape;
    return GRAPH_SUCCESS;
}
}


namespace ops {
// Prototype registration for the NLLLoss custom operator.
//
// WARNING: the registration order below is semantically significant — it
// defines the runtime input indices (x=0, target=1, weight=2) and attr
// indices (reduction=0, ignore_index=1). optiling::TilingFunc reads
// GetInputShape(0) and attr pointer 0, so do not reorder these calls.
class NLLLoss : public OpDef {
public:
    explicit NLLLoss(const char* name) : OpDef(name)
    {
        // Input 0: x — float log-probabilities, ND layout.
        this->Input("x")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT})
            .Format({ge::FORMAT_ND});
        // Input 1: target — int32 class indices, ND layout.
        this->Input("target")
            .ParamType(REQUIRED)
            .DataType({ge::DT_INT32})
            .Format({ge::FORMAT_ND});
        // Input 2: weight — float per-class rescaling weights, ND layout.
        this->Input("weight")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT})
            .Format({ge::FORMAT_ND});
        // Output 0: y — float loss (scalar or per-sample, depending on
        // reduction).
        this->Output("y")
            .ParamType(REQUIRED)
            .DataType({ge::DT_FLOAT})
            .Format({ge::FORMAT_ND});
        // Attr 0: reduction ("mean" default; "sum"/"none" also handled by
        // TilingFunc). Attr 1: ignore_index (default -100, currently unused
        // by the tiling).
        this->Attr("reduction").AttrType(OPTIONAL).String("mean");
        this->Attr("ignore_index").AttrType(OPTIONAL).Int(-100);

        this->SetInferShape(ge::InferShape);

        // Bind the host-side tiling function and restrict the op to the
        // ascend910b SoC.
        this->AICore()
            .SetTiling(optiling::TilingFunc);
        this->AICore().AddConfig("ascend910b");

    }
};

OP_ADD(NLLLoss);
}
