/**
 * @file quant_matmul_v2.cpp (was stale "matmul_leakyrelu_custom.cpp" from a copied template)
 *
 * Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */
#include "quant_matmul_v2_tiling.h"

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <map>
#include <sstream>
#include <stdexcept>
#include <string>
#include <tuple>
#include <type_traits>
#include <vector>

#include "assert.h"
#include "register/op_def_registry.h"
#include "tiling/matrix/matmul_tiling.h"
#include "tiling/platform/platform_ascendc.h"
#include "tiling/select/selectwithbytesmask_tiling.h"


namespace {
// Workspace tail reserved for the runtime (16 MiB). Kept even though it is
// not referenced in this translation unit's visible code.
constexpr size_t WORK_SPACE_RESERVE_SIZE = 16 * 1024 * 1024;

// Fixed lookup table mapping GE data types to the matmul-tiling API enum.
// Declared const (it must never be mutated); `static` was redundant inside
// an anonymous namespace and has been dropped.
const std::map<ge::DataType, matmul_tiling::DataType> GE_DTYPE_TO_MM_DTYPE = {
    {ge::DT_FLOAT16, matmul_tiling::DataType::DT_FLOAT16},
    {ge::DT_FLOAT, matmul_tiling::DataType::DT_FLOAT},
    {ge::DT_BF16, matmul_tiling::DataType::DT_BF16},
    {ge::DT_INT8, matmul_tiling::DataType::DT_INT8},
    {ge::DT_INT32, matmul_tiling::DataType::DT_INT32},
};

// Hardware capacities/counts queried from the platform at tiling time.
struct PlatformInfo{
    uint64_t ub_size;     // Unified Buffer bytes per core
    uint64_t l1_size;     // L1 bytes per core
    uint64_t l2_size;     // L2 bytes (hard-coded 192 MB by the caller)
    uint64_t l0c_size;    // L0C bytes per core
    uint32_t aivec_num;   // number of AI vector cores
    uint32_t aicube_num;  // number of AI cube cores
};

}  // namespace

namespace utils{

namespace detail{

// Computes 10^N at compile time; each tiling-key flag occupies one decimal
// digit. Returns uint64_t: the previous int version silently overflowed once
// more than 9 flags were packed, even though the caller allowed up to 63.
template <int N>
constexpr uint64_t get_base(){
    return get_base<N-1>() * 10;
}

template <>
constexpr uint64_t get_base<0>(){
    return 1;
}

} // namespace detail

// Base case: an empty flag list produces key 0.
constexpr uint64_t get_tiling_key() {return 0;}

// Packs a sequence of boolean flags into a decimal tiling key; the first
// flag becomes the most significant digit, e.g. (true, false, true) -> 101.
template <class T, class... Args>
constexpr uint64_t get_tiling_key(T key, Args... keys) {
    // 10^19 < 2^64, so at most 20 decimal digits fit in a uint64_t key.
    static_assert(sizeof...(Args) < 20, "too many tiling-key flags for a uint64_t key");
    static_assert(std::is_same<T,bool>::value, "tiling-key flags must be bool");
    return static_cast<uint64_t>(key) * detail::get_base<sizeof...(Args)>() + get_tiling_key(keys...);
}


} //namespace utils

namespace optiling {

// static bool check_use_basic_tiling(QuantMatmulV2Info &input_params, const PlatformInfo &hw_info) 
// --- basic block geometry; values are element counts unless noted ---
constexpr uint64_t ONE_BLK_SIZE = 32;
constexpr uint64_t BASIC_BLOCK_SIZE_64 = 64;
constexpr uint64_t BASIC_BLOCK_SIZE_128 = 128;
constexpr uint64_t BASIC_BLOCK_SIZE_256 = 256;
constexpr uint64_t BASIC_BLOCK_SIZE_512 = 512;
constexpr uint64_t BASIC_BLOCK_SIZE = 256 * 128;
constexpr uint64_t BASIC_BLOCK_K_128_BYTE = 128;
constexpr uint64_t L0C_SIZE_256_KB = 262144;
constexpr uint64_t HALF_FACTOR = 2;
// --- L2 tiling thresholds; the *_L2_* sizes are multiplied by MB_SIZE
// where used, so they are MB counts ---
constexpr uint64_t BASIC_BLOCK_LIMIT_L2_SIZE = 128;
constexpr uint64_t BASIC_BLOCK_L2_TILE_MAX = 45;
constexpr uint64_t BASIC_BLOCK_L2_TILE_MIN = 15;
constexpr uint64_t INNER_LEN_L1_MAX = 1024;
constexpr uint64_t INNER_LEN_L1_MEDIUM = 512;
constexpr uint64_t INNER_LEN_L1_MIN = 256;
constexpr double LIMIT_RATIO = 0.9;
constexpr double MN_CLOSE_RATIO = 0.1;
constexpr uint64_t IDX_L2_LOAD = 2;
constexpr uint64_t INNER_MIN = 1024;
constexpr uint64_t ROUND_BIG_SHAPE = 5;
constexpr uint64_t SELECT_COL_PARAM = 5;

// Usable fraction of the 192 MB L2; the mix (bf16 output) path is allowed
// slightly more (see do_l2_cache_tiling).
constexpr double L2_SPLIT_RATIO = 100.0 / 192;
constexpr double L2_SPLIT_RATIO_FOR_MIX = 110.0 / 192;
constexpr double L2_TILE_TAIL_RATIO = 0.8;
// Index constants, presumably into L2 tile descriptor arrays — only the
// names are visible here; verify against the kernel side before reuse.
constexpr uint32_t L2_TILE_NUM = 4;
constexpr uint32_t L2_TILE_INDEX = 0;
constexpr uint32_t L2_TILE_TAIL_INDEX = 1;
constexpr uint32_t L2_TAIL_TILE_INDEX = 2;
constexpr uint32_t L2_TAIL_INDEX = 3;
constexpr uint32_t OUT_TAIL_INDEX = 0;
constexpr uint32_t INNER_TAIL_INDEX = 1;
constexpr uint32_t OUT_L2_SPLIT_INDEX = 2;
constexpr uint32_t INNER_L2_SPLIT_INDEX = 3;

constexpr uint64_t SELECT_COL_ROW_FIRST_MULTI = 5;
constexpr uint64_t MAX_CLASH_NUM = 9;

// Candidate base block sizes (ALL_BASE referenced by the commented-out
// modify_base_n_for_split_trans path in calc_l0_tiling).
const std::vector<uint64_t> ALL_BASE = {64, 80, 96, 128, 160, 192, 224, 256, 320};
const std::vector<uint64_t> ND_BASE = {64, 80,96, 128, 160, 192, 256, 320};

// Integer ceiling division. A zero divisor returns the numerator unchanged,
// guarding the shape arithmetic below against division by zero.
static uint64_t ceil_div(uint64_t numerator, uint64_t divisor){
    return divisor == 0 ? numerator : (numerator + divisor - 1) / divisor;
}

// Rounds `value` up to the next multiple of `alignment` (default 16).
static uint64_t ceil_align(uint64_t value, uint64_t alignment = 16){
    return ceil_div(value, alignment) * alignment;
}

// M as seen by the matmul API: the per-NPU slice when sharding is active
// (m_size_per_npu > 0), otherwise the full M.
uint64_t QuantMatmulV2Info::getMatmulApiMSize() {
    if (m_size_per_npu > 0) {
        return m_size_per_npu;
    }
    return m_size;
}

// Total M the matmul API iterates over. With per-NPU sharding each slice is
// padded up to a multiple of base_m, so the total is padded-slice-size times
// the number of slices covering m_size; without sharding it is just m_size.
uint64_t QuantMatmulV2Info::getTotalMatmulApiMSize(uint64_t base_m){
    if (m_size_per_npu == 0){
        return m_size;
    }
    const uint64_t padded_slice = ceil_align(m_size_per_npu, base_m);
    const uint64_t slice_cnt = ceil_div(m_size, m_size_per_npu);
    return padded_slice * slice_cnt;
}

// Number of base_m-sized blocks covering the (possibly padded) total M.
uint64_t QuantMatmulV2Info::getTotalBaseMCnt(uint64_t base_m){
    const uint64_t total_m = getTotalMatmulApiMSize(base_m);
    return ceil_div(total_m, base_m);
}

// Chooses L0C double buffering: enabled (2) only when the int32 output tile
// fits in 16 KB, and force-disabled for int32 bias with base_n >= 128.
static void set_l0c_db(QuantMatmulV2Info &input_params, BasicTiling &basic_tiling){
    const uint64_t l0c_tile_bytes = basic_tiling.base_m * basic_tiling.base_n * sizeof(int32_t);
    basic_tiling.db_l0c = (l0c_tile_bytes <= 16384) ? 2 : 1;

    const bool wide_int32_bias = input_params.has_bias &&
                                 input_params.bias_dtype == ge::DT_INT32 &&
                                 basic_tiling.base_n >= 128;
    if (wide_int32_bias){
        // Bias accumulation needs the extra L0C capacity: single buffer.
        basic_tiling.db_l0c = 1;
    }
}


// Total number of (base_m x base_n) output blocks across the whole matmul.
static uint64_t get_total_cnt(QuantMatmulV2Info &input_params, uint64_t base_m, uint64_t base_n){
    const uint64_t m_cnt = input_params.getTotalBaseMCnt(base_m);
    const uint64_t n_cnt = ceil_div(input_params.n_size, base_n);
    return m_cnt * n_cnt;
}

// Chooses the traversal order of output blocks across cores. A clash-aware
// selection is sketched in the commented-out code below but is not active:
// the order is currently fixed to ROW_FIRST.
static void determine_calc_order(QuantMatmulV2Info &input_params, BasicTiling &basic_tiling){
    // NOTE(review): m_total_cnt / n_total_cnt / round were computed here but
    // only fed the commented-out analysis, triggering unused-variable
    // warnings; `round` also read basic_tiling.used_core_num, which is not
    // set yet when calc_l0_tiling calls this function. Recompute them when
    // re-enabling the analysis (after used_core_num is assigned).

    // uint64_t m_total_cnt = input_params.getTotalBaseMCnt(basic_tiling.base_m);
    // uint64_t n_total_cnt = ceil_div(input_params.n_size, basic_tiling.base_n);
    // uint64_t round = ceil_div(m_total_cnt * n_total_cnt, basic_tiling.used_core_num);
    // auto core_dist_col_first = calc_core_distribution(m_total_cnt, n_total_cnt, COL_FIRST, round, basic_tiling.used_core_num);
    // auto core_dist_row_first = calc_core_distribution(m_total_cnt, n_total_cnt, ROW_FIRST, round, basic_tiling.used_core_num);
    // uint64_t m_clash_col_first_case = std::get<0>(core_dist_col_first);
    // uint64_t n_clash_col_first_case = std::get<1>(core_dist_col_first);
    // uint64_t core_clash_col_first_case = std::max(m_clash_col_first_case, n_clash_col_first_case);
    // uint64_t m_clash_row_first_case = std::get<0>(core_dist_row_first);
    // uint64_t n_clash_row_first_case = std::get<1>(core_dist_row_first);
    // uint64_t core_clash_row_first_cast = std::max(m_clash_row_first_case, n_clash_row_first_case);
    // if (core_clash_col_first_case >= MAX_CLASH_NUM && core_clash_col_first_case > core_clash_row_first_case){
    //     return;
    // }
    // divisible_core_layout(basic_tiling, m_total_cnt, n_total_cnt, basic_tiling.cal_order, round);

    static_cast<void>(input_params);  // used only by the disabled analysis above
    basic_tiling.cal_order = ROW_FIRST;
}


// L0 tiling: fixes the base block sizes (base_m/base_n/base_k), picks the
// calculation order, and derives the number of cube cores to use.
static void calc_l0_tiling(QuantMatmulV2Info &input_params, BasicTiling &basic_tiling, const PlatformInfo &hw_info){
    basic_tiling.base_m = 128;
    basic_tiling.base_n = 256;
    // select a branch: input_params.b_format == ge::FORMAT_FRACTAL_NZ
    // modify_base_n_for_split_trans(input_params, basic_tiling, ALL_BASE); split_trans_num == 0
    // select a branch: input_params.split_trans_num == 0
    basic_tiling.base_k = 128;
    determine_calc_order(input_params, basic_tiling);
    uint64_t total_cnt = get_total_cnt(input_params, basic_tiling.base_m, basic_tiling.base_n);
    uint64_t core_num = hw_info.aicube_num;
    // (removed a debug std::cout of total_cnt / core_num left from bring-up)
    basic_tiling.used_core_num = std::min(total_cnt, core_num);
    // NOTE(review): bring-up override — discards the computed core count and
    // pins the op to 20 cores. TODO: remove once the computed value is
    // validated on hardware.
    basic_tiling.used_core_num = 20;
    set_l0c_db(input_params, basic_tiling);
}
// L1 tiling: fixed step/depth configuration for the basic-tiling path
// (input_params.is_basic_tiling == true). hw_info is currently unused.
static void calc_l1_tiling(QuantMatmulV2Info &input_params, BasicTiling &basic_tiling, const PlatformInfo &hw_info){
    // One base block per L1 step in both output directions.
    basic_tiling.step_m = 1;
    basic_tiling.step_n = 1;
    // Four K steps per matrix; depth 8 = 4 steps x 2, presumably for double
    // buffering — confirm against the kernel side.
    basic_tiling.step_ka = 4;
    basic_tiling.step_kb = 4;
    basic_tiling.depth_a1 = 8;
    basic_tiling.depth_b1 = 8;
}
// L2 cache tiling: when the operand footprint would overflow the usable L2
// fraction, splits the output block grid into L2-sized tiles; otherwise a
// single tile covering the whole grid is kept.
static void do_l2_cache_tiling(QuantMatmulV2Info &input_params, BasicTiling &basic_tiling, const PlatformInfo &hw_info){
    // Default: one L2 tile spanning the full block grid.
    basic_tiling.m_tile_cnt_l2 = 1;
    basic_tiling.n_tile_cnt_l2 = 1;
    basic_tiling.m_tile_block = input_params.getTotalBaseMCnt(basic_tiling.base_m);
    basic_tiling.n_tile_block = ceil_div(input_params.n_size, basic_tiling.base_n);

    // Byte footprints of A, B and C for the full problem.
    uint64_t m_size = input_params.getTotalMatmulApiMSize(basic_tiling.base_m);
    uint64_t size_a = get_size_with_data_type(m_size * input_params.k_size, input_params.a_dtype);
    uint64_t size_b = get_size_with_data_type(input_params.k_size * input_params.n_size, input_params.b_dtype);
    uint64_t size_c = get_size_with_data_type(m_size * input_params.n_size, input_params.c_dtype);

    uint64_t total_size = size_a + size_b + size_c;
    uint64_t limit_size = BASIC_BLOCK_LIMIT_L2_SIZE * MB_SIZE;
    uint64_t tile_limit = BASIC_BLOCK_L2_TILE_MIN;
    // Usable share of L2; the mix (bf16 output) path is allowed a bit more.
    double l2_thre_size = hw_info.l2_size * L2_SPLIT_RATIO;
    if (input_params.c_dtype == ge::DT_BF16){
        l2_thre_size = hw_info.l2_size * L2_SPLIT_RATIO_FOR_MIX;
    }

    // Everything fits comfortably: keep the single-tile default.
    if (total_size < l2_thre_size && size_a < limit_size && size_b < limit_size && size_c < limit_size){
        return;
    }

    uint64_t m_l2_split = get_shape_with_data_type(m_size, input_params.a_dtype);
    uint64_t n_l2_split = get_shape_with_data_type(input_params.n_size, input_params.b_dtype);
    uint64_t m_tile = 1;
    uint64_t n_tile = 1;
    basic_tiling.cal_order = 1; //calc_tile

    uint64_t m_tile_block = ceil_div(m_l2_split, basic_tiling.base_m);
    uint64_t n_tile_block = ceil_div(n_l2_split, basic_tiling.base_n);
    m_tile = ceil_div(m_size, (m_tile_block * basic_tiling.base_m));
    n_tile = ceil_div(input_params.n_size, (n_tile_block * basic_tiling.base_n));

    // Degenerate split (tile covers the whole axis) or small operand:
    // fall back to a single tile on that axis.
    if (m_tile_block >= m_size / basic_tiling.base_m || size_a <= tile_limit * MB_SIZE){
        m_tile = 1;
        m_tile_block = ceil_div(m_size, basic_tiling.base_m);
    }
    // FIX(review): this compared against base_m before — a copy-paste from
    // the M branch; the N axis must be measured in base_n blocks.
    if (n_tile_block >= input_params.n_size / basic_tiling.base_n || size_b <= tile_limit * MB_SIZE){
        n_tile = 1;
        n_tile_block = ceil_div(input_params.n_size, basic_tiling.base_n);
    }

    basic_tiling.m_tile_cnt_l2 = m_tile;
    basic_tiling.n_tile_cnt_l2 = n_tile;
    basic_tiling.m_tile_block = m_tile_block;
    basic_tiling.n_tile_block = n_tile_block;
}
// UB tiling: derives how many rows (ub_calc_m) of a base block the vector
// core processes per pass, given the per-row UB byte cost of the dequant /
// pertoken pipeline; the full base_n is processed per pass (ub_calc_n).
static void calc_ub_tiling(QuantMatmulV2TilingData &tilingdata, QuantMatmulV2Info &input_params, const PlatformInfo &hw_info, uint64_t base_m, uint64_t base_n){
    uint64_t ub_size = hw_info.ub_size;
    // (removed a debug std::cout of ub_size left from bring-up)
    uint64_t need_ub_size = 0;            // fixed overhead, independent of ub_calc_m
    uint32_t ub_calc_n = base_n;
    constexpr uint64_t UB_EXTRE_BYTE = 8; // extra per-element scratch bytes
    // Per-row cost: double-buffered int32 input + int16 staging + extra.
    uint64_t ub_calc = (NUM_DB * (sizeof(int32_t) + sizeof(int16_t)) + UB_EXTRE_BYTE) * ub_calc_n;
    if (!input_params.is_per_tensor){
        // Per-channel scale vector, double buffered.
        ub_calc += NUM_DB * ge::GetSizeByDataType(input_params.scale_dtype) * ub_calc_n;
    }
    if (input_params.is_pertoken || (input_params.bias_dtype != ge::DT_INT32)){
        ub_calc += sizeof(float) * ub_calc_n;
    }
    if (input_params.is_pertoken){
        need_ub_size += base_m * ONE_BLK_SIZE;
        ub_calc += NUM_DB * sizeof(float);
        need_ub_size += NUM_DB * sizeof(float) * 7;
        ub_calc += sizeof(float) * ub_calc_n;
    }
    if (input_params.bias_dtype != ge::DT_INT32){
        // Non-int32 bias is loaded and converted to float in UB.
        ub_calc += sizeof(float) * ub_calc_n;
        need_ub_size += NUM_DB * ge::GetSizeByDataType(input_params.bias_dtype) * ub_calc_n + sizeof(float) * ub_calc_n;
    }
    // NOTE(review): assumes need_ub_size < ub_size; underflows otherwise.
    ub_size -= need_ub_size;
    // Rows per pass: bounded by UB capacity, the base block, and total M.
    uint32_t ub_calc_m = std::min(std::min(ub_size / ub_calc, static_cast<uint64_t>(base_m)), input_params.m_size);

    tilingdata.params.set_ub_calc_m(ub_calc_m);
    tilingdata.params.set_ub_calc_n(ub_calc_n);
    tilingdata.params.set_need_ub_buffer(ub_calc_m * ub_calc_n * UB_EXTRE_BYTE);
}

// Copies the computed basic tiling into the TilingData structures consumed by
// the kernel: the matmul-API tiling section plus the L2-cache tiling section.
static void set_matmul_tiling_from_basic_tiling(QuantMatmulV2TilingData &tiling_data, QuantMatmulV2Info &input_params, BasicTiling &basic_tiling){
    // Problem shape: each core handles one base block in M/N; K is not split.
    tiling_data.matmul_tiling.set_M(input_params.m_size);
    tiling_data.matmul_tiling.set_N(input_params.n_size);
    tiling_data.matmul_tiling.set_Ka(input_params.k_size);
    tiling_data.matmul_tiling.set_Kb(input_params.k_size);
    tiling_data.matmul_tiling.set_usedCoreNum(basic_tiling.used_core_num);
    tiling_data.matmul_tiling.set_singleCoreM(basic_tiling.base_m);
    tiling_data.matmul_tiling.set_singleCoreN(basic_tiling.base_n);
    tiling_data.matmul_tiling.set_singleCoreK(input_params.k_size);

    // L0 base block sizes.
    tiling_data.matmul_tiling.set_baseM(basic_tiling.base_m);
    tiling_data.matmul_tiling.set_baseN(basic_tiling.base_n);
    tiling_data.matmul_tiling.set_baseK(basic_tiling.base_k);

    // L1 depths and step counts (see calc_l1_tiling).
    tiling_data.matmul_tiling.set_depthA1(basic_tiling.depth_a1);
    tiling_data.matmul_tiling.set_depthB1(basic_tiling.depth_b1);
    tiling_data.matmul_tiling.set_stepM(basic_tiling.step_m);
    tiling_data.matmul_tiling.set_stepN(basic_tiling.step_n);
    tiling_data.matmul_tiling.set_stepKa(basic_tiling.step_ka);
    tiling_data.matmul_tiling.set_stepKb(basic_tiling.step_kb);
    // NOTE(review): bias is force-disabled here even when input_params.has_bias
    // is set (the honoring variant is kept commented out) — confirm before
    // enabling the bias path.
    //tiling_data.matmul_tiling.set_isBias(input_params.has_bias ? 1 : 0);
    tiling_data.matmul_tiling.set_isBias(0);
    tiling_data.matmul_tiling.set_iterateOrder(basic_tiling.iterate_order);
    // Double buffering: always on for L0A/L0B; L0C decided in set_l0c_db.
    tiling_data.matmul_tiling.set_dbL0A(2);
    tiling_data.matmul_tiling.set_dbL0B(2);
    tiling_data.matmul_tiling.set_dbL0C(basic_tiling.db_l0c);
    tiling_data.matmul_tiling.set_shareL0CSize(131072);
    // Batch dimensions are unused by this op (plain 2-D matmul).
    tiling_data.matmul_tiling.set_batchM(1);
    tiling_data.matmul_tiling.set_batchN(1);
    tiling_data.matmul_tiling.set_singleBatchM(1);
    tiling_data.matmul_tiling.set_singleBatchN(1);
    
    // L2 cache tiling section (see do_l2_cache_tiling); the commented-out
    // overrides below were bring-up experiments.
    tiling_data.tile_l2cache_tiling.set_n_tile_cnt_l2(basic_tiling.n_tile_cnt_l2);
    tiling_data.tile_l2cache_tiling.set_m_tile_cnt_l2(basic_tiling.m_tile_cnt_l2);
    tiling_data.tile_l2cache_tiling.set_m_tile_block(basic_tiling.m_tile_block);
    // tiling_data.tile_l2cache_tiling.set_m_tile_block(26);
    // tiling_data.tile_l2cache_tiling.set_m_tile_cnt_l2(10);
    tiling_data.tile_l2cache_tiling.set_n_tile_block(basic_tiling.n_tile_block);
    tiling_data.tile_l2cache_tiling.set_cal_order(basic_tiling.cal_order);
    tiling_data.tile_l2cache_tiling.set_is_basic_tiling(1U);
    
}

/**
 * @brief Tiling entry point for QuantMatmulV2.
 *
 * Queries hardware capacities, extracts shapes/attrs from the context,
 * computes L0/L1/L2/UB tiling, serializes the tiling data and sets the
 * tiling key. Throws std::runtime_error on invalid inputs.
 *
 * Fixes vs. the previous revision:
 *  - split_trans_num is validated as signed before widening (the old
 *    `uint64_t < 0` check was always false);
 *  - a present-but-non-float pretoken_scale is now rejected (the old check
 *    was dead code inside a condition that already required DT_FLOAT);
 *  - removed an unreachable `return` after a throw and unused locals.
 */
static ge::graphStatus quant_matmul_v2_tiling_func(gert::TilingContext* context) {
    // 1. Get hardware info.
    uint64_t ub_size = 0;
    uint64_t l1_size = 0;
    uint64_t l2_size = 192 * MB_SIZE;  // not queried: hard-coded 192 MB L2 (ascend910b)
    uint64_t l0c_size = 0;
    uint32_t aivec_num = 0;
    uint32_t aicube_num = 0;
    auto platform_info = platform_ascendc::PlatformAscendC(context->GetPlatformInfo());
    aivec_num = platform_info.GetCoreNumAiv();
    aicube_num = platform_info.GetCoreNumAic();
    platform_info.GetCoreMemSize(platform_ascendc::CoreMemType::UB, ub_size);
    platform_info.GetCoreMemSize(platform_ascendc::CoreMemType::L1, l1_size);
    platform_info.GetCoreMemSize(platform_ascendc::CoreMemType::L0_C, l0c_size);

    // Debug dump of core counts (bring-up aid).
    std::cout << "platform_info.GetCoreNum():" << platform_info.GetCoreNum() 
    << "platform_info.GetCoreNumAic():" << platform_info.GetCoreNumAic()
    << "platform_info.GetCoreNumAiv():" << platform_info.GetCoreNumAiv()
    << "platform_info.GetCoreNumVector():" << platform_info.GetCoreNumVector()
    << std::endl;

    // Block dim is set below from basic_tiling.used_core_num; the
    // CalcTschBlockDim variant is kept for reference.
    [[maybe_unused]] uint32_t sliceNum = aivec_num;
    //context->SetBlockDim(platform_info.CalcTschBlockDim(sliceNum, aicube_num, aivec_num));
    PlatformInfo hw_info = {ub_size, l1_size, l2_size, l0c_size, aivec_num, aicube_num};

    // 2. Get op info (shapes, formats, attrs).
    QuantMatmulV2Info input_params;
    const auto &mat_a_shape = context->GetInputShape(0)->GetStorageShape();
    const auto &mat_b_shape = context->GetInputShape(1)->GetStorageShape();
    const auto &scale_shape = context->GetInputShape(2)->GetStorageShape();
    auto mat_b_desc = context->GetInputDesc(1);
    auto x2_format = static_cast<ge::Format>(ge::GetPrimaryFormat(mat_b_desc->GetStorageFormat()));
    if (x2_format == ge::Format::FORMAT_ND) {
        input_params.b_format = ge::FORMAT_ND;
    }
    // A single-element scale means per-tensor quantization.
    input_params.is_per_tensor = scale_shape.GetDim(0) == 1;
    input_params.trans_a = *(context->GetAttrs()->GetAttrPointer<bool>(1));
    input_params.trans_b = *(context->GetAttrs()->GetAttrPointer<bool>(2));

    input_params.m_size = input_params.trans_a ? mat_a_shape.GetDim(1) : mat_a_shape.GetDim(0);
    input_params.k_size = input_params.trans_b ? mat_b_shape.GetDim(1) : mat_b_shape.GetDim(0);
    input_params.n_size = input_params.trans_b ? mat_b_shape.GetDim(0) : mat_b_shape.GetDim(1);

    input_params.c_dtype = context->GetOutputDesc(0)->GetDataType();
    // Only int32 or bf16 output is supported: int32 is cube-only, bf16 is mix.
    input_params.out_dtype = *(context->GetAttrs()->GetAttrPointer<int32_t>(0));
    
    if (context->GetOptionalInputShape(3) != nullptr){
        input_params.has_bias = true;
        input_params.bias_dtype = context->GetOptionalInputDesc(3)->GetDataType();
    }
    // NOTE(review): bring-up override — bias is force-disabled regardless of
    // the optional input above. TODO: remove once the bias path is validated.
    input_params.has_bias = false;

    // FIX(review): the dtype check was dead code before (the enclosing
    // condition already required DT_FLOAT); a present-but-non-float
    // pretoken_scale is now rejected as the error message intends.
    if (context->GetOptionalInputShape(4) != nullptr){
        if (context->GetOptionalInputDesc(4)->GetDataType() != ge::DT_FLOAT){
            throw std::runtime_error("pretoken_scale should be float");
        }
        input_params.is_pertoken = true;
    }

    // NOTE(review): condition and message look inconsistent — this rejects
    // (non-pertoken && non-bf16) and therefore allows pertoken with any
    // dtype; confirm the intended constraint before tightening.
    if (!input_params.is_pertoken && input_params.c_dtype != ge::DT_BF16){
        throw std::runtime_error("quant_matmul_v2 support out_dtype bf16 with pertoken");
    }

    // 3. Do tiling.
    QuantMatmulV2TilingData tiling_data;
    BasicTiling basic_tiling;
    // FIX(review): read the attr as signed before widening — the old code
    // cast to uint64_t first, so its `< 0` check was always false.
    int32_t split_trans_attr = *(context->GetAttrs()->GetAttrPointer<int32_t>(3));
    if (split_trans_attr < 0){
        throw std::runtime_error("quant_matmul_v2 only support split_trans_num >= 0, 0 by default");
    }

    input_params.split_trans_num = static_cast<uint64_t>(split_trans_attr);
    if (input_params.split_trans_num > 0 && input_params.c_dtype !=  ge::DT_BF16){
        throw std::runtime_error("quant_matmul_v2 only support split_trans_num with bf16 output");
    }
    tiling_data.params.set_split_trans_num(static_cast<uint32_t>(split_trans_attr));

    // Basic tiling / kernel path.
    // select a branch: check_use_basic_tiling(input_params, hw_info); -> True
    input_params.is_basic_tiling = true;
    calc_l0_tiling(input_params, basic_tiling, hw_info);
    context->SetBlockDim(basic_tiling.used_core_num);
    calc_l1_tiling(input_params, basic_tiling, hw_info);
    do_l2_cache_tiling(input_params, basic_tiling, hw_info);
    set_matmul_tiling_from_basic_tiling(tiling_data, input_params, basic_tiling);

    tiling_data.params.set_is_pertoken(static_cast<uint32_t>(input_params.is_pertoken));
    tiling_data.params.set_is_per_tensor(static_cast<uint32_t>(input_params.is_per_tensor));
    tiling_data.params.set_bias_dtype(static_cast<uint32_t>(input_params.bias_dtype));
    tiling_data.params.set_rank_size(static_cast<uint32_t>(*(context->GetAttrs()->GetAttrPointer<int>(4))));
    tiling_data.params.set_rank(static_cast<uint32_t>(*(context->GetAttrs()->GetAttrPointer<int>(5))));
    tiling_data.params.set_p_value(static_cast<uint32_t>(*(context->GetAttrs()->GetAttrPointer<int>(6))));

    if(input_params.c_dtype == ge::DT_BF16){
        // The pertoken-optimized kernel is disabled while is_basic_tiling is
        // forced on; always take the generic UB tiling.
        calc_ub_tiling(tiling_data, input_params, hw_info, basic_tiling.base_m, basic_tiling.base_n);

        // Extra GM workspace for the bf16 (mix) path: double-buffered int32
        // staging of baseM x baseN per core, p_value blocks deep.
        uint64_t used_work_space_size = sizeof(int32_t) * static_cast<uint64_t>(tiling_data.matmul_tiling.get_baseM()) *
                                        tiling_data.matmul_tiling.get_baseN() *
                                        tiling_data.matmul_tiling.get_usedCoreNum() * tiling_data.params.get_p_value() * NUM_DB;
        input_params.bf16_extre_work_space_size = used_work_space_size;
        input_params.lib_api_work_space_size += input_params.bf16_extre_work_space_size;
    }

    // The kernel reads the tiling data as uint64_t words; size must align.
    if (tiling_data.GetDataSize() % sizeof(uint64_t) != 0){
        throw std::runtime_error("tiling_data.GetDataSize failed");
    }
    tiling_data.SaveToBuffer(context->GetRawTilingData()->GetData(), context->GetRawTilingData()->GetCapacity());
    context->GetRawTilingData()->SetDataSize(tiling_data.GetDataSize());
    size_t *workspaces = context->GetWorkspaceSizes(1);
    // NOTE(review): WORK_SPACE_RESERVE_SIZE is never added here — confirm
    // whether the runtime's reserved workspace must be included.
    workspaces[0] = input_params.lib_api_work_space_size;
    // Tiling key packs boolean flags as decimal digits (utils::get_tiling_key).
    auto tiling_key = utils::get_tiling_key(
        input_params.is_pertoken,
        input_params.m_size <= basic_tiling.base_m && input_params.is_pertoken && !input_params.is_basic_tiling,
        input_params.is_basic_tiling, input_params.trans_a, input_params.trans_b
    );
    context->SetTilingKey(tiling_key);

    // Debug dump of the final tiling (bring-up aid).
    std::cout << "tiling_data.params" << std::endl;
    std::cout << "params, is_per_tensor:" << tiling_data.params.get_is_per_tensor() << "; is_pertoken:" << tiling_data.params.get_is_pertoken() 
              << "; ub_calc_m:" << tiling_data.params.get_ub_calc_m() << "; ub_calc_n:" << tiling_data.params.get_ub_calc_n() << "; need_ub_buffer:" << tiling_data.params.get_need_ub_buffer()
              << "; real_single_core_m:" << tiling_data.params.get_real_single_core_m() << "; real_single_core_n:" << tiling_data.params.get_real_single_core_n()
              << "; bias_dtype:" << tiling_data.params.get_bias_dtype()
              << "; rank_size" << tiling_data.params.get_rank_size()
              << "; rank" << tiling_data.params.get_rank()
              << "; p_value" << tiling_data.params.get_p_value()
              << std::endl; //<< "; split_trans_num:" << tiling_data.params.get_split_trans_num() << std::endl;
    std::cout << "basic_tling, m_size_per_npu:" << input_params.m_size_per_npu << "; m_size:" << input_params.m_size << "; n_size:" << input_params.n_size
              << "; k_size:" << input_params.k_size << "; used_core_num:" << basic_tiling.used_core_num << "; base_m:" << basic_tiling.base_m
              << "; base_n:" << basic_tiling.base_n << "; base_k:" << basic_tiling.base_k << "; single_core_k:" << basic_tiling.single_core_k
              << "; depth_a1:" << basic_tiling.depth_a1 << "; depth_b1:" << basic_tiling.depth_b1 << "; step_m:" << basic_tiling.step_m
              << "; step_n:" << basic_tiling.step_n << "; step_ka:" << basic_tiling.step_ka << "; step_kb:" << basic_tiling.step_kb
              << "; iterator_order:" << basic_tiling.iterate_order << "; db_l0c:" << basic_tiling.db_l0c << "; m_tile_cnt_l2:" << basic_tiling.m_tile_cnt_l2
              << "; n_tile_cnt_l2:" << basic_tiling.n_tile_cnt_l2 << "; m_tile_block" << basic_tiling.m_tile_block << "; n_tile_block" << basic_tiling.n_tile_block << "; cal_order" << basic_tiling.cal_order
              << "; is_mclash:" << basic_tiling.is_mclash << "; is_nclash:" << basic_tiling.is_nclash << std::endl;
    std::cout << "api_tiling, used_core_num:"<< tiling_data.matmul_tiling.get_usedCoreNum() <<"; M:" << tiling_data.matmul_tiling.get_M() << "; N:" << tiling_data.matmul_tiling.get_N()
              << "; Ka:" << tiling_data.matmul_tiling.get_Ka() << "; Kb:" << tiling_data.matmul_tiling.get_Kb()
              << "; singleCoreM:" << tiling_data.matmul_tiling.get_singleCoreM() << "; singleCoreN:" << tiling_data.matmul_tiling.get_singleCoreN()
              << "; singleCoreK:" << tiling_data.matmul_tiling.get_singleCoreK() << "; baseM" << tiling_data.matmul_tiling.get_baseM()
              << "; baseN:" << tiling_data.matmul_tiling.get_baseN() << "; baseK:" << tiling_data.matmul_tiling.get_baseK()
              << "; depthA1:" << tiling_data.matmul_tiling.get_depthA1() << "; depthB1:" << tiling_data.matmul_tiling.get_depthB1()
              << "; stepM:" << tiling_data.matmul_tiling.get_stepM() << "; stepN:" << tiling_data.matmul_tiling.get_stepN()
              << "; stepKa:" << tiling_data.matmul_tiling.get_stepKa() << "; stepKb:" << tiling_data.matmul_tiling.get_stepKb()
              << "; isBias:" << tiling_data.matmul_tiling.get_isBias() << "; iterateOrder:" << tiling_data.matmul_tiling.get_iterateOrder()
              << "; trans_length: " << tiling_data.matmul_tiling.get_transLength()
              << "; iterate_order:" << ((tiling_data.matmul_tiling.get_iterateOrder() == 1) ? "order_m" : "order_n")
              << "; share_mode:" << tiling_data.matmul_tiling.get_shareMode() << "; dbL0A: " << tiling_data.matmul_tiling.get_dbL0A()
              << "; dbL0B: " << tiling_data.matmul_tiling.get_dbL0B() << "; dbL0C: " << tiling_data.matmul_tiling.get_dbL0C()
              << "; shareL1Size: " << tiling_data.matmul_tiling.get_shareL1Size() 
              << "; shareL0CSize: " << tiling_data.matmul_tiling.get_shareL0CSize()
              << "; shareUbSize: " << tiling_data.matmul_tiling.get_shareUbSize()
              << "; batchM: " << tiling_data.matmul_tiling.get_batchM()
              << "; batchN: " << tiling_data.matmul_tiling.get_batchN()
              << "; singleBatchM: " << tiling_data.matmul_tiling.get_singleBatchM()
              << "; singleBatchN: " << tiling_data.matmul_tiling.get_singleBatchN() << std::endl;

    return ge::GRAPH_SUCCESS;
}






} // namespace optiling


namespace ge {

// Infers the output shape: y = [M, N], where M/N account for the transpose
// attrs of x1/x2. The output shape is seeded from a_shape before dims 0/1
// are overwritten.
static ge::graphStatus quant_matmul_v2_infer_shape(gert::InferShapeContext* context) {
    auto transpose_a = *(context->GetAttrs()->GetAttrPointer<bool>(1));
    auto transpose_b = *(context->GetAttrs()->GetAttrPointer<bool>(2));
    const auto* a_shape = context->GetInputShape(0);
    const auto* b_shape = context->GetInputShape(1);
    // FIX(review): keep 64-bit dims — the old int32_t locals truncated
    // GetDim()'s int64_t result for dimensions beyond 2^31.
    int64_t m = transpose_a ? a_shape->GetDim(1) : a_shape->GetDim(0);
    int64_t n = transpose_b ? b_shape->GetDim(0) : b_shape->GetDim(1);
    auto* res_shape = context->GetOutputShape(0);
    *res_shape = *a_shape;
    res_shape->SetDim(0, m);
    res_shape->SetDim(1, n);
    return ge::GRAPH_SUCCESS;
}

// Infers the output dtype from the "output_type" attr:
// 0: int8, 1: fp16, 2: bf16, 3: int32 (matches the attr comment in the OpDef).
static ge::graphStatus quant_matmul_v2_infer_datatype(gert::InferDataTypeContext* context) {
    const int32_t output_type_int = *(context->GetAttrs()->GetAttrPointer<int32_t>(0));
    switch (output_type_int){
        case 0:
            context->SetOutputDataType(0, ge::DT_INT8);
            break;
        case 1:
            context->SetOutputDataType(0, ge::DT_FLOAT16);
            break;
        case 2:
            context->SetOutputDataType(0, ge::DT_BF16);
            break;
        case 3:
            context->SetOutputDataType(0, ge::DT_INT32);
            break;
        default:
            // FIX(review): unknown values previously fell through silently,
            // leaving the output dtype unset; fail fast instead.
            return ge::GRAPH_FAILED;
    }
    return GRAPH_SUCCESS;
}
}

namespace ops {
/**
 * Operator registration for QuantMatmulV2.
 *
 * Four dtype/format combinations are declared per tensor (one per column of
 * the DataType/Format lists): x1/x2 are int8, x2 may be FRACTAL_NZ or ND,
 * and y is bf16 or int32.
 */
class QuantMatmulV2 : public OpDef {
public:
    explicit QuantMatmulV2(const char *name) : OpDef(name){
        // x1: int8 activation, always ND.
        this->Input("x1")
            .ParamType(REQUIRED)
            .DataType({ge::DT_INT8, ge::DT_INT8, ge::DT_INT8, ge::DT_INT8})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
            .IgnoreContiguous();
        // x2: int8 weight, FRACTAL_NZ or ND depending on the combination.
        this->Input("x2")
            .ParamType(REQUIRED)
            .DataType({ge::DT_INT8, ge::DT_INT8, ge::DT_INT8, ge::DT_INT8})
            .Format({ge::FORMAT_FRACTAL_NZ, ge::FORMAT_ND, ge::FORMAT_FRACTAL_NZ, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_FRACTAL_NZ, ge::FORMAT_ND, ge::FORMAT_FRACTAL_NZ, ge::FORMAT_ND})
            .IgnoreContiguous();
        // scale: bf16 dequant scale (per-tensor when it has one element).
        this->Input("scale")
            .ParamType(REQUIRED)
            .DataType({ge::DT_BF16, ge::DT_BF16, ge::DT_BF16, ge::DT_BF16})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND});
        // bias: optional bf16.
        this->Input("bias")
            .ParamType(OPTIONAL)
            .DataType({ge::DT_BF16, ge::DT_BF16, ge::DT_BF16, ge::DT_BF16})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND});
        // pretoken_scale: optional float per-token scale.
        this->Input("pretoken_scale")
            .ParamType(OPTIONAL)
            .DataType({ge::DT_FLOAT, ge::DT_FLOAT, ge::DT_FLOAT, ge::DT_FLOAT})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND});
        // y: bf16 (mix path) or int32 (cube-only path).
        this->Output("y")
            .ParamType(REQUIRED)
            .DataType({ge::DT_BF16, ge::DT_BF16, ge::DT_INT32, ge::DT_INT32})
            .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
            .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND});

        // Eight auxiliary bf16 output buffers: buff0 .. buff7.
        for (int i = 0; i < 8; ++i) {
            const std::string buff_name = "buff" + std::to_string(i);
            this->Output(buff_name.c_str())
                .ParamType(REQUIRED)
                .DataType({ge::DT_BF16, ge::DT_BF16, ge::DT_BF16, ge::DT_BF16})
                .Format({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND})
                .UnknownShapeFormat({ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND, ge::FORMAT_ND});
        }

        this->Attr("output_type").AttrType(REQUIRED).Int(); // 1: fp16, 0: int8, 3: int32, 2: bf16
        this->Attr("transpose_a").AttrType(OPTIONAL).Bool(false);
        this->Attr("transpose_b").AttrType(OPTIONAL).Bool(false);
        this->Attr("split_trans_num").AttrType(REQUIRED).Int(0);
        this->Attr("rank_size").AttrType(REQUIRED).Int(8);
        this->Attr("rank").AttrType(REQUIRED).Int(0);
        this->Attr("p_value").AttrType(REQUIRED).Int(2);

        this->SetInferShape(ge::quant_matmul_v2_infer_shape);
        this->SetInferDataType(ge::quant_matmul_v2_infer_datatype);
        this->AICore().SetTiling(optiling::quant_matmul_v2_tiling_func);
        this->AICore().AddConfig("ascend910b");
    }
};

OP_ADD(QuantMatmulV2);
} // namespace ops
