/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2022. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file gather_v2.cpp
 * \brief Runtime-2.0 tiling implementation for the GatherV2 operator.
 */
#include "gatherv2_tiling_rt2.h"

#include <nlohmann/json.hpp>
#include <sstream>
#include <cctype>
#include "log.h"
#include "graph/types.h"
#include "graph/operator.h"
#include "register/op_tiling_info.h"
#include "register/op_tiling_attr_utils.h"
#include "register/op_impl_registry.h"
#include "exe_graph/runtime/tiling_context.h"
#include "vector_op_info.h"
#include "graph/small_vector.h"
#include "op_tiling.h"
#include "exe_graph/lowering/shape_utils.h"


namespace optilingGather {
const size_t INPUT_IDX_AXIS = 2;
const int64_t HALF_UB = 2;
const int64_t DATA_VALUE = 1024;
const int64_t NUM_32 = 32;
// NOTE: these two thresholds are fractional values. They were previously
// declared int64_t, which silently truncated 56.5 -> 56 and 0.012 -> 0,
// making GATE_VALUE compare as zero everywhere it is used.
const double ACTUAL_NUM = 56.5;
const double GATE_VALUE = 0.012;
const int64_t BLOCK_SIZE = 32;
const int64_t PARAMS_CACHED_UB = 100 * 1024;
const int64_t RESERVED_UB_SIZE = 8 * 1024;
const int64_t INDICES_MIN_NUM_FOR_CACHE = 10 * 1024;
const int64_t CACHE_MODE_UB_SLICE = 6;
const int64_t NOT_ALIGN_SUPPORT_DATA_SIZE = 4;
const int64_t RESERVED_UB_SIZE_2K = 2 * 1024;
const int64_t TRANS_POSE_LINE_SIZE = 16;
const int64_t SUPPORT_PARAM_SIZE = 2;
const int64_t THREE_PART_UB_SIZE = 3;
const int64_t FOUR_PART_UB_SIZE = 4;
const int64_t ALIGN_FOR_ONCE_UB_SIZE = 64;
// A. block tiling: indices tiling
// 1. one params row size is smaller than 32B
// params is not cache
const int64_t TILING_MODE_1 = 1;
// params is cache in UB
const int64_t TILING_MODE_4 = 4;
// params is cache in L1
const int64_t TILING_MODE_13 = 13;

// 2. one params row size is greater than or equal to 32B
// params_row is not 32B aligned
const int64_t TILING_MODE_2 = 2;
// the data of one params row can not store in half UB, need tiling
const int64_t TILING_MODE_5 = 5;

// 3. params_row is 32B aligned
// params is not cache in UB or L1
const int64_t TILING_MODE_3 = 3;
// params is cache in UB
const int64_t TILING_MODE_6 = 6;
// params is cache in L1
const int64_t TILING_MODE_7 = 7;

// B. block tiling: params_pre tiling
// 1. one params row size is smaller than 32B
// params is not cache
const int64_t TILING_MODE_8 = 8;
// params is cache in UB
const int64_t TILING_MODE_9 = 9;

// 2. params_row is 32B aligned
// params is not cache in UB or L1
const int64_t TILING_MODE_10 = 10;
// params is cache in UB
const int64_t TILING_MODE_11 = 11;
// params is cache in L1
const int64_t TILING_MODE_12 = 12;

// tiling_mode with impl_mode
const int64_t TILING_MODE_14 = 14;

// sampling indices and sorting topN for cache indices
const int64_t TILING_MODE_15 = 15;
// upper bound, in bytes, of one params row for the topN-cache mode
// (checked in DoCacheModeAlignCheck)
static const int64_t TILING_MODE_15_MAX_PARAM_NUM_SIZE = 1024;
// sampling indices for not align
const int64_t TILING_MODE_16 = 16;
// CPU-preprocessed cache modes: 17 handles rows that are not 32B-aligned,
// 18 handles aligned rows (see DoCacheModeTilingwithCpuPreprocess)
const int64_t TILING_MODE_17 = 17;
const int64_t TILING_MODE_18 = 18;

// tiling_mode with batch_dims
// 1.one params row size is smaller than 32B
// 1.1 params is cached in UB
const int64_t TILING_MODE_20 = 20;
const int64_t TILING_MODE_21 = 21;
const int64_t TILING_MODE_22 = 22;
// 1.2 params is not cached in UB
const int64_t TILING_MODE_23 = 23;
const int64_t TILING_MODE_24 = 24;
const int64_t TILING_MODE_25 = 25;

// 2.one params row size is large than 32B and not align
const int64_t TILING_MODE_26 = 26;
const int64_t TILING_MODE_27 = 27;
const int64_t TILING_MODE_28 = 28;

// 3.one params row size is align
// 3.1 params is cached in UB
const int64_t TILING_MODE_29 = 29;
const int64_t TILING_MODE_30 = 30;
const int64_t TILING_MODE_31 = 31;
// 3.2 params is not cached in UB
const int64_t TILING_MODE_32 = 32;
const int64_t TILING_MODE_33 = 33;
const int64_t TILING_MODE_34 = 34;

// 4. large params row size
const int64_t TILING_MODE_35 = 35;
const int64_t TILING_MODE_36 = 36;
const int64_t TILING_MODE_37 = 37;

// 5. small indices row size
const int64_t TILING_MODE_38 = 38;
const int64_t TILING_MODE_39 = 39;

// 6. small params and indices row size
const int64_t TILING_MODE_40 = 40;
const int64_t TILING_MODE_41 = 41;

// Whitelist of shape pairs consulted by the tiling dispatch. The meaning of
// each pair is not visible in this file — presumably pre-validated
// {dim, dim} cases; TODO confirm against the code that reads it.
std::vector<std::vector<int64_t>> AD_Trustlist_RT = { { 200, 50 }, { 200, 29 }, { 200, 1468 }, { 200, 7 },  { 200, 1 },
                                                      { 200, 10 }, { 200, 20 }, { 200, 544 },  { 200, 23 }, { 200, 5 },
                                                      { 200, 11 }, { 200, 19 }, { 200, 8 },    { 32, 39 } };

// define impl_mode of gather_v2 attr
static const int64_t IMPL_MODE_HIGH_PERFORMANCE_VALUE = 1;
static const int64_t TILING_MODE_15_MAX_PARAM_NUM = 256;

// Returns true when the tensor can be treated as a const input: either it
// carries data (non-null address), or it is an empty tensor (null address
// with a zero shape size). A null tensor pointer is never const.
inline bool IsConstTensor(const gert::Tensor *input_tensor)
{
    if (input_tensor == nullptr) {
        return false;
    }
    if (input_tensor->GetAddr() != nullptr) {
        return true;
    }
    // A null data address is acceptable only for an empty tensor.
    return input_tensor->GetShapeSize() == 0;
}

/**
 * Reads the first element of the const input tensor at const_input_idx and
 * stores it, cast to T, in value.
 *
 * Supports int32 / uint32 / int64 / uint64 const tensors. Returns false when
 * the input is missing, not a const tensor, empty (no data to read), or has
 * an unsupported data type.
 */
template <typename T> bool GetConstInt(gert::TilingContext *context, const int64_t const_input_idx, T &value)
{
    const gert::Tensor *const_tensor = context->GetInputTensor(const_input_idx);
    if (const_tensor == nullptr) {
        HCCL_ERROR("const_tensor is null");
        return false;
    }
    if (!IsConstTensor(const_tensor)) {
        HCCL_ERROR("the input is not const tensor, will return failed.");
        return false;
    }
    // IsConstTensor accepts an empty tensor (null address, zero size); reading
    // element [0] from it would dereference a null pointer, so reject it here.
    if (const_tensor->GetAddr() == nullptr || const_tensor->GetShapeSize() <= 0) {
        HCCL_ERROR("the const tensor is empty, no value to read.");
        return false;
    }

    ge::DataType dtype = const_tensor->GetDataType();
    switch (dtype) {
        case ge::DT_UINT64:
            value = static_cast<T>(const_tensor->GetData<uint64_t>()[0]);
            break;
        case ge::DT_INT64:
            value = static_cast<T>(const_tensor->GetData<int64_t>()[0]);
            break;
        case ge::DT_UINT32:
            value = static_cast<T>(const_tensor->GetData<uint32_t>()[0]);
            break;
        case ge::DT_INT32:
            value = static_cast<T>(const_tensor->GetData<int32_t>()[0]);
            break;
        default:
            HCCL_ERROR("GetConstInt, unsupported const tensor dtype %d", static_cast<int32_t>(dtype));
            return false;
    }
    // Cast before logging: "%d" with a 64-bit T is a printf format mismatch.
    HCCL_INFO("GetConstInt, GetConstInt of value is %ld", static_cast<int64_t>(value));
    return true;
}

/**
 * Validates the gather axis and batch_dims against the ranks of params and
 * indices, and normalizes negative values in place.
 *
 * On success axis lies in [0, params_dims) and, when batch_dims is non-zero,
 * batch_dims lies in [0, indices_dims) with batch_dims <= axis < params_dims
 * and the leading batch_dims dimensions of x_shape and indies_shape equal.
 * Returns false (after logging an error) on any violated precondition.
 */
bool CheckAndUpdateAxisAndBatchdims(int64_t &axis, gert::Shape &x_shape, gert::Shape &indies_shape,
    int64_t &batch_dims, int64_t params_dims, int64_t indices_dims)
{
    HCCL_DEBUG("GatherV2 CheckAndUpdateAxisAndBatchdims begin");
    CHK_PRT_RET(params_dims <= 0 || indices_dims <= 0,
        HCCL_ERROR("GatherV2:, GatherV2Tiling: params_dims or indices_dims is 0."), false);

    CHK_PRT_RET(axis < -params_dims || axis >= params_dims,
        HCCL_ERROR("GatherV2:, op GatherV2Tiling: axis is invalid"), false);

    // a negative axis counts from the last dimension
    if (axis < 0) {
        axis += params_dims;
    }

    if (batch_dims != 0) {
        CHK_PRT_RET(batch_dims < -indices_dims || batch_dims > indices_dims,
            HCCL_ERROR("GatherV2:, op GatherV2Tiling: batch_dims is invalid."), false);
        // a negative batch_dims also counts from the back of indices
        if (batch_dims < 0) {
            batch_dims += indices_dims;
        }
        CHK_PRT_RET(batch_dims >= params_dims,
            HCCL_ERROR("GatherV2:, op GatherV2Tiling: batch_dims must be less than rank(params)."), false);
        CHK_PRT_RET(batch_dims > axis,
            HCCL_ERROR("GatherV2:, op GatherV2Tiling: batch_dims must be less than or equal to axis."), false);
        // the leading batch dimensions of params and indices must agree
        for (int64_t i = 0; i < batch_dims; i++) {
            if (x_shape.GetDim(i) != indies_shape.GetDim(i)) {
                HCCL_ERROR("GatherV2, op GatherV2Tiling: Params.shape[:batch_dims] "
                    "should be equal to indices.shape[:batch_dims].");
                return false;
            }
        }
    }
    HCCL_DEBUG("GatherV2 CheckAndUpdateAxisAndBatchdims end");
    return true;
}

/**
 * Pre-checks for the aligned topN-cache tiling path (TILING_MODE_15).
 *
 * Returns true only when: impl_mode is high_performance, there are more than
 * INDICES_MIN_NUM_FOR_CACHE indices, axis is 0, a non-zero core count is
 * known, the params tensor is too large to fit in the carved-out UB cache
 * slice, and one params row is 32B-aligned, within [BLOCK_SIZE, 1KB], and
 * small enough for the cache layout. Each rejected condition is logged at
 * debug level and returns false.
 */
bool DoCacheModeAlignCheck(int64_t axis, GatherV2TilingParams *params, const GatherV2CompileInfo *compile_info)
{
    HCCL_DEBUG("GatherV2 DoCacheModeAlignCheck begin");
    CHK_PRT_RET(compile_info->impl_mode != IMPL_MODE_HIGH_PERFORMANCE_VALUE,
        HCCL_DEBUG("GatherV2, "
        "[DoCacheModeAlignCheck] no need simpiling for topn cache, becase current is not high_performance"),
        false);
    CHK_PRT_RET(params->indices_num <= INDICES_MIN_NUM_FOR_CACHE,
        HCCL_DEBUG("GatherV2, [DoCacheModeAlignCheck] no need simpiling for topn cache, but indices_num is %ld",
        params->indices_num),
        false);
    CHK_PRT_RET(axis != 0,
        HCCL_DEBUG("GatherV2, [DoCacheModeAlignCheck] no need simpiling for topn cache, but axis is %ld", axis),
        false);
    CHK_PRT_RET(compile_info->core_num == 0,
        HCCL_DEBUG("GatherV2, [DoCacheModeAlignCheck] no need simpiling for topn cache, but need_core_num is %ld",
        compile_info->core_num),
        false);

    // if input param size less than cache n number buffer, no need cache mode
    // (UB minus the reserve, split into CACHE_MODE_UB_SLICE parts, then
    // rounded down to a 32B multiple)
    int64_t cache_n_num_max_size = (compile_info->ub_size - RESERVED_UB_SIZE) / CACHE_MODE_UB_SLICE;
    cache_n_num_max_size = cache_n_num_max_size / BLOCK_SIZE * BLOCK_SIZE;
    CHK_PRT_RET(params->params_total * compile_info->params_dsize < cache_n_num_max_size,
        HCCL_DEBUG("GatherV2, [DoCacheModeAlignCheck] no need simpiling for topn cache, "
            "but cache_n_num_max_size is %ld", cache_n_num_max_size), false);

    // one row must be block-aligned, within [BLOCK_SIZE, 1KB], and leave room
    // for BLOCK_SIZE rows inside the cache slice
    int64_t one_param_row_size = params->params_row * compile_info->params_dsize;
    CHK_PRT_RET(((one_param_row_size > TILING_MODE_15_MAX_PARAM_NUM_SIZE) || (one_param_row_size < BLOCK_SIZE) ||
        (one_param_row_size % BLOCK_SIZE != 0) || (one_param_row_size * BLOCK_SIZE > cache_n_num_max_size)),
        HCCL_DEBUG("GatherV2, "
        "[DoCacheModeTilingAlign] no need cache mode, but params_row_size is %ld, cache_n_num_max_size is %ld",
        one_param_row_size, cache_n_num_max_size),
        false);

    HCCL_DEBUG("GatherV2 DoCacheModeAlignCheck end");
    return true;
}

// Tiling for the aligned topN-cache mode: if the pre-checks pass, select
// TILING_MODE_15 and spread the indices evenly across every available core.
bool DoCacheModeTilingAlign(int64_t axis, GatherV2TilingParams *params, const GatherV2CompileInfo *compile_info)
{
    HCCL_DEBUG("GatherV2 DoCacheModeTilingAlign begin");
    if (!DoCacheModeAlignCheck(axis, params, compile_info)) {
        return false;
    }

    const int64_t core_num = compile_info->core_num;
    params->tiling_mode = TILING_MODE_15;
    params->need_core_num = core_num;
    // ceil-divide so the leading cores take one extra index when uneven
    params->indices_num_each_core = (params->indices_num + core_num - 1) / core_num;
    // NOTE(review): stores the quotient despite the "remaining" name — the
    // same convention is used by every tiling mode in this file.
    params->indices_num_remaining = params->indices_num / core_num;

    const int64_t tail_core = params->indices_num % core_num;
    params->tail_process_core = (tail_core == 0) ? core_num : tail_core;

    HCCL_DEBUG("GatherV2, [DoCacheModeTilingAlign] For the core which blockId < %ld, %ld indices will be process",
        params->tail_process_core, params->indices_num_each_core);
    HCCL_DEBUG("GatherV2, [DoCacheModeTilingAlign] For the core which blockId >= %ld, %ld indices will be process",
        params->tail_process_core, params->indices_num_remaining);

    HCCL_DEBUG("GatherV2 DoCacheModeTilingAlign end");
    return true;
}

/**
 * Pre-checks for the non-aligned topN-cache tiling path (TILING_MODE_16).
 *
 * Returns true only when: impl_mode is high_performance, the params element
 * is at least 2 bytes (no uint8/int8), indices are int32 (4-byte), one
 * params row is NOT 32B-aligned, axis is 0, a non-zero core count is known,
 * and a 16-line transpose block of the block-aligned row fits in one sixth
 * of the usable UB. Each rejected condition is logged and returns false.
 */
bool DoCacheModeNotAlignCheck(int64_t axis, int64_t one_param_row_size, GatherV2TilingParams *params,
    const GatherV2CompileInfo *compile_info)
{
    HCCL_DEBUG("GatherV2 DoCacheModeNotAlignCheck begin");
    CHK_PRT_RET(compile_info->impl_mode != IMPL_MODE_HIGH_PERFORMANCE_VALUE,
        HCCL_DEBUG("GatherV2, "
        "[DoCacheModeNotAlignCheck] no need simpiling for topn cache, becase current is not high_performance"),
        false);
    CHK_PRT_RET(compile_info->params_dsize < SUPPORT_PARAM_SIZE,
        HCCL_DEBUG("GatherV2, [DoCacheModeNotAlignCheck] params not support uint8/int8, but type size is :%ld",
        compile_info->params_dsize),
        false);
    CHK_PRT_RET(compile_info->indices_dsize != NOT_ALIGN_SUPPORT_DATA_SIZE,
        HCCL_DEBUG("GatherV2, [DoCacheModeNotAlignCheck] indices only support int32, but type size is :%ld",
        compile_info->indices_dsize),
        false);
    // a row that is already 32B-aligned belongs to the aligned cache path
    CHK_PRT_RET((one_param_row_size % BLOCK_SIZE == 0),
        HCCL_DEBUG("GatherV2, [DoCacheModeNotAlignCheck] no need cache mode, but params_row is %ld",
        params->params_row),
        false);
    CHK_PRT_RET(axis != 0,
        HCCL_DEBUG("GatherV2, [DoCacheModeNotAlignCheck] no need simpiling for topn cache, but axis is %ld", axis),
        false);
    CHK_PRT_RET(compile_info->core_num == 0,
        HCCL_DEBUG("GatherV2, [DoCacheModeNotAlignCheck] no need simpiling for topn cache, but need_core_num is %ld",
        compile_info->core_num),
        false);
    // one sixth of UB (minus a 2K reserve), rounded down to a 32B multiple
    int64_t six_part_ub_size = (compile_info->ub_size - RESERVED_UB_SIZE_2K) / CACHE_MODE_UB_SLICE;
    six_part_ub_size = six_part_ub_size / BLOCK_SIZE * BLOCK_SIZE;
    // row size rounded up to the next 32B block
    int64_t align_param_row = (one_param_row_size + BLOCK_SIZE - 1) / BLOCK_SIZE * BLOCK_SIZE;
    CHK_PRT_RET((align_param_row * TRANS_POSE_LINE_SIZE > six_part_ub_size),
        HCCL_DEBUG("GatherV2, "
        "[DoCacheModeNotAlignCheck] no need cache mode, but align_param_row is %ld, params_row is %ld",
        align_param_row, params->params_row),
        false);

    HCCL_DEBUG("GatherV2 DoCacheModeNotAlignCheck end");
    return true;
}

// Tiling for the non-aligned topN-cache mode: if the pre-checks pass and
// each core still moves at least one 32B block, select TILING_MODE_16 and
// spread the indices evenly across every available core.
bool DoCacheModeTilingNotAlian(int64_t axis, GatherV2TilingParams *params, const GatherV2CompileInfo *compile_info)
{
    HCCL_DEBUG("GatherV2 DoCacheModeTilingNotAlian begin");
    const int64_t row_size = params->params_row * compile_info->params_dsize;
    if (!DoCacheModeNotAlignCheck(axis, row_size, params, compile_info)) {
        return false;
    }

    const int64_t core_num = compile_info->core_num;
    params->need_core_num = core_num;
    // ceil-divide so the leading cores take one extra index when uneven
    params->indices_num_each_core = (params->indices_num + core_num - 1) / core_num;
    params->indices_num_remaining = params->indices_num / core_num;
    CHK_PRT_RET(params->indices_num_remaining * row_size < BLOCK_SIZE,
        HCCL_DEBUG("GatherV2, "
        "[DoCacheModeTilingNotAlian] no need simpiling for topn cache, but indices_num_each_core is "
        "%ld, params_d_size is %ld, need_core_num is %ld",
        params->indices_num_remaining, compile_info->params_dsize, params->need_core_num),
        false);

    const int64_t tail_core = params->indices_num % core_num;
    params->tail_process_core = (tail_core == 0) ? core_num : tail_core;
    params->tiling_mode = TILING_MODE_16;

    HCCL_DEBUG("GatherV2, [DoCacheModeTilingNotAlian] For the core which blockId < %ld, %ld indices will be process",
        params->tail_process_core, params->indices_num_each_core);
    HCCL_DEBUG("GatherV2, [DoCacheModeTilingNotAlian] For the core which blockId >= %ld, %ld indices will be process",
        params->tail_process_core, params->indices_num_remaining);

    HCCL_DEBUG("GatherV2 DoCacheModeTilingNotAlian end");
    return true;
}

/**
 * Pre-checks for the CPU-preprocessed cache tiling paths (TILING_MODE_17/18).
 *
 * part_ub_size is the number of equal UB partitions the caller intends to
 * use (4 for non-aligned rows, 3 for aligned rows). The check verifies that
 * the indices were not already preprocessed, that axis is 0, that a non-zero
 * core count is known, and that at least one row batch fits in one UB
 * partition. Each rejected condition is logged and returns false.
 */
bool DoCpuPreprocessCheck(int64_t axis, int64_t one_param_row_size, int64_t part_ub_size, GatherV2TilingParams *params,
    const GatherV2CompileInfo *compile_info)
{
    HCCL_DEBUG("GatherV2 DoCpuPreprocessCheck begin");
    CHK_PRT_RET(part_ub_size == 0,
        HCCL_DEBUG("GatherV2, [DoCpuPreprocessCheck] no need simpiling for topn cache, but part_ub_size is %ld",
        part_ub_size),
        false);
    CHK_PRT_RET(compile_info->is_preprocessed,
        HCCL_DEBUG("GatherV2, [DoCpuPreprocessCheck] no need simpiling for topn cache, but is_preprocessed is %ld",
        compile_info->is_preprocessed),
        false);

    // uint8/int8 params are only rejected when the row is also mis-aligned
    CHK_PRT_RET((compile_info->params_dsize < SUPPORT_PARAM_SIZE) && (one_param_row_size % BLOCK_SIZE != 0),
        HCCL_DEBUG("GatherV2, [DoCpuPreprocessCheck] params not support uint8/int8, but type size is :%ld",
        one_param_row_size),
        false);
    CHK_PRT_RET(axis != 0,
        HCCL_DEBUG("GatherV2, [DoCpuPreprocessCheck] no need simpiling for topn cache, but axis is %ld", axis), false);
    CHK_PRT_RET(compile_info->core_num == 0,
        HCCL_DEBUG("GatherV2, [DoCpuPreprocessCheck] no need simpiling for topn cache, but need_core_num is %ld",
        compile_info->core_num),
        false);
    // one UB partition (minus a 2K reserve), rounded down to a 32B multiple
    int64_t one_part_ub_size = (compile_info->ub_size - RESERVED_UB_SIZE_2K) / part_ub_size;
    one_part_ub_size = one_part_ub_size / BLOCK_SIZE * BLOCK_SIZE;
    // row size rounded up to the next 32B block
    int64_t align_param_row = (one_param_row_size + BLOCK_SIZE - 1) / BLOCK_SIZE * BLOCK_SIZE;
    int64_t row_num_once_ub = 0;
    if (one_param_row_size % BLOCK_SIZE != 0) {
        // mis-aligned rows are staged through a 16-line transpose buffer
        CHK_PRT_RET((align_param_row * TRANS_POSE_LINE_SIZE > one_part_ub_size),
            HCCL_DEBUG("GatherV2, [DoCpuPreprocessCheck] no need cache mode, but align_param_row is %ld, "
                "params_row is %ld", align_param_row, params->params_row), false);
        row_num_once_ub = one_part_ub_size / (align_param_row * TRANS_POSE_LINE_SIZE) / ALIGN_FOR_ONCE_UB_SIZE;

        CHK_PRT_RET((row_num_once_ub == 0),
            HCCL_DEBUG("GatherV2, "
            "[DoCpuPreprocessCheck] row_num_once_ub is 0, but one_part_ub_size is %ld, align_param_row is %ld",
            one_part_ub_size, align_param_row),
            false);
    } else {
        // aligned rows are copied directly, no transpose staging needed
        CHK_PRT_RET((align_param_row > one_part_ub_size),
            HCCL_DEBUG("GatherV2, "
            "[DoCacheModeTilingwithCpuPreprocess] no need cache mode, but align_param_row is %ld, params_row is %ld",
            align_param_row, params->params_row),
            false);
        row_num_once_ub = one_part_ub_size / align_param_row / ALIGN_FOR_ONCE_UB_SIZE;
        CHK_PRT_RET((row_num_once_ub == 0),
            HCCL_DEBUG("GatherV2, "
            "[DoCpuPreprocessCheck] row_num_once_ub is 0, but one_part_ub_size is %ld, align_param_row is %ld",
            one_part_ub_size, align_param_row),
            false);
    }

    HCCL_DEBUG("GatherV2 DoCpuPreprocessCheck end");
    return true;
}

bool DoCacheModeTilingwithCpuPreprocess(int64_t axis, GatherV2TilingParams *params,
    const GatherV2CompileInfo *compile_info)
{
    HCCL_DEBUG("GatherV2 DoCacheModeTilingwithCpuPreprocess begin");
    CHK_PRT_RET(compile_info->impl_mode != IMPL_MODE_HIGH_PERFORMANCE_VALUE,
        HCCL_DEBUG("GatherV2, [DoCacheModeTilingwithCpuPreprocess] no need cpu cache, not high_performance"), false);

    int64_t one_param_row_size = params->params_row * compile_info->params_dsize;

    if (one_param_row_size % BLOCK_SIZE != 0) {
        if (!DoCpuPreprocessCheck(axis, one_param_row_size, FOUR_PART_UB_SIZE, params, compile_info)) {
            return false;
        }
    } else {
        if (!DoCpuPreprocessCheck(axis, one_param_row_size, THREE_PART_UB_SIZE, params, compile_info)) {
            return false;
        }
    }

    params->need_core_num = compile_info->core_num;
    params->indices_num_each_core = (params->indices_num + params->need_core_num - 1) / params->need_core_num;
    params->indices_num_remaining = params->indices_num / params->need_core_num;

    CHK_PRT_RET(params->indices_num_remaining * one_param_row_size < BLOCK_SIZE,
        HCCL_DEBUG("GatherV2 [DoCacheModeTilingwithCpuPreprocess] no need simpiling for topn cache, "
            "but indices_num_remaining is "
            "%ld, params_dsize is %ld, need_core_num is %ld",
            params->indices_num_remaining, compile_info->params_dsize, params->need_core_num), false);
    params->tail_process_core = params->indices_num % params->need_core_num;
    if (params->tail_process_core == 0) {
        params->tail_process_core = params->need_core_num;
    }

    if (one_param_row_size % BLOCK_SIZE != 0) {
        params->tiling_mode = TILING_MODE_17;
    } else {
        params->tiling_mode = TILING_MODE_18;
    }

    HCCL_DEBUG(
        "GatherV2 [DoCacheModeTilingwithCpuPreprocess] For the core which blockId < %ld, %ld indices will be process",
        params->tail_process_core, params->indices_num_each_core);
    HCCL_DEBUG("GatherV2, "
        "[DoCacheModeTilingwithCpuPreprocess] For the core which blockId >= %ld, %ld indices will be process",
        params->tail_process_core, params->indices_num_remaining);

    HCCL_DEBUG("GatherV2 DoCacheModeTilingwithCpuPreprocess end");
    return true;
}

// Tiling for TILING_MODE_14 (high_performance impl_mode): used when the
// params tensor is too big to cache in UB and there are enough indices to
// keep every core busy; spreads the indices evenly across all cores.
bool DoImplModeTiling(GatherV2TilingParams *params, const GatherV2CompileInfo *compile_info)
{
    HCCL_DEBUG("GatherV2 DoImplModeTiling begin");
    CHK_PRT_RET(compile_info->impl_mode != IMPL_MODE_HIGH_PERFORMANCE_VALUE,
        HCCL_DEBUG("GatherV2, [DoImplModeTiling] no need cache params row 0 for impl_mode is not high_performance"),
        false);
    CHK_PRT_RET(params->params_total * compile_info->params_dsize <= PARAMS_CACHED_UB,
        HCCL_DEBUG("GatherV2, [DoImplModeTiling] no need cache params row 0 for all params can be cached in UB"),
        false);
    CHK_PRT_RET(params->indices_num < compile_info->core_num * BLOCK_SIZE / compile_info->params_dsize,
        HCCL_DEBUG("GatherV2, [DoImplModeTiling] no need cache params row 0 for the num of indices is small"), false);

    const int64_t core_num = compile_info->core_num;
    params->tiling_mode = TILING_MODE_14;
    params->need_core_num = core_num;
    // ceil-divide so the leading cores take one extra index when uneven
    params->indices_num_each_core = (params->indices_num + core_num - 1) / core_num;
    params->indices_num_remaining = params->indices_num / core_num;

    const int64_t tail_core = params->indices_num % core_num;
    params->tail_process_core = (tail_core == 0) ? core_num : tail_core;
    HCCL_DEBUG("GatherV2, [DoImplModeTiling] For the core which blockId < %ld, %ld indices will be process",
        params->tail_process_core, params->indices_num_each_core);
    HCCL_DEBUG("GatherV2, [DoImplModeTiling] For the core which blockId >= %ld, %ld indices will be process",
        params->tail_process_core, params->indices_num_remaining);

    HCCL_DEBUG("GatherV2 DoImplModeTiling end");
    return true;
}

// compute tiling params for tiling_mode 10&11&12
// compute tiling params for tiling_mode 10&11&12
/**
 * Splits the per-core indices into loops of indices_num_per_loop and, within
 * each loop, into UB batches of res_ub_size / (params_row * params_dsize)
 * rows. Fills indices_loop_num / indices_row_num_once(last) and the
 * inner-loop / tail counters on params. Returns false when a divisor would
 * be zero (indices_num_per_loop, row byte size, or a UB batch of 0 rows).
 */
bool BlockAlignForParamsTiling(GatherV2TilingParams *params, int64_t indices_num_per_loop, int64_t res_ub_size,
    int64_t params_dsize)
{
    HCCL_DEBUG("GatherV2 BlockAlignForParamsTiling begin");
    CHK_PRT_RET(indices_num_per_loop == 0, HCCL_ERROR("GatherV2, indices_num_per_loop = 0 is not support"), false);
    // guard the divisions below: a zero-byte params row would divide by zero
    CHK_PRT_RET(params->params_row * params_dsize == 0,
        HCCL_ERROR("GatherV2, params_row * params_dsize = 0 is not support"), false);
    params->indices_loop_num = params->indices_num_each_core / indices_num_per_loop;
    params->indices_row_num_once = indices_num_per_loop;
    if (params->indices_num_each_core % params->indices_row_num_once != 0) {
        params->indices_row_num_last = params->indices_num_each_core % params->indices_row_num_once;
    }

    // rows that fit in one UB pass of a full loop
    params->row_num_once_ub = res_ub_size / (params->params_row * params_dsize);
    CHK_PRT_RET((params->row_num_once_ub == 0),
        HCCL_ERROR("GatherV2, Devide by row_num_once_ub[%ld] exception.", params->row_num_once_ub), false);
    params->inner_loop_num = params->indices_row_num_once / params->row_num_once_ub;
    if (params->indices_row_num_once % params->row_num_once_ub != 0) {
        params->row_num_once_tail_ub = params->indices_row_num_once % params->row_num_once_ub;
    }

    // rows that fit in one UB pass of the last (partial) loop
    params->row_num_last_ub = res_ub_size / (params->params_row * params_dsize);
    CHK_PRT_RET((params->row_num_last_ub == 0),
        HCCL_ERROR("GatherV2, Devide by row_num_last_ub[%ld] exception.", params->row_num_last_ub), false);
    params->inner_loop_num_last = params->indices_row_num_last / params->row_num_last_ub;
    if (params->indices_row_num_last % params->row_num_last_ub != 0) {
        params->row_num_last_tail_ub = params->indices_row_num_last % params->row_num_last_ub;
    }
    HCCL_DEBUG("GatherV2 BlockAlignForParamsTiling end");
    return true;
}

// compute tiling params for tiling_mode 1&4&13
// compute tiling params for tiling_mode 1&4&13
/**
 * Splits the per-core indices into loops of indices_num_per_loop and, within
 * each loop, into UB batches. Because one params row is smaller than a 32B
 * block here, the batch size is rounded down to a multiple of block_num and
 * a tail batch that would move less than one block is merged back into the
 * preceding batch. Returns false when a divisor would be zero.
 */
bool BlockLessForIndicesTiling(GatherV2TilingParams *params, int64_t indices_num_per_loop, int64_t res_ub_size,
    int64_t params_d_size, int64_t block_num)
{
    HCCL_DEBUG("GatherV2 BlockLessForIndicesTiling begin");
    CHK_PRT_RET(indices_num_per_loop == 0 || block_num == 0,
        HCCL_ERROR("GatherV2, indices_num_per_loop or block_num = 0 is not support"), false);
    // guard the divisions below: a zero-byte params row would divide by zero
    CHK_PRT_RET(params->params_row * params_d_size == 0,
        HCCL_ERROR("GatherV2, params_row * params_d_size = 0 is not support"), false);
    params->indices_loop_num = params->indices_num_each_core / indices_num_per_loop;
    params->indices_row_num_once = indices_num_per_loop;
    if (params->indices_num_each_core % params->indices_row_num_once != 0) {
        params->indices_row_num_last = params->indices_num_each_core % params->indices_row_num_once;
    }

    // rows per UB pass, rounded down to a whole number of blocks
    params->row_num_once_ub = res_ub_size / (params->params_row * params_d_size);
    if (int(params->row_num_once_ub % block_num) != 0) {
        params->row_num_once_ub = int(params->row_num_once_ub / block_num) * block_num;
    }
    CHK_PRT_RET((params->row_num_once_ub == 0),
        HCCL_ERROR("Gather Tiling, Devide by row_num_once_ub[%ld] exception.", params->row_num_once_ub), false);
    params->inner_loop_num = params->indices_row_num_once / params->row_num_once_ub;
    if (params->indices_row_num_once % params->row_num_once_ub != 0) {
        params->row_num_once_tail_ub = params->indices_row_num_once % params->row_num_once_ub;
    }
    // merge a sub-block tail into the previous batch so DMA stays >= one block
    if (params->inner_loop_num > 0 && params->row_num_once_tail_ub > 0 &&
        params->row_num_once_tail_ub * params->params_row < block_num) {
        params->inner_loop_num = params->inner_loop_num - 1;
        params->row_num_once_tail_ub = params->row_num_once_tail_ub + params->row_num_once_ub;
    }

    // same computation for the last (partial) indices loop
    params->row_num_last_ub = res_ub_size / (params->params_row * params_d_size);
    if (int(params->row_num_last_ub % block_num) != 0) {
        params->row_num_last_ub = int(params->row_num_last_ub / block_num) * block_num;
    }
    CHK_PRT_RET((params->row_num_last_ub == 0),
        HCCL_ERROR("Gather Tiling, Devide by row_num_last_ub[%ld] exception.", params->row_num_last_ub), false);
    params->inner_loop_num_last = params->indices_row_num_last / params->row_num_last_ub;
    if (params->indices_row_num_last % params->row_num_last_ub != 0) {
        params->row_num_last_tail_ub = params->indices_row_num_last % params->row_num_last_ub;
    }
    if (params->inner_loop_num_last > 0 && params->row_num_last_tail_ub > 0 &&
        params->row_num_last_tail_ub * params->params_row < block_num) {
        params->inner_loop_num_last = params->inner_loop_num_last - 1;
        // fixed copy-paste: fold the tail into row_num_last_ub (identical in
        // value to row_num_once_ub here, but the last-loop variable is meant)
        params->row_num_last_tail_ub = params->row_num_last_tail_ub + params->row_num_last_ub;
    }
    HCCL_DEBUG("GatherV2 BlockLessForIndicesTiling end");
    return true;
}

// compute tiling params for tiling_mode 8&9
// compute tiling params for tiling_mode 8&9
/**
 * Splits the per-core indices into loops of indices_num_per_loop and, within
 * each loop, into UB batches. The batch size is rounded down to a multiple
 * of block_num and a tail batch smaller than one 32B block is merged back
 * into the preceding batch. Returns false when a divisor would be zero.
 */
bool BlockLessForParamsTiling(GatherV2TilingParams *params, int64_t indices_num_per_loop, int64_t res_ub_size,
    int64_t params_dsize, int64_t block_num)
{
    HCCL_DEBUG("GatherV2 BlockLessForParamsTiling begin");
    CHK_PRT_RET(indices_num_per_loop == 0 || block_num == 0,
        HCCL_ERROR("Gather Tiling, indices_num_per_loop or block_num = 0 is not support"), false);
    // guard the divisions below: a zero-byte params row would divide by zero
    CHK_PRT_RET(params->params_row * params_dsize == 0,
        HCCL_ERROR("Gather Tiling, params_row * params_dsize = 0 is not support"), false);
    params->indices_loop_num = params->indices_num_each_core / indices_num_per_loop;
    params->indices_row_num_once = indices_num_per_loop;
    if (params->indices_num_each_core % params->indices_row_num_once != 0) {
        params->indices_row_num_last = params->indices_num_each_core % params->indices_row_num_once;
    }

    // rows per UB pass, rounded down to a whole number of blocks
    params->row_num_once_ub = res_ub_size / (params->params_row * params_dsize);
    if (int(params->row_num_once_ub % block_num) != 0) {
        params->row_num_once_ub = int(params->row_num_once_ub / block_num) * block_num;
    }
    CHK_PRT_RET((params->row_num_once_ub == 0),
        HCCL_ERROR("Gather Tiling, Devide by row_num_once_ub[%ld] exception.", params->row_num_once_ub), false);
    params->inner_loop_num = params->indices_row_num_once / params->row_num_once_ub;
    if (params->indices_row_num_once % params->row_num_once_ub != 0) {
        params->row_num_once_tail_ub = params->indices_row_num_once % params->row_num_once_ub;
    }
    // merge a sub-block tail into the previous batch so DMA stays >= one block
    if (params->inner_loop_num > 0 && params->row_num_once_tail_ub > 0 &&
        params->row_num_once_tail_ub * params->params_row < block_num) {
        params->inner_loop_num = params->inner_loop_num - 1;
        params->row_num_once_tail_ub = params->row_num_once_tail_ub + params->row_num_once_ub;
    }

    // same computation for the last (partial) indices loop
    params->row_num_last_ub = res_ub_size / (params->params_row * params_dsize);
    if (int(params->row_num_last_ub % block_num) != 0) {
        params->row_num_last_ub = int(params->row_num_last_ub / block_num) * block_num;
    }
    CHK_PRT_RET((params->row_num_last_ub == 0),
        HCCL_ERROR("Gather Tiling, Devide by row_num_last_ub[%ld] exception.", params->row_num_last_ub), false);
    params->inner_loop_num_last = params->indices_row_num_last / params->row_num_last_ub;
    if (params->indices_row_num_last % params->row_num_last_ub != 0) {
        params->row_num_last_tail_ub = params->indices_row_num_last % params->row_num_last_ub;
    }
    if (params->inner_loop_num_last > 0 && params->row_num_last_tail_ub > 0 &&
        params->row_num_last_tail_ub * params->params_row < block_num) {
        params->inner_loop_num_last = params->inner_loop_num_last - 1;
        // fixed copy-paste: fold the tail into row_num_last_ub (identical in
        // value to row_num_once_ub here, but the last-loop variable is meant)
        params->row_num_last_tail_ub = params->row_num_last_tail_ub + params->row_num_last_ub;
    }
    HCCL_DEBUG("GatherV2 BlockLessForParamsTiling end");
    return true;
}

// Halves the active core count until each core's share of output data is
// larger than one 32B block (or only a single core remains), keeping the
// per-core split and the remainder up to date.
void CalNeedCore(GatherV2TilingParams *params, const GatherV2CompileInfo *compile_info)
{
    HCCL_DEBUG("GatherV2 CalNeedCore begin");
    // bytes moved per index; invariant across the loop
    const int64_t row_bytes = params->params_row * compile_info->params_dsize;
    while (params->need_core_num > 1) {
        params->need_core_num /= 2;
        params->indices_num_each_core = params->indices_num / params->need_core_num;
        params->indices_num_remaining = params->indices_num % params->need_core_num;
        if (params->indices_num_each_core * row_bytes > BLOCK_SIZE) {
            break;
        }
    }
    HCCL_DEBUG("GatherV2 CalNeedCore end");
}

// compute tiling params for tiling_mode 3&6&7
// compute tiling params for tiling_mode 3&6&7
/**
 * Splits the per-core indices into loops of indices_num_per_loop and, within
 * each loop, into UB batches of res_ub_size / (params_row * params_d_size)
 * rows (rows are 32B-aligned here, so no block rounding or tail merging is
 * needed). Returns false when a divisor would be zero.
 */
bool BlockAlignForIndicesTiling(GatherV2TilingParams *params, int64_t indices_num_per_loop, int64_t res_ub_size,
    int64_t params_d_size)
{
    HCCL_DEBUG("GatherV2 BlockAlignForIndicesTiling begin");
    // use CHK_PRT_RET for consistency with the sibling *Tiling helpers
    CHK_PRT_RET(indices_num_per_loop == 0,
        HCCL_ERROR("gather_v2, indices_num_per_loop = 0 is not support"), false);
    // guard the divisions below: a zero-byte params row would divide by zero
    CHK_PRT_RET(params->params_row * params_d_size == 0,
        HCCL_ERROR("gather_v2, params_row * params_d_size = 0 is not support"), false);
    params->indices_loop_num = (params->indices_num_each_core) / indices_num_per_loop;
    params->indices_row_num_once = indices_num_per_loop;
    if ((params->indices_num_each_core) % (params->indices_row_num_once) != 0) {
        params->indices_row_num_last = (params->indices_num_each_core) % (params->indices_row_num_once);
    }

    // rows per UB pass for a full indices loop
    params->row_num_once_ub = res_ub_size / ((params->params_row) * params_d_size);
    CHK_PRT_RET((params->row_num_once_ub == 0),
        HCCL_ERROR("Gather Tiling:, Devide by row_num_once_ub[%ld] exception.", params->row_num_once_ub), false);
    params->inner_loop_num = (params->indices_row_num_once) / (params->row_num_once_ub);
    if ((params->indices_row_num_once) % (params->row_num_once_ub) != 0) {
        params->row_num_once_tail_ub = (params->indices_row_num_once) % (params->row_num_once_ub);
    }

    // rows per UB pass for the last (partial) indices loop
    params->row_num_last_ub = res_ub_size / ((params->params_row) * params_d_size);
    CHK_PRT_RET((params->row_num_last_ub == 0),
        HCCL_ERROR("Gather Tiling:, Devide by row_num_last_ub[%ld] exception.", params->row_num_last_ub), false);
    params->inner_loop_num_last = (params->indices_row_num_last) / (params->row_num_last_ub);
    if ((params->indices_row_num_last) % params->row_num_last_ub != 0) {
        params->row_num_last_tail_ub = (params->indices_row_num_last) % (params->row_num_last_ub);
    }
    HCCL_DEBUG("GatherV2 BlockAlignForIndicesTiling end");
    return true;
}

// Tiling that parallelizes over params_pre: each core handles a slice of the leading
// dimensions while processing the full indices list (tiling modes 8-12).
bool ParamsPreTiling(GatherV2TilingParams *params, const GatherV2CompileInfo *compile_info, int64_t half_ub_size,
    int64_t half_remain_ub_size, int64_t params_total_ceil, int64_t params_row_ceil)
{
    HCCL_DEBUG("GatherV2 ParamsPreTiling begin");
    const int64_t params_dsize = compile_info->params_dsize;
    const int64_t indices_dsize = compile_info->indices_dsize;
    params->need_core_num = compile_info->core_num;
    params->tail_process_core = 0;
    params->params_pre_each_core = (params->params_pre) / (params->need_core_num);
    params->params_pre_remaining = (params->params_pre) % (params->need_core_num);
    params->indices_num_each_core = params->indices_num;
    int64_t half_remain_params_elem = half_remain_ub_size / params_dsize;
    int64_t res_ub_size = half_ub_size;
    int64_t indices_num_per_loop = half_ub_size / indices_dsize;
    int64_t block_num = BLOCK_SIZE / params_dsize;

    // Output per core below one 32B block: collapse to a single core.
    if ((params->indices_num_each_core) * (params->params_row) * params_dsize <= BLOCK_SIZE) {
        params->need_core_num = 1;
        params->tail_process_core = 0;
        params->params_pre_each_core = params->params_pre;
        params->params_pre_remaining = 0;
    }

    if ((params->params_row) * params_dsize < BLOCK_SIZE) {
        // Sub-block rows: mode 8 caches params in UB when they fit, otherwise mode 9.
        const bool params_fit_cached_ub = params_total_ceil <= PARAMS_CACHED_UB / params_dsize;
        params->tiling_mode = params_fit_cached_ub ? TILING_MODE_8 : TILING_MODE_9;
        if (params_fit_cached_ub) {
            // Part of UB is reserved for cached params; shrink the working buffers.
            indices_num_per_loop = half_remain_ub_size / indices_dsize;
            res_ub_size = half_remain_ub_size;
        }
        if (!BlockLessForParamsTiling(params, indices_num_per_loop, res_ub_size, params_dsize, block_num)) {
            return false;
        }
    } else {
        // Block-aligned rows: prefer UB cache (10), then L1 (11), else no cache (12).
        if (params_total_ceil <= PARAMS_CACHED_UB / params_dsize && params_row_ceil <= half_remain_params_elem) {
            params->tiling_mode = TILING_MODE_10;
            indices_num_per_loop = half_remain_ub_size / indices_dsize;
            res_ub_size = half_remain_ub_size;
        } else if (params_total_ceil <= (compile_info->l1_size) / params_dsize) {
            params->tiling_mode = TILING_MODE_11;
        } else {
            params->tiling_mode = TILING_MODE_12;
        }
        if (!BlockAlignForParamsTiling(params, indices_num_per_loop, res_ub_size, params_dsize)) {
            return false;
        }
    }
    HCCL_DEBUG("GatherV2 ParamsPreTiling end");
    return true;
}

// Mode selection when one params row is below 32B and tiling is indices-driven
// (modes 1/4/13). May shrink the active core count when the per-core output is tiny.
bool ParamsSmall32B(GatherV2TilingParams *params, const GatherV2CompileInfo *compile_info, int64_t params_total_ceil,
    int64_t indices_num_per_loop, int64_t half_remain_ub_size, int64_t res_ub_size, int64_t block_num)
{
    HCCL_DEBUG("GatherV2 ParamsSmall32B begin");
    const int64_t dsize = compile_info->params_dsize;
    if (params_total_ceil <= PARAMS_CACHED_UB / dsize) {
        params->tiling_mode = TILING_MODE_4; // whole params tensor fits in the cached UB region
    } else if (params_total_ceil <= ((compile_info->l1_size) / dsize)) {
        params->tiling_mode = TILING_MODE_13; // params fit in L1
    } else {
        params->tiling_mode = TILING_MODE_1;
    }
    const bool tiny_per_core_output =
        ((params->params_row) < BLOCK_SIZE) &&
        ((params->indices_num_each_core) * (params->params_row) * dsize <= BLOCK_SIZE);
    if (tiny_per_core_output) {
        CalNeedCore(params, compile_info);
    }

    if (params->tiling_mode == TILING_MODE_4) {
        // Cached-params mode: working buffers come from the remaining part of UB.
        indices_num_per_loop = half_remain_ub_size / (compile_info->indices_dsize);
        res_ub_size = half_remain_ub_size;
    }

    if (!BlockLessForIndicesTiling(params, indices_num_per_loop, res_ub_size, dsize, block_num)) {
        HCCL_ERROR("GatherV2, BlockLessForIndicesTiling is false");
        return false;
    }
    HCCL_DEBUG("GatherV2 ParamsSmall32B end");
    return true;
}

// Indices-driven tiling when one params row is >= 32B (modes 2/3/5/6/7):
// mode 2 handles unaligned rows, modes 3/6/7 aligned rows (with UB/L1 caching),
// mode 5 rows that do not fit into half UB at all.
bool ParamsGreater32B(GatherV2TilingParams *params, const GatherV2CompileInfo *compile_info,
    int64_t half_ub_params_elem, int64_t half_remain_ub_size, int64_t half_ub_size, int64_t params_total_ceil,
    int64_t params_row_ceil)
{
    HCCL_DEBUG("GatherV2 ParamsGreater32B begin");
    int64_t half_ub_indices_elem = half_ub_size / (compile_info->indices_dsize);
    int64_t half_remain_params_elem = half_remain_ub_size / (compile_info->params_dsize);
    int64_t indices_num_per_loop = half_ub_indices_elem;
    int64_t res_ub_size = half_ub_size;
    int64_t block_num = BLOCK_SIZE / (compile_info->params_dsize);
    // Empirical gate for choosing mode 7 over mode 3 (formula gained from actual tests).
    // NOTE(review): ACTUAL_NUM (56.5) and GATE_VALUE (0.012) are declared int64_t, so they
    // truncate to 56 and 0 and this expression reduces to a constant 56 — confirm whether
    // those constants were meant to be floating point.
    float mode_7_gate_value = ACTUAL_NUM - GATE_VALUE * params->params_total / DATA_VALUE;
    if (params_row_ceil <= half_ub_params_elem) {
        if ((params->params_row) * (compile_info->params_dsize) % BLOCK_SIZE != 0) { // not 32B aligned
            params->tiling_mode = TILING_MODE_2;

            params->indices_loop_num = (params->indices_num_each_core) / half_ub_indices_elem;
            params->indices_row_num_once = half_ub_indices_elem;
            if ((params->indices_num_each_core) % (params->indices_row_num_once) != 0) {
                params->indices_row_num_last = (params->indices_num_each_core) % (params->indices_row_num_once);
            }
        } else { // 32B aligned
            // Prefer UB cache (6), then L1 cache gated by indices volume (7), else 3.
            if (params_total_ceil <= PARAMS_CACHED_UB / (compile_info->params_dsize) &&
                params_row_ceil <= half_remain_params_elem) {
                params->tiling_mode = TILING_MODE_6;
            } else if (params_total_ceil <= (compile_info->l1_size) / (compile_info->params_dsize) &&
                (params->indices_num) > mode_7_gate_value) {
                params->tiling_mode = TILING_MODE_7;
            } else {
                params->tiling_mode = TILING_MODE_3;
            }
            if (params->tiling_mode == TILING_MODE_6) {
                // Cached-params mode: working buffers come from the remaining part of UB.
                indices_num_per_loop = half_remain_ub_size / (compile_info->indices_dsize);
                res_ub_size = half_remain_ub_size;
            }

            if (!BlockAlignForIndicesTiling(params, indices_num_per_loop, res_ub_size, compile_info->params_dsize)) {
                return false;
            }
        }
    } else {
        params->tiling_mode = TILING_MODE_5; // one params row need tiling

        params->indices_loop_num = params->indices_num_each_core / half_ub_indices_elem;
        params->indices_row_num_once = indices_num_per_loop;
        if ((params->indices_num_each_core) % (params->indices_row_num_once) != 0) {
            params->indices_row_num_last = (params->indices_num_each_core) % (params->indices_row_num_once);
        }

        // Split one oversized row into full half-UB chunks plus a tail; if the tail is
        // smaller than one 32B block, fold the last full chunk into the tail.
        params->one_row_loop = (params->params_row) / half_ub_params_elem;
        params->one_row_tail = (params->params_row) % half_ub_params_elem;
        if (params->one_row_loop > 0 && (params->one_row_tail) > 0 && (params->one_row_tail) < block_num) {
            params->one_row_loop = (params->one_row_loop) - 1;
            params->one_row_tail = half_ub_params_elem + (params->one_row_tail);
        }
    }
    HCCL_DEBUG("GatherV2 ParamsGreater32B end");
    return true;
}

// Indices-driven tiling entry: spreads indices across cores, then dispatches to the
// small-row (< 32B) or large-row (>= 32B) parameter handler.
bool ParamsIndicesTiling(GatherV2TilingParams *params, const GatherV2CompileInfo *compile_info, int64_t half_ub_size,
    int64_t half_remain_ub_size, int64_t half_ub_params_elem, int64_t params_total_ceil, int64_t params_row_ceil)
{
    HCCL_DEBUG("GatherV2 ParamsIndicesTiling begin");
    params->need_core_num = compile_info->core_num;
    params->tail_process_core = 0;
    params->indices_num_each_core = (params->indices_num) / (params->need_core_num);
    params->indices_num_remaining = (params->indices_num) % (params->need_core_num);
    const int64_t indices_per_loop = half_ub_size / (compile_info->indices_dsize);
    const int64_t res_ub_size = half_ub_size;
    const int64_t block_num = BLOCK_SIZE / (compile_info->params_dsize);
    // Fewer indices than cores: give each active core exactly one index.
    if (params->indices_num <= params->need_core_num) {
        params->need_core_num = params->indices_num;
        params->tail_process_core = 0;
        params->indices_num_each_core = 1;
        params->indices_num_remaining = 0;
    }

    const bool row_below_block = (params->params_row) * (compile_info->params_dsize) < BLOCK_SIZE;
    bool ok;
    if (row_below_block) {
        // one params row size is smaller than 32B
        ok = ParamsSmall32B(params, compile_info, params_total_ceil, indices_per_loop, half_remain_ub_size,
            res_ub_size, block_num);
        if (!ok) {
            HCCL_ERROR("GatherV2, ParamsSmall32B is false");
        }
    } else {
        // one params row size is greater than or equal to 32B
        ok = ParamsGreater32B(params, compile_info, half_ub_params_elem, half_remain_ub_size, half_ub_size,
            params_total_ceil, params_row_ceil);
        if (!ok) {
            HCCL_ERROR("GatherV2, ParamsGreater32B is false");
        }
    }
    if (!ok) {
        return false;
    }
    HCCL_DEBUG("GatherV2 ParamsIndicesTiling end");
    return true;
}

// Returns true when the indices shape exactly matches an entry of the AD trustlist
// (such shapes skip the EMB path).
bool CheckADTrustListForRt2(const gert::Shape &indices_shape)
{
    HCCL_DEBUG("GatherV2 CheckADTrustListForRt2 begin");
    const int64_t dim_num = indices_shape.GetDimNum();
    std::vector<int64_t> dims;
    dims.reserve(dim_num);
    for (int64_t i = 0; i < dim_num; ++i) {
        dims.push_back(indices_shape.GetDim(i));
    }

    for (const auto &trusted_shape : AD_Trustlist_RT) {
        if (trusted_shape == dims) {
            HCCL_DEBUG("GatherV2, [CheckADTrustListForRt2] match the AD_Trustlist, no need EMB");
            return true;
        }
    }

    HCCL_DEBUG("GatherV2 CheckADTrustListForRt2 end");
    return false;
}

// Product of the shape's dimensions over the half-open range [begin, end).
int64_t GetPartShapeSize(const gert::Shape &shape, size_t begin, size_t end)
{
    int64_t product = 1;
    for (size_t idx = begin; idx < end; ++idx) {
        product *= shape[idx];
    }
    return product;
}

// Tiling for the no-batch-dims case. Shapes are collapsed as:
//   params  -> 3D [params_pre, params_axis, params_row]
//   indices -> 1D [indices_num]
//   output  -> [params_pre, indices_num, params_row]
// Tries the cache-mode / impl-mode fast paths first (each fully fills the tiling params
// on success), then falls back to params-pre-driven or indices-driven tiling.
// Returns false on an invalid UB configuration or a sub-tiling failure.
bool TilingWithoutBatchDims(gert::TilingContext *context, const GatherV2CompileInfo *compile_info,
    GatherV2TilingParams *params, int64_t axis, int64_t params_dims, int64_t indices_dims)
{
    HCCL_DEBUG("GatherV2 TilingWithoutBatchDims begin");
    // Use the named file constant instead of the magic 2 * 1024 for the reserved 2K.
    int64_t available_ub_size = (compile_info->ub_size) - RESERVED_UB_SIZE_2K;
    int64_t half_ub_size = available_ub_size / HALF_UB;
    auto &x_shape = gert::EnsureNotScalar(context->GetInputShape(0)->GetStorageShape());
    auto &indies_shape = gert::EnsureNotScalar(context->GetInputShape(1)->GetStorageShape());
    // params_pre: product of the dims before axis.
    if (axis == 0) {
        params->params_pre = 1;
    } else {
        for (int64_t i = 0; i < axis; i++) {
            params->params_pre *= x_shape.GetDim(i);
        }
    }
    params->params_axis = x_shape.GetDim(axis);

    // params_row: product of the dims after axis (1 when axis is the last dim).
    if (axis + 1 < params_dims) {
        for (int64_t i = axis + 1; i < params_dims; i++) {
            params->params_row *= x_shape.GetDim(i);
        }
    } else {
        params->params_row = 1;
    }
    params->params_total = GetPartShapeSize(x_shape, 0, params_dims);
    int64_t block_num = BLOCK_SIZE / (compile_info->params_dsize);
    int64_t params_total_ceil = ((params->params_total) + block_num - 1) / block_num * block_num;
    int64_t params_row_ceil = ((params->params_row) + block_num - 1) / block_num * block_num;
    // Use int64_t for the loop index to match indices_dims (was a narrowing int).
    for (int64_t i = 0; i < indices_dims; i++) {
        params->indices_num = (params->indices_num) * indies_shape.GetDim(i);
    }

    int64_t half_remain_ub_size = (available_ub_size - PARAMS_CACHED_UB) / HALF_UB;
    int64_t half_ub_params_elem = half_ub_size / (compile_info->params_dsize);
    if (half_ub_params_elem == 0) {
        HCCL_ERROR("GatherV2, half_ub_params_elem is 0");
        return false;
    }

    // Fast paths: cache-mode variants (skipped for trustlisted shapes) and the
    // high-performance impl mode.
    if (!CheckADTrustListForRt2(indies_shape) && DoCacheModeTilingwithCpuPreprocess(axis, params, compile_info)) {
        HCCL_DEBUG("GatherV2, [GatherV2TIKTiling] end of tiling for topn cache is DoCacheMode with CpuPreprocess");
        return true;
    }

    if (DoCacheModeTilingAlign(axis, params, compile_info)) {
        HCCL_DEBUG("GatherV2, [GatherV2TIKTiling] end of tiling for DoCacheModeTilingAlign");
        return true;
    }

    if (DoCacheModeTilingNotAlian(axis, params, compile_info)) {
        HCCL_DEBUG("GatherV2, [GatherV2TIKTiling] end of tiling for DoCacheModeTilingNotAlian");
        return true;
    }

    if (DoImplModeTiling(params, compile_info)) {
        HCCL_DEBUG("GatherV2, [GatherV2TIKTiling] end of tiling for impl_mode is high_performance");
        return true;
    }

    // Enough leading rows to feed every core and a row that fits in half UB (and is
    // either sub-block-sized or 32B aligned): split work across params_pre; otherwise
    // split across indices.
    if (params->params_pre >= (compile_info->core_num) && params_row_ceil <= half_ub_params_elem &&
        ((params->params_row) * (compile_info->params_dsize) < BLOCK_SIZE ||
        (params->params_row) * (compile_info->params_dsize) % BLOCK_SIZE == 0)) {
        if (!ParamsPreTiling(params, compile_info, half_ub_size, half_remain_ub_size, params_total_ceil,
            params_row_ceil)) {
            HCCL_ERROR("GatherV2, ParamsPreTiling is false");
            return false;
        }
    } else {
        if (!ParamsIndicesTiling(params, compile_info, half_ub_size, half_remain_ub_size, half_ub_params_elem,
            params_total_ceil, params_row_ceil)) {
            HCCL_ERROR("GatherV2, ParamsIndicesTiling is false");
            return false;
        }
    }
    HCCL_DEBUG("GatherV2 TilingWithoutBatchDims end");
    return true;
}

// Batch-dims tiling when one params row does not fit into half UB (modes 35-37):
// each row is moved in one_row_loop chunks of half_ub_params_elem plus a tail, and the
// indices are either fully cached (35), cached per batch row (36), or streamed (37).
bool LargeRowProcess(GatherV2TilingParams *params, const GatherV2CompileInfo *compile_info,
    int64_t half_ub_params_elem, int64_t half_size_ub)
{
    HCCL_DEBUG("GatherV2 LargeRowProcess begin");
    CHK_PRT_RET(half_ub_params_elem == 0, HCCL_ERROR("GatherV2:, half_ub_params_elem = 0 is not support"), false);
    params->one_row_loop = (params->params_row) / half_ub_params_elem;
    params->one_row_tail = (params->params_row) % half_ub_params_elem;
    int64_t block_num = BLOCK_SIZE / (compile_info->params_dsize);
    // Keep the row tail at least one 32B block by folding the last full chunk into it.
    if ((params->one_row_loop) > 0 && (params->one_row_tail) > 0 && (params->one_row_tail) < block_num) {
        params->one_row_loop = (params->one_row_loop) - 1;
        params->one_row_tail = half_ub_params_elem + (params->one_row_tail);
    }

    if ((params->params_batch_each_core) * (params->indices_row) * (compile_info->indices_dsize) <= half_size_ub) {
        params->indices_row_num_once = params->indices_row;
        params->tiling_mode = TILING_MODE_35; // all of this core's indices cached at once
    } else if ((params->indices_row) * (compile_info->indices_dsize) <= half_size_ub) {
        params->indices_row_num_once = params->indices_row;
        params->tiling_mode = TILING_MODE_36; // one batch row of indices cached at once
    } else {
        int64_t indices_num_per_loop = half_size_ub / (compile_info->indices_dsize);
        params->indices_loop_num = (params->indices_row) / indices_num_per_loop;
        params->indices_row_num_once = indices_num_per_loop;
        if ((params->indices_row) % (params->indices_row_num_once) != 0) {
            // Fix: the tail is the remainder of indices_row (the quantity divided above);
            // the previous code took indices_num_each_core's remainder, mismatching the
            // condition on this branch.
            params->indices_row_num_last = (params->indices_row) % (params->indices_row_num_once);
        }
        params->tiling_mode = TILING_MODE_37;
    }
    HCCL_DEBUG("GatherV2 LargeRowProcess end");
    return true;
}

// Size the per-loop row count for the cached-indices modes: rounds row_num_once_ub down
// to a mode-specific alignment unit, then derives the inner loop count and tail, folding
// a full loop into the tail when the tail output would be smaller than one block.
// Sets params->tiling_mode to the given tiling_mode on success.
bool CalcCacheIndices(GatherV2TilingParams *params, int64_t indices_num_per_loop, int64_t res_ub_size,
    int64_t params_d_size, int64_t tiling_mode)
{
    HCCL_DEBUG("GatherV2 CalcCacheIndices begin");
    CHK_PRT_RET(params_d_size == 0, HCCL_ERROR("GatherV2:, params_d_size= 0 is not support"), false);
    params->indices_row_num_once = indices_num_per_loop;
    params->row_num_once_ub = res_ub_size / ((params->params_row) * params_d_size);
    int64_t block_num = BLOCK_SIZE / params_d_size;
    // Alignment unit for the UB row count depends on the tiling mode:
    // 38/39 align to whole indices rows, 40/41 to whole (params_pre x indices_row)
    // batches, large rows need no alignment, small rows align to one 32B block.
    int64_t align_unit;
    if (tiling_mode == TILING_MODE_38 || tiling_mode == TILING_MODE_39) {
        align_unit = params->indices_row * block_num;
    } else if (tiling_mode == TILING_MODE_40 || tiling_mode == TILING_MODE_41) {
        align_unit = (params->params_pre) * (params->indices_row) * block_num;
    } else if ((params->params_row) * params_d_size >= BLOCK_SIZE) {
        align_unit = 1;
    } else {
        align_unit = block_num;
    }

    // Round row_num_once_ub down to a multiple of align_unit.
    // NOTE(review): the int() casts narrow int64_t intermediates — presumably the values
    // stay within int range here; confirm for very large shapes.
    if (int((params->row_num_once_ub) % align_unit) != 0) {
        params->row_num_once_ub = int((params->row_num_once_ub) / align_unit) * align_unit;
    }
    CHK_PRT_RET((params->row_num_once_ub == 0),
        HCCL_ERROR("Gather Tiling:, Devide by row_num_once_ub[%ld] exception.", params->row_num_once_ub), false);
    params->inner_loop_num = (params->indices_row_num_once) / (params->row_num_once_ub);
    if ((params->indices_row_num_once) % (params->row_num_once_ub) != 0) {
        params->row_num_once_tail_ub = (params->indices_row_num_once) % (params->row_num_once_ub);
    }
    // If the tail's output would be under one block, fold a full loop into the tail.
    if ((params->inner_loop_num) > 0 && (params->row_num_once_tail_ub) > 0 &&
        (params->row_num_once_tail_ub) * (params->params_row) < block_num) {
        params->inner_loop_num = (params->inner_loop_num) - 1;
        params->row_num_once_tail_ub = (params->row_num_once_tail_ub) + (params->row_num_once_ub);
    }
    params->tiling_mode = tiling_mode;
    HCCL_DEBUG("GatherV2 CalcCacheIndices end");
    return true;
}

// Stream indices through UB in chunks for the batch-dims no-cache modes: computes the
// outer indices loop count plus its tail, then the inner row loops (and tails) that fit
// into res_ub_size, folding undersized tails into the preceding loop.
bool CalcWithBatchDims(GatherV2TilingParams *params, int64_t indices_num_per_loop, int64_t res_ub_size,
    int64_t params_d_size)
{
    HCCL_DEBUG("GatherV2 CalcWithBatchDims begin");
    if (indices_num_per_loop == 0 || params_d_size == 0) {
        HCCL_ERROR("gather_v2, indices_num_per_loop or params_d_size= 0 is not support");
        return false;
    }
    params->indices_loop_num = (params->indices_row) / indices_num_per_loop;
    params->indices_row_num_once = indices_num_per_loop;
    int64_t block_num = BLOCK_SIZE / params_d_size;
    if ((params->params_row) * params_d_size >= BLOCK_SIZE) {
        block_num = 1; // rows at/above one block need no multi-row packing
    }
    if ((params->indices_row) % (params->indices_row_num_once) != 0) {
        // Fix: the last-loop tail is the remainder of indices_row (the quantity divided
        // above); the previous code took indices_num_each_core's remainder, mismatching
        // the condition.
        params->indices_row_num_last = (params->indices_row) % (params->indices_row_num_once);
    }
    if ((params->indices_loop_num) > 0 &&
        (params->indices_row_num_last) * (params->indices_row) * (params->params_row) < block_num) {
        params->indices_loop_num -= 1;
        params->indices_row_num_last += params->indices_row_num_once;
    }

    // Rows that fit in res_ub_size per inner loop, rounded down to block_num.
    params->row_num_once_ub = res_ub_size / ((params->params_row) * params_d_size);
    if (int((params->row_num_once_ub) % block_num) != 0) {
        params->row_num_once_ub = int((params->row_num_once_ub) / block_num) * block_num;
    }
    CHK_PRT_RET((params->row_num_once_ub == 0),
        HCCL_ERROR("Gather Tiling:, Devide by row_num_once_ub[%ld] exception.", params->row_num_once_ub), false);
    params->inner_loop_num = (params->indices_row_num_once) / (params->row_num_once_ub);
    if ((params->indices_row_num_once) % (params->row_num_once_ub) != 0) {
        params->row_num_once_tail_ub = (params->indices_row_num_once) % (params->row_num_once_ub);
    }
    // Fold a full inner loop into the tail when the tail output is under one block.
    if ((params->inner_loop_num) > 0 && (params->row_num_once_tail_ub) > 0 &&
        (params->row_num_once_tail_ub) * (params->params_row) < block_num) {
        params->inner_loop_num = params->inner_loop_num - 1;
        params->row_num_once_tail_ub = params->row_num_once_tail_ub + params->row_num_once_ub;
    }

    // Same inner computation for the last (partial) indices loop.
    params->row_num_last_ub = params->row_num_once_ub;
    params->inner_loop_num_last = (params->indices_row_num_last) / (params->row_num_once_ub);
    if ((params->indices_row_num_last) % (params->row_num_once_ub) != 0) {
        params->row_num_last_tail_ub = (params->indices_row_num_last) % (params->row_num_once_ub);
    }
    if ((params->inner_loop_num_last) > 0 && (params->row_num_last_tail_ub) > 0 &&
        (params->row_num_last_tail_ub) * (params->params_row) < block_num) {
        params->inner_loop_num_last = params->inner_loop_num_last - 1;
        params->row_num_last_tail_ub = params->row_num_last_tail_ub + params->row_num_once_ub;
    }
    HCCL_DEBUG("GatherV2 CalcWithBatchDims end"); // fix: closing log previously said "begin"
    return true;
}

// Chooses among three batch-dims indices strategies by how much indices data fits into
// aval_ub_size: cache all per-core indices (mode_cache_all), cache one batch row
// (mode_cache_row), or stream in chunks (mode_without_cache).
bool IndicesCachedProcess(GatherV2TilingParams *params, const GatherV2CompileInfo *compile_info, int64_t aval_ub_size,
    int64_t mode_cache_all, int64_t mode_cache_row, int64_t mode_without_cache)
{
    HCCL_DEBUG("GatherV2 IndicesCachedProcess begin");
    const int64_t indices_dsize = compile_info->indices_dsize;
    const int64_t params_dsize = compile_info->params_dsize;
    const int64_t per_core_indices_bytes = params->params_batch_each_core * params->indices_row * indices_dsize;
    const int64_t one_row_indices_bytes = params->indices_row * indices_dsize;
    bool ok;
    if (per_core_indices_bytes <= aval_ub_size) {
        // Every index this core touches fits in UB at once.
        ok = CalcCacheIndices(params, params->indices_row, aval_ub_size, params_dsize, mode_cache_all);
    } else if (one_row_indices_bytes <= aval_ub_size) {
        // A single batch row of indices fits in UB at once.
        ok = CalcCacheIndices(params, params->indices_row, aval_ub_size, params_dsize, mode_cache_row);
    } else {
        // Indices must be streamed through UB in chunks.
        params->tiling_mode = mode_without_cache;
        ok = CalcWithBatchDims(params, aval_ub_size / indices_dsize, aval_ub_size, params_dsize);
    }
    if (!ok) {
        return false;
    }
    HCCL_DEBUG("GatherV2 IndicesCachedProcess end");
    return true;
}

// Batch-dims tiling for small rows (modes 38-41): recomputes the per-core batch split
// for the 38/39 pair, then picks the UB-cached or uncached variant depending on whether
// the params fit in the cached UB region.
bool SmallRowProcess(GatherV2TilingParams *params, const GatherV2CompileInfo *compile_info, int64_t mode_with_cache,
    int64_t mode_without_cache, int64_t half_remain_size_ub, int64_t half_size_ub)
{
    HCCL_DEBUG("GatherV2 SmallRowProcess begin");
    // NOTE(review): this condition mixes mode_with_cache and mode_without_cache; for the
    // current call sites (38/39 and 40/41) it is equivalent to
    // mode_with_cache == TILING_MODE_38 — confirm whether the second operand was meant to
    // test mode_with_cache == TILING_MODE_39.
    if (mode_with_cache == TILING_MODE_38 || mode_without_cache == TILING_MODE_39) {
        // Modes 38/39 split work over params_pre instead of params_batch.
        params->params_batch_each_core = params->params_pre / params->need_core_num;
        params->params_batch_remaining = params->params_pre % params->need_core_num;
    }
    params->tail_process_core = params->need_core_num - 1;
    params->indices_num_each_core = params->params_batch_each_core * params->indices_row;
    params->indices_num_remaining = 0;
    int64_t block_num = BLOCK_SIZE / compile_info->params_dsize;
    int64_t indices_num_per_loop = params->indices_num_each_core;
    // Ceil params_total / params_row up to whole 32B blocks for the cache-fit checks.
    int64_t params_total_ceil = (params->params_total + block_num - 1) / block_num * block_num;
    int64_t params_row_ceil = (params->params_row + block_num - 1) / block_num * block_num;
    int64_t half_remain_params_elem = half_remain_size_ub / compile_info->params_dsize;
    if (params_total_ceil <= PARAMS_CACHED_UB / compile_info->params_dsize &&
        params_row_ceil <= half_remain_params_elem) {
        // Params fit in the cached UB region: use the cached variant with the reduced UB.
        if (!CalcCacheIndices(params, indices_num_per_loop, half_remain_size_ub, compile_info->params_dsize,
            mode_with_cache)) {
            return false;
        }
    } else {
        if (!CalcCacheIndices(params, indices_num_per_loop, half_size_ub, compile_info->params_dsize,
            mode_without_cache)) {
            return false;
        }
    }
    HCCL_DEBUG("GatherV2 SmallRowProcess end");
    return true;
}

// Halve the active core count until each core's output exceeds one 32B block (or a
// single core remains), recomputing the per-core batch/indices split at every step.
void CalNeedCoreWithBatchDims(GatherV2TilingParams *params, const GatherV2CompileInfo *compile_info)
{
    for (; params->need_core_num > 1;) {
        params->need_core_num /= 2;
        params->params_batch_each_core = params->params_batch / params->need_core_num;
        params->params_batch_remaining = params->params_batch % params->need_core_num;
        params->indices_num_each_core = params->params_batch_each_core * params->indices_row;
        params->indices_num_remaining = params->params_batch_remaining * params->indices_row;
        const int64_t out_bytes_each_core = params->indices_num_each_core * params->params_pre *
            params->params_row * compile_info->params_dsize;
        if (out_bytes_each_core > BLOCK_SIZE) {
            break;
        }
    }
}

// Collapse the input shapes for the batch-dims case:
//   params  -> 4D [params_batch, params_pre, params_axis, params_row]
//   indices -> 2D [indices_batch, indices_row]
// Also accumulates the total indices element count into params->indices_num and the
// leading batch product into the indices_batch out-parameter.
void ParasPreProcess(GatherV2TilingParams *params, const GatherV2CompileInfo *compile_info,
    gert::TilingContext *context, int64_t axis, int64_t batch_dims, int64_t &indices_batch)
{
    HCCL_DEBUG("GatherV2 ParasPreProcess begin");
    auto &x_shape = gert::EnsureNotScalar(context->GetInputShape(0)->GetStorageShape());
    auto &indices_shape = gert::EnsureNotScalar(context->GetInputShape(1)->GetStorageShape());
    const int64_t indices_dims = indices_shape.GetDimNum();
    const int64_t params_dims = x_shape.GetDimNum();

    // indices_batch: product of the leading batch_dims dims of indices.
    for (int64_t d = 0; d < batch_dims; d++) {
        indices_batch *= indices_shape.GetDim(d);
    }
    params->params_batch = indices_batch;
    // indices_row: product of the remaining indices dims.
    for (int64_t d = batch_dims; d < indices_dims; d++) {
        params->indices_row *= indices_shape.GetDim(d);
    }

    // params_pre: product of params dims in [batch_dims, axis); 1 when axis == batch_dims.
    if (axis != batch_dims) {
        for (int64_t d = batch_dims; d < axis; d++) {
            params->params_pre *= x_shape.GetDim(d);
        }
    } else {
        params->params_pre = 1;
    }
    params->params_axis = x_shape.GetDim(axis);
    // params_row: product of params dims after axis; 1 when axis is the last dim.
    if (axis + 1 >= params_dims) {
        params->params_row = 1;
    } else {
        for (int64_t d = axis + 1; d < params_dims; d++) {
            params->params_row *= x_shape.GetDim(d);
        }
    }

    // Total indices element count over all dims.
    for (int64_t d = 0; d < indices_dims; d++) {
        params->indices_num *= indices_shape.GetDim(d);
    }
    HCCL_DEBUG("GatherV2 ParasPreProcess end");
}

// Batch-dims tiling when one output row group (indices_row * params_row) is at most one
// 32B block: picks the small-row modes 38-41 by total output volume, or falls through to
// the cached-indices modes 20-25 for larger row groups.
bool WithBatchDimsSmall(GatherV2TilingParams *params, const GatherV2CompileInfo *compile_info,
    int64_t half_remain_params_elem, int64_t half_size_ub, int64_t params_total_ceil, int64_t params_row_ceil)
{
    HCCL_DEBUG("GatherV2 WithBatchDimsSmall begin");
    int64_t available_ub_size = compile_info->ub_size - 2 * 1024; // reserved 2K
    int64_t half_remain_size_ub = (available_ub_size - PARAMS_CACHED_UB) / HALF_UB;
    if ((params->indices_row) * (params->params_row) * (compile_info->params_dsize) <= BLOCK_SIZE) {
        // Tiny per-batch output: choose modes 40/41 for very small total output,
        // otherwise modes 38/39 which parallelize over params_pre.
        if ((params->params_pre) * (params->indices_row) * (params->params_row) * (compile_info->params_dsize) <=
            NUM_32 * BLOCK_SIZE) {
            if ((params->indices_num_each_core) * (params->params_row) * (compile_info->params_dsize) <= BLOCK_SIZE) {
                // Output per core under one block: shrink the active core count first.
                CalNeedCoreWithBatchDims(params, compile_info);
            }
            params->params_total =
                (params->params_batch_each_core) * (params->params_pre) * (params->params_axis) * (params->params_row);
            if (!SmallRowProcess(params, compile_info, TILING_MODE_40, TILING_MODE_41, half_remain_size_ub,
                half_size_ub)) {
                return false;
            }
        } else {
            // Modes 38/39 split over params_pre, so cap cores at params_pre.
            params->need_core_num =
                ((params->params_pre) < (compile_info->core_num)) ? (params->params_pre) : (compile_info->core_num);
            params->params_total =
                (params->params_batch) * (params->params_pre) * (params->params_axis) * (params->params_row);
            if (!SmallRowProcess(params, compile_info, TILING_MODE_38, TILING_MODE_39, half_remain_size_ub,
                half_size_ub)) {
                return false;
            }
        }
        return true;
    }
    // Larger row groups: cached-params modes 20-22 when params fit in the cached UB
    // region, otherwise modes 23-25 using the full half UB.
    if (params_total_ceil <= PARAMS_CACHED_UB / (compile_info->params_dsize) &&
        params_row_ceil <= half_remain_params_elem) {
        if (!IndicesCachedProcess(params, compile_info, half_remain_size_ub, TILING_MODE_20, TILING_MODE_21,
            TILING_MODE_22)) {
            return false;
        }
    } else {
        if (!IndicesCachedProcess(params, compile_info, half_size_ub, TILING_MODE_23, TILING_MODE_24,
            TILING_MODE_25)) {
            return false;
        }
    }
    HCCL_DEBUG("GatherV2 WithBatchDimsSmall end");
    return true;
}

// Batch-dims mode selection when one params row fits into half UB (modes 26-34),
// keyed on 32B alignment of the row and whether params can be cached in UB.
bool WithBatchDimsSmallCeil(GatherV2TilingParams *params, const GatherV2CompileInfo *compile_info,
    int64_t half_size_ub, int64_t half_remain_size_ub, int64_t half_remain_params_elem, int64_t params_total_ceil,
    int64_t params_row_ceil)
{
    HCCL_DEBUG("GatherV2 WithBatchDimsSmallCeil begin");
    const bool row_aligned = (params->params_row) * (compile_info->params_dsize) % BLOCK_SIZE == 0;
    bool ok;
    if (!row_aligned) {
        // Unaligned rows: modes 26-28 over the full half UB.
        ok = IndicesCachedProcess(params, compile_info, half_size_ub, TILING_MODE_26, TILING_MODE_27,
            TILING_MODE_28);
    } else if (params_total_ceil <= PARAMS_CACHED_UB / (compile_info->params_dsize) &&
        params_row_ceil <= half_remain_params_elem) {
        // Aligned rows with params small enough to cache in UB: modes 29-31.
        ok = IndicesCachedProcess(params, compile_info, half_remain_size_ub, TILING_MODE_29, TILING_MODE_30,
            TILING_MODE_31);
    } else {
        // Aligned rows without UB caching: modes 32-34.
        ok = IndicesCachedProcess(params, compile_info, half_size_ub, TILING_MODE_32, TILING_MODE_33,
            TILING_MODE_34);
    }
    if (!ok) {
        return false;
    }
    HCCL_DEBUG("GatherV2 WithBatchDimsSmallCeil end");
    return true;
}

// Batch-dims dispatch on row size: rows fitting half UB go through the small-ceil mode
// selection (26-34); oversized rows go through LargeRowProcess (35-37).
bool WithBatchDimsBig(GatherV2TilingParams *params, const GatherV2CompileInfo *compile_info, int64_t params_row_ceil,
    int64_t half_size_ub, int64_t half_ub_params_elem, int64_t params_total_ceil, int64_t half_remain_params_elem)
{
    HCCL_DEBUG("GatherV2 WithBatchDimsBig begin");
    const int64_t usable_ub_size = compile_info->ub_size - RESERVED_UB_SIZE_2K; // reserved 2K
    const int64_t half_remain_size_ub = (usable_ub_size - PARAMS_CACHED_UB) / HALF_UB;
    bool ok;
    if (params_row_ceil <= half_ub_params_elem) {
        ok = WithBatchDimsSmallCeil(params, compile_info, half_size_ub, half_remain_size_ub,
            half_remain_params_elem, params_total_ceil, params_row_ceil);
    } else {
        ok = LargeRowProcess(params, compile_info, half_ub_params_elem, half_size_ub);
    }
    if (!ok) {
        return false;
    }
    HCCL_DEBUG("GatherV2 WithBatchDimsBig end");
    return true;
}

/**
 * Compute block tiling (core split) and UB tiling for Gather/GatherV2 when batch_dims != 0.
 * Splits the batch dimension across cores, derives per-core indices counts, then dispatches to
 * the small-row or big-row UB tiling path depending on the row byte size.
 * @param context  runtime tiling context (shapes are read inside ParasPreProcess)
 * @param params   tiling data to fill (output)
 * @param compile_info  static compile-time info (ub_size, core_num, dsize, ...)
 * @param axis     gather axis (already validated/normalized)
 * @param batch_dims  number of leading batch dimensions (non-zero on this path)
 * @return true on success, false if a sub-path fails.
 */
bool TilingWithBatchDims(gert::TilingContext *context, GatherV2TilingParams *params,
    const GatherV2CompileInfo *compile_info, int64_t axis, int64_t batch_dims)
{
    HCCL_DEBUG("GatherV2 TilingWithBatchDims begin");
    // Named constants instead of magic numbers: 2K reserved UB, split UB in half.
    int64_t available_ub_size = compile_info->ub_size - RESERVED_UB_SIZE_2K;
    int64_t half_size_ub = available_ub_size / HALF_UB;
    int64_t block_num = BLOCK_SIZE / compile_info->params_dsize;
    int64_t indices_batch = 1;  // filled in by ParasPreProcess below
    ParasPreProcess(params, compile_info, context, axis, batch_dims, indices_batch);

    // UB budget when PARAMS_CACHED_UB bytes are dedicated to caching params
    // (declared at first use; the previous "= 1" placeholder was dead).
    int64_t half_remain_size_ub = (available_ub_size - PARAMS_CACHED_UB) / HALF_UB;
    int64_t half_remain_params_elem = half_remain_size_ub / (compile_info->params_dsize);
    int64_t half_ub_params_elem = half_size_ub / compile_info->params_dsize;
    // Core split over the batch dimension; never use more cores than batches.
    params->need_core_num = (indices_batch < compile_info->core_num) ? indices_batch : compile_info->core_num;
    params->tail_process_core = 0;
    params->params_batch_each_core = (params->params_batch) / (params->need_core_num);
    params->params_batch_remaining = (params->params_batch) % (params->need_core_num);
    params->indices_num_each_core = (params->params_batch_each_core) * (params->indices_row);
    params->indices_num_remaining = (params->params_batch_remaining) * (params->indices_row);

    // If a whole core's output is under one 32B block, multi-core would cause
    // overlapping tail writes; fall back to a single core handling everything.
    if ((params->indices_num_each_core) * (params->params_row) * (compile_info->params_dsize) <= BLOCK_SIZE) {
        params->need_core_num = 1;
        params->tail_process_core = 0;
        params->params_batch_each_core = params->params_batch;
        params->params_batch_remaining = 0;
        params->indices_num_each_core = (params->params_batch_each_core) * (params->indices_row);
        params->indices_num_remaining = (params->params_batch_remaining) * (params->indices_row);
    }
    params->params_total =
        (params->params_batch_each_core) * (params->params_pre) * (params->params_axis) * (params->params_row);
    // Round totals up to a whole number of 32B blocks.
    int64_t params_total_ceil = ((params->params_total) + block_num - 1) / block_num * block_num;
    int64_t params_row_ceil = ((params->params_row) + block_num - 1) / block_num * block_num;

    if ((params->params_row) * (compile_info->params_dsize) < BLOCK_SIZE) {
        // Row smaller than one block: small-row path.
        if (!WithBatchDimsSmall(params, compile_info, half_remain_params_elem, half_size_ub, params_total_ceil,
            params_row_ceil)) {
            HCCL_ERROR("GatherV2, WithBatchDimsSmall is false");
            return false;
        }
    } else {
        // Row at least one block: big-row path.
        if (!WithBatchDimsBig(params, compile_info, params_row_ceil, half_size_ub, half_ub_params_elem,
            params_total_ceil, half_remain_params_elem)) {
            HCCL_ERROR("GatherV2, WithBatchDimsBig is false");
            return false;
        }
    }
    HCCL_DEBUG("GatherV2 TilingWithBatchDims end");
    return true;
}

/**
 * Dump every field of the computed tiling data to the debug log.
 * @param params  filled tiling data to print (read-only here)
 * @param op_type currently unused — kept for interface stability; presumably
 *                intended as a log prefix. NOTE(review): confirm before removing.
 */
void PrintGatherV2Params(GatherV2TilingParams *params, const std::string &op_type)
{
    HCCL_DEBUG(" GatherV2 tiling_mode=%ld.", params->tiling_mode);
    HCCL_DEBUG(" GatherV2 params_pre=%ld.", params->params_pre);
    HCCL_DEBUG(" GatherV2 params_axis=%ld.", params->params_axis);
    HCCL_DEBUG(" GatherV2 params_row=%ld.", params->params_row);
    HCCL_DEBUG(" GatherV2 indices_num=%ld.", params->indices_num);
    HCCL_DEBUG(" GatherV2 cache_params=%ld.", params->cache_params);
    HCCL_DEBUG(" GatherV2 need_core_num=%ld.", params->need_core_num);
    HCCL_DEBUG(" GatherV2 tail_process_core=%ld.", params->tail_process_core);
    HCCL_DEBUG(" GatherV2 indices_num_each_core=%ld.", params->indices_num_each_core);
    HCCL_DEBUG(" GatherV2 indices_num_remaining=%ld.", params->indices_num_remaining);
    HCCL_DEBUG(" GatherV2 indices_loop_num=%ld.", params->indices_loop_num);
    HCCL_DEBUG(" GatherV2 indices_row_num_once=%ld.", params->indices_row_num_once);
    HCCL_DEBUG(" GatherV2 indices_row_num_last=%ld.", params->indices_row_num_last);
    HCCL_DEBUG(" GatherV2 row_num_once_ub=%ld.", params->row_num_once_ub);
    HCCL_DEBUG(" GatherV2 row_num_once_tail_ub=%ld.", params->row_num_once_tail_ub);
    HCCL_DEBUG(" GatherV2 inner_loop_num=%ld.", params->inner_loop_num);
    HCCL_DEBUG(" GatherV2 row_num_last_ub=%ld.", params->row_num_last_ub);
    HCCL_DEBUG(" GatherV2 row_num_last_tail_ub=%ld.", params->row_num_last_tail_ub);
    HCCL_DEBUG(" GatherV2 inner_loop_num_last=%ld.", params->inner_loop_num_last);
    HCCL_DEBUG(" GatherV2 params_total=%ld.", params->params_total);
    HCCL_DEBUG(" GatherV2 one_row_loop=%ld.", params->one_row_loop);
    HCCL_DEBUG(" GatherV2 one_row_tail=%ld.", params->one_row_tail);
    HCCL_DEBUG(" GatherV2 params_pre_each_core=%ld.", params->params_pre_each_core);
    HCCL_DEBUG(" GatherV2 params_pre_remaining=%ld.", params->params_pre_remaining);
    HCCL_DEBUG(" GatherV2 indices_row=%ld.", params->indices_row);
    HCCL_DEBUG(" GatherV2 params_batch_each_core=%ld.", params->params_batch_each_core);
    HCCL_DEBUG(" GatherV2 params_batch_remaining=%ld.", params->params_batch_remaining);
    HCCL_DEBUG(" GatherV2 params_batch=%ld.", params->params_batch);
}

/**
 * Reset the shape-describing tiling fields to 1, the multiplicative identity,
 * so later products (pre * axis * row * batch) are well-defined even when a
 * dimension is absent. Other fields are left untouched.
 */
void InitGatherCompileParams(GatherV2TilingParams *params)
{
    constexpr int64_t kDimDefault = 1;
    params->indices_num = kDimDefault;
    params->indices_row = kDimDefault;
    params->params_row = kDimDefault;
    params->params_axis = kDimDefault;
    params->params_pre = kDimDefault;
    params->params_batch = kDimDefault;
    params->params_batch_each_core = kDimDefault;
}

/**
 * Render a gert::Shape as "[d0, d1, ...]" for debug logging.
 * Fixes: loop index was a plain `int` compared against an int64_t bound
 * (silent narrowing / signed-mix); the `first` bookkeeping flag is replaced
 * by an `i > 0` separator check.
 */
std::string ToString(const gert::Shape &shape)
{
    const int64_t dim_num = shape.GetDimNum();
    std::stringstream ss;
    ss << "[";
    for (int64_t i = 0; i < dim_num; i++) {
        if (i > 0) {
            ss << ", ";  // separator before every dim except the first
        }
        ss << shape.GetDim(i);
    }
    ss << "]";
    return ss.str();
}

/**
 * TIK tiling entry for Gather/GatherV2: read x/indices shapes, validate
 * axis/batch_dims, fill the tiling data, and set the block dim.
 * @return true on success, false on any validation or tiling failure.
 */
bool GatherTIKTiling(gert::TilingContext *context, const GatherV2CompileInfo *compile_info)
{
    HCCL_DEBUG("GatherV2 GatherTIKTiling begin");
    // Ensure the indices input (index 1) is present before reading its shape.
    auto indices_tensor = context->GetInputDesc(1);
    if (indices_tensor == nullptr) {
        // Bugfix: message previously said "compile info is null", which misdescribed the failure.
        HCCL_ERROR("indices input desc is null");
        return false;
    }
    // non-const refs: CheckAndUpdateAxisAndBatchdims may normalize these shapes in place.
    gert::Shape &x_shape =
        const_cast<gert::Shape &>(gert::EnsureNotScalar(context->GetInputShape(0)->GetStorageShape()));
    HCCL_DEBUG("%s: x_shape:%s", context->GetNodeName(), ToString(x_shape).c_str());

    gert::Shape &indies_shape =
        const_cast<gert::Shape &>(gert::EnsureNotScalar(context->GetInputShape(1)->GetStorageShape()));
    HCCL_DEBUG("%s: indies_shape:%s", context->GetNodeName(), ToString(indies_shape).c_str());

    int64_t x_dim = x_shape.GetDimNum();
    int64_t indices_dim = indies_shape.GetDimNum();
    int64_t axis = 0;
    if (compile_info->is_gather_v2) {
        HCCL_DEBUG("%s optype is gatherv2", context->GetNodeName());
    }

    // NOTE(review): batch_dims is hard-coded to 0 here, so this branch is
    // currently dead — presumably the attribute read was removed or is done
    // elsewhere (is_preprocessed?). Confirm before relying on the Gather path.
    int64_t batch_dims = 0;
    if (!(compile_info->is_gather_v2) && batch_dims != 0) {
        HCCL_DEBUG("%s optype is gather and batch_dims != 0", context->GetNodeName());
        axis = batch_dims;
    }
    // Bugfix: axis/batch_dims are int64_t — "%d" reads a 32-bit argument and
    // logs garbage where int and long differ; use "%ld".
    HCCL_DEBUG("%s axis is %ld, batch_dims is %ld", context->GetNodeName(), axis, batch_dims);
    if (!CheckAndUpdateAxisAndBatchdims(axis, x_shape, indies_shape, batch_dims, x_dim, indices_dim)) {
        HCCL_ERROR("GatherV2, op GatherV2Tiling: [CheckAndUpdateAxisAndBatchdims] failed.");
        return false;
    }

    auto params = context->GetTilingData<GatherV2TilingParams>();
    if (params == nullptr) {
        HCCL_ERROR("params info is null");
        return false;
    }
    InitGatherCompileParams(params);

    if (batch_dims == 0) {
        if (!TilingWithoutBatchDims(context, compile_info, params, axis, x_dim, indices_dim)) {
            HCCL_ERROR("GatherV2, op GatherV2Tiling: [TilingWithoutBatchDims] failed.");
            return false;
        }
    } else {
        if (!TilingWithBatchDims(context, params, compile_info, axis, batch_dims)) {
            HCCL_ERROR("GatherV2, op GatherV2Tiling: [TilingWithBatchDims] failed.");
            return false;
        }
    }
    PrintGatherV2Params(params, "GatherV2");
    // block_dim, core num used in tik op
    context->SetBlockDim(params->need_core_num);
    HCCL_DEBUG("GatherV2 GatherTIKTiling end");
    return true;
}

/**
 * Top-level tiling entry registered for Gather/GatherV2.
 * Dispatches to the TIK path when compile_info->is_tik is set; otherwise
 * nothing further is done here and success is reported.
 */
ge::graphStatus GatherTiling(gert::TilingContext *context)
{
    HCCL_DEBUG("%s GatherTiling running begin", context->GetNodeName());
    const GatherV2CompileInfo *compile_info =
        reinterpret_cast<const GatherV2CompileInfo *>(context->GetCompileInfo());
    if (compile_info == nullptr) {
        HCCL_ERROR("compile_info is null");
        return ge::GRAPH_FAILED;
    }
    // Only the TIK path is handled in this function.
    if (compile_info->is_tik && !GatherTIKTiling(context, compile_info)) {
        HCCL_ERROR("%s call TIKTiling failed", context->GetNodeName());
        return ge::GRAPH_FAILED;
    }
    HCCL_DEBUG("%s GatherTiling running end", context->GetNodeName());
    return ge::GRAPH_SUCCESS;
}

/**
 * Tiling-parse (compile-info) entry for Gather/GatherV2.
 * Parses the op's compile-info JSON: when "is_tik" is present and true, reads
 * the hardware parameters (core_num, ub_size, l1_size, dsizes, ...) from
 * "vars"; otherwise falls back to DSL auto-tiling compile info.
 * @return GRAPH_SUCCESS on success, GRAPH_FAILED on any missing/invalid item.
 */
ge::graphStatus TilingPrepareForGatherV2(gert::TilingParseContext *context)
{
    HCCL_DEBUG("%s TilingPrepareForGatherV2 running.", context->GetNodeName());
    auto compile_info = TbeReduce::GetCompileInfoPtr<GatherV2CompileInfo>(context);
    if (compile_info == nullptr) {
        HCCL_ERROR("compile_info is null");
        return ge::GRAPH_FAILED;
    }
    std::unique_ptr<nlohmann::json> parsed_object_cinfo = TbeReduce::GetCompileInfoJson(context);
    if (parsed_object_cinfo == nullptr) {
        HCCL_ERROR("parsed_object_cinfo is null");
        return ge::GRAPH_FAILED;
    }
    // TIK path: every hardware var is mandatory except impl_mode (defaults to 0).
    if (ReadCompileItem(*parsed_object_cinfo, "is_tik", compile_info->is_tik)) {
        const nlohmann::json &all_vars = (*parsed_object_cinfo)["vars"];
        HCCL_DEBUG("%s all_vars:%s", context->GetNodeName(), all_vars.dump().c_str());
        CHK_PRT_RET(!ReadCompileItem(all_vars, "core_num", compile_info->core_num),
            HCCL_ERROR("%s TilingPrepareForGatherV2, get core_num error", context->GetNodeName()), ge::GRAPH_FAILED);
        CHK_PRT_RET(compile_info->core_num < 1,
            HCCL_ERROR("%s GatherParseFunc, core_num should be greater than 0", context->GetNodeName()),
            ge::GRAPH_FAILED);
        CHK_PRT_RET(!ReadCompileItem(all_vars, "ub_size", compile_info->ub_size),
            HCCL_ERROR("%s TilingPrepareForGatherV2, get ub_size error", context->GetNodeName()), ge::GRAPH_FAILED);
        CHK_PRT_RET(!ReadCompileItem(all_vars, "l1_size", compile_info->l1_size),
            HCCL_ERROR("%s TilingPrepareForGatherV2, get l1_size error", context->GetNodeName()), ge::GRAPH_FAILED);
        CHK_PRT_RET(!ReadCompileItem(all_vars, "params_dsize", compile_info->params_dsize),
            HCCL_ERROR("%s TilingPrepareForGatherV2, get params_dsize error", context->GetNodeName()),
            ge::GRAPH_FAILED);
        CHK_PRT_RET(!ReadCompileItem(all_vars, "indices_dsize", compile_info->indices_dsize),
            HCCL_ERROR("%s TilingPrepareForGatherV2, get indices_dsize error", context->GetNodeName()),
            ge::GRAPH_FAILED);
        CHK_PRT_RET(!ReadCompileItem(all_vars, "is_preprocessed", compile_info->is_preprocessed),
            HCCL_ERROR("%s TilingPrepareForGatherV2, get is_preprocessed error", context->GetNodeName()),
            ge::GRAPH_FAILED);
        ReadCompileItem(all_vars, "impl_mode", compile_info->impl_mode, 0);  // optional, default 0
        CHK_PRT_RET(!ReadCompileItem(*parsed_object_cinfo, "is_gather_v2", compile_info->is_gather_v2),
            HCCL_ERROR("%s TilingPrepareForGatherV2, get is_gather_v2 error", context->GetNodeName()),
            ge::GRAPH_FAILED);
    } else {
        // DSL path: delegate to auto-tiling. (Bugfix: log typo "AotoTiling" -> "AutoTiling".)
        HCCL_DEBUG("%s will use gather AutoTiling", context->GetNodeName());
        compile_info->dsl_compile_info = TbeReduce::ParseAutoTiling(context, *parsed_object_cinfo);
        CHK_PRT_RET(compile_info->dsl_compile_info == nullptr,
            HCCL_ERROR("%s CreateGatherTilingHandler failed", context->GetNodeName()), ge::GRAPH_FAILED);

        compile_info->is_tik = false;
    }
    HCCL_DEBUG("%s TilingPrepareForGatherV2 GRAPH_SUCCESS.", context->GetNodeName());
    return ge::GRAPH_SUCCESS;
}

// register tiling interface of the GatherV2 and Gather op.
} // namespace optilingGather
