#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Huawei Technologies Co., Ltd. 2022. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import json
import contextlib
import os
import math
import typing
import re
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.platform import tf_logging as logging
from tensorflow.core.framework import attr_value_pb2
from npu_bridge.npu_cpu.npu_cpu_ops import gen_npu_cpu_ops
from npu_bridge.hccl.hccl_ops import allgather
from hccl.manage.api import create_group
from hccl.manage.api import set_ps_table_num
from npu_bridge.embedding.embedding_resource import NpuEmbeddingResource
from npu_bridge.embedding import embedding_optimizer
from npu_bridge.embedding.embedding_table_map_policy import NoneTableMapPolicy, AutoMergeTableMapPolicy
from npu_bridge.embedding.embedding_utils import EmbeddingVariableOption, CounterFilter, PaddingParamsOption, \
    CompletionKeyOption, check_common_init_params, check_each_initializer, check_init_params_type
from npu_bridge.embedding.embedding_utils import EvictOption

# Exclusive upper bound for valid table ids: 2**31 - 1 (INT32 max).
_INT32_MAX_VALUE = 2147483647
# NOTE(review): presumably the HCCL communication-group name used by the
# save/evict allgather path (_create_comm_group_for_allgather) — confirm.
_SAVE_EVICT_COMM_GROUP = "_set_evict_group_0"


@contextlib.contextmanager
def specified_ps_engine_scope():
    """Context manager pinning ops created inside it to the "PS" engine.

    Attaches the graph attribute ``_process_node_engine_id`` = b"PS" so that
    operators defined within the scope are compiled for the PS engine rather
    than the NPU (uses TensorFlow's private ``Graph._attr_scope``).
    """
    engine_attrs = {
        "_process_node_engine_id": attr_value_pb2.AttrValue(s=tf.compat.as_bytes("PS"))
    }
    graph = ops.get_default_graph()
    with graph._attr_scope(engine_attrs):
        yield


class EsInitializer:
    """Parameter holder describing how an embedding-service table is initialized.

    Carries the strategy name plus every parameter any strategy may need;
    unused parameters simply keep their defaults.
    """

    def __init__(self, initializer_mode, min=-0.01, max=0.01, constant_value=1.0, mu=0.0, sigma=1.0, seed=0):
        # Strategy name: 'random_uniform', 'truncated_normal' or 'constant'.
        self.initializer_mode = initializer_mode
        # Sampling bounds used by the uniform / truncated-normal strategies.
        self.min, self.max = min, max
        # Mean and standard deviation for the normal strategy.
        self.mu, self.sigma = mu, sigma
        # Value used by the 'constant' strategy.
        self.constant_value = constant_value
        # Random seed forwarded to the sampler.
        self.seed = seed


# Build an embedding_service table initializer description.
# initializer_mode: initialization strategy, string
# min: lower bound, float
# max: upper bound, float
# constant_value: constant used by the 'constant' strategy, float
# mu: mean of the normal distribution, float
# sigma: standard deviation of the normal distribution, float
def es_initializer(initializer_mode, min=-2.0, max=2.0, constant_value=0.0, mu=0.0, sigma=1.0, seed=0):
    """Validate the parameters and return an EsInitializer.

    Raises:
        ValueError: if initializer_mode is None or unsupported, if the
            parameters required by the chosen mode are missing or ill-typed,
            or if min is larger than max.
    """
    if initializer_mode is None:
        raise ValueError("initializer_mode can not be None.")
    # Reject unknown modes up front so misspellings fail fast instead of
    # silently passing every per-mode check below.
    if initializer_mode not in ('constant', 'random_uniform', 'truncated_normal'):
        raise ValueError("Initializer mode must be random_uniform or truncated normal or constant.")
    if initializer_mode == 'random_uniform':
        if (min is None) or (max is None) or \
                (not isinstance(min, (float, int))) or (not isinstance(max, (float, int))):
            raise ValueError("If initializer is random_uniform, min and max can not be None, must be int or float.")
    if initializer_mode == 'truncated_normal':
        if (min is None) or (max is None) or (mu is None) or (sigma is None):
            raise ValueError("If initializer is truncated_normal, min, max, mu and sigma can not be None")
        if (not isinstance(min, (float, int))) or (not isinstance(max, (float, int))) or \
                (not isinstance(mu, (float, int))) or (not isinstance(sigma, (float, int))):
            raise ValueError("If initializer is truncated_normal, min, max, mu and sigma must be int or float.")
    if initializer_mode == 'constant':
        if (constant_value is None) or (not isinstance(constant_value, (float, int))):
            raise ValueError("If initializer is constant, constant_value can not be None, must be float or int.")
    # Only compare the bounds when both are numeric; with 'constant' mode a
    # caller may pass None bounds, which previously crashed with a raw
    # TypeError on this comparison.
    if isinstance(min, (float, int)) and isinstance(max, (float, int)) and min > max:
        raise ValueError("Initializer min value can not be larger than max value.")
    return EsInitializer(initializer_mode=initializer_mode,
                         min=min,
                         max=max,
                         constant_value=constant_value,
                         mu=mu,
                         sigma=sigma,
                         seed=seed)


class ESWorker:
    """ Embedding service class. """

    def __init__(self):
        """Initialize all bookkeeping state for the embedding-service worker."""
        self._server_ip_to_ps_num = {}
        # NOTE(review): presumably populates PS deployment settings such as
        # self._ps_num / self._ps_ids read later — defined outside this chunk.
        self._update_config_params()

        self._init_embedding_hash_maps = {}
        self._init_partition_maps = {}
        # storage each ps table's params, keyed by table id
        self._table_to_embedding_dim = {}
        self._table_to_max_num = {}
        self._table_to_optimizer = {}
        self._table_to_initializer = {}
        self._table_to_slot_var_num = {}
        self._table_to_counter_filter = {}
        # feature flags toggled when the corresponding option is created
        self._use_counter_filter = False
        self._use_padding_key = False
        self._use_evict = False
        self._train_mode = True
        self._train_level = False
        self._optimizer = None
        self._init_table_flag = False

        # data-parallel ("small") table bookkeeping
        self._small_table_name_list = []
        self._small_table_variable_list = []
        self._small_table_variable_dim_list = []
        self._ps_table_count = 0
        # bidirectional name <-> id mapping for tables
        self._table_name_to_id = {}
        self._table_id_to_name = {}
        self._table_id_to_initializer = {}
        self._table_id_to_steps_to_live = {}

        self._ps_table_id_list = []
        self._existing_lookup_table_ids = []
        # storage lookup: table_id list, lookup result list, lookup key list
        self._ps_lookup_index = 0
        self._ps_table_has_lookup = []
        self._ps_table_lookup_key = []
        self._ps_table_lookup_result = []
        # host unique: recovery indices recorded per lookup
        self.key_recovery_matrix = []
        self.use_host_unique = False
        # storage all inited table names
        self._table_name_has_init = []
        # only storage all inited PS table names
        self._ps_table_name_list = []
        # feature_mapping export and import
        self._feature_mapping_name_list = []
        # now only use for adagrad accum
        self._ps_table_id_to_optimizer_mode = {}
        self._ps_table_id_to_optimizer_params = {}

        # use for small table merge
        self.user_defined_table_infos = []
        self.table_map_policy = None
        self.table_create_infos = []
        self.total_variable_table = []
        self._small_table_embedding_dim = -1
        # if all small table do not merge
        self._small_table_to_variable = {}
        self._small_table_to_multihot_lens = {}
        self._small_table_name_to_multihot_lens = {}
        self._small_table_name_to_max_vocabulary_size = {}
        self.total_embedding_count = 0
        self._npu_table_to_embedding_dim = {}
        self._need_table_merge = False
        self._only_merge_to_one_table = True
        self._small_table_init = False
        # use for counter filter
        self._table_use_counter_filter = {}

        # per-lookup counter-filter scratch values; embedding_lookup fills
        # them before emitting the op and resets them afterwards
        self._default_key_or_value = 1
        self._filter_freq = None
        self._default_key = None
        self._default_value = None

        # use for evict option
        self._steps_to_live = 0
        self._use_completion_key = False
        self._table_id_to_completion_option = {}
        self._user_group_set = set()

    # Register the initializer for one embedding-service table.
    # table_id: embedding table index, int
    # min: lower bound, float
    # max: upper bound, float
    # initializer_mode: initialization strategy, string
    # constant_value: constant used by the 'constant' strategy, float
    # mu: mean of the normal distribution, float
    # sigma: standard deviation of the normal distribution, float
    def initializer(self, table_id, initializer_mode, min=-2.0, max=2.0, constant_value=0.0, mu=0.0, sigma=1.0):
        """Operator for init initializer.

        Validates the arguments and stores an EsInitializer for ``table_id``
        in ``self._table_id_to_initializer``.

        Raises:
            ValueError: on a None/ill-typed table_id or initializer_mode,
                min > max, or an unsupported mode.
        """
        if (table_id is None) or (initializer_mode is None):
            raise ValueError("table_id and initializer_mode can not be None.")
        # NOTE(review): presumably performs the per-mode type checks on
        # min/max/constant_value/mu/sigma — defined in embedding_utils.
        check_each_initializer(initializer_mode=initializer_mode, min_value=min, max_value=max,
                               constant_value=constant_value, mu=mu, sigma=sigma)
        if (not isinstance(table_id, int)) or (table_id < 0) or (table_id >= _INT32_MAX_VALUE):
            raise ValueError("table_id value is false, must be [0, 2147483647) and int type, please check.")
        if min > max:
            raise ValueError("Initializer min value can not be larger than max value.")
        if (initializer_mode != 'constant') and (initializer_mode != 'random_uniform') and \
                (initializer_mode != 'truncated_normal'):
            raise ValueError("Initializer mode must be random_uniform or truncated normal or constant.")
        self._table_id_to_initializer[table_id] = EsInitializer(min=min,
                                                                max=max,
                                                                initializer_mode=initializer_mode,
                                                                constant_value=constant_value,
                                                                mu=mu,
                                                                sigma=sigma)

    # Build an embedding-variable option bundle: feature admission (filter),
    # padding/completion keys, eviction, storage and communication policies.
    # Currently only the feature-admission (counter filter) option is used.
    def embedding_variable_option(self, filter_option=None, padding_option=None, evict_option=None,
                                  completion_option=None, storage_option=None, feature_freezing_option=None,
                                  communication_option=None):
        """Validate each sub-option and wrap them in an EmbeddingVariableOption.

        Raises:
            ValueError: if filter_option is set but is not a CounterFilter.
            TypeError: if padding_option / completion_option / evict_option
                is set with the wrong type.
        """
        # Fixed the error text: it previously referred to padding_option while
        # checking filter_option. (ValueError kept for caller compatibility.)
        if (filter_option is not None) and (not isinstance(filter_option, CounterFilter)):
            raise ValueError("If filter_option isn't None, it must be CounterFilter type.")
        if filter_option is not None:
            self._use_counter_filter = True
        if (padding_option is not None) and (not isinstance(padding_option, PaddingParamsOption)):
            raise TypeError("If padding_option isn't None, it must be EmbeddingPaddingParamsOption type.")
        if (completion_option is not None) and (not isinstance(completion_option, CompletionKeyOption)):
            raise TypeError("If completion_option isn't None, it must be EmbeddingPaddingCompletionKeyOption type.")
        if (evict_option is not None) and (not isinstance(evict_option, EvictOption)):
            raise TypeError("When evict_option is not None, it must be EvictOption type.")
        return EmbeddingVariableOption(filter_option=filter_option, padding_option=padding_option,
                                       evict_option=evict_option, completion_option=completion_option,
                                       storage_option=storage_option, feature_freezing_option=feature_freezing_option,
                                       communication_option=communication_option)

    # new version
    # Create an embedding table.
    # @param init_vocabulary_size initial table size, int
    # @param table_id, int32
    # @param max_batch_size, int32
    # @param optimizer: EmbeddingAdamOptimizer / EmbeddingAdagradOptimizer / EmbeddingAdamwOptimizer are supported
    # @param initializer, string
    # @param embedding_dim, int32
    def get_embedding_variable(self, name, init_vocabulary_size, embedding_dim, key_dtype=tf.int64,
                               value_dtype=tf.float32, partitioner=None,
                               initializer=tf.random_uniform_initializer(minval=-0.01, maxval=0.01, seed=1234),
                               embedding_type="PS", ev_option=None, max_feature_count=None, multihot_lens=None,
                               optimizer=None, allow_merge=False, mask_zero=False):
        """ Operator for get embedding variable according to embedding type.

        "data_parallel": records and returns a small-table description dict.
        "PS": registers the table, sets up the partition map and returns the
        hash-map/table-import initialization result.
        NOTE(review): the default ``initializer`` object is created once at
        import time and shared across calls — confirm this is intended.
        NOTE(review): any other embedding_type falls through and returns None;
        presumably rejected earlier by check_common_init_params — confirm.
        """
        check_common_init_params(name=name, init_vocabulary_size=init_vocabulary_size, embedding_dim=embedding_dim,
                                 embedding_type=embedding_type, mask_zero=mask_zero)
        if embedding_type == "data_parallel":
            self._check_and_update_small_init_params(name=name, init_vocabulary_size=init_vocabulary_size,
                                                     embedding_dim=embedding_dim, multihot_lens=multihot_lens,
                                                     key_dtype=key_dtype, value_dtype=value_dtype,
                                                     allow_merge=allow_merge, initializer=initializer)
            new_small_table_info = dict(
                name=name,
                max_vocabulary_size=init_vocabulary_size,
                embedding_dim=embedding_dim,
                multihot_lens=multihot_lens,
                allow_merge=allow_merge,
                initializer=initializer)
            self._small_table_embedding_dim = embedding_dim
            self.user_defined_table_infos.append(new_small_table_info)
            return new_small_table_info
        elif embedding_type == "PS":
            table_id = self._check_and_update_ps_init_params(name=name, init_vocabulary_size=init_vocabulary_size,
                                                             embedding_dim=embedding_dim,
                                                             max_feature_count=max_feature_count, ev_option=ev_option)
            self._ps_lookup_index = self._ps_table_count
            self._table_to_embedding_dim[table_id] = embedding_dim
            self._table_to_max_num[table_id] = max_feature_count
            # storage the table id for embedding PS table
            self._ps_table_id_list.append(table_id)
            self._ps_table_name_list.append(name)
            if len(self._ps_table_id_list) > 1024:
                raise ValueError("Now only 1024 PS embedding tables can be init.")
            # rows are spread evenly across the parameter servers
            bucket_size = math.ceil(init_vocabulary_size / self._ps_num)
            if optimizer is None:
                # no optimizer: inference-only table, no slot variables
                self._train_mode = False
                self._table_to_slot_var_num[table_id] = 0
            else:
                self._check_ps_opt_and_initializer(optimizer=optimizer, initializer=initializer, table_id=table_id)
                self._set_ps_optimizer_params(table_id=table_id, optimizer=optimizer, embedding_dim=embedding_dim,
                                              max_feature_count=max_feature_count, mask_zero=mask_zero,
                                              ev_option=ev_option)
                # new train or continue train from a checkpoint
                if initializer is not None:
                    self._train_level = True
            with specified_ps_engine_scope():
                # NOTE(review): partition_num=65537 — significance of this
                # constant is not visible here; confirm against the op's docs.
                self._init_partition_maps[table_id] = \
                    gen_npu_cpu_ops.init_partition_map(ps_num=ops.convert_to_tensor(self._ps_num),
                                                       ps_ids=ops.convert_to_tensor(self._ps_ids),
                                                       partition_num=65537)
                self._init_partition_maps.get(table_id)._set_attr("_embedding_dim",
                                                                  attr_value_pb2.AttrValue(i=embedding_dim))
                self._init_partition_maps.get(table_id)._set_attr("_max_key_num",
                                                                  attr_value_pb2.AttrValue(i=max_feature_count))
                return self._init_hashmap_and_table_import(bucket_size, table_id, embedding_dim, ev_option)

    # new version
    # Look up embedding values for the given keys.
    # @param name str
    # @param ids int64
    # @return values float32
    def embedding_lookup(self, name: str, ids: typing.Any, actual_keys_input=None, unique_indices=None, key_count=None):
        """ Operator for look up in embedding table.

        If the table has a counter filter, the per-call filter scratch
        attributes on self are populated before the lookup op is emitted and
        reset afterwards. Supplying both actual_keys_input and unique_indices
        enables the host-unique path; key_count additionally enables counter
        filtering on that path.
        """
        table_id = self._check_ps_lookup_params(name=name, ids=ids)
        if self._table_to_counter_filter.get(table_id) is not None:
            filter_mode = "counter"
            self._filter_freq = self._table_to_counter_filter.get(table_id).filter_freq
            # map the bool option onto the 1/0 flag used downstream
            self._default_key_or_value = 1 if self._table_to_counter_filter.get(table_id). \
                                                  default_key_or_value is True else 0
            self._default_key = self._table_to_counter_filter.get(table_id).default_key
            self._default_value = self._table_to_counter_filter.get(table_id).default_value
        else:
            filter_mode = "no_filter"
            # useless placeholders: no filtering happens in this mode
            self._filter_freq = 1
            self._default_key_or_value = 1
            self._default_key = 0
            self._default_value = -1
        # whether to use host unique to improve performance
        self.use_host_unique = False
        use_counter_filter = False
        if (actual_keys_input is not None) and (unique_indices is not None):
            self.use_host_unique = True
            if key_count is not None:
                use_counter_filter = True

        result = self._call_lookup_op(table_id=table_id, ids=ids, actual_keys_input=actual_keys_input,
                                      unique_indices=unique_indices, filter_mode=filter_mode,
                                      use_counter_filter=use_counter_filter, key_count=key_count)

        # reset the per-call filter scratch values for the next lookup
        self._filter_freq = None
        self._default_key_or_value = 1
        self._default_key = None
        self._default_value = None
        if (self._ps_lookup_index != 0) or (self._existing_lookup_table_ids.count(table_id) != 0):
            # record keys/results so embedding_update can build the gradients
            self._ps_table_has_lookup.append(table_id)
            self._ps_table_lookup_key.append(ids)
            self._ps_table_lookup_result.append(result)
            self._ps_lookup_index = self._ps_lookup_index - 1
            if self.use_host_unique:
                self.key_recovery_matrix.append(unique_indices)
        # restore table id that has called lookup, if this table call lookup again, key and values must be stored.
        self._existing_lookup_table_ids.append(table_id)
        return result

    # new version
    # Apply gradient updates to every PS table that was looked up.
    # @param loss: training loss tensor to differentiate
    def embedding_update(self, loss):
        """ Operator for update in embedding table.

        Consumes the bookkeeping recorded by embedding_lookup (keys, results,
        table ids), differentiates ``loss`` w.r.t. the lookup results and
        applies the sparse gradients through each table's optimizer.
        Returns the list of update ops.
        """
        params = self._ps_table_lookup_result
        input_ids_list = self._ps_table_lookup_key
        table_ids = self._ps_table_has_lookup
        self._check_update_params(params, input_ids_list, table_ids, loss)
        # Call HCCL python API
        set_ps_table_num(self._ps_table_count)
        # normalize bare values into one-element lists
        if (not isinstance(params, (list, tuple)) and not isinstance(table_ids, (list, tuple))
                and not isinstance(input_ids_list, (list, tuple))):
            params = [params]
            table_ids = [table_ids]
            input_ids_list = [input_ids_list]
        for table_id in table_ids:
            if table_id not in self._ps_table_id_list:
                raise ValueError("this table has not yet initialized.")
        if (len(params) != len(table_ids)) or (len(params) != len(input_ids_list)) \
                or (len(table_ids) != len(input_ids_list)):
            raise ValueError("The length of params, table_ids, input_ids_list should be equal.")
        embedding_grads = tf.gradients(loss, params)
        update_op = []
        # clear the per-step lookup records so the next step starts fresh
        self._ps_table_lookup_result = []
        self._ps_table_lookup_key = []
        self._ps_table_has_lookup = []

        if self.use_host_unique:
            # recover per-sample keys from the recorded unique indices
            # NOTE(review): assumes key_recovery_matrix entries align with
            # table order — confirm against embedding_lookup.
            key_before_unique = []
            for i in range(len(table_ids)):
                key_before_unique.append(tf.gather(input_ids_list[i], self.key_recovery_matrix[i]))
        with specified_ps_engine_scope():
            for i in range(len(table_ids)):
                # a lookup result that does not contribute to loss has no grad
                if embedding_grads[i] is None:
                    continue
                if self.use_host_unique:
                    params_grads = [tf.IndexedSlices(embedding_grads[i], key_before_unique[i],
                                                     dense_shape=params[i].shape)]
                else:
                    params_grads = [tf.IndexedSlices(embedding_grads[i], input_ids_list[i],
                                                     dense_shape=params[i].shape)]
                var_refs = [NpuEmbeddingResource(table_ids[i])]
                update_op.append(
                    self._table_to_optimizer.get(table_ids[i]).apply_gradients(list(zip(params_grads, var_refs))))
            return update_op

    def padding_param(self, padding_key, mask=True):
        """Enable padding-key support and build a PaddingParamsOption.

        Args:
            padding_key: int, the key reserved for padding.
            mask: bool, whether padding positions are masked.

        Raises:
            TypeError: if padding_key is not int or mask is not bool.
        """
        if not isinstance(padding_key, int):
            raise TypeError("padding_key must be int, please check.")
        if not isinstance(mask, bool):
            raise TypeError("mask must be bool, please check.")
        # Record that padding keys are in use before handing back the option.
        self._use_padding_key = True
        option = PaddingParamsOption(padding_key=padding_key, mask=mask)
        return option

    def completion_key(self, completion_key, mask=True):
        """Enable completion-key support and build a CompletionKeyOption.

        Args:
            completion_key: int, the completion key value.
            mask: bool, whether the completion key is masked.

        Raises:
            TypeError: if completion_key is not int or mask is not bool.
        """
        if not isinstance(completion_key, int):
            raise TypeError("completion_key must be int, please check.")
        if not isinstance(mask, bool):
            raise TypeError("mask must be bool, please check.")
        self._use_completion_key = True
        # The option carries the mask as a 1/0 flag rather than a bool.
        return CompletionKeyOption(completion_key=completion_key, mask=int(mask))

    def counter_filter(self, filter_freq, default_key=None, default_value=None):
        """Build a CounterFilter feature-admission option.

        Exactly one of default_key / default_value must be provided; the
        ``default_key_or_value`` flag on the result records which one is
        meaningful (presumably selecting the fallback used for unadmitted
        features — confirm against the lookup op).

        Raises:
            TypeError: if filter_freq / default_key / default_value has a
                wrong type.
            ValueError: if filter_freq is negative, or default_key and
                default_value are both set or both None.
        """
        if not isinstance(filter_freq, int):
            raise TypeError("filter_freq must be int, please check.")
        if filter_freq < 0:
            # message fixed: original read "must can not be smaller than 0"
            raise ValueError("filter_freq can not be smaller than 0.")
        if (default_key is None) and (default_value is None):
            raise ValueError("default_key and default_value can not be both None.")
        if (default_key is not None) and (default_value is not None):
            raise ValueError("default_key and default_value can not be both set.")
        if default_key is None and (not isinstance(default_value, (int, float))):
            raise TypeError("When default_value is not None, it must be float or int, please check.")
        if default_value is None and (not isinstance(default_key, int)):
            raise TypeError("When default_key is not None, it must be int, please check.")
        self._use_counter_filter = True
        # The unused side of the key/value pair is filled with a placeholder
        # (0 or 1); default_key_or_value selects the meaningful side.
        if default_key is None:
            return CounterFilter(filter_freq=filter_freq, default_key_or_value=False,
                                 default_key=0, default_value=default_value)
        else:
            return CounterFilter(filter_freq=filter_freq, default_key_or_value=True,
                                 default_key=default_key, default_value=1)

    def evict_option(self, steps_to_live):
        """Build an EvictOption enabling feature eviction after steps_to_live.

        Raises:
            TypeError: if steps_to_live is not int.
            ValueError: if steps_to_live is not positive.
        """
        if not isinstance(steps_to_live, int):
            raise TypeError("steps_to_live must be int, please check.")
        if steps_to_live <= 0:
            # message fixed: original contained a duplicated "must must"
            raise ValueError("steps_to_live must be greater than 0.")
        self._use_evict = True
        return EvictOption(steps_to_live=steps_to_live)

    def init_table(self, table_map_policy=AutoMergeTableMapPolicy()):
        """Create variables for all registered data-parallel small tables.

        NOTE(review): the default policy object is built once at import time
        and shared by every call — confirm this is intended.
        """
        if len(self.user_defined_table_infos) == 0:
            raise ValueError("small table has not been created.")
        self.total_embedding_count = 0
        # Only in train scene, and multi-device, with this API be first called
        # FIX cbg bug
        # NOTE(review): _small_table_init is never set True in this chunk, so
        # the first-call guard relies on code outside this view — confirm.
        if (os.environ.get("RANK_SIZE") is not None) and (int(os.environ.get("RANK_SIZE")) > 1) and \
            (self._small_table_init is False):
            rank_size = int(os.environ.get("RANK_SIZE"))
            rank_list = []
            for i in range(rank_size):
                rank_list.append(i)
            # HCCL group spanning every rank, used for small-table collectives
            create_group("user_group_fm", rank_size, rank_list)
        self._create_variable_for_small_table(table_map_policy)

    # new version
    def embeddings_lookup(self, ids_list, name=None):
        """Look up embeddings across the data-parallel small tables.

        Dispatches to one of three paths: v1 when no table merge is needed,
        v2 when everything merged into one table, v3 for multiple merged
        tables. Raises ValueError on a None ids_list, out-of-range rank env
        settings, a missing init_table() call, or a size mismatch.
        """
        if ids_list is None:
            raise ValueError("ids_list can not be None.")
        env_dist = os.environ
        rank_size = int(env_dist.get("RANK_SIZE"))
        rank_id = int(env_dist.get("RANK_ID"))
        if rank_size < 1:
            raise ValueError('Rank size from env must be at least 1, 'f' Received: {rank_size}.')
        if rank_id < 0 or rank_id >= rank_size:
            raise ValueError('Rank id from env must be at least 0, and smaller than Rank Size.'
                             'But Rank id 'f' Received: {rank_id}.')

        ids_list_shape_list = ids_list.get_shape().as_list()
        if not self._need_table_merge:
            return self._small_table_lookup_v1(name, rank_id, rank_size, ids_list)

        if self.total_embedding_count != len(self.table_create_infos) or self.total_embedding_count == 0:
            raise ValueError("Must init_table() first!")
        # unpack the merge policy's slot <-> table mappings
        (in_slot_size_group, slot_to_table, table_to_input_group, \
         table_to_slot, table_to_output_slots, in_slot_vocabulary_size_group, table_to_vocabulary_slots) = \
            (self.table_map_policy.in_slot_size_group, self.table_map_policy.slot_to_table, \
             self.table_map_policy.table_to_input_groups, self.table_map_policy.table_to_slot, \
             self.table_map_policy.table_to_output_slots, self.table_map_policy.in_slot_vocabulary_size_group, \
             self.table_map_policy.table_to_vocabulary_slots)

        # the second dimension of ids_list must cover every input slot
        total_in_slot_num = 0
        for in_slot_size in in_slot_size_group:
            total_in_slot_num += in_slot_size
        if ids_list_shape_list[1] != total_in_slot_num:
            raise ValueError("size of ids_list is not the same as all small tables.")

        if self.total_embedding_count == 1:
            return self._small_table_lookup_v2(rank_id, rank_size, in_slot_size_group,
                                               ids_list, table_to_output_slots, table_to_slot,
                                               in_slot_vocabulary_size_group, ids_list_shape_list[0])

        return self._small_table_lookup_v3(rank_id, rank_size, ids_list, in_slot_size_group, slot_to_table,
                                           table_to_input_group, table_to_output_slots, table_to_slot,
                                           table_to_vocabulary_slots, ids_list_shape_list[0])

    def save_embedding(self, name: str, path: str, step=None):
        """ Operator for save values in table_id embedding table.

        Exports one PS table (values only, only_var_flag=True) as a "bin"
        file under ``path``. Only rank 0 records the real global step; every
        other rank exports with step -1.
        NOTE(review): step=None would make tf.cast fail here, unlike
        restore_embedding which tolerates None — confirm callers always pass
        a step.
        (Removed the unused ``env_dist = os.environ`` local.)
        """
        self._check_save_or_restore_params(name=name, path=path)
        table_id = self._table_name_to_id.get(name)
        step = tf.cast(step, dtype=tf.int64)
        # only rank 0 keeps the real step value
        if int(os.environ.get("RANK_ID")) != 0:
            step = -1
        self._create_comm_group_for_allgather()
        with specified_ps_engine_scope():
            file_path_tensor = ops.convert_to_tensor(path, name="file_path")
            # NOTE(review): ps_id=-1 presumably addresses all parameter
            # servers — confirm against the export op's documentation.
            ps_id_tensor = ops.convert_to_tensor(-1, name="ps_id")
            table_id_tensor = ops.convert_to_tensor([table_id], name="table_id")
            step_to_live = self._table_id_to_steps_to_live.get(table_id, 0)
            embedding_table_export = \
                gen_npu_cpu_ops.embedding_table_export(file_path=file_path_tensor,
                                                       ps_id=ps_id_tensor,
                                                       table_id=table_id_tensor,
                                                       embedding_dim=[self._table_to_embedding_dim.get(table_id)],
                                                       value_total_len=[self._table_to_embedding_dim.get(table_id)],
                                                       export_mode="all",
                                                       only_var_flag=True,
                                                       file_type="bin",
                                                       table_name=[name],
                                                       global_step=step,
                                                       steps_to_live_list=[step_to_live])
            return tf.group([embedding_table_export])

    def save_embeddings(self, path: str, step=None):
        """ Operator for save values in all embedding tables.

        Exports the feature mappings of data-parallel small tables (if any),
        then all PS tables. When only small tables exist, returns just the
        feature-mapping export ops.
        NOTE(review): step=None would make tf.cast fail — confirm callers
        always pass a step.
        """
        if len(self._ps_table_name_list) != 0:
            self._check_save_or_restore_params_v2(path=path, save_flag=True)
        feature_mapping_export_list = None
        step = tf.cast(step, dtype=tf.int64)
        if len(self._small_table_variable_list) != 0:
            feature_mapping_export_list = self._call_feature_mapping_export_op(path, True, step)
            if self._ps_table_count == 0:
                # small tables only: nothing further to export
                return feature_mapping_export_list
        # only rank 0 keeps the real step value
        if int(os.environ.get("RANK_ID")) != 0:
            step = -1
        self._create_comm_group_for_allgather()
        return self._call_embeddings_export_op(path=path, feature_mapping_export_list=feature_mapping_export_list,
                                               step=step)

    def restore_embedding(self, name: str, path: str, step=None):
        """Restore one PS table's values (only_var_flag=True) from ``path``.

        ``step`` may be None; None and every non-zero rank import with
        step -1.
        """
        self._check_save_or_restore_params(name=name, path=path)
        table_id = self._table_name_to_id.get(name)
        if (step is None) or ((os.environ.get("RANK_ID") is not None) and (int(os.environ.get("RANK_ID")) != 0)):
            step = -1
        else:
            step = tf.cast(step, dtype=tf.int64)
        self._create_comm_group_for_allgather()
        with specified_ps_engine_scope():
            embedding_table_import = \
                gen_npu_cpu_ops.embedding_table_import(ps_id=ops.convert_to_tensor(-1),
                                                       file_path=ops.convert_to_tensor(path),
                                                       table_id=ops.convert_to_tensor([table_id]),
                                                       global_step=step,
                                                       embedding_dim=[self._table_to_embedding_dim.get(table_id)],
                                                       value_total_len=[self._table_to_embedding_dim.get(table_id)],
                                                       only_var_flag=True,
                                                       file_type="bin",
                                                       table_name=[name])
            return tf.group([embedding_table_import])

    def restore_embeddings(self, path: str, step=None):
        """Restore all PS tables and, if present, small-table feature mappings.

        Returns:
            - the feature-mapping import ops alone when no PS table exists;
            - a grouped PS import op when no small table exists;
            - (ps_import_op, feature_mapping_import_list) when both exist.
        """
        if len(self._ps_table_name_list) != 0:
            self._check_save_or_restore_params_v2(path=path, save_flag=False)
        if step is None:
            step = -1
        else:
            step = tf.cast(step, dtype=tf.int64)
        if len(self._small_table_variable_list) != 0:
            feature_mapping_import_list = self._call_feature_mapping_import_op(path=path, import_value=True, step=step)
            if self._ps_table_count == 0:
                # small tables only: nothing further to import
                return feature_mapping_import_list
        # every non-zero rank imports with step -1
        if (os.environ.get("RANK_ID") is not None) and (int(os.environ.get("RANK_ID")) != 0):
            step = -1
        self._create_comm_group_for_allgather()
        with specified_ps_engine_scope():
            table_id_list = []
            embedding_dim_list = []
            for table_id in self._ps_table_id_list:
                table_id_list.append(table_id)
                embedding_dim_list.append(self._table_to_embedding_dim.get(table_id))
            embedding_table_import = \
                gen_npu_cpu_ops.embedding_table_import(ps_id=ops.convert_to_tensor(-1),
                                                       file_path=ops.convert_to_tensor(path),
                                                       table_id=ops.convert_to_tensor(table_id_list),
                                                       global_step=step,
                                                       embedding_dim=embedding_dim_list,
                                                       value_total_len=embedding_dim_list,
                                                       only_var_flag=True,
                                                       file_type="bin",
                                                       table_name=self._ps_table_name_list)
            if len(self._small_table_variable_list) == 0:
                return tf.group([embedding_table_import])
        # both PS and small tables present (import list was built above)
        return embedding_table_import, feature_mapping_import_list

    def save_checkpoint(self, name: str, path: str, save_filtered_features=False, step=None):
        """ Operator for save values and optimizer params in table_id embedding table.

        Args:
            name: embedding table name.
            path: export directory; must not end with '/'.
            save_filtered_features: also export features dropped by the counter filter.
            step: global step stamped on the export; None means "no step" (-1).

        Returns:
            A grouped op that exports the table and then its compute vars.
        """
        self._check_save_or_restore_params(name=name, path=path)
        if not isinstance(save_filtered_features, bool):
            raise TypeError("save_filtered_features must be bool.")
        # BUGFIX: step=None used to crash inside tf.cast and an unset RANK_ID crashed
        # inside int(); handle both the same way restore_checkpoint does.
        if (step is None) or ((os.environ.get("RANK_ID") is not None) and (int(os.environ.get("RANK_ID")) != 0)):
            step = -1
        else:
            step = tf.cast(step, dtype=tf.int64)
        self._create_comm_group_for_allgather()
        table_id = self._table_name_to_id.get(name)
        with specified_ps_engine_scope():
            file_path_tensor = ops.convert_to_tensor(path, name="file_path")
            ps_id_tensor = ops.convert_to_tensor(-1, name="ps_id")
            table_id_tensor = ops.convert_to_tensor([table_id], name="table_id")
            step_to_live = self._table_id_to_steps_to_live.get(table_id, 0)
            # value_total_len covers embedding + optimizer slots (+2 bookkeeping values).
            embedding_table_export = \
                gen_npu_cpu_ops.embedding_table_export(file_path=file_path_tensor,
                                                       ps_id=ps_id_tensor,
                                                       table_id=table_id_tensor,
                                                       embedding_dim=[self._table_to_embedding_dim.get(table_id)],
                                                       value_total_len=[self._table_to_embedding_dim.get(table_id) *
                                                                        (self._table_to_slot_var_num.get(
                                                                            table_id) + 1) + 2],
                                                       export_mode="all",
                                                       only_var_flag=False,
                                                       file_type="bin",
                                                       table_name=[name],
                                                       filter_export_flag=save_filtered_features,
                                                       global_step=step,
                                                       steps_to_live_list=[step_to_live])
            # Compute vars are exported only after the table export finishes.
            with tf.control_dependencies([embedding_table_export]):
                embedding_compute_var_export = \
                    gen_npu_cpu_ops.embedding_compute_var_export(file_path=file_path_tensor,
                                                                 ps_id=ps_id_tensor,
                                                                 table_id=table_id_tensor,
                                                                 global_step=step,
                                                                 table_name=[name])
                return tf.group([embedding_compute_var_export])

    def save_checkpoints(self, path: str, save_filtered_features=False, export_feature_mapping=False, step=None):
        """ Operator for save values and optimizer params in all embedding tables.

        Args:
            path: export directory; must not end with '/'.
            save_filtered_features: also export features dropped by the counter filter.
            export_feature_mapping: also export small-table feature mappings.
            step: global step stamped on the export; None means "no step" (-1).

        Returns:
            The feature-mapping export ops alone when there are no PS tables,
            otherwise whatever _call_ckpts_export_op builds.
        """
        if len(self._ps_table_name_list) != 0:
            self._check_save_or_restore_params_v2(path=path, save_flag=True)
        if not isinstance(save_filtered_features, bool):
            raise TypeError("save_filtered_features must be bool.")
        feature_mapping_export_list = None
        # BUGFIX: step=None used to crash inside tf.cast; -1 means "no step".
        if step is None:
            step = -1
        else:
            step = tf.cast(step, dtype=tf.int64)
        if export_feature_mapping or len(self._small_table_variable_list) != 0:
            feature_mapping_export_list = self._call_feature_mapping_export_op(path, False, step)
            if self._ps_table_count == 0:
                return feature_mapping_export_list
        # BUGFIX: tolerate an unset RANK_ID (treated as rank 0), like the restore paths.
        if (os.environ.get("RANK_ID") is not None) and (int(os.environ.get("RANK_ID")) != 0):
            step = -1
        self._create_comm_group_for_allgather()
        return self._call_ckpts_export_op(path=path, feature_mapping_export_list=feature_mapping_export_list,
                                          save_filtered_features=save_filtered_features, step=step)

    def restore_checkpoint(self, name: str, path: str, step=None):
        """ Operator for restore values and optimizer params in table_id embedding table. """
        self._check_save_or_restore_params(name=name, path=path)
        rank_id = os.environ.get("RANK_ID")
        # -1 means "no step"; non-zero ranks always read the unstamped snapshot.
        if (step is None) or (rank_id is not None and int(rank_id) != 0):
            step = -1
        else:
            step = tf.cast(step, dtype=tf.int64)
        self._create_comm_group_for_allgather()
        table_id = self._table_name_to_id.get(name)
        with specified_ps_engine_scope():
            path_tensor = ops.convert_to_tensor(path, name="file_path")
            ps_tensor = ops.convert_to_tensor(-1, name="ps_id")
            tid_tensor = ops.convert_to_tensor([table_id], name="table_id")
            dim = self._table_to_embedding_dim.get(table_id)
            total_len = dim * (self._table_to_slot_var_num.get(table_id) + 1) + 2
            table_import = gen_npu_cpu_ops.embedding_table_import(
                ps_id=ps_tensor,
                file_path=path_tensor,
                table_id=tid_tensor,
                global_step=step,
                embedding_dim=[dim],
                value_total_len=[total_len],
                only_var_flag=False,
                file_type="bin",
                table_name=[name])
            # Compute-var import must run after the table import completes.
            with tf.control_dependencies([table_import]):
                compute_var_import = gen_npu_cpu_ops.embedding_compute_var_import(
                    file_path=path_tensor,
                    ps_id=ps_tensor,
                    table_id=tid_tensor,
                    global_step=step,
                    table_name=[name])
                return tf.group([compute_var_import])

    def restore_checkpoints(self, path: str, import_feature_mapping=False, step=None):
        """ Operator for restore values and optimizer params in all embedding tables.

        Args:
            path: import directory; must not end with '/'.
            import_feature_mapping: also import small-table feature mappings.
            step: global step of the checkpoint to read; None means "no step" (-1).

        Returns:
            The feature-mapping import ops alone (no PS tables), a grouped
            compute-var import (no feature mapping requested), or both.
        """
        if len(self._ps_table_name_list) != 0:
            self._check_save_or_restore_params_v2(path=path, save_flag=False)
        if step is None:
            step = -1
        else:
            step = tf.cast(step, dtype=tf.int64)
        feature_mapping_import_list = None
        if import_feature_mapping or len(self._small_table_variable_list) != 0:
            feature_mapping_import_list = self._call_feature_mapping_import_op(path=path, import_value=False,
                                                                               step=step)
            if self._ps_table_count == 0:
                return feature_mapping_import_list
        if (os.environ.get("RANK_ID") is not None) and (int(os.environ.get("RANK_ID")) != 0):
            step = -1
        self._create_comm_group_for_allgather()
        with specified_ps_engine_scope():
            table_id_list = []
            embedding_dim_list = []
            value_total_len_list = []
            for table_id in self._ps_table_id_list:
                table_id_list.append(table_id)
                embedding_dim_list.append(self._table_to_embedding_dim.get(table_id))
                # embedding + optimizer slots (+2 bookkeeping values)
                value_total_len_list.append(self._table_to_embedding_dim.get(table_id) *
                                            (self._table_to_slot_var_num.get(table_id) + 1) + 2)
            file_path_tensor = ops.convert_to_tensor(path, name="file_path")
            ps_id_tensor = ops.convert_to_tensor(-1, name="ps_id")
            table_id_tensor = ops.convert_to_tensor(table_id_list, name="table_id")
            embedding_table_import = \
                gen_npu_cpu_ops.embedding_table_import(ps_id=ps_id_tensor,
                                                       file_path=file_path_tensor,
                                                       table_id=table_id_tensor,
                                                       global_step=step,
                                                       embedding_dim=embedding_dim_list,
                                                       value_total_len=value_total_len_list,
                                                       only_var_flag=False,
                                                       file_type="bin",
                                                       table_name=self._ps_table_name_list)
            with tf.control_dependencies([embedding_table_import]):
                embedding_compute_var_import = \
                    gen_npu_cpu_ops.embedding_compute_var_import(file_path=file_path_tensor,
                                                                 ps_id=ps_id_tensor,
                                                                 table_id=table_id_tensor,
                                                                 global_step=step,
                                                                 table_name=self._ps_table_name_list)
                # BUGFIX: branch on whether mapping-import ops were actually built.
                # The old `len(self._small_table_variable_list) == 0` test silently
                # dropped the ops created when import_feature_mapping=True while
                # there were no small tables.
                if feature_mapping_import_list is None:
                    return tf.group([embedding_compute_var_import])
        return embedding_compute_var_import, feature_mapping_import_list

    def save_incremental_embedding(self, name: str, path: str, step=None):
        """ Operator for save incremental values in table_id embedding table.

        Args:
            name: embedding table name.
            path: export directory; must not end with '/'.
            step: global step stamped on the export; None means "no step" (-1).

        Returns:
            A grouped incremental ("new" mode) export op.
        """
        self._check_save_or_restore_params(name=name, path=path)
        table_id = self._table_name_to_id.get(name)
        # BUGFIX: step=None used to crash inside tf.cast and an unset RANK_ID crashed
        # inside int(); handle both the same way restore_incremental_embedding does.
        if (step is None) or ((os.environ.get("RANK_ID") is not None) and (int(os.environ.get("RANK_ID")) != 0)):
            step = -1
        else:
            step = tf.cast(step, dtype=tf.int64)
        self._create_comm_group_for_allgather()
        with specified_ps_engine_scope():
            file_path_tensor = ops.convert_to_tensor(path, name="file_path")
            ps_id_tensor = ops.convert_to_tensor(-1, name="ps_id")
            table_id_tensor = ops.convert_to_tensor([table_id], name="table_id")
            step_to_live = self._table_id_to_steps_to_live.get(table_id, 0)
            embedding_table_export = \
                gen_npu_cpu_ops.embedding_table_export(file_path=file_path_tensor,
                                                       ps_id=ps_id_tensor,
                                                       table_id=table_id_tensor,
                                                       embedding_dim=[self._table_to_embedding_dim.get(table_id)],
                                                       value_total_len=[self._table_to_embedding_dim.get(table_id)],
                                                       export_mode="new",
                                                       only_var_flag=True,
                                                       file_type="bin",
                                                       table_name=[name],
                                                       global_step=step,
                                                       steps_to_live_list=[step_to_live])
            return tf.group([embedding_table_export])

    def save_incremental_embeddings(self, path: str, step=None):
        """ Operator for save incremental values in all embedding tables.

        Args:
            path: export directory; must not end with '/'.
            step: global step stamped on the export; None means "no step" (-1).

        Returns:
            A grouped incremental ("new" mode) export op over every PS table.
        """
        self._check_save_or_restore_params_v2(path=path, save_flag=True)
        # BUGFIX: step=None used to crash inside tf.cast and an unset RANK_ID crashed
        # inside int(); handle both the same way restore_incremental_embeddings does.
        if (step is None) or ((os.environ.get("RANK_ID") is not None) and (int(os.environ.get("RANK_ID")) != 0)):
            step = -1
        else:
            step = tf.cast(step, dtype=tf.int64)
        self._create_comm_group_for_allgather()
        with specified_ps_engine_scope():
            table_id_list = []
            embedding_dim_list = []
            steps_list = []
            for table_id in self._ps_table_id_list:
                table_id_list.append(table_id)
                embedding_dim_list.append(self._table_to_embedding_dim.get(table_id))
                steps_list.append(self._table_id_to_steps_to_live.get(table_id, 0))
            file_path_tensor = ops.convert_to_tensor(path, name="file_path")
            ps_id_tensor = ops.convert_to_tensor(-1, name="ps_id")
            table_id_tensor = ops.convert_to_tensor(table_id_list, name="table_id")
            embedding_table_export = \
                gen_npu_cpu_ops.embedding_table_export(file_path=file_path_tensor,
                                                       ps_id=ps_id_tensor,
                                                       table_id=table_id_tensor,
                                                       embedding_dim=embedding_dim_list,
                                                       value_total_len=embedding_dim_list,
                                                       export_mode="new",
                                                       only_var_flag=True,
                                                       file_type="bin",
                                                       table_name=self._ps_table_name_list,
                                                       global_step=step,
                                                       steps_to_live_list=steps_list)
            return tf.group([embedding_table_export])

    def restore_incremental_embedding(self, name: str, path: str, step=None):
        """ Operator for restore incremental values in table_id embedding table. """
        self._check_save_or_restore_params(name=name, path=path)
        rank_id = os.environ.get("RANK_ID")
        # -1 means "no step"; non-zero ranks always read the unstamped snapshot.
        if (step is None) or (rank_id is not None and int(rank_id) != 0):
            step = -1
        else:
            step = tf.cast(step, dtype=tf.int64)
        self._create_comm_group_for_allgather()
        table_id = self._table_name_to_id.get(name)
        with specified_ps_engine_scope():
            dim = self._table_to_embedding_dim.get(table_id)
            incremental_import = gen_npu_cpu_ops.embedding_table_import(
                ps_id=ops.convert_to_tensor(-1),
                file_path=ops.convert_to_tensor(path),
                table_id=ops.convert_to_tensor([table_id]),
                global_step=step,
                embedding_dim=[dim],
                value_total_len=[dim],
                only_var_flag=True,
                file_type="bin",
                table_name=[name])
            return tf.group([incremental_import])

    def restore_incremental_embeddings(self, path: str, step=None):
        """ Operator for restore incremental values in all embedding tables. """
        self._check_save_or_restore_params_v2(path=path, save_flag=False)
        rank_id = os.environ.get("RANK_ID")
        # -1 means "no step"; non-zero ranks always read the unstamped snapshot.
        if (step is None) or (rank_id is not None and int(rank_id) != 0):
            step = -1
        else:
            step = tf.cast(step, dtype=tf.int64)
        self._create_comm_group_for_allgather()
        with specified_ps_engine_scope():
            all_table_ids = list(self._ps_table_id_list)
            all_dims = [self._table_to_embedding_dim.get(tid) for tid in all_table_ids]
            incremental_import = gen_npu_cpu_ops.embedding_table_import(
                ps_id=ops.convert_to_tensor(-1),
                file_path=ops.convert_to_tensor(path),
                table_id=ops.convert_to_tensor(all_table_ids),
                global_step=step,
                embedding_dim=all_dims,
                value_total_len=all_dims,
                only_var_flag=True,
                file_type="bin",
                table_name=self._ps_table_name_list)
            return tf.group([incremental_import])

    def embedding_evict(self, steps_to_live: int):
        """ Operator for evict values in all embedding tables. """
        if not isinstance(steps_to_live, int):
            raise ValueError("steps_to_live must be int.")
        if steps_to_live <= 0:
            raise ValueError("steps_to_live must be greater than zero.")
        self._steps_to_live = steps_to_live
        self._create_comm_group_for_allgather()
        with specified_ps_engine_scope():
            all_table_ids = list(self._ps_table_id_list)
            evict_op = gen_npu_cpu_ops.embedding_table_evict(
                var_handle=ops.convert_to_tensor(all_table_ids),
                global_step=1,
                steps_to_live=self._steps_to_live)
            return tf.group([evict_op])

    def _update_config_params(self):
        env_dist = os.environ
        cluster_config_file = env_dist.get("ESCLUSTER_CONFIG_PATH")
        if cluster_config_file is None:
            raise ValueError("EsClusterConfig env is null, check your env config.")
        with open(cluster_config_file, encoding='utf-8') as b:
            es_cluster_config_json = json.load(b)
            self._ps_num = int(es_cluster_config_json["psNum"])
            self._ps_ids = []
            self._ps_ids_list = es_cluster_config_json["psCluster"]
            for each_ps in self._ps_ids_list:
                self._server_ip_to_ps_num[each_ps["ctrlPanel"]["ipaddr"]] = 0
            for each_ps in self._ps_ids_list:
                self._ps_ids.append(each_ps["id"])
                ctrl_panel = each_ps["ctrlPanel"]
                self._server_ip_to_ps_num[ctrl_panel["ipaddr"]] += 1
        self._check_max_ps_num()

    def _check_max_ps_num(self):
        for each_server_ps_num in self._server_ip_to_ps_num:
            if self._server_ip_to_ps_num[each_server_ps_num] > 4:
                raise ValueError("PS num of one server can not exceed 4, please check config params.")

    def _check_and_update_small_init_params(self, name, init_vocabulary_size, embedding_dim, multihot_lens, key_dtype,
                                            value_dtype, allow_merge, initializer):
        """Validate init parameters of a small (device-side) embedding table and register its name.

        Raises:
            ValueError: duplicate table name, missing or non-positive sizes, missing
                dtypes or initializer, or allow_merge=True (currently unsupported).
        """
        # Register the table name first; re-registering the same name is an error.
        if name not in self._small_table_name_list:
            self._small_table_name_list.append(name)
            self._feature_mapping_name_list.append(name)
        else:
            raise ValueError("This small table has been initialized.")
        if (init_vocabulary_size is None) or (embedding_dim is None) or (multihot_lens is None):
            raise ValueError("max_vocabulary_size or embedding_dim or multihot_lens can not be None.")
        if (key_dtype is None) or (value_dtype is None):
            raise ValueError("key_dtype and value_dtype can not be None.")
        check_init_params_type(key_dtype=key_dtype, value_dtype=value_dtype,
                               init_vocabulary_size=init_vocabulary_size, embedding_dim=embedding_dim,
                               multihot_lens=multihot_lens, allow_merge=allow_merge)
        if init_vocabulary_size <= 0 or embedding_dim <= 0 or multihot_lens <= 0:
            raise ValueError("init_vocabulary_size, embedding_dim, multihot_lens must be greater than zero.")
        if initializer is None:
            raise ValueError("Initializer can not be None.")
        if allow_merge:
            raise ValueError("allow_merge do not support now.")
            # NOTE(review): unreachable — this assignment sits after the raise above.
            self._need_table_merge = True
        # Translate an EsInitializer config into the matching TF initializer.
        # NOTE(review): `table_id` is never defined in this method, so every branch
        # below raises NameError when an EsInitializer is passed — confirm the
        # intended key for _table_id_to_initializer before relying on this path.
        if isinstance(initializer, EsInitializer):
            if initializer.initializer_mode == "random_uniform":
                self._table_id_to_initializer[table_id] = \
                    tf.random_uniform_initializer(minval=initializer.min, maxval=initializer.max,
                                                  seed=initializer.seed, dtype=value_dtype)
            elif initializer.initializer_mode == "truncated_normal":
                self._table_id_to_initializer[table_id] = \
                    tf.truncated_normal_initializer(stddev=initializer.stddev, mean=initializer.mean,
                                                    seed=initializer.seed, dtype=value_dtype)
            elif initializer.initializer_mode == "constant":
                self._table_id_to_initializer[table_id] = \
                    tf.constant_initializer(value=initializer.value, dtype=value_dtype)
        elif not callable(initializer):
            if ops.convert_to_tensor(initializer).dtype.base_dtype != tf.float32:
                # NOTE(review): `init_dtype` is undefined here, so this raise would
                # itself fail with NameError — should reference the initializer's dtype.
                raise ValueError("Initializer type '%s' and explict dtype tf.float32 don't match." % init_dtype)

    def _check_and_update_ps_init_params(self, name, init_vocabulary_size, embedding_dim, max_feature_count, ev_option):
        """ Validate PS-table init parameters and allocate a fresh table id for `name`.

        Returns:
            The newly assigned integer table id.

        Raises:
            ValueError: invalid max_feature_count / vocabulary size, or duplicate name.
            TypeError: ev_option of the wrong type.
        """
        if max_feature_count is None:
            raise ValueError("For ps table, max_feature_count can not be None.")
        steps_to_live = 0
        if ev_option is not None:
            if not isinstance(ev_option, EmbeddingVariableOption):
                raise TypeError("For ps table, ev_option must be EmbeddingVariableOption type.")
            if ev_option.evict_option is not None:
                steps_to_live = ev_option.evict_option.steps_to_live
        if not isinstance(max_feature_count, int):
            raise ValueError("For ps table, max_feature_count must be int.")
        if init_vocabulary_size >= _INT32_MAX_VALUE:
            raise ValueError("init_vocabulary_size exceeds int32 max value.")
        if max_feature_count <= 0:
            raise ValueError("For ps table, max_feature_count must be greater than zero.")
        if name in self._table_name_has_init:
            raise ValueError("This table has been initialized.")
        # Allocate the next sequential table id and register the table.
        table_id = self._ps_table_count
        self._table_name_to_id[name] = table_id
        self._table_id_to_name[table_id] = name
        self._table_id_to_steps_to_live[table_id] = steps_to_live
        self._ps_table_count += 1
        self._table_name_has_init.append(name)
        return table_id

    def _check_ps_opt_and_initializer(self, optimizer, initializer, table_id):
        """ Validate the PS-table optimizer type and normalize `initializer` for `table_id`.

        Args:
            optimizer: one of the supported embedding_optimizer classes.
            initializer: None, an EsInitializer, or a TF truncated_normal /
                random_uniform / constant initializer (float32 only).
            table_id: key under which the normalized EsInitializer is stored.

        Raises:
            ValueError: unsupported optimizer class.
            TypeError: unsupported initializer type or non-float32 dtype.
        """
        # Idiom: a single isinstance call against a tuple replaces the six-way chain.
        supported_optimizers = (embedding_optimizer.AdamOptimizer,
                                embedding_optimizer.AdagradOptimizer,
                                embedding_optimizer.AdamWOptimizer,
                                embedding_optimizer.SgdOptimizer,
                                embedding_optimizer.RmspropOptimizer,
                                embedding_optimizer.FtrlOptimizer)
        if not isinstance(optimizer, supported_optimizers):
            raise ValueError(
                "Optimizer should be one of AdamOptimizer, AdagradOptimizer, AdamWOptimizer, "
                "SGDOptimizer, RmspropOptimizer and FtrlOptimizer.")
        if initializer is None:
            return
        if isinstance(initializer, EsInitializer):
            self._table_id_to_initializer[table_id] = initializer
        elif isinstance(initializer, tf.initializers.truncated_normal):
            if initializer.dtype != tf.float32:
                raise TypeError("initializer dtype error.")
            self._table_id_to_initializer[table_id] = \
                EsInitializer(initializer_mode="truncated_normal", mu=initializer.mean,
                              sigma=initializer.stddev, seed=initializer.seed)
        elif isinstance(initializer, tf.initializers.random_uniform):
            if initializer.dtype != tf.float32:
                raise TypeError("initializer dtype error.")
            self._table_id_to_initializer[table_id] = \
                EsInitializer(initializer_mode="random_uniform", min=initializer.minval,
                              max=initializer.maxval, seed=initializer.seed)
        elif isinstance(initializer, tf.initializers.constant):
            if initializer.dtype != tf.float32:
                raise TypeError("initializer dtype error.")
            self._table_id_to_initializer[table_id] = \
                EsInitializer(initializer_mode="constant", constant_value=initializer.value)
        else:
            raise TypeError("initializer must be EsInitializer or tensorflow initializer, and only support"
                            "random_uniform, truncated_normal and constant value.")

    def _update_optimizer_slot_var_num(self, table_id):
        """ Record how many optimizer slot variables the current optimizer needs for `table_id`. """
        # adam/adamw/rmsprop/ftrl carry m and v (2 slots); adagrad one accumulator; sgd none.
        optimizer = self._optimizer
        if isinstance(optimizer, embedding_optimizer.AdagradOptimizer):
            slot_num = 1
        elif isinstance(optimizer, embedding_optimizer.SgdOptimizer):
            slot_num = 0
        else:
            slot_num = 2
        self._table_to_slot_var_num[table_id] = slot_num

    def _check_ps_lookup_params(self, name, ids):
        if (name is None) or (ids is None):
            raise ValueError("table name or ids must be specified.")
        if not isinstance(name, str):
            raise TypeError("embedding table name must be string.")
        regex = re.compile('[@!#$%^&*()<>?/\|}{~:]')
        if regex.search(name) is not None:
            raise ValueError("table name contains illegal character.")
        if ids.dtype != tf.int64:
            raise ValueError("dtype of ids must be tf.int64.")
        if not self._init_table_flag:
            raise ValueError("embedding table must init first!")
        table_id = self._table_name_to_id.get(name)
        if table_id not in self._ps_table_id_list:
            raise ValueError("this ps table has not yet initialized.")
        return table_id

    def _check_update_params(self, params, input_ids_list, table_ids, loss):
        if (loss is None) or (params is None) or (table_ids is None) or (input_ids_list is None):
            raise ValueError("loss or params or table_ids or input_ids_list is None.")
        if (isinstance(loss, str)) or (isinstance(params, str)) or isinstance(table_ids, str) or \
                isinstance(input_ids_list, str):
            raise ValueError("loss, params, table_ids and input_ids_list can not be str.")
        if not self._init_table_flag:
            raise ValueError("embedding must init first!")

    def _check_save_or_restore_params(self, name, path):
        if path is None or name is None:
            raise ValueError("table name, embedding table path can not be None.")
        if not isinstance(name, str):
            raise TypeError("embedding table name must be string.")
        regex = re.compile('[@!#$%^&*()<>?/\|}{~:]')
        if regex.search(name) is not None:
            raise ValueError("table name contains illegal character.")
        if not self._init_table_flag:
            raise ValueError("Not any table has been initialized.")
        if name not in self._ps_table_name_list:
            raise ValueError("this table has not yet initialized.")
        if path[-1] == '/':
            raise ValueError("path format is wrong, please check.")

    def _check_save_or_restore_params_v2(self, path, save_flag):
        if path is None:
            raise ValueError("embedding table path can not be None.")
        if path[-1] == '/':
            raise ValueError("path format is wrong, please check.")
        if not self._init_table_flag:
            raise ValueError("Not any table has been initialized.")
        if save_flag:
            env_dist = os.environ
            rank_id = int(env_dist.get("RANK_ID"))
            if rank_id != 0:
                logging.warn("Only minimal_rank_id device in each server can run save graph."
                             "Else, save graph will raise unexpected error. Please Check.")

    def _init_counter_filter(self, table_id, ev_option):
        if (ev_option is not None) and (ev_option.filter_option is not None):
            filter_mode = "counter"
            self._table_to_counter_filter[table_id] = ev_option.filter_option
            self._table_use_counter_filter[table_id] = 1
        else:
            filter_mode = "no_filter"
            self._table_use_counter_filter[table_id] = 0
        return filter_mode

    def _set_ps_optimizer_params(self, table_id, optimizer, embedding_dim, max_feature_count, mask_zero, ev_option):
        self._optimizer = optimizer
        self._optimizer.embedding_dim = embedding_dim
        self._optimizer.max_num = max_feature_count
        self._optimizer.mask_zero = 1 if mask_zero is True else 0
        self._init_ps_opt_padding_key(ev_option=ev_option)
        self._init_ps_opt_completion_key(table_id=table_id, ev_option=ev_option)
        self._table_to_optimizer[table_id] = self._optimizer
        self._ps_table_id_to_optimizer_params[table_id] = []
        self._update_optimizer_slot_var_num(table_id=table_id)

    def _init_ps_opt_padding_key(self, ev_option):
        if (ev_option is not None) and (ev_option.padding_option is not None):
            self._optimizer.padding_key = ev_option.padding_option.padding_key
            self._optimizer.padding_key_mask = 1 if ev_option.padding_option.mask is True else 0
        else:
            self._optimizer.padding_key = 0
            self._optimizer.padding_key_mask = 1
        if not self._optimizer.padding_key_mask:
            self._optimizer.embedding_flags = 1
        else:
            self._optimizer.embedding_flags = 0

    def _init_ps_opt_completion_key(self, table_id, ev_option):
        if (ev_option is not None) and (ev_option.completion_option is not None):
            self._optimizer.completion_key = ev_option.completion_option.completion_key
            self._optimizer.completion_key_mask = ev_option.completion_option.mask
            self._table_id_to_completion_option[table_id] = ev_option.completion_option
        else:
            self._optimizer.completion_key = 0
            self._optimizer.completion_key_mask = True
            self._table_id_to_completion_option[table_id] = CompletionKeyOption(completion_key=0,
                                                                                mask=1)

    def _init_optimizer_mode_and_params(self, table_id):
        """ Fill the optimizer mode string and its two per-table init parameters. """
        # Hoist the repeated dict lookups; `params` aliases the stored list so the
        # appends below land in _ps_table_id_to_optimizer_params[table_id].
        optimizer = self._table_to_optimizer.get(table_id)
        params = self._ps_table_id_to_optimizer_params[table_id]
        # Independent `if`s (not elif) kept deliberately, matching the original flow.
        if isinstance(optimizer, embedding_optimizer.AdagradOptimizer):
            self._ps_table_id_to_optimizer_mode[table_id] = "adagrad"
            # Both slots are seeded with the accumulator's initial value.
            params.append(optimizer.initial_accumulator_value)
            params.append(optimizer.initial_accumulator_value)
        if isinstance(optimizer, embedding_optimizer.AdamOptimizer):
            self._ps_table_id_to_optimizer_mode[table_id] = "adam"
            params.append(0)
            params.append(0)
        if isinstance(optimizer, embedding_optimizer.AdamWOptimizer):
            self._ps_table_id_to_optimizer_mode[table_id] = "adamw"
            params.append(0)
            params.append(0)
        if isinstance(optimizer, embedding_optimizer.SgdOptimizer):
            self._ps_table_id_to_optimizer_mode[table_id] = "sgd"
            params.append(0)
            params.append(0)
        if isinstance(optimizer, embedding_optimizer.RmspropOptimizer):
            self._ps_table_id_to_optimizer_mode[table_id] = "rmsprop"
            params.append(optimizer.ms)
            params.append(optimizer.mom)
        if isinstance(optimizer, embedding_optimizer.FtrlOptimizer):
            self._ps_table_id_to_optimizer_mode[table_id] = "ftrl"
            params.append(optimizer.accum)
            params.append(optimizer.linear)

    def _init_hashmap_and_table_import(self, bucket_size, table_id, embedding_dim, ev_option):
        """Build the hashmap-initialization op for one PS table.

        Consolidates the three near-identical ``init_embedding_hashmap`` calls
        of the original (train+retrain, train-only, inference) into one call
        whose initializer kwargs and value_total_len vary per mode. Also drops
        a duplicated ``self._init_table_flag = True`` statement.

        Args:
            bucket_size: hashmap bucket count.
            table_id: int id of the PS table.
            embedding_dim: embedding vector length.
            ev_option: embedding-variable option carrying the counter filter.

        Returns:
            A grouped init op (optimizer variable init is included in train mode).
        """
        filter_mode = self._init_counter_filter(table_id, ev_option)
        self._init_optimizer_mode_and_params(table_id)

        with tf.control_dependencies([self._init_partition_maps.get(table_id)]):
            if self._train_mode:
                # Stored row = embedding + slot variables + 2 bookkeeping values.
                value_total_len = embedding_dim * (self._table_to_slot_var_num.get(table_id) + 1) + 2
            else:
                value_total_len = embedding_dim
            if self._train_mode and self._train_level:
                initializer = self._table_id_to_initializer.get(table_id)
                init_kwargs = dict(
                    initializer_mode=initializer.initializer_mode,
                    constant_value=initializer.constant_value,
                    min=initializer.min,
                    max=initializer.max,
                    mu=initializer.mu,
                    sigma=initializer.sigma,
                    seed=initializer.seed,
                    # seed2 deliberately reuses seed — same pattern as
                    # _call_lookup_op; NOTE(review): confirm this is intended.
                    seed2=initializer.seed)
            else:
                init_kwargs = dict(initializer_mode=None, constant_value=None,
                                   min=None, max=None, mu=None, sigma=None,
                                   seed=None, seed2=None)
            self._init_embedding_hash_maps[table_id] = gen_npu_cpu_ops.init_embedding_hashmap(
                table_id=ops.convert_to_tensor(table_id),
                bucket_size=bucket_size,
                value_total_len=value_total_len,
                embedding_dim=embedding_dim,
                filter_mode=filter_mode,
                optimizer_mode=self._ps_table_id_to_optimizer_mode.get(table_id),
                optimizer_params=self._ps_table_id_to_optimizer_params.get(table_id),
                **init_kwargs)
        self._init_table_flag = True  # original set this twice; once is enough
        if self._train_mode:
            # Optimizer slot variables must be initialized together with the table.
            return tf.group(
                [tf.initializers.variables(self._optimizer.variables()),
                 self._init_embedding_hash_maps.get(table_id)],
                name=self._table_id_to_name.get(table_id) + "_init")
        return tf.group([self._init_embedding_hash_maps.get(table_id)],
                        name=self._table_id_to_name.get(table_id) + "_init")

    def _call_lookup_op(self, table_id, ids, actual_keys_input=None, unique_indices=None,
                        filter_mode=None, use_counter_filter=False, key_count=None):
        """Emit the embedding lookup op for one PS table.

        Train mode uses either the host-unique fused lookup
        (``fake_remote_lookup_uniqued``) or ``embedding_table_find_and_init``;
        inference mode uses the plain ``embedding_table_find``.

        Args:
            table_id: int id of the PS table.
            ids: key tensor to look up.
            actual_keys_input: pre-uniqued keys (host-unique path only).
            unique_indices: inverse indices for the uniqued keys (host-unique path only).
            filter_mode: counter-filter mode forwarded to the op.
            use_counter_filter: when False, ``ids`` doubles as the key-count tensor.
            key_count: per-key counts (meaningful only with the counter filter).

        Returns:
            The lookup result tensor, annotated with private attributes read by
            the NPU graph passes.
        """
        initializer = self._table_id_to_initializer.get(table_id)
        optimizer_params = self._ps_table_id_to_optimizer_params.get(table_id)
        completion_option = self._table_id_to_completion_option.get(table_id)
        embedding_dim = self._table_to_embedding_dim.get(table_id)
        if self._train_mode:
            if self.use_host_unique:
                # Without a counter filter there are no real counts, but the op
                # still requires a tensor, so the keys themselves are passed.
                # (Original had a no-op `key_count = key_count` branch here.)
                if not use_counter_filter:
                    key_count = ids
                result = gen_npu_cpu_ops.fake_remote_lookup_uniqued(
                    table_id=ops.convert_to_tensor(table_id),
                    keys=ids,
                    actual_keys_input=actual_keys_input,
                    unique_indices=unique_indices,
                    key_count=key_count,
                    embedding_dim=[embedding_dim],
                    initializer_mode=[initializer.initializer_mode],
                    constant_value=[initializer.constant_value],
                    min=[initializer.min],
                    max=[initializer.max],
                    mu=[initializer.mu],
                    sigma=[initializer.sigma],
                    seed=[initializer.seed],
                    # seed2 reuses seed, matching _init_hashmap_and_table_import.
                    seed2=[initializer.seed],
                    value_total_len=[embedding_dim *
                                     (self._table_to_slot_var_num.get(table_id) + 1) + 2],
                    filter_mode=[filter_mode],
                    filter_freq=[self._filter_freq],
                    default_key_or_value=[self._default_key_or_value],
                    default_key=[self._default_key],
                    default_value=[self._default_value],
                    optimizer_mode=[self._ps_table_id_to_optimizer_mode.get(table_id)],
                    optimizer_params=[optimizer_params[0], optimizer_params[1]],
                    completion_key=[completion_option.completion_key],
                    completion_key_mask=[completion_option.mask])
            else:
                result = gen_npu_cpu_ops.embedding_table_find_and_init(
                    table_id=ops.convert_to_tensor(table_id),
                    keys=ids,
                    embedding_dim=[embedding_dim],
                    initializer_mode=[initializer.initializer_mode],
                    constant_value=[initializer.constant_value],
                    min=[initializer.min],
                    max=[initializer.max],
                    mu=[initializer.mu],
                    sigma=[initializer.sigma],
                    seed=[initializer.seed],
                    seed2=[initializer.seed],
                    value_total_len=[embedding_dim *
                                     (self._table_to_slot_var_num.get(table_id) + 1) + 2],
                    filter_mode=[filter_mode],
                    filter_freq=[self._filter_freq],
                    default_key_or_value=[self._default_key_or_value],
                    default_key=[self._default_key],
                    default_value=[self._default_value],
                    optimizer_mode=[self._ps_table_id_to_optimizer_mode.get(table_id)],
                    optimizer_params=[optimizer_params[0], optimizer_params[1]],
                    completion_key=[completion_option.completion_key],
                    completion_key_mask=[completion_option.mask])
        else:
            result = gen_npu_cpu_ops.embedding_table_find(
                table_id=ops.convert_to_tensor(table_id),
                keys=ids,
                embedding_dim=[embedding_dim],
                default_value=[self._default_value])
        # Private attributes consumed by the NPU optimization passes.
        result.op._set_attr("_embedding_dim", attr_value_pb2.AttrValue(i=embedding_dim))
        result.op._set_attr("_max_key_num", attr_value_pb2.AttrValue(i=self._table_to_max_num.get(table_id)))
        result.op._set_attr("_use_counter_filter",
                            attr_value_pb2.AttrValue(i=self._table_use_counter_filter.get(table_id)))
        return result

    def _call_embeddings_export_op(self, path, feature_mapping_export_list, step):
        """Build the export op for PS embedding tables (variables only, bin format)."""
        with specified_ps_engine_scope():
            table_id_list = list(self._ps_table_id_list)
            embedding_dim_list = [self._table_to_embedding_dim.get(tid) for tid in table_id_list]
            steps_list = [self._table_id_to_steps_to_live.get(tid, 0) for tid in table_id_list]
            embedding_table_export = gen_npu_cpu_ops.embedding_table_export(
                file_path=ops.convert_to_tensor(path, name="file_path"),
                ps_id=ops.convert_to_tensor(-1, name="ps_id"),
                table_id=ops.convert_to_tensor(table_id_list, name="table_id"),
                embedding_dim=embedding_dim_list,
                # Variables only: total row length equals the embedding dim.
                value_total_len=embedding_dim_list,
                export_mode="all",
                only_var_flag=True,
                file_type="bin",
                table_name=self._ps_table_name_list,
                global_step=step,
                steps_to_live_list=steps_list)
            if not self._small_table_variable_list:
                # No small tables: nothing to export besides the PS tables.
                return tf.group([embedding_table_export])
        return embedding_table_export, feature_mapping_export_list

    def _call_ckpts_export_op(self, path, feature_mapping_export_list, save_filtered_features, step):
        """Build export ops for PS tables including optimizer slot state (full checkpoint)."""
        with specified_ps_engine_scope():
            table_id_list = list(self._ps_table_id_list)
            embedding_dim_list = [self._table_to_embedding_dim.get(tid) for tid in table_id_list]
            steps_list = [self._table_id_to_steps_to_live.get(tid, 0) for tid in table_id_list]
            # Stored row = embedding + slot variables + 2 bookkeeping values.
            value_total_len_list = [
                self._table_to_embedding_dim.get(tid) * (self._table_to_slot_var_num.get(tid) + 1) + 2
                for tid in table_id_list]
            file_path_tensor = ops.convert_to_tensor(path, name="file_path")
            ps_id_tensor = ops.convert_to_tensor(-1, name="ps_id")
            table_id_tensor = ops.convert_to_tensor(table_id_list, name="table_id")
            embedding_table_export = gen_npu_cpu_ops.embedding_table_export(
                file_path=file_path_tensor,
                ps_id=ps_id_tensor,
                table_id=table_id_tensor,
                embedding_dim=embedding_dim_list,
                value_total_len=value_total_len_list,
                export_mode="all",
                only_var_flag=False,
                file_type="bin",
                table_name=self._ps_table_name_list,
                filter_export_flag=save_filtered_features,
                global_step=step,
                steps_to_live_list=steps_list)
            # The compute-var export must run after the table export finishes.
            with tf.control_dependencies([embedding_table_export]):
                embedding_compute_var_export = gen_npu_cpu_ops.embedding_compute_var_export(
                    file_path=file_path_tensor,
                    ps_id=ps_id_tensor,
                    table_id=table_id_tensor,
                    global_step=step,
                    table_name=self._ps_table_name_list)
                if not self._small_table_variable_list:
                    return tf.group([embedding_compute_var_export])
        return embedding_compute_var_export, feature_mapping_export_list

    def _create_variable_for_small_table(self, table_map_policy):
        """Create TF variables for the small tables, merging them when requested."""
        if self._need_table_merge:
            self._create_variable_when_need_merge(table_map_policy)
        else:
            self._create_variable_when_no_merge()
        # Consume the pending table definitions and mark initialization as done.
        self.user_defined_table_infos = []
        self._small_table_name_list = []
        self._small_table_init = True

    def _create_variable_when_no_merge(self):
        """Create one TF variable per user-defined small table (no merging).

        Also records per-table bookkeeping (multihot lens, vocabulary size,
        variable name/dim lists) consumed later by lookup and export.

        The original duplicated the whole variable-creation block in both
        branches; only the ``reuse`` argument differed, so it is now computed
        once (it is loop-invariant) and a single branch remains.
        """
        # On re-initialization, or on any rank other than 0, attach to the
        # already-created variables instead of creating fresh ones.
        # NOTE(review): assumes RANK_ID is set in the environment —
        # int(None) would raise otherwise, same as the original code.
        reuse = tf.compat.v1.AUTO_REUSE \
            if self._small_table_init or (int(os.environ.get("RANK_ID")) != 0) else None
        for user_table_info in self.user_defined_table_infos:
            with tf.compat.v1.variable_scope("es", reuse=reuse):
                self._small_table_to_variable[user_table_info['name']] = \
                    tf.get_variable(user_table_info['name'],
                                    shape=[user_table_info['max_vocabulary_size'],
                                           user_table_info['embedding_dim']],
                                    initializer=user_table_info['initializer'], dtype=tf.float32)
            self._small_table_to_multihot_lens[self.total_embedding_count] = user_table_info['multihot_lens']
            self._small_table_name_to_max_vocabulary_size[user_table_info['name']] = \
                user_table_info['max_vocabulary_size']
            self._small_table_name_to_multihot_lens[user_table_info['name']] = \
                user_table_info['multihot_lens']
            # ":0" suffix matches the tensor name of the variable's output.
            self._small_table_variable_list.append(user_table_info['name'] + ":0")
            self._small_table_variable_dim_list.append(user_table_info['embedding_dim'])
            self.total_embedding_count += 1

    def _create_variable_when_need_merge(self, table_map_policy):
        """Create merged TF variables according to the given table-map policy.

        Args:
            table_map_policy: a NoneTableMapPolicy or AutoMergeTableMapPolicy
                instance that groups the user tables into merged tables.

        Raises:
            TypeError: if table_map_policy is not one of the supported policies.

        The original duplicated the whole variable-creation block in both
        branches; only the ``reuse`` argument differed, so it is now computed
        once (it is loop-invariant) and a single branch remains.
        """
        self.total_variable_table = []
        if not isinstance(table_map_policy, (NoneTableMapPolicy, AutoMergeTableMapPolicy)):
            raise TypeError("table_map_policy should be NoneTableMapPolicy or AutoMergeTableMapPolicy.")
        self.table_map_policy = table_map_policy
        self.table_create_infos = self.table_map_policy.map_table_infos(self.user_defined_table_infos)
        # On re-initialization, or on any rank other than 0, attach to the
        # already-created variables instead of creating fresh ones.
        # NOTE(review): assumes RANK_ID is set in the environment —
        # int(None) would raise otherwise, same as the original code.
        reuse = tf.compat.v1.AUTO_REUSE \
            if self._small_table_init or (int(os.environ.get("RANK_ID")) != 0) else None
        for table_info_ in self.table_create_infos:
            with tf.compat.v1.variable_scope("es", reuse=reuse):
                self.total_variable_table.append(
                    tf.get_variable('ES' + str(self.total_embedding_count),
                                    shape=[table_info_['max_vocabulary_size'],
                                           table_info_['embedding_dim']],
                                    initializer=table_info_['initializer'],
                                    dtype=tf.float32))
            self._npu_table_to_embedding_dim[self.total_embedding_count] = table_info_['embedding_dim']
            # ":0" suffix matches the tensor name of the variable's output.
            self._small_table_variable_list.append('ES' + str(self.total_embedding_count) + ":0")
            self._small_table_variable_dim_list.append(table_info_['embedding_dim'])
            self.total_embedding_count += 1

    def _small_table_lookup_v1(self, name, rank_id, rank_size, ids_list):
        """Look up one unmerged small table via feature mapping + embedding_lookup."""
        if not isinstance(name, str):
            raise TypeError("embedding table name must be string.")
        if self.total_embedding_count == 0:
            raise ValueError("Must init_table() first!")
        local_batch = ids_list.get_shape().as_list()[0]
        if rank_size > 1 and (local_batch is not None):
            # Gather ids from all ranks, map them, then slice back this rank's rows.
            gathered = allgather(tensor=ids_list, rank_size=rank_size, group="user_group_fm")
            mapped = gen_npu_cpu_ops.embedding_feature_mapping_v2(
                feature_id=gathered, table_name=name, table_total_size=[1], table_actual_size=[1])
            recovery_matrix = [rank_id * local_batch + i for i in range(local_batch)]
            local_non_hash_keys = tf.gather(mapped, recovery_matrix)
        else:
            local_non_hash_keys = gen_npu_cpu_ops.embedding_feature_mapping_v2(
                feature_id=ids_list, table_name=name, table_total_size=[1], table_actual_size=[1])
        return tf.nn.embedding_lookup(self._small_table_to_variable[name], local_non_hash_keys)

    def _small_table_lookup_v2(self, rank_id, rank_size, in_slot_size_group, ids_list, table_to_output_slots,
                               table_to_slot, in_slot_vocabulary_size_group, batch_size):
        """Look up when every small table was merged into a single variable."""
        merged_table_name = self._small_table_variable_list[0][:-2]  # drop the ":0" suffix
        actual_size = [slot_size * batch_size * rank_size for slot_size in in_slot_size_group]
        local_batch = ids_list.get_shape().as_list()[0]
        if rank_size > 1 and (local_batch is not None):
            # Gather ids from all ranks, map them, then slice back this rank's rows.
            gathered = allgather(tensor=ids_list, rank_size=rank_size, group="user_group_fm")
            mapped = gen_npu_cpu_ops.embedding_feature_mapping_v2(
                feature_id=gathered, table_name=merged_table_name,
                table_total_size=in_slot_vocabulary_size_group, table_actual_size=actual_size)
            recovery_matrix = [rank_id * local_batch + i for i in range(local_batch)]
            local_non_hash_keys = tf.gather(mapped, recovery_matrix)
        else:
            local_non_hash_keys = gen_npu_cpu_ops.embedding_feature_mapping_v2(
                feature_id=ids_list, table_name=merged_table_name,
                table_total_size=in_slot_vocabulary_size_group, table_actual_size=actual_size)

        # Split the merged embedding columns back into per-slot outputs.
        output_slots = [None] * len(in_slot_size_group)
        table_embedding = tf.nn.embedding_lookup(self.total_variable_table[0], local_non_hash_keys)
        split_embeddings = tf.split(table_embedding, table_to_output_slots[0], axis=1)
        for out_emb, sid in zip(split_embeddings, table_to_slot[0]):
            output_slots[sid] = out_emb
        return output_slots

    def _small_table_lookup_v3(self, rank_id, rank_size, ids_list, in_slot_size_group, slot_to_table,
                               table_to_input_group, table_to_output_slots, table_to_slot,
                               table_to_vocabulary_slots, batch_size):
        """Look up when the small tables were merged into two or more variables."""
        # Route each slot's indices to the merged table that owns it.
        for tid in range(self.total_embedding_count):
            table_to_input_group[tid] = []
        for sid, indices in enumerate(tf.split(ids_list, in_slot_size_group, axis=1)):
            table_to_input_group[slot_to_table[sid]].append(indices)

        output_slots = [None] * len(in_slot_size_group)
        for tid, table_input_group in enumerate(table_to_input_group):
            table_input_hash = tf.concat(table_input_group, axis=1)
            local_batch = table_input_hash.get_shape().as_list()[0]
            table_name = self._small_table_variable_list[tid][:-2]  # drop the ":0" suffix
            actual_size = [slot_size * batch_size * rank_size
                           for slot_size in table_to_output_slots[tid]]
            if rank_size > 1 and (local_batch is not None):
                # Gather ids from all ranks, map them, then slice back this rank's rows.
                gathered = allgather(tensor=table_input_hash, rank_size=rank_size, group="user_group_fm")
                mapped = gen_npu_cpu_ops.embedding_feature_mapping_v2(
                    feature_id=gathered, table_name=table_name,
                    table_total_size=table_to_vocabulary_slots[tid], table_actual_size=actual_size)
                recovery_matrix = [rank_id * local_batch + i for i in range(local_batch)]
                local_non_hash_keys = tf.gather(mapped, recovery_matrix)
            else:
                local_non_hash_keys = gen_npu_cpu_ops.embedding_feature_mapping_v2(
                    feature_id=table_input_hash, table_name=table_name,
                    table_total_size=table_to_vocabulary_slots[tid], table_actual_size=actual_size)
            # Split this table's embedding columns back into per-slot outputs.
            table_embedding = tf.nn.embedding_lookup(self.total_variable_table[tid], local_non_hash_keys)
            split_embeddings = tf.split(table_embedding, table_to_output_slots[tid], axis=1)
            for out_emb, sid in zip(split_embeddings, table_to_slot[tid]):
                output_slots[sid] = out_emb
        return output_slots

    def _refresh_small_table(self):
        unique_small_tables = set()
        global_small_table_list = []
        for v in self._small_table_variable_list:
            if v not in unique_small_tables:
                unique_small_tables.add(v)
                global_small_table_list.append(v)
        small_table_num = len(global_small_table_list)
        self._small_table_variable_dim_list = self._small_table_variable_dim_list[:small_table_num]
        self._small_table_variable_list = self._small_table_variable_list[:small_table_num]
        return global_small_table_list, small_table_num

    def _call_feature_mapping_export_op(self, path, export_value, step):
        """Build export ops for the small-table feature mappings.

        Processes the tables in chunks because aicpu only supports handling
        128 tables at one time. When export_value is set, the mapped embedding
        rows are gathered from the trainable variables and exported as well.
        """
        feature_mapping_export_list = []
        global_small_table_list, num = self._refresh_small_table()
        # aicpu only support handle 128 tables at one time
        for start in range(0, num, 128):
            chunk = global_small_table_list[start:start + 128]
            table_name_list = [name[:-2] for name in chunk]  # drop the ":0" suffix
            embedding_dim_list = self._small_table_variable_dim_list[start:start + 128]
            offset_list = [0] * len(table_name_list)
            table_name_tensor = ops.convert_to_tensor(table_name_list)
            feature_size = gen_npu_cpu_ops.embedding_feature_mapping_table_size(table_name=table_name_tensor)
            feature_id, offset_id = gen_npu_cpu_ops.embedding_feature_mapping_find(
                table_name=table_name_tensor,
                feature_size=feature_size,
                num=len(table_name_list))
            if export_value:
                for var in tf.trainable_variables():
                    # var.name is "es/<table>:0" — strip the scope prefix and suffix.
                    if var.name[3:-2] in table_name_list:
                        idx = table_name_list.index(var.name[3:-2])
                        offset_list[idx] = tf.reshape(tf.gather(var, offset_id[idx]), [-1])
                values = tf.concat(offset_list, axis=0)
            else:
                values = 0
            feature_mapping_export_list.append(
                gen_npu_cpu_ops.embedding_feature_mapping_export(file_path=path,
                                                                 table_name=table_name_tensor,
                                                                 feature_id=feature_id,
                                                                 offset_id=offset_id,
                                                                 values=values,
                                                                 global_step=step,
                                                                 embedding_dim=embedding_dim_list))
        return feature_mapping_export_list

    def _call_feature_mapping_import_op(self, path, import_value, step):
        """Build import ops that restore the small-table feature mappings.

        Processes the tables in chunks because aicpu only supports handling
        128 tables at one time.
        """
        feature_mapping_import_list = []
        global_small_table_list, num = self._refresh_small_table()
        # aicpu only support handle 128 tables at one time
        for start in range(0, num, 128):
            chunk = global_small_table_list[start:start + 128]
            table_name_list = [name[:-2] for name in chunk]  # drop the ":0" suffix
            embedding_dim_list = self._small_table_variable_dim_list[start:start + 128]

            feature_size = gen_npu_cpu_ops.embedding_feature_mapping_file_size(
                file_path=path,
                table_name=ops.convert_to_tensor(table_name_list),
                embedding_dim=embedding_dim_list,
                global_step=step,
                only_offset_flag=import_value)
            feature_id, offset_id, values = gen_npu_cpu_ops.embedding_feature_mapping_import(
                file_path=path,
                table_name=ops.convert_to_tensor(table_name_list),
                feature_size=feature_size,
                embedding_dim=embedding_dim_list,
                global_step=step,
                only_offset_flag=import_value,
                num=len(table_name_list))
            # Only the id/offset pairs are re-inserted; `values` is unused here.
            feature_mapping_insert = gen_npu_cpu_ops.embedding_feature_mapping_insert(
                table_name=ops.convert_to_tensor(table_name_list),
                feature_id=feature_id,
                offset_id=offset_id)
            feature_mapping_import_list.append(feature_mapping_insert)
        return feature_mapping_import_list

    def _create_comm_group_for_allgather(self):
        if (os.environ.get("RANK_SIZE") is not None) and (int(os.environ.get("RANK_SIZE")) > 1) and \
                (_SAVE_EVICT_COMM_GROUP not in self._user_group_set):
            rank_size = int(os.environ.get("RANK_SIZE"))
            rank_list = []
            for i in range(rank_size):
                rank_list.append(i)
            create_group(_SAVE_EVICT_COMM_GROUP, rank_size, rank_list)
            self._user_group_set.add(_SAVE_EVICT_COMM_GROUP)
