#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Huawei Technologies Co., Ltd. 2023-2023. All rights reserved.

import abc
from collections import defaultdict
from typing import Any, Callable, Dict, Optional, Union, List, Tuple

from mpi4py import MPI
import tensorflow as tf
from tensorflow import Tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.framework import ops

from rec_sdk_common.log.log import LoggingProxy as logger
from rec_sdk_common.communication.hccl.hccl_info import get_device_id, get_rank_id, get_rank_size, get_rank_to_device_dict
from rec_sdk_common.util.tf_adapter import hccl_ops
from mx_rec.constants.constants import ASCEND_SPARSE_LOOKUP_ENTRANCE, All2allGradientsOp, ASCAnchorAttr
from mx_rec.core.asc.build_graph import get_preprocessed_tensor_for_asc
from mx_rec.core.asc.feature_spec import FeatureSpec, get_feature_spec, set_temporary_feature_spec_attribute
from mx_rec.core.asc.swap_args import SwapArgs, SwapDataType
from mx_rec.util.initialize import ConfigInitializer
from mx_rec.validator.emb_validator import check_emb_init_params, check_emb_lookup_params
from mx_rec.util.ops import import_host_pipeline_ops
import mxrec_pybind

# Module-level handle to the host-pipeline custom op library, loaded once at import time
# and used below for the lccl_* collective ops.
host_pipeline_ops = import_host_pipeline_ops()


class BaseSparseEmbedding(metaclass=abc.ABCMeta):
    """
    Abstract base class for a sparse embedding table.

    Subclasses must implement `capacity`, `_set_slice_vocab_size`,
    `_get_update_grad`, `_get_local_embeddings` and `_get_sparse_forward_result`.
    The class supports two lookup entry points: `lookup` (automatic
    graph-modification mode) and `lookup_for_feat_spec` (explicit FeatureSpec
    mode), both of which funnel into `_lookup_forward`.
    """

    # Global dict used for graph modification; keyed by the id-like anchor tensor,
    # with per-anchor sub-dicts keyed by `ASCAnchorAttr` members.
    anchor_tensor_specs: Dict[Tensor, Dict[str, Any]] = defaultdict(dict)

    def __init__(self, config: dict):
        """
        Initialize the sparse embedding table from a configuration dict.

        Args:
            config: table configuration. Keys read here include
                "embedding_size", "table_name", "key_dtype", "emb_initializer",
                "is_save", "init_param", "host_vocabulary_size", "ssd_data_path",
                "is_dp", "all2all_gradients_op", "device_vocabulary_size",
                "ssd_vocabulary_size", "padding_keys", "padding_keys_mask" and
                "padding_keys_len".
        """
        self._embedding_size = config.get("embedding_size")
        # Normalize a plain int embedding size into a rank-1 TensorShape so that
        # `as_list()` below works uniformly.
        if isinstance(self._embedding_size, int):
            self._embedding_size = tf.TensorShape([self._embedding_size])
        self._table_name = config.get("table_name")
        self._key_dtype = config.get("key_dtype")
        self._emb_initializer = config.get("emb_initializer")
        self._is_save = config.get("is_save")
        self._init_param = config.get("init_param")
        # Pure HBM (device-memory-only) mode when no host-side vocabulary is configured.
        # NOTE(review): raises TypeError if "host_vocabulary_size" is absent (None <= 0);
        # presumably validated upstream — confirm.
        self._is_hbm = True if config.get("host_vocabulary_size") <= 0 else False
        self._ssd_data_path = list(config.get("ssd_data_path"))
        self._is_dp = config.get("is_dp")
        self._send_count = 0
        self._slice_device_vocabulary_size = 0
        self._slice_host_vocabulary_size = 0
        self._slice_ssd_vocabulary_size = 0
        # Scalar embedding dimension extracted from the (rank-1) embedding shape.
        self._emb_size = self._embedding_size.as_list()[0]
        self._is_grad = False
        self._ext_emb_size = None
        self._variable = None
        # Per-mode lookup counters keyed by the is_training flag
        # (True: training lookups, False: eval/inference lookups).
        self._multi_lookup_times = {True: 0, False: 0}

        self._all2all_gradients_op = All2allGradientsOp.mapping(config.get("all2all_gradients_op"))
        self._device_vocabulary_size = config.get("device_vocabulary_size")
        self._host_vocabulary_size = config.get("host_vocabulary_size")
        self._ssd_vocabulary_size = config.get("ssd_vocabulary_size")
        self._padding_keys = config.get("padding_keys")
        self._padding_keys_mask = config.get("padding_keys_mask")
        self._padding_keys_len = config.get("padding_keys_len")
        # Multiplier applied to _emb_size to obtain _ext_emb_size; may be changed
        # later depending on the optimizer (see _set_ext_emb_size).
        self._ext_coefficient = 1
        # Starts at -1 so the first default lookup name is "sparse_lookup_0".
        self._default_name_count = -1
        # Accumulated send_count across lookups of the same table (used for multi-lookup).
        self._same_table_send_count = 0
        # Cache: {feature_spec_name: {is_training: lookup_result_tensor}}.
        self._lookup_result = dict()
        self._modify_graph = False

        self._rank_size = get_rank_size()
        self._rank_id = get_rank_id()
        self._device_id = get_device_id()
        self._use_static = ConfigInitializer.get_instance().use_static

        self._set_slice_vocab_size()
        self._set_ext_emb_size()
        self._init_sliced_variable()

        # Peer-memory handle used only by the lccl collective ops below.
        self.peer_mem = None
        if ConfigInitializer.get_instance().use_lccl:
            device_ids = get_rank_to_device_dict()
            # The rank owning the smallest device id acts as the comm server.
            comm_server_rank_id = min(device_ids.values())
            peer_mem_ = mxrec_pybind.get_peer_mem(self._rank_id, comm_server_rank_id, self._rank_size)
            logger.debug("Get peer_mem_:%s.", peer_mem_)
            self.peer_mem = tf.constant(peer_mem_, dtype=tf.int64)

    # ---- read-only accessors over the fields initialized above ----

    @property
    def embedding_size(self):
        return self._embedding_size

    @property
    def table_name(self):
        return self._table_name

    @property
    def key_dtype(self):
        return self._key_dtype

    @property
    def emb_initializer(self):
        return self._emb_initializer

    @property
    def is_save(self):
        return self._is_save

    @property
    def init_param(self):
        return self._init_param

    @property
    def send_count(self):
        return self._send_count

    @property
    def rank_size(self):
        return self._rank_size

    @property
    def ssd_vocabulary_size(self):
        return self._ssd_vocabulary_size

    @property
    def slice_device_vocabulary_size(self):
        return self._slice_device_vocabulary_size

    @property
    def slice_host_vocabulary_size(self):
        return self._slice_host_vocabulary_size

    @property
    def slice_ssd_vocabulary_size(self):
        return self._slice_ssd_vocabulary_size

    @property
    def emb_size(self):
        return self._emb_size

    @property
    def is_grad(self):
        return self._is_grad

    @property
    def ext_emb_size(self):
        return self._ext_emb_size

    @property
    def variable(self):
        return self._variable

    @property
    def multi_lookup_times(self):
        return self._multi_lookup_times

    @property
    def ssd_data_path(self):
        return self._ssd_data_path

    @property
    def is_hbm(self):
        return self._is_hbm

    @property
    def is_dp(self):
        return self._is_dp

    @table_name.setter
    def table_name(self, table_name: str) -> None:
        self._table_name = table_name

    @property
    def padding_keys(self):
        return self._padding_keys

    @property
    def padding_keys_mask(self):
        return self._padding_keys_mask

    @property
    def padding_keys_len(self):
        return self._padding_keys_len

    @property
    def use_static(self):
        return self._use_static

    @send_count.setter
    def send_count(self, send_count: int):
        self._send_count = send_count

    @ext_emb_size.setter
    def ext_emb_size(self, ext_emb_size: int):
        self._ext_emb_size = ext_emb_size

    @is_grad.setter
    def is_grad(self, is_grad: bool):
        self._is_grad = is_grad

    @use_static.setter
    def use_static(self, use_static: bool):
        self._use_static = use_static

    @staticmethod
    def get_anchor_attribute(anchor: tf.Tensor, attr: ASCAnchorAttr) -> Union["BaseSparseEmbedding", FeatureSpec, bool]:
        """
        Acquire some attrs of ID type tensor.

        Args:
            anchor: ID type tensor.
            attr: attribute name.

        Returns: find value for given 'anchor' and 'attr' in anchor_spec dict.

        Raises:
            TypeError: if `anchor` is not a tf.Tensor.
            ValueError: if `attr` is not a member of `ASCAnchorAttr`.
            KeyError: if `anchor` was never registered via `register_anchor_attribute`.
        """
        if not isinstance(anchor, tf.Tensor):
            raise TypeError("Anchor must be a Tensor.")

        if attr not in ASCAnchorAttr:
            raise ValueError("Given attr must be limited in Enum 'ASCAnchorAttr'.")

        specs = BaseSparseEmbedding.anchor_tensor_specs.get(anchor)
        if specs is None:
            raise KeyError(f"Given anchor '{anchor}' was not registered.")

        return specs.get(attr)

    @staticmethod
    def _get_access_and_evict_threshold(table_feature_specs: List[FeatureSpec]) -> Tuple[int, int]:
        """
        Return the single (access_threshold, eviction_threshold) pair shared by all
        feature specs of one table; raise if the specs disagree.
        """
        access_threshold_set = set(spec.access_threshold for spec in table_feature_specs)
        evict_threshold_set = set(spec.eviction_threshold for spec in table_feature_specs)
        if len(access_threshold_set) != 1 or len(evict_threshold_set) != 1:
            raise ValueError("the access and evict threshold param must be same in one table when multi lookup.")
        return access_threshold_set.pop(), evict_threshold_set.pop()

    @abc.abstractmethod
    def capacity(self) -> int:
        """
        Get the capacity of the sparse table.
        Returns: the capacity of the sparse table.
        """
        pass

    @abc.abstractmethod
    def _set_slice_vocab_size(self):
        # Subclasses compute the per-rank slice of the device/host/ssd vocabulary sizes.
        pass

    @abc.abstractmethod
    def _get_update_grad(
        self, local_grad: tf.Tensor, result: dict, table: Union[tf.compat.v1.Variable, tf.Tensor]
    ) -> Union[tf.IndexedSlices, tf.Tensor]:
        # Subclasses build the gradient object applied to the local table slice.
        pass

    @abc.abstractmethod
    def _get_local_embeddings(
        self, table: Union[tf.compat.v1.Variable, tf.Tensor], result: dict, feature_spec: FeatureSpec, **kwargs
    ) -> tf.Tensor:
        # Subclasses gather the embeddings held by this rank for the preprocessed ids.
        pass

    @abc.abstractmethod
    def _get_sparse_forward_result(
        self,
        sparse_forward_fn: Callable,
        table: Union[tf.compat.v1.Variable, tf.Tensor],
        result: dict,
        is_training: bool,
    ) -> tf.Tensor:
        # Subclasses invoke `sparse_forward_fn` (the tf.custom_gradient closure)
        # with whatever extra wiring the storage mode needs.
        pass

    def size(self) -> int:
        """
        Get the current size of the sparse table.
        Returns: the size of the sparse table as reported by the ASC manager.
        """
        return ConfigInitializer.get_instance().hybrid_manager_config.asc_manager.get_table_size(self._table_name)

    def register_anchor_attribute(self, anchor_ids: tf.Tensor, feature_spec: FeatureSpec, kwargs: dict):
        """
        Register attributes associated with the anchor ids.

        Args:
            anchor_ids: the ids tensor passed into lookup.
            feature_spec: the FeatureSpec created from the ids.
            kwargs: the lookup keyword-argument dict ("is_train"/"is_grad" are read).

        Returns: None
        """
        self.anchor_tensor_specs[anchor_ids][ASCAnchorAttr.TABLE_INSTANCE] = self
        self.anchor_tensor_specs[anchor_ids][ASCAnchorAttr.IS_TRAINING] = kwargs.get("is_train")
        self.anchor_tensor_specs[anchor_ids][ASCAnchorAttr.FEATURE_SPEC] = feature_spec
        self.anchor_tensor_specs[anchor_ids][ASCAnchorAttr.IS_GRAD] = kwargs.get("is_grad")

    def get_default_lookup_name(self) -> str:
        """
        Get the default name for this table's next lookup.
        Returns: a name of the form "sparse_lookup_<n>", with n increasing per call.
        """
        self._default_name_count += 1
        default_name = "sparse_lookup_%d" % self._default_name_count
        logger.debug("getting one default lookup name %s.", default_name)
        return default_name

    def increase_multi_lookup_times(self, is_training: bool):
        """
        Increment this table's lookup counter for one mode; used to validate
        multi-lookup (one table, several lookups) counts.

        Args:
            is_training: whether the current flow is training or inference.

        Returns: None
        """
        self._multi_lookup_times[is_training] = self._multi_lookup_times.get(is_training) + 1

    def lookup(self, ids: tf.Tensor, send_count: Optional[int], **kwargs) -> tf.Tensor:
        """
        Sparse-table lookup, automatic graph-modification mode.

        Registers `ids` as an anchor and returns a stub (mock) tensor standing in
        for the lookup result; the real forward graph is attached during graph
        modification.

        Args:
            ids: the tensor to look up.
            send_count: all-to-all communication parameter (may be None).
            **kwargs: lookup keyword-argument dict ("is_train", "is_grad",
                "modify_graph", "access_and_evict_config", ... are read).

        Returns: the stub lookup-result tensor.
        """
        is_training = kwargs.get("is_train")
        # After starting the pipeline, graph modification cannot be executed, hence no changes to the computational
        # graph are allowed. The exception is the scenario of exporting the pb model.
        experimental_mode = ConfigInitializer.get_instance().train_params_config.experimental_mode
        if experimental_mode is None and ConfigInitializer.get_instance().hybrid_manager_config.freeze and is_training:
            raise RuntimeError("Cannot build new sparse forward graph after emb cache management was built.")

        # record send count
        # eval_mode: pure-eval flow where no training channel has been registered.
        eval_mode = (
            not is_training
            and ConfigInitializer.get_instance().train_params_config.get_training_mode_channel_id(True) is None
        )
        if (
            is_training
            or eval_mode
            or "train_and_evaluate" in ConfigInitializer.get_instance().train_params_config.bool_gauge_set
        ):
            self._same_table_send_count += send_count if send_count is not None else 0

        # create feature spec
        feature_spec = get_feature_spec(self._table_name, kwargs.get("access_and_evict_config"))
        feature_spec.set_feat_attribute(ids, is_training)

        # record anchor ids
        anchor_ids = tf.identity(ids, name="ids")
        tf.compat.v1.add_to_collection(ASCEND_SPARSE_LOOKUP_ENTRANCE, anchor_ids)
        self.register_anchor_attribute(anchor_ids, feature_spec, kwargs)

        # set modify graph
        self._modify_graph = kwargs.get("modify_graph", True)
        if not self._modify_graph and not self._is_hbm:
            raise RuntimeError("when the 'ddr or ssd' mode are used, the 'modify graph' is required")

        # return the stub tensor of the lookup result
        if not self._use_static:
            kwargs["lookup_ids"] = ids
        mock_lookup_result = self._lookup_forward(feature_spec, send_count, **kwargs)
        mock_lookup_result = tf.identity(mock_lookup_result, name=ASCAnchorAttr.MOCK_LOOKUP_RESULT.value)
        if not kwargs.get("is_grad"):
            # Caller asked for no gradient flow through this lookup.
            mock_lookup_result = tf.stop_gradient(mock_lookup_result, name="mock_stop_grad_lookup_res")
        self.anchor_tensor_specs[anchor_ids][ASCAnchorAttr.MOCK_LOOKUP_RESULT] = mock_lookup_result
        logger.debug("Return the stub tensor `%s` of the `%s` table.", mock_lookup_result, self._table_name)
        return mock_lookup_result

    def lookup_for_feat_spec(self, feature_spec: FeatureSpec, send_count: Optional[int], **kwargs) -> tf.Tensor:
        """
        Sparse-table lookup, FeatureSpec mode.

        Results are cached per (feature-spec name, is_training); when several
        feature specs share one table, a single combined ("multi") lookup is
        issued and then split back per spec.

        Args:
            feature_spec: wrapper object describing this lookup's tensor.
            send_count: all-to-all communication parameter (may be None).
            **kwargs: lookup keyword-argument dict ("is_train", "is_grad",
                "batch", "feature_spec_name_ids_dict", ... are read).

        Returns: the lookup result tensor for `feature_spec`.
        """
        spec_name = feature_spec.name
        is_training = kwargs.get("is_train")
        # Fast path: result already built for this spec and mode.
        if spec_name in self._lookup_result and is_training in self._lookup_result.get(spec_name):
            lookup_result = self._lookup_result.get(spec_name).get(is_training)
            if not kwargs.get("is_grad"):
                return tf.stop_gradient(lookup_result, name="stop_grad_lookup_result")
            return lookup_result

        if not self._use_static and not self._modify_graph and kwargs.get("batch") is None:
            raise RuntimeError("when the 'feature spec' mode and 'dynamic shape' are used, the 'batch' is required")
        if not self._modify_graph and not self._is_hbm:
            raise RuntimeError("when the 'ddr or ssd' mode are used, the 'modify graph' is required")
        table_name = feature_spec.table_name
        same_table_feature_spec = ConfigInitializer.get_instance().feature_spec_config.table_name_to_feature_spec[
            table_name
        ][is_training]
        logger.debug(
            "The feature spec of the same table is %s, table name is %s.",
            ([fs.name for fs in same_table_feature_spec],),
            self._table_name,
        )

        same_table_spec_count = len(same_table_feature_spec)
        if same_table_spec_count == 0:
            raise RuntimeError(f"spec_name {spec_name} not in table {table_name}.")

        # Single-spec table: do a plain lookup and cache it.
        if same_table_spec_count == 1:
            lookup_result = self._lookup_forward(feature_spec, send_count, **kwargs)
            if spec_name not in self._lookup_result:
                self._lookup_result[spec_name] = {}
            if not kwargs.get("is_grad"):
                lookup_result = tf.stop_gradient(lookup_result, name="stop_grad_lookup_result")
            self._lookup_result[spec_name][is_training] = lookup_result
            return lookup_result

        # In graph-modification mode FeatureSpecs are created in lookup order, so the ids need no
        # sorting; in feature-spec mode FeatureSpecs are created manually and may be unordered.
        if not self._modify_graph:
            same_table_feature_spec = sorted(same_table_feature_spec, key=lambda x: x.name)

        # Combined lookup for all specs of this table via a temporary "mock" spec.
        access_threshold, evict_threshold = self._get_access_and_evict_threshold(same_table_feature_spec)
        mock_feature_spec = FeatureSpec(f"mock_feature_spec_{table_name}", table_name=table_name,
                                        access_threshold=access_threshold,
                                        eviction_threshold=evict_threshold)

        if self._use_static:
            tensor_list = []
            tensor_split_list = [feat_spec.split for feat_spec in same_table_feature_spec]
            total_feature_count = sum(tensor_split_list)
        else:
            # Dynamic shape: split sizes are computed from the runtime tensor shapes.
            tensor_list = self.__get_tensor_list(same_table_feature_spec, **kwargs)
            tensor_split_list = [tf.math.reduce_prod(array_ops.shape(tensor)) for tensor in tensor_list]
            total_feature_count = tf.add_n(tensor_split_list)
        set_temporary_feature_spec_attribute(mock_feature_spec, total_feature_count)

        kwargs["multi_lookup"] = True
        total_send_count = self._same_table_send_count if self._modify_graph else send_count * same_table_spec_count
        lookup_result = self._lookup_forward(mock_feature_spec, total_send_count, **kwargs)
        logger.debug("multi lookup table %s via %s.", table_name, tensor_split_list)
        self.__split_lookup_result(same_table_feature_spec, tensor_split_list, tensor_list, lookup_result, is_training)

        # After the multi-lookup of one table completes, clear its feature-spec list so that
        # repeated eval rounds in estimator mode do not accumulate specs from the previous round.
        ConfigInitializer.get_instance().feature_spec_config.clear_same_table_feature_spec(self.table_name, is_training)
        if not kwargs.get("is_grad"):
            return tf.stop_gradient(self._lookup_result.get(spec_name).get(is_training), name="stop_grad_lookup_res")
        return self._lookup_result.get(spec_name).get(is_training)

    def _init_sliced_variable(self):
        """
        Create (or re-attach) the per-rank TF variable backing this table's device slice
        and register it in the global hashtable collection.
        """
        # Re-attach path: cache management is frozen and the variable already exists by name.
        if (
            ConfigInitializer.get_instance().hybrid_manager_config.freeze
            and self._table_name in ConfigInitializer.get_instance().sparse_embed_config.name_to_var_dict
        ):
            self._variable = tf.compat.v1.get_variable(
                self._table_name, trainable=False, shape=(self._slice_device_vocabulary_size, self._emb_size)
            )

            experimental_mode = ConfigInitializer.get_instance().train_params_config.experimental_mode
            # In the export saved model mode, during the expansion scenario, variables also need to be recorded
            # during eval/predict. Otherwise, an empty variable will cause the saver to be created as None.
            if not experimental_mode and ConfigInitializer.get_instance().use_dynamic_expansion:
                return

            self._record(eval_flag=True)
            tf.compat.v1.add_to_collection(
                ConfigInitializer.get_instance().train_params_config.ascend_global_hashtable_collection,
                self._variable,
            )
            return

        # Fresh-creation path.
        check_emb_init_params(self._is_hbm, self._embedding_size)
        self._initialize_variables()
        tf.compat.v1.add_to_collection(
            ConfigInitializer.get_instance().train_params_config.ascend_global_hashtable_collection, self._variable
        )

    def _set_ext_emb_size(self):
        # Initially _ext_emb_size equals _emb_size (coefficient is 1); the graph-modification
        # stage may change this value depending on the optimizer in use.
        self._ext_emb_size = self._emb_size * self._ext_coefficient
        logger.debug("Init table, ext_emb_size is set to be %s.", self._ext_emb_size)

    def _get_preprocessed_tensor(self, feature_spec: FeatureSpec, channel_id: int, send_count: Optional[int]) -> dict:
        """
        Build the preprocessing config and run the ASC preprocessing step.

        Returns: the dict produced by `get_preprocessed_tensor_for_asc` (keys such as
            "id_offsets", "restore_vector", "unique_keys", "hot_pos", "all2all_args"
            are consumed by `_lookup_forward`).
        """
        config = dict(
            batch_size=feature_spec.batch_size,
            feat_cnt=feature_spec.feat_cnt,
            send_count=send_count,
            rank_size=self._rank_size,
            channel_id=channel_id,
            table_name=self._table_name,
            is_hbm=self._is_hbm,
            ext_emb_size=self._ext_emb_size,
            emb_size=self._emb_size,
            use_dynamic_expansion=ConfigInitializer.get_instance().use_dynamic_expansion,
            device_id=self._device_id,
            is_dp=self._is_dp,
        )

        return get_preprocessed_tensor_for_asc(self._variable, config)

    def _lookup_forward(self, feature_spec: FeatureSpec, send_count: Optional[int], **kwargs) -> tf.Tensor:
        """
        Build the forward (and custom backward) lookup subgraph for one feature spec.

        Args:
            feature_spec: spec describing the ids being looked up.
            send_count: all-to-all communication parameter (may be None).
            **kwargs: lookup keyword-argument dict ("is_train", "multi_lookup",
                "lookup_ids", "batch", ... are read).

        Returns: the embeddings tensor, reshaped to the ids' shape + [emb_size].
        """
        is_training = kwargs.get("is_train")
        hashtable_params = dict(
            slice_device_vocabulary_size=self._slice_device_vocabulary_size,
            slice_host_vocabulary_size=self._slice_host_vocabulary_size,
            send_count=send_count,
            table_name=self._table_name,
            is_hbm=self._is_hbm,
        )
        check_emb_lookup_params(hashtable_params, feature_spec, send_count, is_training)
        if ConfigInitializer.get_instance().use_static:
            self._send_count = send_count
        channel_id = ConfigInitializer.get_instance().train_params_config.get_training_mode_channel_id(is_training)
        result = self._get_preprocessed_tensor(feature_spec, channel_id, send_count)

        @tf.custom_gradient
        def sparse_forward(table):
            # Optionally average the all-to-all gradient across ranks.
            def compute_all2all_gradient(local_grad_):
                if self._all2all_gradients_op == All2allGradientsOp.SUM_GRADIENTS_AND_DIV_BY_RANKSIZE:
                    try:
                        local_grad_ = local_grad_ / get_rank_size()
                    except ZeroDivisionError as exp:
                        raise ZeroDivisionError("Rank size cannot be zero.") from exp
                return local_grad_

            # Backward path using the lccl unique-scatter-sum collective.
            def lccl_uss(bp_all2all_args, unique_grads):
                if self._use_static:
                    send_count_matrix = tf.constant(
                        [[bp_all2all_args * self._emb_size] * self._rank_size] * self._rank_size, dtype=tf.int64
                    )
                else:
                    send_count_matrix = bp_all2all_args
                unique_local_grad = host_pipeline_ops.lccl_all_uss(
                    send_data=unique_grads,
                    send_count_matrix=send_count_matrix,
                    shape_vec=result.get("unique_keys"),
                    peer_mem=self.peer_mem,
                    restore=result.get("restore_vector_second"),
                    rank=self._rank_id,
                    rank_size=self._rank_size,
                    dim=self._emb_size,
                )
                unique_local_grad = tf.reshape(unique_local_grad, [-1, self._emb_size])
                unique_local_grad = compute_all2all_gradient(unique_local_grad)
                return ops.IndexedSlices(
                    values=unique_local_grad, indices=result.get("unique_keys"), dense_shape=tf.shape(table)
                )

            # Custom gradient: re-aggregate per-id gradients, undo the hot/cold split,
            # then route them back to the owning ranks.
            # NOTE: this closure captures `all2all_args` and `unique_embeddings_shape`,
            # which are assigned below in the forward pass before `grad` can run.
            def grad(lookup_grad):
                logger.debug("Into lookup grad function, feature spec name: %s.", feature_spec.name)
                embedding_grad = tf.reshape(lookup_grad, [-1, self._emb_size])
                # Sum gradients of duplicate ids back into unique rows.
                unique_grads = tf.compat.v1.unsorted_segment_sum(
                    embedding_grad, result.get("restore_vector"), unique_embeddings_shape[0]
                )
                bp_all2all_args = all2all_args if (self._use_static or self.is_dp) else tf.transpose(all2all_args)
                # Undo the forward-pass hot/cold concat: fold "hot" rows back into their
                # positions within the "cold" block.
                hot, cold = tf.split(
                    unique_grads,
                    [
                        tf.shape(result.get("hot_pos"))[0],
                        tf.shape(unique_grads)[0] - tf.shape(result.get("hot_pos"))[0],
                    ],
                    axis=0,
                )
                unique_grads = tf.tensor_scatter_nd_add(cold, tf.expand_dims(result.get("hot_pos"), 1), hot)
                if not (
                    ConfigInitializer.get_instance().optimizer_config.optimizer_instance.derivative == 2
                    and ConfigInitializer.get_instance().use_lccl
                ):
                    local_grad = self.__get_own_emb(unique_grads, bp_all2all_args, result.get("id_offsets"), True)
                    local_grad = compute_all2all_gradient(local_grad)

                    return self._get_update_grad(local_grad, result, table)
                return lccl_uss(bp_all2all_args, unique_grads)

            logger.debug("fp rank size: %s", self._rank_size)
            all2all_args = send_count if self._use_static else result.get("all2all_args")

            # Forward gather path: lccl_gather_all fast path vs the generic local-gather +
            # all-to-all exchange.
            if (
                (not ConfigInitializer.get_instance().use_dynamic_expansion)
                and (not self._use_static)
                and ConfigInitializer.get_instance().use_lccl
            ):
                unique_embeddings_ = host_pipeline_ops.lccl_gather_all(
                    emb_table=table,
                    lookup=tf.abs(result.get("id_offsets")),
                    send_count_matrix=all2all_args,
                    shape_vec=result.get("unique_shape"),
                    peer_mem=self.peer_mem,
                    rank=self._rank_id,
                    rank_size=self._rank_size,
                    dim=self._emb_size,
                )
                unique_embeddings = tf.reshape(unique_embeddings_, [-1, self._emb_size])
            else:
                local_embeddings = self._get_local_embeddings(table, result, feature_spec, **kwargs)
                unique_embeddings = self.__get_own_emb(
                    local_embeddings, all2all_args, result.get("unique_shape"), False
                )
            # Prepend the rows at "hot" positions so hot ids are addressable at the front;
            # the backward pass undoes this (see `grad`).
            unique_embeddings = tf.concat(
                [tf.gather(unique_embeddings, result.get("hot_pos"), name="hot_pos"), unique_embeddings], axis=0
            )

            if self._use_static:
                unique_embeddings_shape = unique_embeddings.shape.as_list()
            else:
                unique_embeddings_shape = tf.shape(unique_embeddings)

            # Restore duplicates only after the hybrid-management notify op has run.
            notify_hybridmgmt_op = self.__generate_lookup_id_notify_hybrid(is_training)
            with tf.control_dependencies([notify_hybridmgmt_op]):
                embeddings = tf.gather(
                    unique_embeddings, result.get("restore_vector"), axis=0, name="gather_for_restore_vector"
                )

            if self._use_static:
                return tf.reshape(embeddings, feature_spec.dims + [self._emb_size]), grad

            if kwargs.get("multi_lookup"):
                # Multi-lookup results are split and reshaped per spec by the caller.
                return tf.reshape(embeddings, [-1, self._emb_size]), grad

            # Dynamic single lookup: derive the output shape from the ids tensor.
            feature_spec_tensor = None
            if not self._modify_graph:
                feature_spec_tensor = kwargs.get("batch").get(feature_spec.index_key)
            modify_graph_tensor = kwargs.get("lookup_ids")
            tensor = feature_spec_tensor if not self._modify_graph else modify_graph_tensor
            if tensor is None:
                raise KeyError(f"key or ids does not exist in batch, now modify graph is {self._modify_graph}.")
            dest_shape = array_ops.concat([array_ops.shape(tensor), [self._emb_size]], 0)

            return array_ops.reshape(embeddings, dest_shape), grad

        # Placeholder control op; the real DDR swap ops are wired in via SwapArgs later.
        ddr_control_ops = tf.no_op(name="place_holder_swap_op")
        swap_args = SwapArgs()
        swap_args.set_data(
            SwapDataType.CONTROL.value, var_name=self._table_name, var_channel=channel_id, control_ops=ddr_control_ops
        )
        with tf.control_dependencies([ddr_control_ops]):
            return self._get_sparse_forward_result(sparse_forward, self._variable, result, is_training)

    def _initialize_variables(self):
        """
        Create and record the freshly initialized table variable for this rank.
        """
        # NOTE(review): `int + TensorShape` appears to rely on TensorShape.__radd__
        # concatenating into the (slice_vocab, emb_size) shape passed to the
        # initializer — confirm against the TF version in use.
        initialized_tensor = (
            self._emb_initializer(self._slice_device_vocabulary_size + self._embedding_size) * self._init_param
        )
        self._variable = tf.compat.v1.get_variable(self._table_name, trainable=False, initializer=initialized_tensor)

        # make sure sparse table variable will not be saved and restored within tf checkpoint.
        ConfigInitializer.get_instance().sparse_embed_config.insert_removing_var_list(self._variable.name)

        self._record()

    def _record(self, eval_flag=False):
        """
        Register this table instance with the sparse-embed config and log the
        configured vocabulary sizes.

        Args:
            eval_flag: passed through to `insert_table_instance`.
        """
        ConfigInitializer.get_instance().sparse_embed_config.insert_table_instance(
            self._table_name, self._variable, self, eval_flag
        )
        logger.debug("Device vocabulary_size for table %s is %s.", self._table_name, self._device_vocabulary_size)
        logger.debug(
            "Slice_device_vocabulary_size for table %s is %s.", self._table_name, self._slice_device_vocabulary_size
        )
        logger.debug("Host vocabulary size for table %s is %s.", self._table_name, self._host_vocabulary_size)
        logger.debug(
            "Slice host vocabulary_size for table %s is %s.", self._table_name, self._slice_host_vocabulary_size
        )
        logger.debug("SSD vocabulary size for table %s is %s.", self._table_name, self._ssd_vocabulary_size)
        logger.debug("Slice ssd vocabulary_size for table %s is %s.", self._table_name, self._slice_ssd_vocabulary_size)

    def __get_own_emb(
        self, emb: tf.Tensor, all2all_args: Union[int, tf.Tensor], vec_info: tf.Tensor, is_back: bool
    ) -> tf.Tensor:
        """
        Exchange embedding rows (forward) or gradients (backward) between ranks via
        all-to-all, then reshape to [-1, emb_size] (or the static shape).

        Args:
            emb: rows to exchange.
            all2all_args: per-rank send count (static mode) or send-count matrix.
            vec_info: shape vector passed to the lccl op in dynamic mode.
            is_back: True for the backward (gradient) direction.
        """
        src_emb = emb
        reshape_info = (
            [all2all_args * self._rank_size, self._emb_size]
            if (self._use_static and not self.is_dp)
            else [-1, self._emb_size]
        )

        # The single-server static shape cases and dp cases do not require alltoall.
        if (self._rank_size == 1 and self._use_static) or self.is_dp:
            return tf.reshape(src_emb, reshape_info)

        if self._use_static:
            # Static mode: every rank sends/receives the same fixed element count.
            emb_send_cnt = tf.constant([all2all_args * self._emb_size] * self._rank_size, dtype=tf.int64)
            emb_send_offset = tf.constant(
                [all2all_args * self._emb_size * i for i in range(self._rank_size)], dtype=tf.int64
            )
            if ConfigInitializer.get_instance().use_lccl and is_back:
                send_count_matrix = tf.constant(
                    [[all2all_args * self._emb_size] * self._rank_size] * self._rank_size, dtype=tf.int64
                )
                vec_info = tf.constant([1] * all2all_args * self._rank_size, dtype=tf.int32)
                vec_info = tf.reshape(vec_info, [-1, 1])
                src_emb = host_pipeline_ops.lccl_all_to_all(
                    send_data=emb,
                    send_count_matrix=send_count_matrix,
                    shape_vec=vec_info,
                    peer_mem=self.peer_mem,
                    rank=self._rank_id,
                    rank_size=self._rank_size,
                    dim=self._emb_size,
                )
            else:
                src_emb = hccl_ops.all_to_all_v(
                    send_data=emb,
                    send_counts=emb_send_cnt,
                    send_displacements=emb_send_offset,
                    recv_counts=emb_send_cnt,
                    recv_displacements=emb_send_offset,
                )
        else:
            # Dynamic mode: counts come from the runtime all2all_args matrix.
            if ConfigInitializer.get_instance().use_lccl:
                src_emb = host_pipeline_ops.lccl_all_to_all(
                    send_data=emb,
                    send_count_matrix=all2all_args,
                    shape_vec=vec_info,
                    peer_mem=self.peer_mem,
                    rank=self._rank_id,
                    rank_size=self._rank_size,
                    dim=self._emb_size,
                )
            else:
                src_emb = hccl_ops.all_to_all_v_c(send_data=emb, send_count_matrix=all2all_args, rank=self._rank_id)

        return tf.reshape(src_emb, reshape_info)

    def __get_tensor_list(self, same_table_feature_spec: list, **kwargs) -> list:
        """
        Collect the ids tensor for each feature spec of one table.

        In feature-spec mode tensors come from kwargs["batch"] keyed by
        `feat_spec.index_key`; in graph-modification mode they come from
        kwargs["feature_spec_name_ids_dict"] keyed by `feat_spec.name`.

        Raises:
            KeyError: if the tensor dict or an expected key is missing.
        """
        same_table_tensor_list = []
        for feat_spec in same_table_feature_spec:
            feature_spec_tensor_dict = kwargs.get("batch")
            modify_graph_tensor_dict = kwargs.get("feature_spec_name_ids_dict")
            batch_tensor_dict = feature_spec_tensor_dict if not self._modify_graph else modify_graph_tensor_dict
            if batch_tensor_dict is None:
                raise KeyError(
                    f"The tensor dict of batch does not exist in kwargs, and modify graph is `{self._modify_graph}`."
                )

            feature_spec_tensor = batch_tensor_dict.get(feat_spec.index_key)
            modify_graph_tensor = batch_tensor_dict.get(feat_spec.name)
            tensor = feature_spec_tensor if not self._modify_graph else modify_graph_tensor
            if tensor is None:
                tensor_key = feat_spec.index_key if not self._modify_graph else feat_spec.name
                raise KeyError(f"Key `{tensor_key}` does not exist in batch_tensor_dict.")
            same_table_tensor_list.append(tensor)
        return same_table_tensor_list

    def __split_lookup_result(
        self,
        same_table_feature_spec: list,
        tensor_split_list: list,
        tensor_list: list,
        lookup_result: tf.Tensor,
        is_training: bool,
    ):
        """
        Split a combined multi-lookup result back into per-spec tensors, reshape
        each to its spec's shape + [emb_size], and cache them in _lookup_result.

        Raises:
            RuntimeError: if the split count does not match the number of specs
                (or, in dynamic mode, the number of ids tensors).
        """
        lookup_result_split = tf.split(lookup_result, tensor_split_list)
        if len(lookup_result_split) != len(same_table_feature_spec) or (
            not self._use_static and len(same_table_feature_spec) != len(tensor_list)
        ):
            raise RuntimeError(
                f"shape not match. len(lookup_result_split): {len(lookup_result_split)},"
                f"len(same_table_feature_spec): {len(same_table_feature_spec)}"
                f"len(tensor_list): {len(tensor_list)}"
            )
        for idx, (one_feature_spec, one_result) in enumerate(zip(same_table_feature_spec, lookup_result_split)):
            if one_feature_spec.name not in self._lookup_result:
                self._lookup_result[one_feature_spec.name] = {}
            if self._use_static:
                dest_shape = one_feature_spec.dims + [self._emb_size]
            else:
                dest_shape = array_ops.concat([array_ops.shape(tensor_list[idx]), [self._emb_size]], 0)
            self._lookup_result[one_feature_spec.name][is_training] = array_ops.reshape(one_result, dest_shape)

    def __generate_lookup_id_notify_hybrid(self, is_training: bool):
        """
        Stub op used as a marker node: its name identifies whether this sparse lookup
        belongs to the train or the eval channel. At session-run time the op is found
        by traversing the graph backwards, and its name tells which channel the run
        uses, so the C++ side can be notified to count and wake up.

        Args:
            is_training: whether the current flow is training or inference.

        Returns: a tf.no_op() with the channel-specific name.
        """
        channel_id = ConfigInitializer.get_instance().train_params_config.get_training_mode_channel_id(is_training)
        channel_name = "d2h_notify_hybridmgmt_{}".format(channel_id)
        notify_hybridmgmt_op = tf.no_op(channel_name)
        logger.debug("The notify hybridmgmg op of table `%s` is `%s`.", self._table_name, notify_hybridmgmt_op.name)
        return notify_hybridmgmt_op
