# -*- coding: utf-8 -*-

import time
import argparse
import tensorflow as tf
import os
import sys
import math
import collections
from tensorflow.python.client import timeline
from rank_model_tf.common.DNN import DNN
import json
from model.embedding.EmbeddingLayer import EmbeddingLayer
import model.io.data_util_v1 as data_util

from tensorflow.python.ops import partitioned_variables
import logging

logging.basicConfig(level=logging.INFO,
                   format = '%(asctime)s - %(levelname)s - %(message)s')



# Sparse-feature slot ids shared by the input pipeline (input_fn) and the
# embedding layer (MyModel_deeprec.build_embedding_layer); every slot gets
# its own hashed embedding table.
hash_slots = [1,2,100,101,102,103,104,105,106,200,201,202,203,204,205,206,207,208,209,210,211,212]


def input_fn(data_files, batch_size, epoch=1, key_max_size=100000000):
    """Thin wrapper over data_util.input_fn that pins this model's hash slots."""
    return data_util.input_fn(
        data_files,
        hash_slots=hash_slots,
        batch_size=batch_size,
        epoch=epoch,
        key_max_size=key_max_size,
    )




class MyModel(object):
    """Dense DNN tower: consumes a concatenated embedding tensor, emits one logit."""

    def __init__(self) -> None:
        pass

    def set_inputs(self, input_layer):
        # Stash the dense input tensor; build_model() reads it later.
        self._input_layer = input_layer

    def build_model(self):
        # Four-layer MLP whose final unit is the raw (pre-sigmoid) logit.
        dnn = DNN([1024, 512, 256, 1], name="dnn")
        self.logit = dnn.logit(self._input_layer)





class MyModel_deeprec(object):
    """DeepRec training graph: sparse embedding lookups plus a dense DNN tower.

    Dense variables live under the "dense" variable scope and embedding
    variables under the embedding layer's own scope, so build_train_op() can
    assign a separate optimizer to each group.
    """

    def __init__(self, num_ps_replicas=None) -> None:
        self.dense_var_scope = "dense"
        self.sparse_var_scope = "sparse"
        self.is_training = True
        self._num_ps_replicas = num_ps_replicas
        # Embedding dimension per hash slot.
        self._embedding_size = 16

        # self._input_layer_partitioner = partitioned_variables.min_max_variable_partitioner(
        #                                                         max_partitions=num_ps_replicas,
        #                                                         min_slice_size=8 <<
        #                                                         20) if self._num_ps_replicas is not None and self._num_ps_replicas>0 else None
        
        # Shard input-layer variables evenly across PS replicas; no partitioner
        # when running single-process (num_ps_replicas unset or 0).
        self._input_layer_partitioner = partitioned_variables.fixed_size_partitioner(num_shards=self._num_ps_replicas) if self._num_ps_replicas is not None and self._num_ps_replicas>0 else None

    def build(self, features, labels, keys):
        """Assemble the full graph: embeddings, DNN, loss, and train op.

        NOTE(review): `keys` is accepted but never used here — confirm that is
        intentional (it comes from the input pipeline alongside features/labels).
        """
        self.global_step = tf.train.get_or_create_global_step()
        self.feature = features
        self.label = labels
        # Embedding lookups are pinned to CPU.
        with tf.device('/cpu:0'):
            self.build_embedding_layer()
        
        with tf.variable_scope(self.dense_var_scope, reuse=tf.AUTO_REUSE):
            self._model = MyModel()
            self._model.set_inputs(self._input_layer)
            self._model.build_model()
            self.build_loss()
            self.build_train_op()


    def build_embedding_layer(self):
        """Look up sum-pooled embeddings for every hash slot and concat them."""
        logging.info("_num_ps_replicas:{}".format(self._num_ps_replicas))
        # Every slot uses "sum" pooling over its sparse values.
        pooling_strategy = dict([(slot, "sum") for slot in hash_slots])
        self._embedding_layer = EmbeddingLayer(slot_ids=hash_slots,
                                               embedding_size=self._embedding_size,
                                               step_evict=None,
                                               filter_freq=None,
                                               pooling_strategy=pooling_strategy)
        pooling_embs, not_pooling_embs = self._embedding_layer.getEmbs(self.feature)
        # One pooled embedding per slot concatenated on the last axis —
        # presumably [batch, len(hash_slots) * embedding_size]; confirm against
        # EmbeddingLayer.getEmbs.
        self._input_layer = tf.concat([pooling_embs[slot].embedding for slot in hash_slots], axis=-1)

        
        
    def build_loss(self):
        """Mean sigmoid cross-entropy over the first label column."""
        # Only column 0 of the label tensor is the binary target.
        label = tf.slice(self.label, [0,0], [-1,1])
        self.not_mean_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=label, logits=self._model.logit)
        # self.total_loss = tf.reduce_sum(self.not_mean_loss)
        self.total_loss = tf.reduce_mean(self.not_mean_loss)
        # tf.print('loss: %f',self.total_loss)         # use tf.print() instead of print


    def build_train_op(self):
        """Two optimizers: Adam for the dense tower, FTRL for embeddings."""
        dense_opt = tf.compat.v1.train.AdamOptimizer(learning_rate=0.00005)
        # sparse_opt = tf.compat.v1.train.FtrlOptimizer(learning_rate=0.002, l1_regularization_strength=0.002, l2_regularization=0.0)
        sparse_opt = tf.compat.v1.train.FtrlOptimizer(learning_rate=0.002, l1_regularization_strength=0.002)
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        train_ops = []
        with tf.control_dependencies(update_ops):
            # Only the dense optimizer advances global_step, so it increments
            # exactly once per joint training step.
            train_ops.append(dense_opt.minimize(self.total_loss, global_step=self.global_step, 
                                                var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.dense_var_scope)))
            train_ops.append(sparse_opt.minimize(self.total_loss, 
                                                var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self._embedding_layer._layer_name)))
            self.train_op = tf.group(*train_ops)







# generate dataset pipline
def build_model_input(filename, batch_size, num_epochs):
    """Build a batched tf.data pipeline over a Criteo-style CSV file.

    NOTE(review): references module-level globals (TRAIN_DATA_COLUMNS,
    LABEL_COLUMN) that are not defined anywhere in this file — this function
    appears to be leftover sample code and would raise NameError if called;
    confirm whether it is still needed.
    """
    def parse_csv(value):
        # Decode a batch of CSV lines into (features dict, labels tensor).
        tf.logging.info('Parsing {}'.format(filename))
        # Criteo layout: 13 continuous columns default to 0.0, 26 categorical
        # columns default to ' ', and the leading label defaults to 0.
        cont_defaults = [[0.0] for i in range(1, 14)]
        cate_defaults = [[' '] for i in range(1, 27)]
        label_defaults = [[0]]
        column_headers = TRAIN_DATA_COLUMNS
        record_defaults = label_defaults + cont_defaults + cate_defaults
        columns = tf.io.decode_csv(value, record_defaults=record_defaults)
        all_columns = collections.OrderedDict(zip(column_headers, columns))
        labels = all_columns.pop(LABEL_COLUMN[0])
        features = all_columns
        return features, labels


    '''Work Queue Feature'''
    if args.workqueue and not args.tf:
        from tensorflow.python.ops.work_queue import WorkQueue
        work_queue = WorkQueue([filename], num_epochs=num_epochs)
        # For multiple files:
        # work_queue = WorkQueue([filename, filename1,filename2,filename3])
        files = work_queue.input_dataset()
    else:
        files = filename
    # Extract lines from input files using the Dataset API.
    dataset = tf.data.TextLineDataset(files)
    # dataset = dataset.shuffle(buffer_size=20000,
    #                               seed=args.seed)  # fix seed for reproducing
    if not args.workqueue:
        dataset = dataset.repeat(num_epochs)
    dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
    # Batch BEFORE parsing so decode_csv operates on whole batches at once.
    dataset = dataset.batch(batch_size)
    dataset = dataset.map(parse_csv, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    # dataset = dataset.map(parse_csv, num_parallel_calls=28)
    # dataset = dataset.prefetch(2)
    dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
    return dataset


# generate feature columns
def build_feature_columns():
    """Build wide and deep feature columns for the Criteo dataset.

    Returns:
        (wide_columns, deep_columns): lists of tf.feature_column objects.
            Categorical features contribute a hash column (wide) plus an
            embedding column (deep); continuous features contribute a
            min-max-normalized numeric column to both lists.

    NOTE(review): depends on module-level globals (args, FEATURE_COLUMNS,
    CATEGORICAL_COLUMNS, CONTINUOUS_COLUMNS, HASH_BUCKET_SIZES,
    EMBEDDING_DIMENSIONS) that are not defined in this file — this looks
    like leftover sample code; confirm the globals exist before calling.
    """
    # Notes: Statistics of Kaggle's Criteo Dataset has been calculated in advance to save time.
    mins_list = [
        0.0, -3.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    ]
    range_list = [
        1539.0, 22069.0, 65535.0, 561.0, 2655388.0, 233523.0, 26297.0, 5106.0,
        24376.0, 9.0, 181.0, 1807.0, 6879.0
    ]

    def make_minmaxscaler(min, range):
        # Closure performing (col - min) / range normalization.
        def minmaxscaler(col):
            return (col - min) / range

        return minmaxscaler

    deep_columns = []
    wide_columns = []

    def _add_categorical(column_name):
        """Append wide (hash) + deep (embedding) columns for one categorical feature."""
        categorical_column = tf.feature_column.categorical_column_with_hash_bucket(
            column_name, hash_bucket_size=10000, dtype=tf.string)
        wide_columns.append(categorical_column)

        if not args.tf:
            '''Feature Elimination of EmbeddingVariable Feature'''
            if args.ev_elimination == 'gstep':
                # Feature elimination based on global steps
                evict_opt = tf.GlobalStepEvict(steps_to_live=4000)
            elif args.ev_elimination == 'l2':
                # Feature elimination based on l2 weight
                evict_opt = tf.L2WeightEvict(l2_weight_threshold=1.0)
            else:
                evict_opt = None
            '''Feature Filter of EmbeddingVariable Feature'''
            if args.ev_filter == 'cbf':
                # CBF-based feature filter
                filter_option = tf.CBFFilter(
                    filter_freq=3,
                    max_element_size=2**30,
                    false_positive_probability=0.01,
                    counter_type=tf.int64)
            elif args.ev_filter == 'counter':
                # Counter-based feature filter
                filter_option = tf.CounterFilter(filter_freq=3)
            else:
                filter_option = None
            ev_opt = tf.EmbeddingVariableOption(
                evict_option=evict_opt, filter_option=filter_option)

            if args.ev:
                '''Embedding Variable Feature'''
                categorical_column = tf.feature_column.categorical_column_with_embedding(
                    column_name, dtype=tf.string, ev_option=ev_opt)
            elif args.adaptive_emb:
                '''                 Adaptive Embedding Feature Part 2 of 2
                Except the code below, a dict 'adaptive_mask_tensors' is needed as the input of
                'tf.feature_column.input_layer(adaptive_mask_tensors=adaptive_mask_tensors)'.
                For column 'COL_NAME', the value of adaptive_mask_tensors['$COL_NAME'] is an int32
                tensor with shape [batch_size].
                '''
                categorical_column = tf.feature_column.categorical_column_with_adaptive_embedding(
                    column_name,
                    hash_bucket_size=HASH_BUCKET_SIZES[column_name],
                    dtype=tf.string,
                    ev_option=ev_opt)
            elif args.dynamic_ev:
                '''Dynamic-dimension Embedding Variable'''
                print(
                    "Dynamic-dimension Embedding Variable isn't really enabled in model."
                )
                sys.exit()

        if args.tf or not args.emb_fusion:
            embedding_column = tf.feature_column.embedding_column(
                categorical_column,
                dimension=EMBEDDING_DIMENSIONS[column_name],
                combiner='mean')
        else:
            '''Embedding Fusion Feature'''
            embedding_column = tf.feature_column.embedding_column(
                categorical_column,
                dimension=EMBEDDING_DIMENSIONS[column_name],
                combiner='mean',
                do_fusion=args.emb_fusion)

        deep_columns.append(embedding_column)

    def _add_continuous(column_name):
        """Append a min-max normalized numeric column to both wide and deep lists."""
        i = CONTINUOUS_COLUMNS.index(column_name)
        normalizer_fn = make_minmaxscaler(mins_list[i], range_list[i])
        column = tf.feature_column.numeric_column(
            column_name, normalizer_fn=normalizer_fn, shape=(1, ))
        wide_columns.append(column)
        deep_columns.append(column)

    def _add_all_columns():
        # Shared per-column construction; previously this whole loop body was
        # duplicated verbatim inside and outside the group_embedding scope.
        for column_name in FEATURE_COLUMNS:
            if column_name in CATEGORICAL_COLUMNS:
                _add_categorical(column_name)
            else:
                _add_continuous(column_name)

    if args.group_embedding and not args.tf:
        # Group Embedding: build the columns inside a shared embedding scope.
        with tf.feature_column.group_embedding_column_scope(name="categorical"):
            _add_all_columns()
    else:
        _add_all_columns()

    return wide_columns, deep_columns




def train(sess_config,
          input_hooks,
          model,
          data_init_op,
          checkpoint_dir,
          tf_config=None,
          server=None):
    """Run the main training loop under a MonitoredTrainingSession.

    Args:
        sess_config: tf.ConfigProto for the session.
        input_hooks: hooks supplied by the input pipeline.
        model: built MyModel_deeprec instance (exposes global_step,
            total_loss, train_op).
        data_init_op: op grouped into the scaffold's local init.
        checkpoint_dir: directory for checkpoints/summaries/timelines.
        tf_config: distributed config dict (None for local runs).
        server: tf.distribute.Server (None for local runs).
    """
    model.is_training = True

    # Hook order: input hooks first, then logging, then optional profiler.
    session_hooks = list(input_hooks)
    session_hooks.append(
        tf.train.LoggingTensorHook(
            {
                'steps': model.global_step,
                'loss': model.total_loss
            }, every_n_iter=100))
    if args.timeline > 0:
        session_hooks.append(
            tf.train.ProfilerHook(save_steps=args.timeline,
                                  output_dir=checkpoint_dir))

    scaffold = tf.train.Scaffold(
        local_init_op=tf.group(tf.local_variables_initializer(), data_init_op),
        saver=tf.train.Saver(max_to_keep=args.keep_checkpoint_max))

    save_steps = args.save_steps if args.save_steps or args.no_eval else 10000

    # Incremental_Checkpoint: to enable, pass
    # `save_incremental_checkpoint_secs=args.incremental_ckpt` to
    # tf.train.MonitoredTrainingSession below (it defaults to None).
    if args.incremental_ckpt and not args.tf:
        print("Incremental_Checkpoint is not really enabled.")
        print("Please see the comments in the code.")
        sys.exit()

    with tf.train.MonitoredTrainingSession(
            master=server.target if server else '',
            is_chief=tf_config['is_chief'] if tf_config else True,
            hooks=session_hooks,
            scaffold=scaffold,
            checkpoint_dir=checkpoint_dir,
            save_checkpoint_steps=save_steps,
            summary_dir=checkpoint_dir,
            save_summaries_steps=args.save_steps,
            config=sess_config) as sess:
        while not sess.should_stop():
            sess.run([model.total_loss, model.train_op])
    logging.info("Training completed.")




def _dummy_train(sess_config, 
                data_init_op,
                model:MyModel_deeprec
                ):
    """Minimal local training loop for debugging.

    Runs without checkpoints, summaries, or extra hooks, printing the loss
    each step and stopping after at most 10000 steps.
    """
    scaffold = tf.train.Scaffold(
        local_init_op=tf.group(tf.local_variables_initializer(), data_init_op),
        saver=tf.train.Saver(max_to_keep=args.keep_checkpoint_max))

    with tf.train.MonitoredTrainingSession(
            master='',
            is_chief=True,
            hooks=[],
            scaffold=scaffold,
            config=sess_config) as sess:
        step = 0
        while not sess.should_stop() and step < 10000:
            print(sess.run([model.total_loss, model.train_op]))
            step += 1
    print("Training completed.")




def main(tf_config=None, server=None):
    """Build the input pipeline and model, then run training.

    Args:
        tf_config: dict with cluster info ('ps_hosts', 'is_chief', ...) when
            running distributed; None for a single-process run.
        server: tf.distribute.Server for distributed runs; None otherwise.
    """
    # NOTE(review): train_file/test_file are derived from args.data_location
    # but never used below — the pipeline reads the hardcoded "data/hash/"
    # directory instead. Confirm which data source is intended.
    train_file = args.data_location
    test_file = args.data_location
    train_file += '/train.csv'
    test_file += '/eval.csv'

    batch_size =  args.batch_size

    tf.set_random_seed(args.seed)

    model_dir = os.path.join(args.output_dir,
                             'taobao_' + str(int(time.time())))
    checkpoint_dir = args.checkpoint if args.checkpoint else model_dir
    print("Saving model checkpoints to " + checkpoint_dir)

    # create data pipline of train & test dataset
    data_path="data/hash/"
    train_files = os.listdir(data_path)
    train_files = [data_path+f for f in train_files]
    logging.info(train_files)
    # Input ops are pinned to CPU.
    with tf.device('/cpu:0'):
        features, labels, keys = input_fn(train_files, batch_size=batch_size)


    num_ps_replicas = len(tf_config['ps_hosts']) if tf_config else 0
    # Session config
    sess_config = tf.ConfigProto()
    # sess_config.log_device_placement=True
    if tf_config:
        # Workers only need connectivity to PS tasks.
        sess_config.device_filters.append("/job:ps")
    sess_config.inter_op_parallelism_threads = args.inter
    sess_config.intra_op_parallelism_threads = args.intra

    # Session hooks
    hooks = []

    if args.smartstaged and not args.tf:
        '''Smart staged Feature'''
        # NOTE(review): `next_element` is never defined in this function, so
        # this branch raises NameError when --smartstaged is enabled —
        # leftover from the sample code this file was adapted from.
        next_element = tf.staged(next_element, num_threads=4, capacity=40)
        sess_config.graph_options.optimizer_options.do_smart_stage = True
        hooks.append(tf.make_prefetch_hook())
    if args.op_fusion and not args.tf:
        '''Auto Graph Fusion'''
        sess_config.graph_options.optimizer_options.do_op_fusion = True
    if args.micro_batch and not args.tf:
        '''Auto Mirco Batch'''
        sess_config.graph_options.optimizer_options.micro_batch_num = args.micro_batch

    # create model
    model = MyModel_deeprec(num_ps_replicas)
    model.build(features, labels, keys)


    # Run model training and evaluation+
    train_init_op = tf.no_op()
    train(sess_config, hooks, model, train_init_op,
          checkpoint_dir, tf_config, server)





def __main(tf_config=None, server=None):
    """Debug entry point: pull a few batches from the input pipeline and print them."""
    tf.set_random_seed(args.seed)

    # Data pipeline over a single local shard, tiny batch for inspection.
    lines = input_fn(["data/hash/0"], batch_size=2)

    sess_config = tf.ConfigProto()
    sess_config.inter_op_parallelism_threads = args.inter
    sess_config.intra_op_parallelism_threads = args.intra

    scaffold = tf.train.Scaffold(local_init_op=tf.local_variables_initializer())

    with tf.train.MonitoredTrainingSession(
            master='',
            is_chief=True,
            hooks=[],
            scaffold=scaffold,
            config=sess_config) as sess:
        # Print at most three batches, stopping early if the pipeline ends.
        for _ in range(3):
            if sess.should_stop():
                break
            print(sess.run([lines]))













def boolean_string(string):
    """Parse a case-insensitive 'true'/'false' string into a bool.

    Raises:
        ValueError: if the string is neither 'true' nor 'false'.
    """
    normalized = string.lower()
    if normalized == 'true':
        return True
    if normalized == 'false':
        return False
    raise ValueError('Not a valid boolean string')

# Build the CLI argument parser for this training script.
def get_arg_parser():
    """Return the argparse parser for all training/DeepRec feature flags.

    Fixes: the --intra help text previously said "inter" (copied from
    --inter), and a couple of "Defualt" typos in help strings.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_location',
                        help='Full path of train data',
                        required=False,
                        default='./data')
    parser.add_argument('--steps',
                        help='set the number of steps on train dataset',
                        type=int,
                        default=0)
    parser.add_argument('--batch_size',
                        help='Batch size to train. Default is 512',
                        type=int,
                        default=1)
    parser.add_argument('--output_dir',
                        help='Full path to model output directory. \
                            Default to ./result. Covered by --checkpoint. ',
                        required=False,
                        default='./result')
    parser.add_argument('--checkpoint',
                        help='Full path to checkpoints input/output. \
                            Default to ./result/$MODEL_TIMESTAMP',
                        required=False)
    parser.add_argument('--save_steps',
                        help='set the number of steps on saving checkpoints',
                        type=int,
                        default=0)
    parser.add_argument('--seed',
                        help='set the random seed for tensorflow',
                        type=int,
                        default=2021)
    parser.add_argument('--optimizer',
                        type=str, \
                        choices=['adam', 'adamasync', 'adagraddecay', 'adagrad'],
                        default='adamasync')
    parser.add_argument('--linear_learning_rate',
                        help='Learning rate for linear model',
                        type=float,
                        default=0.2)
    parser.add_argument('--deep_learning_rate',
                        help='Learning rate for deep model',
                        type=float,
                        default=0.01)
    parser.add_argument('--keep_checkpoint_max',
                        help='Maximum number of recent checkpoint to keep',
                        type=int,
                        default=1)
    parser.add_argument('--timeline',
                        help='number of steps on saving timeline. Default 0',
                        type=int,
                        default=0)
    parser.add_argument('--protocol',
                        type=str,
                        choices=['grpc', 'grpc++', 'star_server'],
                        default='star_server')
    parser.add_argument('--inter',
                        help='set inter op parallelism threads.',
                        type=int,
                        default=0)
    parser.add_argument('--intra',
                        help='set intra op parallelism threads.',
                        type=int,
                        default=0)
    parser.add_argument('--input_layer_partitioner', \
                        help='slice size of input layer partitioner, units MB. Default 8MB',
                        type=int,
                        default=8)
    parser.add_argument('--dense_layer_partitioner', \
                        help='slice size of dense layer partitioner, units KB. Default 16KB',
                        type=int,
                        default=16)
    parser.add_argument('--bf16',
                        help='enable DeepRec BF16 in deep model. Default FP32',
                        action='store_true')
    parser.add_argument('--no_eval',
                        help='not evaluate trained model by eval dataset.',
                        action='store_true')
    parser.add_argument('--tf', \
                        help='Use TF 1.15.5 API and disable DeepRec feature to run a baseline.',
                        action='store_true')
    parser.add_argument('--smartstaged', \
                        help='Whether to enable smart staged feature of DeepRec, Default to True.',
                        type=boolean_string,
                        default=False)
    parser.add_argument('--emb_fusion', \
                        help='Whether to enable embedding fusion, Default to True.',
                        type=boolean_string,
                        default=False)
    parser.add_argument('--ev', \
                        help='Whether to enable DeepRec EmbeddingVariable. Default False.',
                        type=boolean_string,
                        default=False)
    parser.add_argument('--ev_elimination', \
                        help='Feature Elimination of EmbeddingVariable Feature. Default closed.',
                        type=str,
                        choices=[None, 'l2', 'gstep'],
                        default=None)
    parser.add_argument('--ev_filter', \
                        help='Feature Filter of EmbeddingVariable Feature. Default closed.',
                        type=str,
                        choices=[None, 'counter', 'cbf'],
                        default=None)
    parser.add_argument('--op_fusion', \
                        help='Whether to enable Auto graph fusion feature. Default to True',
                        type=boolean_string,
                        default=False)
    parser.add_argument('--micro_batch',
                        help='Set num for Auto Mirco Batch. Default close.',
                        type=int,
                        default=0)  #TODO: Defautl to True
    parser.add_argument('--adaptive_emb', \
                        help='Whether to enable Adaptive Embedding. Default to False.',
                        type=boolean_string,
                        default=False)
    parser.add_argument('--dynamic_ev', \
                        help='Whether to enable Dynamic-dimension Embedding Variable. Default to False.',
                        type=boolean_string,
                        default=False)#TODO:enable
    parser.add_argument('--incremental_ckpt', \
                        help='Set time of save Incremental Checkpoint. Default 0 to close.',
                        type=int,
                        default=0)
    parser.add_argument('--workqueue', \
                        help='Whether to enable Work Queue. Default to False.',
                        type=boolean_string,
                        default=False)
    parser.add_argument("--parquet_dataset", \
                        help='Whether to enable Parquet DataSet. Default to True.',
                        type=boolean_string,
                        default=False)
    parser.add_argument("--parquet_dataset_shuffle", \
                        help='Whether to enable shuffle operation for Parquet Dataset. Default to False.',
                        type=boolean_string,
                        default=False)
    parser.add_argument("--group_embedding", \
                        help='Whether to enable Group Embedding. Default to None.',
                        type=str,
                        choices=[None, 'localized', 'collective'],
                        default=None)

    return parser






# Parse distributed training configuration and generate cluster information
def generate_cluster_info(TF_CONFIG):
    """Parse the TF_CONFIG JSON string and start a tf.distribute.Server.

    For 'ps' tasks this blocks forever in server.join(). For 'worker' (and
    'chief', which is remapped to worker 0) it returns
    (tf_config dict, server, tf.device context) for the caller to train under.
    Exits the process on malformed config or unknown task type.
    """
    print(TF_CONFIG)
    tf_config = json.loads(TF_CONFIG)
    cluster_config = tf_config.get('cluster')
    ps_hosts = []
    worker_hosts = []
    chief_hosts = []
    for key, value in cluster_config.items():
        if 'ps' == key:
            ps_hosts = value
        elif 'worker' == key:
            worker_hosts = value
        elif 'chief' == key:
            chief_hosts = value
    # The chief (if any) is folded in as worker 0.
    if chief_hosts:
        worker_hosts = chief_hosts + worker_hosts

    if not ps_hosts or not worker_hosts:
        print('TF_CONFIG ERROR')
        sys.exit()
    task_config = tf_config.get('task')
    task_type = task_config.get('type')
    # Plain workers are shifted by one index when a chief occupies slot 0.
    task_index = task_config.get('index') + (1 if task_type == 'worker'
                                             and chief_hosts else 0)

    if task_type == 'chief':
        task_type = 'worker'

    is_chief = True if task_index == 0 else False
    cluster = tf.train.ClusterSpec({'ps': ps_hosts, 'worker': worker_hosts})
    server = tf.distribute.Server(cluster,
                                  job_name=task_type,
                                  task_index=task_index,
                                  protocol=args.protocol)
    if task_type == 'ps':
        # Parameter servers never return from join().
        server.join()
    elif task_type == 'worker':
        tf_config = {
            'ps_hosts': ps_hosts,
            'worker_hosts': worker_hosts,
            'type': task_type,
            'index': task_index,
            'is_chief': is_chief
        }
        # Place variables on PS (CPU) and ops on this worker.
        tf_device = tf.device(
            tf.train.replica_device_setter(
                worker_device='/job:worker/task:%d' % task_index,
                # worker_device='/job:worker/task:%d/gpu:0' % task_index,
                ps_device="/job:ps/cpu:0",
                cluster=cluster))
        return tf_config, server, tf_device
    else:
        print("Task type or index error.")
        sys.exit()




# Some DeepRec features are toggled through environment variables;
# this helper sets them before the graph is built.
def set_env_for_DeepRec():
    """Set environment variables that enable DeepRec features.

    See https://deeprec.readthedocs.io/zh/latest/index.html for details.
    START_STATISTIC_STEP / STOP_STATISTIC_STEP: on CPU, DeepRec's memory
    optimization collects statistics between these steps; lowering them from
    the 1000/1100 defaults reduces cold-start time.
    MALLOC_CONF: jemalloc tuning; preload the library via
    `LD_PRELOAD=./libjemalloc.so.2 python ...` for it to take effect.
    """
    os.environ.update({
        'START_STATISTIC_STEP': '100',
        'STOP_STATISTIC_STEP': '110',
        'MALLOC_CONF': ('background_thread:true,metadata_thp:auto,'
                        'dirty_decay_ms:20000,muzzy_decay_ms:20000'),
    })
    if args.group_embedding == "collective":
        tf.config.experimental.enable_distributed_strategy(strategy="collective")
        if args.smartstaged and not args.tf:
            os.environ["TF_GPU_THREAD_COUNT"] = "16"


if __name__ == '__main__':
    # Parse CLI flags into the module-level `args` read throughout this file.
    parser = get_arg_parser()
    args = parser.parse_args()

    if not args.tf: 
        set_env_for_DeepRec()

    # TF_CONFIG (JSON env var) selects distributed mode; absent -> local run.
    TF_CONFIG = os.getenv('TF_CONFIG')
    if not TF_CONFIG:
        main()
    else:
        tf_config, server, tf_device = generate_cluster_info(TF_CONFIG)
        # logging.info("tf_device----------")
        # logging.info(tf_device.to_string())
        with tf_device:
            main(tf_config, server)