'''
@Company: TWL
@Author: xue jian
@Email: xuejian@kanzhun.com
@Date: 2020-04-09 19:12:44
'''
# -*- coding: UTF-8 -*-

from base_train import BaseTrain
import tensorflow as tf
# from tensorflow.python.feature_column.feature_column import _LazyBuilder
import numpy as np
# import math

# Global output file for the prediction rows written by
# TensorflowTrain.batch_train() in only_predict mode.
# NOTE(review): opened at import time and never explicitly closed/flushed —
# buffered rows may be lost if the process does not exit cleanly; confirm
# callers rely on interpreter exit to flush.
preds_out = open("deepfm_preds_out_t", 'w')

class TensorflowTrain(BaseTrain):
    """TF1 graph-mode trainer base for wide&deep / DeepFM-style models.

    Responsibilities visible in this class:
      * builds one placeholder and one embedding ``input_layer`` tensor per
        feature (``get_feature_placeholder`` / ``make_feature_column``),
      * delegates the actual network to a subclass via ``define_network``,
      * trains either through ``BaseTrain.train`` or through a
        ``tf.data.TextLineDataset`` pipeline (``use_data_set=True``),
      * saves/loads the model as a SavedModel tagged ``"wide_deep"``.

    ``self.fea_group``, ``self.fea_code`` and ``self.fea_index`` are read but
    never assigned here — presumably populated by ``BaseTrain``; TODO confirm.
    Group codes used throughout this class:
      0 = scalar categorical feature (int64),
      1 = scalar continuous feature (float32),
      2 = list-valued categorical feature (int64 matrix, one row per sample).
    """

    def __init__(self, file_path, dates, watch_num, batch_size, thread_num,
                 read_data_parallel, fea_len_dict, cate_fea_dict, conti_fea_cut,
                 tf_scope, all_use_features, use_data_set=False,
                 variable_dir='', save_dir='', only_predict=False):
        """Store configuration; no TF graph is built until ``init_model``.

        Args:
            fea_len_dict: feature name -> embedding dimension.
            cate_fea_dict: feature name -> vocabulary list (int64 ids).
            conti_fea_cut: feature name -> bucket boundaries for continuous
                features.
            tf_scope: variable scope wrapping all trainable variables.
            all_use_features: ordered list of feature names to use.
            use_data_set: if True, ``train`` reads data via ``tf.data``.
            variable_dir: directory to restore variables / SavedModel from.
            save_dir: directory to export the SavedModel to ('' = no export).
            only_predict: if True, load a SavedModel and only score batches.
        """
        super(TensorflowTrain, self).__init__(file_path, dates, watch_num,
                                              batch_size, thread_num,
                                              read_data_parallel)
        self.all_use_features = all_use_features
        self.fea_len_dict = fea_len_dict
        self.cate_fea_dict = cate_fea_dict
        self.conti_fea_cut = conti_fea_cut
        self.tf_scope = tf_scope
        self.use_data_set = use_data_set
        self.variable_dir = variable_dir
        self.save_dir = save_dir
        self.only_predict = only_predict
        self.sess = None
        self.fea_ph_dict = {}     # feature name -> input placeholder
        self.fea_emb_tensor = {}  # feature name -> [B, emb_size] embedding
        # Per-feature cursor of how many embedding columns get_fea_emb() has
        # already handed out (supports slicing one embedding several times).
        self.fea_use_len_dict = dict.fromkeys(all_use_features, 0)
        self.label_ph = None
        self.predict = None
        self.loss = None
        self.train_op = None
        self.iter_label = None
        self.iter_feature = None
        self.table_init = None

    def init_model(self):
        """Create the session and build (or load) the computation graph.

        In ``only_predict`` mode the graph comes from a SavedModel and nothing
        else is built.  Otherwise placeholders, feature columns and the
        subclass network are constructed, vocabulary lookup tables are
        initialised, and variables are restored from ``variable_dir`` when it
        exists or freshly initialised when it does not.
        """
        self.sess = tf.Session(config=tf.ConfigProto(
            device_count={"CPU": self.thread_num}, log_device_placement=False))
        if self.only_predict:
            tf.saved_model.loader.load(self.sess, ['wide_deep'], self.variable_dir)
            return
        self.get_feature_placeholder()
        self.make_feature_column()
        self.define_network()
        # Vocabulary lookup tables must be initialised before any run().
        self.table_init = tf.tables_initializer()
        self.sess.run(self.table_init)
        if tf.gfile.Exists(self.variable_dir):
            self.variable_restore(self.variable_dir)
        else:
            self.sess.run(tf.global_variables_initializer())
        print("fea_len_dict = ", self.fea_use_len_dict)

    def get_feature_placeholder(self):
        """Create one placeholder per feature, named by its feature code.

        Group 0 -> int64 [B]; group 1 -> float32 [B]; group 2 -> int64
        [B, list_len] (all rows of a batch must share the same list length,
        see ``make_feed_dict``).
        """
        for fea in self.all_use_features:
            if self.fea_group[fea] == 0:
                self.fea_ph_dict[fea] = tf.placeholder(
                    dtype=tf.int64, shape=(None,), name=self.fea_code[fea])
            elif self.fea_group[fea] == 1:
                self.fea_ph_dict[fea] = tf.placeholder(
                    dtype=tf.float32, shape=(None,), name=self.fea_code[fea])
            elif self.fea_group[fea] == 2:
                self.fea_ph_dict[fea] = tf.placeholder(
                    dtype=tf.int64, shape=(None, None), name=self.fea_code[fea])

    def handle_list_fea(self, fea_emb):
        """Mean-pool a [B, list_len, ...] embedding over the list axis."""
        return tf.reduce_mean(fea_emb, axis=1)

    def make_feature_column(self):
        """Build one embedding ``input_layer`` tensor per feature.

        Group 0: vocabulary-list categorical column; group 1: bucketized
        numeric column; group 2: vocabulary-list categorical column fed a
        [B, list_len] matrix with the list embeddings sum-combined.
        Out-of-vocabulary ids map to default_value=-2.
        """
        with tf.variable_scope(self.tf_scope):
            for fea in self.all_use_features:
                if self.fea_group[fea] == 0:
                    cate_tmp_column = tf.feature_column.categorical_column_with_vocabulary_list(
                        fea, self.cate_fea_dict[fea], dtype=tf.int64, default_value=-2)
                    cate_emb_column = tf.feature_column.embedding_column(
                        cate_tmp_column, self.fea_len_dict[fea])
                    self.fea_emb_tensor[fea] = tf.feature_column.input_layer(
                        {fea: self.fea_ph_dict[fea]}, [cate_emb_column])  # shape=[B, emb_size]
                elif self.fea_group[fea] == 1:
                    conti_tmp_column = tf.feature_column.bucketized_column(
                        tf.feature_column.numeric_column(fea),
                        boundaries=self.conti_fea_cut[fea])
                    conti_emb_column = tf.feature_column.embedding_column(
                        conti_tmp_column, self.fea_len_dict[fea])
                    self.fea_emb_tensor[fea] = tf.feature_column.input_layer(
                        {fea: self.fea_ph_dict[fea]}, [conti_emb_column])  # shape=[B, emb_size]
                elif self.fea_group[fea] == 2:
                    list_tmp_column = tf.feature_column.categorical_column_with_vocabulary_list(
                        fea, self.cate_fea_dict[fea], dtype=tf.int64, default_value=-2)
                    # combiner="sum": list-item embeddings are summed (the
                    # default combiner would average them).
                    list_emb_column = tf.feature_column.embedding_column(
                        list_tmp_column, self.fea_len_dict[fea], combiner="sum")
                    self.fea_emb_tensor[fea] = tf.feature_column.input_layer(
                        {fea: self.fea_ph_dict[fea]}, [list_emb_column])  # shape=[B, emb_size]

    def define_network(self):
        """Build the model network. To be overridden by subclasses."""
        print('define')

    def get_fea_emb(self, fea_name, length):
        """Return the next ``length`` embedding columns of ``fea_name``.

        Consecutive calls advance a per-feature cursor, so one embedding can
        be partitioned between (e.g.) FM and DNN towers without overlap.
        Returns a [B, length] tensor.
        """
        self.fea_use_len_dict[fea_name] += length
        return self.fea_emb_tensor[fea_name][:, self.fea_use_len_dict[fea_name] - length:self.fea_use_len_dict[fea_name]]  # shape=[B, length]

    def concat_fm(self, a, b, length):
        """FM-style interaction: each feature in ``a`` (width len(b)*length)
        dotted with the concatenation of all features in ``b`` (width
        ``length`` each).  Returns a [B] tensor summed over all ``a``.
        """
        result = []
        for fea in a:
            a_emb = self.get_fea_emb(fea, int(length * len(b)))
            b_emb = []
            for fea_b in b:
                b_emb.append(self.get_fea_emb(fea_b, length))
            b_emb = tf.concat(b_emb, axis=1)
            result.append(tf.reduce_sum(a_emb * b_emb, axis=1))
        return tf.add_n(result)

    def sum_pooling_fm(self, a, b, length):
        """FM-style interaction: each feature in ``a`` dotted with the
        element-wise SUM of the ``b`` embeddings (all width ``length``).
        Returns a [B] tensor summed over all ``a``.
        """
        result = []
        for fea in a:
            a_emb = self.get_fea_emb(fea, length)
            b_emb = []
            for fea_b in b:
                b_emb.append(self.get_fea_emb(fea_b, length))
            b_emb = tf.add_n(b_emb)
            result.append(tf.reduce_sum(a_emb * b_emb, axis=1))
        return tf.add_n(result)

    def get_nn_input(self, a, lens):
        """Concatenate embeddings for features ``a`` into one [B, sum(lens)]
        tensor.  ``lens`` is either a per-feature list or one shared width.
        """
        final_emb = []
        for i, fea in enumerate(a):
            if type(lens) == list:
                final_emb.append(self.get_fea_emb(fea, lens[i]))
            else:
                final_emb.append(self.get_fea_emb(fea, lens))
        return tf.concat(final_emb, axis=1)

    def nn_tower(self, nn_input, nn_size, name):
        """Stack dense layers of widths ``nn_size``; ReLU on every layer
        except a final width-1 layer (kept linear as the logit).  Returns the
        output flattened to 1-D.
        """
        with tf.variable_scope(self.tf_scope):
            nn_out = nn_input
            for i, size in enumerate(nn_size):
                if i == len(nn_size) - 1 and size == 1:
                    nn_out = tf.layers.dense(nn_out, size, kernel_initializer=tf.glorot_normal_initializer(), name=name+str(i))
                else:
                    nn_out = tf.layers.dense(nn_out, size, activation=tf.nn.relu,
                                             kernel_initializer=tf.glorot_normal_initializer(), name=name+str(i))
        return tf.reshape(nn_out, [-1])

    def make_feed_dict(self, d, label):
        """Build a feed_dict from a batch matrix ``d`` (rows = samples,
        columns indexed via ``self.fea_index``) for the training graph.

        Group-2 (list) features are comma-separated strings; every row in the
        batch is assumed to have the same list length — TODO confirm.
        """
        feed_dict = {}
        feed_dict[self.label_ph] = label
        for fea in self.all_use_features:
            if self.fea_group[fea] == 0:
                feed_dict[self.fea_ph_dict[fea]] = d[:, self.fea_index[fea]].astype(np.int64)
            elif self.fea_group[fea] == 1:
                # BUGFIX: np.float was removed in NumPy 1.24; np.float64 is
                # the identical dtype (np.float aliased the builtin float).
                feed_dict[self.fea_ph_dict[fea]] = d[:, self.fea_index[fea]].astype(np.float64)
            elif self.fea_group[fea] == 2:
                fea_list_str = d[:, self.fea_index[fea]]
                fea_list_final = []
                for fea_str in fea_list_str:
                    fea_list_final.append(fea_str.split(','))
                feed_dict[self.fea_ph_dict[fea]] = np.asarray(fea_list_final).astype(np.int64)
        return feed_dict

    def make_feed_dict_only_predict(self, d, label):
        """Same as ``make_feed_dict`` but keys the feed_dict by tensor name,
        for graphs loaded from a SavedModel where the placeholder objects are
        not held as attributes.
        """
        feed_dict = {}
        feed_dict[self.sess.graph.get_tensor_by_name("label:0")] = label
        for fea in self.all_use_features:
            if self.fea_group[fea] == 0:
                feed_dict[self.sess.graph.get_tensor_by_name(self.fea_code[fea] + ":0")] = d[:, self.fea_index[fea]].astype(np.int64)
            elif self.fea_group[fea] == 1:
                # BUGFIX: np.float removed in NumPy 1.24 -> np.float64.
                feed_dict[self.sess.graph.get_tensor_by_name(self.fea_code[fea] + ":0")] = d[:, self.fea_index[fea]].astype(np.float64)
            elif self.fea_group[fea] == 2:
                fea_list_str = d[:, self.fea_index[fea]]
                fea_list_final = []
                for fea_str in fea_list_str:
                    fea_list_final.append(fea_str.split(','))
                feed_dict[self.sess.graph.get_tensor_by_name(self.fea_code[fea] + ":0")] = np.asarray(fea_list_final).astype(np.int64)
        return feed_dict

    def batch_train(self, batch_data, batch_label):
        """Run one batch: train when not in predict mode; otherwise score the
        batch and append "session\\tjob\\texp\\tpred\\tlabel" rows to the
        module-level ``preds_out`` file.

        Returns:
            (predictions flattened to 1-D, mean loss of the batch).
        """
        if self.only_predict:
            f_d = self.make_feed_dict_only_predict(batch_data, batch_label)
            output = self.sess.graph.get_tensor_by_name('predict:0')
            out_loss = self.sess.graph.get_tensor_by_name('loss:0')
            pred, pred_loss = self.sess.run([output, out_loss], feed_dict=f_d)

            batch_sess = batch_data[:, self.fea_index['sessionid']]
            batch_jobid = batch_data[:, self.fea_index['job_id']]
            batch_expid = batch_data[:, self.fea_index['exp_id']]
            for si in range(len(batch_sess)):
                key = batch_sess[si] + '\t' + batch_jobid[si] + '\t' + batch_expid[si]
                value = pred[si][0]
                preds_out.write(key + '\t' + str(value) + '\t' + str(batch_label[si]) + '\n')

        else:
            f_d = self.make_feed_dict(batch_data, batch_label)
            pred, pred_loss, _ = self.sess.run([self.predict, self.loss, self.train_op], feed_dict=f_d)
        # BUGFIX: ndarray.reshape returns a NEW array; the original call
        # discarded the result, so predictions from the only_predict path
        # (shape [B, 1]) were never actually flattened.
        pred = pred.reshape([-1])
        return pred, pred_loss.mean()

    def train(self):
        """Train (or score) over ``self.dates``.

        Without ``use_data_set`` this defers to ``BaseTrain.train`` and then
        exports.  With it, a TextLineDataset pipeline parses tab-separated
        lines via a py_func, derives the label from the 'deal_type' column
        (1 unless the value is "list"), replaces 'NULL' with "-2", and logs
        AUC/mean-loss every ``watch_num`` samples.
        """
        if not self.use_data_set:
            super(TensorflowTrain, self).train()
            self.variable_save(self.save_dir)
            return
        self.init_model()

        def _parse_line_py(line):
            # Runs inside tf.py_func: bytes in, (label, feature list) out.
            line = str(line, encoding="utf8")
            line = line.split("\t")
            tmp_line = []
            line_label = 0
            for j, l in enumerate(line):
                if j == self.fea_index['deal_type']:
                    line_label = int(l != "list")
                    tmp_line.append(str(line_label))
                elif l == 'NULL':
                    tmp_line.append("-2")
                else:
                    tmp_line.append(l)
            return line_label, tmp_line

        def _parse_function(line):
            return tf.py_func(_parse_line_py, [line], [tf.int64, tf.string])

        date_paths = []
        for date in self.dates:
            date_paths.append(self.get_train_file(date))

        dataset = tf.data.TextLineDataset(date_paths, buffer_size=self.batch_size * 500)
        dataset = dataset.map(_parse_function)
        dataset = dataset.batch(self.batch_size)
        dataset = dataset.prefetch(20)

        iterator = dataset.make_one_shot_iterator()
        iter_label, iter_feature = iterator.get_next()

        all_pred = []
        all_label = []
        loss_watch = []
        while True:
            try:
                batch_label, batch_data = self.sess.run([iter_label, iter_feature])
                pred, pred_loss = self.batch_train(batch_data, batch_label)
                all_label.extend(batch_label.tolist())
                all_pred.extend(pred.tolist())
                loss_watch.append(pred_loss)
                if len(all_label) >= self.watch_num:
                    auc = self.cal_auc(np.asarray(all_label), np.asarray(all_pred))
                    print("auc = ", auc)
                    self.aucs.append(auc)
                    self.losses.append(np.asarray(loss_watch).mean())
                    all_pred = []
                    all_label = []

            except tf.errors.OutOfRangeError:
                print("End of dataset")
                break
        print("aucs = ", self.aucs)
        print("losses = ", self.losses)
        self.variable_save(self.save_dir)

    def variable_save(self, path):
        """Export the graph + variables as a SavedModel tagged "wide_deep"
        under ``path`` (no-op when path is ''), plus the feature-schema file.
        ``legacy_init_op`` re-initialises lookup tables at load time.
        """
        if path != '':
            self.save_model_f()
            builder = tf.saved_model.builder.SavedModelBuilder(path)
            builder.add_meta_graph_and_variables(self.sess, ["wide_deep"], legacy_init_op=self.table_init)
            builder.save()

    def save_model_f(self):
        """Write the serving feature-schema file: one "code:4<TAB>type" line
        per feature (long / float / long_array by feature group).
        """
        lines = []
        for fea in self.all_use_features:
            if self.fea_group[fea] == 0:
                lines.append("{0}:4\tlong".format(self.fea_code[fea]))
            elif self.fea_group[fea] == 1:
                lines.append("{0}:4\tfloat".format(self.fea_code[fea]))
            elif self.fea_group[fea] == 2:
                lines.append("{0}:4\tlong_array".format(self.fea_code[fea]))
        # BUGFIX: context manager guarantees the file is closed even if a
        # write fails (the original leaked the handle on exception).
        with open(self.save_dir + 'f1-b-deepfm-s-all-C00.f', 'w') as m_f:
            m_f.write(('\n').join(lines))

    def variable_restore(self, dir):
        """Restore variables from the latest checkpoint found in ``dir``."""
        path = tf.train.latest_checkpoint(dir)
        saver = tf.train.Saver()
        saver.restore(self.sess, path)