#-*-coding:utf8-*-

import sys, os
import numpy as np
import tensorflow as tf
from mask_lm import DataProcessor, MaskLM
from data_processor import InferenceDataProcessor
import modeling
import optimization
import time
import random
#tf.logging.set_verbosity(tf.logging.INFO)
# Suppress TF's INFO/WARNING chatter during inference; only errors are shown.
tf.logging.set_verbosity(tf.logging.ERROR)

class MConfig:
    """Plain attribute bag: configuration fields are attached dynamically by callers."""


def inference(FLAGS):
    """Restore a pretrained PLOME checkpoint and decode one batch of data.

    Builds the MaskLM graph over the tfrecord input pipeline, restores the
    latest checkpoint found in ``FLAGS.output_dir`` and prints one batch of
    predicted token sequences (with the [CLS]/[SEP] bracketing positions
    stripped) to stdout.

    Args:
        FLAGS: parsed ``tf.flags.FLAGS`` namespace; reads train_path,
            train_tfrecord_path, output_dir, vocab_file, bert_config_path,
            max_sen_len, batch_size, py_dim, multi_task, keep_prob.

    Raises:
        FileNotFoundError: if no checkpoint exists in ``FLAGS.output_dir``
            (running without one would fail later with an opaque
            uninitialized-variable error).
    """
    PY_OR_SK = 'all'

    args = MConfig()
    args.seed = 1
    args.py_dim = int(FLAGS.py_dim)
    args.multi_task = int(FLAGS.multi_task) > 0
    # Seed every RNG source so masking and decoding are reproducible.
    tf.random.set_random_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)

    max_sen_len = FLAGS.max_sen_len
    train_path = FLAGS.train_path
    out_dir = FLAGS.output_dir
    train_tfrecord_dir = FLAGS.train_tfrecord_path
    batch_size = FLAGS.batch_size
    bert_config_path = FLAGS.bert_config_path
    vocab_file = FLAGS.vocab_file
    keep_prob = FLAGS.keep_prob

    tf_config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
    tf_config.gpu_options.allow_growth = True

    # NOTE(review): this length was hard-coded to 512 in the original; using
    # the configured value keeps the processor consistent with the model when
    # --max_sen_len differs from the 512 default. Confirm PYID2SEQ/SKID2SEQ
    # tables are length-independent.
    infer_processor = InferenceDataProcessor(max_sen_len=max_sen_len, vocab_file=vocab_file)
    model = MaskLM(bert_config_path,
                   num_class=len(infer_processor.get_label_list()),
                   pyid2seq=infer_processor.PYID2SEQ,
                   skid2seq=infer_processor.SKID2SEQ,
                   py_dim=args.py_dim,
                   py_or_sk=PY_OR_SK,
                   keep_prob=keep_prob,
                   multi_task=args.multi_task)

    # Separate processor that owns the tfrecord pipeline and the tokenizer
    # used for decoding below (the original shadowed one name for both roles).
    data_processor = DataProcessor(train_path, max_sen_len, vocab_file,
                                   os.path.join(train_tfrecord_dir, "01.tfrecord"),
                                   label_list=None, is_training=False)
    train_data = data_processor.build_data_generator(batch_size)
    iterator = train_data.make_one_shot_iterator()
    (input_ids, input_mask, pinyin_ids, masked_pinyin_ids,
     masked_stroke_ids, lmask, label_ids) = iterator.get_next()

    # Pin static shapes so downstream reshapes/matmuls see known dims.
    # (masked_stroke_ids is deliberately left alone, as in the original —
    # its per-token stroke layout is not visible from here.)
    for tensor in (input_ids, input_mask, pinyin_ids, lmask, label_ids, masked_pinyin_ids):
        tensor.set_shape([None, max_sen_len])

    (loss, probs, golds, mask, py_loss) = model.create_model(
        input_ids, input_mask, masked_pinyin_ids, masked_stroke_ids,
        lmask, label_ids, pinyin_ids, is_training=False)

    with tf.Session(config=tf_config) as sess:
        tf.get_variable_scope().reuse_variables()
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(out_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            # Only announce success when a restore actually happened.
            print("loaded model")
        else:
            raise FileNotFoundError('no checkpoint found in %s' % out_dir)

        train_loss, train_probs, train_mask = sess.run([loss, probs, input_mask])

        # probs come back flattened over all positions; recover per-sentence
        # token predictions as a [batch, seq] id matrix.
        pred_ids = train_probs.argmax(axis=-1).reshape([batch_size, max_sen_len])
        for id_line, mask_line in zip(pred_ids, train_mask):
            tokens = data_processor.tokenizer.convert_ids_to_tokens(id_line[mask_line == 1])
            # Drop the first and last kept tokens (presumably [CLS]/[SEP]).
            print(''.join(tokens[1:-1]))

 
if __name__ == '__main__':
    # Flag defaults are unchanged; only the copy-pasted, misleading help
    # strings have been corrected and dead code removed.
    dirname = os.path.dirname(__file__)
    flags = tf.flags
    ## Required parameters
    flags.DEFINE_string("gpuid_list", '0', "comma-separated GPU ids, e.g. 0,1,2")
    ## Optional
    flags.DEFINE_string("train_path", '/home/thingclub/workspace/github/PLOME/pre_train_src/datas/pretrain_corpus_examples.txt', "path to the pretraining corpus")
    flags.DEFINE_string("test_path", '/home/thingclub/workspace/github/PLOME/pre_train_src/datas/pretrain_corpus_examples.txt', "path to the test corpus")
    flags.DEFINE_string("train_tfrecord_path", os.path.join(dirname, 'train_tfrecords'), "directory containing the train tfrecords")
    flags.DEFINE_string("output_dir", os.path.join(dirname, 'pretrain_plome_output'), "directory holding the model checkpoint")
    flags.DEFINE_string("vocab_file", os.path.join(dirname, 'pretrain_plome_output/vocab.txt'), "vocabulary file")
    flags.DEFINE_string("init_checkpoint", os.path.join(dirname, 'pretrain_plome_output'), "optional initial checkpoint directory")
    flags.DEFINE_string("bert_config_path", os.path.join(dirname, 'pretrain_plome_output/bert_config.json'), "BERT config json")
    flags.DEFINE_string("label_list", '', "optional label list")
    flags.DEFINE_integer("max_sen_len", 512, "maximum sentence length")
    flags.DEFINE_integer("batch_size", 8, "batch size")
    flags.DEFINE_integer("py_dim", 32, "pinyin embedding dimension (0 disables pinyin)")
    flags.DEFINE_integer("multi_task", 0, "non-zero enables multi-task training")
    flags.DEFINE_integer("epoch", 2, "number of training epochs")
    flags.DEFINE_float("learning_rate", 5e-5, "learning rate")
    flags.DEFINE_float("keep_prob", 0.9, "keep prob in dropout")
    flags.DEFINE_string("use_mgpu", '0', "non-zero enables multi-GPU training")

    flags.mark_flag_as_required('gpuid_list')
    flags.mark_flag_as_required('train_path')
    flags.mark_flag_as_required('output_dir')
    flags.mark_flag_as_required('train_tfrecord_path')
    flags.mark_flag_as_required('max_sen_len')

    FLAGS = flags.FLAGS
    # Restrict TF to the requested devices before any session is created.
    os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpuid_list
    print ('Confings:')
    print ('\tgpuid_list=', FLAGS.gpuid_list)
    print ('\ttrain_path=', FLAGS.train_path)
    print ('\ttest_path=', FLAGS.test_path)
    print ('\toutput_dir=', FLAGS.output_dir)
    print ('\tmax_sen_len=', FLAGS.max_sen_len)
    print ('\tbert_config_path=', FLAGS.bert_config_path)
    print ('\tmulti_task=', FLAGS.multi_task)
    inference(FLAGS)

