#!/usr/bin/env python
# -*- coding: utf-8 -*-



from collections import OrderedDict
import json

import numpy as np
import tensorflow as tf

from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report

from data_utils import SegBatcher
from birnn_model import BirnnModel


def init_mode_config(vocab_size):
    """Build the model hyper-parameter dict from FLAGS and persist it.

    Writes the configuration to ``model_config.json`` so a later
    inference/eval run can rebuild the model with identical settings.

    Args:
        vocab_size: number of entries in the vocabulary (int).

    Returns:
        OrderedDict with the model hyper-parameters.
    """
    model_config = OrderedDict()

    model_config["rnn_size"] = FLAGS.rnn_size
    model_config["rnn_type"] = FLAGS.rnn_type
    model_config["vocab_size"] = vocab_size
    model_config["batch_size"] = FLAGS.batch_size
    model_config["num_sampled"] = FLAGS.num_sampled
    model_config["embedding_size"] = FLAGS.embedding_size
    model_config["lr"] = FLAGS.lr
    model_config["clip"] = FLAGS.clip

    # BUG FIX: the original passed ensure_ascii=FLAGS (the flags object,
    # always truthy) and leaked the file handle; write with a context
    # manager and keep non-ASCII characters readable in the JSON file.
    with open("model_config.json", "w") as conf:
        json.dump(model_config, conf, ensure_ascii=False)
    return model_config


def load_vocab(path):
    """Load a JSON word->id vocabulary and build the inverse mapping.

    Args:
        path: path to a JSON file mapping word (str) -> id (int).

    Returns:
        Tuple ``(word_to_id, id_to_word)`` where ``id_to_word`` is the
        inverted dict (id -> word).
    """
    # BUG FIX: original used json.load(open(path)) and leaked the handle.
    with open(path, encoding="utf-8") as f:
        word_to_id = json.load(f)
    id_to_word = {v: k for k, v in word_to_id.items()}
    return word_to_id, id_to_word


def main(argv):
    """Train the BiRNN model on the tfrecord file given by FLAGS.

    Builds the graph, starts the input queue runners, and runs the
    training loop until the batcher's epochs are exhausted, checkpointing
    every 10 steps.
    """
    word_to_id, id_to_word = load_vocab(FLAGS.vocab_file)

    model_config = init_mode_config(len(word_to_id))

    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True

    with tf.Graph().as_default():
        model = BirnnModel(model_config, True)
        train_batcher = SegBatcher(FLAGS.train_file, FLAGS.batch_size, num_epochs=FLAGS.max_epoch)

        # dev_batcher = SegBatcher(FLAGS.dev_file, FLAGS.batch_size, num_epochs=1)
        # test_batcher = SegBatcher(FLAGS.test_file, FLAGS.batch_size, num_epochs=1)

        # Supervisor initializes/restores variables and saves periodically;
        # an explicit tf.global_variables_initializer() op is not needed.
        sv = tf.train.Supervisor(logdir=FLAGS.out_dir, save_model_secs=FLAGS.save_model_secs)

        # BUG FIX: pass tf_config to the session (it was built but unused).
        with sv.managed_session(config=tf_config) as sess:
            tf.train.start_queue_runners(sess=sess)
            try:
                while not sv.should_stop():
                    # BUG FIX: fetch a NEW batch every iteration — the
                    # original fetched one batch before the loop and
                    # trained on the same data forever.
                    x_batch, y_batch, sent_len = sess.run(train_batcher.next_batch_op)
                    sent_len = np.reshape(sent_len, newshape=(-1,))
                    feed_dict = {
                        model.inputs: x_batch,
                        model.targets: y_batch,
                        model.lengths: sent_len,
                    }

                    loss, step, _ = sess.run(
                        [model.loss, model.global_step, model.train_op], feed_dict)
                    print("step: loss ", step, loss)
                    if step % 10 == 0:
                        ckpt = sv.saver.save(sess, FLAGS.out_dir + "/model", global_step=step)
                        print("ckpt: ", ckpt)
            except tf.errors.OutOfRangeError:
                # Raised by the input queue when num_epochs is exhausted.
                sv.request_stop()



if __name__ == "__main__":
    tf.app.flags.DEFINE_string("train_file", "./data/train.tfrecord", "path of train recoard path")
    # tf.app.flags.DEFINE_string("dev_file", "", "path of dev recoard path")
    # tf.app.flags.DEFINE_string("test_file", "", "path of dev recoard path")

    tf.app.flags.DEFINE_string("vocab_file", "./word_to_id.json", "vocab file ")
    tf.app.flags.DEFINE_string("out_dir", "./models", "log path of the supervisor")

    tf.app.flags.DEFINE_integer("max_epoch", 100, "max epoch")
    tf.app.flags.DEFINE_integer("batch_size", 512, "batch size")
    tf.app.flags.DEFINE_integer("num_sampled", 32, "num_sampled ")
    tf.app.flags.DEFINE_integer("embedding_size", 268, "embedding size")

    tf.app.flags.DEFINE_integer("rnn_size", 256, "the embedding size of char or word")

    tf.app.flags.DEFINE_string("optimizer", "adam", "optimizer ")
    tf.app.flags.DEFINE_string("rnn_type", 'lstm', 'rnn type')

    tf.app.flags.DEFINE_float("lr", 0.0001, "learning rate")
    tf.app.flags.DEFINE_integer("save_model_secs", 30, "save model every second")
    tf.app.flags.DEFINE_boolean("debug", True, "if debug ")
    tf.app.flags.DEFINE_float("clip", 5.0, "clip")

    FLAGS = tf.flags.FLAGS
    tf.app.run()