#!/usr/bin/env python
# -*-coding:utf-8-*-


import datetime
import os
import pickle

import numpy as np
import tensorflow as tf
from tensorflow.contrib.crf import viterbi_decode
# classification_report lives in the public sklearn.metrics namespace;
# sklearn.metrics.classification was a private module removed in sklearn 0.24
from sklearn.metrics import accuracy_score, classification_report

import data_utils
import word2vec
from bi_lstm import BiLSTM
from config import Segconfig
from utils import make_path

# Command-line configuration for training; parsed by tf.app.run.
flags = tf.app.flags
flags.DEFINE_boolean("clean", False, "clean train folder")
flags.DEFINE_boolean("train", False, "Whether train the model")
# configurations for the model
flags.DEFINE_integer("char_dim", 100, "Embedding size for characters")
flags.DEFINE_integer("lstm_dim", 100, "Num of hidden units in LSTM")

# configurations for training
flags.DEFINE_float("clip", 5, "Gradient clip")
flags.DEFINE_float("dropout", 0.5, "Dropout rate")
# batch_size is a count of examples, so declare it as an integer flag
# (it was DEFINE_float, which yields a float and can break batch slicing)
flags.DEFINE_integer("batch_size", 64, "batch size")
flags.DEFINE_float("lr", 0.001, "Initial learning rate")
flags.DEFINE_string("optimizer", "adam", "Optimizer for training")
flags.DEFINE_boolean("pre_emb", True, "Whether use pre-trained embedding")
flags.DEFINE_boolean("zeros", False, "Whether replace digits with zero")
flags.DEFINE_boolean("lower", True, "Whether lower case")

flags.DEFINE_integer("max_epoch", 50, "maximum training epochs")
flags.DEFINE_integer("steps_check", 100, "steps per checkpoint")
flags.DEFINE_string("ckpt_path", "../../results/tag/ckpt", "Path to save model")
flags.DEFINE_string("summary_path", "summary", "Path to store summaries")
flags.DEFINE_string("log_file", "train.log", "File for log")
flags.DEFINE_string("map_file", "../../results/tag/maps.pkl", "file for maps")
flags.DEFINE_string("vocab_file", "../../data/word_vocab.txt", "File for vocab")
flags.DEFINE_string("config_file", "config_file", "File for config")
flags.DEFINE_string("script", "conlleval", "evaluation script")
flags.DEFINE_string("result_path", "../../results", "Path for results")
flags.DEFINE_string("embed_file", "../../data/word_vec_100.txt", "Path for pre_trained embedding")
flags.DEFINE_string("train_file", os.path.join("../../data", "tag_train.txt"), "Path for train data")
flags.DEFINE_string("dev_file", os.path.join("../../data", "tag_dev.txt"), "Path for dev data")
flags.DEFINE_string("test_file", os.path.join("../../data", "tag_test.txt"), "Path for test data")

FLAGS = tf.app.flags.FLAGS
# sanity-check the hyper-parameters as soon as the module is loaded
assert FLAGS.clip < 5.1, "gradient clip shouldn't be too much"
assert 0 <= FLAGS.dropout < 1, "dropout rate between 0 and 1"
assert FLAGS.lr > 0, "learning rate must larger than zero"
assert FLAGS.optimizer in ["adam", "sgd", "adagrad"]


def config_model(char_to_id, tag_to_id):
    """Build a Segconfig from the command-line FLAGS and the vocab maps.

    Args:
        char_to_id: dict mapping characters/words to integer ids; only its
            size is used (number of input symbols).
        tag_to_id: dict mapping output tags to integer ids; only its size
            is used (number of output classes).

    Returns:
        A populated Segconfig instance for constructing the BiLSTM model.
    """
    cfg = Segconfig()
    # sizes derived from the data maps
    cfg.num_chars = len(char_to_id)
    cfg.num_tags = len(tag_to_id)
    # hyper-parameters taken straight from the flags
    cfg.char_dim = FLAGS.char_dim
    cfg.lstm_dim = FLAGS.lstm_dim
    cfg.batch_size = FLAGS.batch_size
    cfg.clip = FLAGS.clip
    cfg.dropout_keep = 1.0 - FLAGS.dropout  # flag stores drop rate, model wants keep prob
    cfg.lr = FLAGS.lr
    cfg.optimizer = FLAGS.optimizer
    return cfg


def train():
    """Train the BiLSTM-CRF tagger and evaluate on the dev set every epoch.

    Loads the train/dev/test corpora, builds the tag map (over all splits so
    evaluation never meets an unseen tag) and the word map, persists both to
    FLAGS.map_file for later inference, initializes the model with the
    pre-trained embeddings from FLAGS.embed_file, then trains for up to
    FLAGS.max_epoch epochs. A checkpoint is saved after each epoch and
    training stops early as soon as dev accuracy stops improving.
    """
    # load data sets; columns 0 and 1 are token and tag
    train_sentences = data_utils.load_sentences(FLAGS.train_file, [0, 1])
    dev_sentences = data_utils.load_sentences(FLAGS.dev_file, [0, 1])
    test_sentences = data_utils.load_sentences(FLAGS.test_file, [0, 1])

    print("train sentences %d" % len(train_sentences))
    print("test sentences %d" % len(test_sentences))
    print("dev sentences %d" % len(dev_sentences))

    tag_to_id, id_to_tag = data_utils.create_data_map(
        train_sentences + test_sentences + dev_sentences, -1)
    word_to_id, id_to_word = data_utils.load_vocab(FLAGS.vocab_file)

    # persist the maps so inference can reload exactly the same ids
    with open(FLAGS.map_file, "wb") as f:
        pickle.dump([word_to_id, id_to_word, tag_to_id, id_to_tag], f)

    print("vocab size : %d" % len(word_to_id))
    print("num of tags : %d" % len(tag_to_id))
    train_sentences = data_utils.create_dataset(train_sentences, word_to_id, tag_to_id)
    dev_sentences = data_utils.create_dataset(dev_sentences, word_to_id, tag_to_id)
    test_sentences = data_utils.create_dataset(test_sentences, word_to_id, tag_to_id)
    print(train_sentences[0])

    train_manager = data_utils.BatchManager(train_sentences, FLAGS.batch_size)
    dev_manager = data_utils.BatchManager(dev_sentences, 64)
    # test_manager = data_utils.BatchManager(test_sentences, 64)

    make_path(FLAGS)
    config = config_model(word_to_id, tag_to_id)

    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    steps_per_epoch = train_manager.len_data

    with tf.Session(config=tf_config) as sess:
        model = BiLSTM(config)
        sess.run(tf.global_variables_initializer())

        print("load pre word2vec ...")
        # overwrite the randomly initialized lookup table with pre-trained vectors
        wv = word2vec.Word2vec()
        embed = wv.load_w2v_array(FLAGS.embed_file, id_to_word)
        word_embedding = tf.constant(embed, dtype=tf.float32)
        sess.run(tf.assign(model.char_lookup, word_embedding))

        print("start training")
        loss = []
        saver = tf.train.Saver(tf.global_variables())

        def dev_step(x_batch, y_batch):
            """Evaluate one dev batch.

            Returns (gold, predictions): two flat 1-D arrays of tag ids
            covering only the unpadded portion of every sentence.
            """
            feed_dict = {
                model.char_inputs: x_batch,
                model.targets: y_batch,
                model.dropout: 1.0,  # no dropout at evaluation time
            }
            _, _, scores, lengths = sess.run(
                [model.global_step, model.loss, model.logits, model.lengths],
                feed_dict)

            # Viterbi decoding: augment the per-token scores with an extra
            # "start" tag column/row so they line up with the transition
            # matrix (presumably (num_tags+1)-square — confirm in BiLSTM).
            paths = []
            small = -1000.0
            start = np.asarray([[small] * config.num_tags + [0]])
            trans = model.trans.eval()  # hoisted: constant for the whole batch
            for score, length in zip(scores, lengths):
                score = score[:length]
                pad = small * np.ones([length, 1])
                # NOTE: the original rebound the name `logits` here, shadowing
                # the sess.run output being iterated; use a fresh name instead.
                padded = np.concatenate([score, pad], axis=1)
                padded = np.concatenate([start, padded], axis=0)
                path, _ = viterbi_decode(padded, trans)
                paths.append(path[1:])  # drop the artificial start step

            predictions = np.concatenate(paths, axis=0)
            # gold labels, truncated to the true sentence lengths
            gold = np.concatenate(
                [y[:length] for y, length in zip(y_batch, lengths)], axis=0)
            return gold, predictions

        last_acc = 0.0
        global_step = 0
        # honour the max_epoch flag (was a hard-coded range(100))
        for epoch in range(FLAGS.max_epoch):
            for chars, tags in train_manager.iter_batch(shuffle=True):
                feed_dict = {
                    model.char_inputs: np.asarray(chars),
                    model.targets: np.array(tags),
                    model.dropout: FLAGS.dropout,
                }
                global_step, batch_loss, _ = sess.run(
                    [model.global_step, model.loss, model.train_op], feed_dict)
                loss.append(batch_loss)

                if global_step % FLAGS.steps_check == 0:
                    iteration = global_step // steps_per_epoch + 1
                    print("iteration:{} step:{}/{}, NER loss:{:>9.6f} ".format(
                        iteration, global_step % steps_per_epoch,
                        steps_per_epoch, np.mean(loss)))
                    loss = []

            # ---- dev evaluation at the end of every epoch ----
            all_gold = []
            all_predictions = []
            # no shuffle: metrics are order-independent, determinism is nicer
            for dev_chars, dev_tags in dev_manager.iter_batch(shuffle=False):
                gold, prediction = dev_step(dev_chars, dev_tags)
                all_gold.extend(gold)
                all_predictions.extend(prediction)

            print(classification_report(all_gold, all_predictions))
            acc = accuracy_score(all_gold, all_predictions)
            # was: `iteration` here raised NameError when steps_check never
            # fired inside the epoch; report the epoch counter instead
            print("iteration: {} total acc: {:>9.6f}".format(epoch + 1, acc))

            checkpoint_path = os.path.join(FLAGS.ckpt_path, "ner.ckpt")
            path = saver.save(sess, checkpoint_path, global_step=global_step)
            print("Saved model checkpoint to {}\n".format(path))

            # simple early stopping: quit as soon as dev accuracy degrades
            if acc <= last_acc:
                print("exit dev acc < last_acc:%f, %f " % (acc, last_acc))
                return  # was exit(); return keeps the process usable by callers
            last_acc = acc


def main(_):
    """Entry point invoked by tf.app.run after flag parsing."""
    train()


if __name__ == "__main__":
    # tf.app.run parses the flags defined above, then calls main(remaining_argv)
    tf.app.run(main)
