#!/usr/bin/env python
# -*-coding:utf-8-*-



# stdlib
import os
import pickle

# third-party
import numpy as np
import tensorflow as tf
from tensorflow.contrib.crf import viterbi_decode
from sklearn.metrics import accuracy_score
# BUG FIX: `sklearn.metrics.classification` is a private module that was
# removed in scikit-learn 0.24; import from the public `sklearn.metrics`.
from sklearn.metrics import classification_report

# local
import data_utils
from bi_lstm import BiLSTM
import word2vec
from config import TrainConfig, BiLSTMConfig

map_file_path = "/home/wuzheng/Pyprojects/bsner/data/maps.pkl"


def train(train_config, model_config):
    """Train the BiLSTM-CRF sequence tagger.

    Loads train/dev/test sentence files, maps chars/tags to ids, then runs up
    to 100 epochs of mini-batch training in a TF1 session. The char embedding
    is initialized from pretrained word2vec vectors. After every epoch the dev
    set is decoded with Viterbi and a checkpoint is saved; training stops
    early as soon as dev accuracy fails to improve.

    Args:
        train_config: TrainConfig — data paths, batch size, dropout,
            steps_check, pre_embed_path, out_dir.
        model_config: BiLSTMConfig — mutated here: num_chars/num_tags are
            filled in from the loaded vocabulary maps.
    """
    # load data sets
    train_sentences = data_utils.load_sentences(train_config.train_path, [0, 1])
    dev_sentences = data_utils.load_sentences(train_config.dev_path, [0, 1])
    test_sentences = data_utils.load_sentences(train_config.test_path, [0, 1])

    print("train sentences %d" % (len(train_sentences)))
    print("test sentences %d" % (len(test_sentences)))
    print("dev sentences %d" % (len(dev_sentences)))

    # TODO: build the inverse maps inside load_map_file instead of here
    word_to_id, id_to_tag = data_utils.load_map_file(map_file_path, "char_map", "seg_map")
    tag_to_id = {v: k for k, v in id_to_tag.items()}
    id_to_word = {i: w for w, i in word_to_id.items()}

    train_sentences = data_utils.create_dataset(train_sentences, word_to_id, tag_to_id)
    dev_sentences = data_utils.create_dataset(dev_sentences, word_to_id, tag_to_id)

    print(train_sentences[0])
    train_manager = data_utils.BatchManager(train_sentences, train_config.batch_size)
    dev_manager = data_utils.BatchManager(dev_sentences, 64)

    # vocabulary sizes are only known once the maps are loaded
    model_config.num_chars = len(word_to_id)
    model_config.num_tags = len(tag_to_id)

    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True  # don't grab all GPU memory up front
    steps_per_epoch = train_manager.len_data

    with tf.Session(config=tf_config) as sess:
        model = BiLSTM(model_config)
        sess.run(tf.global_variables_initializer())
        print("load pre word2vec ...")

        # Overwrite the randomly-initialized char embedding with pretrained
        # word2vec vectors, aligned to the id order via id_to_word.
        wv = word2vec.Word2vec()
        embed = wv.load_w2v_array(train_config.pre_embed_path, id_to_word)

        word_embedding = tf.constant(embed, dtype=tf.float32)
        t_assign_embedding = tf.assign(model.char_lookup, word_embedding)
        sess.run(t_assign_embedding)
        print("start training")
        loss = []
        saver = tf.train.Saver(tf.global_variables())

        def dev_step(x_batch, y_batch, writer=None):
            """Evaluate one dev batch.

            Returns (gold_labels, predictions): two flat 1-D arrays of tag
            ids, truncated to each sentence's true length.
            """
            feed_dict = {
                model.char_inputs: x_batch,
                model.targets: y_batch,
                model.dropout: 1.0  # no dropout at eval time
            }
            # Renamed from `loss`/`logits` so the padded Viterbi input below
            # does not shadow the session outputs or the outer loss list.
            step, batch_loss, batch_logits, lengths = sess.run(
                [model.global_step, model.loss, model.logits, model.lengths], feed_dict)
            paths = []
            small = -1000.0
            # Extra column/row for the synthetic "start" tag that pads the
            # transition matrix to (num_tags+1) x (num_tags+1).
            start = np.asarray([[small] * model_config.num_tags + [0]])

            for score, length in zip(batch_logits, lengths):
                score = score[:length]
                pad = small * np.ones([length, 1])
                padded = np.concatenate([score, pad], axis=1)
                padded = np.concatenate([start, padded], axis=0)
                path, _ = viterbi_decode(padded, model.trans.eval())
                paths.append(path[1:])  # drop the synthetic start step

            old_label = []
            predictions = np.concatenate(paths, axis=0)
            for y, length in zip(y_batch, lengths):
                y = y[:length]
                old_label.append(y)
            old_label = np.concatenate(old_label, axis=0)
            return old_label, predictions

        last_acc = 0.0
        global_step = 0  # defined even if an epoch yields no batches
        for i in range(100):
            for batch in train_manager.iter_batch(shuffle=True):
                chars, tags = batch
                feed_dict = {
                    model.char_inputs: np.asarray(chars),
                    model.targets: np.array(tags),
                    model.dropout: train_config.dropout,
                }

                global_step, batch_loss, _ = sess.run([model.global_step, model.loss, model.train_op], feed_dict)
                loss.append(batch_loss)

                if global_step % train_config.steps_check == 0:
                    iteration = global_step // steps_per_epoch + 1
                    print("iteration:{} step:{}/{},  loss:{:>9.6f} ".format(iteration, global_step % steps_per_epoch,
                                                                            steps_per_epoch, np.mean(loss)))
                    loss = []

            # per-epoch dev evaluation
            all_old = []
            all_predictions = []
            for dev_batch in dev_manager.iter_batch(shuffle=True):
                dev_chars, dev_tags = dev_batch
                old_label, prediction = dev_step(dev_chars, dev_tags)
                all_old.extend(old_label)
                all_predictions.extend(prediction)

            print(classification_report(all_old, all_predictions))
            acc = accuracy_score(all_old, all_predictions)
            # BUG FIX: `iteration` was only assigned inside the steps_check
            # branch and could be unbound here; derive it from global_step.
            iteration = global_step // steps_per_epoch + 1
            print("iteration: {} total acc: {:>9.6f}".format(iteration, acc))

            checkpoint_path = os.path.join(train_config.out_dir, "model.ckpt")
            path = saver.save(sess, checkpoint_path, global_step=global_step)
            print("Saved model checkpoint to {}\n".format(path))
            if acc <= last_acc:
                # early stop: dev accuracy stopped improving
                # (was exit(); return is cleaner and equivalent for the
                #  __main__ caller, and fixed the "alst_acc" typo)
                print("exit dev acc < last_acc:%f, %f " % (acc, last_acc))
                return
            last_acc = acc


if __name__ == "__main__":
    # Build both config objects, populate them from their JSON files,
    # then launch training.
    run_cfg = TrainConfig()
    run_cfg.load("./config/train.json")

    net_cfg = BiLSTMConfig()
    net_cfg.load("./config/lstm.json")

    train(run_cfg, net_cfg)
