# -*- coding:utf-8 -*-

import os
import sys

import numpy as np
import tensorflow as tf

from PE.PEutils import Load

# Command-line interface: model size, action (train | pred) and the
# directory holding both the data and the saved checkpoints.
flags = tf.flags
logging = tf.logging
flags.DEFINE_string('model', 'small', 'type of model')
flags.DEFINE_string('action', 'train', 'type of action: train, pred')
flags.DEFINE_string('path', 'model/', 'path of data & model')
FLAGS = flags.FLAGS


class Config(object):
    """Hyper-parameters for the LSTM NER tagger.

    ``vocab_size`` and ``target_size`` are -1 sentinels and must be filled
    in from the loaded data before the graph is built (main() sets
    target_size from Load; see NOTE there about vocab_size).
    """
    num_steps = 50       # fixed sentence length the graph is unrolled to
    hidden_size = 150    # LSTM hidden units (also the embedding dimension)
    num_layers = 1       # stacked LSTM layers in the MultiRNNCell
    epoch = 20           # epochs before learning-rate decay kicks in
    max_epoch = 50       # total number of training epochs
    keep_prob = 0.5      # dropout keep probability during training
    l2 = 0.001           # NOTE(review): not referenced in visible code — confirm intent
    lr = 0.1             # initial learning rate for SGD
    lr_decay = 0.5       # geometric decay factor applied after `epoch` epochs
    vocab_size = -1      # sentinel: size of the input vocabulary
    target_size = -1     # sentinel: number of output tag classes

class NERModel(object):
    """LSTM-based sequence-labeling (NER) model, one sentence at a time.

    The graph is built for a fixed batch size of 1: placeholders are shaped
    ``[num_steps, 1]`` and the RNN initial state uses ``cell.zero_state(1, ...)``.
    Uses the pre-1.0 TensorFlow RNN API (``tf.nn.rnn``, axis-first ``tf.split``).
    """

    def __init__(self, config):
        num_steps = config.num_steps
        hidden_size = config.hidden_size
        target_size = config.target_size
        # Fix: original read a bare, undefined `vocab_size` name (NameError);
        # it must come from the config like the other dimensions.
        vocab_size = config.vocab_size
        self.config = config

        # One sentence of word ids, its one-hot tag targets, the dropout
        # keep-probability and the true (unpadded) sentence length.
        self._input_data = tf.placeholder(tf.int32, [num_steps, 1])
        self._target = tf.placeholder(tf.float32, [num_steps, target_size])
        self._dropout = tf.placeholder(tf.float32)
        self.lengths = tf.placeholder(tf.int32, [1])

        lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_size, state_is_tuple=True)
        lstm_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=self._dropout)
        cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * config.num_layers, state_is_tuple=True)
        self._init_state = cell.zero_state(1, tf.float32)

        # Embedding lookup pinned to CPU (GPU embedding lookups were poorly
        # supported in this TF version).
        with tf.device('/cpu:0'):
            embedding = tf.get_variable('embedding', [vocab_size, hidden_size])
            inputs = tf.nn.embedding_lookup(embedding, self._input_data)

        inputs = tf.nn.dropout(inputs, self._dropout)

        # Old-style tf.nn.rnn expects a Python list of [batch, dim] tensors,
        # one per time step.
        inputs = [tf.squeeze(i, squeeze_dims=[0]) for i in tf.split(0, num_steps, inputs)]

        outputs, state = tf.nn.rnn(cell, inputs, initial_state=self._init_state, sequence_length=self.lengths)
        softmax_w = tf.get_variable("softmax_w", [hidden_size, target_size])
        softmax_b = tf.get_variable("softmax_b", [target_size])
        output = tf.squeeze(outputs)  # [num_steps, hidden_size] since batch == 1
        self._logits = logits = tf.add(tf.matmul(output, softmax_w), softmax_b)
        self._loss = loss = tf.nn.softmax_cross_entropy_with_logits(logits, self._target)
        self._cost = cost = tf.reduce_mean(loss)
        self._final_state = state

        # The learning rate is a non-trainable variable so the training loop
        # can decay it between epochs via assign_lr().
        self._lr = tf.Variable(0.0, trainable=False)
        optimizer = tf.train.GradientDescentOptimizer(self._lr)
        self._train_op = optimizer.minimize(cost)

    def assign_lr(self, session, lr_value):
        """Set the learning-rate variable to ``lr_value`` in ``session``."""
        session.run(tf.assign(self._lr, lr_value))

    @property
    def input_data(self):
        return self._input_data

    @property
    def target(self):
        return self._target

    @property
    def dropout(self):
        return self._dropout

    @property
    def init_state(self):
        return self._init_state

    @property
    def cost(self):
        return self._cost

    @property
    def final_state(self):
        return self._final_state

    @property
    def lr(self):
        return self._lr

    @property
    def train_op(self):
        return self._train_op

    @property
    def prediction(self):
        return self._logits

    def run_epoch(self, session, input_data, input_labels, input_lengths, verbose=True):
        """Run one training pass over all sentences.

        Returns a (mean loss, token-level precision) pair computed over
        every processed token.
        """
        total_steps = len(input_data)
        total_loss = []
        total_correct_examples = 0
        total_processed_examples = 0
        # Fix: `total_steps // 100` is 0 for epochs shorter than 100
        # sentences, which made the progress check raise ZeroDivisionError.
        report_every = max(1, total_steps // 100)
        for step, (x, y, z) in enumerate(zip(input_data, input_labels, input_lengths)):
            feed_dict = {self.input_data: np.expand_dims(x, -1), self.target: y,
                         self.dropout: self.config.keep_prob, self.lengths: [z]}
            cost, prediction, _ = session.run([self.cost, self.prediction, self.train_op], feed_dict=feed_dict)
            total_loss.append(cost)
            total_processed_examples += len(x)
            prediction = np.argmax(prediction, axis=1)
            gold = np.argmax(y, axis=1)
            total_correct_examples += sum(prediction == gold)
            if verbose and step % report_every == 1:
                sys.stdout.write('\r{} / {} : loss = {}'.format(step, total_steps, np.mean(total_loss)))
                sys.stdout.flush()
        # Fix: guard the precision denominator for an empty epoch.
        precision = (total_correct_examples / total_processed_examples
                     if total_processed_examples else 0.0)
        return np.mean(total_loss), precision

    def predict(self, session, X, lengths, y=None):
        """Predict tag ids for each sentence in ``X``.

        When gold labels ``y`` are supplied, also evaluates the loss and
        returns ``(mean_loss, predictions)``; otherwise returns just the
        per-sentence arrays of predicted tag ids.
        """
        keep_prob = 1  # disable dropout at inference time
        losses = []
        results = []
        # Fix: the original `np.any(y)` would silently take the unlabeled
        # path for an all-zero label array; an explicit None test is the
        # intended check. Also avoid shadowing `y` inside the loop.
        if y is not None:
            for x, labels, length in zip(X, y, lengths):
                feed_dict = {self.input_data: np.expand_dims(x, -1), self.target: labels,
                             self.dropout: keep_prob, self.lengths: [length]}
                cost, prediction = session.run([self.cost, self.prediction], feed_dict=feed_dict)
                losses.append(cost)
                results.append(prediction.argmax(axis=1))
            return np.mean(losses), np.array(results)
        for x, length in zip(X, lengths):
            feed_dict = {self.input_data: np.expand_dims(x, -1), self.dropout: keep_prob,
                         self.lengths: [length]}
            prediction = session.run(self.prediction, feed_dict=feed_dict)
            results.append(prediction.argmax(axis=1))
        return np.array(results)


def print_confusion(confusion, vocab):
    """Print the confusion matrix and per-tag precision / recall.

    Rows of ``confusion`` are true tags, columns are guessed tags
    (the layout produced by calculate_confusion).
    """
    total_guessed_tags = confusion.sum(axis=0)
    total_true_tags = confusion.sum(axis=1)
    print(confusion)
    for i in range(vocab.get_label_num):
        # Fix: guard against division by zero for tags that were never
        # guessed (precision) or never occurred (recall).
        prec = confusion[i, i] / total_guessed_tags[i] if total_guessed_tags[i] else 0.0
        recall = confusion[i, i] / total_true_tags[i] if total_true_tags[i] else 0.0
        print('Tag: {} - P: {:2.4f} / R: {:2.4f}'.format(vocab.decode_label(i), prec, recall))


def calculate_confusion(vocab, prediction, labels):
    """Accumulate a confusion matrix over all tokens.

    ``labels`` holds one-hot gold tags per (sentence, step); ``prediction``
    holds the guessed tag id per (sentence, step). Rows index the true tag,
    columns the guessed tag.
    """
    n_tags = vocab.get_label_num
    confusion = np.zeros([n_tags, n_tags], dtype=np.int32)
    for sent_idx, sentence in enumerate(labels):
        for tok_idx, one_hot in enumerate(sentence):
            true_tag = np.argmax(one_hot)
            confusion[true_tag, prediction[sent_idx, tok_idx]] += 1
    return confusion


def main():
    """Build the NER model and either train it or run interactive tagging,
    depending on the --action flag ('train' or 'pred')."""
    print("model config is {}".format(FLAGS.model))
    config = Config()

    ld = Load(path=FLAGS.path)
    config.target_size = ld.get_target_num
    # NOTE(review): config.vocab_size is never set here, yet NERModel's
    # embedding table is sized by it — confirm Load exposes the vocabulary
    # size and assign it, otherwise graph construction fails.

    # NOTE(review): 'confif' looks like a typo for 'config'/'conf'
    # (TF session configuration object).
    confif = tf.ConfigProto()
    confif.gpu_options.allow_growth = True
    confif.gpu_options.per_process_gpu_memory_fraction = 0.5

    if FLAGS.action == 'train':

        train_input = ld.get_input
        train_target = ld.get_target
        print(train_input)
        print(train_target)

        with tf.Graph().as_default(), tf.Session(config=confif) as session:
            initializer = tf.truncated_normal_initializer(stddev=1.0)  # adjust stddev
            with tf.variable_scope("model", initializer=initializer):
                model = NERModel(config)
            tf.initialize_all_variables().run()
            saver = tf.train.Saver()

            # Learning-rate schedule: constant for the first config.epoch
            # epochs, then geometric decay by lr_decay each epoch after.
            for i in range(config.max_epoch):
                lr_decay = config.lr_decay ** max(0, i - config.epoch)
                model.assign_lr(session, config.lr * lr_decay)

                print("Epoch: {} Learning Rate: {:.3f}".format(i, session.run(model.lr)))
                # NOTE(review): train_sen/train_label/train_length — and the
                # valid_*/test_* names below — are undefined in this scope;
                # only train_input and train_target are loaded above. As
                # written this raises NameError; confirm the intended data
                # accessors on Load.
                train_loss, train_precision = model.run_epoch(session, train_sen, train_label, train_length)
                print("\nTraining Loss: {:.3f} \tTraining Precision: {:.3f}".format(train_loss, train_precision))
                valid_loss, valid_prediction = model.predict(session, valid_sen, valid_length, y=valid_label)
                print("Valid Loss: {:.3f}".format(valid_loss))
            # print_confusion(calculate_confusion(vocab, valid_prediction, valid_label), vocab)

            # Persist the trained weights under <path>/model_<model>.ckpt.
            checkpoint_dir = os.path.abspath(FLAGS.path)
            checkpoint_file = os.path.join(checkpoint_dir, "model_" + FLAGS.model + ".ckpt")
            if not os.path.exists(checkpoint_dir):
                os.mkdir(checkpoint_dir)
            save_path = saver.save(session, checkpoint_file)
            print("Model saved in file: {}".format(save_path))

            test_prediction = model.predict(session, test_sen, test_length)
        # print_prediction(test_prediction, vocab, test_length)
    elif FLAGS.action == 'pred':
        # with tf.device('/gpu:1'):
        with tf.Graph().as_default(), tf.Session(config=confif) as session:
            with tf.variable_scope("model"):
                m = NERModel(config=config)

            # Restore the weights written by the training branch above.
            checkpoint_dir = os.path.abspath(FLAGS.path)
            checkpoint_file = os.path.join(checkpoint_dir, "model_" + FLAGS.model + ".ckpt")
            saver = tf.train.Saver()
            saver.restore(session, checkpoint_file)
            print("Model restored.")
            print("Please input a sentence(cut by a space)")

            # Interactive loop: tag sentences until the user types 'exit'.
            while True:
                sen = input()
                if sen == 'exit':
                    break
                elif sen == '':
                    continue
                # NOTE(review): 'vocab' is undefined in this scope — confirm
                # where the vocabulary object comes from (Load?); as written
                # this raises NameError on the first sentence.
                sens, length = vocab.input_transform(sen, config.num_steps)
                pred = m.predict(session, sens, length)
                tags = []
                for i in range(length[0]):
                    tags.append(vocab.decode_label(pred[0, i]))
                print(tags)


def print_prediction(prediction, vocab, length):
    """Write decoded tags to ``<FLAGS.path>test_prediction``.

    One tag per line for each of the first ``length[i]`` tokens of sentence
    ``i``, with a blank line separating sentences.
    """
    # Fix: the context manager guarantees the file is flushed and closed
    # even if decode_label raises (the original leaked the handle on error).
    with open(FLAGS.path + "test_prediction", "w") as fp:
        for i in range(len(prediction)):
            for j in range(length[i]):
                fp.write("{}\n".format(vocab.decode_label(prediction[i, j])))
            fp.write("\n")


# Script entry point; tf.app.run() (which parses flags then calls main)
# was left disabled in favor of a direct call.
if __name__ == '__main__':
    main()
# tf.app.run()
