#!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
import os
import time

import tensorflow as tf

from corpus_helper_word import CorpusHelper
from text_cnn import TextCNN

# Configure the Python-side logger: INFO level, "LEVEL, timestamp: message".
logging.basicConfig(level=logging.INFO,
                    format='%(levelname)s, %(asctime)s: %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S')

# Mirror the same verbosity on TensorFlow's internal logger.
tf.logging.set_verbosity(tf.logging.INFO)
# Parameters
# ==================================================

# Model Hyperparameters
# NOTE(review): several help strings below quote upstream defaults
# (128 / '3,4,5' / 128 / 10000) that differ from the actual values used here.
tf.flags.DEFINE_integer("embedding_dim", 256,
                        "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_string("filter_sizes", "1,2,3",
                       "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer(
    "num_filters", 512, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5,
                      "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0,
                      "L2 regularization lambda (default: 0.0)")

# Training parameters
tf.flags.DEFINE_integer("batch_size", 512, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("log_every", 100,
                        "log after this many steps (default: 100)")
tf.flags.DEFINE_integer("evaluate_every", 1000,
                        "Evaluate model on dev set after this many steps (default: 1000)")
tf.flags.DEFINE_integer("checkpoint_every", 1000,
                        "Save model after this many steps (default: 10000)")
tf.flags.DEFINE_integer("num_checkpoints", 5,
                        "Number of checkpoints to store (default: 5)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True,
                        "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False,
                        "Log placement of ops on devices")

# Whether the (pretrained) embedding matrix is further trained with the model.
tf.flags.DEFINE_boolean("train_embedding", False, "whether to use embedding or not.")

FLAGS = tf.flags.FLAGS

# Pin training to GPU index 7.  TODO(review): consider making this a flag
# instead of a hard-coded device.
os.environ['CUDA_VISIBLE_DEVICES'] = '7'

# Module-level side effect: training data is loaded at import time so that
# main() can read input shapes (train_x/train_y) when building the graph.
corpus_helper = CorpusHelper()
corpus_helper.load_train_data()


def main(argv=None):
    """Build the TextCNN graph and run the training loop.

    Hyperparameters come from ``FLAGS`` and data from the module-level
    ``corpus_helper``.  Checkpoints are written under
    ``textcnn_words/<unix-timestamp>/checkpoints``; the dev set is evaluated
    every ``FLAGS.evaluate_every`` steps.

    Args:
        argv: unused; present because ``tf.app.run()`` passes leftover argv.
    """
    # Training
    # ==================================================

    with tf.Graph().as_default():
        session_conf = tf.ConfigProto(
            allow_soft_placement=FLAGS.allow_soft_placement,
            log_device_placement=FLAGS.log_device_placement)
        sess = tf.Session(config=session_conf)
        with sess.as_default():
            cnn = TextCNN(
                sequence_length=corpus_helper.train_x.shape[1],
                num_classes=corpus_helper.train_y.shape[1],
                vocab_size=corpus_helper.vocab_size,
                embedding_size=FLAGS.embedding_dim,
                filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
                num_filters=FLAGS.num_filters,
                pretrained_embedding=corpus_helper.embeddings,
                l2_reg_lambda=FLAGS.l2_reg_lambda,
                train_embedding=FLAGS.train_embedding,
            )

            # Log the trainable variables for a quick sanity check of the graph.
            logging.info('-' * 75)
            for var in tf.trainable_variables():
                logging.info(var)
            logging.info('-' * 75)

            # Define the training procedure.  (Original note, translated: the
            # initial learning rate has a noticeable impact on training.)
            init_learning_rate = 0.01
            global_step = tf.Variable(0, name="global_step", trainable=False)
            decay_steps = 2000  # fixed typo: was "deacy_step"
            decay_rate = 0.5    # fixed typo: was "deacy_rate"
            # lr = init_lr * decay_rate ** (global_step / decay_steps); the
            # default (non-staircase) form decays smoothly every step.
            learning_rate = tf.train.exponential_decay(
                init_learning_rate, global_step, decay_steps, decay_rate)

            optimizer = tf.train.AdamOptimizer(learning_rate)
            grads_and_vars = optimizer.compute_gradients(cnn.loss)
            # apply_gradients increments global_step, which drives the decay.
            train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)

            # Output directory for models and summaries.
            timestamp = str(int(time.time()))
            out_dir = os.path.abspath(os.path.join(os.path.curdir, "textcnn_words", timestamp))
            logging.info("Writing to %s", out_dir)

            # Checkpoint directory.  TensorFlow assumes this directory already
            # exists; exist_ok=True avoids the exists()/makedirs() race.
            checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
            checkpoint_prefix = os.path.join(checkpoint_dir, "model")
            os.makedirs(checkpoint_dir, exist_ok=True)
            saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)

            # Initialize all variables.
            sess.run(tf.global_variables_initializer())

            def train_step(x_batch, y_batch, epoch):
                """Run a single optimization step; log metrics every log_every steps."""
                feed_dict = {
                    cnn.input_x: x_batch,
                    cnn.input_y: y_batch,
                    cnn.dropout_keep_prob: FLAGS.dropout_keep_prob
                }
                _, step, loss, rloss, accuracy, lr = sess.run(
                    [train_op, global_step, cnn.loss, cnn.rloss, cnn.accuracy, learning_rate],
                    feed_dict)

                if step % FLAGS.log_every == 0:
                    logging.info("epoch %d, step %d, loss %.4f [%.4f], acc %.4f, lr %.6f",
                                 epoch, step, loss, rloss, accuracy, lr)

            def dev_step(x_batch, y_batch, epoch):
                """Evaluate the model on a dev set (dropout disabled).

                NOTE(review): the whole test set is fed in one session run —
                fine for small dev sets, may OOM on large ones.
                """
                feed_dict = {
                    cnn.input_x: x_batch,
                    cnn.input_y: y_batch,
                    cnn.dropout_keep_prob: 1.0  # no dropout at eval time
                }
                step, loss, rloss, accuracy, lr = sess.run(
                    [global_step, cnn.loss, cnn.rloss, cnn.accuracy, learning_rate],
                    feed_dict)

                logging.info("=" * 75)
                logging.info("Evaluation:")
                logging.info("epoch %d, step %d, loss %.4f [%.4f], acc %.4f, lr %.6f",
                             epoch, step, loss, rloss, accuracy, lr)
                logging.info("=" * 75)

            # Training loop. For each batch...
            for x_batch, y_batch, epoch, step in corpus_helper.next_batch(FLAGS.batch_size):
                train_step(x_batch, y_batch, epoch)

                if step % FLAGS.checkpoint_every == 0:
                    path = saver.save(sess, checkpoint_prefix, global_step=step)
                    logging.info("Saved model checkpoint to %s", path)

                if step % FLAGS.evaluate_every == 0:
                    dev_step(corpus_helper.test_x, corpus_helper.test_y, epoch)


if __name__ == '__main__':
    # tf.app.run() parses FLAGS from sys.argv, then invokes main().
    tf.app.run()
