#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import json
import logging
import os

import tensorflow as tf
import numpy as np

import utils
from model import Model
from utils import read_data
from utils import index_data

from flags import parse_args

# Parse command-line flags once at module load; `unparsed` collects any
# arguments the flag parser did not recognize.
FLAGS, unparsed = parse_args()


logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s', level=logging.DEBUG)


# Raw training corpus (e.g. QuanSongCi.txt). read_data loads the whole
# text; len() below suggests it returns a sequence of tokens/characters
# -- TODO confirm against utils.read_data.
vocabulary = read_data(FLAGS.text)
vocabulary_size = len(vocabulary)
print('Data size', vocabulary_size)


# Load the token -> id mapping.
# Fix: json.load() no longer accepts an `encoding` keyword (deprecated
# since 3.1, removed in Python 3.9) -- passing it raised TypeError. The
# files are already opened with the correct encoding, which is all that
# is needed.
with open(FLAGS.dictionary, encoding='utf-8') as inf:
    dictionary = json.load(inf)

# Load the id -> token mapping; keys are string-formatted integers.
with open(FLAGS.reverse_dictionary, encoding='utf-8') as inf:
    reverse_dictionary = json.load(inf)

# Dense list form of the reverse dictionary, indexed directly by id.
reverse_list = [reverse_dictionary[str(i)]
                for i in range(len(reverse_dictionary))]

# Build the model graph with the configured hyper-parameters.
model = Model(learning_rate=FLAGS.learning_rate, batch_size=FLAGS.batch_size, num_steps=FLAGS.num_steps)
model.build()


with tf.Session() as sess:
    # TensorBoard writer: records the graph plus per-step summaries.
    summary_string_writer = tf.summary.FileWriter(FLAGS.output_dir, sess.graph)

    saver = tf.train.Saver(max_to_keep=5)
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    logging.debug('Initialized')

    # Resume from the newest checkpoint if one exists.
    # Fix: tf.train.latest_checkpoint() returns None when the directory
    # holds no checkpoint; the old code relied on saver.restore(sess,
    # None) raising and a bare `except Exception` swallowing it, which
    # also hid genuine restore failures. Test for None explicitly.
    checkpoint_path = tf.train.latest_checkpoint(FLAGS.output_dir)
    if checkpoint_path:
        saver.restore(sess, checkpoint_path)
        logging.debug('restore from [{0}]'.format(checkpoint_path))
    else:
        logging.debug('no check point found....')

    # Fix: initialize gs so the final save below cannot raise NameError
    # when the batch generator yields nothing.
    gs = 0
    for x in range(1):
        # Fresh RNN state at the start of the epoch; carried across batches.
        state = sess.run(model.initial_state)

        # Renamed from `input`/`lable`: avoid shadowing the builtin and
        # fix the typo.
        for inputs, labels in utils.get_train_data(vocabulary, batch_size=FLAGS.batch_size, num_steps=FLAGS.num_steps):
            input_data = utils.index_data(inputs, dictionary)
            label_data = utils.index_data(labels, dictionary)

            feed_dict = {model.X: input_data,
                         model.Y: label_data,
                         model.keep_prob: 0.6,  # dropout keep-probability while training
                         model.initial_state: state}

            gs, _, state, loss_value, summary_string = sess.run(
                [model.global_step,
                 model.optimizer,
                 model.final_state,
                 model.loss,
                 model.merged_summary_op],
                feed_dict=feed_dict)

            summary_string_writer.add_summary(summary_string, gs)
            if gs % 10 == 0:
                # Approximate epoch index from steps-per-epoch.
                epoch_n = int(gs / (vocabulary_size / FLAGS.batch_size / FLAGS.num_steps))
                logging.debug('epoch [{0}] step [{1}] loss [{2}]'.format(epoch_n, gs, loss_value))
                saver.save(sess, os.path.join(FLAGS.output_dir, "model.ckpt"), global_step=gs)

            if gs >= FLAGS.max_steps:
                break

        # End-of-epoch checkpoint (gs is 0 if no batches were produced).
        saver.save(sess, os.path.join(FLAGS.output_dir, "model.ckpt"), global_step=gs)

    summary_string_writer.close()
