#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File       :   GeneratePoem.py    
@Contact    :   LJL959@QQ.com.com
@License    :   (C)Copyright 2019-2020
@Modify Time      @Author    @Version    @Description
------------      -------    --------    -----------
2020/4/7 11:18   LiuJiaoLong      1.0         None
@Description : 模型训练与诗歌生成
'''
import os
import tensorflow as tf
import numpy as np
from Model.LSTMModel import LSTMModel

# GPU session config: allocate GPU memory on demand instead of grabbing it all up front.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# NOTE(review): this module-level session appears unused — train()/gen_poem()/generate()
# each open their own tf.Session(config=config); confirm before removing.
sess = tf.Session(config=config)
batch_size = 64  # training batch size (generation functions shadow this locally with 1)
poem_file = './data/poems.txt'  # corpus path — unused in this chunk; presumably read by the caller


# 训练模型 — train the LSTM poem model on pre-batched data.
def train(words, poem_vector, x_batches, y_baches):
    """Train the poem LSTM, resuming from the latest checkpoint if present.

    Args:
        words: vocabulary sequence; len(words) is the vocab size fed to LSTMModel.
        poem_vector: encoded poems; only its length is used to derive the batch count.
        x_batches: list of input batches, each [batch_size, seq_len].
        y_baches: list of target batches aligned with x_batches.

    Side effects: writes checkpoints to ./SaveModel/ and summaries to ./Logs.
    """
    input_data = tf.placeholder(tf.int32, [batch_size, None])
    output_targets = tf.placeholder(tf.int32, [batch_size, None])
    end_points = LSTMModel(len(words), input_data=input_data, output_data=output_targets, batch_size=batch_size)
    saver = tf.train.Saver(tf.global_variables())
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    # 为了更直观的展示训练过程，进行可视化处理，将图形、训练过程等数据合并在一起
    # (merge all registered summaries for TensorBoard; assumes LSTMModel adds
    #  at least one summary — merge_all() returns None otherwise. TODO confirm)
    merge = tf.summary.merge_all()
    with tf.Session(config=config) as sess:
        writer = tf.summary.FileWriter('./Logs', sess.graph)
        sess.run(init_op)
        start_epoch = 0
        model_dir = "./SaveModel/"
        epochs = 50
        checkpoint = tf.train.latest_checkpoint(model_dir)
        if checkpoint:
            saver.restore(sess, checkpoint)
            print("## restore from the checkpoint {0}".format(checkpoint))
            # Checkpoints are named poem-<epoch>, so the suffix is the epoch to resume from.
            start_epoch += int(checkpoint.split('-')[-1])
            print("##start training---")
        # BUG FIX: `epoch` was unbound in the KeyboardInterrupt handler when the
        # interrupt fired before the first loop iteration (NameError on save).
        epoch = start_epoch
        # Loop-invariant: number of full batches per epoch (hoisted out of the epoch loop).
        n_chunk = len(poem_vector) // batch_size
        try:
            for epoch in range(start_epoch, epochs):
                for n in range(n_chunk):
                    loss, _, _ = sess.run(
                        [end_points['total_loss'], end_points['last_state'], end_points['train_op']],
                        feed_dict={input_data: x_batches[n], output_targets: y_baches[n]})
                    print('Epoch: %d, batch: %d, training loss: %.6f' % (epoch, n, loss))
                    if epoch % 5 == 0:
                        result = sess.run(merge, feed_dict={input_data: x_batches[n], output_targets: y_baches[n]})
                        writer.add_summary(result, epoch * n_chunk + n)
                # BUG FIX: checkpoint once per qualifying epoch; the original saved
                # inside the batch loop, rewriting the checkpoint every batch.
                if epoch % 5 == 0:
                    saver.save(sess, os.path.join(model_dir, "poem"), global_step=epoch)
        except KeyboardInterrupt:
            print("## Interrupt manually, try saving checkpoint for now....")
            saver.save(sess, os.path.join(model_dir, "poem"), global_step=epoch)
            print("## Last epoch were saved,next time will start from epoch {}.".format(epoch))


# to_word函数：将词向量转成文字 — sample one character from the model's
# predicted distribution.
def to_word(predict, vocabs):
    """Sample a character from `predict` proportionally to its weight.

    Args:
        predict: 1-D array of (unnormalised) per-token probabilities,
            aligned index-for-index with `vocabs`.
        vocabs: sequence of characters (the vocabulary).

    Returns:
        One character drawn from `vocabs`.
    """
    t = np.cumsum(predict)
    s = np.sum(predict)
    # Draw a point uniformly in [0, s) and locate its cumulative bucket.
    sample = int(np.searchsorted(t, np.random.rand(1) * s))
    # BUG FIX: guard must be >=, not > — sample == len(vocabs) would have
    # slipped past the original check and raised IndexError below.
    if sample >= len(vocabs):
        sample = len(vocabs) - 1
    return vocabs[sample]


# 定义诗歌生成函数 — generate a poem from a user-supplied starting character.
def gen_poem(words, to_num):
    """Interactively generate a poem from the latest checkpoint.

    Args:
        words: vocabulary sequence (characters), also used to decode predictions.
        to_num: callable mapping a character to its vocabulary index.

    Returns:
        The generated poem as a string (also printed).
    """
    batch_size = 1  # generation runs one character at a time
    print('模型保存目录为：{}'.format('./SaveModel'))
    input_data = tf.placeholder(tf.int32, [batch_size, None])
    end_points = LSTMModel(len(words), input_data=input_data, batch_size=batch_size)
    saver = tf.train.Saver(tf.global_variables())
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    with tf.Session(config=config) as sess:
        sess.run(init_op)
        checkpoint = tf.train.latest_checkpoint('./SaveModel/')
        saver.restore(sess, checkpoint)

        # Prime the RNN state with the begin-of-poem marker 'B'.
        x = np.array(to_num('B')).reshape(1, 1)
        _, last_state = sess.run([end_points['prediction'], end_points['last_state']],
                                 feed_dict={input_data: x})

        # BUG FIX: the original called `input_data("请输入起始字符：")` — invoking
        # the tf placeholder tensor instead of the builtin input(), which raised
        # a TypeError at runtime.
        word = input("请输入起始字符：")
        poem_ = ''
        while word != 'E':  # 'E' is the end-of-poem marker
            poem_ += word
            x = np.array(to_num(word)).reshape(1, 1)
            predict, last_state = sess.run(
                [end_points['prediction'], end_points['last_state']],
                feed_dict={input_data: x, end_points['initial_state']: last_state})
            word = to_word(predict, words)
        print(poem_)
        return poem_


# 生成指定风格的古诗 — generate a poem conditioned on a style sentence.
def generate(words, to_num, style_words="狂杀将军战燕然，大漠孤烟黄河骑。"):
    """Generate a poem seeded by a user start phrase, conditioned on a style.

    Args:
        words: vocabulary sequence (characters), also used to decode predictions.
        to_num: callable mapping a character to its vocabulary index.
        style_words: optional sentence fed through the RNN first so its hidden
            state biases the generation style; pass a falsy value to skip.

    Returns:
        The generated poem as a string (also printed) — added for consistency
        with gen_poem(); the original returned None.
    """
    batch_size = 1  # generation runs one character at a time
    input_data = tf.placeholder(tf.int32, [batch_size, None])
    end_points = LSTMModel(len(words), input_data=input_data, batch_size=batch_size)
    saver = tf.train.Saver(tf.global_variables())
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    with tf.Session(config=config) as sess:
        sess.run(init_op)
        checkpoint = tf.train.latest_checkpoint('./SaveModel')
        saver.restore(sess, checkpoint)
        # Prime the RNN state with the begin-of-poem marker 'B'.
        x = np.array(to_num('B')).reshape(1, 1)
        _, last_state = sess.run([end_points['prediction'], end_points['last_state']],
                                 feed_dict={input_data: x})
        if style_words:
            # Run the style sentence through the network so its hidden state
            # carries the style into generation (predictions are discarded).
            for word in style_words:
                x = np.array(to_num(word)).reshape(1, 1)
                last_state = sess.run(end_points['last_state'],
                                      feed_dict={input_data: x, end_points['initial_state']: last_state})
        start_words = list(input("请输入起始语句："))
        start_words_len = len(start_words)

        result = start_words.copy()
        max_len = 200  # hard cap on generated length to avoid runaway loops
        for i in range(max_len):
            if i < start_words_len:
                # Teacher-force the user's start phrase through the network.
                w = start_words[i]
                x = np.array(to_num(w)).reshape(1, 1)
                predict, last_state = sess.run(
                    [end_points['prediction'], end_points['last_state']],
                    feed_dict={input_data: x, end_points['initial_state']: last_state})
            else:
                # Free-running generation: feed back the previous sample.
                predict, last_state = sess.run(
                    [end_points['prediction'], end_points['last_state']],
                    feed_dict={input_data: x, end_points['initial_state']: last_state})
                w = to_word(predict, words)
                x = np.array(to_num(w)).reshape(1, 1)
                if w == 'E':  # end-of-poem marker
                    break
                result.append(w)
        poem_ = ''.join(result)
        print(poem_)
        return poem_