# coding:utf-8
# Author : hiicy redldw
# Date : 2019/05/28
import tensorflow as tf
from tensorflow.contrib import seq2seq
from tensorflow.python.layers.core import Dense
import numpy as np
from tensorflow.python.ops.rnn import dynamic_rnn,bidirectional_dynamic_rnn
"""
Encoder input:  (batch, time) token ids over a vocabulary of encoder_vocab_size.
Decoder input:  (batch, time) token ids over a vocabulary of decoder_vocab_size.
    State input: the encoder's final state (one vector per layer).
    Targets:     (batch, time) token ids.
The final layer is a Dense projection onto the vocabulary.

Pipeline: define the decoder cell -> combine a TrainingHelper (or, at
inference, a GreedyEmbeddingHelper) with BasicDecoder -> call
dynamic_decode to unroll the decoder.  Attention, if added, mainly
wraps the cell in that first step.
"""


class Seq2SeqModel(object):
    """LSTM encoder-decoder built on tf.contrib.seq2seq.

    The encoder embeds the source token ids and runs a multi-layer LSTM;
    its final state initialises the decoder.  During training the decoder
    is driven by a TrainingHelper over the ground-truth targets (teacher
    forcing); at inference it is driven by a GreedyEmbeddingHelper that
    embeds each step's argmax and feeds it back as the next input.
    """

    def __init__(self, rnn_size, layer_size, encoder_vocab_size,
                 decoder_vocab_size, embedding_dim, grad_clip, is_inference=False):
        """Build the graph.

        Args:
            rnn_size: number of units per LSTM layer.
            layer_size: number of stacked LSTM layers (encoder and decoder).
            encoder_vocab_size: source vocabulary size.
            decoder_vocab_size: target vocabulary size.
            embedding_dim: embedding dimension for both vocabularies.
            grad_clip: global-norm gradient clipping threshold (training only).
            is_inference: build the greedy-decoding graph instead of the
                training graph.
        """
        # Source token ids, shape (batch, time).
        self.input_x = tf.placeholder(tf.int32, shape=[None, None], name='input_ids')

        # Trainable embedding tables for the two vocabularies.
        with tf.variable_scope('embedding'):
            encoder_embedding = tf.Variable(
                tf.truncated_normal(shape=[encoder_vocab_size, embedding_dim], stddev=0.1),
                name='encoder_embedding')
            decoder_embedding = tf.Variable(
                tf.truncated_normal(shape=[decoder_vocab_size, embedding_dim], stddev=0.1),
                name='decoder_embedding')

        # Encoder: stacked LSTM over the embedded inputs.
        with tf.variable_scope('encoder'):
            encoder = self._get_simple_lstm(rnn_size, layer_size)
        # NOTE(review): hard-coded device placement — assumes a machine with
        # at least three GPUs; confirm before deploying.
        with tf.device('/gpu:2'):
            input_x_embedded = tf.nn.embedding_lookup(encoder_embedding, self.input_x)

        encoder_outputs, encoder_state = dynamic_rnn(encoder, input_x_embedded, dtype=tf.float32)

        if is_inference:
            self.start_tokens = tf.placeholder(tf.int32, shape=[None], name='start_tokens')
            self.end_token = tf.placeholder(tf.int32, name='end_token')
            # Greedy decoding: embed the previous step's argmax and feed it
            # in as the current step's input.
            helper = seq2seq.GreedyEmbeddingHelper(decoder_embedding, self.start_tokens, self.end_token)
        else:
            self.target_ids = tf.placeholder(tf.int32, shape=[None, None], name='target_ids')
            self.decoder_seq_length = tf.placeholder(tf.int32, shape=[None], name='batch_seq_length')
            with tf.device('/gpu:2'):
                target_embeddeds = tf.nn.embedding_lookup(decoder_embedding, self.target_ids)
            # Teacher forcing: feed the ground-truth target at every step.
            helper = seq2seq.TrainingHelper(target_embeddeds, self.decoder_seq_length)

        with tf.variable_scope('decoder'):
            # Projects each RNN output onto the target vocabulary.
            fc_layer = Dense(decoder_vocab_size)
            decoder_cell = self._get_simple_lstm(rnn_size, layer_size)
            # The decoder starts from the encoder's final state.
            decoder = seq2seq.BasicDecoder(decoder_cell, helper, encoder_state, fc_layer)
        # dynamic_decode calls step() repeatedly on the Decoder object.
        # `logits` is a BasicDecoderOutput namedtuple (rnn_output, sample_id),
        # not a plain tensor.
        logits, final_state, final_sequence_length = seq2seq.dynamic_decode(decoder)

        if not is_inference:
            targets = tf.reshape(self.target_ids, [-1])
            logits_flat = tf.reshape(logits.rnn_output, [-1, decoder_vocab_size])
            print('shape logits_flat:{}'.format(logits_flat.shape))
            print('shape logits:{}'.format(logits.rnn_output.shape))

            # NOTE(review): padding positions are not masked out of the
            # loss — confirm that is acceptable for the training data.
            self.cost = tf.losses.sparse_softmax_cross_entropy(targets, logits_flat)

            # Compute gradients of the loss w.r.t. the trainable variables,
            # clip them by global norm, then apply.
            tvars = tf.trainable_variables()
            grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars), grad_clip)

            optimizer = tf.train.AdamOptimizer(1e-3)
            self.train_op = optimizer.apply_gradients(zip(grads, tvars))
        else:
            # BUG FIX: dynamic_decode returns a BasicDecoderOutput namedtuple;
            # softmax must be applied to its rnn_output field, not to the
            # tuple itself.
            self.prob = tf.nn.softmax(logits.rnn_output)

    def _get_simple_lstm(self, rnn_size, layer_size):
        """Return a MultiRNNCell of `layer_size` LSTM cells with `rnn_size` units each."""
        lstm_layers = [tf.nn.rnn_cell.LSTMCell(rnn_size) for _ in range(layer_size)]
        return tf.nn.rnn_cell.MultiRNNCell(lstm_layers)


# 随机序列生成器
# Endless source of random training batches.
def random_sequences(length_from, length_to, vocab_lower, vocab_upper, batch_size):
    """Yield batches of ``batch_size`` random integer sequences, forever.

    Each sequence has a random length in [length_from, length_to] and
    token ids drawn uniformly from [vocab_lower, vocab_upper).
    """
    def pick_length():
        # A degenerate range means every sequence has the same length.
        if length_from == length_to:
            return length_from
        return np.random.randint(length_from, length_to + 1)

    while True:
        batch = []
        for _ in range(batch_size):
            seq = np.random.randint(low=vocab_lower, high=vocab_upper,
                                    size=pick_length())
            batch.append(seq.tolist())
        yield batch


# 填充序列
# Pad a list of sequences into a dense, time-major array.
def make_batch(inputs, max_sequence_length=None):
    """Zero-pad ``inputs`` and return them time-major.

    Args:
        inputs: list of integer sequences (lists) of possibly unequal length.
        max_sequence_length: pad/clip width; defaults to the longest sequence.

    Returns:
        (inputs_time_major, sequence_lengths) where inputs_time_major is an
        int32 array of shape (max_sequence_length, batch_size) and
        sequence_lengths is the list of original (unpadded) lengths.
    """
    sequence_lengths = [len(seq) for seq in inputs]
    batch_size = len(inputs)
    if max_sequence_length is None:
        # default=0 keeps an empty batch from raising ValueError on max().
        max_sequence_length = max(sequence_lengths, default=0)
    # Build batch-major first, then transpose: a simple way to pad by hand.
    inputs_batch_major = np.zeros(shape=[batch_size, max_sequence_length], dtype=np.int32)
    for i, seq in enumerate(inputs):
        # Slice assignment copies the whole row at once instead of
        # element-by-element.
        inputs_batch_major[i, :len(seq)] = seq
    inputs_time_major = inputs_batch_major.swapaxes(0, 1)
    return inputs_time_major, sequence_lengths


# ---- Stand-alone demo: train a plain (no-attention) seq2seq copy task ----
epochs = 2
vocab_size = 2000
input_embedding_size = 64
encoder_hidden_units = 128
decoder_hidden_units = 128
# BUG FIX: EOS must be an *integer* token id — the original string "<EOS>"
# crashed when make_batch tried to store it into an int32 array.  Ids 0
# (padding) and 1 (EOS) are reserved, so the random data starts at 2 to
# avoid colliding with them.
PAD = 0
EOS = 1
batches = random_sequences(0, 10, 2, vocab_size, 16)
# Build the graph.
encoder_inputs = tf.placeholder(shape=(None, None), dtype=tf.int32, name='encoder_inputs')
decoder_inputs = tf.placeholder(shape=(None, None), dtype=tf.int32, name='decoder_inputs')
decoder_targets = tf.placeholder(shape=(None, None), dtype=tf.int32, name='decoder_targets')
# One embedding table shared by encoder and decoder tokens.
embeddings = tf.Variable(tf.random_uniform([vocab_size, input_embedding_size], -1.0, 1.0), dtype=tf.float32)
encoder_inputs_embedded = tf.nn.embedding_lookup(embeddings, encoder_inputs)
decoder_inputs_embedded = tf.nn.embedding_lookup(embeddings, decoder_inputs)
# Encoder LSTM; inputs are time-major, matching make_batch's output layout.
encoder_cell = tf.nn.rnn_cell.LSTMCell(encoder_hidden_units)
encoder_output, encoder_final_state = tf.nn.dynamic_rnn(encoder_cell, encoder_inputs_embedded,
                                                        dtype=tf.float32, time_major=True)
# Decoder LSTM, initialised from the encoder's final state.
decoder_cell = tf.contrib.rnn.LSTMCell(decoder_hidden_units)
decoder_outputs, decoder_final_state = tf.nn.dynamic_rnn(
    decoder_cell, decoder_inputs_embedded,
    initial_state=encoder_final_state,
    dtype=tf.float32, time_major=True, scope="plain_decoder",
)
decoder_logits = tf.layers.Dense(vocab_size)(decoder_outputs)
decoder_prediction = tf.argmax(decoder_logits, 2)
# NOTE(review): padding positions contribute to the loss (no masking) —
# tolerable for this toy demo, but confirm before reusing.
stepwise_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
    labels=tf.one_hot(decoder_targets, depth=vocab_size, dtype=tf.float32),
    logits=decoder_logits,
)
loss = tf.reduce_mean(stepwise_cross_entropy)
train_op = tf.train.AdamOptimizer().minimize(loss)
train_graph = tf.get_default_graph()
loss_track = []
# Run the training session.
with tf.Session(graph=train_graph) as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(epochs):
        batch = next(batches)
        encoder_inputs_, _ = make_batch(batch)
        # Targets: sequence followed by EOS.  Decoder inputs: the same
        # sequence preceded by EOS (i.e. shifted right by one step).
        decoder_targets_, _ = make_batch([sequence + [EOS] for sequence in batch])
        decoder_inputs_, _ = make_batch([[EOS] + sequence for sequence in batch])
        feed_dict = {encoder_inputs: encoder_inputs_, decoder_inputs: decoder_inputs_,
                     decoder_targets: decoder_targets_,
                     }
        _, l = sess.run([train_op, loss], feed_dict)
        loss_track.append(l)
        if epoch == 0 or epoch % 1000 == 0:
            print('loss: {}'.format(sess.run(loss, feed_dict)))
            predict_ = sess.run(decoder_prediction, feed_dict)
            # Arrays are time-major, so transpose to iterate per sample.
            for i, (inp, pred) in enumerate(zip(feed_dict[encoder_inputs].T, predict_.T)):
                print('input > {}'.format(inp))
                print('predicted > {}'.format(pred))
                if i >= 20:
                    break