# coding:utf-8
# Author : hiicy redldw
# Date : 2019/05/30
import tensorflow as tf
from tensorflow.contrib import seq2seq
from tensorflow.python.ops.rnn import dynamic_rnn
# https://zhuanlan.zhihu.com/p/47929039
def get_encoder_layer(input_data,source_vocab_size,encoding_embedding_size,rnn_size,source_sequence_length,num_layer):
    '''
    Build the encoder: embedding lookup followed by a stacked GRU run through
    dynamic_rnn.

    Params:
    - input_data: int tensor of source token ids, shape (batch, time)
    - source_vocab_size: size of the source vocabulary (rows of the embedding table)
    - encoding_embedding_size: embedding vector size
    - rnn_size: number of hidden units per GRU cell
    - source_sequence_length: per-example true sequence lengths, shape (batch,)
    - num_layer: number of stacked GRU layers

    Returns:
    - (outputs, states) as produced by dynamic_rnn.
    '''
    # BUG FIX: the embedding table needs one row per vocabulary entry.
    # The original sized it with source_sequence_length (the per-batch length
    # tensor, also used below as sequence_length=) and never used
    # source_vocab_size at all.
    encoder_embedding = tf.Variable(
        tf.truncated_normal([source_vocab_size, encoding_embedding_size], stddev=0.1))
    embedding_inputs = tf.nn.embedding_lookup(encoder_embedding, input_data)

    rnn_cell = tf.nn.rnn_cell.MultiRNNCell(
        [tf.nn.rnn_cell.GRUCell(rnn_size) for _ in range(num_layer)])
    # sequence_length lets each batch keep its true lengths instead of padding
    # every batch to the same size, saving wasted computation.
    # BUG FIX: dynamic_rnn requires dtype when no initial_state is given;
    # without it the original raised at graph-construction time.
    outputs, states = dynamic_rnn(rnn_cell, embedding_inputs,
                                  sequence_length=source_sequence_length,
                                  dtype=tf.float32)
    return outputs, states


def decoding_layer(target_letter_to_int, decoding_embedding_size, num_layers,source_sequence_length,
                   rnn_size,source,sequence_length,batch_size,encoder_final_state,
                   target_sequence_length, max_target_sequence_length, encoder_output, decoder_input):
    '''
    Build the training-time decoder: target embedding, stacked LSTM wrapped
    with Luong attention, and a dense projection to vocabulary logits.

    Params:
    - target_letter_to_int: target-side token -> id mapping (len() gives the vocab size)
    - decoding_embedding_size: embedding vector size
    - num_layers: number of stacked LSTM layers
    - source_sequence_length: per-example source lengths (masks the attention memory)
    - rnn_size: hidden units per LSTM cell, also used as the attention layer size
    - source, sequence_length: unused here; kept for signature compatibility
    - batch_size: batch size used to build the initial attention state
    - encoder_final_state: encoder's final state, cloned into the decoder's initial state
    - target_sequence_length: per-example target lengths for the TrainingHelper
    - max_target_sequence_length: cap on the number of decoding steps
    - encoder_output: encoder outputs used as the attention memory
    - decoder_input: decoder input token ids (teacher-forcing inputs)

    Returns:
    - the BasicDecoderOutput from dynamic_decode (fields: rnn_output, sample_id)
    '''
    target_vocab_size = len(target_letter_to_int)
    decoder_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size]))
    decoder_embed_input = tf.nn.embedding_lookup(decoder_embeddings, decoder_input)

    def get_decoder_cell(rnn_size):
        # One LSTM layer; small uniform init with a fixed seed for reproducibility.
        return tf.nn.rnn_cell.LSTMCell(
            rnn_size,
            initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))

    cell = tf.nn.rnn_cell.MultiRNNCell([get_decoder_cell(rnn_size) for _ in range(num_layers)])

    # Luong (multiplicative) attention over the encoder outputs, masked by the
    # true source lengths.  (Local renamed from the misspelled
    # "attension_menchian".)
    attention_mechanism = seq2seq.LuongAttention(rnn_size, encoder_output, source_sequence_length)
    decoder_cell = seq2seq.AttentionWrapper(cell, attention_mechanism, rnn_size)

    # Start from the attention wrapper's zero state but seed the inner RNN
    # state with the encoder's final state.
    # NOTE(review): the encoder elsewhere in this file stacks GRU cells while
    # this decoder stacks LSTM cells; cloning a GRU state into an
    # LSTMStateTuple-shaped slot will not match — confirm both sides use the
    # same cell type before relying on this clone.
    decoder_initial_state = decoder_cell.zero_state(batch_size=batch_size, dtype=tf.float32).clone(
        cell_state=encoder_final_state)

    # Dense projection from the RNN output to vocabulary logits.
    output_layer = tf.layers.Dense(
        target_vocab_size,
        kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))

    # Training decoder (teacher forcing).
    with tf.variable_scope("decode"):
        # The helper reads the ground-truth embeddings step by step: at each
        # time step it decides the next input from the previous step's target.
        # (Unified on the `seq2seq` alias imported at the top of the file —
        # `tf.contrib.seq2seq` is the same module.)
        training_helper = seq2seq.TrainingHelper(inputs=decoder_embed_input,
                                                 sequence_length=target_sequence_length,
                                                 time_major=False)
        # Assemble the decoder.
        training_decoder = seq2seq.BasicDecoder(decoder_cell,
                                                training_helper,
                                                decoder_initial_state,
                                                output_layer)
        # Run the actual seq2seq decoding loop.
        training_decoder_output, final_state, final_sequence_lengths = seq2seq.dynamic_decode(
            training_decoder,
            impute_finished=True,
            maximum_iterations=max_target_sequence_length)

    return training_decoder_output


# NOTE(review): these statements execute at module import time but reference
# names (training_decoder_output, target_sequence_length,
# max_target_sequence_length, targets) that are only defined inside
# decoding_layer — or nowhere in this file at all — so importing this module
# raises NameError.  Presumably this loss/optimizer wiring was meant to live
# inside a graph-building function that receives those tensors; verify against
# the intended caller.
training_logits = tf.identity(training_decoder_output.rnn_output, 'logits')
# Mask so that padded time steps contribute zero to the loss.
masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks')

with tf.name_scope("optimization"):
    # Loss function
    # Weighted softmax cross-entropy averaged over the masked sequence.
    cost = tf.contrib.seq2seq.sequence_loss(
        training_logits,
        targets,
        masks)

    # Optimizer
    '''
    optimizer = tf.train.AdamOptimizer(lr)
    # Gradient Clipping
    gradients = optimizer.compute_gradients(cost)
    capped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if grad is not None]
    train_op = optimizer.apply_gradients(capped_gradients)
    '''
    # simple minimize Optimizer



