'''

A set of functions for sequence-to-sequence models in TensorFlow.

Sequence-to-sequence recurrent neural networks can learn complex functions
that map input sequences to output sequences. These models yield very good results.


The tensorflow tutorial can be found at https://www.tensorflow.org/versions/master/tutorials/seq2seq/index.html

'''

import tensorflow as tf

try:
    from tensorflow.contrib.rnn.python.ops import rnn_cell_impl
except ImportError:
    from tensorflow.python.ops import rnn_cell_impl


try:
    linear = rnn_cell_impl._linear
except AttributeError:
       from tensorflow.contrib.rnn.python.ops import core_rnn_cell
       linear = core_rnn_cell._linear


def _extract_argmax_and_embed(embedding, output_projection=None, update_embedding=True):
    '''
    Get a loop function that extracts the previous symbol and embeds it.

    Args:
          embedding: embedding tensor for symbols.
          output_projection: None or a pair (W, B). If provided, each fed previous
              output will first be multiplied by W and added B.
          update_embedding: Boolean; if False, the gradients will not propagate
              through the embeddings.

    Returns:
           A loop function that maps (prev_output, step) -> embedded previous symbol.
    '''
    def loop_function(prev, _):
        # Project the raw cell output to vocabulary logits when a projection is given.
        if output_projection is not None:
            prev = tf.nn.xw_plus_b(prev, output_projection[0], output_projection[1])
        # Greedy decoding: pick the most likely symbol at this step.
        # (This must run even when output_projection is None; the original code
        # nested it inside the `if`, so the loop function returned None in that case.)
        prev_symbol = tf.argmax(prev, 1)
        # Note that gradients will not propagate through the second parameter of
        # embedding_lookup.
        emb_prev = tf.nn.embedding_lookup(embedding, prev_symbol)
        if not update_embedding:
            emb_prev = tf.stop_gradient(emb_prev)
        return emb_prev
    return loop_function



def attention_decoder(decoder_inputs, initial_state, attention_states, cell, output_size=None, num_heads=1, loop_function=None, dtype=tf.float32, scope = None, initial_state_attention=False, attn_num_hidden=128):
    '''
    RNN decoder with attention for the sequence-to-sequence model.
    
    Here, 'attention' means that during decoding, the RNN can look up information in the additional tensor [attention_states], and focuses on a few entries from the tensor.

    Args:
         decoder_inputs: A list of 2D tensor [batch_size,input_size]
         initial_state: 2D tensor [batch_size, cell.state_size]
         attention_states: 3D tensor [batch_size,attn_length,attn_size]
         cell: rnn_cell.RNNCell defining the cell function and size
         output_size: size of the output vectors; if None, use cell.output_size.
         num_heads: number of the attention heads that read from attention_states.
         loop_function: if not none, this function will be applied to i-th output in order to generate (i+1)-th input, and decoder_inputs will be ignored, except for the first element.
         For example: next = loop_function(prev,i)
                      * prev is a 2D tensor [batch_size, output_size]
                      * i is an integer, the step number
                      * next is a 2D tensor [batch_size,input_size]
         dtype: The dtype to use for the RNN initial state (default:tf.float32)
         scope: VariableScope for the created subgraph; default to 'attention_decoder'
         initial_state_attention: If False (default), initial attentions are zero.
            If True, initialize the attentions from the initial state and attention
            states -- useful when we wish to resume decoding from a previously
            stored decoder state and attention states.


    Returns:
         A tuple of the form (outputs, state), where:
             outputs: A list of the same length as decoder_inputs of 2D tensor with shape [batch_size, output_size]. These present the generated outputs.


                Output i is computed from input i (which is either the i-th element
                of decoder_inputs or loop_function(output {i-1}, i)) as follows.
                First, we run the cell on a combination of the input and previous
                attention masks:
                    cell_output, new_state = cell(linear(input, prev_attn), prev_state).
                Then, we calculate new attention masks:
                    new_attn = softmax(V^T * tanh(W * attention_states + U * new_state))
                and then we calculate the output:
                    output = linear(cell_output, new_attn).

             state: The state of each decoder cell at the final time-step.
                    2D tensor [batch_size, cell.state_size]
    Raises:
        ValueError: when num_heads is not positive, there are no inputs, or shapes
            of attention_states are not set.
    '''

    assert num_heads == 1, 'We only consider the case where num_heads=1!'
    if not decoder_inputs:
         raise ValueError("Must provide at least 1 input to attention decoder.")
    if not attention_states.get_shape()[1:2].is_fully_defined():
        raise ValueError("Shape[1] and [2] of attention_states must be known: %s"
                         % attention_states.get_shape())
    if output_size is None:
        output_size = cell.output_size

    with 




