import tensorflow as tf

'''
step 1: X = [x_1, x_2, ..., x_T]
step 2: compute attention   K = V = x
        alpha_i = softmax(a(k_i, q)) = softmax(a(xi, q))
        
        a(xi, q):
                v * tanh(w * x + b)     # additive
                xi * q                  # dot-product
                xi * q / sqrt(d)        # scaled dot-product
                xi * W * q              # bilinear
step 3: sum(alpha_i * xi)

q can be a dedicated trainable variable learned during training, or one can set q = k = v (as in the Transformer)
'''


'''
encoder output: hi  i = 1...T
decoder state:  s

Attention:
    c = sum(alpha_i * hi)



Attention is All You Need
    Attention(Q, K, V) = softmax(Q * K_T / sqrt(d_k)) * V


Neural Machine Translation By Jointly Learning to Align and Translate
    e_i = a(s, hi)      i = 1...T,  a: v*tanh(w*x+b)
    alpha_i = exp(e_i) / sum(e_k)    k = 1...T  tf.nn.softmax
    c = sum(alpha_i * hi)

'''


# additive attention (learned query folded into the projection)
def attention(inputs, inputs_len):
    '''
    Additive attention pooling over a padded sequence.

    Scores each timestep with e_i = sum(v * tanh(W x_i)) — the projection is
    implemented as a 1x1 conv2d, which is equivalent to a per-position dense
    layer — masks out padding, and returns the softmax-weighted sum.

    Args:
        inputs: [batch_size, seq_len, hidden_size]
        inputs_len: [batch_size] true (unpadded) lengths
    Return:
        outputs: [batch_size, hidden_size]
    '''
    _, maxlen, hidden_size = inputs.get_shape().as_list()

    # insert a dummy spatial axis so conv2d acts as a pointwise projection
    expanded = tf.expand_dims(inputs, axis=2)  # [batch_size, seq_len, 1, hidden_size]
    w_h = tf.get_variable('w_h', [1, 1, hidden_size, hidden_size])
    v = tf.get_variable('v', [hidden_size])

    projected = tf.nn.conv2d(expanded, w_h, [1, 1, 1, 1], 'SAME')
    # e_i = v . tanh(W x_i), reduced over the dummy axis and hidden axis
    scores = tf.reduce_sum(v * tf.tanh(projected), [2, 3])  # [batch_size, seq_len]

    # drive padded positions toward -inf so softmax gives them ~0 weight
    mask = tf.sequence_mask(inputs_len, maxlen=maxlen, dtype=tf.float32)
    logits = mask * scores + (-1e6) * (1 - mask)
    weights = tf.nn.softmax(logits, axis=-1)

    # weighted sum over time: [b, h, s] @ [b, s, 1] -> [b, h, 1]
    context = tf.matmul(tf.transpose(inputs, [0, 2, 1]), tf.expand_dims(weights, 2))

    return tf.reshape(context, [-1, hidden_size])


# additive attention (dense-layer variant)
def attention_bjl(inputs, inputs_len):
    '''
    Additive attention: e_i = v^T tanh(W x_i + b).

    Args:
        inputs: [batch_size, seq_len, hidden_size]
        inputs_len: [batch_size] true (unpadded) lengths
    Return:
        outputs: [batch_size, hidden_size]
    '''
    _, seq_len, hidden_size = inputs.get_shape().as_list()

    # tanh(W x + b): [batch_size, seq_len, hidden_size]
    wb = tf.layers.dense(inputs, hidden_size, activation=tf.tanh, use_bias=True)
    v = tf.get_variable('v', [hidden_size, 1])
    # BUG FIX: the target shape belongs to tf.reshape, not as a separate
    # argument to tf.matmul (original: tf.matmul(tf.reshape(wb), (-1, h), v)).
    e = tf.matmul(tf.reshape(wb, (-1, hidden_size)), v)  # [batch_size * seq_len, 1]
    e = tf.reshape(e, (-1, seq_len))                     # [batch_size, seq_len]

    # mask out padded positions before the softmax
    mask = tf.sequence_mask(inputs_len, maxlen=seq_len, dtype=tf.float32)
    att = e * mask + (1 - mask) * (-1e6)
    alpha = tf.nn.softmax(att, axis=-1)

    # context = sum_i alpha_i * x_i: [b, h, s] @ [b, s, 1] -> [b, h, 1]
    o = tf.matmul(tf.transpose(inputs, [0, 2, 1]), tf.expand_dims(alpha, 2))

    return tf.reshape(o, [-1, hidden_size])


# dot-product attention
def attention_dot(inputs, inputs_len, query):
    '''
    Dot-product attention: e_i = x_i . q, pooled over the sequence.

    Completes the previously unfinished stub, following the same
    mask -> softmax -> weighted-sum pattern as the additive variants.

    Args:
        inputs: [batch_size, seq_len, hidden_size]
        inputs_len: [batch_size] true (unpadded) lengths
        query: [batch_size, hidden_size]
    Return:
        outputs: [batch_size, hidden_size]
    '''
    _, seq_len, hidden_size = inputs.get_shape().as_list()

    # scores e_i = x_i . q: [b, s, h] @ [b, h, 1] -> [b, s, 1] -> [b, s]
    state = tf.expand_dims(query, 2)
    e = tf.squeeze(tf.matmul(inputs, state), 2)  # [batch_size, seq_len]

    # mask out padded positions before the softmax
    mask = tf.sequence_mask(inputs_len, maxlen=seq_len, dtype=tf.float32)
    att = e * mask + (1 - mask) * (-1e6)
    alpha = tf.nn.softmax(att, axis=-1)

    # context = sum_i alpha_i * x_i: [b, h, s] @ [b, s, 1] -> [b, h, 1]
    o = tf.matmul(tf.transpose(inputs, [0, 2, 1]), tf.expand_dims(alpha, 2))

    return tf.reshape(o, [-1, hidden_size])


# gpt q-k-v
def attention_qkv(q, k, v, train=False, scale=False):
    '''
    Causal (masked) dot-product attention, GPT style.

    Args:
        q: [batch_size, num_head, seq_len, hidden_size]
        k: [batch_size, num_head, hidden_size, seq_len]  (already transposed)
        v: [batch_size, num_head, seq_len, hidden_size]
        train: apply dropout (rate 0.1) to the attention weights when True
        scale: divide the logits by sqrt(hidden_size) when True
    Return:
        [batch_size, num_head, seq_len, hidden_size]
    '''
    # raw logits: [batch_size, num_head, seq_len, seq_len]
    logits = tf.matmul(q, k)

    if scale:
        depth = tf.shape(v)[-1]
        logits = logits * tf.rsqrt(tf.cast(depth, tf.float32))

    # causal mask: lower-triangular ones, so position i attends only to j <= i
    seq = tf.shape(logits)[-1]
    causal = tf.matrix_band_part(tf.ones([seq, seq]), -1, 0)
    causal = tf.reshape(causal, [1, 1, seq, seq])
    logits = logits * causal + -1e9 * (1 - causal)

    weights = tf.nn.softmax(logits)

    # attention dropout (TF1 keep_prob signature)
    pdrop = 0.1
    if train and pdrop > 0:
        weights = tf.nn.dropout(weights, 1 - pdrop)

    # weighted sum of values: [batch_size, num_head, seq_len, hidden_size]
    return tf.matmul(weights, v)