import tensorflow as tf

def _variable(name, shape, initializer):
    """Create (or reuse, under variable scopes) a graph variable."""
    return tf.get_variable(name, shape, initializer=initializer)

def _variable_with_weight_decay(name, shape, wd):
    """Create a Xavier-initialized variable, optionally registering an L2 penalty.

    When `wd` is not None, `wd * l2_loss(var)` is added to the
    'weightdecay_losses' collection so the training loss can sum it in later.
    """
    xavier = tf.contrib.layers.xavier_initializer(uniform=True, seed=None, dtype=tf.float32)
    var = _variable(name, shape, xavier)
    if wd is not None:
        # L2 regularization term, stashed in a collection rather than
        # added to any loss directly.
        tf.add_to_collection('weightdecay_losses', tf.nn.l2_loss(var) * wd)
    return var

def LSTM(dense):
    """Run a single-layer LSTM with 68 hidden units over `dense`.

    Args:
        dense: float32 input tensor; presumably [batch, n_steps, features]
            (batch-major, as dynamic_rnn's default) — TODO confirm with caller.

    Returns:
        Per-step outputs, shape [batch, n_steps, 68]; final state is discarded.
    """
    with tf.name_scope("LSTM"):
        cell = tf.contrib.rnn.LSTMCell(68, forget_bias=1.0, state_is_tuple=True)
        outputs, _ = tf.nn.dynamic_rnn(cell, dense, dtype=tf.float32)
    return outputs

def attention(inputs, attention_size, time_major=False):
    """Additive attention pooling over the time axis of RNN outputs.

    Args:
        inputs: [batch, time, hidden] tensor, or a (forward, backward) tuple
            from a Bi-RNN (concatenated along the feature axis), or
            [time, batch, hidden] when `time_major` is True. The time and
            hidden dimensions must be statically known.
        attention_size: dimensionality of the attention projection space.
        time_major: True when `inputs` is time-major.

    Returns:
        [batch, hidden] tensor: the attention-weighted sum of the time steps.
    """
    with tf.name_scope("Attention"):
        if isinstance(inputs, tuple):
            # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
            inputs = tf.concat(inputs, 2)

        if time_major:
            # (T,B,D) => (B,T,D)
            inputs = tf.transpose(inputs, [1, 0, 2])

        inputs_shape = inputs.shape
        sequence_length = inputs_shape[1].value  # static number of time steps
        hidden_size = inputs_shape[2].value  # static RNN feature size

        # Trainable attention parameters.
        W_omega = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
        b_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
        u_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))

        # Score each time step: v = tanh(x.W + b); score = v . u
        v = tf.tanh(tf.matmul(tf.reshape(inputs, [-1, hidden_size]), W_omega) + tf.reshape(b_omega, [1, -1]))
        vu = tf.matmul(v, tf.reshape(u_omega, [-1, 1]))
        # Bug fix: the previous manual softmax (exp(vu) / sum(exp(vu))) can
        # overflow for large scores. tf.nn.softmax subtracts the per-row max
        # internally and is numerically stable; same result otherwise.
        alphas = tf.nn.softmax(tf.reshape(vu, [-1, sequence_length]))

        # Output of (Bi-)RNN is reduced with the attention weights.
        output = tf.reduce_sum(inputs * tf.reshape(alphas, [-1, sequence_length, 1]), 1)

    return output

def out_layer(input):
    """Final affine projection: 68 features -> 2 logits, weights with L2 decay.

    NOTE(review): the parameter name `input` shadows the builtin but is kept
    for API compatibility with existing callers.
    """
    with tf.name_scope('out') as scope:
        w = _variable_with_weight_decay(scope + 'weight', [68, 2], 0.0005)
        b = _variable_with_weight_decay(scope + 'biases', [2], 0.000)
        return tf.matmul(input, w) + b

def model(x):
    """Full forward pass: LSTM -> attention pooling (size 64) -> 2-way logits."""
    return out_layer(attention(LSTM(x), 64, False))

def training(loss, learning_rate):
    """Build and return an Adam op that minimizes `loss`."""
    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdamOptimizer(learning_rate)
        train_op = optimizer.minimize(loss)
    return train_op

def evaluation(logits, labels):
    """Mean classification accuracy (argmax match), with a scalar summary."""
    with tf.variable_scope('accuracy', reuse=tf.AUTO_REUSE) as scope:
        hits = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
        accuracy = tf.reduce_mean(tf.cast(hits, tf.float32), name="accuracy")
        tf.summary.scalar(scope.name + '/accuracy', accuracy)
    return accuracy

def loss(logits, labels):
    """Mean softmax cross-entropy between `logits` and `labels`, with summary."""
    with tf.variable_scope('loss', reuse=tf.AUTO_REUSE) as scope:
        per_example = tf.nn.softmax_cross_entropy_with_logits_v2(
            logits=logits, labels=labels, name='cross_entropy_per_example')
        mean_loss = tf.reduce_mean(per_example, name='loss')
        tf.summary.scalar(scope.name + '/loss', mean_loss)
    return mean_loss

def load_model(sess, saver, ckpt_path):
    """Restore the latest checkpoint under `ckpt_path`, or initialize fresh.

    Args:
        sess: active tf.Session whose graph matches `saver`.
        saver: tf.train.Saver built against the currently constructed graph.
        ckpt_path: directory that holds the checkpoints.

    Returns:
        The global step parsed from the checkpoint filename (text after the
        last '-'), or -1 when no checkpoint exists and variables were
        freshly initialized.
    """
    latest_ckpt = tf.train.latest_checkpoint(ckpt_path)
    if latest_ckpt:
        # Bug fix: the previous code discarded the passed-in `saver` and
        # replaced it via tf.train.import_meta_graph, which re-imports the
        # whole graph on top of the one the caller already built (duplicate
        # nodes, extra memory). Restore with the caller's saver instead.
        print('resume from', latest_ckpt)
        saver.restore(sess, latest_ckpt)
        return int(latest_ckpt[latest_ckpt.rindex('-') + 1:])
    else:
        print('building model from scratch')
        sess.run(tf.global_variables_initializer())
        return -1