# coding:utf-8
# Author : hiicy redldw
# Date : 2019/05/29

import tensorflow as tf
def zero_pad(X, seq_len):
    """Truncate/pad every sequence in X to exactly seq_len entries.

    Each sequence is cut to at most seq_len - 1 items and then padded with
    zeros, so the result always contains at least one trailing 0 — downstream
    code relies on that 0 as an end-of-sequence marker.
    """
    padded = []
    for seq in X:
        head = seq[:seq_len - 1]
        tail = [0] * max(seq_len - len(seq), 1)
        padded.append(head + tail)
    return np.array(padded)


def get_vocabulary_size(X):
    """Return the largest word index seen in X, plus one for the 0th word."""
    return 1 + max(max(seq) for seq in X)


def fit_in_vocabulary(X, voc_size):
    """Drop every word index that falls outside [0, voc_size)."""
    result = []
    for seq in X:
        result.append([word for word in seq if word < voc_size])
    return result


def batch_generator(X, y, batch_size):
    """Endless shuffled mini-batch generator.

    Shuffles (X, y) in unison, then yields consecutive batch_size slices.
    Whenever fewer than batch_size samples remain, the data is reshuffled
    and iteration restarts from the beginning (the tail remainder of each
    pass is dropped).
    """
    n = X.shape[0]
    perm = np.arange(n)
    np.random.shuffle(perm)
    Xs = X.copy()[perm]
    ys = y.copy()[perm]
    pos = 0
    while True:
        if pos + batch_size > n:
            # not enough samples left for a full batch: reshuffle and restart
            perm = np.arange(n)
            np.random.shuffle(perm)
            Xs = Xs[perm]
            ys = ys[perm]
            pos = 0
        else:
            yield Xs[pos:pos + batch_size], ys[pos:pos + batch_size]
            pos += batch_size

# Attention for text classification: there is no decoder hidden state here —
# this is self-attention over the encoder's own outputs.
# REVIEW: the attention layer consumes the encoder's hidden-state outputs.
def attention(inputs, attention_size, time_major=False, return_alphas=False):
    """Attention pooling over RNN outputs (Yang et al. style).

    Args:
        inputs: RNN outputs of shape (B, T, D). A (fw, bw) tuple or list from
            a bidirectional RNN is first concatenated along the feature axis.
        attention_size: size A of the hidden attention projection.
        time_major: if True, inputs are (T, B, D) and are transposed to
            batch-major first.
        return_alphas: if True, also return the per-timestep weights
            (useful for visualization).

    Returns:
        outputs: attention-weighted sum over time, shape (B, D);
        alphas (only when return_alphas=True): attention weights, shape (B, T).
    """
    # BUGFIX: the original `isinstance(inputs, tuple or list)` evaluates to
    # `isinstance(inputs, tuple)`, so list inputs were never concatenated.
    if isinstance(inputs, (tuple, list)):
        inputs = tf.concat(inputs, axis=2)
    if time_major:
        inputs = tf.transpose(inputs, perm=(1, 0, 2))
    hidden_size = inputs.shape[2].value  # D

    # Trainable attention parameters.
    W_omg = tf.Variable(tf.truncated_normal([hidden_size, attention_size], stddev=0.1))
    B_omg = tf.Variable(tf.truncated_normal([attention_size]))
    U_omg = tf.Variable(tf.truncated_normal([attention_size]))

    with tf.name_scope('hidden_resparent'):
        # inputs: (B, T, D) -> v: (B, T, A)
        # BUGFIX: tf.tensordot has no default for `axes`; the original call
        # omitted it and would raise a TypeError at graph-build time.
        v = tf.tanh(tf.tensordot(inputs, W_omg, axes=1) + B_omg)
    au = tf.tensordot(v, U_omg, axes=1, name='vu')  # (B, T)
    alphas = tf.nn.softmax(au, axis=1)  # (B, T): one weight per timestep
    outputs = tf.reduce_sum(inputs * tf.expand_dims(alphas, -1), 1)  # (B, D)
    if not return_alphas:
        return outputs
    else:
        return outputs, alphas

import numpy as np
from keras.datasets import imdb
from tensorflow.contrib.rnn import GRUCell
from tensorflow.python.ops.rnn import bidirectional_dynamic_rnn as bi_rnn
from tqdm import tqdm

# from utils import get_vocabulary_size, fit_in_vocabulary, zero_pad, batch_generator

# --- Hyperparameters ---
NUM_WORDS = 10000  # vocabulary cap handed to the IMDB loader
INDEX_FROM = 3  # keras offsets real word indices by 3 (0-2 are reserved)
SEQUENCE_LENGTH = 250  # every sequence is truncated/padded to this length
EMBEDDING_DIM = 100  # word embedding dimensionality
HIDDEN_SIZE = 150  # GRU units per direction
ATTENTION_SIZE = 50  # attention projection size
KEEP_PROB = 0.8  # dropout keep probability used during training
BATCH_SIZE = 256
NUM_EPOCHS = 3  # Model easily overfits without pre-trained words embeddings, that's why train for a few epochs
DELTA = 0.5  # smoothing factor for the running training-loss average
MODEL_PATH = './model'

# Load the data set
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=NUM_WORDS, index_from=INDEX_FROM)

# Sequences pre-processing
# The vocabulary size is derived from the training set only; test-set words
# outside it are dropped so embedding lookups stay in range. zero_pad makes
# every sequence SEQUENCE_LENGTH long with at least one trailing 0, which the
# training loop later uses to recover the true sequence length.
vocabulary_size = get_vocabulary_size(X_train)
X_test = fit_in_vocabulary(X_test, vocabulary_size)
X_train = zero_pad(X_train, SEQUENCE_LENGTH)
X_test = zero_pad(X_test, SEQUENCE_LENGTH)

with tf.name_scope("Inputs"):
    # batch of padded word-index sequences, shape (B, SEQUENCE_LENGTH)
    batch_ph = tf.placeholder(tf.int32, shape=(None, SEQUENCE_LENGTH))
    keepprob_ph = tf.placeholder(tf.float32, name='droput')  # dropout keep prob
    target_ph = tf.placeholder(tf.float32, shape=(None,), name='target')  # 0/1 labels
    seq_len_ph = tf.placeholder(tf.int32, [None], name='seq_len_ph')  # true lengths

# Embedding layer
with tf.name_scope('Embedding_layer'):
    embedding = tf.Variable(tf.truncated_normal([NUM_WORDS, EMBEDDING_DIM], stddev=0.1))
    tf.summary.histogram('embeddings_var', embedding)
    input_embedding = tf.nn.embedding_lookup(embedding, batch_ph)

with tf.name_scope("bi_rnn"):
    cellFw = GRUCell(HIDDEN_SIZE)
    cellBw = GRUCell(HIDDEN_SIZE)
    # Placeholders accept any length, but within one batch all sequences share
    # SEQUENCE_LENGTH (padding); seq_len_ph masks out the padded tail.
    # BUGFIX: bidirectional_dynamic_rnn requires `dtype` when no initial
    # state is supplied; the original call omitted it and would raise.
    outputs, ostates = bi_rnn(cellFw, cellBw, input_embedding,
                              sequence_length=seq_len_ph, dtype=tf.float32)
    tf.summary.histogram('RNN_outputs', outputs)
with tf.name_scope("attention"):
    # bidirectional GRU outputs (fw, bw tuple) pooled by attention
    attention_output, alphas = attention(outputs, ATTENTION_SIZE, return_alphas=True)
    tf.summary.histogram('alphas', alphas)

drop_C = tf.nn.dropout(attention_output, keepprob_ph)

with tf.name_scope('full-connect'):
    # BUGFIX: tf.truncated_normal takes a shape *list*; the original passed
    # two positional scalars (shape=HIDDEN_SIZE*2, mean=1), which is wrong.
    # *2 because the bi-RNN concatenates forward and backward states.
    FW = tf.Variable(tf.truncated_normal([HIDDEN_SIZE * 2, 1], stddev=0.1))
    FB = tf.Variable(tf.constant(0., shape=[1]))
    x = tf.nn.xw_plus_b(drop_C, FW, FB)
    y_hat = tf.squeeze(x)  # drop the trailing size-1 dimension -> shape (B,)
    tf.summary.histogram('W', FW)
with tf.name_scope('Metrics'):
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y_hat, labels=target_ph))
    tf.summary.scalar('loss', loss)
    optimzer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
    # round(sigmoid(logit)) yields the 0/1 prediction compared against targets
    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.round(tf.sigmoid(y_hat)), target_ph), tf.float32))


merged = tf.summary.merge_all()

# Batch generators
train_batch_generator = batch_generator(X_train, y_train, BATCH_SIZE)
test_batch_generator = batch_generator(X_test, y_test, BATCH_SIZE)

train_writer = tf.summary.FileWriter('./logdir/train', accuracy.graph)
test_writer = tf.summary.FileWriter('./logdir/test', accuracy.graph)

session_conf = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))

saver = tf.train.Saver()

if __name__ == "__main__":
    with tf.Session(config=session_conf) as sess:
        sess.run(tf.global_variables_initializer())
        print("Start learning...")
        for epoch in range(NUM_EPOCHS):
            loss_train = 0
            loss_test = 0
            accuracy_train = 0
            accuracy_test = 0

            print("epoch: {}\t".format(epoch), end="")

            # Training
            num_batches = X_train.shape[0] // BATCH_SIZE
            for b in tqdm(range(num_batches)):
                x_batch, y_batch = next(train_batch_generator)
                # REVIEW: position of the first 0 (the pad marker) + 1 is taken
                # as the actual sequence length; bidirectional_dynamic_rnn uses
                # it to handle variable-length sequences. zero_pad guarantees at
                # least one trailing 0, so index(0) cannot raise here.
                seq_len = np.array([list(x).index(0) + 1 for x in x_batch])  # actual lengths of sequences
                loss_tr, acc, _, summary = sess.run([loss, accuracy, optimzer, merged],
                                                    feed_dict={batch_ph: x_batch,
                                                               target_ph: y_batch,
                                                               seq_len_ph: seq_len,
                                                               keepprob_ph: KEEP_PROB})
                accuracy_train += acc
                # DELTA-weighted exponential moving average of the training loss
                loss_train = loss_tr * DELTA + loss_train * (1 - DELTA)
                train_writer.add_summary(summary, b + num_batches * epoch)
            accuracy_train /= num_batches

            # Testing: dropout disabled (keep prob 1.0) and no optimizer step
            num_batches = X_test.shape[0] // BATCH_SIZE
            for b in tqdm(range(num_batches)):
                x_batch, y_batch = next(test_batch_generator)
                seq_len = np.array([list(x).index(0) + 1 for x in x_batch])  # actual lengths of sequences
                loss_test_batch, acc, summary = sess.run([loss, accuracy, merged],
                                                         feed_dict={batch_ph: x_batch,
                                                                    target_ph: y_batch,
                                                                    seq_len_ph: seq_len,
                                                                    keepprob_ph: 1.0})
                accuracy_test += acc
                loss_test += loss_test_batch
                test_writer.add_summary(summary, b + num_batches * epoch)
            accuracy_test /= num_batches
            loss_test /= num_batches

            print("loss: {:.3f}, val_loss: {:.3f}, acc: {:.3f}, val_acc: {:.3f}".format(
                loss_train, loss_test, accuracy_train, accuracy_test
            ))
        train_writer.close()
        test_writer.close()
        saver.save(sess, MODEL_PATH)
        print("Run 'tensorboard --logdir=./logdir' to checkout tensorboard logs.")