import tensorflow as tf
import numpy as np
from onehour_tensorflow.w20191209.gen_vocab import load_vocab
from dataset import load_csv_dataset

'''
Scratch notes — GRU equations reference:
https://blog.csdn.net/gzj_1101/article/details/79376798

TensorFlow refresher:
- Tensor: data (constants, Variables, placeholders).
- Tensor rank / shape examples:
    1                        -> rank 0 (scalar)
    [1, 2, 3]                -> rank 1, shape [3]
    [[1, 2], [3, 4]]         -> rank 2, shape [2, 2]
    [[1, 2], [3, 4], [5, 6]] -> rank 2, shape [3, 2]
    [[[...]]]                -> rank 3
    [[[[...]]]]              -> rank 4, e.g. shape [2, 3, 4, 5]
- Operation: a node in the graph (add, sub, mul, ...).
'''
# --- Model / data hyperparameters ---
hidden_units = 64  # GRU hidden-state size (also the embedding dimension)
steps = 9          # fixed sequence length; layer() unrolls exactly this many steps
batch_size = 5     # minibatch size requested from the dataset pipeline
vocab = load_vocab()  # project-local vocab; presumably a token->id mapping — TODO confirm
vocab_size = len(vocab)
# Embedding table, one hidden_units-sized row per vocab entry; passed to the
# dataset loader below so inputs arrive already embedded.
embeds = tf.get_variable("w_embs", shape=(vocab_size, hidden_units), dtype=tf.float32, initializer=tf.random_normal_initializer)

def input_layer(inputs, name_scope='input_layer'):
    """Identity pass-through for the network input.

    Opens a (currently empty) name scope — useful only for grouping in
    graph visualization — and returns *inputs* unchanged.

    :param inputs: input tensor, assumed [batch_size, ids] — TODO confirm
    :return: the same tensor that was passed in
    """
    with tf.name_scope(name=name_scope):
        pass
    return inputs

def layer(inputs, scope_name='layer'):
    """One GRU layer, manually unrolled over `steps` time steps.

    Implements the standard GRU recurrence (no bias terms —
    NOTE(review): textbook GRUs include biases; confirm the omission
    is intentional):

        z_t  = sigmoid(h_{t-1} Wzh + x_t Wzx)        # update gate
        r_t  = sigmoid(h_{t-1} Wrh + x_t Wrx)        # reset gate
        ~h_t = tanh((r_t * h_{t-1}) Wh + x_t Wx)     # candidate state
        h_t  = (1 - z_t) * h_{t-1} + z_t * ~h_t

    :param inputs: float tensor [batch_size, steps, hidden_units]
    :param scope_name: variable scope holding this layer's weights
    :return: all hidden states stacked on the time axis,
             shape [batch_size, steps, hidden_units]
    """
    with tf.variable_scope(scope_name):
        with tf.name_scope('layer'):
            with tf.name_scope('init_w'):
                # All six weight matrices are hidden_units x hidden_units
                # because the input embedding dim equals the hidden dim.
                Wzh = tf.Variable(initial_value=np.random.rand(hidden_units, hidden_units), dtype=tf.float32, name='Wzh')
                Wzx = tf.Variable(initial_value=np.random.rand(hidden_units, hidden_units), dtype=tf.float32, name='Wzx')

                Wrh = tf.Variable(initial_value=np.random.rand(hidden_units, hidden_units), dtype=tf.float32, name='Wrh')
                Wrx = tf.Variable(initial_value=np.random.rand(hidden_units, hidden_units), dtype=tf.float32, name='Wrx')

                Wh = tf.Variable(initial_value=np.random.rand(hidden_units, hidden_units), dtype=tf.float32, name='Wh')
                Wx = tf.Variable(initial_value=np.random.rand(hidden_units, hidden_units), dtype=tf.float32, name='Wx')
            bs = tf.shape(inputs)[0]                    # dynamic batch size
            ht_1 = tf.zeros(shape=[bs, hidden_units])   # h_0 = 0
            hidden_state_list = []
            for i in range(steps):
                with tf.name_scope('step_%d' % i):
                    xt = inputs[:, i]  # input at step i: [batch, hidden_units]
                    zt = tf.sigmoid(tf.matmul(ht_1, Wzh, name='a_') + tf.matmul(xt, Wzx, name='b_'), name='zt_cal')
                    rt = tf.sigmoid(tf.matmul(ht_1, Wrh, name='c_') + tf.matmul(xt, Wrx, name='d_'), name='rt_cal')
                    h_t = tf.tanh(tf.matmul(tf.multiply(rt, ht_1), Wh, name='e_') + tf.matmul(xt, Wx, name='f_'), name='h_t_call')
                    ht = tf.multiply((1 - zt), ht_1) + tf.multiply(zt, h_t)
                    hidden_state_list.append(ht)
                    ht_1 = ht
            # [batch, steps, hidden_units]
            output = tf.stack(hidden_state_list, axis=1)
    return output

def output_layer(inputs, name_scope='output_layer'):
    """Project hidden states to unnormalized vocabulary logits.

    Fix: the dense projection previously used a ReLU activation, which
    clamps every negative logit to zero and prevents the downstream
    softmax / cross-entropy from expressing low-probability classes.
    The final projection must be linear.

    :param inputs: [batch, steps, hidden_units]
    :return: raw logits, shape [batch, steps, vocab_size]
    """
    with tf.name_scope(name=name_scope):
        outputs = tf.layers.dense(inputs, units=vocab_size, activation=None)
        return outputs

# --- Graph construction ---
# x carries already-embedded sequences; y carries the target token ids.
x = tf.placeholder(shape=(None, steps, hidden_units), dtype=tf.float32, name='input_placeholder')
y = tf.placeholder(shape=(None, steps), dtype=tf.int64, name='target_placeholder')
inputs = input_layer(x)
for i in range(1):  # single GRU layer; bump the range to stack more
    inputs = layer(inputs, scope_name='layer_%d' % i)
outputs = output_layer(inputs)
# NOTE: `logits` actually holds softmax PROBABILITIES (name kept because the
# training loop below prints it); the raw logits are `outputs`.
logits = tf.nn.softmax(outputs)
pred = tf.argmax(logits, axis=-1)
# Fix: sparse_softmax_cross_entropy_with_logits expects RAW logits and applies
# softmax internally; the original passed the already-softmaxed tensor,
# effectively applying softmax twice and flattening the gradient.
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=outputs))
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
#--------------------------------- Training setup --------------------------------------
# Project-local dataset; presumably yields (embedded_src, target_id) batches — TODO confirm.
ds = load_csv_dataset(batch_size=batch_size, embeddings=embeds)
it = ds.make_initializable_iterator()
ele = it.get_next()  # one (src, tgt) batch per sess.run(ele)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(it.initializer)

epochs = 0
max_epochs = 1000
step = 0
# Train until the iterator has been exhausted max_epochs+1 times; each
# OutOfRangeError marks the end of one pass over the dataset.
while True:
    try:
        src_ids, tgt_ids = sess.run(ele)
        #print('tgt_ids', tgt_ids)
        # logits_val = sess.run(logits, feed_dict={x:src_ids})
        loss_val, _ = sess.run([loss, train_op], feed_dict={x:src_ids, y:tgt_ids})
        print('loss_val:', loss_val)
        if step % 100 == 0:
            # Periodically show the model's prediction on the current batch.
            pred_val, logits_val = sess.run([pred, logits], feed_dict={x:src_ids})
            # Probability assigned to the predicted token of sample 0, step 0.
            print('pred_val:', logits_val[0][0][pred_val[0][0]])
            print('pred_val:', pred_val)
            print('label:', tgt_ids)
        step += 1
    except tf.errors.OutOfRangeError:
        # Dataset exhausted: count the epoch and restart the iterator.
        epochs += 1
        if epochs > max_epochs:
            break
        sess.run(it.initializer)
# print('zt', zt_val)
# print('rt', rt_val)
# print('h_t', h_t_val)
# print('ht', ht_val)

# Dump the graph for TensorBoard inspection (after training completes).
writer = tf.summary.FileWriter('./logs')
writer.add_graph(tf.get_default_graph())
writer.flush()

