import tensorflow as tf
import numpy as np
from onehour_tensorflow.w20191209.gen_vocab import load_vocab

'''
GRU equations reference: https://blog.csdn.net/gzj_1101/article/details/79376798

Tensor: the data objects flowing through the graph (constants, Variables,
placeholders).

Tensor rank examples:
    1                         -> rank 0 (scalar)
    [1, 2, 3]                 -> rank 1, shape [3]
    [[1, 2], [3, 4]]          -> rank 2, shape [2, 2]
    [[1, 2], [3, 4], [5, 6]]  -> rank 2, shape [3, 2]
    [[[...]]]                 -> rank 3
    [[[[...]]]]               -> rank 4, e.g. shape [2, 3, 4, 5] holds 2*3*4*5 values

Operation: graph nodes that transform tensors (add, sub, mul, ...).
'''
hidden_units = 10
steps = 10
vocab = load_vocab()
vocab_size = len(vocab)
embedding_tensor = tf.random_normal(shape=(vocab_size, hidden_units), dtype=tf.float32)
def layer(inputs, scope_name='layer'):
    with tf.variable_scope(scope_name):
        # tf.random_normal(shape=[3,4,5])
        with tf.name_scope('layer'):
            with tf.name_scope('init_w'):
                Wzh = tf.Variable(initial_value=np.random.rand(hidden_units, hidden_units), dtype=tf.float32, name='Wzh')  # shape 2*2
                Wzx = tf.Variable(initial_value=np.random.rand(hidden_units, hidden_units), dtype=tf.float32, name='Wzx')

                Wrh = tf.Variable(initial_value=np.random.rand(hidden_units, hidden_units), dtype=tf.float32, name='Wrh')
                Wrx = tf.Variable(initial_value=np.random.rand(hidden_units, hidden_units), dtype=tf.float32, name='Wrx')

                Wh = tf.Variable(initial_value=np.random.rand(hidden_units, hidden_units), dtype=tf.float32, name='Wh')
                Wx = tf.Variable(initial_value=np.random.rand(hidden_units, hidden_units), dtype=tf.float32, name='Wx')
            '''
            t-step
            h0 = init_state
            while  :
            '''
            init_state = 0
            ht_1 = tf.zeros(shape=[hidden_units,1])
            hidden_state_list = []
            for i in range(steps):
                with tf.name_scope('step_%d'%i):
                    #t = 0
                    #xt = tf.expand_dims(x[:,t], axis=1)
                    xt = inputs[i] # [ht0, ht1, ht2,...,ht9]
                    print('before xt.shape', xt)
                    #xt = tf.expand_dims(xt, axis=1)
                    #print('after xt.shape', xt)
                    #print('xt',xt)
                    zt = tf.sigmoid(tf.matmul(Wzh, ht_1, name='a_') + tf.matmul(Wzx, xt, name='b_'), name='zt_cal')
                    rt = tf.sigmoid(tf.matmul(Wrh, ht_1, name='c_') + tf.matmul(Wrx, xt, name='d_'), name='rt_cal')
                    h_t = tf.tanh(tf.matmul(Wh, tf.multiply(rt, ht_1), name='e_') + tf.matmul(Wx, xt, name='f_'), name='h_t_call')
                    ht = tf.multiply((1 - zt), ht_1) + tf.multiply(zt, h_t)
                    hidden_state_list.append(ht)
                    ht_1 = ht
                    #t += 1
    return hidden_state_list
x = tf.placeholder(shape=(hidden_units, steps), dtype=tf.float32, name='input_placeholder')
#x = tf.constant(value=np.random.rand(hidden_units, steps), dtype=tf.float32) #  1 2 3 4 5 6 7 8 9 10
inputs = [tf.expand_dims(x[:,i], axis=1) for i in range(steps)]
for i in range(6):
    inputs = layer(inputs, scope_name='layer_%d'%i)
print('inputs:', inputs)
'''
Worked 2x2 matrix-multiplication example (row-by-column):
    [a11 a12]   [b11 b12]   [a11*b11+a12*b21  a11*b12+a12*b22]
    [a21 a22] @ [b21 b22] = [a21*b11+a22*b21  a21*b12+a22*b22]
e.g. [[1,2],[3,4]] @ [[1,2],[3,4]] = [[7,10],[15,22]]
'''

sess = tf.Session()
sess.run(tf.global_variables_initializer())
inputs_val = sess.run(inputs, feed_dict={x:np.random.rand(hidden_units, steps)})
print(inputs_val)
# print('zt', zt_val)
# print('rt', rt_val)
# print('h_t', h_t_val)
# print('ht', ht_val)

writer = tf.summary.FileWriter('./logs')
writer.add_graph(tf.get_default_graph())
writer.flush()

