import tensorflow as tf
import numpy as np
tf.set_random_seed(777)  # fix the graph-level RNG so weight init (and thus training) is reproducible
# Teach hello: hihell -> ihello
# input sequence:  "hihell"   target sequence: "ihello"

# Character vocabulary: index in this list is the integer id of the char.
idx2char = ['h', 'i', 'e', 'l', 'o']   # h:0, i:1, e:2, l:3, o:4
# Input sequence "hihell" encoded as integer ids.
x_data = [0, 1, 0, 2, 3, 3]   # hihell
# One-hot encode by fancy-indexing rows of the 5x5 identity matrix:
# row k of identity(5) is exactly the one-hot vector for class k.
x_2d = np.identity(5)[x_data]
# Prepend a batch axis so the shape matches the RNN input contract:
# (num_samples m, sequence_length, input_dim) -> here (1, 6, 5).
x_one_hot = x_2d[np.newaxis, :, :]
# x_one_hot = [[[1, 0, 0, 0, 0],   # h 0
#               [0, 1, 0, 0, 0],   # i 1
#               [1, 0, 0, 0, 0],   # h 0
#               [0, 0, 1, 0, 0],   # e 2
#               [0, 0, 0, 1, 0],   # l 3
#               [0, 0, 0, 1, 0]]]  # l 3
# Target sequence "ihello" as integer ids, shape (m, sequence_length);
# labels stay as class indices (no one-hot needed for sequence_loss).
y_data = [[1, 0, 2, 3, 3, 4]]    # ihello

# RNN input-shape parameters: (num_samples m, sequence_length, input_dim)
batch_size = 1   # number of training samples m (one sentence)
sequence_length = 6  # length of each sequence; |ihello| == 6
input_dim = 5  # dimensionality of one input step = one-hot size

# RNN model parameters
hidden_size = 8  # number of LSTM hidden units (output width of the LSTM; a fully-connected layer maps it to num_classes below)
num_classes = 5 # total number of output classes (vocabulary size)

# Training parameters
learning_rate = 0.1  # Adam step size

# Placeholders for the training data.
X = tf.placeholder(tf.float32, [None, sequence_length, input_dim])  # one-hot inputs: (num_samples m, sequence_length, input_dim)
Y = tf.placeholder(tf.int32, [None, sequence_length])  # integer labels: (num_samples m, sequence_length)

# Build the model.
# Define the LSTM cell with `hidden_size` hidden units.
# cell = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_size, state_is_tuple=True) # older variant — emitted a deprecation warning
cell = tf.contrib.rnn.LSTMCell(num_units=hidden_size, state_is_tuple=True)# num_units = number of hidden units; state_is_tuple=True returns the state as a (c, h) tuple
initial_state = cell.zero_state(batch_size, tf.float32) # start from an all-zero initial state
# Unroll the RNN dynamically over the sequence, starting from initial_state:
# `outputs` holds the output of every time step; `_states` is the final state.
outputs, _states = tf.nn.dynamic_rnn(cell, X, initial_state=initial_state, dtype=tf.float32)
print('rnn输出', outputs.shape)  # (1, 6, 8): 1 sample, 6 time steps, 8 hidden units

# Fully-connected output layer
# X_for_fc = tf.reshape(outputs, [-1, hidden_size])
# print(X_for_fc.shape) # (6,8)

# fc_w = tf.get_variable("fc_w", [hidden_size, num_classes])
# fc_b = tf.get_variable("fc_b", [num_classes])
# outputs = tf.matmul(X_for_fc, fc_w) + fc_b
# Dense layer mapping hidden_size -> num_classes; activation_fn=None leaves raw logits
outputs = tf.contrib.layers.fully_connected(inputs=outputs, num_outputs=num_classes, activation_fn=None)
print('全连接输出', outputs.shape) # after the dense layer the last dim is num_classes: (1, 6, 5)

# Reshape to (batch_size, sequence_length, num_classes) as expected by sequence_loss.
outputs = tf.reshape(outputs, [batch_size, sequence_length, num_classes])#print(outputs.shape)   (1,6,5)
# Per-position sequence weights: all 1 (every time step counts equally in the loss).
weights = tf.ones([batch_size, sequence_length]) # All weights are 1 (equal weights)
# Sequence cross-entropy: logits (m, seq_len, num_classes) vs integer targets Y (m, seq_len) — no one-hot needed for targets.
sequence_loss = tf.contrib.seq2seq.sequence_loss(logits=outputs, targets=Y, weights=weights)
# Mean loss (sequence_loss is already averaged by default; reduce_mean is a harmless no-op on a scalar).
loss = tf.reduce_mean(sequence_loss)
# Adam optimizer minimizing the sequence loss.
train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
# Predicted class per time step; note axis=2 because outputs is 3-D (batch, seq, classes).
prediction = tf.argmax(outputs, axis=2)
# Fraction of time steps predicted correctly; argmax returns int64, so cast Y to match before comparing.
accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, tf.cast(Y, dtype=tf.int64)),tf.float32))
# accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, np.array(Y)),tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(50):
        # One training step: run loss, the train op, and accuracy together.
        l, _, acc = sess.run([loss, train, accuracy], feed_dict={X: x_one_hot, Y: y_data})
        # Predicted class ids for the training sequence.
        result = sess.run(prediction, feed_dict={X: x_one_hot})
        print(i, "loss:", l, "prediction: ", result, "true Y: ", y_data, acc)

        # Map predicted ids back to characters via the vocabulary.
        # result has shape (1, 6); squeeze drops the size-1 batch axis -> (6,).
        result_str = [idx2char[c] for c in np.squeeze(result)]
        print("\tPrediction str: ", result_str, ''.join(result_str))

        # Stop early once every time step is predicted correctly.
        if acc >= 1.0:
            break
    # Try the trained model on an unseen input sequence.
    t_data = [0, 2, 3, 3, 0, 1]  # hellhi
    result = sess.run(prediction, feed_dict={X: np.eye(5)[t_data].reshape(1, -1, 5)})
    print(result)
    result_str = [idx2char[c] for c in np.squeeze(result)]
    print("\tPrediction str: ", result_str, ''.join(result_str))

# Sample console output from a previous run (kept for reference):
'''
0 loss: 1.71584 prediction:  [[2 2 2 3 3 2]] true Y:  [[1, 0, 2, 3, 3, 4]]
	Prediction str:  eeelle
1 loss: 1.56447 prediction:  [[3 3 3 3 3 3]] true Y:  [[1, 0, 2, 3, 3, 4]]
	Prediction str:  llllll
2 loss: 1.46284 prediction:  [[3 3 3 3 3 3]] true Y:  [[1, 0, 2, 3, 3, 4]]
	Prediction str:  llllll
3 loss: 1.38073 prediction:  [[3 3 3 3 3 3]] true Y:  [[1, 0, 2, 3, 3, 4]]
	Prediction str:  llllll
4 loss: 1.30603 prediction:  [[3 3 3 3 3 3]] true Y:  [[1, 0, 2, 3, 3, 4]]
	Prediction str:  llllll
5 loss: 1.21498 prediction:  [[3 3 3 3 3 3]] true Y:  [[1, 0, 2, 3, 3, 4]]
	Prediction str:  llllll
6 loss: 1.1029 prediction:  [[3 0 3 3 3 4]] true Y:  [[1, 0, 2, 3, 3, 4]]
	Prediction str:  lhlllo
7 loss: 0.982386 prediction:  [[1 0 3 3 3 4]] true Y:  [[1, 0, 2, 3, 3, 4]]
	Prediction str:  ihlllo
8 loss: 0.871259 prediction:  [[1 0 3 3 3 4]] true Y:  [[1, 0, 2, 3, 3, 4]]
	Prediction str:  ihlllo
9 loss: 0.774338 prediction:  [[1 0 2 3 3 4]] true Y:  [[1, 0, 2, 3, 3, 4]]
	Prediction str:  ihello
10 loss: 0.676005 prediction:  [[1 0 2 3 3 4]] true Y:  [[1, 0, 2, 3, 3, 4]]
	Prediction str:  ihello

'''
