from python_ai.common.xcommon import *
import tensorflow.compat.v1 as tf
import tensorflow as tsf
import numpy as np

# Fix RNG seeds so runs are reproducible.
np.random.seed(777)
tf.set_random_seed(777)

# Task: teach the RNN to map "hihell" -> "ihello".

# Hyperparameters
sequence_length = 6  # length of the target sequence: |ihello| == 6
input_dim = 5        # width of the one-hot character encoding

hidden_size = 8      # number of LSTM units (LSTM output dimension)
num_classes = 5      # total number of distinct characters
learning_rate = 0.1

# Vocabulary lookup tables: index -> char and char -> index.
idx2char = np.array(['h', 'i', 'e', 'l', 'o'])
char2idx = {c: i for i, c in enumerate(idx2char)}
# 构造数据集
batch_size = 3  # number of identical sequences per batch

# Input character indices for "hihell", repeated once per batch element.
x_data = [0, 1, 0, 2, 3, 3] * batch_size

# One-hot encode the flat index list, then reshape to (batch, time, input_dim).
x_one_hot = np.eye(5)[x_data].reshape(batch_size, -1, 5)
check_shape(x_one_hot, 'x_one_hot')

# Target indices for "ihello"; labels stay as class ids (no one-hot needed).
y_data = np.array([[1, 0, 2, 3, 3, 4]] * batch_size)
check_shape(y_data, 'y_data')

# Graph inputs (placeholders).
X = tf.placeholder(tf.float32, [None, sequence_length, input_dim])  # one-hot encoded input, (batch, time, input_dim)
Y = tf.placeholder(tf.int32, [None, sequence_length])  # integer class labels, (batch, time)
n_samples = tf.shape(X)[0]  # dynamic batch size, taken from the fed tensor
print('X', X)
print('Y', Y)

# Build the model graph: LSTM -> per-step fully-connected -> sequence loss.

# LSTM cell; state_is_tuple=True returns the state as a (c, h) tuple.
cell = tf.nn.rnn_cell.LSTMCell(num_units=hidden_size, state_is_tuple=True)
initial_state = cell.zero_state(n_samples, tf.float32)  # all-zero initial state
check_shape(initial_state, 'initial_state')
# outputs: (batch, time, hidden_size)
outputs, _states = tf.nn.dynamic_rnn(cell, X, initial_state=initial_state, dtype=tf.float32)
check_shape(outputs, 'outputs')
check_shape(_states, '_states')

# Fully-connected projection hidden_size -> num_classes, applied to every
# time step by folding the time axis into the batch axis first.
X_for_fc = tf.reshape(outputs, [-1, hidden_size])
check_shape(X_for_fc, 'X_for_fc')
outputs = tsf.contrib.layers.fully_connected(inputs=X_for_fc, num_outputs=num_classes, activation_fn=None)
check_shape(outputs, 'outputs')

# Restore (batch, time, num_classes) — the shape sequence_loss expects.
outputs = tf.reshape(outputs, [n_samples, sequence_length, num_classes])
check_shape(outputs, 'outputs after reshape')
# Per-position cross-entropy; a weight of 1 everywhere weights all steps equally.
weights = tf.ones([n_samples, sequence_length])
check_shape(weights, 'weights')
sequence_loss = tsf.contrib.seq2seq.sequence_loss(logits=outputs, targets=Y, weights=weights)
check_shape(sequence_loss, 'sequence_loss')
loss = tf.reduce_mean(sequence_loss)
train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)

# Predicted class id per time step; logits are rank-3, hence argmax over axis=2.
prediction = tf.argmax(outputs, axis=2)
# FIX: compare against the Y placeholder instead of baking the constant
# np.array(y_data) into the graph — the old graph always measured accuracy
# against the hard-coded training batch, which is wrong for any other feed.
# (argmax yields int64, so Y must be cast to match.)
accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, tf.cast(Y, tf.int64)), tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Decode the training input/target back into strings for display.
    sep('x_idx')
    x_idx = x_one_hot.argmax(axis=2)
    print(x_idx)
    sep('x_letters')
    x_letters = idx2char[x_idx]
    print(x_letters)
    sep('x_join')
    x_join = ''.join(x_letters.ravel())
    print(x_join)
    sep('y_data')
    print(y_data)
    sep('y_letters')
    y_letters = idx2char[y_data]
    print(y_letters)
    sep('y_join')
    y_join = ''.join(y_letters.ravel())
    print(y_join)

    # Training loop: stop early once the whole batch is predicted perfectly.
    fetches = [loss, train, accuracy, prediction]
    feed = {X: x_one_hot, Y: y_data}
    for step in range(50):
        loss_val, _, acc_val, pred = sess.run(fetches, feed_dict=feed)
        print(f'#{step + 1} loss = {loss_val}, acc = {acc_val}')
        # Show the input, target and current prediction as strings.
        print(x_join)
        print(y_join)
        print(''.join(idx2char[pred.ravel()]))

        if acc_val >= 1.0:
            break

    # Evaluate the trained model on an unseen input sequence.
    sep('用新数据测试')
    t_data = np.array([0, 2, 3, 3, 0, 1])  # "hellhi"
    t_x_join = ''.join(idx2char[t_data])
    print(t_x_join)
    X_test_input = np.eye(5)[t_data].reshape(1, -1, 5)
    check_shape(X_test_input, 'X_test_input')
    pred = sess.run(prediction, feed_dict={X: X_test_input})
    result_str = ''.join(idx2char[pred.ravel()])
    print(result_str)

    sep('EOF')

# NOTE: sample console output from a previous training run, kept for reference
# (a bare string literal is a no-op at module level).
'''
0 loss: 1.71584 prediction:  [[2 2 2 3 3 2]] true Y:  [[1, 0, 2, 3, 3, 4]]
	Prediction str:  eeelle
1 loss: 1.56447 prediction:  [[3 3 3 3 3 3]] true Y:  [[1, 0, 2, 3, 3, 4]]
	Prediction str:  llllll
2 loss: 1.46284 prediction:  [[3 3 3 3 3 3]] true Y:  [[1, 0, 2, 3, 3, 4]]
	Prediction str:  llllll
3 loss: 1.38073 prediction:  [[3 3 3 3 3 3]] true Y:  [[1, 0, 2, 3, 3, 4]]
	Prediction str:  llllll
4 loss: 1.30603 prediction:  [[3 3 3 3 3 3]] true Y:  [[1, 0, 2, 3, 3, 4]]
	Prediction str:  llllll
5 loss: 1.21498 prediction:  [[3 3 3 3 3 3]] true Y:  [[1, 0, 2, 3, 3, 4]]
	Prediction str:  llllll
6 loss: 1.1029 prediction:  [[3 0 3 3 3 4]] true Y:  [[1, 0, 2, 3, 3, 4]]
	Prediction str:  lhlllo
7 loss: 0.982386 prediction:  [[1 0 3 3 3 4]] true Y:  [[1, 0, 2, 3, 3, 4]]
	Prediction str:  ihlllo
8 loss: 0.871259 prediction:  [[1 0 3 3 3 4]] true Y:  [[1, 0, 2, 3, 3, 4]]
	Prediction str:  ihlllo
9 loss: 0.774338 prediction:  [[1 0 2 3 3 4]] true Y:  [[1, 0, 2, 3, 3, 4]]
	Prediction str:  ihello
10 loss: 0.676005 prediction:  [[1 0 2 3 3 4]] true Y:  [[1, 0, 2, 3, 3, 4]]
	Prediction str:  ihello

'''
