import tensorflow.compat.v1 as tf
import numpy as np
from tensorflow.contrib.seq2seq import sequence_loss

# Fix the random seeds so NumPy sampling and TensorFlow weight
# initialization are reproducible between runs.
np.random.seed(777)
tf.set_random_seed(777)

# 2. RNN exercise (4 points per item):
# (1) Data preparation:
# ① Build the dataset from the sentence "if you want you like".
# The model predicts the next character, so the input is the sentence
# minus its last character and the target is the sentence shifted left
# by one.
sentence = 'if you want you like'
x_sentences = [sentence[:-1]]
y_sentences = [sentence[1:]]

# ② Map characters to integer indices.
# sorted() makes the vocabulary order deterministic across runs (a bare
# set() iterates in hash order, which varies with PYTHONHASHSEED and
# would make the char<->index mapping differ between runs despite the
# fixed random seeds); using `charset` also avoids shadowing the
# builtin `dict`.
charset = sorted(set(sentence))
n_dict_len = len(charset)  # vocabulary size (13 unique characters)
idx2char = list(charset)   # index -> character lookup table
char2idx = {ch: i for i, ch in enumerate(idx2char)}  # character -> index
x_idx = np.array([[char2idx[ch] for ch in sent] for sent in x_sentences])
y_idx = np.array([[char2idx[ch] for ch in sent] for sent in y_sentences])

# ③/⑤ Shape x_data / y_data for the RNN: inputs are one-hot encoded,
# targets stay as integer class ids (sequence_loss expects sparse labels).
x_data = np.eye(n_dict_len)[x_idx]
y_data = y_idx

# ④ RNN hyper-parameters: time steps = 19 (sentence length 20 minus 1),
# hidden units = 26, output classes = n_dict_len = 13.
n_steps = len(x_sentences[0])
n_hidden = 26
alpha = 0.01  # learning rate

# ⑥ Build the model: one LSTMCell layer followed by a fully-connected
# projection to vocabulary-sized logits.
#
# BUG FIX: the original fed the raw 26-unit RNN outputs straight into
# sequence_loss, whose class axis must equal the vocabulary size
# (n_dict_len = 13); argmax over 26 units could also produce indices
# >= 13 and crash the idx2char decode step. The dense layer required
# by step ⑧ was missing.
ph_x = tf.placeholder(tf.float32, [None, n_steps, n_dict_len], 'ph_x')
n_samples = tf.shape(ph_x)[0]
ph_y = tf.placeholder(tf.int32, [None, n_steps], 'ph_y')
cell = tf.nn.rnn_cell.LSTMCell(n_hidden)

# ⑦ No initial_state is passed, so dynamic_rnn zero-initializes h_0/c_0.
# ⑧ Run the RNN over the sequence; outputs is (batch, n_steps, n_hidden).
outputs, _ = tf.nn.dynamic_rnn(cell, ph_x, dtype=tf.float32)
# Project every time step's hidden state to n_dict_len class logits.
logits = tf.layers.dense(outputs, n_dict_len)

# ⑨ seq2seq.sequence_loss: per-step softmax cross-entropy, uniformly
# weighted across all positions and averaged.
w = tf.ones([n_samples, n_steps])
cost = tf.reduce_mean(sequence_loss(logits=logits, targets=ph_y, weights=w))
train = tf.train.AdamOptimizer(learning_rate=alpha).minimize(cost)
# Predicted character index per time step; accuracy is the fraction of
# positions where prediction and target agree.
predict = tf.argmax(logits, axis=2)
acc = tf.reduce_mean(
    tf.cast(
        tf.equal(tf.cast(predict, dtype=tf.int32), ph_y),
        tf.float32
    )
)

# ⑩ Run a session: train the model, then decode the predicted index
# sequence back to characters through the idx2char lookup table.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    print('Training started.')
    iters = 1000
    group = int(np.ceil(iters / 10))
    feed = {ph_x: x_data, ph_y: y_data}
    for step in range(iters):
        cost_val, _, acc_val = sess.run([cost, train, acc], feed_dict=feed)
        # Stop as soon as the model reproduces the target sequence exactly.
        if np.isclose(1.0, acc_val):
            print('Training converged.')
            break
        # Report progress roughly ten times over the full run.
        if step % group == 0:
            print(f'#{step + 1}: cost = {cost_val}, acc = {acc_val}')
    # Make sure the last iteration's stats get reported as well.
    if step % group != 0:
        print(f'#{step + 1}: cost = {cost_val}, acc = {acc_val}')
    print('Training over.')

    print('求出预测值，用字典输出字符:')
    pred_idx = sess.run(predict, feed_dict={ph_x: x_data})
    predict_sentences = [''.join(idx2char[k] for k in row) for row in pred_idx]
    print(predict_sentences)
