# 1. As required, process the following with an RNN (10 points per part); sample = "hihello"
# (1) Data preprocessing
from keras import utils, Sequential, layers, activations, optimizers, losses
import numpy as np

sample = "hihello"

# ② Features are the sample minus its last char ('hihell'); labels are the
# sample minus its first char ('ihello') — the model learns to predict the
# next character at every position.
x_data = sample[:-1]
y_data = sample[1:]

# ① Vocabulary in first-occurrence order: ['h', 'i', 'e', 'l', 'o'].
# dict.fromkeys (not set) keeps this order deterministic across runs —
# set() iteration order varies with string-hash randomization, so the
# index mapping documented below would not be guaranteed.
char_set = list(dict.fromkeys(sample))
# One-hot width = vocabulary size = 5.
onehot_dim = len(char_set)
# Sequence length = 6 (len of 'hihell').
seq_len = len(x_data)

# {0: 'h', 1: 'i', 2: 'e', 3: 'l', 4: 'o'}
int_to_char = dict(enumerate(char_set))
# {'h': 0, 'i': 1, 'e': 2, 'l': 3, 'o': 4}
char_to_int = {ch: idx for idx, ch in enumerate(char_set)}

# 'hihell' -> [0, 1, 0, 2, 3, 3]
x_data = [char_to_int[ch] for ch in x_data]
# 'ihello' -> [1, 0, 2, 3, 3, 4]
y_data = [char_to_int[ch] for ch in y_data]
# ③ Set the time-series shape: one-hot encode each index list and reshape
# to (batch, time steps, features) — here (1, 6, 5) — as the RNN expects.
def _one_hot_sequence(indices):
    # (6, 5) one-hot matrix -> (1, 6, 5) batch holding one sequence.
    return utils.to_categorical(indices, onehot_dim).reshape(-1, seq_len, onehot_dim)


x_data = _one_hot_sequence(x_data)
y_data = _one_hot_sequence(y_data)
print()
# (2) Model
# ①② Two stacked LSTM layers of equal width, both returning the full
# sequence so every time step gets a prediction.
# ③ A Dense softmax head maps each time step to a distribution over the
# vocabulary.
model = Sequential()
model.add(layers.LSTM(128, return_sequences=True))
model.add(layers.LSTM(128, return_sequences=True))
model.add(layers.Dense(onehot_dim, activation=activations.softmax))
model.build(input_shape=(None, seq_len, onehot_dim))
model.summary()

# ④ Compile: Adam optimizer with categorical cross-entropy (labels are
# one-hot), tracking accuracy.
model.compile(
    optimizer=optimizers.Adam(),
    loss=losses.categorical_crossentropy,
    metrics=['acc'],
)
# ⑤ Train on the single sequence.
model.fit(x_data, y_data, batch_size=100, epochs=1000)

# ⑥ Predict the class index at every time step, e.g. [[0, 1, 2, 3, 3, 4]].
# Sequential.predict_classes() was removed in TensorFlow 2.6; the
# equivalent is an argmax over the softmax outputs — result shape (1, 6).
res = np.argmax(model.predict(x_data), axis=-1)

print()
# ⑦ Decode the predicted indices of the first (only) sequence back into
# characters via int_to_char and print the string so the result can be
# checked by eye (e.g. indices [1, 0, 2, 3, 3, 4] -> 'ihello').
predicted_chars = [int_to_char[idx] for idx in res[0]]
print("".join(predicted_chars))
