import torch
import torch.nn as nn
from torch.autograd import Variable

# Teach an RNN the mapping "hihell" -> "ihello" (character-level toy example).

torch.manual_seed(777)  # reproducibility

# Hyperparameters
learning_rate = 0.1
num_epochs = 15

# Index -> character lookup table
idx2char = ['h', 'i', 'e', 'l', 'o']

# Teach hello: hihell -> ihello
x_data = [[0, 1, 0, 2, 3, 3]]   # "hihell" as character indices
# Derive the one-hot encoding from x_data instead of hand-writing the matrix,
# so the indices and the encoding can never fall out of sync.
x_one_hot = [[[1 if i == c else 0 for i in range(len(idx2char))]
              for c in seq]
             for seq in x_data]

y_data = [1, 0, 2, 3, 3, 4]    # "ihello" as character indices

# Convert to torch tensors.  (torch.autograd.Variable has been a deprecated
# identity wrapper since PyTorch 0.4, so plain tensors are used directly.)
inputs = torch.Tensor(x_one_hot)   # float one-hot input, shape (1, 6, 5)
labels = torch.LongTensor(y_data)  # integer class targets, shape (6,)

# RNN configuration
num_classes = 5      # number of output classes
input_size = 5       # input feature dimension (one-hot size)
hidden_size = 5      # hidden units; 5 so the RNN output maps directly to one-hot classes
batch_size = 1       # one sentence per batch
sequence_length = 6  # |ihello| == 6
num_layers = 1       # single stacked RNN layer

#创建rnn模型类
class RNN(nn.Module):
    def __init__(self, num_classes, input_size, hidden_size, num_layers):
        super(RNN, self).__init__()
        self.num_classes = num_classes
        self.num_layers = num_layers
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.sequence_length = sequence_length

        # batch_first 第一个维度值是数量
        #input_size输入数据维度 hidden_size隐藏层大小 num_layersrnn单元堆叠层数
        self.rnn = nn.RNN(input_size=input_size, hidden_size=hidden_size,
                          num_layers=num_layers, batch_first=True)
        #定义rnn单元输出的全连接网络fc:fully connected
        self.fc = nn.Linear(hidden_size, num_classes)
    # 正向传播
    def forward(self, x):
        # 初始化隐藏层状态信息【样本数量，层数， 神经元数量】
        h_0 = Variable(torch.zeros(
            x.size(0), self.num_layers, self.hidden_size))

        # 将输入x变成rnn输入：[样本数,序列长度,输入数据维度]Reshape input【样本数，时间步数量，特征数】
        x.view(x.size(0), self.sequence_length, self.input_size)
        #out是所有rnn单元的输出，_是最后一个单元输出
        out, _ = self.rnn(x, h_0) # out 输出的结果
        #将输出变成维度[m*序列长度, 隐藏层单元个数]
        out = out.view(-1, self.hidden_size) # 神经元个数 【6,5】
        # out = torch.reshape(out,[-1, self.hidden_size])
        #输入到全连接网络fc
        out = self.fc(out)

        return out


# Build the model.
rnn = RNN(num_classes, input_size, hidden_size, num_layers)

# Loss and optimizer.  CrossEntropyLoss applies softmax internally,
# so the model emits raw logits.
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate)

# Training loop: full-batch gradient steps on the single sequence.
for epoch in range(num_epochs):
    outputs = rnn(inputs)              # forward pass -> per-step logits
    optimizer.zero_grad()              # clear accumulated gradients
    loss = criterion(outputs, labels)  # compare logits against targets
    loss.backward()                    # backpropagate
    optimizer.step()                   # update parameters

    # Greedy decode: most likely class per time step, mapped back to chars.
    predicted = outputs.max(1)[1]
    decoded = ''.join(idx2char[int(i)] for i in predicted)
    print("epoch: %d, loss: %1.3f" % (epoch + 1, loss.item()))
    print("Predicted string: ", decoded)

print("Learning finished!")
