import torch
# hello -> ohlol
# One-hot encoding: map each character of the text to a class index
# e ----> 0
# h ----> 1
# l ----> 2
# o ----> 3
# "hello" is encoded as the index sequence 10223; as a one-hot matrix:
# [
#     [0,1,0,0],   # h
#     [1,0,0,0],   # e
#     [0,0,1,0],   # l
#     [0,0,1,0],   # l
#     [0,0,0,1]    # o
# ]
# Multi-class problem with 4 output classes: softmax yields the class
# probabilities, and the loss is computed against the one-hot targets.
# Hyperparameters for the character-level RNN cell.
input_size = 4    # one-hot dimension (4 distinct characters)
hidden_size = 4   # hidden size doubles as the number of output classes
batch_size = 1

# Vocabulary: class index -> character.
idx2char = ['e', 'h', 'l', 'o']
x_data = [1, 0, 2, 2, 3]  # "hello" as character indices
y_data = [3, 1, 2, 3, 2]  # "ohlol" as character indices (targets)

# One-hot lookup table: row i is the one-hot vector for character index i.
# BUG FIX: in the original table rows 0 and 1 were swapped, so the encoding
# contradicted the documented scheme (index 0, 'e', mapped to [0,1,0,0]
# instead of [1,0,0,0]). The table is now the 4x4 identity.
one_hot_look_up = [
    [1, 0, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 1, 0],
    [0, 0, 0, 1]
]
# One-hot encode the input; nested-list shape is (seq_len, input_size).
x_one_hot = [one_hot_look_up[x] for x in x_data]
# view(-1, ...) keeps seq_len and adds the batch_size / input_size dims,
# giving RNNCell-style inputs of shape (seq_len, batch_size, input_size).
inputs = torch.Tensor(x_one_hot).view(-1, batch_size, input_size)
# Targets reshaped to (seq_len, 1) so iterating yields one label per step.
labels = torch.LongTensor(y_data).view(-1, 1)

class Model(torch.nn.Module):
    """Single-RNNCell model: maps a one-hot character to next-character
    logits (the hidden state doubles as the class scores)."""

    def __init__(self, input_size, hidden_size, batch_size):
        super(Model, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.batch_size = batch_size
        self.rnncell = torch.nn.RNNCell(input_size=input_size, hidden_size=hidden_size)

    def init_hidden(self):
        """Return an all-zero hidden state of shape (batch_size, hidden_size)."""
        return torch.zeros(self.batch_size, self.hidden_size)

    def forward(self, input, hidden=None):
        """Run one RNN step.

        Args:
            input: tensor of shape (batch_size, input_size).
            hidden: previous hidden state of shape (batch_size, hidden_size),
                or None to start from zeros.

        Returns:
            The new hidden state, shape (batch_size, hidden_size).

        BUG FIX: the original unconditionally re-created a zero hidden state
        on every call, so no state was ever carried between timesteps and the
        "RNN" degenerated into a per-character classifier. Callers can now
        thread the returned hidden state back in; calling with the default
        hidden=None preserves the old single-step behavior.
        """
        if hidden is None:
            hidden = self.init_hidden()
        return self.rnncell(input, hidden)
        
# Instantiate the network plus its training companions: an Adam optimiser
# and a mean-reduced cross-entropy criterion applied to the per-step logits.
model = Model(input_size, hidden_size, batch_size)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
loss = torch.nn.CrossEntropyLoss(reduction="mean")

def train():
    """Run one epoch over the whole sequence.

    Prints the predicted character string for this epoch, accumulates the
    per-timestep cross-entropy losses, performs a single backward pass and
    optimiser step, and returns the summed loss as a float.

    NOTE(review): model(input) re-initialises its hidden state to zeros on
    every call, so no state is carried between timesteps here.
    """
    optimizer.zero_grad()
    total = 0
    print("Predict String: ", end='')
    # One timestep per character: (1, input_size) input, (1,) target.
    for step_input, step_label in zip(inputs, labels):
        logits = model(step_input)
        total = total + loss(logits, step_label)
        # argmax over the 4 classes -> predicted character index
        _, pred = logits.max(dim=1)
        print(idx2char[pred.item()], end='')

    total.backward()
    optimizer.step()
    return total.item()

if __name__ == "__main__":
    # Train repeatedly; each epoch prints its predicted string, then the loss.
    for epoch in range(100000):
        epoch_loss = train()
        print(",loss: %.4f " % epoch_loss)