import torch

# Hyperparameters for a character-level RNN classifier.
num_class = 4 # number of output classes (one per character in idx2char)
input_size = 4 # vocabulary size (number of distinct input characters)
hidden_size = 8 # RNN hidden-state dimension
embedding = 10  # embedding dimension
batch_size = 1
num_layers = 2  # number of stacked RNN layers
seq_len = 5     # sequence length

# Training data: learn to map "hello" -> "ohlol" character by character.
idx2char = ['e', 'h', 'l', 'o']
x_data = [[1,0,2,2,3]] # "hello"; shape (batch, seq_len)
y_data = [3,1,2,3,2] # "ohlol"; shape (batch * seq_len)

inputs = torch.LongTensor(x_data)
labels = torch.LongTensor(y_data)

class Model(torch.nn.Module):
    """Embedding -> stacked RNN -> linear classifier over characters.

    Input:  LongTensor of token ids, shape (batch, seq_len).
    Output: logits of shape (batch * seq_len, n_classes), flattened so the
            result can be fed directly to CrossEntropyLoss.

    The constructor defaults reproduce the original hard-coded configuration,
    so ``Model()`` behaves exactly as before; all sizes are now parameters
    instead of reads of module-level globals.
    """

    def __init__(self, vocab_size=4, embed_dim=10, hidden_dim=8,
                 rnn_layers=2, n_classes=4):
        super(Model, self).__init__()
        self.hidden_size = hidden_dim
        self.num_layers = rnn_layers
        self.num_class = n_classes
        self.embedding = torch.nn.Embedding(vocab_size, embed_dim)
        # batch_first=True: input/output tensors are (batch, seq_len, feature).
        self.rnn = torch.nn.RNN(input_size=embed_dim, hidden_size=hidden_dim,
                                num_layers=rnn_layers, batch_first=True)
        self.fc = torch.nn.Linear(hidden_dim, n_classes)

    def forward(self, x):
        # Initial hidden state is zeros: (num_layers, batch, hidden_size).
        hidden = torch.zeros(self.num_layers, x.size(0), self.hidden_size)
        x = self.embedding(x)       # (batch, seq_len, embed_dim)
        x, _ = self.rnn(x, hidden)  # (batch, seq_len, hidden_size)
        x = self.fc(x)              # (batch, seq_len, n_classes)
        # Flatten to (batch * seq_len, n_classes) for CrossEntropyLoss.
        return x.view(-1, self.num_class)
    
# Model, criterion, and optimizer (module-level; used by train() below).
model = Model()
# CrossEntropyLoss expects raw logits (N, C) and class-index targets (N,).
loss = torch.nn.CrossEntropyLoss(reduction="mean")
optimizer = torch.optim.Adam(params=model.parameters(), lr=0.001)

def train():
    """Run one full-batch optimization step and return the scalar loss.

    Side effect: prints the currently predicted string without a trailing
    newline (the caller is expected to append the loss on the same line).
    """
    optimizer.zero_grad()
    outputs = model(inputs)               # (seq_len * batch, num_class) logits
    loss_total = loss(outputs, labels)
    loss_total.backward()
    optimizer.step()
    # Greedy decode: most likely class per time step. argmax indices carry
    # no gradient, so they convert to numpy directly.
    idx = outputs.argmax(dim=1).numpy()
    print("predicted:", ''.join(idx2char[x] for x in idx), end='')
    return loss_total.item()

if __name__ == "__main__":
    # train() prints the prediction with no newline; the print here
    # completes that line with the loss value.
    total_epochs = 10000
    for _epoch in range(total_epochs):
        epoch_loss = train()
        print(",loss: %.4f " % (epoch_loss))