import torch
from torch import nn

# Vocabulary: the distinct characters appearing in the training strings.
char_box = ['e', 'h', 'l', 'o']

# "hello" (input) and "ohlol" (target) encoded as indices into char_box.
char_hello = [1, 0, 2, 2, 3]
char_ohlol = [3, 1, 2, 3, 2]

# Shape the input as (seq_len, batch) = (5, 1), which is what the
# (non-batch-first) recurrent layers expect before embedding.
inputs = torch.tensor(char_hello).unsqueeze(1)
print(inputs.shape)
# Targets stay a flat (seq_len,) index vector for CrossEntropyLoss.
targets = torch.tensor(char_ohlol)

class MyNet(nn.Module):
    """Character-level sequence model: Embedding -> LSTM -> per-step logits.

    The ``rnn`` and ``gru`` submodules are kept as drop-in alternatives to
    the LSTM (same input/output shapes) but are not used by ``forward``.

    Args:
        input_size: vocabulary size (number of embeddings).
        embedding_size: dimension of each character embedding.
        hidden_size: hidden width of the recurrent layers; also the number
            of output classes per time step.
        num_layers: number of stacked recurrent layers.
    """

    def __init__(self, input_size, embedding_size, hidden_size, num_layers):
        super(MyNet, self).__init__()
        # Bug fix: store the sizes on the instance; the original forward()
        # read the globals `num_layers` / `hidden_size`, so the class only
        # worked with one specific script-level configuration.
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # Embedding layer: input_size x embedding_size lookup table.
        self.emb = nn.Embedding(num_embeddings=input_size, embedding_dim=embedding_size)
        self.rnn = nn.RNN(input_size=embedding_size, hidden_size=hidden_size, num_layers=num_layers)
        self.lstm = nn.LSTM(input_size=embedding_size, hidden_size=hidden_size, num_layers=num_layers)
        self.gru = nn.GRU(input_size=embedding_size, hidden_size=hidden_size, num_layers=num_layers)

    def forward(self, inputs):
        """Run the LSTM over a (seq_len, batch) LongTensor of char indices.

        Returns:
            Tensor of shape (seq_len * batch, hidden_size): one row of
            class logits per time step, ready for CrossEntropyLoss.
        """
        inputs = self.emb(inputs)  # -> (seq_len, batch, embedding_size)
        batch_size = inputs.size(1)
        # Zero initial hidden and cell states: (num_layers, batch, hidden).
        h0 = torch.zeros(self.num_layers, batch_size, self.hidden_size)
        c0 = torch.zeros(self.num_layers, batch_size, self.hidden_size)
        # LSTM returns outputs of shape (seq_len, batch, hidden_size).
        outputs, (hn, cn) = self.lstm(inputs, (h0, c0))
        # Flatten to 2-D; use self.hidden_size instead of the hard-coded 4
        # so the class works for any hidden width.
        return outputs.view(-1, self.hidden_size)

# Hyperparameters (module-level names; later code reads these directly).
input_size = 4        # vocabulary size
embedding_size = 10   # embedding dimension
hidden_size = 4       # recurrent hidden width (= vocab size, so logits map to chars)
num_layers = 1        # stacked recurrent layers

mynet = MyNet(input_size, embedding_size, hidden_size, num_layers)

epoch = 30
loss_fn = nn.CrossEntropyLoss()
optim = torch.optim.Adam(mynet.parameters(), lr=0.1)

if __name__ == '__main__':
    for i in range(epoch):
        print(f'----------第{i+1}轮训练开始----------')
        mynet.train()

        # Forward pass; show the current prediction as a word.
        outputs = mynet(inputs)
        predicted = outputs.argmax(dim=1)
        print(''.join(char_box[idx] for idx in predicted))

        # CrossEntropyLoss expects logits of shape (N, C) and targets (N,).
        loss = loss_fn(outputs, targets)
        optim.zero_grad()
        loss.backward()
        optim.step()

        print(f'第{i+1}轮训练结束,loss={loss}')



