# Character-level RNN with an embedding layer and a linear output layer.
import torch
import torch.nn as nn

num_class = 4        # number of output classes (one per character)
dictionary_size = 4  # vocabulary size: total number of distinct characters
hidden_size = 8      # dimensionality of the RNN hidden state
embedding_dim = 10   # each character is embedded into a 10-d vector
batch_size = 1
num_layers = 2       # two stacked RNN layers
seq_len = 5          # sequence length

# Prepare the data.
idx2char = ['e','h','l','o'] # index-to-character lookup table
x_data = [[1,0,2,2,3]] # "hello", shape (batch, seq_len)
y_data = [3,1,2,3,2]   # "ohlol", shape (batch*seq_len,)
inputs = torch.LongTensor(x_data) # inputs: (batch_size, seq_len) = (1, 5)
labels = torch.LongTensor(y_data) # labels: (batch_size*seq_len,) = (5,)
# 构造模型
class Model(nn.Module):
    """Embedding -> stacked RNN -> Linear classifier over characters.

    Maps a batch of token-index sequences to per-position class logits.
    All sizes default to the module-level constants, so ``Model()``
    behaves exactly as before; they are parameters so the class can be
    reused with other vocabularies/shapes.
    """

    def __init__(self, num_class=4, dictionary_size=4, hidden_size=8,
                 embedding_dim=10, num_layers=2):
        super(Model, self).__init__()
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        # dictionary_size: vocabulary size (4 distinct characters here)
        # embedding_dim: each token is represented by a 10-d vector
        self.embedding = nn.Embedding(dictionary_size, embedding_dim)
        self.rnn = nn.RNN(input_size=embedding_dim,
                          hidden_size=hidden_size,
                          num_layers=num_layers,
                          batch_first=True)
        # Project the hidden state (8-d) down to class logits (4-d).
        self.linear = nn.Linear(hidden_size, num_class)

    def forward(self, xs):
        """xs: (batch, seq_len) LongTensor of token indices.

        Returns logits of shape (batch, seq_len, num_class).
        """
        x = self.embedding(xs)  # (batch, seq_len, embedding_dim) = (1, 5, 10)
        # Shape matches nn.RNN's batch_first=True input requirement.
        # Initial hidden state, zeroed; created on the input's device so
        # the model also works on GPU.
        hidden = torch.zeros(self.num_layers,   # num_layers: 2
                             x.size(0),         # batch size
                             self.hidden_size,  # hidden_size: 8
                             device=xs.device)
        o, _ = self.rnn(x, hidden)  # o: (batch, seq_len, hidden_size) = (1, 5, 8)
        out = self.linear(o)        # out: (batch, seq_len, num_class) = (1, 5, 4)
        return out
model = Model()
# Loss function and optimizer.
# CrossEntropyLoss expects logits of shape (N, num_class) and integer
# class targets of shape (N,); the forward output must be reshaped
# accordingly before computing the loss.
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(),lr=0.05) 
# lr=0.01 learned too slowly 
