import torch 
import torch.nn as nn
import torch.nn.functional as F

# Pick the GPU when one is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(torch.cuda.is_available())

# Hyper-parameters for the demo below.
batch_size = 8       # sequences per batch
seq_len = 4          # time steps per sequence
input_size = 6       # features per time step
num_classes = 4      # size of the classifier output
hidden_size = 64     # width of the RNN hidden state

# Toggle for the single-layer RNN demo.
OnelayerRNNFlag = True

class OnelayerRNN(nn.Module):
    """Minimal single-layer Elman RNN followed by a linear classifier.

    At each time step the hidden state is updated as
    ``h_t = tanh(W @ [x_t; h_{t-1}] + b)``; only the hidden state of the
    last time step is fed to the output layer, i.e. the network produces
    one set of class logits per sequence.
    """

    def __init__(self, impout_size, hidden_size, num_classes):
        # NOTE(review): the parameter name `impout_size` (sic, "input size")
        # is kept because existing callers pass it by keyword.
        super().__init__()
        self.hidden_size = hidden_size  # fixed typo: was `hideen_size`
        # Input-to-hidden layer consumes [x_t, h_{t-1}] concatenated.
        self.i2h = nn.Linear(impout_size + hidden_size, hidden_size)
        # Hidden-to-output layer maps the final hidden state to logits.
        self.h2o = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        """Run the RNN over ``x`` of shape (batch, seq_len, input_size).

        Returns:
            Logits of shape (batch, num_classes) computed from the hidden
            state after the last time step.
        """
        batch_size, seq_len, _ = x.size()
        # Zero initial hidden state, on the same device as the input.
        h_t = torch.zeros(batch_size, self.hidden_size, device=x.device)
        for t in range(seq_len):
            x_t = x[:, t, :]  # (batch, input_size) slice for step t
            combined = torch.cat((x_t, h_t), dim=1)
            h_t = torch.tanh(self.i2h(combined))
        # Apply the output layer once, after the loop: the original ran
        # h2o at every step but discarded all results except the last.
        return self.h2o(h_t)
# Single-layer RNN demo
if OnelayerRNNFlag:
    # Random training data: inputs of shape (batch, seq_len, features)
    # and integer class targets for CrossEntropyLoss.
    x = torch.randn(batch_size, seq_len, input_size, device=device)
    y = torch.randint(0, num_classes, (batch_size,), device=device)
    # Use the module-level constants instead of re-hard-coding 6/64/4,
    # and actually place the model on the selected device.
    model = OnelayerRNN(
        impout_size=input_size,
        hidden_size=hidden_size,
        num_classes=num_classes,
    ).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
    criterion = nn.CrossEntropyLoss()
    for epoch in range(5):
        output = model(x)
        loss = criterion(output, y)
        # BUG FIX: this call was commented out, so gradients from every
        # epoch accumulated in .grad and corrupted each update step.
        optimizer.zero_grad()
        # Backpropagation: compute d(loss)/d(parameter) for all parameters.
        loss.backward()
        # Update the model parameters (weights and biases) from the gradients.
        optimizer.step()
        print(f"epoch {epoch},loss {loss.item():.4f}")

