# --- Model, loss, and optimizer setup -------------------------------------
model = CNNLSTM()
# Mean-squared-error regression loss. (MultiLabelSoftMarginLoss and L1Loss/MAE
# were tried during development and discarded.)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# Wrap the numpy training arrays as tensors and batch them for training.
features = torch.from_numpy(x_train)
targets = torch.from_numpy(y_train)
dataset = Data.TensorDataset(features, targets)
# drop_last=False (the default) keeps the final, possibly smaller, batch
# instead of discarding the leftover samples that don't fill a full batch.
loader = Data.DataLoader(dataset, batch_size=128, shuffle=True, drop_last=False)
# --- Training loop --------------------------------------------------------
# For each epoch, iterate the DataLoader and run the standard
# forward / loss / backward / optimizer-step cycle on every mini-batch.
epochs = 12000
# NOTE(review): `iter` shadows the builtin of the same name; kept as-is in
# case later code reads this module-level counter — consider renaming.
iter = 0
train_loss = []  # last-batch loss per epoch, collected for plotting the training curve
for epoch in range(epochs):
    for step, (batch_x, batch_y) in enumerate(loader):
        # batch_x: (batch, 10, 12); batch_y: (batch,) — assumed from the
        # original author's comments; confirm against dataset construction.
        outputs = model(batch_x)                   # (batch, 1)
        batch_y = torch.reshape(batch_y, (-1, 1))  # match outputs' shape for MSELoss
        loss = criterion(outputs, batch_y)         # compute the loss
        optimizer.zero_grad()  # clear gradients accumulated by the previous step
        loss.backward()        # backpropagate
        optimizer.step()
        iter += 1
        if iter % 100 == 0:
            print("epoch:%d  iter: %d, loss: %1.5f" % (epoch, iter, loss.item()))
    # Record this epoch's last-batch loss. `.detach()` replaces the
    # deprecated/unsafe `.data` attribute: same value, but properly
    # excluded from autograd tracking.
    train_loss.append(loss.detach().numpy())
# Persist the learned parameters (state_dict only, not the whole module).
checkpoint_path = "./state_dict_TemperModel.pt"
torch.save(model.state_dict(), checkpoint_path)
