import torch
from d2l import torch as d2l
from torch import nn
# Shared squared-error loss (mean reduction) used by both the training loop
# and the log-RMSE evaluation metric below.
loss_func = nn.MSELoss()
def log_mse(net, features, labels):
    """Return the RMSE between the logs of predictions and labels.

    Predictions are clamped to [1, inf) so that ``log`` stays finite even if
    the net outputs values at or below zero. Runs under ``torch.no_grad()``
    because this is a pure evaluation metric — without it, calling this on
    the full train/test matrices each epoch would build a throwaway autograd
    graph.

    Args:
        net: callable mapping a feature tensor to a prediction tensor.
        features: input tensor fed to ``net``.
        labels: target tensor; assumed strictly positive — TODO confirm
            upstream preprocessing guarantees this.

    Returns:
        float: sqrt(mean((log(clamped preds) - log(labels))**2)).
    """
    with torch.no_grad():
        # Clamp to >= 1 so log() cannot produce -inf/nan for tiny outputs.
        clipped_preds = torch.clamp(net(features), 1, float('inf'))
        # mse_loss with default reduction='mean' matches nn.MSELoss().
        rmse = torch.sqrt(torch.nn.functional.mse_loss(
            torch.log(clipped_preds), torch.log(labels)))
    return rmse.item()

def train(net,train_features,train_labels,test_features,test_labels,
          num_epochs,lr,weight_decay,batch_size):
    """Fit ``net`` with Adam and record the log-RMSE after every epoch.

    Args:
        net: the model to optimize in place.
        train_features / train_labels: full training set tensors.
        test_features / test_labels: held-out set; pass ``test_labels=None``
            to skip test evaluation entirely.
        num_epochs: number of passes over the training data.
        lr: Adam learning rate.
        weight_decay: L2 penalty handed to the optimizer.
        batch_size: minibatch size for the data iterator.

    Returns:
        (train_ls, test_ls): per-epoch log-RMSE on the training set, and on
        the test set (empty list when ``test_labels`` is ``None``).
    """
    train_ls = []
    test_ls = []
    data_iter = d2l.load_array((train_features, train_labels), batch_size)
    trainer = torch.optim.Adam(net.parameters(), lr=lr,
                               weight_decay=weight_decay)
    for _ in range(num_epochs):
        for batch_X, batch_y in data_iter:
            trainer.zero_grad()
            batch_loss = loss_func(net(batch_X), batch_y)
            batch_loss.backward()
            trainer.step()
        # Track full-dataset log-RMSE once per epoch.
        train_ls.append(log_mse(net, train_features, train_labels))
        if test_labels is not None:
            test_ls.append(log_mse(net, test_features, test_labels))
    return train_ls, test_ls