from tqdm.auto import tqdm

def train(model, dloader, device, optimizer, criterion, epoch):
    """Run one training epoch over `dloader` and return the mean batch loss.

    Args:
        model: torch.nn.Module whose forward accepts (X, P).
        dloader: iterable of dict batches with tensor values under keys
            'X' (input), 'Y' (target), and 'P' (phase/extra input); must
            support len() for the progress bar total.
        device: torch device the batch tensors are moved to.
        optimizer: optimizer stepping the model's parameters.
        criterion: loss function mapping (model output, Y) -> scalar tensor.
        epoch: current epoch number (display only, shown in the progress bar).

    Returns:
        float: mean loss over all batches, or 0.0 if the loader is empty.
    """
    model.train()
    running_loss = 0.0
    num_batches = 0
    with tqdm(total=len(dloader), desc=f"Training PFNN epoch {epoch}") as pbar:
        # The original enumerate index was unused and `cnt` duplicated it;
        # a single counter suffices.
        for batch in dloader:
            optimizer.zero_grad()
            X = batch['X'].to(device)
            Y = batch['Y'].to(device)
            P = batch['P'].to(device)
            out = model(X, P)
            loss = criterion(out, Y)
            loss.backward()
            optimizer.step()
            num_batches += 1
            running_loss += loss.item()
            pbar.set_postfix({
                'epoch': epoch,
                'loss': running_loss / num_batches,
            })
            pbar.update(1)
    # Guard the final division: an empty dataloader previously raised
    # ZeroDivisionError here.
    return running_loss / num_batches if num_batches else 0.0