title | labels | bodyText
---|---|---|
How to correctly test the model using multiple test data loaders? | [
"question"
] | How to correctly test the model using multiple test data loaders?
My benchmark has two test datasets, so I just want to evaluate both of them in one test epoch. However, I don't know how to correctly use those two data loaders in the test_* functions. Here is my code:
Code
DataModule
import pytorch_lightning as pl
from torch.utils.data import DataLoader, random_split
from torchvision import transforms
from torchvision.datasets import MNIST

class MNISTData(pl.LightningDataModule):
def __init__(self, data_dir="./", batch_size=100):
super(MNISTData, self).__init__()
MNIST(data_dir, train=True, download=True)
MNIST(data_dir, train=False, download=True)
self.data_dir = data_dir
self.batch_size = batch_size
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
def setup(self, stage=None):
# Assign train/val datasets for use in dataloaders
if stage == 'fit' or stage is None:
mnist_full = MNIST(self.data_dir, train=True, transform=self.transform)
self.mnist_train, self.mnist_val = random_split(mnist_full, [55000, 5000])
# Assign test dataset for use in dataloader(s)
if stage == 'test' or stage is None:
mnist_test = MNIST(self.data_dir, train=False, transform=self.transform)
self.mnist_testA, self.mnist_testB = random_split(mnist_test, [5000, 5000])
def train_dataloader(self):
return DataLoader(self.mnist_train, self.batch_size, shuffle=True, num_workers=16, pin_memory=True)
def val_dataloader(self):
return DataLoader(self.mnist_val, self.batch_size, num_workers=16, pin_memory=True)
def test_dataloader(self):
return [
DataLoader(self.mnist_testA, self.batch_size, num_workers=16, pin_memory=True),
DataLoader(self.mnist_testB, self.batch_size, num_workers=16, pin_memory=True),
]
Module
import torch.nn as nn
from torch.optim import Adam
from torch.optim.lr_scheduler import ExponentialLR
# MeanAccuracy / MeanLoss below are custom metric modules defined elsewhere by the author

class MNISTModel(pl.LightningModule):
def __init__(self, data_dir="./", batch_size=100, n_train=1, n_val=1, n_test=2):
super(MNISTModel, self).__init__()
self.n_train = n_train
self.train_accu = MeanAccuracy()
self.train_loss = MeanLoss()
self.n_val = n_val
self.val_accu = MeanAccuracy()
self.val_loss = MeanLoss()
self.n_test = n_test
self.test_accu_list = nn.ModuleList([MeanAccuracy() for i in range(n_test)])
self.test_loss_list = nn.ModuleList([MeanLoss() for i in range(n_test)])
self.conv1 = nn.Sequential(
nn.Conv2d(1, 256, 3, 1, 1, bias=False),
nn.BatchNorm2d(256), nn.LeakyReLU()
)
self.conv2 = nn.Sequential(
nn.Conv2d(256, 512, 3, 1, 1, bias=False),
nn.BatchNorm2d(512), nn.LeakyReLU()
)
self.head = nn.Sequential(
nn.Conv2d(512, 10, 1, 1, 0, bias=False),
nn.AdaptiveAvgPool2d(1)
)
def forward(self, x):
# called with self(x)
x = self.conv1(x)
x = self.conv2(x)
x = self.head(x).view(x.shape[0], 10)
return x
def training_step(self, batch, batch_idx, train_idx=0):
# REQUIRED
x, y = batch
y_hat = self(x)
return y_hat, y, train_idx
def training_step_end(self, res):
y_hat, y, idx = res
loss = self.train_loss(y_hat, y)
accu = self.train_accu(y_hat, y)
return loss
def training_epoch_end(self, outs):
mean_loss = self.train_loss.compute()
mean_accu = self.train_accu.compute()
log_dict = {"train_loss": mean_loss, "train_accu": mean_accu}
self.log_dict(log_dict, prog_bar=True, on_step=False, on_epoch=True)
def validation_step(self, batch, batch_idx, val_idx=0):
# OPTIONAL
x, y = batch
y_hat = self(x)
return y_hat, y, val_idx
def validation_step_end(self, res):
y_hat, y, idx = res
loss = self.val_loss(y_hat, y)
accu = self.val_accu(y_hat, y)
def validation_epoch_end(self, losses):
mean_loss = self.val_loss.compute()
mean_accu = self.val_accu.compute()
log_dict = {"val_loss": mean_loss, "val_accu": mean_accu}
self.log_dict(log_dict, prog_bar=True, on_step=False, on_epoch=True)
def test_step(self, batch, batch_idx, test_idx=0):
x, y = batch
y_hat = self(x)
return y_hat, y, test_idx
def test_step_end(self, res):
y_hat, y, idx = res
loss = self.test_loss_list[idx](y_hat, y)
accu = self.test_accu_list[idx](y_hat, y)
def test_epoch_end(self, losses):
for idx in range(self.n_test):
mean_loss = self.test_loss_list[idx].compute()
mean_accu = self.test_accu_list[idx].compute()
log_dic = {f"test{idx}_loss": mean_loss, f"test{idx}_accu": mean_accu}
self.log_dict(log_dic, prog_bar=True, on_step=False, on_epoch=True)
def configure_optimizers(self):
optimizer = Adam(self.parameters(), lr=0.05)
exp_lr = ExponentialLR(optimizer, gamma=0.98)
return [optimizer], [exp_lr]
What have you tried?
After testing the model, I got output like this:
--------------------------------------------------------------------------------
DATALOADER:0 TEST RESULTS
{'test0_accu': tensor([0.5268], device='cuda:0'),
'test0_loss': tensor([1.2401], device='cuda:0'),
'test1_accu': tensor([0.5278], device='cuda:0'),
'test1_loss': tensor([1.2341], device='cuda:0')}
--------------------------------------------------------------------------------
DATALOADER:1 TEST RESULTS
{'test0_accu': tensor([0.5268], device='cuda:0'),
'test0_loss': tensor([1.2401], device='cuda:0'),
'test1_accu': tensor([0.5278], device='cuda:0'),
'test1_loss': tensor([1.2341], device='cuda:0')}
--------------------------------------------------------------------------------
So, what can I do to change the output format so that it looks like this instead:
--------------------------------------------------------------------------------
DATALOADER:0 TEST RESULTS
{'test0_accu': tensor([0.5268], device='cuda:0'),
'test0_loss': tensor([1.2401], device='cuda:0')}
--------------------------------------------------------------------------------
DATALOADER:1 TEST RESULTS
{'test1_accu': tensor([0.5278], device='cuda:0'),
'test1_loss': tensor([1.2341], device='cuda:0')}
-------------------------------------------------------------------------------- |
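One pattern worth trying (a sketch written against the MNISTModel above, not a verified fix for the exact grouping of the printed results): log each metric from test_step using the dataloader index and drop the combined log_dict call in test_epoch_end, so every metric stays attached to the loader that produced it.
def test_step(self, batch, batch_idx, dataloader_idx=0):
    x, y = batch
    y_hat = self(x)
    # index the per-dataloader metric modules with the loader that produced this batch
    accu = self.test_accu_list[dataloader_idx](y_hat, y)
    loss = self.test_loss_list[dataloader_idx](y_hat, y)
    self.log(f"test{dataloader_idx}_accu", accu, on_step=False, on_epoch=True)
    self.log(f"test{dataloader_idx}_loss", loss, on_step=False, on_epoch=True)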
Bounded memory leak caused by `trainer.evaluation_loop.outputs` | [
"bug",
"help wanted",
"priority: 0"
] | 🐛 Bug
trainer.evaluation_loop.outputs caches the outputs of every validation step in def run_evaluation(self, max_batches=None) of the trainer:
pytorch-lightning/pytorch_lightning/trainer/trainer.py, line 659 (at d71659b):
self.evaluation_loop.outputs.append(dl_outputs)
It's not reset until the start of the next validation epoch:
pytorch-lightning/pytorch_lightning/trainer/trainer.py, line 621 (at d71659b):
self.evaluation_loop.setup(model, max_batches, dataloaders)
pytorch-lightning/pytorch_lightning/trainer/evaluation_loop.py, lines 124 to 128 (at d71659b):
def setup(self, model, max_batches, dataloaders):
# bookkeeping
self.outputs = []
self.predictions = PredictionCollection(self.trainer.global_rank, self.trainer.world_size)
Please reproduce using the BoringModel
To Reproduce
Sorry, my working environment forbids me from using Google Drive.
import torch
import pytorch_lightning as pl
from pytorch_lightning import LightningModule
from torch.utils.data import Dataset
class BoringModel(LightningModule):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(32, 2)
def forward(self, x):
return self.layer(x)
def loss(self, batch, prediction):
# An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls
return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction))
def on_train_epoch_start(self):
print('Before delete:', torch.cuda.memory_allocated())
for out in self.trainer.evaluation_loop.outputs[0]:
if 'x' in out:
del out['x']
print('After delete:', torch.cuda.memory_allocated())
def training_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def training_step_end(self, training_step_outputs):
return training_step_outputs
def training_epoch_end(self, outputs) -> None:
torch.stack([x["loss"] for x in outputs]).mean()
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"x": loss}
def validation_epoch_end(self, outputs) -> None:
torch.stack([x['x'] for x in outputs]).mean()
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
def test_x(tmpdir):
# init model
model = BoringModel()
# Initialize a trainer
trainer = pl.Trainer(
max_epochs=1,
progress_bar_refresh_rate=20,
gpus=[0],
)
# Train the model β‘
trainer.fit(model, train, val)  # train/val dataloaders are defined elsewhere by the author
Execution
test_x(tmpdir)
Output
GPU available: True, used: True
TPU available: None, using: 0 TPU cores
LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]
| Name | Type | Params
---------------------------------
0 | layer | Linear | 66
---------------------------------
66 Trainable params
0 Non-trainable params
66 Total params
Validation sanity check: 0%
0/2 [00:00<?, ?it/s]
Epoch 0: 100%
626/626 [00:00<00:00, 653.87it/s, loss=2.5e-14, v_num=6]
Before delete: 2048
After delete: 1024
Expected behavior
There shouldn't be such cached tensors.
This may cause OOM in cases where it could otherwise be avoided.
For example,
On the first training epoch, a model that just barely fits in GPU memory runs fine without OOM.
After the first validation epoch, some GPU tensors are retained and occupy some portion of the memory.
On the second training epoch, the same model encounters OOM error.
Clear all references to those validation output tensors at the end of the validation epoch.
Maybe, more specifically, here?
pytorch-lightning/pytorch_lightning/trainer/evaluation_loop.py, lines 224 to 229 (at d71659b):
def __run_eval_epoch_end(self, num_dataloaders, using_eval_result):
model = self.trainer.get_model()
# with a single dataloader don't pass an array
outputs = self.outputs
eval_results = outputs
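A possible mitigation in user code until the loop clears these references itself (a sketch based on the BoringModel above, not the library-side fix): return only detached CPU tensors from validation_step, so whatever the evaluation loop caches no longer pins GPU memory between epochs.
def validation_step(self, batch, batch_idx):
    output = self.layer(batch)
    loss = self.loss(batch, output)
    # detach and move to CPU so the cached reference does not hold GPU memory
    return {"x": loss.detach().cpu()}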
Environment
CUDA:
GPU:
Tesla T4
available: True
version: 10.1
Packages:
numpy: 1.19.5
pyTorch_debug: True
pyTorch_version: 1.7.0+cu101
pytorch-lightning: 1.1.6
tqdm: 4.41.1
System:
OS: Linux
architecture:
64bit
processor: x86_64
python: 3.6.9
version: #1 SMP Thu Jul 23 08:00:38 PDT 2020
Additional context |
Must the LightningModule init args be a dict? | [
"help wanted",
"question",
"docs"
] | 🐛 Bug
My LightningModule's init argument is an object parsed from the command line. When I use load_from_checkpoint(), the argument comes back as a dict. Is this expected? It breaks my original code, which accesses attributes such as opt.batch_size. pytorch_lightning version is 1.1.6.
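A workaround sketch (the class name is illustrative and it assumes a single init argument opt, as described above): rebuild the Namespace inside __init__ when the saved hyperparameters come back as a dict, so attribute access keeps working after load_from_checkpoint().
import argparse
from pytorch_lightning import LightningModule

class MyModel(LightningModule):
    def __init__(self, opt):
        super().__init__()
        if isinstance(opt, dict):
            # hyperparameters restored from a checkpoint may arrive as a plain dict
            opt = argparse.Namespace(**opt)
        self.opt = opt
        self.batch_size = opt.batch_size  # attribute-style access works again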
PyTorch Version (e.g., 1.0):
OS (e.g., Linux): mac
How you installed PyTorch (conda, pip, source): pip
Build command you used (if compiling from source):
Python version: 3.6
CUDA/cuDNN version: no
GPU models and configuration: no
Any other relevant information: no
Additional context |
[docs] Add documentation for non-slurm computing cluster setup | [
"docs"
] | Add documentation for the functionality of #1387.
See the discussion at #1345 |
Apply some previously ignored flake8 rules | [
"good first issue",
"won't fix",
"refactor"
] | pytorch-lightning/setup.cfg, lines 84 to 87 (at ae14fca):
# TODO: delete these next 3 because we no longer use black
E203 # whitespace before ':'. Opposite convention enforced by black
E231 # missing whitespace after ',', ';', or ':'; for black
W503 # line break before binary operator, need for black
Each of these should be removed and the formatting fixed in separate PRs; illustrative before/after snippets are shown after the tracker below.
Context: #5739
Tracker:
E203
E231
W504: Just remove, should not need formatting changes
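For contributors picking these up, this is roughly what the fixes look like in practice (illustrative snippets, not actual pytorch-lightning code):
items = [0, 1, 2]
x = dict(a=1,b=2)    # E231: missing whitespace after ','
x = dict(a=1, b=2)   # fixed
y = items[1 :]       # E203: whitespace before ':'
y = items[1:]        # fixed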
Contributors are welcome 🤗 |
strange behavior with tests in PL: tests influence each other | [
"bug",
"help wanted",
"priority: 0",
"ci"
] | 🐛 Bug
I observed that some tests under tests/trainer have very strange behavior. The order in which the tests are executed seems to matter and they are influencing each other!
To Reproduce
Checkout release/1.2-dev branch
Remove the predict() method in one of the accelerators (GPUAccelerator is enough)
Run py.test -v tests/trainer
You will see that a ton of unrelated tests fail, but none of them reports "Accelerator has no attribute predict", which should be the only reason a test would fail. But be prepared, it gets funnier.
Run only a single test out of all these that failed, for example:
py.test -v tests\trainer\optimization\test_optimizers.py::test_lr_scheduler_strict
And it passes! What?
Expected behavior
Tests should not have an influence on each other.
Environment
CUDA:
- GPU:
- GeForce GTX 1070
- available: True
- version: 10.2
Packages:
- numpy: 1.18.1
- pyTorch_debug: True
- pyTorch_version: 1.7.0
- pytorch-lightning: 1.2.0dev
- tqdm: 4.55.1
System:
- OS: Windows
- architecture:
- 64bit
- WindowsPE
- processor: AMD64 Family 23 Model 113 Stepping 0, AuthenticAMD
- python: 3.7.7
- version: 10.0.19041
Also on my linux node, same behavior
Additional context |
Densenet architectures providing non-deterministic results | [
"question"
] | ❓ Questions and Help
Before asking:
Try to find answers to your questions in the Lightning Forum!
Search for similar issues.
Search the docs.
I have tried looking for answers in other forums but couldn't find anything related to my question.
What is your question?
I can't seem to obtain deterministic results using Densenets (https://github.com/gpleiss/efficient_densenet_pytorch). I was able to obtain deterministic results with a relatively simpler architecture, LitAutoEncoder. I was wondering if that's because of the large number of convolution layers involved in Densenet models.
Code
The Densenet code I am using is as follows,
#!/usr/bin/env python3
import os
import time
import torch
from torchvision import datasets, transforms
import argparse
import json
import pprint
import copy
import sys
import shutil
import numpy as np
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from collections import OrderedDict
from torch.utils.data import TensorDataset, DataLoader
from pytorch_lightning.callbacks import Callback, ModelCheckpoint
from pytorch_lightning import LightningModule, Trainer, seed_everything
import pytorch_lightning as pl
from typing import List  # needed for the type annotation in my_callbacks below
def _bn_function_factory(norm, relu, conv):
def bn_function(*inputs):
concated_features = torch.cat(inputs, 1)
bottleneck_output = conv(relu(norm(concated_features)))
return bottleneck_output
return bn_function
class _DenseLayer(pl.LightningModule):
def __init__(self,
num_input_features,
growth_rate,
bn_size,
drop_rate,
efficient=False):
super(_DenseLayer, self).__init__()
self.add_module('norm1', nn.BatchNorm2d(num_input_features)),
self.add_module('relu1', nn.ReLU(inplace=True)),
self.add_module(
'conv1',
nn.Conv2d(num_input_features,
bn_size * growth_rate,
kernel_size=1,
stride=1,
bias=False)),
self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),
self.add_module('relu2', nn.ReLU(inplace=True)),
self.add_module(
'conv2',
nn.Conv2d(bn_size * growth_rate,
growth_rate,
kernel_size=3,
stride=1,
padding=1,
bias=False)),
self.drop_rate = drop_rate
self.efficient = efficient
def forward(self, *prev_features):
bn_function = _bn_function_factory(self.norm1, self.relu1, self.conv1)
if self.efficient and any(prev_feature.requires_grad
for prev_feature in prev_features):
bottleneck_output = cp.checkpoint(bn_function, *prev_features)
else:
bottleneck_output = bn_function(*prev_features)
new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
if self.drop_rate > 0:
new_features = F.dropout(new_features,
p=self.drop_rate,
training=self.training)
return new_features
class _Transition(nn.Sequential):
def __init__(self, num_input_features, num_output_features):
super(_Transition, self).__init__()
self.add_module('norm', nn.BatchNorm2d(num_input_features))
self.add_module('relu', nn.ReLU(inplace=True))
self.add_module(
'conv',
nn.Conv2d(num_input_features,
num_output_features,
kernel_size=1,
stride=1,
bias=False))
self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
class _DenseBlock(pl.LightningModule):
def __init__(self,
num_layers,
num_input_features,
bn_size,
growth_rate,
drop_rate,
efficient=False):
super(_DenseBlock, self).__init__()
for i in range(num_layers):
layer = _DenseLayer(
num_input_features + i * growth_rate,
growth_rate=growth_rate,
bn_size=bn_size,
drop_rate=drop_rate,
efficient=efficient,
)
self.add_module('denselayer%d' % (i + 1), layer)
def forward(self, init_features):
features = [init_features]
for name, layer in self.named_children():
new_features = layer(*features)
features.append(new_features)
return torch.cat(features, 1)
class DenseNet(pl.LightningModule):
r"""Densenet-BC model class, based on
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
Args:
growth_rate (int) - how many filters to add each layer (`k` in paper)
block_config (list of 3 or 4 ints) - how many layers in each pooling block
num_init_features (int) - the number of filters to learn in the first convolution layer
bn_size (int) - multiplicative factor for number of bottle neck layers
(i.e. bn_size * k features in the bottleneck layer)
drop_rate (float) - dropout rate after each dense layer
num_classes (int) - number of classification classes
small_inputs (bool) - set to True if images are 32x32. Otherwise assumes images are larger.
efficient (bool) - set to True to use checkpointing. Much more memory efficient, but slower.
"""
def __init__(self,
growth_rate=12,
block_config=(16, 16, 16),
compression=0.5,
num_init_features=24,
bn_size=4,
drop_rate=0,
num_classes=10,
small_inputs=True,
efficient=False):
super(DenseNet, self).__init__()
assert 0 < compression <= 1, 'compression of densenet should be between 0 and 1'
# First convolution
if small_inputs:
self.features = nn.Sequential(
OrderedDict([
('conv0',
nn.Conv2d(3,
num_init_features,
kernel_size=3,
stride=1,
padding=1,
bias=False)),
]))
else:
self.features = nn.Sequential(
OrderedDict([
('conv0',
nn.Conv2d(3,
num_init_features,
kernel_size=7,
stride=2,
padding=3,
bias=False)),
]))
self.features.add_module('norm0',
nn.BatchNorm2d(num_init_features))
self.features.add_module('relu0', nn.ReLU(inplace=True))
self.features.add_module(
'pool0',
nn.MaxPool2d(kernel_size=3,
stride=2,
padding=1,
ceil_mode=False))
# Each denseblock
num_features = num_init_features
for i, num_layers in enumerate(block_config):
block = _DenseBlock(
num_layers=num_layers,
num_input_features=num_features,
bn_size=bn_size,
growth_rate=growth_rate,
drop_rate=drop_rate,
efficient=efficient,
)
self.features.add_module('denseblock%d' % (i + 1), block)
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
trans = _Transition(num_input_features=num_features,
num_output_features=int(num_features *
compression))
self.features.add_module('transition%d' % (i + 1), trans)
num_features = int(num_features * compression)
# Final batch norm
self.features.add_module('norm_final', nn.BatchNorm2d(num_features))
# Linear layer
self.classifier = nn.Linear(num_features, num_classes)
# Initialization
for name, param in self.named_parameters():
if 'conv' in name and 'weight' in name:
n = param.size(0) * param.size(2) * param.size(3)
param.data.normal_().mul_(math.sqrt(2. / n))
elif 'norm' in name and 'weight' in name:
param.data.fill_(1)
elif 'norm' in name and 'bias' in name:
param.data.fill_(0)
elif 'classifier' in name and 'bias' in name:
param.data.fill_(0)
def forward(self, x):
features = self.features(x)
out = F.relu(features, inplace=True)
out = F.adaptive_avg_pool2d(out, (1, 1))
out = torch.flatten(out, 1)
out = self.classifier(out)
return out
def ce_loss(self, logits, labels):
return F.cross_entropy(logits, labels)
def training_step(self, train_batch, batch_idx):
x, y = train_batch
logits = self.forward(x)
loss = self.ce_loss(logits, y)
self.log('train_loss',
loss,
sync_dist=True,
on_epoch=True,
on_step=True)
return {'loss': loss}
def training_epoch_end(self, outputs):
avg_loss = torch.stack([x['loss'] for x in outputs]).mean()
self.log('avg_train_loss', avg_loss, on_epoch=True, sync_dist=True)
def validation_step(self, val_batch, batch_idx):
x, y = val_batch
logits = self.forward(x)
loss = self.ce_loss(logits, y)
self.log('val_loss', loss, on_step=True, on_epoch=True, sync_dist=True)
return {'rval_loss': loss}
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([x['rval_loss'] for x in outputs]).mean()
self.log('avg_val_loss', avg_loss, on_epoch=True, sync_dist=True)
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.parameters(),
lr=0.1,
momentum=0.9,
nesterov=True,
weight_decay=1e-4)
lr_scheduler = {
'scheduler':
torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
factor=0.75,
patience=5,
threshold=5e-3,
threshold_mode='abs',
cooldown=0,
min_lr=1e-6,
verbose=True),
'name':
'red_pl_lr',
'monitor':
'train_loss_epoch'
}
return [optimizer], [lr_scheduler]
class DataModule(pl.LightningDataModule):
def __init__(self, batch_size=16):
super().__init__()
self.batch_size = batch_size
def setup(self, stage=None):
valid_size = 5000
data = "/projects/data/"
mean = [0.5071, 0.4867, 0.4408]
stdv = [0.2675, 0.2565, 0.2761]
train_transforms = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=stdv),
])
test_transforms = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=stdv),
])
# Datasets
self.train_set = datasets.CIFAR10(data,
train=True,
transform=train_transforms,
download=True)
self.test_set = datasets.CIFAR10(data,
train=False,
transform=test_transforms,
download=False)
if valid_size:
valid_set = datasets.CIFAR10(data,
train=True,
transform=test_transforms)
indices = torch.randperm(len(self.train_set))
train_indices = indices[:len(indices) - valid_size]
valid_indices = indices[len(indices) - valid_size:]
self.train_set = torch.utils.data.Subset(self.train_set, train_indices)
self.valid_set = torch.utils.data.Subset(valid_set, valid_indices)
else:
self.valid_set = None
def train_dataloader(self):
return DataLoader(self.train_set,
batch_size=self.batch_size,
num_workers=0,
pin_memory=True)
def val_dataloader(self):
return DataLoader(self.valid_set,
batch_size=self.batch_size,
num_workers=0,
pin_memory=True)
class my_callbacks(Callback):
def __init__(self) -> None:
self.metrics: List = []
def on_epoch_end(self, trainer: Trainer,
pl_module: LightningModule) -> None:
metrics_dict = copy.copy(trainer.callback_metrics)
new_metrics_dict = {k: v.item() for k, v in metrics_dict.items()}
pl_module.print(json.dumps(new_metrics_dict, indent=4, sort_keys=True),
flush=True)
seed_everything(22)
p_callback = my_callbacks()
data_module = DataModule()
model = DenseNet(
growth_rate=12,
block_config=(16, 16),
num_init_features=64,
num_classes=10,
small_inputs=True,
efficient=False,
)
trainer = pl.Trainer(
gpus=-1,
accelerator='ddp',
benchmark=True,
callbacks=[p_callback],
max_epochs=2,
deterministic=True,
progress_bar_refresh_rate=0)
trainer.fit(model, data_module)
The code I am using for LitAutoEncoder is as follows,
#!/usr/bin/env python3
import os
import json
import time
import copy
from argparse import ArgumentParser
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.data import random_split
from torchvision.datasets import MNIST
from torchvision import transforms
import pytorch_lightning as pl
from pytorch_lightning.metrics.functional import accuracy
from pytorch_lightning.callbacks import Callback
from torchvision.datasets.mnist import MNIST
from torchvision import transforms
from pytorch_lightning import LightningModule, Trainer
from typing import List  # needed for the type annotation in my_callbacks below
pl.seed_everything(22)
batch_size = 32
dataset = MNIST(os.getcwd(),
train=True,
download=True,
transform=transforms.ToTensor())
mnist_test = MNIST(os.getcwd(),
train=False,
download=True,
transform=transforms.ToTensor())
mnist_train, mnist_val = random_split(dataset, [55000, 5000])
train_loader = DataLoader(mnist_train, batch_size=batch_size, num_workers=4, pin_memory=True)
val_loader = DataLoader(mnist_val, batch_size=batch_size, num_workers=4, pin_memory=True)
test_loader = DataLoader(mnist_test, batch_size=batch_size, num_workers=4, pin_memory=True)
class LitAutoEncoder(pl.LightningModule):
def __init__(self, batch_size=32, lr=1e-3):
super().__init__()
self.encoder = nn.Sequential(nn.Linear(28 * 28, 64), nn.ReLU(),
nn.Linear(64, 3))
self.decoder = nn.Sequential(nn.Linear(3, 64), nn.ReLU(),
nn.Linear(64, 28 * 28))
self.batch_size = batch_size
self.learning_rate = lr
def forward(self, x):
# in lightning, forward defines the prediction/inference actions
embedding = self.encoder(x)
return embedding
def training_step(self, batch, batch_idx):
x, y = batch
x = x.view(x.size(0), -1)
z = self.encoder(x)
x_hat = self.decoder(z)
loss = F.mse_loss(x_hat, x)
self.log('train_loss', loss, on_step=True, on_epoch=True, sync_dist=True)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
x = x.view(x.size(0), -1)
z = self.encoder(x)
x_hat = self.decoder(z)
loss = F.mse_loss(x_hat, x)
self.log('val_loss', loss, on_step=True, on_epoch=True, sync_dist=True)
def test_step(self, batch, batch_idx):
x, y = batch
x = x.view(x.size(0), -1)
z = self.encoder(x)
x_hat = self.decoder(z)
loss = F.mse_loss(x_hat, x)
self.log('test_loss', loss, on_step=True, on_epoch=True, sync_dist=True)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
return optimizer
class my_callbacks(Callback):
def __init__(self) -> None:
self.metrics: List = []
def on_epoch_end(self, trainer: Trainer,
pl_module: LightningModule) -> None:
metrics_dict = copy.copy(trainer.callback_metrics)
new_metrics_dict = {k: v.item() for k, v in metrics_dict.items()}
pl_module.print(json.dumps(new_metrics_dict, indent=4, sort_keys=True),
flush=True)
model = LitAutoEncoder()
p_callback = my_callbacks()
trainer = pl.Trainer(progress_bar_refresh_rate=0,
max_epochs=2,
gpus=-1,
callbacks=[p_callback],
benchmark=True,
accelerator='ddp',
deterministic=True)
trainer.fit(model, train_loader, val_loader)
What have you tried?
I am running all of my jobs on a supercomputer. I have tried running the code multiple times on the same node to remove any randomness due to having a different machine, but apparently, that doesn't make any difference.
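One thing worth checking (an assumption on my part, not something the author has confirmed): benchmark=True enables the cuDNN auto-tuner, which may pick different convolution algorithms between runs and can undermine deterministic=True, and this matters far more for a convolution-heavy DenseNet than for the linear-layer autoencoder. A minimal variation to try:
trainer = pl.Trainer(
    gpus=-1,
    accelerator='ddp',
    benchmark=False,       # keep cuDNN algorithm selection fixed across runs
    deterministic=True,    # request deterministic kernels where available
    callbacks=[p_callback],
    max_epochs=2,
    progress_bar_refresh_rate=0)
trainer.fit(model, data_module)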
What's your environment?
OS: Linux
Packaging: pip
Version: Pytorch-Lightning 1.2.0rc0 |
Problem with syncing logged values with multi-gpu and ddp | [
"bug",
"help wanted"
] | 🐛 Bug
When logging values with sync_dist=True and DDP (on two GPUs), the logged value is changed and the wrong averaged metric value is logged.
It can be reproduced with a dummy training_step() and batch_size=1:
def training_step(self, batch, batch_idx):
loss = torch.tensor(1.0, device=self.device, requires_grad=True)
self.log('loss', loss, on_epoch=True, sync_dist=True)
return loss
The logged and returned value is 2.0
To Reproduce
This was discovered running ddp with two GPUs
Expected behavior
Logged value 1.0, value of loss not changed
Environment
CUDA:
GPU:
GeForce GTX TITAN X
GeForce GTX TITAN X
GeForce GTX TITAN X
GeForce GTX TITAN X
available: True
version: 11.0
Packages:
numpy: 1.19.4
pyTorch_debug: True
pyTorch_version: 1.7.0
pytorch-lightning: 1.1.6
tqdm: 4.54.0
System:
OS: Linux
architecture:
64bit
ELF
processor:
python: 3.7.8
version: 1 SMP PREEMPT Sun, 27 Dec 2020 10:50:46 +0000
Additional context
Logging was done with Comet.ml |
PyTorch Lightning: save and continue training from a state_dict. | [
"bug",
"duplicate",
"help wanted",
"waiting on author"
] | 🚀 Feature
Save the model and other checkpoint state at any step as a dict, and load these checkpoints to resume training the model on the datasets.
Motivation
Recently, I have been working on federated learning, a learning paradigm where training runs on different clients. The clients share one common model, which is aggregated from the local client models at the server. This training procedure requires that the local clients can stop and send their intermediate models to the server after a given number of epochs or steps. These intermediate models are then aggregated at the server to get the shared common model. Next, the clients load the common model and continue training. This process runs for several rounds.
Pitch
For a LightningModule, I can get its state_dict at any time, and the Trainer could suspend its training procedure and recover training from a state_dict sent by the server. I tested calling trainer.fit() twice with max_epochs, and the model could resume training from a given state_dict. But with max_steps, the second trainer.fit() call starts again from the first batch and finishes after one epoch, instead of continuing from the last breakpoint to the next max_steps. |
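For reference, a self-contained sketch of the checkpoint-based route (the model, step counts and paths are illustrative; whether this fully restores the max_steps counter in 1.1.x is exactly what this issue is asking about):
import torch
from torch.utils.data import DataLoader, Dataset
import pytorch_lightning as pl

class RandomData(Dataset):
    def __len__(self):
        return 64
    def __getitem__(self, idx):
        return torch.randn(32)

class TinyModel(pl.LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(32, 2)
    def training_step(self, batch, batch_idx):
        out = self.layer(batch)
        return torch.nn.functional.mse_loss(out, torch.zeros_like(out))
    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.1)

model, loader = TinyModel(), DataLoader(RandomData(), batch_size=8)
trainer = pl.Trainer(max_steps=10)
trainer.fit(model, loader)                   # local round on the client
trainer.save_checkpoint("round1.ckpt")       # full training state, not only the weights
# ...exchange/aggregate weights with the server here, then continue the next round...
trainer = pl.Trainer(max_steps=20, resume_from_checkpoint="round1.ckpt")
trainer.fit(model, loader)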
Is there a guide for code and test structure for new contributors? | [
"docs",
"priority: 2"
] | ❓ Questions and Help
Is there a guide for code and test structure for new contributors?
Something like an architecture document, to be able to get into the code without reading most of it, and probably making mistakes.
I want to be able to contribute when I have time, but just getting into the code will take a while, and I was hoping this learning curve could be cut by some factor.
For example,
class EvalModelTemplate(
ModelTemplateData,
ModelTemplateUtils,
TrainingStepVariations,
ValidationStepVariations,
ValidationEpochEndVariations,
TestStepVariations,
TestEpochEndVariations,
TrainDataloaderVariations,
ValDataloaderVariations,
TestDataloaderVariations,
ConfigureOptimizersPool,
LightningModule,
):
has quite a few interfaces, which do things, and I doubt one has to know everything before being able to contribute.
Other basic issues I am facing:
How to enable GPUs for tests? It seems they are disabled by design.
How to see test (using pytest) prints as they go, without waiting for them to finish?
Which basic models, dataloaders, etc. should be chosen for basic tests? What are the options?
architecture overview of PL, top down |
Errors within try/except of train(self) are misrepresented as checkpointing MisconfigurationException | [
"feature",
"help wanted"
] | 🐛 Bug
I think I found a bug where errors probably caused by users are misrepresented as a checkpointing MisconfigurationException even though checkpointing is configured correctly.
This happens when errors are raised within training (such as RuntimeErrors or CUDA OOM errors) and bubble up to the try/except block in the def train(self) function, which can be found here:
pytorch-lightning/pytorch_lightning/trainer/trainer.py, line 550 (at 65247ec):
try:
As these errors are not caught here, the code continues with the code in finally. This calls self.train_loop.on_train_end(), which proceeds to save a checkpoint. If one monitors a validation metric, such as val/accuracy, no value has been saved because the error occurred during training. Thus, in turn, a MisconfigurationException is raised by the checkpointing code, stating that the monitored metric is not found in the returned metrics.
To Reproduce
configure checkpointing to monitor a validation metric, which (by definition) must not be logged during training.
raise any error during training, such as RuntimeError but not KeyboardInterrupt (that is the only one which is caught).
Expected behavior
The error should not be glossed over by the finally statement and should be raised all the way to the top, so the user can see it and fix the underlying bug.
Environment
OS: Linux
env-type: conda
pytorch-lightning 1.1.4
pytorch 1.7.0
python 3.8.6
CUDA/cuDNN version: 11.0
GPU models and configuration: One v100, 32GB vRAM
Lazy patch:
A very hacky way of at least letting the user know about their error is to modify train such that, after except KeyboardInterrupt, all other errors are caught with except Exception as e and the error is immediately printed with print(e). Unfortunately, raising the error with raise e does not work because the finally code is executed first, raising its own MisconfigurationException.
The full code of train would look like this (changes are below the excepted KeyboardInterrupt):
def train(self):
self.run_sanity_check(self.get_model())
# set stage for logging
self.logger_connector.set_stage("train")
self.checkpoint_connector.has_trained = False
# enable train mode
model = self.get_model()
model.train()
torch.set_grad_enabled(True)
# reload data when needed
self.train_loop.reset_train_val_dataloaders(model)
# hook
self.train_loop.on_train_start()
try:
if self.train_loop.should_skip_training():
return
# run all epochs
for epoch in range(self.current_epoch, self.max_epochs):
# hook
self.train_loop.on_train_epoch_start(epoch)
with self.profiler.profile("run_training_epoch"):
# run train epoch
self.train_loop.run_training_epoch()
if self.max_steps and self.max_steps <= self.global_step:
return
# update LR schedulers
self.optimizer_connector.update_learning_rates(interval='epoch')
# early stopping
met_min_epochs = epoch >= self.min_epochs - 1
met_min_steps = self.global_step >= self.min_steps if self.min_steps else True
if self.should_stop:
if met_min_epochs and met_min_steps:
return
log.info(
'Trainer was signaled to stop but required minimum epochs'
f' ({self.min_epochs}) or minimum steps ({self.min_steps}) has'
' not been met. Training will continue...'
)
except KeyboardInterrupt:
rank_zero_warn('Detected KeyboardInterrupt, attempting graceful shutdown...')
# user could press ctrl+c many times... only shutdown once
if not self.interrupted:
self.interrupted = True
self._state = TrainerState.INTERRUPTED
self.on_keyboard_interrupt()
except Exception as e:
print(e) # better would be raising it, but that would be executed after the finally
finally:
# hook
self.train_loop.on_train_end()
Test
A test to catch this problem in the future would be to checkpoint on a validation metric, raise any error in the training (except KeyboardInterrupt, which is already caught) and assert that it bubbles up all the way; a sketch of such a test follows. |
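A sketch of such a test (standalone, with a tiny stand-in model; the class names and the exact assertion are illustrative):
import pytest
import torch
from torch.utils.data import DataLoader, Dataset
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint

class RandomData(Dataset):
    def __len__(self):
        return 8
    def __getitem__(self, idx):
        return torch.randn(4)

class FailingModel(LightningModule):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(4, 1)
    def training_step(self, batch, batch_idx):
        raise RuntimeError("boom")  # any non-KeyboardInterrupt error
    def validation_step(self, batch, batch_idx):
        self.log("val_loss", self.layer(batch).mean())
    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(), lr=0.1)

def test_training_error_is_not_masked(tmpdir):
    ckpt = ModelCheckpoint(dirpath=str(tmpdir), monitor="val_loss")
    trainer = Trainer(default_root_dir=str(tmpdir), max_epochs=1, callbacks=[ckpt])
    # the original RuntimeError should reach the caller, not a MisconfigurationException
    with pytest.raises(RuntimeError, match="boom"):
        trainer.fit(FailingModel(), DataLoader(RandomData()), DataLoader(RandomData()))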
Error while using distributed_backend = "ddp" | [
"bug",
"question",
"distributed"
] | My code works perfectly fine with distributed_backend='dp', but fails when I use distributed_backend='ddp' with the following error:
Traceback (most recent call last):
File "/scratch/nvarshn2/explore/test_ddp.py", line 89, in <module>
trainer.fit(model, train_data, val_data)
File "/home/nvarshn2/.conda/envs/pytorch_lightning_with_deepseed_env/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 510, in fit
results = self.accelerator_backend.train()
File "/home/nvarshn2/.conda/envs/pytorch_lightning_with_deepseed_env/lib/python3.6/site-packages/pytorch_lightning/accelerators/ddp_accelerator.py", line 158, in train
results = self.ddp_train(process_idx=self.task_idx, model=model)
File "/home/nvarshn2/.conda/envs/pytorch_lightning_with_deepseed_env/lib/python3.6/site-packages/pytorch_lightning/accelerators/ddp_accelerator.py", line 307, in ddp_train
results = self.train_or_test()
File "/home/nvarshn2/.conda/envs/pytorch_lightning_with_deepseed_env/lib/python3.6/site-packages/pytorch_lightning/accelerators/accelerator.py", line 74, in train_or_test
results = self.trainer.train()
File "/home/nvarshn2/.conda/envs/pytorch_lightning_with_deepseed_env/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 561, in train
self.train_loop.run_training_epoch()
File "/home/nvarshn2/.conda/envs/pytorch_lightning_with_deepseed_env/lib/python3.6/site-packages/pytorch_lightning/trainer/training_loop.py", line 541, in run_training_epoch
for batch_idx, (batch, is_last_batch) in train_dataloader:
File "/home/nvarshn2/.conda/envs/pytorch_lightning_with_deepseed_env/lib/python3.6/site-packages/pytorch_lightning/profiler/profilers.py", line 85, in profile_iterable
value = next(iterator)
File "/home/nvarshn2/.conda/envs/pytorch_lightning_with_deepseed_env/lib/python3.6/site-packages/pytorch_lightning/trainer/connectors/data_connector.py", line 45, in _with_is_last
it = iter(iterable)
File "/home/nvarshn2/.conda/envs/pytorch_lightning_with_deepseed_env/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 352, in __iter__
return self._get_iterator()
File "/home/nvarshn2/.conda/envs/pytorch_lightning_with_deepseed_env/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 294, in _get_iterator
return _MultiProcessingDataLoaderIter(self)
File "/home/nvarshn2/.conda/envs/pytorch_lightning_with_deepseed_env/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 801, in __init__
w.start()
File "/home/nvarshn2/.conda/envs/pytorch_lightning_with_deepseed_env/lib/python3.6/multiprocessing/process.py", line 105, in start
self._popen = self._Popen(self)
File "/home/nvarshn2/.conda/envs/pytorch_lightning_with_deepseed_env/lib/python3.6/multiprocessing/context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "/home/nvarshn2/.conda/envs/pytorch_lightning_with_deepseed_env/lib/python3.6/multiprocessing/context.py", line 277, in _Popen
return Popen(process_obj)
File "/home/nvarshn2/.conda/envs/pytorch_lightning_with_deepseed_env/lib/python3.6/multiprocessing/popen_fork.py", line 19, in __init__
self._launch(process_obj)
File "/home/nvarshn2/.conda/envs/pytorch_lightning_with_deepseed_env/lib/python3.6/multiprocessing/popen_fork.py", line 66, in _launch
self.pid = os.fork()
OSError: [Errno 12] Cannot allocate memory
Code:
import os
import torch
from torch.utils.data import Dataset
from pytorch_lightning import LightningModule, Trainer
class RandomDataset(Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return self.len
class BoringModel(LightningModule):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(32, 2)
def forward(self, x):
return self.layer(x)
def loss(self, batch, prediction):
# An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls
return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction))
def step(self, x):
x = self.layer(x)
out = torch.nn.functional.mse_loss(x, torch.ones_like(x))
return out
def training_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def training_step_end(self, training_step_outputs):
return training_step_outputs
def training_epoch_end(self, outputs) -> None:
torch.stack([x["loss"] for x in outputs]).mean()
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"x": loss}
def validation_epoch_end(self, outputs) -> None:
torch.stack([x['x'] for x in outputs]).mean()
def test_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"y": loss}
def test_epoch_end(self, outputs) -> None:
torch.stack([x["y"] for x in outputs]).mean()
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
if __name__ == '__main__':
train_data = torch.utils.data.DataLoader(RandomDataset(32, 64), num_workers=8)
val_data = torch.utils.data.DataLoader(RandomDataset(32, 64), num_workers=8)
model = BoringModel()
trainer = Trainer(
limit_train_batches=1,
limit_val_batches=1,
max_epochs=1,
gpus=-1,
distributed_backend="ddp",
)
trainer.fit(model, train_data, val_data)
Note: I am using 4 gpus and a single machine
What could be the reason behind this? |
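A mitigation worth trying (the cause stated here is an assumption, not a confirmed diagnosis): with ddp, each of the 4 GPU processes forks its own 8 dataloader workers, which multiplies the memory footprint of the parent process and can hit the system's memory/overcommit limit, producing exactly this Errno 12. Reducing the worker count per process is the quickest check:
train_data = torch.utils.data.DataLoader(RandomDataset(32, 64), num_workers=2)
val_data = torch.utils.data.DataLoader(RandomDataset(32, 64), num_workers=2)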
How to plot different metrics (e.g. training acc and val acc) in TensorBoard in the same window? | [
"question"
] | Hi, thanks for your great work!
I want to plot training accuracy and validation accuracy in the same TensorBoard chart, so I can spot overfitting very conveniently.
Thanks a lot!! |
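A minimal sketch of one way to do this (it assumes the default TensorBoard logger; the accuracy values below are made up for the demo): SummaryWriter.add_scalars() writes several named series under one tag, so both curves land in a single chart. Inside a LightningModule the same writer is available as self.logger.experiment.
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("lightning_logs/acc_demo")
for epoch in range(10):
    train_acc = 0.50 + 0.04 * epoch   # dummy values for illustration
    val_acc = 0.50 + 0.03 * epoch
    # one tag, two named series -> one chart with two curves
    writer.add_scalars("accuracy", {"train": train_acc, "val": val_acc}, epoch)
writer.close()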
Completely overwrite validation/test block (including the batch-level loop) | [
"feature",
"help wanted",
"won't fix",
"refactor",
"design"
] | 🚀 Feature
Motivation
In certain cases, for example, an image+text multimodal retrieval model, the training and validation/testing logic can be very different. Specifically:
In training, for each input query, we construct the corresponding batches by sampling randomly from the dataset;
In validation/testing, for each input query, we need to loop through all candidates within the dataset
I'll be happy to go into the details, but in short, for these use cases, we often need a COMPLETELY different pipeline for validation/testing, which doesn't necessarily fit into the 'loop' as we would use in typical training scenarios.
Pitch
I would propose to have a higher-level function that could:
Overwrite the entire validation/testing logic, including the batch-level loop - basically, redefines the validation/testing behaviors
Have full control over what elements go to what device (i.e., manually call .cuda() or .cpu()). This is because, for retrieval-type tasks, it is highly impractical to keep everything in VRAM at the same time; chances are we need to move things in and out of the GPUs. |
Modify ModelCheckpoint class to support additional options for cloud storages | [
"feature",
"help wanted",
"won't fix",
"checkpointing",
"priority: 2"
] | 🚀 Feature
Modify pytorch_lightning.callbacks.ModelCheckpoint class to make it possible to provide additional configuration options for cloud storage.
Motivation
We at @blue-yonder started building a prototype that uses PyTorch-Lightning as a high-level training-loop library. It works great, but we stumbled upon an issue with saving a model's checkpoint into cloud storage. It looks like, currently, the ModelCheckpoint class doesn't work properly with cloud file systems. If you look at the source code, you'll find a function called get_filesystem that doesn't delegate any storage_options to the underlying fsspec.filesystem call. Therefore, it is not possible to provide credentials for a blob store in any way.
def get_filesystem(path: Union[str, Path]):
path = str(path)
if "://" in path:
# use the fileystem from the protocol specified
# we need to delegate storage_options to this call like
# fsspec.filesystem(..., **storage_options)
return fsspec.filesystem(path.split(":", 1)[0])
else:
# use local filesystem
return fsspec.filesystem("file")
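For concreteness, a sketch of the change suggested by the comment in that snippet (illustrative, not an actual patch): forward arbitrary storage options to fsspec so credentials can reach the backend.
from pathlib import Path
from typing import Union
import fsspec

def get_filesystem(path: Union[str, Path], **storage_options):
    path = str(path)
    if "://" in path:
        # use the filesystem for the protocol specified, with credentials forwarded
        return fsspec.filesystem(path.split(":", 1)[0], **storage_options)
    # use local filesystem
    return fsspec.filesystem("file")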
In our use-case, we want to store a checkpoint on Azure Blob Store. We need to provide parameters like account_name, account_key and sas_token in order to access the blob. The same is true when resuming training from a checkpoint. It is not possible to directly use the trainer's interface, like:
pl.Trainer(resume_from_checkpoint="az://checkpoints/checkpoint.ckpt")
Because there is no way to provide credentials. In general, the checkpoint callback doesn't work well with cloud storage that requires some additional configuration options in addition to a file path.
Pitch
We created an ad-hoc implementation that inherits from ModelCheckpoint and makes it capable of working with Azure Blob Storage.
class AzureModelCheckpoint(ModelCheckpoint):
"""An alternative implementation of ``pytorch_lightning.callbacks.ModelCheckpoint`` that
saves checkpoints into Azure Blob Store.
"""
def __init__(
self, account_name: str, account_key: str, sas_token: str, **checkpoint_options
):
self.account_name = account_name
self.account_key = account_key
self.sas_token = sas_token
self._parent_method = getattr(self, "_ModelCheckpoint__init_ckpt_dir")
setattr(self, "_ModelCheckpoint__init_ckpt_dir", self.__init_ckpt_dir)
super().__init__(**checkpoint_options)
def __init_ckpt_dir(self, filepath, dirpath, filename, save_top_k):
if filename is not None and dirpath is None:
raise NotImplementedError(
"setting filename is not supported; use dirpath instead"
)
# passing "local" path to trick the original implementation and
# initialize the local filesystem instead of azure blob store in
# order to properly override the initialized object afterwards
fake_path = dirpath.replace("az://", "/")
self._parent_method(None, fake_path, filename, save_top_k)
self._fs = get_filesystem(
str(dirpath) if dirpath else "",
account_name=self.account_name,
account_key=self.account_key,
sas_token=self.sas_token,
)
self.dirpath = dirpath or None
self.filename = filename or None
def on_pretrain_routine_start(self, trainer, pl_module):
super().on_pretrain_routine_start(trainer, pl_module)
def save_function(filepath: str, weights_only: bool = False):
checkpoint = trainer.checkpoint_connector.dump_checkpoint(weights_only)
if trainer.is_global_zero:
try:
atomic_save(
checkpoint,
filepath,
account_name=self.account_name,
account_key=self.account_key,
sas_token=self.sas_token,
)
except AttributeError as err:
if LightningModule.CHECKPOINT_HYPER_PARAMS_KEY in checkpoint:
del checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]
rank_zero_warn(
"Warning, `hyper_parameters` dropped from checkpoint."
f"An attribute is not picklable {err}"
)
atomic_save(
checkpoint,
filepath,
account_name=self.account_name,
account_key=self.account_key,
sas_token=self.sas_token,
)
self.save_function = save_function
Sure enough, this is a rather hackish workaround; the same should be easily achievable by modifying the original checkpoint class instead, so that it properly forwards storage options to the fsspec calls. Also, maybe Trainer or CheckpointConnector could be changed a bit to forward the store's credentials when restoring from a checkpoint as well.
Alternatives
An alternative solution is to provide a custom implementation of checkpoint callback to save checkpoints into a cloud storage that requires authentication. To resume training from a checkpoint stored in such storage, download it locally into some temporary location and then use restore_from_checkpoint option. (This approach we use currently.) While it does what we need, this alternative isn't too convenient and requires extra effort.
Additional context
We use pytorch-lightning==1.0.8 and Python 3.6.6.
Could you please inform us if this functionality is somewhere on the roadmap? Or was this issue already raised before? There is a similar one about generic file system support, but it doesn't seem to address the credentials question. Also, we're ready to open a PR with a basic implementation if it helps.
Thank you! |
MultiTask Training on multi-gpus returns NaN and inf in model output during Validation phase | [
"bug",
"help wanted"
] | 🐛 Bug
I'm trying to train a multihead ResNet on images. Train data size is 2700 and valid data size is 600. Each batch is [n, 3, 224, 224, 320] and normalized to have values [0,1]. I've already trained a single head resnet on this dataset many times and never encountered a problem with any datapoint so I am sure the data is good.
When I train the model using 2 heads on 1 GPU for 5 epochs, everything runs smoothly. If I try 2 GPUs, once the train-phase progress bar finishes and the validation phase starts, I start getting all NaNs from the feature extractor part of my network. I put in breakpoints to check the batch data and it has no NaNs or infs anywhere. I even made my validation data the same as my train data to check whether the valid data was broken, and the feature extractor still outputs NaNs, even after just going through the same data during the train phase with no error!
I can't find any pytorch lightning examples of MultiHead training, so if those exist please point me to those. I've followed the multi-gpu training docs for pytorch lightning to a T so have no idea what's going on. I'm also setting the random seed at beginning of program using pl.seed_everything(64). I'm using DDP and 16bit precision.
PyTorch Version (e.g., 1.0): 1.7.1+cu101
OS (e.g., Linux): Linux
How you installed PyTorch (conda, pip, source): pip
Build command you used (if compiling from source):
Python version: 3.7.6
CUDA/cuDNN version: 10.1
GPU models and configuration: 4 V100 on google cloud VM
Any other relevant information: 32 cores, 128GB mem
Additional context
Running ResNet101 on 3D images. Trying to train 3 classification heads (each head only looks at certain classes). Implemented my own MultiHeadLoss wrapper to calculate total loss from each head:
class MultiHeadLoss(nn.Module):
"""Sum of all losses."""
def __init__(self, model_heads):
super().__init__()
self.model_heads = model_heads
def forward(self, outputs_targets_dict):
losses = []
for head in self.model_heads:
if head.name in outputs_targets_dict:
output = outputs_targets_dict[head.name]["output"]
target = outputs_targets_dict[head.name]["target"]
head_loss = head.loss_fn(output, target)
losses.append(head_loss)
return sum(losses)
My network module looks like this:
class ModelTemplate(pl.LightningModule):
def __init__(self, model_body=gin.REQUIRED, model_heads=gin.REQUIRED):
super().__init__()
self.model_body = model_body
self.model_heads = nn.ModuleList()
[self.model_heads.append(head(self.model_body.out_features)) for head in model_heads]
def forward(self, x):
features = self.model_body.forward(x)
head_out = [head(features) for head in self.model_heads]
return head_out
Each head looks like this:
class Classification(pl.LightningModule):
def __init__(self, in_features, task_labels, name="classification"):
super().__init__()
self.register_buffer("task_labels", torch.Tensor(task_labels))
self.name = name
self.fc = nn.Linear(in_features, len(task_labels))
self.loss_fn = WeightedFocalLoss(criterion=nn.CrossEntropyLoss(reduction="none"))
self.activation_fn = nn.Softmax(dim=1)
def forward(self, x):
logits = self.fc(x)
return logits
Run the train job like this:
trainer = Trainer(
default_root_dir=results_dir,
callbacks=callbacks,
accelerator="ddp",
gpus=gin.query_parameter("Trainer.gpus"),
max_epochs=gin.query_parameter("Trainer.max_epochs"),
precision=16,
num_sanity_val_steps=False,
logger=mlf_logger,
profiler="simple",
)
trainer.fit(model=train_module, datamodule=datamodule) |
Hanging with TPUs on GCE VM | [
"bug",
"help wanted",
"priority: 0",
"accelerator: tpu"
] | 🐛 Bug
Seems like training of any model hangs indefinitely when running on a Google Compute Engine VM.
Mainly I've been trying this example model but I've also tried the LitAutoEncoder from this page.
Note that all unit tests pass, including the 8-core model training.
There seem to be 2 key areas that trigger a hang:
Eval loop starting up. If I delay the eval loop with check_val_every_n_epoch=50 + max_epochs=60, the model will train all 50 epochs. But once the eval loop starts up, it will typically hang before finishing the 1st eval loop.
Train loop finishing. If I avoid the eval loop (e.g. check_val_every_n_epoch=100 + max_epochs=50), then the model will finish all 50 training epochs and then the process will hang.
There seems to be something wrong with multiprocesses starting or stopping. Since this unit test training works but real training hangs, I'm wondering if there is something important about @pl_multi_process_test that allows tests to succeed. Maybe we need to add that functionality to the more general TPU training?
Let me know if there are other things I should try.
Description
Typically the hang looks something like this:
Epoch 0:  94%|█████████▍| 45/48 [00:53<00:03, 1.19s/it, loss=0.947, v_num=2, train_loss=0.524]
Validating:  75%|███████▌  | 12/16 [00:18<00:03, 1.13it/s]
(hangs...)
In other words, it gets stuck midway through an epoch.
When I kill the process I see:
Epoch 0:  88%|████████▊ | 42/48 [00:18<00:02, 2.26it/s, loss=1.15, v_num=12, train_loss=0.185]
Validating:  94%|█████████▍| 15/16 [00:15<00:00, 1.99it/s]
^CTraceback (most recent call last):
File "computer_vision_fine_tuning.py", line 455, in <module>
main(get_args())
File "computer_vision_fine_tuning.py", line 437, in main
trainer.fit(model)
File "/anaconda3/envs/torch-xla-1.7-1/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 510, in fit
results = self.accelerator_backend.train()
File "/anaconda3/envs/torch-xla-1.7-1/lib/python3.6/site-packages/pytorch_lightning/accelerators/tpu_accelerator.py", line 114, in train
start_method=self.start_method
File "/anaconda3/envs/torch-xla-1.7-1/lib/python3.6/site-packages/torch_xla/distributed/xla_multiprocessing.py", line 395, in spawn
start_method=start_method)
File "/anaconda3/envs/torch-xla-1.7-1/lib/python3.6/site-packages/torch/multiprocessing/spawn.py", line 157, in start_processes
while not context.join():
File "/anaconda3/envs/torch-xla-1.7-1/lib/python3.6/site-packages/torch/multiprocessing/spawn.py", line 77, in join
timeout=timeout,
File "/anaconda3/envs/torch-xla-1.7-1/lib/python3.6/multiprocessing/connection.py", line 911, in wait
ready = selector.select(timeout)
File "/anaconda3/envs/torch-xla-1.7-1/lib/python3.6/selectors.py", line 376, in select
fd_event_list = self._poll.poll(timeout)
KeyboardInterrupt
Experiments so far:
torch-xla-nightly + pip install pytorch-lightning: Hangs
torch-xla-1.7 + pip install pytorch-lightning: Hangs
torch-xla-1.7 + pip install 'pytorch-lightning==1.0.0': Hangs
torch-xla-1.7 + pip install 'pytorch-lightning==0.9.0': Crashes using same model as above (i.e. python computer_vision_fine_tuning.py). I also tried this start model from 0.9.0 documentation and it also crashes with Value out of range (expected to be in range of [-1, 0], but got 1)
torch-xla-1.6 + pip install pytorch-lightning: Hangs
pytorch nightly docker image + pip install pytorch-lightning: Hangs
To Reproduce
Make a GCE VM using the PyTorch/XLA image
conda activate torch-xla-1.7
pip install pytorch-lightning
git clone https://github.com/PyTorchLightning/pytorch-lightning.git
cd pytorch-lightning/pl_examples/domain_templates
vim computer_vision_fine_tuning.py
add tpu_cores=8 to Trainer and remove any GPU args
make a TPU
export TPU_IP_ADDRESS=<the TPU's IP>
export XRT_TPU_CONFIG="tpu_worker;0;$TPU_IP_ADDRESS:8470"
python computer_vision_fine_tuning.py
Expected behavior
training / eval completes and the process exits
Please reproduce using the BoringModel
I think I can't make my colab public due to work restrictions. The base BoringModel fails for me anyway on the "Data" cell:
!pip install wandb
from pl_bolts.datasets import RandomDataset, DummyDataset, RandomDictDataset
ImportError Traceback (most recent call last)
<ipython-input-3-c3916d211d14> in <module>()
1 # some other options for random data
2 get_ipython().system('pip install wandb')
----> 3 from pl_bolts.datasets import RandomDataset, DummyDataset, RandomDictDataset
4 frames
/usr/local/lib/python3.6/dist-packages/pl_bolts/datasets/imagenet_dataset.py in <module>()
10 import numpy as np
11 import torch
---> 12 from torch._six import PY3
13
14 from pl_bolts.utils import _TORCHVISION_AVAILABLE
ImportError: cannot import name 'PY3'
Environment
You can get the script and run it with:
wget https://raw.githubusercontent.com/PyTorchLightning/pytorch-lightning/master/tests/collect_env_details.py
# For security purposes, please check the contents of collect_env_details.py before running it.
python collect_env_details.py
CUDA:
- GPU:
- available: False
- version: 10.2
Packages:
- numpy: 1.19.2
- pyTorch_debug: True
- pyTorch_version: 1.7.0
- pytorch-lightning: 1.1.7
- tqdm: 4.56.0
System:
- OS: Linux
- architecture:
- 64bit
-
- processor:
- python: 3.6.10
- version: #1 SMP Debian 4.9.246-2 (2020-12-17) |
Multigpu with different RAM capabilities | [
"feature",
"help wanted"
] | I couldn't find a way to use more than one GPU when the GPUs have different RAM capacities (it fails when the smallest GPU reaches its capacity). Is there a way to solve this?
Thanks! |
failing Profiler with PT 1.8 | [
"bug",
"help wanted"
] | 🐛 Bug
#5840 (comment)
Please reproduce using the BoringModel
There is some incompatibility, to be fixed in another PR:
FAILED tests/metrics/test_composition.py::test_metrics_mod[2.0-expected_result2]
FAILED tests/models/test_horovod.py::test_result_reduce_horovod - RuntimeErro...
FAILED tests/trainer/test_trainer.py::test_pytorch_profiler_describe - Assert...
FAILED tests/trainer/test_trainer.py::test_pytorch_profiler_value_errors - As...
FAILED tests/trainer/test_trainer.py::test_pytorch_profiler_nested - Assertio...
Additional context |
How can I apply many data batches to a function on GPU | [
"question"
] | How can I apply many data batches to a function on the GPU using PL? I would like to do something similar to SPMD in MATLAB.
As an example, assume that I have 5 batches of 64 2D points. I can generate them with:
p_batch = torch.randn(5, 64, 2)  # five batches of points, 64 points in each batch, two dimensions
I want to apply the following function to each set of the 64 points.
def MyFunction(p, r, Orig_Window, Area, h, pi):  # p.shape is (64, 2); Orig_Window and translate() come from a 2D geometry library (presumably shapely)
correction=torch.empty(p.shape[0],p.shape[0])
for i in range(p.shape[0]):
for j in range(p.shape[0]):
other_polygon =translate(Orig_Window, xoff=p[i,0]-p[j,0], yoff=p[i,1]-p[j,1])
correction[i,j] = Area/Orig_Window.intersection(other_polygon).area
d=torch.cdist(p,p)+10000*torch.eye(p.shape[0])
Save=torch.empty(r.shape[0])
for R in range(r.shape[0]):
Logic=(d<=r[R])
Save[R]=(correction*Logic).sum()
...
return ...
Is there any way I can process all five point clouds (or batches) in parallel using one GPU? |
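A partial answer as a sketch: the pairwise-distance part already vectorizes over the batch dimension, because torch.cdist broadcasts over a leading batch axis, so all five point clouds can be handled in one GPU call (the polygon-based window correction is CPU-bound and is not covered by this):
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
p_batch = torch.randn(5, 64, 2, device=device)
d = torch.cdist(p_batch, p_batch)              # shape (5, 64, 64), one distance matrix per batch
d = d + 10000 * torch.eye(64, device=device)   # mask self-distances, as in MyFunction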
training with ddp get replicas mismatch error | [
"bug",
"help wanted",
"won't fix",
"distributed",
"priority: 1"
] | Hi,
I've been getting this replicas error with ddp training.
setup: windows 10, torch 1.7.1, pytorch-lightning 1.1.7, on a 3 gpus machine.
The model training was working well with ddp on another machine with 2 GPUs (same setup w/ win10, torch 1.7.1 and pl 1.1.7).
The code crashed after printing the following error message:
self.reducer = dist.Reducer(
RuntimeError: replicas[0][0] in this process with sizes [12, 6] appears not to match sizes of the same param in process 0.
(Note: the sizes [12, 6] in error message changes in different run, could be any numbers, such as sizes[128, 45], etc.)
I then tried setting accelerator='ddp_spawn', which makes the replicas error disappear. But, just as the documentation warns, ddp_spawn is very unstable, e.g. smaller batches, lower GPU utilization, longer training time, etc., and training can hardly get past 7-8 epochs because it always mysteriously crashes with a memory error.
So I still need to figure out how to revert back to ddp mode.
asked the Pytorch forum, their answer is as follow:
"This happens if the model parameters are not the same across all replicas in DDP. Have you tried printing the sizes of all the params in the model from each rank (using model.parameters())? This would be the first thing to verify mismatched sizes."
and I did print the number of model parameters in each process; they are the same. (The printing is after model initialization, but before the Trainer initialization, which then initializes the underlying ddp, which is where the error happened.)
I understand that, in ddp mode, the program is restarted in each process, while in ddp_spawn mode it carries on in the subprocess -- could these different multiprocessing approaches cause the model or the model parameters copied to each GPU to be different?
Below is how the Lightning Trainer is initialized and then fit is called (very standard steps):
self.trainer = pl.Trainer(
max_epochs=configs["max_epochs"],
gpus=[0, 1, 3],
accelerator="ddp",
weights_summary="top",
gradient_clip_val=0.1,
limit_train_batches=30,
callbacks=[lr_logger, early_stop_callback, checkpoint_callback],
)
model = ...
self.trainer.fit(
model,
train_dataloader=self.train_dataloader,
val_dataloaders=self.val_dataloader,
)
Please help! |
Access dataset directly in validation_epoch_end | [
"feature",
"help wanted"
] | Feature
Motivation / Pitch
Not sure if this is a duplicate, but:
I would propose to allow users to add additional arguments to be passed into functions such as validation_epoch_end().
One example: during validation, we might need to fetch some additional information from the dataset (e.g. len(dataset)), and currently this is a bit hard to achieve without passing the dataset into the model during init.
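A minimal sketch of that workaround (names are made up for illustration):
import pytorch_lightning as pl

class LitModel(pl.LightningModule):
    def __init__(self, dataset):
        super().__init__()
        # the dataset is kept on the module only so that the epoch-end hook can query it
        self.dataset = dataset

    def validation_epoch_end(self, outputs):
        # e.g. log something that depends on the dataset size
        self.log("val_dataset_size", float(len(self.dataset)))
Allowing extra arguments (or some other way to reach the dataset) in validation_epoch_end would remove the need for this coupling. |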
Calling fit multiple times fails to call teardown | [
"help wanted",
"question",
"waiting on author"
] | When calling fit multiple times, teardown(self, stage) and on_train_end(self) are called multiple times
Please reproduce using the BoringModel
If you create a new teacher instance everything works great, but if you don't, it fails.
Here is the colab to reproduce the error:
https://colab.research.google.com/drive/1G6korxcAO12wXLwhCMxTHkG3-bjMnfsc?usp=sharing
PyTorch Version (e.g., 1.0): 1.7
OS (e.g., Linux): Linux
How you installed PyTorch (conda, pip, source): conda
Build command you used (if compiling from source):
Python version: 3.7.6
CUDA/cuDNN version: CUDA 10.2 Driver Version: 440.100
GPU models and configuration: 2080Ti and 1080Ti also 4x2080Ti DDP |
BaseFinetuning Callback freeze_before_training is never called | [
"bug",
"help wanted"
] | Bug
I think there might be a bug in the implementation of the BaseFinetuning callback, and in particular in the following lines:
pytorch-lightning/pytorch_lightning/callbacks/finetuning.py
Lines 236 to 237
in
a028171
def on_before_accelerator_backend_setup(self, trainer, pl_module):
self.freeze_before_training(pl_module)
freeze_before_training is meant to be overridden by child classes so that the freezing logic is implemented before the start of training. However, the call to this method happens using the on_before_accelerator_backend_setup hook. I am not sure where this hook has come from, as I was unable to find it in the latest version of the documentation:
Testing the callback using the MilestonesFinetuningCallback from the related example resulted in the layers not being frozen before training.
To make this work, I had to change the callback hook from which freeze_before_training is called to on_pretrain_routine_start.
Apologies, I wasn't able to provide a piece of code to reproduce this issue.
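For illustration, a rough sketch of the change described above (hypothetical, not an actual patch):
from pytorch_lightning.callbacks import Callback

class BaseFinetuning(Callback):
    def on_pretrain_routine_start(self, trainer, pl_module):
        # run the user-overridable freezing logic right before training starts,
        # instead of relying on on_before_accelerator_backend_setup
        self.freeze_before_training(pl_module)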
On a separate note, the MilestonesFinetuningCallback in the example uses wrong signatures:
pytorch-lightning/pl_examples/domain_templates/computer_vision_fine_tuning.py
Line 72
in
a028171
self.freeze(module=pl_module.feature_extractor, train_bn=self.train_bn)
here module should be modules to match the signature of the freeze method of the parent class:
pytorch-lightning/pytorch_lightning/callbacks/finetuning.py
Line 147
in
a028171
def freeze(modules: Union[Module, Iterable[Union[Module, Iterable]]], train_bn: bool = True) -> None:
Same applies to unfreeze_and_add_param_group:
pytorch-lightning/pl_examples/domain_templates/computer_vision_fine_tuning.py
Lines 77 to 79
in
a028171
self.unfreeze_and_add_param_group(
module=pl_module.feature_extractor[-5:], optimizer=optimizer, train_bn=self.train_bn
)
and
pytorch-lightning/pl_examples/domain_templates/computer_vision_fine_tuning.py
Lines 83 to 84
in
a028171
self.unfreeze_and_add_param_group(
module=pl_module.feature_extractor[:-5], optimizer=optimizer, train_bn=self.train_bn
Compare to:
pytorch-lightning/pytorch_lightning/callbacks/finetuning.py
Lines 197 to 198
in
a028171
def unfreeze_and_add_param_group(
modules: Union[Module, Iterable[Union[Module, Iterable]]],
Please reproduce using the BoringModel
The bug is in branch release/1.2-dev, so I wouldn't be able to reproduce it using a pip installation.
Environment
PyTorch Version (e.g., 1.0): 1.6.0+cu101
OS (e.g., Linux): Linux
How you installed PyTorch (conda, pip, source): pip
Build command you used (if compiling from source):
Python version: 3.7.7
CUDA/cuDNN version: 10.1
GPU models and configuration: 4 X GeForce GTX 1080 Ti
Any other relevant information:
Additional context
Happy to submit a PR to fix these issues. |
Unexpected behaviour of hooking inside callback | [
"bug",
"help wanted",
"priority: 1"
] | Bug
The on_epoch_end hook in a callback is called between the train and validation epochs.
To Reproduce
Use the following callback:
class BuggyCallback(Callback):
def on_epoch_end(self, trainer: Trainer, pl_module: LightningModule):
print("I'm called")
Expected behavior
The line with I'm called should appear after the train and validation cycles. Instead, that line is printed right after the training cycle, before validation.
Environment
PyTorch Version (e.g., 1.0): 1.7.1
OS (e.g., Linux): MacOS BigSur
How you installed PyTorch (conda, pip, source): conda
Build command you used (if compiling from source): -
Python version: 3.8.5
CUDA/cuDNN version: CPU version
GPU models and configuration: -
Any other relevant information: -
Additional context
I checked manually that it works properly in version 1.1.7. Thus, the bug appears only in the latest version of Lightning.
Make reduce_on_plateau more flexible | [
"feature",
"help wanted",
"won't fix"
] | Feature
I want the freedom to use a custom reduce-on-plateau-style scheduler.
Motivation
Right now, only torch.optim.lr_scheduler.ReduceLROnPlateau is supported out of the box. See here. This means even if I specify {'reduce_on_plateau': True} in:
def configure_optimizers(self):
optimizer = TrainConf.optimizer(self.parameters(), **TrainConf.optimizer_params)
scheduler = TrainConf.scheduler(optimizer, **TrainConf.scheduler_params)
return {
'optimizer': optimizer,
'lr_scheduler': {
'scheduler': scheduler,
'interval': TrainConf.scheduler_interval,
'monitor': 'val_lwlrap',
'reduce_on_plateau': isinstance(scheduler,
(WarmupReduceLROnPlateau, # this is my custom class
ReduceLROnPlateau)),
}
}
this will be overridden downstream.
Pitch
At the very least, TrainerOptimizersMixin.configure_schedulers should check whether the key was provided and, if so, not overwrite it. So I propose changing
scheduler['reduce_on_plateau'] = isinstance(
scheduler['scheduler'], optim.lr_scheduler.ReduceLROnPlateau
)
to
if 'reduce_on_plateau' not in scheduler:
scheduler['reduce_on_plateau'] = isinstance(
scheduler['scheduler'], optim.lr_scheduler.ReduceLROnPlateau
)
But in the long run I'd suggest we allow the user to override a method of Trainer like this:
def step_scheduler(self):
...
That way we could access the variables needed and provide our own signature to the step method of the scheduler class.
Additional context
My particular custom scheduler looks like
class WarmupReduceLROnPlateau(ReduceLROnPlateau):
"""
Subclassing torch.optim.lr_scheduler.ReduceLROnPlateau
added warmup parameters
"""
def __init__(self, optimizer, mode='min', factor=0.1, patience=10,
threshold=1e-4, threshold_mode='rel', cooldown=0,
min_lr=0, eps=1e-8, warmup_itrs=0, warmup_type='lin',
start_lr=1e-16, verbose=False):
super().__init__(optimizer, mode=mode, factor=factor, patience=patience,
threshold=threshold, threshold_mode=threshold_mode,
cooldown=cooldown, min_lr=min_lr, eps=eps, verbose=verbose)
self.warmup_itrs = warmup_itrs
self.warmup_type = warmup_type
self.start_lr = start_lr
self.default_lrs = []
self.itr = 0
for param_group in optimizer.param_groups:
self.default_lrs.append(param_group['lr'])
def step(self, metrics):
    if self.itr < self.warmup_itrs:
        for i, param_group in enumerate(self.optimizer.param_groups):
            if self.warmup_type == 'exp':
                new_lr = self.start_lr * \
                    (self.default_lrs[i] / self.start_lr) ** (self.itr / self.warmup_itrs)
            if self.warmup_type == 'lin':
                new_lr = self.start_lr + \
                    (self.default_lrs[i] - self.start_lr) * (self.itr / self.warmup_itrs)
            param_group['lr'] = new_lr
    elif self.itr == self.warmup_itrs:
        # restore the base learning rates once warmup is over
        for i, param_group in enumerate(self.optimizer.param_groups):
            param_group['lr'] = self.default_lrs[i]
    else:
        super().step(metrics)
    self.itr += 1
If this gets any traction I'd love to contribute a PR (would be my first to a widely used repo!) |
ClearML logger (formerly Allegro Trains AI) | [
"feature",
"help wanted",
"won't fix",
"logger"
] | Feature
Motivation
ClearML is an open-source, self-hosted service for MLOps, logging and experiment management.
https://github.com/allegroai/clearml/
Previously known as Allegro Trains AI.
Their repository has more than 2k stars on GitHub and a very active community.
Pitch
I would like to have an implementation for pytorch_lightning.loggers.ClearMLLogger.
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import ClearMLLogger
clearml_logger = ClearMLLogger(
project_name="default",
task_name="train"
)
trainer = Trainer(logger=clearml_logger)
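A rough skeleton of what such a logger could look like (only a sketch: it assumes the clearml Task/Logger API and Lightning's LightningLoggerBase interface):
from clearml import Task
from pytorch_lightning.loggers.base import LightningLoggerBase
from pytorch_lightning.utilities import rank_zero_only

class ClearMLLogger(LightningLoggerBase):
    def __init__(self, project_name: str, task_name: str):
        super().__init__()
        self._task = Task.init(project_name=project_name, task_name=task_name)

    @property
    def experiment(self):
        return self._task

    @property
    def name(self) -> str:
        return self._task.name

    @property
    def version(self) -> str:
        return self._task.id

    @rank_zero_only
    def log_hyperparams(self, params):
        self._task.connect(self._convert_params(params))

    @rank_zero_only
    def log_metrics(self, metrics, step=None):
        logger = self._task.get_logger()
        for key, value in metrics.items():
            # ClearML plots scalars as title/series pairs
            logger.report_scalar(title=key, series=key, value=value, iteration=step or 0)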
Alternatives
Additional context |
ModelCheckpoint doesn't delete checkpoints from s3 storage using Tensorboard Logger | [
"bug",
"help wanted",
"won't fix",
"waiting on author",
"priority: 1"
] | Bug
When using ModelCheckpoint with TensorBoardLogger and an S3 bucket URL path, the model checkpoints are correctly uploaded into the cloud directory set by the logger, but past epochs' versions are not deleted. If, instead, I use ModelCheckpoint directly with dirpath=<s3-url> while saving TensorBoard logs locally, then the checkpoints are both uploaded and deleted correctly on my S3 bucket.
Expected behavior
ModelCheckpoint should delete past checkpoints on cloud storage also when using the TensorboardLogger
Environment
PyTorch Version: 1.7.1
OS: Ubuntu 20.04
Python version: 3.8 |
Learning rate loaded as Namespace from argparse | [
"help wanted",
"question"
] | Bug
When I load the model with arguments from ArgumentParser, I receive a strange output with learning_rate being treated as an argparse Namespace.
To Reproduce
Here is my basic class that saves hyperparameters.
from argparse import ArgumentParser
import pytorch_lightning as pl
class Trial(pl.LightningModule):
"""Pytorch lightning based class which provides training logic."""
def __init__(self, learning_rate: float = 1e-5, adam_epsilon: float = 1e-8, warmup_steps: int = 0,
min_epochs: int = 5, num_encoder_layers_to_train: int = 2, ):
super(Trial, self).__init__()
self.save_hyperparameters()
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument("--learning_rate", default=1e-5, type=float)
parser.add_argument("--adam_epsilon", default=1e-8, type=float)
parser.add_argument("--warmup_steps", default=0, type=int)
parser.add_argument("--num_encoder_layers_to_train", default=2, type=int)
return parser
if I call:
model = Trial()
the output is like that:
"adam_epsilon": 1e-08
"learning_rate": 1e-05
"min_epochs": 5
"num_encoder_layers_to_train": 2
"warmup_steps": 0
However, after instantiating the model in the following way:
def parse_args(args=None):
parser = ArgumentParser()
parser = Trial.add_model_specific_args(parser)
opt, unknown = parser.parse_known_args(args)
return opt
def setup(args):
model = Trial(args)
return model
args = parse_args()
model = setup(args)
I receive:
"adam_epsilon": 1e-08
"learning_rate": Namespace(adam_epsilon=1e-08, learning_rate=1e-05, num_encoder_layers_to_train=2, warmup_steps=0)
"min_epochs": 5
"num_encoder_layers_to_train": 2
"warmup_steps": 0
Expected behavior
The output from argparse should be the same as the basic one, with no parameter being treated as a Namespace.
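For reference, a minimal sketch of unpacking the Namespace so that each field lands on its own keyword argument (this produces the expected output above):
def setup(args):
    # Trial(args) passes the whole Namespace as `learning_rate`;
    # unpacking maps each field onto the matching parameter instead
    model = Trial(**vars(args))
    return model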
Environment
Checked on Colab, link:
https://colab.research.google.com/drive/1txQKsOiTDNpyVr8Ih_NnGk4S9ZR_0M13?usp=sharing
Thank you for your response. |
init_optimizers(self, model) | [
"bug",
"help wanted"
] | Bug
pytorch_lightning/trainer/optimizers.py in init_optimizers(self, model) fails to load the monitor value when configure_optimizers() returns a tuple with multiple optimizers in dictionary form, each with its own LR scheduler and monitor value. The bug seems to occur in the elif clause in line 56, where no attempt is made to extract a monitor value, so the monitor value is always None in this case.
Please reproduce using the SomeModel class
Example class below:
class SomeModel(TrainingBehavior):
"""
Regular ResNet model wrapper
"""
def __init__(self, hparams={}, num_outputs=11, input_size=sample_input_size, stacks=5):
super(SomeModel, self).__init__()
self.classifier = LegitClassifier()
self.descriminator = SomeDescriminator()
.........
def configure_optimizers(self):
optimizer1 = optim.Adam(self.classifier.parameters())
optimizer2 = optim.Adam(self.descriminator.parameters())
scheduler1 = optim.lr_scheduler.ReduceLROnPlateau(optimizer1, verbose=True, patience=4, factor=0.1)
scheduler2 = optim.lr_scheduler.ReduceLROnPlateau(optimizer2, verbose=True, patience=4, factor=0.1)
return (
{'optimizer': optimizer1, 'lr_scheduler': scheduler1, 'monitor': 'val_loss'},
{'optimizer': optimizer2, 'lr_scheduler': scheduler2, 'monitor': 'val_loss'},
)
Expected behavior
You get an exception when trainer attempts to load model.
trainer = pl.Trainer()
trainer.fit(model)
**---------------------------------------------------------------------------
MisconfigurationException Traceback (most recent call last)
in
53 limit_test_batches=0.01)
54
---> 55 trainer.fit(model)
56 trainer.test(model)
57 model_metrics.append(trainer.progress_bar_metrics)
/net/10.57.1.2/vol/homes/martinezniev1/complexcode/env/lib64/python3.6/site-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
492 # ----------------------------
493 self.accelerator_backend = self.accelerator_connector.select_accelerator()
--> 494 self.accelerator_backend.setup(model)
495
496 # ----------------------------
/net/10.57.1.2/vol/homes/martinezniev1/complexcode/env/lib64/python3.6/site-packages/pytorch_lightning/accelerators/dp_accelerator.py in setup(self, model)
53 # CHOOSE OPTIMIZER
54 # allow for lr schedulers as well
---> 55 self.setup_optimizers(model)
56
57 # init torch data parallel
/net/10.57.1.2/vol/homes/martinezniev1/complexcode/env/lib64/python3.6/site-packages/pytorch_lightning/accelerators/accelerator.py in setup_optimizers(self, model)
148 return
149
--> 150 optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)
151 self.trainer.optimizers = optimizers
152 self.trainer.lr_schedulers = lr_schedulers
/net/10.57.1.2/vol/homes/martinezniev1/complexcode/env/lib64/python3.6/site-packages/pytorch_lightning/trainer/optimizers.py in init_optimizers(self, model)
78 )
79
---> 80 lr_schedulers = self.configure_schedulers(lr_schedulers, monitor=monitor)
81 _validate_scheduler_optimizer(optimizers, lr_schedulers)
82
/net/10.57.1.2/vol/homes/martinezniev1/complexcode/env/lib64/python3.6/site-packages/pytorch_lightning/trainer/optimizers.py in configure_schedulers(self, schedulers, monitor)
130 if monitor is None:
131 raise MisconfigurationException(
--> 132 'configure_optimizers must include a monitor when a ReduceLROnPlateau scheduler is used.'
133 ' For example: {"optimizer": optimizer, "lr_scheduler": scheduler, "monitor": "metric_to_track"}'
134 )
MisconfigurationException: configure_optimizers must include a monitor when a ReduceLROnPlateau scheduler is used. For example: {"optimizer": optimizer, "lr_scheduler": scheduler, "monitor": "metric_to_track"}**
Environment
PyTorch Version (e.g., 1.0): 1.7
OS (e.g., Linux): Linux
Python version: 3.7
CUDA/cuDNN version: 10.2 |
The header link is broken | [
"docs"
] | The link below is broken:
[pytorch-lightning page] -> header menu (...) -> Get Started
Currently:
https://pytorch-lightning.readthedocs.io/en/latest/introduction_guide.html
Fix:
https://pytorch-lightning.readthedocs.io/en/latest/starter/introduction_guide.html |
Training loop gets filled with unwanted logs. | [
"bug",
"help wanted"
] | Bug
During the training process, there are lots of unwanted logs printed about different function runtimes. I am using the latest pytorch_lightning version. |
support len(datamodule) | [
"feature",
"help wanted",
"good first issue",
"data handling"
] | Let's add support for len(datamodule) so we can get the following:
len(datamodule)
# prints:
# train_dataloader_1: 200 samples
# train_dataloader_2: 500 samples
# val_dataloader_1: 200 samples
# val_dataloader_2: 500 samples
# test_dataloader_1: 200 samples
# test_dataloader_2: 500 samples
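A hypothetical sketch of what the implementation could look like (not actual Lightning code; it assumes every dataloader wraps a sized dataset, and note that the per-dataloader breakdown above would need a separate summary/__repr__ method, since __len__ must return an int):
class MyDataModule(LightningDataModule):
    def __len__(self) -> int:
        total = 0
        for loaders in (self.train_dataloader(), self.val_dataloader(), self.test_dataloader()):
            if not isinstance(loaders, (list, tuple)):
                loaders = [loaders]
            # sum the number of samples over every dataloader of every split
            total += sum(len(loader.dataset) for loader in loaders)
        return total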
cc @edenafek |
Add ipython kernel detection and give warning when accelerator = "ddp" | [
"feature",
"help wanted"
] | Feature
Add ipython kernel detection and give warning when accelerator = "ddp"
Motivation
When users try to use ddp as the accelerator in Jupyter Notebook or Jupyter Lab, the trainer gets stuck forever with no hints about the cause. So, to better inform developers, it would be great to detect whether the code is running in an IPython kernel.
Pitch
When users initialize a trainer with Trainer(accelerator='ddp') in an IPython kernel, give a warning or exception. Something like "ddp will not work in an IPython kernel" should suffice.
Additional context
Please see this to implement it. I can do a PR, but I am not sure if it is appropriate to place code for detection in Trainer.__init__()
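A minimal sketch of the detection itself (the surrounding warning logic is left out):
def _running_in_ipython_kernel() -> bool:
    try:
        from IPython import get_ipython
    except ImportError:
        return False
    shell = get_ipython()
    # ZMQInteractiveShell is the shell class used by Jupyter Notebook/Lab kernels
    return shell is not None and shell.__class__.__name__ == "ZMQInteractiveShell"
Whether this check belongs in Trainer.__init__() or somewhere in the accelerator selection is exactly the open question above. |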
How to use a callback to run a periodical process inside the training loop? | [
"question",
"waiting on author"
] | I want to modify a certain function during the training loop (let's say every 10,000 global training steps). I am using multiple GPUs. Currently, I have implemented it inside training_step. While the update is happening I want to make sure the other DDP processes work. In the following code, I make sure the update is done only if the global_rank is zero.
def training_step(self, batch, batch_idx) -> Dict:
if (not batch_idx==0 and batch_idx%10000==0) and (self.trainer.global_rank==0):
#do the update
Sometimes this process crashes because, while one GPU is updating the function, the others are trying to use it.
So is there a callback that I can use to execute my command, where I can make sure the other processes wait until this step is finished? Something like on_end_of_global_training_step?
One more question: how can I get the global_step count inside the training loop?
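A possible sketch using a callback (hook names and signatures vary a bit across Lightning versions, so treat this as an outline rather than the exact API):
import torch
from pytorch_lightning.callbacks import Callback

class PeriodicUpdate(Callback):
    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        if trainer.global_step > 0 and trainer.global_step % 10000 == 0:
            if trainer.global_rank == 0:
                pass  # do the update on rank 0 only
            if torch.distributed.is_available() and torch.distributed.is_initialized():
                # make every DDP process wait until rank 0 has finished the update
                torch.distributed.barrier()
As for the second question: the counter is available as trainer.global_step from a callback, or as self.global_step inside the LightningModule. |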
use of pdb.set_trace() inside LightningModule | [
"question",
"won't fix"
] | So, being new to Lightning, I have a rather basic question that I couldn't quite find the answer to in the docs. In regular PyTorch I can use Python's pdb module to pause the execution at any point and check tensor dimensions/values, etc.:
from pdb import set_trace
set_trace()
However, I don't seem to be able to do so with Lightning: whether in forward(), training_step(), or any other method in the LightningModule, I cannot pause the execution (obviously without having to go into Lightning's internal modules). Is this supposed to be impossible, or what am I missing? |
Add `ignore` param to `save_hyperparameters` | [
"feature"
] | Enhancement
Add ignore param to self.save_hyperparameters(). It would support types list and str.
Motivation
I believe users should be given an option to explicitly specify which parameters should not be saved.
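A sketch of the proposed usage (hypothetical API; the names are made up for illustration):
class LitModel(pl.LightningModule):
    def __init__(self, lr: float, backbone: torch.nn.Module):
        super().__init__()
        # keep `lr` in hparams, but skip the large, non-serialisable backbone
        self.save_hyperparameters(ignore="backbone")   # or ignore=["backbone", "something_else"]
        self.backbone = backbone
This keeps the saved hparams small and avoids trying to serialise objects that are not really hyperparameters. |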
Mypy complaining about `transfer_batch_to_device` as abstract method | [
"bug",
"help wanted"
] | Bug
In my projects, mypy complains if I don't override transfer_batch_to_device in a DataModule. This is probably due to a design choice, since transfer_batch_to_device is an abstract method. However, it's clear that overriding the method is not mandatory, as the same behavior is obtained anyway (probably somewhere else in the LightningModule and trainer code, I couldn't find it), and therefore I think it shouldn't be an abstractmethod.
I believe we can design this a little better, by making it clear that it's not mandatory to override transfer_batch_to_device.
To Reproduce
Simply implement a subclass of DataModule and use mypy to check your code
Expected behavior
I expect to
Environment
PyTorch Version (e.g., 1.7): 1.7
OS (e.g., Linux): LINUX
How you installed PyTorch (conda, pip, source): pip
Build command you used (if compiling from source):
Python version: 3.8.4
CUDA/cuDNN version: 11.0
GPU models and configuration:
Any other relevant information:
Additional context |
module 'pytorch_lightning.metrics.classification' has no attribute 'AUROC' | [
"bug",
"help wanted",
"working as intended"
] | Bug
module 'pytorch_lightning.metrics.classification' has no attribute 'AUROC' when I try to use pytorch_lightning.metrics.classification.AUROC
To Reproduce
import pytorch_lightning as pl
auroc = pl.metrics.classification.AUROC()
- pytorch_lightning version == 1.1.8
Additional context
Similarly AUC metric is also missing |
Support TensorBoard logs dir structure customization | [
"feature",
"help wanted",
"design",
"logger"
] | Feature request:
As a user, I would like to have control over the checkpoint dir name and structure.
Motivation:
From @janhenriklambrechts (janhenrik.lambrechts@gmail.com):
My logging version directory of a single pytorch-lightning project with tensorboard logs looks something like this:
├── checkpoints
│   ├── epoch=86-max_acc=0.7446.ckpt
│   └── last.ckpt
├── events.out.tfevents.1613393562.lthpc.29645.0
├── hparams.yaml
├── train_acc_classifier_1
│   └── events.out.tfevents.1613393662.lthpc.29645.8
├── train_acc_classifier_2
│   └── events.out.tfevents.1613393662.lthpc.29645.7
├── train_acc_classifier_3
│   └── events.out.tfevents.1613393662.lthpc.29645.6
├── train_acc_classifier_4
│   └── events.out.tfevents.1613393662.lthpc.29645.5
├── val_acc_classifier_1
│   └── events.out.tfevents.1613393563.lthpc.29645.4
├── val_acc_classifier_2
│   └── events.out.tfevents.1613393563.lthpc.29645.3
├── val_acc_classifier_3
│   └── events.out.tfevents.1613393563.lthpc.29645.2
└── val_acc_classifier_4
    └── events.out.tfevents.1613393563.lthpc.29645.1
my network has multiple exits and I thus want to log the val_acc (or train_acc) of all these exits in a single graph which is why I need to log these setups as different runs.
This is very unorganised, and I want to be able to get all my tensorboard runs onto my laptop with a single scp -r call.
Therefore I was wondering if there is a way in PL to log not directly into my version_0 folder but into version_0/tb_logs.
I would want to be able to tell PL to save my logs in a subfolder tensorboard_logs, such that I only have two folders, checkpoints and tensorboard_logs, in this version_0 directory.
I would have assumed that the log_dir parameter of the TensorBoardLogger would provide this functionality; however, in that case the checkpoints and logs are still together, just in a different main folder.
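One possible workaround today is a small logger subclass (a sketch; it assumes TensorBoardLogger exposes a log_dir property and that the checkpoint directory is resolved separately from it):
import os
from pytorch_lightning.loggers import TensorBoardLogger

class SubdirTensorBoardLogger(TensorBoardLogger):
    @property
    def log_dir(self) -> str:
        # route all event files into <version_dir>/tensorboard_logs
        return os.path.join(super().log_dir, "tensorboard_logs")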
cc @Borda @tchaton @justusschock @awaelchli @edward-io @ananthsub @rohitgr7 @kamil-kaczmarek @Raalsky @Blaizzy |
Avoid patching LightningModule methods during training | [
"feature",
"help wanted",
"let's do it!",
"refactor"
] | Feature
Can we implement the dataloaders without monkey-patching the methods in LightningModule?
Motivation
Currently, we patch the LightningModule methods in the trainer when also a DataModule is used.
pytorch-lightning/pytorch_lightning/trainer/connectors/data_connector.py
Line 115
in
5157ba5
def attach_datamodule(self, model, datamodule: Optional[LightningDataModule], stage: str) -> None:
A datamodule's dataloader methods take precedence over the ones defined in the LightningModule, but the LightningModule code should not be altered. The user does not know that this happens, and after training is complete, the user may wish to continue using the model instance.
Pitch
Store the dataloader references in the trainer (or data connector) directly, without "attaching" them to the user's model.
This would also enable typing inference as mentioned by @gianscarpe.
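A hypothetical sketch of that direction (names invented for illustration, not the actual implementation):
class DataConnector:
    def attach_datamodule(self, model, datamodule, stage):
        if datamodule is None:
            return
        # keep the references on the trainer instead of patching the LightningModule
        self.trainer.datamodule = datamodule
        self.trainer._train_dataloader_fn = datamodule.train_dataloader
        self.trainer._val_dataloader_fn = datamodule.val_dataloader
        self.trainer._test_dataloader_fn = datamodule.test_dataloader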
Alternatives
Keep as is, but the user will not be happy.
It's also harder to debug the way it is right now. |
readme typo in master and newest branch | [
"docs"
] | Documentation
For typos and doc fixes, please go ahead and:
On master and the newest branch, the variable loss under training_step is referenced before assignment.
class LitAutoEncoder(pl.LightningModule):
def __init__(self):
super().__init__()
self.encoder = nn.Sequential(nn.Linear(28 * 28, 128), nn.ReLU(), nn.Linear(128, 3))
self.decoder = nn.Sequential(nn.Linear(3, 128), nn.ReLU(), nn.Linear(128, 28 * 28))
def forward(self, x):
# in lightning, forward defines the prediction/inference actions
embedding = self.encoder(x)
return embedding
def training_step(self, batch, batch_idx):
# training_step defined the train loop. It is independent of forward
x, y = batch
x = x.view(x.size(0), -1)
z = self.encoder(x)
x_hat = self.decoder(z)
# the order of the two lines below is incorrect; the variable loss is referenced before assignment
# self.log('train_loss', loss)
# loss = F.mse_loss(x_hat, x)
loss = F.mse_loss(x_hat, x)
self.log('train_loss', loss)
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
return optimizer |
Validation loss Tensor object is printed in progress bar; only the value is expected | [
"bug",
"help wanted"
] | Bug
When I add the validation loss to the training progress bar, the tensor object is printed whereas only the loss value is expected.
For example :
Epoch 1: 100%|██████████| 5/5 [00:04<00:00, 1.21it/s, loss=82.423, v_num=52, val_loss=tensor(76.4331, dtype=torch.float32)]
Validation loss is added with the following command: self.log('val_loss', loss, prog_bar=True)
I tried self.log('val_loss', loss.item(), prog_bar=True) with no effect.
To Reproduce
The bug is reproducible with the minimal code example (https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pl_examples/bug_report_model.py). See the code sample below with validation_step overridden:
Code sample
class TestModel(BoringModel):
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.log('val_loss', loss, prog_bar=True)
return {"x": loss}
Expected behavior
It is expected to only show the value of the validation loss in the progress bar, not the tensor object.
Environment
CUDA:
GPU:
available: False
version: None
Packages:
numpy: 1.19.1
pyTorch_debug: False
pyTorch_version: 1.6.0
pytorch-lightning: 0.10.0
tqdm: 4.50.1
System:
OS: Darwin
architecture:
64bit
processor: i386
python: 3.8.6
version: Darwin Kernel Version 18.7.0: Thu Jun 18 20:50:10 PDT 2020; root:xnu-4903.278.43~1/RELEASE_X86_64 |
Subclassing ProgressBarBase causes pylint crash | [
"bug",
"help wanted",
"3rd party"
] | Bug
Subclassing ProgressBarBase causes pylint crash
To Reproduce
Code sample
Say file: prog.py
from pytorch_lightning.callbacks.progress import ProgressBarBase
class ProgressBar(ProgressBarBase):
...
Run pylint:
$ pylint prog.py
Exceptions:
Traceback (most recent call last):
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/astroid/decorators.py", line 32, in cached
return cache[func]
KeyError: >
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/bin/pylint", line 10, in
sys.exit(run_pylint())
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/pylint/init.py", line 22, in run_pylint
PylintRun(sys.argv[1:])
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/pylint/lint/run.py", line 349, in init
linter.check(args)
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/pylint/lint/pylinter.py", line 862, in check
self._check_files(
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/pylint/lint/pylinter.py", line 896, in _check_files
self._check_file(get_ast, check_astroid_module, name, filepath, modname)
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/pylint/lint/pylinter.py", line 922, in _check_file
check_astroid_module(ast_node)
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/pylint/lint/pylinter.py", line 1054, in check_astroid_module
retval = self._check_astroid_module(
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/pylint/lint/pylinter.py", line 1099, in _check_astroid_module
walker.walk(ast_node)
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/pylint/utils/ast_walker.py", line 75, in walk
self.walk(child)
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/pylint/utils/ast_walker.py", line 72, in walk
callback(astroid)
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/pylint/checkers/variables.py", line 1206, in visit_importfrom
module = self._check_module_attrs(node, module, name_parts[1:])
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/pylint/checkers/variables.py", line 1884, in _check_module_attrs
module = next(module.getattr(name)[0].infer())
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/astroid/scoped_nodes.py", line 547, in getattr
result = [self.import_module(name, relative_only=True)]
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/astroid/scoped_nodes.py", line 642, in import_module
return MANAGER.ast_from_module_name(absmodname)
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/astroid/manager.py", line 189, in ast_from_module_name
return self.ast_from_file(found_spec.location, modname, fallback=False)
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/astroid/manager.py", line 98, in ast_from_file
return AstroidBuilder(self).file_build(filepath, modname)
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/astroid/builder.py", line 138, in file_build
return self._post_build(module, encoding)
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/astroid/builder.py", line 158, in _post_build
self.delayed_assattr(delayed)
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/astroid/builder.py", line 234, in delayed_assattr
if not _can_assign_attr(inferred, node.attrname):
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/astroid/builder.py", line 59, in _can_assign_attr
slots = node.slots()
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/astroid/decorators.py", line 34, in cached
cache[func] = result = func(*args, **kwargs)
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/astroid/scoped_nodes.py", line 2833, in slots
slots = list(grouped_slots())
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/astroid/scoped_nodes.py", line 2818, in grouped_slots
for cls in self.mro()[:-1]:
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/astroid/scoped_nodes.py", line 2904, in mro
return self._compute_mro(context=context)
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/astroid/scoped_nodes.py", line 2894, in _compute_mro
return _c3_merge(unmerged_mro, self, context)
File "/home/pwwang/.cache/pypoetry/virtualenvs/prog-taq1idOW-py3.8/lib/python3.8/site-packages/astroid/scoped_nodes.py", line 83, in _c3_merge
raise exceptions.InconsistentMroError(
astroid.exceptions.InconsistentMroError: Cannot create a consistent method resolution order for MROs (tqdm, Comparable, object), (tqdm_asyncio, tqdm, Comparable, object), (tqdm, tqdm_asyncio) of class <ClassDef.tqdm l.31 at 0x7f1ccee9f9a0>.
Expected behavior
There should be no exceptions
Environment
CUDA:
- GPU:
- available: False
- version: 10.1
Packages:
- numpy: 1.19.2
- pyTorch_debug: False
- pyTorch_version: 1.4.0
- pytorch-lightning: 0.10.0
- tqdm: 4.50.2
- pylint: 2.6.0
System:
- OS: Linux
- architecture:
- 64bit
- ELF
- processor:
- python: 3.8.3
- version: #1 SMP Wed Sep 23 19:45:38 PDT 2020
Additional context
I found this is caused by tqdm. However, tqdm itself alone didn't cause the crash. I tried prog.py with a simple tqdm use without any problems with pylint. This is why I posted it here, instead of tqdm's repo.
I tried various tqdm versions, ending up with 4.48.0, where everything worked fine, but with any later version the exceptions were raised.
This was also reproduced with python 3.7. |
TPU error | [
"bug",
"help wanted",
"accelerator: tpu"
] | Hi,
I am getting a TPU error on Colab and I am using the latest version of lightning.
Notebook
Trainer:
trainer = pl.Trainer(tpu_cores=8, precision=16, logger=logger, checkpoint_callback=checkpoint_callback, progress_bar_refresh_rate=50, accumulate_grad_batches=2, fast_dev_run=False,\
default_root_dir=root_path, auto_lr_find=True, gradient_clip_val=0.5,\
profiler=True, max_epochs=1000, callbacks=[lr_monitor, early_stop, PrintTableMetricsCallback()])
Stack trace:
GPU available: False, used: False
TPU available: True, using: 8 TPU cores
Using native 16bit precision.
training on 8 TPU cores
INIT TPU local core: 0, global rank: 0 with XLA_USE_BF16=1
Exception in device=TPU:0: dictionary update sequence element #0 has length 1; 2 is required
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/torch_xla/distributed/xla_multiprocessing.py", line 330, in _mp_start_fn
_start_fn(index, pf_cfg, fn, args)
File "/usr/local/lib/python3.6/dist-packages/torch_xla/distributed/xla_multiprocessing.py", line 324, in _start_fn
fn(gindex, *args)
File "/usr/local/lib/python3.6/dist-packages/pytorch_lightning/accelerators/tpu_backend.py", line 122, in tpu_train_in_process
self.trainer.train_loop.setup_training(model)
File "/usr/local/lib/python3.6/dist-packages/pytorch_lightning/trainer/training_loop.py", line 132, in setup_training
self.trainer.logger.save()
File "/usr/local/lib/python3.6/dist-packages/pytorch_lightning/utilities/distributed.py", line 35, in wrapped_fn
return fn(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/pytorch_lightning/loggers/tensorboard.py", line 220, in save
save_hparams_to_yaml(hparams_file, self.hparams)
File "/usr/local/lib/python3.6/dist-packages/pytorch_lightning/core/saving.py", line 378, in save_hparams_to_yaml
yaml.dump(hparams, fp)
File "/usr/local/lib/python3.6/dist-packages/yaml/__init__.py", line 290, in dump
return dump_all([data], stream, Dumper=Dumper, **kwds)
File "/usr/local/lib/python3.6/dist-packages/yaml/__init__.py", line 278, in dump_all
dumper.represent(data)
File "/usr/local/lib/python3.6/dist-packages/yaml/representer.py", line 27, in represent
node = self.represent_data(data)
File "/usr/local/lib/python3.6/dist-packages/yaml/representer.py", line 48, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/usr/local/lib/python3.6/dist-packages/yaml/representer.py", line 207, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "/usr/local/lib/python3.6/dist-packages/yaml/representer.py", line 118, in represent_mapping
node_value = self.represent_data(item_value)
File "/usr/local/lib/python3.6/dist-packages/yaml/representer.py", line 52, in represent_data
node = self.yaml_multi_representers[data_type](self, data)
File "/usr/local/lib/python3.6/dist-packages/yaml/representer.py", line 343, in represent_object
'tag:yaml.org,2002:python/object:'+function_name, state)
File "/usr/local/lib/python3.6/dist-packages/yaml/representer.py", line 118, in represent_mapping
node_value = self.represent_data(item_value)
ValueError: dictionary update sequence element #0 has length 1; 2 is required
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
<ipython-input-16-2e5877a52826> in <module>()
4 trainer = pl.Trainer(tpu_cores=8, precision=16, logger=logger, checkpoint_callback=checkpoint_callback, progress_bar_refresh_rate=50, accumulate_grad_batches=2, fast_dev_run=False, default_root_dir=root_path, auto_lr_find=True, gradient_clip_val=0.5, profiler=True, max_epochs=1000, callbacks=[lr_monitor, early_stop, PrintTableMetricsCallback()])
5
----> 6 trainer.fit(model_one)
4 frames
/usr/local/lib/python3.6/dist-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
420 self.call_hook('on_fit_start')
421
--> 422 results = self.accelerator_backend.train()
423 self.accelerator_backend.teardown()
424
/usr/local/lib/python3.6/dist-packages/pytorch_lightning/accelerators/tpu_backend.py in train(self)
95 args=(model, self.trainer, self.mp_queue),
96 nprocs=self.trainer.tpu_cores,
---> 97 start_method=self.start_method
98 )
99
/usr/local/lib/python3.6/dist-packages/torch_xla/distributed/xla_multiprocessing.py in spawn(fn, args, nprocs, join, daemon, start_method)
393 join=join,
394 daemon=daemon,
--> 395 start_method=start_method)
396
397
/usr/local/lib/python3.6/dist-packages/torch/multiprocessing/spawn.py in start_processes(fn, args, nprocs, join, daemon, start_method)
155
156 # Loop on join until it returns True or raises an exception.
--> 157 while not context.join():
158 pass
159
/usr/local/lib/python3.6/dist-packages/torch/multiprocessing/spawn.py in join(self, timeout)
110 raise Exception(
111 "process %d terminated with exit code %d" %
--> 112 (error_index, exitcode)
113 )
114
Exception: process 0 terminated with exit code 17 |
clean-up metric testing | [
"feature",
"ci"
] | Feature
Apply the comments in #4043 which were ignored, and make the tests easier to understand.
Motivation
Right now the testing is quite black-box, and it is confusing to have the same SK function names in different files. |
backward callback does not work on pytorch-lightning version 1.0.0rc3 | [
"bug",
"help wanted"
] | In pytorch-lightning version 0.10 the following code works well. However in pytorch-lightning version 1.0.0rc3, the code does not work
and gives the following error:
TypeError: backward() missing 1 required positional argument: 'optimizer_idx'
Code sample
def configure_optimizers(self):
optimizer = optim.Adam(self.parameters(), lr=self.hparams.lr)
return optimizer
def backward(self, trainer, loss, optimizer, optimizer_idx):
loss.backward(retain_graph=True) |
Data Parallel bug (return outputs not being moved to same device) | [
"bug",
"help wanted",
"priority: 0",
"waiting on author",
"strategy: dp",
"logging"
] | Bug
Under backend='dp', the reduction of the loss across multiple GPUs is not handled correctly. This is present in v0.10--v1.0.0rc4.
To Reproduce
Code sample
import torch
import pytorch_lightning as ptl
from pytorch_lightning import LightningModule
from torch.utils.data import Dataset
class RandomDictDataset(Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
a = self.data[index]
b = a + 2
return {"a": a, "b": b}
def __len__(self):
return self.len
class RandomDictStringDataset(Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
return {"id": str(index), "x": self.data[index]}
def __len__(self):
return self.len
class RandomDataset(Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return self.len
class BoringModel(LightningModule):
def __init__(self):
"""
Testing PL Module
Use as follows:
- subclass
- modify the behavior for what you want
class TestModel(BaseTestModel):
def training_step(...):
# do your own thing
or:
model = BaseTestModel()
model.training_epoch_end = None
"""
super().__init__()
self.layer = torch.nn.Linear(32, 2)
def forward(self, x):
return self.layer(x)
def loss(self, batch, prediction):
# An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls
return torch.nn.functional.cross_entropy(
prediction,
torch.ones(len(prediction), dtype=torch.long, device=prediction.device),
)
def training_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.log("loss", loss)
return loss
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.log("loss", loss)
return loss
def test_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
return loss
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
def train_dataloader(self):
return torch.utils.data.DataLoader(RandomDataset(32, 64), batch_size=16)
def val_dataloader(self):
return torch.utils.data.DataLoader(RandomDataset(32, 64), batch_size=16)
def test_dataloader(self):
return torch.utils.data.DataLoader(RandomDataset(32, 64), batch_size=16)
def main():
model = BoringModel()
trainer = ptl.Trainer(
distributed_backend="dp",
gpus=4,
)
trainer.fit(model)
if __name__ == "__main__":
main()
Produces the following
GPU available: True, used: True
TPU available: False, using: 0 TPU cores
LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1,2,3]
| Name | Type | Params
---------------------------------
0 | layer | Linear | 66
/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/utilities/distributed.py:45: UserWarning: The dataloader, val dataloader 0, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 104 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.
warnings.warn(*args, **kwargs)
Validation sanity check: 0it [00:00, ?it/s]/home/user/.conda/envs/env/lib/python3.8/site-packages/torch/nn/parallel/_functions.py:61: UserWarning: Was asked to gather along dimension 0, but all input tensors were scalars; will instead unsqueeze and return a vector.
warnings.warn('Was asked to gather along dimension 0, but all '
/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/utilities/distributed.py:45: UserWarning: The dataloader, train dataloader, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 104 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.
warnings.warn(*args, **kwargs)
Epoch 1:  50%|████████        | 4/8 [00:00<00:00, 184.41it/s, loss=0.497, v_num=53]
Traceback (most recent call last):
File "dp_bug.py", line 118, in <module>
main()
File "dp_bug.py", line 114, in main
trainer.fit(model)
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 440, in fit
results = self.accelerator_backend.train()
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/accelerators/dp_accelerator.py", line 97, in train
results = self.train_or_test()
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/accelerators/accelerator.py", line 53, in train_or_test
results = self.trainer.train()
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 483, in train
self.train_loop.run_training_epoch()
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 557, in run_training_epoch
self.trainer.run_evaluation(test_mode=False)
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 599, in run_evaluation
eval_loop_results = self.evaluation_loop.log_epoch_metrics(deprecated_eval_results, epoch_logs, test_mode)
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/trainer/evaluation_loop.py", line 210, in log_epoch_metrics
eval_loop_results = self.trainer.logger_connector.on_evaluation_epoch_end(
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/trainer/connectors/logger_connector.py", line 113, in on_evaluation_epoch_end
self._log_on_evaluation_epoch_end_metrics(epoch_logs)
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/trainer/connectors/logger_connector.py", line 178, in _log_on_evaluation_epoch_end_metrics
reduced_epoch_metrics = dl_metrics[0].__class__.reduce_on_epoch_end(dl_metrics)
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/core/step_result.py", line 433, in reduce_on_epoch_end
recursive_stack(result)
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/core/step_result.py", line 552, in recursive_stack
result[k] = collate_tensors(v)
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/core/step_result.py", line 574, in collate_tensors
return torch.stack(items)
RuntimeError: All input tensors must be on the same device. Received cuda:3 and cuda:1
Exception ignored in: <function tqdm.__del__ at 0x7fcf54050a60>
Traceback (most recent call last):
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/tqdm/std.py", line 1087, in __del__
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/tqdm/std.py", line 1294, in close
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/tqdm/std.py", line 1472, in display
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/tqdm/std.py", line 1090, in __repr__
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/tqdm/std.py", line 1434, in format_dict
TypeError: cannot unpack non-iterable NoneType object
Specifically note the line saying
RuntimeError: All input tensors must be on the same device. Received cuda:3 and cuda:1
Expected behavior
Environment
PyTorch Version (e.g., 1.0): 1.6.0
OS (e.g., Linux): Ubuntu 18.04
How you installed PyTorch (conda, pip, source): conda
Build command you used (if compiling from source): N/A
Python version: 3.8.5
CUDA/cuDNN version: 11.0
GPU models and configuration: 8 GPU (RTX 2080Ti)
Any other relevant information:
Additional context
This works on v0.9.0:
import torch
import pytorch_lightning as ptl
from pytorch_lightning import LightningModule
from torch.utils.data import Dataset
class RandomDictDataset(Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
a = self.data[index]
b = a + 2
return {"a": a, "b": b}
def __len__(self):
return self.len
class RandomDictStringDataset(Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
return {"id": str(index), "x": self.data[index]}
def __len__(self):
return self.len
class RandomDataset(Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return self.len
class BoringModel(LightningModule):
def __init__(self):
"""
Testing PL Module
Use as follows:
- subclass
- modify the behavior for what you want
class TestModel(BaseTestModel):
def training_step(...):
# do your own thing
or:
model = BaseTestModel()
model.training_epoch_end = None
"""
super().__init__()
self.layer = torch.nn.Linear(32, 2)
def forward(self, x):
return self.layer(x)
def loss(self, batch, prediction):
# An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls
return torch.nn.functional.cross_entropy(
prediction,
torch.ones(len(prediction), dtype=torch.long, device=prediction.device),
)
def training_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"val_loss": loss}
def test_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"test_loss": loss}
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
def train_dataloader(self):
return torch.utils.data.DataLoader(RandomDataset(32, 64), batch_size=16)
def val_dataloader(self):
return torch.utils.data.DataLoader(RandomDataset(32, 64), batch_size=16)
def test_dataloader(self):
return torch.utils.data.DataLoader(RandomDataset(32, 64), batch_size=16)
def main():
model = BoringModel()
trainer = ptl.Trainer(
distributed_backend="dp",
gpus=4,
# log_every_n_steps=5,
# flush_logs_every_n_steps=20,
# benchmark=True,
# gradient_clip_val=20,
)
trainer.fit(model)
if __name__ == "__main__":
main()
but causes this error under v1.0.0rc4
GPU available: True, used: True
TPU available: False, using: 0 TPU cores
LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0,1,2,3]
| Name | Type | Params
---------------------------------
0 | layer | Linear | 66
/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/utilities/distributed.py:45: UserWarning: The dataloader, val dataloader 0, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 104 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.
warnings.warn(*args, **kwargs)
/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/utilities/distributed.py:45: UserWarning: The dataloader, train dataloader, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 104 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.
warnings.warn(*args, **kwargs)
Epoch 0: 0%| | 0/8 [00:00<?, ?it/s]Traceback (most recent call last):
File "dp_bug.py", line 116, in <module>
main()
File "dp_bug.py", line 112, in main
trainer.fit(model)
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 440, in fit
results = self.accelerator_backend.train()
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/accelerators/dp_accelerator.py", line 97, in train
results = self.train_or_test()
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/accelerators/accelerator.py", line 53, in train_or_test
results = self.trainer.train()
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 483, in train
self.train_loop.run_training_epoch()
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 529, in run_training_epoch
batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 661, in run_training_batch
opt_closure_result = self.training_step_and_backward(
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 753, in training_step_and_backward
self.backward(result, optimizer, opt_idx)
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/trainer/training_loop.py", line 767, in backward
result.closure_loss = self.trainer.accelerator_backend.backward(
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/accelerators/accelerator.py", line 83, in backward
model.backward(closure_loss, optimizer, opt_idx)
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/pytorch_lightning/core/lightning.py", line 1077, in backward
loss.backward()
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/torch/tensor.py", line 185, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph)
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/torch/autograd/__init__.py", line 121, in backward
grad_tensors = _make_grads(tensors, grad_tensors)
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/torch/autograd/__init__.py", line 47, in _make_grads
raise RuntimeError("grad can be implicitly created only for scalar outputs")
RuntimeError: grad can be implicitly created only for scalar outputs
Exception ignored in: <function tqdm.__del__ at 0x7fed7b0c1a60>
Traceback (most recent call last):
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/tqdm/std.py", line 1087, in __del__
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/tqdm/std.py", line 1294, in close
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/tqdm/std.py", line 1472, in display
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/tqdm/std.py", line 1090, in __repr__
File "/home/user/.conda/envs/env/lib/python3.8/site-packages/tqdm/std.py", line 1434, in format_dict
TypeError: cannot unpack non-iterable NoneType object |
Logging on step does not work anymore | [
"bug",
"help wanted"
] | Bug
Logging on step does not seem to work properly.
To Reproduce
Run the following MNIST example.
Code sample
import os
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, random_split
from torchvision import transforms
from torchvision.datasets import MNIST
class MNISTDataModule(pl.LightningDataModule):
def __init__(self, batch_size=32):
super().__init__()
self.batch_size = batch_size
# When doing distributed training, Datamodules have two optional arguments for
# granular control over download/prepare/splitting data:
# OPTIONAL, called only on 1 GPU/machine
def prepare_data(self):
MNIST(os.getcwd(), train=True, download=True)
MNIST(os.getcwd(), train=False, download=True)
# OPTIONAL, called for every GPU/machine (assigning state is OK)
def setup(self, stage):
# transforms
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
# split dataset
if stage == "fit":
mnist_train = MNIST(os.getcwd(), train=True, transform=transform)
self.mnist_train, self.mnist_val = random_split(mnist_train, [55000, 5000])
if stage == "test":
self.mnist_test = MNIST(os.getcwd(), train=False, transform=transform)
# return the dataloader for each split
def train_dataloader(self):
mnist_train = DataLoader(self.mnist_train, batch_size=self.batch_size)
return mnist_train
def val_dataloader(self):
mnist_val = DataLoader(self.mnist_val, batch_size=self.batch_size)
return mnist_val
def test_dataloader(self):
mnist_test = DataLoader(self.mnist_test, batch_size=self.batch_size)
return mnist_test
class LitAutoEncoder(pl.LightningModule):
def __init__(self):
super().__init__()
self.encoder = nn.Sequential(
nn.Linear(28 * 28, 64), nn.ReLU(), nn.Linear(64, 3)
)
self.decoder = nn.Sequential(
nn.Linear(3, 64), nn.ReLU(), nn.Linear(64, 28 * 28)
)
def forward(self, x):
# in lightning, forward defines the prediction/inference actions
embedding = self.encoder(x)
return embedding
def training_step(self, batch, batch_idx):
# training_step defined the train loop. It is independent of forward
x, y = batch
x = x.view(x.size(0), -1)
z = self.encoder(x)
x_hat = self.decoder(z)
loss = F.mse_loss(x_hat, x)
# Logging to TensorBoard by default
self.log("train_loss", loss, on_step=True)
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
return optimizer
from pytorch_lightning.loggers import CSVLogger
logger = CSVLogger("csv_logs")
# init model
model = LitAutoEncoder()
# init data
dm = MNISTDataModule(batch_size=512)
# train
trainer = pl.Trainer(max_epochs=1, logger=logger)
trainer.fit(model, dm)
# test
trainer.test(datamodule=dm)
import pandas as pd
pd.read_csv("csv_logs/default/version_0/metrics.csv")
Output:
train_loss | epoch | step
-------------------------
0.790777   | 0     | 49
0.614327   | 0     | 99
0.582780   | 0     | 149
0.594851   | 0     | 199
0.545873   | 0     | 249
Expected behavior
The loss should be recorded at each step, but it is recorded every 50 steps instead.
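For what it's worth, the 50-step spacing matches the Trainer's default logging interval, so a possible workaround (which may not be the intended fix) is to lower that interval explicitly:
# hedged workaround: log every training step instead of every 50 steps
trainer = pl.Trainer(max_epochs=1, logger=logger, log_every_n_steps=1)
trainer.fit(model, dm)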
Environment
PyTorch Version (e.g., 1.0): 1.6.9
OS (e.g., Linux): Linux
How you installed PyTorch (conda, pip, source): conda
Python version: 3.7.9
CUDA/cuDNN version: not used for the above example
GPU models and configuration: not used for the above example |
Why does the example not use the prepare data hook? | [
"won't fix",
"example",
"docs"
] | I would expect the out-of-the-box example to use the proper prepare_data hook to enable multi-node training. Is there a reason that the MNIST data is downloaded in main rather than in the style of the sample from the blog post?
If relevant I'd be happy to contribute a patch.
pytorch-lightning/pl_examples/basic_examples/mnist.py, line 86 (commit 86c7062):
dataset = MNIST('', train=True, download=True, transform=transforms.ToTensor())
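For reference, a minimal sketch of what the example could look like with the hook; only the MNIST call from the referenced line is taken from the example, everything else is illustrative:
import os
import pytorch_lightning as pl
from torchvision import transforms
from torchvision.datasets import MNIST

class MNISTDataModule(pl.LightningDataModule):
    def prepare_data(self):
        # runs on a single process per node, so downloading here is safe for multi-node training
        MNIST(os.getcwd(), train=True, download=True)
    def setup(self, stage=None):
        # runs on every process; only assign state here, no downloads
        self.mnist_train = MNIST(os.getcwd(), train=True, transform=transforms.ToTensor())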
Using the LBFGS optimizer in PyTorch Lightning, the model does not converge compared to native PyTorch + LBFGS | [
"bug",
"help wanted",
"priority: 1"
] | Common bugs:
Comparing the results of LBFGS + PyTorch Lightning to native PyTorch + LBFGS, PyTorch Lightning is not able to update the weights and the model does not converge. There are some issues to point out:
Adam + PyTorch Lightning on MNIST works fine; however, LBFGS + PyTorch Lightning does not work as expected.
LBFGS + native PyTorch works very well; however, LBFGS + PyTorch Lightning does not work as expected.
🐛 Bug
LBFGS + PyTorch Lightning has problems converging and updating weights compared to Adam + PyTorch Lightning.
Code sample
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchvision.datasets import MNIST
from torchvision import transforms,datasets
from torch.utils.data import DataLoader,random_split
import pytorch_lightning as pl
from IPython.display import clear_output
class LightningMNISTClassifier(pl.LightningModule):
def __init__(self):
super(LightningMNISTClassifier,self).__init__()
self.layer_1 = nn.Linear(28 * 28, 128)
self.layer_2 = nn.Linear(128, 256)
self.layer_3 = nn.Linear(256, 10)
def forward(self, x):
batch_size, channels, width, height = x.size()
x=x.view(batch_size,-1)
# layer 1
x = self.layer_1(x)
x = torch.relu(x)
# layer 2
x = self.layer_2(x)
x = torch.relu(x)
# layer 3
x = self.layer_3(x)
# probability distribution over labels
x = torch.log_softmax(x, dim=1)
return x
def prepare_data(self):
transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])
# prepare transforms standard to MNIST
mnist_train = MNIST(os.getcwd(), train=True, download=True, transform=transform)
mnist_test = MNIST(os.getcwd(), train=False, download=True, transform=transform)
self.mnist_train, self.mnist_val = random_split(mnist_train, [55000, 5000])
def train_dataloader(self):
return DataLoader(self.mnist_train,batch_size=1024)
# def val_dataloader(self):
# return DataLoader(self.mnist_val,batch_size=1024)
# def test_dataloader(self):
# return DataLoader(self.mnist_test,batch_size=1024)
def configure_optimizers(self):
# optimizer=optim.Adam(self.parameters(),lr=1e-3)
optimizer = optim.LBFGS(self.parameters(), lr=1e-2)
return optimizer
# def backward(self, trainer, loss, optimizer):
# loss.backward(retain_graph=True)
def optimizer_step(self, current_epoch, batch_nb, optimizer, optimizer_idx,
second_order_closure, on_tpu=False, using_native_amp=False,
using_lbfgs=False):
# update params
optimizer.step(second_order_closure)
def cross_entropy_loss(self,logits,labels):
return F.nll_loss(logits,labels)
def training_step(self,train_batch,batch_idx):
x,y=train_batch
logits=self.forward(x)
loss=self.cross_entropy_loss(logits,y)
return {'loss':loss}
def training_epoch_end(self,outputs):
avg_loss=torch.stack([x['loss'] for x in outputs]).mean()
print('epoch={}, avg_Train_loss={:.2f}'.format(self.current_epoch,avg_loss.item()))
# return {'avg_train_loss':avg_loss}
# def validation_step(self,val_batch,batch_idx):
# x,y=val_batch
# logits=self.forward(x)
# loss=self.cross_entropy_loss(logits,y)
# return {'val_loss':loss}
# def validation_epoch_end(self,outputs):
# avg_loss=torch.stack([x['val_loss'] for x in outputs]).mean()
# print('epoch={}, avg_Test_loss={:.2f}'.format(self.current_epoch,avg_loss.item()))
# return {'avg_val_loss':avg_loss}
model=LightningMNISTClassifier()
#from pytorch_lightning.callbacks import EarlyStopping
trainer=pl.Trainer(max_epochs=400,gpus=1,
# check_val_every_n_epoch=2,
# accumulate_grad_batches=5,
# early_stop_callback=early_stop,
# limit_train_batches=50,
# val_check_interval=0.25,
progress_bar_refresh_rate=0,
# num_sanity_val_steps=0,
weights_summary=None)
clear_output(wait=True)
trainer.fit(model)
Expected behavior
Environment
Please copy and paste the output from our
environment collection script
(or fill out the checklist below manually).
You can get the script and run it with:
wget https://raw.githubusercontent.com/PyTorchLightning/pytorch-lightning/master/tests/collect_env_details.py
# For security purposes, please check the contents of collect_env_details.py before running it.
python collect_env_details.py
Environment:
-Colab and pycharm
-PyTorch version: 1.6.0+CPU and GPU
-pytorch-lightning==1.0.0rc3 |
1.0.0rc4 Save to TorchScript self.eval() device error | [
"bug",
"help wanted"
] | 🐛 Bug
Likely self.eval() sends the model to the CPU, which causes saving to a TorchScript file to fail.
To Reproduce
Run the code sample below.
Code sample
Script can also be downloaded from: https://gist.github.com/NumesSanguis/388b4cfab2a8945afa85e8b79cd0c794
Most relevant code (extended to_torchscript to support trace):
# ...
# use TorchScript trace
def to_torchscript(...):
# ...
print(f"\n\nExample inputs device: {example_inputs.device}\n")
scripted_module = torch.jit.trace(func=self.eval(), example_inputs=example_inputs, **kwargs)
# ...
return scripted_module
#...
if __name__ == '__main__':
# ...
# UNTIL HERE WORKS FINE
# save model as TorchScript using eval()
model.to_torchscript(file_path="example.pt", torchscript_approach='trace')
CLICK ME for full code
from __future__ import annotations # don't crash on non-imported libraries when type checking
from typing import Dict, Optional, Union
import torch
from torch import nn as nn
import pytorch_lightning as pl
from pytorch_lightning import Trainer
from torch.utils.data import Dataset, DataLoader
from torch.nn import functional as F
class MyModel(pl.LightningModule):
def __init__(self): # **kwargs # sample_rate
super().__init__()
# self.save_hyperparameters()
# 1 sec of audio
self.input_layer = nn.Linear(8000, 400, bias=True)
self.hidden_layer = nn.Linear(400, 128, bias=True)
self.output_layer = nn.Linear(128, 3, bias=True)
self.criterion = nn.CrossEntropyLoss()
def forward(self, input):
x = F.relu_(self.input_layer(input))
x = F.relu_(self.hidden_layer(x))
output = self.output_layer(x) # torch.sigmoid()
return output
def calculate_loss(self, prediction, target):
loss = self.criterion(prediction, target)
return loss
def training_step(self, batch, batch_idx):
input, target = batch
# !!! use first batch to create an example input !!!
if self.example_input_array is None:
# we only need 1 sample, not a whole batch
self.example_input_array = input
prediction = self(input)
loss = self.calculate_loss(prediction, target)
return loss
def validation_step(self, batch, batch_idx):
input, target = batch
prediction = self(input)
loss = self.calculate_loss(prediction, target)
return loss
def test_step(self, batch, batch_idx):
input, target = batch
prediction = self(input)
loss = self.calculate_loss(prediction, target)
return loss
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=1e-3)
# TODO create pull request for this
def to_torchscript(
self, file_path: Optional[str] = None, torchscript_approach: Optional[str] = 'script',
example_inputs: torch.Tensor = None, **kwargs
) -> Union[ScriptModule, Dict[str, ScriptModule]]:
# training or eval/test?
mode = self.training
with torch.no_grad():
if torchscript_approach == 'script':
scripted_module = torch.jit.script(self.eval(), **kwargs)
elif torchscript_approach == 'trace':
if example_inputs is None:
example_inputs = self.example_input_array
print(f"\n\nExample inputs device: {example_inputs.device}\n")
scripted_module = torch.jit.trace(func=self.eval(), example_inputs=example_inputs, **kwargs)
else:
raise ValueError(f"torchscript_approach only supports 'script' or 'trace', but value given was:"
f"{torchscript_approach}")
# set back whether we were training or not
self.train(mode)
if file_path is not None:
torch.jit.save(scripted_module, file_path)
return scripted_module
# DATA
class SimpleDataset(Dataset):
def __init__(self, sample_rate=8000):
self.sample_rate = sample_rate
def __len__(self):
return 16
def __getitem__(self, idx):
# 0, 1 or 2
target = torch.randint(0, 3, size=(1, )).squeeze()
# size 8000/16000 of 0.0, 0.5, or 1.0
input = torch.full((self.sample_rate,), (target.float()/2).item())
# torch.empty(self.sample_rate,).fill_(target.float()/2)
return input, target
class SimpleDatamodule(pl.LightningDataModule):
def setup(self, stage: str = None):
pass
def train_dataloader(self):
return DataLoader(SimpleDataset(), batch_size=4)
def val_dataloader(self):
return DataLoader(SimpleDataset(), batch_size=4)
def test_dataloader(self):
return DataLoader(SimpleDataset(), batch_size=4)
if __name__ == '__main__':
sr = 8000
checkpoint_location = "example.ckpt"
# network
model = MyModel()
# data
dm = SimpleDatamodule()
# train
trainer = Trainer(max_epochs=2, deterministic=True, gpus=1) # gpus=1,
trainer.fit(model, dm)
# save
trainer.save_checkpoint(checkpoint_location)
# UNTIL HERE WORKS FINE
# save model as TorchScript using eval()
model.to_torchscript(file_path="example.pt", torchscript_approach='trace')
Expected behavior
Model and Tensor input both being evaluated at device GPU.
Actual behavior
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!
CLICK ME for full Traceback
$ python save_as_torchscript_device_error.py
GPU available: True, used: True
TPU available: False, using: 0 TPU cores
LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]
| Name | Type | Params
--------------------------------------------------
0 | input_layer | Linear | 3 M
1 | hidden_layer | Linear | 51 K
2 | output_layer | Linear | 387
3 | criterion | CrossEntropyLoss | 0
/home/*user*/anaconda3/envs/pytorchlit10/lib/python3.8/site-packages/pytorch_lightning/utilities/distributed.py:45: UserWarning: The dataloader, val dataloader 0, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 16 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.
warnings.warn(*args, **kwargs)
/home/*user*/anaconda3/envs/pytorchlit10/lib/python3.8/site-packages/pytorch_lightning/utilities/distributed.py:45: UserWarning: The dataloader, train dataloader, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 16 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.
warnings.warn(*args, **kwargs)
Epoch 1: 100%|██████████████████████████████████████████████████████████████████████| 8/8 [00:00<00:00, 148.42it/s, loss=0.954, v_num=55]
Example inputs device: cuda:0
Traceback (most recent call last):
File "/home/*user*/anaconda3/envs/pytorchlit10/lib/python3.8/site-packages/torch/jit/__init__.py", line 692, in run_mod_and_filter_tensor_outputs
outs = wrap_retval(mod(*_clone_inputs(inputs)))
RuntimeError: The following operation failed in the TorchScript interpreter.
Traceback of TorchScript (most recent call last):
File "<string>", line 3, in forward
def addmm(self: Tensor, mat1: Tensor, mat2: Tensor, beta: number = 1.0, alpha: number = 1.0):
return self + mat1.mm(mat2)
~~~~~~~~~~~~~~~~~~~ <--- HERE
def batch_norm(input : Tensor, running_mean : Optional[Tensor], running_var : Optional[Tensor], training : bool, momentum : float, eps : float) -> Tensor:
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "eval_gpu_test.py", line 130, in <module>
model.to_torchscript(file_path="example.pt", torchscript_approach='trace')
File "eval_gpu_test.py", line 71, in to_torchscript
scripted_module = torch.jit.trace(func=self.eval(), example_inputs=example_inputs, **kwargs)
File "/home/*user*/anaconda3/envs/pytorchlit10/lib/python3.8/site-packages/torch/jit/__init__.py", line 953, in trace
return trace_module(func, {'forward': example_inputs}, None,
File "/home/*user*/anaconda3/envs/pytorchlit10/lib/python3.8/site-packages/torch/jit/__init__.py", line 1118, in trace_module
_check_trace([inputs], func, check_trace_method,
File "/home/*user*/anaconda3/envs/pytorchlit10/lib/python3.8/site-packages/torch/autograd/grad_mode.py", line 15, in decorate_context
return func(*args, **kwargs)
File "/home/*user*/anaconda3/envs/pytorchlit10/lib/python3.8/site-packages/torch/jit/__init__.py", line 734, in _check_trace
traced_outs = run_mod_and_filter_tensor_outputs(traced_func, inputs, 'trace')
File "/home/*user*/anaconda3/envs/pytorchlit10/lib/python3.8/site-packages/torch/jit/__init__.py", line 696, in run_mod_and_filter_tensor_outputs
raise TracingCheckError(*graph_diagnostic_info(),
torch.jit.TracingCheckError: Tracing failed sanity checks!
Encountered an exception while running the trace with test inputs.
Exception:
The following operation failed in the TorchScript interpreter.
Traceback of TorchScript (most recent call last):
File "<string>", line 3, in forward
def addmm(self: Tensor, mat1: Tensor, mat2: Tensor, beta: number = 1.0, alpha: number = 1.0):
return self + mat1.mm(mat2)
~~~~~~~~~~~~~~~~~~~ <--- HERE
def batch_norm(input : Tensor, running_mean : Optional[Tensor], running_var : Optional[Tensor], training : bool, momentum : float, eps : float) -> Tensor:
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!
Environment
CUDA:
GPU:
GeForce GTX 1080 Ti
available: True
version: 10.2
Packages:
numpy: 1.19.1
pyTorch_debug: False
pyTorch_version: 1.6.0
pytorch-lightning: 1.0.0rc4
tqdm: 4.50.1
System:
OS: Linux (Ubuntu 18.04)
architecture:
64bit
ELF
processor: x86_64
python: 3.8.5
version: #119-Ubuntu SMP Tue Sep 8 12:30:01 UTC 2020
Additional context
Might it be related to one of these eval() changes in v0.10?:
https://pytorch-lightning.readthedocs.io/en/stable/CHANGELOG.html?highlight=eval#changed |
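Since the printed example input is on cuda:0 while the traced parameters apparently are not, a hedged guess at a workaround is to move the example input to whatever device the parameters currently live on before tracing (variable names follow the snippet above):
# inside the custom to_torchscript, before calling torch.jit.trace
device = next(self.parameters()).device
example_inputs = self.example_input_array.to(device)
scripted_module = torch.jit.trace(func=self.eval(), example_inputs=example_inputs)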
Avoid unnecessary DDP synchronization when gradient_accumulation_steps > 1 | [
"feature",
"help wanted"
] | 🚀 Feature
Avoid unnecessary DDP synchronization when gradient_accumulation_steps > 1
Motivation
When training large models, the synchronization is costly and the actual speedup from 2 GPUs is much lower than 200%.
Pitch
We can use DDP's no_sync feature to avoid synchronization in steps that don't call optimizer_step.
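A rough illustration of the idea in plain PyTorch rather than Lightning internals (ddp_model, optimizer, dataloader, compute_loss and accumulation_steps are all placeholders):
import contextlib

for i, batch in enumerate(dataloader):
    last_accumulation_step = (i + 1) % accumulation_steps == 0
    # skip the gradient all-reduce on the intermediate accumulation steps
    ctx = contextlib.nullcontext() if last_accumulation_step else ddp_model.no_sync()
    with ctx:
        loss = compute_loss(ddp_model, batch) / accumulation_steps
        loss.backward()
    if last_accumulation_step:
        optimizer.step()
        optimizer.zero_grad()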
Recursive Summary for Models | [
"feature",
"help wanted"
] | 🚀 Feature
At the moment whenever you start training you get a print out of the model's modules and number of parameters. It would be great if you were able to recursively traverse modules and print out the modules and parameters in these submodules. This could be enabled through a possible flag verbose, which would further print out the recursive definitions.
Currently:
| Name | Type | Params
-----------------------------------------------
0 | features | Motion_Features | 34 K
1 | classifier | Prediction_Head | 2 M
Future:
| Name | Type | Params
-------------------------------------
0 | features | Motion_Features | 34 K
- 0 | conv1 | Conv2d | 8 K
- 1 | conv2 | Conv2d | 13 K
- 2 | mp1 | MaxPool2d | 0
- 3 | conv3 | Conv2d | 4 K
- 4 | conv4 | Conv2d | 3 K
- 5 | mp2 | MaxPool2d | 0
- 6 | conv5 | Conv2d | 3 K
- 7 | conv6 | Conv2d | 580
1 | classifier | Prediction_Head | 2 M
- 8 | lin1 | Linear | 2 M
- 9 | head1 | Linear | 20 K
- 10 | head2 | Linear | 20 K
- 11 | out1 | Linear | 1 K
- 12 | out2 | Linear | 101
Motivation
This would enable user to easily see all of the layers they are using and the total number of parameters being used for all network components.
Pitch
I would like someone to rewrite summarize to have a verbose flag to further print out each of the submodules and number of parameters. |
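A minimal sketch of the kind of recursive traversal this would need (the output formatting is only illustrative):
def summarize_recursive(module, prefix=""):
    # walk the module tree and print name, type and parameter count per child
    for name, child in module.named_children():
        n_params = sum(p.numel() for p in child.parameters())
        print(f"{prefix}{name} | {type(child).__name__} | {n_params}")
        summarize_recursive(child, prefix + "- ")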
Memory leak when using Metric with list state | [
"help wanted"
] | 🐛 Bug
I tried implementing a custom Metric to use as a loss when training. It seems to compute the desired values fine, however like the title states the metric quickly consumes all the memory on my single GPU. Models that previously required less than half of my GPU memory now run into OOMs after less than one epoch.
I tried replicating the issue in this Colab notebook; however, the dataset and training procedure are too lightweight to replicate the resource consumption issue.
Can anyone confirm whether it's an issue with how I handle the list state myself or a bug with Metric itself? The only metric I found that uses a list state that I could draw inspiration from is the explained_variance. However, I thought I should be able to do things differently and wouldn't need to store the targets and preds in the state, only the computed results.
To Reproduce
Colab notebook that tries to reproduce the issue (with no success) on a toy example.
If it can be of further help, my real use-case is the following, where I implement the dice loss as a metric with which to train my model:
class DifferentiableDiceCoefficient(Metric):
"""Computes a differentiable version of the dice coefficient."""
def __init__(
self,
include_background: bool = False,
nan_score: float = 0.0,
no_fg_score: float = 0.0,
reduction: str = "elementwise_mean",
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
):
super().__init__(
compute_on_step=compute_on_step, dist_sync_on_step=dist_sync_on_step, process_group=process_group
)
self.include_background = include_background
self.nan_score = nan_score
self.no_fg_score = no_fg_score
assert reduction in ("elementwise_mean", "none")
self.reduction = reduction
self.add_state("dice_by_steps", [])
def update(self, input: torch.Tensor, target: torch.Tensor) -> None:
self.dice_by_steps += [
differentiable_dice_score(
input=input,
target=target,
bg=self.include_background,
nan_score=self.nan_score,
no_fg_score=self.no_fg_score,
reduction=self.reduction,
)
]
def compute(self) -> torch.Tensor:
return torch.mean(torch.stack(self.dice_by_steps), 0)
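For what it's worth, my current suspicion is that every per-step dice value appended to the list state still carries its autograd graph, so the graphs of all steps stay alive until compute() runs at the end of the epoch. A sketch of the variant I am considering, which only stores detached copies (reusing the names from the snippet above):
def update(self, input: torch.Tensor, target: torch.Tensor) -> None:
    dice = differentiable_dice_score(
        input=input,
        target=target,
        bg=self.include_background,
        nan_score=self.nan_score,
        no_fg_score=self.no_fg_score,
        reduction=self.reduction,
    )
    # detach before appending so the per-step graphs are not retained for the whole epoch
    self.dice_by_steps += [dice.detach()]
The epoch-level value returned by compute() would then no longer be differentiable, so I am not sure this is compatible with using the metric as a loss, which is part of my question.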
Expected behavior
I would expect to be able to use metrics/losses implemented using the Metric API exactly like I would if they inherited from nn.Module. |
Slurm resubmit at the end of epoch. | [
"feature",
"help wanted",
"won't fix"
] | From my understanding, the current resubmit will stop the model in the middle of an epoch, which may cause problems with dataloader resuming.
Would it be possible for Lightning to automatically estimate whether a new epoch can be finished within the time limit, and decide whether to halt or continue at the end of each epoch?
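A rough sketch of what I have in mind, written as a user-side callback (the walltime is passed in explicitly here; the real feature would read it from the job):
import time
from pytorch_lightning.callbacks import Callback

class HaltAtEpochBoundary(Callback):
    def __init__(self, walltime_seconds):
        self.deadline = time.time() + walltime_seconds
        self.epoch_start = None

    def on_epoch_start(self, trainer, pl_module):
        self.epoch_start = time.time()

    def on_epoch_end(self, trainer, pl_module):
        epoch_duration = time.time() - self.epoch_start
        # halt cleanly if another epoch would not fit in the remaining walltime
        if self.deadline - time.time() < epoch_duration:
            trainer.should_stop = True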
How to log by epoch for both training and validation on 1.0.0rc4 / 1.0.0rc5 / 1.0.0 | [
"question"
] | What is your question?
I have been trying out pytorch-lightning 1.0.0rc5 and wanted to log only at epoch end for both training and validation, with the epoch number on the x-axis. I noticed that training_epoch_end now does not allow returning anything. However, I noticed that for training I can achieve what I want by doing:
def training_epoch_end(self, outputs):
loss = compute_epoch_loss_from_outputs(outputs)
self.log('step', self.trainer.current_epoch)
self.log('loss', {'train': loss})
It sets the step to be the epoch number and uses it for the x-axis, just as I wanted. I have not found in the documentation whether this is how it is intended to be logged. I am also a bit confused about the result objects. Nevertheless, this code seems quite simple and logical, so I thought this could be one of the possible intended ways of logging per epoch.
I tried to do the same for validation as follows:
def validation_epoch_end(self, outputs):
loss = compute_epoch_loss_from_outputs(outputs)
self.log('step', self.trainer.current_epoch)
self.log('loss', {'valid': loss})
However, in the case of validation the x-axis is the number of batches in validation and an additional step graph appears in tensorboard.
Based on this I have some questions. Is this an intended way of logging per epoch? If yes, is the idea that the same behavior is obtained for both training and validation? If this is not the intended way of logging per epoch, where can I read about how this is planned for version 1.0.0?
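For completeness, the simplest alternative I can think of is to let Lightning aggregate per epoch (though then the x-axis is the global step at which the epoch ended, not the epoch number); compute_step_loss is a placeholder:
def training_step(self, batch, batch_idx):
    loss = self.compute_step_loss(batch)  # placeholder for the real loss computation
    # on_step=False, on_epoch=True -> Lightning averages and logs once per epoch
    self.log('loss/train', loss, on_step=False, on_epoch=True)
    return loss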
What's your environment?
OS: Linux
Packaging: pip
Version: 1.0.0rc5 |
Graceful interrupt not graceful | [
"feature",
"help wanted",
"won't fix"
] | The current keyboard interrupt will call on_train_end, which saves a checkpoint. However, when resuming from that saved checkpoint, training starts a new epoch, which is not graceful: ideally it should restart from the middle of the epoch; otherwise it should not save the interrupted checkpoint with the same name as normal checkpoints (for example, save it under a different name).
Unable to save model using torch.save in pytorch lightning version 0.10 | [
"bug",
"help wanted"
] | 🐛 Bug
torch.save(trainer.model, "model.pth") throwing error in pytorch lightning version 0.10
Please reproduce using the following code
import os
import torch
from torch import nn
import torch.nn.functional as F
from torchvision.datasets import MNIST
from torch.utils.data import DataLoader, random_split
from torchvision import transforms
import pytorch_lightning as pl
class LitAutoEncoder(pl.LightningModule):
def __init__(self):
super().__init__()
self.encoder = nn.Sequential(nn.Linear(28 * 28, 128), nn.ReLU(), nn.Linear(128, 3))
self.decoder = nn.Sequential(nn.Linear(3, 128), nn.ReLU(), nn.Linear(128, 28 * 28))
def forward(self, x):
# in lightning, forward defines the prediction/inference actions
embedding = self.encoder(x)
return embedding
def training_step(self, batch, batch_idx):
# training_step defined the train loop. It is independent of forward
x, y = batch
x = x.view(x.size(0), -1)
z = self.encoder(x)
x_hat = self.decoder(z)
loss = F.mse_loss(x_hat, x)
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
return optimizer
dataset = MNIST(os.getcwd(), download=True, transform=transforms.ToTensor())
train, val = random_split(dataset, [55000, 5000])
autoencoder = LitAutoEncoder()
trainer = pl.Trainer(max_epochs=1)
trainer.fit(autoencoder, DataLoader(train), DataLoader(val))
torch.save(trainer.model, "model.pth")
To Reproduce
Install pytorch lightning 0.9 and run the script mentioned above. Model will get saved successfully.
Upgrade the pytorch-lightning version to 0.10 using pip install pytorch_lightning==0.10 and run the same script; the error will be reproduced.
Expected behavior
Model should be saved as model.pth file.
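Possible workarounds in the meantime (not verified against 0.10 specifically) are saving only the weights or using Lightning's own checkpointing:
# save only the weights
torch.save(autoencoder.state_dict(), "model_state.pth")
# or use Lightning's checkpoint format
trainer.save_checkpoint("model.ckpt")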
Environment
CUDA:
GPU:
available: False
version: 10.2
Packages:
numpy: 1.19.2
pyTorch_debug: False
pyTorch_version: 1.6.0
pytorch-lightning: 0.10.0
tqdm: 4.48.2
System:
OS: Linux
architecture:
64bit
ELF
processor: x86_64
python: 3.8.2
version: 119~16.04.1-Ubuntu SMP Tue Sep 8 14:54:40 UTC 2020
Additional context
Stack Trace
Traceback (most recent call last):
File "encoder.py", line 43, in <module>
torch.save(trainer.model, "model.pth")
File "/home/ubuntu/anaconda3/envs/pytorch_p38/lib/python3.8/site-packages/torch/serialization.py", line 364, in save
_save(obj, opened_zipfile, pickle_module, pickle_protocol)
File "/home/ubuntu/anaconda3/envs/pytorch_p38/lib/python3.8/site-packages/torch/serialization.py", line 466, in _save
pickler.dump(obj)
TypeError: 'NoneType' object is not callable |
Weird warning popping out. Unable to understand | [
"question"
] | I'm getting this warning only after the first epoch:
/usr/local/lib/python3.6/dist-packages/torch/optim/lr_scheduler.py:200: UserWarning: Please also save or load the state of the optimizer when saving or loading the scheduler.
warnings.warn(SAVE_STATE_WARNING, UserWarning)
I'm unable to understand this. |
How do you correctly subclass an Accelerator? | [
"question"
] | ❓ Questions and Help
What is your question?
I am trying to subclass DDPCPUSpawnAccelerator for test purposes.
Trainer now has an accelerator argument for you to pass an accelerator object. On the other hand, the accelerator has a trainer argument. I assume the latter can be initialized to None to avoid this chicken-and-egg issue.
However, the following snippet seems to get stuck.
Code
from pytorch_lightning import Trainer
from pytorch_lightning.accelerators import DDPCPUSpawnAccelerator
from pytorch_lightning.cluster_environments import TorchElasticEnvironment
from tests.base.boring_model import BoringModel
class TestAccelerator(DDPCPUSpawnAccelerator):
def __init__(self, nprocs):
super().__init__(None, nprocs, cluster_environment=TorchElasticEnvironment())
def test_accelerator(tmpdir):
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
max_epochs=2,
log_every_n_steps=1,
weights_summary=None,
accelerator=TestAccelerator(2)
)
trainer.fit(model)
What's your environment?
OS: Linux
Packaging: pip
Version: Master |
Strange validation_step and global_step behavior after every epoch | [
"feature",
"help wanted",
"won't fix",
"discussion"
] | 🐛 Bug
The following was produced with the BoringModel provided. With val_check_interval=10, last validation_step of first epoch was at global_step=59 and first validation_step of second epoch was at global_step=73, and so on. It seems like it is always off by len(dataset) % val_check_interval every time after an epoch. I think it is because the counter resets per epoch. In this case, the length of dataset is 64 and val_check_interval is 10. After the last validation_step in the first epoch at global_step=59, the first epoch ended at global_step=63, the counter resets and so the first validation_step of second epoch is at global_step=73.
Also, a while back I submitted an issue also related to val_check_interval. I think it would be nice to be able to log training_step and validation_step at the same global_step. My suggestion is to simply trigger validation_step when global_step % val_check_interval == 0, so in training_step we can manually trigger logging (for example, logging images) with this simple global_step % val_check_interval == 0 condition. This would greatly help debug models and plot graphs that training and validation logs are aligned. Thanks in advance :)
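To make the suggestion concrete, here is a sketch of the manual trigger I mean (names follow the BoringModel reproduced below; the val_check_interval value is assumed to be available on the module, e.g. stored in __init__):
def training_step(self, batch, batch_idx):
    output = self.layer(batch)
    loss = self.loss(batch, output)
    if self.global_step % self.val_check_interval == 0:
        # log extra diagnostics (e.g. images) aligned with the validation runs
        self.log("train_loss_at_val_step", loss)
    return {"loss": loss}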
validation_step at global_step = 0
override any method to prove your bug
validation_step at global_step = 9
validation_step at global_step = 19
validation_step at global_step = 29
validation_step at global_step = 39
validation_step at global_step = 49
validation_step at global_step = 59
override any method to prove your bug
validation_step at global_step = 73
validation_step at global_step = 83
validation_step at global_step = 93
validation_step at global_step = 103
validation_step at global_step = 113
validation_step at global_step = 123
override any method to prove your bug
validation_step at global_step = 137
validation_step at global_step = 147
validation_step at global_step = 157
validation_step at global_step = 167
validation_step at global_step = 177
validation_step at global_step = 187
override any method to prove your bug
validation_step at global_step = 201
validation_step at global_step = 211
validation_step at global_step = 221
validation_step at global_step = 231
validation_step at global_step = 241
validation_step at global_step = 251
override any method to prove your bug
validation_step at global_step = 265
validation_step at global_step = 275
validation_step at global_step = 285
validation_step at global_step = 295
validation_step at global_step = 305
validation_step at global_step = 315
Please reproduce using the BoringModel and post here
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------
# --------------------------------------------
# --------------------------------------------
# USE THIS MODEL TO REPRODUCE A BUG YOU REPORT
# --------------------------------------------
# --------------------------------------------
# --------------------------------------------
import os
import torch
from torch.utils.data import Dataset
from pytorch_lightning import Trainer, LightningModule
class RandomDataset(Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return self.len
class BoringModel(LightningModule):
def __init__(self):
"""
Testing PL Module
Use as follows:
- subclass
- modify the behavior for what you want
class TestModel(BaseTestModel):
def training_step(...):
# do your own thing
or:
model = BaseTestModel()
model.training_epoch_end = None
"""
super().__init__()
self.layer = torch.nn.Linear(32, 2)
def forward(self, x):
return self.layer(x)
def loss(self, batch, prediction):
# An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls
return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction))
def step(self, x):
x = self.layer(x)
out = torch.nn.functional.mse_loss(x, torch.ones_like(x))
return out
def training_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def training_step_end(self, training_step_outputs):
return training_step_outputs
def training_epoch_end(self, outputs) -> None:
torch.stack([x["loss"] for x in outputs]).mean()
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"x": loss}
def validation_epoch_end(self, outputs) -> None:
torch.stack([x['x'] for x in outputs]).mean()
def test_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"y": loss}
def test_epoch_end(self, outputs) -> None:
torch.stack([x["y"] for x in outputs]).mean()
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
def run_test():
class TestModel(BoringModel):
def on_train_epoch_start(self) -> None:
print('override any method to prove your bug')
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
print("validation_step at global_step = ", self.global_step)
return {"x": loss}
# fake data
train_data = torch.utils.data.DataLoader(RandomDataset(32, 64))
val_data = torch.utils.data.DataLoader(RandomDataset(32, 64))
test_data = torch.utils.data.DataLoader(RandomDataset(32, 64))
# model
model = TestModel()
trainer = Trainer(
default_root_dir=os.getcwd(),
# limit_train_batches=1,
limit_val_batches=1,
val_check_interval=10,
max_epochs=5,
weights_summary=None,
)
trainer.fit(model, train_data, val_data)
trainer.test(test_dataloaders=test_data)
if __name__ == '__main__':
run_test()
To Reproduce
Expected behavior
validation_step at global_step = 0
override any method to prove your bug
validation_step at global_step = 9
validation_step at global_step = 19
validation_step at global_step = 29
validation_step at global_step = 39
validation_step at global_step = 49
validation_step at global_step = 59
override any method to prove your bug
validation_step at global_step = 69
validation_step at global_step = 79
validation_step at global_step = 89
validation_step at global_step = 99
validation_step at global_step = 109
validation_step at global_step = 119
...
Environment
Please copy and paste the output from our
environment collection script
(or fill out the checklist below manually).
You can get the script and run it with:
wget https://raw.githubusercontent.com/PyTorchLightning/pytorch-lightning/master/tests/collect_env_details.py
# For security purposes, please check the contents of collect_env_details.py before running it.
python collect_env_details.py
CUDA:
- GPU:
- Tesla T4
- available: True
- version: 10.2
Packages:
- numpy: 1.19.1
- pyTorch_debug: False
- pyTorch_version: 1.6.0
- pytorch-lightning: 1.0.1
- tqdm: 4.50.1
System:
- OS: Linux
- architecture:
- 64bit
- ELF
- processor: x86_64
- python: 3.8.5
- version: #25~18.04.1-Ubuntu SMP Fri Sep 11 21:07:02 UTC 2020
Additional context |
Expand to_torchscript to support also TorchScript's trace method | [
"feature",
"help wanted"
] | 🚀 Feature
Allow for the user to easily choose between TorchScript's script or trace method to create a module.
Motivation
While TorchScript's script method will work for simple models, it will not always work out of the box when models rely on Python variables to be set. This requires the user to manually annotate the model to not run into issues with script().
TorchScript's trace method on the other hand creates a traced module that is determined by running a Tensor through the network and tracks what happens during this process.
This always works, but loses design choices if present in the model.
Both script and trace have their use cases, and with a minimal extension of this function, both methods can be used.
Pitch
Add a method argument that can be set to either script or trace (defaulting to script, which preserves the current behaviour).
Add an example_inputs argument that defaults to None and can be set to any Tensor. If None is provided, this function will automatically try to use self.example_input_array. The example input is automatically sent to the correct device.
Note: the name example_inputs cannot be changed, as this is the name of the argument trace() expects. If named otherwise, there can be a conflict with kwargs.
This change should not break any older scripts, as it defaults to script.
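A sketch of the proposed signature (argument names as described above; not a final implementation):
def to_torchscript(self, file_path=None, method='script', example_inputs=None, **kwargs):
    mode = self.training
    with torch.no_grad():
        if method == 'trace':
            if example_inputs is None:
                example_inputs = self.example_input_array
            scripted_module = torch.jit.trace(self.eval(), example_inputs, **kwargs)
        elif method == 'script':
            scripted_module = torch.jit.script(self.eval(), **kwargs)
        else:
            raise ValueError(f"method must be 'script' or 'trace', got {method}")
    # restore the original training/eval mode
    self.train(mode)
    if file_path is not None:
        torch.jit.save(scripted_module, file_path)
    return scripted_module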
Alternatives
Make no change and require the user to overwrite this function to use trace.
Additional context
Please assign me for this request. |
the self.log problem in validation_step() | [
"bug",
"help wanted"
] | As the docs say, we should use self.log in the latest version,
but the logged data are strange if we change EvalResult() to self.log(on_epoch=True).
When we check the data in TensorBoard, self.log() only logs the result of the last batch of each epoch, instead of the mean over all batches.
That is quite unreliable; until this issue is resolved, we have to switch back to EvalResult() for correct experiments.
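As a workaround for now, an explicit aggregation like the following sketch (compute_loss is a placeholder) gives the per-epoch mean we expect:
def validation_step(self, batch, batch_idx):
    return self.compute_loss(batch)  # placeholder for the real validation loss

def validation_epoch_end(self, outputs):
    # average over all validation batches and log once per epoch
    self.log('val_loss_epoch', torch.stack(outputs).mean())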
missing scaler.scale in manual_backward? | [
"bug",
"help wanted"
] | 🐛 Bug
When using manual_backward and precision=16, an exception is raised:
RuntimeError: unscale_() has already been called on this optimizer since the last update().
As far as I can see, the scaler is actually never used to scale the loss during manual_backward and in the example here it is not mentioned that one has to do it before.
Also, I cannot find any 'update' call
colab model to reproduce:
https://colab.research.google.com/drive/1n7-NxC0IJ3gYJGZLsoeOtu-oQc3I8UjF?usp=sharing |
on_after_backward is called before gradient is unscale_ when using mixed precision | [
"bug",
"help wanted",
"priority: 0"
] | 🐛 Bug
From my understanding, one of the main purposes of the on_after_backward callback is to check and log gradients. However, when AMP is enabled, the gradients you are accessing are not unscaled, i.e. all the numbers and norms you look at will be super large and not really useful.
To Reproduce
Use following code snippet to log some layer's l2 norm.
def on_after_backward(self):
with torch.no_grad():
if (self.model.*****.weight.grad is not None):
norm_value = self.model.*****.weight.grad.detach().norm(2).item()
self.log("norm2", norm_value)
The value registered will be wildly different when using 32bit precision vs. when you enable Native AMP using:
amp_backend='native',
precision=16,
Expected behavior
The weights accessible inside the on_after_backward callback should ideally be consistent, with or without AMP.
More specifically, lightning should consider calling the self.trainer.scaler.unscale_ before on_after_backward. Or maybe provide the option to do so.
Edit: Sorry, I just realized this might not be possible since unscale_ should be called after gradient accumulation, but on_after_backward is called for every step. This leads to another issue: when conducting gradient accumulation I sometimes need to manually check whether the current step is the one that does the weight update (the last step of accumulation). One possible solution for both of these issues is perhaps another separate callback that is called every N accumulation steps, right after unscale_ but right before gradient clipping and updating? Although this will definitely make things a bit complicated.
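One workaround for the logging part (a sketch; some_layer is a placeholder): divide the observed norm by the current loss scale, which GradScaler exposes via get_scale(); since the L2 norm scales linearly, this recovers the unscaled value without calling unscale_:
def on_after_backward(self):
    with torch.no_grad():
        grad = self.model.some_layer.weight.grad  # some_layer is a placeholder
        if grad is not None:
            # divide by the current loss scale to undo AMP scaling of the norm
            scale = self.trainer.scaler.get_scale() if self.trainer.scaler is not None else 1.0
            self.log("norm2", grad.detach().norm(2).item() / scale)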
Environment
CUDA:
GPU:
TITAN RTX
GeForce RTX 2080 SUPER
available: True
version: 10.2
Packages:
numpy: 1.19.1
pyTorch_debug: False
pyTorch_version: 1.6.0
pytorch-lightning: 1.0.1
tqdm: 4.49.0
System:
OS: Linux
architecture:
64bit
processor: x86_64
python: 3.7.9
version: #52-Ubuntu SMP Thu Sep 10 10:58:49 UTC 2020
Additional context
Currently Lightning calls self.trainer.scaler.unscale_(optimizer) if gradient_clip_val is enabled. Hence if i want to use norm clipping currently i can't manually do un-scale inside the callback. |
Trainer: min_steps arguments overwrites early_stopping functionality | [
"help wanted",
"question"
] | Hi,
I observed the following behaviour:
If I set min_steps to a large number (guaranteeing that the early stopping callback gets activated), the trainer will continue training even after reaching the min_steps. This error does not occur with min_epochs.
It will print "Trainer was signaled to stop but required minimum epochs (1) or minimum steps (1000) has not been met. Training will continue..." and continue training until the max_epoch flag.
Please reproduce using [the BoringModel and post here]
https://colab.research.google.com/drive/1MCmXCwiPpuDYPkKhmxcI431Jk4U2PoZl#scrollTo=4Dk6Ykv8lI7X
Expected behavior
My understanding is that if early stopping is triggered, the trainer should proceed until the number of minimum epochs is finished or minimum steps is reached and then trigger the early stopping.
Environment
It's all in the colab above. |
Accuracy RuntimeError: cannot infer num_classes when target is all zero | [
"bug",
"help wanted"
] | 🐛 Bug
ptl.metrics.Accuracy can't infer num_classes from the target vector.
Throws RuntimeError: cannot infer num_classes when target is all zero on each GPU, but inspection of the attributes shows that it should work.
The error is triggered in this call:
File /aidio/lightning_modules.py", line 340, in validation_step
self.val_acc(y_pred, y_target)
Where y_pred is:
tensor([[ 0.0339, -0.0203, 0.0025, -0.1014, 0.0925, 0.0877, 0.0401, -0.1383,
0.0699, -0.0062, -0.0090, 0.0610, -0.1358, -0.0682, -0.0389, 0.1113,
-0.0719, 0.0433, 0.1169, 0.0307, 0.0602, -0.1659, -0.1272, 0.1022,
0.0464, 0.0248, -0.0447, -0.0293, 0.0052, -0.1008, -0.0643, -0.0774],
[ 0.0339, -0.0203, 0.0025, -0.1014, 0.0925, 0.0877, 0.0401, -0.1383,
0.0699, -0.0062, -0.0090, 0.0610, -0.1358, -0.0682, -0.0389, 0.1113,
-0.0719, 0.0433, 0.1169, 0.0307, 0.0602, -0.1660, -0.1272, 0.1022,
0.0464, 0.0248, -0.0447, -0.0293, 0.0052, -0.1009, -0.0643, -0.0774],
[ 0.0339, -0.0203, 0.0025, -0.1014, 0.0925, 0.0877, 0.0401, -0.1383,
0.0699, -0.0062, -0.0090, 0.0610, -0.1358, -0.0681, -0.0389, 0.1113,
-0.0718, 0.0432, 0.1169, 0.0307, 0.0602, -0.1659, -0.1271, 0.1022,
0.0464, 0.0248, -0.0447, -0.0292, 0.0052, -0.1008, -0.0643, -0.0773],
[ 0.0339, -0.0203, 0.0025, -0.1014, 0.0925, 0.0877, 0.0401, -0.1383,
0.0699, -0.0062, -0.0090, 0.0610, -0.1358, -0.0682, -0.0389, 0.1113,
-0.0719, 0.0433, 0.1169, 0.0307, 0.0602, -0.1660, -0.1272, 0.1022,
0.0464, 0.0248, -0.0447, -0.0293, 0.0052, -0.1009, -0.0643, -0.0774],
[ 0.0339, -0.0203, 0.0025, -0.1014, 0.0925, 0.0877, 0.0401, -0.1383,
0.0699, -0.0062, -0.0090, 0.0610, -0.1358, -0.0682, -0.0389, 0.1113,
-0.0718, 0.0433, 0.1169, 0.0307, 0.0602, -0.1659, -0.1272, 0.1022,
0.0464, 0.0248, -0.0447, -0.0293, 0.0052, -0.1008, -0.0643, -0.0774],
[ 0.0339, -0.0203, 0.0025, -0.1014, 0.0925, 0.0877, 0.0401, -0.1383,
0.0699, -0.0062, -0.0090, 0.0610, -0.1358, -0.0682, -0.0389, 0.1113,
-0.0719, 0.0433, 0.1169, 0.0307, 0.0602, -0.1660, -0.1272, 0.1022,
0.0464, 0.0248, -0.0447, -0.0293, 0.0052, -0.1009, -0.0643, -0.0774]],
device='cuda:3')
and y_target is:
tensor([0, 0, 0, 0, 0, 0], device='cuda:3')
To Reproduce
I can't reproduce the error in a CPU environment with the given values, but it is somehow triggered in DDP mode on 4 GPUs.
The error is triggered in the evaluation loop before epoch 0 even starts, so I guess it's the sanity check or something.
This is my PyTorch Lightning scheme:
def __init__(self, hparams, num_classes, train_dataset, eval_dataset, test_dataset, *args, **kwargs):
super().__init__(*args, **kwargs)
self.hparams = hparams
self.wd = hparams.weight_decay
self.lr = hparams.learning_rate
self.batch_size = hparams.batch_size
self.loss = torch.nn.CrossEntropyLoss()
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.test_dataset = test_dataset
self.train_acc = ptl.metrics.Accuracy()
self.val_acc = ptl.metrics.Accuracy()
self.test_acc = ptl.metrics.Accuracy()
# After this constructor should define self.model and self.optimizer
def forward(self, x):
"""
No special modification required for lightning, define as you normally would
:param x:
:return:
"""
return self.model(x)
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop
:param batch:
:return:
"""
# forward pass
x, y_target = batch['x'], batch['y']
y_pred = self.forward(x)
# calculate metrics
loss = self.loss(y_pred, y_target)
self.train_acc(y_pred, y_target)
# log metrics
self.log('train_loss', loss, prog_bar=True, )
self.log('train_acc', self.train_acc, prog_bar=True, )
return loss
def validation_step(self, batch, batch_idx):
"""
Lightning calls this inside the validation loop
:param batch:
:return:
"""
x, y_target = batch['x'], batch['y']
y_pred = self.forward(x)
# calculate metrics
loss = self.loss(y_pred, y_target)
self.val_acc(y_pred, y_target)
# gather results
self.log('val_loss', loss, prog_bar=True, )
self.log('val_acc', self.val_acc, prog_bar=True, )
return loss
Expected behavior
Accuracy should be able to calculate the number of classes from y_pred instead of y_target, as the Accuracy parameters are:
preds (float or long tensor): (N, ...) or (N, C, ...) where C is the number of classes
target (long tensor): (N, ...)
Even if the y_target are all 0s, the number of classes could be inferred from y_pred.shape[1].
This error shouldn't be appearing.
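As a plain-torch fallback that never needs to infer the number of classes from the target (a sketch of what can be used to sidestep the error):
import torch

def batch_accuracy(preds: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    # preds: (N, C) logits/scores, target: (N,) class indices
    return (preds.argmax(dim=1) == target).float().mean()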
Environment
PyTorch Version (e.g., 1.0): 1.6.1
OS (e.g., Linux): Linux
Pytorch-Lighting Version: 1.0.1
Python version: 3.8.5
GPU models and configuration: 4 x GPU ~10 GB on DDP
Any other relevant information:
Additional context
The model I'm running has a high number of classes and a low batch size. The model is huge, so I need to distribute small batches across many GPUs. So my use case will always be classification training with a batch size of ~10 elements and ~50 classes. Thus, there will be many times when I'll need to compute the accuracy with a target vector that doesn't contain all the possible classes.
I think the number of classes shouldn't be inferred from the target vector at runtime in this use case.
The complete stack trace for each GPU is:
Traceback (most recent call last):
File "/mnt/ialabnas/homes/voyanedel/aidio/model_manager.py", line 271, in <module>
helper.train()
File "/mnt/ialabnas/homes/voyanedel/aidio/model_manager.py", line 106, in train
self.trainer.fit(self.module)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/trainer/states.py", line 48, in wrapped_fn
result = fn(self, *args, **kwargs)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 1046, in fit
self.accelerator_backend.train(model)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/accelerators/ddp_backend.py", line 57, in train
self.ddp_train(process_idx=self.task_idx, mp_queue=None, model=model)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/accelerators/ddp_backend.py", line 224, in ddp_train
results = self.trainer.run_pretrain_routine(model)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 1224, in run_pretrain_routine
self._run_sanity_check(ref_model, model)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 1257, in _run_sanity_check
eval_results = self._evaluate(model, self.val_dataloaders, max_batches, False)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/trainer/evaluation_loop.py", line 333, in _evaluate
output = self.evaluation_forward(model, batch, batch_idx, dataloader_idx, test_mode)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/trainer/evaluation_loop.py", line 661, in evaluation_forward
output = model(*args)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/torch/nn/modules/module.py", line 722, in _call_impl
result = self.forward(*input, **kwargs)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/overrides/data_parallel.py", line 174, in forward
output = self.module.validation_step(*inputs[0], **kwargs[0])
File "/mnt/ialabnas/homes/voyanedel/aidio/lightning_modules.py", line 344, in validation_step
raise e
File "/mnt/ialabnas/homes/voyanedel/aidio/lightning_modules.py", line 340, in validation_step
self.val_acc(y_pred, y_target)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/metrics/metric.py", line 84, in __call__
return apply_to_collection(self._orig_call(*args, **kwargs), torch.Tensor,
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/metrics/converters.py", line 81, in new_func
result = function_to_decorate(*args, **kwargs)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/metrics/converters.py", line 81, in new_func
result = function_to_decorate(*args, **kwargs)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/metrics/converters.py", line 58, in new_func
return func_to_decorate(*args, **kwargs)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/torch/nn/modules/module.py", line 722, in _call_impl
result = self.forward(*input, **kwargs)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/metrics/classification.py", line 87, in forward
return accuracy(pred=pred, target=target,
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/metrics/functional/classification.py", line 270, in accuracy
raise RuntimeError("cannot infer num_classes when target is all zero")
RuntimeError: cannot infer num_classes when target is all zero
Traceback (most recent call last):
File "/mnt/ialabnas/homes/voyanedel/aidio/model_manager.py", line 271, in <module>
helper.train()
File "/mnt/ialabnas/homes/voyanedel/aidio/model_manager.py", line 106, in train
self.trainer.fit(self.module)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/trainer/states.py", line 48, in wrapped_fn
result = fn(self, *args, **kwargs)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 1058, in fit
results = self.accelerator_backend.spawn_ddp_children(model)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/accelerators/ddp_backend.py", line 123, in spawn_ddp_children
results = self.ddp_train(local_rank, mp_queue=None, model=model, is_master=True)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/accelerators/ddp_backend.py", line 224, in ddp_train
results = self.trainer.run_pretrain_routine(model)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 1224, in run_pretrain_routine
self._run_sanity_check(ref_model, model)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 1257, in _run_sanity_check
eval_results = self._evaluate(model, self.val_dataloaders, max_batches, False)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/trainer/evaluation_loop.py", line 333, in _evaluate
output = self.evaluation_forward(model, batch, batch_idx, dataloader_idx, test_mode)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/trainer/evaluation_loop.py", line 661, in evaluation_forward
output = model(*args)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/torch/nn/modules/module.py", line 722, in _call_impl
result = self.forward(*input, **kwargs)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/overrides/data_parallel.py", line 174, in forward
output = self.module.validation_step(*inputs[0], **kwargs[0])
File "/mnt/ialabnas/homes/voyanedel/aidio/lightning_modules.py", line 344, in validation_step
raise e
File "/mnt/ialabnas/homes/voyanedel/aidio/lightning_modules.py", line 340, in validation_step
self.val_acc(y_pred, y_target)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/metrics/metric.py", line 84, in __call__
return apply_to_collection(self._orig_call(*args, **kwargs), torch.Tensor,
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/metrics/converters.py", line 81, in new_func
result = function_to_decorate(*args, **kwargs)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/metrics/converters.py", line 81, in new_func
result = function_to_decorate(*args, **kwargs)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/metrics/converters.py", line 58, in new_func
return func_to_decorate(*args, **kwargs)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/torch/nn/modules/module.py", line 722, in _call_impl
result = self.forward(*input, **kwargs)
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/metrics/classification.py", line 87, in forward
return accuracy(pred=pred, target=target,
File "/mnt/ialabnas/homes/voyanedel/miniconda3/envs/py385/lib/python3.8/site-packages/pytorch_lightning/metrics/functional/classification.py", line 270, in accuracy
raise RuntimeError("cannot infer num_classes when target is all zero")
RuntimeError: cannot infer num_classes when target is all zero |
Update the replay buffer in DQN | [
"question",
"won't fix"
] | Line 263-307 in DQN code in Bolts
It states that the training_step method should carry out a single step through the environment to update the buffer. However, I only see the loss calculation.
def training_step(self, batch: Tuple[torch.Tensor, torch.Tensor], _) -> OrderedDict:
"""
Carries out a single step through the environment to update the replay buffer.
Then calculates loss based on the minibatch recieved
Args:
batch: current mini batch of replay data
_: batch number, not used
Returns:
Training loss and log metrics
"""
# calculates training loss
loss = dqn_loss(batch, self.net, self.target_net)
if self.trainer.use_dp or self.trainer.use_ddp2:
loss = loss.unsqueeze(0)
# Soft update of target network
if self.global_step % self.sync_rate == 0:
self.target_net.load_state_dict(self.net.state_dict())
log = {
"total_reward": self.total_rewards[-1],
"avg_reward": self.avg_rewards,
"train_loss": loss,
"episodes": self.done_episodes,
"episode_steps": self.total_episode_steps[-1]
}
status = {
"steps": self.global_step,
"avg_reward": self.avg_rewards,
"total_reward": self.total_rewards[-1],
"episodes": self.done_episodes,
"episode_steps": self.total_episode_steps[-1],
"epsilon": self.agent.epsilon,
}
return OrderedDict(
{
"loss": loss,
"avg_reward": self.avg_rewards,
"log": log,
"progress_bar": status,
}
) |
How to log metric value in checkpoint file name with default save dir? | [
"question"
] | Currently, checkpoints are saved by default to a directory named with the job id, such as version_1986332/checkpoints/epoch=415.ckpt; I want to save them as version_1986332/checkpoints/epoch=415-loss=0.1.ckpt. I know I can set filepath='my/path/sample-mnist-{epoch:02d}-{val_loss:.2f}', but then it won't automatically save to the version_{job id} dir. How can I change only the checkpoint's filename?
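A hedged sketch of one possible approach (argument names and the automatic version handling may differ across versions): build the checkpoint path from the logger's versioned directory, so only the filename pattern changes:
import os
import pytorch_lightning as pl
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint

logger = TensorBoardLogger("lightning_logs")
checkpoint = ModelCheckpoint(
    # reuse the logger's version_* directory and only customize the filename pattern
    filepath=os.path.join(logger.log_dir, "checkpoints", "{epoch}-{loss:.2f}")
)
trainer = pl.Trainer(logger=logger, checkpoint_callback=checkpoint)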
Issue with epoch count with repeated save/restore | [
"bug",
"help wanted",
"priority: 0",
"checkpointing"
] | 🐛 Bug
I'm trying to save and restore the state of both a model and a pytorch-lightning trainer.
I suspect the epoch count is wrong because I'm not able to save and restore several times with the same max_epoch count.
Here's what I do:
Step 1: run model for max_epochs = 1. Save checkpoint (gets saved as epoch=0.ckpt)
Step 2: load previous checkpoint and rerun again with max_epochs = 1. No training is run (because 1 epoch was already run before). A checkpoint is saved again, however this is called epoch=1.ckpt.
Step 3: load checkpoint from step 2 and rerun again with max_epochs = 1. Training fails because it believes step 2 was run for 2 epochs (and here max is 1)
Output:
pytorch_lightning.utilities.exceptions.MisconfigurationException:
you restored a checkpoint with current_epoch=2
but the Trainer(max_epochs=1)
Code below to reproduce.
What am I doing wrong? This should be a possible scenario, right?
Thanks!
To Reproduce
Run code below 3 times from same location
import os
import torch
from torch import nn
import torch.nn.functional as F
from torchvision.datasets import MNIST
from torch.utils.data import DataLoader, random_split
from torchvision import transforms
import pytorch_lightning as pl
from pathlib import Path
class LitAutoEncoder(pl.LightningModule):
def __init__(self):
super().__init__()
self.encoder = nn.Sequential(nn.Linear(28 * 28, 128), nn.ReLU(), nn.Linear(128, 3))
self.decoder = nn.Sequential(nn.Linear(3, 128), nn.ReLU(), nn.Linear(128, 28 * 28))
def forward(self, x):
# in lightning, forward defines the prediction/inference actions
embedding = self.encoder(x)
return embedding
def training_step(self, batch, batch_idx):
# training_step defined the train loop. It is independent of forward
x, y = batch
x = x.view(x.size(0), -1)
z = self.encoder(x)
x_hat = self.decoder(z)
loss = F.mse_loss(x_hat, x)
self.log('train_loss', loss)
return loss
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
return optimizer
dataset = MNIST(os.getcwd(), download=True, transform=transforms.ToTensor())
train, val, _ = random_split(dataset, [5500, 500, len(dataset) - 6000])
def get_last_checkpoint(checkpoint_folder):
if os.path.exists(checkpoint_folder):
past_experiments = sorted(Path(checkpoint_folder).iterdir(), key=os.path.getmtime)
for experiment in past_experiments[::-1]:
experiment_folder = os.path.join(experiment, "checkpoints")
if os.path.exists(experiment_folder):
checkpoints = os.listdir(experiment_folder)
if len(checkpoints):
checkpoints.sort()
path = os.path.join(experiment_folder, checkpoints[-1])
return path
return None
chk = get_last_checkpoint('lightning_logs')
if chk is not None:
print("loading from ", chk)
autoencoder = LitAutoEncoder.load_from_checkpoint(chk)
else:
autoencoder = LitAutoEncoder()
trainer = pl.Trainer(max_epochs=1, resume_from_checkpoint=chk)
trainer.fit(autoencoder, DataLoader(train), DataLoader(val))
Expected behavior
Should not increase epochs on second run. Should be able to load checkpoint and save unchanged (several times)
Environment
CUDA:
- GPU:
- Quadro P2000 with Max-Q Design
- available: True
- version: 10.2
Packages:
- numpy: 1.19.2
- pyTorch_debug: True
- pyTorch_version: 1.8.0.dev20201014
- pytorch-lightning: 1.0.2
- tqdm: 4.50.2
System:
- OS: Windows
- architecture:
- 64bit
- WindowsPE
- processor: Intel64 Family 6 Model 158 Stepping 10, GenuineIntel
- python: 3.7.9
- version: 10.0.17763
Additional context |
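Until the off-by-one is fixed, one hedged workaround sketch for the script above (it reuses get_last_checkpoint and the other names defined there) is to read the epoch stored in the checkpoint and grow max_epochs by one on each resume:
chk = get_last_checkpoint('lightning_logs')
resume_epoch = 0
if chk is not None:
    # Lightning checkpoints store the epoch they were saved at under the "epoch" key
    resume_epoch = torch.load(chk, map_location="cpu")["epoch"]

trainer = pl.Trainer(max_epochs=resume_epoch + 1, resume_from_checkpoint=chk)
trainer.fit(autoencoder, DataLoader(train), DataLoader(val))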
Hparams are not automatically saved to WandB logger in 1.0.2 | [
"bug",
"help wanted"
] | 🐛 Bug
After updating to 1.0.2, when I assign self.hparams = args in the LightningModule, the hparams are no longer logged to WandB. This bug is not present in 1.0.0, however. Snippets of my code:
...
parser.add_argument("--description", type=str, default="Trainig")
...
args = parser.parse_args()
main(args)
# Inside main
logger = WandbLogger(name=args.description, project="myProject")
model = MyModule(args)
# Inside LightningModule
class MyModule(pl.LightningModule):
def __init__(self, args):
super().__init__()
self.hparams = args
... |
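One workaround sketch for this report (assuming the argparse argument names match the module's init parameters, which is not shown in the snippet above): let save_hyperparameters() capture the init arguments instead of assigning self.hparams directly, so the attached logger still receives them.
class MyModule(pl.LightningModule):
    def __init__(self, description: str = "Training", learning_rate: float = 1e-3):
        super().__init__()
        # records the init arguments in self.hparams and exposes them to the logger
        self.save_hyperparameters()

# hypothetical call site: unpack the argparse Namespace into keyword arguments
model = MyModule(**vars(args))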
Resuming training from a finished-training model results in a new incorrect checkpoint | [
"bug",
"duplicate",
"help wanted"
] | To verify whether a model had finished training, I ran the training script again.
However, when I try to evaluate with the model, I got error that:
pytorch_lightning.utilities.exceptions.MisconfigurationException:
you restored a checkpoint with current_epoch=11
but the Trainer(max_epochs=10)
After digging a little bit, it seems that since the model has finished training, it goes directly to on_train_end and saves the model checkpoint with current_epoch+1.
I am wondering if the design of saving checkpoints with "next epoch" is reasonable? Why not add the epoch number after resuming? |
Too many backwards with LBFGS | [
"bug",
"help wanted"
] | 🐛 Bug
When using LBFGS we have one backward step too many, because we call backward before the optimizer step (also for gradient accumulation), but the optimizer step gets a closure and therefore calls backward again.
To Reproduce
import torch
import pytorch_lightning as ptl
from pytorch_lightning import LightningModule
from torch.utils.data import Dataset
class RandomDictDataset(Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
a = self.data[index]
b = a + 2
return {"a": a, "b": b}
def __len__(self):
return self.len
class RandomDictStringDataset(Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
return {"id": str(index), "x": self.data[index]}
def __len__(self):
return self.len
class RandomDataset(Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return self.len
class BoringModel(LightningModule):
def __init__(self):
"""
Testing PL Module
Use as follows:
- subclass
- modify the behavior for what you want
class TestModel(BaseTestModel):
def training_step(...):
# do your own thing
or:
model = BaseTestModel()
model.training_epoch_end = None
"""
super().__init__()
self.layer = torch.nn.Linear(32, 2)
def forward(self, x):
return self.layer(x)
def loss(self, batch, prediction):
# An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls
return torch.nn.functional.cross_entropy(
prediction,
torch.ones(len(prediction), dtype=torch.long, device=prediction.device),
)
def training_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.log("loss", loss)
return loss
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.log("loss", loss)
return loss
def test_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
return loss
def configure_optimizers(self):
optimizer = torch.optim.LBFGS(self.parameters())
return optimizer
def train_dataloader(self):
return torch.utils.data.DataLoader(RandomDataset(32, 64), batch_size=16)
def val_dataloader(self):
return torch.utils.data.DataLoader(RandomDataset(32, 64), batch_size=16)
def test_dataloader(self):
return torch.utils.data.DataLoader(RandomDataset(32, 64), batch_size=16)
def main():
model = BoringModel()
trainer = ptl.Trainer(
distributed_backend="dp",
gpus=1,
)
trainer.fit(model)
if __name__ == "__main__":
main()
### Environment
Please copy and paste the output from our
[environment collection script](https://raw.githubusercontent.com/PyTorchLightning/pytorch-lightning/master/tests/collect_env_details.py)
(or fill out the checklist below manually).
You can get the script and run it with:
wget https://raw.githubusercontent.com/PyTorchLightning/pytorch-lightning/master/tests/collect_env_details.py
For security purposes, please check the contents of collect_env_details.py before running it.
python collect_env_details.py
* CUDA:
- GPU:
- GeForce RTX 2080 Ti
- GeForce RTX 2080 Ti
- available: True
- version: 10.2
* Packages:
- numpy: 1.19.2
- pyTorch_debug: False
- pyTorch_version: 1.6.0
- pytorch-lightning: 20201015
- tqdm: 4.50.2
* System:
- OS: Linux
- architecture:
- 64bit
- ELF
- processor: x86_64
- python: 3.8.5
- version: #52~18.04.1-Ubuntu SMP PREEMPT Thu Sep 10 13:34:23 UTC 2020
### Additional context
Probably we can fix this by passing a closure to all optimizers (to be more consistent). |
ModelCheckpoint(monitor='val_loss') crashes when used with self.log("val_loss") | [
"bug",
"help wanted"
] | 🐛 Bug
ModelCheckpoint is crashing with MisconfigurationException when using monitor and using self.log inside the validation_epoch_end function.
Please reproduce using the BoringModel and post here
https://colab.research.google.com/drive/1vqVx1l2tp9adKAeTUS8Q-zUUBQnZtbTY
To Reproduce
N/A
Expected behavior
ModelCheckpoint to work and monitor my val_loss
Environment
Please copy and paste the output from our
environment collection script
(or fill out the checklist below manually).
You can get the script and run it with:
wget https://raw.githubusercontent.com/PyTorchLightning/pytorch-lightning/master/tests/collect_env_details.py
# For security purposes, please check the contents of collect_env_details.py before running it.
python collect_env_details.py
CUDA:
GPU:
Tesla T4
available: True
version: 10.1
Packages:
numpy: 1.18.5
pyTorch_debug: False
pyTorch_version: 1.6.0+cu101
pytorch-lightning: 1.0.2
tqdm: 4.41.1
System:
OS: Linux
architecture:
64bit
processor: x86_64
python: 3.6.9
version: #1 SMP Thu Jul 23 08:00:38 PDT 2020
Additional context
N/A |
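For reference, a minimal sketch of the pattern the callback expects (PL ~1.0 style, where the ModelCheckpoint instance is passed via checkpoint_callback; LitModel is a hypothetical subclass of a BoringModel like the one in the linked notebook): log the monitored key somewhere in the validation loop under exactly the name the callback monitors.
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint

class LitModel(BoringModel):
    def validation_step(self, batch, batch_idx):
        output = self.layer(batch)
        loss = self.loss(batch, output)
        # the key here must match ModelCheckpoint(monitor=...)
        self.log("val_loss", loss, on_step=False, on_epoch=True, prog_bar=True)
        return loss

checkpoint_cb = ModelCheckpoint(monitor="val_loss", save_top_k=1, mode="min")
trainer = Trainer(max_epochs=2, checkpoint_callback=checkpoint_cb)
trainer.fit(LitModel())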
Viewing validation statistics by epoch (on x-axis) broken in Wandb | [
"bug",
"help wanted"
] | 🐛 Bug
Here's the boring model.
If you want to view the charts in Weights and Biases with epoch on the X-axis, you get a message that there is "no data available". Viewing with the step/global step on the X-axis still works. See the two images below:
With epoch on x-axis
With global_step on x-axis
I suspect this is related to the values that PL sends with the logs, for example this is what the CSVLogger produces:
For some strange reason, there is no epoch value for validation data - the place that should definitely have it...
This bug is extremely annoying if you are, for example, experimenting with different batch sizes - then just comparing steps is meaningless...
Environment
* CUDA:
- GPU:
- Tesla T4
- available: True
- version: 10.1
* Packages:
- numpy: 1.18.5
- pyTorch_debug: False
- pyTorch_version: 1.6.0+cu101
- pytorch-lightning: 1.0.2
- tqdm: 4.41.1
* System:
- OS: Linux
- architecture:
- 64bit
-
- processor: x86_64
- python: 3.6.9
- version: #1 SMP Thu Jul 23 08:00:38 PDT 2020
P.S.: Default PL in boring model should be changed to 1.0.2. |
loss=None and no logs when automatic_optimization=False | [
"bug",
"docs",
"logger"
] | 🐛 Bug
I think there is a bug when automatic_optimization=False: the loss is None (see pytorch-lightning/pytorch_lightning/trainer/training_loop.py, line 336 at 72f1976, loss=untouched_loss), which means that none of the checkpoint callbacks can work. There is no way to set the loss.
I would also add that in the documentation (https://pytorch-lightning.readthedocs.io/en/latest/optimizers.html#manual-optimization) the training_step does not return anything. However, if it does not return anything, none of the logs work because of pytorch-lightning/pytorch_lightning/trainer/training_loop.py, line 681 at 72f1976: if opt_closure_result is None:
Expected behavior
There should be a way to set the loss, and the behaviour when nothing is returned in training_step should be clear.
Environment
* CUDA:
- GPU:
- GeForce RTX 2080 Ti
- GeForce RTX 2080 Ti
- available: True
- version: 10.2
* Packages:
- numpy: 1.19.1
- pyTorch_debug: False
- pyTorch_version: 1.6.0
- pytorch-lightning: 1.0.2
- tqdm: 4.48.2
* System:
- OS: Linux
- architecture:
- 64bit
- ELF
- processor: x86_64
- python: 3.6.9
- version: #26-Ubuntu SMP Mon Jun 24 09:32:08 UTC 2019 |
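For context, a rough sketch of a manual-optimization training_step around PL 1.0; self.optimizers() and self.manual_backward() are assumed from the docs of that era and their exact signatures may differ between versions, and compute_loss is a hypothetical helper. Logging and returning the loss explicitly is what keeps monitors and checkpoint callbacks with something to track.
def training_step(self, batch, batch_idx):
    opt = self.optimizers()                  # assumed manual-optimization API
    output = self(batch)
    loss = self.compute_loss(output, batch)  # hypothetical loss helper

    self.manual_backward(loss, opt)          # assumed manual-optimization API
    opt.step()
    opt.zero_grad()

    # logging and returning the loss keeps checkpoint callbacks usable
    self.log("train_loss", loss, on_step=True, on_epoch=True)
    return loss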
<auto_select_gpus=True, gpus=-1> raise MisconfigurationException("GPUs requested but none are available.") | [
"bug",
"help wanted"
] | 🐛 Bug
auto_select_gpus
If auto_select_gpus is enabled and gpus is an integer, available GPUs are picked automatically.
However, if you set gpus=-1, it raises MisconfigurationException("GPUs requested but none are available.")
Please reproduce using the BoringModel:
bug_auto_select_gpus
Expected behavior
pick all gpus
Environment
CUDA:
GPU:
Quadro RTX 6000
available: True
version: 10.1
Packages:
numpy: 1.19.2
pyTorch_debug: False
pyTorch_version: 1.6.0+cu101
pytorch-lightning: 0.9.0
tqdm: 4.50.2
System:
OS: Linux
architecture:
64bit
processor: x86_64
python: 3.6.9
version: #206-Ubuntu SMP Fri Feb 28 05:02:04 UTC 2020
Additional context
(cc. @inmoonlight) |
[Feature] Add on_after_backward in Callback. Enable ModelGradTrackerCallback | [
"duplicate",
"feature",
"help wanted",
"good first issue",
"callback"
] | 🚀 Feature
Motivation
The call_hook for on_after_backward is already implemented, but the hook is not added to the Callback class.
It boils down to adding it to the Callback class.
Pitch
Adding this new hook to Callback could be used to implement something like a ModelGradTrackerCallback.
Alternatives
Additional context |
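Until the hook lands on Callback, a sketch of the same idea written directly on the LightningModule (which already has on_after_backward); the metric name, the logging frequency, and the use of self.log from this hook are assumptions made for illustration.
import torch
import pytorch_lightning as pl

class GradTrackingModule(pl.LightningModule):  # hypothetical module
    def on_after_backward(self):
        # runs after every backward pass, so gradients are populated
        if self.global_step % 25 == 0:
            norms = [p.grad.detach().norm() for p in self.parameters() if p.grad is not None]
            if norms:
                self.log("grad_norm", torch.stack(norms).norm(), on_step=True, on_epoch=False)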
use of add_embedding as logger.experiment.add_embedding | [
"bug",
"help wanted",
"3rd party"
] | 🐛 Bug
def validation_step(self, batch, batch_idx):
x, y = batch
mu, logvar = self.encode(x.view(-1, 784))
z = self.reparameterize(mu, logvar)
x_rec =self(z)
val_loss = self.loss_function(x_rec, x, mu, logvar)
if batch_idx == 0:
n = min(x.size(0), 8)
comparison = torch.cat([x[:n],
x_rec.view(args.batch_size, 1, 28, 28)[:n]])
# self.logger.experiment.add_image('images_loop',comparison,0)
grid = torchvision.utils.make_grid(comparison)
self.logger.experiment.add_image('images1', grid, 0)
self.logger.experiment.add_embedding(mu, metadata=y.tolist(), label_img=x, tag='embeddings_mu')
The console output:
Validation sanity check: 0it [00:00, ?it/s]Traceback (most recent call last):
File "/home/maulik/Documents/Tools/Learning/Deep_learning/Pytorch_lightning/vae.py", line 164, in <module>
trainer.fit(vae, train_loader, val_loader)
File "/home/maulik/anaconda3/envs/ml/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 440, in fit
results = self.accelerator_backend.train()
File "/home/maulik/anaconda3/envs/ml/lib/python3.8/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py", line 54, in train
results = self.train_or_test()
File "/home/maulik/anaconda3/envs/ml/lib/python3.8/site-packages/pytorch_lightning/accelerators/accelerator.py", line 66, in train_or_test
results = self.trainer.train()
File "/home/maulik/anaconda3/envs/ml/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 462, in train
self.run_sanity_check(self.get_model())
File "/home/maulik/anaconda3/envs/ml/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 648, in run_sanity_check
_, eval_results = self.run_evaluation(test_mode=False, max_batches=self.num_sanity_val_batches)
File "/home/maulik/anaconda3/envs/ml/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py", line 568, in run_evaluation
output = self.evaluation_loop.evaluation_step(test_mode, batch, batch_idx, dataloader_idx)
File "/home/maulik/anaconda3/envs/ml/lib/python3.8/site-packages/pytorch_lightning/trainer/evaluation_loop.py", line 171, in evaluation_step
output = self.trainer.accelerator_backend.validation_step(args)
File "/home/maulik/anaconda3/envs/ml/lib/python3.8/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py", line 78, in validation_step
output = self.__validation_step(args)
File "/home/maulik/anaconda3/envs/ml/lib/python3.8/site-packages/pytorch_lightning/accelerators/gpu_accelerator.py", line 86, in __validation_step
output = self.trainer.model.validation_step(*args)
File "/home/maulik/Documents/Tools/Learning/Deep_learning/Pytorch_lightning/vae.py", line 84, in validation_step
self.logger.experiment.add_embedding(mu, metadata=y.tolist(), label_img=x, tag='embeddings_mu')
File "/home/maulik/anaconda3/envs/ml/lib/python3.8/site-packages/torch/utils/tensorboard/writer.py", line 788, in add_embedding
fs = tf.io.gfile.get_filesystem(save_path)
AttributeError: module 'tensorflow._api.v2.io.gfile' has no attribute 'get_filesystem'
Workaround
30966-pytorch-issue
Please fix.
It works sometimes by adding
import tensorflow as tf
import tensorboard as tb
tf.io.gfile = tb.compat.tensorflow_stub.io.gfile
Thanks
Maulik |
Checkpoint is saving the model based on the last val_metric_step value and not val_metric_epoch | [
"help wanted",
"docs",
"checkpointing"
] | 🐛 Bug
The checkpoint callback did not save some models even though they achieved a better result in the monitored metric than the currently saved top-k models.
Expected behavior
Checkpoint callback saving the best scoring models based on a metric
Environment
I am using pytorch-lightning 1.0.2
Update:
I changed the checkpoint callback to add the monitored value to the name of the saved checkpoint. What I notice is that it is not the epoch-level value but the value from the last step of the epoch, so it is not taking the metric's average over the epoch, only its last value. |
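A sketch of the logging call that avoids this (assuming the monitored metric is produced with self.log in the validation loop, and compute_loss is a hypothetical helper): request epoch-level aggregation explicitly, so the callback compares the epoch mean rather than the last step's value.
def validation_step(self, batch, batch_idx):
    loss = self.compute_loss(batch)  # hypothetical loss helper
    # on_epoch=True -> the checkpoint monitor sees the epoch aggregate,
    # not the value from the final step of the epoch
    self.log("val_metric", loss, on_step=False, on_epoch=True)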
Advice on how to use a self-supervised regression scheme within a single step in pl | [
"question",
"won't fix"
] | Hi
I have the following scheme:
class refine_P(LightningModule):
def __init__(
self, hparams,
):
super().__init__()
self.model = ...      # backbone network (placeholder)
self.regressor = ...  # inner regressor (placeholder)
def training_step(self, batch, batch_idx, is_train=True):
out = self.model(batch)
self.regressor.reset_parameters()
base_loss = self.base_loss(out)
while condition:
out = self.regressor(out)
loss = self.regress_loss(out)
loss.backward()
...
return base_loss
I.e., without using PL, it would just be two training procedures, one nested within the other.
My question is: can I somehow create a trainer within this module and call it at each training step of refine_P? |
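Rather than nesting a second Trainer, one hedged sketch is to run the inner regression as a plain PyTorch loop inside training_step with its own optimizer, detaching the outer activations so Lightning only backpropagates the outer loss; the learning rate and step count below are arbitrary placeholders, and base_loss/regress_loss are the helpers from the scheme above.
def training_step(self, batch, batch_idx):
    out = self.model(batch)
    base_loss = self.base_loss(out)

    # inner self-supervised refinement with its own optimizer
    self.regressor.reset_parameters()
    inner_opt = torch.optim.SGD(self.regressor.parameters(), lr=1e-2)
    for _ in range(10):
        inner_opt.zero_grad()
        # detach so the inner backward never touches the outer graph
        inner_loss = self.regress_loss(self.regressor(out.detach()))
        inner_loss.backward()
        inner_opt.step()

    return base_loss  # Lightning optimises only the outer objective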
Add total params to weights_summary table | [
"duplicate",
"feature",
"help wanted",
"won't fix"
] | 🚀 Feature
Add total number of parameters when printing the weights_summary table.
Motivation
Since the total number of parameters for each layer is already calculated, it would be really informative if a total sum of number of parameters were also provided. Something like https://github.com/TylerYep/torch-summary or https://github.com/nmhkahn/torchsummaryX provide in their table.
Pitch
When printing weights_summary, there should also appear a summary of the whole model information, something like:
Totals
Total params 21.84k
Trainable params 21.84k
Non-trainable params 0.0
Alternatives
Maybe use one of the given tools (https://github.com/TylerYep/torch-summary or https://github.com/nmhkahn/torchsummaryX) inside?
Additional context
Example from https://github.com/TylerYep/torch-summary model summary:
==========================================================================================
Layer (type:depth-idx) Output Shape Param #
==========================================================================================
├─Conv2d: 1-1 [-1, 10, 24, 24] 260
├─Conv2d: 1-2 [-1, 20, 8, 8] 5,020
├─Dropout2d: 1-3 [-1, 20, 8, 8] --
├─Linear: 1-4 [-1, 50] 16,050
├─Linear: 1-5 [-1, 10] 510
==========================================================================================
Total params: 21,840
Trainable params: 21,840
Non-trainable params: 0
==========================================================================================
Input size (MB): 0.00
Forward/backward pass size (MB): 0.05
Params size (MB): 0.08
Estimated Total Size (MB): 0.14
==========================================================================================
Example from https://github.com/nmhkahn/torchsummaryX model summary:
=================================================================
Kernel Shape Output Shape Params Mult-Adds
Layer
0_conv1 [1, 10, 5, 5] [1, 10, 24, 24] 260.0 144.0k
1_conv2 [10, 20, 5, 5] [1, 20, 8, 8] 5.02k 320.0k
2_conv2_drop - [1, 20, 8, 8] - -
3_fc1 [320, 50] [1, 50] 16.05k 16.0k
4_fc2 [50, 10] [1, 10] 510.0 500.0
-----------------------------------------------------------------
Totals
Total params 21.84k
Trainable params 21.84k
Non-trainable params 0.0
Mult-Adds 480.5k
================================================================= |
segmentation fault when import pytorch_lightning | [
"question"
] | I am trying the very minimum code which imports torch and pytorch_lightning, with the following code:
import pytorch_lightning as pl
The import of pytorch_lightning fails immediately with the error: 29144 segmentation fault (core dumped) python
I am using pytorch-dev 1.7.0 as it is required for cuda11 with new GPUs, and I installed pytorch-lightning from source.
OS: Linux
Packaging: pip
Version 1.0.2 |
Batch size finder is not working if batch_size is specified in LightningDataModule | [
"bug",
"help wanted",
"trainer: tune"
] | 🐛 Bug
The batch size finder won't work if the batch size is specified only in the LightningDataModule (where it is natural to define it).
An instance of a LightningModule always has the attribute hparams; the batch_size finder raises a MisconfigurationException if batch_size isn't found there.
Please reproduce using the BoringModel and post here
https://colab.research.google.com/drive/1gruW3UwitVijzkhcUYIzHlIpoIB1shzt?usp=sharing
Expected behavior
Batch size finder works.
Environment
CUDA:
GPU:
Tesla T4
available: True
version: 10.1
Packages:
numpy: 1.18.5
pyTorch_debug: False
pyTorch_version: 1.6.0+cu101
pytorch-lightning: 0.10.0
tqdm: 4.41.1
System:
OS: Linux
architecture:
64bit
processor: x86_64
python: 3.6.9
version: #1 SMP Thu Jul 23 08:00:38 PDT 2020 |
Gif on the main repo page is outdated | [
"docs"
] | It uses TrainResult which was deprecated |
Repeated training in a loop with different seeds increases memory | [
"bug",
"help wanted",
"won't fix"
] | 🐛 Bug
I reproduced this using the BoringModel:
https://colab.research.google.com/drive/1HvWVVTK8j2Nj52qU4Q4YCyzOm0_aLQF3?usp=sharing
Because of the needs of my project, I need to run the program over and over again to measure the performance of the model, so each time I give the model a different seed. This also leads to a memory leak, and it makes me wonder why giving different seeds would cause this problem.
Based on the BoringModel code, I change the seed. Colab link:
https://colab.research.google.com/drive/1KUz-IFZ8RMK9O2Gd4XSaYbRe8iFFSD09?usp=sharing
Expected behavior
Code:
for i in range(0, 50):
    test_x(tmpdir)
    print(torch.cuda.max_memory_allocated() / 1024**2, '\n', '\n', '\n', '\n')
The value of torch.cuda.max_memory_allocated() keeps increasing.
In a real run, I ran out of memory in the second loop.
Environment
CUDA:
GPU:
Tesla P4
available: True
version: 10.1
Packages:
numpy: 1.18.5
pyTorch_debug: False
pyTorch_version: 1.6.0+cu101
pytorch-lightning: 1.0.1
tqdm: 4.41.1
System:
OS: Linux
architecture:
64bit
processor: x86_64
python: 3.6.9
version: #1 SMP Thu Jul 23 08:00:38 PDT 2020
Additional context
I'm waiting online for a solution. I've brought this up before, but that issue is closed; I'm not familiar with GitHub, so I guess I should repost the issue to get attention again. #4161 |
Metrics do not support multilabel tasks. | [
"bug",
"help wanted"
] | 🐛 Bug
Scikit-learn metrics deal well with multilabel tasks, but this doesn't seem to be supported in PyTorch Lightning metrics. There is #3350, but it seems to confuse multiclass with multilabel (multiple values to predict).
To Reproduce
Given predictions tensor:
tensor([[0., 0.],
[0., 0.],
[0., 0.],
[0., 0.],
[0., 0.],
[0., 0.],
[0., 0.],
[0., 0.]])
and labels tensor:
tensor([[1, 0],
[1, 0],
[1, 0],
[1, 0],
[1, 0],
[1, 0],
[1, 0],
[1, 0]])
The call to f1_score(met_preds,labels, class_reduction='macro') yields tensor(0.3333), because it flattens the tensors and macro-averages per class.
Expected behavior
I would expect it to be consistent with the call to
sk_f1_score(labels.numpy(), met_preds.numpy(), average='macro'), which yields 0.0, because it treats each column separately and macro-averages them per task.
This discrepancy also occurs for other metrics. For example sklearn deals with multilabel accuracy by using subset accuracy (0 here), but PL produces an accuracy score of 0.5.
Environment
PyTorch Version : 1.6
OS (e.g., Linux): OSX
How you installed PyTorch (conda, pip, source): conda
Python version: 3.7.8
Pytorch-Lightning version : 1.0.2 |
.fit() hangs when using DDP with relative imports in main code | [
"bug",
"help wanted",
"won't fix",
"distributed",
"priority: 2"
] | 🐛 Bug
My training script is a module within a package. If my module uses relative imports and ddp backend, it throws an error about relative imports and hangs. Using ddp_spawn backend and relative imports works as expected.
The process becomes unresponsive even to Ctrl-C and I have to kill it and its subprocesses by PID.
Traceback (most recent call last):
File "/home/shrek/pytorch-lightning-segmentation-lapa/seg_lapa/train.py", line 13, in <module>
from .networks.deeplab.deeplab import DeepLab
ImportError: attempted relative import with no known parent package
Using absolute imports instead of relative imports fixes the issue.
Please reproduce using the BoringModel
Not sure how to reproduce on the BoringModel, since this bug involves creating a python package and importing modules.
To Reproduce
Create a simple python package and import module using relative imports
# Relative Imports - hangs
from .networks.deeplab.deeplab import DeepLab
from .loss_func import CrossEntropy2D
from .dataloaders import LapaDataset, DatasetSplit
# Absolute Imports - works
# from seg_lapa.networks.deeplab.deeplab import DeepLab
# from seg_lapa.loss_func import CrossEntropy2D
# from seg_lapa.dataloaders import LapaDataset, DatasetSplit
trainer = pl.Trainer(gpus=[0, 1], distributed_backend="ddp", num_nodes=1, precision=32)
trainer.fit(model, train_loader)
Expected behavior
I would expect ddp to work regardless of absolute/relative imports. The model should start training per normal.
Environment
* CUDA:
- GPU:
- GeForce GTX 1080 Ti
- GeForce GTX 1080 Ti
- available: True
- version: 10.2
* Packages:
- numpy: 1.19.2
- pyTorch_debug: False
- pyTorch_version: 1.6.0
- pytorch-lightning: 1.0.2
- tqdm: 4.50.2
* System:
- OS: Linux
- architecture:
- 64bit
- ELF
- processor: x86_64
- python: 3.8.5
- version: #109-Ubuntu SMP Fri Jun 19 11:33:10 UTC 2020
PyTorch Version (e.g., 1.0): 1.6.0
OS (e.g., Linux): Ubuntu 18.04
How you installed PyTorch (conda, pip, source): pip install within conda env
Build command you used (if compiling from source): N/A
Python version: 3.8.5
CUDA/cuDNN version:
GPU models and configuration:
Any other relevant information:
Additional context |
Function IoU - Input Expectations | [
"docs"
] | 📚 Documentation
Change the input description to state that predictions should be an integer mask of shape [b, h, w] rather than raw logits of shape [b, c, h, w].
If logits are provided, the function still runs, but with a warning about the number of classes being set incorrectly: the num_classes determined by get_num_classes() is fed into stat_scores_multiple_classes(), where it is computed again with the now-incorrect number of classes.
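A sketch of the call pattern the updated description would imply (assuming the functional iou from pytorch_lightning.metrics of that era; shapes and class count are arbitrary): collapse the logits to an integer mask with argmax before calling the metric.
import torch
from pytorch_lightning.metrics.functional import iou  # PL ~1.0 functional metric

logits = torch.randn(4, 21, 64, 64)         # [b, c, h, w] raw network output
target = torch.randint(0, 21, (4, 64, 64))  # [b, h, w] integer mask
pred_mask = logits.argmax(dim=1)            # [b, h, w] integer mask
score = iou(pred_mask, target, num_classes=21)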
GAN example doesn't work | [
"bug",
"help wanted"
] | 🐛 Bug
The GAN example for MNIST doesn't work as advised in the script basic_gan_module.py
Please reproduce using the BoringModel and post here
To reproduce the issue just run:
basic_gan_module.py --gpus 1
To Reproduce
Expected behavior
The GAN network should train well on the MNIST dataset with the default settings.
Environment
CUDA:
GPU:
GeForce GTX 1080 Ti
available: True
version: 10.1
Packages:
numpy: 1.19.1
pyTorch_debug: False
pyTorch_version: 1.6.0
pytorch-lightning: 1.0.2
tqdm: 4.50.2
System:
OS: Linux
architecture:
64bit
ELF
processor: x86_64
python: 3.8.5
version: #1 SMP Tue Aug 25 17:23:54 UTC 2020 |
Tidy up returns from `ddp_train()` of accelerators | [
"bug",
"help wanted",
"priority: 0",
"distributed"
] | 🐛 Bug
There are currently inconsistent returns from the method ddp_train of accelerator classes.
For example, the method ddp_train of DDP2Accelerator returns results while ddp_train of DDPSpawnAccelerator returns None. Although I am not familiar with distributed training, it seems that both of the methods should return results (or both should return None) for consistency.
pytorch-lightning/pytorch_lightning/accelerators/ddp_spawn_accelerator.py
Line 76
in
eddf35a
def ddp_train(self, process_idx, mp_queue, model, is_master=False, proc_offset=0):
pytorch-lightning/pytorch_lightning/accelerators/ddp2_accelerator.py
Line 120
in
f37444f
def ddp_train(self, process_idx, mp_queue, model):
Expected returns
All methods whose names are the same (i.e. ddp_train()) should return the same type of object (e.g. results).
Additional context
This inconsistency was found while handling #4232. |
slurm auto re-queue inconsistency | [
"bug",
"help wanted",
"won't fix",
"checkpointing",
"environment: slurm",
"priority: 1"
] | Hi! I submitted a slurm job-array with pytorch lightning functionality. I used the suggested signal (#SBATCH --signal=SIGUSR1@90) and set distributed_backend to 'ddp' in the Trainer call. I did notice successful auto-resubmission this morning whenever my jobs were pre-emptied; however, I now notice that several of them have not completed and are not queued either. Wondering if this has been reported by someone earlier and any clue why this could happen? Is there a maximum to the number of times the jobs would be re-queued or other slurm rules that may prevent requeuing, etc.? My jobs might have been pre-emptied several times as I was running them on the low priority "non-capped" queue so as to occupy maximum number of gpus whenever they become available (already using my quota of high/medium priority queues). Thanks in advance! |
@abstractmethod on virtual methods | [
"bug",
"help wanted",
"good first issue",
"won't fix",
"priority: 1"
] | There are a number of methods of the LightningDataModule which are marked as @abstractmethod even though they're not really abstract, e.g. transfer_batch_to_device.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract methods are overridden.
The abstract methods can be called using any of the normal
'super' call mechanisms. abstractmethod() may be used to declare
abstract methods for properties and descriptors.
The issue is the linter complains that my DataModule cannot be instantiated since I've not implemented transfer_batch_to_device and my IDE highlights my lightning modules red.
Is there any need for this decorator? It seems that transfer_batch_to_device is virtual, not abstract, and all python methods are virtual. |
WandbLogger fails in 1.0.2 due to non-JSON serializable object | [
"bug",
"help wanted"
] | 🐛 Bug
After updating to PL 1.0.2, the WandbLogger fails with the following TypeError:
Traceback (most recent call last):
File "wandblogger_issue.py", line 12, in <module>
wandb_logger.log_hyperparams(vars(args))
File "/home/groups/mignot/miniconda3/envs/pl/lib/python3.7/site-packages/pytorch_lightning/utilities/distributed.py", line 35, in wrapped_fn
return fn(*args, **kwargs)
File "/home/groups/mignot/miniconda3/envs/pl/lib/python3.7/site-packages/pytorch_lightning/loggers/wandb.py", line 138, in log_hyperparams
self.experiment.config.update(params, allow_val_change=True)
File "/home/groups/mignot/miniconda3/envs/pl/lib/python3.7/site-packages/wandb/sdk/wandb_config.py", line 87, in update
self._callback(data=self._as_dict())
File "/home/groups/mignot/miniconda3/envs/pl/lib/python3.7/site-packages/wandb/sdk/wandb_run.py", line 587, in _config_callback
self._backend.interface.publish_config(data)
File "/home/groups/mignot/miniconda3/envs/pl/lib/python3.7/site-packages/wandb/interface/interface.py", line 496, in publish_config
cfg = self._make_config(config_dict)
File "/home/groups/mignot/miniconda3/envs/pl/lib/python3.7/site-packages/wandb/interface/interface.py", line 232, in _make_config
update.value_json = json_dumps_safer(json_friendly(v)[0])
File "/home/groups/mignot/miniconda3/envs/pl/lib/python3.7/site-packages/wandb/util.py", line 524, in json_dumps_safer
return json.dumps(obj, cls=WandBJSONEncoder, **kwargs)
File "/home/groups/mignot/miniconda3/envs/pl/lib/python3.7/json/__init__.py", line 238, in dumps
**kw).encode(obj)
File "/home/groups/mignot/miniconda3/envs/pl/lib/python3.7/json/encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "/home/groups/mignot/miniconda3/envs/pl/lib/python3.7/json/encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "/home/groups/mignot/miniconda3/envs/pl/lib/python3.7/site-packages/wandb/util.py", line 480, in default
return json.JSONEncoder.default(self, obj)
File "/home/groups/mignot/miniconda3/envs/pl/lib/python3.7/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type function is not JSON serializable
To Reproduce
Run the following code snippet to reproduce:
from argparse import ArgumentParser
from pprint import pprint
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import WandbLogger
if __name__ == "__main__":
parser = ArgumentParser()
parser = Trainer.add_argparse_args(parent_parser=parser)
args = parser.parse_args()
pprint(vars(args))
wandb_logger = WandbLogger()
wandb_logger.log_hyperparams(vars(args))
Expected behavior
Hyperparams are logged as usual without any TypeError.
Environment
* CUDA:
- GPU:
- available: False
- version: 10.2
* Packages:
- numpy: 1.19.1
- pyTorch_debug: False
- pyTorch_version: 1.6.0
- pytorch-lightning: 1.0.2
- tensorboard: 2.3.0
- tqdm: 4.50.2
* System:
- OS: Linux
- architecture:
- 64bit
-
- processor: x86_64
- python: 3.7.9
- version: #1 SMP Mon Jul 29 17:46:05 UTC 2019
Additional context
Pretty printing the arguments gives the following clue about the error:
{'accelerator': None,
'accumulate_grad_batches': 1,
'amp_backend': 'native',
'amp_level': 'O2',
'auto_lr_find': False,
'auto_scale_batch_size': False,
'auto_select_gpus': False,
'automatic_optimization': True,
'benchmark': False,
'check_val_every_n_epoch': 1,
'checkpoint_callback': True,
'default_root_dir': None,
'deterministic': False,
'distributed_backend': None,
'fast_dev_run': False,
'flush_logs_every_n_steps': 100,
'gpus': <function _gpus_arg_default at 0x7f26b7788f80>,
'gradient_clip_val': 0,
'limit_test_batches': 1.0,
'limit_train_batches': 1.0,
'limit_val_batches': 1.0,
'log_every_n_steps': 50,
'log_gpu_memory': None,
'logger': True,
'max_epochs': 1000,
'max_steps': None,
'min_epochs': 1,
'min_steps': None,
'num_nodes': 1,
'num_processes': 1,
'num_sanity_val_steps': 2,
'overfit_batches': 0.0,
'precision': 32,
'prepare_data_per_node': True,
'process_position': 0,
'profiler': None,
'progress_bar_refresh_rate': 1,
'reload_dataloaders_every_epoch': False,
'replace_sampler_ddp': True,
'resume_from_checkpoint': None,
'sync_batchnorm': False,
'terminate_on_nan': False,
'tpu_cores': <function _gpus_arg_default at 0x7f26b7788f80>,
'track_grad_norm': -1,
'truncated_bptt_steps': None,
'val_check_interval': 1.0,
'weights_save_path': None,
'weights_summary': 'top'}
I assume the issue comes from the gpus and tpu_cores values, which default to function objects when not explicitly supplied as arguments. |
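A small workaround sketch until the defaults are made serializable: drop callable values from the parsed arguments before handing them to the logger (this reuses only calls already shown in the report).
from argparse import ArgumentParser
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import WandbLogger

parser = Trainer.add_argparse_args(ArgumentParser())
args = parser.parse_args()

# filter out entries json cannot encode, e.g. the default-factory functions
# behind gpus / tpu_cores when they are not supplied on the command line
params = {k: v for k, v in vars(args).items() if not callable(v)}

wandb_logger = WandbLogger()
wandb_logger.log_hyperparams(params)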
k-fold cross validation using DataModule | [
"duplicate",
"feature",
"help wanted",
"question",
"data handling"
] | DataModule is great!
I'm wondering how it can be used to handle k-fold cross-validation. |
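One possible sketch, not an official API: parameterize a LightningDataModule by a fold index (sklearn's KFold, the fold count, and the commented driver loop are all assumptions here) and run one fit per fold.
import pytorch_lightning as pl
from sklearn.model_selection import KFold
from torch.utils.data import DataLoader, Subset

class KFoldDataModule(pl.LightningDataModule):  # hypothetical sketch
    def __init__(self, dataset, fold_index: int, num_folds: int = 5, batch_size: int = 32):
        super().__init__()
        self.dataset = dataset
        self.fold_index = fold_index
        self.num_folds = num_folds
        self.batch_size = batch_size

    def setup(self, stage=None):
        # deterministic split so every fold index sees the same partitioning
        kfold = KFold(n_splits=self.num_folds, shuffle=True, random_state=42)
        splits = list(kfold.split(range(len(self.dataset))))
        train_idx, val_idx = splits[self.fold_index]
        self.train_set = Subset(self.dataset, train_idx)
        self.val_set = Subset(self.dataset, val_idx)

    def train_dataloader(self):
        return DataLoader(self.train_set, batch_size=self.batch_size, shuffle=True)

    def val_dataloader(self):
        return DataLoader(self.val_set, batch_size=self.batch_size)

# one Trainer.fit per fold, each with a fresh model, e.g.:
# for fold in range(5):
#     trainer = pl.Trainer(max_epochs=10)
#     trainer.fit(MyModel(), datamodule=KFoldDataModule(full_dataset, fold_index=fold))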
Limit_train_batches vs val_check_interval | [
"question",
"won't fix"
] | Does limit_train_batches=0.5 and val_check_interval=0.5 effectively do the same thing (minus impacting the total number of epochs)? That is, if my data loader is shuffling and I use limit_train_batches, can I safely assume that after 2 epochs I will have gone through the whole dataset or will I only go through the same 50% of the training data twice? The docs are not super clear on this point and this issue is a bit confusing as to what is going on: #2928
Also, is this statement true or false depending on single GPU vs DDP?
Thank you! |
Segmentation Fault when training 3D CNN using 4 GPUs with batch_size=8 | [
"bug",
"help wanted",
"won't fix"
] | 🐛 Bug
I'm trying to train a 3D CNN using 4 GPUs, batch_size=8, and num_workers >= 4 (ddp backend). I'm using a GCP VM with 16 cores and 60 GB memory. The data is stored on a mounted disk and is roughly 3 TB.
I can successfully train using 2 GPUs, batch_size=4, and num_workers=4, but whenever I try increasing the number of GPUs, batch size, and num_workers, a segmentation fault gets thrown. It's not the same segfault stack trace every time, but the image below is the most common. I tried increasing cores and memory to 32/120 and it still errors out. The memory used doesn't stay stable either. I see the buff/cache increasing to about 85% of available memory, then the segfault gets thrown shortly after.
I followed the PyTorch Lightning website carefully and made sure the code didn't have anything that could cause slow down or memory leak. I followed all the tips for fast performance, multi-gpu training, etc. that's listed on docs website.
One more observation:
To test my code, I run training for 3 epochs using about 60 training examples and 60 validation. The test run works perfectly fine with batch_size=8, 4 GPUs, and num_workers=4. When I switch to normal training mode with full dataset, it errors out. I've noticed that anything more than 64 examples causes segfault.
Expected behavior
Environment
PyTorch Version (e.g., 1.0): 1.6.0+cu101
OS (e.g., Linux): Linux (Debian)
How you installed PyTorch (conda, pip, source): pip
Build command you used (if compiling from source):
Python version: 3.7.6
CUDA/cuDNN version: 10.1
GPU models and configuration: 4 x Nvidia Tesla V100
Any other relevant information:
Additional context
Data is stored as .npy files |
auto_select_gpu does not free memory allocated on GPU for DDP/Horovod | [
"bug",
"help wanted",
"won't fix",
"distributed",
"priority: 1"
] | 🐛 Bug
If using both auto_select_gpu=True and the ddp or horovod accelerator, the memory allocated by pick_single_gpu is not freed by torch.
This can't be reproduced on Colab since DDP isn't supported.
To Reproduce
Paste this code into a file and run it:
import pytorch_lightning as pl
from torch import nn
from torchvision.models import resnet18
from torchvision.datasets import CIFAR10
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
from torch.optim import SGD
class Model(pl.LightningModule):
def __init__(self):
super().__init__()
self.model = nn.Linear(3072, 10)
def train_dataloader(self):
dataset = CIFAR10("__pycache__", download=True, transform=ToTensor())
return DataLoader(dataset)
def configure_optimizers(self):
return SGD(self.model.parameters(), lr=0.01)
def training_step(self, batch, _):
x, label = batch
y = self.model(x.view(x.size(0), -1))
return nn.functional.cross_entropy(y, label)
trainer = pl.Trainer(accelerator="ddp", gpus=2, auto_select_gpus=True)
model = Model()
trainer.fit(model)
Expected behavior
The same amount of memory is allocated on both GPUs. However, nvidia-smi shows this:
Environment
* CUDA:
- GPU:
- GeForce RTX 2080 Ti
- GeForce RTX 2080 Ti
- GeForce RTX 2080 Ti
- GeForce RTX 2080 Ti
- available: True
- version: 10.2
* Packages:
- numpy: 1.19.2
- pyTorch_debug: False
- pyTorch_version: 1.6.0
- pytorch-lightning: 1.0.3
- tqdm: 4.50.1
* System:
- OS: Linux
- architecture:
- 64bit
- ELF
- processor: x86_64
- python: 3.8.5
- version: #110-Ubuntu SMP Tue Jun 23 02:39:32 UTC 2020 |
Problem with truncated_bptt_steps | [
"help wanted",
"working as intended"
] | 🐛 Bug
When setting truncated_bptt_steps we can observed 3 bugs:
1. An exception is raised when num_sanity_val_steps=2:
/usr/local/lib/python3.6/dist-packages/pytorch_lightning/core/lightning.py in get_progress_bar_dict(self)
1355
1356 if self.trainer.truncated_bptt_steps is not None:
-> 1357 tqdm_dict["split_idx"] = self.trainer.split_idx
1358
1359 if self.trainer.logger is not None and self.trainer.logger.version is not None:
AttributeError: 'Trainer' object has no attribute 'split_idx'
2. tbptt_split_batch method does not split.
To observe this behavior we need to turn off num_sanity_val_steps (num_sanity_val_steps=0) to ensure that the first error does not occur. Next we need to implement a custom tbptt_split_batch method.
def tbptt_split_batch(self, batch, split_size):
x, y = batch
print("x", x.shape, "y", y.shape)
print("split_size", split_size)
print("seq_len", x.shape[1])
splits = super().tbptt_split_batch(batch, batch_size)
print("pl spliter:", len(splits), [s[0].shape for s in splits])
splits = []
for t in range(0, x.shape[1], split_size):
batch_split = []
for i, x in enumerate(batch):
split_x = x[:, t: t + split_size]
batch_split.append(split_x)
splits.append(batch_split)
print("my spliter:", len(splits), [s[0].shape for s in splits])
return splits
output:
x torch.Size([32, 20, 100]) y torch.Size([32, 20])
split_size 10
seq_len 20
pl spliter: 1 [torch.Size([32, 20, 100])]
my spliter: 2 [torch.Size([32, 10, 100]), torch.Size([32, 10, 100])]
Each element of the batch should have structure (batch, time, ...) according to the documentation.
3. An exception is raised when training the model with the fixed tbptt_split_batch method.
/usr/local/lib/python3.6/dist-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
125 Variable._execution_engine.run_backward(
126 tensors, grad_tensors, retain_graph, create_graph,
--> 127 allow_unreachable=True) # allow_unreachable flag
128
129
RuntimeError: Trying to backward through the graph a second time, but the saved intermediate results have already been freed. Specify retain_graph=True when calling backward the first time.
4. Question about detaching hidden values.
Why do we have to detach the hidden values if back-propagation is computed at the end of the batch, after the model has used all the split bptt mini-batches?
From the documentation:
# Truncated back-propagation through time
def training_step(self, batch, batch_idx, hiddens):
# hiddens are the hiddens from the previous truncated backprop step
out, hiddens = self.lstm(data, hiddens)
return {
"loss": ...,
"hiddens": hiddens # remember to detach() this
}
Please reproduce using the BoringModel and post here
https://colab.research.google.com/drive/1rvNzw9V4L_jyLG-cA_83jHUbm5YMlahr?usp=sharing
Expected behavior
Code should not crash
tbptt_split_batch method should work correctly.
Code should not crash
A request to explain why we need to detach hidden values.
Environment
CUDA:
GPU:
Tesla T4
available: True
version: 10.1
Packages:
numpy: 1.18.5
pyTorch_debug: False
pyTorch_version: 1.6.0+cu101
pytorch-lightning: 1.0.3
tqdm: 4.41.1
System:
OS: Linux
architecture:
64bit
processor: x86_64
python: 3.6.9
version: #1 SMP Thu Jul 23 08:00:38 PDT 2020 |