library (1 value) | test_file (785 values) | test_function (length 1-295) | before (length 0-448k) | after (length 0-487k) | context_before (947 values) | context_after (length 0-16.3k) | commit_before (1 value) | commit_after (1 value) | change_type (3 values) |
---|---|---|---|---|---|---|---|---|---|
torch
|
test/distributed/pipeline/sync/test_phony.py
|
forward
|
def forward(ctx, input):
    phony = get_phony(input.device, requires_grad=False)
    return phony.detach()
|
import torch
from torch.distributed.pipeline.sync.phony import get_phony
class Phonify(torch.autograd.Function):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_pipe_without_rpc
|
def test_pipe_without_rpc():
    model = nn.Sequential(nn.Linear(1, 1))
    with pytest.raises(RuntimeError, match='Please initialize RPC framework'):
        pipe = Pipe(model, chunks=1)
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_parameters
|
def test_parameters(setup_rpc):
    model = nn.Sequential(nn.Linear(1, 1))
    pipe = Pipe(model, chunks=1)
    assert list(pipe.parameters()) != []
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_checkpoint_eval
|
def test_checkpoint_eval(setup_rpc):
    model = nn.Sequential(nn.Linear(1, 1))
    model = Pipe(model, chunks=2)
    input = torch.rand(2, 1)

    def find_grad_fn(grad_fn, name):
        if grad_fn is None:
            return False
        if grad_fn.__class__.__name__ == name:
            return True
        for next_grad_fn, _ in grad_fn.next_functions:
            if find_grad_fn(next_grad_fn, name):
                return True
        return False

    model.train()
    train_output = model(input)
    assert find_grad_fn(train_output.local_value().grad_fn, "CheckpointBackward")
    assert find_grad_fn(train_output.local_value().grad_fn, "RecomputeBackward")
    model.eval()
    eval_output = model(input)
    assert not find_grad_fn(eval_output.local_value().grad_fn, "CheckpointBackward")
    assert not find_grad_fn(eval_output.local_value().grad_fn, "RecomputeBackward")
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
find_grad_fn
|
def find_grad_fn(grad_fn, name):
    if grad_fn is None:
        return False
    if grad_fn.__class__.__name__ == name:
        return True
    for next_grad_fn, _ in grad_fn.next_functions:
        if find_grad_fn(next_grad_fn, name):
            return True
    return False
model.train()
train_output = model(input)
assert find_grad_fn(train_output.local_value().grad_fn, "CheckpointBackward")
assert find_grad_fn(train_output.local_value().grad_fn, "RecomputeBackward")
model.eval()
eval_output = model(input)
assert not find_grad_fn(eval_output.local_value().grad_fn, "CheckpointBackward")
assert not find_grad_fn(eval_output.local_value().grad_fn, "RecomputeBackward")
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_checkpoint_non_float_input
|
def test_checkpoint_non_float_input(setup_rpc):
    class ForkNonFloat(nn.Module):
        def forward(self, input):
            return (input * 2, torch.tensor([False]))

    class JoinNonFloat(nn.Module):
        def forward(self, input, non_float):
            return input * 2

    model = nn.Sequential(ForkNonFloat(), JoinNonFloat())
    model = Pipe(model, chunks=1, checkpoint="always")
    input = torch.rand(1, requires_grad=True)
    output = model(input)
    output.backward()
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
forward
|
def forward(self, input):
    return (input * 2, torch.tensor([False]))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class ForkNonFloat(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
forward
|
def forward(self, input):
    return (input * 2, torch.tensor([False]))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class ForkNonFloat(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_no_grad
|
def test_no_grad(setup_rpc):
    model = nn.Sequential(nn.Linear(1, 1))
    model = Pipe(model, chunks=2)
    input = torch.rand(2, 1)
    latent = None

    def hook(module, input, output):
        _ = module
        _ = input
        nonlocal latent
        latent = output

    partition = model.partitions[0]
    partition.register_forward_hook(hook)
    with torch.no_grad():
        model(input)
    assert latent.grad_fn is None
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
hook
|
def hook(module, input, output):
    _ = module
    _ = input
    nonlocal latent
    latent = output
partition = model.partitions[0]
partition.register_forward_hook(hook)
with torch.no_grad():
    model(input)
assert latent.grad_fn is None
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_exception
|
def test_exception(setup_rpc):
    class ExpectedException(Exception):
        pass

    class Raise(nn.Module):
        def forward(self, *_):
            raise ExpectedException()

    model = nn.Sequential(Raise())
    model = Pipe(model, chunks=1)
    with pytest.raises(ExpectedException):
        model(torch.rand(1))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
forward
|
def forward(self, input):
    return (input * 2, torch.tensor([False]))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class ForkNonFloat(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_exception_early_stop_asap
|
def test_exception_early_stop_asap(setup_rpc):
"""Even the first partitions have finished to process, the partition before
the failed partition should be killed as soon as possible.
"""
class ExpectedException(Exception):
pass
class Pass(nn.Module):
def forward(self, x):
return x
counter = 0
class Counter(nn.Module):
def forward(self, x):
time.sleep(0.1)
nonlocal counter
counter += 1
return x
class Raise(nn.Module):
def forward(self, x):
raise ExpectedException()
model = nn.Sequential(Pass(), Pass(), Counter(), Raise())
model = Pipe(model, chunks=3)
with pytest.raises(ExpectedException):
model(torch.rand(3))
# If the early stop doesn't work, it would be 3 instead.
assert counter == 2
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
__init__
|
def __init__(self, value):
    self.value = value
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class MyString:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_verify_nested_modules
|
def test_verify_nested_modules(setup_rpc):
    model = nn.Sequential(
        nn.Sequential(
            nn.Linear(32, 16).cuda(0),
            nn.Linear(16, 8).cuda(0)
        ),
        nn.Sequential(
            nn.Linear(8, 4).cuda(1),
            nn.Linear(4, 2).cuda(1)
        ),
    )
    pipe = Pipe(model)
    out = pipe(torch.rand(10, 32).cuda(0))
    assert out.local_value().device == torch.device("cuda:1")
    assert out.local_value().size() == torch.Size([10, 2])
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_verify_module_duplicate_parameters_on_same_device
|
def test_verify_module_duplicate_parameters_on_same_device(setup_rpc):
    class Surrogate(nn.Module):
        def __init__(self, module):
            super().__init__()
            self.module = module

    conv = nn.Conv2d(3, 3, 1)
    model = nn.Sequential(Surrogate(conv), Surrogate(conv))
    Pipe(model)
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
__init__
|
def __init__(self, value):
    self.value = value
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class MyString:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
__init__
|
def __init__(self, value):
    self.value = value
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class MyString:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
forward
|
def forward(self, input):
    return (input * 2, torch.tensor([False]))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class ForkNonFloat(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
forward
|
def forward(self, input):
    return (input * 2, torch.tensor([False]))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class ForkNonFloat(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
forward
|
def forward(self, input):
    return (input * 2, torch.tensor([False]))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class ForkNonFloat(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
forward
|
def forward(self, input):
    return (input * 2, torch.tensor([False]))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class ForkNonFloat(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
forward
|
def forward(self, input):
    return (input * 2, torch.tensor([False]))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class ForkNonFloat(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
forward
|
def forward(self, input):
    return (input * 2, torch.tensor([False]))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class ForkNonFloat(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
forward
|
def forward(self, input):
    return (input * 2, torch.tensor([False]))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class ForkNonFloat(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
forward
|
def forward(self, input):
    return (input * 2, torch.tensor([False]))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class ForkNonFloat(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_worker.py
|
test_compute_success
|
def test_compute_success():
"""Task.compute returns (True, (task, batch)) on success."""
def _42():
return Batch(torch.tensor(42))
with spawn_workers([torch.device("cpu")]) as (in_queues, out_queues):
t = Task(CPUStream, compute=_42, finalize=None)
in_queues[0].put(t)
ok, (task, batch) = out_queues[0].get()
assert ok
assert task is t
assert isinstance(batch, Batch)
assert batch[0].item() == 42
|
import threading
import pytest
import torch
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.distributed.pipeline.sync.stream import CPUStream
from torch.distributed.pipeline.sync.worker import Task, spawn_workers
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_worker.py
|
_42
|
def _42():
    return Batch(torch.tensor(42))
with spawn_workers([torch.device("cpu")]) as (in_queues, out_queues):
    t = Task(CPUStream, compute=_42, finalize=None)
    in_queues[0].put(t)
    ok, (task, batch) = out_queues[0].get()

    assert ok
    assert task is t
    assert isinstance(batch, Batch)
    assert batch[0].item() == 42
|
import threading
import pytest
import torch
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.distributed.pipeline.sync.stream import CPUStream
from torch.distributed.pipeline.sync.worker import Task, spawn_workers
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_worker.py
|
zero_div
|
def zero_div():
    0 / 0
with spawn_workers([torch.device("cpu")]) as (in_queues, out_queues):
    t = Task(CPUStream, compute=zero_div, finalize=None)
    in_queues[0].put(t)
    ok, exc_info = out_queues[0].get()

    assert not ok
    assert isinstance(exc_info, tuple)
    assert issubclass(exc_info[0], ZeroDivisionError)
|
import threading
import pytest
import torch
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.distributed.pipeline.sync.stream import CPUStream
from torch.distributed.pipeline.sync.worker import Task, spawn_workers
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_worker.py
|
detect_grad_enabled
|
def detect_grad_enabled():
    x = torch.rand(1, requires_grad=torch.is_grad_enabled())
    return Batch(x)
with torch.set_grad_enabled(grad_mode):
    with spawn_workers([torch.device("cpu")]) as (in_queues, out_queues):
        task = Task(CPUStream, compute=detect_grad_enabled, finalize=None)
        in_queues[0].put(task)
        ok, (_, batch) = out_queues[0].get()

        assert ok
        assert batch[0].requires_grad == grad_mode
|
import threading
import pytest
import torch
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.distributed.pipeline.sync.stream import CPUStream
from torch.distributed.pipeline.sync.worker import Task, spawn_workers
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_worker.py
|
test_worker_per_device
|
def test_worker_per_device():
cpu = torch.device("cpu")
cpu0 = torch.device("cpu", index=0)
fake1 = fake_device()
fake2 = fake_device()
with spawn_workers([cpu, cpu, cpu0, fake1, fake2]) as (in_queues, out_queues):
assert len(in_queues) == len(out_queues) == 5
# 0: cpu, 1: cpu, 2: cpu0
assert in_queues[0] is in_queues[1] is in_queues[2]
assert out_queues[0] is out_queues[1] is out_queues[2]
# 3: fake1, 4: fake2
assert in_queues[3] is not in_queues[4]
assert out_queues[3] is not out_queues[4]
|
import threading
import pytest
import torch
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.distributed.pipeline.sync.stream import CPUStream
from torch.distributed.pipeline.sync.worker import Task, spawn_workers
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipelining/model_registry.py
|
__init__
|
def __init__(self, d_hid):
    super().__init__()
    self.mm_param0 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
    self.mm_param1 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
    self.cval = torch.nn.Buffer(torch.randn((d_hid,), requires_grad=False))
    self.lin0 = torch.nn.Linear(d_hid, d_hid)
    self.lin1 = torch.nn.Linear(d_hid, d_hid)
|
import torch
from torch.autograd import Function
from torch.distributed.pipelining import pipe_split, SplitPoint
class ExampleCode(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/model_registry.py
|
forward
|
def forward(self, x):
    x = torch.mm(x, self.mm_param0)
    x = torch.relu(x)
    # try passing a value that doesn't require_grad across skip boundaries
    a_constant = self.cval.clone()
    x = self.lin0(x)
    pipe_split()
    x = torch.relu(x) + a_constant
    x = torch.mm(x, self.mm_param1)
    x = self.lin1(x)
    x = torch.relu(x)
    return x
|
import torch
from torch.autograd import Function
from torch.distributed.pipelining import pipe_split, SplitPoint
class ExampleCode(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/model_registry.py
|
__init__
|
def __init__(self, d_hid):
    super().__init__()
    self.mm_param0 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
    self.mm_param1 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
    self.cval = torch.nn.Buffer(torch.randn((d_hid,), requires_grad=False))
    self.lin0 = torch.nn.Linear(d_hid, d_hid)
    self.lin1 = torch.nn.Linear(d_hid, d_hid)
|
import torch
from torch.autograd import Function
from torch.distributed.pipelining import pipe_split, SplitPoint
class ExampleCode(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/model_registry.py
|
__init__
|
def __init__(self, d_hid):
    super().__init__()
    self.mm_param0 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
    self.mm_param1 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
    self.cval = torch.nn.Buffer(torch.randn((d_hid,), requires_grad=False))
    self.lin0 = torch.nn.Linear(d_hid, d_hid)
    self.lin1 = torch.nn.Linear(d_hid, d_hid)
|
import torch
from torch.autograd import Function
from torch.distributed.pipelining import pipe_split, SplitPoint
class ExampleCode(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_deferred_batch_norm_params
|
def test_deferred_batch_norm_params(checkpoint, setup_rpc):
    bn = nn.BatchNorm2d(3)
    pipe_bn = deepcopy(bn)
    pipe = Pipe(
        nn.Sequential(pipe_bn), chunks=1, checkpoint=checkpoint, deferred_batch_norm=True
    )
    x = torch.rand(4, 3, 10, 10)
    pipe(x).local_value().mean().backward()
    bn(x).mean().backward()
    assert pipe[0].weight.grad is not None
    assert pipe[0].bias.grad is not None
    assert torch.allclose(pipe[0].weight.grad, bn.weight.grad, atol=1e-4)
    assert torch.allclose(pipe[0].bias.grad, bn.bias.grad, atol=1e-4)
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_devices
|
def test_devices(setup_rpc):
    a = nn.Linear(1, 1)
    b = nn.Linear(1, 1)
    c = nn.Linear(1, 1)
    # There are extra two devices.
    model = nn.Sequential(a, b, c)
    model = Pipe(model)
    cpu = torch.device("cpu")
    # Extra devices must be discarded.
    assert model.devices == [cpu, cpu, cpu]
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_merged_partitions
|
def test_merged_partitions(setup_rpc):
    a = nn.Linear(1, 1).to(0)
    b = nn.Sequential(nn.Linear(1, 1), nn.Linear(1, 2)).to(0)
    c = nn.Linear(1, 1)
    d = nn.Linear(1, 2)
    model = nn.Sequential(a, b, c, d)
    model = Pipe(model)
    assert isinstance(model.partitions, nn.ModuleList)
    assert isinstance(model.partitions[0], PipeSequential)
    assert isinstance(model.partitions[1], PipeSequential)
    assert list(model.partitions[0]) == [a, b[0], b[1]]
    assert list(model.partitions[1]) == [c]
    assert list(model.partitions[2]) == [d]
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_deny_moving
|
def test_deny_moving(setup_rpc):
    a = nn.Linear(1, 1)
    b = nn.Linear(1, 1)
    model = nn.Sequential(a, b)
    model = Pipe(model)
    # Moving is denied.
    with pytest.raises(TypeError):
        model.cuda()
    with pytest.raises(TypeError):
        model.cpu()
    with pytest.raises(TypeError):
        model.to(torch.device("cuda"))
    with pytest.raises(TypeError):
        model.to(0)
    with pytest.raises(TypeError):
        model.to("cuda")
    with pytest.raises(TypeError):
        model.to(device=0)
    with pytest.raises(TypeError):
        model.to(torch.rand(1))
    with pytest.raises(TypeError):
        model.to(tensor=torch.rand(1))
    # Casting is allowed.
    model.half()
    model.to(torch.double)
    model.to(dtype=torch.float)
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_empty_module
|
def test_empty_module(setup_rpc):
    # Empty sequential module is not illegal.
    model = nn.Sequential()
    model = Pipe(model)
    assert model(torch.tensor(42)).local_value() == torch.tensor(42)
    # But only tensor or tensors is legal in Pipe.
    with pytest.raises(TypeError):
        model(42)
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_named_children
|
def test_named_children(setup_rpc):
    a = nn.Linear(1, 1)
    b = nn.Linear(1, 1)
    model = nn.Sequential(OrderedDict([("a", a), ("b", b)]))
    model = Pipe(model)
    names = {n for n, _ in model.named_modules()}
    assert "partitions.0.0" in names
    assert "partitions.1.0" in names
    # Pipe doesn't support __getattr__. Unlike nn.Sequential, Pipe requires
    # several methods in its namespace.
    with pytest.raises(AttributeError):
        model.a
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_verify_module_non_sequential
|
def test_verify_module_non_sequential(setup_rpc):
    with pytest.raises(TypeError, match="module must be nn.Sequential to be partitioned"):
        Pipe(nn.Module())
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
forward
|
def forward(self, input):
    return (input * 2, torch.tensor([False]))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class ForkNonFloat(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
__init__
|
def __init__(self, value):
    self.value = value
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class MyString:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
forward
|
def forward(self, input):
    return (input * 2, torch.tensor([False]))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class ForkNonFloat(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_with_device_wrapper
|
def test_with_device_wrapper(setup_rpc):
    fc1 = nn.Linear(16, 8).cuda(0)
    fc2 = nn.Linear(8, 4).cuda(1)
    dropout = nn.Dropout()
    model = nn.Sequential(fc1, fc2, WithDevice(dropout, 'cuda:1'))
    model = Pipe(model, chunks=8)
    assert torch.device('cuda:1') == model(torch.rand(16, 16).cuda(0)).local_value().device
    assert [torch.device('cuda:0'), torch.device('cuda:1')] == model.devices
    model = nn.Sequential(fc1, WithDevice(dropout, 'cuda:1'))
    model = Pipe(model, chunks=8)
    assert torch.device('cuda:1') == model(torch.rand(16, 16).cuda(0)).local_value().device
    assert [torch.device('cuda:0'), torch.device('cuda:1')] == model.devices
    model = nn.Sequential(fc1, WithDevice(fc2, 'cuda:0'))
    model = Pipe(model, chunks=8)
    assert torch.device('cuda:0') == model(torch.rand(16, 16).cuda(0)).local_value().device
    assert [torch.device('cuda:0')] == model.devices
    assert torch.device('cuda:0') == fc2.weight.device
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipeline.py
|
test_clock_cycles
|
def test_clock_cycles():
    assert list(_clock_cycles(1, 1)) == [[(0, 0)]]
    assert list(_clock_cycles(1, 3)) == [[(0, 0)], [(0, 1)], [(0, 2)]]
    assert list(_clock_cycles(3, 1)) == [[(0, 0)], [(1, 0)], [(2, 0)]]
    assert list(_clock_cycles(3, 3)) == [
        [(0, 0)],
        [(1, 0), (0, 1)],
        [(2, 0), (1, 1), (0, 2)],
        [(2, 1), (1, 2)],
        [(2, 2)],
    ]
    assert list(_clock_cycles(4, 2)) == [
        [(0, 0)],
        [(1, 0), (0, 1)],
        [(2, 0), (1, 1)],
        [(3, 0), (2, 1)],
        [(3, 1)],
    ]
|
from torch.distributed.pipeline.sync.pipeline import _clock_cycles
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_stream.py
|
test_new_stream_cuda
|
def test_new_stream_cuda(self):
    stream = new_stream(torch.device("cuda"))
    assert isinstance(stream, torch.cuda.Stream)
    assert stream != torch.cuda.default_stream()
|
import pytest
import torch
from torch.distributed.pipeline.sync.stream import (
CPUStream,
current_stream,
default_stream,
get_device,
is_cuda,
new_stream,
record_stream,
use_device,
use_stream,
wait_stream,
)
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class TestNewStream:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_stream.py
|
test_current_stream_cuda
|
def test_current_stream_cuda(self):
    stream = current_stream(torch.device("cuda"))
    assert isinstance(stream, torch.cuda.Stream)
    assert stream == torch.cuda.current_stream()
|
import pytest
import torch
from torch.distributed.pipeline.sync.stream import (
CPUStream,
current_stream,
default_stream,
get_device,
is_cuda,
new_stream,
record_stream,
use_device,
use_stream,
wait_stream,
)
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class TestCurrentStream:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
forward
|
def forward(self, input):
    return (input * 2, torch.tensor([False]))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class ForkNonFloat(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
forward
|
def forward(self, input):
    return (input * 2, torch.tensor([False]))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class ForkNonFloat(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
forward
|
def forward(self, input):
    return (input * 2, torch.tensor([False]))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class ForkNonFloat(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_nested_input
|
def test_nested_input(setup_rpc):
    class NestedInput(nn.Module):
        def __init__(self):
            super().__init__()
            self.fc_a = nn.Linear(1, 1)
            self.fc_b = nn.Linear(1, 1)

        def forward(self, inp):
            return inp

    model = nn.Sequential(NestedInput())
    model = Pipe(model, chunks=2)
    a = torch.rand(10, 1, requires_grad=True)
    b = torch.rand(10, 1, requires_grad=True)
    # TypeError: expected Tensor, but got tuple
    with pytest.raises(TypeError):
        model((a, (a, b))).local_value()
    # TypeError: expected Tensor, but got list
    with pytest.raises(TypeError):
        model((a, [a, b])).local_value()
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
__init__
|
def __init__(self, value):
    self.value = value
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class MyString:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
forward
|
def forward(self, input):
    return (input * 2, torch.tensor([False]))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class ForkNonFloat(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_input_pair
|
def test_input_pair(setup_rpc):
    class Two(nn.Module):
        def __init__(self):
            super().__init__()
            self.fc_a = nn.Linear(1, 1)
            self.fc_b = nn.Linear(1, 1)

        def forward(self, a, b):
            return (self.fc_a(a), self.fc_b(b))

    model = nn.Sequential(Two())
    model = Pipe(model, chunks=2)
    a = torch.rand(10, 1, requires_grad=True)
    b = torch.rand(10, 1, requires_grad=True)
    a_out, b_out = model(a, b).local_value()
    loss = (a_out + b_out).mean()
    loss.backward()
    assert a.grad is not None
    assert b.grad is not None
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
__init__
|
def __init__(self, value):
    self.value = value
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class MyString:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
forward
|
def forward(self, input):
    return (input * 2, torch.tensor([False]))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class ForkNonFloat(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_multi_sequence_input
|
def test_multi_sequence_input(setup_rpc):
    class MultiSeq(nn.Module):
        def forward(self, tup1, tup2):
            return tup1, tup2

    model = Pipe(nn.Sequential(MultiSeq()))
    with pytest.raises(TypeError):
        model(
            [torch.rand(10), torch.rand(10)],
            [torch.rand(10), torch.rand(10)]
        )
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_stream.py
|
test_wait_stream_cuda_cuda
|
def test_wait_stream_cuda_cuda(self, cuda_sleep):
    source = current_stream(torch.device("cuda"))
    target = new_stream(torch.device("cuda"))
    self._test_wait_stream(source, target, cuda_sleep)
|
import pytest
import torch
from torch.distributed.pipeline.sync.stream import (
CPUStream,
current_stream,
default_stream,
get_device,
is_cuda,
new_stream,
record_stream,
use_device,
use_stream,
wait_stream,
)
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class TestWaitStream:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_stream.py
|
test_record_stream_shifted_view
|
def test_record_stream_shifted_view(self, cuda_sleep):
    # Issue: https://github.com/pytorch/pytorch/issues/27366
    stream_alloc = new_stream(torch.device("cuda"))
    with torch.cuda.stream(stream_alloc):
        x = torch.rand(2, device=torch.device("cuda"))
    y = x[1:]
    assert y.data_ptr() > x.data_ptr()
    stream = new_stream(torch.device("cuda"))
    with use_stream(stream):
        cuda_sleep(0.5)
    record_stream(y, stream)
    data_ptr = x.data_ptr()
    del x, y
    stream_alloc.synchronize()
    with torch.cuda.stream(stream_alloc):
        z = torch.rand(2, device=torch.device("cuda"))
    assert z.data_ptr() != data_ptr
|
import pytest
import torch
from torch.distributed.pipeline.sync.stream import (
CPUStream,
current_stream,
default_stream,
get_device,
is_cuda,
new_stream,
record_stream,
use_device,
use_stream,
wait_stream,
)
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class TestRecordStream:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_transparency.py
|
sum_grad
|
def sum_grad(parameters):
    return sum([p.grad.sum() for p in parameters if p.grad is not None])
|
import torch
from torch import nn
from torch.distributed.pipeline.sync import Pipe
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_transparency.py
|
zero_grad
|
def zero_grad(parameters):
    for p in parameters:
        p.grad = None
inputs = torch.rand(8, 1)
model = nn.Sequential(nn.Linear(1, 2), nn.Linear(2, 4), nn.Linear(4, 2), nn.Linear(2, 1),)
# Without Pipe
outputs = model(inputs)
loss = outputs.mean()
loss.backward()
grad_without_pipe = sum_grad(model.parameters())
zero_grad(model.parameters())
# With Pipe
model = Pipe(model, chunks=4)
outputs = model(inputs).local_value()
loss = outputs.mean()
loss.backward()
grad_with_pipe = sum_grad(model.parameters())
# Both grads should be identical.
assert torch.allclose(grad_with_pipe, grad_without_pipe)
|
import torch
from torch import nn
from torch.distributed.pipeline.sync import Pipe
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_worker.py
|
test_compute_multithreading
|
def test_compute_multithreading():
"""Task.compute should be executed on multiple threads."""
thread_ids = set()
def log_thread_id():
thread_id = threading.current_thread().ident
thread_ids.add(thread_id)
return Batch(())
with spawn_workers([fake_device() for _ in range(2)]) as (in_queues, out_queues):
for i in range(2):
t = Task(CPUStream, compute=log_thread_id, finalize=None)
in_queues[i].put(t)
for i in range(2):
out_queues[i].get()
assert len(thread_ids) == 2
|
import threading
import pytest
import torch
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.distributed.pipeline.sync.stream import CPUStream
from torch.distributed.pipeline.sync.worker import Task, spawn_workers
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_worker.py
|
log_thread_id
|
def log_thread_id():
    thread_id = threading.current_thread().ident
    thread_ids.add(thread_id)
    return Batch(())
with spawn_workers([fake_device() for _ in range(2)]) as (in_queues, out_queues):
    for i in range(2):
        t = Task(CPUStream, compute=log_thread_id, finalize=None)
        in_queues[i].put(t)
    for i in range(2):
        out_queues[i].get()
assert len(thread_ids) == 2
|
import threading
import pytest
import torch
from torch.distributed.pipeline.sync.microbatch import Batch
from torch.distributed.pipeline.sync.stream import CPUStream
from torch.distributed.pipeline.sync.worker import Task, spawn_workers
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
forward
|
def forward(self, input):
    return (input * 2, torch.tensor([False]))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class ForkNonFloat(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_input_singleton
|
def test_input_singleton(setup_rpc):
    class One(nn.Module):
        def __init__(self):
            super().__init__()
            self.fc = nn.Linear(1, 1)

        def forward(self, a):
            return (self.fc(a),)

    model = nn.Sequential(One())
    model = Pipe(model, chunks=2)
    a = torch.rand(10, 1, requires_grad=True)
    (a_out,) = model(a).local_value()
    loss = a_out.mean()
    loss.backward()
    assert all(p.grad is not None for p in model.parameters())
    assert a.grad is not None
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
__init__
|
def __init__(self, value):
    self.value = value
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class MyString:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
forward
|
def forward(self, input):
    return (input * 2, torch.tensor([False]))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class ForkNonFloat(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_input_varargs
|
def test_input_varargs(setup_rpc):
    model = nn.Sequential(nn.Linear(1, 1))
    model = Pipe(model)
    a = torch.rand(1)
    b = torch.rand(1)
    # TypeError: forward() takes 2 positional arguments but 3 were given
    with pytest.raises(TypeError):
        model(a, b)
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_non_tensor
|
def test_non_tensor(setup_rpc):
    class NonTensor(nn.Module):
        def forward(self, _):
            return "hello"

    model = nn.Sequential(NonTensor())
    model = Pipe(model)
    x = torch.rand(1)
    with pytest.raises(TypeError):
        model(x)
    with pytest.raises(TypeError):
        model("hello")
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
forward
|
def forward(self, input):
    return (input * 2, torch.tensor([False]))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class ForkNonFloat(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
forward
|
def forward(self, input):
    return (input * 2, torch.tensor([False]))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class ForkNonFloat(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
forward
|
def forward(self, input):
return (input * 2, torch.tensor([False]))
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class ForkNonFloat(nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipelining/model_registry.py
|
forward
|
def forward(self, x):
x = torch.mm(x, self.mm_param0)
x = torch.relu(x)
# try passing a value that doesn't require_grad across skip boundaries
a_constant = self.cval.clone()
x = self.lin0(x)
pipe_split()
x = torch.relu(x) + a_constant
x = torch.mm(x, self.mm_param1)
x = self.lin1(x)
x = torch.relu(x)
return x
|
import torch
from torch.autograd import Function
from torch.distributed.pipelining import pipe_split, SplitPoint
class ExampleCode(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/model_registry.py
|
compute_dW
|
def compute_dW(self):
grad_output_fc1 = self.cached_context["fc1"].pop(0)
grad_output_fc2 = self.cached_context["fc2"].pop(0)
cached_input_fc1 = self.cached_context["fc1_input"].pop(0)
cached_input_fc2 = self.cached_context["fc2_input"].pop(0)
dW2 = grad_output_fc2.t().mm(cached_input_fc2)
db2 = grad_output_fc2.sum(0)
dW1 = grad_output_fc1.t().mm(cached_input_fc1)
db1 = grad_output_fc1.sum(0)
if self.fc1_weight.grad is not None:
self.fc1_weight.grad += dW1
self.fc1_bias.grad += db1
self.fc2_weight.grad += dW2
self.fc2_bias.grad += db2
else:
self.fc1_weight.grad = dW1
self.fc1_bias.grad = db1
self.fc2_weight.grad = dW2
self.fc2_bias.grad = db2
|
import torch
from torch.autograd import Function
from torch.distributed.pipelining import pipe_split, SplitPoint
class MLPModuleWithDw(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
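The compute_dW row above accumulates linear-layer weight gradients by hand from cached inputs and cached output gradients (dW = grad_out^T @ input, db = grad_out.sum(0)). A minimal standalone sketch, not taken from the dataset rows and using illustrative names only, that checks those formulas against autograd for a single nn.Linear:

# Standalone sketch: verify the manual gradient formulas used by compute_dW
# (dW = grad_out^T @ input, db = grad_out.sum(0)) against autograd.
import torch

torch.manual_seed(0)
lin = torch.nn.Linear(4, 3)
x = torch.randn(8, 4)

out = lin(x)
grad_out = torch.randn_like(out)
out.backward(grad_out)

manual_dW = grad_out.t().mm(x)   # same formula as compute_dW above
manual_db = grad_out.sum(0)

torch.testing.assert_close(lin.weight.grad, manual_dW)
torch.testing.assert_close(lin.bias.grad, manual_db)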
torch
|
test/distributed/pipelining/model_registry.py
|
toggle
|
def toggle(self):
self.use_custom_logic = not self.use_custom_logic
# Multi-MLP model With Dw
|
import torch
from torch.autograd import Function
from torch.distributed.pipelining import pipe_split, SplitPoint
class MLPModuleWithDw(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/model_registry.py
|
__init__
|
def __init__(self, d_hid):
super().__init__()
self.mm_param0 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.mm_param1 = torch.nn.Parameter(torch.randn(d_hid, d_hid))
self.cval = torch.nn.Buffer(torch.randn((d_hid,), requires_grad=False))
self.lin0 = torch.nn.Linear(d_hid, d_hid)
self.lin1 = torch.nn.Linear(d_hid, d_hid)
|
import torch
from torch.autograd import Function
from torch.distributed.pipelining import pipe_split, SplitPoint
class ExampleCode(torch.nn.Module):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_backward.py
|
test_stage_backward
|
def test_stage_backward(self):
# MLP as a stage module
mod = MLPModule(d_hid)
x = torch.randn(batch_size, d_hid)
# As in a pipeline stage, the inputs to this stage require gradients
x.requires_grad_(True)
target = torch.randn(batch_size, d_hid)
loss_fn = torch.nn.MSELoss(reduction="sum")
# Make a copy
ref_mod = copy.deepcopy(mod)
ref_x = x.detach().requires_grad_(x.requires_grad)
ref_target = target.detach()
# Forward and backward in stage manner
out = mod(x)
loss = loss_fn(out, target)
grad_inputs = stage_backward(
stage_output=loss,
output_grads=None,
input_values=(x,),
)
# Run reference
ref_out = ref_mod(ref_x)
ref_loss = loss_fn(ref_out, ref_target)
ref_loss.backward()
torch.testing.assert_close(grad_inputs[0], ref_x.grad)
# Every rank checks gradients
for name, p in mod.named_parameters():
ref_p = ref_mod.get_parameter(name)
try:
torch.testing.assert_close(p.grad, ref_p.grad)
except AssertionError:
print(f"Gradient test failed for {name}: {p.grad} vs {ref_p.grad}")
raise
|
import copy
from model_registry import MLPModule
import torch
from torch.distributed.pipelining._backward import (
stage_backward,
stage_backward_input,
stage_backward_weight,
)
from torch.testing._internal.common_utils import run_tests, TestCase
d_hid = 512
batch_size = 256
class StageBackwardTests(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_backward.py
|
test_stage_backward_input
|
def test_stage_backward_input(self):
# MLP as a stage module
mod = MLPModule(d_hid)
x = torch.randn(batch_size, d_hid)
# As in a pipeline stage, the inputs to this stage require gradients
x.requires_grad_(True)
target = torch.randn(batch_size, d_hid)
loss_fn = torch.nn.MSELoss(reduction="sum")
# Make a copy
ref_mod = copy.deepcopy(mod)
ref_x = x.detach().requires_grad_(x.requires_grad)
ref_target = target.detach()
# Forward, then backward of loss with respect to inputs
out = mod(x)
loss = loss_fn(out, target)
dinputs, param_groups = stage_backward_input(
stage_outputs=(loss,),
output_grads=None,
input_values=[x],
weights=mod.parameters(),
)
# Run reference
ref_out = ref_mod(ref_x)
ref_loss = loss_fn(ref_out, ref_target)
ref_loss.backward()
torch.testing.assert_close(x.grad, ref_x.grad)
torch.testing.assert_close(dinputs[0], ref_x.grad)
for name, p in mod.named_parameters():
# Check that the weight gradients were not updated
self.assertEqual(p.grad, None)
|
import copy
from model_registry import MLPModule
import torch
from torch.distributed.pipelining._backward import (
stage_backward,
stage_backward_input,
stage_backward_weight,
)
from torch.testing._internal.common_utils import run_tests, TestCase
d_hid = 512
batch_size = 256
class StageBackwardTests(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
torch
|
test/distributed/pipelining/test_backward.py
|
test_stage_backward_weight
|
def test_stage_backward_weight(self):
# MLP as a stage module
mod = MLPModule(d_hid)
x = torch.randn(batch_size, d_hid)
# As in a pipeline stage, the inputs to this stage require gradients
x.requires_grad_(True)
target = torch.randn(batch_size, d_hid)
loss_fn = torch.nn.MSELoss(reduction="sum")
# Make a copy
ref_mod = copy.deepcopy(mod)
ref_x = x.detach().requires_grad_(x.requires_grad)
ref_target = target.detach()
# Forward, then backward of loss with respect to inputs
out = mod(x)
loss = loss_fn(out, target)
dinputs, param_groups = stage_backward_input(
stage_outputs=(loss,),
output_grads=None,
input_values=[x],
weights=mod.parameters(),
)
# backward of loss with respect to weights
dweights = stage_backward_weight(mod.parameters(), param_groups)
# Run reference
ref_out = ref_mod(ref_x)
ref_loss = loss_fn(ref_out, ref_target)
ref_loss.backward()
# Every rank checks gradients
for name, p in mod.named_parameters():
ref_p = ref_mod.get_parameter(name)
try:
torch.testing.assert_close(p.grad, ref_p.grad)
except AssertionError:
print(f"Gradient test failed for {name}: {p.grad} vs {ref_p.grad}")
raise
|
import copy
from model_registry import MLPModule
import torch
from torch.distributed.pipelining._backward import (
stage_backward,
stage_backward_input,
stage_backward_weight,
)
from torch.testing._internal.common_utils import run_tests, TestCase
d_hid = 512
batch_size = 256
class StageBackwardTests(TestCase):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
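The three stage-backward rows above split one backward pass into an input-gradient phase (stage_backward_input) and a deferred weight-gradient phase (stage_backward_weight), then compare the results against a plain .backward(). A minimal sketch of the same split using only torch.autograd.grad on a single linear layer; this is illustrative only and does not use the pipelining API:

# Sketch: compute input gradients first, weight gradients later, and check
# that together they match one ordinary backward pass.
import copy
import torch

torch.manual_seed(0)
lin = torch.nn.Linear(4, 4)
x = torch.randn(2, 4, requires_grad=True)
target = torch.randn(2, 4)
loss = torch.nn.functional.mse_loss(lin(x), target, reduction="sum")

# Phase 1: gradient w.r.t. the stage input only; keep the graph for later.
(dx,) = torch.autograd.grad(loss, (x,), retain_graph=True)

# Phase 2: weight gradients computed afterwards from the retained graph.
dW, db = torch.autograd.grad(loss, (lin.weight, lin.bias))

# Reference: one ordinary backward over a copy yields the same gradients.
ref_lin = copy.deepcopy(lin)
ref_x = x.detach().requires_grad_(True)
ref_loss = torch.nn.functional.mse_loss(ref_lin(ref_x), target, reduction="sum")
ref_loss.backward()

torch.testing.assert_close(dx, ref_x.grad)
torch.testing.assert_close(dW, ref_lin.weight.grad)
torch.testing.assert_close(db, ref_lin.bias.grad)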
torch
|
test/distributed/pipelining/model_registry.py
|
backward
|
def backward(ctx, grad_output):
input_val, weight, bias = ctx.saved_tensors
grad_input = grad_output.mm(weight)
ctx.module.cached_context[ctx.layer_idx].append(grad_output.clone())
ctx.module.cached_context[str(ctx.layer_idx) + "_input"].append(
input_val.clone()
)
return grad_input, None, None, None, None
|
import torch
from torch.autograd import Function
from torch.distributed.pipelining import pipe_split, SplitPoint
class CustomLinearDx(Function):
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
added
|
||
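The backward row above returns one real gradient followed by four None placeholders, which implies the matching forward (not shown in this row) takes five inputs: the activation, weight, bias, the owning module, and a layer index, with weight and bias gradients deferred to a later compute_dW-style step. A hypothetical sketch of such a pairing, with all names assumed rather than taken from the file:

# Hypothetical stand-in for a CustomLinearDx-style Function: only grad_input
# flows back in backward, while grad_output and the input are cached on the
# owning module for a deferred weight update.
import torch
from torch.autograd import Function

class LinearDxSketch(Function):
    @staticmethod
    def forward(ctx, input_val, weight, bias, module, layer_idx):
        ctx.save_for_backward(input_val, weight, bias)
        ctx.module = module          # non-tensor state kept on ctx
        ctx.layer_idx = layer_idx
        return input_val.mm(weight.t()) + bias

    @staticmethod
    def backward(ctx, grad_output):
        input_val, weight, bias = ctx.saved_tensors
        grad_input = grad_output.mm(weight)
        # Cache what a later compute_dW-style step would need.
        ctx.module.cached_context[ctx.layer_idx].append(grad_output.clone())
        ctx.module.cached_context[str(ctx.layer_idx) + "_input"].append(input_val.clone())
        return grad_input, None, None, None, None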
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_public_attrs
|
def test_public_attrs(setup_rpc):
class MyString:
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
model = nn.Sequential(nn.Linear(1, 1))
pipe = Pipe(model, chunks=42.000, checkpoint=MyString("always"))
assert pipe.devices == [torch.device("cpu")]
assert pipe.chunks == 42
assert isinstance(pipe.chunks, int)
assert pipe.checkpoint == "always"
assert isinstance(pipe.checkpoint, str)
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
__init__
|
def __init__(self, value):
self.value = value
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class MyString:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
__str__
|
def __str__(self):
return self.value
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
class MyString:
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_sequential_like
|
def test_sequential_like(setup_rpc):
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
model = nn.Sequential(a, b)
model = Pipe(model)
assert len(model) == 2
assert list(model) == [a, b]
assert model[0] is a
assert model[1] is b
with pytest.raises(IndexError):
_ = model[2]
assert model[-1] is b
assert model[-2] is a
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_chunks_less_than_1
|
def test_chunks_less_than_1(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
with pytest.raises(ValueError):
Pipe(model, chunks=0)
with pytest.raises(ValueError):
Pipe(model, chunks=-1)
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|
||
torch
|
test/distributed/pipeline/sync/test_pipe.py
|
test_batch_size_indivisible
|
def test_batch_size_indivisible(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
model = Pipe(model, chunks=4)
with pytest.warns(None) as record:
model(torch.rand(7, 1))
# Indivisible batch size is legal.
assert not record
|
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
|
c263bd43e8e8502d4726643bc6fd046f0130ac0e
|
32f585d9346e316e554c8d9bf7548af9f62141fc
|
deleted
|