Dataset columns:

    library          string; 1 distinct value
    test_file        string; 785 distinct values
    test_function    string; length 1 to 295
    before           string; length 0 to 448k
    after            string; length 0 to 487k
    context_before   string; 947 distinct values
    context_after    string; length 0 to 16.3k
    commit_before    string; 1 distinct value
    commit_after     string; 1 distinct value
    change_type      string; 3 values
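The rows below follow this schema. As a minimal sketch of how such rows could be consumed, assuming they have been exported to a JSON-lines file (the file name "rows.jsonl" and the export format are assumptions, not part of this dump):

    import json

    def iter_records(path):
        # One JSON object per line, with the columns listed above.
        with open(path, encoding="utf-8") as f:
            for line in f:
                yield json.loads(line)

    # Example: collect the test functions deleted between commit_before
    # and commit_after.
    deleted = [
        (row["test_file"], row["test_function"])
        for row in iter_records("rows.jsonl")  # hypothetical export file
        if row["change_type"] == "deleted"
    ]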
library: torch
test_file: test/distributed/pipeline/sync/skip/test_leak.py
test_function: forward
before:
    def forward(self, input):
        yield stash("skip", input)
        return input  # noqa: B901
context_before:
    import pytest
    import torch
    from torch import nn

    from torch.distributed.pipeline.sync import Pipe, is_checkpointing, is_recomputing
    from torch.distributed.pipeline.sync.skip import pop, skippable, stash
    from torch.distributed.pipeline.sync.skip.tracker import current_skip_tracker

    @skippable(stash=["skip"])
    class Stash(nn.Module):
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/skip/test_leak.py
test_function: backward
before:
    def backward(ctx, grad):
        assert portal_tensor_life_is(0, skip_tracker=ctx.skip_tracker)
        return grad
context_before:
    import pytest
    import torch
    from torch import nn

    from torch.distributed.pipeline.sync import Pipe, is_checkpointing, is_recomputing
    from torch.distributed.pipeline.sync.skip import pop, skippable, stash
    from torch.distributed.pipeline.sync.skip.tracker import current_skip_tracker

    class F(torch.autograd.Function):
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/skip/test_leak.py
test_function: deny
before:
    def deny(*args, **kwargs):
        raise AssertionError("tried to create Portal without Pipe")

    monkeypatch.setattr("torch.distributed.pipeline.sync.skip.portal.Portal.__init__", deny)

    model = nn.Sequential(Stash(), Pop())

    input = torch.rand(10, requires_grad=True)

    if train:
        model.train()
        output = model(input)
        output.norm().backward()
    else:
        model.eval()
        with torch.no_grad():
            model(input)
context_before:
    import pytest
    import torch
    from torch import nn

    from torch.distributed.pipeline.sync import Pipe, is_checkpointing, is_recomputing
    from torch.distributed.pipeline.sync.skip import pop, skippable, stash
    from torch.distributed.pipeline.sync.skip.tracker import current_skip_tracker
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/skip/test_portal.py
test_function: test_copy_returns_on_next_device
before:
    def test_copy_returns_on_next_device():
        portal = Portal(torch.rand(1), tensor_life=1)

        prev_stream = default_stream(torch.device("cpu"))
        next_stream = default_stream(torch.device("cuda"))

        phony = torch.zeros(0, requires_grad=True)
        assert phony.device.type == "cpu"

        phony = portal.copy(prev_stream, next_stream, phony)
        assert phony.device.type == "cuda"
context_before:
    import pytest
    import torch

    from torch.distributed.pipeline.sync.dependency import fork, join
    from torch.distributed.pipeline.sync.skip.portal import Portal
    from torch.distributed.pipeline.sync.stream import default_stream
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/skip/test_portal.py
test_function: test_blue_orange
before:
    def test_blue_orange():
        tensor1 = torch.rand(1, requires_grad=True)
        tensor2 = torch.rand(1, requires_grad=True)

        # Same with: output = tensor1*2 + tensor2
        #
        #                +----------------------+
        #                |                      |
        # tensor2 -- PortalBlue -+      +- PortalOrange -+
        #                        |      |                |
        # tensor1 ------------ Join -- Fork --- Mul --- Add -- output
        #
        main = tensor1
        portal = Portal(tensor2, tensor_life=2)
        phony = portal.blue()
        main = join(main, phony)
        main, phony = fork(main)
        sub = portal.orange(phony)
        output = main * 2 + sub

        output.backward()

        assert torch.allclose(tensor1.grad, torch.tensor([2.0]))
        assert torch.allclose(tensor2.grad, torch.tensor([1.0]))
context_before:
    import pytest
    import torch

    from torch.distributed.pipeline.sync.dependency import fork, join
    from torch.distributed.pipeline.sync.skip.portal import Portal
    from torch.distributed.pipeline.sync.stream import default_stream
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/skip/test_portal.py
test_function: test_blue_orange_not_requires_grad
before:
    def test_blue_orange_not_requires_grad():
        tensor1 = torch.rand(1, requires_grad=True)
        tensor2 = torch.rand(1)

        # Same with: output = tensor1*2 + tensor2
        #
        #                +----------------------+
        #                |                      |
        # tensor2 -- PortalBlue -+      +- PortalOrange -+
        #                        |      |                |
        # tensor1 ------------ Join -- Fork --- Mul --- Add -- output
        #
        main = tensor1
        portal = Portal(tensor2, tensor_life=2)
        phony = portal.blue()
        main = join(main, phony)
        main, phony = fork(main)
        sub = portal.orange(phony)
        output = main * 2 + sub

        output.backward()

        assert torch.allclose(tensor1.grad, torch.tensor([2.0]))
        assert tensor2.grad is None
context_before:
    import pytest
    import torch

    from torch.distributed.pipeline.sync.dependency import fork, join
    from torch.distributed.pipeline.sync.skip.portal import Portal
    from torch.distributed.pipeline.sync.stream import default_stream
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/conftest.py
test_function: cuda_sleep
before:
    def cuda_sleep():
        # Warm-up CUDA.
        torch.empty(1, device="cuda")

        # From test/test_cuda.py in PyTorch.
        start = torch.cuda.Event(enable_timing=True)
        end = torch.cuda.Event(enable_timing=True)
        start.record()
        torch.cuda._sleep(1000000)
        end.record()
        end.synchronize()
        cycles_per_ms = 1000000 / start.elapsed_time(end)

        def cuda_sleep(seconds):
            torch.cuda._sleep(int(seconds * cycles_per_ms * 1000))

        return cuda_sleep
context_before:
    import tempfile

    import pytest
    import torch
    import torch.distributed as dist
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/conftest.py
test_function: setup_rpc
before:
    def setup_rpc(scope="session"):
        file = tempfile.NamedTemporaryFile()
        dist.rpc.init_rpc(
            name="worker0",
            rank=0,
            world_size=1,
            rpc_backend_options=dist.rpc.TensorPipeRpcBackendOptions(
                init_method="file://{}".format(file.name),
            )
        )
        yield
        dist.rpc.shutdown()
context_before:
    import tempfile

    import pytest
    import torch
    import torch.distributed as dist
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/conftest.py
test_function: pytest_ignore_collect
before:
    def pytest_ignore_collect(path, config):
        "Skip this directory if distributed modules are not enabled."
        return not dist.is_available()
context_before:
    import tempfile

    import pytest
    import torch
    import torch.distributed as dist
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/skip/test_portal.py
test_function: test_use_grad
before:
    def test_use_grad():
        tensor = torch.rand(1, requires_grad=True)
        portal = Portal(tensor, tensor_life=1)

        portal.put_grad(tensor)
        assert portal.use_grad() is tensor

        # Gradient in a portal is ephemeral.
        with pytest.raises(RuntimeError):
            portal.use_grad()

    class TestTensorLife:
        @pytest.fixture
        def new_portal(self):
            portal = None

            def new_portal(tensor_life):
                nonlocal portal
                tensor = torch.rand(1, requires_grad=True)
                portal = Portal(tensor, tensor_life)
                return portal, tensor

            yield new_portal

            # A test using this fixture must exhaust the tensor in the portal.
            with pytest.raises(RuntimeError):
                portal.check_tensor_life()
            assert portal.tensor is None

        def test_tensor_life_0(self, new_portal):
            portal, tensor = new_portal(0)
            assert portal.tensor is None

        def test_tensor_life_1(self, new_portal):
            portal, tensor = new_portal(1)
            assert portal.tensor is tensor
            portal.blue()

        def test_tensor_life_2(self, new_portal):
            portal, tensor = new_portal(2)
            assert portal.tensor is tensor
            phony = portal.blue()
            assert portal.orange(phony).data_ptr() == tensor.data_ptr()

        def test_tensor_life_3(self, new_portal):
            portal, tensor = new_portal(3)
            assert portal.tensor is tensor
            phony = portal.blue()
            assert portal.orange(phony).data_ptr() == tensor.data_ptr()
            assert portal.orange(phony).data_ptr() == tensor.data_ptr()

        def test_tensor_life_4(self, new_portal):
            portal, tensor = new_portal(4)
            assert portal.tensor is tensor
            phony = portal.blue()
            assert portal.orange(phony).data_ptr() == tensor.data_ptr()
            assert portal.orange(phony).data_ptr() == tensor.data_ptr()
            portal.blue()

        def test_tensor_life_3_plus_1(self, new_portal):
            portal, tensor = new_portal(3)
            assert portal.tensor is tensor
            phony = portal.blue()
            assert portal.orange(phony).data_ptr() == tensor.data_ptr()
            assert portal.orange(phony).data_ptr() == tensor.data_ptr()

            another_tensor = torch.rand(1, requires_grad=True)
            portal.put_tensor(another_tensor, tensor_life=1)
            portal.blue()
context_before:
    import pytest
    import torch

    from torch.distributed.pipeline.sync.dependency import fork, join
    from torch.distributed.pipeline.sync.skip.portal import Portal
    from torch.distributed.pipeline.sync.stream import default_stream
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/skip/test_portal.py
test_function: new_portal
before:
    def new_portal(self):
        portal = None

        def new_portal(tensor_life):
            nonlocal portal
            tensor = torch.rand(1, requires_grad=True)
            portal = Portal(tensor, tensor_life)
            return portal, tensor

        yield new_portal

        # A test using this fixture must exhaust the tensor in the portal.
        with pytest.raises(RuntimeError):
            portal.check_tensor_life()
        assert portal.tensor is None
context_before:
    import pytest
    import torch

    from torch.distributed.pipeline.sync.dependency import fork, join
    from torch.distributed.pipeline.sync.skip.portal import Portal
    from torch.distributed.pipeline.sync.stream import default_stream

    class TestTensorLife:
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/skip/test_portal.py
test_function: test_tensor_life_0
before:
    def test_tensor_life_0(self, new_portal):
        portal, tensor = new_portal(0)
        assert portal.tensor is None
context_before:
    import pytest
    import torch

    from torch.distributed.pipeline.sync.dependency import fork, join
    from torch.distributed.pipeline.sync.skip.portal import Portal
    from torch.distributed.pipeline.sync.stream import default_stream

    class TestTensorLife:
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/skip/test_portal.py
test_function: test_tensor_life_1
before:
    def test_tensor_life_1(self, new_portal):
        portal, tensor = new_portal(1)
        assert portal.tensor is tensor
        portal.blue()
context_before:
    import pytest
    import torch

    from torch.distributed.pipeline.sync.dependency import fork, join
    from torch.distributed.pipeline.sync.skip.portal import Portal
    from torch.distributed.pipeline.sync.stream import default_stream

    class TestTensorLife:
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/skip/test_portal.py
test_function: test_tensor_life_2
before:
    def test_tensor_life_2(self, new_portal):
        portal, tensor = new_portal(2)
        assert portal.tensor is tensor
        phony = portal.blue()
        assert portal.orange(phony).data_ptr() == tensor.data_ptr()
context_before:
    import pytest
    import torch

    from torch.distributed.pipeline.sync.dependency import fork, join
    from torch.distributed.pipeline.sync.skip.portal import Portal
    from torch.distributed.pipeline.sync.stream import default_stream

    class TestTensorLife:
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/skip/test_portal.py
test_function: test_tensor_life_3
before:
    def test_tensor_life_3(self, new_portal):
        portal, tensor = new_portal(3)
        assert portal.tensor is tensor
        phony = portal.blue()
        assert portal.orange(phony).data_ptr() == tensor.data_ptr()
        assert portal.orange(phony).data_ptr() == tensor.data_ptr()
context_before:
    import pytest
    import torch

    from torch.distributed.pipeline.sync.dependency import fork, join
    from torch.distributed.pipeline.sync.skip.portal import Portal
    from torch.distributed.pipeline.sync.stream import default_stream

    class TestTensorLife:
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/skip/test_portal.py
test_function: test_tensor_life_4
before:
    def test_tensor_life_4(self, new_portal):
        portal, tensor = new_portal(4)
        assert portal.tensor is tensor
        phony = portal.blue()
        assert portal.orange(phony).data_ptr() == tensor.data_ptr()
        assert portal.orange(phony).data_ptr() == tensor.data_ptr()
        portal.blue()
context_before:
    import pytest
    import torch

    from torch.distributed.pipeline.sync.dependency import fork, join
    from torch.distributed.pipeline.sync.skip.portal import Portal
    from torch.distributed.pipeline.sync.stream import default_stream

    class TestTensorLife:
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/skip/test_portal.py
test_function: test_tensor_life_3_plus_1
before:
    def test_tensor_life_3_plus_1(self, new_portal):
        portal, tensor = new_portal(3)
        assert portal.tensor is tensor
        phony = portal.blue()
        assert portal.orange(phony).data_ptr() == tensor.data_ptr()
        assert portal.orange(phony).data_ptr() == tensor.data_ptr()

        another_tensor = torch.rand(1, requires_grad=True)
        portal.put_tensor(another_tensor, tensor_life=1)
        portal.blue()
context_before:
    import pytest
    import torch

    from torch.distributed.pipeline.sync.dependency import fork, join
    from torch.distributed.pipeline.sync.skip.portal import Portal
    from torch.distributed.pipeline.sync.stream import default_stream

    class TestTensorLife:
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/skip/test_stash_pop.py
test_function: skip_tracker
before:
    def skip_tracker():
        skip_tracker = SkipTracker()
        with use_skip_tracker(skip_tracker):
            yield skip_tracker
context_before:
    import pytest
    import torch
    from torch import nn

    from torch.distributed.pipeline.sync.skip import pop, skippable, stash
    from torch.distributed.pipeline.sync.skip.tracker import SkipTracker, use_skip_tracker
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_balance.py
test_function: __init__
before:
    def __init__(self, seconds):
        super().__init__()
        self.seconds = seconds
context_before:
    import time

    import pytest
    import torch
    from torch import nn

    from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
    from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox

    skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")

    devices = ["cpu"]

    class Delay(nn.Module):
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_balance.py
test_function: forward
before:
    def forward(self, x):
        time.sleep(self.seconds)
        return x
context_before:
    import time

    import pytest
    import torch
    from torch import nn

    from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
    from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox

    skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")

    devices = ["cpu"]

    class Delay(nn.Module):
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/skip/test_verify_skippables.py
test_function: test_pop_again
before:
    def test_pop_again():
        @skippable(stash=["foo"])
        class Layer1(nn.Module):
            pass

        @skippable(pop=["foo"])
        class Layer2(nn.Module):
            pass

        @skippable(pop=["foo"])
        class Layer3(nn.Module):
            pass

        with pytest.raises(TypeError) as e:
            verify_skippables(nn.Sequential(Layer1(), Layer2(), Layer3()))
        assert "'2' redeclared 'foo' as poppable" in str(e.value)
context_before:
    import pytest
    from torch import nn

    from torch.distributed.pipeline.sync.skip import Namespace, skippable, verify_skippables
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/skip/test_verify_skippables.py
test_function: test_stash_pop_together_different_names
before:
    def test_stash_pop_together_different_names():
        @skippable(stash=["foo"])
        class Layer1(nn.Module):
            pass

        @skippable(pop=["foo"], stash=["bar"])
        class Layer2(nn.Module):
            pass

        @skippable(pop=["bar"])
        class Layer3(nn.Module):
            pass

        verify_skippables(nn.Sequential(Layer1(), Layer2(), Layer3()))
context_before:
    import pytest
    from torch import nn

    from torch.distributed.pipeline.sync.skip import Namespace, skippable, verify_skippables
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/skip/test_verify_skippables.py
test_function: test_stash_pop_together_same_name
before:
    def test_stash_pop_together_same_name():
        @skippable(stash=["foo"], pop=["foo"])
        class Layer1(nn.Module):
            pass

        with pytest.raises(TypeError) as e:
            verify_skippables(nn.Sequential(Layer1()))
        assert "'0' declared 'foo' both as stashable and as poppable" in str(e.value)
context_before:
    import pytest
    from torch import nn

    from torch.distributed.pipeline.sync.skip import Namespace, skippable, verify_skippables
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/skip/test_verify_skippables.py
test_function: test_double_stash_pop
before:
    def test_double_stash_pop():
        @skippable(stash=["foo"])
        class Layer1(nn.Module):
            pass

        @skippable(pop=["foo"])
        class Layer2(nn.Module):
            pass

        @skippable(stash=["foo"])
        class Layer3(nn.Module):
            pass

        @skippable(pop=["foo"])
        class Layer4(nn.Module):
            pass

        with pytest.raises(TypeError) as e:
            verify_skippables(nn.Sequential(Layer1(), Layer2(), Layer3(), Layer4()))
        assert "'2' redeclared 'foo' as stashable" in str(e.value)
        assert "'3' redeclared 'foo' as poppable" in str(e.value)
context_before:
    import pytest
    from torch import nn

    from torch.distributed.pipeline.sync.skip import Namespace, skippable, verify_skippables
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/skip/test_verify_skippables.py
test_function: test_double_stash_pop_but_isolated
before:
    def test_double_stash_pop_but_isolated():
        @skippable(stash=["foo"])
        class Layer1(nn.Module):
            pass

        @skippable(pop=["foo"])
        class Layer2(nn.Module):
            pass

        @skippable(stash=["foo"])
        class Layer3(nn.Module):
            pass

        @skippable(pop=["foo"])
        class Layer4(nn.Module):
            pass

        ns1 = Namespace()
        ns2 = Namespace()

        verify_skippables(
            nn.Sequential(
                Layer1().isolate(ns1),
                Layer2().isolate(ns1),
                Layer3().isolate(ns2),
                Layer4().isolate(ns2),
            )
        )
context_before:
    import pytest
    from torch import nn

    from torch.distributed.pipeline.sync.skip import Namespace, skippable, verify_skippables
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_balance.py
test_function: test_blockpartition
before:
    def test_blockpartition():
        assert blockpartition.solve([1, 2, 3, 4, 5, 6], partitions=2) == [[1, 2, 3, 4], [5, 6]]
context_before:
    import time

    import pytest
    import torch
    from torch import nn

    from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
    from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox

    skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")

    devices = ["cpu"]
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_balance.py
test_function: test_blockpartition_zeros
before:
    def test_blockpartition_zeros():
        assert blockpartition.solve([0, 0], partitions=2) == [[0], [0]]
context_before:
    import time

    import pytest
    import torch
    from torch import nn

    from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
    from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox

    skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")

    devices = ["cpu"]
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_balance.py
test_function: test_blockpartition_non_positive_partitions
before:
    def test_blockpartition_non_positive_partitions():
        with pytest.raises(ValueError):
            blockpartition.solve([42], partitions=0)
        with pytest.raises(ValueError):
            blockpartition.solve([42], partitions=-1)
context_before:
    import time

    import pytest
    import torch
    from torch import nn

    from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
    from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox

    skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")

    devices = ["cpu"]
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_balance.py
test_function: test_balance_by_time
before:
    def test_balance_by_time(device):
        class Delay(nn.Module):
            def __init__(self, seconds):
                super().__init__()
                self.seconds = seconds

            def forward(self, x):
                time.sleep(self.seconds)
                return x

        model = nn.Sequential(*[Delay(i / 10) for i in [1, 2, 3, 4, 5, 6]])
        sample = torch.rand(1)
        balance = balance_by_time(2, model, sample, device=device)
        assert balance == [4, 2]
context_before:
    import time

    import pytest
    import torch
    from torch import nn

    from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
    from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox

    skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")

    devices = ["cpu"]
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_balance.py
test_function: test_already_has_grad
before:
    def test_already_has_grad():
        model = nn.Sequential(nn.Conv2d(3, 3, 1))
        sample = torch.rand(1, 3, 32, 32)
        model(sample).norm().backward()

        with pytest.raises(ValueError, match="some parameter already has gradient"):
            balance_by_time(1, model, sample, device="cpu")
context_before:
    import time

    import pytest
    import torch
    from torch import nn

    from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
    from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox

    skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")

    devices = ["cpu"]
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_bugs.py
test_function: test_python_autograd_function
before:
    def test_python_autograd_function(setup_rpc):
        # A Python autograd function might fail with this error:
        #
        #   RuntimeError: Returning Variables sharing storage with other Variables
        #   that require grad is not supported in Python functions. Please submit a
        #   feature request if you hit this error.
        #
        # It doesn't look like an essential restriction. But it happens on the
        # current PyTorch version. To avoid it, we should detach the tensor before
        # returning by identity autograd functions, such as Wait, Fork, and Join.
        #
        class Identity(torch.autograd.Function):
            @staticmethod
            def forward(ctx, input):
                return input

            @staticmethod
            def backward(ctx, grad):
                return grad

        class M(nn.Module):
            def forward(self, input):
                return Identity.apply(input)

        model = nn.Sequential(M(), M())
        model = Pipe(model, checkpoint="always")

        x = torch.rand(42)
        y = model(x)
        assert torch.allclose(x, y.local_value())
context_before:
    import pytest
    import torch
    from torch import nn
    import torch.nn.functional as F

    from torch.distributed.pipeline.sync import Pipe
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_bugs.py
test_function: backward
before:
    def backward(ctx, grad):
        return grad
context_before:
    import pytest
    import torch
    from torch import nn
    import torch.nn.functional as F

    from torch.distributed.pipeline.sync import Pipe

    class Identity(torch.autograd.Function):
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_bugs.py
test_function: test_tuple_wait
before:
    def test_tuple_wait(cuda_sleep, setup_rpc):
        # In v0.0.3, Wait is applied to only the first tensor on a micro-batch.
        # Under this behavior, if checkpointing was disabled, there's a possibility
        # that gradient accumulations on other tensors are not synchronized
        # properly to the copy stream.
        class Sleep(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x):
                return x.detach()

            @staticmethod
            def backward(ctx, grad):
                with torch.cuda.device(grad.device):
                    cuda_sleep(0.05)
                return grad

        class Layer1(nn.Module):
            def __init__(self):
                super().__init__()
                self.ones = nn.Parameter(torch.ones(32, 3, 32, 32, requires_grad=True))

            def forward(self, a, b):
                a = a * self.ones
                return a * 1, b * 2, b * 3

        class Layer2(nn.Module):
            def __init__(self):
                super().__init__()
                self.ones = nn.Parameter(torch.ones(32, 3, 32, 32, requires_grad=True))

            def forward(self, a, b, c):
                a = a * self.ones
                b = Sleep.apply(b)
                return a + b + c

        model = nn.Sequential(Layer1().cuda(0), Layer2().cuda(1))
        model = Pipe(model, chunks=32, checkpoint="never")

        a = torch.rand(1024, 3, 32, 32, device=0, requires_grad=True)
        b = torch.rand(1024, 3, 32, 32, device=0, requires_grad=True)

        y = model(a, b)
        y.local_value().norm().backward()

        torch.cuda.synchronize(0)
        torch.cuda.synchronize(1)

        assert torch.isclose(b.grad.norm().cpu(), torch.tensor(5.000))
context_before:
    import pytest
    import torch
    from torch import nn
    import torch.nn.functional as F

    from torch.distributed.pipeline.sync import Pipe
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_balance.py
test_function: test_sandbox_during_profiling
before:
    def test_sandbox_during_profiling(device):
        model = nn.Sequential(nn.BatchNorm2d(3))

        before = {k: v.clone() for k, v in model.state_dict().items()}

        sample = torch.rand(1, 3, 10, 10)
        balance_by_time(1, model, sample, device=device)

        after = model.state_dict()

        assert before.keys() == after.keys()
        for key, value in before.items():
            assert torch.allclose(after[key], value), key
context_before:
    import time

    import pytest
    import torch
    from torch import nn

    from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
    from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox

    skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")

    devices = ["cpu"]
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_balance.py
test_function: test_not_training
before:
    def test_not_training():
        class AssertTraining(nn.Module):
            def forward(self, x):
                assert self.training
                return x

        model = nn.Sequential(AssertTraining())

        model.eval()
        assert not model.training

        sample = torch.rand(1)
        balance_by_time(1, model, sample, device="cpu")

        assert not model.training
context_before:
    import time

    import pytest
    import torch
    from torch import nn

    from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
    from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox

    skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")

    devices = ["cpu"]
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_balance.py
test_function: test_balance_by_size_tuple
before:
    def test_balance_by_size_tuple():
        class Twin(nn.Module):
            def forward(self, x):
                return x, x.detach()

        class Add(nn.Module):
            def forward(self, a, b):
                return a + b

        model = nn.Sequential(Twin(), Add())
        sample = torch.rand(1, requires_grad=True)
        balance_by_size(1, model, sample)
context_before:
    import time

    import pytest
    import torch
    from torch import nn

    from torch.distributed.pipeline.sync._balance import balance_by_size, balance_by_time, blockpartition
    from torch.distributed.pipeline.sync._balance.profile import layerwise_sandbox

    skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")

    devices = ["cpu"]
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_bugs.py
test_function: __init__
before:
    def __init__(self):
        super().__init__()
        self.ones = nn.Parameter(torch.ones(32, 3, 32, 32, requires_grad=True))
context_before:
    import pytest
    import torch
    from torch import nn
    import torch.nn.functional as F

    from torch.distributed.pipeline.sync import Pipe

    class Layer1(nn.Module):
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_bugs.py
test_function: test_parallel_randoms
before:
    def test_parallel_randoms(setup_rpc):
        class Dropouts(nn.Module):
            def forward(self, x):
                for _ in range(100):
                    x = F.dropout(x, p=0.001)
                return x

        model = nn.Sequential(Dropouts(), Dropouts())

        x = torch.rand(10, 10, requires_grad=True)
        model = Pipe(model, chunks=10, checkpoint="always")
        y = model(x)
        y = y.local_value()
        y.norm().backward()

        assert y.to(torch.bool).tolist() == x.grad.to(torch.bool).tolist()
context_before:
    import pytest
    import torch
    from torch import nn
    import torch.nn.functional as F

    from torch.distributed.pipeline.sync import Pipe
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_checkpoint.py
test_function: test_serial_checkpoints
before:
    def test_serial_checkpoints(device):
        # Copied from https://github.com/pytorch/pytorch/pull/18568.
        timeline = []

        class Log(torch.autograd.Function):
            @staticmethod
            def forward(ctx, name, x):
                ctx.name = name
                timeline.append(f"{name}:forward")
                return x.detach()

            @staticmethod
            def backward(ctx, grad_output):
                name = ctx.name
                timeline.append(f"{name}:backward")
                return None, grad_output

        a = torch.rand(1, device=device, requires_grad=True)
        b = torch.rand(1, device=device, requires_grad=True)

        # Increase the next function sequence number.
        _ = a + 1 + 2 + 3 + 4 + 5

        a = checkpoint(partial(Log.apply, "a"), a)

        a, phony = fork(a)
        b = join(b, phony)

        b = checkpoint(partial(Log.apply, "b"), b)

        c = torch.cat((a, b))

        out = c.sum()

        #                          +--> {a} --Checkpoint(Log)--> {a}
        # {out} --Sum--> {c} --Cat ^-----------------------------+
        #                          +--> {b} --Checkpoint(Log)--> {b} --First--> {b}
        out.backward()

        assert timeline == ["a:forward", "b:forward", "b:forward", "b:backward", "a:forward", "a:backward"]
        #    |----------------------|  |-----------------------|  |-----------------------|
        #          forward pass            Checkpoint(Log[b])          Checkpoint(Log[a])
context_before:
    from functools import partial

    import pytest
    import torch
    from torch import nn
    import torch.cuda

    from torch.distributed.pipeline.sync.checkpoint import Checkpointing, checkpoint, is_checkpointing, is_recomputing
    from torch.distributed.pipeline.sync.dependency import fork, join
    from torch.distributed.pipeline.sync.microbatch import Batch

    devices = ["cpu"]
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/optim/test_zero_redundancy_optimizer.py
test_function: test_sharding
before:
    def test_sharding(self):
        """
        Check ZeroRedundancyOptimizer's parameter sharding at construction
        time.

        NOTE: The correctness of this test depends on the ZeRO implementation
        using the sorted-greedy partitioning algorithm. For details, see
        ``ZeroRedundancyOptimizer._partition_parameters()`` in
        zero_redundancy_optimizer.py.
        """
        self.dist_init(self.rank)
        LR = 0.01
        sizes = [9, 7, 5, 3]
        params = []
        for size in sizes * self.world_size:
            params.append(torch.rand(size, 1))
        o = ZeroRedundancyOptimizer(params, optimizer_class=SGD, lr=LR)
        self.assertEqual(
            sum([x.numel() for x in o.optim.param_groups[0]["params"]]),
            sum(sizes),
        )
after:
    def test_sharding(self):
        """
        Check ZeroRedundancyOptimizer's parameter sharding at construction
        time.

        NOTE: The correctness of this test depends on the ZeRO implementation
        using the sorted-greedy partitioning algorithm. For details, see
        ``ZeroRedundancyOptimizer._partition_parameters()`` in
        zero_redundancy_optimizer.py.
        """
        self.dist_init(self.rank)
        LR = 0.01
        sizes = [9, 7, 5, 3]
        params = []
        for size in sizes * self.world_size:
            params.append(torch.rand(size, 1))
        o = ZeroRedundancyOptimizer(params, optimizer_class=SGD, lr=LR)
        self.assertEqual(
            sum(x.numel() for x in o.optim.param_groups[0]["params"]),
            sum(sizes),
        )
context_before:
    import copy
    import os
    import sys
    import unittest
    from contextlib import suppress
    from typing import Any, cast, List

    import numpy as np
    import torch
    import torch.distributed as dist
    from torch.distributed.algorithms.ddp_comm_hooks.ddp_zero_hook import (
        hook_with_zero_step,
        hook_with_zero_step_interleaved,
    )
    from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import allreduce_hook
    from torch.distributed.algorithms.join import Join, Joinable, JoinHook
    from torch.distributed.optim import ZeroRedundancyOptimizer
    from torch.distributed.optim.zero_redundancy_optimizer import _broadcast_object
    from torch.nn.parallel import DistributedDataParallel as DDP
    from torch.optim import AdamW, SGD
    from torch.testing._internal import common_distributed
    from torch.testing._internal.common_utils import (
        instantiate_parametrized_tests,
        IS_WINDOWS,
        parametrize,
        run_tests,
        TEST_WITH_ASAN,
        TEST_WITH_DEV_DBG_ASAN,
    )

    import torchvision

    BACKEND = _get_backend_for_tests()

    class TestZeroRedundancyOptimizerDistributed(TestZeroRedundancyOptimizer):

    from torch.nn.parallel._replicated_tensor_ddp_utils import (
        _ddp_replicated_tensor,
    )

    from torch.nn.parallel._replicated_tensor_ddp_utils import (
        _ddp_replicated_tensor,
    )
context_after:
    import copy
    import os
    import sys
    import unittest
    from contextlib import nullcontext
    from typing import Any, cast, List

    import numpy as np
    import torch
    import torch.distributed as dist
    from torch.distributed.algorithms.ddp_comm_hooks.ddp_zero_hook import (
        hook_with_zero_step,
        hook_with_zero_step_interleaved,
    )
    from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import allreduce_hook
    from torch.distributed.algorithms.join import Join, Joinable, JoinHook
    from torch.distributed.optim import ZeroRedundancyOptimizer
    from torch.distributed.optim.zero_redundancy_optimizer import _broadcast_object
    from torch.nn.parallel import DistributedDataParallel as DDP
    from torch.optim import AdamW, SGD
    from torch.testing._internal import common_distributed
    from torch.testing._internal.common_utils import (
        instantiate_parametrized_tests,
        IS_WINDOWS,
        parametrize,
        run_tests,
        TEST_WITH_ASAN,
        TEST_WITH_DEV_DBG_ASAN,
    )

    import torchvision

    BACKEND = _get_backend_for_tests()

    class TestZeroRedundancyOptimizerDistributed(TestZeroRedundancyOptimizer):
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: modified

library: torch
test_file: test/distributed/optim/test_zero_redundancy_optimizer.py
test_function: all_trainable
before:
    def all_trainable():
        params = []
        sizes = [9, 7, 5, 3]
        sizes_world = sizes * self.world_size
        for size in sizes_world[:-1]:
            params.append(torch.rand(size, 1))

        # Make sure that the params are trainable so that they are factored
        # into the size-based parameter partitioning
        for p in params:
            p.requires_grad = True

        o = ZeroRedundancyOptimizer(params, optimizer_class=SGD, lr=LR)
        self.assertEqual(len(o.param_groups), 1)

        o.add_param_group({"params": [torch.rand(3, 1)]})
        # Verify that new group is added to the correct partition, making
        # all partitions have the same elements
        self.assertEqual(len(o.param_groups), 2)
        self.assertEqual(
            sum([x.numel() for g in o.optim.param_groups for x in g["params"]]),
            sum(sizes),
        )
        self.assertEqual(len(o.optim.param_groups), 2)

    # Test a pathological config with a first big non-trainable param
after:
    def all_trainable():
        params = []
        sizes = [9, 7, 5, 3]
        sizes_world = sizes * self.world_size
        for size in sizes_world[:-1]:
            params.append(torch.rand(size, 1))

        # Make sure that the params are trainable so that they are factored
        # into the size-based parameter partitioning
        for p in params:
            p.requires_grad = True

        o = ZeroRedundancyOptimizer(params, optimizer_class=SGD, lr=LR)
        self.assertEqual(len(o.param_groups), 1)

        o.add_param_group({"params": [torch.rand(3, 1)]})
        # Verify that new group is added to the correct partition, making
        # all partitions have the same elements
        self.assertEqual(len(o.param_groups), 2)
        self.assertEqual(
            sum(x.numel() for g in o.optim.param_groups for x in g["params"]),
            sum(sizes),
        )
        self.assertEqual(len(o.optim.param_groups), 2)

    # Test a pathological config with a first big non-trainable param
context_before:
    import copy
    import os
    import sys
    import unittest
    from contextlib import suppress
    from typing import Any, cast, List

    import numpy as np
    import torch
    import torch.distributed as dist
    from torch.distributed.algorithms.ddp_comm_hooks.ddp_zero_hook import (
        hook_with_zero_step,
        hook_with_zero_step_interleaved,
    )
    from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import allreduce_hook
    from torch.distributed.algorithms.join import Join, Joinable, JoinHook
    from torch.distributed.optim import ZeroRedundancyOptimizer
    from torch.distributed.optim.zero_redundancy_optimizer import _broadcast_object
    from torch.nn.parallel import DistributedDataParallel as DDP
    from torch.optim import AdamW, SGD
    from torch.testing._internal import common_distributed
    from torch.testing._internal.common_utils import (
        instantiate_parametrized_tests,
        IS_WINDOWS,
        parametrize,
        run_tests,
        TEST_WITH_ASAN,
        TEST_WITH_DEV_DBG_ASAN,
    )

    import torchvision

    BACKEND = _get_backend_for_tests()

    from torch.nn.parallel._replicated_tensor_ddp_utils import (
        _ddp_replicated_tensor,
    )

    from torch.nn.parallel._replicated_tensor_ddp_utils import (
        _ddp_replicated_tensor,
    )
context_after:
    import copy
    import os
    import sys
    import unittest
    from contextlib import nullcontext
    from typing import Any, cast, List

    import numpy as np
    import torch
    import torch.distributed as dist
    from torch.distributed.algorithms.ddp_comm_hooks.ddp_zero_hook import (
        hook_with_zero_step,
        hook_with_zero_step_interleaved,
    )
    from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import allreduce_hook
    from torch.distributed.algorithms.join import Join, Joinable, JoinHook
    from torch.distributed.optim import ZeroRedundancyOptimizer
    from torch.distributed.optim.zero_redundancy_optimizer import _broadcast_object
    from torch.nn.parallel import DistributedDataParallel as DDP
    from torch.optim import AdamW, SGD
    from torch.testing._internal import common_distributed
    from torch.testing._internal.common_utils import (
        instantiate_parametrized_tests,
        IS_WINDOWS,
        parametrize,
        run_tests,
        TEST_WITH_ASAN,
        TEST_WITH_DEV_DBG_ASAN,
    )

    import torchvision

    BACKEND = _get_backend_for_tests()
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: modified

library: torch
test_file: test/distributed/pipeline/sync/test_checkpoint.py
test_function: backward
before:
    def backward(ctx, grad_output):
        name = ctx.name
        timeline.append(f"{name}:backward")
        return None, grad_output
context_before:
    from functools import partial

    import pytest
    import torch
    from torch import nn
    import torch.cuda

    from torch.distributed.pipeline.sync.checkpoint import Checkpointing, checkpoint, is_checkpointing, is_recomputing
    from torch.distributed.pipeline.sync.dependency import fork, join
    from torch.distributed.pipeline.sync.microbatch import Batch

    devices = ["cpu"]

    class Log(torch.autograd.Function):
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_checkpoint.py
test_function: test_not_requires_grad
before:
    def test_not_requires_grad():
        x = Batch(torch.rand(1, requires_grad=False))
        assert not x[0].requires_grad

        def f(x):
            return x * 2

        chk = Checkpointing(f, x)
        x = chk.checkpoint()
        assert x[0].requires_grad

        chk.recompute(x)
        assert x[0].requires_grad

        x.tensor.backward()
context_before:
    from functools import partial

    import pytest
    import torch
    from torch import nn
    import torch.cuda

    from torch.distributed.pipeline.sync.checkpoint import Checkpointing, checkpoint, is_checkpointing, is_recomputing
    from torch.distributed.pipeline.sync.dependency import fork, join
    from torch.distributed.pipeline.sync.microbatch import Batch

    devices = ["cpu"]
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_checkpoint.py
test_function: f
before:
    def f(x):
        return x * 2

    chk = Checkpointing(f, x)
    x = chk.checkpoint()
    assert x[0].requires_grad

    chk.recompute(x)
    assert x[0].requires_grad

    x.tensor.backward()
context_before:
    from functools import partial

    import pytest
    import torch
    from torch import nn
    import torch.cuda

    from torch.distributed.pipeline.sync.checkpoint import Checkpointing, checkpoint, is_checkpointing, is_recomputing
    from torch.distributed.pipeline.sync.dependency import fork, join
    from torch.distributed.pipeline.sync.microbatch import Batch

    devices = ["cpu"]
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_checkpoint.py
test_function: test_random_in_checkpoint
before:
    def test_random_in_checkpoint(device):
        dropout = nn.Dropout(p=0.5)

        torch.manual_seed(0)
        x = torch.randn(3, 3, device=device, requires_grad=True)
        y = dropout(x)
        y.norm().backward()

        torch.manual_seed(0)
        chk_x = torch.randn(3, 3, device=device, requires_grad=True)
        chk_y = checkpoint(dropout, chk_x)
        chk_y.norm().backward()

        assert torch.allclose(x.grad, chk_x.grad)
context_before:
    from functools import partial

    import pytest
    import torch
    from torch import nn
    import torch.cuda

    from torch.distributed.pipeline.sync.checkpoint import Checkpointing, checkpoint, is_checkpointing, is_recomputing
    from torch.distributed.pipeline.sync.dependency import fork, join
    from torch.distributed.pipeline.sync.microbatch import Batch

    devices = ["cpu"]
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_checkpoint.py
test_function: test_detect_checkpointing_recomputing
before:
    def test_detect_checkpointing_recomputing():
        logs = []

        class Detect(nn.Module):
            def forward(self, input):
                logs.append((is_checkpointing(), is_recomputing()))
                return input

        model = Detect()
        input = torch.rand(1, requires_grad=True)

        output = checkpoint(model, input)
        output.backward()

        assert logs == [(True, False), (False, True)]
context_before:
    from functools import partial

    import pytest
    import torch
    from torch import nn
    import torch.cuda

    from torch.distributed.pipeline.sync.checkpoint import Checkpointing, checkpoint, is_checkpointing, is_recomputing
    from torch.distributed.pipeline.sync.dependency import fork, join
    from torch.distributed.pipeline.sync.microbatch import Batch

    devices = ["cpu"]
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_checkpoint.py
test_function: test_detect_checkpointing_recomputing_without_checkpoint
before:
    def test_detect_checkpointing_recomputing_without_checkpoint():
        logs = []

        class Detect(nn.Module):
            def forward(self, input):
                logs.append((is_checkpointing(), is_recomputing()))
                return input

        model = Detect()
        input = torch.rand(1, requires_grad=True)

        output = model(input)
        output.backward()

        assert logs == [(False, False)]
context_before:
    from functools import partial

    import pytest
    import torch
    from torch import nn
    import torch.cuda

    from torch.distributed.pipeline.sync.checkpoint import Checkpointing, checkpoint, is_checkpointing, is_recomputing
    from torch.distributed.pipeline.sync.dependency import fork, join
    from torch.distributed.pipeline.sync.microbatch import Batch

    devices = ["cpu"]
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_checkpoint.py
test_function: test_non_grad_output
before:
    def test_non_grad_output():
        class ForkNonGrad(nn.Module):
            def forward(self, input):
                return (input * 2, torch.rand(1))

        model = ForkNonGrad()
        input = torch.rand(1, requires_grad=True)

        output = checkpoint(model, input)
        output[0].backward()
context_before:
    from functools import partial

    import pytest
    import torch
    from torch import nn
    import torch.cuda

    from torch.distributed.pipeline.sync.checkpoint import Checkpointing, checkpoint, is_checkpointing, is_recomputing
    from torch.distributed.pipeline.sync.dependency import fork, join
    from torch.distributed.pipeline.sync.microbatch import Batch

    devices = ["cpu"]
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_copy.py
test_function: _test_copy_wait
before:
    def _test_copy_wait(prev_stream, next_stream, cuda_sleep=None):
        device = get_device(prev_stream)

        with use_stream(prev_stream):
            if is_cuda(prev_stream):
                cuda_sleep(0.5)
            x = torch.ones(100, device=device, requires_grad=True)

        (y,) = Copy.apply(prev_stream, next_stream, x)
        (y,) = Wait.apply(prev_stream, next_stream, x)

        with use_stream(next_stream):
            assert torch.allclose(y.sum(), torch.tensor(100.0, device=device))
            y.norm().backward()

        with use_stream(prev_stream):
            assert torch.allclose(x.grad.sum(), torch.tensor(10.0, device=device))
context_before:
    import pytest
    import torch

    from torch.distributed.pipeline.sync.copy import Copy, Wait
    from torch.distributed.pipeline.sync.stream import CPUStream, current_stream, get_device, is_cuda, new_stream, use_stream

    skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_copy.py
test_function: test_copy_wait_cuda_cuda
before:
    def test_copy_wait_cuda_cuda(cuda_sleep):
        prev_stream = current_stream(torch.device("cuda"))
        next_stream = new_stream(torch.device("cuda"))
        _test_copy_wait(prev_stream, next_stream, cuda_sleep)
context_before:
    import pytest
    import torch

    from torch.distributed.pipeline.sync.copy import Copy, Wait
    from torch.distributed.pipeline.sync.stream import CPUStream, current_stream, get_device, is_cuda, new_stream, use_stream

    skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_copy.py
test_function: test_wait_multiple_tensors
before:
    def test_wait_multiple_tensors():
        a = torch.rand(1, requires_grad=True)
        b = torch.rand(1, requires_grad=True)

        a, b = Wait.apply(CPUStream, CPUStream, a, b)

        assert a.grad_fn is b.grad_fn
        assert a.grad_fn.__class__ is Wait._backward_cls
context_before:
    import pytest
    import torch

    from torch.distributed.pipeline.sync.copy import Copy, Wait
    from torch.distributed.pipeline.sync.stream import CPUStream, current_stream, get_device, is_cuda, new_stream, use_stream

    skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_deferred_batch_norm.py
test_function: tilt_dist
before:
    def tilt_dist(input):
        # Tilt variance by channel.
        rgb = input.transpose(0, 1)
        rgb[0] *= 1
        rgb[1] *= 10
        rgb[2] *= 100

        # Tilt mean by single batch.
        for i, single in enumerate(input):
            single += 2 ** i

        return input
context_before:
    from copy import deepcopy
    from itertools import chain

    import pytest
    import torch
    from torch import nn, optim

    from torch.distributed.pipeline.sync.batchnorm import DeferredBatchNorm

    CHUNKS = 4
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

library: torch
test_file: test/distributed/pipeline/sync/test_deferred_batch_norm.py
test_function: test_running_stats
before:
    def test_running_stats(momentum):
        bn = nn.BatchNorm2d(3, momentum=momentum)
        dbn = DeferredBatchNorm.convert_deferred_batch_norm(deepcopy(bn), chunks=CHUNKS)

        input = torch.rand(16, 3, 224, 224)
        input = tilt_dist(input)

        bn(input)
        chunked_forward(dbn, input)

        assert torch.allclose(bn.running_mean, dbn.running_mean, atol=1e-4)
        assert torch.allclose(bn.running_var, dbn.running_var, atol=1e-4)
context_before:
    from copy import deepcopy
    from itertools import chain

    import pytest
    import torch
    from torch import nn, optim

    from torch.distributed.pipeline.sync.batchnorm import DeferredBatchNorm

    CHUNKS = 4
commit_before: c263bd43e8e8502d4726643bc6fd046f0130ac0e
commit_after: 32f585d9346e316e554c8d9bf7548af9f62141fc
change_type: deleted

torch
test/distributed/pipeline/sync/test_deferred_batch_norm.py
test_convert_deferred_batch_norm
def test_convert_deferred_batch_norm(): bn = nn.BatchNorm2d(3, track_running_stats=False) bn = DeferredBatchNorm.convert_deferred_batch_norm(bn, chunks=CHUNKS) assert type(bn) is nn.BatchNorm2d # because of track_running_stats=False dbn = DeferredBatchNorm(3, chunks=CHUNKS) dbn_again = DeferredBatchNorm.convert_deferred_batch_norm(dbn, chunks=CHUNKS) assert dbn is dbn_again dbn_again = DeferredBatchNorm.convert_deferred_batch_norm(dbn, chunks=CHUNKS + 1) assert dbn is not dbn_again # because of different chunks
from copy import deepcopy
from itertools import chain

import pytest
import torch
from torch import nn, optim

from torch.distributed.pipeline.sync.batchnorm import DeferredBatchNorm

CHUNKS = 4
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
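A short usage sketch of the conversion API exercised above; the Conv2d/BatchNorm2d module tree here is illustrative, and the sketch assumes (consistent with the test) that the converter walks the module tree and swaps in DeferredBatchNorm wherever a BatchNorm layer tracks running stats:

import torch
from torch import nn
from torch.distributed.pipeline.sync.batchnorm import DeferredBatchNorm

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))

# Replaces BatchNorm modules that track running stats with DeferredBatchNorm
# instances that accumulate statistics over 4 micro-batches per step.
model = DeferredBatchNorm.convert_deferred_batch_norm(model, chunks=4)
assert isinstance(model[1], DeferredBatchNorm)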
torch
test/distributed/pipeline/sync/test_deferred_batch_norm.py
test_eval
def test_eval():
    bn = nn.BatchNorm2d(3)
    dbn = DeferredBatchNorm.convert_deferred_batch_norm(deepcopy(bn), chunks=CHUNKS)

    input = torch.rand(16, 3, 224, 224)
    input = tilt_dist(input)

    bn(input)
    chunked_forward(dbn, input)

    bn.eval()
    dbn.eval()

    assert torch.allclose(bn(input), dbn(input), atol=1e-4)
from copy import deepcopy
from itertools import chain

import pytest
import torch
from torch import nn, optim

from torch.distributed.pipeline.sync.batchnorm import DeferredBatchNorm

CHUNKS = 4
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_deferred_batch_norm.py
test_optimize
def test_optimize():
    bn = nn.BatchNorm2d(3)
    dbn = DeferredBatchNorm.convert_deferred_batch_norm(deepcopy(bn), chunks=CHUNKS)

    opt = optim.SGD(chain(bn.parameters(), dbn.parameters()), lr=1.0)

    for i in range(5):
        input = torch.rand(16, 3, 224, 224)
        input = tilt_dist(input)

        # train
        y = bn(input)
        a = y.sum()
        a.backward()

        y = chunked_forward(dbn, input)
        b = y.sum()
        b.backward()

        opt.step()

        # eval
        bn.eval()
        dbn.eval()

        with torch.no_grad():
            assert torch.allclose(bn(input), dbn(input), atol=1e-1 * (10 ** i))
from copy import deepcopy
from itertools import chain

import pytest
import torch
from torch import nn, optim

from torch.distributed.pipeline.sync.batchnorm import DeferredBatchNorm

CHUNKS = 4
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_deferred_batch_norm.py
test_conv_bn
def test_conv_bn():
    bn = nn.Sequential(nn.Conv2d(3, 3, 1), nn.BatchNorm2d(3))
    dbn = DeferredBatchNorm.convert_deferred_batch_norm(deepcopy(bn), chunks=CHUNKS)

    input = torch.rand(16, 3, 224, 224)
    input = tilt_dist(input)

    opt = optim.SGD(chain(bn.parameters(), dbn.parameters()), lr=0.1)

    # 1st step
    a = bn(input)
    b = chunked_forward(dbn, input)

    # Outputs are different. (per-mini-batch vs. per-micro-batch)
    assert not torch.allclose(a, b)

    a.sum().backward()
    b.sum().backward()
    opt.step()
    opt.zero_grad()

    # Conv layers are also trained differently because of their different outputs.
    assert not torch.allclose(bn[0].weight, dbn[0].weight)

    # But BNs track identical running stats.
    assert torch.allclose(bn[1].running_mean, dbn[1].running_mean, atol=1e-4)
    assert torch.allclose(bn[1].running_var, dbn[1].running_var, atol=1e3)

    # 2nd step
    a = bn(input)
    b = chunked_forward(dbn, input)
    a.sum().backward()
    b.sum().backward()

    # BNs can't track identical running stats due to the different conv layers.
    assert not torch.allclose(bn[1].running_mean, dbn[1].running_mean, atol=1e-4)
    assert not torch.allclose(bn[1].running_var, dbn[1].running_var, atol=1e3)
from copy import deepcopy
from itertools import chain

import pytest
import torch
from torch import nn, optim

from torch.distributed.pipeline.sync.batchnorm import DeferredBatchNorm

CHUNKS = 4
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_deferred_batch_norm.py
test_input_requiring_grad
def test_input_requiring_grad():
    dbn = DeferredBatchNorm(3, chunks=CHUNKS)

    input = torch.rand(16, 3, 224, 224)
    input = tilt_dist(input)
    input.requires_grad = True

    chunked_forward(dbn, input)

    assert not dbn.sum.requires_grad
    assert dbn.sum.grad_fn is None
from copy import deepcopy
from itertools import chain

import pytest
import torch
from torch import nn, optim

from torch.distributed.pipeline.sync.batchnorm import DeferredBatchNorm

CHUNKS = 4
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_dependency.py
test_fork_join
def test_fork_join():
    logs = []

    class Log(torch.autograd.Function):
        @staticmethod
        def forward(ctx, number, tensor):
            ctx.number = number
            return tensor.detach()

        @staticmethod
        def backward(ctx, grad):
            logs.append(ctx.number)
            return None, grad

    a = torch.rand(1, device="cpu", requires_grad=True)
    b = torch.rand(1, device="cuda", requires_grad=True)

    a = Log.apply(1, a)

    a, phony = fork(a)
    b = join(b, phony)

    b = Log.apply(2, b)
    b = b.to("cpu")

    (a + b).backward()

    assert logs == [2, 1]
import weakref

import pytest
import torch

from torch.distributed.pipeline.sync.dependency import Fork, Join, fork, join
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
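fork and join build an artificial autograd edge between two branches using a zero-sized "phony" tensor, which is what the [2, 1] backward order above verifies. A CPU-only sketch of the same pattern under the fork/join API used in these tests:

import torch
from torch.distributed.pipeline.sync.dependency import fork, join

a = torch.rand(1, requires_grad=True)
b = torch.rand(1, requires_grad=True)

# fork returns the tensor plus a phony; join ties the phony into 'b',
# so autograd processes b's branch before a's in backward even though
# no data actually flows between the two branches.
a2, phony = fork(a)
b2 = join(b, phony)

(a2 + b2).sum().backward()
assert a.grad is not None and b.grad is not None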
torch
test/distributed/pipeline/sync/test_dependency.py
backward
def backward(ctx, grad):
    logs.append(ctx.number)
    return None, grad
import weakref

import pytest
import torch

from torch.distributed.pipeline.sync.dependency import Fork, Join, fork, join

class Log(torch.autograd.Function):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_inplace.py
forward
def forward(self, foo_bar):
    # 'foo' requires grad but 'bar' does not. An in-place operation on
    # 'bar' won't cause a RuntimeError.
    foo, bar = foo_bar

    # add_(1) is not idempotent, in contrast to relu_(). If it is
    # executed multiple times, it accumulates each difference onto
    # 'bar'.
    bar.add_(1)

    # 'bar' is still captured by checkpointing. 'foo' will get an
    # incorrect grad.
    return foo * bar
import pytest
import torch
from torch import nn

from torch.distributed.pipeline.sync import Pipe

class M(nn.Module):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_microbatch.py
test_batch_atomic
def test_batch_atomic():
    x = torch.tensor(42)
    b = Batch(x)

    assert b.atomic

    assert b.tensor is x
    with pytest.raises(AttributeError):
        b.tensors

    assert list(b) == [x]
    assert len(b) == 1
    assert b[0] is x
import pytest
import torch
import torch.cuda

from torch.distributed.pipeline.sync.microbatch import Batch, check, gather, scatter
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_microbatch.py
test_batch_non_atomic
def test_batch_non_atomic():
    x, y = torch.tensor(42), torch.tensor(21)
    b = Batch((x, y))

    assert not b.atomic

    with pytest.raises(AttributeError):
        b.tensor

    assert list(b) == [x, y]
    assert len(b) == 2
    assert b[0] is x
    assert b[1] is y
import pytest
import torch
import torch.cuda

from torch.distributed.pipeline.sync.microbatch import Batch, check, gather, scatter
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_microbatch.py
test_batch_call
def test_batch_call():
    a = Batch(torch.tensor(42))
    b = Batch((torch.tensor(42), torch.tensor(21)))

    def f(x):
        return x

    def g(x, y):
        return x, y

    assert a.call(f).atomic
    assert not b.call(g).atomic
import pytest
import torch
import torch.cuda

from torch.distributed.pipeline.sync.microbatch import Batch, check, gather, scatter
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
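Batch.call dispatches on atomicity: an atomic batch passes its single tensor to the function, while a non-atomic batch unpacks its tuple into positional arguments, and the result is wrapped in a new Batch. A small sketch consistent with the test above (the lambdas are illustrative):

import torch
from torch.distributed.pipeline.sync.microbatch import Batch

a = Batch(torch.tensor(2.0))
doubled = a.call(lambda x: x * 2)        # atomic: called as f(tensor)
assert doubled.atomic
assert doubled.tensor.item() == 4.0

b = Batch((torch.tensor(1.0), torch.tensor(2.0)))
swapped = b.call(lambda x, y: (y, x))    # non-atomic: called as f(*tensors)
assert not swapped.atomic
assert swapped[0].item() == 2.0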
torch
test/distributed/pipeline/sync/test_microbatch.py
f
def f(x):
    return x
import pytest
import torch
import torch.cuda

from torch.distributed.pipeline.sync.microbatch import Batch, check, gather, scatter
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_microbatch.py
g
def g(x, y):
    return x, y

assert a.call(f).atomic
assert not b.call(g).atomic
import pytest
import torch
import torch.cuda

from torch.distributed.pipeline.sync.microbatch import Batch, check, gather, scatter
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_microbatch.py
test_batch_setitem_by_index
def test_batch_setitem_by_index():
    a = Batch(torch.tensor(42))
    b = Batch((torch.tensor(42), torch.tensor(21)))

    a[0] = torch.tensor(0)
    b[0] = torch.tensor(0)

    assert a.atomic
    assert a[0].item() == 0

    assert not b.atomic
    assert len(b) == 2
    assert b[0].item() == 0
    assert b[1].item() == 21
import pytest
import torch
import torch.cuda

from torch.distributed.pipeline.sync.microbatch import Batch, check, gather, scatter
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_microbatch.py
test_batch_setitem_by_slice
def test_batch_setitem_by_slice():
    a = Batch(torch.tensor(42))
    b = Batch((torch.tensor(42), torch.tensor(21)))

    a[:] = (torch.tensor(0),)
    b[:] = (torch.tensor(0),)

    assert a.atomic
    assert a[0].item() == 0

    assert not b.atomic
    assert len(b) == 1
    assert b[0].item() == 0
import pytest
import torch
import torch.cuda

from torch.distributed.pipeline.sync.microbatch import Batch, check, gather, scatter
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_microbatch.py
test_check
def test_check():
    check(torch.device("cpu"), torch.tensor(42))
    check(torch.device("cpu"), torch.tensor(4), torch.tensor(2))

    with pytest.raises(TypeError):
        check(torch.device("cpu"), 42)

    with pytest.raises(TypeError):
        check(torch.device("cpu"), "str")

    with pytest.raises(TypeError):
        check(torch.device("cpu"), (torch.tensor(4), 2))
import pytest
import torch
import torch.cuda

from torch.distributed.pipeline.sync.microbatch import Batch, check, gather, scatter
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_microbatch.py
test_gather_tensors
def test_gather_tensors():
    a = torch.zeros(1, 1)
    b = torch.zeros(1, 1)

    ab = gather([Batch(a), Batch(b)])

    assert ab.size() == (2, 1)
import pytest
import torch
import torch.cuda

from torch.distributed.pipeline.sync.microbatch import Batch, check, gather, scatter
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_dependency.py
test_fork_join_enable_grad
def test_fork_join_enable_grad():
    x = torch.rand(1, requires_grad=True)

    with torch.enable_grad():
        x2, p = fork(x)

    assert p.requires_grad
    assert x2 is not x
    x = x2

    assert x.requires_grad
    assert p.requires_grad
    assert x.grad_fn.__class__ is Fork._backward_cls
    assert p.grad_fn.__class__ is Fork._backward_cls

    with torch.enable_grad():
        x2 = join(x, p)

    assert x2 is not x
    x = x2

    assert x.requires_grad
    assert x.grad_fn.__class__ is Join._backward_cls
import weakref

import pytest
import torch

from torch.distributed.pipeline.sync.dependency import Fork, Join, fork, join
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_dependency.py
do_not_apply
def do_not_apply(*args):
    raise AssertionError("Function.apply called")

monkeypatch.setattr("torch.autograd.Function.apply", do_not_apply)

x = torch.rand(1, requires_grad=True)

with torch.no_grad():
    x2, p = fork(x)

assert not p.requires_grad
assert x2 is x
x = x2

with torch.no_grad():
    x2 = join(x, p)

assert x2 is x
x = x2
import weakref

import pytest
import torch

from torch.distributed.pipeline.sync.dependency import Fork, Join, fork, join
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_dependency.py
test_fork_leak
def test_fork_leak():
    leak = None

    class F(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input):
            return input

        @staticmethod
        def backward(ctx, grad):
            nonlocal leak
            leak = weakref.ref(ctx)
            return grad

    x = torch.rand(1, requires_grad=True)
    x = F.apply(x)
    x, phony = fork(x)
    x = join(x, phony)

    x.backward()
    del x, phony

    assert leak() is None
import weakref

import pytest
import torch

from torch.distributed.pipeline.sync.dependency import Fork, Join, fork, join
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_dependency.py
backward
def backward(ctx, grad):
    logs.append(ctx.number)
    return None, grad
import weakref

import pytest
import torch

from torch.distributed.pipeline.sync.dependency import Fork, Join, fork, join

class Log(torch.autograd.Function):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_dependency.py
test_join_when_fork_not_requires_grad
def test_join_when_fork_not_requires_grad():
    x = torch.rand(2, 1)
    a, b = x.chunk(2)

    assert not a.requires_grad
    a, p = fork(a)
    assert not a.requires_grad
    assert not p.requires_grad

    assert not b.requires_grad
    b = join(b, p)
    assert not b.requires_grad
import weakref

import pytest
import torch

from torch.distributed.pipeline.sync.dependency import Fork, Join, fork, join
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_dependency.py
test_join_when_fork_requires_grad
def test_join_when_fork_requires_grad():
    x = torch.rand(2, 1)
    a, b = x.chunk(2)

    a.requires_grad_()
    assert a.requires_grad

    a, p = fork(a)
    assert a.requires_grad
    assert p.requires_grad

    assert not b.requires_grad
    b = join(b, p)
    assert b.requires_grad
import weakref

import pytest
import torch

from torch.distributed.pipeline.sync.dependency import Fork, Join, fork, join
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_inplace.py
test_inplace_incorrect_grad
def test_inplace_incorrect_grad(setup_rpc):
    class M(nn.Module):
        def forward(self, foo_bar):
            # 'foo' requires grad but 'bar' does not. An in-place operation on
            # 'bar' won't cause a RuntimeError.
            foo, bar = foo_bar

            # add_(1) is not idempotent, in contrast to relu_(). If it is
            # executed multiple times, it accumulates each difference onto
            # 'bar'.
            bar.add_(1)

            # 'bar' is still captured by checkpointing. 'foo' will get an
            # incorrect grad.
            return foo * bar

    model = nn.Sequential(M())
    model = Pipe(model, [1], devices=["cpu"], checkpoint="always")

    foo = torch.tensor([1.0], requires_grad=True)
    bar = torch.tensor([1.0])

    output = model((foo, bar)).local_value()
    del model
    output.backward()

    # The gradient of 'foo' should be 2, but it is 3 actually because
    # bar.add_(1) was executed twice due to checkpointing.
    assert foo.grad.item() == 2.0
import pytest
import torch
from torch import nn

from torch.distributed.pipeline.sync import Pipe
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
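The incorrect gradient documented above comes from checkpointing re-running the forward pass during backward, so the non-idempotent bar.add_(1) executes twice. A standalone sketch of the double execution using torch.utils.checkpoint as a stand-in for Pipe's checkpoint="always" mode (not the pipeline's own machinery):

import torch
from torch.utils.checkpoint import checkpoint

calls = []

def f(x):
    calls.append(1)  # count how many times forward actually runs
    return x * 2

x = torch.rand(1, requires_grad=True)
# use_reentrant=True selects the classic checkpoint behavior; older torch
# versions use it by default and may not accept the keyword.
y = checkpoint(f, x, use_reentrant=True)
y.backward()

# f ran once eagerly and once more during recomputation in backward.
assert len(calls) == 2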
torch
test/distributed/pipeline/sync/test_microbatch.py
test_gather_tuples
def test_gather_tuples():
    a = (torch.zeros(1, 1), torch.zeros(2, 2))
    b = (torch.zeros(1, 1), torch.zeros(2, 2))

    ab = gather([Batch(a), Batch(b)])

    assert isinstance(ab, tuple)
    assert ab[0].size() == (2, 1)
    assert ab[1].size() == (4, 2)
import pytest
import torch
import torch.cuda

from torch.distributed.pipeline.sync.microbatch import Batch, check, gather, scatter
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_microbatch.py
test_scatter_tensor
def test_scatter_tensor():
    ab = torch.zeros(2, 1)

    a, b = scatter(ab, chunks=2)

    assert a.tensor.size() == (1, 1)
    assert b.tensor.size() == (1, 1)
import pytest
import torch
import torch.cuda

from torch.distributed.pipeline.sync.microbatch import Batch, check, gather, scatter
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_microbatch.py
test_scatter_multiple_tensors
def test_scatter_multiple_tensors():
    ab = (torch.zeros(2, 1), torch.zeros(4, 2))

    a, b = scatter(*ab, chunks=2)

    assert list(a)[0].size() == (1, 1)
    assert list(b)[0].size() == (1, 1)
    assert list(a)[1].size() == (2, 2)
    assert list(b)[1].size() == (2, 2)
import pytest
import torch
import torch.cuda

from torch.distributed.pipeline.sync.microbatch import Batch, check, gather, scatter
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
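Taken together, the tests above show that scatter and gather are inverses along the batch dimension: scatter splits a mini-batch into Batch-wrapped micro-batches, gather concatenates them back. A round-trip sketch consistent with those tests:

import torch
from torch.distributed.pipeline.sync.microbatch import Batch, gather, scatter

x = torch.arange(8.0).reshape(4, 2)

batches = scatter(x, chunks=2)  # two Batch objects, each holding a (2, 2) tensor
assert all(isinstance(b, Batch) for b in batches)

y = gather(batches)             # reassembled along dim 0
assert torch.equal(x, y)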
torch
test/distributed/pipeline/sync/test_phony.py
test_phony_size
def test_phony_size():
    p = get_phony(torch.device("cpu"), requires_grad=False)
    assert p.size() == (0,)
import torch

from torch.distributed.pipeline.sync.phony import get_phony
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_phony.py
test_phony_requires_grad
def test_phony_requires_grad():
    p1 = get_phony(torch.device("cpu"), requires_grad=True)
    p2 = get_phony(torch.device("cpu"), requires_grad=False)
    assert p1.requires_grad
    assert not p2.requires_grad
import torch

from torch.distributed.pipeline.sync.phony import get_phony
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
torch
test/distributed/pipeline/sync/test_phony.py
test_cached_phony
def test_cached_phony():
    p1 = get_phony(torch.device("cpu"), requires_grad=True)
    p2 = get_phony(torch.device("cpu"), requires_grad=True)
    assert p1 is p2

    p3 = get_phony(torch.device("cpu"), requires_grad=False)
    p4 = get_phony(torch.device("cpu"), requires_grad=False)
    assert p3 is p4

    assert p1 is not p3
import torch

from torch.distributed.pipeline.sync.phony import get_phony
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted
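Phonies are the cached zero-sized tensors that fork and join thread between branches as data-free autograd glue. A compact sketch of the caching contract the tests above check, with one phony cached per (device, requires_grad) pair:

import torch
from torch.distributed.pipeline.sync.phony import get_phony

p = get_phony(torch.device("cpu"), requires_grad=True)
assert p.size() == (0,)  # carries no data, only an autograd edge
# Repeated lookups with the same key hand back the same tensor.
assert p is get_phony(torch.device("cpu"), requires_grad=True)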
torch
test/distributed/pipeline/sync/test_phony.py
test_phony_in_autograd_function
def test_phony_in_autograd_function():
    class Phonify(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input):
            phony = get_phony(input.device, requires_grad=False)
            return phony.detach()

    x = torch.rand(1, requires_grad=True)

    p1 = Phonify.apply(x)
    p2 = get_phony(torch.device("cpu"), requires_grad=True)

    assert p1 is not p2
    assert p1.grad_fn is not None
    assert p2.grad_fn is None
import torch

from torch.distributed.pipeline.sync.phony import get_phony
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
deleted