Dataset columns:
  library: stringclasses (1 value)
  test_file: stringclasses (785 values)
  test_function: stringlengths (1 to 295)
  before: stringlengths (0 to 448k)
  after: stringlengths (0 to 487k)
  context_before: stringclasses (947 values)
  context_after: stringlengths (0 to 16.3k)
  commit_before: stringclasses (1 value)
  commit_after: stringclasses (1 value)
  change_type: stringclasses (3 values)
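The rows below are records with exactly these columns. As a minimal sketch of how such a table can be inspected (the JSON Lines storage and the file name "diffs.jsonl" are placeholders, not taken from this document), using the Hugging Face datasets library:

# Minimal sketch; "diffs.jsonl" is a placeholder file name, and JSON Lines is an
# assumed storage format for the columns listed above.
from datasets import load_dataset

ds = load_dataset("json", data_files="diffs.jsonl", split="train")

# Each record pairs the pre-change and post-change source of one test function,
# together with the surrounding import context and the two commit hashes.
row = ds[0]
print(row["library"], row["test_file"], row["test_function"], row["change_type"])
print(row["before"])
print(row["after"])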
torch
test/distributed/test_dynamo_distributed.py
forward
before:
def forward(self, inputs):
    return self.net(inputs)

after:
def forward(self, inputs):
    if self.ctx_manager is not None:
        with self.ctx_manager():
            return self.net(inputs)
    else:
        return self.net(inputs)
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
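For orientation only: the change recorded in this row threads an optional ctx_manager through ToyModel's forward. A hedged usage sketch follows; the choice of torch.autocast is an assumption for illustration, since the row itself does not say which context manager the tests pass in.

# Illustrative sketch only; assumes the ToyModel class from this test file
# (its updated __init__ appears in a later row) and picks torch.autocast as
# one plausible zero-argument context-manager factory.
import functools

import torch

model = ToyModel(ctx_manager=functools.partial(torch.autocast, "cuda"))
out = model(torch.randn(20, 10))  # forward pass now runs inside torch.autocast("cuda")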
torch
test/distributed/test_dynamo_distributed.py
get_hf_bert
before:
def get_hf_bert(rank):
    # Note: use @import_transformers_or_skip on your test case if you use this
    # in a multiprocessing test
    try:
        from transformers import BertConfig, AutoModelForMaskedLM
    except ImportError as e:
        raise unittest.SkipTest("Unable to import transformers") from e
    batch_size, max_length, config, device = 4, 512, BertConfig(), f"cuda:{rank}"
    model = AutoModelForMaskedLM.from_config(config).to(device)
    input_ids = torch.randint(0, config.vocab_size, (batch_size, max_length)).to(device)
    decoder_ids = torch.randint(0, config.vocab_size, (batch_size, max_length)).to(device)
    inputs = {'input_ids': input_ids, 'labels': decoder_ids}
    model.train()
    return model, inputs


class CheckSplitsCompiler:
    def __init__(self):
        self.compiler_called = 0

    def compile_fn(self, gm, example_inputs):
        self.compiler_called += 1
        return gm


# This simulates DDP, but it doesn't actually do any process communication;
# it just has enough properties so that the dynamo distributed optimization is
# able to optimize. Feel free to simulate more properties as necessary. The
# other important thing is patching _active_ddp_module, which is what actually
# triggers DDP optimization
class FakeDDP(nn.Module):
    def __init__(self, module):
        super().__init__()
        self.module = module
        bucket_cap_mb = 25
        self.bucket_bytes_cap = int(bucket_cap_mb * 1024 * 1024)

    @contextmanager
    def _inside_ddp_forward(self):
        DDP._active_ddp_module = self
        try:
            yield
        except Exception:
            raise
        finally:
            DDP._active_ddp_module = None

    def forward(self, *inputs, **kwargs):
        with self._inside_ddp_forward():
            return self.module.forward(*inputs, **kwargs)

after:
def get_hf_bert(rank):
    # Note: use @import_transformers_or_skip on your test case if you use this
    # in a multiprocessing test
    try:
        from transformers import AutoModelForMaskedLM, BertConfig
    except ImportError as e:
        raise unittest.SkipTest("Unable to import transformers") from e
    batch_size, max_length, config, device = 4, 512, BertConfig(), f"cuda:{rank}"
    model = AutoModelForMaskedLM.from_config(config).to(device)
    input_ids = torch.randint(0, config.vocab_size, (batch_size, max_length)).to(device)
    decoder_ids = torch.randint(0, config.vocab_size, (batch_size, max_length)).to(
        device
    )
    inputs = {"input_ids": input_ids, "labels": decoder_ids}
    model.train()
    return model, inputs


class CheckSplitsCompiler:
    def __init__(self) -> None:
        self.compiler_called = 0

    def compile_fn(self, gm, example_inputs):
        self.compiler_called += 1
        return gm


# This simulates DDP, but it doesn't actually do any process communication;
# it just has enough properties so that the dynamo distributed optimization is
# able to optimize. Feel free to simulate more properties as necessary. The
# other important thing is patching _active_ddp_module, which is what actually
# triggers DDP optimization
class FakeDDP(nn.Module):
    def __init__(self, module, bucket_cap_mb=25):
        super().__init__()
        self.module = module
        self.bucket_bytes_cap = int(bucket_cap_mb * 1024 * 1024)

    @contextmanager
    def _inside_ddp_forward(self):
        DDP._active_ddp_module = self
        try:
            yield
        finally:
            DDP._active_ddp_module = None

    def forward(self, *inputs, **kwargs):
        with self._inside_ddp_forward():
            return self.module.forward(*inputs, **kwargs)
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
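The FakeDDP helper defined in this row is used elsewhere in the same test file roughly as follows; this is a sketch assembled from the test pattern visible in the later rows of this table, not a verbatim quote.

# Wrap a module in FakeDDP so Dynamo's DDPOptimizer path is exercised without
# any real process-group communication, then compile and run it.
import torch
import torch._dynamo

model, inputs = get_hf_bert(0)   # helper defined in this row's before/after code
model = FakeDDP(model)           # bucket_cap_mb defaults to 25 in the "after" version
opt_model = torch._dynamo.optimize("aot_eager")(model)
opt_outputs = opt_model(**inputs)
opt_outputs.loss.backward()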
torch
test/distributed/test_dynamo_distributed.py
__init__
before:
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5):
    super().__init__()
    self.net = nn.Sequential(
        *[nn.Linear(in_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, out_feat), nn.ReLU()]
    )

after:
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None):
    super().__init__()
    self.ctx_manager = ctx_manager
    self.net = nn.Sequential(
        *[nn.Linear(in_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, out_feat), nn.ReLU()]
    )
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
_inside_ddp_forward
before:
def _inside_ddp_forward(self):
    DDP._active_ddp_module = self
    try:
        yield
    except Exception:
        raise
    finally:
        DDP._active_ddp_module = None

after:
def _inside_ddp_forward(self):
    DDP._active_ddp_module = self
    try:
        yield
    finally:
        DDP._active_ddp_module = None
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging from transformers import BertConfig, AutoModelForMaskedLM class FakeDDP(nn.Module): from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig class FakeDDP(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
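The only change in this row drops the except Exception: raise clause, which is behaviorally a no-op: the immediate re-raise still reaches the finally block, so _active_ddp_module is reset either way. A standalone sketch of the equivalent pattern, not taken from the dataset:

from contextlib import contextmanager

_state = {"active": False}

@contextmanager
def fake_ddp_guard():
    # With or without an intervening `except Exception: raise`, the finally
    # block always resets the flag, so "before" and "after" behave identically.
    _state["active"] = True
    try:
        yield
    finally:
        _state["active"] = False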
torch
test/distributed/test_dynamo_distributed.py
run_hf_bert_ddp
def run_hf_bert_ddp(self, model, inputs, backend): reset_rng_state() correct_outputs = model(**inputs) correct_loss = correct_outputs.loss correct_loss.backward() reset_rng_state() opt_model = torch._dynamo.optimize(backend)(model) opt_outputs = opt_model(**inputs) opt_loss = opt_outputs.loss opt_loss.backward() inputs_flat = [inputs[k] for k in inputs] correct_results = collect_results(model, correct_outputs.logits, correct_loss, inputs_flat) opt_results = collect_results(opt_model, opt_outputs.logits, opt_loss, inputs_flat) self.assertTrue(same(correct_results, opt_results)) class TestFakeDistributedSingleProc(torch._dynamo.test_case.TestCase): @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") @patch.object(config, "optimize_ddp", True) @patch.object(torch._inductor.config, "fallback_random", True) def test_hf_bert_ddp_inductor(self): model, inputs = get_hf_bert(0) model = FakeDDP(model) run_hf_bert_ddp(self, model, inputs, "inductor") @patch.object(config, "optimize_ddp", True) def test_hf_bert_ddp_aot_eager(self): model, inputs = get_hf_bert(0) model = FakeDDP(model) run_hf_bert_ddp(self, model, inputs, "aot_eager") @patch.object(config, "optimize_ddp", True) def test_issue90375(self): class Model(nn.Module): def forward(self): return torch.randn(3) * torch.randn(3) model = Model() model = FakeDDP(model) opt_model = torch._dynamo.optimize("aot_eager")(model) opt_model() # Are these tests failing? Check and see if TestFakeDistributedSingleProc has a # single process version; if it's just a problem in the Dynamo distributed # optimizer, you should be able to repro it single process! @requires_nccl() class TestMultiProc(DynamoDistributedMultiProcTestCase): """ Note: MultiProcTestCase spawns processes per test and is slow. Prefer MultiThreadedTestCase for most tests. Perhaps use this one sparingly for integration tests. 
""" @skip_if_lt_x_gpu(2) @patch.object(config, "optimize_ddp", False) def test_ddp_baseline_aot_eager_multiprocess(self): with _dynamo_dist_per_rank_init(self.rank, self.world_size): self.assertFalse(config.optimize_ddp) m, inputs, correct_outputs = get_model(f"cuda:{self.rank}") m = DDP(m, device_ids=[self.rank]) m = torch._dynamo.optimize("aot_eager")(m) outputs = m(inputs) self.assertTrue(same(correct_outputs, outputs)) @skip_if_lt_x_gpu(2) @import_transformers_or_skip() @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") @patch.object(config, "optimize_ddp", True) @patch.object(torch._inductor.config, "fallback_random", True) def test_hf_bert_ddp_inductor(self): with _dynamo_dist_per_rank_init(self.rank, self.world_size): model, inputs = get_hf_bert(self.rank) model = DDP(model) run_hf_bert_ddp(self, model, inputs, "inductor") @skip_if_lt_x_gpu(2) @import_transformers_or_skip() @patch.object(config, "optimize_ddp", True) def test_hf_bert_ddp_aot_eager(self): with _dynamo_dist_per_rank_init(self.rank, self.world_size): model, inputs = get_hf_bert(self.rank) model = DDP(model) run_hf_bert_ddp(self, model, inputs, "aot_eager") @skip_if_lt_x_gpu(1) def test_fsdp_aot_eager(self): with _dynamo_dist_per_rank_init(self.rank, self.world_size): # Test with basic FSDP wrapping (outer wrap around whole model) m, inputs, correct_outputs = get_model(f"cuda:{self.rank}") fsdp_m = FSDP(m, use_orig_params=True) fsdp_m = torch._dynamo.optimize("aot_eager")(fsdp_m) outputs = fsdp_m(inputs) self.assertTrue(same(correct_outputs, outputs)) # Test with recursive wrapping, nested FSDP around each Linear m, inputs, correct_outputs = get_model(f"cuda:{self.rank}") fsdp_m = FSDP( m, auto_wrap_policy=functools.partial( transformer_auto_wrap_policy, transformer_layer_cls=(nn.Linear, ) ), use_orig_params=True ) fsdp_m = torch._dynamo.optimize("aot_eager")(fsdp_m) outputs = fsdp_m(inputs) self.assertTrue(same(correct_outputs, outputs)) @skip_if_lt_x_gpu(1) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") def test_fsdp_inductor(self): with _dynamo_dist_per_rank_init(self.rank, self.world_size): # Test with basic FSDP wrapping (outer wrap around whole model) m, inputs, correct_outputs = get_model(f"cuda:{self.rank}") fsdp_m = FSDP(m, use_orig_params=True) fsdp_m = torch._dynamo.optimize("inductor")(fsdp_m) outputs = fsdp_m(inputs) self.assertTrue(same(correct_outputs, outputs)) # Test with recursive wrapping, nested FSDP around each Linear m, inputs, correct_outputs = get_model(f"cuda:{self.rank}") fsdp_m = FSDP( m, auto_wrap_policy=functools.partial( transformer_auto_wrap_policy, transformer_layer_cls=(nn.Linear, ) ), use_orig_params=True ) fsdp_m = torch._dynamo.optimize("inductor")(fsdp_m) outputs = fsdp_m(inputs) self.assertTrue(same(correct_outputs, outputs)) @import_transformers_or_skip() @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") # TODO(whc) Investigate why cudagraphs breaks inductor+fsdp for hf_bert @patch.object(torch._inductor.config.triton, "cudagraphs", False) @patch.object(torch._inductor.config, "fallback_random", True) def test_hf_bert_fsdp(self): from transformers.models.bert.modeling_bert import BertLayer def apply_fsdp(model, wrap_policy): model = FSDP( copy.deepcopy(model), auto_wrap_policy=wrap_policy, use_orig_params=True ) return model with _dynamo_dist_per_rank_init(self.rank, self.world_size): for (wrap_policy, test_instance) in ( ( None, "FSDP without recursive wrapping" ), ( 
functools.partial( transformer_auto_wrap_policy, transformer_layer_cls=(BertLayer, ) ), "FSDP with recursive wrapping BertLayer instances" ) ): print(f"Running hf_bert test for {test_instance}") model, inputs = get_hf_bert(self.rank) reset_rng_state() eager_model = apply_fsdp(model, wrap_policy) correct_outputs = eager_model(**inputs) correct_loss = correct_outputs.loss correct_loss.backward() reset_rng_state() opt_model = apply_fsdp(model, wrap_policy) opt_model = torch._dynamo.optimize("inductor")(opt_model) opt_outputs = opt_model(**inputs) opt_loss = opt_outputs.loss opt_loss.backward() inputs_flat = [inputs[k] for k in inputs] correct_results = collect_results(eager_model, correct_outputs.logits, correct_loss, inputs_flat) opt_results = collect_results(opt_model, opt_outputs.logits, opt_loss, inputs_flat) self.assertTrue(same(correct_results, opt_results)) @requires_nccl() class TestSingleProc(DynamoDistributedSingleProcTestCase): """ Test harness initializes dist process group. Test simple things here since they are simpler to debug. Use TestMultiProc for things that really need to run on multiple nodes """ def get_model(self, bsz=20, in_feat=10, hidden_feat=5000, out_feat=5): m = ToyModel(in_feat=in_feat, hidden_feat=hidden_feat, out_feat=out_feat).to(self.device) m.apply(init_weights) inputs = torch.rand(bsz, in_feat).to(self.device) outputs = m(inputs) return m, inputs, outputs @patch.object(config, "optimize_ddp", False) def test_ddp_baseline_aot_eager(self): from torch.nn.parallel import DistributedDataParallel as DDP m, inputs, correct_outputs = self.get_model() ddp_m = DDP(m, device_ids=self.device_ids) ddp_m = torch._dynamo.optimize("aot_eager")(ddp_m) outputs = ddp_m(inputs) self.assertTrue(same(correct_outputs, outputs)) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") @patch.object(config, "optimize_ddp", False) def test_ddp_baseline_inductor(self): from torch.nn.parallel import DistributedDataParallel as DDP m, inputs, correct_outputs = self.get_model() ddp_m = DDP(m, device_ids=self.device_ids) ddp_m = torch._dynamo.optimize("inductor")(ddp_m) outputs = ddp_m(inputs) self.assertTrue(same(correct_outputs, outputs)) @patch.object(config, "optimize_ddp", True) def test_graph_split(self): """ Just ensures that the appropriate number of splits happen (based on bucket size and model parameters) - verifies the number of times the user-provided compiler is called by the DDPOptimizer which is doing the graph splitting """ m, inputs, correct_outputs = self.get_model() ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=25) check_splits_compiler = CheckSplitsCompiler() @torch._dynamo.optimize(check_splits_compiler.compile_fn) def opt_fn(inputs): return ddp_m(inputs) opt_outputs = opt_fn(inputs) self.assertTrue(same(correct_outputs, opt_outputs)) self.assertEqual(check_splits_compiler.compiler_called, 3) # ensure compatibilty with dynamo explain explain_out = torch._dynamo.explain(ddp_m, inputs) break_reasons = explain_out[4] self.assertEqual(len(break_reasons), 3) self.assertTrue(all(["DDPOptimizer" in r.reason for r in break_reasons])) @patch.object(config, "optimize_ddp", True) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") def test_graph_split_inductor(self): """ Same as above, but using inductor backend. We observed issues with inductor/fx interface in the past. 
""" m, inputs, correct_outputs = self.get_model() ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=25) @torch._dynamo.optimize("inductor") def opt_fn(inputs): return ddp_m(inputs) opt_outputs = opt_fn(inputs) self.assertTrue(same(correct_outputs, opt_outputs)) @patch.object(config, "optimize_ddp", True) def test_no_split(self): """ Ensures the DDPOptimizer returns a correct, compiled module without introducing graph splits. (Based on model parmeters fitting in the bucket) """ # DDP will always do a 'first bucket' with a really small size; so only a tiny model will escape this m, inputs, correct_outputs = self.get_model(hidden_feat=5) ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=250) check_splits_compiler = CheckSplitsCompiler() @torch._dynamo.optimize(check_splits_compiler.compile_fn) def opt_fn(inputs): return ddp_m(inputs) opt_outputs = opt_fn(inputs) self.assertTrue(same(correct_outputs, opt_outputs)) self.assertEqual(check_splits_compiler.compiler_called, 1) @patch.object(config, "optimize_ddp", True) def test_aot_autograd(self): """ Explicitly check AotAutograd family of compilers work, since they require example inputs propagated between graph splits. """ m, inputs, correct_outputs = self.get_model() ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=25) @torch._dynamo.optimize("aot_eager") def opt_fn(inputs): return ddp_m(inputs) opt_outputs = opt_fn(inputs) opt_outputs.sum().backward() self.assertTrue(same(correct_outputs, opt_outputs)) @patch.object(config, "optimize_ddp", True) def test_custom_layer(self): """ Just ensures that the appropriate number of splits happen (based on bucket size and model parameters) - verifies the number of times the user-provided compiler is called by the DDPOptimizer which is doing the graph splitting """ m, inputs, correct_outputs = get_custom_model(self.device) ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=1) check_splits_compiler = CheckSplitsCompiler() @torch._dynamo.optimize(check_splits_compiler.compile_fn) def opt_fn(inputs): return ddp_m(*inputs) opt_outputs = opt_fn(inputs) self.assertTrue(same(correct_outputs, opt_outputs)) self.assertEqual(check_splits_compiler.compiler_called, 3) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") def test_empty_graph_inductor(self): def fn(): get_world_size = torch.distributed.distributed_c10d.get_world_size() return (get_world_size,) opt_fn = torch._dynamo.optimize("inductor")(fn) res = None try: res = opt_fn()[0] except Exception: pass self.assertEqual(res, 1) @patch.object(config, "optimize_ddp", False) def test_ignored_parameters(self): """ Verifies ddp graph-split logic ignores parameters marked to ignore on DDP module. Hooks up graph-split optimizer manually so it can peek at internal state. 
""" m, inputs, correct_outputs = get_custom_model(self.device) parameters_to_ignore = ["seq.2.weight", "seq.4.linear.bias"] DDP._set_params_and_buffers_to_ignore_for_model(m, parameters_to_ignore) ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=25) parameter_ids_to_ignore = [ id(ddp_m.module.get_parameter(p)) for p in ddp_m.parameters_to_ignore ] check_splits_compiler = CheckSplitsCompiler() ddp_optimizer = DDPOptimizer( bucket_bytes_cap=ddp_m.bucket_bytes_cap, backend_compile_fn=check_splits_compiler.compile_fn ) @torch._dynamo.optimize(ddp_optimizer.compile_fn) def opt_fn(inputs): return ddp_m(*inputs) opt_outputs = opt_fn(inputs) self.assertTrue(same(correct_outputs, opt_outputs)) self.assertEqual(check_splits_compiler.compiler_called, 2) for b in ddp_optimizer.buckets: for p_id in b.param_ids: self.assertFalse(p_id in parameter_ids_to_ignore) def test_fsdp_orig_params_assert(self): # Test with basic FSDP wrapping (outer wrap around whole model) m, inputs, correct_outputs = get_model(f"cuda:{self.rank}") fsdp_m = FSDP(m, use_orig_params=False) fsdp_m = torch._dynamo.optimize()(fsdp_m) self.assertRaisesRegex(AssertionError, "Dynamo only supports FSDP with use_orig_params=True", fsdp_m, inputs) if __name__ == "__main__": from torch._dynamo.test_case import run_tests run_tests()
def run_hf_bert_ddp(self, model, inputs, backend): reset_rng_state() correct_outputs = model(**inputs) correct_loss = correct_outputs.loss correct_loss.backward() reset_rng_state() opt_model = torch._dynamo.optimize(backend)(model) opt_outputs = opt_model(**inputs) opt_loss = opt_outputs.loss opt_loss.backward() inputs_flat = [inputs[k] for k in inputs] correct_results = collect_results( model, correct_outputs.logits, correct_loss, inputs_flat ) opt_results = collect_results(opt_model, opt_outputs.logits, opt_loss, inputs_flat) self.assertTrue(same(correct_results, opt_results)) class TestFakeDistributedSingleProc(torch._dynamo.test_case.TestCase): @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") @patch.object(config, "optimize_ddp", True) @patch.object(torch._inductor.config, "fallback_random", True) def test_hf_bert_ddp_inductor(self): model, inputs = get_hf_bert(0) model = FakeDDP(model) run_hf_bert_ddp(self, model, inputs, "inductor") @patch.object(config, "optimize_ddp", True) def test_hf_bert_ddp_aot_eager(self): model, inputs = get_hf_bert(0) model = FakeDDP(model) run_hf_bert_ddp(self, model, inputs, "aot_eager") @patch.object(config, "optimize_ddp", True) def test_issue90375(self): class Model(nn.Module): def forward(self): return torch.randn(3) * torch.randn(3) model = Model() model = FakeDDP(model) opt_model = torch._dynamo.optimize("aot_eager")(model) opt_model() @patch.object(config, "optimize_ddp", True) def test_symbol_splitting(self): class Model(nn.Module): def __init__(self) -> None: super().__init__() self.weight1 = nn.Parameter(torch.randn(512, 512)) self.weight2 = nn.Parameter(torch.randn(512, 512)) def forward(self, x): x = torch.cat([x, x]) y = x @ self.weight1 z = x + y @ self.weight2 return z model = Model() model = FakeDDP(model) opt_model = torch.compile(dynamic=True)(model) opt_model(torch.randn(20, 512)) @config.patch(optimize_ddp=True, capture_scalar_outputs=True) def test_unbacked_symbol_splitting_direct(self): class Model(nn.Module): def __init__(self) -> None: super().__init__() self.weight1 = nn.Parameter(torch.randn(512, 512)) self.weight2 = nn.Parameter(torch.randn(512, 512)) def forward(self, x, y): u0, u1 = y.tolist() x = torch.cat([x, x]) y = x @ self.weight1 z = (x + y @ self.weight2) * u0 return z model = Model() model = FakeDDP(model) opt_model = torch.compile(dynamic=True)(model) opt_model(torch.randn(20, 512), torch.tensor([12, 13])) @config.patch(optimize_ddp=True, capture_scalar_outputs=True) def test_unbacked_symbol_splitting_indirect(self): class Model(nn.Module): def __init__(self) -> None: super().__init__() self.weight1 = nn.Parameter(torch.randn(512, 512)) self.weight2 = nn.Parameter(torch.randn(512, 512)) def forward(self, x, y): u0, u1 = y.tolist() a = torch.ones(u0) x = torch.cat([x, x]) y = x @ self.weight1 z = (x + y @ self.weight2) * a.sum() return z model = Model() model = FakeDDP(model) opt_model = torch.compile(dynamic=True)(model) opt_model(torch.randn(20, 512), torch.tensor([12, 13])) @config.patch(optimize_ddp=True, capture_scalar_outputs=True) def test_unbacked_symbol_splitting_torture_multi(self): class Model(nn.Module): def __init__(self) -> None: super().__init__() self.weight1 = nn.Parameter(torch.randn(512, 512)) self.weight2 = nn.Parameter(torch.randn(512, 512)) self.weight3 = nn.Parameter(torch.randn(512, 512)) def forward(self, x, y): # partition one (contains the u0 def) u0, u1 = y.tolist() x = torch.cat([x, x]) y1 = x @ self.weight1 # partition two (contains the variable) y2 = y1 @ 
self.weight2 a = torch.ones(u0) # partition three z = (x + y2 @ self.weight3) * a.sum() return z model = Model() model = FakeDDP(model, bucket_cap_mb=1) opt_model = torch.compile(dynamic=True)(model) opt_model(torch.randn(20, 512), torch.tensor([12, 13])) @config.patch(optimize_ddp=True, capture_dynamic_output_shape_ops=True) def test_unbacked_symbol_splitting_no_binding(self): class Model(nn.Module): def __init__(self) -> None: super().__init__() self.weight1 = nn.Parameter(torch.randn(512, 512)) self.weight2 = nn.Parameter(torch.randn(512, 512)) def forward(self, x, y): nz = y.nonzero() x = torch.cat([x, x]) y = x @ self.weight1 z = (x + y @ self.weight2) * (nz + 1).sum() return z model = Model() model = FakeDDP(model) opt_model = torch.compile(dynamic=True)(model) opt_model(torch.randn(20, 512), torch.tensor([0.0, 12.0, 0.0, 11.0])) @patch.object(config, "optimize_ddp", True) def test_call_method_forward(self): class Model(nn.Module): def __init__( self, ): super().__init__() layers = [] for l in range(2): layer = nn.ModuleList( [ nn.LayerNorm(96), nn.MultiheadAttention( embed_dim=96, num_heads=4, batch_first=True ), ] ) layers.append(layer) self.layers = nn.ModuleList(layers) def forward(self, x: torch.Tensor) -> torch.Tensor: # x: [Batch, Freq, Time, Feature] B, F, T, H = x.shape for m in self.layers: x = x.reshape(B * F, T, H) x = m[0](x) x, attn = m[1].forward(x, x, x) x = x.reshape(B, F, T, H) return x model = Model() model = FakeDDP(model) opt_model = torch.compile(model) opt_model(torch.randn(2, 129, 100, 96)) # Are these tests failing? Check and see if TestFakeDistributedSingleProc has a # single process version; if it's just a problem in the Dynamo distributed # optimizer, you should be able to repro it single process! @requires_nccl() class TestMultiProc(DynamoDistributedMultiProcTestCase): """ Note: MultiProcTestCase spawns processes per test and is slow. Prefer MultiThreadedTestCase for most tests. Perhaps use this one sparingly for integration tests. 
""" @skip_if_lt_x_gpu(2) @config.patch(optimize_ddp=False, enable_compiler_collectives=True) def test_ddp_baseline_aot_eager_multiprocess(self): with _dynamo_dist_per_rank_init(self.rank, self.world_size): self.assertFalse(config.optimize_ddp) m, inputs, correct_outputs = get_model(f"cuda:{self.rank}") m = DDP(m, device_ids=[self.rank]) m = torch._dynamo.optimize("aot_eager")(m) outputs = m(inputs) self.assertTrue(same(correct_outputs, outputs)) def _test_hf_bert_ddp_inductor(self, static_graph): with _dynamo_dist_per_rank_init(self.rank, self.world_size): model, inputs = get_hf_bert(self.rank) model = DDP(model, static_graph=static_graph) run_hf_bert_ddp(self, model, inputs, "inductor") @skip_if_lt_x_gpu(2) @import_transformers_or_skip() @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") @config.patch(optimize_ddp=True, enable_compiler_collectives=True) @patch.object(torch._inductor.config, "fallback_random", True) def test_hf_bert_ddp_inductor(self): self._test_hf_bert_ddp_inductor(static_graph=False) @skip_if_lt_x_gpu(2) @import_transformers_or_skip() @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") @config.patch(optimize_ddp=True, enable_compiler_collectives=True) @patch.object(torch._inductor.config, "fallback_random", True) def test_hf_bert_ddp_inductor_static_graph(self): self._test_hf_bert_ddp_inductor(static_graph=True) def _test_hf_bert_aot_eager(self, static_graph): with _dynamo_dist_per_rank_init(self.rank, self.world_size): model, inputs = get_hf_bert(self.rank) model = DDP(model, static_graph=static_graph) run_hf_bert_ddp(self, model, inputs, "aot_eager") @skip_if_lt_x_gpu(2) @import_transformers_or_skip() @config.patch(optimize_ddp=True, enable_compiler_collectives=True) def test_hf_bert_ddp_aot_eager(self): self._test_hf_bert_aot_eager(static_graph=False) @skip_if_lt_x_gpu(2) @import_transformers_or_skip() @config.patch(optimize_ddp=True, enable_compiler_collectives=True) def test_hf_bert_ddp_aot_eager_static_graph(self): self._test_hf_bert_aot_eager(static_graph=True) @skip_if_lt_x_gpu(2) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") @config.patch(optimize_ddp=False, enable_compiler_collectives=True) def test_ddp_activation_checkpointing(self): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) class MyModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.fc1 = torch.nn.Linear(64, 32) self.fc2 = torch.nn.Linear(32, 16) self.fc3 = torch.nn.Linear(16, 8) def forward(self, inp): return self.fc3(self.fc2(self.fc1(inp))) with _dynamo_dist_per_rank_init(self.rank, self.world_size): self.assertFalse(config.optimize_ddp) model = MyModel().to(device="cuda") # Activation checkpointing for Linear layers. 
non_reentrant_wrapper = functools.partial( checkpoint_wrapper, checkpoint_impl=CheckpointImpl.NO_REENTRANT, ) check_fn = lambda submodule: isinstance( # noqa: E731 submodule, torch.nn.Linear ) apply_activation_checkpointing( model, checkpoint_wrapper_fn=non_reentrant_wrapper, check_fn=check_fn ) model = DDP(model) x = torch.randn(10, 64).cuda() correct_outputs = model(x) opt_model = torch.compile(model) outputs = opt_model(x) self.assertTrue(same(correct_outputs, outputs)) @config.patch(enable_compiler_collectives=True) @skip_if_lt_x_gpu(1) def test_fsdp_aot_eager(self): with _dynamo_dist_per_rank_init(self.rank, self.world_size): # Test with basic FSDP wrapping (outer wrap around whole model) m, inputs, correct_outputs = get_model(f"cuda:{self.rank}") fsdp_m = FSDP(m, use_orig_params=True) fsdp_m = torch._dynamo.optimize("aot_eager")(fsdp_m) outputs = fsdp_m(inputs) self.assertTrue(same(correct_outputs, outputs)) # Test with recursive wrapping, nested FSDP around each Linear m, inputs, correct_outputs = get_model(f"cuda:{self.rank}") fsdp_m = FSDP( m, auto_wrap_policy=functools.partial( transformer_auto_wrap_policy, transformer_layer_cls=(nn.Linear,) ), use_orig_params=True, ) fsdp_m = torch._dynamo.optimize("aot_eager")(fsdp_m) outputs = fsdp_m(inputs) self.assertTrue(same(correct_outputs, outputs)) @config.patch(enable_compiler_collectives=True) @skip_if_lt_x_gpu(1) def test_fsdp_setattr(self): with _dynamo_dist_per_rank_init(self.rank, self.world_size): # Test with basic FSDP wrapping (outer wrap around whole model) from torch._dynamo.utils import counters counters.clear() m, inputs, correct_outputs = get_mutating_model(f"cuda:{self.rank}") fsdp_m = FSDP(m, use_orig_params=True) fsdp_m = torch.compile(fsdp_m, backend="eager", fullgraph=False) outputs = fsdp_m(inputs) self.assertTrue(same(correct_outputs, outputs)) self.assertEqual(len(counters["graph_break"]), 1) first_graph_break = list(counters["graph_break"].keys())[0] # noqa: RUF015 self.assertTrue("setattr" not in first_graph_break) @config.patch(enable_compiler_collectives=True) @skip_if_lt_x_gpu(1) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") def test_fsdp_inductor(self): with _dynamo_dist_per_rank_init(self.rank, self.world_size): # Test with basic FSDP wrapping (outer wrap around whole model) m, inputs, correct_outputs = get_model(f"cuda:{self.rank}") fsdp_m = FSDP(m, use_orig_params=True) fsdp_m = torch._dynamo.optimize("inductor")(fsdp_m) outputs = fsdp_m(inputs) self.assertTrue(same(correct_outputs, outputs)) # Test with recursive wrapping, nested FSDP around each Linear m, inputs, correct_outputs = get_model(f"cuda:{self.rank}") fsdp_m = FSDP( m, auto_wrap_policy=functools.partial( transformer_auto_wrap_policy, transformer_layer_cls=(nn.Linear,) ), use_orig_params=True, ) fsdp_m = torch._dynamo.optimize("inductor")(fsdp_m) outputs = fsdp_m(inputs) self.assertTrue(same(correct_outputs, outputs)) @config.patch(enable_compiler_collectives=True) @skip_if_lt_x_gpu(1) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") def test_fsdp_activation_checkpointing(self): with _dynamo_dist_per_rank_init(self.rank, self.world_size): model, inputs = get_toy_model_for_activation_checkpointing( f"cuda:{self.rank}" ) is_inner = lambda module: isinstance(module, ToyInnerModel) # noqa: E731 wrap_policy = functools.partial(lambda_auto_wrap_policy, lambda_fn=is_inner) model = apply_fsdp_with_checkpointing(model, wrap_policy, is_inner) correct_outputs = model(inputs) cnt = 
torch._dynamo.testing.CompileCounterWithBackend("inductor") opt_model = torch._dynamo.optimize(cnt)(model) outputs = opt_model(inputs) self.assertTrue(same(correct_outputs, outputs)) # Each FSDP module is a separate graph self.assertEqual(cnt.frame_count, 2) self.assertTrue( find_first_node(cnt.graphs[0], tag_activation_checkpoint) is not None ) @import_transformers_or_skip() @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") # TODO(whc) Investigate why cudagraphs breaks inductor+fsdp for hf_bert @patch.object(torch._inductor.config.triton, "cudagraphs", False) @patch.object(torch._inductor.config, "fallback_random", True) @config.patch(enable_compiler_collectives=True) @unittest.skipIf( PLATFORM_SUPPORTS_FLASH_ATTENTION or PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "Inaccurate results with fused SDPA kernels", ) def test_hf_bert_fsdp(self): def apply_fsdp(model, wrap_policy): model = FSDP( copy.deepcopy(model), auto_wrap_policy=wrap_policy, use_orig_params=True ) return model with _dynamo_dist_per_rank_init(self.rank, self.world_size): for wrap_policy, test_instance in ( (None, "FSDP without recursive wrapping"), ): print(f"Running hf_bert test for {test_instance}") model, inputs = get_hf_bert(self.rank) reset_rng_state() eager_model = apply_fsdp(model, wrap_policy) correct_outputs = eager_model(**inputs) correct_loss = correct_outputs.loss correct_loss.backward() reset_rng_state() opt_model = apply_fsdp(model, wrap_policy) opt_model = torch._dynamo.optimize("inductor")(opt_model) opt_outputs = opt_model(**inputs) opt_loss = opt_outputs.loss opt_loss.backward() inputs_flat = [inputs[k] for k in inputs] correct_results = collect_results( eager_model, correct_outputs.logits, correct_loss, inputs_flat ) opt_results = collect_results( opt_model, opt_outputs.logits, opt_loss, inputs_flat ) self.assertTrue(same(correct_results, opt_results)) @import_transformers_or_skip() @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") # TODO(whc) Investigate why cudagraphs breaks inductor+fsdp for hf_bert @patch.object(torch._inductor.config.triton, "cudagraphs", False) @patch.object(torch._inductor.config, "fallback_random", True) @config.patch(guard_nn_modules=True, enable_compiler_collectives=True) def test_hf_bert_fsdp_activation_checkpointing(self): from transformers.models.bert.modeling_bert import BertLayer with _dynamo_dist_per_rank_init(self.rank, self.world_size): for wrap_policy, test_instance in ( ( functools.partial( transformer_auto_wrap_policy, transformer_layer_cls=(BertLayer,) ), "FSDP with recursive wrapping BertLayer instances", ), ): print( f"Running hf_bert_activation_checkpointing test for {test_instance}" ) model, inputs = get_hf_bert(self.rank) check_fn = lambda submodule: isinstance( # noqa: E731 submodule, BertLayer ) reset_rng_state() eager_model = apply_fsdp_with_checkpointing( model, wrap_policy, check_fn ) correct_outputs = eager_model(**inputs) correct_loss = correct_outputs.loss correct_loss.backward() reset_rng_state() opt_model = apply_fsdp_with_checkpointing(model, wrap_policy, check_fn) opt_model = torch._dynamo.optimize("inductor")(opt_model) opt_outputs = opt_model(**inputs) opt_loss = opt_outputs.loss opt_loss.backward() inputs_flat = [inputs[k] for k in inputs] correct_results = collect_results( eager_model, correct_outputs.logits, correct_loss, inputs_flat ) opt_results = collect_results( opt_model, opt_outputs.logits, opt_loss, inputs_flat ) self.assertTrue(same(correct_results, opt_results)) 
@unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") @config.patch(enable_compiler_collectives=True) def test_compiler_collectives_automatic_dynamic_tensor(self): with _dynamo_dist_per_rank_init(self.rank, self.world_size): class SimpleModel(nn.Module): def __init__(self, input_size, output_size): super().__init__() self.linear = nn.Linear(input_size, output_size) def forward(self, x): return self.linear(x) torch._dynamo.utils.clear_compilation_metrics() model = SimpleModel(10, 2).to(self.rank) model.forward = torch.compile(model.forward) ddp_model = DDP(model, device_ids=[self.rank]) loss_fn = nn.CrossEntropyLoss() optimizer = optim.SGD(ddp_model.parameters(), lr=0.001) def B(s): return [torch.randn(s, 10), torch.randint(0, 2, (s,))] if self.rank == 0: dataloader = [B(5), B(8), B(6)] else: dataloader = [B(6), B(6), B(3)] for data, labels in dataloader: data, labels = data.to(self.rank), labels.to(self.rank) optimizer.zero_grad() output = ddp_model(data) loss = loss_fn(output, labels) loss.backward() optimizer.step() metrics = torch._dynamo.utils.get_compilation_metrics() # Number of compiles same on all nodes res = [None] * self.world_size torch.distributed.all_gather_object(res, len(metrics)) for r in res[1:]: self.assertEqual(res[0], r) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") @config.patch(enable_compiler_collectives=True) def test_compiler_collectives_automatic_dynamic_scalar(self): with _dynamo_dist_per_rank_init(self.rank, self.world_size): torch._dynamo.utils.clear_compilation_metrics() # TODO: This should be possible to do inside the function, but device = f"cuda:{self.rank}" @torch.compile() def f(x, y): return x + torch.ones(y, device=device).sum() if self.rank == 0: dataloader = [3, 3, 7] else: dataloader = [3, 4, 9] for data in dataloader: f(torch.randn(5, device=self.rank), data) metrics = torch._dynamo.utils.get_compilation_metrics() # Number of compiles same on all nodes res = [None] * self.world_size torch.distributed.all_gather_object(res, len(metrics)) for r in res[1:]: self.assertEqual(res[0], r) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") @config.patch(enable_compiler_collectives=True) def test_compiler_collectives_automatic_dynamic_speculation_divergence(self): with _dynamo_dist_per_rank_init(self.rank, self.world_size): torch._dynamo.utils.clear_compilation_metrics() # TODO: This should be possible to do inside the function, but device = f"cuda:{self.rank}" @torch.compile() def f(x, y): zx = x.shape zy = y.shape return x.sum() + y.sum() if self.rank == 0: dataloader = [4, 4] else: dataloader = [3, 4] for data in dataloader: f( torch.randn(data, device=self.rank), torch.randn(data, device=self.rank), ) metrics = torch._dynamo.utils.get_compilation_metrics() # Number of compiles same on all nodes res = [None] * self.world_size torch.distributed.all_gather_object(res, len(metrics)) for r in res[1:]: self.assertEqual(res[0], r) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") @config.patch(enable_compiler_collectives=True) def test_compiler_collectives_graph_break_empty_graph_still_collective(self): with _dynamo_dist_per_rank_init(self.rank, self.world_size): torch._dynamo.utils.clear_compilation_metrics() device = f"cuda:{self.rank}" @torch.compile() def f(x, y): z = y print("woof") zx = x.shape zy = y.shape return x.sum() + y.sum() if self.rank == 0: dataloader = [5, 5, 6] else: dataloader = [3, 4, 5] for data in 
dataloader: f( torch.randn(data, device=self.rank), torch.randn(data, device=self.rank), ) metrics = torch._dynamo.utils.get_compilation_metrics() # Number of compiles same on all nodes res = [None] * self.world_size torch.distributed.all_gather_object(res, len(metrics)) for r in res[1:]: self.assertEqual(res[0], r) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") @config.patch(enable_compiler_collectives=True) def test_compiler_collectives_dim_mismatch(self): with _dynamo_dist_per_rank_init(self.rank, self.world_size): torch._dynamo.utils.clear_compilation_metrics() @torch.compile() def f(x, y): zx = x.shape zy = y.shape return x.sum() + y.sum() if self.rank == 0: dataloader = [[4, 2]] else: dataloader = [[3]] for data in dataloader: f( torch.randn(data, device=self.rank), torch.randn(data, device=self.rank), ) metrics = torch._dynamo.utils.get_compilation_metrics() res = [None] * self.world_size torch.distributed.all_gather_object(res, len(metrics)) for r in res[1:]: self.assertEqual(res[0], r) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") @config.patch(enable_compiler_collectives=True) def test_compiler_collectives_missing_source(self): with _dynamo_dist_per_rank_init(self.rank, self.world_size): torch._dynamo.utils.clear_compilation_metrics() @torch.compile() def f(rank, xs): return xs[rank].sum() xs = [] for _ in range(self.world_size): xs.append(torch.randn(10, device=self.rank)) f(self.rank, xs) metrics = torch._dynamo.utils.get_compilation_metrics() res = [None] * self.world_size torch.distributed.all_gather_object(res, len(metrics)) for r in res[1:]: self.assertEqual(res[0], r) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") @patch.object(torch._inductor.config, "fx_graph_cache", False) @patch.object(torch._inductor.config, "fx_graph_remote_cache", False) def test_asymmetric_compilation(self): from torch._dynamo.comptime import comptime with _dynamo_dist_per_rank_init(self.rank, self.world_size): torch._dynamo.utils.clear_compilation_metrics() device = f"cuda:{self.rank}" pg = dist.distributed_c10d._get_default_group() cnt = torch._dynamo.testing.CompileCounter() sleep_time = 5 @torch._dynamo.optimize(cnt) def f(x): if self.rank == 0: comptime.sleep(sleep_time) y = 2 * x return y.sum() backend = pg._get_backend(torch.device(device)) backend._set_default_timeout(timedelta(seconds=sleep_time - 2)) x = torch.ones(4, device=device) # NCCL startup is lazy w = pg.allreduce(x) w.wait() f(x) if self.rank != 0: # test fails with NCCL timeout without this line dist.distributed_c10d._add_ephemeral_timeout_for_all_pgs( timedelta(seconds=sleep_time) ) w = pg.allreduce(x) w.wait() torch.cuda.synchronize(device) metrics = torch._dynamo.utils.get_compilation_metrics() # Number of compiles same on all nodes res = [None] * self.world_size torch.distributed.all_gather_object(res, len(metrics)) for r in res[1:]: self.assertEqual(res[0], r) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") @patch.object(torch._inductor.config, "fx_graph_cache", True) @patch.object(torch._inductor.config, "fx_graph_remote_cache", False) @patch.object(torch._inductor.config, "sleep_sec_TESTING_ONLY", 10) def test_asymmetric_compilation_with_fx_cache(self): from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache with fresh_inductor_cache(), _dynamo_dist_per_rank_init( self.rank, self.world_size ): 
torch._dynamo.utils.clear_compilation_metrics() device = f"cuda:{self.rank}" pg = dist.distributed_c10d._get_default_group() @torch.compile def f(x): y = 2 * x return y.sum() backend = pg._get_backend(torch.device(device)) backend._set_default_timeout(timedelta(seconds=5)) counters.clear() x = torch.ones(4, device=device) f(x) self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1) self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0) self.assertEqual(counters["inductor"]["fxgraph_cache_bypass"], 0) w = pg.allreduce(x) w.wait() torch.cuda.synchronize(device) torch._dynamo.reset() if self.rank == 0: with fresh_inductor_cache(): f(x) self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 2) self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0) self.assertEqual(counters["inductor"]["fxgraph_cache_bypass"], 0) else: f(x) self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1) self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1) self.assertEqual(counters["inductor"]["fxgraph_cache_bypass"], 0) w = pg.allreduce(x) w.wait() torch.cuda.synchronize(device) @requires_nccl() @requires_cuda class TestSingleProc(DynamoDistributedSingleProcTestCase): """ Test harness initializes dist process group. Test simple things here since they are simpler to debug. Use TestMultiProc for things that really need to run on multiple nodes """ def get_model( self, bsz=20, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None ): m = ToyModel( in_feat=in_feat, hidden_feat=hidden_feat, out_feat=out_feat, ctx_manager=ctx_manager, ).to(self.device) m.apply(init_weights) inputs = torch.rand(bsz, in_feat).to(self.device) outputs = m(inputs) return m, inputs, outputs @patch.object(config, "optimize_ddp", False) def test_ddp_baseline_aot_eager(self): from torch.nn.parallel import DistributedDataParallel as DDP m, inputs, correct_outputs = self.get_model() ddp_m = DDP(m, device_ids=self.device_ids) ddp_m = torch._dynamo.optimize("aot_eager")(ddp_m) outputs = ddp_m(inputs) self.assertTrue(same(correct_outputs, outputs)) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") @patch.object(config, "optimize_ddp", False) def test_ddp_baseline_inductor(self): from torch.nn.parallel import DistributedDataParallel as DDP m, inputs, correct_outputs = self.get_model() ddp_m = DDP(m, device_ids=self.device_ids) ddp_m = torch._dynamo.optimize("inductor")(ddp_m) outputs = ddp_m(inputs) self.assertTrue(same(correct_outputs, outputs)) @patch.object(config, "optimize_ddp", True) def test_graph_split(self): assert config.optimize_ddp """ Just ensures that the appropriate number of splits happen (based on bucket size and model parameters) - verifies the number of times the user-provided compiler is called by the DDPOptimizer which is doing the graph splitting """ m, inputs, correct_outputs = self.get_model() ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=25) check_splits_compiler = CheckSplitsCompiler() @torch._dynamo.optimize(check_splits_compiler.compile_fn) def opt_fn(inputs): return ddp_m(inputs) opt_outputs = opt_fn(inputs) self.assertTrue(same(correct_outputs, opt_outputs)) self.assertEqual(check_splits_compiler.compiler_called, 3) # ensure compatibility with dynamo explain explain_out = torch._dynamo.explain(ddp_m)(inputs) break_reasons = explain_out.break_reasons self.assertEqual(len(break_reasons), 3) self.assertTrue(all("DDPOptimizer" in r.reason for r in break_reasons)) @patch.object(config, "optimize_ddp", True) def 
test_graph_split_ctx_manager(self): """ Ensures that we get the right number of splits and that the respective context managers' effects are applied to the computation. """ for get_compiler in [ lambda: CheckSplitsCompiler(), lambda: None, ]: for ctx_manager, output_test in [ ( lambda: torch.autocast( torch.device(self.device).type, torch.float16 ), lambda out: self.assertEqual(out.dtype, torch.float16), ), (torch.enable_grad, lambda out: self.assertTrue(out.requires_grad)), (torch.no_grad, lambda out: self.assertTrue(not out.requires_grad)), ]: m, inputs, correct_outputs = self.get_model( out_feat=1000, hidden_feat=1000, in_feat=1000, ctx_manager=ctx_manager, ) # inp - 1000 * 1000 matrix of float32 (4 bytes) = 4MB # hidden - 1000 * 1000 matrix of float32 (4 bytes) = 4MB bucket_cap_mb = 3.5 # 4MB ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=bucket_cap_mb) compiler = get_compiler() @torch._dynamo.optimize( compiler.compile_fn if compiler else "aot_eager" ) def opt_fn(inputs): return ddp_m(inputs) opt_outputs = opt_fn(inputs) self.assertTrue(same(correct_outputs, opt_outputs)) if compiler: self.assertEqual(compiler.compiler_called, 4) output_test(opt_outputs) # ensure compatibility with dynamo explain explain_out = torch._dynamo.explain(ddp_m)(inputs) break_reasons = explain_out.break_reasons self.assertEqual(len(break_reasons), 4) self.assertTrue(all("DDPOptimizer" in r.reason for r in break_reasons)) @patch.object(config, "optimize_ddp", True) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") def test_graph_split_inductor(self): assert config.optimize_ddp """ Same as above, but using inductor backend. We observed issues with inductor/fx interface in the past. """ m, inputs, correct_outputs = self.get_model() ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=25) @torch._dynamo.optimize("inductor") def opt_fn(inputs): return ddp_m(inputs) opt_outputs = opt_fn(inputs) self.assertTrue(same(correct_outputs, opt_outputs)) @torch._inductor.config.patch( {"layout_optimization": True, "keep_output_stride": False} ) @patch.object(config, "optimize_ddp", True) def _test_graph_split_inductor_layout_optimizations_impl(self, context): assert config.optimize_ddp channel_dim = 512 # channel dim must be > 64 for inductor to do layout optimization and use NHWC class ToyModelConv(nn.Module): def __init__(self) -> None: super().__init__() self.net = nn.Sequential( *[ nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False), nn.ReLU(), ] + [ nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False), nn.ReLU(), ] + [ nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False), nn.ReLU(), ] + [ nn.Conv2d(channel_dim, channel_dim, 1, stride=1, bias=False), nn.ReLU(), ] ) def forward(self, inputs): return self.net(inputs) def get_model(): m = ToyModelConv().to(self.device) m.apply(init_weights) inputs = torch.rand(2, channel_dim, channel_dim, 128).to(self.device) outputs = m(inputs) return m, inputs, outputs with context(): m, inputs, correct_outputs = get_model() ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=25) @torch._dynamo.optimize("inductor") def opt_fn(inputs): return ddp_m(inputs) opt_outputs = opt_fn(inputs) self.assertTrue(same(correct_outputs, opt_outputs)) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") def test_graph_split_inductor_layout_optimizations_training(self): self._test_graph_split_inductor_layout_optimizations_impl( contextlib.nullcontext ) @unittest.skipIf(not has_triton(), 
"Inductor+gpu needs triton and recent GPU arch") def test_graph_split_inductor_layout_optimizations_inference(self): self._test_graph_split_inductor_layout_optimizations_impl(torch.no_grad) @patch.object(config, "optimize_ddp", True) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") def test_graph_split_inductor_transpose(self): assert config.optimize_ddp B = 100 N = 30 D = 50 K = 70 class Foo(nn.Module): def __init__(self) -> None: super().__init__() self.linear0 = nn.Linear(N, K) self.linear1 = torch.nn.Linear(D * K, 2048) def forward(self, x): xt = x.transpose(2, 1) xt = self.linear0(xt).flatten(1) return self.linear1(xt) mod = Foo().to(self.device) compiled_mod = torch.compile(mod, backend="inductor") ddp_compiled_mod = DDP(compiled_mod, device_ids=self.device_ids) x = torch.randn((B, N, D), dtype=torch.float32, device=self.device) self.assertTrue(same(mod(x), ddp_compiled_mod(x))) x_1 = torch.randn((B * 2, N, D), dtype=torch.float32, device=self.device) self.assertTrue(same(mod(x_1), ddp_compiled_mod(x_1))) x_2 = torch.randn((B * 3, N, D), dtype=torch.float32, device=self.device) self.assertTrue(same(mod(x_2), ddp_compiled_mod(x_2))) @patch.object(config, "optimize_ddp", True) def test_no_split(self): """ Ensures the DDPOptimizer returns a correct, compiled module without introducing graph splits. (Based on model parameters fitting in the bucket) """ # DDP will always do a 'first bucket' with a really small size; so only a tiny model will escape this m, inputs, correct_outputs = self.get_model(hidden_feat=5) ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=250) check_splits_compiler = CheckSplitsCompiler() @torch._dynamo.optimize(check_splits_compiler.compile_fn) def opt_fn(inputs): return ddp_m(inputs) opt_outputs = opt_fn(inputs) self.assertTrue(same(correct_outputs, opt_outputs)) self.assertEqual(check_splits_compiler.compiler_called, 1) @patch.object(config, "optimize_ddp", True) def test_aot_autograd(self): """ Explicitly check AotAutograd family of compilers work, since they require example inputs propagated between graph splits. 
""" m, inputs, correct_outputs = self.get_model() ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=25) @torch._dynamo.optimize("aot_eager") def opt_fn(inputs): return ddp_m(inputs) opt_outputs = opt_fn(inputs) opt_outputs.sum().backward() self.assertTrue(same(correct_outputs, opt_outputs)) @patch.object(config, "optimize_ddp", True) def test_custom_layer(self): """ Just ensures that the appropriate number of splits happen (based on bucket size and model parameters) - verifies the number of times the user-provided compiler is called by the DDPOptimizer which is doing the graph splitting """ m, inputs, correct_outputs = get_custom_model(self.device) ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=1) check_splits_compiler = CheckSplitsCompiler() @torch._dynamo.optimize(check_splits_compiler.compile_fn) def opt_fn(inputs): return ddp_m(*inputs) opt_outputs = opt_fn(inputs) self.assertTrue(same(correct_outputs, opt_outputs)) self.assertEqual(check_splits_compiler.compiler_called, 3) @unittest.skipIf(not has_triton(), "Inductor+gpu needs triton and recent GPU arch") def test_empty_graph_inductor(self): def fn(): get_world_size = torch.distributed.distributed_c10d.get_world_size() return (get_world_size,) opt_fn = torch._dynamo.optimize("inductor")(fn) res = None try: res = opt_fn()[0] except Exception: pass self.assertEqual(res, 1) @patch.object(config, "optimize_ddp", False) def test_ignored_parameters(self): """ Verifies ddp graph-split logic ignores parameters marked to ignore on DDP module. Hooks up graph-split optimizer manually so it can peek at internal state. """ m, inputs, correct_outputs = get_custom_model(self.device) parameters_to_ignore = ["seq.2.weight", "seq.4.linear.bias"] DDP._set_params_and_buffers_to_ignore_for_model(m, parameters_to_ignore) ddp_m = DDP(m, device_ids=self.device_ids, bucket_cap_mb=25) parameter_ids_to_ignore = [ id(ddp_m.module.get_parameter(p)) for p in ddp_m.parameters_to_ignore ] check_splits_compiler = CheckSplitsCompiler() ddp_optimizer = DDPOptimizer( bucket_bytes_cap=ddp_m.bucket_bytes_cap, backend_compile_fn=check_splits_compiler.compile_fn, ) @torch._dynamo.optimize(ddp_optimizer.compile_fn) def opt_fn(inputs): return ddp_m(*inputs) opt_outputs = opt_fn(inputs) self.assertTrue(same(correct_outputs, opt_outputs)) self.assertEqual(check_splits_compiler.compiler_called, 2) for b in ddp_optimizer.buckets: for p_id in b.param_ids: self.assertFalse(p_id in parameter_ids_to_ignore) @patch.object(config, "optimize_ddp", True) def test_higher_order_op(self): from torch.utils.checkpoint import checkpoint N = 1000 class InnerModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear1 = torch.nn.Linear(N, N) self.linear2 = torch.nn.Linear(N, N) def forward(self, x): a = self.linear1(x) a = self.linear2(a) return a class MockModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.inner_mod1 = InnerModule() self.inner_mod2 = InnerModule() def forward(self, x): a = checkpoint(self.inner_mod1, x, use_reentrant=False) a = torch.cos(a) a = checkpoint(self.inner_mod2, a, use_reentrant=False) a = torch.cos(a) return a mod = MockModule().cuda() mod = DDP(mod, bucket_cap_mb=1) x = torch.randn(N, N, device="cuda", requires_grad=True) args = (x,) backend = "aot_eager" cnt = torch._dynamo.testing.CompileCounterWithBackend(backend) with self.assertRaisesRegex( torch._dynamo.exc.BackendCompilerFailed, "DDPOptimizer backend: Found a higher order op in the graph", ): torch.compile(mod, backend=cnt)(*args) def 
test_fsdp_orig_params_assert(self): # Test with basic FSDP wrapping (outer wrap around whole model) m, inputs, correct_outputs = get_model(f"cuda:{self.rank}") fsdp_m = FSDP(m, use_orig_params=False) fsdp_m = torch._dynamo.optimize()(fsdp_m) self.assertRaisesRegex( AssertionError, "Dynamo only supports FSDP with use_orig_params=True", fsdp_m, inputs, ) def test_fsdp_skip_guards(self): """ It's currently difficult to test dynamo guards. Most guards tests are indirect- modify something and observe that the guard in question failed. In this case, since the FSDP guards were already deemed useless and skipping them is expected to have no practical effect, it's pretty contrived to even try to make those guards fail. Instead, we observe the 'guard source' printed by dynamo's comptime print_guards function. Note: comptime prints the guards before the time they get installed or not installed, so in both cases (skip or no skip) the same guards get printed. The difference is that in the skip case, they show up with a special 'guard source' which will cuase them to not be installed. So all we check for is the expected guard source 'local_fsdp_module'. """ global GUARDS_FILE GUARDS_FILE = StringIO() for skip_guards, expected_guard_source in ( (True, "local_fsdp_module"), (False, "local_unspecialized_nn_module"), ): torch._dynamo.reset() class ToyModel(nn.Module): def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5): super().__init__() self.net = nn.Sequential( *[nn.Linear(in_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, out_feat), nn.ReLU()] ) def forward(self, inputs): out = self.net(inputs) @comptime def _(ctx): ctx.print_guards(file=GUARDS_FILE) return out device = f"cuda:{self.rank}" m = ToyModel( in_feat=10, hidden_feat=5000, out_feat=5, ).to(device) inputs = torch.rand(20, 10).to(device) m.apply(init_weights) correct_outputs = m(inputs) fsdp_m = FSDP(m, use_orig_params=True) with torch._dynamo.config.patch(skip_fsdp_guards=skip_guards): opt_m = torch._dynamo.optimize("aot_eager")(fsdp_m) outputs = opt_m(inputs) # far from an exhaustive check of all the expected guards, just check a couple of them. 
FileCheck().check("""local "L['self']" TYPE_MATCH""").check( f"""{expected_guard_source} "L['self']._modules['net']" TYPE_MATCH""" ).check( f"""{expected_guard_source} "L['self']._modules['net']._modules['0']" TYPE_MATCH""" ).run( GUARDS_FILE.getvalue() ) self.assertTrue(same(correct_outputs, outputs)) def test_fsdp_skip_register_attr_or_module(self): """ ensure FSDP module is not registered as attrbutes in the fx graph see `not source.guard_source().is_fsdp_module()` before calling `register_attr_or_module` in variables/builder.py """ class ToyModel(nn.Module): def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5): super().__init__() self.net = nn.Sequential( *[nn.Linear(in_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()] ) def forward(self, inputs): out = self.net(inputs) return out torch._dynamo.reset() device = f"cuda:{self.rank}" m = ToyModel( in_feat=10, hidden_feat=5000, out_feat=5, ).to(device) inputs = torch.rand(20, 10).to(device) m.apply(init_weights) correct_outputs = m(inputs) fsdp_m = FSDP(m, use_orig_params=True) def debug_compiler(gm, _): for node in gm.graph.nodes: if node.op == "get_attr": for name in [ "l__self___net_0_weight", "l__self___net_0_bias", "l__self___net_2_weight", "l__self___net_2_bias", ]: self.assertFalse( name in node.name, f"FSDP module {name} should not be registered as attributes", ) return gm opt_m = torch._dynamo.optimize(backend=debug_compiler)(fsdp_m) outputs = opt_m(inputs) self.assertTrue(same(correct_outputs, outputs)) def test_fsdp_dup_tensors_same_source(self): """ Tests that FSDP-managed modules' parameters and buffers with the same source are de-duplicated, meaning that they are each only passed once as a graph input. """ class DuplicateModule(nn.Module): def __init__(self) -> None: super().__init__() self._param = torch.randn((3,), device="cuda") self._buf = torch.nn.Buffer( torch.randn((3,), requires_grad=False, device="cuda") ) def forward(self, x: torch.Tensor) -> torch.Tensor: # Use `_param` and `_buf` each twice in this compiled forward # to exercise if they are de-duplicated by TorchDynamo z = x + self._buf + self._buf z += self._param + self._param return z model = DuplicateModule() fsdp_model = FSDP(copy.deepcopy(model), use_orig_params=True) fsdp_model = torch._dynamo.optimize("aot_eager")(fsdp_model) inp = torch.randn((2, 3), device="cuda") local_out = model(inp) fsdp_out = fsdp_model(inp) self.assertEqual(local_out, fsdp_out) @patch.object(config, "guard_nn_modules", True) def test_fsdp_dup_tensors_diff_source(self): """ Tests that FSDP-managed modules' parameters and buffers with different source do not result in incorrect AOTAutograd de-dup guards like ``a is b``, where ``a`` and ``b`` are certainly not the same. We check this by checking for per-invocation recompiles. 
""" class BufModule(nn.Module): def __init__(self) -> None: super().__init__() self._buf = nn.Buffer( torch.randn((3,), requires_grad=False, device="cuda") ) def forward(self, x: torch.Tensor) -> torch.Tensor: return x + self._buf class Model(nn.Module): def __init__(self) -> None: super().__init__() self._param = nn.Parameter(torch.randn((1,), device="cuda")) self._buf_module = BufModule() # Share the buffer, meaning same tensor but different source self._buf = self._buf_module._buf def forward(self, x: torch.Tensor) -> torch.Tensor: # Use the same buffer tensor twice in the compiled forward, # including a data mutation to trigger de-dup logic self._buf.mul_(2) z = x + self._buf z = self._buf_module(z) z += self._param return z fsdp_model = FSDP(Model(), use_orig_params=True) cnt = torch._dynamo.testing.CompileCounterWithBackend("aot_eager") fsdp_model = torch._dynamo.optimize(cnt)(fsdp_model) inp = torch.randn((2, 3), device="cuda") for _ in range(15): fsdp_model(inp) # Check for no recompiles (if there were incorrect de-dup guards, then # the frame count would be equal to the number of forward calls) self.assertEqual(cnt.frame_count, 1) def test_fsdp_staticmethod(self): """ Tests that Dynamo compiles staticmethods for FSDP-managed modules correctly both when the staticmethod is invoked from the class and from the object itself. """ class ModuleWithStaticMethod(nn.Module): def __init__(self, use_self: bool): super().__init__() self._use_self = use_self torch.manual_seed(42) # force `_param` to be deterministic self._param = nn.Parameter(torch.randn((3,), device="cuda")) def forward(self, x: torch.Tensor) -> torch.Tensor: if self._use_self: z = self._add(x, self._param) else: z = ModuleWithStaticMethod._add(x, self._param) z *= 2 return z @staticmethod def _add(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor: return x + y model = ModuleWithStaticMethod(False) x = torch.randn((2, 3), device="cuda") ref_out = model(x) test_outs: List[torch.Tensor] = [] for use_self in (False, True): model = ModuleWithStaticMethod(use_self) fsdp_model = FSDP(model, use_orig_params=True) cnt = torch._dynamo.testing.CompileCounterWithBackend("aot_eager") fsdp_model = torch._dynamo.optimize(cnt)(fsdp_model) test_outs.append(fsdp_model(x)) # Check for no recompiles, which could happen if incorrectly # passing args to the staticmethod (e.g. doubly passing `self`) # 3 is expected here for 1 forward. # Graph 1 should be add and imul self.assertEqual(cnt.frame_count, 1) for test_out in test_outs: self.assertEqual(test_out, ref_out) def test_async_subclass_no_specialize(self): cnt = torch._dynamo.testing.CompileCounterWithBackend("eager") @torch.compile(backend=cnt, fullgraph=True, dynamic=True) def f(x): return x + 1 f(_maybe_wrap_tensor(torch.randn(10))) f(_maybe_wrap_tensor(torch.randn(12))) self.assertEqual(cnt.frame_count, 1) if __name__ == "__main__": from torch._dynamo.test_case import run_tests run_tests()
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
forward
def forward(self, inputs): return self.net(inputs)
def forward(self, inputs): if self.ctx_manager is not None: with self.ctx_manager(): return self.net(inputs) else: return self.net(inputs)
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
test_hf_bert_ddp_inductor_static_graph
def test_hf_bert_ddp_inductor_static_graph(self): self._test_hf_bert_ddp_inductor(static_graph=True)
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig @requires_nccl() class TestMultiProc(DynamoDistributedMultiProcTestCase): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_dynamo_distributed.py
forward
def forward(self, inputs): return self.net(inputs)
def forward(self, inputs): if self.ctx_manager is not None: with self.ctx_manager(): return self.net(inputs) else: return self.net(inputs)
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
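The change recorded above threads an optional ctx_manager through forward(). A minimal standalone sketch of that pattern (the TinyModel name and the torch.autocast choice are illustrative assumptions, not the test's actual ToyModel):

import functools
import torch
from torch import nn

class TinyModel(nn.Module):
    # Illustrative stand-in for the test's ToyModel: forward() optionally runs
    # under a caller-supplied context manager (e.g. autocast).
    def __init__(self, ctx_manager=None):
        super().__init__()
        self.ctx_manager = ctx_manager
        self.net = nn.Sequential(nn.Linear(10, 10), nn.ReLU())

    def forward(self, x):
        if self.ctx_manager is not None:
            with self.ctx_manager():
                return self.net(x)
        return self.net(x)

# Example: run the net under CPU autocast.
m = TinyModel(ctx_manager=functools.partial(torch.autocast, device_type="cpu"))
out = m(torch.rand(4, 10))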
torch
test/distributed/test_functional_api.py
new_subgroups
def new_subgroups(group_size: int, pg_tag=None): world_size = dist.get_world_size() subgroups = [] cur_subgroup = None for subgroup_id in range(world_size // group_size): start_rank = subgroup_id * group_size end_rank = start_rank + group_size ranks_in_subgroup = list(range(start_rank, end_rank)) subgroup = c10d._new_group_with_tag( ranks=ranks_in_subgroup, pg_tag=pg_tag, ) subgroups.append(subgroup) rank = dist.get_rank() if rank in ranks_in_subgroup: cur_subgroup = subgroup return cur_subgroup, subgroups class TestExpand(MultiThreadedTestCase): @property def world_size(self): return 4 def setUp(self): super().setUp() self._spawn_threads() def test_expand_1d_rank_list(self): tag, rankset, group_size = ft_c._expand_group([0, 1, 2, 3]) self.assertEqual("", tag) self.assertEqual([0, 1, 2, 3], rankset) self.assertEqual(4, group_size) tag, rankset, group_size = ft_c._expand_group([0, 1, 2, 3], "bla") self.assertEqual("bla", tag) def test_expand_2d_rank_list(self): tag, rankset, group_size = ft_c._expand_group([[0, 1], [2, 3]]) self.assertEqual("", tag) self.assertEqual([0, 1, 2, 3], rankset) self.assertEqual(2, group_size) tag, rankset, group_size = ft_c._expand_group([[0, 1], [2, 3]], "blu") self.assertEqual("blu", tag) with self.assertRaisesRegex(ValueError, "group sizes must be identical"): ft_c._expand_group([[0], [1, 2, 3]]) def test_expand_process_group(self): tag, rankset, group_size = ft_c._expand_group(dist.group.WORLD) self.assertEqual(c10d._get_group_tag(dist.group.WORLD), tag) self.assertEqual([0, 1, 2, 3], rankset) self.assertEqual(4, group_size) tag, rankset, group_size = ft_c._expand_group(dist.group.WORLD, "bla") self.assertEqual("bla", tag) my_pg, others = new_subgroups(group_size=2) tag, rankset, group_size = ft_c._expand_group(my_pg) self.assertEqual(c10d._get_group_tag(my_pg), tag) self.assertEqual(dist.get_process_group_ranks(my_pg), rankset) self.assertEqual(2, group_size) my_pg = None for i in range(dist.get_world_size()): group = c10d._new_group_with_tag([i], pg_tag="my_pg") if i == dist.get_rank(): my_pg = group tag, rankset, group_size = ft_c._expand_group(my_pg) self.assertEqual("my_pg", tag) self.assertEqual([dist.get_rank()], rankset) self.assertEqual(1, group_size) tag, rankset, group_size = ft_c._expand_group(my_pg, "bla") self.assertEqual("bla", tag) def test_expand_device_mesh(self): mesh = dt.DeviceMesh("cpu", torch.arange(4)) tag, rankset, group_size = ft_c._expand_group(mesh) self.assertEqual(c10d._get_group_tag(mesh.get_group(mesh_dim=0)), tag) self.assertEqual([0, 1, 2, 3], rankset) self.assertEqual(4, group_size) mesh = dt.DeviceMesh("cpu", torch.arange(4)) tag, rankset, group_size = ft_c._expand_group(mesh) self.assertEqual(c10d._get_group_tag(mesh.get_group(mesh_dim=0)), tag) self.assertEqual([0, 1, 2, 3], rankset) self.assertEqual(4, group_size) def test_expand_device_mesh_tuple(self): mesh = dt.DeviceMesh("cpu", torch.arange(4).view(2, 2)) with self.assertRaisesRegex(AssertionError, "Only 1D mesh"): tag, rankset, group_size = ft_c._expand_group(mesh) tag, rankset, group_size = ft_c._expand_group((mesh, 0)) self.assertEqual(c10d._get_group_tag(mesh.get_group(mesh_dim=0)), tag) expected_rankset = [0, 2] if dist.get_rank() in [0, 2] else [1, 3] self.assertEqual(expected_rankset, rankset) self.assertEqual(2, group_size) tag, rankset, group_size = ft_c._expand_group((mesh, 1)) expected_rankset = [0, 1] if dist.get_rank() in [0, 1] else [2, 3] self.assertEqual(c10d._get_group_tag(mesh.get_group(mesh_dim=1)), tag) self.assertEqual(expected_rankset, rankset) 
self.assertEqual(2, group_size) class TestPgTag(MultiThreadedTestCase): @property def world_size(self): return 4 def setUp(self): super().setUp() self._spawn_threads() """ The behavior we want is as follow: - rankset+tag will always result in the same PG. Do we enforce this by failing creation of new PGs or returning existing ones? Return existing one. - default tag gives existing behavior. This means we should create duplicates. - _expand_group on _default-tagged pg should always resolve to it This mean we can't depend on empty tag + rankset. """ def test_pg_creation_with_tag(self): my_group, _ = new_subgroups(group_size=2, pg_tag="blu") my_group2, _ = new_subgroups(group_size=2, pg_tag="blu") self.assertEqual(my_group, my_group2) my_group3, _ = new_subgroups(group_size=2, pg_tag="blu2") self.assertNotEqual(my_group, my_group3) my_group4, _ = new_subgroups(group_size=2) self.assertNotEqual(my_group, my_group4) my_group5, _ = new_subgroups(group_size=2) self.assertNotEqual(my_group4, my_group5) def test_pg_lookup_roundtrip(self): pg_tag0, _ = new_subgroups(group_size=2, pg_tag="blu") pg_tag1, _ = new_subgroups(group_size=2, pg_tag="blu2") pg_notag0, _ = new_subgroups(group_size=2) pg_notag1, _ = new_subgroups(group_size=2) def roundtrip(pg): tag, rankset, _ = ft_c._expand_group(pg) return c10d._find_pg_by_ranks_and_tag(tag, rankset) self.assertEqual(pg_tag0, roundtrip(pg_tag0)) self.assertEqual(pg_tag1, roundtrip(pg_tag1)) self.assertEqual(pg_notag0, roundtrip(pg_notag0)) self.assertEqual(pg_notag1, roundtrip(pg_notag1)) def test_pg_lookup_with_tag(self): pg_tag0, _ = new_subgroups(group_size=2, pg_tag="blu") pg_tag1, _ = new_subgroups(group_size=2, pg_tag="bla") pg_notag0, _ = new_subgroups(group_size=2) def roundtrip(pg, pg_tag): tag, rankset, _ = ft_c._expand_group(pg, pg_tag) return c10d._find_pg_by_ranks_and_tag(tag, rankset) self.assertEqual(pg_tag0, roundtrip(pg_tag1, "blu")) self.assertEqual(pg_tag0, roundtrip(pg_notag0, "blu")) # Cannot erase the tag of a PG self.assertEqual(pg_tag0, roundtrip(pg_tag0, "")) def test_find_or_create_pg(self): pg = c10d._find_or_create_pg_by_ranks_and_tag("blu", [0, 1, 2, 3], 2) pg_tag0, _ = new_subgroups(group_size=2, pg_tag="blu") self.assertEqual(pg, pg_tag0) def test_find_root_pg(self): pg = c10d._find_pg_by_ranks_and_tag("", [0, 1, 2, 3]) self.assertEqual(dist.group.WORLD, pg) @instantiate_parametrized_tests class TestTraceableCollectives(MultiThreadedTestCase): @property def world_size(self): return 4 def setUp(self): super().setUp() self._spawn_threads() @parametrize("device", ["cpu", "cuda"]) def test_broadcast(self, device): if device == "cuda": if torch.cuda.device_count() < self.world_size: self.skipTest("Not enough CUDA devices") torch.cuda.set_device(dist.get_rank()) if dist.get_rank() == 0: tensor = torch.ones([4], device=device) else: tensor = torch.zeros([4], device=device) mesh = dt.DeviceMesh(device, torch.arange(4)) res = ft_c.broadcast(tensor, 0, mesh) self.assertEqual(res, torch.ones([4], device=device)) @parametrize("device", ["cpu", "cuda"]) def test_all_reduce_eager(self, device): if device == "cuda": if torch.cuda.device_count() < self.world_size: self.skipTest("Not enough CUDA devices") torch.cuda.set_device(dist.get_rank()) tensor = torch.ones([4], device=device) mesh = dt.DeviceMesh(device, torch.arange(4)) res = ft_c.all_reduce(tensor, "sum", mesh) self.assertEqual(res, torch.tensor([4, 4, 4, 4], dtype=torch.float)) mesh = dt.DeviceMesh(device, torch.arange(4).view(2, 2)) res2 = ft_c.all_reduce(tensor, "sum", (mesh, 1)) 
self.assertEqual(res2, torch.tensor([2, 2, 2, 2], dtype=torch.float)) @parametrize("device", ["cpu", "cuda"]) def test_all_reduce_coalesced_eager(self, device): if device == "cuda": if torch.cuda.device_count() < self.world_size: self.skipTest("Not enough CUDA devices") torch.cuda.set_device(dist.get_rank()) t0 = torch.ones([4], device=device) t1 = torch.ones([6], device=device) + 2 mesh = dt.DeviceMesh(device, torch.arange(4)) res = ft_c.all_reduce_coalesced([t0, t1], "sum", mesh) self.assertEqual(res[0], t0 * 4) self.assertEqual(res[1], t1 * 4) @parametrize("device", ["cpu", "cuda"]) def test_all_gather_tensor(self, device): if device == "cuda": if torch.cuda.device_count() < self.world_size: self.skipTest("Not enough CUDA devices") torch.cuda.set_device(dist.get_rank()) # testing 1d/2d mesh mesh_1d = dt.DeviceMesh(device, torch.arange(self.world_size)) mesh_2d = dt.DeviceMesh(device, torch.arange(self.world_size).view(2, 2)) for mesh in [mesh_1d, mesh_2d]: dims_to_gather = [0, 1, 2] for dim in dims_to_gather: output_size = [3, 3, 3] output_size[dim] *= mesh.size(0) # each rank have its own tensor, all_gather gives a bigger tensor local_tensor = torch.ones([3, 3, 3], device=device) gathered_tensor = ft_c.all_gather_tensor( local_tensor, gather_dim=dim, group=(mesh, 0) ) self.assertEqual(gathered_tensor, torch.ones(output_size)) @parametrize("device", ["cpu", "cuda"]) def test_all_gather_into_tensor_coalesced(self, device): if device == "cuda": if torch.cuda.device_count() < self.world_size: self.skipTest("Not enough CUDA devices") torch.cuda.set_device(dist.get_rank()) tensors = [torch.ones([4], device=device), torch.ones([4], device=device) + 1] mesh = dt.DeviceMesh(device, torch.arange(4)) res = ft_c.all_gather_into_tensor_coalesced(tensors, mesh) self.assertEqual(2, len(res)) self.assertEqual(torch.ones([4 * dist.get_world_size()], device=device), res[0]) self.assertEqual( torch.ones([4 * dist.get_world_size()], device=device) + 1, res[1] ) @parametrize("device", ["cpu", "cuda"]) def test_reduce_scatter_tensor(self, device): if device == "cuda": if torch.cuda.device_count() < self.world_size: self.skipTest("Not enough CUDA devices") torch.cuda.set_device(dist.get_rank()) # testing 1d/2d mesh mesh_1d = dt.DeviceMesh(device, torch.arange(self.world_size)) mesh_2d = dt.DeviceMesh(device, torch.arange(self.world_size).view(2, 2)) for mesh in [mesh_1d, mesh_2d]: dims_to_scatter = [0, 1] for dim in dims_to_scatter: group_size = mesh.size(0) input_size = [3, 3] output_size = [3, 3] output_size[dim] *= group_size input_tensor = torch.ones(output_size, device=device) res_num = 1 * group_size rs_tensor = ft_c.reduce_scatter_tensor( input_tensor, "sum", scatter_dim=dim, group=(mesh, 0) ) self.assertEqual(rs_tensor, torch.ones(input_size) * res_num) @parametrize("device", ["cpu", "cuda"]) def test_reduce_scatter_into_tensor_coalesced(self, device): if device == "cuda": if torch.cuda.device_count() < self.world_size: self.skipTest("Not enough CUDA devices") torch.cuda.set_device(dist.get_rank()) tensors = [ torch.ones([4], dtype=torch.int64, device=device), torch.ones([4], dtype=torch.int64, device=device) + 1, ] mesh = dt.DeviceMesh(device, torch.arange(4)) res = ft_c.reduce_scatter_tensor_coalesced(tensors, "sum", [0, 0], mesh) self.assertEqual(2, len(res)) self.assertEqual(torch.tensor([4], device=device), res[0]) self.assertEqual(torch.tensor([8], device=device), res[1]) class TestMetaCollectives(TestCase): def test_all_reduce(self): x = torch.rand((2, 3, 4), device="meta") out = 
ft_c.all_reduce(x, "sum", "0") self.assertEqual(x.size(), out.size()) class TestGradCollectives(MultiThreadedTestCase): @property def world_size(self): return 2 def setUp(self): super().setUp() self._spawn_threads() def test_all_reduce(self): x = torch.rand([4], requires_grad=True) y = torch.rand([4], requires_grad=True) out = ft_c.all_reduce(x, "sum", dist.group.WORLD) (out + y).sum().backward() self.assertIsNone(x.grad) class TestMakeFx(TestCase): def setUp(self): # make_fx is not thread-safe due to patching nd mutating global states # so create a fake_pg. self.rank = 0 self.world_size = 2 store = FakeStore() dist.init_process_group( backend="fake", world_size=self.world_size, rank=self.rank, store=store, ) def tearDown(self): super().tearDown() self.assertFalse(torch.fx._symbolic_trace.is_fx_tracing()) def test_all_reduce_tracing(self): def allred(input): return ft_c.all_reduce(input, "sum", group=dist.group.WORLD) + 1 graph = make_fx(allred)(torch.rand(4)) FileCheck().check("all_reduce").check("wait_tensor").run(str(graph.graph)) mesh = dt.DeviceMesh("cpu", torch.arange(self.world_size)) def allred_mesh(input): return ft_c.all_reduce(input, "sum", mesh) + 1 mesh_graph = make_fx(allred_mesh)(torch.rand(4)) FileCheck().check_not("get_attr").check("wait_tensor").run( str(mesh_graph.graph) ) def allred_mesh_dim(input): return ft_c.all_reduce(input, "sum", (mesh, 0)) + 1 mesh_dim_graph = make_fx(allred_mesh_dim)(torch.rand(4)) FileCheck().check_not("get_attr").check("wait_tensor").run( str(mesh_dim_graph.graph) ) BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO WORLD_SIZE = 2
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
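new_subgroups() in the record above carves the world into consecutive, equally sized rank blocks and returns the block containing the caller. The index arithmetic in isolation, as a hedged helper written only for illustration (no process group needed):

def subgroup_ranks(world_size: int, group_size: int):
    # Mirrors the loop in new_subgroups(): block i covers ranks
    # [i * group_size, (i + 1) * group_size).
    assert world_size % group_size == 0
    return [
        list(range(i * group_size, (i + 1) * group_size))
        for i in range(world_size // group_size)
    ]

print(subgroup_ranks(4, 2))  # [[0, 1], [2, 3]]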
torch
test/distributed/test_functional_api.py
world_size
def world_size(self): return 4
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) class TestExpand(MultiThreadedTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_functional_api.py
setUp
def setUp(self): super().setUp() self._spawn_threads()
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) class TestExpand(MultiThreadedTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
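The world_size/setUp pair added above is the usual MultiThreadedTestCase boilerplate: declare the simulated world size as a property and call self._spawn_threads() in setUp so each test body runs once per simulated rank. A skeletal sketch of that shape (the test body is a placeholder assumption, not taken from the file):

import torch.distributed as dist
from torch.testing._internal.common_distributed import MultiThreadedTestCase
from torch.testing._internal.common_utils import run_tests


class ExampleThreadedTest(MultiThreadedTestCase):
    @property
    def world_size(self):
        return 4

    def setUp(self):
        super().setUp()
        self._spawn_threads()

    def test_rank_is_in_world(self):
        # Placeholder body: each simulated rank sees its own dist.get_rank().
        self.assertTrue(0 <= dist.get_rank() < self.world_size)


if __name__ == "__main__":
    run_tests()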
torch
test/distributed/test_functional_api.py
test_expand_1d_rank_list
def test_expand_1d_rank_list(self): tag, rankset, group_size = ft_c._expand_group([0, 1, 2, 3]) self.assertEqual("", tag) self.assertEqual([0, 1, 2, 3], rankset) self.assertEqual(4, group_size) tag, rankset, group_size = ft_c._expand_group([0, 1, 2, 3], "bla") self.assertEqual("bla", tag)
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) class TestExpand(MultiThreadedTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_functional_api.py
test_expand_2d_rank_list
def test_expand_2d_rank_list(self): tag, rankset, group_size = ft_c._expand_group([[0, 1], [2, 3]]) self.assertEqual("", tag) self.assertEqual([0, 1, 2, 3], rankset) self.assertEqual(2, group_size) tag, rankset, group_size = ft_c._expand_group([[0, 1], [2, 3]], "blu") self.assertEqual("blu", tag) with self.assertRaisesRegex(ValueError, "group sizes must be identical"): ft_c._expand_group([[0], [1, 2, 3]])
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) class TestExpand(MultiThreadedTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
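test_expand_1d_rank_list and test_expand_2d_rank_list above pin down how _expand_group treats rank lists: a flat list [0, 1, 2, 3] becomes a single group of size 4, while [[0, 1], [2, 3]] becomes rankset [0, 1, 2, 3] with group_size 2, and mismatched inner lengths are rejected. A hedged restatement of that expected behavior (not the library's implementation):

def expand_rank_lists(ranks):
    # 1D list -> a single group; 2D list -> equally sized groups, flattened.
    if ranks and isinstance(ranks[0], list):
        sizes = {len(group) for group in ranks}
        if len(sizes) != 1:
            raise ValueError("group sizes must be identical")
        return [r for group in ranks for r in group], sizes.pop()
    return list(ranks), len(ranks)

print(expand_rank_lists([0, 1, 2, 3]))      # ([0, 1, 2, 3], 4)
print(expand_rank_lists([[0, 1], [2, 3]]))  # ([0, 1, 2, 3], 2)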
torch
test/distributed/test_functional_api.py
test_expand_process_group
def test_expand_process_group(self): tag, rankset, group_size = ft_c._expand_group(dist.group.WORLD) self.assertEqual(c10d._get_group_tag(dist.group.WORLD), tag) self.assertEqual([0, 1, 2, 3], rankset) self.assertEqual(4, group_size) tag, rankset, group_size = ft_c._expand_group(dist.group.WORLD, "bla") self.assertEqual("bla", tag) my_pg, others = new_subgroups(group_size=2) tag, rankset, group_size = ft_c._expand_group(my_pg) self.assertEqual(c10d._get_group_tag(my_pg), tag) self.assertEqual(dist.get_process_group_ranks(my_pg), rankset) self.assertEqual(2, group_size) my_pg = None for i in range(dist.get_world_size()): group = c10d._new_group_with_tag([i], pg_tag="my_pg") if i == dist.get_rank(): my_pg = group tag, rankset, group_size = ft_c._expand_group(my_pg) self.assertEqual("my_pg", tag) self.assertEqual([dist.get_rank()], rankset) self.assertEqual(1, group_size) tag, rankset, group_size = ft_c._expand_group(my_pg, "bla") self.assertEqual("bla", tag)
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) class TestExpand(MultiThreadedTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
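test_expand_process_group feeds a real ProcessGroup through _expand_group and checks it against dist.get_process_group_ranks. A hedged single-process sketch of the public half of that check (gloo backend, world size 1; the address and port are arbitrary choices for the example):

import os
import torch.distributed as dist

os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
dist.init_process_group("gloo", rank=0, world_size=1)
print(dist.get_process_group_ranks(dist.group.WORLD))  # [0]
dist.destroy_process_group()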
torch
test/distributed/test_functional_api.py
test_expand_device_mesh
def test_expand_device_mesh(self): mesh = dt.DeviceMesh("cpu", torch.arange(4)) tag, rankset, group_size = ft_c._expand_group(mesh) self.assertEqual(c10d._get_group_tag(mesh.get_group(mesh_dim=0)), tag) self.assertEqual([0, 1, 2, 3], rankset) self.assertEqual(4, group_size) mesh = dt.DeviceMesh("cpu", torch.arange(4)) tag, rankset, group_size = ft_c._expand_group(mesh) self.assertEqual(c10d._get_group_tag(mesh.get_group(mesh_dim=0)), tag) self.assertEqual([0, 1, 2, 3], rankset) self.assertEqual(4, group_size)
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) class TestExpand(MultiThreadedTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_functional_api.py
test_expand_device_mesh_tuple
def test_expand_device_mesh_tuple(self): mesh = dt.DeviceMesh("cpu", torch.arange(4).view(2, 2)) with self.assertRaisesRegex(AssertionError, "Only 1D mesh"): tag, rankset, group_size = ft_c._expand_group(mesh) tag, rankset, group_size = ft_c._expand_group((mesh, 0)) self.assertEqual(c10d._get_group_tag(mesh.get_group(mesh_dim=0)), tag) expected_rankset = [0, 2] if dist.get_rank() in [0, 2] else [1, 3] self.assertEqual(expected_rankset, rankset) self.assertEqual(2, group_size) tag, rankset, group_size = ft_c._expand_group((mesh, 1)) expected_rankset = [0, 1] if dist.get_rank() in [0, 1] else [2, 3] self.assertEqual(c10d._get_group_tag(mesh.get_group(mesh_dim=1)), tag) self.assertEqual(expected_rankset, rankset) self.assertEqual(2, group_size)
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) class TestExpand(MultiThreadedTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
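The (mesh, dim) cases above depend on how a 2x2 DeviceMesh over ranks 0..3 is laid out: dim 0 groups ranks column-wise and dim 1 groups them row-wise, which is where the expected ranksets [0, 2]/[1, 3] and [0, 1]/[2, 3] come from. The layout can be verified with plain tensor indexing, no process group needed:

import torch

mesh = torch.arange(4).view(2, 2)                       # [[0, 1], [2, 3]]
dim0_groups = [mesh[:, c].tolist() for c in range(2)]   # [[0, 2], [1, 3]]
dim1_groups = [mesh[r, :].tolist() for r in range(2)]   # [[0, 1], [2, 3]]
print(dim0_groups, dim1_groups)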
torch
test/distributed/test_functional_api.py
world_size
def world_size(self): return 4
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) class TestExpand(MultiThreadedTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_functional_api.py
setUp
def setUp(self): super().setUp() self._spawn_threads()
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) class TestExpand(MultiThreadedTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_dynamo_distributed.py
f
@requires_nccl() class TestSingleProc(DynamoDistributedSingleProcTestCase): """ Test harness initializes dist process group. Test simple things here since they are simpler to debug. Use TestMultiProc for things that really need to run on multiple nodes """
def f(x, y): return x + torch.ones(y, device=device).sum() if self.rank == 0: dataloader = [3, 3, 7] else: dataloader = [3, 4, 9] for data in dataloader: f(torch.randn(5, device=self.rank), data) metrics = torch._dynamo.utils.get_compilation_metrics() # Number of compiles same on all nodes res = [None] * self.world_size torch.distributed.all_gather_object(res, len(metrics)) for r in res[1:]: self.assertEqual(res[0], r)
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
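The test recorded above counts recompiles per rank via torch._dynamo.utils.get_compilation_metrics() and compares the counts with all_gather_object. A hedged single-process sketch of just the counting part (the exact number of compiles depends on dynamic-shape heuristics, so it is printed rather than asserted):

import torch

@torch.compile
def f(x, y):
    return x + torch.ones(y).sum()

torch._dynamo.utils.clear_compilation_metrics()
for y in (3, 3, 7):
    f(torch.randn(5), y)
print(len(torch._dynamo.utils.get_compilation_metrics()))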
torch
test/distributed/test_dynamo_distributed.py
get_model
def get_model(device, bsz=20, in_feat=10, hidden_feat=5000, out_feat=5): m = ToyModel(in_feat=in_feat, hidden_feat=hidden_feat, out_feat=out_feat).to(device) m.apply(init_weights) inputs = torch.rand(bsz, in_feat).to(device) outputs = m(inputs) return m, inputs, outputs
def get_model( device, bsz=20, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None ): m = ToyModel( in_feat=in_feat, hidden_feat=hidden_feat, out_feat=out_feat, ctx_manager=ctx_manager, ).to(device) m.apply(init_weights) inputs = torch.rand(bsz, in_feat).to(device) outputs = m(inputs) return m, inputs, outputs class MutatingModel(nn.Module): def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None): super().__init__() self.ctx_manager = ctx_manager self.net = nn.Sequential( *[nn.Linear(in_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, out_feat), nn.ReLU()] ) self.state = 1 def forward(self, inputs): self.state = 2 return self.net(inputs) * self.state
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
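Besides threading ctx_manager into get_model, the updated context above adds MutatingModel, whose forward() writes to self.state before using it. A minimal hedged sketch of that side-effecting pattern (the names here are illustrative, not the test's class):

import torch
from torch import nn


class TinyMutating(nn.Module):
    def __init__(self):
        super().__init__()
        self.lin = nn.Linear(4, 4)
        self.state = 1

    def forward(self, x):
        # The attribute write is the point: forward() mutates module state.
        self.state = 2
        return self.lin(x) * self.state


m = TinyMutating()
out = m(torch.rand(2, 4))
print(m.state)  # 2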
torch
test/distributed/test_dynamo_distributed.py
opt_fn
def opt_fn(inputs): return ddp_m(inputs) opt_outputs = opt_fn(inputs) self.assertTrue(same(correct_outputs, opt_outputs)) self.assertEqual(check_splits_compiler.compiler_called, 3) # ensure compatibilty with dynamo explain explain_out = torch._dynamo.explain(ddp_m, inputs) break_reasons = explain_out[4] self.assertEqual(len(break_reasons), 3) self.assertTrue(all(["DDPOptimizer" in r.reason for r in break_reasons]))
def opt_fn(inputs): return ddp_m(inputs) opt_outputs = opt_fn(inputs) self.assertTrue(same(correct_outputs, opt_outputs)) self.assertEqual(check_splits_compiler.compiler_called, 3) # ensure compatibility with dynamo explain explain_out = torch._dynamo.explain(ddp_m)(inputs) break_reasons = explain_out.break_reasons self.assertEqual(len(break_reasons), 3) self.assertTrue(all("DDPOptimizer" in r.reason for r in break_reasons))
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
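The diff above moves from the old positional torch._dynamo.explain(ddp_m, inputs) API, which returned a tuple indexed as explain_out[4], to the call-style torch._dynamo.explain(ddp_m)(inputs), whose result exposes break_reasons as an attribute. A small hedged sketch of that call style on a plain function, using torch._dynamo.graph_break() to force one break:

import torch


def fn(x):
    x = x + 1
    torch._dynamo.graph_break()  # force one graph break for the demo
    return x * 2


explain_out = torch._dynamo.explain(fn)(torch.randn(4))
print(len(explain_out.break_reasons))  # at least one break reason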
torch
test/distributed/test_dynamo_distributed.py
opt_fn
def opt_fn(inputs): return ddp_m(inputs) opt_outputs = opt_fn(inputs) self.assertTrue(same(correct_outputs, opt_outputs)) self.assertEqual(check_splits_compiler.compiler_called, 3) # ensure compatibilty with dynamo explain explain_out = torch._dynamo.explain(ddp_m, inputs) break_reasons = explain_out[4] self.assertEqual(len(break_reasons), 3) self.assertTrue(all(["DDPOptimizer" in r.reason for r in break_reasons]))
def opt_fn(inputs): return ddp_m(inputs) opt_outputs = opt_fn(inputs) self.assertTrue(same(correct_outputs, opt_outputs)) self.assertEqual(check_splits_compiler.compiler_called, 3) # ensure compatibility with dynamo explain explain_out = torch._dynamo.explain(ddp_m)(inputs) break_reasons = explain_out.break_reasons self.assertEqual(len(break_reasons), 3) self.assertTrue(all("DDPOptimizer" in r.reason for r in break_reasons))
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
opt_fn
def opt_fn(inputs): return ddp_m(inputs) opt_outputs = opt_fn(inputs) self.assertTrue(same(correct_outputs, opt_outputs)) self.assertEqual(check_splits_compiler.compiler_called, 3) # ensure compatibilty with dynamo explain explain_out = torch._dynamo.explain(ddp_m, inputs) break_reasons = explain_out[4] self.assertEqual(len(break_reasons), 3) self.assertTrue(all(["DDPOptimizer" in r.reason for r in break_reasons]))
def opt_fn(inputs): return ddp_m(inputs) opt_outputs = opt_fn(inputs) self.assertTrue(same(correct_outputs, opt_outputs)) self.assertEqual(check_splits_compiler.compiler_called, 3) # ensure compatibility with dynamo explain explain_out = torch._dynamo.explain(ddp_m)(inputs) break_reasons = explain_out.break_reasons self.assertEqual(len(break_reasons), 3) self.assertTrue(all("DDPOptimizer" in r.reason for r in break_reasons))
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
f
def f(x, y): return x + torch.ones(y, device=device).sum() if self.rank == 0: dataloader = [3, 3, 7] else: dataloader = [3, 4, 9] for data in dataloader: f(torch.randn(5, device=self.rank), data) metrics = torch._dynamo.utils.get_compilation_metrics() # Number of compiles same on all nodes res = [None] * self.world_size torch.distributed.all_gather_object(res, len(metrics)) for r in res[1:]: self.assertEqual(res[0], r)
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_dynamo_distributed.py
f
def f(x, y): return x + torch.ones(y, device=device).sum() if self.rank == 0: dataloader = [3, 3, 7] else: dataloader = [3, 4, 9] for data in dataloader: f(torch.randn(5, device=self.rank), data) metrics = torch._dynamo.utils.get_compilation_metrics() # Number of compiles same on all nodes res = [None] * self.world_size torch.distributed.all_gather_object(res, len(metrics)) for r in res[1:]: self.assertEqual(res[0], r)
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_dynamo_distributed.py
f
def f(x, y): return x + torch.ones(y, device=device).sum() if self.rank == 0: dataloader = [3, 3, 7] else: dataloader = [3, 4, 9] for data in dataloader: f(torch.randn(5, device=self.rank), data) metrics = torch._dynamo.utils.get_compilation_metrics() # Number of compiles same on all nodes res = [None] * self.world_size torch.distributed.all_gather_object(res, len(metrics)) for r in res[1:]: self.assertEqual(res[0], r)
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_dynamo_distributed.py
f
def f(x, y): return x + torch.ones(y, device=device).sum() if self.rank == 0: dataloader = [3, 3, 7] else: dataloader = [3, 4, 9] for data in dataloader: f(torch.randn(5, device=self.rank), data) metrics = torch._dynamo.utils.get_compilation_metrics() # Number of compiles same on all nodes res = [None] * self.world_size torch.distributed.all_gather_object(res, len(metrics)) for r in res[1:]: self.assertEqual(res[0], r)
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_dynamo_distributed.py
f
def f(x, y):
    return x + torch.ones(y, device=device).sum()

if self.rank == 0:
    dataloader = [3, 3, 7]
else:
    dataloader = [3, 4, 9]

for data in dataloader:
    f(torch.randn(5, device=self.rank), data)

metrics = torch._dynamo.utils.get_compilation_metrics()
# Number of compiles same on all nodes
res = [None] * self.world_size
torch.distributed.all_gather_object(res, len(metrics))
for r in res[1:]:
    self.assertEqual(res[0], r)
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_dynamo_distributed.py
test_asymmetric_compilation_with_fx_cache
def test_asymmetric_compilation_with_fx_cache(self):
    from torch._dynamo.utils import counters
    from torch._inductor.utils import fresh_inductor_cache

    with fresh_inductor_cache(), _dynamo_dist_per_rank_init(
        self.rank, self.world_size
    ):
        torch._dynamo.utils.clear_compilation_metrics()

        device = f"cuda:{self.rank}"
        pg = dist.distributed_c10d._get_default_group()

        @torch.compile
        def f(x):
            y = 2 * x
            return y.sum()

        backend = pg._get_backend(torch.device(device))
        backend._set_default_timeout(timedelta(seconds=5))
        counters.clear()

        x = torch.ones(4, device=device)

        f(x)
        self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
        self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
        self.assertEqual(counters["inductor"]["fxgraph_cache_bypass"], 0)
        w = pg.allreduce(x)
        w.wait()
        torch.cuda.synchronize(device)
        torch._dynamo.reset()

        if self.rank == 0:
            with fresh_inductor_cache():
                f(x)
            self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 2)
            self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 0)
            self.assertEqual(counters["inductor"]["fxgraph_cache_bypass"], 0)
        else:
            f(x)
            self.assertEqual(counters["inductor"]["fxgraph_cache_miss"], 1)
            self.assertEqual(counters["inductor"]["fxgraph_cache_hit"], 1)
            self.assertEqual(counters["inductor"]["fxgraph_cache_bypass"], 0)

        w = pg.allreduce(x)
        w.wait()
        torch.cuda.synchronize(device)
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig @requires_nccl() class TestMultiProc(DynamoDistributedMultiProcTestCase): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_functional_api.py
test_pg_creation_with_tag
def test_pg_creation_with_tag(self):
    my_group, _ = new_subgroups(group_size=2, pg_tag="blu")
    my_group2, _ = new_subgroups(group_size=2, pg_tag="blu")
    self.assertEqual(my_group, my_group2)

    my_group3, _ = new_subgroups(group_size=2, pg_tag="blu2")
    self.assertNotEqual(my_group, my_group3)

    my_group4, _ = new_subgroups(group_size=2)
    self.assertNotEqual(my_group, my_group4)

    my_group5, _ = new_subgroups(group_size=2)
    self.assertNotEqual(my_group4, my_group5)
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) class TestPgTag(MultiThreadedTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_functional_api.py
test_pg_lookup_roundtrip
def test_pg_lookup_roundtrip(self):
    pg_tag0, _ = new_subgroups(group_size=2, pg_tag="blu")
    pg_tag1, _ = new_subgroups(group_size=2, pg_tag="blu2")
    pg_notag0, _ = new_subgroups(group_size=2)
    pg_notag1, _ = new_subgroups(group_size=2)

    def roundtrip(pg):
        tag, rankset, _ = ft_c._expand_group(pg)
        return c10d._find_pg_by_ranks_and_tag(tag, rankset)

    self.assertEqual(pg_tag0, roundtrip(pg_tag0))
    self.assertEqual(pg_tag1, roundtrip(pg_tag1))
    self.assertEqual(pg_notag0, roundtrip(pg_notag0))
    self.assertEqual(pg_notag1, roundtrip(pg_notag1))
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) class TestPgTag(MultiThreadedTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_functional_api.py
roundtrip
def roundtrip(pg):
    tag, rankset, _ = ft_c._expand_group(pg)
    return c10d._find_pg_by_ranks_and_tag(tag, rankset)

self.assertEqual(pg_tag0, roundtrip(pg_tag0))
self.assertEqual(pg_tag1, roundtrip(pg_tag1))
self.assertEqual(pg_notag0, roundtrip(pg_notag0))
self.assertEqual(pg_notag1, roundtrip(pg_notag1))
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_functional_api.py
test_pg_lookup_with_tag
def test_pg_lookup_with_tag(self):
    pg_tag0, _ = new_subgroups(group_size=2, pg_tag="blu")
    pg_tag1, _ = new_subgroups(group_size=2, pg_tag="bla")
    pg_notag0, _ = new_subgroups(group_size=2)

    def roundtrip(pg, pg_tag):
        tag, rankset, _ = ft_c._expand_group(pg, pg_tag)
        return c10d._find_pg_by_ranks_and_tag(tag, rankset)

    self.assertEqual(pg_tag0, roundtrip(pg_tag1, "blu"))
    self.assertEqual(pg_tag0, roundtrip(pg_notag0, "blu"))
    # Cannot erase the tag of a PG
    self.assertEqual(pg_tag0, roundtrip(pg_tag0, ""))
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) class TestPgTag(MultiThreadedTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_functional_api.py
roundtrip
def roundtrip(pg):
    tag, rankset, _ = ft_c._expand_group(pg)
    return c10d._find_pg_by_ranks_and_tag(tag, rankset)

self.assertEqual(pg_tag0, roundtrip(pg_tag0))
self.assertEqual(pg_tag1, roundtrip(pg_tag1))
self.assertEqual(pg_notag0, roundtrip(pg_notag0))
self.assertEqual(pg_notag1, roundtrip(pg_notag1))
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, )
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_functional_api.py
test_find_or_create_pg
def test_find_or_create_pg(self):
    pg = c10d._find_or_create_pg_by_ranks_and_tag("blu", [0, 1, 2, 3], 2)
    pg_tag0, _ = new_subgroups(group_size=2, pg_tag="blu")
    self.assertEqual(pg, pg_tag0)
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) class TestPgTag(MultiThreadedTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_functional_api.py
test_find_root_pg
def test_find_root_pg(self):
    pg = c10d._find_pg_by_ranks_and_tag("", [0, 1, 2, 3])
    self.assertEqual(dist.group.WORLD, pg)
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) class TestPgTag(MultiThreadedTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_functional_api.py
world_size
def world_size(self): return 4
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) class TestExpand(MultiThreadedTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_functional_api.py
setUp
def setUp(self):
    super().setUp()
    self._spawn_threads()
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) class TestExpand(MultiThreadedTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_fake_pg.py
test_fake_pg_tracing
def test_fake_pg_tracing(self):
    store = dist.HashStore()
    dist.init_process_group(backend="fake", rank=0, world_size=2, store=store)

    default_pg = dist.distributed_c10d._get_default_group()

    def allgather_fn(tensor):
        return funcol.all_gather_tensor(tensor, 0, default_pg)

    gm = make_fx(allgather_fn)(torch.randn(2, 2, device="cuda"))
    FileCheck().check("all_gather").check("wait_tensor").run(str(gm.graph))
import sys import unittest import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._tensor import DeviceMesh, init_device_mesh, Shard from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.fx.experimental.proxy_tensor import make_fx from torch.testing import FileCheck from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule from torch.testing._internal.distributed.fake_pg import FakeStore HAS_CUDA = torch.cuda.is_available() class TestFakePG(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_fake_pg.py
allgather_fn
def allgather_fn(tensor):
    return funcol.all_gather_tensor(tensor, 0, default_pg)

gm = make_fx(allgather_fn)(torch.randn(2, 2, device="cuda"))
FileCheck().check("all_gather").check("wait_tensor").run(str(gm.graph))
import sys import unittest import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._tensor import DeviceMesh, init_device_mesh, Shard from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.fx.experimental.proxy_tensor import make_fx from torch.testing import FileCheck from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule from torch.testing._internal.distributed.fake_pg import FakeStore HAS_CUDA = torch.cuda.is_available()
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_fake_pg.py
test_broadcast
def test_broadcast(self):
    store = FakeStore()
    dist.init_process_group(backend="fake", rank=0, world_size=2, store=store)

    # src == rank
    output = torch.ones(3, 3)
    dist.broadcast(output, src=0)
    self.assertEqual(tuple(output.shape), (3, 3))

    # src != rank
    output = torch.ones(3, 3)
    dist.broadcast(output, src=1)
    self.assertEqual(tuple(output.shape), (3, 3))
import sys import unittest import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._tensor import DeviceMesh, init_device_mesh, Shard from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.fx.experimental.proxy_tensor import make_fx from torch.testing import FileCheck from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule from torch.testing._internal.distributed.fake_pg import FakeStore HAS_CUDA = torch.cuda.is_available() class TestFakePG(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_fake_pg.py
test_scatter
def test_scatter(self):
    store = FakeStore()
    dist.init_process_group(backend="fake", rank=0, world_size=2, store=store)

    # src == rank
    output = torch.ones(3, 3)
    to_scatter = [torch.ones(3, 3) * rank for rank in range(2)]
    dist.scatter(output, to_scatter)
    self.assertEqual(tuple(output.shape), (3, 3))

    # src != rank
    output = torch.ones(3, 3)
    dist.scatter(output, None, src=1)
    self.assertEqual(tuple(output.shape), (3, 3))
import sys import unittest import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._tensor import DeviceMesh, init_device_mesh, Shard from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.fx.experimental.proxy_tensor import make_fx from torch.testing import FileCheck from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule from torch.testing._internal.distributed.fake_pg import FakeStore HAS_CUDA = torch.cuda.is_available() class TestFakePG(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_fake_pg.py
test_alltoall
def test_alltoall(self):
    store = FakeStore()
    dist.init_process_group(backend="fake", rank=0, world_size=2, store=store)

    output_list = [torch.ones(3, 3) for _ in range(2)]
    input_list = [torch.ones(3, 3) for _ in range(2)]
    dist.all_to_all(output_list, input_list)
    self.assertEqual(len(output_list), 2)
    for output in output_list:
        self.assertEqual(tuple(output.shape), (3, 3))
import sys import unittest import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._tensor import DeviceMesh, init_device_mesh, Shard from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.fx.experimental.proxy_tensor import make_fx from torch.testing import FileCheck from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule from torch.testing._internal.distributed.fake_pg import FakeStore HAS_CUDA = torch.cuda.is_available() class TestFakePG(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_fake_pg.py
test_alltoall_base
def test_alltoall_base(self):
    store = FakeStore()
    dist.init_process_group(backend="fake", rank=0, world_size=2, store=store)

    out_tensor = torch.ones(3, 3)
    in_tensor = torch.ones(3, 3)
    output_split = [1, 1]
    input_split = [1, 1]
    dist.all_to_all_single(out_tensor, in_tensor, output_split, input_split)
    self.assertEqual(tuple(out_tensor.shape), (3, 3))
import sys import unittest import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._tensor import DeviceMesh, init_device_mesh, Shard from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.fx.experimental.proxy_tensor import make_fx from torch.testing import FileCheck from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule from torch.testing._internal.distributed.fake_pg import FakeStore HAS_CUDA = torch.cuda.is_available() class TestFakePG(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_fake_pg.py
test_send
def test_send(self):
    store = FakeStore()
    dist.init_process_group(backend="fake", rank=0, world_size=2, store=store)

    tensor = torch.ones(3, 3)
    dist.send(tensor, 1)
    self.assertEqual(tuple(tensor.shape), (3, 3))
import sys import unittest import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._tensor import DeviceMesh, init_device_mesh, Shard from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.fx.experimental.proxy_tensor import make_fx from torch.testing import FileCheck from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule from torch.testing._internal.distributed.fake_pg import FakeStore HAS_CUDA = torch.cuda.is_available() class TestFakePG(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_fake_pg.py
test_fsdp_tp_fake_e2e
def test_fsdp_tp_fake_e2e(self):
    world_size = 4
    tp_size = 2

    store = dist.HashStore()
    dist.init_process_group(
        backend="fake", rank=0, world_size=world_size, store=store
    )

    device_mesh = DeviceMesh("cuda", torch.arange(0, world_size).view(-1, tp_size))
    device_mesh = init_device_mesh(
        "cuda", (world_size // tp_size, tp_size), mesh_dim_names=["dp", "tp"]
    )

    sequence_parallelize_plan = {
        "net1": ColwiseParallel(input_layouts=Shard(0)),
        "net2": RowwiseParallel(output_layouts=Shard(0)),
    }
    pairwise_parallelize_plan = {
        "net1": ColwiseParallel(),
        "net2": RowwiseParallel(),
    }
    for parallel_plan in [sequence_parallelize_plan, pairwise_parallelize_plan]:
        my_module = parallelize_module(
            MLPModule(device="cuda"),
            device_mesh["tp"],
            parallel_plan,
        )
        sharded_module = FSDP(
            my_module, use_orig_params=True, device_mesh=device_mesh["dp"]
        )
        optim = torch.optim.Adam(sharded_module.parameters(), lr=0.0001)
        for i in range(10):
            dp_rank = dist.get_rank()
            torch.manual_seed(i + dp_rank)
            input = torch.randn(20, 10).cuda(dist.get_rank())
            x = sharded_module(input)
            loss = x.sum()
            loss.backward()
            optim.step()
import sys import unittest import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._tensor import DeviceMesh, init_device_mesh, Shard from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.fx.experimental.proxy_tensor import make_fx from torch.testing import FileCheck from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule from torch.testing._internal.distributed.fake_pg import FakeStore HAS_CUDA = torch.cuda.is_available() class TestFakePG(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_dynamo_distributed.py
apply_fsdp
def apply_fsdp(model, wrap_policy):
    model = FSDP(
        copy.deepcopy(model), auto_wrap_policy=wrap_policy, use_orig_params=True
    )
    return model

with _dynamo_dist_per_rank_init(self.rank, self.world_size):
    for (wrap_policy, test_instance) in (
        (
            None,
            "FSDP without recursive wrapping"
        ),
        (
            functools.partial(
                transformer_auto_wrap_policy, transformer_layer_cls=(BertLayer, )
            ),
            "FSDP with recursive wrapping BertLayer instances"
        )
    ):
        print(f"Running hf_bert test for {test_instance}")
        model, inputs = get_hf_bert(self.rank)
        reset_rng_state()
        eager_model = apply_fsdp(model, wrap_policy)
        correct_outputs = eager_model(**inputs)
        correct_loss = correct_outputs.loss
        correct_loss.backward()

        reset_rng_state()
        opt_model = apply_fsdp(model, wrap_policy)
        opt_model = torch._dynamo.optimize("inductor")(opt_model)
        opt_outputs = opt_model(**inputs)
        opt_loss = opt_outputs.loss
        opt_loss.backward()

        inputs_flat = [inputs[k] for k in inputs]
        correct_results = collect_results(eager_model, correct_outputs.logits, correct_loss, inputs_flat)
        opt_results = collect_results(opt_model, opt_outputs.logits, opt_loss, inputs_flat)
        self.assertTrue(same(correct_results, opt_results))
def apply_fsdp(model, wrap_policy):
    model = FSDP(
        copy.deepcopy(model), auto_wrap_policy=wrap_policy, use_orig_params=True
    )
    return model

with _dynamo_dist_per_rank_init(self.rank, self.world_size):
    for wrap_policy, test_instance in (
        (None, "FSDP without recursive wrapping"),
    ):
        print(f"Running hf_bert test for {test_instance}")
        model, inputs = get_hf_bert(self.rank)
        reset_rng_state()
        eager_model = apply_fsdp(model, wrap_policy)
        correct_outputs = eager_model(**inputs)
        correct_loss = correct_outputs.loss
        correct_loss.backward()

        reset_rng_state()
        opt_model = apply_fsdp(model, wrap_policy)
        opt_model = torch._dynamo.optimize("inductor")(opt_model)
        opt_outputs = opt_model(**inputs)
        opt_loss = opt_outputs.loss
        opt_loss.backward()

        inputs_flat = [inputs[k] for k in inputs]
        correct_results = collect_results(
            eager_model, correct_outputs.logits, correct_loss, inputs_flat
        )
        opt_results = collect_results(
            opt_model, opt_outputs.logits, opt_loss, inputs_flat
        )
        self.assertTrue(same(correct_results, opt_results))
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
__init__
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5):
    super().__init__()
    self.net = nn.Sequential(
        *[nn.Linear(in_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, out_feat), nn.ReLU()]
    )
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None):
    super().__init__()
    self.ctx_manager = ctx_manager
    self.net = nn.Sequential(
        *[nn.Linear(in_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, out_feat), nn.ReLU()]
    )
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
forward
def forward(self, inputs): return self.net(inputs)
def forward(self, inputs):
    if self.ctx_manager is not None:
        with self.ctx_manager():
            return self.net(inputs)
    else:
        return self.net(inputs)
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
B
def B(s):
    return [torch.randn(s, 10), torch.randint(0, 2, (s,))]

if self.rank == 0:
    dataloader = [B(5), B(8), B(6)]
else:
    dataloader = [B(6), B(6), B(3)]

for data, labels in dataloader:
    data, labels = data.to(self.rank), labels.to(self.rank)
    optimizer.zero_grad()
    output = ddp_model(data)
    loss = loss_fn(output, labels)
    loss.backward()
    optimizer.step()

metrics = torch._dynamo.utils.get_compilation_metrics()
# Number of compiles same on all nodes
res = [None] * self.world_size
torch.distributed.all_gather_object(res, len(metrics))
for r in res[1:]:
    self.assertEqual(res[0], r)
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_dynamo_distributed.py
f
def f(x, y):
    return x + torch.ones(y, device=device).sum()

if self.rank == 0:
    dataloader = [3, 3, 7]
else:
    dataloader = [3, 4, 9]

for data in dataloader:
    f(torch.randn(5, device=self.rank), data)

metrics = torch._dynamo.utils.get_compilation_metrics()
# Number of compiles same on all nodes
res = [None] * self.world_size
torch.distributed.all_gather_object(res, len(metrics))
for r in res[1:]:
    self.assertEqual(res[0], r)
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_dynamo_distributed.py
test_fsdp_dup_tensors_diff_source
def test_fsdp_dup_tensors_diff_source(self):
    """
    Tests that FSDP-managed modules' parameters and buffers with different
    source do not result in incorrect AOTAutograd de-dup guards like
    ``a is b``, where ``a`` and ``b`` are certainly not the same. We check
    this by checking for per-invocation recompiles.
    """

    class BufModule(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self._buf = nn.Buffer(
                torch.randn((3,), requires_grad=False, device="cuda")
            )

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return x + self._buf

    class Model(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self._param = nn.Parameter(torch.randn((1,), device="cuda"))
            self._buf_module = BufModule()
            # Share the buffer, meaning same tensor but different source
            self._buf = self._buf_module._buf

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            # Use the same buffer tensor twice in the compiled forward,
            # including a data mutation to trigger de-dup logic
            self._buf.mul_(2)
            z = x + self._buf
            z = self._buf_module(z)
            z += self._param
            return z

    fsdp_model = FSDP(Model(), use_orig_params=True)
    cnt = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")
    fsdp_model = torch._dynamo.optimize(cnt)(fsdp_model)
    inp = torch.randn((2, 3), device="cuda")
    for _ in range(15):
        fsdp_model(inp)
    # Check for no recompiles (if there were incorrect de-dup guards, then
    # the frame count would be equal to the number of forward calls)
    self.assertEqual(cnt.frame_count, 1)
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache @requires_nccl() @requires_cuda class TestSingleProc(DynamoDistributedSingleProcTestCase): from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_dynamo_distributed.py
test_fsdp_staticmethod
def test_fsdp_staticmethod(self):
    """
    Tests that Dynamo compiles staticmethods for FSDP-managed modules
    correctly both when the staticmethod is invoked from the class and from
    the object itself.
    """

    class ModuleWithStaticMethod(nn.Module):
        def __init__(self, use_self: bool):
            super().__init__()
            self._use_self = use_self
            torch.manual_seed(42)  # force `_param` to be deterministic
            self._param = nn.Parameter(torch.randn((3,), device="cuda"))

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            if self._use_self:
                z = self._add(x, self._param)
            else:
                z = ModuleWithStaticMethod._add(x, self._param)
            z *= 2
            return z

        @staticmethod
        def _add(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
            return x + y

    model = ModuleWithStaticMethod(False)
    x = torch.randn((2, 3), device="cuda")
    ref_out = model(x)
    test_outs: List[torch.Tensor] = []

    for use_self in (False, True):
        model = ModuleWithStaticMethod(use_self)
        fsdp_model = FSDP(model, use_orig_params=True)
        cnt = torch._dynamo.testing.CompileCounterWithBackend("aot_eager")
        fsdp_model = torch._dynamo.optimize(cnt)(fsdp_model)
        test_outs.append(fsdp_model(x))
        # Check for no recompiles, which could happen if incorrectly
        # passing args to the staticmethod (e.g. doubly passing `self`)
        # 3 is expected here for 1 forward.
        # Graph 1 should be add and imul
        self.assertEqual(cnt.frame_count, 1)
    for test_out in test_outs:
        self.assertEqual(test_out, ref_out)
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache @requires_nccl() @requires_cuda class TestSingleProc(DynamoDistributedSingleProcTestCase): from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_dynamo_distributed.py
__init__
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5):
    super().__init__()
    self.net = nn.Sequential(
        *[nn.Linear(in_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, out_feat), nn.ReLU()]
    )
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None):
    super().__init__()
    self.ctx_manager = ctx_manager
    self.net = nn.Sequential(
        *[nn.Linear(in_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()]
        + [nn.Linear(hidden_feat, out_feat), nn.ReLU()]
    )
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
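The new ctx_manager argument on ToyModel is a callable that returns a context manager, which forward then enters around the network call. A hypothetical usage sketch, assuming the ToyModel class from this record is in scope; the autocast configuration is illustrative, not necessarily what the test suite passes:

import functools
import torch

model = ToyModel(
    in_feat=10,
    hidden_feat=64,
    out_feat=5,
    # Any zero-argument callable returning a context manager works here.
    ctx_manager=functools.partial(torch.autocast, device_type="cpu", dtype=torch.bfloat16),
)
out = model(torch.randn(4, 10))  # forward runs inside the supplied context manager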
torch
test/distributed/test_dynamo_distributed.py
test_async_subclass_no_specialize
def test_async_subclass_no_specialize(self): cnt = torch._dynamo.testing.CompileCounterWithBackend("eager") @torch.compile(backend=cnt, fullgraph=True, dynamic=True) def f(x): return x + 1 f(_maybe_wrap_tensor(torch.randn(10))) f(_maybe_wrap_tensor(torch.randn(12))) self.assertEqual(cnt.frame_count, 1)
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache @requires_nccl() @requires_cuda class TestSingleProc(DynamoDistributedSingleProcTestCase): from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
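The no-specialization behavior that test exercises can be seen with plain tensors as well (the test additionally wraps them via _maybe_wrap_tensor); a minimal CPU-only sketch under that simplification:

import torch
import torch._dynamo.testing

cnt = torch._dynamo.testing.CompileCounterWithBackend("eager")

@torch.compile(backend=cnt, fullgraph=True, dynamic=True)
def f(x):
    return x + 1

f(torch.randn(10))
f(torch.randn(12))  # different size, but dynamic shapes avoid a second frame
assert cnt.frame_count == 1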
torch
test/distributed/test_dynamo_distributed.py
f
if __name__ == "__main__": from torch._dynamo.test_case import run_tests run_tests()
def f(x, y): return x + torch.ones(y, device=device).sum() if self.rank == 0: dataloader = [3, 3, 7] else: dataloader = [3, 4, 9] for data in dataloader: f(torch.randn(5, device=self.rank), data) metrics = torch._dynamo.utils.get_compilation_metrics() # Number of compiles same on all nodes res = [None] * self.world_size torch.distributed.all_gather_object(res, len(metrics)) for r in res[1:]: self.assertEqual(res[0], r)
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
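The cross-rank check above boils down to comparing len(torch._dynamo.utils.get_compilation_metrics()) gathered from every rank. A single-process sketch of the local half of that check (CPU-only, illustrative):

import torch
import torch._dynamo
import torch._dynamo.utils

@torch.compile(backend="eager")
def f(x, n):
    return x + torch.ones(n).sum()

for n in (3, 3, 7):  # the repeated 3 should not trigger a fresh compile
    f(torch.randn(5), n)

metrics = torch._dynamo.utils.get_compilation_metrics()
print(len(metrics))  # local count of compile events; the test gathers and compares this across ranks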
torch
test/distributed/test_fake_pg.py
tearDown
def tearDown(self): super().tearDown() dist.destroy_process_group()
import sys import unittest import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._tensor import DeviceMesh, init_device_mesh, Shard from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.fx.experimental.proxy_tensor import make_fx from torch.testing import FileCheck from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule from torch.testing._internal.distributed.fake_pg import FakeStore HAS_CUDA = torch.cuda.is_available() class TestFakePG(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_fake_pg.py
test_all_reduce
def test_all_reduce(self): store = FakeStore() dist.init_process_group(backend="fake", rank=1, world_size=2, store=store) output = torch.ones(3, 3) * dist.get_rank() dist.all_reduce(output) self.assertEqual(tuple(output.shape), (3, 3))
import sys import unittest import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._tensor import DeviceMesh, init_device_mesh, Shard from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.fx.experimental.proxy_tensor import make_fx from torch.testing import FileCheck from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule from torch.testing._internal.distributed.fake_pg import FakeStore HAS_CUDA = torch.cuda.is_available() class TestFakePG(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_fake_pg.py
test_allgather
def test_allgather(self): store = FakeStore() dist.init_process_group(backend="fake", rank=1, world_size=2, store=store) input_tensor = torch.ones(3, 3) * dist.get_rank() output_tensors = [torch.empty_like(input_tensor) for _ in range(2)] dist.all_gather(output_tensors, input_tensor) for _, out_tensor in enumerate(output_tensors): self.assertEqual(tuple(out_tensor.shape), (3, 3))
import sys import unittest import torch import torch.distributed as dist import torch.distributed._functional_collectives as funcol import torch.nn as nn from torch.distributed._tensor import DeviceMesh, init_device_mesh, Shard from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, ) from torch.fx.experimental.proxy_tensor import make_fx from torch.testing import FileCheck from torch.testing._internal.common_utils import run_tests, TestCase from torch.testing._internal.distributed._tensor.common_dtensor import MLPModule from torch.testing._internal.distributed.fake_pg import FakeStore HAS_CUDA = torch.cuda.is_available() class TestFakePG(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_dynamo_distributed.py
test_fsdp_skip_guards
def test_fsdp_skip_guards(self): """ It's currently difficult to test dynamo guards. Most guards tests are indirect: modify something and observe that the guard in question failed. In this case, since the FSDP guards were already deemed useless and skipping them is expected to have no practical effect, it's pretty contrived to even try to make those guards fail. Instead, we observe the 'guard source' printed by dynamo's comptime print_guards function. Note: comptime prints the guards before the time they get installed or not installed, so in both cases (skip or no skip) the same guards get printed. The difference is that in the skip case, they show up with a special 'guard source' which will cause them not to be installed. So all we check for is the expected guard source 'local_fsdp_module'. """ global GUARDS_FILE GUARDS_FILE = StringIO() for skip_guards, expected_guard_source in ( (True, "local_fsdp_module"), (False, "local_unspecialized_nn_module"), ): torch._dynamo.reset() class ToyModel(nn.Module): def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5): super().__init__() self.net = nn.Sequential( *[nn.Linear(in_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, out_feat), nn.ReLU()] ) def forward(self, inputs): out = self.net(inputs) @comptime def _(ctx): ctx.print_guards(file=GUARDS_FILE) return out device = f"cuda:{self.rank}" m = ToyModel( in_feat=10, hidden_feat=5000, out_feat=5, ).to(device) inputs = torch.rand(20, 10).to(device) m.apply(init_weights) correct_outputs = m(inputs) fsdp_m = FSDP(m, use_orig_params=True) with torch._dynamo.config.patch(skip_fsdp_guards=skip_guards): opt_m = torch._dynamo.optimize("aot_eager")(fsdp_m) outputs = opt_m(inputs) # far from an exhaustive check of all the expected guards, just check a couple of them. FileCheck().check("""local "L['self']" TYPE_MATCH""").check( f"""{expected_guard_source} "L['self']._modules['net']" TYPE_MATCH""" ).check( f"""{expected_guard_source} "L['self']._modules['net']._modules['0']" TYPE_MATCH""" ).run( GUARDS_FILE.getvalue() ) self.assertTrue(same(correct_outputs, outputs))
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache @requires_nccl() @requires_cuda class TestSingleProc(DynamoDistributedSingleProcTestCase): from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
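The guard-source assertion in the test above is built on comptime.print_guards, which dumps Dynamo's guards for the current frame at compile time. A minimal sketch of that mechanism without FSDP or CUDA (illustrative, assuming an eager backend suffices):

from io import StringIO

import torch
from torch._dynamo.comptime import comptime

buf = StringIO()

@torch.compile(backend="eager")
def fn(x):
    y = x + 1

    @comptime
    def _(ctx):
        ctx.print_guards(file=buf)  # executes during compilation, not at runtime

    return y

fn(torch.randn(3))
print(buf.getvalue())  # each guard line is prefixed with its guard source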
torch
test/distributed/test_dynamo_distributed.py
__init__
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5): super().__init__() self.net = nn.Sequential( *[nn.Linear(in_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, out_feat), nn.ReLU()] )
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None): super().__init__() self.ctx_manager = ctx_manager self.net = nn.Sequential( *[nn.Linear(in_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, out_feat), nn.ReLU()] )
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
forward
def forward(self, inputs): return self.net(inputs)
def forward(self, inputs): if self.ctx_manager is not None: with self.ctx_manager(): return self.net(inputs) else: return self.net(inputs)
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
_
def _(ctx): ctx.print_guards(file=GUARDS_FILE) return out
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_dynamo_distributed.py
test_fsdp_skip_register_attr_or_module
def test_fsdp_skip_register_attr_or_module(self): """ ensure FSDP modules are not registered as attributes in the fx graph; see `not source.guard_source().is_fsdp_module()` before calling `register_attr_or_module` in variables/builder.py """ class ToyModel(nn.Module): def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5): super().__init__() self.net = nn.Sequential( *[nn.Linear(in_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()] ) def forward(self, inputs): out = self.net(inputs) return out torch._dynamo.reset() device = f"cuda:{self.rank}" m = ToyModel( in_feat=10, hidden_feat=5000, out_feat=5, ).to(device) inputs = torch.rand(20, 10).to(device) m.apply(init_weights) correct_outputs = m(inputs) fsdp_m = FSDP(m, use_orig_params=True) def debug_compiler(gm, _): for node in gm.graph.nodes: if node.op == "get_attr": for name in [ "l__self___net_0_weight", "l__self___net_0_bias", "l__self___net_2_weight", "l__self___net_2_bias", ]: self.assertFalse( name in node.name, f"FSDP module {name} should not be registered as attributes", ) return gm opt_m = torch._dynamo.optimize(backend=debug_compiler)(fsdp_m) outputs = opt_m(inputs) self.assertTrue(same(correct_outputs, outputs))
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache @requires_nccl() @requires_cuda class TestSingleProc(DynamoDistributedSingleProcTestCase): from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
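The debug_compiler in the test above is a custom Dynamo backend that only inspects the FX graph before running it. A stripped-down, CPU-only sketch of the same idea (names here are made up; no FSDP involved):

import torch
import torch.nn as nn

def inspect_backend(gm, example_inputs):
    # List whatever get_attr targets Dynamo placed in the graph (may be empty,
    # depending on how module attributes were captured for this frame).
    print([n.target for n in gm.graph.nodes if n.op == "get_attr"])
    return gm  # returning the GraphModule itself just runs the captured graph eagerly

model = nn.Sequential(nn.Linear(10, 10), nn.ReLU())
opt_model = torch.compile(model, backend=inspect_backend)
opt_model(torch.randn(2, 10))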
torch
test/distributed/test_dynamo_distributed.py
__init__
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5): super().__init__() self.net = nn.Sequential( *[nn.Linear(in_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, out_feat), nn.ReLU()] )
def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None): super().__init__() self.ctx_manager = ctx_manager self.net = nn.Sequential( *[nn.Linear(in_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, out_feat), nn.ReLU()] )
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
forward
def forward(self, inputs): return self.net(inputs)
def forward(self, inputs): if self.ctx_manager is not None: with self.ctx_manager(): return self.net(inputs) else: return self.net(inputs)
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
debug_compiler
def debug_compiler(gm, _): for node in gm.graph.nodes: if node.op == "get_attr": for name in [ "l__self___net_0_weight", "l__self___net_0_bias", "l__self___net_2_weight", "l__self___net_2_bias", ]: self.assertFalse( name in node.name, f"FSDP module {name} should not be registered as attributes", ) return gm opt_m = torch._dynamo.optimize(backend=debug_compiler)(fsdp_m) outputs = opt_m(inputs) self.assertTrue(same(correct_outputs, outputs))
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_functional_api.py
test_reduce_scatter_into_tensor_coalesced
def test_reduce_scatter_into_tensor_coalesced(self, device): if device == "cuda": if torch.cuda.device_count() < self.world_size: self.skipTest("Not enough CUDA devices") torch.cuda.set_device(dist.get_rank()) tensors = [ torch.ones([4], dtype=torch.int64, device=device), torch.ones([4], dtype=torch.int64, device=device) + 1, ] mesh = dt.DeviceMesh(device, torch.arange(4)) res = ft_c.reduce_scatter_tensor_coalesced(tensors, "sum", [0, 0], mesh) self.assertEqual(2, len(res)) self.assertEqual(torch.tensor([4], device=device), res[0]) self.assertEqual(torch.tensor([8], device=device), res[1])
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) @instantiate_parametrized_tests class TestTraceableCollectives(MultiThreadedTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_functional_api.py
test_all_reduce
def test_all_reduce(self): x = torch.rand((2, 3, 4), device="meta") out = ft_c.all_reduce(x, "sum", "0") self.assertEqual(x.size(), out.size())
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) class TestMetaCollectives(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_functional_api.py
world_size
def world_size(self): return 4
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) class TestExpand(MultiThreadedTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_functional_api.py
setUp
def setUp(self): super().setUp() self._spawn_threads()
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) class TestExpand(MultiThreadedTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_functional_api.py
test_all_reduce
def test_all_reduce(self): x = torch.rand((2, 3, 4), device="meta") out = ft_c.all_reduce(x, "sum", "0") self.assertEqual(x.size(), out.size())
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) class TestMetaCollectives(TestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_dynamo_distributed.py
forward
def forward(self, inputs): return self.net(inputs)
def forward(self, inputs): if self.ctx_manager is not None: with self.ctx_manager(): return self.net(inputs) else: return self.net(inputs)
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
get_model
def get_model(device, bsz=20, in_feat=10, hidden_feat=5000, out_feat=5): m = ToyModel(in_feat=in_feat, hidden_feat=hidden_feat, out_feat=out_feat).to(device) m.apply(init_weights) inputs = torch.rand(bsz, in_feat).to(device) outputs = m(inputs) return m, inputs, outputs
def get_model( device, bsz=20, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None ): m = ToyModel( in_feat=in_feat, hidden_feat=hidden_feat, out_feat=out_feat, ctx_manager=ctx_manager, ).to(device) m.apply(init_weights) inputs = torch.rand(bsz, in_feat).to(device) outputs = m(inputs) return m, inputs, outputs class MutatingModel(nn.Module): def __init__(self, in_feat=10, hidden_feat=5000, out_feat=5, ctx_manager=None): super().__init__() self.ctx_manager = ctx_manager self.net = nn.Sequential( *[nn.Linear(in_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, hidden_feat), nn.ReLU()] + [nn.Linear(hidden_feat, out_feat), nn.ReLU()] ) self.state = 1 def forward(self, inputs): self.state = 2 return self.net(inputs) * self.state
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
opt_fn
def opt_fn(inputs):
    return ddp_m(inputs)

opt_outputs = opt_fn(inputs)
self.assertTrue(same(correct_outputs, opt_outputs))
self.assertEqual(check_splits_compiler.compiler_called, 3)

# ensure compatibilty with dynamo explain
explain_out = torch._dynamo.explain(ddp_m, inputs)
break_reasons = explain_out[4]
self.assertEqual(len(break_reasons), 3)
self.assertTrue(all(["DDPOptimizer" in r.reason for r in break_reasons]))
def opt_fn(inputs):
    return ddp_m(inputs)

opt_outputs = opt_fn(inputs)
self.assertTrue(same(correct_outputs, opt_outputs))
self.assertEqual(check_splits_compiler.compiler_called, 3)

# ensure compatibility with dynamo explain
explain_out = torch._dynamo.explain(ddp_m)(inputs)
break_reasons = explain_out.break_reasons
self.assertEqual(len(break_reasons), 3)
self.assertTrue(all("DDPOptimizer" in r.reason for r in break_reasons))
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
forward
def forward(self, inputs):
    return self.net(inputs)
def forward(self, inputs):
    if self.ctx_manager is not None:
        with self.ctx_manager():
            return self.net(inputs)
    else:
        return self.net(inputs)
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
opt_fn
def opt_fn(inputs):
    return ddp_m(inputs)

opt_outputs = opt_fn(inputs)
self.assertTrue(same(correct_outputs, opt_outputs))
self.assertEqual(check_splits_compiler.compiler_called, 3)

# ensure compatibilty with dynamo explain
explain_out = torch._dynamo.explain(ddp_m, inputs)
break_reasons = explain_out[4]
self.assertEqual(len(break_reasons), 3)
self.assertTrue(all(["DDPOptimizer" in r.reason for r in break_reasons]))
def opt_fn(inputs):
    return ddp_m(inputs)

opt_outputs = opt_fn(inputs)
self.assertTrue(same(correct_outputs, opt_outputs))
self.assertEqual(check_splits_compiler.compiler_called, 3)

# ensure compatibility with dynamo explain
explain_out = torch._dynamo.explain(ddp_m)(inputs)
break_reasons = explain_out.break_reasons
self.assertEqual(len(break_reasons), 3)
self.assertTrue(all("DDPOptimizer" in r.reason for r in break_reasons))
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
opt_fn
def opt_fn(inputs):
    return ddp_m(inputs)

opt_outputs = opt_fn(inputs)
self.assertTrue(same(correct_outputs, opt_outputs))
self.assertEqual(check_splits_compiler.compiler_called, 3)

# ensure compatibilty with dynamo explain
explain_out = torch._dynamo.explain(ddp_m, inputs)
break_reasons = explain_out[4]
self.assertEqual(len(break_reasons), 3)
self.assertTrue(all(["DDPOptimizer" in r.reason for r in break_reasons]))
def opt_fn(inputs):
    return ddp_m(inputs)

opt_outputs = opt_fn(inputs)
self.assertTrue(same(correct_outputs, opt_outputs))
self.assertEqual(check_splits_compiler.compiler_called, 3)

# ensure compatibility with dynamo explain
explain_out = torch._dynamo.explain(ddp_m)(inputs)
break_reasons = explain_out.break_reasons
self.assertEqual(len(break_reasons), 3)
self.assertTrue(all("DDPOptimizer" in r.reason for r in break_reasons))
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
opt_fn
def opt_fn(inputs):
    return ddp_m(inputs)

opt_outputs = opt_fn(inputs)
self.assertTrue(same(correct_outputs, opt_outputs))
self.assertEqual(check_splits_compiler.compiler_called, 3)

# ensure compatibilty with dynamo explain
explain_out = torch._dynamo.explain(ddp_m, inputs)
break_reasons = explain_out[4]
self.assertEqual(len(break_reasons), 3)
self.assertTrue(all(["DDPOptimizer" in r.reason for r in break_reasons]))
def opt_fn(inputs):
    return ddp_m(inputs)

opt_outputs = opt_fn(inputs)
self.assertTrue(same(correct_outputs, opt_outputs))
self.assertEqual(check_splits_compiler.compiler_called, 3)

# ensure compatibility with dynamo explain
explain_out = torch._dynamo.explain(ddp_m)(inputs)
break_reasons = explain_out.break_reasons
self.assertEqual(len(break_reasons), 3)
self.assertTrue(all("DDPOptimizer" in r.reason for r in break_reasons))
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
opt_fn
def opt_fn(inputs):
    return ddp_m(inputs)

opt_outputs = opt_fn(inputs)
self.assertTrue(same(correct_outputs, opt_outputs))
self.assertEqual(check_splits_compiler.compiler_called, 3)

# ensure compatibilty with dynamo explain
explain_out = torch._dynamo.explain(ddp_m, inputs)
break_reasons = explain_out[4]
self.assertEqual(len(break_reasons), 3)
self.assertTrue(all(["DDPOptimizer" in r.reason for r in break_reasons]))
def opt_fn(inputs):
    return ddp_m(inputs)

opt_outputs = opt_fn(inputs)
self.assertTrue(same(correct_outputs, opt_outputs))
self.assertEqual(check_splits_compiler.compiler_called, 3)

# ensure compatibility with dynamo explain
explain_out = torch._dynamo.explain(ddp_m)(inputs)
break_reasons = explain_out.break_reasons
self.assertEqual(len(break_reasons), 3)
self.assertTrue(all("DDPOptimizer" in r.reason for r in break_reasons))
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
test_higher_order_op
def test_higher_order_op(self):
    from torch.utils.checkpoint import checkpoint

    N = 1000

    class InnerModule(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.linear1 = torch.nn.Linear(N, N)
            self.linear2 = torch.nn.Linear(N, N)

        def forward(self, x):
            a = self.linear1(x)
            a = self.linear2(a)
            return a

    class MockModule(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.inner_mod1 = InnerModule()
            self.inner_mod2 = InnerModule()

        def forward(self, x):
            a = checkpoint(self.inner_mod1, x, use_reentrant=False)
            a = torch.cos(a)
            a = checkpoint(self.inner_mod2, a, use_reentrant=False)
            a = torch.cos(a)
            return a

    mod = MockModule().cuda()
    mod = DDP(mod, bucket_cap_mb=1)
    x = torch.randn(N, N, device="cuda", requires_grad=True)
    args = (x,)

    backend = "aot_eager"
    cnt = torch._dynamo.testing.CompileCounterWithBackend(backend)

    with self.assertRaisesRegex(
        torch._dynamo.exc.BackendCompilerFailed,
        "DDPOptimizer backend: Found a higher order op in the graph",
    ):
        torch.compile(mod, backend=cnt)(*args)
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache @requires_nccl() @requires_cuda class TestSingleProc(DynamoDistributedSingleProcTestCase): from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_dynamo_distributed.py
forward
def forward(self, inputs):
    return self.net(inputs)
def forward(self, inputs):
    if self.ctx_manager is not None:
        with self.ctx_manager():
            return self.net(inputs)
    else:
        return self.net(inputs)
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_dynamo_distributed.py
forward
def forward(self, inputs):
    return self.net(inputs)
def forward(self, inputs):
    if self.ctx_manager is not None:
        with self.ctx_manager():
            return self.net(inputs)
    else:
        return self.net(inputs)
import copy import functools import random import unittest from unittest.mock import patch import numpy as np import torch import torch._dynamo from torch._dynamo.backends.distributed import DDPOptimizer import torch._dynamo.test_case from contextlib import contextmanager from torch import nn from torch._dynamo import config from torch._dynamo.utils import same from torch._dynamo.testing import collect_results from torch._inductor.utils import has_triton from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy from torch.nn.parallel import DistributedDataParallel as DDP from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.testing._internal.common_distributed import ( DynamoDistributedSingleProcTestCase, DynamoDistributedMultiProcTestCase, import_transformers_or_skip, skip_if_lt_x_gpu, requires_nccl, _dynamo_dist_per_rank_init, ) import torch._dynamo.logging class ToyModel(nn.Module): from transformers import BertConfig, AutoModelForMaskedLM from transformers.models.bert.modeling_bert import BertLayer from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch._dynamo.test_case import run_tests
import contextlib import copy import functools import random import unittest from contextlib import contextmanager from datetime import timedelta from io import StringIO from typing import List from unittest.mock import patch import numpy as np import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed as dist import torch.optim as optim from torch import nn from torch._C import FileCheck from torch._dynamo import config from torch._dynamo.backends.distributed import DDPOptimizer from torch._dynamo.comptime import comptime from torch._dynamo.testing import collect_results from torch._dynamo.utils import same from torch._higher_order_ops.wrap import tag_activation_checkpoint from torch.distributed._functional_collectives import _maybe_wrap_tensor from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.wrap import ( lambda_auto_wrap_policy, transformer_auto_wrap_policy, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, ) from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, import_transformers_or_skip, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import requires_cuda from torch.utils._triton import has_triton class ToyModel(nn.Module): from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from transformers import AutoModelForMaskedLM, BertConfig from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( apply_activation_checkpointing, checkpoint_wrapper, CheckpointImpl, ) from torch._dynamo.utils import counters from transformers.models.bert.modeling_bert import BertLayer from torch._dynamo.comptime import comptime from torch._dynamo.utils import counters from torch._inductor.utils import fresh_inductor_cache from torch.nn.parallel import DistributedDataParallel as DDP from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.checkpoint import checkpoint from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
modified
torch
test/distributed/test_functional_api.py
world_size
def world_size(self):
    return 4
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) class TestExpand(MultiThreadedTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_functional_api.py
process_group
def process_group(self):
    return dist.group.WORLD
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO WORLD_SIZE = 2 class TestCollectivesWithNCCL(MultiProcessTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_functional_api.py
dist_init
def dist_init(self):
    dist.init_process_group(
        backend=BACKEND,
        world_size=self.world_size,
        rank=self.rank,
        init_method=f"file://{self.file_name}",
    )

    # set device for nccl pg for collectives
    if BACKEND == "nccl":
        torch.cuda.set_device(self.rank)
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO WORLD_SIZE = 2 class TestCollectivesWithNCCL(MultiProcessTestCase):
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_functional_api.py
allreduce
def allreduce(t, pg):
    return ft_c.all_reduce(t, "sum", pg)

compiled_allreduce = torch.compile(allreduce, fullgraph=True)
compiled_allreduce(torch.randn(8, device=self.device), self.process_group)
import os import sys import unittest from functools import partial, wraps import torch import torch.distributed as dist import torch.distributed._functional_collectives as ft_c import torch.distributed._tensor as dt import torch.distributed.distributed_c10d as c10d from functorch import make_fx from torch._inductor.utils import run_and_get_code from torch.testing import FileCheck from torch.testing._internal.distributed.fake_pg import FakeStore from torch.utils._triton import has_triton from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, requires_nccl, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, run_tests, TestCase, ) BACKEND = dist.Backend.NCCL if torch.cuda.is_available() else dist.Backend.GLOO WORLD_SIZE = 2
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_inductor_collectives.py
matmul_cat_col
def matmul_cat_col(a, b, c, d, e, f, *, tag, ranks, group_size):
    x = torch.matmul(a, b)
    y = torch.matmul(c, d)
    z = torch.cat((x, y))
    ar = torch.ops.c10d_functional.all_reduce(z, "sum", tag, ranks, group_size)
    g = torch.matmul(e, f)
    ar = torch.ops.c10d_functional.wait_tensor(ar)
    out = torch.add(ar, g.repeat(2, 1))
    return (out,)
import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed._functional_collectives as _functional_collectives from torch._C import FileCheck from torch._dynamo.testing import CompileCounter from torch._dynamo.utils import same from torch._inductor.compile_fx import compile_fx as inductor_compile_fx from torch._inductor.utils import run_and_get_triton_code from torch.distributed.distributed_c10d import GroupMember from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, requires_cuda, ) from torch.utils._triton import has_triton import torch.distributed as dist from torch.distributed._functional_collectives import REDUCE_OP_TO_STR from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_inductor_collectives.py
compile
def compile(func, example_inputs):
    graph = make_fx(func)(*example_inputs)
    return inductor_compile_fx(graph, example_inputs)

with _dynamo_dist_per_rank_init(self.rank, self.world_size):
    example = functools.partial(
        example,
        **self.get_world_trs(),
    )
    t = torch.randn(4, 4, device="cuda")
    inputs = (t if self.rank == 0 else torch.zeros(4, 4, device="cuda"), 0)
    eager_out = example(*inputs)
    self.assertTrue(same(t, eager_out))
    compiled_func = compile(example, inputs)
    compiled_out = compiled_func(*inputs)
    self.assertTrue(same(eager_out, compiled_out))
import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed._functional_collectives as _functional_collectives from torch._C import FileCheck from torch._dynamo.testing import CompileCounter from torch._dynamo.utils import same from torch._inductor.compile_fx import compile_fx as inductor_compile_fx from torch._inductor.utils import run_and_get_triton_code from torch.distributed.distributed_c10d import GroupMember from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, requires_cuda, ) from torch.utils._triton import has_triton import torch.distributed as dist from torch.distributed._functional_collectives import REDUCE_OP_TO_STR from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_inductor_collectives.py
test_allreduce_inductor_cudagraph_trees
def test_allreduce_inductor_cudagraph_trees(self):
    """
    Tests whether cudagraph trees support all_reduce from nccl
    """
    import torch.distributed as dist

    # dist.all_reduce is an inplace op in eager mode but a functionanlized op in compiled mode.
    # so we define eager_func and func separately for the same semantic.
    def eager_func(x):
        y = x * x
        dist.all_reduce(y, op=dist.ReduceOp.SUM)
        x = torch.nn.functional.silu(x)
        return x * y

    def func(x):
        y = x * x
        y = dist.all_reduce(y, op=dist.ReduceOp.SUM)
        x = torch.nn.functional.silu(x)
        return x * y

    options = {
        "triton.cudagraphs": True,
        "triton.cudagraph_trees": True,
    }

    with _dynamo_dist_per_rank_init(self.rank, self.world_size):
        compiled_func = torch.compile(
            func, backend="inductor", fullgraph=True, options=options, dynamic=None
        )

        for nelem in [1024, 2048, 4096]:
            x = torch.randn(nelem, device="cuda", dtype=torch.bfloat16)
            golden_out = eager_func(x)

            for _ in range(3):
                compiled_out = compiled_func(x)
                self.assertEqual(golden_out, compiled_out)
import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed._functional_collectives as _functional_collectives from torch._C import FileCheck from torch._dynamo.testing import CompileCounter from torch._dynamo.utils import same from torch._inductor.compile_fx import compile_fx as inductor_compile_fx from torch._inductor.utils import run_and_get_triton_code from torch.distributed.distributed_c10d import GroupMember from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, requires_cuda, ) from torch.utils._triton import has_triton @requires_nccl() class TestCollectivesMultiProc(DynamoDistributedMultiProcTestCase): import torch.distributed as dist from torch.distributed._functional_collectives import REDUCE_OP_TO_STR from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_inductor_collectives.py
eager_func
def eager_func(x):
    y = x * x
    dist.all_reduce(y, op=dist.ReduceOp.SUM)
    x = torch.nn.functional.silu(x)
    return x * y
import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed._functional_collectives as _functional_collectives from torch._C import FileCheck from torch._dynamo.testing import CompileCounter from torch._dynamo.utils import same from torch._inductor.compile_fx import compile_fx as inductor_compile_fx from torch._inductor.utils import run_and_get_triton_code from torch.distributed.distributed_c10d import GroupMember from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, requires_cuda, ) from torch.utils._triton import has_triton import torch.distributed as dist from torch.distributed._functional_collectives import REDUCE_OP_TO_STR from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_inductor_collectives.py
func
def func(x):
    y = x * x
    y = dist.all_reduce(y, op=dist.ReduceOp.SUM)
    x = torch.nn.functional.silu(x)
    return x * y

options = {
    "triton.cudagraphs": True,
    "triton.cudagraph_trees": True,
}

with _dynamo_dist_per_rank_init(self.rank, self.world_size):
    compiled_func = torch.compile(
        func, backend="inductor", fullgraph=True, options=options, dynamic=None
    )

    for nelem in [1024, 2048, 4096]:
        x = torch.randn(nelem, device="cuda", dtype=torch.bfloat16)
        golden_out = eager_func(x)

        for _ in range(3):
            compiled_out = compiled_func(x)
            self.assertEqual(golden_out, compiled_out)
import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed._functional_collectives as _functional_collectives from torch._C import FileCheck from torch._dynamo.testing import CompileCounter from torch._dynamo.utils import same from torch._inductor.compile_fx import compile_fx as inductor_compile_fx from torch._inductor.utils import run_and_get_triton_code from torch.distributed.distributed_c10d import GroupMember from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, requires_cuda, ) from torch.utils._triton import has_triton import torch.distributed as dist from torch.distributed._functional_collectives import REDUCE_OP_TO_STR from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added
torch
test/distributed/test_inductor_collectives.py
eager_func
def eager_func(x):
    y = x * x
    dist.all_reduce(y, op=dist.ReduceOp.SUM)
    x = torch.nn.functional.silu(x)
    return x * y
import functools import unittest from unittest.mock import patch import torch import torch._dynamo import torch._dynamo.logging import torch._dynamo.test_case import torch.distributed._functional_collectives as _functional_collectives from torch._C import FileCheck from torch._dynamo.testing import CompileCounter from torch._dynamo.utils import same from torch._inductor.compile_fx import compile_fx as inductor_compile_fx from torch._inductor.utils import run_and_get_triton_code from torch.distributed.distributed_c10d import GroupMember from torch.fx.experimental.proxy_tensor import make_fx from torch.testing._internal.common_distributed import ( _dynamo_dist_per_rank_init, DynamoDistributedMultiProcTestCase, DynamoDistributedSingleProcTestCase, requires_nccl, skip_if_lt_x_gpu, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, parametrize, requires_cuda, ) from torch.utils._triton import has_triton import torch.distributed as dist from torch.distributed._functional_collectives import REDUCE_OP_TO_STR from torch._dynamo.test_case import run_tests
c263bd43e8e8502d4726643bc6fd046f0130ac0e
32f585d9346e316e554c8d9bf7548af9f62141fc
added